Merge pull request #11011 from Vexu/stage2

stage2: tuple/slice mul/cat
Commit 2dd5e8b6f8 by Andrew Kelley, 2022-02-28 15:39:43 -05:00 (committed via GitHub)
17 changed files with 331 additions and 70 deletions
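In short: this PR teaches the self-hosted (stage2) compiler to compile `++` and `**` on tuples and on comptime-known slices, and implements the `@frameAddress` builtin end to end (Sema, AIR, and every backend). A minimal sketch of code the change is meant to accept under stage2 (illustrative example, not taken from the PR):

    const std = @import("std");
    const expect = std.testing.expect;

    test "tuple/slice mul/cat" {
        const cat = .{ 1, 2 } ++ .{3}; // tuple concatenation: 3 fields
        const mul = .{ 1, 2 } ** 2; // tuple multiplication: 4 fields
        try expect(@typeInfo(@TypeOf(cat)).Struct.fields.len == 3);
        try expect(@typeInfo(@TypeOf(mul)).Struct.fields.len == 4);

        const s: []const u8 = "ab"; // comptime-known slice
        try expect(std.mem.eql(u8, s ++ "c", "abc"));
    }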

View File

@@ -738,7 +738,7 @@ pub fn HashMapUnmanaged(
value: V,
};
-const Header = packed struct {
+const Header = struct {
values: [*]V,
keys: [*]K,
capacity: Size,
@@ -932,7 +932,7 @@ pub fn HashMapUnmanaged(
}
fn header(self: *const Self) *Header {
-return @ptrCast(*Header, @ptrCast([*]Header, self.metadata.?) - 1);
+return @ptrCast(*Header, @ptrCast([*]Header, @alignCast(@alignOf(Header), self.metadata.?)) - 1);
}
fn keys(self: *const Self) [*]K {

View File

@@ -341,9 +341,15 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
const slot_index = @intCast(SlotIndex, used_bits_byte * 8 + bit_index);
const stack_trace = bucketStackTrace(bucket, size_class, slot_index, .alloc);
const addr = bucket.page + slot_index * size_class;
-log.err("memory address 0x{x} leaked: {s}", .{
-@ptrToInt(addr), stack_trace,
-});
+if (builtin.zig_backend == .stage1) {
+log.err("memory address 0x{x} leaked: {s}", .{
+@ptrToInt(addr), stack_trace,
+});
+} else { // TODO
+log.err("memory address 0x{x} leaked", .{
+@ptrToInt(addr),
+});
+}
leaks = true;
}
if (bit_index == math.maxInt(u3))
@@ -372,9 +378,16 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
var it = self.large_allocations.valueIterator();
while (it.next()) |large_alloc| {
if (config.retain_metadata and large_alloc.freed) continue;
-log.err("memory address 0x{x} leaked: {s}", .{
-@ptrToInt(large_alloc.bytes.ptr), large_alloc.getStackTrace(.alloc),
-});
+const stack_trace = large_alloc.getStackTrace(.alloc);
+if (builtin.zig_backend == .stage1) {
+log.err("memory address 0x{x} leaked: {s}", .{
+@ptrToInt(large_alloc.bytes.ptr), stack_trace,
+});
+} else { // TODO
+log.err("memory address 0x{x} leaked", .{
+@ptrToInt(large_alloc.bytes.ptr),
+});
+}
leaks = true;
}
return leaks;

View File

@@ -46,9 +46,10 @@ pub fn main() void {
var leaks: usize = 0;
for (test_fn_list) |test_fn, i| {
-if (builtin.zig_backend != .stage2_llvm) std.testing.allocator_instance = .{};
+const gpa_works = builtin.zig_backend == .stage1 or builtin.os.tag != .macos;
+if (gpa_works) std.testing.allocator_instance = .{};
defer {
-if (builtin.zig_backend != .stage2_llvm and std.testing.allocator_instance.deinit()) {
+if (gpa_works and std.testing.allocator_instance.deinit()) {
leaks += 1;
}
}

View File

@@ -218,6 +218,9 @@ pub const Inst = struct {
/// Yields the return address of the current function.
/// Uses the `no_op` field.
ret_addr,
+/// Implements @frameAddress builtin.
+/// Uses the `no_op` field.
+frame_addr,
/// Function call.
/// Result type is the return type of the function being called.
/// Uses the `pl_op` field with the `Call` payload. operand is the callee.
@@ -939,6 +942,7 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type {
.ptrtoint,
.slice_len,
.ret_addr,
+.frame_addr,
=> return Type.initTag(.usize),
.bool_to_int => return Type.initTag(.u1),

View File

@@ -317,6 +317,7 @@ fn analyzeInst(
.unreach,
.fence,
.ret_addr,
+.frame_addr,
=> return trackOperands(a, new_set, inst, main_tomb, .{ .none, .none, .none }),
.not,

View File

@@ -8059,6 +8059,79 @@ fn zirBitNot(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
return block.addTyOp(.not, operand_type, operand);
}
+fn analyzeTupleCat(
+sema: *Sema,
+block: *Block,
+src_node: i32,
+lhs: Air.Inst.Ref,
+rhs: Air.Inst.Ref,
+) CompileError!Air.Inst.Ref {
+const lhs_ty = sema.typeOf(lhs);
+const rhs_ty = sema.typeOf(rhs);
+const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = src_node };
+const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = src_node };
+const lhs_tuple = lhs_ty.tupleFields();
+const rhs_tuple = rhs_ty.tupleFields();
+const dest_fields = lhs_tuple.types.len + rhs_tuple.types.len;
+if (dest_fields == 0) {
+return sema.addConstant(Type.initTag(.empty_struct_literal), Value.initTag(.empty_struct_value));
+}
+const final_len = try sema.usizeCast(block, rhs_src, dest_fields);
+const types = try sema.arena.alloc(Type, final_len);
+const values = try sema.arena.alloc(Value, final_len);
+const opt_runtime_src = rs: {
+var runtime_src: ?LazySrcLoc = null;
+for (lhs_tuple.types) |ty, i| {
+types[i] = ty;
+values[i] = lhs_tuple.values[i];
+const operand_src = lhs_src; // TODO better source location
+if (values[i].tag() == .unreachable_value) {
+runtime_src = operand_src;
+}
+}
+const offset = lhs_tuple.types.len;
+for (rhs_tuple.types) |ty, i| {
+types[i + offset] = ty;
+values[i + offset] = rhs_tuple.values[i];
+const operand_src = rhs_src; // TODO better source location
+if (rhs_tuple.values[i].tag() == .unreachable_value) {
+runtime_src = operand_src;
+}
+}
+break :rs runtime_src;
+};
+const tuple_ty = try Type.Tag.tuple.create(sema.arena, .{
+.types = types,
+.values = values,
+});
+const runtime_src = opt_runtime_src orelse {
+const tuple_val = try Value.Tag.@"struct".create(sema.arena, values);
+return sema.addConstant(tuple_ty, tuple_val);
+};
+try sema.requireRuntimeBlock(block, runtime_src);
+const element_refs = try sema.arena.alloc(Air.Inst.Ref, final_len);
+for (lhs_tuple.types) |_, i| {
+const operand_src = lhs_src; // TODO better source location
+element_refs[i] = try sema.tupleFieldValByIndex(block, operand_src, lhs, @intCast(u32, i), lhs_ty);
+}
+const offset = lhs_tuple.types.len;
+for (rhs_tuple.types) |_, i| {
+const operand_src = rhs_src; // TODO better source location
+element_refs[i + offset] =
+try sema.tupleFieldValByIndex(block, operand_src, rhs, @intCast(u32, i), rhs_ty);
+}
+return block.addAggregateInit(tuple_ty, element_refs);
+}
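The shape of `analyzeTupleCat` above: the result tuple's field types and values are the two operands' fields back to back; if every field is comptime-known the result is a constant, otherwise any `unreachable_value` (runtime-known) field forces a runtime `aggregate_init` built from `tupleFieldValByIndex`. A hedged sketch of both paths (example code, not from the PR):

    const expect = @import("std").testing.expect;

    test "tuple ++ at comptime and at runtime" {
        const ct = .{ 1, 2 } ++ .{3}; // all fields comptime-known: constant result
        try expect(ct[2] == 3);

        var x: u32 = 7; // runtime-known field
        const rt = .{x} ++ .{ 1, 2 }; // lowered to aggregate_init
        try expect(rt[0] == 7 and rt[2] == 2);
    }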
fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
@@ -8069,12 +8142,17 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const rhs = sema.resolveInst(extra.rhs);
const lhs_ty = sema.typeOf(lhs);
const rhs_ty = sema.typeOf(rhs);
+if (lhs_ty.isTuple() and rhs_ty.isTuple()) {
+return sema.analyzeTupleCat(block, inst_data.src_node, lhs, rhs);
+}
const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node };
-const lhs_info = getArrayCatInfo(lhs_ty) orelse
+const lhs_info = (try sema.getArrayCatInfo(block, lhs_src, lhs)) orelse
return sema.fail(block, lhs_src, "expected array, found '{}'", .{lhs_ty});
-const rhs_info = getArrayCatInfo(rhs_ty) orelse
+const rhs_info = (try sema.getArrayCatInfo(block, rhs_src, rhs)) orelse
return sema.fail(block, rhs_src, "expected array, found '{}'", .{rhs_ty});
if (!lhs_info.elem_type.eql(rhs_info.elem_type)) {
return sema.fail(block, rhs_src, "expected array of type '{}', found '{}'", .{ lhs_info.elem_type, rhs_ty });
@@ -8095,9 +8173,10 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const rhs_len = try sema.usizeCast(block, lhs_src, rhs_info.len);
const final_len = lhs_len + rhs_len;
const final_len_including_sent = final_len + @boolToInt(res_sent != null);
-const is_pointer = lhs_ty.zigTypeTag() == .Pointer;
-const lhs_sub_val = if (is_pointer) (try sema.pointerDeref(block, lhs_src, lhs_val, lhs_ty)).? else lhs_val;
-const rhs_sub_val = if (is_pointer) (try sema.pointerDeref(block, rhs_src, rhs_val, rhs_ty)).? else rhs_val;
+const lhs_single_ptr = lhs_ty.zigTypeTag() == .Pointer and !lhs_ty.isSlice();
+const rhs_single_ptr = rhs_ty.zigTypeTag() == .Pointer and !rhs_ty.isSlice();
+const lhs_sub_val = if (lhs_single_ptr) (try sema.pointerDeref(block, lhs_src, lhs_val, lhs_ty)).? else lhs_val;
+const rhs_sub_val = if (rhs_single_ptr) (try sema.pointerDeref(block, rhs_src, rhs_val, rhs_ty)).? else rhs_val;
var anon_decl = try block.startAnonDecl(LazySrcLoc.unneeded);
defer anon_decl.deinit();
@@ -8129,7 +8208,7 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
});
const val = try Value.Tag.array.create(anon_decl.arena(), buf);
const decl = try anon_decl.finish(ty, val);
-if (is_pointer) {
+if (lhs_single_ptr or rhs_single_ptr) {
return sema.analyzeDeclRef(decl);
} else {
return sema.analyzeDeclVal(block, .unneeded, decl);
@@ -8142,11 +8221,20 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
}
}
-fn getArrayCatInfo(t: Type) ?Type.ArrayInfo {
+fn getArrayCatInfo(sema: *Sema, block: *Block, src: LazySrcLoc, inst: Air.Inst.Ref) !?Type.ArrayInfo {
+const t = sema.typeOf(inst);
return switch (t.zigTypeTag()) {
.Array => t.arrayInfo(),
.Pointer => blk: {
const ptrinfo = t.ptrInfo().data;
+if (ptrinfo.size == .Slice) {
+const val = try sema.resolveConstValue(block, src, inst);
+return Type.ArrayInfo{
+.elem_type = t.childType(),
+.sentinel = t.sentinel(),
+.len = val.sliceLen(),
+};
+}
if (ptrinfo.pointee_type.zigTypeTag() != .Array) return null;
if (ptrinfo.size != .One) return null;
break :blk ptrinfo.pointee_type.arrayInfo();
@@ -8155,6 +8243,73 @@ fn getArrayCatInfo(t: Type) ?Type.ArrayInfo {
};
}
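Note the new `.Slice` branch: a slice's length is not part of its type, so `getArrayCatInfo` now resolves the operand's value at comptime (`resolveConstValue`) and takes `len` from it; a runtime-known slice operand remains a compile error. Sketch (illustrative):

    test "slice operand of ++ must be comptime-known" {
        const s: []const u8 = "abc"; // comptime-known value supplies len = 3
        try @import("std").testing.expectEqualStrings("abcdef", s ++ "def");
    }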
+fn analyzeTupleMul(
+sema: *Sema,
+block: *Block,
+src_node: i32,
+operand: Air.Inst.Ref,
+factor: u64,
+) CompileError!Air.Inst.Ref {
+const operand_ty = sema.typeOf(operand);
+const operand_tuple = operand_ty.tupleFields();
+const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = src_node };
+const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = src_node };
+const tuple_len = operand_tuple.types.len;
+const final_len_u64 = std.math.mul(u64, tuple_len, factor) catch
+return sema.fail(block, rhs_src, "operation results in overflow", .{});
+if (final_len_u64 == 0) {
+return sema.addConstant(Type.initTag(.empty_struct_literal), Value.initTag(.empty_struct_value));
+}
+const final_len = try sema.usizeCast(block, rhs_src, final_len_u64);
+const types = try sema.arena.alloc(Type, final_len);
+const values = try sema.arena.alloc(Value, final_len);
+const opt_runtime_src = rs: {
+var runtime_src: ?LazySrcLoc = null;
+for (operand_tuple.types) |ty, i| {
+types[i] = ty;
+values[i] = operand_tuple.values[i];
+const operand_src = lhs_src; // TODO better source location
+if (values[i].tag() == .unreachable_value) {
+runtime_src = operand_src;
+}
+}
+var i: usize = 1;
+while (i < factor) : (i += 1) {
+mem.copy(Type, types[tuple_len * i ..], operand_tuple.types);
+mem.copy(Value, values[tuple_len * i ..], operand_tuple.values);
+}
+break :rs runtime_src;
+};
+const tuple_ty = try Type.Tag.tuple.create(sema.arena, .{
+.types = types,
+.values = values,
+});
+const runtime_src = opt_runtime_src orelse {
+const tuple_val = try Value.Tag.@"struct".create(sema.arena, values);
+return sema.addConstant(tuple_ty, tuple_val);
+};
+try sema.requireRuntimeBlock(block, runtime_src);
+const element_refs = try sema.arena.alloc(Air.Inst.Ref, final_len);
+for (operand_tuple.types) |_, i| {
+const operand_src = lhs_src; // TODO better source location
+element_refs[i] = try sema.tupleFieldValByIndex(block, operand_src, operand, @intCast(u32, i), operand_ty);
+}
+var i: usize = 1;
+while (i < factor) : (i += 1) {
+mem.copy(Air.Inst.Ref, element_refs[tuple_len * i ..], element_refs[0..tuple_len]);
+}
+return block.addAggregateInit(tuple_ty, element_refs);
+}
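`analyzeTupleMul` mirrors `analyzeTupleCat`: the operand's fields are repeated `factor` times (with an overflow-checked `tuple_len * factor`), and a zero result length yields the empty tuple. Sketch (illustrative):

    const expect = @import("std").testing.expect;

    test "tuple ** repeats fields in order" {
        const t = .{ 1, 2 } ** 3; // fields: 1, 2, 1, 2, 1, 2
        inline for (t) |x, i| try expect(x == 1 + i % 2);

        const e = .{ 1, 2 } ** 0; // zero factor: empty tuple
        try expect(@typeInfo(@TypeOf(e)).Struct.fields.len == 0);
    }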
fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
@@ -8169,7 +8324,12 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
// In `**` rhs has to be comptime-known, but lhs can be runtime-known
const factor = try sema.resolveInt(block, rhs_src, extra.rhs, Type.usize);
-const mulinfo = getArrayCatInfo(lhs_ty) orelse
+if (lhs_ty.isTuple()) {
+return sema.analyzeTupleMul(block, inst_data.src_node, lhs, factor);
+}
+const mulinfo = (try sema.getArrayCatInfo(block, lhs_src, lhs)) orelse
return sema.fail(block, lhs_src, "expected array, found '{}'", .{lhs_ty});
const final_len_u64 = std.math.mul(u64, mulinfo.len, factor) catch
@@ -8180,7 +8340,8 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const final_len_including_sent = final_len + @boolToInt(mulinfo.sentinel != null);
const lhs_len = try sema.usizeCast(block, lhs_src, mulinfo.len);
-const lhs_sub_val = if (lhs_ty.zigTypeTag() == .Pointer) (try sema.pointerDeref(block, lhs_src, lhs_val, lhs_ty)).? else lhs_val;
+const is_single_ptr = lhs_ty.zigTypeTag() == .Pointer and !lhs_ty.isSlice();
+const lhs_sub_val = if (is_single_ptr) (try sema.pointerDeref(block, lhs_src, lhs_val, lhs_ty)).? else lhs_val;
var anon_decl = try block.startAnonDecl(src);
defer anon_decl.deinit();
@@ -8220,7 +8381,7 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
break :blk try Value.Tag.array.create(anon_decl.arena(), buf);
};
const decl = try anon_decl.finish(final_ty, val);
-if (lhs_ty.zigTypeTag() == .Pointer) {
+if (is_single_ptr) {
return sema.analyzeDeclRef(decl);
} else {
return sema.analyzeDeclVal(block, .unneeded, decl);
@@ -9832,14 +9993,21 @@ fn zirRetAddr(
block: *Block,
extended: Zir.Inst.Extended.InstData,
) CompileError!Air.Inst.Ref {
-const tracy = trace(@src());
-defer tracy.end();
const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) };
try sema.requireRuntimeBlock(block, src);
return try block.addNoOp(.ret_addr);
}
+fn zirFrameAddress(
+sema: *Sema,
+block: *Block,
+extended: Zir.Inst.Extended.InstData,
+) CompileError!Air.Inst.Ref {
+const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) };
+try sema.requireRuntimeBlock(block, src);
+return try block.addNoOp(.frame_addr);
+}
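`@frameAddress` now lowers to a real `frame_addr` AIR instruction (the old TODO stub is deleted further down). Usage is unchanged; a sketch, assuming the frame address of a live function is never zero:

    test "@frameAddress" {
        try @import("std").testing.expect(@frameAddress() != 0);
    }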
fn zirBuiltinSrc(
sema: *Sema,
block: *Block,
@@ -11728,15 +11896,6 @@ fn zirFrame(
return sema.fail(block, src, "TODO: Sema.zirFrame", .{});
}
-fn zirFrameAddress(
-sema: *Sema,
-block: *Block,
-extended: Zir.Inst.Extended.InstData,
-) CompileError!Air.Inst.Ref {
-const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) };
-return sema.fail(block, src, "TODO: Sema.zirFrameAddress", .{});
-}
fn zirAlignOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
@@ -14743,6 +14902,11 @@ fn tupleFieldVal(
tuple_ty, field_name, @errorName(err),
});
};
+if (field_index >= tuple_ty.structFieldCount()) {
+return sema.fail(block, field_name_src, "tuple {} has no such field '{s}'", .{
+tuple_ty, field_name,
+});
+}
return tupleFieldValByIndex(sema, block, src, tuple_byval, field_index, tuple_ty);
}
@@ -19093,6 +19257,7 @@ fn pointerDeref(sema: *Sema, block: *Block, src: LazySrcLoc, ptr_val: Value, ptr
/// Used to convert a u64 value to a usize value, emitting a compile error if the number
/// is too big to fit.
fn usizeCast(sema: *Sema, block: *Block, src: LazySrcLoc, int: u64) CompileError!usize {
+if (@bitSizeOf(u64) <= @bitSizeOf(usize)) return int;
return std.math.cast(usize, int) catch |err| switch (err) {
error.Overflow => return sema.fail(block, src, "expression produces integer value {d} which is too big for this compiler implementation to handle", .{int}),
};

View File

@@ -579,7 +579,8 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.block => try self.airBlock(inst),
.br => try self.airBr(inst),
.breakpoint => try self.airBreakpoint(),
-.ret_addr => try self.airRetAddr(),
+.ret_addr => try self.airRetAddr(inst),
+.frame_addr => try self.airFrameAddress(inst),
.fence => try self.airFence(),
.call => try self.airCall(inst),
.cond_br => try self.airCondBr(inst),
@@ -2178,8 +2179,14 @@ fn airBreakpoint(self: *Self) !void {
return self.finishAirBookkeeping();
}
-fn airRetAddr(self: *Self) !void {
-return self.fail("TODO implement airRetAddr for {}", .{self.target.cpu.arch});
+fn airRetAddr(self: *Self, inst: Air.Inst.Index) !void {
+const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airRetAddr for aarch64", .{});
+return self.finishAir(inst, result, .{ .none, .none, .none });
+}
+fn airFrameAddress(self: *Self, inst: Air.Inst.Index) !void {
+const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airFrameAddress for aarch64", .{});
+return self.finishAir(inst, result, .{ .none, .none, .none });
+}
fn airFence(self: *Self) !void {

View File

@@ -565,7 +565,8 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.block => try self.airBlock(inst),
.br => try self.airBr(inst),
.breakpoint => try self.airBreakpoint(),
-.ret_addr => try self.airRetAddr(),
+.ret_addr => try self.airRetAddr(inst),
+.frame_addr => try self.airFrameAddress(inst),
.fence => try self.airFence(),
.call => try self.airCall(inst),
.cond_br => try self.airCondBr(inst),
@@ -2449,8 +2450,14 @@ fn airBreakpoint(self: *Self) !void {
return self.finishAirBookkeeping();
}
-fn airRetAddr(self: *Self) !void {
-return self.fail("TODO implement airRetAddr for {}", .{self.target.cpu.arch});
+fn airRetAddr(self: *Self, inst: Air.Inst.Index) !void {
+const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airRetAddr for arm", .{});
+return self.finishAir(inst, result, .{ .none, .none, .none });
+}
+fn airFrameAddress(self: *Self, inst: Air.Inst.Index) !void {
+const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airFrameAddress for arm", .{});
+return self.finishAir(inst, result, .{ .none, .none, .none });
+}
fn airFence(self: *Self) !void {

View File

@@ -550,7 +550,8 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.block => try self.airBlock(inst),
.br => try self.airBr(inst),
.breakpoint => try self.airBreakpoint(),
-.ret_addr => try self.airRetAddr(),
+.ret_addr => try self.airRetAddr(inst),
+.frame_addr => try self.airFrameAddress(inst),
.fence => try self.airFence(),
.call => try self.airCall(inst),
.cond_br => try self.airCondBr(inst),
@@ -1438,8 +1439,14 @@ fn airBreakpoint(self: *Self) !void {
return self.finishAirBookkeeping();
}
-fn airRetAddr(self: *Self) !void {
-return self.fail("TODO implement airRetAddr for {}", .{self.target.cpu.arch});
+fn airRetAddr(self: *Self, inst: Air.Inst.Index) !void {
+const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airRetAddr for riscv64", .{});
+return self.finishAir(inst, result, .{ .none, .none, .none });
+}
+fn airFrameAddress(self: *Self, inst: Air.Inst.Index) !void {
+const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airFrameAddress for riscv64", .{});
+return self.finishAir(inst, result, .{ .none, .none, .none });
+}
fn airFence(self: *Self) !void {

View File

@@ -1683,6 +1683,7 @@ fn genInst(self: *Self, inst: Air.Inst.Index) !WValue {
.assembly,
.shl_sat,
.ret_addr,
+.frame_addr,
.clz,
.ctz,
.popcount,

View File

@@ -662,7 +662,8 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.block => try self.airBlock(inst),
.br => try self.airBr(inst),
.breakpoint => try self.airBreakpoint(),
-.ret_addr => try self.airRetAddr(),
+.ret_addr => try self.airRetAddr(inst),
+.frame_addr => try self.airFrameAddress(inst),
.fence => try self.airFence(),
.call => try self.airCall(inst),
.cond_br => try self.airCondBr(inst),
@@ -3127,8 +3128,14 @@ fn airBreakpoint(self: *Self) !void {
return self.finishAirBookkeeping();
}
-fn airRetAddr(self: *Self) !void {
-return self.fail("TODO implement airRetAddr for {}", .{self.target.cpu.arch});
+fn airRetAddr(self: *Self, inst: Air.Inst.Index) !void {
+const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airRetAddr for x86_64", .{});
+return self.finishAir(inst, result, .{ .none, .none, .none });
+}
+fn airFrameAddress(self: *Self, inst: Air.Inst.Index) !void {
+const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airFrameAddress for x86_64", .{});
+return self.finishAir(inst, result, .{ .none, .none, .none });
+}
fn airFence(self: *Self) !void {

View File

@@ -1588,7 +1588,8 @@ fn genBody(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, OutO
.arg => airArg(f),
.breakpoint => try airBreakpoint(f),
-.ret_addr => try airRetAddr(f),
+.ret_addr => try airRetAddr(f, inst),
+.frame_addr => try airFrameAddress(f, inst),
.unreach => try airUnreach(f),
.fence => try airFence(f, inst),
@@ -2717,12 +2718,20 @@ fn airBreakpoint(f: *Function) !CValue {
return CValue.none;
}
-fn airRetAddr(f: *Function) !CValue {
+fn airRetAddr(f: *Function, inst: Air.Inst.Index) !CValue {
+if (f.liveness.isUnused(inst)) return CValue.none;
const local = try f.allocLocal(Type.usize, .Const);
try f.object.writer().writeAll(" = zig_return_address();\n");
return local;
}
+fn airFrameAddress(f: *Function, inst: Air.Inst.Index) !CValue {
+if (f.liveness.isUnused(inst)) return CValue.none;
+const local = try f.allocLocal(Type.usize, .Const);
+try f.object.writer().writeAll(" = zig_frame_address();\n");
+return local;
+}
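Each `@frameAddress()` thus becomes a `const` usize local assigned from `zig_frame_address()`. A sketch of the mapping, with the presumed C output in comments (that `zig_frame_address` expands to `__builtin_frame_address(0)` in the emitted header is an assumption here, as is the local's name):

    fn where() usize {
        return @frameAddress();
    }
    // presumed emitted C (sketch):
    //   const uintptr_t t0 = zig_frame_address();
    //   return t0;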
fn airFence(f: *Function, inst: Air.Inst.Index) !CValue {
const atomic_order = f.air.instructions.items(.data)[inst].fence;
const writer = f.object.writer();

View File

@@ -2135,6 +2135,7 @@ pub const FuncGen = struct {
.switch_br => try self.airSwitchBr(inst),
.breakpoint => try self.airBreakpoint(inst),
.ret_addr => try self.airRetAddr(inst),
+.frame_addr => try self.airFrameAddress(inst),
.call => try self.airCall(inst),
.cond_br => try self.airCondBr(inst),
.intcast => try self.airIntCast(inst),
@@ -4100,12 +4101,25 @@ pub const FuncGen = struct {
}
fn airRetAddr(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
-_ = inst;
-const i32_zero = self.context.intType(32).constNull();
-const usize_llvm_ty = try self.dg.llvmType(Type.usize);
+if (self.liveness.isUnused(inst)) return null;
+const llvm_i32 = self.context.intType(32);
const llvm_fn = self.getIntrinsic("llvm.returnaddress", &.{});
-const ptr_val = self.builder.buildCall(llvm_fn, &[_]*const llvm.Value{i32_zero}, 1, .Fast, .Auto, "");
-return self.builder.buildPtrToInt(ptr_val, usize_llvm_ty, "");
+const params = [_]*const llvm.Value{llvm_i32.constNull()};
+const ptr_val = self.builder.buildCall(llvm_fn, &params, params.len, .Fast, .Auto, "");
+const llvm_usize = try self.dg.llvmType(Type.usize);
+return self.builder.buildPtrToInt(ptr_val, llvm_usize, "");
}
+fn airFrameAddress(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+if (self.liveness.isUnused(inst)) return null;
+const llvm_i32 = self.context.intType(32);
+const llvm_fn = self.getIntrinsic("llvm.frameaddress", &.{llvm_i32});
+const params = [_]*const llvm.Value{llvm_i32.constNull()};
+const ptr_val = self.builder.buildCall(llvm_fn, &params, params.len, .Fast, .Auto, "");
+const llvm_usize = try self.dg.llvmType(Type.usize);
+return self.builder.buildPtrToInt(ptr_val, llvm_usize, "");
+}
fn airFence(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {

View File

@@ -171,6 +171,7 @@ const Writer = struct {
.breakpoint,
.unreach,
.ret_addr,
+.frame_addr,
=> try w.writeNoOp(s, inst),
.const_ty,

View File

@@ -4531,7 +4531,15 @@ pub const Type = extern union {
};
pub fn isTuple(ty: Type) bool {
-return ty.tag() == .tuple;
+return ty.tag() == .tuple or ty.tag() == .empty_struct_literal;
}
+pub fn tupleFields(ty: Type) Payload.Tuple.Data {
+return switch (ty.tag()) {
+.tuple => ty.castTag(.tuple).?.data,
+.empty_struct_literal => .{ .types = &.{}, .values = &.{} },
+else => unreachable,
+};
+}
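With `isTuple` also accepting `empty_struct_literal`, the type of `.{}` participates in tuple concatenation and multiplication like any other tuple (sketch, not from the PR):

    test "empty tuple literal is a tuple" {
        const e = .{};
        const t = e ++ e; // still the empty tuple
        try @import("std").testing.expect(@typeInfo(@TypeOf(t)).Struct.fields.len == 0);
    }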
/// The sub-types are named after what fields they contain.
@@ -4683,11 +4691,13 @@ pub const Type = extern union {
pub const Tuple = struct {
base: Payload = .{ .tag = .tuple },
-data: struct {
+data: Data,
+pub const Data = struct {
types: []Type,
/// unreachable_value elements are used to indicate runtime-known.
values: []Value,
-},
+};
};
pub const Union = struct {

View File

@@ -1829,7 +1829,7 @@ pub const Value = extern union {
assert(a_tag != .undef);
assert(b_tag != .undef);
if (a_tag == b_tag) switch (a_tag) {
-.void_value, .null_value, .the_only_possible_value => return true,
+.void_value, .null_value, .the_only_possible_value, .empty_struct_value => return true,
.enum_literal => {
const a_name = a.castTag(.enum_literal).?.data;
const b_name = b.castTag(.enum_literal).?.data;
@@ -1892,10 +1892,18 @@ pub const Value = extern union {
return a_payload == b_payload;
},
.@"struct" => {
-const fields = ty.structFields().values();
const a_field_vals = a.castTag(.@"struct").?.data;
const b_field_vals = b.castTag(.@"struct").?.data;
assert(a_field_vals.len == b_field_vals.len);
+if (ty.isTuple()) {
+const types = ty.tupleFields().types;
+assert(types.len == a_field_vals.len);
+for (types) |field_ty, i| {
+if (!eql(a_field_vals[i], b_field_vals[i], field_ty)) return false;
+}
+return true;
+}
+const fields = ty.structFields().values();
assert(fields.len == a_field_vals.len);
for (fields) |field, i| {
if (!eql(a_field_vals[i], b_field_vals[i], field.ty)) return false;
@@ -1967,11 +1975,10 @@ pub const Value = extern union {
return true;
},
.Struct => {
-// must be a struct with no fields since we checked for if
-// both have the struct tag above.
-const fields = ty.structFields().values();
-assert(fields.len == 0);
-return true;
+// A tuple can be represented with .empty_struct_value,
+// the_one_possible_value, .@"struct" in which case we could
+// end up here and the values are equal if the type has zero fields.
+return ty.structFieldCount() != 0;
},
else => return order(a, b).compare(.eq),
}
@@ -2024,6 +2031,13 @@ pub const Value = extern union {
}
},
.Struct => {
+if (ty.isTuple()) {
+const fields = ty.tupleFields();
+for (fields.values) |field_val, i| {
+field_val.hash(fields.types[i], hasher);
+}
+return;
+}
const fields = ty.structFields().values();
if (fields.len == 0) return;
const field_values = val.castTag(.@"struct").?.data;

View File

@@ -23,28 +23,30 @@ test "tuple concatenation" {
}
test "tuple multiplication" {
-if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
+const S = struct {
+fn doTheTest() !void {
{
const t = .{} ** 4;
-try expectEqual(0, @typeInfo(@TypeOf(t)).Struct.fields.len);
+try expect(@typeInfo(@TypeOf(t)).Struct.fields.len == 0);
}
{
const t = .{'a'} ** 4;
-try expectEqual(4, @typeInfo(@TypeOf(t)).Struct.fields.len);
-inline for (t) |x| try expectEqual('a', x);
+try expect(@typeInfo(@TypeOf(t)).Struct.fields.len == 4);
+inline for (t) |x| try expect(x == 'a');
}
{
const t = .{ 1, 2, 3 } ** 4;
-try expectEqual(12, @typeInfo(@TypeOf(t)).Struct.fields.len);
-inline for (t) |x, i| try expectEqual(1 + i % 3, x);
+try expect(@typeInfo(@TypeOf(t)).Struct.fields.len == 12);
+inline for (t) |x, i| try expect(x == 1 + i % 3);
}
}
+};
+try S.doTheTest();
+comptime try S.doTheTest();
}
test "tuple concatenation" {
-if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
const T = struct {
fn consume_tuple(tuple: anytype, len: usize) !void {
@@ -86,8 +88,6 @@ test "tuple multiplication" {
}
test "pass tuple to comptime var parameter" {
-if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
const S = struct {
fn Foo(comptime args: anytype) !void {
try expect(args[0] == 1);