stage2: change @bitCast to always be by-value

After a discussion about the language specification, this seems like the best
way to go, because it is simpler for both humans and compilers to reason about.
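For example (a minimal sketch, not taken from this commit's test suite),
"by-value" means the operand is evaluated to a finished value first and only
then reinterpreted, regardless of where the result ends up:

    const std = @import("std");
    const expect = std.testing.expect;

    test "by-value @bitCast sketch" {
        // The operand is a plain value; @bitCast reinterprets its bytes as
        // the destination type, with no result pointer threaded through.
        const bytes = [_]u8{ 0x41, 0x41, 0x41, 0x41 };
        const word = @bitCast(u32, bytes);
        try expect(word == 0x41414141); // same on either endianness
    }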

The `bitcast_result_ptr` ZIR instruction is no longer needed.

This commit also implements writing enums, arrays, and vectors to
virtual memory at compile-time.
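Concretely (a hypothetical example, assuming the semantics of the new
Value.writeToMemory cases in the diff below), this is what allows casts like
these to be evaluated at compile time:

    const std = @import("std");

    comptime {
        // Enums are written as their integer representation...
        const E = enum(u16) { a, b };
        std.debug.assert(@bitCast(u16, E.b) == 1);
        // ...and arrays are written element by element.
        std.debug.assert(@bitCast(u32, [_]u8{ 255, 255, 255, 255 }) == std.math.maxInt(u32));
    }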

This allows more of compiler-rt to build, which in turn unlocks the saturating
arithmetic behavior tests.

There was also a memory leak in the comptime closure system, which is now
fixed.
Andrew Kelley 2021-10-22 15:12:22 -07:00
parent 86b9280963
commit 069c83d58c
10 changed files with 140 additions and 153 deletions

View File

@@ -81,6 +81,81 @@ comptime {
@export(__mulodi4, .{ .name = "__mulodi4", .linkage = linkage });
}
if (builtin.os.tag == .windows) {
// Default stack-probe functions emitted by LLVM
if (is_mingw) {
const _chkstk = @import("compiler_rt/stack_probe.zig")._chkstk;
@export(_chkstk, .{ .name = "_alloca", .linkage = strong_linkage });
const ___chkstk_ms = @import("compiler_rt/stack_probe.zig").___chkstk_ms;
@export(___chkstk_ms, .{ .name = "___chkstk_ms", .linkage = strong_linkage });
} else if (!builtin.link_libc) {
// These symbols are otherwise exported by MSVCRT.lib
const _chkstk = @import("compiler_rt/stack_probe.zig")._chkstk;
@export(_chkstk, .{ .name = "_chkstk", .linkage = strong_linkage });
const __chkstk = @import("compiler_rt/stack_probe.zig").__chkstk;
@export(__chkstk, .{ .name = "__chkstk", .linkage = strong_linkage });
}
switch (arch) {
.i386 => {
const __divti3 = @import("compiler_rt/divti3.zig").__divti3;
@export(__divti3, .{ .name = "__divti3", .linkage = linkage });
const __modti3 = @import("compiler_rt/modti3.zig").__modti3;
@export(__modti3, .{ .name = "__modti3", .linkage = linkage });
const __multi3 = @import("compiler_rt/multi3.zig").__multi3;
@export(__multi3, .{ .name = "__multi3", .linkage = linkage });
const __udivti3 = @import("compiler_rt/udivti3.zig").__udivti3;
@export(__udivti3, .{ .name = "__udivti3", .linkage = linkage });
const __udivmodti4 = @import("compiler_rt/udivmodti4.zig").__udivmodti4;
@export(__udivmodti4, .{ .name = "__udivmodti4", .linkage = linkage });
const __umodti3 = @import("compiler_rt/umodti3.zig").__umodti3;
@export(__umodti3, .{ .name = "__umodti3", .linkage = linkage });
},
.x86_64 => {
// The "ti" functions must use Vector(2, u64) parameter types to adhere to the ABI
// that LLVM expects compiler-rt to have.
const __divti3_windows_x86_64 = @import("compiler_rt/divti3.zig").__divti3_windows_x86_64;
@export(__divti3_windows_x86_64, .{ .name = "__divti3", .linkage = linkage });
const __modti3_windows_x86_64 = @import("compiler_rt/modti3.zig").__modti3_windows_x86_64;
@export(__modti3_windows_x86_64, .{ .name = "__modti3", .linkage = linkage });
const __multi3_windows_x86_64 = @import("compiler_rt/multi3.zig").__multi3_windows_x86_64;
@export(__multi3_windows_x86_64, .{ .name = "__multi3", .linkage = linkage });
const __udivti3_windows_x86_64 = @import("compiler_rt/udivti3.zig").__udivti3_windows_x86_64;
@export(__udivti3_windows_x86_64, .{ .name = "__udivti3", .linkage = linkage });
const __udivmodti4_windows_x86_64 = @import("compiler_rt/udivmodti4.zig").__udivmodti4_windows_x86_64;
@export(__udivmodti4_windows_x86_64, .{ .name = "__udivmodti4", .linkage = linkage });
const __umodti3_windows_x86_64 = @import("compiler_rt/umodti3.zig").__umodti3_windows_x86_64;
@export(__umodti3_windows_x86_64, .{ .name = "__umodti3", .linkage = linkage });
},
else => {},
}
if (arch.isAARCH64()) {
const __chkstk = @import("compiler_rt/stack_probe.zig").__chkstk;
@export(__chkstk, .{ .name = "__chkstk", .linkage = strong_linkage });
const __divti3_windows = @import("compiler_rt/divti3.zig").__divti3;
@export(__divti3_windows, .{ .name = "__divti3", .linkage = linkage });
const __modti3 = @import("compiler_rt/modti3.zig").__modti3;
@export(__modti3, .{ .name = "__modti3", .linkage = linkage });
const __udivti3_windows = @import("compiler_rt/udivti3.zig").__udivti3;
@export(__udivti3_windows, .{ .name = "__udivti3", .linkage = linkage });
const __umodti3 = @import("compiler_rt/umodti3.zig").__umodti3;
@export(__umodti3, .{ .name = "__umodti3", .linkage = linkage });
}
} else {
const __divti3 = @import("compiler_rt/divti3.zig").__divti3;
@export(__divti3, .{ .name = "__divti3", .linkage = linkage });
const __modti3 = @import("compiler_rt/modti3.zig").__modti3;
@export(__modti3, .{ .name = "__modti3", .linkage = linkage });
const __multi3 = @import("compiler_rt/multi3.zig").__multi3;
@export(__multi3, .{ .name = "__multi3", .linkage = linkage });
const __udivti3 = @import("compiler_rt/udivti3.zig").__udivti3;
@export(__udivti3, .{ .name = "__udivti3", .linkage = linkage });
const __udivmodti4 = @import("compiler_rt/udivmodti4.zig").__udivmodti4;
@export(__udivmodti4, .{ .name = "__udivmodti4", .linkage = linkage });
const __umodti3 = @import("compiler_rt/umodti3.zig").__umodti3;
@export(__umodti3, .{ .name = "__umodti3", .linkage = linkage });
}
if (!builtin.zig_is_stage2) {
if (!long_double_is_f128) {
// TODO implement these
@@ -552,81 +627,6 @@ comptime {
@export(__unordtf2, .{ .name = "__unordkf2", .linkage = linkage });
}
if (builtin.os.tag == .windows) {
// Default stack-probe functions emitted by LLVM
if (is_mingw) {
const _chkstk = @import("compiler_rt/stack_probe.zig")._chkstk;
@export(_chkstk, .{ .name = "_alloca", .linkage = strong_linkage });
const ___chkstk_ms = @import("compiler_rt/stack_probe.zig").___chkstk_ms;
@export(___chkstk_ms, .{ .name = "___chkstk_ms", .linkage = strong_linkage });
} else if (!builtin.link_libc) {
// These symbols are otherwise exported by MSVCRT.lib
const _chkstk = @import("compiler_rt/stack_probe.zig")._chkstk;
@export(_chkstk, .{ .name = "_chkstk", .linkage = strong_linkage });
const __chkstk = @import("compiler_rt/stack_probe.zig").__chkstk;
@export(__chkstk, .{ .name = "__chkstk", .linkage = strong_linkage });
}
switch (arch) {
.i386 => {
const __divti3 = @import("compiler_rt/divti3.zig").__divti3;
@export(__divti3, .{ .name = "__divti3", .linkage = linkage });
const __modti3 = @import("compiler_rt/modti3.zig").__modti3;
@export(__modti3, .{ .name = "__modti3", .linkage = linkage });
const __multi3 = @import("compiler_rt/multi3.zig").__multi3;
@export(__multi3, .{ .name = "__multi3", .linkage = linkage });
const __udivti3 = @import("compiler_rt/udivti3.zig").__udivti3;
@export(__udivti3, .{ .name = "__udivti3", .linkage = linkage });
const __udivmodti4 = @import("compiler_rt/udivmodti4.zig").__udivmodti4;
@export(__udivmodti4, .{ .name = "__udivmodti4", .linkage = linkage });
const __umodti3 = @import("compiler_rt/umodti3.zig").__umodti3;
@export(__umodti3, .{ .name = "__umodti3", .linkage = linkage });
},
.x86_64 => {
// The "ti" functions must use Vector(2, u64) parameter types to adhere to the ABI
// that LLVM expects compiler-rt to have.
const __divti3_windows_x86_64 = @import("compiler_rt/divti3.zig").__divti3_windows_x86_64;
@export(__divti3_windows_x86_64, .{ .name = "__divti3", .linkage = linkage });
const __modti3_windows_x86_64 = @import("compiler_rt/modti3.zig").__modti3_windows_x86_64;
@export(__modti3_windows_x86_64, .{ .name = "__modti3", .linkage = linkage });
const __multi3_windows_x86_64 = @import("compiler_rt/multi3.zig").__multi3_windows_x86_64;
@export(__multi3_windows_x86_64, .{ .name = "__multi3", .linkage = linkage });
const __udivti3_windows_x86_64 = @import("compiler_rt/udivti3.zig").__udivti3_windows_x86_64;
@export(__udivti3_windows_x86_64, .{ .name = "__udivti3", .linkage = linkage });
const __udivmodti4_windows_x86_64 = @import("compiler_rt/udivmodti4.zig").__udivmodti4_windows_x86_64;
@export(__udivmodti4_windows_x86_64, .{ .name = "__udivmodti4", .linkage = linkage });
const __umodti3_windows_x86_64 = @import("compiler_rt/umodti3.zig").__umodti3_windows_x86_64;
@export(__umodti3_windows_x86_64, .{ .name = "__umodti3", .linkage = linkage });
},
else => {},
}
if (arch.isAARCH64()) {
const __chkstk = @import("compiler_rt/stack_probe.zig").__chkstk;
@export(__chkstk, .{ .name = "__chkstk", .linkage = strong_linkage });
const __divti3_windows = @import("compiler_rt/divti3.zig").__divti3;
@export(__divti3_windows, .{ .name = "__divti3", .linkage = linkage });
const __modti3 = @import("compiler_rt/modti3.zig").__modti3;
@export(__modti3, .{ .name = "__modti3", .linkage = linkage });
const __udivti3_windows = @import("compiler_rt/udivti3.zig").__udivti3;
@export(__udivti3_windows, .{ .name = "__udivti3", .linkage = linkage });
const __umodti3 = @import("compiler_rt/umodti3.zig").__umodti3;
@export(__umodti3, .{ .name = "__umodti3", .linkage = linkage });
}
} else {
const __divti3 = @import("compiler_rt/divti3.zig").__divti3;
@export(__divti3, .{ .name = "__divti3", .linkage = linkage });
const __modti3 = @import("compiler_rt/modti3.zig").__modti3;
@export(__modti3, .{ .name = "__modti3", .linkage = linkage });
const __multi3 = @import("compiler_rt/multi3.zig").__multi3;
@export(__multi3, .{ .name = "__multi3", .linkage = linkage });
const __udivti3 = @import("compiler_rt/udivti3.zig").__udivti3;
@export(__udivti3, .{ .name = "__udivti3", .linkage = linkage });
const __udivmodti4 = @import("compiler_rt/udivmodti4.zig").__udivmodti4;
@export(__udivmodti4, .{ .name = "__udivmodti4", .linkage = linkage });
const __umodti3 = @import("compiler_rt/umodti3.zig").__umodti3;
@export(__umodti3, .{ .name = "__umodti3", .linkage = linkage });
}
_ = @import("compiler_rt/atomics.zig");
@export(fmaq, .{ .name = "fmaq", .linkage = linkage });

View File

@@ -276,17 +276,30 @@ fn typeExpr(gz: *GenZir, scope: *Scope, type_node: Ast.Node.Index) InnerError!Zi
return expr(gz, scope, coerced_type_rl, type_node);
}
fn reachableTypeExpr(
gz: *GenZir,
scope: *Scope,
type_node: Ast.Node.Index,
reachable_node: Ast.Node.Index,
) InnerError!Zir.Inst.Ref {
const prev_force_comptime = gz.force_comptime;
gz.force_comptime = true;
defer gz.force_comptime = prev_force_comptime;
return reachableExpr(gz, scope, coerced_type_rl, type_node, reachable_node);
}
/// Same as `expr` but fails with a compile error if the result type is `noreturn`.
fn reachableExpr(
gz: *GenZir,
scope: *Scope,
rl: ResultLoc,
node: Ast.Node.Index,
src_node: Ast.Node.Index,
reachable_node: Ast.Node.Index,
) InnerError!Zir.Inst.Ref {
const result_inst = try expr(gz, scope, rl, node);
if (gz.refIsNoReturn(result_inst)) {
return gz.astgen.failNodeNotes(src_node, "unreachable code", .{}, &[_]u32{
return gz.astgen.failNodeNotes(reachable_node, "unreachable code", .{}, &[_]u32{
try gz.astgen.errNoteNode(node, "control flow is diverted here", .{}),
});
}
@@ -2040,7 +2053,6 @@ fn unusedResultExpr(gz: *GenZir, scope: *Scope, statement: Ast.Node.Index) Inner
.as_node,
.bit_and,
.bitcast,
.bitcast_result_ptr,
.bit_or,
.block,
.block_inline,
@@ -7160,42 +7172,13 @@ fn bitCast(
lhs: Ast.Node.Index,
rhs: Ast.Node.Index,
) InnerError!Zir.Inst.Ref {
const astgen = gz.astgen;
const dest_type = try typeExpr(gz, scope, lhs);
switch (rl) {
.none, .discard, .ty, .coerced_ty => {
const operand = try expr(gz, scope, .none, rhs);
const result = try gz.addPlNode(.bitcast, node, Zir.Inst.Bin{
.lhs = dest_type,
.rhs = operand,
});
return rvalue(gz, rl, result, node);
},
.ref => {
return astgen.failNode(node, "cannot take address of `@bitCast` result", .{});
},
.ptr, .inferred_ptr => |result_ptr| {
return bitCastRlPtr(gz, scope, node, dest_type, result_ptr, rhs);
},
.block_ptr => |block| {
return bitCastRlPtr(gz, scope, node, dest_type, block.rl_ptr, rhs);
},
}
}
fn bitCastRlPtr(
gz: *GenZir,
scope: *Scope,
node: Ast.Node.Index,
dest_type: Zir.Inst.Ref,
result_ptr: Zir.Inst.Ref,
rhs: Ast.Node.Index,
) InnerError!Zir.Inst.Ref {
const casted_result_ptr = try gz.addPlNode(.bitcast_result_ptr, node, Zir.Inst.Bin{
const dest_type = try reachableTypeExpr(gz, scope, lhs, node);
const operand = try reachableExpr(gz, scope, .none, rhs, node);
const result = try gz.addPlNode(.bitcast, node, Zir.Inst.Bin{
.lhs = dest_type,
.rhs = result_ptr,
.rhs = operand,
});
return expr(gz, scope, .{ .ptr = casted_result_ptr }, rhs);
return rvalue(gz, rl, result, node);
}
fn typeOf(

View File

@@ -317,6 +317,7 @@ pub const WipCaptureScope = struct {
assert(!self.finalized);
// use a temp to avoid unintentional aliasing due to RLS
const tmp = try self.scope.captures.clone(self.perm_arena);
self.scope.captures.deinit(self.gpa);
self.scope.captures = tmp;
self.finalized = true;
}

View File

@@ -513,7 +513,6 @@ pub fn analyzeBody(
.bit_not => try sema.zirBitNot(block, inst),
.bit_or => try sema.zirBitwise(block, inst, .bit_or),
.bitcast => try sema.zirBitcast(block, inst),
.bitcast_result_ptr => try sema.zirBitcastResultPtr(block, inst),
.suspend_block => try sema.zirSuspendBlock(block, inst),
.bool_not => try sema.zirBoolNot(block, inst),
.bool_br_and => try sema.zirBoolBr(block, inst, false),
@@ -1385,12 +1384,6 @@ pub fn resolveInstValue(
};
}
fn zirBitcastResultPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
return sema.fail(block, src, "TODO implement zir_sema.zirBitcastResultPtr", .{});
}
fn zirCoerceResultPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();

View File

@@ -240,10 +240,6 @@ pub const Inst = struct {
/// Reinterpret the memory representation of a value as a different type.
/// Uses the pl_node field with payload `Bin`.
bitcast,
/// A typed result location pointer is bitcasted to a new result location pointer.
/// The new result location pointer has an inferred type.
/// Uses the pl_node field with payload `Bin`.
bitcast_result_ptr,
/// Bitwise NOT. `~`
/// Uses `un_node`.
bit_not,
@@ -977,7 +973,6 @@ pub const Inst = struct {
.as_node,
.bit_and,
.bitcast,
.bitcast_result_ptr,
.bit_or,
.block,
.block_inline,
@@ -1235,7 +1230,6 @@ pub const Inst = struct {
.as_node = .pl_node,
.bit_and = .pl_node,
.bitcast = .pl_node,
.bitcast_result_ptr = .pl_node,
.bit_not = .un_node,
.bit_or = .pl_node,
.block = .pl_node,

View File

@@ -349,7 +349,6 @@ const Writer = struct {
.reduce,
.atomic_load,
.bitcast,
.bitcast_result_ptr,
.vector_type,
.maximum,
.minimum,

View File

@@ -1015,6 +1015,14 @@ pub const Value = extern union {
const bits = ty.intInfo(target).bits;
bigint.writeTwosComplement(buffer, bits, target.cpu.arch.endian());
},
.Enum => {
var enum_buffer: Payload.U64 = undefined;
const int_val = val.enumToInt(ty, &enum_buffer);
var bigint_buffer: BigIntSpace = undefined;
const bigint = int_val.toBigInt(&bigint_buffer);
const bits = ty.intInfo(target).bits;
bigint.writeTwosComplement(buffer, bits, target.cpu.arch.endian());
},
.Float => switch (ty.floatBits(target)) {
16 => return floatWriteToMemory(f16, val.toFloat(f16), target, buffer),
32 => return floatWriteToMemory(f32, val.toFloat(f32), target, buffer),
@@ -1022,6 +1030,19 @@ pub const Value = extern union {
128 => return floatWriteToMemory(f128, val.toFloat(f128), target, buffer),
else => unreachable,
},
.Array, .Vector => {
const len = ty.arrayLen();
const elem_ty = ty.childType();
const elem_size = elem_ty.abiSize(target);
var elem_i: usize = 0;
var elem_value_buf: ElemValueBuffer = undefined;
var buf_off: usize = 0;
while (elem_i < len) : (elem_i += 1) {
const elem_val = val.elemValueBuffer(elem_i, &elem_value_buf);
writeToMemory(elem_val, elem_ty, target, buffer[buf_off..]);
buf_off += elem_size;
}
},
else => @panic("TODO implement writeToMemory for more types"),
}
}

View File

@@ -47,6 +47,7 @@ test {
_ = @import("behavior/pointers.zig");
_ = @import("behavior/ptrcast.zig");
_ = @import("behavior/pub_enum.zig");
_ = @import("behavior/saturating_arithmetic.zig");
_ = @import("behavior/sizeof_and_typeof.zig");
_ = @import("behavior/slice.zig");
_ = @import("behavior/struct.zig");
@@ -150,11 +151,6 @@ test {
_ = @import("behavior/ptrcast_stage1.zig");
_ = @import("behavior/ref_var_in_if_after_if_2nd_switch_prong.zig");
_ = @import("behavior/reflection.zig");
{
// Checklist for getting saturating_arithmetic.zig passing for stage2:
// * add __udivti3 to compiler-rt
_ = @import("behavior/saturating_arithmetic.zig");
}
_ = @import("behavior/select.zig");
_ = @import("behavior/shuffle.zig");
_ = @import("behavior/sizeof_and_typeof_stage1.zig");

View File

@ -42,3 +42,25 @@ test "nested bitcast" {
try S.foo(42);
comptime try S.foo(42);
}
test "@bitCast enum to its integer type" {
const SOCK = enum(c_int) {
A,
B,
fn testBitCastExternEnum() !void {
var SOCK_DGRAM = @This().B;
var sock_dgram = @bitCast(c_int, SOCK_DGRAM);
try expect(sock_dgram == 1);
}
};
try SOCK.testBitCastExternEnum();
comptime try SOCK.testBitCastExternEnum();
}
// issue #3010: compiler segfault
test "bitcast literal [4]u8 param to u32" {
const ip = @bitCast(u32, [_]u8{ 255, 255, 255, 255 });
try expect(ip == maxInt(u32));
}

View File

@@ -5,22 +5,6 @@ const expectEqual = std.testing.expectEqual;
const maxInt = std.math.maxInt;
const native_endian = builtin.target.cpu.arch.endian();
test "@bitCast enum to its integer type" {
const SOCK = enum(c_int) {
A,
B,
fn testBitCastExternEnum() !void {
var SOCK_DGRAM = @This().B;
var sock_dgram = @bitCast(c_int, SOCK_DGRAM);
try expect(sock_dgram == 1);
}
};
try SOCK.testBitCastExternEnum();
comptime try SOCK.testBitCastExternEnum();
}
test "@bitCast packed structs at runtime and comptime" {
const Full = packed struct {
number: u16,
@ -111,12 +95,6 @@ test "implicit cast to error union by returning" {
comptime try S.entry();
}
// issue #3010: compiler segfault
test "bitcast literal [4]u8 param to u32" {
const ip = @bitCast(u32, [_]u8{ 255, 255, 255, 255 });
try expect(ip == maxInt(u32));
}
test "bitcast packed struct literal to byte" {
const Foo = packed struct {
value: u8,