//! Semantic analysis of ZIR instructions.
//! Shared among all Blocks. Stored on the stack.
//! State used for compiling a ZIR into AIR.
//! Transforms untyped ZIR instructions into semantically-analyzed AIR instructions.
//! Does type checking, comptime control flow, and safety-check generation.
//! This is the heart of the Zig compiler.
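
// Illustrative sketch of the transformation performed here (the ZIR and AIR
// spellings below are approximations, not exact compiler output):
//
//   Zig source:    const x: u32 = a + b;
//   ZIR (untyped): %3 = add(%1, %2)
//   AIR (typed):   %3 = add(%1, %2) : u32  // type-checked, safety checks inserted
//
// Sema walks ZIR instruction by instruction, resolving types and comptime
// values, and emits AIR for the code generation backends to consume.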

mod: *Module,
/// Alias to `mod.gpa`.
gpa: Allocator,
/// Points to the temporary arena allocator of the Sema.
/// This arena will be cleared when the sema is destroyed.
arena: Allocator,
code: Zir,
air_instructions: std.MultiArrayList(Air.Inst) = .{},
air_extra: std.ArrayListUnmanaged(u32) = .{},
/// Maps ZIR to AIR.
inst_map: InstMap = .{},
/// When analyzing an inline function call, `owner_decl` is the `Decl` of the caller
/// and `src_decl` of `Block` is the `Decl` of the callee.
/// This `Decl` owns the arena memory of this `Sema`.
owner_decl: *Decl,
owner_decl_index: Decl.Index,
/// For an inline or comptime function call, this will be the root parent function
/// which contains the callsite. Corresponds to `owner_decl`.
/// This could be `none`, a `func_decl`, or a `func_instance`.
owner_func_index: InternPool.Index,
/// The function this ZIR code is the body of, according to the source code.
/// This starts out the same as `owner_func_index` and then diverges in the case of
/// an inline or comptime function call.
/// This could be `none`, a `func_decl`, or a `func_instance`.
func_index: InternPool.Index,
/// Whether the type of `func_index` has a calling convention of `.Naked`.
func_is_naked: bool,
/// Used to restore the error return trace when returning a non-error from a function.
error_return_trace_index_on_fn_entry: Air.Inst.Ref = .none,
/// When semantic analysis needs to know the return type of the function whose body
/// is being analyzed, this `Type` should be used instead of going through `func_index`.
/// This will correctly handle the case of a comptime/inline function call of a
/// generic function which uses a type expression for the return type.
/// The type will be `void` in the case that `func_index` is `none`.
fn_ret_ty: Type,
/// In case of the return type being an error union with an inferred error
/// set, this is the inferred error set. `null` otherwise. Allocated with
/// `Sema.arena`.
fn_ret_ty_ies: ?*InferredErrorSet,
branch_quota: u32 = default_branch_quota,
branch_count: u32 = 0,
/// Populated when returning `error.ComptimeBreak`. Used to communicate the
/// break instruction up the stack to find the corresponding Block.
comptime_break_inst: Zir.Inst.Index = undefined,
/// This field is updated when a new source location becomes active, so that
/// instructions which do not have explicitly mapped source locations still have
/// access to the source location set by the previous instruction which did
/// contain a mapped source location.
src: LazySrcLoc = .{ .token_offset = 0 },
decl_val_table: std.AutoHashMapUnmanaged(Decl.Index, Air.Inst.Ref) = .{},
/// When doing a generic function instantiation, this array collects a value
/// for each parameter of the generic owner. `none` for non-comptime parameters.
/// This is a separate array from `block.params` so that it can be passed
/// directly to `comptime_args` when calling `InternPool.getFuncInstance`.
/// This memory is allocated by a parent `Sema` in the temporary arena, and is
/// used only to add a `func_instance` into the `InternPool`.
comptime_args: []InternPool.Index = &.{},
/// Used to communicate from a generic function instantiation to the logic that
/// creates a generic function instantiation value in `funcCommon`.
generic_owner: InternPool.Index = .none,
/// When `generic_owner` is not `none`, this contains the generic function
/// instantiation callsite, so that compile errors on the parameter types of the
/// instantiation can point back to the instantiation site in addition to the
/// declaration site.
generic_call_src: LazySrcLoc = .unneeded,
/// Corresponds to `generic_call_src`.
generic_call_decl: Decl.OptionalIndex = .none,
/// The keys are types that must be fully resolved prior to the machine code
/// generation pass. Types are added to this set when resolving them
/// immediately could cause a dependency loop, but they do need to be resolved
/// before machine code generation passes process the AIR.
/// It would work fine if this were an array list instead of an array hash map;
/// I chose an array hash map to save time by omitting duplicates.
types_to_resolve: std.AutoArrayHashMapUnmanaged(InternPool.Index, void) = .{},
/// These are lazily created runtime blocks from `block_inline` instructions.
/// They are created when a `break_inline` passes through a runtime condition, because
/// Sema must convert comptime control flow to runtime control flow, which means
/// breaking from a block.
post_hoc_blocks: std.AutoHashMapUnmanaged(Air.Inst.Index, *LabeledBlock) = .{},
/// Populated with the last compile error created.
err: ?*Module.ErrorMsg = null,
/// Set to true when analyzing a func type instruction so that nested generic
/// function types will emit generic poison instead of a partial type.
no_partial_func_ty: bool = false,

/// The temporary arena is used for the memory of the `InferredAlloc` values
/// here so the values can be dropped without any cleanup.
unresolved_inferred_allocs: std.AutoHashMapUnmanaged(Air.Inst.Index, InferredAlloc) = .{},

/// Indices of comptime-mutable decls created by this Sema. These decls' values
/// should be interned after analysis completes, as they may refer to memory in
/// the Sema arena.
/// TODO: this is a workaround for memory bugs triggered by the removal of
/// Decl.value_arena. A better solution needs to be found. Probably this will
/// involve transitioning comptime-mutable memory away from using Decls at all.
comptime_mutable_decls: *std.ArrayList(Decl.Index),

/// This is populated when `@setAlignStack` occurs, so that if a duplicate
/// is encountered, the conflicting source location can be shown.
prev_stack_alignment_src: ?LazySrcLoc = null,

/// While analyzing a type which has a special InternPool index, this is set to the index at which
/// the struct/enum/union type created should be placed. Otherwise, it is `.none`.
builtin_type_target_index: InternPool.Index = .none,

const std = @import("std");
const math = std.math;
const mem = std.mem;
const Allocator = mem.Allocator;
const assert = std.debug.assert;
const log = std.log.scoped(.sema);

const Sema = @This();
const Value = @import("value.zig").Value;
const Type = @import("type.zig").Type;
const TypedValue = @import("TypedValue.zig");
const Air = @import("Air.zig");
const Zir = @import("Zir.zig");
const Module = @import("Module.zig");
const trace = @import("tracy.zig").trace;
const Namespace = Module.Namespace;
const CompileError = Module.CompileError;
const SemaError = Module.SemaError;
const Decl = Module.Decl;
const CaptureScope = Module.CaptureScope;
const WipCaptureScope = Module.WipCaptureScope;
const LazySrcLoc = Module.LazySrcLoc;
const RangeSet = @import("RangeSet.zig");
const target_util = @import("target.zig");
const Package = @import("Package.zig");
const crash_report = @import("crash_report.zig");
const build_options = @import("build_options");
const Compilation = @import("Compilation.zig");
const InternPool = @import("InternPool.zig");
const Alignment = InternPool.Alignment;

pub const default_branch_quota = 1000;
pub const default_reference_trace_len = 2;

pub const InferredErrorSet = struct {
    /// The function body from which this error set originates.
    /// This is `none` in the case of a comptime/inline function call, corresponding to
    /// `InternPool.Index.adhoc_inferred_error_set_type`.
    /// The function's resolved error set is not set until analysis of the
    /// function body completes.
    func: InternPool.Index,
    /// All currently known errors that this error set contains. This includes
    /// direct additions via `return error.Foo;`, and possibly also errors that
    /// are returned from any dependent functions.
    errors: NameMap = .{},
    /// Other inferred error sets which this inferred error set should include.
    inferred_error_sets: std.AutoArrayHashMapUnmanaged(InternPool.Index, void) = .{},
    /// The regular error set created by resolving this inferred error set.
    resolved: InternPool.Index = .none,

    pub const NameMap = std.AutoArrayHashMapUnmanaged(InternPool.NullTerminatedString, void);

    pub fn addErrorSet(
        self: *InferredErrorSet,
        err_set_ty: Type,
        ip: *InternPool,
        arena: Allocator,
    ) !void {
        switch (err_set_ty.toIntern()) {
            .anyerror_type => self.resolved = .anyerror_type,
            .adhoc_inferred_error_set_type => {}, // Adding an inferred error set to itself.

            else => switch (ip.indexToKey(err_set_ty.toIntern())) {
                .error_set_type => |error_set_type| {
                    for (error_set_type.names.get(ip)) |name| {
                        try self.errors.put(arena, name, {});
                    }
                },
                .inferred_error_set_type => {
                    try self.inferred_error_sets.put(arena, err_set_ty.toIntern(), {});
                },
                else => unreachable,
            },
        }
    }
};
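
// Illustrative sketch (hypothetical functions `f` and `g`) of how an inferred
// error set is built up while a function body is analyzed:
//
//   fn f(x: u32) !void {
//       if (x == 0) return error.Zero; // `Zero` lands in `errors` directly.
//       try g(x);                      // g's inferred set is recorded in
//   }                                  // `inferred_error_sets` for later resolution.
//
// `addErrorSet` implements the corresponding bookkeeping: names of explicit
// error sets are merged into `errors`, while other inferred sets are only
// flattened when this set is resolved.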

/// Stores the mapping from `Zir.Inst.Index -> Air.Inst.Ref`, which is used by Sema to resolve
/// instructions during analysis.
/// Instead of a hash table approach, InstMap is simply a slice that is indexed into using the
/// ZIR instruction index and a start offset. An index is not present in the map if the value
/// at the index is `Air.Inst.Ref.none`.
/// `ensureSpaceForInstructions` can be called to force InstMap to have a mapped range that
/// includes all instructions in a slice. After calling this function, `putAssumeCapacity*` can
/// be called safely for any of the instructions passed in.
pub const InstMap = struct {
    items: []Air.Inst.Ref = &[_]Air.Inst.Ref{},
    start: Zir.Inst.Index = 0,

    pub fn deinit(map: InstMap, allocator: mem.Allocator) void {
        allocator.free(map.items);
    }

    pub fn get(map: InstMap, key: Zir.Inst.Index) ?Air.Inst.Ref {
        if (!map.contains(key)) return null;
        return map.items[key - map.start];
    }

    pub fn putAssumeCapacity(
        map: *InstMap,
        key: Zir.Inst.Index,
        ref: Air.Inst.Ref,
    ) void {
        map.items[key - map.start] = ref;
    }

    pub fn putAssumeCapacityNoClobber(
        map: *InstMap,
        key: Zir.Inst.Index,
        ref: Air.Inst.Ref,
    ) void {
        assert(!map.contains(key));
        map.putAssumeCapacity(key, ref);
    }

    pub const GetOrPutResult = struct {
        value_ptr: *Air.Inst.Ref,
        found_existing: bool,
    };

    pub fn getOrPutAssumeCapacity(
        map: *InstMap,
        key: Zir.Inst.Index,
    ) GetOrPutResult {
        const index = key - map.start;
        return GetOrPutResult{
            .value_ptr = &map.items[index],
            .found_existing = map.items[index] != .none,
        };
    }

    pub fn remove(map: InstMap, key: Zir.Inst.Index) bool {
        if (!map.contains(key)) return false;
        map.items[key - map.start] = .none;
        return true;
    }

    pub fn contains(map: InstMap, key: Zir.Inst.Index) bool {
        return map.items[key - map.start] != .none;
    }

    pub fn ensureSpaceForInstructions(
        map: *InstMap,
        allocator: mem.Allocator,
        insts: []const Zir.Inst.Index,
    ) !void {
        const min_max = mem.minMax(Zir.Inst.Index, insts);
        const start = min_max.min;
        const end = min_max.max;
        if (map.start <= start and end < map.items.len + map.start)
            return;

        const old_start = if (map.items.len == 0) start else map.start;
        var better_capacity = map.items.len;
        var better_start = old_start;
        while (true) {
            const extra_capacity = better_capacity / 2 + 16;
            better_capacity += extra_capacity;
            better_start -|= @as(Zir.Inst.Index, @intCast(extra_capacity / 2));
            if (better_start <= start and end < better_capacity + better_start)
                break;
        }

        const start_diff = old_start - better_start;
        const new_items = try allocator.alloc(Air.Inst.Ref, better_capacity);
        @memset(new_items[0..start_diff], .none);
        @memcpy(new_items[start_diff..][0..map.items.len], map.items);
        @memset(new_items[start_diff + map.items.len ..], .none);

        allocator.free(map.items);
        map.items = new_items;
        map.start = @as(Zir.Inst.Index, @intCast(better_start));
    }
};
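
// Illustrative usage sketch (`gpa`, `body`, and `some_ref` are hypothetical):
//
//   var map: InstMap = .{};
//   defer map.deinit(gpa);
//   try map.ensureSpaceForInstructions(gpa, body);
//   map.putAssumeCapacity(body[0], some_ref); // O(1) slice store, no hashing
//   _ = map.get(body[0]);                     // non-null once mapped
//
// Growth example: starting from an empty map with instructions spanning
// indices 100..300, the loop in `ensureSpaceForInstructions` grows the
// capacity 0 -> 16 -> 40 -> 76 -> 130 -> 211 -> 332 while shifting `start`
// back (saturating at 0) by half of each increment, until the whole range
// [100, 300] fits inside the slice.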

/// This is the context needed to semantically analyze ZIR instructions and
/// produce AIR instructions.
/// This is a temporary structure stored on the stack; references to it are valid only
/// during semantic analysis of the block.
pub const Block = struct {
    parent: ?*Block,
    /// Shared among all child blocks.
    sema: *Sema,
    /// The namespace to use for lookups from this source block.
    /// When analyzing fields, this is different from `src_decl.src_namespace`.
    namespace: Namespace.Index,
    /// The AIR instructions generated for this block.
    instructions: std.ArrayListUnmanaged(Air.Inst.Index),
    /// `param` instructions are collected here to be used by the `func` instruction.
    /// When doing a generic function instantiation, this array collects a type
    /// for each *runtime-known* parameter. This array corresponds to the instance
    /// function type, while `Sema.comptime_args` corresponds to the generic owner
    /// function type.
    /// This memory is allocated by a parent `Sema` in the temporary arena, and is
    /// used to add a `func_instance` into the `InternPool`.
    params: std.MultiArrayList(Param) = .{},

    wip_capture_scope: *CaptureScope,

    label: ?*Label = null,
    inlining: ?*Inlining,
    /// If `runtime_index` is not 0, then one of these is guaranteed to be non-null.
    runtime_cond: ?LazySrcLoc = null,
    runtime_loop: ?LazySrcLoc = null,
    /// This Decl is the Decl according to the Zig source code corresponding to this Block.
    /// This can vary during inline or comptime function calls. See `Sema.owner_decl`
    /// for the one that will be the same for all Block instances.
    src_decl: Decl.Index,
    /// Nonzero if a non-inline loop or a runtime conditional has been encountered.
    /// Stores to comptime variables are only allowed when `var.runtime_index <= runtime_index`.
    runtime_index: Value.RuntimeIndex = .zero,
    inline_block: Zir.Inst.Index = 0,

    comptime_reason: ?*const ComptimeReason = null,
    // TODO is_comptime and comptime_reason should probably be merged together.
    is_comptime: bool,
    is_typeof: bool = false,

    /// Keep track of the active error return trace index around blocks so that we can correctly
    /// pop the error trace upon block exit.
    error_return_trace_index: Air.Inst.Ref = .none,

    /// When null, safety is determined by the build mode; changed by `@setRuntimeSafety`.
    want_safety: ?bool = null,

    /// What mode to generate float operations in; set by `@setFloatMode`.
    float_mode: std.builtin.FloatMode = .Strict,

    c_import_buf: ?*std.ArrayList(u8) = null,

    const ComptimeReason = union(enum) {
        c_import: struct {
            block: *Block,
            src: LazySrcLoc,
        },
        comptime_ret_ty: struct {
            block: *Block,
            func: Air.Inst.Ref,
            func_src: LazySrcLoc,
            return_ty: Type,
        },

        fn explain(cr: ComptimeReason, sema: *Sema, msg: ?*Module.ErrorMsg) !void {
            const parent = msg orelse return;
            const mod = sema.mod;
            const prefix = "expression is evaluated at comptime because ";
            switch (cr) {
                .c_import => |ci| {
                    try sema.errNote(ci.block, ci.src, parent, prefix ++ "it is inside a @cImport", .{});
                },
                .comptime_ret_ty => |rt| {
                    const src_loc = if (try sema.funcDeclSrc(rt.func)) |fn_decl| blk: {
                        var src_loc = fn_decl.srcLoc(mod);
                        src_loc.lazy = .{ .node_offset_fn_type_ret_ty = 0 };
                        break :blk src_loc;
                    } else blk: {
                        const src_decl = mod.declPtr(rt.block.src_decl);
                        break :blk rt.func_src.toSrcLoc(src_decl, mod);
                    };
                    if (rt.return_ty.isGenericPoison()) {
                        return mod.errNoteNonLazy(src_loc, parent, prefix ++ "the generic function was instantiated with a comptime-only return type", .{});
                    }
                    try mod.errNoteNonLazy(
                        src_loc,
                        parent,
                        prefix ++ "the function returns a comptime-only type '{}'",
                        .{rt.return_ty.fmt(mod)},
                    );
                    try sema.explainWhyTypeIsComptime(parent, src_loc, rt.return_ty);
                },
            }
        }
    };
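
    // Illustrative sketch of a note `explain` produces, built from the message
    // strings above (the exact type name depends on the call site):
    //
    //   note: expression is evaluated at comptime because the function returns
    //   a comptime-only type 'type'
    //
    // attached either at the callee's return type expression (when the function
    // declaration is known) or at the call site otherwise.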

    const Param = struct {
        /// `none` means `anytype`.
        ty: InternPool.Index,
        is_comptime: bool,
        name: Zir.NullTerminatedString,
    };

    /// When set on a `Block`, this maps a block ZIR instruction to the
    /// corresponding AIR instruction for break instruction analysis.
    pub const Label = struct {
        zir_block: Zir.Inst.Index,
        merges: Merges,
    };

    /// When set on a `Block`, this indicates that an inline function call is
    /// happening and return instructions should be analyzed as a break
    /// instruction to this AIR block instruction.
    /// It is shared among all the blocks in an inline or comptime called
    /// function.
    pub const Inlining = struct {
        /// Might be `none`.
        func: InternPool.Index,
        comptime_result: Air.Inst.Ref,
        merges: Merges,
    };

    pub const Merges = struct {
        block_inst: Air.Inst.Index,
        /// Kept as a separate array list from `br_list` so that it can be passed
        /// directly to `resolvePeerTypes`.
        results: std.ArrayListUnmanaged(Air.Inst.Ref),
        /// Keeps track of the break instructions so that the operand can be replaced
        /// if we need to add type coercion at the end of block analysis.
        /// Same indexes, capacity, length as `results`.
        br_list: std.ArrayListUnmanaged(Air.Inst.Index),
        /// Keeps the source location of the rhs operand of the break instruction,
        /// to enable more precise compile errors.
        /// Same indexes, capacity, length as `results`.
        src_locs: std.ArrayListUnmanaged(?LazySrcLoc),

        pub fn deinit(merges: *@This(), allocator: mem.Allocator) void {
            merges.results.deinit(allocator);
            merges.br_list.deinit(allocator);
            merges.src_locs.deinit(allocator);
        }
    };
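
    // Illustrative sketch: for a labeled block with two breaks,
    //
    //   const r = blk: {
    //       if (cond) break :blk a; // e.g. a: u32
    //       break :blk b;           // e.g. b: comptime_int
    //   };
    //
    // `results` collects both break operands for peer type resolution, and
    // `br_list` records the corresponding `br` instructions so their operands
    // can be rewritten with a coercion once the common type is known.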

    /// For debugging purposes.
    pub fn dump(block: *Block, mod: Module) void {
        Zir.dumpBlock(mod, block);
    }

    pub fn makeSubBlock(parent: *Block) Block {
        return .{
            .parent = parent,
            .sema = parent.sema,
            .src_decl = parent.src_decl,
            .namespace = parent.namespace,
            .instructions = .{},
            .wip_capture_scope = parent.wip_capture_scope,
            .label = null,
            .inlining = parent.inlining,
            .is_comptime = parent.is_comptime,
            .comptime_reason = parent.comptime_reason,
            .is_typeof = parent.is_typeof,
            .runtime_cond = parent.runtime_cond,
            .runtime_loop = parent.runtime_loop,
            .runtime_index = parent.runtime_index,
            .want_safety = parent.want_safety,
            .float_mode = parent.float_mode,
            .c_import_buf = parent.c_import_buf,
            .error_return_trace_index = parent.error_return_trace_index,
        };
    }

    pub fn wantSafety(block: *const Block) bool {
        return block.want_safety orelse switch (block.sema.mod.optimizeMode()) {
            .Debug => true,
            .ReleaseSafe => true,
            .ReleaseFast => false,
            .ReleaseSmall => false,
        };
    }

    pub fn getFileScope(block: *Block, mod: *Module) *Module.File {
        return mod.namespacePtr(block.namespace).file_scope;
    }

    fn addTy(
        block: *Block,
        tag: Air.Inst.Tag,
        ty: Type,
    ) error{OutOfMemory}!Air.Inst.Ref {
        return block.addInst(.{
            .tag = tag,
            .data = .{ .ty = ty },
        });
    }

    fn addTyOp(
        block: *Block,
        tag: Air.Inst.Tag,
        ty: Type,
        operand: Air.Inst.Ref,
    ) error{OutOfMemory}!Air.Inst.Ref {
        return block.addInst(.{
            .tag = tag,
            .data = .{ .ty_op = .{
                .ty = Air.internedToRef(ty.toIntern()),
                .operand = operand,
            } },
        });
    }

    fn addBitCast(block: *Block, ty: Type, operand: Air.Inst.Ref) Allocator.Error!Air.Inst.Ref {
        return block.addInst(.{
            .tag = .bitcast,
            .data = .{ .ty_op = .{
                .ty = Air.internedToRef(ty.toIntern()),
                .operand = operand,
            } },
        });
    }

    fn addNoOp(block: *Block, tag: Air.Inst.Tag) error{OutOfMemory}!Air.Inst.Ref {
        return block.addInst(.{
            .tag = tag,
            .data = .{ .no_op = {} },
        });
    }

    fn addUnOp(
        block: *Block,
        tag: Air.Inst.Tag,
        operand: Air.Inst.Ref,
    ) error{OutOfMemory}!Air.Inst.Ref {
        return block.addInst(.{
            .tag = tag,
            .data = .{ .un_op = operand },
        });
    }

    fn addBr(
        block: *Block,
        target_block: Air.Inst.Index,
        operand: Air.Inst.Ref,
    ) error{OutOfMemory}!Air.Inst.Ref {
        return block.addInst(.{
            .tag = .br,
            .data = .{ .br = .{
                .block_inst = target_block,
                .operand = operand,
            } },
        });
    }

    fn addBinOp(
        block: *Block,
        tag: Air.Inst.Tag,
        lhs: Air.Inst.Ref,
        rhs: Air.Inst.Ref,
    ) error{OutOfMemory}!Air.Inst.Ref {
        return block.addInst(.{
            .tag = tag,
            .data = .{ .bin_op = .{
                .lhs = lhs,
                .rhs = rhs,
            } },
        });
    }

    fn addStructFieldPtr(
        block: *Block,
        struct_ptr: Air.Inst.Ref,
        field_index: u32,
        ptr_field_ty: Type,
    ) !Air.Inst.Ref {
        const ty = Air.internedToRef(ptr_field_ty.toIntern());
        const tag: Air.Inst.Tag = switch (field_index) {
            0 => .struct_field_ptr_index_0,
            1 => .struct_field_ptr_index_1,
            2 => .struct_field_ptr_index_2,
            3 => .struct_field_ptr_index_3,
            else => {
                return block.addInst(.{
                    .tag = .struct_field_ptr,
                    .data = .{ .ty_pl = .{
                        .ty = ty,
                        .payload = try block.sema.addExtra(Air.StructField{
                            .struct_operand = struct_ptr,
                            .field_index = field_index,
                        }),
                    } },
                });
            },
        };
        return block.addInst(.{
            .tag = tag,
            .data = .{ .ty_op = .{
                .ty = ty,
                .operand = struct_ptr,
            } },
        });
    }
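
    // Note on `addStructFieldPtr` above: AIR provides dedicated
    // `struct_field_ptr_index_0` through `..._3` tags which encode the field
    // index in the tag itself, so accesses to the first four fields need no
    // extra payload entry; presumably this is an AIR size optimization, with
    // the general `struct_field_ptr` form covering the remaining indices.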

    fn addStructFieldVal(
        block: *Block,
        struct_val: Air.Inst.Ref,
        field_index: u32,
        field_ty: Type,
    ) !Air.Inst.Ref {
        return block.addInst(.{
            .tag = .struct_field_val,
            .data = .{ .ty_pl = .{
                .ty = Air.internedToRef(field_ty.toIntern()),
                .payload = try block.sema.addExtra(Air.StructField{
                    .struct_operand = struct_val,
                    .field_index = field_index,
                }),
            } },
        });
    }

    fn addSliceElemPtr(
        block: *Block,
        slice: Air.Inst.Ref,
        elem_index: Air.Inst.Ref,
        elem_ptr_ty: Type,
    ) !Air.Inst.Ref {
        return block.addInst(.{
            .tag = .slice_elem_ptr,
            .data = .{ .ty_pl = .{
                .ty = Air.internedToRef(elem_ptr_ty.toIntern()),
                .payload = try block.sema.addExtra(Air.Bin{
                    .lhs = slice,
                    .rhs = elem_index,
                }),
            } },
        });
    }

    fn addPtrElemPtr(
        block: *Block,
        array_ptr: Air.Inst.Ref,
        elem_index: Air.Inst.Ref,
        elem_ptr_ty: Type,
    ) !Air.Inst.Ref {
        const ty_ref = Air.internedToRef(elem_ptr_ty.toIntern());
        return block.addPtrElemPtrTypeRef(array_ptr, elem_index, ty_ref);
    }

    fn addPtrElemPtrTypeRef(
        block: *Block,
        array_ptr: Air.Inst.Ref,
        elem_index: Air.Inst.Ref,
        elem_ptr_ty: Air.Inst.Ref,
    ) !Air.Inst.Ref {
        return block.addInst(.{
            .tag = .ptr_elem_ptr,
            .data = .{ .ty_pl = .{
                .ty = elem_ptr_ty,
                .payload = try block.sema.addExtra(Air.Bin{
                    .lhs = array_ptr,
                    .rhs = elem_index,
                }),
            } },
        });
    }

    fn addCmpVector(block: *Block, lhs: Air.Inst.Ref, rhs: Air.Inst.Ref, cmp_op: std.math.CompareOperator) !Air.Inst.Ref {
        const sema = block.sema;
        const mod = sema.mod;
        return block.addInst(.{
            .tag = if (block.float_mode == .Optimized) .cmp_vector_optimized else .cmp_vector,
            .data = .{ .ty_pl = .{
                .ty = Air.internedToRef((try mod.vectorType(.{
                    .len = sema.typeOf(lhs).vectorLen(mod),
                    .child = .bool_type,
                })).toIntern()),
                .payload = try sema.addExtra(Air.VectorCmp{
                    .lhs = lhs,
                    .rhs = rhs,
                    .op = Air.VectorCmp.encodeOp(cmp_op),
                }),
            } },
        });
    }

    fn addAggregateInit(
        block: *Block,
        aggregate_ty: Type,
        elements: []const Air.Inst.Ref,
    ) !Air.Inst.Ref {
        const sema = block.sema;
        const ty_ref = Air.internedToRef(aggregate_ty.toIntern());
        try sema.air_extra.ensureUnusedCapacity(sema.gpa, elements.len);
        const extra_index = @as(u32, @intCast(sema.air_extra.items.len));
        sema.appendRefsAssumeCapacity(elements);

        return block.addInst(.{
            .tag = .aggregate_init,
            .data = .{ .ty_pl = .{
                .ty = ty_ref,
                .payload = extra_index,
            } },
        });
    }

    fn addUnionInit(
        block: *Block,
        union_ty: Type,
        field_index: u32,
        init: Air.Inst.Ref,
    ) !Air.Inst.Ref {
        return block.addInst(.{
            .tag = .union_init,
            .data = .{ .ty_pl = .{
                .ty = Air.internedToRef(union_ty.toIntern()),
                .payload = try block.sema.addExtra(Air.UnionInit{
                    .field_index = field_index,
                    .init = init,
                }),
            } },
        });
    }

    pub fn addInst(block: *Block, inst: Air.Inst) error{OutOfMemory}!Air.Inst.Ref {
        return Air.indexToRef(try block.addInstAsIndex(inst));
    }

    pub fn addInstAsIndex(block: *Block, inst: Air.Inst) error{OutOfMemory}!Air.Inst.Index {
        const sema = block.sema;
        const gpa = sema.gpa;

        try sema.air_instructions.ensureUnusedCapacity(gpa, 1);
        try block.instructions.ensureUnusedCapacity(gpa, 1);

        const result_index = @as(Air.Inst.Index, @intCast(sema.air_instructions.len));
        sema.air_instructions.appendAssumeCapacity(inst);
        block.instructions.appendAssumeCapacity(result_index);
        return result_index;
    }

    /// Insert an instruction into the block at `index`. Moves all following
    /// instructions forward in the block to make room. Operation is O(N).
    pub fn insertInst(block: *Block, index: Air.Inst.Index, inst: Air.Inst) error{OutOfMemory}!Air.Inst.Ref {
        return Air.indexToRef(try block.insertInstAsIndex(index, inst));
    }

    pub fn insertInstAsIndex(block: *Block, index: Air.Inst.Index, inst: Air.Inst) error{OutOfMemory}!Air.Inst.Index {
        const sema = block.sema;
        const gpa = sema.gpa;

        try sema.air_instructions.ensureUnusedCapacity(gpa, 1);

        const result_index = @as(Air.Inst.Index, @intCast(sema.air_instructions.len));
        sema.air_instructions.appendAssumeCapacity(inst);

        try block.instructions.insert(gpa, index, result_index);
        return result_index;
    }

    fn addUnreachable(block: *Block, src: LazySrcLoc, safety_check: bool) !void {
        if (safety_check and block.wantSafety()) {
            try block.sema.safetyPanic(block, src, .unreach);
        } else {
            _ = try block.addNoOp(.unreach);
        }
    }

    pub fn startAnonDecl(block: *Block) !WipAnonDecl {
        return WipAnonDecl{
            .block = block,
            .finished = false,
        };
    }

    pub const WipAnonDecl = struct {
        block: *Block,
        finished: bool,

        pub fn deinit(wad: *WipAnonDecl) void {
            wad.* = undefined;
        }

        /// `alignment` value of 0 means to use ABI alignment.
        pub fn finish(wad: *WipAnonDecl, ty: Type, val: Value, alignment: Alignment) !Decl.Index {
            const sema = wad.block.sema;
            // Do this ahead of time because `createAnonymousDecl` depends on calling
            // `type.hasRuntimeBits()`.
            _ = try sema.typeHasRuntimeBits(ty);
            const new_decl_index = try sema.mod.createAnonymousDecl(wad.block, .{
                .ty = ty,
                .val = val,
            });
            const new_decl = sema.mod.declPtr(new_decl_index);
            new_decl.alignment = alignment;
            errdefer sema.mod.abortAnonDecl(new_decl_index);
            wad.finished = true;
            try sema.mod.finalizeAnonDecl(new_decl_index);
            return new_decl_index;
        }
    };
};

const LabeledBlock = struct {
    block: Block,
    label: Block.Label,

    fn destroy(lb: *LabeledBlock, gpa: Allocator) void {
        lb.block.instructions.deinit(gpa);
        lb.label.merges.deinit(gpa);
        gpa.destroy(lb);
    }
};

/// The value stored in the inferred allocation. This will go into
/// peer type resolution. This is stored in a separate list so that
/// the items are contiguous in memory and thus can be passed to
/// `Module.resolvePeerTypes`.
const InferredAlloc = struct {
    prongs: std.MultiArrayList(struct {
        /// The dummy instruction used as a peer to resolve the type.
        /// Although this has a redundant type with placeholder, this is
        /// needed in addition because it may be a constant value, which
        /// affects peer type resolution.
        stored_inst: Air.Inst.Ref,
        /// The bitcast instruction used as a placeholder when the
        /// new result pointer type is not yet known.
        placeholder: Air.Inst.Index,
    }) = .{},
};
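
// Illustrative sketch of where `InferredAlloc` applies:
//
//   var x = if (cond) @as(u32, 1) else other();
//
// Each store through the inferred pointer appends one prong: `stored_inst`
// feeds peer type resolution (a comptime-known operand can affect the result
// type), and `placeholder` is a temporary bitcast that gets rewritten once
// the final pointer type is chosen.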

pub fn deinit(sema: *Sema) void {
    const gpa = sema.gpa;
    sema.air_instructions.deinit(gpa);
    sema.air_extra.deinit(gpa);
    sema.inst_map.deinit(gpa);
    sema.decl_val_table.deinit(gpa);
    sema.types_to_resolve.deinit(gpa);
    {
        var it = sema.post_hoc_blocks.iterator();
        while (it.next()) |entry| {
            const labeled_block = entry.value_ptr.*;
            labeled_block.destroy(gpa);
        }
        sema.post_hoc_blocks.deinit(gpa);
    }
    sema.unresolved_inferred_allocs.deinit(gpa);
    sema.* = undefined;
}

/// Returns only the result from the body that is specified.
/// Only appropriate to call when it is determined at comptime that this body
/// has no peers.
fn resolveBody(
    sema: *Sema,
    block: *Block,
    body: []const Zir.Inst.Index,
    /// This is the instruction that a break instruction within `body` can
    /// use to return from the body.
    body_inst: Zir.Inst.Index,
) CompileError!Air.Inst.Ref {
    const break_data = (try sema.analyzeBodyBreak(block, body)) orelse
        return Air.Inst.Ref.unreachable_value;
    // For comptime control flow, we need to detect when `analyzeBody` reports
    // that we need to break from an outer block. In such a case, we use Zig's
    // error mechanism to send control flow up the stack until we find the
    // block corresponding to this break.
    if (block.is_comptime and break_data.block_inst != body_inst) {
        sema.comptime_break_inst = break_data.inst;
        return error.ComptimeBreak;
    }
    return try sema.resolveInst(break_data.operand);
}
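
// Illustrative sketch of the comptime-break mechanism: when analyzing
//
//   outer: {
//       inline while (true) break :outer x;
//   }
//
// at comptime, the inner body's break targets a block higher up the stack, so
// the break instruction is stashed in `sema.comptime_break_inst` and
// `error.ComptimeBreak` is returned; each caller re-throws until the frame
// analyzing the matching block consumes the break.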

fn analyzeBodyRuntimeBreak(sema: *Sema, block: *Block, body: []const Zir.Inst.Index) !void {
    _ = sema.analyzeBodyInner(block, body) catch |err| switch (err) {
        error.ComptimeBreak => {
            const zir_datas = sema.code.instructions.items(.data);
            const break_data = zir_datas[sema.comptime_break_inst].@"break";
            const extra = sema.code.extraData(Zir.Inst.Break, break_data.payload_index).data;
            try sema.addRuntimeBreak(block, .{
                .block_inst = extra.block_inst,
                .operand = break_data.operand,
                .inst = sema.comptime_break_inst,
            });
        },
        else => |e| return e,
    };
}

pub fn analyzeBody(
    sema: *Sema,
    block: *Block,
    body: []const Zir.Inst.Index,
) !void {
    _ = sema.analyzeBodyInner(block, body) catch |err| switch (err) {
        error.ComptimeBreak => unreachable, // unexpected comptime control flow
        else => |e| return e,
    };
}

const BreakData = struct {
    block_inst: Zir.Inst.Index,
    operand: Zir.Inst.Ref,
    inst: Zir.Inst.Index,
};

pub fn analyzeBodyBreak(
    sema: *Sema,
    block: *Block,
    body: []const Zir.Inst.Index,
) CompileError!?BreakData {
    const break_inst = sema.analyzeBodyInner(block, body) catch |err| switch (err) {
        error.ComptimeBreak => sema.comptime_break_inst,
        else => |e| return e,
    };
    if (block.instructions.items.len != 0 and
        sema.isNoReturn(Air.indexToRef(block.instructions.items[block.instructions.items.len - 1])))
        return null;
    const break_data = sema.code.instructions.items(.data)[break_inst].@"break";
    const extra = sema.code.extraData(Zir.Inst.Break, break_data.payload_index).data;
    return BreakData{
        .block_inst = extra.block_inst,
        .operand = break_data.operand,
        .inst = break_inst,
    };
}

/// ZIR instructions which are always `noreturn` return this. This matches the
/// return type of `analyzeBody` so that we can tail call them.
/// Only appropriate to return when the instruction is known to be `noreturn`
/// solely based on the ZIR tag.
const always_noreturn: CompileError!Zir.Inst.Index = @as(Zir.Inst.Index, undefined);
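
// Illustrative contrast of the two usage modes documented below (the ZIR
// spelling is approximate):
//
//   %10 = block({ ... break(%10, %a) ... break(%10, %b) ... })
//     traditional: N breaks; %a and %b go through peer type resolution and
//     the returned Zir.Inst.Index is undefined.
//
//   %20 = block_inline({ ... break_inline(%20, %c) })
//     flat: exactly one break; the break instruction itself is returned,
//     identifying both the target block and the operand.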

/// This function is the main loop of `Sema` and it can be used in two different ways:
/// * The traditional way where there are N breaks out of the block and peer type
///   resolution is done on the break operands. In this case, the `Zir.Inst.Index`
///   part of the return value will be `undefined`, and callsites should ignore it,
///   finding the block result value via the block scope.
/// * The "flat" way. There is only 1 break out of the block, and it is with a `break_inline`
///   instruction. In this case, the `Zir.Inst.Index` part of the return value will be
///   the break instruction. This communicates both which block the break applies to, as
///   well as the operand. No block scope needs to be created for this strategy.
fn analyzeBodyInner(
    sema: *Sema,
    block: *Block,
    body: []const Zir.Inst.Index,
) CompileError!Zir.Inst.Index {
    // No tracy calls here, to avoid interfering with the tail call mechanism.

    try sema.inst_map.ensureSpaceForInstructions(sema.gpa, body);

    // Most of the time, we don't need to construct a new capture scope for a
    // block. However, successive iterations of comptime loops can capture
    // different values for the same Zir.Inst.Index, so in those cases, we will
    // have to create nested capture scopes; see the `.repeat` case below.
    const parent_capture_scope = block.wip_capture_scope;
    parent_capture_scope.incRef();
    var wip_captures: WipCaptureScope = .{
        .scope = parent_capture_scope,
        .gpa = sema.gpa,
        .finalized = true, // don't finalize the parent scope
    };
    defer wip_captures.deinit();

    const mod = sema.mod;
    const map = &sema.inst_map;
    const tags = sema.code.instructions.items(.tag);
    const datas = sema.code.instructions.items(.data);

    var orig_captures: usize = parent_capture_scope.captures.count();

    var crash_info = crash_report.prepAnalyzeBody(sema, block, body);
    crash_info.push();
    defer crash_info.pop();

    var dbg_block_begins: u32 = 0;

    // We use a while (true) loop here to avoid a redundant way of breaking out of
    // the loop. The only way to break out of the loop is with a `noreturn`
    // instruction.
    var i: u32 = 0;
    const result = while (true) {
        crash_info.setBodyIndex(i);
        const inst = body[i];
        std.log.scoped(.sema_zir).debug("sema ZIR {s} %{d}", .{
            mod.namespacePtr(mod.declPtr(block.src_decl).src_namespace).file_scope.sub_file_path, inst,
        });
        const air_inst: Air.Inst.Ref = switch (tags[inst]) {
            // zig fmt: off
            .alloc => try sema.zirAlloc(block, inst),
            .alloc_inferred => try sema.zirAllocInferred(block, inst, true),
            .alloc_inferred_mut => try sema.zirAllocInferred(block, inst, false),
            .alloc_inferred_comptime => try sema.zirAllocInferredComptime(inst, true),
            .alloc_inferred_comptime_mut => try sema.zirAllocInferredComptime(inst, false),
            .alloc_mut => try sema.zirAllocMut(block, inst),
            .alloc_comptime_mut => try sema.zirAllocComptime(block, inst),
            .make_ptr_const => try sema.zirMakePtrConst(block, inst),
            .anyframe_type => try sema.zirAnyframeType(block, inst),
            .array_cat => try sema.zirArrayCat(block, inst),
            .array_mul => try sema.zirArrayMul(block, inst),
            .array_type => try sema.zirArrayType(block, inst),
            .array_type_sentinel => try sema.zirArrayTypeSentinel(block, inst),
            .vector_type => try sema.zirVectorType(block, inst),
            .as => try sema.zirAs(block, inst),
            .as_node => try sema.zirAsNode(block, inst),
            .as_shift_operand => try sema.zirAsShiftOperand(block, inst),
            .bit_and => try sema.zirBitwise(block, inst, .bit_and),
            .bit_not => try sema.zirBitNot(block, inst),
            .bit_or => try sema.zirBitwise(block, inst, .bit_or),
            .bitcast => try sema.zirBitcast(block, inst),
            .suspend_block => try sema.zirSuspendBlock(block, inst),
            .bool_not => try sema.zirBoolNot(block, inst),
            .bool_br_and => try sema.zirBoolBr(block, inst, false),
            .bool_br_or => try sema.zirBoolBr(block, inst, true),
            .c_import => try sema.zirCImport(block, inst),
            .call => try sema.zirCall(block, inst, .direct),
            .field_call => try sema.zirCall(block, inst, .field),
            .closure_get => try sema.zirClosureGet(block, inst),
            .cmp_lt => try sema.zirCmp(block, inst, .lt),
            .cmp_lte => try sema.zirCmp(block, inst, .lte),
            .cmp_eq => try sema.zirCmpEq(block, inst, .eq, Air.Inst.Tag.fromCmpOp(.eq, block.float_mode == .Optimized)),
            .cmp_gte => try sema.zirCmp(block, inst, .gte),
            .cmp_gt => try sema.zirCmp(block, inst, .gt),
            .cmp_neq => try sema.zirCmpEq(block, inst, .neq, Air.Inst.Tag.fromCmpOp(.neq, block.float_mode == .Optimized)),
            .coerce_result_ptr => try sema.zirCoerceResultPtr(block, inst),
            .decl_ref => try sema.zirDeclRef(block, inst),
            .decl_val => try sema.zirDeclVal(block, inst),
            .load => try sema.zirLoad(block, inst),
            .elem_ptr => try sema.zirElemPtr(block, inst),
            .elem_ptr_node => try sema.zirElemPtrNode(block, inst),
            .elem_ptr_imm => try sema.zirElemPtrImm(block, inst),
            .elem_val => try sema.zirElemVal(block, inst),
            .elem_val_node => try sema.zirElemValNode(block, inst),
            .elem_type_index => try sema.zirElemTypeIndex(block, inst),
            .elem_type => try sema.zirElemType(block, inst),
            .vector_elem_type => try sema.zirVectorElemType(block, inst),
            .enum_literal => try sema.zirEnumLiteral(block, inst),
            .int_from_enum => try sema.zirIntFromEnum(block, inst),
            .enum_from_int => try sema.zirEnumFromInt(block, inst),
            .err_union_code => try sema.zirErrUnionCode(block, inst),
            .err_union_code_ptr => try sema.zirErrUnionCodePtr(block, inst),
            .err_union_payload_unsafe => try sema.zirErrUnionPayload(block, inst),
            .err_union_payload_unsafe_ptr => try sema.zirErrUnionPayloadPtr(block, inst),
            .error_union_type => try sema.zirErrorUnionType(block, inst),
            .error_value => try sema.zirErrorValue(block, inst),
            .field_ptr => try sema.zirFieldPtr(block, inst, false),
            .field_ptr_init => try sema.zirFieldPtr(block, inst, true),
            .field_ptr_named => try sema.zirFieldPtrNamed(block, inst),
            .field_val => try sema.zirFieldVal(block, inst),
            .field_val_named => try sema.zirFieldValNamed(block, inst),
            .func => try sema.zirFunc(block, inst, false),
            .func_inferred => try sema.zirFunc(block, inst, true),
            .func_fancy => try sema.zirFuncFancy(block, inst),
            .import => try sema.zirImport(block, inst),
            .indexable_ptr_len => try sema.zirIndexablePtrLen(block, inst),
            .int => try sema.zirInt(block, inst),
            .int_big => try sema.zirIntBig(block, inst),
            .float => try sema.zirFloat(block, inst),
            .float128 => try sema.zirFloat128(block, inst),
            .int_type => try sema.zirIntType(inst),
            .is_non_err => try sema.zirIsNonErr(block, inst),
            .is_non_err_ptr => try sema.zirIsNonErrPtr(block, inst),
            .ret_is_non_err => try sema.zirRetIsNonErr(block, inst),
            .is_non_null => try sema.zirIsNonNull(block, inst),
            .is_non_null_ptr => try sema.zirIsNonNullPtr(block, inst),
            .merge_error_sets => try sema.zirMergeErrorSets(block, inst),
            .negate => try sema.zirNegate(block, inst),
            .negate_wrap => try sema.zirNegateWrap(block, inst),
            .optional_payload_safe => try sema.zirOptionalPayload(block, inst, true),
            .optional_payload_safe_ptr => try sema.zirOptionalPayloadPtr(block, inst, true),
            .optional_payload_unsafe => try sema.zirOptionalPayload(block, inst, false),
            .optional_payload_unsafe_ptr => try sema.zirOptionalPayloadPtr(block, inst, false),
            .optional_type => try sema.zirOptionalType(block, inst),
            .ptr_type => try sema.zirPtrType(block, inst),
            .ref => try sema.zirRef(block, inst),
            .ret_err_value_code => try sema.zirRetErrValueCode(inst),
            .shr => try sema.zirShr(block, inst, .shr),
            .shr_exact => try sema.zirShr(block, inst, .shr_exact),
            .slice_end => try sema.zirSliceEnd(block, inst),
            .slice_sentinel => try sema.zirSliceSentinel(block, inst),
            .slice_start => try sema.zirSliceStart(block, inst),
            .slice_length => try sema.zirSliceLength(block, inst),
            .str => try sema.zirStr(block, inst),
            .switch_block => try sema.zirSwitchBlock(block, inst, false),
            .switch_block_ref => try sema.zirSwitchBlock(block, inst, true),
            .type_info => try sema.zirTypeInfo(block, inst),
            .size_of => try sema.zirSizeOf(block, inst),
            .bit_size_of => try sema.zirBitSizeOf(block, inst),
            .typeof => try sema.zirTypeof(block, inst),
            .typeof_builtin => try sema.zirTypeofBuiltin(block, inst),
            .typeof_log2_int_type => try sema.zirTypeofLog2IntType(block, inst),
            .xor => try sema.zirBitwise(block, inst, .xor),
            .struct_init_empty => try sema.zirStructInitEmpty(block, inst),
            .struct_init => try sema.zirStructInit(block, inst, false),
            .struct_init_ref => try sema.zirStructInit(block, inst, true),
            .struct_init_anon => try sema.zirStructInitAnon(block, inst, false),
            .struct_init_anon_ref => try sema.zirStructInitAnon(block, inst, true),
            .array_init => try sema.zirArrayInit(block, inst, false),
            .array_init_ref => try sema.zirArrayInit(block, inst, true),
            .array_init_anon => try sema.zirArrayInitAnon(block, inst, false),
            .array_init_anon_ref => try sema.zirArrayInitAnon(block, inst, true),
            .union_init => try sema.zirUnionInit(block, inst),
            .field_type => try sema.zirFieldType(block, inst),
            .field_type_ref => try sema.zirFieldTypeRef(block, inst),
            .int_from_ptr => try sema.zirIntFromPtr(block, inst),
            .align_of => try sema.zirAlignOf(block, inst),
            .int_from_bool => try sema.zirIntFromBool(block, inst),
            .embed_file => try sema.zirEmbedFile(block, inst),
            .error_name => try sema.zirErrorName(block, inst),
            .tag_name => try sema.zirTagName(block, inst),
            .type_name => try sema.zirTypeName(block, inst),
            .frame_type => try sema.zirFrameType(block, inst),
            .frame_size => try sema.zirFrameSize(block, inst),
            .int_from_float => try sema.zirIntFromFloat(block, inst),
            .float_from_int => try sema.zirFloatFromInt(block, inst),
            .ptr_from_int => try sema.zirPtrFromInt(block, inst),
            .float_cast => try sema.zirFloatCast(block, inst),
            .int_cast => try sema.zirIntCast(block, inst),
            .ptr_cast => try sema.zirPtrCast(block, inst),
            .truncate => try sema.zirTruncate(block, inst),
            .has_decl => try sema.zirHasDecl(block, inst),
            .has_field => try sema.zirHasField(block, inst),
            .byte_swap => try sema.zirByteSwap(block, inst),
            .bit_reverse => try sema.zirBitReverse(block, inst),
            .bit_offset_of => try sema.zirBitOffsetOf(block, inst),
            .offset_of => try sema.zirOffsetOf(block, inst),
            .splat => try sema.zirSplat(block, inst),
            .reduce => try sema.zirReduce(block, inst),
            .shuffle => try sema.zirShuffle(block, inst),
            .atomic_load => try sema.zirAtomicLoad(block, inst),
            .atomic_rmw => try sema.zirAtomicRmw(block, inst),
            .mul_add => try sema.zirMulAdd(block, inst),
            .builtin_call => try sema.zirBuiltinCall(block, inst),
            .field_parent_ptr => try sema.zirFieldParentPtr(block, inst),
            .@"resume" => try sema.zirResume(block, inst),
            .@"await" => try sema.zirAwait(block, inst),
            .array_base_ptr => try sema.zirArrayBasePtr(block, inst),
            .field_base_ptr => try sema.zirFieldBasePtr(block, inst),
            .for_len => try sema.zirForLen(block, inst),
            .opt_eu_base_ty => try sema.zirOptEuBaseTy(block, inst),

            .clz => try sema.zirBitCount(block, inst, .clz, Value.clz),
            .ctz => try sema.zirBitCount(block, inst, .ctz, Value.ctz),
            .pop_count => try sema.zirBitCount(block, inst, .popcount, Value.popCount),

            .sqrt => try sema.zirUnaryMath(block, inst, .sqrt, Value.sqrt),
            .sin => try sema.zirUnaryMath(block, inst, .sin, Value.sin),
            .cos => try sema.zirUnaryMath(block, inst, .cos, Value.cos),
            .tan => try sema.zirUnaryMath(block, inst, .tan, Value.tan),
            .exp => try sema.zirUnaryMath(block, inst, .exp, Value.exp),
            .exp2 => try sema.zirUnaryMath(block, inst, .exp2, Value.exp2),
            .log => try sema.zirUnaryMath(block, inst, .log, Value.log),
            .log2 => try sema.zirUnaryMath(block, inst, .log2, Value.log2),
            .log10 => try sema.zirUnaryMath(block, inst, .log10, Value.log10),
            .fabs => try sema.zirUnaryMath(block, inst, .fabs, Value.fabs),
            .floor => try sema.zirUnaryMath(block, inst, .floor, Value.floor),
            .ceil => try sema.zirUnaryMath(block, inst, .ceil, Value.ceil),
            .round => try sema.zirUnaryMath(block, inst, .round, Value.round),
            .trunc => try sema.zirUnaryMath(block, inst, .trunc_float, Value.trunc),

            .error_set_decl => try sema.zirErrorSetDecl(block, inst, .parent),
            .error_set_decl_anon => try sema.zirErrorSetDecl(block, inst, .anon),
            .error_set_decl_func => try sema.zirErrorSetDecl(block, inst, .func),

            .add => try sema.zirArithmetic(block, inst, .add, true),
            .addwrap => try sema.zirArithmetic(block, inst, .addwrap, true),
            .add_sat => try sema.zirArithmetic(block, inst, .add_sat, true),
            .add_unsafe => try sema.zirArithmetic(block, inst, .add_unsafe, false),
            .mul => try sema.zirArithmetic(block, inst, .mul, true),
            .mulwrap => try sema.zirArithmetic(block, inst, .mulwrap, true),
            .mul_sat => try sema.zirArithmetic(block, inst, .mul_sat, true),
            .sub => try sema.zirArithmetic(block, inst, .sub, true),
            .subwrap => try sema.zirArithmetic(block, inst, .subwrap, true),
            .sub_sat => try sema.zirArithmetic(block, inst, .sub_sat, true),

            .div => try sema.zirDiv(block, inst),
            .div_exact => try sema.zirDivExact(block, inst),
            .div_floor => try sema.zirDivFloor(block, inst),
            .div_trunc => try sema.zirDivTrunc(block, inst),

            .mod_rem => try sema.zirModRem(block, inst),
            .mod => try sema.zirMod(block, inst),
            .rem => try sema.zirRem(block, inst),

            .max => try sema.zirMinMax(block, inst, .max),
            .min => try sema.zirMinMax(block, inst, .min),

            .shl => try sema.zirShl(block, inst, .shl),
            .shl_exact => try sema.zirShl(block, inst, .shl_exact),
            .shl_sat => try sema.zirShl(block, inst, .shl_sat),

            .ret_ptr => try sema.zirRetPtr(block),
            .ret_type => Air.internedToRef(sema.fn_ret_ty.toIntern()),

            // Instructions that we know to *always* be noreturn based solely on their tag.
            // These functions match the return type of analyzeBody so that we can
            // tail call them here.
            .compile_error => break sema.zirCompileError(block, inst),
            .ret_implicit => break sema.zirRetImplicit(block, inst),
            .ret_node => break sema.zirRetNode(block, inst),
            .ret_load => break sema.zirRetLoad(block, inst),
            .ret_err_value => break sema.zirRetErrValue(block, inst),
            .@"unreachable" => break sema.zirUnreachable(block, inst),
            .panic => break sema.zirPanic(block, inst),
            .trap => break sema.zirTrap(block, inst),
            // zig fmt: on

            .extended => ext: {
                const extended = datas[inst].extended;
                break :ext switch (extended.opcode) {
                    // zig fmt: off
                    .variable => try sema.zirVarExtended(block, extended),
                    .struct_decl => try sema.zirStructDecl(block, extended, inst),
                    .enum_decl => try sema.zirEnumDecl(block, extended, inst),
                    .union_decl => try sema.zirUnionDecl(block, extended, inst),
                    .opaque_decl => try sema.zirOpaqueDecl(block, extended, inst),
                    .this => try sema.zirThis(block, extended),
                    .ret_addr => try sema.zirRetAddr(block, extended),
                    .builtin_src => try sema.zirBuiltinSrc(block, extended),
                    .error_return_trace => try sema.zirErrorReturnTrace(block),
                    .frame => try sema.zirFrame(block, extended),
                    .frame_address => try sema.zirFrameAddress(block, extended),
                    .alloc => try sema.zirAllocExtended(block, extended),
                    .builtin_extern => try sema.zirBuiltinExtern(block, extended),
                    .@"asm" => try sema.zirAsm(block, extended, false),
                    .asm_expr => try sema.zirAsm(block, extended, true),
                    .typeof_peer => try sema.zirTypeofPeer(block, extended),
                    .compile_log => try sema.zirCompileLog(extended),
                    .min_multi => try sema.zirMinMaxMulti(block, extended, .min),
                    .max_multi => try sema.zirMinMaxMulti(block, extended, .max),
                    .add_with_overflow => try sema.zirOverflowArithmetic(block, extended, extended.opcode),
                    .sub_with_overflow => try sema.zirOverflowArithmetic(block, extended, extended.opcode),
                    .mul_with_overflow => try sema.zirOverflowArithmetic(block, extended, extended.opcode),
                    .shl_with_overflow => try sema.zirOverflowArithmetic(block, extended, extended.opcode),
                    .c_undef => try sema.zirCUndef(block, extended),
                    .c_include => try sema.zirCInclude(block, extended),
                    .c_define => try sema.zirCDefine(block, extended),
                    .wasm_memory_size => try sema.zirWasmMemorySize(block, extended),
                    .wasm_memory_grow => try sema.zirWasmMemoryGrow(block, extended),
                    .prefetch => try sema.zirPrefetch(block, extended),
                    .err_set_cast => try sema.zirErrSetCast(block, extended),
                    .await_nosuspend => try sema.zirAwaitNosuspend(block, extended),
                    .select => try sema.zirSelect(block, extended),
                    .int_from_error => try sema.zirIntFromError(block, extended),
                    .error_from_int => try sema.zirErrorFromInt(block, extended),
                    .reify => try sema.zirReify(block, extended, inst),
                    .builtin_async_call => try sema.zirBuiltinAsyncCall(block, extended),
                    .cmpxchg => try sema.zirCmpxchg(block, extended),
                    .c_va_arg => try sema.zirCVaArg(block, extended),
                    .c_va_copy => try sema.zirCVaCopy(block, extended),
                    .c_va_end => try sema.zirCVaEnd(block, extended),
                    .c_va_start => try sema.zirCVaStart(block, extended),
                    .ptr_cast_full => try sema.zirPtrCastFull(block, extended),
                    .ptr_cast_no_dest => try sema.zirPtrCastNoDest(block, extended),
                    .work_item_id => try sema.zirWorkItem(block, extended, extended.opcode),
                    .work_group_size => try sema.zirWorkItem(block, extended, extended.opcode),
                    .work_group_id => try sema.zirWorkItem(block, extended, extended.opcode),
                    .in_comptime => try sema.zirInComptime(block),
                    // zig fmt: on

                    .fence => {
                        try sema.zirFence(block, extended);
                        i += 1;
                        continue;
                    },
                    .set_float_mode => {
                        try sema.zirSetFloatMode(block, extended);
                        i += 1;
                        continue;
                    },
                    .set_align_stack => {
                        try sema.zirSetAlignStack(block, extended);
                        i += 1;
                        continue;
                    },
                    .set_cold => {
                        try sema.zirSetCold(block, extended);
                        i += 1;
                        continue;
                    },
                    .breakpoint => {
                        if (!block.is_comptime) {
                            _ = try block.addNoOp(.breakpoint);
                        }
                        i += 1;
                        continue;
                    },
                    .value_placeholder => unreachable, // never appears in a body
                };
            },

            // Instructions that we know can *never* be noreturn based solely on
            // their tag. We avoid needlessly checking if they are noreturn and
            // continue the loop.
            // We also know that they cannot be referenced later, so we avoid
            // putting them into the map.
            .dbg_stmt => {
                try sema.zirDbgStmt(block, inst);
                i += 1;
                continue;
            },
            .dbg_var_ptr => {
                try sema.zirDbgVar(block, inst, .dbg_var_ptr);
                i += 1;
                continue;
            },
            .dbg_var_val => {
                try sema.zirDbgVar(block, inst, .dbg_var_val);
                i += 1;
                continue;
            },
            .dbg_block_begin => {
                dbg_block_begins += 1;
                try sema.zirDbgBlockBegin(block);
                i += 1;
                continue;
            },
            .dbg_block_end => {
                dbg_block_begins -= 1;
                try sema.zirDbgBlockEnd(block);
                i += 1;
                continue;
            },
            .ensure_err_union_payload_void => {
                try sema.zirEnsureErrUnionPayloadVoid(block, inst);
                i += 1;
                continue;
            },
            .ensure_result_non_error => {
                try sema.zirEnsureResultNonError(block, inst);
                i += 1;
                continue;
            },
            .ensure_result_used => {
                try sema.zirEnsureResultUsed(block, inst);
                i += 1;
                continue;
            },
            .set_eval_branch_quota => {
                try sema.zirSetEvalBranchQuota(block, inst);
                i += 1;
                continue;
            },
            .atomic_store => {
                try sema.zirAtomicStore(block, inst);
                i += 1;
                continue;
            },
            .store => {
                try sema.zirStore(block, inst);
                i += 1;
                continue;
            },
            .store_node => {
                try sema.zirStoreNode(block, inst);
                i += 1;
                continue;
            },
            .store_to_inferred_ptr => {
                try sema.zirStoreToInferredPtr(block, inst);
                i += 1;
                continue;
            },
            .resolve_inferred_alloc => {
                try sema.zirResolveInferredAlloc(block, inst);
                i += 1;
                continue;
            },
            .validate_array_init_ty => {
                try sema.zirValidateArrayInitTy(block, inst);
                i += 1;
                continue;
            },
            .validate_struct_init_ty => {
                try sema.zirValidateStructInitTy(block, inst);
                i += 1;
                continue;
            },
            .validate_struct_init => {
                try sema.zirValidateStructInit(block, inst);
                i += 1;
                continue;
            },
            .validate_array_init => {
                try sema.zirValidateArrayInit(block, inst);
                i += 1;
                continue;
            },
            .validate_deref => {
                try sema.zirValidateDeref(block, inst);
                i += 1;
                continue;
            },
            .@"export" => {
                try sema.zirExport(block, inst);
                i += 1;
                continue;
            },
            .export_value => {
                try sema.zirExportValue(block, inst);
                i += 1;
                continue;
            },
            .set_runtime_safety => {
                try sema.zirSetRuntimeSafety(block, inst);
                i += 1;
                continue;
            },
            .param => {
                try sema.zirParam(block, inst, false);
                i += 1;
                continue;
            },
            .param_comptime => {
                try sema.zirParam(block, inst, true);
                i += 1;
                continue;
            },
            .param_anytype => {
                try sema.zirParamAnytype(block, inst, false);
                i += 1;
                continue;
            },
            .param_anytype_comptime => {
                try sema.zirParamAnytype(block, inst, true);
                i += 1;
                continue;
            },
            .closure_capture => {
                try sema.zirClosureCapture(block, inst);
                i += 1;
                continue;
            },
            .memcpy => {
                try sema.zirMemcpy(block, inst);
                i += 1;
                continue;
            },
            .memset => {
                try sema.zirMemset(block, inst);
                i += 1;
                continue;
            },
            .check_comptime_control_flow => {
                if (!block.is_comptime) {
                    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
                    const src = inst_data.src();
                    const inline_block = Zir.refToIndex(inst_data.operand).?;

                    var check_block = block;
                    const target_runtime_index = while (true) {
                        if (check_block.inline_block == inline_block) {
                            break check_block.runtime_index;
                        }
                        check_block = check_block.parent.?;
                    };

                    if (@intFromEnum(target_runtime_index) < @intFromEnum(block.runtime_index)) {
                        const runtime_src = block.runtime_cond orelse block.runtime_loop.?;
                        const msg = msg: {
                            const msg = try sema.errMsg(block, src, "comptime control flow inside runtime block", .{});
                            errdefer msg.destroy(sema.gpa);

                            try sema.errNote(block, runtime_src, msg, "runtime control flow here", .{});
                            break :msg msg;
                        };
                        return sema.failWithOwnedErrorMsg(msg);
                    }
                }
                i += 1;
                continue;
            },
            .save_err_ret_index => {
                try sema.zirSaveErrRetIndex(block, inst);
                i += 1;
                continue;
            },
            .restore_err_ret_index => {
                try sema.zirRestoreErrRetIndex(block, inst);
                i += 1;
                continue;
            },
            // Special case instructions to handle comptime control flow.
            .@"break" => {
                if (block.is_comptime) {
                    break inst; // same as break_inline
                } else {
                    break sema.zirBreak(block, inst);
                }
            },
            .break_inline => {
                if (block.is_comptime) {
                    break inst;
                } else {
                    sema.comptime_break_inst = inst;
                    return error.ComptimeBreak;
                }
            },
            .repeat => {
                if (block.is_comptime) {
                    // Send comptime control flow back to the beginning of this block.
                    const src = LazySrcLoc.nodeOffset(datas[inst].node);
                    try sema.emitBackwardBranch(block, src);
                    if (wip_captures.scope.captures.count() != orig_captures) {
                        // We need to construct new capture scopes for the next loop iteration so it
                        // can capture values without clobbering the earlier iteration's captures.
                        // At first, we reused the parent capture scope as an optimization, but for
                        // successive scopes we have to create new ones as children of the parent
                        // scope.
                        try wip_captures.reset(parent_capture_scope);
                        block.wip_capture_scope = wip_captures.scope;
                        orig_captures = 0;
                    }
                    i = 0;
                    continue;
                } else {
                    break always_noreturn;
                }
            },
            .repeat_inline => {
                // Send comptime control flow back to the beginning of this block.
                const src = LazySrcLoc.nodeOffset(datas[inst].node);
                try sema.emitBackwardBranch(block, src);
                if (wip_captures.scope.captures.count() != orig_captures) {
                    // We need to construct new capture scopes for the next loop iteration so it
                    // can capture values without clobbering the earlier iteration's captures.
                    // At first, we reused the parent capture scope as an optimization, but for
                    // successive scopes we have to create new ones as children of the parent
                    // scope.
                    try wip_captures.reset(parent_capture_scope);
                    block.wip_capture_scope = wip_captures.scope;
                    orig_captures = 0;
                }
                i = 0;
                continue;
            },
            .loop => blk: {
                if (!block.is_comptime) break :blk try sema.zirLoop(block, inst);
                // Same as `block_inline`. TODO https://github.com/ziglang/zig/issues/8220
                const inst_data = datas[inst].pl_node;
                const extra = sema.code.extraData(Zir.Inst.Block, inst_data.payload_index);
                const inline_body = sema.code.extra[extra.end..][0..extra.data.body_len];
                const break_data = (try sema.analyzeBodyBreak(block, inline_body)) orelse
                    break always_noreturn;
                if (inst == break_data.block_inst) {
                    break :blk try sema.resolveInst(break_data.operand);
                } else {
                    break break_data.inst;
                }
            },
            .block, .block_comptime => blk: {
                if (!block.is_comptime) {
                    break :blk try sema.zirBlock(block, inst, tags[inst] == .block_comptime);
                }
                // Same as `block_inline`. TODO https://github.com/ziglang/zig/issues/8220
                const inst_data = datas[inst].pl_node;
                const extra = sema.code.extraData(Zir.Inst.Block, inst_data.payload_index);
                const inline_body = sema.code.extra[extra.end..][0..extra.data.body_len];
                // If this block contains a function prototype, we need to reset the
                // current list of parameters and restore it later.
                // Note: this probably needs to be resolved in a more general manner.
                const prev_params = block.params;
                block.params = .{};
                defer block.params = prev_params;
                const break_data = (try sema.analyzeBodyBreak(block, inline_body)) orelse
                    break always_noreturn;
                if (inst == break_data.block_inst) {
                    break :blk try sema.resolveInst(break_data.operand);
                } else {
                    break break_data.inst;
                }
            },
            .block_inline => blk: {
                // Directly analyze the block body without introducing a new block.
                // However, in the case of a corresponding break_inline which reaches
                // through a runtime conditional branch, we must retroactively emit
                // a block, so we remember the block index here just in case.
                const block_index = block.instructions.items.len;
                const inst_data = datas[inst].pl_node;
                const extra = sema.code.extraData(Zir.Inst.Block, inst_data.payload_index);
                const inline_body = sema.code.extra[extra.end..][0..extra.data.body_len];
                const gpa = sema.gpa;

                const opt_break_data = b: {
                    // Create a temporary child block so that this inline block is properly
                    // labeled for any .restore_err_ret_index instructions.
                    var child_block = block.makeSubBlock();

                    // If this block contains a function prototype, we need to reset the
                    // current list of parameters and restore it later.
                    // Note: this probably needs to be resolved in a more general manner.
                    child_block.inline_block =
                        if (tags[inline_body[inline_body.len - 1]] == .repeat_inline) inline_body[0] else inst;

                    var label: Block.Label = .{
                        .zir_block = inst,
                        .merges = undefined,
                    };
                    child_block.label = &label;

                    // Write these instructions directly into the parent block.
                    child_block.instructions = block.instructions;
                    defer block.instructions = child_block.instructions;

                    break :b try sema.analyzeBodyBreak(&child_block, inline_body);
                };

                // A runtime conditional branch that needs a post-hoc block to be
                // emitted communicates this by mapping the block index into the inst map.
                if (map.get(inst)) |new_block_ref| ph: {
                    // Comptime control flow populates the map, so we don't actually know
                    // if this is a post-hoc runtime block until we check the
                    // post_hoc_blocks map.
                    const new_block_inst = Air.refToIndex(new_block_ref) orelse break :ph;
                    const labeled_block = sema.post_hoc_blocks.get(new_block_inst) orelse
                        break :ph;

                    // In this case we need to move all the instructions starting at
                    // block_index from the current block into this new one.

                    if (opt_break_data) |break_data| {
                        // This is a comptime break which we now change to a runtime break
                        // since it crosses a runtime branch.
                        // It may pass through our currently being analyzed block_inline or it
                        // may point directly to it. In the latter case, this modifies the
                        // block that we are about to look up in the post_hoc_blocks map below.
                        try sema.addRuntimeBreak(block, break_data);
                    } else {
                        // Here the comptime control flow ends with noreturn; however
                        // we have runtime control flow continuing after this block.
                        // This branch is therefore handled by the `i += 1; continue;`
                        // logic below.
                    }

                    try labeled_block.block.instructions.appendSlice(gpa, block.instructions.items[block_index..]);
                    block.instructions.items.len = block_index;

                    const block_result = try sema.analyzeBlockBody(block, inst_data.src(), &labeled_block.block, &labeled_block.label.merges);
                    {
                        // Destroy the ad-hoc block entry so that it does not interfere with
                        // the next iteration of comptime control flow, if any.
                        labeled_block.destroy(gpa);
                        assert(sema.post_hoc_blocks.remove(new_block_inst));
                    }
                    map.putAssumeCapacity(inst, block_result);
                    i += 1;
                    continue;
                }

                const break_data = opt_break_data orelse break always_noreturn;
                if (inst == break_data.block_inst) {
                    break :blk try sema.resolveInst(break_data.operand);
                } else {
                    break break_data.inst;
                }
            },
            .condbr => blk: {
                if (!block.is_comptime) break sema.zirCondbr(block, inst);
                // Same as condbr_inline. TODO https://github.com/ziglang/zig/issues/8220
                const inst_data = datas[inst].pl_node;
                const cond_src: LazySrcLoc = .{ .node_offset_if_cond = inst_data.src_node };
                const extra = sema.code.extraData(Zir.Inst.CondBr, inst_data.payload_index);
                const then_body = sema.code.extra[extra.end..][0..extra.data.then_body_len];
                const else_body = sema.code.extra[extra.end + then_body.len ..][0..extra.data.else_body_len];
                const cond = sema.resolveInstConst(block, cond_src, extra.data.condition, "condition in comptime branch must be comptime-known") catch |err| {
                    if (err == error.AnalysisFail and block.comptime_reason != null) try block.comptime_reason.?.explain(sema, sema.err);
                    return err;
                };
                const inline_body = if (cond.val.toBool()) then_body else else_body;

                try sema.maybeErrorUnwrapCondbr(block, inline_body, extra.data.condition, cond_src);
                const break_data = (try sema.analyzeBodyBreak(block, inline_body)) orelse
                    break always_noreturn;
                if (inst == break_data.block_inst) {
                    break :blk try sema.resolveInst(break_data.operand);
                } else {
                    break break_data.inst;
                }
            },
            .condbr_inline => blk: {
                const inst_data = datas[inst].pl_node;
                const cond_src: LazySrcLoc = .{ .node_offset_if_cond = inst_data.src_node };
                const extra = sema.code.extraData(Zir.Inst.CondBr, inst_data.payload_index);
                const then_body = sema.code.extra[extra.end..][0..extra.data.then_body_len];
                const else_body = sema.code.extra[extra.end + then_body.len ..][0..extra.data.else_body_len];
                const cond = sema.resolveInstConst(block, cond_src, extra.data.condition, "condition in comptime branch must be comptime-known") catch |err| {
                    if (err == error.AnalysisFail and block.comptime_reason != null) try block.comptime_reason.?.explain(sema, sema.err);
                    return err;
                };
                const inline_body = if (cond.val.toBool()) then_body else else_body;

                try sema.maybeErrorUnwrapCondbr(block, inline_body, extra.data.condition, cond_src);
                const old_runtime_index = block.runtime_index;
                defer block.runtime_index = old_runtime_index;
                const break_data = (try sema.analyzeBodyBreak(block, inline_body)) orelse
                    break always_noreturn;
                if (inst == break_data.block_inst) {
                    break :blk try sema.resolveInst(break_data.operand);
                } else {
                    break break_data.inst;
                }
            },
.@"try" => blk: {
|
|
if (!block.is_comptime) break :blk try sema.zirTry(block, inst);
|
|
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
|
|
const src = inst_data.src();
|
|
const operand_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
|
|
const extra = sema.code.extraData(Zir.Inst.Try, inst_data.payload_index);
|
|
const inline_body = sema.code.extra[extra.end..][0..extra.data.body_len];
|
|
const err_union = try sema.resolveInst(extra.data.operand);
|
|
const err_union_ty = sema.typeOf(err_union);
|
|
if (err_union_ty.zigTypeTag(mod) != .ErrorUnion) {
|
|
return sema.fail(block, operand_src, "expected error union type, found '{}'", .{
|
|
err_union_ty.fmt(mod),
|
|
});
|
|
}
|
|
const is_non_err = try sema.analyzeIsNonErrComptimeOnly(block, operand_src, err_union);
|
|
assert(is_non_err != .none);
|
|
const is_non_err_val = sema.resolveConstValue(block, operand_src, is_non_err, "try operand inside comptime block must be comptime-known") catch |err| {
|
|
if (err == error.AnalysisFail and block.comptime_reason != null) try block.comptime_reason.?.explain(sema, sema.err);
|
|
return err;
|
|
};
|
|
if (is_non_err_val.toBool()) {
|
|
break :blk try sema.analyzeErrUnionPayload(block, src, err_union_ty, err_union, operand_src, false);
|
|
}
|
|
const break_data = (try sema.analyzeBodyBreak(block, inline_body)) orelse
|
|
break always_noreturn;
|
|
if (inst == break_data.block_inst) {
|
|
break :blk try sema.resolveInst(break_data.operand);
|
|
} else {
|
|
break break_data.inst;
|
|
}
|
|
},
|
|
.try_ptr => blk: {
|
|
if (!block.is_comptime) break :blk try sema.zirTryPtr(block, inst);
|
|
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
|
|
const src = inst_data.src();
|
|
const operand_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
|
|
const extra = sema.code.extraData(Zir.Inst.Try, inst_data.payload_index);
|
|
const inline_body = sema.code.extra[extra.end..][0..extra.data.body_len];
|
|
const operand = try sema.resolveInst(extra.data.operand);
|
|
const err_union = try sema.analyzeLoad(block, src, operand, operand_src);
|
|
const is_non_err = try sema.analyzeIsNonErrComptimeOnly(block, operand_src, err_union);
|
|
assert(is_non_err != .none);
|
|
const is_non_err_val = sema.resolveConstValue(block, operand_src, is_non_err, "try operand inside comptime block must be comptime-known") catch |err| {
|
|
if (err == error.AnalysisFail and block.comptime_reason != null) try block.comptime_reason.?.explain(sema, sema.err);
|
|
return err;
|
|
};
|
|
if (is_non_err_val.toBool()) {
|
|
break :blk try sema.analyzeErrUnionPayloadPtr(block, src, operand, false, false);
|
|
}
|
|
const break_data = (try sema.analyzeBodyBreak(block, inline_body)) orelse
|
|
break always_noreturn;
|
|
if (inst == break_data.block_inst) {
|
|
break :blk try sema.resolveInst(break_data.operand);
|
|
} else {
|
|
break break_data.inst;
|
|
}
|
|
},
|
|
.@"defer" => blk: {
|
|
const inst_data = sema.code.instructions.items(.data)[inst].@"defer";
|
|
const defer_body = sema.code.extra[inst_data.index..][0..inst_data.len];
|
|
const break_inst = sema.analyzeBodyInner(block, defer_body) catch |err| switch (err) {
|
|
error.ComptimeBreak => sema.comptime_break_inst,
|
|
else => |e| return e,
|
|
};
|
|
if (break_inst != defer_body[defer_body.len - 1]) break always_noreturn;
|
|
break :blk Air.Inst.Ref.void_value;
|
|
},
|
|
.defer_err_code => blk: {
|
|
const inst_data = sema.code.instructions.items(.data)[inst].defer_err_code;
|
|
const extra = sema.code.extraData(Zir.Inst.DeferErrCode, inst_data.payload_index).data;
|
|
const defer_body = sema.code.extra[extra.index..][0..extra.len];
|
|
const err_code = try sema.resolveInst(inst_data.err_code);
|
|
map.putAssumeCapacity(extra.remapped_err_code, err_code);
|
|
const break_inst = sema.analyzeBodyInner(block, defer_body) catch |err| switch (err) {
|
|
error.ComptimeBreak => sema.comptime_break_inst,
|
|
else => |e| return e,
|
|
};
|
|
if (break_inst != defer_body[defer_body.len - 1]) break always_noreturn;
|
|
break :blk Air.Inst.Ref.void_value;
|
|
},
|
|
};
|
|
        if (sema.isNoReturn(air_inst)) {
            // We're going to assume that the body itself is noreturn, so let's ensure that now.
            assert(block.instructions.items.len > 0);
            assert(sema.isNoReturn(Air.indexToRef(block.instructions.items[block.instructions.items.len - 1])));
            break always_noreturn;
        }
        map.putAssumeCapacity(inst, air_inst);
        i += 1;
    };

    // Balance out dbg_block_begins in case of an early noreturn.
    const noreturn_inst = block.instructions.popOrNull();
    while (dbg_block_begins > 0) {
        dbg_block_begins -= 1;
        if (block.is_comptime or mod.comp.bin_file.options.strip) continue;

        _ = try block.addInst(.{
            .tag = .dbg_block_end,
            .data = undefined,
        });
    }
    if (noreturn_inst) |some| try block.instructions.append(sema.gpa, some);

    if (!wip_captures.finalized) {
        // We've updated the capture scope due to a `repeat` instruction where
        // the body had a capture; finalize our child scope and reset to the
        // parent capture scope.
        try wip_captures.finalize();
        block.wip_capture_scope = parent_capture_scope;
    }

    return result;
}

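/// Like `resolveInst`, except that a `.none` ZIR reference resolves to `.none`
/// instead of tripping the assertion in `resolveInst`.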
pub fn resolveInstAllowNone(sema: *Sema, zir_ref: Zir.Inst.Ref) !Air.Inst.Ref {
    if (zir_ref == .none) {
        return .none;
    } else {
        return resolveInst(sema, zir_ref);
    }
}

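/// Resolves a ZIR reference to the corresponding AIR reference. Indexes below
/// `InternPool.static_len` denote fixed constant values and map directly;
/// everything else is looked up in `inst_map`. Returns `error.GenericPoison`
/// when the mapped instruction is the generic poison value.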
pub fn resolveInst(sema: *Sema, zir_ref: Zir.Inst.Ref) !Air.Inst.Ref {
    assert(zir_ref != .none);
    const i = @intFromEnum(zir_ref);
    // The first section of indexes corresponds to a set number of constant values.
    // We intentionally map the same indexes to the same values between ZIR and AIR.
    if (i < InternPool.static_len) return @as(Air.Inst.Ref, @enumFromInt(i));
    // The remaining indexes refer to the map of ZIR => AIR.
    const inst = sema.inst_map.get(i - InternPool.static_len).?;
    if (inst == .generic_poison) return error.GenericPoison;
    return inst;
}

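/// Resolves `zir_ref`, coerces it to `bool`, and returns the comptime-known
/// value, failing with `reason` as an error note when it is not comptime-known.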
fn resolveConstBool(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    zir_ref: Zir.Inst.Ref,
    reason: []const u8,
) !bool {
    const air_inst = try sema.resolveInst(zir_ref);
    const wanted_type = Type.bool;
    const coerced_inst = try sema.coerce(block, wanted_type, air_inst, src);
    const val = try sema.resolveConstValue(block, src, coerced_inst, reason);
    return val.toBool();
}

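/// Resolves `zir_ref`, coerces it to `[]const u8`, and returns the
/// comptime-known string, copied into `sema.arena`.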
pub fn resolveConstString(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    zir_ref: Zir.Inst.Ref,
    reason: []const u8,
) ![]u8 {
    const air_inst = try sema.resolveInst(zir_ref);
    const wanted_type = Type.slice_const_u8;
    const coerced_inst = try sema.coerce(block, wanted_type, air_inst, src);
    const val = try sema.resolveConstValue(block, src, coerced_inst, reason);
    return val.toAllocatedBytes(wanted_type, sema.arena, sema.mod);
}

pub fn resolveConstStringIntern(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    zir_ref: Zir.Inst.Ref,
    reason: []const u8,
) !InternPool.NullTerminatedString {
    const air_inst = try sema.resolveInst(zir_ref);
    const wanted_type = Type.slice_const_u8;
    const coerced_inst = try sema.coerce(block, wanted_type, air_inst, src);
    const val = try sema.resolveConstValue(block, src, coerced_inst, reason);
    return val.toIpString(wanted_type, sema.mod);
}

pub fn resolveType(sema: *Sema, block: *Block, src: LazySrcLoc, zir_ref: Zir.Inst.Ref) !Type {
    const air_inst = try sema.resolveInst(zir_ref);
    assert(air_inst != .var_args_param_type);
    const ty = try sema.analyzeAsType(block, src, air_inst);
    if (ty.isGenericPoison()) return error.GenericPoison;
    return ty;
}

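/// Resolves the destination (result) type for a cast builtin named by
/// `builtin_name`, stripping one level of error union and/or optional
/// according to `strat`. For example, with `.remove_eu_opt` a result type of
/// `anyerror!?u32` resolves to `u32`. A generic-poison result type becomes a
/// helpful compile error instead of propagating.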
fn resolveDestType(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    zir_ref: Zir.Inst.Ref,
    strat: enum { remove_eu_opt, remove_eu, remove_opt },
    builtin_name: []const u8,
) !Type {
    const mod = sema.mod;
    const remove_eu = switch (strat) {
        .remove_eu_opt, .remove_eu => true,
        .remove_opt => false,
    };
    const remove_opt = switch (strat) {
        .remove_eu_opt, .remove_opt => true,
        .remove_eu => false,
    };

    const raw_ty = sema.resolveType(block, src, zir_ref) catch |err| switch (err) {
        error.GenericPoison => {
            // Cast builtins use their result type as the destination type, but
            // it could be an anytype argument, which we can't catch in AstGen.
            const msg = msg: {
                const msg = try sema.errMsg(block, src, "{s} must have a known result type", .{builtin_name});
                errdefer msg.destroy(sema.gpa);
                try sema.errNote(block, src, msg, "result type is unknown due to anytype parameter", .{});
                try sema.errNote(block, src, msg, "use @as to provide explicit result type", .{});
                break :msg msg;
            };
            return sema.failWithOwnedErrorMsg(msg);
        },
        else => |e| return e,
    };

    if (remove_eu and raw_ty.zigTypeTag(mod) == .ErrorUnion) {
        const eu_child = raw_ty.errorUnionPayload(mod);
        if (remove_opt and eu_child.zigTypeTag(mod) == .Optional) {
            return eu_child.childType(mod);
        }
        return eu_child;
    }
    if (remove_opt and raw_ty.zigTypeTag(mod) == .Optional) {
        return raw_ty.childType(mod);
    }
    return raw_ty;
}

fn analyzeAsType(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    air_inst: Air.Inst.Ref,
) !Type {
    const wanted_type = Type.type;
    const coerced_inst = try sema.coerce(block, wanted_type, air_inst, src);
    const val = try sema.resolveConstValue(block, src, coerced_inst, "types must be comptime-known");
    return val.toType();
}

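/// Inserts instructions at `last_arg_index` which allocate and initialize a
/// fresh error return trace, mirroring the Zig code shown in the comments
/// below, and then installs it via the `set_err_return_trace` instruction.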
pub fn setupErrorReturnTrace(sema: *Sema, block: *Block, last_arg_index: usize) !void {
    const mod = sema.mod;
    const gpa = sema.gpa;
    const ip = &mod.intern_pool;
    if (!mod.backendSupportsFeature(.error_return_trace)) return;

    assert(!block.is_comptime);
    var err_trace_block = block.makeSubBlock();
    defer err_trace_block.instructions.deinit(gpa);

    const src: LazySrcLoc = .unneeded;

    // var addrs: [err_return_trace_addr_count]usize = undefined;
    const err_return_trace_addr_count = 32;
    const addr_arr_ty = try mod.arrayType(.{
        .len = err_return_trace_addr_count,
        .child = .usize_type,
    });
    const addrs_ptr = try err_trace_block.addTy(.alloc, try mod.singleMutPtrType(addr_arr_ty));

    // var st: StackTrace = undefined;
    const stack_trace_ty = try sema.getBuiltinType("StackTrace");
    try sema.resolveTypeFields(stack_trace_ty);
    const st_ptr = try err_trace_block.addTy(.alloc, try mod.singleMutPtrType(stack_trace_ty));

    // st.instruction_addresses = &addrs;
    const instruction_addresses_field_name = try ip.getOrPutString(gpa, "instruction_addresses");
    const addr_field_ptr = try sema.fieldPtr(&err_trace_block, src, st_ptr, instruction_addresses_field_name, src, true);
    try sema.storePtr2(&err_trace_block, src, addr_field_ptr, src, addrs_ptr, src, .store);

    // st.index = 0;
    const index_field_name = try ip.getOrPutString(gpa, "index");
    const index_field_ptr = try sema.fieldPtr(&err_trace_block, src, st_ptr, index_field_name, src, true);
    try sema.storePtr2(&err_trace_block, src, index_field_ptr, src, .zero_usize, src, .store);

    // @errorReturnTrace() = &st;
    _ = try err_trace_block.addUnOp(.set_err_return_trace, st_ptr);

    try block.instructions.insertSlice(gpa, last_arg_index, err_trace_block.instructions.items);
}

/// May return Value Tags: `variable`, `undef`.
/// See `resolveConstValue` for an alternative.
/// Value Tag `generic_poison` causes `error.GenericPoison` to be returned.
fn resolveValue(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    air_ref: Air.Inst.Ref,
    reason: []const u8,
) CompileError!Value {
    if (try sema.resolveMaybeUndefValAllowVariables(air_ref)) |val| {
        if (val.isGenericPoison()) return error.GenericPoison;
        return val;
    }
    return sema.failWithNeededComptime(block, src, reason);
}

/// Value Tag `variable` will cause a compile error.
/// Value Tag `undef` may be returned.
fn resolveConstMaybeUndefVal(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    inst: Air.Inst.Ref,
    reason: []const u8,
) CompileError!Value {
    if (try sema.resolveMaybeUndefValAllowVariables(inst)) |val| {
        if (val.isGenericPoison()) return error.GenericPoison;
        if (sema.mod.intern_pool.isVariable(val.toIntern()))
            return sema.failWithNeededComptime(block, src, reason);
        return val;
    }
    return sema.failWithNeededComptime(block, src, reason);
}

/// Will not return Value Tags: `variable`, `undef`. Instead they will emit compile errors.
/// See `resolveValue` for an alternative.
fn resolveConstValue(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    air_ref: Air.Inst.Ref,
    reason: []const u8,
) CompileError!Value {
    if (try sema.resolveMaybeUndefValAllowVariables(air_ref)) |val| {
        if (val.isGenericPoison()) return error.GenericPoison;
        if (val.isUndef(sema.mod)) return sema.failWithUseOfUndef(block, src);
        if (sema.mod.intern_pool.isVariable(val.toIntern()))
            return sema.failWithNeededComptime(block, src, reason);
        return val;
    }
    return sema.failWithNeededComptime(block, src, reason);
}

/// Will not return Value Tags: `variable`, `undef`. Instead they will emit compile errors.
/// Lazy values are recursively resolved.
fn resolveConstLazyValue(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    air_ref: Air.Inst.Ref,
    reason: []const u8,
) CompileError!Value {
    return sema.resolveLazyValue(try sema.resolveConstValue(block, src, air_ref, reason));
}

/// Value Tag `variable` causes this function to return `null`.
/// Value Tag `undef` causes this function to return a compile error.
fn resolveDefinedValue(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    air_ref: Air.Inst.Ref,
) CompileError!?Value {
    const mod = sema.mod;
    if (try sema.resolveMaybeUndefVal(air_ref)) |val| {
        if (val.isUndef(mod)) {
            if (block.is_typeof) return null;
            return sema.failWithUseOfUndef(block, src);
        }
        return val;
    }
    return null;
}

/// Value Tag `variable` causes this function to return `null`.
/// Value Tag `undef` causes this function to return the Value.
/// Value Tag `generic_poison` causes `error.GenericPoison` to be returned.
fn resolveMaybeUndefVal(sema: *Sema, inst: Air.Inst.Ref) CompileError!?Value {
    const val = (try sema.resolveMaybeUndefValAllowVariables(inst)) orelse return null;
    if (val.isGenericPoison()) return error.GenericPoison;
    if (val.ip_index != .none and sema.mod.intern_pool.isVariable(val.toIntern())) return null;
    return val;
}

/// Value Tag `variable` causes this function to return `null`.
/// Value Tag `undef` causes this function to return the Value.
/// Value Tag `generic_poison` causes `error.GenericPoison` to be returned.
/// Lazy values are recursively resolved.
fn resolveMaybeUndefLazyVal(sema: *Sema, inst: Air.Inst.Ref) CompileError!?Value {
    return try sema.resolveLazyValue((try sema.resolveMaybeUndefVal(inst)) orelse return null);
}

/// Value Tag `variable` results in `null`.
/// Value Tag `undef` results in the Value.
/// Value Tag `generic_poison` causes `error.GenericPoison` to be returned.
/// Value Tags `decl_ref` and `decl_ref_mut`, or any nested such value, result in `null`.
/// Lazy values are recursively resolved.
fn resolveMaybeUndefValIntable(sema: *Sema, inst: Air.Inst.Ref) CompileError!?Value {
    const val = (try sema.resolveMaybeUndefValAllowVariables(inst)) orelse return null;
    if (val.isGenericPoison()) return error.GenericPoison;
    if (val.ip_index == .none) return val;
    if (sema.mod.intern_pool.isVariable(val.toIntern())) return null;
    if (sema.mod.intern_pool.getBackingAddrTag(val.toIntern())) |addr| switch (addr) {
        .decl, .mut_decl, .comptime_field => return null,
        .int => {},
        .eu_payload, .opt_payload, .elem, .field => unreachable,
    };
    return try sema.resolveLazyValue(val);
}

/// Returns all Value tags including `variable` and `undef`.
fn resolveMaybeUndefValAllowVariables(sema: *Sema, inst: Air.Inst.Ref) CompileError!?Value {
    var make_runtime = false;
    if (try sema.resolveMaybeUndefValAllowVariablesMaybeRuntime(inst, &make_runtime)) |val| {
        if (make_runtime) return null;
        return val;
    }
    return null;
}

/// Returns all Value tags including `variable`, `undef` and `runtime_value`.
fn resolveMaybeUndefValAllowVariablesMaybeRuntime(
    sema: *Sema,
    inst: Air.Inst.Ref,
    make_runtime: *bool,
) CompileError!?Value {
    assert(inst != .none);
    // The first section of indexes corresponds to a set number of constant values.
    if (@intFromEnum(inst) < InternPool.static_len) {
        return @as(InternPool.Index, @enumFromInt(@intFromEnum(inst))).toValue();
    }

    const air_tags = sema.air_instructions.items(.tag);
    if (try sema.typeHasOnePossibleValue(sema.typeOf(inst))) |opv| {
        if (Air.refToInterned(inst)) |ip_index| {
            const val = ip_index.toValue();
            if (val.getVariable(sema.mod) != null) return val;
        }
        return opv;
    }
    const ip_index = Air.refToInterned(inst) orelse {
        switch (air_tags[Air.refToIndex(inst).?]) {
            .inferred_alloc => unreachable,
            .inferred_alloc_comptime => unreachable,
            else => return null,
        }
    };
    const val = ip_index.toValue();
    if (val.isRuntimeValue(sema.mod)) make_runtime.* = true;
    if (val.isPtrToThreadLocal(sema.mod)) make_runtime.* = true;
    return val;
}

fn failWithNeededComptime(sema: *Sema, block: *Block, src: LazySrcLoc, reason: []const u8) CompileError {
    const msg = msg: {
        const msg = try sema.errMsg(block, src, "unable to resolve comptime value", .{});
        errdefer msg.destroy(sema.gpa);

        try sema.errNote(block, src, msg, "{s}", .{reason});
        break :msg msg;
    };
    return sema.failWithOwnedErrorMsg(msg);
}

fn failWithUseOfUndef(sema: *Sema, block: *Block, src: LazySrcLoc) CompileError {
    return sema.fail(block, src, "use of undefined value here causes undefined behavior", .{});
}

fn failWithDivideByZero(sema: *Sema, block: *Block, src: LazySrcLoc) CompileError {
    return sema.fail(block, src, "division by zero here causes undefined behavior", .{});
}

fn failWithModRemNegative(sema: *Sema, block: *Block, src: LazySrcLoc, lhs_ty: Type, rhs_ty: Type) CompileError {
    return sema.fail(block, src, "remainder division with '{}' and '{}': signed integers and floats must use @rem or @mod", .{
        lhs_ty.fmt(sema.mod), rhs_ty.fmt(sema.mod),
    });
}

fn failWithExpectedOptionalType(sema: *Sema, block: *Block, src: LazySrcLoc, optional_ty: Type) CompileError {
    return sema.fail(block, src, "expected optional type, found '{}'", .{optional_ty.fmt(sema.mod)});
}

fn failWithArrayInitNotSupported(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError {
    const mod = sema.mod;
    const msg = msg: {
        const msg = try sema.errMsg(block, src, "type '{}' does not support array initialization syntax", .{
            ty.fmt(mod),
        });
        errdefer msg.destroy(sema.gpa);
        if (ty.isSlice(mod)) {
            try sema.errNote(block, src, msg, "inferred array length is specified with an underscore: '[_]{}'", .{ty.elemType2(mod).fmt(mod)});
        }
        break :msg msg;
    };
    return sema.failWithOwnedErrorMsg(msg);
}

fn failWithStructInitNotSupported(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError {
    return sema.fail(block, src, "type '{}' does not support struct initialization syntax", .{
        ty.fmt(sema.mod),
    });
}

fn failWithErrorSetCodeMissing(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    dest_err_set_ty: Type,
    src_err_set_ty: Type,
) CompileError {
    return sema.fail(block, src, "expected type '{}', found type '{}'", .{
        dest_err_set_ty.fmt(sema.mod), src_err_set_ty.fmt(sema.mod),
    });
}

fn failWithIntegerOverflow(sema: *Sema, block: *Block, src: LazySrcLoc, int_ty: Type, val: Value, vector_index: usize) CompileError {
    const mod = sema.mod;
    if (int_ty.zigTypeTag(mod) == .Vector) {
        const msg = msg: {
            const msg = try sema.errMsg(block, src, "overflow of vector type '{}' with value '{}'", .{
                int_ty.fmt(sema.mod), val.fmtValue(int_ty, sema.mod),
            });
            errdefer msg.destroy(sema.gpa);
            try sema.errNote(block, src, msg, "when computing vector element at index '{d}'", .{vector_index});
            break :msg msg;
        };
        return sema.failWithOwnedErrorMsg(msg);
    }
    return sema.fail(block, src, "overflow of integer type '{}' with value '{}'", .{
        int_ty.fmt(sema.mod), val.fmtValue(int_ty, sema.mod),
    });
}

fn failWithInvalidComptimeFieldStore(sema: *Sema, block: *Block, init_src: LazySrcLoc, container_ty: Type, field_index: usize) CompileError {
    const mod = sema.mod;
    const msg = msg: {
        const msg = try sema.errMsg(block, init_src, "value stored in comptime field does not match the default value of the field", .{});
        errdefer msg.destroy(sema.gpa);

        const struct_ty = mod.typeToStruct(container_ty) orelse break :msg msg;
        const default_value_src = mod.fieldSrcLoc(struct_ty.owner_decl, .{
            .index = field_index,
            .range = .value,
        });
        try mod.errNoteNonLazy(default_value_src, msg, "default value set here", .{});
        break :msg msg;
    };
    return sema.failWithOwnedErrorMsg(msg);
}

fn failWithUseOfAsync(sema: *Sema, block: *Block, src: LazySrcLoc) CompileError {
    const msg = msg: {
        const msg = try sema.errMsg(block, src, "async has not been implemented in the self-hosted compiler yet", .{});
        errdefer msg.destroy(sema.gpa);
        break :msg msg;
    };
    return sema.failWithOwnedErrorMsg(msg);
}

fn failWithInvalidFieldAccess(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    object_ty: Type,
    field_name: InternPool.NullTerminatedString,
) CompileError {
    const mod = sema.mod;
    const inner_ty = if (object_ty.isSinglePointer(mod)) object_ty.childType(mod) else object_ty;

    if (inner_ty.zigTypeTag(mod) == .Optional) opt: {
        const child_ty = inner_ty.optionalChild(mod);
        if (!typeSupportsFieldAccess(mod, child_ty, field_name)) break :opt;
        const msg = msg: {
            const msg = try sema.errMsg(block, src, "optional type '{}' does not support field access", .{object_ty.fmt(sema.mod)});
            errdefer msg.destroy(sema.gpa);
            try sema.errNote(block, src, msg, "consider using '.?', 'orelse', or 'if'", .{});
            break :msg msg;
        };
        return sema.failWithOwnedErrorMsg(msg);
    } else if (inner_ty.zigTypeTag(mod) == .ErrorUnion) err: {
        const child_ty = inner_ty.errorUnionPayload(mod);
        if (!typeSupportsFieldAccess(mod, child_ty, field_name)) break :err;
        const msg = msg: {
            const msg = try sema.errMsg(block, src, "error union type '{}' does not support field access", .{object_ty.fmt(sema.mod)});
            errdefer msg.destroy(sema.gpa);
            try sema.errNote(block, src, msg, "consider using 'try', 'catch', or 'if'", .{});
            break :msg msg;
        };
        return sema.failWithOwnedErrorMsg(msg);
    }
    return sema.fail(block, src, "type '{}' does not support field access", .{object_ty.fmt(sema.mod)});
}

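/// Returns whether `field_name` is a valid field access on `ty`: `len` on
/// arrays and pointers-to-arrays, `ptr` or `len` on slices, and any name on
/// types, structs, and unions.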
fn typeSupportsFieldAccess(mod: *const Module, ty: Type, field_name: InternPool.NullTerminatedString) bool {
    const ip = &mod.intern_pool;
    switch (ty.zigTypeTag(mod)) {
        .Array => return ip.stringEqlSlice(field_name, "len"),
        .Pointer => {
            const ptr_info = ty.ptrInfo(mod);
            if (ptr_info.flags.size == .Slice) {
                return ip.stringEqlSlice(field_name, "ptr") or ip.stringEqlSlice(field_name, "len");
            } else if (ptr_info.child.toType().zigTypeTag(mod) == .Array) {
                return ip.stringEqlSlice(field_name, "len");
            } else return false;
        },
        .Type, .Struct, .Union => return true,
        else => return false,
    }
}

/// We don't return a pointer to the new error note because the pointer
/// becomes invalid when you add another one.
fn errNote(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    parent: *Module.ErrorMsg,
    comptime format: []const u8,
    args: anytype,
) error{OutOfMemory}!void {
    const mod = sema.mod;
    const src_decl = mod.declPtr(block.src_decl);
    return mod.errNoteNonLazy(src.toSrcLoc(src_decl, mod), parent, format, args);
}

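/// Attaches a note to `parent` located at the declaration of field
/// `field_index` of `container_ty`, falling back to the owner decl's source
/// location when the AST cannot be loaded.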
fn addFieldErrNote(
    sema: *Sema,
    container_ty: Type,
    field_index: usize,
    parent: *Module.ErrorMsg,
    comptime format: []const u8,
    args: anytype,
) !void {
    @setCold(true);
    const mod = sema.mod;
    const decl_index = container_ty.getOwnerDecl(mod);
    const decl = mod.declPtr(decl_index);

    const field_src = blk: {
        const tree = decl.getFileScope(mod).getTree(sema.gpa) catch |err| {
            log.err("unable to load AST to report compile error: {s}", .{@errorName(err)});
            break :blk decl.srcLoc(mod);
        };

        const container_node = decl.relativeToNodeIndex(0);
        const node_tags = tree.nodes.items(.tag);
        var buf: [2]std.zig.Ast.Node.Index = undefined;
        const container_decl = tree.fullContainerDecl(&buf, container_node) orelse break :blk decl.srcLoc(mod);

        var it_index: usize = 0;
        for (container_decl.ast.members) |member_node| {
            switch (node_tags[member_node]) {
                .container_field_init,
                .container_field_align,
                .container_field,
                => {
                    if (it_index == field_index) {
                        break :blk decl.nodeOffsetSrcLoc(decl.nodeIndexToRelative(member_node), mod);
                    }
                    it_index += 1;
                },
                else => continue,
            }
        }
        unreachable;
    };
    try mod.errNoteNonLazy(field_src, parent, format, args);
}

fn errMsg(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    comptime format: []const u8,
    args: anytype,
) error{OutOfMemory}!*Module.ErrorMsg {
    const mod = sema.mod;
    const src_decl = mod.declPtr(block.src_decl);
    return Module.ErrorMsg.create(sema.gpa, src.toSrcLoc(src_decl, mod), format, args);
}

pub fn fail(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    comptime format: []const u8,
    args: anytype,
) CompileError {
    const err_msg = try sema.errMsg(block, src, format, args);
    return sema.failWithOwnedErrorMsg(err_msg);
}

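/// Takes ownership of `err_msg`. Records the failure on the owner decl or
/// function, attaches a reference trace when appropriate, and returns
/// `error.AnalysisFail`. If the message has an `.unneeded` source location,
/// `error.NeededSourceLocation` is returned instead.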
fn failWithOwnedErrorMsg(sema: *Sema, err_msg: *Module.ErrorMsg) CompileError {
    @setCold(true);
    const gpa = sema.gpa;
    const mod = sema.mod;

    if (crash_report.is_enabled and mod.comp.debug_compile_errors) {
        if (err_msg.src_loc.lazy == .unneeded) return error.NeededSourceLocation;
        var wip_errors: std.zig.ErrorBundle.Wip = undefined;
        wip_errors.init(gpa) catch unreachable;
        Compilation.addModuleErrorMsg(mod, &wip_errors, err_msg.*) catch unreachable;
        std.debug.print("compile error during Sema:\n", .{});
        var error_bundle = wip_errors.toOwnedBundle("") catch unreachable;
        error_bundle.renderToStdErr(.{ .ttyconf = .no_color });
        crash_report.compilerPanic("unexpected compile error occurred", null, null);
    }

    ref: {
        errdefer err_msg.destroy(gpa);
        if (err_msg.src_loc.lazy == .unneeded) {
            return error.NeededSourceLocation;
        }
        try mod.failed_decls.ensureUnusedCapacity(gpa, 1);
        try mod.failed_files.ensureUnusedCapacity(gpa, 1);

        const max_references = blk: {
            if (mod.comp.reference_trace) |num| break :blk num;
            // Do not add multiple traces without explicit request.
            if (mod.failed_decls.count() != 0) break :ref;
            break :blk default_reference_trace_len;
        };

        var referenced_by = if (sema.func_index != .none)
            mod.funcOwnerDeclIndex(sema.func_index)
        else
            sema.owner_decl_index;
        var reference_stack = std.ArrayList(Module.ErrorMsg.Trace).init(gpa);
        defer reference_stack.deinit();

        // Avoid infinite loops.
        var seen = std.AutoHashMap(Decl.Index, void).init(gpa);
        defer seen.deinit();

        var cur_reference_trace: u32 = 0;
        while (sema.mod.reference_table.get(referenced_by)) |ref| : (cur_reference_trace += 1) {
            const gop = try seen.getOrPut(ref.referencer);
            if (gop.found_existing) break;
            if (cur_reference_trace < max_references) {
                const decl = sema.mod.declPtr(ref.referencer);
                try reference_stack.append(.{
                    .decl = decl.name.toOptional(),
                    .src_loc = ref.src.toSrcLoc(decl, mod),
                });
            }
            referenced_by = ref.referencer;
        }
        if (sema.mod.comp.reference_trace == null and cur_reference_trace > 0) {
            try reference_stack.append(.{
                .decl = .none,
                .src_loc = undefined,
                .hidden = 0,
            });
        } else if (cur_reference_trace > max_references) {
            try reference_stack.append(.{
                .decl = undefined,
                .src_loc = undefined,
                .hidden = cur_reference_trace - max_references,
            });
        }
        err_msg.reference_trace = try reference_stack.toOwnedSlice();
    }
    const ip = &mod.intern_pool;
    if (sema.owner_func_index != .none) {
        ip.funcAnalysis(sema.owner_func_index).state = .sema_failure;
    } else {
        sema.owner_decl.analysis = .sema_failure;
        sema.owner_decl.generation = mod.generation;
    }
    if (sema.func_index != .none) {
        ip.funcAnalysis(sema.func_index).state = .sema_failure;
    }
    const gop = mod.failed_decls.getOrPutAssumeCapacity(sema.owner_decl_index);
    if (gop.found_existing) {
        // If there are multiple errors for the same Decl, prefer the first one added.
        sema.err = null;
        err_msg.destroy(gpa);
    } else {
        sema.err = err_msg;
        gop.value_ptr.* = err_msg;
    }
    return error.AnalysisFail;
}

/// Given an ErrorMsg, modify its message and source location to the given values, turning the
/// original message into a note. Notes on the original message are preserved as further notes.
/// Reference trace is preserved.
fn reparentOwnedErrorMsg(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    msg: *Module.ErrorMsg,
    comptime format: []const u8,
    args: anytype,
) !void {
    const mod = sema.mod;
    const src_decl = mod.declPtr(block.src_decl);
    const resolved_src = src.toSrcLoc(src_decl, mod);
    const msg_str = try std.fmt.allocPrint(mod.gpa, format, args);

    const orig_notes = msg.notes.len;
    msg.notes = try sema.gpa.realloc(msg.notes, orig_notes + 1);
    std.mem.copyBackwards(Module.ErrorMsg, msg.notes[1..], msg.notes[0..orig_notes]);
    msg.notes[0] = .{
        .src_loc = msg.src_loc,
        .msg = msg.msg,
    };

    msg.src_loc = resolved_src;
    msg.msg = msg_str;
}

const align_ty = Type.u29;

fn analyzeAsAlign(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    air_ref: Air.Inst.Ref,
) !Alignment {
    const alignment_big = try sema.analyzeAsInt(block, src, air_ref, align_ty, "alignment must be comptime-known");
    const alignment = @as(u32, @intCast(alignment_big)); // We coerce to u29 in the prev line.
    try sema.validateAlign(block, src, alignment);
    return Alignment.fromNonzeroByteUnits(alignment);
}

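/// An alignment must be a nonzero power of two. For example, `align(0)` and
/// `align(3)` are rejected here, while `align(4)` is accepted.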
fn validateAlign(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    alignment: u32,
) !void {
    if (alignment == 0) return sema.fail(block, src, "alignment must be >= 1", .{});
    if (!std.math.isPowerOfTwo(alignment)) {
        return sema.fail(block, src, "alignment value '{d}' is not a power of two", .{
            alignment,
        });
    }
}

pub fn resolveAlign(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    zir_ref: Zir.Inst.Ref,
) !Alignment {
    const air_ref = try sema.resolveInst(zir_ref);
    return sema.analyzeAsAlign(block, src, air_ref);
}

fn resolveInt(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    zir_ref: Zir.Inst.Ref,
    dest_ty: Type,
    reason: []const u8,
) !u64 {
    const air_ref = try sema.resolveInst(zir_ref);
    return sema.analyzeAsInt(block, src, air_ref, dest_ty, reason);
}

fn analyzeAsInt(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    air_ref: Air.Inst.Ref,
    dest_ty: Type,
    reason: []const u8,
) !u64 {
    const mod = sema.mod;
    const coerced = try sema.coerce(block, dest_ty, air_ref, src);
    const val = try sema.resolveConstValue(block, src, coerced, reason);
    return (try val.getUnsignedIntAdvanced(mod, sema)).?;
}

/// Returns a compile error if the value has tag `variable`. See `resolveInstValue` for
/// a function that does not.
pub fn resolveInstConst(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    zir_ref: Zir.Inst.Ref,
    reason: []const u8,
) CompileError!TypedValue {
    const air_ref = try sema.resolveInst(zir_ref);
    const val = try sema.resolveConstValue(block, src, air_ref, reason);
    return TypedValue{
        .ty = sema.typeOf(air_ref),
        .val = val,
    };
}

/// Value Tag may be `undef` or `variable`.
/// See `resolveInstConst` for an alternative.
pub fn resolveInstValue(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    zir_ref: Zir.Inst.Ref,
    reason: []const u8,
) CompileError!TypedValue {
    const air_ref = try sema.resolveInst(zir_ref);
    const val = try sema.resolveValue(block, src, air_ref, reason);
    return TypedValue{
        .ty = sema.typeOf(air_ref),
        .val = val,
    };
}

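/// Analyzes a `coerce_result_ptr` ZIR instruction. Inferred allocations are
/// handled specially; for everything else, a dummy store through the pointer
/// is emitted into a trash block and the generated instructions are replayed
/// by `coerceResultPtr` to derive the coerced result pointer.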
fn zirCoerceResultPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const src = inst_data.src();
    const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
    const pointee_ty = try sema.resolveType(block, src, extra.lhs);
    const ptr = try sema.resolveInst(extra.rhs);
    const target = mod.getTarget();
    const addr_space = target_util.defaultAddressSpace(target, .local);

    if (Air.refToIndex(ptr)) |ptr_inst| {
        switch (sema.air_instructions.items(.tag)[ptr_inst]) {
            .inferred_alloc => {
                const ia1 = sema.air_instructions.items(.data)[ptr_inst].inferred_alloc;
                const ia2 = sema.unresolved_inferred_allocs.getPtr(ptr_inst).?;
                // Add the stored instruction to the set we will use to resolve peer types
                // for the inferred allocation.
                // This instruction will not make it to codegen; it is only to participate
                // in the `stored_inst_list` of the `inferred_alloc`.
                var trash_block = block.makeSubBlock();
                defer trash_block.instructions.deinit(sema.gpa);
                const operand = try trash_block.addBitCast(pointee_ty, .void_value);

                const ptr_ty = try mod.ptrType(.{
                    .child = pointee_ty.toIntern(),
                    .flags = .{
                        .alignment = ia1.alignment,
                        .address_space = addr_space,
                    },
                });
                const bitcasted_ptr = try block.addBitCast(ptr_ty, ptr);

                try ia2.prongs.append(sema.arena, .{
                    .stored_inst = operand,
                    .placeholder = Air.refToIndex(bitcasted_ptr).?,
                });

                return bitcasted_ptr;
            },
            .inferred_alloc_comptime => {
                const alignment = sema.air_instructions.items(.data)[ptr_inst].inferred_alloc_comptime.alignment;
                // There will be only one coerce_result_ptr because we are running at comptime.
                // The alloc will turn into a Decl.
                var anon_decl = try block.startAnonDecl();
                defer anon_decl.deinit();
                const decl_index = try anon_decl.finish(
                    pointee_ty,
                    (try mod.intern(.{ .undef = pointee_ty.toIntern() })).toValue(),
                    alignment,
                );
                sema.air_instructions.items(.data)[ptr_inst].inferred_alloc_comptime.decl_index = decl_index;
                if (alignment != .none) {
                    try sema.resolveTypeLayout(pointee_ty);
                }
                const ptr_ty = try mod.ptrType(.{
                    .child = pointee_ty.toIntern(),
                    .flags = .{
                        .alignment = alignment,
                        .address_space = addr_space,
                    },
                });
                try sema.maybeQueueFuncBodyAnalysis(decl_index);
                try sema.comptime_mutable_decls.append(decl_index);
                return Air.internedToRef((try mod.intern(.{ .ptr = .{
                    .ty = ptr_ty.toIntern(),
                    .addr = .{ .mut_decl = .{
                        .decl = decl_index,
                        .runtime_index = block.runtime_index,
                    } },
                } })));
            },
            else => {},
        }
    }

    // Make a dummy store through the pointer to test the coercion.
    // We will then use the generated instructions to decide what
    // kind of transformations to make on the result pointer.
    var trash_block = block.makeSubBlock();
    trash_block.is_comptime = false;
    defer trash_block.instructions.deinit(sema.gpa);

    const dummy_ptr = try trash_block.addTy(.alloc, sema.typeOf(ptr));
    const dummy_operand = try trash_block.addBitCast(pointee_ty, .void_value);
    return sema.coerceResultPtr(block, src, ptr, dummy_ptr, dummy_operand, &trash_block);
}

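/// Walks the instructions that the dummy store produced in `trash_block`, in
/// reverse, applying the matching pointer transformation for each coercion
/// step (bitcasts, optional wrapping, error union wrapping, and so on) until
/// the result pointer has been fully coerced.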
fn coerceResultPtr(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    ptr: Air.Inst.Ref,
    dummy_ptr: Air.Inst.Ref,
    dummy_operand: Air.Inst.Ref,
    trash_block: *Block,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const target = sema.mod.getTarget();
    const addr_space = target_util.defaultAddressSpace(target, .local);
    const pointee_ty = sema.typeOf(dummy_operand);
    const prev_trash_len = trash_block.instructions.items.len;

    try sema.storePtr2(trash_block, src, dummy_ptr, src, dummy_operand, src, .bitcast);

    {
        const air_tags = sema.air_instructions.items(.tag);

        //std.debug.print("dummy storePtr instructions:\n", .{});
        //for (trash_block.instructions.items) |item| {
        //    std.debug.print("  {s}\n", .{@tagName(air_tags[item])});
        //}

        // The last instruction is the `store`, except that zero-sized types
        // generate no store at all.
        const trash_inst = trash_block.instructions.items[trash_block.instructions.items.len - 1];
        if (air_tags[trash_inst] != .store and air_tags[trash_inst] != .store_safe) {
            // No store instruction is generated for zero-sized types.
            assert((try sema.typeHasOnePossibleValue(pointee_ty)) != null);
        } else {
            trash_block.instructions.items.len -= 1;
            assert(trash_inst == sema.air_instructions.len - 1);
            sema.air_instructions.len -= 1;
        }
    }

    const ptr_ty = try mod.ptrType(.{
        .child = pointee_ty.toIntern(),
        .flags = .{ .address_space = addr_space },
    });

    var new_ptr = ptr;

    while (true) {
        const air_tags = sema.air_instructions.items(.tag);
        const air_datas = sema.air_instructions.items(.data);

        if (trash_block.instructions.items.len == prev_trash_len) {
            if (try sema.resolveDefinedValue(block, src, new_ptr)) |ptr_val| {
                return Air.internedToRef(ptr_val.toIntern());
            }
            if (pointee_ty.eql(Type.null, sema.mod)) {
                const null_inst = Air.internedToRef(Value.null.toIntern());
                _ = try block.addBinOp(.store, new_ptr, null_inst);
                return Air.Inst.Ref.void_value;
            }
            return sema.bitCast(block, ptr_ty, new_ptr, src, null);
        }

        const trash_inst = trash_block.instructions.pop();

        switch (air_tags[trash_inst]) {
            // Array coerced to Vector where element size is not equal but coercible.
            .aggregate_init => {
                const ty_pl = air_datas[trash_inst].ty_pl;
                const ptr_operand_ty = try mod.ptrType(.{
                    .child = (try sema.analyzeAsType(block, src, ty_pl.ty)).toIntern(),
                    .flags = .{ .address_space = addr_space },
                });

                if (try sema.resolveDefinedValue(block, src, new_ptr)) |ptr_val| {
                    return Air.internedToRef(ptr_val.toIntern());
                } else {
                    return sema.bitCast(block, ptr_operand_ty, new_ptr, src, null);
                }
            },
            .bitcast => {
                const ty_op = air_datas[trash_inst].ty_op;
                const operand_ty = sema.typeOf(ty_op.operand);
                const ptr_operand_ty = try mod.ptrType(.{
                    .child = operand_ty.toIntern(),
                    .flags = .{ .address_space = addr_space },
                });
                if (try sema.resolveDefinedValue(block, src, new_ptr)) |ptr_val| {
                    new_ptr = Air.internedToRef((try mod.getCoerced(ptr_val, ptr_operand_ty)).toIntern());
                } else {
                    new_ptr = try sema.bitCast(block, ptr_operand_ty, new_ptr, src, null);
                }
            },
            .wrap_optional => {
                new_ptr = try sema.analyzeOptionalPayloadPtr(block, src, new_ptr, false, true);
            },
            .wrap_errunion_err => {
                return sema.fail(block, src, "TODO coerce_result_ptr wrap_errunion_err", .{});
            },
            .wrap_errunion_payload => {
                new_ptr = try sema.analyzeErrUnionPayloadPtr(block, src, new_ptr, false, true);
            },
            .array_to_slice => {
                return sema.fail(block, src, "TODO coerce_result_ptr array_to_slice", .{});
            },
            .get_union_tag => {
                return sema.fail(block, src, "TODO coerce_result_ptr get_union_tag", .{});
            },
            else => {
                if (std.debug.runtime_safety) {
                    std.debug.panic("unexpected AIR tag for coerce_result_ptr: {}", .{
                        air_tags[trash_inst],
                    });
                } else {
                    unreachable;
                }
            },
        }
    }
}

pub fn analyzeStructDecl(
    sema: *Sema,
    new_decl: *Decl,
    inst: Zir.Inst.Index,
    struct_index: Module.Struct.Index,
) SemaError!void {
    const mod = sema.mod;
    const struct_obj = mod.structPtr(struct_index);
    const extended = sema.code.instructions.items(.data)[inst].extended;
    assert(extended.opcode == .struct_decl);
    const small = @as(Zir.Inst.StructDecl.Small, @bitCast(extended.small));

    struct_obj.known_non_opv = small.known_non_opv;
    if (small.known_comptime_only) {
        struct_obj.requires_comptime = .yes;
    }

    var extra_index: usize = extended.operand;
    extra_index += @intFromBool(small.has_src_node);
    extra_index += @intFromBool(small.has_fields_len);
    const decls_len = if (small.has_decls_len) blk: {
        const decls_len = sema.code.extra[extra_index];
        extra_index += 1;
        break :blk decls_len;
    } else 0;

    if (small.has_backing_int) {
        const backing_int_body_len = sema.code.extra[extra_index];
        extra_index += 1; // backing_int_body_len
        if (backing_int_body_len == 0) {
            extra_index += 1; // backing_int_ref
        } else {
            extra_index += backing_int_body_len; // backing_int_body_inst
        }
    }

    _ = try mod.scanNamespace(struct_obj.namespace, extra_index, decls_len, new_decl);
}

fn zirStructDecl(
    sema: *Sema,
    block: *Block,
    extended: Zir.Inst.Extended.InstData,
    inst: Zir.Inst.Index,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const gpa = sema.gpa;
    const small = @as(Zir.Inst.StructDecl.Small, @bitCast(extended.small));
    const src: LazySrcLoc = if (small.has_src_node) blk: {
        const node_offset = @as(i32, @bitCast(sema.code.extra[extended.operand]));
        break :blk LazySrcLoc.nodeOffset(node_offset);
    } else sema.src;

    // The decl, the namespace, and the struct type reference each other, so
    // `undefined` placeholders are used here and then patched up once the
    // struct type gains an InternPool index.

    const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{
        .ty = Type.noreturn,
        .val = Value.@"unreachable",
    }, small.name_strategy, "struct", inst);
    const new_decl = mod.declPtr(new_decl_index);
    new_decl.owns_tv = true;
    errdefer mod.abortAnonDecl(new_decl_index);

    const new_namespace_index = try mod.createNamespace(.{
        .parent = block.namespace.toOptional(),
        .ty = undefined,
        .file_scope = block.getFileScope(mod),
    });
    const new_namespace = mod.namespacePtr(new_namespace_index);
    errdefer mod.destroyNamespace(new_namespace_index);

    const struct_index = try mod.createStruct(.{
        .owner_decl = new_decl_index,
        .fields = .{},
        .zir_index = inst,
        .layout = small.layout,
        .status = .none,
        .known_non_opv = undefined,
        .is_tuple = small.is_tuple,
        .namespace = new_namespace_index,
    });
    errdefer mod.destroyStruct(struct_index);

    const struct_ty = ty: {
        const ty = try mod.intern_pool.get(gpa, .{ .struct_type = .{
            .index = struct_index.toOptional(),
            .namespace = new_namespace_index.toOptional(),
        } });
        if (sema.builtin_type_target_index != .none) {
            mod.intern_pool.resolveBuiltinType(sema.builtin_type_target_index, ty);
            break :ty sema.builtin_type_target_index;
        }
        break :ty ty;
    };
    // TODO: figure out InternPool removals for incremental compilation
    //errdefer mod.intern_pool.remove(struct_ty);

    new_decl.ty = Type.type;
    new_decl.val = struct_ty.toValue();
    new_namespace.ty = struct_ty.toType();

    try sema.analyzeStructDecl(new_decl, inst, struct_index);
    const decl_val = sema.analyzeDeclVal(block, src, new_decl_index);
    try mod.finalizeAnonDecl(new_decl_index);
    return decl_val;
}

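/// Creates an anonymous decl for a type, choosing its name according to
/// `name_strategy`: `.anon` derives "parent__prefix_N", `.parent` reuses the
/// parent decl's name, `.func` builds "parent(arg values)" for generic
/// instantiations, and `.dbg_var` uses the name of the variable the result is
/// assigned to, falling back to `.anon` when no name can be determined.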
fn createAnonymousDeclTypeNamed(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    typed_value: TypedValue,
    name_strategy: Zir.Inst.NameStrategy,
    anon_prefix: []const u8,
    inst: ?Zir.Inst.Index,
) !Decl.Index {
    const mod = sema.mod;
    const ip = &mod.intern_pool;
    const gpa = sema.gpa;
    const namespace = block.namespace;
    const src_scope = block.wip_capture_scope;
    const src_decl = mod.declPtr(block.src_decl);
    const src_node = src_decl.relativeToNodeIndex(src.node_offset.x);
    const new_decl_index = try mod.allocateNewDecl(namespace, src_node, src_scope);
    errdefer mod.destroyDecl(new_decl_index);

    switch (name_strategy) {
        .anon => {
            // It would be neat to have "struct:line:column" but this name has
            // to survive incremental updates, where it may have been shifted
            // down or up to a different line while remaining unchanged, in
            // which case it must not be unnecessarily re-analyzed.
            // This name is also used as the key in the parent namespace so it
            // cannot be renamed.

            const name = mod.intern_pool.getOrPutStringFmt(gpa, "{}__{s}_{d}", .{
                src_decl.name.fmt(&mod.intern_pool), anon_prefix, @intFromEnum(new_decl_index),
            }) catch unreachable;
            try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, typed_value, name);
            return new_decl_index;
        },
        .parent => {
            const name = mod.declPtr(block.src_decl).name;
            try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, typed_value, name);
            return new_decl_index;
        },
        .func => {
            const fn_info = sema.code.getFnInfo(ip.funcZirBodyInst(sema.func_index));
            const zir_tags = sema.code.instructions.items(.tag);

            var buf = std.ArrayList(u8).init(gpa);
            defer buf.deinit();

            const writer = buf.writer();
            try writer.print("{}(", .{mod.declPtr(block.src_decl).name.fmt(&mod.intern_pool)});

            var arg_i: usize = 0;
            for (fn_info.param_body) |zir_inst| switch (zir_tags[zir_inst]) {
                .param, .param_comptime, .param_anytype, .param_anytype_comptime => {
                    const arg = sema.inst_map.get(zir_inst).?;
                    // If this is being called in a generic function then analyzeCall will
                    // have already resolved the args and this will work.
                    // If not then this is a struct type being returned from a non-generic
                    // function and the name doesn't matter since it will later
                    // result in a compile error.
                    const arg_val = sema.resolveConstMaybeUndefVal(block, .unneeded, arg, "") catch
                        return sema.createAnonymousDeclTypeNamed(block, src, typed_value, .anon, anon_prefix, null);

                    if (arg_i != 0) try writer.writeByte(',');
                    try writer.print("{}", .{arg_val.fmtValue(sema.typeOf(arg), sema.mod)});

                    arg_i += 1;
                    continue;
                },
                else => continue,
            };

            try writer.writeByte(')');
            const name = try mod.intern_pool.getOrPutString(gpa, buf.items);
            try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, typed_value, name);
            return new_decl_index;
        },
        .dbg_var => {
            const ref = Zir.indexToRef(inst.?);
            const zir_tags = sema.code.instructions.items(.tag);
            const zir_data = sema.code.instructions.items(.data);
            var i = inst.?;
            while (i < zir_tags.len) : (i += 1) switch (zir_tags[i]) {
                .dbg_var_ptr, .dbg_var_val => {
                    if (zir_data[i].str_op.operand != ref) continue;

                    const name = try mod.intern_pool.getOrPutStringFmt(gpa, "{}.{s}", .{
                        src_decl.name.fmt(&mod.intern_pool), zir_data[i].str_op.getStr(sema.code),
                    });

                    try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, typed_value, name);
                    return new_decl_index;
                },
                else => {},
            };
            return sema.createAnonymousDeclTypeNamed(block, src, typed_value, .anon, anon_prefix, null);
        },
    }
}

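/// Analyzes an `enum_decl` ZIR instruction: creates the anonymous `Decl` and
/// `Namespace`, determines the integer tag type (an explicit tag type, or the
/// smallest unsigned integer that can represent every field), then adds each
/// field, reporting duplicate names, duplicate values, and values that do not
/// fit the tag type.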
fn zirEnumDecl(
    sema: *Sema,
    block: *Block,
    extended: Zir.Inst.Extended.InstData,
    inst: Zir.Inst.Index,
) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const mod = sema.mod;
    const gpa = sema.gpa;
    const small = @as(Zir.Inst.EnumDecl.Small, @bitCast(extended.small));
    var extra_index: usize = extended.operand;

    const src: LazySrcLoc = if (small.has_src_node) blk: {
        const node_offset = @as(i32, @bitCast(sema.code.extra[extra_index]));
        extra_index += 1;
        break :blk LazySrcLoc.nodeOffset(node_offset);
    } else sema.src;
    const tag_ty_src: LazySrcLoc = .{ .node_offset_container_tag = src.node_offset.x };

    const tag_type_ref = if (small.has_tag_type) blk: {
        const tag_type_ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_index]));
        extra_index += 1;
        break :blk tag_type_ref;
    } else .none;

    const body_len = if (small.has_body_len) blk: {
        const body_len = sema.code.extra[extra_index];
        extra_index += 1;
        break :blk body_len;
    } else 0;

    const fields_len = if (small.has_fields_len) blk: {
        const fields_len = sema.code.extra[extra_index];
        extra_index += 1;
        break :blk fields_len;
    } else 0;

    const decls_len = if (small.has_decls_len) blk: {
        const decls_len = sema.code.extra[extra_index];
        extra_index += 1;
        break :blk decls_len;
    } else 0;

    // Because these three things each reference each other, `undefined`
    // placeholders are used before being set after the enum type gains an
    // InternPool index.

    var done = false;
    const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{
        .ty = Type.noreturn,
        .val = Value.@"unreachable",
    }, small.name_strategy, "enum", inst);
    const new_decl = mod.declPtr(new_decl_index);
    new_decl.owns_tv = true;
    errdefer if (!done) mod.abortAnonDecl(new_decl_index);

    const new_namespace_index = try mod.createNamespace(.{
        .parent = block.namespace.toOptional(),
        .ty = undefined,
        .file_scope = block.getFileScope(mod),
    });
    const new_namespace = mod.namespacePtr(new_namespace_index);
    errdefer if (!done) mod.destroyNamespace(new_namespace_index);

    extra_index = try mod.scanNamespace(new_namespace_index, extra_index, decls_len, new_decl);

    const body = sema.code.extra[extra_index..][0..body_len];
    extra_index += body.len;

    const bit_bags_count = std.math.divCeil(usize, fields_len, 32) catch unreachable;
    const body_end = extra_index;
    extra_index += bit_bags_count;

    const any_values = for (sema.code.extra[body_end..][0..bit_bags_count]) |bag| {
        if (bag != 0) break true;
    } else false;

    const incomplete_enum = incomplete_enum: {
        var incomplete_enum = try mod.intern_pool.getIncompleteEnum(gpa, .{
            .decl = new_decl_index,
            .namespace = new_namespace_index.toOptional(),
            .fields_len = fields_len,
            .has_values = any_values,
            .tag_mode = if (small.nonexhaustive)
                .nonexhaustive
            else if (tag_type_ref == .none)
                .auto
            else
                .explicit,
        });
        if (sema.builtin_type_target_index != .none) {
            mod.intern_pool.resolveBuiltinType(sema.builtin_type_target_index, incomplete_enum.index);
            incomplete_enum.index = sema.builtin_type_target_index;
        }
        break :incomplete_enum incomplete_enum;
    };
    // TODO: figure out InternPool removals for incremental compilation
    //errdefer if (!done) mod.intern_pool.remove(incomplete_enum.index);

    new_decl.ty = Type.type;
    new_decl.val = incomplete_enum.index.toValue();
    new_namespace.ty = incomplete_enum.index.toType();

    const decl_val = try sema.analyzeDeclVal(block, src, new_decl_index);
    try mod.finalizeAnonDecl(new_decl_index);
    done = true;

    const int_tag_ty = ty: {
        // We create a block for the field type instructions because they
        // may need to reference Decls from inside the enum namespace.
        // Within the field type, default value, and alignment expressions, the "owner decl"
        // should be the enum itself.

        const prev_owner_decl = sema.owner_decl;
        const prev_owner_decl_index = sema.owner_decl_index;
        sema.owner_decl = new_decl;
        sema.owner_decl_index = new_decl_index;
        defer {
            sema.owner_decl = prev_owner_decl;
            sema.owner_decl_index = prev_owner_decl_index;
        }

        const prev_owner_func_index = sema.owner_func_index;
        sema.owner_func_index = .none;
        defer sema.owner_func_index = prev_owner_func_index;

        const prev_func_index = sema.func_index;
        sema.func_index = .none;
        defer sema.func_index = prev_func_index;

        var wip_captures = try WipCaptureScope.init(gpa, new_decl.src_scope);
        defer wip_captures.deinit();

        var enum_block: Block = .{
            .parent = null,
            .sema = sema,
            .src_decl = new_decl_index,
            .namespace = new_namespace_index,
            .wip_capture_scope = wip_captures.scope,
            .instructions = .{},
            .inlining = null,
            .is_comptime = true,
        };
        defer enum_block.instructions.deinit(sema.gpa);

        if (body.len != 0) {
            try sema.analyzeBody(&enum_block, body);
        }

        try wip_captures.finalize();

        if (tag_type_ref != .none) {
            const ty = try sema.resolveType(block, tag_ty_src, tag_type_ref);
            if (ty.zigTypeTag(mod) != .Int and ty.zigTypeTag(mod) != .ComptimeInt) {
                return sema.fail(block, tag_ty_src, "expected integer tag type, found '{}'", .{ty.fmt(sema.mod)});
            }
            incomplete_enum.setTagType(&mod.intern_pool, ty.toIntern());
            break :ty ty;
        } else if (fields_len == 0) {
            break :ty try mod.intType(.unsigned, 0);
        } else {
            const bits = std.math.log2_int_ceil(usize, fields_len);
            break :ty try mod.intType(.unsigned, bits);
        }
    };

    if (small.nonexhaustive and int_tag_ty.toIntern() != .comptime_int_type) {
        if (fields_len > 1 and std.math.log2_int(u64, fields_len) == int_tag_ty.bitSize(mod)) {
            return sema.fail(block, src, "non-exhaustive enum specifies every value", .{});
        }
    }

    var bit_bag_index: usize = body_end;
    var cur_bit_bag: u32 = undefined;
    var field_i: u32 = 0;
    var last_tag_val: ?Value = null;
    while (field_i < fields_len) : (field_i += 1) {
        if (field_i % 32 == 0) {
            cur_bit_bag = sema.code.extra[bit_bag_index];
            bit_bag_index += 1;
        }
        const has_tag_value = @as(u1, @truncate(cur_bit_bag)) != 0;
        cur_bit_bag >>= 1;

        const field_name_zir = sema.code.nullTerminatedString(sema.code.extra[extra_index]);
        extra_index += 1;

        // doc comment
        extra_index += 1;

        const field_name = try mod.intern_pool.getOrPutString(gpa, field_name_zir);
        if (try incomplete_enum.addFieldName(&mod.intern_pool, gpa, field_name)) |other_index| {
            const field_src = mod.fieldSrcLoc(new_decl_index, .{ .index = field_i }).lazy;
            const other_field_src = mod.fieldSrcLoc(new_decl_index, .{ .index = other_index }).lazy;
            const msg = msg: {
                const msg = try sema.errMsg(block, field_src, "duplicate enum field '{s}'", .{field_name_zir});
                errdefer msg.destroy(gpa);
                try sema.errNote(block, other_field_src, msg, "other field here", .{});
                break :msg msg;
            };
            return sema.failWithOwnedErrorMsg(msg);
        }

        const tag_overflow = if (has_tag_value) overflow: {
            const tag_val_ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_index]));
            extra_index += 1;
            const tag_inst = try sema.resolveInst(tag_val_ref);
            last_tag_val = sema.resolveConstValue(block, .unneeded, tag_inst, "") catch |err| switch (err) {
                error.NeededSourceLocation => {
                    const value_src = mod.fieldSrcLoc(new_decl_index, .{
                        .index = field_i,
                        .range = .value,
                    }).lazy;
                    _ = try sema.resolveConstValue(block, value_src, tag_inst, "enum tag value must be comptime-known");
                    unreachable;
                },
                else => |e| return e,
            };
            if (!(try sema.intFitsInType(last_tag_val.?, int_tag_ty, null))) break :overflow true;
            last_tag_val = try mod.getCoerced(last_tag_val.?, int_tag_ty);
            if (try incomplete_enum.addFieldValue(&mod.intern_pool, gpa, last_tag_val.?.toIntern())) |other_index| {
                const value_src = mod.fieldSrcLoc(new_decl_index, .{
                    .index = field_i,
                    .range = .value,
                }).lazy;
                const other_field_src = mod.fieldSrcLoc(new_decl_index, .{ .index = other_index }).lazy;
                const msg = msg: {
                    const msg = try sema.errMsg(block, value_src, "enum tag value {} already taken", .{last_tag_val.?.fmtValue(int_tag_ty, sema.mod)});
                    errdefer msg.destroy(gpa);
                    try sema.errNote(block, other_field_src, msg, "other occurrence here", .{});
                    break :msg msg;
                };
                return sema.failWithOwnedErrorMsg(msg);
            }
            break :overflow false;
        } else if (any_values) overflow: {
            var overflow: ?usize = null;
            last_tag_val = if (last_tag_val) |val|
                try sema.intAdd(val, try mod.intValue(int_tag_ty, 1), int_tag_ty, &overflow)
            else
                try mod.intValue(int_tag_ty, 0);
            if (overflow != null) break :overflow true;
            if (try incomplete_enum.addFieldValue(&mod.intern_pool, gpa, last_tag_val.?.toIntern())) |other_index| {
                const field_src = mod.fieldSrcLoc(new_decl_index, .{ .index = field_i }).lazy;
                const other_field_src = mod.fieldSrcLoc(new_decl_index, .{ .index = other_index }).lazy;
                const msg = msg: {
                    const msg = try sema.errMsg(block, field_src, "enum tag value {} already taken", .{last_tag_val.?.fmtValue(int_tag_ty, sema.mod)});
                    errdefer msg.destroy(gpa);
                    try sema.errNote(block, other_field_src, msg, "other occurrence here", .{});
                    break :msg msg;
                };
                return sema.failWithOwnedErrorMsg(msg);
            }
            break :overflow false;
        } else overflow: {
            last_tag_val = try mod.intValue(Type.comptime_int, field_i);
            if (!try sema.intFitsInType(last_tag_val.?, int_tag_ty, null)) break :overflow true;
            last_tag_val = try mod.getCoerced(last_tag_val.?, int_tag_ty);
            break :overflow false;
        };

        if (tag_overflow) {
            const value_src = mod.fieldSrcLoc(new_decl_index, .{
                .index = field_i,
                .range = if (has_tag_value) .value else .name,
            }).lazy;
            const msg = try sema.errMsg(block, value_src, "enumeration value '{}' too large for type '{}'", .{
                last_tag_val.?.fmtValue(int_tag_ty, mod), int_tag_ty.fmt(mod),
            });
            return sema.failWithOwnedErrorMsg(msg);
        }
    }
    return decl_val;
}

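/// Analyzes a `union_decl` ZIR instruction: creates the anonymous `Decl`,
/// `Namespace`, and union, choosing whether the union carries a runtime tag
/// based on its tag type, layout, and the optimization mode.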
fn zirUnionDecl(
    sema: *Sema,
    block: *Block,
    extended: Zir.Inst.Extended.InstData,
    inst: Zir.Inst.Index,
) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const mod = sema.mod;
    const gpa = sema.gpa;
    const small = @as(Zir.Inst.UnionDecl.Small, @bitCast(extended.small));
    var extra_index: usize = extended.operand;

    const src: LazySrcLoc = if (small.has_src_node) blk: {
        const node_offset = @as(i32, @bitCast(sema.code.extra[extra_index]));
        extra_index += 1;
        break :blk LazySrcLoc.nodeOffset(node_offset);
    } else sema.src;

    extra_index += @intFromBool(small.has_tag_type);
    extra_index += @intFromBool(small.has_body_len);
    extra_index += @intFromBool(small.has_fields_len);

    const decls_len = if (small.has_decls_len) blk: {
        const decls_len = sema.code.extra[extra_index];
        extra_index += 1;
        break :blk decls_len;
    } else 0;

    // Because these three things each reference each other, `undefined`
    // placeholders are used before being set after the union type gains an
    // InternPool index.

    const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{
        .ty = Type.noreturn,
        .val = Value.@"unreachable",
    }, small.name_strategy, "union", inst);
    const new_decl = mod.declPtr(new_decl_index);
    new_decl.owns_tv = true;
    errdefer mod.abortAnonDecl(new_decl_index);

    const new_namespace_index = try mod.createNamespace(.{
        .parent = block.namespace.toOptional(),
        .ty = undefined,
        .file_scope = block.getFileScope(mod),
    });
    const new_namespace = mod.namespacePtr(new_namespace_index);
    errdefer mod.destroyNamespace(new_namespace_index);

    const union_index = try mod.createUnion(.{
        .owner_decl = new_decl_index,
        .tag_ty = Type.null,
        .fields = .{},
        .zir_index = inst,
        .layout = small.layout,
        .status = .none,
        .namespace = new_namespace_index,
    });
    errdefer mod.destroyUnion(union_index);

    const union_ty = ty: {
        const ty = try mod.intern_pool.get(gpa, .{ .union_type = .{
            .index = union_index,
            .runtime_tag = if (small.has_tag_type or small.auto_enum_tag)
                .tagged
            else if (small.layout != .Auto)
                .none
            else switch (block.sema.mod.optimizeMode()) {
                .Debug, .ReleaseSafe => .safety,
                .ReleaseFast, .ReleaseSmall => .none,
            },
        } });
        if (sema.builtin_type_target_index != .none) {
            mod.intern_pool.resolveBuiltinType(sema.builtin_type_target_index, ty);
            break :ty sema.builtin_type_target_index;
        }
        break :ty ty;
    };
    // TODO: figure out InternPool removals for incremental compilation
    //errdefer mod.intern_pool.remove(union_ty);

    new_decl.ty = Type.type;
    new_decl.val = union_ty.toValue();
    new_namespace.ty = union_ty.toType();

    _ = try mod.scanNamespace(new_namespace_index, extra_index, decls_len, new_decl);

    const decl_val = sema.analyzeDeclVal(block, src, new_decl_index);
    try mod.finalizeAnonDecl(new_decl_index);
    return decl_val;
}

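/// Analyzes an `opaque_decl` ZIR instruction: creates the anonymous `Decl`
/// and `Namespace` and interns the new opaque type.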
fn zirOpaqueDecl(
    sema: *Sema,
    block: *Block,
    extended: Zir.Inst.Extended.InstData,
    inst: Zir.Inst.Index,
) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const mod = sema.mod;
    const small = @as(Zir.Inst.OpaqueDecl.Small, @bitCast(extended.small));
    var extra_index: usize = extended.operand;

    const src: LazySrcLoc = if (small.has_src_node) blk: {
        const node_offset = @as(i32, @bitCast(sema.code.extra[extra_index]));
        extra_index += 1;
        break :blk LazySrcLoc.nodeOffset(node_offset);
    } else sema.src;

    const decls_len = if (small.has_decls_len) blk: {
        const decls_len = sema.code.extra[extra_index];
        extra_index += 1;
        break :blk decls_len;
    } else 0;

    // Because these three things each reference each other, `undefined`
    // placeholders are used in two places before being set after the opaque
    // type gains an InternPool index.

    const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{
        .ty = Type.noreturn,
        .val = Value.@"unreachable",
    }, small.name_strategy, "opaque", inst);
    const new_decl = mod.declPtr(new_decl_index);
    new_decl.owns_tv = true;
    errdefer mod.abortAnonDecl(new_decl_index);

    const new_namespace_index = try mod.createNamespace(.{
        .parent = block.namespace.toOptional(),
        .ty = undefined,
        .file_scope = block.getFileScope(mod),
    });
    const new_namespace = mod.namespacePtr(new_namespace_index);
    errdefer mod.destroyNamespace(new_namespace_index);

    const opaque_ty = try mod.intern(.{ .opaque_type = .{
        .decl = new_decl_index,
        .namespace = new_namespace_index,
    } });
    // TODO: figure out InternPool removals for incremental compilation
    //errdefer mod.intern_pool.remove(opaque_ty);

    new_decl.ty = Type.type;
    new_decl.val = opaque_ty.toValue();
    new_namespace.ty = opaque_ty.toType();

    extra_index = try mod.scanNamespace(new_namespace_index, extra_index, decls_len, new_decl);

    const decl_val = sema.analyzeDeclVal(block, src, new_decl_index);
    try mod.finalizeAnonDecl(new_decl_index);
    return decl_val;
}

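/// Analyzes an `error_set_decl` ZIR instruction: collects the declared error
/// names (duplicates are already rejected by AstGen) and interns the
/// resulting error set type in a new anonymous `Decl`.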
fn zirErrorSetDecl(
    sema: *Sema,
    block: *Block,
    inst: Zir.Inst.Index,
    name_strategy: Zir.Inst.NameStrategy,
) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const mod = sema.mod;
    const gpa = sema.gpa;
    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const src = inst_data.src();
    const extra = sema.code.extraData(Zir.Inst.ErrorSetDecl, inst_data.payload_index);

    var names: InferredErrorSet.NameMap = .{};
    try names.ensureUnusedCapacity(sema.arena, extra.data.fields_len);

    var extra_index = @as(u32, @intCast(extra.end));
    const extra_index_end = extra_index + (extra.data.fields_len * 2);
    while (extra_index < extra_index_end) : (extra_index += 2) { // +2 to skip over doc_string
        const str_index = sema.code.extra[extra_index];
        const name = sema.code.nullTerminatedString(str_index);
        const name_ip = try mod.intern_pool.getOrPutString(gpa, name);
        _ = try mod.getErrorValue(name_ip);
        const result = names.getOrPutAssumeCapacity(name_ip);
        assert(!result.found_existing); // verified in AstGen
    }

    const error_set_ty = try mod.errorSetFromUnsortedNames(names.keys());

    const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{
        .ty = Type.type,
        .val = error_set_ty.toValue(),
    }, name_strategy, "error", inst);
    const new_decl = mod.declPtr(new_decl_index);
    new_decl.owns_tv = true;
    errdefer mod.abortAnonDecl(new_decl_index);

    const decl_val = sema.analyzeDeclVal(block, src, new_decl_index);
    try mod.finalizeAnonDecl(new_decl_index);
    return decl_val;
}

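/// Produces the result pointer for the current function's return value:
/// a comptime alloc when the return type requires comptime, a plain `alloc`
/// when inlining, and a `ret_ptr` instruction otherwise.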
fn zirRetPtr(sema: *Sema, block: *Block) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    if (block.is_comptime or try sema.typeRequiresComptime(sema.fn_ret_ty)) {
        try sema.resolveTypeFields(sema.fn_ret_ty);
        return sema.analyzeComptimeAlloc(block, sema.fn_ret_ty, .none);
    }

    const target = sema.mod.getTarget();
    const ptr_type = try sema.mod.ptrType(.{
        .child = sema.fn_ret_ty.toIntern(),
        .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) },
    });

    if (block.inlining != null) {
        // We are inlining a function call; this should be emitted as an alloc, not a ret_ptr.
        // TODO when functions gain result location support, the inlining struct in
        // Block should contain the return pointer, and we would pass that through here.
        return block.addTy(.alloc, ptr_type);
    }

    return block.addTy(.ret_ptr, ptr_type);
}

fn zirRef(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const inst_data = sema.code.instructions.items(.data)[inst].un_tok;
    const operand = try sema.resolveInst(inst_data.operand);
    return sema.analyzeRef(block, inst_data.src(), operand);
}

fn zirEnsureResultUsed(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
    const tracy = trace(@src());
    defer tracy.end();

    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
    const operand = try sema.resolveInst(inst_data.operand);
    const src = inst_data.src();

    return sema.ensureResultUsed(block, sema.typeOf(operand), src);
}

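/// Emits a compile error if a non-void value is ignored, with a note
/// suggesting 'try'/'catch'/'if' for errors, or discarding via '_' otherwise.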
fn ensureResultUsed(
    sema: *Sema,
    block: *Block,
    ty: Type,
    src: LazySrcLoc,
) CompileError!void {
    const mod = sema.mod;
    switch (ty.zigTypeTag(mod)) {
        .Void, .NoReturn => return,
        .ErrorSet, .ErrorUnion => {
            const msg = msg: {
                const msg = try sema.errMsg(block, src, "error is ignored", .{});
                errdefer msg.destroy(sema.gpa);
                try sema.errNote(block, src, msg, "consider using 'try', 'catch', or 'if'", .{});
                break :msg msg;
            };
            return sema.failWithOwnedErrorMsg(msg);
        },
        else => {
            const msg = msg: {
                const msg = try sema.errMsg(block, src, "value of type '{}' ignored", .{ty.fmt(sema.mod)});
                errdefer msg.destroy(sema.gpa);
                try sema.errNote(block, src, msg, "all non-void values must be used", .{});
                try sema.errNote(block, src, msg, "this error can be suppressed by assigning the value to '_'", .{});
                break :msg msg;
            };
            return sema.failWithOwnedErrorMsg(msg);
        },
    }
}

fn zirEnsureResultNonError(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
    const tracy = trace(@src());
    defer tracy.end();

    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
    const operand = try sema.resolveInst(inst_data.operand);
    const src = inst_data.src();
    const operand_ty = sema.typeOf(operand);
    switch (operand_ty.zigTypeTag(mod)) {
        .ErrorSet, .ErrorUnion => {
            const msg = msg: {
                const msg = try sema.errMsg(block, src, "error is discarded", .{});
                errdefer msg.destroy(sema.gpa);
                try sema.errNote(block, src, msg, "consider using 'try', 'catch', or 'if'", .{});
                break :msg msg;
            };
            return sema.failWithOwnedErrorMsg(msg);
        },
        else => return,
    }
}

fn zirEnsureErrUnionPayloadVoid(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
    const tracy = trace(@src());
    defer tracy.end();

    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
    const src = inst_data.src();
    const operand = try sema.resolveInst(inst_data.operand);
    const operand_ty = sema.typeOf(operand);
    const err_union_ty = if (operand_ty.zigTypeTag(mod) == .Pointer)
        operand_ty.childType(mod)
    else
        operand_ty;
    if (err_union_ty.zigTypeTag(mod) != .ErrorUnion) return;
    const payload_ty = err_union_ty.errorUnionPayload(mod).zigTypeTag(mod);
    if (payload_ty != .Void and payload_ty != .NoReturn) {
        const msg = msg: {
            const msg = try sema.errMsg(block, src, "error union payload is ignored", .{});
            errdefer msg.destroy(sema.gpa);
            try sema.errNote(block, src, msg, "payload value can be explicitly ignored with '|_|'", .{});
            break :msg msg;
        };
        return sema.failWithOwnedErrorMsg(msg);
    }
}

fn zirIndexablePtrLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
    const src = inst_data.src();
    const object = try sema.resolveInst(inst_data.operand);

    return indexablePtrLen(sema, block, src, object);
}

fn indexablePtrLen(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    object: Air.Inst.Ref,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const object_ty = sema.typeOf(object);
    const is_pointer_to = object_ty.isSinglePointer(mod);
    const indexable_ty = if (is_pointer_to) object_ty.childType(mod) else object_ty;
    try checkIndexable(sema, block, src, indexable_ty);
    const field_name = try mod.intern_pool.getOrPutString(sema.gpa, "len");
    return sema.fieldVal(block, src, object, field_name, src);
}

fn indexablePtrLenOrNone(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    operand: Air.Inst.Ref,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const operand_ty = sema.typeOf(operand);
    try checkMemOperand(sema, block, src, operand_ty);
    if (operand_ty.ptrSize(mod) == .Many) return .none;
    const field_name = try mod.intern_pool.getOrPutString(sema.gpa, "len");
    return sema.fieldVal(block, src, operand, field_name, src);
}

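/// Analyzes an extended `alloc` ZIR instruction carrying optional type,
/// alignment, comptime, and const information, lowering it to a comptime
/// alloc, a typed runtime `alloc`, or an `inferred_alloc` resolved later.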
fn zirAllocExtended(
    sema: *Sema,
    block: *Block,
    extended: Zir.Inst.Extended.InstData,
) CompileError!Air.Inst.Ref {
    const gpa = sema.gpa;
    const extra = sema.code.extraData(Zir.Inst.AllocExtended, extended.operand);
    const ty_src: LazySrcLoc = .{ .node_offset_var_decl_ty = extra.data.src_node };
    const align_src: LazySrcLoc = .{ .node_offset_var_decl_align = extra.data.src_node };
    const small = @as(Zir.Inst.AllocExtended.Small, @bitCast(extended.small));

    var extra_index: usize = extra.end;

    const var_ty: Type = if (small.has_type) blk: {
        const type_ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_index]));
        extra_index += 1;
        break :blk try sema.resolveType(block, ty_src, type_ref);
    } else undefined;

    const alignment = if (small.has_align) blk: {
        const align_ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_index]));
        extra_index += 1;
        const alignment = try sema.resolveAlign(block, align_src, align_ref);
        break :blk alignment;
    } else .none;

    if (block.is_comptime or small.is_comptime) {
        if (small.has_type) {
            return sema.analyzeComptimeAlloc(block, var_ty, alignment);
        } else {
            try sema.air_instructions.append(gpa, .{
                .tag = .inferred_alloc_comptime,
                .data = .{ .inferred_alloc_comptime = .{
                    .decl_index = undefined,
                    .alignment = alignment,
                    .is_const = small.is_const,
                } },
            });
            return Air.indexToRef(@as(u32, @intCast(sema.air_instructions.len - 1)));
        }
    }

    if (small.has_type) {
        if (!small.is_const) {
            try sema.validateVarType(block, ty_src, var_ty, false);
        }
        const target = sema.mod.getTarget();
        try sema.resolveTypeLayout(var_ty);
        const ptr_type = try sema.mod.ptrType(.{
            .child = var_ty.toIntern(),
            .flags = .{
                .alignment = alignment,
                .address_space = target_util.defaultAddressSpace(target, .local),
            },
        });
        return block.addTy(.alloc, ptr_type);
    }

    const result_index = try block.addInstAsIndex(.{
        .tag = .inferred_alloc,
        .data = .{ .inferred_alloc = .{
            .alignment = alignment,
            .is_const = small.is_const,
        } },
    });
    try sema.unresolved_inferred_allocs.putNoClobber(gpa, result_index, .{});
    return Air.indexToRef(result_index);
}

fn zirAllocComptime(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
    const ty_src: LazySrcLoc = .{ .node_offset_var_decl_ty = inst_data.src_node };
    const var_ty = try sema.resolveType(block, ty_src, inst_data.operand);
    return sema.analyzeComptimeAlloc(block, var_ty, .none);
}

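/// Lowers `make_ptr_const`: if every store to the underlying `.alloc` was
/// comptime-known, the alloc is replaced by a reference to an anonymous Decl;
/// otherwise the pointer type is simply made const.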
fn zirMakePtrConst(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
    const alloc = try sema.resolveInst(inst_data.operand);
    const alloc_ty = sema.typeOf(alloc);

    var ptr_info = alloc_ty.ptrInfo(mod);
    const elem_ty = ptr_info.child.toType();

    // Detect if all stores to an `.alloc` were comptime-known.
    ct: {
        var search_index: usize = block.instructions.items.len;
        const air_tags = sema.air_instructions.items(.tag);
        const air_datas = sema.air_instructions.items(.data);

        const store_inst = while (true) {
            if (search_index == 0) break :ct;
            search_index -= 1;

            const candidate = block.instructions.items[search_index];
            switch (air_tags[candidate]) {
                .dbg_stmt, .dbg_block_begin, .dbg_block_end => continue,
                .store, .store_safe => break candidate,
                else => break :ct,
            }
        };

        while (true) {
            if (search_index == 0) break :ct;
            search_index -= 1;

            const candidate = block.instructions.items[search_index];
            switch (air_tags[candidate]) {
                .dbg_stmt, .dbg_block_begin, .dbg_block_end => continue,
                .alloc => {
                    if (Air.indexToRef(candidate) != alloc) break :ct;
                    break;
                },
                else => break :ct,
            }
        }

        const store_op = air_datas[store_inst].bin_op;
        const store_val = (try sema.resolveMaybeUndefVal(store_op.rhs)) orelse break :ct;
        if (store_op.lhs != alloc) break :ct;

        // Remove all the unnecessary runtime instructions.
        block.instructions.shrinkRetainingCapacity(search_index);

        var anon_decl = try block.startAnonDecl();
        defer anon_decl.deinit();
        return sema.analyzeDeclRef(try anon_decl.finish(elem_ty, store_val, ptr_info.flags.alignment));
    }

    return sema.makePtrConst(block, alloc);
}

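/// Returns `alloc` with its pointer type changed to const, as a coerced
/// comptime value when possible and as a bitcast otherwise.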
fn makePtrConst(sema: *Sema, block: *Block, alloc: Air.Inst.Ref) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const alloc_ty = sema.typeOf(alloc);

    var ptr_info = alloc_ty.ptrInfo(mod);
    ptr_info.flags.is_const = true;
    const const_ptr_ty = try mod.ptrType(ptr_info);

    // Detect if a comptime value simply needs to have its type changed.
    if (try sema.resolveMaybeUndefVal(alloc)) |val| {
        return Air.internedToRef((try mod.getCoerced(val, const_ptr_ty)).toIntern());
    }

    return block.addBitCast(const_ptr_ty, alloc);
}

fn zirAllocInferredComptime(
    sema: *Sema,
    inst: Zir.Inst.Index,
    is_const: bool,
) CompileError!Air.Inst.Ref {
    const gpa = sema.gpa;
    const src_node = sema.code.instructions.items(.data)[inst].node;
    const src = LazySrcLoc.nodeOffset(src_node);
    sema.src = src;

    try sema.air_instructions.append(gpa, .{
        .tag = .inferred_alloc_comptime,
        .data = .{ .inferred_alloc_comptime = .{
            .decl_index = undefined,
            .alignment = .none,
            .is_const = is_const,
        } },
    });
    return Air.indexToRef(@as(u32, @intCast(sema.air_instructions.len - 1)));
}

fn zirAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
    const ty_src: LazySrcLoc = .{ .node_offset_var_decl_ty = inst_data.src_node };
    const var_ty = try sema.resolveType(block, ty_src, inst_data.operand);
    if (block.is_comptime) {
        return sema.analyzeComptimeAlloc(block, var_ty, .none);
    }
    const target = sema.mod.getTarget();
    const ptr_type = try sema.mod.ptrType(.{
        .child = var_ty.toIntern(),
        .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) },
    });
    try sema.queueFullTypeResolution(var_ty);
    return block.addTy(.alloc, ptr_type);
}

fn zirAllocMut(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
    const ty_src: LazySrcLoc = .{ .node_offset_var_decl_ty = inst_data.src_node };
    const var_ty = try sema.resolveType(block, ty_src, inst_data.operand);
    if (block.is_comptime) {
        return sema.analyzeComptimeAlloc(block, var_ty, .none);
    }
    try sema.validateVarType(block, ty_src, var_ty, false);
    const target = sema.mod.getTarget();
    const ptr_type = try sema.mod.ptrType(.{
        .child = var_ty.toIntern(),
        .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) },
    });
    try sema.queueFullTypeResolution(var_ty);
    return block.addTy(.alloc, ptr_type);
}

fn zirAllocInferred(
    sema: *Sema,
    block: *Block,
    inst: Zir.Inst.Index,
    is_const: bool,
) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const gpa = sema.gpa;
    const src_node = sema.code.instructions.items(.data)[inst].node;
    const src = LazySrcLoc.nodeOffset(src_node);
    sema.src = src;

    if (block.is_comptime) {
        try sema.air_instructions.append(gpa, .{
            .tag = .inferred_alloc_comptime,
            .data = .{ .inferred_alloc_comptime = .{
                .decl_index = undefined,
                .alignment = .none,
                .is_const = is_const,
            } },
        });
        return Air.indexToRef(@as(u32, @intCast(sema.air_instructions.len - 1)));
    }

    const result_index = try block.addInstAsIndex(.{
        .tag = .inferred_alloc,
        .data = .{ .inferred_alloc = .{
            .alignment = .none,
            .is_const = is_const,
        } },
    });
    try sema.unresolved_inferred_allocs.putNoClobber(gpa, result_index, .{});
    return Air.indexToRef(result_index);
}

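/// Resolves an `inferred_alloc` or `inferred_alloc_comptime` once all stores
/// to it have been seen: peer-resolves the stored types, turns the
/// instruction into a normal `alloc` (or a comptime-known Decl pointer), and
/// patches the placeholder instructions left behind by `coerce_result_ptr`.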
fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
    const tracy = trace(@src());
    defer tracy.end();

    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
    const src = inst_data.src();
    const ty_src: LazySrcLoc = .{ .node_offset_var_decl_ty = inst_data.src_node };
    const ptr = try sema.resolveInst(inst_data.operand);
    const ptr_inst = Air.refToIndex(ptr).?;
    const target = mod.getTarget();

    switch (sema.air_instructions.items(.tag)[ptr_inst]) {
        .inferred_alloc_comptime => {
            const iac = sema.air_instructions.items(.data)[ptr_inst].inferred_alloc_comptime;
            const decl_index = iac.decl_index;
            try mod.declareDeclDependency(sema.owner_decl_index, decl_index);

            const decl = mod.declPtr(decl_index);
            if (iac.is_const) _ = try decl.internValue(mod);
            const final_elem_ty = decl.ty;
            const final_ptr_ty = try mod.ptrType(.{
                .child = final_elem_ty.toIntern(),
                .flags = .{
                    .is_const = false,
                    .alignment = iac.alignment,
                    .address_space = target_util.defaultAddressSpace(target, .local),
                },
            });

            if (std.debug.runtime_safety) {
                // The inferred_alloc_comptime should never be referenced again
                sema.air_instructions.set(ptr_inst, .{ .tag = undefined, .data = undefined });
            }

            try sema.maybeQueueFuncBodyAnalysis(decl_index);

            const interned = try mod.intern(.{ .ptr = .{
                .ty = final_ptr_ty.toIntern(),
                .addr = if (!iac.is_const) .{ .mut_decl = .{
                    .decl = decl_index,
                    .runtime_index = block.runtime_index,
                } } else .{ .decl = decl_index },
            } });

            // Remap the ZIR operand to the resolved pointer value
            sema.inst_map.putAssumeCapacity(Zir.refToIndex(inst_data.operand).?, Air.internedToRef(interned));
        },
        .inferred_alloc => {
            const ia1 = sema.air_instructions.items(.data)[ptr_inst].inferred_alloc;
            const ia2 = sema.unresolved_inferred_allocs.fetchRemove(ptr_inst).?.value;
            const peer_inst_list = ia2.prongs.items(.stored_inst);
            const final_elem_ty = try sema.resolvePeerTypes(block, ty_src, peer_inst_list, .none);

            const final_ptr_ty = try mod.ptrType(.{
                .child = final_elem_ty.toIntern(),
                .flags = .{
                    .alignment = ia1.alignment,
                    .address_space = target_util.defaultAddressSpace(target, .local),
                },
            });

            if (!ia1.is_const) {
                try sema.validateVarType(block, ty_src, final_elem_ty, false);
            } else ct: {
                // Detect if the value is comptime-known. In such case, the
                // last 3 AIR instructions of the block will look like this:
                //
                // %a = inferred_alloc
                // %b = bitcast(%a)
                // %c = store(%b, %d)
                //
                // If `%d` is comptime-known, then we want to store the value
                // inside an anonymous Decl and then erase these three AIR
                // instructions from the block, replacing the inst_map entry
                // corresponding to the ZIR alloc instruction with a constant
                // decl_ref pointing at our new Decl.
                // dbg_stmt instructions may be interspersed into this pattern
                // which must be ignored.
                if (block.instructions.items.len < 3) break :ct;
                var search_index: usize = block.instructions.items.len;
                const air_tags = sema.air_instructions.items(.tag);
                const air_datas = sema.air_instructions.items(.data);

                const store_inst = while (true) {
                    if (search_index == 0) break :ct;
                    search_index -= 1;

                    const candidate = block.instructions.items[search_index];
                    switch (air_tags[candidate]) {
                        .dbg_stmt, .dbg_block_begin, .dbg_block_end => continue,
                        .store, .store_safe => break candidate,
                        else => break :ct,
                    }
                };

                const bitcast_inst = while (true) {
                    if (search_index == 0) break :ct;
                    search_index -= 1;

                    const candidate = block.instructions.items[search_index];
                    switch (air_tags[candidate]) {
                        .dbg_stmt, .dbg_block_begin, .dbg_block_end => continue,
                        .bitcast => break candidate,
                        else => break :ct,
                    }
                };

                while (true) {
                    if (search_index == 0) break :ct;
                    search_index -= 1;

                    const candidate = block.instructions.items[search_index];
                    if (candidate == ptr_inst) break;
                    switch (air_tags[candidate]) {
                        .dbg_stmt, .dbg_block_begin, .dbg_block_end => continue,
                        else => break :ct,
                    }
                }

                const store_op = air_datas[store_inst].bin_op;
                const store_val = (try sema.resolveMaybeUndefVal(store_op.rhs)) orelse break :ct;
                if (store_op.lhs != Air.indexToRef(bitcast_inst)) break :ct;
                if (air_datas[bitcast_inst].ty_op.operand != ptr) break :ct;

                const new_decl_index = d: {
                    var anon_decl = try block.startAnonDecl();
                    defer anon_decl.deinit();
                    const new_decl_index = try anon_decl.finish(final_elem_ty, store_val, ia1.alignment);
                    break :d new_decl_index;
                };
                try mod.declareDeclDependency(sema.owner_decl_index, new_decl_index);

                // Remove the instruction from the block so that codegen does not see it.
                block.instructions.shrinkRetainingCapacity(search_index);
                try sema.maybeQueueFuncBodyAnalysis(new_decl_index);

                if (std.debug.runtime_safety) {
                    // The inferred_alloc should never be referenced again
                    sema.air_instructions.set(ptr_inst, .{ .tag = undefined, .data = undefined });
                }

                const interned = try mod.intern(.{ .ptr = .{
                    .ty = final_ptr_ty.toIntern(),
                    .addr = .{ .decl = new_decl_index },
                } });

                // Remap the ZIR operand to the resolved pointer value
                sema.inst_map.putAssumeCapacity(Zir.refToIndex(inst_data.operand).?, Air.internedToRef(interned));

                // Unless the block is comptime, `alloc_inferred` always produces
                // a runtime constant. The final inferred type needs to be
                // fully resolved so it can be lowered in codegen.
                try sema.resolveTypeFully(final_elem_ty);

                return;
            }

            try sema.queueFullTypeResolution(final_elem_ty);

            // Change it to a normal alloc.
            sema.air_instructions.set(ptr_inst, .{
                .tag = .alloc,
                .data = .{ .ty = final_ptr_ty },
            });

            // Now we need to go back over all the coerce_result_ptr instructions, which
            // previously inserted a bitcast as a placeholder, and do the logic as if
            // the new result ptr type was available.
            const placeholders = ia2.prongs.items(.placeholder);
            const gpa = sema.gpa;

            var trash_block = block.makeSubBlock();
            trash_block.is_comptime = false;
            defer trash_block.instructions.deinit(gpa);

            const mut_final_ptr_ty = try mod.ptrType(.{
                .child = final_elem_ty.toIntern(),
                .flags = .{
                    .alignment = ia1.alignment,
                    .address_space = target_util.defaultAddressSpace(target, .local),
                },
            });
            const dummy_ptr = try trash_block.addTy(.alloc, mut_final_ptr_ty);
            const empty_trash_count = trash_block.instructions.items.len;

            for (peer_inst_list, placeholders) |peer_inst, placeholder_inst| {
                const sub_ptr_ty = sema.typeOf(Air.indexToRef(placeholder_inst));

                if (mut_final_ptr_ty.eql(sub_ptr_ty, mod)) {
                    // New result location type is the same as the old one; nothing
                    // to do here.
                    continue;
                }

                var replacement_block = block.makeSubBlock();
                defer replacement_block.instructions.deinit(gpa);

                const result = switch (sema.air_instructions.items(.tag)[placeholder_inst]) {
                    .bitcast => result: {
                        trash_block.instructions.shrinkRetainingCapacity(empty_trash_count);
                        const sub_ptr = try sema.coerceResultPtr(&replacement_block, src, ptr, dummy_ptr, peer_inst, &trash_block);

                        assert(replacement_block.instructions.items.len > 0);
                        break :result sub_ptr;
                    },
                    .store, .store_safe => result: {
                        const bin_op = sema.air_instructions.items(.data)[placeholder_inst].bin_op;
                        try sema.storePtr2(&replacement_block, src, bin_op.lhs, src, bin_op.rhs, src, .bitcast);
                        break :result .void_value;
                    },
                    else => unreachable,
                };

                // If only one instruction is produced then we can replace the bitcast
                // placeholder instruction with this instruction; no need for an entire block.
                if (replacement_block.instructions.items.len == 1) {
                    const only_inst = replacement_block.instructions.items[0];
                    sema.air_instructions.set(placeholder_inst, sema.air_instructions.get(only_inst));
                    continue;
                }

                // Here we replace the placeholder bitcast instruction with a block
                // that does the coerce_result_ptr logic.
                _ = try replacement_block.addBr(placeholder_inst, result);
                const ty_inst = if (result == .void_value)
                    .void_type
                else
                    sema.air_instructions.items(.data)[placeholder_inst].ty_op.ty;
                try sema.air_extra.ensureUnusedCapacity(
                    gpa,
                    @typeInfo(Air.Block).Struct.fields.len + replacement_block.instructions.items.len,
                );
                sema.air_instructions.set(placeholder_inst, .{
                    .tag = .block,
                    .data = .{ .ty_pl = .{
                        .ty = ty_inst,
                        .payload = sema.addExtraAssumeCapacity(Air.Block{
                            .body_len = @as(u32, @intCast(replacement_block.instructions.items.len)),
                        }),
                    } },
                });
                sema.air_extra.appendSliceAssumeCapacity(replacement_block.instructions.items);
            }
        },
        else => unreachable,
    }
}

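/// Given a result pointer, unwraps error unions and optionals and verifies
/// that the resulting element type supports array initialization.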
fn zirArrayBasePtr(
    sema: *Sema,
    block: *Block,
    inst: Zir.Inst.Index,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
    const src = inst_data.src();

    const start_ptr = try sema.resolveInst(inst_data.operand);
    var base_ptr = start_ptr;
    while (true) switch (sema.typeOf(base_ptr).childType(mod).zigTypeTag(mod)) {
        .ErrorUnion => base_ptr = try sema.analyzeErrUnionPayloadPtr(block, src, base_ptr, false, true),
        .Optional => base_ptr = try sema.analyzeOptionalPayloadPtr(block, src, base_ptr, false, true),
        else => break,
    };

    const elem_ty = sema.typeOf(base_ptr).childType(mod);
    switch (elem_ty.zigTypeTag(mod)) {
        .Array, .Vector => return base_ptr,
        .Struct => if (elem_ty.isTuple(mod)) {
            // TODO validate element count
            return base_ptr;
        },
        else => {},
    }
    return sema.failWithArrayInitNotSupported(block, src, sema.typeOf(start_ptr).childType(mod));
}

fn zirFieldBasePtr(
    sema: *Sema,
    block: *Block,
    inst: Zir.Inst.Index,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
    const src = inst_data.src();

    const start_ptr = try sema.resolveInst(inst_data.operand);
    var base_ptr = start_ptr;
    while (true) switch (sema.typeOf(base_ptr).childType(mod).zigTypeTag(mod)) {
        .ErrorUnion => base_ptr = try sema.analyzeErrUnionPayloadPtr(block, src, base_ptr, false, true),
        .Optional => base_ptr = try sema.analyzeOptionalPayloadPtr(block, src, base_ptr, false, true),
        else => break,
    };

    const elem_ty = sema.typeOf(base_ptr).childType(mod);
    switch (elem_ty.zigTypeTag(mod)) {
        .Struct, .Union => return base_ptr,
        else => {},
    }
    return sema.failWithStructInitNotSupported(block, src, sema.typeOf(start_ptr).childType(mod));
}

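/// Computes the shared length of a `for` loop's inputs: comptime-known
/// lengths must match exactly, an error is emitted when no input provides an
/// upper bound, and runtime lengths get safety-checked equality comparisons.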
fn zirForLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const gpa = sema.gpa;
    const ip = &mod.intern_pool;
    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const extra = sema.code.extraData(Zir.Inst.MultiOp, inst_data.payload_index);
    const args = sema.code.refSlice(extra.end, extra.data.operands_len);
    const src = inst_data.src();

    var len: Air.Inst.Ref = .none;
    var len_val: ?Value = null;
    var len_idx: u32 = undefined;
    var any_runtime = false;

    const runtime_arg_lens = try gpa.alloc(Air.Inst.Ref, args.len);
    defer gpa.free(runtime_arg_lens);

    // First pass to look for comptime values.
    for (args, 0..) |zir_arg, i_usize| {
        const i = @as(u32, @intCast(i_usize));
        runtime_arg_lens[i] = .none;
        if (zir_arg == .none) continue;
        const object = try sema.resolveInst(zir_arg);
        const object_ty = sema.typeOf(object);
        // Each arg could be an indexable, or a range, in which case the length
        // is passed directly as an integer.
        const is_int = switch (object_ty.zigTypeTag(mod)) {
            .Int, .ComptimeInt => true,
            else => false,
        };
        const arg_src: LazySrcLoc = .{ .for_input = .{
            .for_node_offset = inst_data.src_node,
            .input_index = i,
        } };
        const arg_len_uncoerced = if (is_int) object else l: {
            if (!object_ty.isIndexable(mod)) {
                // Instead of using checkIndexable we customize this error.
                const msg = msg: {
                    const msg = try sema.errMsg(block, arg_src, "type '{}' is not indexable and not a range", .{object_ty.fmt(sema.mod)});
                    errdefer msg.destroy(sema.gpa);
                    try sema.errNote(block, arg_src, msg, "for loop operand must be a range, array, slice, tuple, or vector", .{});
                    break :msg msg;
                };
                return sema.failWithOwnedErrorMsg(msg);
            }
            if (!object_ty.indexableHasLen(mod)) continue;

            break :l try sema.fieldVal(block, arg_src, object, try ip.getOrPutString(gpa, "len"), arg_src);
        };
        const arg_len = try sema.coerce(block, Type.usize, arg_len_uncoerced, arg_src);
        if (len == .none) {
            len = arg_len;
            len_idx = i;
        }
        if (try sema.resolveDefinedValue(block, src, arg_len)) |arg_val| {
            if (len_val) |v| {
                if (!(try sema.valuesEqual(arg_val, v, Type.usize))) {
                    const msg = msg: {
                        const msg = try sema.errMsg(block, src, "non-matching for loop lengths", .{});
                        errdefer msg.destroy(gpa);
                        const a_src: LazySrcLoc = .{ .for_input = .{
                            .for_node_offset = inst_data.src_node,
                            .input_index = len_idx,
                        } };
                        try sema.errNote(block, a_src, msg, "length {} here", .{
                            v.fmtValue(Type.usize, sema.mod),
                        });
                        try sema.errNote(block, arg_src, msg, "length {} here", .{
                            arg_val.fmtValue(Type.usize, sema.mod),
                        });
                        break :msg msg;
                    };
                    return sema.failWithOwnedErrorMsg(msg);
                }
            } else {
                len = arg_len;
                len_val = arg_val;
                len_idx = i;
            }
            continue;
        }
        runtime_arg_lens[i] = arg_len;
        any_runtime = true;
    }

    if (len == .none) {
        const msg = msg: {
            const msg = try sema.errMsg(block, src, "unbounded for loop", .{});
            errdefer msg.destroy(gpa);
            for (args, 0..) |zir_arg, i_usize| {
                const i = @as(u32, @intCast(i_usize));
                if (zir_arg == .none) continue;
                const object = try sema.resolveInst(zir_arg);
                const object_ty = sema.typeOf(object);
                // Each arg could be an indexable, or a range, in which case the length
                // is passed directly as an integer.
                switch (object_ty.zigTypeTag(mod)) {
                    .Int, .ComptimeInt => continue,
                    else => {},
                }
                const arg_src: LazySrcLoc = .{ .for_input = .{
                    .for_node_offset = inst_data.src_node,
                    .input_index = i,
                } };
                try sema.errNote(block, arg_src, msg, "type '{}' has no upper bound", .{
                    object_ty.fmt(sema.mod),
                });
            }
            break :msg msg;
        };
        return sema.failWithOwnedErrorMsg(msg);
    }

    // Now for the runtime checks.
    if (any_runtime and block.wantSafety()) {
        for (runtime_arg_lens, 0..) |arg_len, i| {
            if (arg_len == .none) continue;
            if (i == len_idx) continue;
            const ok = try block.addBinOp(.cmp_eq, len, arg_len);
            try sema.addSafetyCheck(block, src, ok, .for_len_mismatch);
        }
    }

    return len;
}

fn zirOptEuBaseTy(
    sema: *Sema,
    block: *Block,
    inst: Zir.Inst.Index,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
    var ty = sema.resolveType(block, .unneeded, inst_data.operand) catch |err| switch (err) {
        // Since this is a ZIR instruction that returns a type, encountering
        // generic poison should not result in a failed compilation, but the
        // generic poison type. This prevents unnecessary failures when
        // constructing types at compile-time.
        error.GenericPoison => return .generic_poison_type,
        else => |e| return e,
    };
    while (true) {
        switch (ty.zigTypeTag(mod)) {
            .Optional => ty = ty.optionalChild(mod),
            .ErrorUnion => ty = ty.errorUnionPayload(mod),
            else => return Air.internedToRef(ty.toIntern()),
        }
    }
}

fn zirValidateArrayInitTy(
    sema: *Sema,
    block: *Block,
    inst: Zir.Inst.Index,
) CompileError!void {
    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const src = inst_data.src();
    const ty_src: LazySrcLoc = .{ .node_offset_init_ty = inst_data.src_node };
    const extra = sema.code.extraData(Zir.Inst.ArrayInit, inst_data.payload_index).data;
    const ty = sema.resolveType(block, ty_src, extra.ty) catch |err| switch (err) {
        // It's okay for the type to be unknown: this will result in an anonymous array init.
        error.GenericPoison => return,
        else => |e| return e,
    };

    switch (ty.zigTypeTag(mod)) {
        .Array => {
            const array_len = ty.arrayLen(mod);
            if (extra.init_count != array_len) {
                return sema.fail(block, src, "expected {d} array elements; found {d}", .{
                    array_len, extra.init_count,
                });
            }
            return;
        },
        .Vector => {
            const array_len = ty.arrayLen(mod);
            if (extra.init_count != array_len) {
                return sema.fail(block, src, "expected {d} vector elements; found {d}", .{
                    array_len, extra.init_count,
                });
            }
            return;
        },
        .Struct => if (ty.isTuple(mod)) {
            try sema.resolveTypeFields(ty);
            const array_len = ty.arrayLen(mod);
            if (extra.init_count > array_len) {
                return sema.fail(block, src, "expected at most {d} tuple fields; found {d}", .{
                    array_len, extra.init_count,
                });
            }
            return;
        },
        else => {},
    }
    return sema.failWithArrayInitNotSupported(block, ty_src, ty);
}

fn zirValidateStructInitTy(
    sema: *Sema,
    block: *Block,
    inst: Zir.Inst.Index,
) CompileError!void {
    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
    const src = inst_data.src();
    const ty = sema.resolveType(block, src, inst_data.operand) catch |err| switch (err) {
        // It's okay for the type to be unknown: this will result in an anonymous struct init.
        error.GenericPoison => return,
        else => |e| return e,
    };

    switch (ty.zigTypeTag(mod)) {
        .Struct, .Union => return,
        else => {},
    }
    return sema.failWithStructInitNotSupported(block, src, ty);
}

fn zirValidateStructInit(
    sema: *Sema,
    block: *Block,
    inst: Zir.Inst.Index,
) CompileError!void {
    const tracy = trace(@src());
    defer tracy.end();

    const mod = sema.mod;
    const validate_inst = sema.code.instructions.items(.data)[inst].pl_node;
    const init_src = validate_inst.src();
    const validate_extra = sema.code.extraData(Zir.Inst.Block, validate_inst.payload_index);
    const instrs = sema.code.extra[validate_extra.end..][0..validate_extra.data.body_len];
    const field_ptr_data = sema.code.instructions.items(.data)[instrs[0]].pl_node;
    const field_ptr_extra = sema.code.extraData(Zir.Inst.Field, field_ptr_data.payload_index).data;
    const object_ptr = try sema.resolveInst(field_ptr_extra.lhs);
    const agg_ty = sema.typeOf(object_ptr).childType(mod);
    switch (agg_ty.zigTypeTag(mod)) {
        .Struct => return sema.validateStructInit(
            block,
            agg_ty,
            init_src,
            instrs,
        ),
        .Union => return sema.validateUnionInit(
            block,
            agg_ty,
            init_src,
            instrs,
            object_ptr,
        ),
        else => unreachable,
    }
}

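/// Validates a union initialization expression, enforcing exactly one active
/// field. If the initializer is comptime-known, the runtime field_ptr/store
/// instructions are deleted and replaced with a single comptime union store;
/// otherwise the union tag is set at runtime.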
fn validateUnionInit(
    sema: *Sema,
    block: *Block,
    union_ty: Type,
    init_src: LazySrcLoc,
    instrs: []const Zir.Inst.Index,
    union_ptr: Air.Inst.Ref,
) CompileError!void {
    const mod = sema.mod;
    const gpa = sema.gpa;

    if (instrs.len != 1) {
        const msg = msg: {
            const msg = try sema.errMsg(
                block,
                init_src,
                "cannot initialize multiple union fields at once; unions can only have one active field",
                .{},
            );
            errdefer msg.destroy(gpa);

            for (instrs[1..]) |inst| {
                const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
                const inst_src: LazySrcLoc = .{ .node_offset_initializer = inst_data.src_node };
                try sema.errNote(block, inst_src, msg, "additional initializer here", .{});
            }
            try sema.addDeclaredHereNote(msg, union_ty);
            break :msg msg;
        };
        return sema.failWithOwnedErrorMsg(msg);
    }

    if (block.is_comptime and
        (try sema.resolveDefinedValue(block, init_src, union_ptr)) != null)
    {
        // In this case, comptime machinery already did everything. No work to do here.
        return;
    }

    const field_ptr = instrs[0];
    const field_ptr_data = sema.code.instructions.items(.data)[field_ptr].pl_node;
    const field_src: LazySrcLoc = .{ .node_offset_initializer = field_ptr_data.src_node };
    const field_ptr_extra = sema.code.extraData(Zir.Inst.Field, field_ptr_data.payload_index).data;
    const field_name = try mod.intern_pool.getOrPutString(gpa, sema.code.nullTerminatedString(field_ptr_extra.field_name_start));
    // Validate the field access but ignore the index since we want the tag enum field index.
    _ = try sema.unionFieldIndex(block, union_ty, field_name, field_src);
    const air_tags = sema.air_instructions.items(.tag);
    const air_datas = sema.air_instructions.items(.data);
    const field_ptr_ref = sema.inst_map.get(field_ptr).?;

    // Our task here is to determine if the union is comptime-known. In such case,
    // we erase the runtime AIR instructions for initializing the union, and replace
    // the mapping with the comptime value. Either way, we will need to populate the tag.

    // We expect to see something like this in the current block AIR:
    // %a = alloc(*const U)
    // %b = bitcast(*U, %a)
    // %c = field_ptr(..., %b)
    // %e = store(%c, %d)
    // If %d is a comptime operand, the union is comptime.
    // If the union is comptime, we want `first_block_index`
    // to point at %c so that the bitcast becomes the last instruction in the block.
    //
    // In the case of a comptime-known pointer to a union, the
    // field_ptr instruction is missing, so we have to pattern-match
    // based only on the store instructions.
    // `first_block_index` needs to point to the `field_ptr` if it exists;
    // the `store` otherwise.
    //
    // It's also possible for there to be no store instruction, in the case
    // of nested `coerce_result_ptr` instructions. If we see the `field_ptr`
    // but we have not found a `store`, treat as a runtime-known field.
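    // As a purely illustrative sketch (hypothetical user code): for
    //     u = .{ .x = f() };
    // the store operand %d is the result of `f()`. If it resolves to a
    // comptime value in the scan below, the runtime field_ptr/store pair is
    // deleted and replaced with a single comptime union store.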
    var first_block_index = block.instructions.items.len;
    var block_index = block.instructions.items.len - 1;
    var init_val: ?Value = null;
    var make_runtime = false;
    while (block_index > 0) : (block_index -= 1) {
        const store_inst = block.instructions.items[block_index];
        if (Air.indexToRef(store_inst) == field_ptr_ref) break;
        switch (air_tags[store_inst]) {
            .store, .store_safe => {},
            else => continue,
        }
        const bin_op = air_datas[store_inst].bin_op;
        var ptr_ref = bin_op.lhs;
        if (Air.refToIndex(ptr_ref)) |ptr_inst| if (air_tags[ptr_inst] == .bitcast) {
            ptr_ref = air_datas[ptr_inst].ty_op.operand;
        };
        if (ptr_ref != field_ptr_ref) continue;
        first_block_index = @min(if (Air.refToIndex(field_ptr_ref)) |field_ptr_inst|
            std.mem.lastIndexOfScalar(
                Air.Inst.Index,
                block.instructions.items[0..block_index],
                field_ptr_inst,
            ).?
        else
            block_index, first_block_index);
        init_val = try sema.resolveMaybeUndefValAllowVariablesMaybeRuntime(bin_op.rhs, &make_runtime);
        break;
    }

    const tag_ty = union_ty.unionTagTypeHypothetical(mod);
    const enum_field_index = @as(u32, @intCast(tag_ty.enumFieldIndex(field_name, mod).?));
    const tag_val = try mod.enumValueFieldIndex(tag_ty, enum_field_index);

    if (init_val) |val| {
        // Our task is to delete all the `field_ptr` and `store` instructions, and insert
        // instead a single `store` to the result ptr with a comptime union value.
        block_index = first_block_index;
        for (block.instructions.items[first_block_index..]) |cur_inst| {
            switch (air_tags[cur_inst]) {
                .struct_field_ptr,
                .struct_field_ptr_index_0,
                .struct_field_ptr_index_1,
                .struct_field_ptr_index_2,
                .struct_field_ptr_index_3,
                => if (Air.indexToRef(cur_inst) == field_ptr_ref) continue,
                .bitcast => if (air_datas[cur_inst].ty_op.operand == field_ptr_ref) continue,
                .store, .store_safe => {
                    var ptr_ref = air_datas[cur_inst].bin_op.lhs;
                    if (Air.refToIndex(ptr_ref)) |ptr_inst| if (air_tags[ptr_inst] == .bitcast) {
                        ptr_ref = air_datas[ptr_inst].ty_op.operand;
                    };
                    if (ptr_ref == field_ptr_ref) continue;
                },
                else => {},
            }
            block.instructions.items[block_index] = cur_inst;
            block_index += 1;
        }
        block.instructions.shrinkRetainingCapacity(block_index);

        var union_val = try mod.intern(.{ .un = .{
            .ty = union_ty.toIntern(),
            .tag = tag_val.toIntern(),
            .val = val.toIntern(),
        } });
        if (make_runtime) union_val = try mod.intern(.{ .runtime_value = .{
            .ty = union_ty.toIntern(),
            .val = union_val,
        } });
        const union_init = Air.internedToRef(union_val);
        try sema.storePtr2(block, init_src, union_ptr, init_src, union_init, init_src, .store);
        return;
    } else if (try sema.typeRequiresComptime(union_ty)) {
        return sema.failWithNeededComptime(block, field_ptr_data.src(), "initializer of comptime only union must be comptime-known");
    }

    const new_tag = Air.internedToRef(tag_val.toIntern());
    _ = try block.addBinOp(.set_union_tag, union_ptr, new_tag);
}

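// Illustrative note (hypothetical user code): for
//     const S = struct { a: u32, b: u32 = 7 };
//     var s: S = .{ .a = x };
// the function below records an initializer for field `a`, then materializes
// the default store for `b`; a field with no default would instead produce a
// "missing struct field" error.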
fn validateStructInit(
    sema: *Sema,
    block: *Block,
    struct_ty: Type,
    init_src: LazySrcLoc,
    instrs: []const Zir.Inst.Index,
) CompileError!void {
    const mod = sema.mod;
    const gpa = sema.gpa;
    const ip = &mod.intern_pool;

    const field_indices = try gpa.alloc(u32, instrs.len);
    defer gpa.free(field_indices);

    // Maps field index to field_ptr index of where it was already initialized.
    const found_fields = try gpa.alloc(Zir.Inst.Index, struct_ty.structFieldCount(mod));
    defer gpa.free(found_fields);
    @memset(found_fields, 0);

    var struct_ptr_zir_ref: Zir.Inst.Ref = undefined;

    for (instrs, field_indices) |field_ptr, *field_index| {
        const field_ptr_data = sema.code.instructions.items(.data)[field_ptr].pl_node;
        const field_src: LazySrcLoc = .{ .node_offset_initializer = field_ptr_data.src_node };
        const field_ptr_extra = sema.code.extraData(Zir.Inst.Field, field_ptr_data.payload_index).data;
        struct_ptr_zir_ref = field_ptr_extra.lhs;
        const field_name = try ip.getOrPutString(
            gpa,
            sema.code.nullTerminatedString(field_ptr_extra.field_name_start),
        );
        field_index.* = if (struct_ty.isTuple(mod))
            try sema.tupleFieldIndex(block, struct_ty, field_name, field_src)
        else
            try sema.structFieldIndex(block, struct_ty, field_name, field_src);
        if (found_fields[field_index.*] != 0) {
            const other_field_ptr = found_fields[field_index.*];
            const other_field_ptr_data = sema.code.instructions.items(.data)[other_field_ptr].pl_node;
            const other_field_src: LazySrcLoc = .{ .node_offset_initializer = other_field_ptr_data.src_node };
            const msg = msg: {
                const msg = try sema.errMsg(block, field_src, "duplicate field", .{});
                errdefer msg.destroy(gpa);
                try sema.errNote(block, other_field_src, msg, "other field here", .{});
                break :msg msg;
            };
            return sema.failWithOwnedErrorMsg(msg);
        }
        found_fields[field_index.*] = field_ptr;
    }

    var root_msg: ?*Module.ErrorMsg = null;
    errdefer if (root_msg) |msg| msg.destroy(sema.gpa);

    const struct_ptr = try sema.resolveInst(struct_ptr_zir_ref);
    if (block.is_comptime and
        (try sema.resolveDefinedValue(block, init_src, struct_ptr)) != null)
    {
        try sema.resolveStructLayout(struct_ty);
        // In this case the only thing we need to do is evaluate the implicit
        // store instructions for default field values, and report any missing fields.
        // Avoid the cost of the extra machinery for detecting a comptime struct init value.
        for (found_fields, 0..) |field_ptr, i| {
            if (field_ptr != 0) continue;

            const default_val = struct_ty.structFieldDefaultValue(i, mod);
            if (default_val.toIntern() == .unreachable_value) {
                if (struct_ty.isTuple(mod)) {
                    const template = "missing tuple field with index {d}";
                    if (root_msg) |msg| {
                        try sema.errNote(block, init_src, msg, template, .{i});
                    } else {
                        root_msg = try sema.errMsg(block, init_src, template, .{i});
                    }
                    continue;
                }
                const field_name = struct_ty.structFieldName(i, mod);
                const template = "missing struct field: {}";
                const args = .{field_name.fmt(ip)};
                if (root_msg) |msg| {
                    try sema.errNote(block, init_src, msg, template, args);
                } else {
                    root_msg = try sema.errMsg(block, init_src, template, args);
                }
                continue;
            }

            const field_src = init_src; // TODO better source location
            const default_field_ptr = if (struct_ty.isTuple(mod))
                try sema.tupleFieldPtr(block, init_src, struct_ptr, field_src, @as(u32, @intCast(i)), true)
            else
                try sema.structFieldPtrByIndex(block, init_src, struct_ptr, @as(u32, @intCast(i)), field_src, struct_ty, true);
            const init = Air.internedToRef(default_val.toIntern());
            try sema.storePtr2(block, init_src, default_field_ptr, init_src, init, field_src, .store);
        }

        if (root_msg) |msg| {
            if (mod.typeToStruct(struct_ty)) |struct_obj| {
                const fqn = try struct_obj.getFullyQualifiedName(mod);
                try mod.errNoteNonLazy(
                    struct_obj.srcLoc(mod),
                    msg,
                    "struct '{}' declared here",
                    .{fqn.fmt(ip)},
                );
            }
            root_msg = null;
            return sema.failWithOwnedErrorMsg(msg);
        }

        return;
    }

    var struct_is_comptime = true;
    var first_block_index = block.instructions.items.len;
    var make_runtime = false;

    const require_comptime = try sema.typeRequiresComptime(struct_ty);
    const air_tags = sema.air_instructions.items(.tag);
    const air_datas = sema.air_instructions.items(.data);

    // We collect the comptime field values in case the struct initialization
    // ends up being comptime-known.
    const field_values = try sema.arena.alloc(InternPool.Index, struct_ty.structFieldCount(mod));

    field: for (found_fields, 0..) |field_ptr, i| {
        if (field_ptr != 0) {
            // Determine whether the value stored to this pointer is comptime-known.
            const field_ty = struct_ty.structFieldType(i, mod);
            if (try sema.typeHasOnePossibleValue(field_ty)) |opv| {
                field_values[i] = opv.toIntern();
                continue;
            }

            const field_ptr_ref = sema.inst_map.get(field_ptr).?;

            //std.debug.print("validateStructInit (field_ptr_air_inst=%{d}):\n", .{
            //    field_ptr_air_inst,
            //});
            //for (block.instructions.items) |item| {
            //    std.debug.print("  %{d} = {s}\n", .{item, @tagName(air_tags[item])});
            //}

            // We expect to see something like this in the current block AIR:
            // %a = field_ptr(...)
            // store(%a, %b)
            // With an optional bitcast between the store and the field_ptr.
            // If %b is a comptime operand, this field is comptime.
            //
            // However, in the case of a comptime-known pointer to a struct, the
            // field_ptr instruction is missing, so we have to pattern-match
            // based only on the store instructions.
            // `first_block_index` needs to point to the `field_ptr` if it exists;
            // the `store` otherwise.
            //
            // It's also possible for there to be no store instruction, in the case
            // of nested `coerce_result_ptr` instructions. If we see the `field_ptr`
            // but we have not found a `store`, treat as a runtime-known field.

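            // For example (hypothetical, mirroring the union case above): in
            //     s = .{ .a = 1, .b = g() };
            // field `a` yields a comptime value here, while a runtime-known
            // `g()` forces `struct_is_comptime = false` below.
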
            // Possible performance enhancement: save the `block_index` between iterations
            // of the for loop.
            var block_index = block.instructions.items.len - 1;
            while (block_index > 0) : (block_index -= 1) {
                const store_inst = block.instructions.items[block_index];
                if (Air.indexToRef(store_inst) == field_ptr_ref) {
                    struct_is_comptime = false;
                    continue :field;
                }
                switch (air_tags[store_inst]) {
                    .store, .store_safe => {},
                    else => continue,
                }
                const bin_op = air_datas[store_inst].bin_op;
                var ptr_ref = bin_op.lhs;
                if (Air.refToIndex(ptr_ref)) |ptr_inst| if (air_tags[ptr_inst] == .bitcast) {
                    ptr_ref = air_datas[ptr_inst].ty_op.operand;
                };
                if (ptr_ref != field_ptr_ref) continue;
                first_block_index = @min(if (Air.refToIndex(field_ptr_ref)) |field_ptr_inst|
                    std.mem.lastIndexOfScalar(
                        Air.Inst.Index,
                        block.instructions.items[0..block_index],
                        field_ptr_inst,
                    ).?
                else
                    block_index, first_block_index);
                if (try sema.resolveMaybeUndefValAllowVariablesMaybeRuntime(bin_op.rhs, &make_runtime)) |val| {
                    field_values[i] = val.toIntern();
                } else if (require_comptime) {
                    const field_ptr_data = sema.code.instructions.items(.data)[field_ptr].pl_node;
                    return sema.failWithNeededComptime(block, field_ptr_data.src(), "initializer of comptime only struct must be comptime-known");
                } else {
                    struct_is_comptime = false;
                }
                continue :field;
            }
            struct_is_comptime = false;
            continue :field;
        }

        const default_val = struct_ty.structFieldDefaultValue(i, mod);
        if (default_val.toIntern() == .unreachable_value) {
            if (struct_ty.isTuple(mod)) {
                const template = "missing tuple field with index {d}";
                if (root_msg) |msg| {
                    try sema.errNote(block, init_src, msg, template, .{i});
                } else {
                    root_msg = try sema.errMsg(block, init_src, template, .{i});
                }
                continue;
            }
            const field_name = struct_ty.structFieldName(i, mod);
            const template = "missing struct field: {}";
            const args = .{field_name.fmt(ip)};
            if (root_msg) |msg| {
                try sema.errNote(block, init_src, msg, template, args);
            } else {
                root_msg = try sema.errMsg(block, init_src, template, args);
            }
            continue;
        }
        field_values[i] = default_val.toIntern();
    }

    if (root_msg) |msg| {
        if (mod.typeToStruct(struct_ty)) |struct_obj| {
            const fqn = try struct_obj.getFullyQualifiedName(mod);
            try mod.errNoteNonLazy(
                struct_obj.srcLoc(mod),
                msg,
                "struct '{}' declared here",
                .{fqn.fmt(ip)},
            );
        }
        root_msg = null;
        return sema.failWithOwnedErrorMsg(msg);
    }

    if (struct_is_comptime) {
        // Our task is to delete all the `field_ptr` and `store` instructions, and insert
        // instead a single `store` to the struct_ptr with a comptime struct value.
        var init_index: usize = 0;
        var field_ptr_ref = Air.Inst.Ref.none;
        var block_index = first_block_index;
        for (block.instructions.items[first_block_index..]) |cur_inst| {
            while (field_ptr_ref == .none and init_index < instrs.len) : (init_index += 1) {
                const field_ty = struct_ty.structFieldType(field_indices[init_index], mod);
                if (try field_ty.onePossibleValue(mod)) |_| continue;
                field_ptr_ref = sema.inst_map.get(instrs[init_index]).?;
            }
            switch (air_tags[cur_inst]) {
                .struct_field_ptr,
                .struct_field_ptr_index_0,
                .struct_field_ptr_index_1,
                .struct_field_ptr_index_2,
                .struct_field_ptr_index_3,
                => if (Air.indexToRef(cur_inst) == field_ptr_ref) continue,
                .bitcast => if (air_datas[cur_inst].ty_op.operand == field_ptr_ref) continue,
                .store, .store_safe => {
                    var ptr_ref = air_datas[cur_inst].bin_op.lhs;
                    if (Air.refToIndex(ptr_ref)) |ptr_inst| if (air_tags[ptr_inst] == .bitcast) {
                        ptr_ref = air_datas[ptr_inst].ty_op.operand;
                    };
                    if (ptr_ref == field_ptr_ref) {
                        field_ptr_ref = .none;
                        continue;
                    }
                },
                else => {},
            }
            block.instructions.items[block_index] = cur_inst;
            block_index += 1;
        }
        block.instructions.shrinkRetainingCapacity(block_index);

        var struct_val = try mod.intern(.{ .aggregate = .{
            .ty = struct_ty.toIntern(),
            .storage = .{ .elems = field_values },
        } });
        if (make_runtime) struct_val = try mod.intern(.{ .runtime_value = .{
            .ty = struct_ty.toIntern(),
            .val = struct_val,
        } });
        const struct_init = Air.internedToRef(struct_val);
        try sema.storePtr2(block, init_src, struct_ptr, init_src, struct_init, init_src, .store);
        return;
    }
    try sema.resolveStructLayout(struct_ty);

    // Our task is to insert `store` instructions for all the default field values.
    for (found_fields, 0..) |field_ptr, i| {
        if (field_ptr != 0) continue;

        const field_src = init_src; // TODO better source location
        const default_field_ptr = if (struct_ty.isTuple(mod))
            try sema.tupleFieldPtr(block, init_src, struct_ptr, field_src, @as(u32, @intCast(i)), true)
        else
            try sema.structFieldPtrByIndex(block, init_src, struct_ptr, @as(u32, @intCast(i)), field_src, struct_ty, true);
        const init = Air.internedToRef(field_values[i]);
        try sema.storePtr2(block, init_src, default_field_ptr, init_src, init, field_src, .store);
    }
}

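// Illustrative note (hypothetical user code): an array initializer such as
//     var a: [3]u8 = .{ 1, 2, x };
// reaches the function below with one elem_ptr/store pair per element.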
fn zirValidateArrayInit(
    sema: *Sema,
    block: *Block,
    inst: Zir.Inst.Index,
) CompileError!void {
    const mod = sema.mod;
    const validate_inst = sema.code.instructions.items(.data)[inst].pl_node;
    const init_src = validate_inst.src();
    const validate_extra = sema.code.extraData(Zir.Inst.Block, validate_inst.payload_index);
    const instrs = sema.code.extra[validate_extra.end..][0..validate_extra.data.body_len];
    const first_elem_ptr_data = sema.code.instructions.items(.data)[instrs[0]].pl_node;
    const elem_ptr_extra = sema.code.extraData(Zir.Inst.ElemPtrImm, first_elem_ptr_data.payload_index).data;
    const array_ptr = try sema.resolveInst(elem_ptr_extra.ptr);
    const array_ty = sema.typeOf(array_ptr).childType(mod);
    const array_len = array_ty.arrayLen(mod);

    if (instrs.len != array_len) switch (array_ty.zigTypeTag(mod)) {
        .Struct => {
            var root_msg: ?*Module.ErrorMsg = null;
            errdefer if (root_msg) |msg| msg.destroy(sema.gpa);

            var i = instrs.len;
            while (i < array_len) : (i += 1) {
                const default_val = array_ty.structFieldDefaultValue(i, mod);
                if (default_val.toIntern() == .unreachable_value) {
                    const template = "missing tuple field with index {d}";
                    if (root_msg) |msg| {
                        try sema.errNote(block, init_src, msg, template, .{i});
                    } else {
                        root_msg = try sema.errMsg(block, init_src, template, .{i});
                    }
                }
            }

            if (root_msg) |msg| {
                root_msg = null;
                return sema.failWithOwnedErrorMsg(msg);
            }
        },
        .Array => {
            return sema.fail(block, init_src, "expected {d} array elements; found {d}", .{
                array_len, instrs.len,
            });
        },
        .Vector => {
            return sema.fail(block, init_src, "expected {d} vector elements; found {d}", .{
                array_len, instrs.len,
            });
        },
        else => unreachable,
    };

    if (block.is_comptime and
        (try sema.resolveDefinedValue(block, init_src, array_ptr)) != null)
    {
        // In this case the comptime machinery will have evaluated the store instructions
        // at comptime so we have almost nothing to do here. However, in case of a
        // sentinel-terminated array, the sentinel will not have been populated by
        // any ZIR instructions at comptime; we need to do that here.
        if (array_ty.sentinel(mod)) |sentinel_val| {
            const array_len_ref = try mod.intRef(Type.usize, array_len);
            const sentinel_ptr = try sema.elemPtrArray(block, init_src, init_src, array_ptr, init_src, array_len_ref, true, true);
            const sentinel = Air.internedToRef(sentinel_val.toIntern());
            try sema.storePtr2(block, init_src, sentinel_ptr, init_src, sentinel, init_src, .store);
        }
        return;
    }

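    // For instance (hypothetical): for `comptime var a: [2:0]u8 = .{ 1, 2 };`
    // the element stores already ran at comptime, but the trailing `0`
    // sentinel slot is only written by the explicit store in the branch above.
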
    // If the array has one possible value, the value is always comptime-known.
    if (try sema.typeHasOnePossibleValue(array_ty)) |array_opv| {
        const array_init = Air.internedToRef(array_opv.toIntern());
        try sema.storePtr2(block, init_src, array_ptr, init_src, array_init, init_src, .store);
        return;
    }

    var array_is_comptime = true;
    var first_block_index = block.instructions.items.len;
    var make_runtime = false;

    // Collect the comptime element values in case the array literal ends up
    // being comptime-known.
    const element_vals = try sema.arena.alloc(
        InternPool.Index,
        try sema.usizeCast(block, init_src, array_len),
    );
    const air_tags = sema.air_instructions.items(.tag);
    const air_datas = sema.air_instructions.items(.data);

    outer: for (instrs, 0..) |elem_ptr, i| {
        // Determine whether the value stored to this pointer is comptime-known.

        if (array_ty.isTuple(mod)) {
            if (try array_ty.structFieldValueComptime(mod, i)) |opv| {
                element_vals[i] = opv.toIntern();
                continue;
            }
        }

        const elem_ptr_ref = sema.inst_map.get(elem_ptr).?;

        // We expect to see something like this in the current block AIR:
        // %a = elem_ptr(...)
        // store(%a, %b)
        // With an optional bitcast between the store and the elem_ptr.
        // If %b is a comptime operand, this element is comptime.
        //
        // However, in the case of a comptime-known pointer to an array, the
        // elem_ptr instruction is missing, so we have to pattern-match
        // based only on the store instructions.
        // `first_block_index` needs to point to the `elem_ptr` if it exists;
        // the `store` otherwise.
        //
        // It's also possible for there to be no store instruction, in the case
        // of nested `coerce_result_ptr` instructions. If we see the `elem_ptr`
        // but we have not found a `store`, treat as a runtime-known element.
        //
        // This is nearly identical to similar logic in `validateStructInit`.

        // Possible performance enhancement: save the `block_index` between iterations
        // of the for loop.
        var block_index = block.instructions.items.len - 1;
        while (block_index > 0) : (block_index -= 1) {
            const store_inst = block.instructions.items[block_index];
            if (Air.indexToRef(store_inst) == elem_ptr_ref) {
                array_is_comptime = false;
                continue :outer;
            }
            switch (air_tags[store_inst]) {
                .store, .store_safe => {},
                else => continue,
            }
            const bin_op = air_datas[store_inst].bin_op;
            var ptr_ref = bin_op.lhs;
            if (Air.refToIndex(ptr_ref)) |ptr_inst| if (air_tags[ptr_inst] == .bitcast) {
                ptr_ref = air_datas[ptr_inst].ty_op.operand;
            };
            if (ptr_ref != elem_ptr_ref) continue;
            first_block_index = @min(if (Air.refToIndex(elem_ptr_ref)) |elem_ptr_inst|
                std.mem.lastIndexOfScalar(
                    Air.Inst.Index,
                    block.instructions.items[0..block_index],
                    elem_ptr_inst,
                ).?
            else
                block_index, first_block_index);
            if (try sema.resolveMaybeUndefValAllowVariablesMaybeRuntime(bin_op.rhs, &make_runtime)) |val| {
                element_vals[i] = val.toIntern();
            } else {
                array_is_comptime = false;
            }
            continue :outer;
        }
        array_is_comptime = false;
        continue :outer;
    }

    if (array_is_comptime) {
        if (try sema.resolveDefinedValue(block, init_src, array_ptr)) |ptr_val| {
            switch (mod.intern_pool.indexToKey(ptr_val.toIntern())) {
                .ptr => |ptr| switch (ptr.addr) {
                    .comptime_field => return, // This store was validated by the individual elem ptrs.
                    else => {},
                },
                else => {},
            }
        }

        // Our task is to delete all the `elem_ptr` and `store` instructions, and insert
        // instead a single `store` to the array_ptr with a comptime struct value.
        var elem_index: usize = 0;
        var elem_ptr_ref = Air.Inst.Ref.none;
        var block_index = first_block_index;
        for (block.instructions.items[first_block_index..]) |cur_inst| {
            while (elem_ptr_ref == .none and elem_index < instrs.len) : (elem_index += 1) {
                if (array_ty.isTuple(mod) and array_ty.structFieldIsComptime(elem_index, mod)) continue;
                elem_ptr_ref = sema.inst_map.get(instrs[elem_index]).?;
            }
            switch (air_tags[cur_inst]) {
                .ptr_elem_ptr => if (Air.indexToRef(cur_inst) == elem_ptr_ref) continue,
                .bitcast => if (air_datas[cur_inst].ty_op.operand == elem_ptr_ref) continue,
                .store, .store_safe => {
                    var ptr_ref = air_datas[cur_inst].bin_op.lhs;
                    if (Air.refToIndex(ptr_ref)) |ptr_inst| if (air_tags[ptr_inst] == .bitcast) {
                        ptr_ref = air_datas[ptr_inst].ty_op.operand;
                    };
                    if (ptr_ref == elem_ptr_ref) {
                        elem_ptr_ref = .none;
                        continue;
                    }
                },
                else => {},
            }
            block.instructions.items[block_index] = cur_inst;
            block_index += 1;
        }
        block.instructions.shrinkRetainingCapacity(block_index);

        var array_val = try mod.intern(.{ .aggregate = .{
            .ty = array_ty.toIntern(),
            .storage = .{ .elems = element_vals },
        } });
        if (make_runtime) array_val = try mod.intern(.{ .runtime_value = .{
            .ty = array_ty.toIntern(),
            .val = array_val,
        } });
        const array_init = Air.internedToRef(array_val);
        try sema.storePtr2(block, init_src, array_ptr, init_src, array_init, init_src, .store);
    }
}

fn zirValidateDeref(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
    const src = inst_data.src();
    const operand = try sema.resolveInst(inst_data.operand);
    const operand_ty = sema.typeOf(operand);

    if (operand_ty.zigTypeTag(mod) != .Pointer) {
        return sema.fail(block, src, "cannot dereference non-pointer type '{}'", .{operand_ty.fmt(mod)});
    } else switch (operand_ty.ptrSize(mod)) {
        .One, .C => {},
        .Many => return sema.fail(block, src, "index syntax required for unknown-length pointer type '{}'", .{operand_ty.fmt(mod)}),
        .Slice => return sema.fail(block, src, "index syntax required for slice type '{}'", .{operand_ty.fmt(mod)}),
    }

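    // Examples of what the checks above reject (hypothetical user code):
    //     many_ptr.*  // unknown-length pointer: index syntax required
    //     slice.*     // slice: index syntax required
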
    if ((try sema.typeHasOnePossibleValue(operand_ty.childType(mod))) != null) {
        // No need to validate the actual pointer value, we don't need it!
        return;
    }

    const elem_ty = operand_ty.elemType2(mod);
    if (try sema.resolveMaybeUndefVal(operand)) |val| {
        if (val.isUndef(mod)) {
            return sema.fail(block, src, "cannot dereference undefined value", .{});
        }
    } else if (try sema.typeRequiresComptime(elem_ty)) {
        const msg = msg: {
            const msg = try sema.errMsg(
                block,
                src,
                "values of type '{}' must be comptime-known, but operand value is runtime-known",
                .{elem_ty.fmt(mod)},
            );
            errdefer msg.destroy(sema.gpa);

            const src_decl = mod.declPtr(block.src_decl);
            try sema.explainWhyTypeIsComptime(msg, src.toSrcLoc(src_decl, mod), elem_ty);
            break :msg msg;
        };
        return sema.failWithOwnedErrorMsg(msg);
    }
}

fn failWithBadMemberAccess(
    sema: *Sema,
    block: *Block,
    agg_ty: Type,
    field_src: LazySrcLoc,
    field_name: InternPool.NullTerminatedString,
) CompileError {
    const mod = sema.mod;
    const kw_name = switch (agg_ty.zigTypeTag(mod)) {
        .Union => "union",
        .Struct => "struct",
        .Opaque => "opaque",
        .Enum => "enum",
        else => unreachable,
    };
    if (agg_ty.getOwnerDeclOrNull(mod)) |some| if (mod.declIsRoot(some)) {
        return sema.fail(block, field_src, "root struct of file '{}' has no member named '{}'", .{
            agg_ty.fmt(mod), field_name.fmt(&mod.intern_pool),
        });
    };
    const msg = msg: {
        const msg = try sema.errMsg(block, field_src, "{s} '{}' has no member named '{}'", .{
            kw_name, agg_ty.fmt(mod), field_name.fmt(&mod.intern_pool),
        });
        errdefer msg.destroy(sema.gpa);
        try sema.addDeclaredHereNote(msg, agg_ty);
        break :msg msg;
    };
    return sema.failWithOwnedErrorMsg(msg);
}

fn failWithBadStructFieldAccess(
    sema: *Sema,
    block: *Block,
    struct_obj: *Module.Struct,
    field_src: LazySrcLoc,
    field_name: InternPool.NullTerminatedString,
) CompileError {
    const mod = sema.mod;
    const gpa = sema.gpa;

    const fqn = try struct_obj.getFullyQualifiedName(mod);

    const msg = msg: {
        const msg = try sema.errMsg(
            block,
            field_src,
            "no field named '{}' in struct '{}'",
            .{ field_name.fmt(&mod.intern_pool), fqn.fmt(&mod.intern_pool) },
        );
        errdefer msg.destroy(gpa);
        try mod.errNoteNonLazy(struct_obj.srcLoc(mod), msg, "struct declared here", .{});
        break :msg msg;
    };
    return sema.failWithOwnedErrorMsg(msg);
}

fn failWithBadUnionFieldAccess(
    sema: *Sema,
    block: *Block,
    union_obj: *Module.Union,
    field_src: LazySrcLoc,
    field_name: InternPool.NullTerminatedString,
) CompileError {
    const mod = sema.mod;
    const gpa = sema.gpa;

    const fqn = try union_obj.getFullyQualifiedName(mod);

    const msg = msg: {
        const msg = try sema.errMsg(
            block,
            field_src,
            "no field named '{}' in union '{}'",
            .{ field_name.fmt(&mod.intern_pool), fqn.fmt(&mod.intern_pool) },
        );
        errdefer msg.destroy(gpa);
        try mod.errNoteNonLazy(union_obj.srcLoc(mod), msg, "union declared here", .{});
        break :msg msg;
    };
    return sema.failWithOwnedErrorMsg(msg);
}

fn addDeclaredHereNote(sema: *Sema, parent: *Module.ErrorMsg, decl_ty: Type) !void {
    const mod = sema.mod;
    const src_loc = decl_ty.declSrcLocOrNull(mod) orelse return;
    const category = switch (decl_ty.zigTypeTag(mod)) {
        .Union => "union",
        .Struct => "struct",
        .Enum => "enum",
        .Opaque => "opaque",
        .ErrorSet => "error set",
        else => unreachable,
    };
    try mod.errNoteNonLazy(src_loc, parent, "{s} declared here", .{category});
}

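// Illustrative note (hypothetical user code): `var x = foo();` creates an
// inferred allocation; each store routed through the function below records
// its operand so the type of `x` can later be peer-resolved.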
fn zirStoreToInferredPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
    const tracy = trace(@src());
    defer tracy.end();

    const src: LazySrcLoc = sema.src;
    const bin_inst = sema.code.instructions.items(.data)[inst].bin;
    const ptr = try sema.resolveInst(bin_inst.lhs);
    const operand = try sema.resolveInst(bin_inst.rhs);
    const ptr_inst = Air.refToIndex(ptr).?;
    const air_datas = sema.air_instructions.items(.data);

    switch (sema.air_instructions.items(.tag)[ptr_inst]) {
        .inferred_alloc_comptime => {
            const iac = &air_datas[ptr_inst].inferred_alloc_comptime;
            return sema.storeToInferredAllocComptime(block, src, operand, iac);
        },
        .inferred_alloc => {
            const ia = sema.unresolved_inferred_allocs.getPtr(ptr_inst).?;
            return sema.storeToInferredAlloc(block, ptr, operand, ia);
        },
        else => unreachable,
    }
}

fn storeToInferredAlloc(
    sema: *Sema,
    block: *Block,
    ptr: Air.Inst.Ref,
    operand: Air.Inst.Ref,
    inferred_alloc: *InferredAlloc,
) CompileError!void {
    // Create a store instruction as a placeholder. This will be replaced by a
    // proper store sequence once we know the stored type.
    const dummy_store = try block.addBinOp(.store, ptr, operand);
    // Add the stored instruction to the set we will use to resolve peer types
    // for the inferred allocation.
    try inferred_alloc.prongs.append(sema.arena, .{
        .stored_inst = operand,
        .placeholder = Air.refToIndex(dummy_store).?,
    });
}

fn storeToInferredAllocComptime(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    operand: Air.Inst.Ref,
    iac: *Air.Inst.Data.InferredAllocComptime,
) CompileError!void {
    const operand_ty = sema.typeOf(operand);
    // There will be only one store_to_inferred_ptr because we are running at comptime.
    // The alloc will turn into a Decl.
    if (try sema.resolveMaybeUndefValAllowVariables(operand)) |operand_val| store: {
        if (operand_val.getVariable(sema.mod) != null) break :store;
        var anon_decl = try block.startAnonDecl();
        defer anon_decl.deinit();
        iac.decl_index = try anon_decl.finish(operand_ty, operand_val, iac.alignment);
        try sema.comptime_mutable_decls.append(iac.decl_index);
        return;
    }

    return sema.failWithNeededComptime(block, src, "value being stored to a comptime variable must be comptime-known");
}

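// Illustrative note (hypothetical user code): `@setEvalBranchQuota(10_000)`
// can only raise the quota, because the handler below takes the maximum of
// the current and the requested value.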
fn zirSetEvalBranchQuota(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
    const src = inst_data.src();
    const quota = @as(u32, @intCast(try sema.resolveInt(block, src, inst_data.operand, Type.u32, "eval branch quota must be comptime-known")));
    sema.branch_quota = @max(sema.branch_quota, quota);
}

fn zirStore(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
    const tracy = trace(@src());
    defer tracy.end();

    const bin_inst = sema.code.instructions.items(.data)[inst].bin;
    const ptr = try sema.resolveInst(bin_inst.lhs);
    const value = try sema.resolveInst(bin_inst.rhs);
    return sema.storePtr(block, sema.src, ptr, value);
}

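// Illustrative note (hypothetical user code): in
//     fn f() !u32 { return error.Oops; }
// the return value may be written through a `ret_ptr` store, which is how
// the function below feeds `error.Oops` into the inferred error set.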
fn zirStoreNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
    const tracy = trace(@src());
    defer tracy.end();

    const mod = sema.mod;
    const zir_tags = sema.code.instructions.items(.tag);
    const zir_datas = sema.code.instructions.items(.data);
    const inst_data = zir_datas[inst].pl_node;
    const src = inst_data.src();
    const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
    const ptr = try sema.resolveInst(extra.lhs);
    const operand = try sema.resolveInst(extra.rhs);

    const is_ret = if (Zir.refToIndex(extra.lhs)) |ptr_index|
        zir_tags[ptr_index] == .ret_ptr
    else
        false;

    // Check for the possibility of this pattern:
    // %a = ret_ptr
    // %b = store(%a, %c)
    // Where %c is an error union or error set. In such case we need to add
    // to the current function's inferred error set, if any.
    if (is_ret and sema.fn_ret_ty_ies != null) switch (sema.typeOf(operand).zigTypeTag(mod)) {
        .ErrorUnion, .ErrorSet => try sema.addToInferredErrorSet(operand),
        else => {},
    };

    const ptr_src: LazySrcLoc = .{ .node_offset_store_ptr = inst_data.src_node };
    const operand_src: LazySrcLoc = .{ .node_offset_store_operand = inst_data.src_node };
    const air_tag: Air.Inst.Tag = if (is_ret)
        .ret_ptr
    else if (block.wantSafety())
        .store_safe
    else
        .store;
    return sema.storePtr2(block, src, ptr, ptr_src, operand, operand_src, air_tag);
}

fn zirStr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const bytes = sema.code.instructions.items(.data)[inst].str.get(sema.code);
    return sema.addStrLit(block, bytes);
}

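// Illustrative note: string literals are deduplicated below via
// `memoized_decls`, so (hypothetically) two occurrences of "abc" in the same
// module resolve to references to one anonymous decl.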
fn addStrLit(sema: *Sema, block: *Block, bytes: []const u8) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const gpa = sema.gpa;
    // TODO: write something like getCoercedInts to avoid needing to dupe
    const duped_bytes = try sema.arena.dupe(u8, bytes);
    const ty = try mod.arrayType(.{
        .len = bytes.len,
        .sentinel = .zero_u8,
        .child = .u8_type,
    });
    const val = try mod.intern(.{ .aggregate = .{
        .ty = ty.toIntern(),
        .storage = .{ .bytes = duped_bytes },
    } });
    const gop = try mod.memoized_decls.getOrPut(gpa, val);
    if (!gop.found_existing) {
        const new_decl_index = try mod.createAnonymousDecl(block, .{
            .ty = ty,
            .val = val.toValue(),
        });
        gop.value_ptr.* = new_decl_index;
        try mod.finalizeAnonDecl(new_decl_index);
    }
    return sema.analyzeDeclRef(gop.value_ptr.*);
}

fn zirInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    _ = block;
    const tracy = trace(@src());
    defer tracy.end();

    const int = sema.code.instructions.items(.data)[inst].int;
    return sema.mod.intRef(Type.comptime_int, int);
}

fn zirIntBig(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    _ = block;
    const tracy = trace(@src());
    defer tracy.end();

    const mod = sema.mod;
    const int = sema.code.instructions.items(.data)[inst].str;
    const byte_count = int.len * @sizeOf(std.math.big.Limb);
    const limb_bytes = sema.code.string_bytes[int.start..][0..byte_count];

    // TODO: this allocation and copy is only needed because the limbs may be unaligned.
    // If ZIR is adjusted so that big int limbs are guaranteed to be aligned, these
    // two lines can be removed.
    const limbs = try sema.arena.alloc(std.math.big.Limb, int.len);
    @memcpy(mem.sliceAsBytes(limbs), limb_bytes);

    return Air.internedToRef((try mod.intValue_big(Type.comptime_int, .{
        .limbs = limbs,
        .positive = true,
    })).toIntern());
}

fn zirFloat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    _ = block;
    const number = sema.code.instructions.items(.data)[inst].float;
    return Air.internedToRef((try sema.mod.floatValue(
        Type.comptime_float,
        number,
    )).toIntern());
}

fn zirFloat128(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    _ = block;
    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const extra = sema.code.extraData(Zir.Inst.Float128, inst_data.payload_index).data;
    const number = extra.get();
    return Air.internedToRef((try sema.mod.floatValue(Type.comptime_float, number)).toIntern());
}

fn zirCompileError(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Zir.Inst.Index {
    const tracy = trace(@src());
    defer tracy.end();

    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
    const src = inst_data.src();
    const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    const msg = try sema.resolveConstString(block, operand_src, inst_data.operand, "compile error string must be comptime-known");
    return sema.fail(block, src, "{s}", .{msg});
}

fn zirCompileLog(
    sema: *Sema,
    extended: Zir.Inst.Extended.InstData,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;

    var managed = mod.compile_log_text.toManaged(sema.gpa);
    defer sema.mod.compile_log_text = managed.moveToUnmanaged();
    const writer = managed.writer();

    const extra = sema.code.extraData(Zir.Inst.NodeMultiOp, extended.operand);
    const src_node = extra.data.src_node;
    const args = sema.code.refSlice(extra.end, extended.small);

    for (args, 0..) |arg_ref, i| {
        if (i != 0) try writer.print(", ", .{});

        const arg = try sema.resolveInst(arg_ref);
        const arg_ty = sema.typeOf(arg);
        if (try sema.resolveMaybeUndefLazyVal(arg)) |val| {
            try writer.print("@as({}, {})", .{
                arg_ty.fmt(mod), val.fmtValue(arg_ty, mod),
            });
        } else {
            try writer.print("@as({}, [runtime value])", .{arg_ty.fmt(mod)});
        }
    }
    try writer.print("\n", .{});

    const decl_index = if (sema.func_index != .none)
        mod.funcOwnerDeclIndex(sema.func_index)
    else
        sema.owner_decl_index;
    const gop = try mod.compile_log_decls.getOrPut(sema.gpa, decl_index);
    if (!gop.found_existing) {
        gop.value_ptr.* = src_node;
    }
    return Air.Inst.Ref.void_value;
}

fn zirPanic(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Zir.Inst.Index {
    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
    const src = inst_data.src();
    const msg_inst = try sema.resolveInst(inst_data.operand);

    if (block.is_comptime) {
        return sema.fail(block, src, "encountered @panic at comptime", .{});
    }
    try sema.panicWithMsg(block, src, msg_inst, .@"@panic");
    return always_noreturn;
}

fn zirTrap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Zir.Inst.Index {
    const src_node = sema.code.instructions.items(.data)[inst].node;
    const src = LazySrcLoc.nodeOffset(src_node);
    sema.src = src;
    _ = try block.addNoOp(.trap);
    return always_noreturn;
}

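// Illustrative note (hypothetical user code): `while (true) { ... }` lowers
// to the block+loop pair reserved below; each `break` becomes a `br`
// targeting the outer block instruction.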
fn zirLoop(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const src = inst_data.src();
    const extra = sema.code.extraData(Zir.Inst.Block, inst_data.payload_index);
    const body = sema.code.extra[extra.end..][0..extra.data.body_len];
    const gpa = sema.gpa;

    // AIR expects a block outside the loop block too.
    // Reserve space for a Loop instruction so that generated Break instructions can
    // point to it, even if it doesn't end up getting used because the code ends up being
    // comptime evaluated.
    const block_inst = @as(Air.Inst.Index, @intCast(sema.air_instructions.len));
    const loop_inst = block_inst + 1;
    try sema.air_instructions.ensureUnusedCapacity(gpa, 2);
    sema.air_instructions.appendAssumeCapacity(.{
        .tag = .block,
        .data = undefined,
    });
    sema.air_instructions.appendAssumeCapacity(.{
        .tag = .loop,
        .data = .{ .ty_pl = .{
            .ty = .noreturn_type,
            .payload = undefined,
        } },
    });
    var label: Block.Label = .{
        .zir_block = inst,
        .merges = .{
            .src_locs = .{},
            .results = .{},
            .br_list = .{},
            .block_inst = block_inst,
        },
    };
    var child_block = parent_block.makeSubBlock();
    child_block.label = &label;
    child_block.runtime_cond = null;
    child_block.runtime_loop = src;
    child_block.runtime_index.increment();
    const merges = &child_block.label.?.merges;

    defer child_block.instructions.deinit(gpa);
    defer merges.deinit(gpa);

    var loop_block = child_block.makeSubBlock();
    defer loop_block.instructions.deinit(gpa);

    try sema.analyzeBody(&loop_block, body);

    const loop_block_len = loop_block.instructions.items.len;
    if (loop_block_len > 0 and sema.typeOf(Air.indexToRef(loop_block.instructions.items[loop_block_len - 1])).isNoReturn(mod)) {
        // If the loop ended with a noreturn terminator, then there is no way for it to loop,
        // so we can just use the block instead.
        try child_block.instructions.appendSlice(gpa, loop_block.instructions.items);
    } else {
        try child_block.instructions.append(gpa, loop_inst);

        try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Block).Struct.fields.len + loop_block_len);
        sema.air_instructions.items(.data)[loop_inst].ty_pl.payload = sema.addExtraAssumeCapacity(
            Air.Block{ .body_len = @as(u32, @intCast(loop_block_len)) },
        );
        sema.air_extra.appendSliceAssumeCapacity(loop_block.instructions.items);
    }
    return sema.analyzeBlockBody(parent_block, src, &child_block, merges);
}

fn zirCImport(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const pl_node = sema.code.instructions.items(.data)[inst].pl_node;
    const src = pl_node.src();
    const extra = sema.code.extraData(Zir.Inst.Block, pl_node.payload_index);
    const body = sema.code.extra[extra.end..][0..extra.data.body_len];

    // we check this here to avoid undefined symbols
    if (!@import("build_options").have_llvm)
        return sema.fail(parent_block, src, "C import unavailable; Zig compiler built without LLVM extensions", .{});

    var c_import_buf = std.ArrayList(u8).init(sema.gpa);
    defer c_import_buf.deinit();

    var comptime_reason: Block.ComptimeReason = .{ .c_import = .{
        .block = parent_block,
        .src = src,
    } };
    var child_block: Block = .{
        .parent = parent_block,
        .sema = sema,
        .src_decl = parent_block.src_decl,
        .namespace = parent_block.namespace,
        .wip_capture_scope = parent_block.wip_capture_scope,
        .instructions = .{},
        .inlining = parent_block.inlining,
        .is_comptime = true,
        .comptime_reason = &comptime_reason,
        .c_import_buf = &c_import_buf,
        .runtime_cond = parent_block.runtime_cond,
        .runtime_loop = parent_block.runtime_loop,
        .runtime_index = parent_block.runtime_index,
    };
    defer child_block.instructions.deinit(sema.gpa);

    // Ignore the result, all the relevant operations have written to c_import_buf already.
    _ = try sema.analyzeBodyBreak(&child_block, body);

    const mod = sema.mod;
    const c_import_res = mod.comp.cImport(c_import_buf.items) catch |err|
        return sema.fail(&child_block, src, "C import failed: {s}", .{@errorName(err)});

    if (c_import_res.errors.len != 0) {
        const msg = msg: {
            defer @import("clang.zig").ErrorMsg.delete(c_import_res.errors.ptr, c_import_res.errors.len);

            const msg = try sema.errMsg(&child_block, src, "C import failed", .{});
            errdefer msg.destroy(sema.gpa);

            if (!mod.comp.bin_file.options.link_libc)
                try sema.errNote(&child_block, src, msg, "libc headers not available; compilation does not link against libc", .{});

            const gop = try mod.cimport_errors.getOrPut(sema.gpa, sema.owner_decl_index);
            if (!gop.found_existing) {
                var errs = try std.ArrayListUnmanaged(Module.CImportError).initCapacity(sema.gpa, c_import_res.errors.len);
                errdefer {
                    for (errs.items) |err| err.deinit(sema.gpa);
                    errs.deinit(sema.gpa);
                }

                for (c_import_res.errors) |c_error| {
                    const path = if (c_error.filename_ptr) |some|
                        try sema.gpa.dupeZ(u8, some[0..c_error.filename_len])
                    else
                        null;
                    errdefer if (path) |some| sema.gpa.free(some);

                    const c_msg = try sema.gpa.dupeZ(u8, c_error.msg_ptr[0..c_error.msg_len]);
                    errdefer sema.gpa.free(c_msg);

                    const line = line: {
                        const source = c_error.source orelse break :line null;
                        var start = c_error.offset;
                        while (start > 0) : (start -= 1) {
                            if (source[start - 1] == '\n') break;
                        }
                        var end = c_error.offset;
                        while (true) : (end += 1) {
                            if (source[end] == 0) break;
                            if (source[end] == '\n') break;
                        }
                        break :line try sema.gpa.dupeZ(u8, source[start..end]);
                    };
                    errdefer if (line) |some| sema.gpa.free(some);

                    errs.appendAssumeCapacity(.{
                        .path = path orelse null,
                        .source_line = line orelse null,
                        .line = c_error.line,
                        .column = c_error.column,
                        .offset = c_error.offset,
                        .msg = c_msg,
                    });
                }
                gop.value_ptr.* = errs.items;
            }
            break :msg msg;
        };
        return sema.failWithOwnedErrorMsg(msg);
    }
    const c_import_pkg = Package.create(
        sema.gpa,
        null,
        c_import_res.out_zig_path,
    ) catch |err| switch (err) {
        error.OutOfMemory => return error.OutOfMemory,
        else => unreachable, // we pass null for root_src_dir_path
    };

    const result = mod.importPkg(c_import_pkg) catch |err|
        return sema.fail(&child_block, src, "C import failed: {s}", .{@errorName(err)});

    mod.astGenFile(result.file) catch |err|
        return sema.fail(&child_block, src, "C import failed: {s}", .{@errorName(err)});

    try mod.semaFile(result.file);
    const file_root_decl_index = result.file.root_decl.unwrap().?;
    const file_root_decl = mod.declPtr(file_root_decl_index);
    try mod.declareDeclDependency(sema.owner_decl_index, file_root_decl_index);
    return Air.internedToRef(file_root_decl.val.toIntern());
}

fn zirSuspendBlock(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const src = inst_data.src();
    return sema.failWithUseOfAsync(parent_block, src);
}

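// Illustrative note (hypothetical user code): a labeled block such as
//     const v = blk: { ... break :blk x; };
// relies on the AIR block instruction reserved below, so that each
// `break :blk` has something to point at before the body is analyzed.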
fn zirBlock(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index, force_comptime: bool) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const pl_node = sema.code.instructions.items(.data)[inst].pl_node;
    const src = pl_node.src();
    const extra = sema.code.extraData(Zir.Inst.Block, pl_node.payload_index);
    const body = sema.code.extra[extra.end..][0..extra.data.body_len];
    const gpa = sema.gpa;

    // Reserve space for a Block instruction so that generated Break instructions can
    // point to it, even if it doesn't end up getting used because the code ends up being
    // comptime evaluated or is an unlabeled block.
    const block_inst = @as(Air.Inst.Index, @intCast(sema.air_instructions.len));
    try sema.air_instructions.append(gpa, .{
        .tag = .block,
        .data = undefined,
    });

    var label: Block.Label = .{
        .zir_block = inst,
        .merges = .{
            .src_locs = .{},
            .results = .{},
            .br_list = .{},
            .block_inst = block_inst,
        },
    };

    var child_block: Block = .{
        .parent = parent_block,
        .sema = sema,
        .src_decl = parent_block.src_decl,
        .namespace = parent_block.namespace,
        .wip_capture_scope = parent_block.wip_capture_scope,
        .instructions = .{},
        .label = &label,
        .inlining = parent_block.inlining,
        .is_comptime = parent_block.is_comptime or force_comptime,
        .comptime_reason = parent_block.comptime_reason,
        .is_typeof = parent_block.is_typeof,
        .want_safety = parent_block.want_safety,
        .float_mode = parent_block.float_mode,
        .c_import_buf = parent_block.c_import_buf,
        .runtime_cond = parent_block.runtime_cond,
        .runtime_loop = parent_block.runtime_loop,
        .runtime_index = parent_block.runtime_index,
        .error_return_trace_index = parent_block.error_return_trace_index,
    };

    defer child_block.instructions.deinit(gpa);
    defer label.merges.deinit(gpa);

    return sema.resolveBlockBody(parent_block, src, &child_block, body, inst, &label.merges);
}

fn resolveBlockBody(
    sema: *Sema,
    parent_block: *Block,
    src: LazySrcLoc,
    child_block: *Block,
    body: []const Zir.Inst.Index,
    /// This is the instruction that a break instruction within `body` can
    /// use to return from the body.
    body_inst: Zir.Inst.Index,
    merges: *Block.Merges,
) CompileError!Air.Inst.Ref {
    if (child_block.is_comptime) {
        return sema.resolveBody(child_block, body, body_inst);
    } else {
        if (sema.analyzeBodyInner(child_block, body)) |_| {
            return sema.analyzeBlockBody(parent_block, src, child_block, merges);
        } else |err| switch (err) {
            error.ComptimeBreak => {
                // Comptime control flow is happening, however child_block may still contain
                // runtime instructions which need to be copied to the parent block.
                try parent_block.instructions.appendSlice(sema.gpa, child_block.instructions.items);

                const break_inst = sema.comptime_break_inst;
                const break_data = sema.code.instructions.items(.data)[break_inst].@"break";
                const extra = sema.code.extraData(Zir.Inst.Break, break_data.payload_index).data;
                if (extra.block_inst == body_inst) {
                    return try sema.resolveInst(break_data.operand);
                } else {
                    return error.ComptimeBreak;
                }
            },
            else => |e| return e,
        }
    }
}

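// Illustrative note (hypothetical user code): in
//     const r = blk: { if (c) break :blk one_u8 else break :blk two_u16; };
// the two break operands are peer-resolved below and each `br` operand is
// coerced to the resolved block type in place.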
fn analyzeBlockBody(
    sema: *Sema,
    parent_block: *Block,
    src: LazySrcLoc,
    child_block: *Block,
    merges: *Block.Merges,
) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const gpa = sema.gpa;
    const mod = sema.mod;

    // Blocks must terminate with noreturn instruction.
    assert(child_block.instructions.items.len != 0);
    assert(sema.typeOf(Air.indexToRef(child_block.instructions.items[child_block.instructions.items.len - 1])).isNoReturn(mod));

    if (merges.results.items.len == 0) {
        // No need for a block instruction. We can put the new instructions
        // directly into the parent block.
        try parent_block.instructions.appendSlice(gpa, child_block.instructions.items);
        return Air.indexToRef(child_block.instructions.items[child_block.instructions.items.len - 1]);
    }
    if (merges.results.items.len == 1) {
        const last_inst_index = child_block.instructions.items.len - 1;
        const last_inst = child_block.instructions.items[last_inst_index];
        if (sema.getBreakBlock(last_inst)) |br_block| {
            if (br_block == merges.block_inst) {
                // No need for a block instruction. We can put the new instructions directly
                // into the parent block. Here we omit the break instruction.
                const without_break = child_block.instructions.items[0..last_inst_index];
                try parent_block.instructions.appendSlice(gpa, without_break);
                return merges.results.items[0];
            }
        }
    }
    // It is impossible to have the number of results be > 1 in a comptime scope.
    assert(!child_block.is_comptime); // Should have already emitted a compile error in the condbr condition.

    // Need to set the type and emit the Block instruction. This allows machine code generation
    // to emit a jump instruction to after the block when it encounters the break.
    try parent_block.instructions.append(gpa, merges.block_inst);
    const resolved_ty = try sema.resolvePeerTypes(parent_block, src, merges.results.items, .{ .override = merges.src_locs.items });
    // TODO add note "missing else causes void value"

    const type_src = src; // TODO: better source location
    if (try sema.typeRequiresComptime(resolved_ty)) {
        const msg = msg: {
            const msg = try sema.errMsg(child_block, type_src, "value with comptime-only type '{}' depends on runtime control flow", .{resolved_ty.fmt(mod)});
            errdefer msg.destroy(sema.gpa);

            const runtime_src = child_block.runtime_cond orelse child_block.runtime_loop.?;
            try sema.errNote(child_block, runtime_src, msg, "runtime control flow here", .{});

            const child_src_decl = mod.declPtr(child_block.src_decl);
            try sema.explainWhyTypeIsComptime(msg, type_src.toSrcLoc(child_src_decl, mod), resolved_ty);

            break :msg msg;
        };
        return sema.failWithOwnedErrorMsg(msg);
    }
    const ty_inst = Air.internedToRef(resolved_ty.toIntern());
    try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Block).Struct.fields.len +
        child_block.instructions.items.len);
    sema.air_instructions.items(.data)[merges.block_inst] = .{ .ty_pl = .{
        .ty = ty_inst,
        .payload = sema.addExtraAssumeCapacity(Air.Block{
            .body_len = @as(u32, @intCast(child_block.instructions.items.len)),
        }),
    } };
    sema.air_extra.appendSliceAssumeCapacity(child_block.instructions.items);
    // Now that the block has its type resolved, we need to go back into all the break
    // instructions, and insert type coercion on the operands.
    for (merges.br_list.items) |br| {
        const br_operand = sema.air_instructions.items(.data)[br].br.operand;
        const br_operand_src = src;
        const br_operand_ty = sema.typeOf(br_operand);
        if (br_operand_ty.eql(resolved_ty, mod)) {
            // No type coercion needed.
            continue;
        }
        var coerce_block = parent_block.makeSubBlock();
        defer coerce_block.instructions.deinit(gpa);
        const coerced_operand = try sema.coerce(&coerce_block, resolved_ty, br_operand, br_operand_src);
        // If no instructions were produced, such as in the case of a coercion of a
        // constant value to a new type, we can simply point the br operand to it.
        if (coerce_block.instructions.items.len == 0) {
            sema.air_instructions.items(.data)[br].br.operand = coerced_operand;
            continue;
        }
        assert(Air.indexToRef(coerce_block.instructions.items[coerce_block.instructions.items.len - 1]) == coerced_operand);

        // Convert the br instruction to a block instruction that has the coercion
        // and then a new br inside that returns the coerced instruction.
        const sub_block_len = @as(u32, @intCast(coerce_block.instructions.items.len + 1));
        try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Block).Struct.fields.len +
            sub_block_len);
        try sema.air_instructions.ensureUnusedCapacity(gpa, 1);
        const sub_br_inst = @as(Air.Inst.Index, @intCast(sema.air_instructions.len));

        sema.air_instructions.items(.tag)[br] = .block;
        sema.air_instructions.items(.data)[br] = .{ .ty_pl = .{
            .ty = Air.Inst.Ref.noreturn_type,
            .payload = sema.addExtraAssumeCapacity(Air.Block{
                .body_len = sub_block_len,
            }),
        } };
        sema.air_extra.appendSliceAssumeCapacity(coerce_block.instructions.items);
        sema.air_extra.appendAssumeCapacity(sub_br_inst);

        sema.air_instructions.appendAssumeCapacity(.{
            .tag = .br,
            .data = .{ .br = .{
                .block_inst = merges.block_inst,
                .operand = coerced_operand,
            } },
        });
    }
    return Air.indexToRef(merges.block_inst);
}

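// Illustrative note (hypothetical user code): `@export(foo, .{ .name = "foo" });`
// arrives here with the target decl name and the options struct both
// resolved at comptime.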
fn zirExport(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
|
|
const tracy = trace(@src());
|
|
defer tracy.end();
|
|
|
|
const mod = sema.mod;
|
|
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
|
|
const extra = sema.code.extraData(Zir.Inst.Export, inst_data.payload_index).data;
|
|
const src = inst_data.src();
|
|
const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
|
|
const options_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
|
|
const decl_name = try mod.intern_pool.getOrPutString(mod.gpa, sema.code.nullTerminatedString(extra.decl_name));
|
|
const decl_index = if (extra.namespace != .none) index_blk: {
|
|
const container_ty = try sema.resolveType(block, operand_src, extra.namespace);
|
|
const container_namespace = container_ty.getNamespaceIndex(mod).unwrap().?;
|
|
|
|
const maybe_index = try sema.lookupInNamespace(block, operand_src, container_namespace, decl_name, false);
|
|
break :index_blk maybe_index orelse
|
|
return sema.failWithBadMemberAccess(block, container_ty, operand_src, decl_name);
|
|
} else try sema.lookupIdentifier(block, operand_src, decl_name);
|
|
const options = sema.resolveExportOptions(block, .unneeded, extra.options) catch |err| switch (err) {
|
|
error.NeededSourceLocation => {
|
|
_ = try sema.resolveExportOptions(block, options_src, extra.options);
|
|
unreachable;
|
|
},
|
|
else => |e| return e,
|
|
};
|
|
{
|
|
try mod.ensureDeclAnalyzed(decl_index);
|
|
const exported_decl = mod.declPtr(decl_index);
|
|
if (exported_decl.val.getFunction(mod)) |function| {
|
|
return sema.analyzeExport(block, src, options, function.owner_decl);
|
|
}
|
|
}
|
|
try sema.analyzeExport(block, src, options, decl_index);
|
|
}
|
|
|
|
fn zirExportValue(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
    const tracy = trace(@src());
    defer tracy.end();

    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const extra = sema.code.extraData(Zir.Inst.ExportValue, inst_data.payload_index).data;
    const src = inst_data.src();
    const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    const options_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
    const operand = try sema.resolveInstConst(block, operand_src, extra.operand, "export target must be comptime-known");
    const options = sema.resolveExportOptions(block, .unneeded, extra.options) catch |err| switch (err) {
        error.NeededSourceLocation => {
            _ = try sema.resolveExportOptions(block, options_src, extra.options);
            unreachable;
        },
        else => |e| return e,
    };
    const decl_index = if (operand.val.getFunction(sema.mod)) |function| function.owner_decl else blk: {
        var anon_decl = try block.startAnonDecl();
        defer anon_decl.deinit();
        break :blk try anon_decl.finish(operand.ty, operand.val, .none);
    };
    try sema.analyzeExport(block, src, options, decl_index);
}

pub fn analyzeExport(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    options: Module.Export.Options,
    exported_decl_index: Decl.Index,
) !void {
    const Export = Module.Export;
    const mod = sema.mod;

    if (options.linkage == .Internal) {
        return;
    }

    try mod.ensureDeclAnalyzed(exported_decl_index);
    const exported_decl = mod.declPtr(exported_decl_index);

    if (!try sema.validateExternType(exported_decl.ty, .other)) {
        const msg = msg: {
            const msg = try sema.errMsg(block, src, "unable to export type '{}'", .{exported_decl.ty.fmt(mod)});
            errdefer msg.destroy(sema.gpa);

            const src_decl = mod.declPtr(block.src_decl);
            try sema.explainWhyTypeIsNotExtern(msg, src.toSrcLoc(src_decl, mod), exported_decl.ty, .other);

            try sema.addDeclaredHereNote(msg, exported_decl.ty);
            break :msg msg;
        };
        return sema.failWithOwnedErrorMsg(msg);
    }

    // TODO: some backends might support re-exporting extern decls
    if (exported_decl.isExtern(mod)) {
        return sema.fail(block, src, "export target cannot be extern", .{});
    }

    // This decl is alive no matter what, since it's being exported
    try mod.markDeclAlive(exported_decl);
    try sema.maybeQueueFuncBodyAnalysis(exported_decl_index);

    const gpa = sema.gpa;

    try mod.decl_exports.ensureUnusedCapacity(gpa, 1);
    try mod.export_owners.ensureUnusedCapacity(gpa, 1);

    const new_export = try gpa.create(Export);
    errdefer gpa.destroy(new_export);

    new_export.* = .{
        .opts = options,
        .src = src,
        .owner_decl = sema.owner_decl_index,
        .src_decl = block.src_decl,
        .exported_decl = exported_decl_index,
        .status = .in_progress,
    };

    // Add to export_owners table.
    const eo_gop = mod.export_owners.getOrPutAssumeCapacity(sema.owner_decl_index);
    if (!eo_gop.found_existing) {
        eo_gop.value_ptr.* = .{};
    }
    try eo_gop.value_ptr.append(gpa, new_export);
    errdefer _ = eo_gop.value_ptr.pop();

    // Add to exported_decl table.
    const de_gop = mod.decl_exports.getOrPutAssumeCapacity(exported_decl_index);
    if (!de_gop.found_existing) {
        de_gop.value_ptr.* = .{};
    }
    try de_gop.value_ptr.append(gpa, new_export);
    errdefer _ = de_gop.value_ptr.pop();
}

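/// Analyzes `@setAlignStack`. The requested alignment must be at most 256 and
/// must appear directly inside a non-naked, non-inline function body; it is
/// merged into the function's existing stack alignment by taking the maximum.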
fn zirSetAlignStack(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!void {
    const mod = sema.mod;
    const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
    const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
    const src = LazySrcLoc.nodeOffset(extra.node);
    const alignment = try sema.resolveAlign(block, operand_src, extra.operand);
    if (alignment.order(Alignment.fromNonzeroByteUnits(256)).compare(.gt)) {
        return sema.fail(block, src, "attempt to @setAlignStack({d}); maximum is 256", .{
            alignment.toByteUnitsOptional().?,
        });
    }
    if (sema.func_index == .none) {
        return sema.fail(block, src, "@setAlignStack outside function body", .{});
    }

    const fn_owner_decl = mod.funcOwnerDeclPtr(sema.func_index);
    switch (fn_owner_decl.ty.fnCallingConvention(mod)) {
        .Naked => return sema.fail(block, src, "@setAlignStack in naked function", .{}),
        .Inline => return sema.fail(block, src, "@setAlignStack in inline function", .{}),
        else => if (block.inlining != null) {
            return sema.fail(block, src, "@setAlignStack in inline call", .{});
        },
    }

    if (sema.prev_stack_alignment_src) |prev_src| {
        const msg = msg: {
            const msg = try sema.errMsg(block, src, "multiple @setAlignStack in the same function body", .{});
            errdefer msg.destroy(sema.gpa);
            try sema.errNote(block, prev_src, msg, "other instance here", .{});
            break :msg msg;
        };
        return sema.failWithOwnedErrorMsg(msg);
    }

    const ip = &mod.intern_pool;
    const a = ip.funcAnalysis(sema.func_index);
    if (a.stack_alignment != .none) {
        a.stack_alignment = @enumFromInt(@max(
            @intFromEnum(alignment),
            @intFromEnum(a.stack_alignment),
        ));
    }
}

fn zirSetCold(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!void {
    const mod = sema.mod;
    const ip = &mod.intern_pool;
    const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
    const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
    const is_cold = try sema.resolveConstBool(block, operand_src, extra.operand, "operand to @setCold must be comptime-known");
    if (sema.func_index == .none) return; // does nothing outside a function
    ip.funcAnalysis(sema.func_index).is_cold = is_cold;
}

fn zirSetFloatMode(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!void {
    const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
    const src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
    block.float_mode = try sema.resolveBuiltinEnum(block, src, extra.operand, "FloatMode", "operand to @setFloatMode must be comptime-known");
}

fn zirSetRuntimeSafety(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
    const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    block.want_safety = try sema.resolveConstBool(block, operand_src, inst_data.operand, "operand to @setRuntimeSafety must be comptime-known");
}

fn zirFence(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!void {
    if (block.is_comptime) return;

    const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
    const order_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
    const order = try sema.resolveAtomicOrder(block, order_src, extra.operand, "atomic order of @fence must be comptime-known");

    if (@intFromEnum(order) < @intFromEnum(std.builtin.AtomicOrder.Acquire)) {
        return sema.fail(block, order_src, "atomic ordering must be Acquire or stricter", .{});
    }

    _ = try block.addInst(.{
        .tag = .fence,
        .data = .{ .fence = order },
    });
}

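/// Analyzes the ZIR `break` instruction: walks up the block stack to find the
/// labeled block being broken from, records the break operand and its source
/// location in that block's merge data, and propagates runtime control flow
/// information back to the target block.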
fn zirBreak(sema: *Sema, start_block: *Block, inst: Zir.Inst.Index) CompileError!Zir.Inst.Index {
    const tracy = trace(@src());
    defer tracy.end();

    const inst_data = sema.code.instructions.items(.data)[inst].@"break";
    const extra = sema.code.extraData(Zir.Inst.Break, inst_data.payload_index).data;
    const operand = try sema.resolveInst(inst_data.operand);
    const zir_block = extra.block_inst;

    var block = start_block;
    while (true) {
        if (block.label) |label| {
            if (label.zir_block == zir_block) {
                const br_ref = try start_block.addBr(label.merges.block_inst, operand);
                const src_loc = if (extra.operand_src_node != Zir.Inst.Break.no_src_node)
                    LazySrcLoc.nodeOffset(extra.operand_src_node)
                else
                    null;
                try label.merges.src_locs.append(sema.gpa, src_loc);
                try label.merges.results.append(sema.gpa, operand);
                try label.merges.br_list.append(sema.gpa, Air.refToIndex(br_ref).?);
                block.runtime_index.increment();
                if (block.runtime_cond == null and block.runtime_loop == null) {
                    block.runtime_cond = start_block.runtime_cond orelse start_block.runtime_loop;
                    block.runtime_loop = start_block.runtime_loop;
                }
                return inst;
            }
        }
        block = block.parent.?;
    }
}

fn zirDbgStmt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
    // We do not set sema.src here because dbg_stmt instructions are only emitted for
    // ZIR code that possibly will need to generate runtime code. So error messages
    // and other source locations must not rely on sema.src being set from dbg_stmt
    // instructions.
    if (block.is_comptime or sema.mod.comp.bin_file.options.strip) return;

    const inst_data = sema.code.instructions.items(.data)[inst].dbg_stmt;

    if (block.instructions.items.len != 0) {
        const idx = block.instructions.items[block.instructions.items.len - 1];
        if (sema.air_instructions.items(.tag)[idx] == .dbg_stmt) {
            // The previous dbg_stmt didn't correspond to any actual code, so replace it.
            sema.air_instructions.items(.data)[idx].dbg_stmt = .{
                .line = inst_data.line,
                .column = inst_data.column,
            };
            return;
        }
    }

    _ = try block.addInst(.{
        .tag = .dbg_stmt,
        .data = .{ .dbg_stmt = .{
            .line = inst_data.line,
            .column = inst_data.column,
        } },
    });
}

fn zirDbgBlockBegin(sema: *Sema, block: *Block) CompileError!void {
    if (block.is_comptime or sema.mod.comp.bin_file.options.strip) return;

    _ = try block.addInst(.{
        .tag = .dbg_block_begin,
        .data = undefined,
    });
}

fn zirDbgBlockEnd(sema: *Sema, block: *Block) CompileError!void {
    if (block.is_comptime or sema.mod.comp.bin_file.options.strip) return;

    _ = try block.addInst(.{
        .tag = .dbg_block_end,
        .data = undefined,
    });
}

fn zirDbgVar(
    sema: *Sema,
    block: *Block,
    inst: Zir.Inst.Index,
    air_tag: Air.Inst.Tag,
) CompileError!void {
    if (block.is_comptime or sema.mod.comp.bin_file.options.strip) return;

    const str_op = sema.code.instructions.items(.data)[inst].str_op;
    const operand = try sema.resolveInst(str_op.operand);
    const name = str_op.getStr(sema.code);
    try sema.addDbgVar(block, operand, air_tag, name);
}

fn addDbgVar(
    sema: *Sema,
    block: *Block,
    operand: Air.Inst.Ref,
    air_tag: Air.Inst.Tag,
    name: []const u8,
) CompileError!void {
    const mod = sema.mod;
    const operand_ty = sema.typeOf(operand);
    switch (air_tag) {
        .dbg_var_ptr => {
            if (!(try sema.typeHasRuntimeBits(operand_ty.childType(mod)))) return;
        },
        .dbg_var_val => {
            if (!(try sema.typeHasRuntimeBits(operand_ty))) return;
        },
        else => unreachable,
    }

    try sema.queueFullTypeResolution(operand_ty);

    // Add the name to the AIR.
    const name_extra_index = @as(u32, @intCast(sema.air_extra.items.len));
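    // One u32 element is reserved beyond `name.len / 4` so the 0 terminator is
    // always in capacity: e.g. a 7-byte name uses 7 / 4 + 1 = 2 elements
    // (8 bytes) for the 7 bytes plus the NUL.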
    const elements_used = name.len / 4 + 1;
    try sema.air_extra.ensureUnusedCapacity(sema.gpa, elements_used);
    const buffer = mem.sliceAsBytes(sema.air_extra.unusedCapacitySlice());
    @memcpy(buffer[0..name.len], name);
    buffer[name.len] = 0;
    sema.air_extra.items.len += elements_used;

    _ = try block.addInst(.{
        .tag = air_tag,
        .data = .{ .pl_op = .{
            .payload = name_extra_index,
            .operand = operand,
        } },
    });
}

fn zirDeclRef(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[inst].str_tok;
    const src = inst_data.src();
    const decl_name = try mod.intern_pool.getOrPutString(sema.gpa, inst_data.get(sema.code));
    const decl_index = try sema.lookupIdentifier(block, src, decl_name);
    try sema.addReferencedBy(block, src, decl_index);
    return sema.analyzeDeclRef(decl_index);
}

fn zirDeclVal(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[inst].str_tok;
    const src = inst_data.src();
    const decl_name = try mod.intern_pool.getOrPutString(sema.gpa, inst_data.get(sema.code));
    const decl = try sema.lookupIdentifier(block, src, decl_name);
    return sema.analyzeDeclVal(block, src, decl);
}

fn lookupIdentifier(sema: *Sema, block: *Block, src: LazySrcLoc, name: InternPool.NullTerminatedString) !Decl.Index {
    const mod = sema.mod;
    var namespace = block.namespace;
    while (true) {
        if (try sema.lookupInNamespace(block, src, namespace, name, false)) |decl_index| {
            return decl_index;
        }
        namespace = mod.namespacePtr(namespace).parent.unwrap() orelse break;
    }
    unreachable; // AstGen detects use of undeclared identifier errors.
}

/// This looks up a member of a specific namespace. It is affected by `usingnamespace` but
/// only for ones in the specified namespace.
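/// Transitively `usingnamespace`d namespaces are gathered into a worklist and
/// each is searched exactly once; multiple matches become an "ambiguous
/// reference" error.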
fn lookupInNamespace(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    namespace_index: Namespace.Index,
    ident_name: InternPool.NullTerminatedString,
    observe_usingnamespace: bool,
) CompileError!?Decl.Index {
    const mod = sema.mod;

    const namespace = mod.namespacePtr(namespace_index);
    const namespace_decl_index = namespace.getDeclIndex(mod);
    const namespace_decl = mod.declPtr(namespace_decl_index);
    if (namespace_decl.analysis == .file_failure) {
        try mod.declareDeclDependency(sema.owner_decl_index, namespace_decl_index);
        return error.AnalysisFail;
    }

    if (observe_usingnamespace and namespace.usingnamespace_set.count() != 0) {
        const src_file = mod.namespacePtr(block.namespace).file_scope;

        const gpa = sema.gpa;
        var checked_namespaces: std.AutoArrayHashMapUnmanaged(*Namespace, bool) = .{};
        defer checked_namespaces.deinit(gpa);

        // Keep track of name conflicts for error notes.
        var candidates: std.ArrayListUnmanaged(Decl.Index) = .{};
        defer candidates.deinit(gpa);

        try checked_namespaces.put(gpa, namespace, namespace.file_scope == src_file);
        var check_i: usize = 0;

        while (check_i < checked_namespaces.count()) : (check_i += 1) {
            const check_ns = checked_namespaces.keys()[check_i];
            if (check_ns.decls.getKeyAdapted(ident_name, Module.DeclAdapter{ .mod = mod })) |decl_index| {
                // Skip decls which are not marked pub and which are in a different
                // file than the one containing the `a.b`/`@hasDecl` syntax.
                const decl = mod.declPtr(decl_index);
                if (decl.is_pub or (src_file == decl.getFileScope(mod) and checked_namespaces.values()[check_i])) {
                    try candidates.append(gpa, decl_index);
                }
            }
            var it = check_ns.usingnamespace_set.iterator();
            while (it.next()) |entry| {
                const sub_usingnamespace_decl_index = entry.key_ptr.*;
                // Skip the decl we're currently analysing.
                if (sub_usingnamespace_decl_index == sema.owner_decl_index) continue;
                const sub_usingnamespace_decl = mod.declPtr(sub_usingnamespace_decl_index);
                const sub_is_pub = entry.value_ptr.*;
                if (!sub_is_pub and src_file != sub_usingnamespace_decl.getFileScope(mod)) {
                    // Skip usingnamespace decls which are not marked pub and which are
                    // in a different file than the one containing the
                    // `a.b`/`@hasDecl` syntax.
                    continue;
                }
                try sema.ensureDeclAnalyzed(sub_usingnamespace_decl_index);
                const ns_ty = sub_usingnamespace_decl.val.toType();
                const sub_ns = ns_ty.getNamespace(mod).?;
                try checked_namespaces.put(gpa, sub_ns, src_file == sub_usingnamespace_decl.getFileScope(mod));
            }
        }

        {
            var i: usize = 0;
            while (i < candidates.items.len) {
                if (candidates.items[i] == sema.owner_decl_index) {
                    _ = candidates.orderedRemove(i);
                } else {
                    i += 1;
                }
            }
        }

        switch (candidates.items.len) {
            0 => {},
            1 => {
                const decl_index = candidates.items[0];
                try mod.declareDeclDependency(sema.owner_decl_index, decl_index);
                return decl_index;
            },
            else => {
                const msg = msg: {
                    const msg = try sema.errMsg(block, src, "ambiguous reference", .{});
                    errdefer msg.destroy(gpa);
                    for (candidates.items) |candidate_index| {
                        const candidate = mod.declPtr(candidate_index);
                        const src_loc = candidate.srcLoc(mod);
                        try mod.errNoteNonLazy(src_loc, msg, "declared here", .{});
                    }
                    break :msg msg;
                };
                return sema.failWithOwnedErrorMsg(msg);
            },
        }
    } else if (namespace.decls.getKeyAdapted(ident_name, Module.DeclAdapter{ .mod = mod })) |decl_index| {
        try mod.declareDeclDependency(sema.owner_decl_index, decl_index);
        return decl_index;
    }

    // TODO This dependency is too strong. Really, it should only be a dependency
    // on the non-existence of `ident_name` in the namespace. We can lessen the number of
    // outdated declarations by making this dependency more sophisticated.
    try mod.declareDeclDependency(sema.owner_decl_index, namespace_decl_index);
    return null;
}

fn funcDeclSrc(sema: *Sema, func_inst: Air.Inst.Ref) !?*Decl {
    const mod = sema.mod;
    const func_val = (try sema.resolveMaybeUndefVal(func_inst)) orelse return null;
    if (func_val.isUndef(mod)) return null;
    const owner_decl_index = switch (mod.intern_pool.indexToKey(func_val.toIntern())) {
        .extern_func => |extern_func| extern_func.decl,
        .func => |func| func.owner_decl,
        .ptr => |ptr| switch (ptr.addr) {
            .decl => |decl| mod.declPtr(decl).val.getFunction(mod).?.owner_decl,
            else => return null,
        },
        else => return null,
    };
    return mod.declPtr(owner_decl_index);
}

pub fn analyzeSaveErrRetIndex(sema: *Sema, block: *Block) SemaError!Air.Inst.Ref {
    const mod = sema.mod;
    const gpa = sema.gpa;
    const src = sema.src;

    if (!mod.backendSupportsFeature(.error_return_trace)) return .none;
    if (!mod.comp.bin_file.options.error_return_tracing) return .none;

    if (block.is_comptime)
        return .none;

    const stack_trace_ty = sema.getBuiltinType("StackTrace") catch |err| switch (err) {
        error.NeededSourceLocation, error.GenericPoison, error.ComptimeReturn, error.ComptimeBreak => unreachable,
        else => |e| return e,
    };
    sema.resolveTypeFields(stack_trace_ty) catch |err| switch (err) {
        error.NeededSourceLocation, error.GenericPoison, error.ComptimeReturn, error.ComptimeBreak => unreachable,
        else => |e| return e,
    };
    const field_name = try mod.intern_pool.getOrPutString(gpa, "index");
    const field_index = sema.structFieldIndex(block, stack_trace_ty, field_name, src) catch |err| switch (err) {
        error.NeededSourceLocation, error.GenericPoison, error.ComptimeReturn, error.ComptimeBreak => unreachable,
        else => |e| return e,
    };

    return try block.addInst(.{
        .tag = .save_err_return_trace_index,
        .data = .{ .ty_pl = .{
            .ty = Air.internedToRef(stack_trace_ty.toIntern()),
            .payload = @as(u32, @intCast(field_index)),
        } },
    });
}

/// Add instructions to block to "pop" the error return trace.
/// If `operand` is provided, only pops if operand is non-error.
fn popErrorReturnTrace(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    operand: Air.Inst.Ref,
    saved_error_trace_index: Air.Inst.Ref,
) CompileError!void {
    const mod = sema.mod;
    const gpa = sema.gpa;
    var is_non_error: ?bool = null;
    var is_non_error_inst: Air.Inst.Ref = undefined;
    if (operand != .none) {
        is_non_error_inst = try sema.analyzeIsNonErr(block, src, operand);
        if (try sema.resolveDefinedValue(block, src, is_non_error_inst)) |cond_val|
            is_non_error = cond_val.toBool();
    } else is_non_error = true; // no operand means pop unconditionally

    if (is_non_error == true) {
        // AstGen determined this result does not go to an error-handling expr (try/catch/return etc.), or
        // the result is comptime-known to be a non-error. Either way, pop unconditionally.

        const stack_trace_ty = try sema.getBuiltinType("StackTrace");
        try sema.resolveTypeFields(stack_trace_ty);
        const ptr_stack_trace_ty = try mod.singleMutPtrType(stack_trace_ty);
        const err_return_trace = try block.addTy(.err_return_trace, ptr_stack_trace_ty);
        const field_name = try mod.intern_pool.getOrPutString(gpa, "index");
        const field_ptr = try sema.structFieldPtr(block, src, err_return_trace, field_name, src, stack_trace_ty, true);
        try sema.storePtr2(block, src, field_ptr, src, saved_error_trace_index, src, .store);
    } else if (is_non_error == null) {
        // The result might be an error. If it is, we leave the error trace alone. If it isn't, we need
        // to pop any error trace that may have been propagated from our arguments.

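        // This is lowered as `block { cond_br %is_non_error, then: restore the
        // saved index, else: nothing }`; the outer block's body consists of the
        // single cond_br instruction appended below.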
        try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Block).Struct.fields.len);
        const cond_block_inst = try block.addInstAsIndex(.{
            .tag = .block,
            .data = .{
                .ty_pl = .{
                    .ty = Air.Inst.Ref.void_type,
                    .payload = undefined, // updated below
                },
            },
        });

        var then_block = block.makeSubBlock();
        defer then_block.instructions.deinit(gpa);

        // If non-error, then pop the error return trace by restoring the index.
        const stack_trace_ty = try sema.getBuiltinType("StackTrace");
        try sema.resolveTypeFields(stack_trace_ty);
        const ptr_stack_trace_ty = try mod.singleMutPtrType(stack_trace_ty);
        const err_return_trace = try then_block.addTy(.err_return_trace, ptr_stack_trace_ty);
        const field_name = try mod.intern_pool.getOrPutString(gpa, "index");
        const field_ptr = try sema.structFieldPtr(&then_block, src, err_return_trace, field_name, src, stack_trace_ty, true);
        try sema.storePtr2(&then_block, src, field_ptr, src, saved_error_trace_index, src, .store);
        _ = try then_block.addBr(cond_block_inst, Air.Inst.Ref.void_value);

        // Otherwise, do nothing
        var else_block = block.makeSubBlock();
        defer else_block.instructions.deinit(gpa);
        _ = try else_block.addBr(cond_block_inst, Air.Inst.Ref.void_value);

        try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.CondBr).Struct.fields.len +
            then_block.instructions.items.len + else_block.instructions.items.len +
            @typeInfo(Air.Block).Struct.fields.len + 1); // +1 for the sole .cond_br instruction in the .block

        const cond_br_inst = @as(Air.Inst.Index, @intCast(sema.air_instructions.len));
        try sema.air_instructions.append(gpa, .{ .tag = .cond_br, .data = .{ .pl_op = .{
            .operand = is_non_error_inst,
            .payload = sema.addExtraAssumeCapacity(Air.CondBr{
                .then_body_len = @as(u32, @intCast(then_block.instructions.items.len)),
                .else_body_len = @as(u32, @intCast(else_block.instructions.items.len)),
            }),
        } } });
        sema.air_extra.appendSliceAssumeCapacity(then_block.instructions.items);
        sema.air_extra.appendSliceAssumeCapacity(else_block.instructions.items);

        sema.air_instructions.items(.data)[cond_block_inst].ty_pl.payload = sema.addExtraAssumeCapacity(Air.Block{ .body_len = 1 });
        sema.air_extra.appendAssumeCapacity(cond_br_inst);
    }
}

fn zirCall(
    sema: *Sema,
    block: *Block,
    inst: Zir.Inst.Index,
    comptime kind: enum { direct, field },
) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const callee_src: LazySrcLoc = .{ .node_offset_call_func = inst_data.src_node };
    const call_src = inst_data.src();
    const ExtraType = switch (kind) {
        .direct => Zir.Inst.Call,
        .field => Zir.Inst.FieldCall,
    };
    const extra = sema.code.extraData(ExtraType, inst_data.payload_index);
    const args_len = extra.data.flags.args_len;

    const modifier = @as(std.builtin.CallModifier, @enumFromInt(extra.data.flags.packed_modifier));
    const ensure_result_used = extra.data.flags.ensure_result_used;
    const pop_error_return_trace = extra.data.flags.pop_error_return_trace;

    const callee: ResolvedFieldCallee = switch (kind) {
        .direct => .{ .direct = try sema.resolveInst(extra.data.callee) },
        .field => blk: {
            const object_ptr = try sema.resolveInst(extra.data.obj_ptr);
            const field_name = try mod.intern_pool.getOrPutString(sema.gpa, sema.code.nullTerminatedString(extra.data.field_name_start));
            const field_name_src: LazySrcLoc = .{ .node_offset_field_name = inst_data.src_node };
            break :blk try sema.fieldCallBind(block, callee_src, object_ptr, field_name, field_name_src);
        },
    };
    const func: Air.Inst.Ref = switch (callee) {
        .direct => |func_inst| func_inst,
        .method => |method| method.func_inst,
    };

    const callee_ty = sema.typeOf(func);
    const total_args = args_len + @intFromBool(callee == .method);
    const func_ty = try sema.checkCallArgumentCount(block, func, callee_src, callee_ty, total_args, callee == .method);

    // The block index before the call, so we can potentially insert an error trace save here later.
    const block_index: Air.Inst.Index = @intCast(block.instructions.items.len);

    // This will be set by `analyzeCall` to indicate whether any parameter was an error (making the
    // error trace potentially dirty).
    var input_is_error = false;

    const args_info: CallArgsInfo = .{ .zir_call = .{
        .bound_arg = switch (callee) {
            .direct => .none,
            .method => |method| method.arg0_inst,
        },
        .bound_arg_src = callee_src,
        .call_inst = inst,
        .call_node_offset = inst_data.src_node,
        .num_args = args_len,
        .args_body = sema.code.extra[extra.end..],
        .any_arg_is_error = &input_is_error,
    } };

    // AstGen ensures that a call instruction is always preceded by a dbg_stmt instruction.
    const call_dbg_node = inst - 1;
    const call_inst = try sema.analyzeCall(block, func, func_ty, callee_src, call_src, modifier, ensure_result_used, args_info, call_dbg_node, .call);

    if (sema.owner_func_index == .none or
        !mod.intern_pool.funcAnalysis(sema.owner_func_index).calls_or_awaits_errorable_fn)
    {
        // No errorable fn actually called; we have no error return trace
        input_is_error = false;
    }

    if (mod.backendSupportsFeature(.error_return_trace) and mod.comp.bin_file.options.error_return_tracing and
        !block.is_comptime and !block.is_typeof and (input_is_error or pop_error_return_trace))
    {
        const return_ty = sema.typeOf(call_inst);
        if (modifier != .always_tail and return_ty.isNoReturn(mod))
            return call_inst; // call to "fn(...) noreturn", don't pop

        // TODO: we don't fix up the error trace for always_tail correctly, we should be doing it
        // *before* the recursive call. This will be a bit tricky to do and probably requires
        // moving this logic into analyzeCall. But that's probably a good idea anyway.
        if (modifier == .always_tail)
            return call_inst;

        // If any input is an error-type, we might need to pop any trace it generated. Otherwise, we only
        // need to clean-up our own trace if we were passed to a non-error-handling expression.
        if (input_is_error or (pop_error_return_trace and return_ty.isError(mod))) {
            const stack_trace_ty = try sema.getBuiltinType("StackTrace");
            try sema.resolveTypeFields(stack_trace_ty);
            const field_name = try mod.intern_pool.getOrPutString(sema.gpa, "index");
            const field_index = try sema.structFieldIndex(block, stack_trace_ty, field_name, call_src);

            // Insert a save instruction before the arg resolution + call instructions we just generated
            const save_inst = try block.insertInst(block_index, .{
                .tag = .save_err_return_trace_index,
                .data = .{ .ty_pl = .{
                    .ty = Air.internedToRef(stack_trace_ty.toIntern()),
                    .payload = @as(u32, @intCast(field_index)),
                } },
            });

            // Pop the error return trace, testing the result for non-error if necessary
            const operand = if (pop_error_return_trace or modifier == .always_tail) .none else call_inst;
            try sema.popErrorReturnTrace(block, call_src, operand, save_inst);
        }

        return call_inst;
    } else {
        return call_inst;
    }
}

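/// Checks that `callee_ty` is callable (a function, or a single-item pointer
/// to one; optionals of these get a dedicated error) and that it is being
/// passed a compatible number of arguments, returning the function type.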
fn checkCallArgumentCount(
    sema: *Sema,
    block: *Block,
    func: Air.Inst.Ref,
    func_src: LazySrcLoc,
    callee_ty: Type,
    total_args: usize,
    member_fn: bool,
) !Type {
    const mod = sema.mod;
    const func_ty = func_ty: {
        switch (callee_ty.zigTypeTag(mod)) {
            .Fn => break :func_ty callee_ty,
            .Pointer => {
                const ptr_info = callee_ty.ptrInfo(mod);
                if (ptr_info.flags.size == .One and ptr_info.child.toType().zigTypeTag(mod) == .Fn) {
                    break :func_ty ptr_info.child.toType();
                }
            },
            .Optional => {
                const opt_child = callee_ty.optionalChild(mod);
                if (opt_child.zigTypeTag(mod) == .Fn or (opt_child.isSinglePointer(mod) and
                    opt_child.childType(mod).zigTypeTag(mod) == .Fn))
                {
                    const msg = msg: {
                        const msg = try sema.errMsg(block, func_src, "cannot call optional type '{}'", .{
                            callee_ty.fmt(mod),
                        });
                        errdefer msg.destroy(sema.gpa);
                        try sema.errNote(block, func_src, msg, "consider using '.?', 'orelse' or 'if'", .{});
                        break :msg msg;
                    };
                    return sema.failWithOwnedErrorMsg(msg);
                }
            },
            else => {},
        }
        return sema.fail(block, func_src, "type '{}' not a function", .{callee_ty.fmt(mod)});
    };

    const func_ty_info = mod.typeToFunc(func_ty).?;
    const fn_params_len = func_ty_info.param_types.len;
    const args_len = total_args - @intFromBool(member_fn);
    if (func_ty_info.is_var_args) {
        assert(callConvSupportsVarArgs(func_ty_info.cc));
        if (total_args >= fn_params_len) return func_ty;
    } else if (fn_params_len == total_args) {
        return func_ty;
    }

    const maybe_decl = try sema.funcDeclSrc(func);
    const member_str = if (member_fn) "member function " else "";
    const variadic_str = if (func_ty_info.is_var_args) "at least " else "";
    const msg = msg: {
        const msg = try sema.errMsg(
            block,
            func_src,
            "{s}expected {s}{d} argument(s), found {d}",
            .{
                member_str,
                variadic_str,
                fn_params_len - @intFromBool(member_fn),
                args_len,
            },
        );
        errdefer msg.destroy(sema.gpa);

        if (maybe_decl) |fn_decl| try mod.errNoteNonLazy(fn_decl.srcLoc(mod), msg, "function declared here", .{});
        break :msg msg;
    };
    return sema.failWithOwnedErrorMsg(msg);
}

fn callBuiltin(
    sema: *Sema,
    block: *Block,
    call_src: LazySrcLoc,
    builtin_fn: Air.Inst.Ref,
    modifier: std.builtin.CallModifier,
    args: []const Air.Inst.Ref,
    operation: CallOperation,
) !void {
    const mod = sema.mod;
    const callee_ty = sema.typeOf(builtin_fn);
    const func_ty = func_ty: {
        switch (callee_ty.zigTypeTag(mod)) {
            .Fn => break :func_ty callee_ty,
            .Pointer => {
                const ptr_info = callee_ty.ptrInfo(mod);
                if (ptr_info.flags.size == .One and ptr_info.child.toType().zigTypeTag(mod) == .Fn) {
                    break :func_ty ptr_info.child.toType();
                }
            },
            else => {},
        }
        std.debug.panic("type '{}' is not a function calling builtin fn", .{callee_ty.fmt(mod)});
    };

    const func_ty_info = mod.typeToFunc(func_ty).?;
    const fn_params_len = func_ty_info.param_types.len;
    if (args.len != fn_params_len or (func_ty_info.is_var_args and args.len < fn_params_len)) {
        std.debug.panic("parameter count mismatch calling builtin fn, expected {d}, found {d}", .{ fn_params_len, args.len });
    }

    _ = try sema.analyzeCall(
        block,
        builtin_fn,
        func_ty,
        call_src,
        call_src,
        modifier,
        false,
        .{ .resolved = .{ .src = call_src, .args = args } },
        null,
        operation,
    );
}

const CallOperation = enum {
    call,
    @"@call",
    @"@panic",
    @"safety check",
    @"error return",
};

const CallArgsInfo = union(enum) {
    /// The full list of resolved (but uncoerced) arguments is known ahead of time.
    resolved: struct {
        src: LazySrcLoc,
        args: []const Air.Inst.Ref,
    },

    /// The list of resolved (but uncoerced) arguments is known ahead of time, but
    /// originated from a usage of the @call builtin at the given node offset.
    call_builtin: struct {
        call_node_offset: i32,
        args: []const Air.Inst.Ref,
    },

    /// This call corresponds to a ZIR call instruction. The arguments have not yet been
    /// resolved. They must be resolved by `analyzeCall` so that argument resolution and
    /// generic instantiation may be interleaved. This is required for RLS to work on
    /// generic parameters.
    zir_call: struct {
        /// This may be `none`, in which case it is ignored. Otherwise, it is the
        /// already-resolved value of the first argument, from method call syntax.
        bound_arg: Air.Inst.Ref,
        /// The source location of `bound_arg` if it is not `.none`. Otherwise `undefined`.
        bound_arg_src: LazySrcLoc,
        /// The ZIR call instruction. The parameter type is placed at this index while
        /// analyzing arguments.
        call_inst: Zir.Inst.Index,
        /// The node offset of `call_inst`.
        call_node_offset: i32,
        /// The number of arguments to this call, not including `bound_arg`.
        num_args: u32,
        /// The ZIR corresponding to all function arguments (other than `bound_arg`, if it
        /// is not `none`). Format is precisely the same as trailing data of ZIR `call`.
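        /// Concretely, the first `num_args` elements are end offsets into this
        /// slice: argument `i`'s body is `args_body[args_body[i - 1]..args_body[i]]`,
        /// with the first body beginning right after the offsets, at `num_args`.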
        args_body: []const Zir.Inst.Index,
        /// This bool will be set to true if any argument evaluated turns out to have an error set or error union type.
        /// This is used by the caller to restore the error return trace when necessary.
        any_arg_is_error: *bool,
    },

    fn count(cai: CallArgsInfo) usize {
        return switch (cai) {
            inline .resolved, .call_builtin => |resolved| resolved.args.len,
            .zir_call => |zir_call| zir_call.num_args + @intFromBool(zir_call.bound_arg != .none),
        };
    }

    fn argSrc(cai: CallArgsInfo, block: *Block, arg_index: usize) LazySrcLoc {
        return switch (cai) {
            .resolved => |resolved| resolved.src,
            .call_builtin => |call_builtin| .{ .call_arg = .{
                .decl = block.src_decl,
                .call_node_offset = call_builtin.call_node_offset,
                .arg_index = @intCast(arg_index),
            } },
            .zir_call => |zir_call| if (arg_index == 0 and zir_call.bound_arg != .none) {
                return zir_call.bound_arg_src;
            } else .{ .call_arg = .{
                .decl = block.src_decl,
                .call_node_offset = zir_call.call_node_offset,
                .arg_index = @intCast(arg_index - @intFromBool(zir_call.bound_arg != .none)),
            } },
        };
    }

    /// Analyzes the arg at `arg_index` and coerces it to `param_ty`.
    /// `param_ty` may be `generic_poison` or `var_args_param`.
    /// `func_ty_info` may be the type before instantiation, even if a generic
    /// instantiation has been partially completed.
    fn analyzeArg(
        cai: CallArgsInfo,
        sema: *Sema,
        block: *Block,
        arg_index: usize,
        param_ty: Type,
        func_ty_info: InternPool.Key.FuncType,
        func_inst: Air.Inst.Ref,
    ) CompileError!Air.Inst.Ref {
        const mod = sema.mod;
        const param_count = func_ty_info.param_types.len;
        switch (param_ty.toIntern()) {
            .generic_poison_type, .var_args_param_type => {},
            else => try sema.queueFullTypeResolution(param_ty),
        }
        const uncoerced_arg: Air.Inst.Ref = switch (cai) {
            inline .resolved, .call_builtin => |resolved| resolved.args[arg_index],
            .zir_call => |zir_call| arg_val: {
                const has_bound_arg = zir_call.bound_arg != .none;
                if (arg_index == 0 and has_bound_arg) {
                    break :arg_val zir_call.bound_arg;
                }
                const real_arg_idx = arg_index - @intFromBool(has_bound_arg);

                const arg_body = if (real_arg_idx == 0) blk: {
                    const start = zir_call.num_args;
                    const end = zir_call.args_body[0];
                    break :blk zir_call.args_body[start..end];
                } else blk: {
                    const start = zir_call.args_body[real_arg_idx - 1];
                    const end = zir_call.args_body[real_arg_idx];
                    break :blk zir_call.args_body[start..end];
                };

                // Generate args to comptime params in comptime block
                const parent_comptime = block.is_comptime;
                defer block.is_comptime = parent_comptime;
                // Note that we are indexing into parameters, not arguments, so use `arg_index` instead of `real_arg_idx`
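                // (Presumably only the first 32 parameters can be marked
                // comptime because the comptime flags live in a 32-bit bitset.)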
                if (arg_index < @min(param_count, 32) and func_ty_info.paramIsComptime(@intCast(arg_index))) {
                    block.is_comptime = true;
                    // TODO set comptime_reason
                }
                // Give the arg its result type
                sema.inst_map.putAssumeCapacity(zir_call.call_inst, Air.internedToRef(param_ty.toIntern()));
                // Resolve the arg!
                const uncoerced_arg = try sema.resolveBody(block, arg_body, zir_call.call_inst);

                if (sema.typeOf(uncoerced_arg).zigTypeTag(mod) == .NoReturn) {
                    // This terminates resolution of arguments. The caller should
                    // propagate this.
                    return uncoerced_arg;
                }

                if (sema.typeOf(uncoerced_arg).isError(mod)) {
                    zir_call.any_arg_is_error.* = true;
                }

                break :arg_val uncoerced_arg;
            },
        };
        switch (param_ty.toIntern()) {
            .generic_poison_type => return uncoerced_arg,
            .var_args_param_type => return sema.coerceVarArgParam(block, uncoerced_arg, cai.argSrc(block, arg_index)),
            else => return sema.coerceExtra(
                block,
                param_ty,
                uncoerced_arg,
                cai.argSrc(block, arg_index),
                .{ .param_src = .{
                    .func_inst = func_inst,
                    .param_i = @intCast(arg_index),
                } },
            ) catch |err| switch (err) {
                error.NotCoercible => unreachable,
                else => |e| return e,
            },
        }
    }
};

/// While performing an inline call, we need to switch between two Sema states a few times: the
/// state for the caller, and the state for the callee (with the callee's `code`, `fn_ret_ty`, etc).
/// These cannot be two separate Sema instances as they must share AIR.
/// Therefore, this struct acts as a helper to switch between the two.
/// This switching is required during argument evaluation, where function argument analysis must be
/// interleaved with resolving generic parameter types.
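/// Typical usage, as in `analyzeCall` below:
///
///     var ics = InlineCallSema.init(sema, callee_zir, callee_func_index, err_trace_index);
///     defer ics.deinit();
///     // ...use ics.caller() / ics.callee() instead of `sema` while both states are live...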
const InlineCallSema = struct {
    sema: *Sema,
    cur: enum {
        caller,
        callee,
    },

    other_code: Zir,
    other_func_index: InternPool.Index,
    other_fn_ret_ty: Type,
    other_fn_ret_ty_ies: ?*InferredErrorSet,
    other_inst_map: InstMap,
    other_error_return_trace_index_on_fn_entry: Air.Inst.Ref,
    other_generic_owner: InternPool.Index,
    other_generic_call_src: LazySrcLoc,
    other_generic_call_decl: Decl.OptionalIndex,

    /// Sema should currently be set up for the caller (i.e. unchanged yet). This init will not
    /// change that. The other parameters contain data for the callee Sema. The other modified
    /// Sema fields are all initialized to default values for the callee.
    /// Must call deinit on the result.
    fn init(
        sema: *Sema,
        callee_code: Zir,
        callee_func_index: InternPool.Index,
        callee_error_return_trace_index_on_fn_entry: Air.Inst.Ref,
    ) InlineCallSema {
        return .{
            .sema = sema,
            .cur = .caller,
            .other_code = callee_code,
            .other_func_index = callee_func_index,
            .other_fn_ret_ty = Type.void,
            .other_fn_ret_ty_ies = null,
            .other_inst_map = .{},
            .other_error_return_trace_index_on_fn_entry = callee_error_return_trace_index_on_fn_entry,
            .other_generic_owner = .none,
            .other_generic_call_src = .unneeded,
            .other_generic_call_decl = .none,
        };
    }

    /// Switch back to the caller Sema if necessary and free all temporary state of the callee Sema.
    fn deinit(ics: *InlineCallSema) void {
        switch (ics.cur) {
            .caller => {},
            .callee => ics.swap(),
        }
        // Callee Sema owns the inst_map memory
        ics.other_inst_map.deinit(ics.sema.gpa);
        ics.* = undefined;
    }

    /// Returns a Sema instance suitable for usage from the caller context.
    fn caller(ics: *InlineCallSema) *Sema {
        switch (ics.cur) {
            .caller => {},
            .callee => ics.swap(),
        }
        return ics.sema;
    }

    /// Returns a Sema instance suitable for usage from the callee context.
    fn callee(ics: *InlineCallSema) *Sema {
        switch (ics.cur) {
            .caller => ics.swap(),
            .callee => {},
        }
        return ics.sema;
    }

    /// Internal use only. Swaps to the other Sema state.
    fn swap(ics: *InlineCallSema) void {
        ics.cur = switch (ics.cur) {
            .caller => .callee,
            .callee => .caller,
        };
        // zig fmt: off
        std.mem.swap(Zir, &ics.sema.code, &ics.other_code);
        std.mem.swap(InternPool.Index, &ics.sema.func_index, &ics.other_func_index);
        std.mem.swap(Type, &ics.sema.fn_ret_ty, &ics.other_fn_ret_ty);
        std.mem.swap(?*InferredErrorSet, &ics.sema.fn_ret_ty_ies, &ics.other_fn_ret_ty_ies);
        std.mem.swap(InstMap, &ics.sema.inst_map, &ics.other_inst_map);
        std.mem.swap(InternPool.Index, &ics.sema.generic_owner, &ics.other_generic_owner);
        std.mem.swap(LazySrcLoc, &ics.sema.generic_call_src, &ics.other_generic_call_src);
        std.mem.swap(Decl.OptionalIndex, &ics.sema.generic_call_decl, &ics.other_generic_call_decl);
        std.mem.swap(Air.Inst.Ref, &ics.sema.error_return_trace_index_on_fn_entry, &ics.other_error_return_trace_index_on_fn_entry);
        // zig fmt: on
    }
};

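/// Analyzes a function call. Depending on the modifier, the callee, and its
/// calling convention, the call is either interpreted at comptime, inlined,
/// dispatched to generic instantiation, or emitted as a runtime AIR `call`
/// instruction.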
fn analyzeCall(
    sema: *Sema,
    block: *Block,
    func: Air.Inst.Ref,
    func_ty: Type,
    func_src: LazySrcLoc,
    call_src: LazySrcLoc,
    modifier: std.builtin.CallModifier,
    ensure_result_used: bool,
    args_info: CallArgsInfo,
    call_dbg_node: ?Zir.Inst.Index,
    operation: CallOperation,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const ip = &mod.intern_pool;

    const callee_ty = sema.typeOf(func);
    const func_ty_info = mod.typeToFunc(func_ty).?;
    const cc = func_ty_info.cc;
    if (cc == .Naked) {
        const maybe_decl = try sema.funcDeclSrc(func);
        const msg = msg: {
            const msg = try sema.errMsg(
                block,
                func_src,
                "unable to call function with naked calling convention",
                .{},
            );
            errdefer msg.destroy(sema.gpa);

            if (maybe_decl) |fn_decl| try mod.errNoteNonLazy(fn_decl.srcLoc(mod), msg, "function declared here", .{});
            break :msg msg;
        };
        return sema.failWithOwnedErrorMsg(msg);
    }

    const call_tag: Air.Inst.Tag = switch (modifier) {
        .auto,
        .always_inline,
        .compile_time,
        .no_async,
        => Air.Inst.Tag.call,

        .never_tail => Air.Inst.Tag.call_never_tail,
        .never_inline => Air.Inst.Tag.call_never_inline,
        .always_tail => Air.Inst.Tag.call_always_tail,

        .async_kw => return sema.failWithUseOfAsync(block, call_src),
    };

    if (modifier == .never_inline and func_ty_info.cc == .Inline) {
        return sema.fail(block, call_src, "'never_inline' call of inline function", .{});
    }
    if (modifier == .always_inline and func_ty_info.is_noinline) {
        return sema.fail(block, call_src, "'always_inline' call of noinline function", .{});
    }

    const gpa = sema.gpa;

    var is_generic_call = func_ty_info.is_generic;
    var is_comptime_call = block.is_comptime or modifier == .compile_time;
    var comptime_reason: ?*const Block.ComptimeReason = null;
    if (!is_comptime_call) {
        if (sema.typeRequiresComptime(func_ty_info.return_type.toType())) |ct| {
            is_comptime_call = ct;
            if (ct) {
                comptime_reason = &.{ .comptime_ret_ty = .{
                    .block = block,
                    .func = func,
                    .func_src = func_src,
                    .return_ty = func_ty_info.return_type.toType(),
                } };
            }
        } else |err| switch (err) {
            error.GenericPoison => is_generic_call = true,
            else => |e| return e,
        }
    }
    var is_inline_call = is_comptime_call or modifier == .always_inline or
        func_ty_info.cc == .Inline;

    if (sema.func_is_naked and !is_inline_call and !is_comptime_call) {
        const msg = msg: {
            const msg = try sema.errMsg(block, call_src, "runtime {s} not allowed in naked function", .{@tagName(operation)});
            errdefer msg.destroy(sema.gpa);

            switch (operation) {
                .call, .@"@call", .@"@panic", .@"error return" => {},
                .@"safety check" => try sema.errNote(block, call_src, msg, "use @setRuntimeSafety to disable runtime safety", .{}),
            }
            break :msg msg;
        };
        return sema.failWithOwnedErrorMsg(msg);
    }

    if (!is_inline_call and is_generic_call) {
        if (sema.instantiateGenericCall(
            block,
            func,
            func_src,
            call_src,
            ensure_result_used,
            args_info,
            call_tag,
            call_dbg_node,
        )) |some| {
            return some;
        } else |err| switch (err) {
            error.GenericPoison => {
                is_inline_call = true;
            },
            error.ComptimeReturn => {
                is_inline_call = true;
                is_comptime_call = true;
                comptime_reason = &.{ .comptime_ret_ty = .{
                    .block = block,
                    .func = func,
                    .func_src = func_src,
                    .return_ty = func_ty_info.return_type.toType(),
                } };
            },
            else => |e| return e,
        }
    }

    if (is_comptime_call and modifier == .never_inline) {
        return sema.fail(block, call_src, "unable to perform 'never_inline' call at compile-time", .{});
    }

    const result: Air.Inst.Ref = if (is_inline_call) res: {
        const func_val = sema.resolveConstValue(block, func_src, func, "function being called at comptime must be comptime-known") catch |err| {
            if (err == error.AnalysisFail and comptime_reason != null) try comptime_reason.?.explain(sema, sema.err);
            return err;
        };
        const module_fn_index = switch (mod.intern_pool.indexToKey(func_val.toIntern())) {
            .extern_func => return sema.fail(block, call_src, "{s} call of extern function", .{
                @as([]const u8, if (is_comptime_call) "comptime" else "inline"),
            }),
            .func => func_val.toIntern(),
            .ptr => |ptr| switch (ptr.addr) {
                .decl => |decl| mod.declPtr(decl).val.toIntern(),
                else => {
                    assert(callee_ty.isPtrAtRuntime(mod));
                    return sema.fail(block, call_src, "{s} call of function pointer", .{
                        @as([]const u8, if (is_comptime_call) "comptime" else "inline"),
                    });
                },
            },
            else => unreachable,
        };
        if (func_ty_info.is_var_args) {
            return sema.fail(block, call_src, "{s} call of variadic function", .{
                @as([]const u8, if (is_comptime_call) "comptime" else "inline"),
            });
        }

        // Analyze the ZIR. The same ZIR gets analyzed into a runtime function
        // or an inlined call depending on what union tag the `label` field is
        // set to in the `Block`.
        // This block instruction will be used to capture the return value from the
        // inlined function.
        const block_inst = @as(Air.Inst.Index, @intCast(sema.air_instructions.len));
        try sema.air_instructions.append(gpa, .{
            .tag = .block,
            .data = undefined,
        });
        // This one is shared among sub-blocks within the same callee, but not
        // shared among the entire inline/comptime call stack.
        var inlining: Block.Inlining = .{
            .func = .none,
            .comptime_result = undefined,
            .merges = .{
                .src_locs = .{},
                .results = .{},
                .br_list = .{},
                .block_inst = block_inst,
            },
        };

        const module_fn = mod.funcInfo(module_fn_index);
        const fn_owner_decl = mod.declPtr(module_fn.owner_decl);

        // We effectively want a child Sema here, but can't literally do that, because we need AIR
        // to be shared. InlineCallSema is a wrapper which handles this for us. While `ics` is in
        // scope, we should use its `caller`/`callee` methods rather than using `sema` directly
        // whenever performing an operation where the difference matters.
        var ics = InlineCallSema.init(
            sema,
            fn_owner_decl.getFileScope(mod).zir,
            module_fn_index,
            block.error_return_trace_index,
        );
        defer ics.deinit();

        try mod.declareDeclDependencyType(ics.callee().owner_decl_index, module_fn.owner_decl, .function_body);

        var wip_captures = try WipCaptureScope.init(gpa, fn_owner_decl.src_scope);
        defer wip_captures.deinit();

        var child_block: Block = .{
            .parent = null,
            .sema = sema,
            .src_decl = module_fn.owner_decl,
            .namespace = fn_owner_decl.src_namespace,
            .wip_capture_scope = wip_captures.scope,
            .instructions = .{},
            .label = null,
            .inlining = &inlining,
            .is_typeof = block.is_typeof,
            .is_comptime = is_comptime_call,
            .comptime_reason = comptime_reason,
            .error_return_trace_index = block.error_return_trace_index,
        };

        const merges = &child_block.inlining.?.merges;

        defer child_block.instructions.deinit(gpa);
        defer merges.deinit(gpa);

        try sema.emitBackwardBranch(block, call_src);

        // Whether this call should be memoized, set to false if the call can
        // mutate comptime state.
        var should_memoize = true;

        // If it's a comptime function call, we need to memoize it as long as no external
        // comptime memory is mutated.
        const memoized_arg_values = try sema.arena.alloc(InternPool.Index, func_ty_info.param_types.len);

        const owner_info = mod.typeToFunc(fn_owner_decl.ty).?;
        var new_fn_info: InternPool.GetFuncTypeKey = .{
            .param_types = try sema.arena.alloc(InternPool.Index, owner_info.param_types.len),
            .return_type = owner_info.return_type,
            .comptime_bits = 0,
            .noalias_bits = owner_info.noalias_bits,
            .alignment = if (owner_info.align_is_generic) null else owner_info.alignment,
            .cc = if (owner_info.cc_is_generic) null else owner_info.cc,
            .is_var_args = owner_info.is_var_args,
            .is_noinline = owner_info.is_noinline,
            .section_is_generic = owner_info.section_is_generic,
            .addrspace_is_generic = owner_info.addrspace_is_generic,
            .is_generic = owner_info.is_generic,
        };

        // This will have return instructions analyzed as break instructions to
        // the block_inst above. Here we are performing "comptime/inline semantic analysis"
        // for a function body, which means we must map the parameter ZIR instructions to
        // the AIR instructions of the callsite. The callee could be a generic function
        // which means its parameter type expressions must be resolved in order and used
        // to successively coerce the arguments.
        const fn_info = ics.callee().code.getFnInfo(module_fn.zir_body_inst);
        try ics.callee().inst_map.ensureSpaceForInstructions(gpa, fn_info.param_body);

        var has_comptime_args = false;
        var arg_i: u32 = 0;
        for (fn_info.param_body) |inst| {
            const opt_noreturn_ref = try analyzeInlineCallArg(
                &ics,
                block,
                &child_block,
                inst,
                new_fn_info.param_types,
                &arg_i,
                args_info,
                is_comptime_call,
                &should_memoize,
                memoized_arg_values,
                func_ty_info,
                func,
                &has_comptime_args,
            );
            if (opt_noreturn_ref) |ref| {
                // Analyzing this argument gave a ref of a noreturn type. Terminate argument analysis here.
                return ref;
            }
        }

        // From here, we only really need to use the callee Sema. Make it the active one, then we
        // can just use `sema` directly.
        _ = ics.callee();

        if (!has_comptime_args and module_fn.analysis(ip).state == .sema_failure)
            return error.AnalysisFail;

        const recursive_msg = "inline call is recursive";
        var head = if (!has_comptime_args) block else null;
        while (head) |some| {
            const parent_inlining = some.inlining orelse break;
            if (parent_inlining.func == module_fn_index) {
                return sema.fail(block, call_src, recursive_msg, .{});
            }
            head = some.parent;
        }
        if (!has_comptime_args) inlining.func = module_fn_index;

        // In case it is a generic function with an expression for the return type that depends
        // on parameters, we must now do the same for the return type as we just did with
        // each of the parameters, resolving the return type and providing it to the child
        // `Sema` so that it can be used for the `ret_ptr` instruction.
        const ret_ty_inst = if (fn_info.ret_ty_body.len != 0)
            try sema.resolveBody(&child_block, fn_info.ret_ty_body, module_fn.zir_body_inst)
        else
            try sema.resolveInst(fn_info.ret_ty_ref);
        const ret_ty_src: LazySrcLoc = .{ .node_offset_fn_type_ret_ty = 0 };
        sema.fn_ret_ty = try sema.analyzeAsType(&child_block, ret_ty_src, ret_ty_inst);
        if (module_fn.analysis(ip).inferred_error_set) {
            // Create a fresh inferred error set type for inline/comptime calls.
            const ies = try sema.arena.create(InferredErrorSet);
            ies.* = .{ .func = .none };
            sema.fn_ret_ty_ies = ies;
            sema.fn_ret_ty = (try ip.get(gpa, .{ .error_union_type = .{
                .error_set_type = .adhoc_inferred_error_set_type,
                .payload_type = sema.fn_ret_ty.toIntern(),
            } })).toType();
        }

// This `res2` is here instead of directly breaking from `res` due to a stage1
|
|
// bug generating invalid LLVM IR.
|
|
const res2: Air.Inst.Ref = res2: {
|
|
if (should_memoize and is_comptime_call) {
|
|
if (mod.intern_pool.getIfExists(.{ .memoized_call = .{
|
|
.func = module_fn_index,
|
|
.arg_values = memoized_arg_values,
|
|
.result = .none,
|
|
} })) |memoized_call_index| {
|
|
const memoized_call = mod.intern_pool.indexToKey(memoized_call_index).memoized_call;
|
|
break :res2 Air.internedToRef(memoized_call.result);
|
|
}
|
|
}
|
|
|
|
new_fn_info.return_type = sema.fn_ret_ty.toIntern();
|
|
const new_func_resolved_ty = try mod.funcType(new_fn_info);
|
|
if (!is_comptime_call and !block.is_typeof) {
|
|
try sema.emitDbgInline(block, sema.func_index, module_fn_index, new_func_resolved_ty, .dbg_inline_begin);
|
|
|
|
                const zir_tags = sema.code.instructions.items(.tag);
                for (fn_info.param_body) |param| switch (zir_tags[param]) {
                    .param, .param_comptime => {
                        const inst_data = sema.code.instructions.items(.data)[param].pl_tok;
                        const extra = sema.code.extraData(Zir.Inst.Param, inst_data.payload_index);
                        const param_name = sema.code.nullTerminatedString(extra.data.name);
                        const inst = sema.inst_map.get(param).?;

                        try sema.addDbgVar(&child_block, inst, .dbg_var_val, param_name);
                    },
                    .param_anytype, .param_anytype_comptime => {
                        const inst_data = sema.code.instructions.items(.data)[param].str_tok;
                        const param_name = inst_data.get(sema.code);
                        const inst = sema.inst_map.get(param).?;

                        try sema.addDbgVar(&child_block, inst, .dbg_var_val, param_name);
                    },
                    else => continue,
                };
            }

            if (is_comptime_call and ensure_result_used) {
                try sema.ensureResultUsed(block, sema.fn_ret_ty, call_src);
            }

            const result = result: {
                sema.analyzeBody(&child_block, fn_info.body) catch |err| switch (err) {
                    error.ComptimeReturn => break :result inlining.comptime_result,
                    error.AnalysisFail => {
                        const err_msg = sema.err orelse return err;
                        if (mem.eql(u8, err_msg.msg, recursive_msg)) return err;
                        try sema.errNote(block, call_src, err_msg, "called from here", .{});
                        err_msg.clearTrace(gpa);
                        return err;
                    },
                    else => |e| return e,
                };
                break :result try sema.analyzeBlockBody(block, call_src, &child_block, merges);
            };

            if (!is_comptime_call and !block.is_typeof and
                sema.typeOf(result).zigTypeTag(mod) != .NoReturn)
            {
                try sema.emitDbgInline(
                    block,
                    module_fn_index,
                    sema.func_index,
                    mod.funcOwnerDeclPtr(sema.func_index).ty,
                    .dbg_inline_end,
                );
            }

            if (should_memoize and is_comptime_call) {
                const result_val = try sema.resolveConstMaybeUndefVal(block, .unneeded, result, "");
                const result_interned = try result_val.intern2(sema.fn_ret_ty, mod);

                // Transform ad-hoc inferred error set types into concrete error sets.
                const result_transformed = try sema.resolveAdHocInferredErrorSet(block, call_src, result_interned);

                // TODO: check whether any external comptime memory was mutated by the
                // comptime function call. If so, then do not memoize the call here.
                _ = try mod.intern(.{ .memoized_call = .{
                    .func = module_fn_index,
                    .arg_values = memoized_arg_values,
                    .result = result_transformed,
                } });

                break :res2 Air.internedToRef(result_transformed);
            }

            if (try sema.resolveMaybeUndefVal(result)) |result_val| {
                const result_interned = try result_val.intern2(sema.fn_ret_ty, mod);
                const result_transformed = try sema.resolveAdHocInferredErrorSet(block, call_src, result_interned);
                break :res2 Air.internedToRef(result_transformed);
            }

            const new_ty = try sema.resolveAdHocInferredErrorSetTy(block, call_src, sema.typeOf(result).toIntern());
            if (new_ty != .none) {
                // TODO: mutate in place the previous instruction if possible
                // rather than adding a bitcast instruction.
                break :res2 try block.addBitCast(new_ty.toType(), result);
            }

            break :res2 result;
        };

        try wip_captures.finalize();

        break :res res2;
    } else res: {
        assert(!func_ty_info.is_generic);

        const args = try sema.arena.alloc(Air.Inst.Ref, args_info.count());
        for (args, 0..) |*arg_out, arg_idx| {
            // Non-generic, so param types are already resolved
            const param_ty = if (arg_idx < func_ty_info.param_types.len) ty: {
                break :ty func_ty_info.param_types.get(ip)[arg_idx].toType();
            } else InternPool.Index.var_args_param_type.toType();
            assert(!param_ty.isGenericPoison());
            arg_out.* = try args_info.analyzeArg(sema, block, arg_idx, param_ty, func_ty_info, func);
            if (sema.typeOf(arg_out.*).zigTypeTag(mod) == .NoReturn) {
                return arg_out.*;
            }
        }

        if (call_dbg_node) |some| try sema.zirDbgStmt(block, some);

        try sema.queueFullTypeResolution(func_ty_info.return_type.toType());
        if (sema.owner_func_index != .none and func_ty_info.return_type.toType().isError(mod)) {
            ip.funcAnalysis(sema.owner_func_index).calls_or_awaits_errorable_fn = true;
        }

        if (try sema.resolveMaybeUndefVal(func)) |func_val| {
            if (mod.intern_pool.isFuncBody(func_val.toIntern())) {
                try mod.ensureFuncBodyAnalysisQueued(func_val.toIntern());
            }
        }

        try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Call).Struct.fields.len +
            args.len);
        const func_inst = try block.addInst(.{
            .tag = call_tag,
            .data = .{ .pl_op = .{
                .operand = func,
                .payload = sema.addExtraAssumeCapacity(Air.Call{
                    .args_len = @as(u32, @intCast(args.len)),
                }),
            } },
        });
        sema.appendRefsAssumeCapacity(args);

        if (call_tag == .call_always_tail) {
            if (ensure_result_used) {
                try sema.ensureResultUsed(block, sema.typeOf(func_inst), call_src);
            }
            return sema.handleTailCall(block, call_src, func_ty, func_inst);
        }
        if (block.wantSafety() and func_ty_info.return_type == .noreturn_type) skip_safety: {
            // Function pointers and extern functions aren't guaranteed to
            // actually be noreturn so we add a safety check for them.
            if (try sema.resolveMaybeUndefVal(func)) |func_val| {
                switch (mod.intern_pool.indexToKey(func_val.toIntern())) {
                    .func => break :skip_safety,
                    .ptr => |ptr| switch (ptr.addr) {
                        .decl => |decl| if (!mod.declPtr(decl).isExtern(mod)) break :skip_safety,
                        else => {},
                    },
                    else => {},
                }
            }
            try sema.safetyPanic(block, call_src, .noreturn_returned);
            return Air.Inst.Ref.unreachable_value;
        }
        if (func_ty_info.return_type == .noreturn_type) {
            _ = try block.addNoOp(.unreach);
            return Air.Inst.Ref.unreachable_value;
        }
        break :res func_inst;
    };

    if (ensure_result_used) {
        try sema.ensureResultUsed(block, sema.typeOf(result), call_src);
    }
    return result;
}

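/// Lowers a tail call: verifies that the backend supports tail calls on the
/// target and that the callee's type matches the caller's, then returns the
/// callee's result directly via a `ret` instruction.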
fn handleTailCall(sema: *Sema, block: *Block, call_src: LazySrcLoc, func_ty: Type, result: Air.Inst.Ref) !Air.Inst.Ref {
    const mod = sema.mod;
    const target = mod.getTarget();
    const backend = mod.comp.getZigBackend();
    if (!target_util.supportsTailCall(target, backend)) {
        return sema.fail(block, call_src, "unable to perform tail call: compiler backend '{s}' does not support tail calls on target architecture '{s}' with the selected CPU feature flags", .{
            @tagName(backend), @tagName(target.cpu.arch),
        });
    }
    const func_decl = mod.funcOwnerDeclPtr(sema.owner_func_index);
    if (!func_ty.eql(func_decl.ty, mod)) {
        return sema.fail(block, call_src, "unable to perform tail call: type of function being called '{}' does not match type of calling function '{}'", .{
            func_ty.fmt(mod), func_decl.ty.fmt(mod),
        });
    }
    _ = try block.addUnOp(.ret, result);
    return Air.Inst.Ref.unreachable_value;
}

/// Usually, returns null. If an argument was noreturn, returns that ref (which should become the call result).
fn analyzeInlineCallArg(
    ics: *InlineCallSema,
    arg_block: *Block,
    param_block: *Block,
    inst: Zir.Inst.Index,
    new_param_types: []InternPool.Index,
    arg_i: *u32,
    args_info: CallArgsInfo,
    is_comptime_call: bool,
    should_memoize: *bool,
    memoized_arg_values: []InternPool.Index,
    func_ty_info: InternPool.Key.FuncType,
    func_inst: Air.Inst.Ref,
    has_comptime_args: *bool,
) !?Air.Inst.Ref {
    const mod = ics.sema.mod;
    const ip = &mod.intern_pool;
    const zir_tags = ics.callee().code.instructions.items(.tag);
    switch (zir_tags[inst]) {
        .param_comptime, .param_anytype_comptime => has_comptime_args.* = true,
        else => {},
    }
    switch (zir_tags[inst]) {
        .param, .param_comptime => {
            // Evaluate the parameter type expression now that previous ones have
            // been mapped, and coerce the corresponding argument to it.
            const pl_tok = ics.callee().code.instructions.items(.data)[inst].pl_tok;
            const param_src = pl_tok.src();
            const extra = ics.callee().code.extraData(Zir.Inst.Param, pl_tok.payload_index);
            const param_body = ics.callee().code.extra[extra.end..][0..extra.data.body_len];
            const param_ty = param_ty: {
                const raw_param_ty = func_ty_info.param_types.get(ip)[arg_i.*];
                if (raw_param_ty != .generic_poison_type) break :param_ty raw_param_ty;
                const param_ty_inst = try ics.callee().resolveBody(param_block, param_body, inst);
                const param_ty = try ics.callee().analyzeAsType(param_block, param_src, param_ty_inst);
                break :param_ty param_ty.toIntern();
            };
            new_param_types[arg_i.*] = param_ty;
            const casted_arg = try args_info.analyzeArg(ics.caller(), arg_block, arg_i.*, param_ty.toType(), func_ty_info, func_inst);
            if (ics.caller().typeOf(casted_arg).zigTypeTag(mod) == .NoReturn) {
                return casted_arg;
            }
            const arg_src = args_info.argSrc(arg_block, arg_i.*);
            if (try ics.callee().typeRequiresComptime(param_ty.toType())) {
                _ = ics.caller().resolveConstMaybeUndefVal(arg_block, arg_src, casted_arg, "argument to parameter with comptime-only type must be comptime-known") catch |err| {
                    if (err == error.AnalysisFail and param_block.comptime_reason != null) try param_block.comptime_reason.?.explain(ics.caller(), ics.caller().err);
                    return err;
                };
            } else if (!is_comptime_call and zir_tags[inst] == .param_comptime) {
                _ = try ics.caller().resolveConstMaybeUndefVal(arg_block, arg_src, casted_arg, "parameter is comptime");
            }

            if (is_comptime_call) {
                ics.callee().inst_map.putAssumeCapacityNoClobber(inst, casted_arg);
                const arg_val = ics.caller().resolveConstMaybeUndefVal(arg_block, arg_src, casted_arg, "argument to function being called at comptime must be comptime-known") catch |err| {
                    if (err == error.AnalysisFail and param_block.comptime_reason != null) try param_block.comptime_reason.?.explain(ics.caller(), ics.caller().err);
                    return err;
                };
                switch (arg_val.toIntern()) {
                    .generic_poison, .generic_poison_type => {
                        // This function is currently evaluated as part of an as-of-yet unresolvable
                        // parameter or return type.
                        return error.GenericPoison;
                    },
                    else => {},
                }
                // Needed so that lazy values do not trigger
                // assertion due to type not being resolved
                // when the hash function is called.
                const resolved_arg_val = try ics.caller().resolveLazyValue(arg_val);
                should_memoize.* = should_memoize.* and !resolved_arg_val.canMutateComptimeVarState(mod);
                memoized_arg_values[arg_i.*] = try resolved_arg_val.intern(param_ty.toType(), mod);
            } else {
                ics.callee().inst_map.putAssumeCapacityNoClobber(inst, casted_arg);
            }

            if (try ics.caller().resolveMaybeUndefVal(casted_arg)) |_| {
                has_comptime_args.* = true;
            }

            arg_i.* += 1;
        },
        .param_anytype, .param_anytype_comptime => {
            // No coercion needed.
            const uncasted_arg = try args_info.analyzeArg(ics.caller(), arg_block, arg_i.*, Type.generic_poison, func_ty_info, func_inst);
            if (ics.caller().typeOf(uncasted_arg).zigTypeTag(mod) == .NoReturn) {
                return uncasted_arg;
            }
            const arg_src = args_info.argSrc(arg_block, arg_i.*);
            new_param_types[arg_i.*] = ics.caller().typeOf(uncasted_arg).toIntern();

            if (is_comptime_call) {
                ics.callee().inst_map.putAssumeCapacityNoClobber(inst, uncasted_arg);
                const arg_val = ics.caller().resolveConstMaybeUndefVal(arg_block, arg_src, uncasted_arg, "argument to function being called at comptime must be comptime-known") catch |err| {
                    if (err == error.AnalysisFail and param_block.comptime_reason != null) try param_block.comptime_reason.?.explain(ics.caller(), ics.caller().err);
                    return err;
                };
                switch (arg_val.toIntern()) {
                    .generic_poison, .generic_poison_type => {
                        // This function is currently evaluated as part of an as-of-yet unresolvable
                        // parameter or return type.
                        return error.GenericPoison;
                    },
                    else => {},
                }
                // Needed so that lazy values do not trigger
                // assertion due to type not being resolved
                // when the hash function is called.
                const resolved_arg_val = try ics.caller().resolveLazyValue(arg_val);
                should_memoize.* = should_memoize.* and !resolved_arg_val.canMutateComptimeVarState(mod);
                memoized_arg_values[arg_i.*] = try resolved_arg_val.intern(ics.caller().typeOf(uncasted_arg), mod);
            } else {
                if (zir_tags[inst] == .param_anytype_comptime) {
                    _ = try ics.caller().resolveConstMaybeUndefVal(arg_block, arg_src, uncasted_arg, "parameter is comptime");
                }
                ics.callee().inst_map.putAssumeCapacityNoClobber(inst, uncasted_arg);
            }

            if (try ics.caller().resolveMaybeUndefVal(uncasted_arg)) |_| {
                has_comptime_args.* = true;
            }

            arg_i.* += 1;
        },
        else => {},
    }

    return null;
}

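/// Fully resolves `param_ty`, then coerces `uncasted_arg` to it.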
fn analyzeCallArg(
    sema: *Sema,
    block: *Block,
    arg_src: LazySrcLoc,
    param_ty: Type,
    uncasted_arg: Air.Inst.Ref,
    opts: CoerceOpts,
) !Air.Inst.Ref {
    try sema.resolveTypeFully(param_ty);
    return sema.coerceExtra(block, param_ty, uncasted_arg, arg_src, opts) catch |err| switch (err) {
        error.NotCoercible => unreachable,
        else => |e| return e,
    };
}

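/// Analyzes a call to a generic function: evaluates the parameter type
/// expressions of the generic function signature with the callsite's argument
/// values plugged in, producing (or deduplicating, via `InternPool`) a
/// monomorphized `func_instance`, then emits a runtime call which passes only
/// the runtime arguments.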
fn instantiateGenericCall(
    sema: *Sema,
    block: *Block,
    func: Air.Inst.Ref,
    func_src: LazySrcLoc,
    call_src: LazySrcLoc,
    ensure_result_used: bool,
    args_info: CallArgsInfo,
    call_tag: Air.Inst.Tag,
    call_dbg_node: ?Zir.Inst.Index,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const gpa = sema.gpa;
    const ip = &mod.intern_pool;

    const func_val = try sema.resolveConstValue(block, func_src, func, "generic function being called must be comptime-known");
    const generic_owner = switch (mod.intern_pool.indexToKey(func_val.toIntern())) {
        .func => func_val.toIntern(),
        .ptr => |ptr| mod.declPtr(ptr.addr.decl).val.toIntern(),
        else => unreachable,
    };
    const generic_owner_func = mod.intern_pool.indexToKey(generic_owner).func;
    const generic_owner_ty_info = mod.typeToFunc(generic_owner_func.ty.toType()).?;

    // Even though there may already be a generic instantiation corresponding
    // to this callsite, we must evaluate the expressions of the generic
    // function signature with the values of the callsite plugged in.
    // Importantly, this may include type coercions that determine whether the
    // instantiation is a match of a previous instantiation.
    // The actual monomorphization happens via adding `func_instance` to
    // `InternPool`.

    const fn_owner_decl = mod.declPtr(generic_owner_func.owner_decl);
    const namespace_index = fn_owner_decl.src_namespace;
    const namespace = mod.namespacePtr(namespace_index);
    const fn_zir = namespace.file_scope.zir;
    const fn_info = fn_zir.getFnInfo(generic_owner_func.zir_body_inst);

    const comptime_args = try sema.arena.alloc(InternPool.Index, args_info.count());
    @memset(comptime_args, .none);

    // We may overestimate the number of runtime args, but this will definitely be sufficient.
    const max_runtime_args = args_info.count() - @popCount(generic_owner_ty_info.comptime_bits);
    var runtime_args = try std.ArrayListUnmanaged(Air.Inst.Ref).initCapacity(sema.arena, max_runtime_args);

    // Re-run the block that creates the function, with the comptime parameters
    // pre-populated inside `inst_map`. This causes `param_comptime` and
    // `param_anytype_comptime` ZIR instructions to be ignored, resulting in a
    // new, monomorphized function, with the comptime parameters elided.
    var child_sema: Sema = .{
        .mod = mod,
        .gpa = gpa,
        .arena = sema.arena,
        .code = fn_zir,
        // We pass the generic callsite's owner decl here because whatever `Decl`
        // dependencies are chased at this point should be attached to the
        // callsite, not the `Decl` associated with the `func_instance`.
        .owner_decl = sema.owner_decl,
        .owner_decl_index = sema.owner_decl_index,
        .func_index = sema.owner_func_index,
        // This may not be known yet, since the calling convention could be generic, but there
        // should be no illegal instructions encountered while creating the function anyway.
        .func_is_naked = false,
        .fn_ret_ty = Type.void,
        .fn_ret_ty_ies = null,
        .owner_func_index = .none,
        .comptime_args = comptime_args,
        .generic_owner = generic_owner,
        .generic_call_src = call_src,
        .generic_call_decl = block.src_decl.toOptional(),
        .branch_quota = sema.branch_quota,
        .branch_count = sema.branch_count,
        .comptime_mutable_decls = sema.comptime_mutable_decls,
    };
    defer child_sema.deinit();

    var wip_captures = try WipCaptureScope.init(gpa, sema.owner_decl.src_scope);
    defer wip_captures.deinit();

    var child_block: Block = .{
        .parent = null,
        .sema = &child_sema,
        .src_decl = generic_owner_func.owner_decl,
        .namespace = namespace_index,
        .wip_capture_scope = wip_captures.scope,
        .instructions = .{},
        .inlining = null,
        .is_comptime = true,
    };
    defer child_block.instructions.deinit(gpa);

    try child_sema.inst_map.ensureSpaceForInstructions(gpa, fn_info.param_body);

    for (fn_info.param_body[0..args_info.count()], 0..) |param_inst, arg_index| {
        const param_tag = fn_zir.instructions.items(.tag)[param_inst];

        const param_ty = switch (generic_owner_ty_info.param_types.get(ip)[arg_index]) {
            else => |ty| ty.toType(), // parameter is not generic, so type is already resolved
            .generic_poison_type => param_ty: {
                // We have every parameter before this one, so can resolve this parameter's type now.
                // However, first check the param type, since it may be anytype.
                switch (param_tag) {
                    .param_anytype, .param_anytype_comptime => {
                        // The parameter doesn't have a type.
                        break :param_ty Type.generic_poison;
                    },
                    .param, .param_comptime => {
                        // We now know every prior parameter, so can resolve this
                        // parameter's type. The child sema has these types.
                        const param_data = fn_zir.instructions.items(.data)[param_inst].pl_tok;
                        const param_extra = fn_zir.extraData(Zir.Inst.Param, param_data.payload_index);
                        const param_ty_body = fn_zir.extra[param_extra.end..][0..param_extra.data.body_len];

                        // Make sure any nested instructions don't clobber our work.
                        const prev_params = child_block.params;
                        const prev_no_partial_func_ty = child_sema.no_partial_func_ty;
                        const prev_generic_owner = child_sema.generic_owner;
                        const prev_generic_call_src = child_sema.generic_call_src;
                        const prev_generic_call_decl = child_sema.generic_call_decl;
                        child_block.params = .{};
                        child_sema.no_partial_func_ty = true;
                        child_sema.generic_owner = .none;
                        child_sema.generic_call_src = .unneeded;
                        child_sema.generic_call_decl = .none;
                        defer {
                            child_block.params = prev_params;
                            child_sema.no_partial_func_ty = prev_no_partial_func_ty;
                            child_sema.generic_owner = prev_generic_owner;
                            child_sema.generic_call_src = prev_generic_call_src;
                            child_sema.generic_call_decl = prev_generic_call_decl;
                        }

                        const param_ty_inst = try child_sema.resolveBody(&child_block, param_ty_body, param_inst);
                        break :param_ty try child_sema.analyzeAsType(&child_block, param_data.src(), param_ty_inst);
                    },
                    else => unreachable,
                }
            },
        };
        const arg_ref = try args_info.analyzeArg(sema, block, arg_index, param_ty, generic_owner_ty_info, func);
        const arg_ty = sema.typeOf(arg_ref);
        if (arg_ty.zigTypeTag(mod) == .NoReturn) {
            // This terminates argument analysis.
            return arg_ref;
        }

        const arg_is_comptime = switch (param_tag) {
            .param_comptime, .param_anytype_comptime => true,
            .param, .param_anytype => try sema.typeRequiresComptime(arg_ty),
            else => unreachable,
        };

        if (arg_is_comptime) {
            if (try sema.resolveMaybeUndefVal(arg_ref)) |arg_val| {
                comptime_args[arg_index] = arg_val.toIntern();
                child_sema.inst_map.putAssumeCapacityNoClobber(
                    param_inst,
                    Air.internedToRef(arg_val.toIntern()),
                );
            } else switch (param_tag) {
                .param_comptime,
                .param_anytype_comptime,
                => return sema.failWithOwnedErrorMsg(msg: {
                    const arg_src = args_info.argSrc(block, arg_index);
                    const msg = try sema.errMsg(block, arg_src, "runtime-known argument passed to comptime parameter", .{});
                    errdefer msg.destroy(sema.gpa);
                    const param_src = switch (param_tag) {
                        .param_comptime => fn_zir.instructions.items(.data)[param_inst].pl_tok.src(),
                        .param_anytype_comptime => fn_zir.instructions.items(.data)[param_inst].str_tok.src(),
                        else => unreachable,
                    };
                    try child_sema.errNote(&child_block, param_src, msg, "declared comptime here", .{});
                    break :msg msg;
                }),

                .param,
                .param_anytype,
                => return sema.failWithOwnedErrorMsg(msg: {
                    const arg_src = args_info.argSrc(block, arg_index);
                    const msg = try sema.errMsg(block, arg_src, "runtime-known argument passed to parameter of comptime-only type", .{});
                    errdefer msg.destroy(sema.gpa);
                    const param_src = switch (param_tag) {
                        .param => fn_zir.instructions.items(.data)[param_inst].pl_tok.src(),
                        .param_anytype => fn_zir.instructions.items(.data)[param_inst].str_tok.src(),
                        else => unreachable,
                    };
                    try child_sema.errNote(&child_block, param_src, msg, "declared here", .{});
                    const src_decl = mod.declPtr(block.src_decl);
                    try sema.explainWhyTypeIsComptime(msg, arg_src.toSrcLoc(src_decl, mod), arg_ty);
                    break :msg msg;
                }),

                else => unreachable,
            }
        } else {
            // The parameter is runtime-known.
            try sema.queueFullTypeResolution(arg_ty);
            child_sema.inst_map.putAssumeCapacityNoClobber(param_inst, try child_block.addInst(.{
                .tag = .arg,
                .data = .{ .arg = .{
                    .ty = Air.internedToRef(arg_ty.toIntern()),
                    .src_index = @intCast(arg_index),
                } },
            }));
            const param_name: Zir.NullTerminatedString = switch (param_tag) {
                .param_anytype => @enumFromInt(fn_zir.instructions.items(.data)[param_inst].str_tok.start),
                .param => name: {
                    const inst_data = fn_zir.instructions.items(.data)[param_inst].pl_tok;
                    const extra = fn_zir.extraData(Zir.Inst.Param, inst_data.payload_index);
                    break :name @enumFromInt(extra.data.name);
                },
                else => unreachable,
            };
            try child_block.params.append(sema.arena, .{
                .ty = arg_ty.toIntern(), // This is the type after coercion
                .is_comptime = false, // We're adding only runtime args to the instantiation
                .name = param_name,
            });
            runtime_args.appendAssumeCapacity(arg_ref);
        }
    }

    // We've already handled parameters, so don't resolve the whole body. Instead, just
    // do the instructions after the params (i.e. the func itself).
    const new_func_inst = try child_sema.resolveBody(&child_block, fn_info.param_body[args_info.count()..], fn_info.param_body_inst);
    const callee_index = (child_sema.resolveConstValue(&child_block, .unneeded, new_func_inst, undefined) catch unreachable).toIntern();

    const callee = mod.funcInfo(callee_index);
    callee.branchQuota(ip).* = @max(callee.branchQuota(ip).*, sema.branch_quota);

    try sema.addReferencedBy(block, call_src, callee.owner_decl);

    // Make a runtime call to the new function, making sure to omit the comptime args.
    const func_ty = callee.ty.toType();
    const func_ty_info = mod.typeToFunc(func_ty).?;

    try wip_captures.finalize();

    // If the call evaluated to a return type that requires comptime, never mind
    // our generic instantiation. Instead we need to perform a comptime call.
    if (try sema.typeRequiresComptime(func_ty_info.return_type.toType())) {
        return error.ComptimeReturn;
    }
    // Similarly, if the call evaluated to a generic type we need to instead
    // call it inline.
    if (func_ty_info.is_generic or func_ty_info.cc == .Inline) {
        return error.GenericPoison;
    }

    try sema.queueFullTypeResolution(func_ty_info.return_type.toType());

    if (call_dbg_node) |some| try sema.zirDbgStmt(block, some);

    if (sema.owner_func_index != .none and
        func_ty_info.return_type.toType().isError(mod))
    {
        ip.funcAnalysis(sema.owner_func_index).calls_or_awaits_errorable_fn = true;
    }

    try mod.ensureFuncBodyAnalysisQueued(callee_index);

    try sema.air_extra.ensureUnusedCapacity(sema.gpa, @typeInfo(Air.Call).Struct.fields.len + runtime_args.items.len);
    const result = try block.addInst(.{
        .tag = call_tag,
        .data = .{ .pl_op = .{
            .operand = Air.internedToRef(callee_index),
            .payload = sema.addExtraAssumeCapacity(Air.Call{
                .args_len = @intCast(runtime_args.items.len),
            }),
        } },
    });
    sema.appendRefsAssumeCapacity(runtime_args.items);

    if (ensure_result_used) {
        try sema.ensureResultUsed(block, sema.typeOf(result), call_src);
    }
    if (call_tag == .call_always_tail) {
        return sema.handleTailCall(block, call_src, func_ty, result);
    }
    if (func_ty.fnReturnType(mod).isNoReturn(mod)) {
        _ = try block.addNoOp(.unreach);
        return Air.Inst.Ref.unreachable_value;
    }
    return result;
}

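/// Recursively resolves any lazy values among the comptime-known field values
/// of a tuple type.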
fn resolveTupleLazyValues(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError!void {
    const mod = sema.mod;
    const tuple = switch (mod.intern_pool.indexToKey(ty.toIntern())) {
        .anon_struct_type => |tuple| tuple,
        else => return,
    };
    for (tuple.types, tuple.values) |field_ty, field_val| {
        try sema.resolveTupleLazyValues(block, src, field_ty.toType());
        if (field_val == .none) continue;
        // TODO: mutate in intern pool
        _ = try sema.resolveLazyValue(field_val.toValue());
    }
}

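/// Emits a `dbg_inline_begin` or `dbg_inline_end` instruction for an inline
/// call, unless debug info is being stripped or the call is recursive
/// (`old_func == new_func`).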
fn emitDbgInline(
    sema: *Sema,
    block: *Block,
    old_func: InternPool.Index,
    new_func: InternPool.Index,
    new_func_ty: Type,
    tag: Air.Inst.Tag,
) CompileError!void {
    const mod = sema.mod;
    if (mod.comp.bin_file.options.strip) return;

    // Recursive inline call; no dbg_inline needed.
    if (old_func == new_func) return;

    _ = try block.addInst(.{
        .tag = tag,
        .data = .{ .ty_fn = .{
            .ty = Air.internedToRef(new_func_ty.toIntern()),
            .func = new_func,
        } },
    });
}

fn zirIntType(sema: *Sema, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const int_type = sema.code.instructions.items(.data)[inst].int_type;
    const ty = try mod.intType(int_type.signedness, int_type.bit_count);
    return Air.internedToRef(ty.toIntern());
}

fn zirOptionalType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
    const operand_src: LazySrcLoc = .{ .node_offset_un_op = inst_data.src_node };
    const child_type = try sema.resolveType(block, operand_src, inst_data.operand);
    if (child_type.zigTypeTag(mod) == .Opaque) {
        return sema.fail(block, operand_src, "opaque type '{}' cannot be optional", .{child_type.fmt(mod)});
    } else if (child_type.zigTypeTag(mod) == .Null) {
        return sema.fail(block, operand_src, "type '{}' cannot be optional", .{child_type.fmt(mod)});
    }
    const opt_type = try mod.optionalType(child_type.toIntern());

    return Air.internedToRef(opt_type.toIntern());
}

fn zirElemTypeIndex(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const bin = sema.code.instructions.items(.data)[inst].bin;
    const indexable_ty = sema.resolveType(block, .unneeded, bin.lhs) catch |err| switch (err) {
        // Since this is a ZIR instruction that returns a type, encountering
        // generic poison should not result in a failed compilation, but the
        // generic poison type. This prevents unnecessary failures when
        // constructing types at compile-time.
        error.GenericPoison => return .generic_poison_type,
        else => |e| return e,
    };
    try sema.resolveTypeFields(indexable_ty);
    assert(indexable_ty.isIndexable(mod)); // validated by a previous instruction
    if (indexable_ty.zigTypeTag(mod) == .Struct) {
        const elem_type = indexable_ty.structFieldType(@intFromEnum(bin.rhs), mod);
        return Air.internedToRef(elem_type.toIntern());
    } else {
        const elem_type = indexable_ty.elemType2(mod);
        return Air.internedToRef(elem_type.toIntern());
    }
}

fn zirElemType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const un_node = sema.code.instructions.items(.data)[inst].un_node;
    const ptr_ty = try sema.resolveType(block, .unneeded, un_node.operand);
    assert(ptr_ty.zigTypeTag(mod) == .Pointer); // validated by a previous instruction
    return Air.internedToRef(ptr_ty.childType(mod).toIntern());
}

fn zirVectorElemType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const un_node = sema.code.instructions.items(.data)[inst].un_node;
    const vec_ty = sema.resolveType(block, .unneeded, un_node.operand) catch |err| switch (err) {
        // Since this is a ZIR instruction that returns a type, encountering
        // generic poison should not result in a failed compilation, but the
        // generic poison type. This prevents unnecessary failures when
        // constructing types at compile-time.
        error.GenericPoison => return .generic_poison_type,
        else => |e| return e,
    };
    if (!vec_ty.isVector(mod)) {
        return sema.fail(block, un_node.src(), "expected vector type, found '{}'", .{vec_ty.fmt(mod)});
    }
    return Air.internedToRef(vec_ty.childType(mod).toIntern());
}

fn zirVectorType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const elem_type_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    const len_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
    const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
    const len = @as(u32, @intCast(try sema.resolveInt(block, len_src, extra.lhs, Type.u32, "vector length must be comptime-known")));
    const elem_type = try sema.resolveType(block, elem_type_src, extra.rhs);
    try sema.checkVectorElemType(block, elem_type_src, elem_type);
    const vector_type = try mod.vectorType(.{
        .len = len,
        .child = elem_type.toIntern(),
    });
    return Air.internedToRef(vector_type.toIntern());
}

fn zirArrayType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
    const len_src: LazySrcLoc = .{ .node_offset_array_type_len = inst_data.src_node };
    const elem_src: LazySrcLoc = .{ .node_offset_array_type_elem = inst_data.src_node };
    const len = try sema.resolveInt(block, len_src, extra.lhs, Type.usize, "array length must be comptime-known");
    const elem_type = try sema.resolveType(block, elem_src, extra.rhs);
    try sema.validateArrayElemType(block, elem_type, elem_src);
    const array_ty = try sema.mod.arrayType(.{
        .len = len,
        .child = elem_type.toIntern(),
    });

    return Air.internedToRef(array_ty.toIntern());
}

fn zirArrayTypeSentinel(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const extra = sema.code.extraData(Zir.Inst.ArrayTypeSentinel, inst_data.payload_index).data;
    const len_src: LazySrcLoc = .{ .node_offset_array_type_len = inst_data.src_node };
    const sentinel_src: LazySrcLoc = .{ .node_offset_array_type_sentinel = inst_data.src_node };
    const elem_src: LazySrcLoc = .{ .node_offset_array_type_elem = inst_data.src_node };
    const len = try sema.resolveInt(block, len_src, extra.len, Type.usize, "array length must be comptime-known");
    const elem_type = try sema.resolveType(block, elem_src, extra.elem_type);
    try sema.validateArrayElemType(block, elem_type, elem_src);
    const uncasted_sentinel = try sema.resolveInst(extra.sentinel);
    const sentinel = try sema.coerce(block, elem_type, uncasted_sentinel, sentinel_src);
    const sentinel_val = try sema.resolveConstValue(block, sentinel_src, sentinel, "array sentinel value must be comptime-known");
    const array_ty = try sema.mod.arrayType(.{
        .len = len,
        .sentinel = sentinel_val.toIntern(),
        .child = elem_type.toIntern(),
    });

    return Air.internedToRef(array_ty.toIntern());
}

fn validateArrayElemType(sema: *Sema, block: *Block, elem_type: Type, elem_src: LazySrcLoc) !void {
    const mod = sema.mod;
    if (elem_type.zigTypeTag(mod) == .Opaque) {
        return sema.fail(block, elem_src, "array of opaque type '{}' not allowed", .{elem_type.fmt(mod)});
    } else if (elem_type.zigTypeTag(mod) == .NoReturn) {
        return sema.fail(block, elem_src, "array of 'noreturn' not allowed", .{});
    }
}

fn zirAnyframeType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
    if (true) {
        return sema.failWithUseOfAsync(block, inst_data.src());
    }
    const mod = sema.mod;
    const operand_src: LazySrcLoc = .{ .node_offset_anyframe_type = inst_data.src_node };
    const return_type = try sema.resolveType(block, operand_src, inst_data.operand);
    const anyframe_type = try mod.anyframeType(return_type);

    return Air.internedToRef(anyframe_type.toIntern());
}

fn zirErrorUnionType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
    const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
    const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node };
    const error_set = try sema.resolveType(block, lhs_src, extra.lhs);
    const payload = try sema.resolveType(block, rhs_src, extra.rhs);

    if (error_set.zigTypeTag(mod) != .ErrorSet) {
        return sema.fail(block, lhs_src, "expected error set type, found '{}'", .{
            error_set.fmt(mod),
        });
    }
    try sema.validateErrorUnionPayloadType(block, payload, rhs_src);
    const err_union_ty = try mod.errorUnionType(error_set, payload);
    return Air.internedToRef(err_union_ty.toIntern());
}

fn validateErrorUnionPayloadType(sema: *Sema, block: *Block, payload_ty: Type, payload_src: LazySrcLoc) !void {
    const mod = sema.mod;
    if (payload_ty.zigTypeTag(mod) == .Opaque) {
        return sema.fail(block, payload_src, "error union with payload of opaque type '{}' not allowed", .{
            payload_ty.fmt(mod),
        });
    } else if (payload_ty.zigTypeTag(mod) == .ErrorSet) {
        return sema.fail(block, payload_src, "error union with payload of error set type '{}' not allowed", .{
            payload_ty.fmt(mod),
        });
    }
}

fn zirErrorValue(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    _ = block;
    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[inst].str_tok;
    const name = try mod.intern_pool.getOrPutString(sema.gpa, inst_data.get(sema.code));
    _ = try mod.getErrorValue(name);
    // Create an error set type with only this error value, and return the value.
    const error_set_type = try mod.singleErrorSetType(name);
    return Air.internedToRef((try mod.intern(.{ .err = .{
        .ty = error_set_type.toIntern(),
        .name = name,
    } })));
}

fn zirIntFromError(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const mod = sema.mod;
    const ip = &mod.intern_pool;
    const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
    const src = LazySrcLoc.nodeOffset(extra.node);
    const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
    const uncasted_operand = try sema.resolveInst(extra.operand);
    const operand = try sema.coerce(block, Type.anyerror, uncasted_operand, operand_src);

    if (try sema.resolveMaybeUndefVal(operand)) |val| {
        if (val.isUndef(mod)) {
            return mod.undefRef(Type.err_int);
        }
        const err_name = ip.indexToKey(val.toIntern()).err.name;
        return Air.internedToRef((try mod.intValue(
            Type.err_int,
            try mod.getErrorValue(err_name),
        )).toIntern());
    }

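    // The operand is runtime-known; however, if its (possibly inferred) error
    // set resolves to zero or one distinct errors, the integer result is
    // still comptime-known.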
    const op_ty = sema.typeOf(uncasted_operand);
    switch (try sema.resolveInferredErrorSetTy(block, src, op_ty.toIntern())) {
        .anyerror_type => {},
        else => |err_set_ty_index| {
            const names = ip.indexToKey(err_set_ty_index).error_set_type.names;
            switch (names.len) {
                0 => return Air.internedToRef((try mod.intValue(Type.err_int, 0)).toIntern()),
                1 => {
                    const int: Module.ErrorInt = @intCast(mod.global_error_set.getIndex(names.get(ip)[0]).?);
                    return mod.intRef(Type.err_int, int);
                },
                else => {},
            }
        },
    }

    try sema.requireRuntimeBlock(block, src, operand_src);
    return block.addBitCast(Type.err_int, operand);
}

fn zirErrorFromInt(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const mod = sema.mod;
    const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
    const src = LazySrcLoc.nodeOffset(extra.node);
    const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
    const uncasted_operand = try sema.resolveInst(extra.operand);
    const operand = try sema.coerce(block, Type.err_int, uncasted_operand, operand_src);

    if (try sema.resolveDefinedValue(block, operand_src, operand)) |value| {
        const int = try sema.usizeCast(block, operand_src, value.toUnsignedInt(mod));
        if (int > mod.global_error_set.count() or int == 0)
            return sema.fail(block, operand_src, "integer value '{d}' represents no error", .{int});
        return Air.internedToRef((try mod.intern(.{ .err = .{
            .ty = .anyerror_type,
            .name = mod.global_error_set.keys()[int],
        } })));
    }
    try sema.requireRuntimeBlock(block, src, operand_src);
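    // Runtime safety check: a valid error code is non-zero and less than the
    // number of errors in the global error set.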
    if (block.wantSafety()) {
        const is_lt_len = try block.addUnOp(.cmp_lt_errors_len, operand);
        const zero_val = Air.internedToRef((try mod.intValue(Type.err_int, 0)).toIntern());
        const is_non_zero = try block.addBinOp(.cmp_neq, operand, zero_val);
        const ok = try block.addBinOp(.bit_and, is_lt_len, is_non_zero);
        try sema.addSafetyCheck(block, src, ok, .invalid_error_code);
    }
    return block.addInst(.{
        .tag = .bitcast,
        .data = .{ .ty_op = .{
            .ty = Air.Inst.Ref.anyerror_type,
            .operand = operand,
        } },
    });
}

fn zirMergeErrorSets(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const mod = sema.mod;
    const ip = &mod.intern_pool;
    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
    const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node };
    const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
    const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node };
    const lhs = try sema.resolveInst(extra.lhs);
    const rhs = try sema.resolveInst(extra.rhs);
    if (sema.typeOf(lhs).zigTypeTag(mod) == .Bool and sema.typeOf(rhs).zigTypeTag(mod) == .Bool) {
        const msg = msg: {
            const msg = try sema.errMsg(block, lhs_src, "expected error set type, found 'bool'", .{});
            errdefer msg.destroy(sema.gpa);
            try sema.errNote(block, src, msg, "'||' merges error sets; 'or' performs boolean OR", .{});
            break :msg msg;
        };
        return sema.failWithOwnedErrorMsg(msg);
    }
    const lhs_ty = try sema.analyzeAsType(block, lhs_src, lhs);
    const rhs_ty = try sema.analyzeAsType(block, rhs_src, rhs);
    if (lhs_ty.zigTypeTag(mod) != .ErrorSet)
        return sema.fail(block, lhs_src, "expected error set type, found '{}'", .{lhs_ty.fmt(mod)});
    if (rhs_ty.zigTypeTag(mod) != .ErrorSet)
        return sema.fail(block, rhs_src, "expected error set type, found '{}'", .{rhs_ty.fmt(mod)});

    // Anything merged with anyerror is anyerror.
    if (lhs_ty.toIntern() == .anyerror_type or rhs_ty.toIntern() == .anyerror_type) {
        return Air.Inst.Ref.anyerror_type;
    }

    if (ip.isInferredErrorSetType(lhs_ty.toIntern())) {
        switch (try sema.resolveInferredErrorSet(block, src, lhs_ty.toIntern())) {
            // isAnyError might have changed from a false negative to a true
            // positive after resolution.
            .anyerror_type => return .anyerror_type,
            else => {},
        }
    }
    if (ip.isInferredErrorSetType(rhs_ty.toIntern())) {
        switch (try sema.resolveInferredErrorSet(block, src, rhs_ty.toIntern())) {
            // isAnyError might have changed from a false negative to a true
            // positive after resolution.
            .anyerror_type => return .anyerror_type,
            else => {},
        }
    }

    const err_set_ty = try sema.errorSetMerge(lhs_ty, rhs_ty);
    return Air.internedToRef(err_set_ty.toIntern());
}

fn zirEnumLiteral(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    _ = block;
    const tracy = trace(@src());
    defer tracy.end();

    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[inst].str_tok;
    const name = inst_data.get(sema.code);
    return Air.internedToRef((try mod.intern(.{
        .enum_literal = try mod.intern_pool.getOrPutString(sema.gpa, name),
    })));
}

fn zirIntFromEnum(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
    const src = inst_data.src();
    const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    const operand = try sema.resolveInst(inst_data.operand);
    const operand_ty = sema.typeOf(operand);

    const enum_tag: Air.Inst.Ref = switch (operand_ty.zigTypeTag(mod)) {
        .Enum => operand,
        .Union => blk: {
            try sema.resolveTypeFields(operand_ty);
            const tag_ty = operand_ty.unionTagType(mod) orelse {
                return sema.fail(
                    block,
                    operand_src,
                    "untagged union '{}' cannot be converted to integer",
                    .{operand_ty.fmt(mod)},
                );
            };
            break :blk try sema.unionToTag(block, tag_ty, operand, operand_src);
        },
        else => {
            return sema.fail(block, operand_src, "expected enum or tagged union, found '{}'", .{
                operand_ty.fmt(mod),
            });
        },
    };
    const enum_tag_ty = sema.typeOf(enum_tag);

    const int_tag_ty = enum_tag_ty.intTagType(mod);

    if (try sema.typeHasOnePossibleValue(enum_tag_ty)) |opv| {
        return Air.internedToRef((try mod.getCoerced(opv, int_tag_ty)).toIntern());
    }

    if (try sema.resolveMaybeUndefVal(enum_tag)) |enum_tag_val| {
        const val = try enum_tag_val.intFromEnum(enum_tag_ty, mod);
        return Air.internedToRef(val.toIntern());
    }

    try sema.requireRuntimeBlock(block, src, operand_src);
    return block.addBitCast(int_tag_ty, enum_tag);
}

fn zirEnumFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
    const src = inst_data.src();
    const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    const dest_ty = try sema.resolveDestType(block, src, extra.lhs, .remove_eu_opt, "@enumFromInt");
    const operand = try sema.resolveInst(extra.rhs);

    if (dest_ty.zigTypeTag(mod) != .Enum) {
        return sema.fail(block, src, "expected enum, found '{}'", .{dest_ty.fmt(mod)});
    }
    _ = try sema.checkIntType(block, operand_src, sema.typeOf(operand));

    if (try sema.resolveMaybeUndefVal(operand)) |int_val| {
        if (dest_ty.isNonexhaustiveEnum(mod)) {
            const int_tag_ty = dest_ty.intTagType(mod);
            if (try sema.intFitsInType(int_val, int_tag_ty, null)) {
                return Air.internedToRef((try mod.getCoerced(int_val, dest_ty)).toIntern());
            }
            const msg = msg: {
                const msg = try sema.errMsg(
                    block,
                    src,
                    "int value '{}' out of range of non-exhaustive enum '{}'",
                    .{ int_val.fmtValue(sema.typeOf(operand), mod), dest_ty.fmt(mod) },
                );
                errdefer msg.destroy(sema.gpa);
                try sema.addDeclaredHereNote(msg, dest_ty);
                break :msg msg;
            };
            return sema.failWithOwnedErrorMsg(msg);
        }
        if (int_val.isUndef(mod)) {
            return sema.failWithUseOfUndef(block, operand_src);
        }
        if (!(try sema.enumHasInt(dest_ty, int_val))) {
            const msg = msg: {
                const msg = try sema.errMsg(
                    block,
                    src,
                    "enum '{}' has no tag with value '{}'",
                    .{ dest_ty.fmt(mod), int_val.fmtValue(sema.typeOf(operand), mod) },
                );
                errdefer msg.destroy(sema.gpa);
                try sema.addDeclaredHereNote(msg, dest_ty);
                break :msg msg;
            };
            return sema.failWithOwnedErrorMsg(msg);
        }
        return Air.internedToRef((try mod.getCoerced(int_val, dest_ty)).toIntern());
    }

    if (try sema.typeHasOnePossibleValue(dest_ty)) |opv| {
        const result = Air.internedToRef(opv.toIntern());
        // The operand is runtime-known but the result is comptime-known. In
        // this case we still need a safety check.
        // TODO add a safety check here. we can't use is_named_enum_value -
        // it needs to convert the enum back to int and make sure it equals the operand int.
        return result;
    }

    try sema.requireRuntimeBlock(block, src, operand_src);
    const result = try block.addTyOp(.intcast, dest_ty, operand);
    if (block.wantSafety() and !dest_ty.isNonexhaustiveEnum(mod) and
        mod.backendSupportsFeature(.is_named_enum_value))
    {
        const ok = try block.addUnOp(.is_named_enum_value, result);
        try sema.addSafetyCheck(block, src, ok, .invalid_enum_value);
    }
    return result;
}

/// Pointer in, pointer out.
fn zirOptionalPayloadPtr(
    sema: *Sema,
    block: *Block,
    inst: Zir.Inst.Index,
    safety_check: bool,
) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
    const optional_ptr = try sema.resolveInst(inst_data.operand);
    const src = inst_data.src();

    return sema.analyzeOptionalPayloadPtr(block, src, optional_ptr, safety_check, false);
}

fn analyzeOptionalPayloadPtr(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    optional_ptr: Air.Inst.Ref,
    safety_check: bool,
    initializing: bool,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const optional_ptr_ty = sema.typeOf(optional_ptr);
    assert(optional_ptr_ty.zigTypeTag(mod) == .Pointer);

    const opt_type = optional_ptr_ty.childType(mod);
    if (opt_type.zigTypeTag(mod) != .Optional) {
        return sema.fail(block, src, "expected optional type, found '{}'", .{opt_type.fmt(mod)});
    }

    const child_type = opt_type.optionalChild(mod);
    const child_pointer = try mod.ptrType(.{
        .child = child_type.toIntern(),
        .flags = .{
            .is_const = optional_ptr_ty.isConstPtr(mod),
            .address_space = optional_ptr_ty.ptrAddressSpace(mod),
        },
    });

    if (try sema.resolveDefinedValue(block, src, optional_ptr)) |ptr_val| {
        if (initializing) {
            if (!ptr_val.isComptimeMutablePtr(mod)) {
                // If the pointer resulting from this function was stored at comptime,
                // the optional non-null bit would be set that way. But in this case,
                // we need to emit a runtime instruction to do it.
                _ = try block.addTyOp(.optional_payload_ptr_set, child_pointer, optional_ptr);
            }
            return Air.internedToRef((try mod.intern(.{ .ptr = .{
                .ty = child_pointer.toIntern(),
                .addr = .{ .opt_payload = ptr_val.toIntern() },
            } })));
        }
        if (try sema.pointerDeref(block, src, ptr_val, optional_ptr_ty)) |val| {
            if (val.isNull(mod)) {
                return sema.fail(block, src, "unable to unwrap null", .{});
            }
            // The same Value represents the pointer to the optional and the payload.
            return Air.internedToRef((try mod.intern(.{ .ptr = .{
                .ty = child_pointer.toIntern(),
                .addr = .{ .opt_payload = ptr_val.toIntern() },
            } })));
        }
    }

    try sema.requireRuntimeBlock(block, src, null);
    if (safety_check and block.wantSafety()) {
        const is_non_null = try block.addUnOp(.is_non_null_ptr, optional_ptr);
        try sema.addSafetyCheck(block, src, is_non_null, .unwrap_null);
    }
    const air_tag: Air.Inst.Tag = if (initializing)
        .optional_payload_ptr_set
    else
        .optional_payload_ptr;
    return block.addTyOp(air_tag, child_pointer, optional_ptr);
}

/// Value in, value out.
fn zirOptionalPayload(
    sema: *Sema,
    block: *Block,
    inst: Zir.Inst.Index,
    safety_check: bool,
) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
    const src = inst_data.src();
    const operand = try sema.resolveInst(inst_data.operand);
    const operand_ty = sema.typeOf(operand);
    const result_ty = switch (operand_ty.zigTypeTag(mod)) {
        .Optional => operand_ty.optionalChild(mod),
        .Pointer => t: {
            if (operand_ty.ptrSize(mod) != .C) {
                return sema.failWithExpectedOptionalType(block, src, operand_ty);
            }
            // TODO https://github.com/ziglang/zig/issues/6597
            if (true) break :t operand_ty;
            const ptr_info = operand_ty.ptrInfo(mod);
            break :t try mod.ptrType(.{
                .child = ptr_info.child,
                .flags = .{
                    .alignment = ptr_info.flags.alignment,
                    .is_const = ptr_info.flags.is_const,
                    .is_volatile = ptr_info.flags.is_volatile,
                    .is_allowzero = ptr_info.flags.is_allowzero,
                    .address_space = ptr_info.flags.address_space,
                },
            });
        },
        else => return sema.failWithExpectedOptionalType(block, src, operand_ty),
    };

    if (try sema.resolveDefinedValue(block, src, operand)) |val| {
        return if (val.optionalValue(mod)) |payload|
            Air.internedToRef(payload.toIntern())
        else
            sema.fail(block, src, "unable to unwrap null", .{});
    }

    try sema.requireRuntimeBlock(block, src, null);
    if (safety_check and block.wantSafety()) {
        const is_non_null = try block.addUnOp(.is_non_null, operand);
        try sema.addSafetyCheck(block, src, is_non_null, .unwrap_null);
    }
    return block.addTyOp(.optional_payload, result_ty, operand);
}

/// Value in, value out
fn zirErrUnionPayload(
    sema: *Sema,
    block: *Block,
    inst: Zir.Inst.Index,
) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
    const src = inst_data.src();
    const operand = try sema.resolveInst(inst_data.operand);
    const operand_src = src;
    const err_union_ty = sema.typeOf(operand);
    if (err_union_ty.zigTypeTag(mod) != .ErrorUnion) {
        return sema.fail(block, operand_src, "expected error union type, found '{}'", .{
            err_union_ty.fmt(mod),
        });
    }
    return sema.analyzeErrUnionPayload(block, src, err_union_ty, operand, operand_src, false);
}

fn analyzeErrUnionPayload(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    err_union_ty: Type,
    operand: Air.Inst.Ref,
    operand_src: LazySrcLoc,
    safety_check: bool,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const payload_ty = err_union_ty.errorUnionPayload(mod);
    if (try sema.resolveDefinedValue(block, operand_src, operand)) |val| {
        if (val.getErrorName(mod).unwrap()) |name| {
            return sema.fail(block, src, "caught unexpected error '{}'", .{name.fmt(&mod.intern_pool)});
        }
        return Air.internedToRef(mod.intern_pool.indexToKey(val.toIntern()).error_union.val.payload);
    }

    try sema.requireRuntimeBlock(block, src, null);

    // If the error set has no fields then no safety check is needed.
    if (safety_check and block.wantSafety() and
        !err_union_ty.errorUnionSet(mod).errorSetIsEmpty(mod))
    {
        try sema.panicUnwrapError(block, src, operand, .unwrap_errunion_err, .is_non_err);
    }

    return block.addTyOp(.unwrap_errunion_payload, payload_ty, operand);
}

/// Pointer in, pointer out.
fn zirErrUnionPayloadPtr(
    sema: *Sema,
    block: *Block,
    inst: Zir.Inst.Index,
) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
    const operand = try sema.resolveInst(inst_data.operand);
    const src = inst_data.src();

    return sema.analyzeErrUnionPayloadPtr(block, src, operand, false, false);
}

fn analyzeErrUnionPayloadPtr(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    operand: Air.Inst.Ref,
    safety_check: bool,
    initializing: bool,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const operand_ty = sema.typeOf(operand);
    assert(operand_ty.zigTypeTag(mod) == .Pointer);

    if (operand_ty.childType(mod).zigTypeTag(mod) != .ErrorUnion) {
        return sema.fail(block, src, "expected error union type, found '{}'", .{
            operand_ty.childType(mod).fmt(mod),
        });
    }

    const err_union_ty = operand_ty.childType(mod);
    const payload_ty = err_union_ty.errorUnionPayload(mod);
    const operand_pointer_ty = try mod.ptrType(.{
        .child = payload_ty.toIntern(),
        .flags = .{
            .is_const = operand_ty.isConstPtr(mod),
            .address_space = operand_ty.ptrAddressSpace(mod),
        },
    });

    if (try sema.resolveDefinedValue(block, src, operand)) |ptr_val| {
        if (initializing) {
            if (!ptr_val.isComptimeMutablePtr(mod)) {
                // If the pointer resulting from this function was stored at comptime,
                // the error union error code would be set that way. But in this case,
                // we need to emit a runtime instruction to do it.
                try sema.requireRuntimeBlock(block, src, null);
                _ = try block.addTyOp(.errunion_payload_ptr_set, operand_pointer_ty, operand);
            }
            return Air.internedToRef((try mod.intern(.{ .ptr = .{
                .ty = operand_pointer_ty.toIntern(),
                .addr = .{ .eu_payload = ptr_val.toIntern() },
            } })));
        }
        if (try sema.pointerDeref(block, src, ptr_val, operand_ty)) |val| {
            if (val.getErrorName(mod).unwrap()) |name| {
                return sema.fail(block, src, "caught unexpected error '{}'", .{name.fmt(&mod.intern_pool)});
            }
            return Air.internedToRef((try mod.intern(.{ .ptr = .{
                .ty = operand_pointer_ty.toIntern(),
                .addr = .{ .eu_payload = ptr_val.toIntern() },
            } })));
        }
    }

    try sema.requireRuntimeBlock(block, src, null);

    // If the error set has no fields then no safety check is needed.
    if (safety_check and block.wantSafety() and
        !err_union_ty.errorUnionSet(mod).errorSetIsEmpty(mod))
    {
        try sema.panicUnwrapError(block, src, operand, .unwrap_errunion_err_ptr, .is_non_err_ptr);
    }

    const air_tag: Air.Inst.Tag = if (initializing)
        .errunion_payload_ptr_set
    else
        .unwrap_errunion_payload_ptr;
    return block.addTyOp(air_tag, operand_pointer_ty, operand);
}

/// Value in, value out
fn zirErrUnionCode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
    const src = inst_data.src();
    const operand = try sema.resolveInst(inst_data.operand);
    return sema.analyzeErrUnionCode(block, src, operand);
}

fn analyzeErrUnionCode(sema: *Sema, block: *Block, src: LazySrcLoc, operand: Air.Inst.Ref) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const operand_ty = sema.typeOf(operand);
    if (operand_ty.zigTypeTag(mod) != .ErrorUnion) {
        return sema.fail(block, src, "expected error union type, found '{}'", .{
            operand_ty.fmt(mod),
        });
    }

    const result_ty = operand_ty.errorUnionSet(mod);

    if (try sema.resolveDefinedValue(block, src, operand)) |val| {
        return Air.internedToRef((try mod.intern(.{ .err = .{
            .ty = result_ty.toIntern(),
            .name = mod.intern_pool.indexToKey(val.toIntern()).error_union.val.err_name,
        } })));
    }

    try sema.requireRuntimeBlock(block, src, null);
    return block.addTyOp(.unwrap_errunion_err, result_ty, operand);
}

/// Pointer in, value out
fn zirErrUnionCodePtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
    const src = inst_data.src();
    const operand = try sema.resolveInst(inst_data.operand);
    const operand_ty = sema.typeOf(operand);
    assert(operand_ty.zigTypeTag(mod) == .Pointer);

    if (operand_ty.childType(mod).zigTypeTag(mod) != .ErrorUnion) {
        return sema.fail(block, src, "expected error union type, found '{}'", .{
            operand_ty.childType(mod).fmt(mod),
        });
    }

    const result_ty = operand_ty.childType(mod).errorUnionSet(mod);

    if (try sema.resolveDefinedValue(block, src, operand)) |pointer_val| {
        if (try sema.pointerDeref(block, src, pointer_val, operand_ty)) |val| {
            assert(val.getErrorName(mod) != .none);
            return Air.internedToRef(val.toIntern());
        }
    }

    try sema.requireRuntimeBlock(block, src, null);
    return block.addTyOp(.unwrap_errunion_err_ptr, result_ty, operand);
}

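/// Analyzes a function declaration ZIR instruction, with or without an
/// inferred error set: resolves the return type (which may itself require
/// evaluating a generic body) and forwards the remaining work to `funcCommon`.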
fn zirFunc(
|
|
sema: *Sema,
|
|
block: *Block,
|
|
inst: Zir.Inst.Index,
|
|
inferred_error_set: bool,
|
|
) CompileError!Air.Inst.Ref {
|
|
const mod = sema.mod;
|
|
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
|
|
const extra = sema.code.extraData(Zir.Inst.Func, inst_data.payload_index);
|
|
const target = sema.mod.getTarget();
|
|
const ret_ty_src: LazySrcLoc = .{ .node_offset_fn_type_ret_ty = inst_data.src_node };
|
|
|
|
var extra_index = extra.end;
|
|
|
|
const ret_ty: Type = switch (extra.data.ret_body_len) {
|
|
0 => Type.void,
|
|
1 => blk: {
|
|
const ret_ty_ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_index]));
|
|
extra_index += 1;
|
|
if (sema.resolveType(block, ret_ty_src, ret_ty_ref)) |ret_ty| {
|
|
break :blk ret_ty;
|
|
} else |err| switch (err) {
|
|
error.GenericPoison => {
|
|
break :blk Type.generic_poison;
|
|
},
|
|
else => |e| return e,
|
|
}
|
|
},
|
|
else => blk: {
|
|
const ret_ty_body = sema.code.extra[extra_index..][0..extra.data.ret_body_len];
|
|
extra_index += ret_ty_body.len;
|
|
|
|
const ret_ty_val = try sema.resolveGenericBody(block, ret_ty_src, ret_ty_body, inst, Type.type, "return type must be comptime-known");
|
|
break :blk ret_ty_val.toType();
|
|
},
|
|
};
|
|
|
|
var src_locs: Zir.Inst.Func.SrcLocs = undefined;
|
|
const has_body = extra.data.body_len != 0;
|
|
if (has_body) {
|
|
extra_index += extra.data.body_len;
|
|
src_locs = sema.code.extraData(Zir.Inst.Func.SrcLocs, extra_index).data;
|
|
}
|
|
|
|
    // If this instruction has a body, it is the type of the `owner_decl`;
    // otherwise it is a function type without a `callconv` attribute and
    // should never default to `.C`.
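    // For example (illustrative): `export fn foo() void {}` has a body and is
    // exported, so it defaults to the C calling convention, whereas the bare
    // type expression `fn () void` resolves with `.Unspecified`.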
    const cc: std.builtin.CallingConvention = if (has_body and mod.declPtr(block.src_decl).is_exported)
        .C
    else
        .Unspecified;

    return sema.funcCommon(
        block,
        inst_data.src_node,
        inst,
        .none,
        target_util.defaultAddressSpace(target, .function),
        .default,
        cc,
        ret_ty,
        false,
        inferred_error_set,
        false,
        has_body,
        src_locs,
        null,
        0,
        false,
    );
}

fn resolveGenericBody(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    body: []const Zir.Inst.Index,
    func_inst: Zir.Inst.Index,
    dest_ty: Type,
    reason: []const u8,
) !Value {
    assert(body.len != 0);

    const err = err: {
        // Make sure any nested param instructions don't clobber our work.
        const prev_params = block.params;
        const prev_no_partial_func_type = sema.no_partial_func_ty;
        const prev_generic_owner = sema.generic_owner;
        const prev_generic_call_src = sema.generic_call_src;
        const prev_generic_call_decl = sema.generic_call_decl;
        block.params = .{};
        sema.no_partial_func_ty = true;
        sema.generic_owner = .none;
        sema.generic_call_src = .unneeded;
        sema.generic_call_decl = .none;
        defer {
            block.params = prev_params;
            sema.no_partial_func_ty = prev_no_partial_func_type;
            sema.generic_owner = prev_generic_owner;
            sema.generic_call_src = prev_generic_call_src;
            sema.generic_call_decl = prev_generic_call_decl;
        }

        const uncasted = sema.resolveBody(block, body, func_inst) catch |err| break :err err;
        const result = sema.coerce(block, dest_ty, uncasted, src) catch |err| break :err err;
        const val = sema.resolveConstValue(block, src, result, reason) catch |err| break :err err;
        return val;
    };
    switch (err) {
        error.GenericPoison => {
            if (dest_ty.toIntern() == .type_type) {
                return Value.generic_poison_type;
            } else {
                return Value.generic_poison;
            }
        },
        else => |e| return e,
    }
}

/// Given a library name, determines whether the name should end up in the
/// `link.File.Options.system_libs` table (for example, libc is always
/// specified via the dedicated flag `link.File.Options.link_libc` instead),
/// and puts it there if it is not already present.
/// It also dupes the library name, which can then be saved as part of the
/// respective `Decl` (either `ExternFn` or `Var`).
/// The liveness of the duped library name is tied to the liveness of `Module`.
/// To deallocate, call `deinit` on the respective `Decl` (`ExternFn` or `Var`).
fn handleExternLibName(
    sema: *Sema,
    block: *Block,
    src_loc: LazySrcLoc,
    lib_name: []const u8,
) CompileError![:0]u8 {
    blk: {
        const mod = sema.mod;
        const comp = mod.comp;
        const target = mod.getTarget();
        log.debug("extern fn symbol expected in lib '{s}'", .{lib_name});
        if (target_util.is_libc_lib_name(target, lib_name)) {
            if (!comp.bin_file.options.link_libc and !comp.bin_file.options.parent_compilation_link_libc) {
                return sema.fail(
                    block,
                    src_loc,
                    "dependency on libc must be explicitly specified in the build command",
                    .{},
                );
            }
            comp.bin_file.options.link_libc = true;
            break :blk;
        }
        if (target_util.is_libcpp_lib_name(target, lib_name)) {
            if (!comp.bin_file.options.link_libcpp) {
                return sema.fail(
                    block,
                    src_loc,
                    "dependency on libc++ must be explicitly specified in the build command",
                    .{},
                );
            }
            comp.bin_file.options.link_libcpp = true;
            break :blk;
        }
        if (mem.eql(u8, lib_name, "unwind")) {
            comp.bin_file.options.link_libunwind = true;
            break :blk;
        }
        if (!target.isWasm() and !comp.bin_file.options.pic) {
            return sema.fail(
                block,
                src_loc,
                "dependency on dynamic library '{s}' requires enabling Position Independent Code. Fixed by '-l{s}' or '-fPIC'.",
                .{ lib_name, lib_name },
            );
        }
        comp.addLinkLib(lib_name) catch |err| {
            return sema.fail(block, src_loc, "unable to add link lib '{s}': {s}", .{
                lib_name, @errorName(err),
            });
        };
    }
    return sema.gpa.dupeZ(u8, lib_name);
}
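
// Illustrative example (library name chosen for illustration): a declaration
// such as
//
//     extern "m" fn sin(x: f64) f64;
//
// routes the name "m" through `handleExternLibName`, which either flips a
// dedicated linker flag (for libc, libc++, and "unwind") or adds the name to
// the general system-libs set.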

/// These are calling conventions that are confirmed to work with variadic
/// functions. Any calling convention not included here either has not yet
/// been verified to work with variadic functions or simply does not support
/// them.
const calling_conventions_supporting_var_args = [_]std.builtin.CallingConvention{
    .C,
};
fn callConvSupportsVarArgs(cc: std.builtin.CallingConvention) bool {
    return for (calling_conventions_supporting_var_args) |supported_cc| {
        if (cc == supported_cc) return true;
    } else false;
}
fn checkCallConvSupportsVarArgs(sema: *Sema, block: *Block, src: LazySrcLoc, cc: std.builtin.CallingConvention) CompileError!void {
    const CallingConventionsSupportingVarArgsList = struct {
        pub fn format(_: @This(), comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void {
            _ = fmt;
            _ = options;
            for (calling_conventions_supporting_var_args, 0..) |cc_inner, i| {
                if (i != 0)
                    try writer.writeAll(", ");
                try writer.print("'.{s}'", .{@tagName(cc_inner)});
            }
        }
    };

    if (!callConvSupportsVarArgs(cc)) {
        const msg = msg: {
            const msg = try sema.errMsg(block, src, "variadic function does not support '.{s}' calling convention", .{@tagName(cc)});
            errdefer msg.destroy(sema.gpa);
            try sema.errNote(block, src, msg, "supported calling conventions: {}", .{CallingConventionsSupportingVarArgsList{}});
            break :msg msg;
        };
        return sema.failWithOwnedErrorMsg(msg);
    }
}
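
// For example (illustrative): a variadic declaration such as
//
//     extern fn printf(format: [*:0]const u8, ...) c_int;
//
// uses the C calling convention and passes this check, while a variadic
// function with an unsupported `callconv` is rejected with the error
// constructed above.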

const Section = union(enum) {
    generic,
    default,
    explicit: InternPool.NullTerminatedString,
};

fn funcCommon(
    sema: *Sema,
    block: *Block,
    src_node_offset: i32,
    func_inst: Zir.Inst.Index,
    /// null means generic poison
    alignment: ?Alignment,
    /// null means generic poison
    address_space: ?std.builtin.AddressSpace,
    section: Section,
    /// null means generic poison
    cc: ?std.builtin.CallingConvention,
    /// this might be Type.generic_poison
    bare_return_type: Type,
    var_args: bool,
    inferred_error_set: bool,
    is_extern: bool,
    has_body: bool,
    src_locs: Zir.Inst.Func.SrcLocs,
    opt_lib_name: ?[]const u8,
    noalias_bits: u32,
    is_noinline: bool,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const gpa = sema.gpa;
    const target = mod.getTarget();
    const ip = &mod.intern_pool;
    const ret_ty_src: LazySrcLoc = .{ .node_offset_fn_type_ret_ty = src_node_offset };
    const cc_src: LazySrcLoc = .{ .node_offset_fn_type_cc = src_node_offset };
    const func_src = LazySrcLoc.nodeOffset(src_node_offset);

    var is_generic = bare_return_type.isGenericPoison() or
        alignment == null or
        address_space == null or
        section == .generic or
        cc == null;

    if (var_args) {
        if (is_generic) {
            return sema.fail(block, func_src, "generic function cannot be variadic", .{});
        }
        try sema.checkCallConvSupportsVarArgs(block, cc_src, cc.?);
    }

    const is_source_decl = sema.generic_owner == .none;

    // In the case of generic calling convention, or generic alignment, we use
    // default values which are only meaningful for the generic function, *not*
    // the instantiation, which can depend on comptime parameters.
    // Related proposal: https://github.com/ziglang/zig/issues/11834
    const cc_resolved = cc orelse .Unspecified;
    var comptime_bits: u32 = 0;
    for (block.params.items(.ty), block.params.items(.is_comptime), 0..) |param_ty_ip, param_is_comptime, i| {
        const param_ty = param_ty_ip.toType();
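        // `noalias_bits` is a bitmask over the first 32 parameters: bit `i`
        // being set means parameter `i` was declared `noalias`. Parameters
        // beyond index 31 are treated as not `noalias`.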
        const is_noalias = blk: {
            const index = std.math.cast(u5, i) orelse break :blk false;
            break :blk @as(u1, @truncate(noalias_bits >> index)) != 0;
        };
        const param_src: LazySrcLoc = .{ .fn_proto_param = .{
            .decl = block.src_decl,
            .fn_proto_node_offset = src_node_offset,
            .param_index = @intCast(i),
        } };
        const requires_comptime = try sema.typeRequiresComptime(param_ty);
        if (param_is_comptime or requires_comptime) {
            comptime_bits |= @as(u32, 1) << @intCast(i); // TODO: handle cast error
        }
        const this_generic = param_ty.isGenericPoison();
        is_generic = is_generic or this_generic;
        if (param_is_comptime and !target_util.fnCallConvAllowsZigTypes(target, cc_resolved)) {
            return sema.fail(block, param_src, "comptime parameters not allowed in function with calling convention '{s}'", .{@tagName(cc_resolved)});
        }
        if (this_generic and !sema.no_partial_func_ty and !target_util.fnCallConvAllowsZigTypes(target, cc_resolved)) {
            return sema.fail(block, param_src, "generic parameters not allowed in function with calling convention '{s}'", .{@tagName(cc_resolved)});
        }
        if (!param_ty.isValidParamType(mod)) {
            const opaque_str = if (param_ty.zigTypeTag(mod) == .Opaque) "opaque " else "";
            const msg = msg: {
                const msg = try sema.errMsg(block, param_src, "parameter of {s}type '{}' not allowed", .{
                    opaque_str, param_ty.fmt(mod),
                });
                errdefer msg.destroy(sema.gpa);

                try sema.addDeclaredHereNote(msg, param_ty);
                break :msg msg;
            };
            return sema.failWithOwnedErrorMsg(msg);
        }
        if (!this_generic and !target_util.fnCallConvAllowsZigTypes(target, cc_resolved) and !try sema.validateExternType(param_ty, .param_ty)) {
            const msg = msg: {
                const msg = try sema.errMsg(block, param_src, "parameter of type '{}' not allowed in function with calling convention '{s}'", .{
                    param_ty.fmt(mod), @tagName(cc_resolved),
                });
                errdefer msg.destroy(sema.gpa);

                const src_decl = mod.declPtr(block.src_decl);
                try sema.explainWhyTypeIsNotExtern(msg, param_src.toSrcLoc(src_decl, mod), param_ty, .param_ty);

                try sema.addDeclaredHereNote(msg, param_ty);
                break :msg msg;
            };
            return sema.failWithOwnedErrorMsg(msg);
        }
        if (is_source_decl and requires_comptime and !param_is_comptime and has_body) {
            const msg = msg: {
                const msg = try sema.errMsg(block, param_src, "parameter of type '{}' must be declared comptime", .{
                    param_ty.fmt(mod),
                });
                errdefer msg.destroy(sema.gpa);

                const src_decl = mod.declPtr(block.src_decl);
                try sema.explainWhyTypeIsComptime(msg, param_src.toSrcLoc(src_decl, mod), param_ty);

                try sema.addDeclaredHereNote(msg, param_ty);
                break :msg msg;
            };
            return sema.failWithOwnedErrorMsg(msg);
        }
        if (is_source_decl and !this_generic and is_noalias and
            !(param_ty.zigTypeTag(mod) == .Pointer or param_ty.isPtrLikeOptional(mod)))
        {
            return sema.fail(block, param_src, "non-pointer parameter declared noalias", .{});
        }
    }

    var ret_ty_requires_comptime = false;
    const ret_poison = if (sema.typeRequiresComptime(bare_return_type)) |ret_comptime| rp: {
        ret_ty_requires_comptime = ret_comptime;
        break :rp bare_return_type.isGenericPoison();
    } else |err| switch (err) {
        error.GenericPoison => rp: {
            is_generic = true;
            break :rp true;
        },
        else => |e| return e,
    };
    const final_is_generic = is_generic or comptime_bits != 0 or ret_ty_requires_comptime;

    const param_types = block.params.items(.ty);

    if (!is_source_decl) {
        assert(has_body);
        assert(!is_generic);
        assert(comptime_bits == 0);
        assert(cc != null);
        assert(section != .generic);
        assert(address_space != null);
        assert(!var_args);
        if (inferred_error_set) {
            try sema.validateErrorUnionPayloadType(block, bare_return_type, ret_ty_src);
        }
        const func_index = try ip.getFuncInstance(gpa, .{
            .param_types = param_types,
            .noalias_bits = noalias_bits,
            .bare_return_type = bare_return_type.toIntern(),
            .cc = cc_resolved,
            .alignment = alignment.?,
            .section = switch (section) {
                .generic => unreachable,
                .default => .none,
                .explicit => |x| x.toOptional(),
            },
            .is_noinline = is_noinline,
            .inferred_error_set = inferred_error_set,
            .generic_owner = sema.generic_owner,
            .comptime_args = sema.comptime_args,
            .generation = mod.generation,
        });
        return finishFunc(
            sema,
            block,
            func_index,
            .none,
            ret_poison,
            bare_return_type,
            ret_ty_src,
            cc_resolved,
            is_source_decl,
            ret_ty_requires_comptime,
            func_inst,
            cc_src,
            is_noinline,
            is_generic,
            final_is_generic,
        );
    }

    // extern_func and func_decl functions take ownership of `sema.owner_decl`.
    sema.owner_decl.@"linksection" = switch (section) {
        .generic => .none,
        .default => .none,
        .explicit => |section_name| section_name.toOptional(),
    };
    sema.owner_decl.alignment = alignment orelse .none;
    sema.owner_decl.@"addrspace" = address_space orelse .generic;

    if (inferred_error_set) {
        assert(!is_extern);
        assert(has_body);
        if (!ret_poison)
            try sema.validateErrorUnionPayloadType(block, bare_return_type, ret_ty_src);
        const func_index = try ip.getFuncDeclIes(gpa, .{
            .owner_decl = sema.owner_decl_index,

            .param_types = param_types,
            .noalias_bits = noalias_bits,
            .comptime_bits = comptime_bits,
            .bare_return_type = bare_return_type.toIntern(),
            .cc = cc,
            .alignment = alignment,
            .section_is_generic = section == .generic,
            .addrspace_is_generic = address_space == null,
            .is_var_args = var_args,
            .is_generic = final_is_generic,
            .is_noinline = is_noinline,

            .zir_body_inst = func_inst,
            .lbrace_line = src_locs.lbrace_line,
            .rbrace_line = src_locs.rbrace_line,
            .lbrace_column = @as(u16, @truncate(src_locs.columns)),
            .rbrace_column = @as(u16, @truncate(src_locs.columns >> 16)),
        });
        return finishFunc(
            sema,
            block,
            func_index,
            .none,
            ret_poison,
            bare_return_type,
            ret_ty_src,
            cc_resolved,
            is_source_decl,
            ret_ty_requires_comptime,
            func_inst,
            cc_src,
            is_noinline,
            is_generic,
            final_is_generic,
        );
    }
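
    // Every remaining case needs the function *type* interned up front; which
    // value is produced from it depends on whether the declaration is extern,
    // has a body, or is a bare function type expression.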
    const func_ty = try ip.getFuncType(gpa, .{
        .param_types = param_types,
        .noalias_bits = noalias_bits,
        .comptime_bits = comptime_bits,
        .return_type = bare_return_type.toIntern(),
        .cc = cc,
        .alignment = alignment,
        .section_is_generic = section == .generic,
        .addrspace_is_generic = address_space == null,
        .is_var_args = var_args,
        .is_generic = final_is_generic,
        .is_noinline = is_noinline,
    });

    if (is_extern) {
        assert(comptime_bits == 0);
        assert(cc != null);
        assert(section != .generic);
        assert(address_space != null);
        assert(!is_generic);
        const func_index = try ip.getExternFunc(gpa, .{
            .ty = func_ty,
            .decl = sema.owner_decl_index,
            .lib_name = if (opt_lib_name) |lib_name| (try mod.intern_pool.getOrPutString(
                gpa,
                try sema.handleExternLibName(block, .{
                    .node_offset_lib_name = src_node_offset,
                }, lib_name),
            )).toOptional() else .none,
        });
        return finishFunc(
            sema,
            block,
            func_index,
            func_ty,
            ret_poison,
            bare_return_type,
            ret_ty_src,
            cc_resolved,
            is_source_decl,
            ret_ty_requires_comptime,
            func_inst,
            cc_src,
            is_noinline,
            is_generic,
            final_is_generic,
        );
    }

    if (has_body) {
        const func_index = try ip.getFuncDecl(gpa, .{
            .owner_decl = sema.owner_decl_index,
            .ty = func_ty,
            .cc = cc,
            .is_noinline = is_noinline,
            .zir_body_inst = func_inst,
            .lbrace_line = src_locs.lbrace_line,
            .rbrace_line = src_locs.rbrace_line,
            .lbrace_column = @as(u16, @truncate(src_locs.columns)),
            .rbrace_column = @as(u16, @truncate(src_locs.columns >> 16)),
        });
        return finishFunc(
            sema,
            block,
            func_index,
            func_ty,
            ret_poison,
            bare_return_type,
            ret_ty_src,
            cc_resolved,
            is_source_decl,
            ret_ty_requires_comptime,
            func_inst,
            cc_src,
            is_noinline,
            is_generic,
            final_is_generic,
        );
    }

    return finishFunc(
        sema,
        block,
        .none,
        func_ty,
        ret_poison,
        bare_return_type,
        ret_ty_src,
        cc_resolved,
        is_source_decl,
        ret_ty_requires_comptime,
        func_inst,
        cc_src,
        is_noinline,
        is_generic,
        final_is_generic,
    );
}

fn finishFunc(
    sema: *Sema,
    block: *Block,
    opt_func_index: InternPool.Index,
    func_ty: InternPool.Index,
    ret_poison: bool,
    bare_return_type: Type,
    ret_ty_src: LazySrcLoc,
    cc_resolved: std.builtin.CallingConvention,
    is_source_decl: bool,
    ret_ty_requires_comptime: bool,
    func_inst: Zir.Inst.Index,
    cc_src: LazySrcLoc,
    is_noinline: bool,
    is_generic: bool,
    final_is_generic: bool,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const ip = &mod.intern_pool;
    const gpa = sema.gpa;
    const target = mod.getTarget();

    const return_type: Type = if (opt_func_index == .none or ret_poison)
        bare_return_type
    else
        ip.funcTypeReturnType(ip.typeOf(opt_func_index)).toType();

    if (!return_type.isValidReturnType(mod)) {
        const opaque_str = if (return_type.zigTypeTag(mod) == .Opaque) "opaque " else "";
        const msg = msg: {
            const msg = try sema.errMsg(block, ret_ty_src, "{s}return type '{}' not allowed", .{
                opaque_str, return_type.fmt(mod),
            });
            errdefer msg.destroy(gpa);

            try sema.addDeclaredHereNote(msg, return_type);
            break :msg msg;
        };
        return sema.failWithOwnedErrorMsg(msg);
    }
    if (!ret_poison and !target_util.fnCallConvAllowsZigTypes(target, cc_resolved) and
        !try sema.validateExternType(return_type, .ret_ty))
    {
        const msg = msg: {
            const msg = try sema.errMsg(block, ret_ty_src, "return type '{}' not allowed in function with calling convention '{s}'", .{
                return_type.fmt(mod), @tagName(cc_resolved),
            });
            errdefer msg.destroy(gpa);

            const src_decl = mod.declPtr(block.src_decl);
            try sema.explainWhyTypeIsNotExtern(msg, ret_ty_src.toSrcLoc(src_decl, mod), return_type, .ret_ty);

            try sema.addDeclaredHereNote(msg, return_type);
            break :msg msg;
        };
        return sema.failWithOwnedErrorMsg(msg);
    }

    // If the return type is comptime-only but not dependent on parameters then
    // all parameter types also need to be comptime.
    if (is_source_decl and opt_func_index != .none and ret_ty_requires_comptime) comptime_check: {
        for (block.params.items(.is_comptime)) |is_comptime| {
            if (!is_comptime) break;
        } else break :comptime_check;

        const msg = try sema.errMsg(
            block,
            ret_ty_src,
            "function with comptime-only return type '{}' requires all parameters to be comptime",
            .{return_type.fmt(mod)},
        );
        try sema.explainWhyTypeIsComptime(msg, ret_ty_src.toSrcLoc(sema.owner_decl, mod), return_type);

        const tags = sema.code.instructions.items(.tag);
        const data = sema.code.instructions.items(.data);
        const param_body = sema.code.getParamBody(func_inst);
        for (
            block.params.items(.is_comptime),
            block.params.items(.name),
            param_body[0..block.params.len],
        ) |is_comptime, name_nts, param_index| {
            if (!is_comptime) {
                const param_src = switch (tags[param_index]) {
                    .param => data[param_index].pl_tok.src(),
                    .param_anytype => data[param_index].str_tok.src(),
                    else => unreachable,
                };
                const name = sema.code.nullTerminatedString2(name_nts);
                if (name.len != 0) {
                    try sema.errNote(block, param_src, msg, "param '{s}' is required to be comptime", .{name});
                } else {
                    try sema.errNote(block, param_src, msg, "param is required to be comptime", .{});
                }
            }
        }
        return sema.failWithOwnedErrorMsg(msg);
    }
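
    // For example (illustrative): `fn f(x: u32) type` is rejected by the
    // check above because its comptime-only return type depends on a runtime
    // parameter, while `fn f(comptime x: u32) type` is accepted.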
    const arch = target.cpu.arch;
    if (switch (cc_resolved) {
        .Unspecified, .C, .Naked, .Async, .Inline => null,
        .Interrupt => switch (arch) {
            .x86, .x86_64, .avr, .msp430 => null,
            else => @as([]const u8, "x86, x86_64, AVR, and MSP430"),
        },
        .Signal => switch (arch) {
            .avr => null,
            else => @as([]const u8, "AVR"),
        },
        .Stdcall, .Fastcall, .Thiscall => switch (arch) {
            .x86 => null,
            else => @as([]const u8, "x86"),
        },
        .Vectorcall => switch (arch) {
            .x86, .aarch64, .aarch64_be, .aarch64_32 => null,
            else => @as([]const u8, "x86 and AArch64"),
        },
        .APCS, .AAPCS, .AAPCSVFP => switch (arch) {
            .arm, .armeb, .aarch64, .aarch64_be, .aarch64_32, .thumb, .thumbeb => null,
            else => @as([]const u8, "ARM"),
        },
        .SysV, .Win64 => switch (arch) {
            .x86_64 => null,
            else => @as([]const u8, "x86_64"),
        },
        .Kernel => switch (arch) {
            .nvptx, .nvptx64, .amdgcn, .spirv32, .spirv64 => null,
            else => @as([]const u8, "nvptx, amdgcn and SPIR-V"),
        },
    }) |allowed_platform| {
        return sema.fail(block, cc_src, "callconv '{s}' is only available on {s}, not {s}", .{
            @tagName(cc_resolved),
            allowed_platform,
            @tagName(arch),
        });
    }

    if (cc_resolved == .Inline and is_noinline) {
        return sema.fail(block, cc_src, "'noinline' function cannot have callconv 'Inline'", .{});
    }
    if (is_generic and sema.no_partial_func_ty) return error.GenericPoison;

    if (!final_is_generic and sema.wantErrorReturnTracing(return_type)) {
        // Make sure that StackTrace's fields are resolved so that the backend can
        // lower this fn type.
        const unresolved_stack_trace_ty = try sema.getBuiltinType("StackTrace");
        try sema.resolveTypeFields(unresolved_stack_trace_ty);
    }

    return Air.internedToRef(if (opt_func_index != .none) opt_func_index else func_ty);
}

fn zirParam(
    sema: *Sema,
    block: *Block,
    inst: Zir.Inst.Index,
    comptime_syntax: bool,
) CompileError!void {
    const inst_data = sema.code.instructions.items(.data)[inst].pl_tok;
    const src = inst_data.src();
    const extra = sema.code.extraData(Zir.Inst.Param, inst_data.payload_index);
    const param_name: Zir.NullTerminatedString = @enumFromInt(extra.data.name);
    const body = sema.code.extra[extra.end..][0..extra.data.body_len];

    const param_ty = param_ty: {
        const err = err: {
            // Make sure any nested param instructions don't clobber our work.
            const prev_params = block.params;
            const prev_no_partial_func_type = sema.no_partial_func_ty;
            const prev_generic_owner = sema.generic_owner;
            const prev_generic_call_src = sema.generic_call_src;
            const prev_generic_call_decl = sema.generic_call_decl;
            block.params = .{};
            sema.no_partial_func_ty = true;
            sema.generic_owner = .none;
            sema.generic_call_src = .unneeded;
            sema.generic_call_decl = .none;
            defer {
                block.params = prev_params;
                sema.no_partial_func_ty = prev_no_partial_func_type;
                sema.generic_owner = prev_generic_owner;
                sema.generic_call_src = prev_generic_call_src;
                sema.generic_call_decl = prev_generic_call_decl;
            }

            if (sema.resolveBody(block, body, inst)) |param_ty_inst| {
                if (sema.analyzeAsType(block, src, param_ty_inst)) |param_ty| {
                    break :param_ty param_ty;
                } else |err| break :err err;
            } else |err| break :err err;
        };
        switch (err) {
            error.GenericPoison => {
                // The type is not available until the generic instantiation.
                // Map the param instruction to a poison value and insert an
                // anytype parameter instead.
                try block.params.append(sema.arena, .{
                    .ty = .generic_poison_type,
                    .is_comptime = comptime_syntax,
                    .name = param_name,
                });
                sema.inst_map.putAssumeCapacityNoClobber(inst, .generic_poison);
                return;
            },
            else => |e| return e,
        }
    };

    const is_comptime = sema.typeRequiresComptime(param_ty) catch |err| switch (err) {
        error.GenericPoison => {
            // The type is not available until the generic instantiation.
            // Map the param instruction to a poison value and insert an
            // anytype parameter instead.
            try block.params.append(sema.arena, .{
                .ty = .generic_poison_type,
                .is_comptime = comptime_syntax,
                .name = param_name,
            });
            sema.inst_map.putAssumeCapacityNoClobber(inst, .generic_poison);
            return;
        },
        else => |e| return e,
    } or comptime_syntax;

    try block.params.append(sema.arena, .{
        .ty = param_ty.toIntern(),
        .is_comptime = comptime_syntax,
        .name = param_name,
    });

    if (is_comptime) {
        // If this is a comptime parameter we can add a constant generic_poison
        // since this is also a generic parameter.
        sema.inst_map.putAssumeCapacityNoClobber(inst, .generic_poison);
    } else {
        // Otherwise we need a dummy runtime instruction.
        const result_index: Air.Inst.Index = @intCast(sema.air_instructions.len);
        try sema.air_instructions.append(sema.gpa, .{
            .tag = .alloc,
            .data = .{ .ty = param_ty },
        });
        sema.inst_map.putAssumeCapacityNoClobber(inst, Air.indexToRef(result_index));
    }
}

fn zirParamAnytype(
    sema: *Sema,
    block: *Block,
    inst: Zir.Inst.Index,
    comptime_syntax: bool,
) CompileError!void {
    const inst_data = sema.code.instructions.items(.data)[inst].str_tok;
    const param_name: Zir.NullTerminatedString = @enumFromInt(inst_data.start);

    // We are evaluating a generic function without any comptime args provided.

    try block.params.append(sema.arena, .{
        .ty = .generic_poison_type,
        .is_comptime = comptime_syntax,
        .name = param_name,
    });
    sema.inst_map.putAssumeCapacity(inst, .generic_poison);
}
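
// The handlers below implement `@as` and type-annotated coercions; for
// example (illustrative), `const x = @as(u32, 1);` resolves the destination
// type and then coerces the operand via `analyzeAs`.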

fn zirAs(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const bin_inst = sema.code.instructions.items(.data)[inst].bin;
    return sema.analyzeAs(block, sema.src, bin_inst.lhs, bin_inst.rhs, false);
}

fn zirAsNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const src = inst_data.src();
    const extra = sema.code.extraData(Zir.Inst.As, inst_data.payload_index).data;
    sema.src = src;
    return sema.analyzeAs(block, src, extra.dest_type, extra.operand, false);
}

fn zirAsShiftOperand(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const src = inst_data.src();
    const extra = sema.code.extraData(Zir.Inst.As, inst_data.payload_index).data;
    return sema.analyzeAs(block, src, extra.dest_type, extra.operand, true);
}

fn analyzeAs(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    zir_dest_type: Zir.Inst.Ref,
    zir_operand: Zir.Inst.Ref,
    no_cast_to_comptime_int: bool,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const operand = try sema.resolveInst(zir_operand);
    if (zir_dest_type == .var_args_param_type) return operand;
    const dest_ty = sema.resolveType(block, src, zir_dest_type) catch |err| switch (err) {
        error.GenericPoison => return operand,
        else => |e| return e,
    };
    if (dest_ty.zigTypeTag(mod) == .NoReturn) {
        return sema.fail(block, src, "cannot cast to noreturn", .{});
    }
    const is_ret = if (Zir.refToIndex(zir_dest_type)) |ptr_index|
        sema.code.instructions.items(.tag)[ptr_index] == .ret_type
    else
        false;
    return sema.coerceExtra(block, dest_ty, operand, src, .{ .is_ret = is_ret, .no_cast_to_comptime_int = no_cast_to_comptime_int }) catch |err| switch (err) {
        error.NotCoercible => unreachable,
        else => |e| return e,
    };
}

fn zirIntFromPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
    const ptr_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    const ptr = try sema.resolveInst(inst_data.operand);
    const ptr_ty = sema.typeOf(ptr);
    if (!ptr_ty.isPtrAtRuntime(mod)) {
        return sema.fail(block, ptr_src, "expected pointer, found '{}'", .{ptr_ty.fmt(mod)});
    }
    if (try sema.resolveMaybeUndefValIntable(ptr)) |ptr_val| {
        return Air.internedToRef((try mod.intValue(
            Type.usize,
            (try ptr_val.getUnsignedIntAdvanced(mod, sema)).?,
        )).toIntern());
    }
    try sema.requireRuntimeBlock(block, inst_data.src(), ptr_src);
    return block.addUnOp(.int_from_ptr, ptr);
}

fn zirFieldVal(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const src = inst_data.src();
    const field_name_src: LazySrcLoc = .{ .node_offset_field_name = inst_data.src_node };
    const extra = sema.code.extraData(Zir.Inst.Field, inst_data.payload_index).data;
    const field_name = try mod.intern_pool.getOrPutString(sema.gpa, sema.code.nullTerminatedString(extra.field_name_start));
    const object = try sema.resolveInst(extra.lhs);
    return sema.fieldVal(block, src, object, field_name, field_name_src);
}

fn zirFieldPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index, initializing: bool) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const src = inst_data.src();
    const field_name_src: LazySrcLoc = .{ .node_offset_field_name = inst_data.src_node };
    const extra = sema.code.extraData(Zir.Inst.Field, inst_data.payload_index).data;
    const field_name = try mod.intern_pool.getOrPutString(sema.gpa, sema.code.nullTerminatedString(extra.field_name_start));
    const object_ptr = try sema.resolveInst(extra.lhs);
    return sema.fieldPtr(block, src, object_ptr, field_name, field_name_src, initializing);
}

fn zirFieldValNamed(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const src = inst_data.src();
    const field_name_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
    const extra = sema.code.extraData(Zir.Inst.FieldNamed, inst_data.payload_index).data;
    const object = try sema.resolveInst(extra.lhs);
    const field_name = try sema.resolveConstStringIntern(block, field_name_src, extra.field_name, "field name must be comptime-known");
    return sema.fieldVal(block, src, object, field_name, field_name_src);
}

fn zirFieldPtrNamed(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const src = inst_data.src();
    const field_name_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
    const extra = sema.code.extraData(Zir.Inst.FieldNamed, inst_data.payload_index).data;
    const object_ptr = try sema.resolveInst(extra.lhs);
    const field_name = try sema.resolveConstStringIntern(block, field_name_src, extra.field_name, "field name must be comptime-known");
    return sema.fieldPtr(block, src, object_ptr, field_name, field_name_src, false);
}

fn zirIntCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const src = inst_data.src();
    const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;

    const dest_ty = try sema.resolveDestType(block, src, extra.lhs, .remove_eu_opt, "@intCast");
    const operand = try sema.resolveInst(extra.rhs);

    return sema.intCast(block, inst_data.src(), dest_ty, src, operand, operand_src, true);
}
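
// Illustrative example: with the result-type-based form of `@intCast`, code
// such as
//
//     const big: u64 = 300;
//     const small: u8 = @intCast(big);
//
// resolves the destination type `u8` from the result location; in safe builds
// the checks emitted below trap when the value does not fit.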

fn intCast(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    dest_ty: Type,
    dest_ty_src: LazySrcLoc,
    operand: Air.Inst.Ref,
    operand_src: LazySrcLoc,
    runtime_safety: bool,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const operand_ty = sema.typeOf(operand);
    const dest_scalar_ty = try sema.checkIntOrVectorAllowComptime(block, dest_ty, dest_ty_src);
    const operand_scalar_ty = try sema.checkIntOrVectorAllowComptime(block, operand_ty, operand_src);

    if (try sema.isComptimeKnown(operand)) {
        return sema.coerce(block, dest_ty, operand, operand_src);
    } else if (dest_scalar_ty.zigTypeTag(mod) == .ComptimeInt) {
        return sema.fail(block, operand_src, "unable to cast runtime value to 'comptime_int'", .{});
    }

    try sema.checkVectorizableBinaryOperands(block, operand_src, dest_ty, operand_ty, dest_ty_src, operand_src);
    const is_vector = dest_ty.zigTypeTag(mod) == .Vector;

    if ((try sema.typeHasOnePossibleValue(dest_ty))) |opv| {
        // requirement: an intCast to u0 is valid only when the input is 0
        if (runtime_safety and block.wantSafety()) {
            try sema.requireRuntimeBlock(block, src, operand_src);
            const wanted_info = dest_scalar_ty.intInfo(mod);
            const wanted_bits = wanted_info.bits;

            if (wanted_bits == 0) {
                const ok = if (is_vector) ok: {
                    const zeros = try sema.splat(operand_ty, try mod.intValue(operand_scalar_ty, 0));
                    const zero_inst = Air.internedToRef(zeros.toIntern());
                    const is_in_range = try block.addCmpVector(operand, zero_inst, .eq);
                    const all_in_range = try block.addInst(.{
                        .tag = .reduce,
                        .data = .{ .reduce = .{ .operand = is_in_range, .operation = .And } },
                    });
                    break :ok all_in_range;
                } else ok: {
                    const zero_inst = Air.internedToRef((try mod.intValue(operand_ty, 0)).toIntern());
                    const is_in_range = try block.addBinOp(.cmp_lte, operand, zero_inst);
                    break :ok is_in_range;
                };
                try sema.addSafetyCheck(block, src, ok, .cast_truncated_data);
            }
        }

        return Air.internedToRef(opv.toIntern());
    }

    try sema.requireRuntimeBlock(block, src, operand_src);
    if (runtime_safety and block.wantSafety()) {
        const actual_info = operand_scalar_ty.intInfo(mod);
        const wanted_info = dest_scalar_ty.intInfo(mod);
        const actual_bits = actual_info.bits;
        const wanted_bits = wanted_info.bits;
        const actual_value_bits = actual_bits - @intFromBool(actual_info.signedness == .signed);
        const wanted_value_bits = wanted_bits - @intFromBool(wanted_info.signedness == .signed);

        // range shrinkage
        // requirement: int value fits into target type
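        // Mechanism of the check below: `diff = dest_max -% operand` uses
        // wrapping subtraction, so any operand above `dest_max` wraps around
        // to a value greater than `dest_max` (after reinterpreting the sign
        // bit for signed operands), letting a single unsigned `<=` comparison
        // detect out-of-range values in both directions.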
        if (wanted_value_bits < actual_value_bits) {
            const dest_max_val_scalar = try dest_scalar_ty.maxIntScalar(mod, operand_scalar_ty);
            const dest_max_val = try sema.splat(operand_ty, dest_max_val_scalar);
            const dest_max = Air.internedToRef(dest_max_val.toIntern());
            const diff = try block.addBinOp(.sub_wrap, dest_max, operand);

            if (actual_info.signedness == .signed) {
                // Reinterpret the sign-bit as part of the value. This will make
                // negative differences (`operand` > `dest_max`) appear too big.
                const unsigned_operand_ty = try mod.intType(.unsigned, actual_bits);
                const diff_unsigned = try block.addBitCast(unsigned_operand_ty, diff);

                // If the destination type is signed, then we need to double its
                // range to account for negative values.
                const dest_range_val = if (wanted_info.signedness == .signed) range_val: {
                    const one = try mod.intValue(unsigned_operand_ty, 1);
                    const range_minus_one = try dest_max_val.shl(one, unsigned_operand_ty, sema.arena, mod);
                    break :range_val try sema.intAdd(range_minus_one, one, unsigned_operand_ty, undefined);
                } else try mod.getCoerced(dest_max_val, unsigned_operand_ty);
                const dest_range = Air.internedToRef(dest_range_val.toIntern());

                const ok = if (is_vector) ok: {
                    const is_in_range = try block.addCmpVector(diff_unsigned, dest_range, .lte);
                    const all_in_range = try block.addInst(.{
                        .tag = if (block.float_mode == .Optimized) .reduce_optimized else .reduce,
                        .data = .{ .reduce = .{
                            .operand = is_in_range,
                            .operation = .And,
                        } },
                    });
                    break :ok all_in_range;
                } else ok: {
                    const is_in_range = try block.addBinOp(.cmp_lte, diff_unsigned, dest_range);
                    break :ok is_in_range;
                };
                // TODO negative_to_unsigned?
                try sema.addSafetyCheck(block, src, ok, .cast_truncated_data);
            } else {
                const ok = if (is_vector) ok: {
                    const is_in_range = try block.addCmpVector(diff, dest_max, .lte);
                    const all_in_range = try block.addInst(.{
                        .tag = if (block.float_mode == .Optimized) .reduce_optimized else .reduce,
                        .data = .{ .reduce = .{
                            .operand = is_in_range,
                            .operation = .And,
                        } },
                    });
                    break :ok all_in_range;
                } else ok: {
                    const is_in_range = try block.addBinOp(.cmp_lte, diff, dest_max);
                    break :ok is_in_range;
                };
                try sema.addSafetyCheck(block, src, ok, .cast_truncated_data);
            }
        } else if (actual_info.signedness == .signed and wanted_info.signedness == .unsigned) {
            // no shrinkage, yes sign loss
            // requirement: signed to unsigned >= 0
            const ok = if (is_vector) ok: {
                const scalar_zero = try mod.intValue(operand_scalar_ty, 0);
                const zero_val = try sema.splat(operand_ty, scalar_zero);
                const zero_inst = Air.internedToRef(zero_val.toIntern());
                const is_in_range = try block.addCmpVector(operand, zero_inst, .gte);
                const all_in_range = try block.addInst(.{
                    .tag = if (block.float_mode == .Optimized) .reduce_optimized else .reduce,
                    .data = .{ .reduce = .{
                        .operand = is_in_range,
                        .operation = .And,
                    } },
                });
                break :ok all_in_range;
            } else ok: {
                const zero_inst = Air.internedToRef((try mod.intValue(operand_ty, 0)).toIntern());
                const is_in_range = try block.addBinOp(.cmp_gte, operand, zero_inst);
                break :ok is_in_range;
            };
            try sema.addSafetyCheck(block, src, ok, .negative_to_unsigned);
        }
    }
    return block.addTyOp(.intcast, dest_ty, operand);
}

fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const src = inst_data.src();
    const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;

    const dest_ty = try sema.resolveDestType(block, src, extra.lhs, .remove_eu_opt, "@bitCast");
    const operand = try sema.resolveInst(extra.rhs);
    const operand_ty = sema.typeOf(operand);
    switch (dest_ty.zigTypeTag(mod)) {
        .AnyFrame,
        .ComptimeFloat,
        .ComptimeInt,
        .EnumLiteral,
        .ErrorSet,
        .ErrorUnion,
        .Fn,
        .Frame,
        .NoReturn,
        .Null,
        .Opaque,
        .Optional,
        .Type,
        .Undefined,
        .Void,
        => return sema.fail(block, src, "cannot @bitCast to '{}'", .{dest_ty.fmt(mod)}),

        .Enum => {
            const msg = msg: {
                const msg = try sema.errMsg(block, src, "cannot @bitCast to '{}'", .{dest_ty.fmt(mod)});
                errdefer msg.destroy(sema.gpa);
                switch (operand_ty.zigTypeTag(mod)) {
                    .Int, .ComptimeInt => try sema.errNote(block, src, msg, "use @enumFromInt to cast from '{}'", .{operand_ty.fmt(mod)}),
                    else => {},
                }

                break :msg msg;
            };
            return sema.failWithOwnedErrorMsg(msg);
        },

        .Pointer => {
            const msg = msg: {
                const msg = try sema.errMsg(block, src, "cannot @bitCast to '{}'", .{dest_ty.fmt(mod)});
                errdefer msg.destroy(sema.gpa);
                switch (operand_ty.zigTypeTag(mod)) {
                    .Int, .ComptimeInt => try sema.errNote(block, src, msg, "use @ptrFromInt to cast from '{}'", .{operand_ty.fmt(mod)}),
                    .Pointer => try sema.errNote(block, src, msg, "use @ptrCast to cast from '{}'", .{operand_ty.fmt(mod)}),
                    else => {},
                }

                break :msg msg;
            };
            return sema.failWithOwnedErrorMsg(msg);
        },
        .Struct, .Union => if (dest_ty.containerLayout(mod) == .Auto) {
            const container = switch (dest_ty.zigTypeTag(mod)) {
                .Struct => "struct",
                .Union => "union",
                else => unreachable,
            };
            return sema.fail(block, src, "cannot @bitCast to '{}'; {s} does not have a guaranteed in-memory layout", .{
                dest_ty.fmt(mod), container,
            });
        },

        .Array,
        .Bool,
        .Float,
        .Int,
        .Vector,
        => {},
    }
    switch (operand_ty.zigTypeTag(mod)) {
        .AnyFrame,
        .ComptimeFloat,
        .ComptimeInt,
        .EnumLiteral,
        .ErrorSet,
        .ErrorUnion,
        .Fn,
        .Frame,
        .NoReturn,
        .Null,
        .Opaque,
        .Optional,
        .Type,
        .Undefined,
        .Void,
        => return sema.fail(block, operand_src, "cannot @bitCast from '{}'", .{operand_ty.fmt(mod)}),

        .Enum => {
            const msg = msg: {
                const msg = try sema.errMsg(block, operand_src, "cannot @bitCast from '{}'", .{operand_ty.fmt(mod)});
                errdefer msg.destroy(sema.gpa);
                switch (dest_ty.zigTypeTag(mod)) {
                    .Int, .ComptimeInt => try sema.errNote(block, operand_src, msg, "use @intFromEnum to cast to '{}'", .{dest_ty.fmt(mod)}),
                    else => {},
                }

                break :msg msg;
            };
            return sema.failWithOwnedErrorMsg(msg);
        },
        .Pointer => {
            const msg = msg: {
                const msg = try sema.errMsg(block, operand_src, "cannot @bitCast from '{}'", .{operand_ty.fmt(mod)});
                errdefer msg.destroy(sema.gpa);
                switch (dest_ty.zigTypeTag(mod)) {
                    .Int, .ComptimeInt => try sema.errNote(block, operand_src, msg, "use @intFromPtr to cast to '{}'", .{dest_ty.fmt(mod)}),
                    .Pointer => try sema.errNote(block, operand_src, msg, "use @ptrCast to cast to '{}'", .{dest_ty.fmt(mod)}),
                    else => {},
                }

                break :msg msg;
            };
            return sema.failWithOwnedErrorMsg(msg);
        },
        .Struct, .Union => if (operand_ty.containerLayout(mod) == .Auto) {
            const container = switch (operand_ty.zigTypeTag(mod)) {
                .Struct => "struct",
                .Union => "union",
                else => unreachable,
            };
            return sema.fail(block, operand_src, "cannot @bitCast from '{}'; {s} does not have a guaranteed in-memory layout", .{
                operand_ty.fmt(mod), container,
            });
        },

        .Array,
        .Bool,
        .Float,
        .Int,
        .Vector,
        => {},
    }
    return sema.bitCast(block, dest_ty, operand, inst_data.src(), operand_src);
}
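
// Illustrative summary: `@bitCast` reinterprets bits between types with a
// guaranteed in-memory layout, e.g. reinterpreting the bits of a `u32` as an
// `f32`. Casts involving pointers, enums, or auto-layout aggregates are
// rejected above, with notes pointing at `@ptrCast`, `@enumFromInt`, etc.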

fn zirFloatCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const src = inst_data.src();
    const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;

    const dest_ty = try sema.resolveDestType(block, src, extra.lhs, .remove_eu_opt, "@floatCast");
    const operand = try sema.resolveInst(extra.rhs);

    const target = mod.getTarget();
    const dest_is_comptime_float = switch (dest_ty.zigTypeTag(mod)) {
        .ComptimeFloat => true,
        .Float => false,
        else => return sema.fail(
            block,
            src,
            "expected float type, found '{}'",
            .{dest_ty.fmt(mod)},
        ),
    };

    const operand_ty = sema.typeOf(operand);
    switch (operand_ty.zigTypeTag(mod)) {
        .ComptimeFloat, .Float, .ComptimeInt => {},
        else => return sema.fail(
            block,
            operand_src,
            "expected float type, found '{}'",
            .{operand_ty.fmt(mod)},
        ),
    }

    if (try sema.resolveMaybeUndefVal(operand)) |operand_val| {
        return Air.internedToRef((try operand_val.floatCast(dest_ty, mod)).toIntern());
    }
    if (dest_is_comptime_float) {
        return sema.fail(block, operand_src, "unable to cast runtime value to 'comptime_float'", .{});
    }
    const src_bits = operand_ty.floatBits(target);
    const dst_bits = dest_ty.floatBits(target);
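    // Widening (or same-width) float casts are lossless, so they lower to a
    // plain coercion; only a narrowing cast emits a runtime `fptrunc`.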
    if (dst_bits >= src_bits) {
        return sema.coerce(block, dest_ty, operand, operand_src);
    }
    try sema.requireRuntimeBlock(block, inst_data.src(), operand_src);
    return block.addTyOp(.fptrunc, dest_ty, operand);
}

fn zirElemVal(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const src = inst_data.src();
    const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
    const array = try sema.resolveInst(extra.lhs);
    const elem_index = try sema.resolveInst(extra.rhs);
    return sema.elemVal(block, src, array, elem_index, src, false);
}

fn zirElemValNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const src = inst_data.src();
    const elem_index_src: LazySrcLoc = .{ .node_offset_array_access_index = inst_data.src_node };
    const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
    const array = try sema.resolveInst(extra.lhs);
    const elem_index = try sema.resolveInst(extra.rhs);
    return sema.elemVal(block, src, array, elem_index, elem_index_src, true);
}

fn zirElemPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const src = inst_data.src();
    const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
    const array_ptr = try sema.resolveInst(extra.lhs);
    const elem_index = try sema.resolveInst(extra.rhs);
    const indexable_ty = sema.typeOf(array_ptr);
    if (indexable_ty.zigTypeTag(mod) != .Pointer) {
        const capture_src: LazySrcLoc = .{ .for_capture_from_input = inst_data.src_node };
        const msg = msg: {
            const msg = try sema.errMsg(block, capture_src, "pointer capture of non pointer type '{}'", .{
                indexable_ty.fmt(mod),
            });
            errdefer msg.destroy(sema.gpa);
            if (indexable_ty.isIndexable(mod)) {
                try sema.errNote(block, src, msg, "consider using '&' here", .{});
            }
            break :msg msg;
        };
        return sema.failWithOwnedErrorMsg(msg);
    }
    return sema.elemPtrOneLayerOnly(block, src, array_ptr, elem_index, src, false, false);
}

fn zirElemPtrNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const src = inst_data.src();
    const elem_index_src: LazySrcLoc = .{ .node_offset_array_access_index = inst_data.src_node };
    const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
    const array_ptr = try sema.resolveInst(extra.lhs);
    const elem_index = try sema.resolveInst(extra.rhs);
    return sema.elemPtr(block, src, array_ptr, elem_index, elem_index_src, false, true);
}

fn zirElemPtrImm(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const src = inst_data.src();
    const extra = sema.code.extraData(Zir.Inst.ElemPtrImm, inst_data.payload_index).data;
    const array_ptr = try sema.resolveInst(extra.ptr);
    const elem_index = try sema.mod.intRef(Type.usize, extra.index);
    return sema.elemPtr(block, src, array_ptr, elem_index, src, true, true);
}
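
// Illustrative mapping from slice syntax to the handlers below (assumed from
// the instruction names): `a[start..]` lowers to `slice_start`,
// `a[start..end]` to `slice_end`, `a[start..end :s]` to `slice_sentinel`, and
// the `a[start..][0..len]` pattern to `slice_length`.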

fn zirSliceStart(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const src = inst_data.src();
    const extra = sema.code.extraData(Zir.Inst.SliceStart, inst_data.payload_index).data;
    const array_ptr = try sema.resolveInst(extra.lhs);
    const start = try sema.resolveInst(extra.start);
    const ptr_src: LazySrcLoc = .{ .node_offset_slice_ptr = inst_data.src_node };
    const start_src: LazySrcLoc = .{ .node_offset_slice_start = inst_data.src_node };
    const end_src: LazySrcLoc = .{ .node_offset_slice_end = inst_data.src_node };

    return sema.analyzeSlice(block, src, array_ptr, start, .none, .none, .unneeded, ptr_src, start_src, end_src, false);
}

fn zirSliceEnd(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const src = inst_data.src();
    const extra = sema.code.extraData(Zir.Inst.SliceEnd, inst_data.payload_index).data;
    const array_ptr = try sema.resolveInst(extra.lhs);
    const start = try sema.resolveInst(extra.start);
    const end = try sema.resolveInst(extra.end);
    const ptr_src: LazySrcLoc = .{ .node_offset_slice_ptr = inst_data.src_node };
    const start_src: LazySrcLoc = .{ .node_offset_slice_start = inst_data.src_node };
    const end_src: LazySrcLoc = .{ .node_offset_slice_end = inst_data.src_node };

    return sema.analyzeSlice(block, src, array_ptr, start, end, .none, .unneeded, ptr_src, start_src, end_src, false);
}

fn zirSliceSentinel(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const src = inst_data.src();
    const sentinel_src: LazySrcLoc = .{ .node_offset_slice_sentinel = inst_data.src_node };
    const extra = sema.code.extraData(Zir.Inst.SliceSentinel, inst_data.payload_index).data;
    const array_ptr = try sema.resolveInst(extra.lhs);
    const start = try sema.resolveInst(extra.start);
    const end: Air.Inst.Ref = if (extra.end == .none) .none else try sema.resolveInst(extra.end);
    const sentinel = try sema.resolveInst(extra.sentinel);
    const ptr_src: LazySrcLoc = .{ .node_offset_slice_ptr = inst_data.src_node };
    const start_src: LazySrcLoc = .{ .node_offset_slice_start = inst_data.src_node };
    const end_src: LazySrcLoc = .{ .node_offset_slice_end = inst_data.src_node };

    return sema.analyzeSlice(block, src, array_ptr, start, end, sentinel, sentinel_src, ptr_src, start_src, end_src, false);
}

fn zirSliceLength(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const src = inst_data.src();
    const extra = sema.code.extraData(Zir.Inst.SliceLength, inst_data.payload_index).data;
    const array_ptr = try sema.resolveInst(extra.lhs);
    const start = try sema.resolveInst(extra.start);
    const len = try sema.resolveInst(extra.len);
    const sentinel = if (extra.sentinel == .none) .none else try sema.resolveInst(extra.sentinel);
    const ptr_src: LazySrcLoc = .{ .node_offset_slice_ptr = inst_data.src_node };
    const start_src: LazySrcLoc = .{ .node_offset_slice_start = extra.start_src_node_offset };
    const end_src: LazySrcLoc = .{ .node_offset_slice_end = inst_data.src_node };
    const sentinel_src: LazySrcLoc = if (sentinel == .none)
        .unneeded
    else
        .{ .node_offset_slice_sentinel = inst_data.src_node };

    return sema.analyzeSlice(block, src, array_ptr, start, len, sentinel, sentinel_src, ptr_src, start_src, end_src, true);
}

/// Holds common data used when analyzing or resolving switch prong bodies,
/// including setting up captures.
const SwitchProngAnalysis = struct {
    sema: *Sema,
    /// The block containing the `switch_block` itself.
    parent_block: *Block,
    /// The raw switch operand value (*not* the condition). Always defined.
    operand: Air.Inst.Ref,
    /// May be `undefined` if no prong has a by-ref capture.
    operand_ptr: Air.Inst.Ref,
    /// The switch condition value. For unions, `operand` is the union and `cond` is its tag.
    cond: Air.Inst.Ref,
    /// If this switch is on an error set, this is the type to assign to the
    /// `else` prong. If `null`, the prong should be unreachable.
    else_error_ty: ?Type,
    /// The index of the `switch_block` instruction itself.
    switch_block_inst: Zir.Inst.Index,
    /// The dummy index into which inline tag captures should be placed. May be
    /// undefined if no prong has a tag capture.
    tag_capture_inst: Zir.Inst.Index,
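
    // Illustrative Zig source for the capture kinds handled below:
    //
    //     switch (u) {
    //         .a => |val| {},             // by-val payload capture
    //         .b => |*ptr| {},            // by-ref payload capture
    //         inline else => |v, tag| {}, // inline prong with tag capture
    //     }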
|
|
|
|
/// Resolve a switch prong which is determined at comptime to have no peers.
|
|
/// Uses `resolveBlockBody`. Sets up captures as needed.
|
|
fn resolveProngComptime(
|
|
spa: SwitchProngAnalysis,
|
|
child_block: *Block,
|
|
prong_type: enum { normal, special },
|
|
prong_body: []const Zir.Inst.Index,
|
|
capture: Zir.Inst.SwitchBlock.ProngInfo.Capture,
|
|
/// Must use the `scalar_capture`, `special_capture`, or `multi_capture` union field.
|
|
raw_capture_src: Module.SwitchProngSrc,
|
|
/// The set of all values which can reach this prong. May be undefined
|
|
/// if the prong is special or contains ranges.
|
|
case_vals: []const Air.Inst.Ref,
|
|
/// The inline capture of this prong. If this is not an inline prong,
|
|
/// this is `.none`.
|
|
inline_case_capture: Air.Inst.Ref,
|
|
/// Whether this prong has an inline tag capture. If `true`, then
|
|
/// `inline_case_capture` cannot be `.none`.
|
|
has_tag_capture: bool,
|
|
merges: *Block.Merges,
|
|
) CompileError!Air.Inst.Ref {
|
|
const sema = spa.sema;
|
|
const src = sema.code.instructions.items(.data)[spa.switch_block_inst].pl_node.src();
|
|
|
|
if (has_tag_capture) {
|
|
const tag_ref = try spa.analyzeTagCapture(child_block, raw_capture_src, inline_case_capture);
|
|
sema.inst_map.putAssumeCapacity(spa.tag_capture_inst, tag_ref);
|
|
}
|
|
defer if (has_tag_capture) assert(sema.inst_map.remove(spa.tag_capture_inst));
|
|
|
|
switch (capture) {
|
|
.none => {
|
|
return sema.resolveBlockBody(spa.parent_block, src, child_block, prong_body, spa.switch_block_inst, merges);
|
|
},
|
|
|
|
.by_val, .by_ref => {
|
|
const capture_ref = try spa.analyzeCapture(
|
|
child_block,
|
|
capture == .by_ref,
|
|
prong_type == .special,
|
|
raw_capture_src,
|
|
case_vals,
|
|
inline_case_capture,
|
|
);
|
|
|
|
if (sema.typeOf(capture_ref).isNoReturn(sema.mod)) {
|
|
// This prong should be unreachable!
|
|
return Air.Inst.Ref.unreachable_value;
|
|
}
|
|
|
|
sema.inst_map.putAssumeCapacity(spa.switch_block_inst, capture_ref);
|
|
defer assert(sema.inst_map.remove(spa.switch_block_inst));
|
|
|
|
return sema.resolveBlockBody(spa.parent_block, src, child_block, prong_body, spa.switch_block_inst, merges);
|
|
},
|
|
}
|
|
}
|
|
|
|
    /// Analyze a switch prong which may have peers at runtime.
    /// Uses `analyzeBodyRuntimeBreak`. Sets up captures as needed.
    fn analyzeProngRuntime(
        spa: SwitchProngAnalysis,
        case_block: *Block,
        prong_type: enum { normal, special },
        prong_body: []const Zir.Inst.Index,
        capture: Zir.Inst.SwitchBlock.ProngInfo.Capture,
        /// Must use the `scalar_capture`, `special_capture`, or `multi_capture` union field.
        raw_capture_src: Module.SwitchProngSrc,
        /// The set of all values which can reach this prong. May be undefined
        /// if the prong is special or contains ranges.
        case_vals: []const Air.Inst.Ref,
        /// The inline capture of this prong. If this is not an inline prong,
        /// this is `.none`.
        inline_case_capture: Air.Inst.Ref,
        /// Whether this prong has an inline tag capture. If `true`, then
        /// `inline_case_capture` cannot be `.none`.
        has_tag_capture: bool,
    ) CompileError!void {
        const sema = spa.sema;

        if (has_tag_capture) {
            const tag_ref = try spa.analyzeTagCapture(case_block, raw_capture_src, inline_case_capture);
            sema.inst_map.putAssumeCapacity(spa.tag_capture_inst, tag_ref);
        }
        defer if (has_tag_capture) assert(sema.inst_map.remove(spa.tag_capture_inst));

        switch (capture) {
            .none => {
                return sema.analyzeBodyRuntimeBreak(case_block, prong_body);
            },

            .by_val, .by_ref => {
                const capture_ref = try spa.analyzeCapture(
                    case_block,
                    capture == .by_ref,
                    prong_type == .special,
                    raw_capture_src,
                    case_vals,
                    inline_case_capture,
                );

                if (sema.typeOf(capture_ref).isNoReturn(sema.mod)) {
                    // No need to analyze any further, the prong is unreachable
                    return;
                }

                sema.inst_map.putAssumeCapacity(spa.switch_block_inst, capture_ref);
                defer assert(sema.inst_map.remove(spa.switch_block_inst));

                return sema.analyzeBodyRuntimeBreak(case_block, prong_body);
            },
        }
    }

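    // For example, a prong with an inline tag capture:
    //
    //     switch (un) {
    //         inline else => |payload, tag| handle(tag, payload),
    //     }
    //
    // Each expanded prong gets a comptime-known `tag`; `analyzeTagCapture`
    // resolves it and rejects tag captures on non-union operands.
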
    fn analyzeTagCapture(
        spa: SwitchProngAnalysis,
        block: *Block,
        raw_capture_src: Module.SwitchProngSrc,
        inline_case_capture: Air.Inst.Ref,
    ) CompileError!Air.Inst.Ref {
        const sema = spa.sema;
        const mod = sema.mod;
        const operand_ty = sema.typeOf(spa.operand);
        if (operand_ty.zigTypeTag(mod) != .Union) {
            const zir_datas = sema.code.instructions.items(.data);
            const switch_node_offset = zir_datas[spa.switch_block_inst].pl_node.src_node;
            const raw_tag_capture_src: Module.SwitchProngSrc = switch (raw_capture_src) {
                .scalar_capture => |i| .{ .scalar_tag_capture = i },
                .multi_capture => |i| .{ .multi_tag_capture = i },
                .special_capture => .special_tag_capture,
                else => unreachable,
            };
            const capture_src = raw_tag_capture_src.resolve(mod, mod.declPtr(block.src_decl), switch_node_offset, .none);
            const msg = msg: {
                const msg = try sema.errMsg(block, capture_src, "cannot capture tag of non-union type '{}'", .{
                    operand_ty.fmt(mod),
                });
                errdefer msg.destroy(sema.gpa);
                try sema.addDeclaredHereNote(msg, operand_ty);
                break :msg msg;
            };
            return sema.failWithOwnedErrorMsg(msg);
        }
        assert(inline_case_capture != .none);
        return inline_case_capture;
    }

    fn analyzeCapture(
        spa: SwitchProngAnalysis,
        block: *Block,
        capture_byref: bool,
        is_special_prong: bool,
        raw_capture_src: Module.SwitchProngSrc,
        case_vals: []const Air.Inst.Ref,
        inline_case_capture: Air.Inst.Ref,
    ) CompileError!Air.Inst.Ref {
        const sema = spa.sema;
        const mod = sema.mod;

        const zir_datas = sema.code.instructions.items(.data);
        const switch_node_offset = zir_datas[spa.switch_block_inst].pl_node.src_node;

        const operand_ty = sema.typeOf(spa.operand);
        const operand_ptr_ty = if (capture_byref) sema.typeOf(spa.operand_ptr) else undefined;
        const operand_src: LazySrcLoc = .{ .node_offset_switch_operand = switch_node_offset };

        if (inline_case_capture != .none) {
            const item_val = sema.resolveConstValue(block, .unneeded, inline_case_capture, "") catch unreachable;
            if (operand_ty.zigTypeTag(mod) == .Union) {
                const field_index = @as(u32, @intCast(operand_ty.unionTagFieldIndex(item_val, mod).?));
                const union_obj = mod.typeToUnion(operand_ty).?;
                const field_ty = union_obj.fields.values()[field_index].ty;
                if (capture_byref) {
                    const ptr_field_ty = try mod.ptrType(.{
                        .child = field_ty.toIntern(),
                        .flags = .{
                            .is_const = !operand_ptr_ty.ptrIsMutable(mod),
                            .is_volatile = operand_ptr_ty.isVolatilePtr(mod),
                            .address_space = operand_ptr_ty.ptrAddressSpace(mod),
                        },
                    });
                    if (try sema.resolveDefinedValue(block, sema.src, spa.operand_ptr)) |union_ptr| {
                        return Air.internedToRef((try mod.intern(.{ .ptr = .{
                            .ty = ptr_field_ty.toIntern(),
                            .addr = .{ .field = .{
                                .base = union_ptr.toIntern(),
                                .index = field_index,
                            } },
                        } })));
                    }
                    return block.addStructFieldPtr(spa.operand_ptr, field_index, ptr_field_ty);
                } else {
                    if (try sema.resolveDefinedValue(block, sema.src, spa.operand)) |union_val| {
                        const tag_and_val = mod.intern_pool.indexToKey(union_val.toIntern()).un;
                        return Air.internedToRef(tag_and_val.val);
                    }
                    return block.addStructFieldVal(spa.operand, field_index, field_ty);
                }
            } else if (capture_byref) {
                return sema.addConstantMaybeRef(block, operand_ty, item_val, true);
            } else {
                return inline_case_capture;
            }
        }

        if (is_special_prong) {
            if (capture_byref) {
                return spa.operand_ptr;
            }

            switch (operand_ty.zigTypeTag(mod)) {
                .ErrorSet => if (spa.else_error_ty) |ty| {
                    return sema.bitCast(block, ty, spa.operand, operand_src, null);
                } else {
                    try block.addUnreachable(operand_src, false);
                    return Air.Inst.Ref.unreachable_value;
                },
                else => return spa.operand,
            }
        }

        switch (operand_ty.zigTypeTag(mod)) {
            .Union => {
                const union_obj = mod.typeToUnion(operand_ty).?;
                const first_item_val = sema.resolveConstValue(block, .unneeded, case_vals[0], "") catch unreachable;

                const first_field_index = @as(u32, @intCast(operand_ty.unionTagFieldIndex(first_item_val, mod).?));
                const first_field = union_obj.fields.values()[first_field_index];

                const field_tys = try sema.arena.alloc(Type, case_vals.len);
                for (case_vals, field_tys) |item, *field_ty| {
                    const item_val = sema.resolveConstValue(block, .unneeded, item, "") catch unreachable;
                    const field_idx = @as(u32, @intCast(operand_ty.unionTagFieldIndex(item_val, sema.mod).?));
                    field_ty.* = union_obj.fields.values()[field_idx].ty;
                }

                // Fast path: if all the operands are the same type already, we don't need to run
                // peer type resolution (PTR)! This will also allow us to emit simpler code.
                const same_types = for (field_tys[1..]) |field_ty| {
                    if (!field_ty.eql(field_tys[0], sema.mod)) break false;
                } else true;

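                // For example, given `union(enum) { a: u32, b: u16 }`, the prong
                // `.a, .b => |v| ...` captures payloads of two different types, so
                // `v` gets the peer-resolved type of `u32` and `u16` rather than
                // either field type, and the fast path above is skipped.
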
                const capture_ty = if (same_types) field_tys[0] else capture_ty: {
                    // We need values to run PTR on, so make a bunch of undef constants.
                    const dummy_captures = try sema.arena.alloc(Air.Inst.Ref, case_vals.len);
                    for (dummy_captures, field_tys) |*dummy, field_ty| {
                        dummy.* = try mod.undefRef(field_ty);
                    }

                    const case_srcs = try sema.arena.alloc(?LazySrcLoc, case_vals.len);
                    @memset(case_srcs, .unneeded);

                    break :capture_ty sema.resolvePeerTypes(block, .unneeded, dummy_captures, .{ .override = case_srcs }) catch |err| switch (err) {
                        error.NeededSourceLocation => {
                            // This must be a multi-prong so this must be a `multi_capture` src
                            const multi_idx = raw_capture_src.multi_capture;
                            const src_decl_ptr = sema.mod.declPtr(block.src_decl);
                            for (case_srcs, 0..) |*case_src, i| {
                                const raw_case_src: Module.SwitchProngSrc = .{ .multi = .{ .prong = multi_idx, .item = @as(u32, @intCast(i)) } };
                                case_src.* = raw_case_src.resolve(mod, src_decl_ptr, switch_node_offset, .none);
                            }
                            const capture_src = raw_capture_src.resolve(mod, src_decl_ptr, switch_node_offset, .none);
                            _ = sema.resolvePeerTypes(block, capture_src, dummy_captures, .{ .override = case_srcs }) catch |err1| switch (err1) {
                                error.AnalysisFail => {
                                    const msg = sema.err orelse return error.AnalysisFail;
                                    try sema.reparentOwnedErrorMsg(block, capture_src, msg, "capture group with incompatible types", .{});
                                    return error.AnalysisFail;
                                },
                                else => |e| return e,
                            };
                            unreachable;
                        },
                        else => |e| return e,
                    };
                };

                // By-reference captures have some further restrictions which make them easier to emit
                if (capture_byref) {
                    const operand_ptr_info = operand_ptr_ty.ptrInfo(mod);
                    const capture_ptr_ty = try mod.ptrType(.{
                        .child = capture_ty.toIntern(),
                        .flags = .{
                            // TODO: alignment!
                            .is_const = operand_ptr_info.flags.is_const,
                            .is_volatile = operand_ptr_info.flags.is_volatile,
                            .address_space = operand_ptr_info.flags.address_space,
                        },
                    });

                    // By-ref captures of heterogeneous types are only allowed if each field
                    // pointer type is in-memory coercible to the capture pointer type.
                    if (!same_types) {
                        for (field_tys, 0..) |field_ty, i| {
                            const field_ptr_ty = try mod.ptrType(.{
                                .child = field_ty.toIntern(),
                                .flags = .{
                                    // TODO: alignment!
                                    .is_const = operand_ptr_info.flags.is_const,
                                    .is_volatile = operand_ptr_info.flags.is_volatile,
                                    .address_space = operand_ptr_info.flags.address_space,
                                },
                            });
                            if (.ok != try sema.coerceInMemoryAllowed(block, capture_ptr_ty, field_ptr_ty, false, sema.mod.getTarget(), .unneeded, .unneeded)) {
                                const multi_idx = raw_capture_src.multi_capture;
                                const src_decl_ptr = sema.mod.declPtr(block.src_decl);
                                const capture_src = raw_capture_src.resolve(mod, src_decl_ptr, switch_node_offset, .none);
                                const raw_case_src: Module.SwitchProngSrc = .{ .multi = .{ .prong = multi_idx, .item = @as(u32, @intCast(i)) } };
                                const case_src = raw_case_src.resolve(mod, src_decl_ptr, switch_node_offset, .none);
                                const msg = msg: {
                                    const msg = try sema.errMsg(block, capture_src, "capture group with incompatible types", .{});
                                    errdefer msg.destroy(sema.gpa);
                                    try sema.errNote(block, case_src, msg, "pointer type child '{}' cannot cast into resolved pointer type child '{}'", .{
                                        field_ty.fmt(sema.mod),
                                        capture_ty.fmt(sema.mod),
                                    });
                                    try sema.errNote(block, capture_src, msg, "this coercion is only possible when capturing by value", .{});
                                    break :msg msg;
                                };
                                return sema.failWithOwnedErrorMsg(msg);
                            }
                        }
                    }

                    if (try sema.resolveDefinedValue(block, operand_src, spa.operand_ptr)) |op_ptr_val| {
                        if (op_ptr_val.isUndef(mod)) return mod.undefRef(capture_ptr_ty);
                        return Air.internedToRef((try mod.intern(.{ .ptr = .{
                            .ty = capture_ptr_ty.toIntern(),
                            .addr = .{ .field = .{
                                .base = op_ptr_val.toIntern(),
                                .index = first_field_index,
                            } },
                        } })));
                    }

                    try sema.requireRuntimeBlock(block, operand_src, null);
                    return block.addStructFieldPtr(spa.operand_ptr, first_field_index, capture_ptr_ty);
                }

                if (try sema.resolveDefinedValue(block, operand_src, spa.operand)) |operand_val| {
                    if (operand_val.isUndef(mod)) return mod.undefRef(capture_ty);
                    const union_val = mod.intern_pool.indexToKey(operand_val.toIntern()).un;
                    if (union_val.tag.toValue().isUndef(mod)) return mod.undefRef(capture_ty);
                    const uncoerced = Air.internedToRef(union_val.val);
                    return sema.coerce(block, capture_ty, uncoerced, operand_src);
                }

                try sema.requireRuntimeBlock(block, operand_src, null);

                if (same_types) {
                    return block.addStructFieldVal(spa.operand, first_field_index, capture_ty);
                }

                // We may have to emit a switch block which coerces the operand to the capture type.
                // If we can, try to avoid that using in-memory coercions.
                const first_non_imc = in_mem: {
                    for (field_tys, 0..) |field_ty, i| {
                        if (.ok != try sema.coerceInMemoryAllowed(block, capture_ty, field_ty, false, sema.mod.getTarget(), .unneeded, .unneeded)) {
                            break :in_mem i;
                        }
                    }
                    // All fields are in-memory coercible to the resolved type!
                    // Just take the first field and bitcast the result.
                    const uncoerced = try block.addStructFieldVal(spa.operand, first_field_index, first_field.ty);
                    return block.addBitCast(capture_ty, uncoerced);
                };

                // By-val capture with heterogeneous types which are not all in-memory coercible to
                // the resolved capture type. We finally have to fall back to the ugly method.

                // However, let's first track which operands are in-memory coercible. There may well
                // be several, and we can squash all of these cases into the same switch prong using
                // a simple bitcast. We'll make this the 'else' prong.

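                // Concretely: every field whose type is in-memory coercible to
                // `capture_ty` can share one `else` branch that just bitcasts the
                // payload, while each remaining field gets its own case below that
                // performs a full coercion of the loaded payload.
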
                var in_mem_coercible = try std.DynamicBitSet.initFull(sema.arena, field_tys.len);
                in_mem_coercible.unset(first_non_imc);
                {
                    const next = first_non_imc + 1;
                    for (field_tys[next..], next..) |field_ty, i| {
                        if (.ok != try sema.coerceInMemoryAllowed(block, capture_ty, field_ty, false, sema.mod.getTarget(), .unneeded, .unneeded)) {
                            in_mem_coercible.unset(i);
                        }
                    }
                }

                const capture_block_inst = try block.addInstAsIndex(.{
                    .tag = .block,
                    .data = .{
                        .ty_pl = .{
                            .ty = Air.internedToRef(capture_ty.toIntern()),
                            .payload = undefined, // updated below
                        },
                    },
                });

                const prong_count = field_tys.len - in_mem_coercible.count();

                const estimated_extra = prong_count * 6; // 2 for Case, 1 item, probably 3 insts
                var cases_extra = try std.ArrayList(u32).initCapacity(sema.gpa, estimated_extra);
                defer cases_extra.deinit();

                {
                    // Non-bitcast cases
                    var it = in_mem_coercible.iterator(.{ .kind = .unset });
                    while (it.next()) |idx| {
                        var coerce_block = block.makeSubBlock();
                        defer coerce_block.instructions.deinit(sema.gpa);

                        const uncoerced = try coerce_block.addStructFieldVal(spa.operand, @as(u32, @intCast(idx)), field_tys[idx]);
                        const coerced = sema.coerce(&coerce_block, capture_ty, uncoerced, .unneeded) catch |err| switch (err) {
                            error.NeededSourceLocation => {
                                const multi_idx = raw_capture_src.multi_capture;
                                const src_decl_ptr = sema.mod.declPtr(block.src_decl);
                                const raw_case_src: Module.SwitchProngSrc = .{ .multi = .{ .prong = multi_idx, .item = @as(u32, @intCast(idx)) } };
                                const case_src = raw_case_src.resolve(mod, src_decl_ptr, switch_node_offset, .none);
                                _ = try sema.coerce(&coerce_block, capture_ty, uncoerced, case_src);
                                unreachable;
                            },
                            else => |e| return e,
                        };
                        _ = try coerce_block.addBr(capture_block_inst, coerced);

                        try cases_extra.ensureUnusedCapacity(3 + coerce_block.instructions.items.len);
                        cases_extra.appendAssumeCapacity(1); // items_len
                        cases_extra.appendAssumeCapacity(@as(u32, @intCast(coerce_block.instructions.items.len))); // body_len
                        cases_extra.appendAssumeCapacity(@intFromEnum(case_vals[idx])); // item
                        cases_extra.appendSliceAssumeCapacity(coerce_block.instructions.items); // body
                    }
                }
                const else_body_len = len: {
                    // 'else' prong uses a bitcast
                    var coerce_block = block.makeSubBlock();
                    defer coerce_block.instructions.deinit(sema.gpa);

                    const first_imc = in_mem_coercible.findFirstSet().?;
                    const uncoerced = try coerce_block.addStructFieldVal(spa.operand, @as(u32, @intCast(first_imc)), field_tys[first_imc]);
                    const coerced = try coerce_block.addBitCast(capture_ty, uncoerced);
                    _ = try coerce_block.addBr(capture_block_inst, coerced);

                    try cases_extra.appendSlice(coerce_block.instructions.items);
                    break :len coerce_block.instructions.items.len;
                };

                try sema.air_extra.ensureUnusedCapacity(sema.gpa, @typeInfo(Air.SwitchBr).Struct.fields.len +
                    cases_extra.items.len +
                    @typeInfo(Air.Block).Struct.fields.len +
                    1);

                const switch_br_inst = @as(u32, @intCast(sema.air_instructions.len));
                try sema.air_instructions.append(sema.gpa, .{
                    .tag = .switch_br,
                    .data = .{ .pl_op = .{
                        .operand = spa.cond,
                        .payload = sema.addExtraAssumeCapacity(Air.SwitchBr{
                            .cases_len = @as(u32, @intCast(prong_count)),
                            .else_body_len = @as(u32, @intCast(else_body_len)),
                        }),
                    } },
                });
                sema.air_extra.appendSliceAssumeCapacity(cases_extra.items);

                // Set up block body
                sema.air_instructions.items(.data)[capture_block_inst].ty_pl.payload = sema.addExtraAssumeCapacity(Air.Block{
                    .body_len = 1,
                });
                sema.air_extra.appendAssumeCapacity(switch_br_inst);

                return Air.indexToRef(capture_block_inst);
            },
            .ErrorSet => {
                if (capture_byref) {
                    const capture_src = raw_capture_src.resolve(mod, mod.declPtr(block.src_decl), switch_node_offset, .none);
                    return sema.fail(
                        block,
                        capture_src,
                        "error set cannot be captured by reference",
                        .{},
                    );
                }

                if (case_vals.len == 1) {
                    const item_val = sema.resolveConstValue(block, .unneeded, case_vals[0], "") catch unreachable;
                    const item_ty = try mod.singleErrorSetType(item_val.getErrorName(mod).unwrap().?);
                    return sema.bitCast(block, item_ty, spa.operand, operand_src, null);
                }

                var names: InferredErrorSet.NameMap = .{};
                try names.ensureUnusedCapacity(sema.arena, case_vals.len);
                for (case_vals) |err| {
                    const err_val = sema.resolveConstValue(block, .unneeded, err, "") catch unreachable;
                    names.putAssumeCapacityNoClobber(err_val.getErrorName(mod).unwrap().?, {});
                }
                const error_ty = try mod.errorSetFromUnsortedNames(names.keys());
                return sema.bitCast(block, error_ty, spa.operand, operand_src, null);
            },
            else => {
                // In this case the capture value is just the passed-through value
                // of the switch condition.
                if (capture_byref) {
                    return spa.operand_ptr;
                } else {
                    return spa.operand;
                }
            },
        }
    }
};

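// For example, for `var u: union(enum) { a: u8, b: void } = .{ .a = 1 };`,
// `switchCond` below returns the union's enum tag (via `unionToTag`), so the
// emitted `switch_br` branches on the tag rather than on the whole union value.
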
fn switchCond(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    operand: Air.Inst.Ref,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const operand_ty = sema.typeOf(operand);
    switch (operand_ty.zigTypeTag(mod)) {
        .Type,
        .Void,
        .Bool,
        .Int,
        .Float,
        .ComptimeFloat,
        .ComptimeInt,
        .EnumLiteral,
        .Pointer,
        .Fn,
        .ErrorSet,
        .Enum,
        => {
            if (operand_ty.isSlice(mod)) {
                return sema.fail(block, src, "switch on type '{}'", .{operand_ty.fmt(mod)});
            }
            if ((try sema.typeHasOnePossibleValue(operand_ty))) |opv| {
                return Air.internedToRef(opv.toIntern());
            }
            return operand;
        },

        .Union => {
            try sema.resolveTypeFields(operand_ty);
            const enum_ty = operand_ty.unionTagType(mod) orelse {
                const msg = msg: {
                    const msg = try sema.errMsg(block, src, "switch on union with no attached enum", .{});
                    errdefer msg.destroy(sema.gpa);
                    if (operand_ty.declSrcLocOrNull(mod)) |union_src| {
                        try mod.errNoteNonLazy(union_src, msg, "consider 'union(enum)' here", .{});
                    }
                    break :msg msg;
                };
                return sema.failWithOwnedErrorMsg(msg);
            };
            return sema.unionToTag(block, enum_ty, operand, src);
        },

        .ErrorUnion,
        .NoReturn,
        .Array,
        .Struct,
        .Undefined,
        .Null,
        .Optional,
        .Opaque,
        .Vector,
        .Frame,
        .AnyFrame,
        => return sema.fail(block, src, "switch on type '{}'", .{operand_ty.fmt(mod)}),
    }
}

const SwitchErrorSet = std.AutoHashMap(InternPool.NullTerminatedString, Module.SwitchProngSrc);

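// AstGen lowers `switch (x) { ... }` to `switch_block`; when a prong captures by
// reference, e.g. `switch (x) { .a => |*ptr| ptr.*, ... }`, it lowers to
// `switch_block_ref` instead (`operand_is_ref == true` below), in which case the
// resolved operand is a pointer that must first be loaded.
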
fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_ref: bool) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const mod = sema.mod;
    const gpa = sema.gpa;
    const ip = &mod.intern_pool;
    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const src = inst_data.src();
    const src_node_offset = inst_data.src_node;
    const operand_src: LazySrcLoc = .{ .node_offset_switch_operand = src_node_offset };
    const special_prong_src: LazySrcLoc = .{ .node_offset_switch_special_prong = src_node_offset };
    const extra = sema.code.extraData(Zir.Inst.SwitchBlock, inst_data.payload_index);

    const raw_operand: struct { val: Air.Inst.Ref, ptr: Air.Inst.Ref } = blk: {
        const maybe_ptr = try sema.resolveInst(extra.data.operand);
        if (operand_is_ref) {
            const val = try sema.analyzeLoad(block, src, maybe_ptr, operand_src);
            break :blk .{ .val = val, .ptr = maybe_ptr };
        } else {
            break :blk .{ .val = maybe_ptr, .ptr = undefined };
        }
    };

    const operand = try sema.switchCond(block, operand_src, raw_operand.val);

    // AstGen guarantees that the instruction immediately preceding
    // switch_block(_ref) is a dbg_stmt
    const cond_dbg_node_index = inst - 1;

    var header_extra_index: usize = extra.end;

    const scalar_cases_len = extra.data.bits.scalar_cases_len;
    const multi_cases_len = if (extra.data.bits.has_multi_cases) blk: {
        const multi_cases_len = sema.code.extra[header_extra_index];
        header_extra_index += 1;
        break :blk multi_cases_len;
    } else 0;

    const tag_capture_inst: Zir.Inst.Index = if (extra.data.bits.any_has_tag_capture) blk: {
        const tag_capture_inst = sema.code.extra[header_extra_index];
        header_extra_index += 1;
        // SwitchProngAnalysis wants inst_map to have space for the tag capture.
        // Note that the normal capture is referred to via the switch block
        // index, which there is already necessarily space for.
        try sema.inst_map.ensureSpaceForInstructions(gpa, &.{tag_capture_inst});
        break :blk tag_capture_inst;
    } else undefined;

    var case_vals = try std.ArrayListUnmanaged(Air.Inst.Ref).initCapacity(gpa, scalar_cases_len + 2 * multi_cases_len);
    defer case_vals.deinit(gpa);

    const Special = struct {
        body: []const Zir.Inst.Index,
        end: usize,
        capture: Zir.Inst.SwitchBlock.ProngInfo.Capture,
        is_inline: bool,
        has_tag_capture: bool,
    };

    const special_prong = extra.data.bits.specialProng();
    const special: Special = switch (special_prong) {
        .none => .{
            .body = &.{},
            .end = header_extra_index,
            .capture = .none,
            .is_inline = false,
            .has_tag_capture = false,
        },
        .under, .@"else" => blk: {
            const info = @as(Zir.Inst.SwitchBlock.ProngInfo, @bitCast(sema.code.extra[header_extra_index]));
            const extra_body_start = header_extra_index + 1;
            break :blk .{
                .body = sema.code.extra[extra_body_start..][0..info.body_len],
                .end = extra_body_start + info.body_len,
                .capture = info.capture,
                .is_inline = info.is_inline,
                .has_tag_capture = info.has_tag_capture,
            };
        },
    };

    const maybe_union_ty = sema.typeOf(raw_operand.val);
    const union_originally = maybe_union_ty.zigTypeTag(mod) == .Union;

    // Duplicate checking variables later also used for `inline else`.
    var seen_enum_fields: []?Module.SwitchProngSrc = &.{};
    var seen_errors = SwitchErrorSet.init(gpa);
    var range_set = RangeSet.init(gpa, mod);
    var true_count: u8 = 0;
    var false_count: u8 = 0;

    defer {
        range_set.deinit();
        gpa.free(seen_enum_fields);
        seen_errors.deinit();
    }

    var empty_enum = false;

    const operand_ty = sema.typeOf(operand);
    const err_set = operand_ty.zigTypeTag(mod) == .ErrorSet;

    var else_error_ty: ?Type = null;

    // Validate usage of '_' prongs.
    if (special_prong == .under and (!operand_ty.isNonexhaustiveEnum(mod) or union_originally)) {
        const msg = msg: {
            const msg = try sema.errMsg(
                block,
                src,
                "'_' prong only allowed when switching on non-exhaustive enums",
                .{},
            );
            errdefer msg.destroy(gpa);
            try sema.errNote(
                block,
                special_prong_src,
                msg,
                "'_' prong here",
                .{},
            );
            try sema.errNote(
                block,
                src,
                msg,
                "consider using 'else'",
                .{},
            );
            break :msg msg;
        };
        return sema.failWithOwnedErrorMsg(msg);
    }

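    // For example, `_` is only valid when switching on a non-exhaustive enum:
    //
    //     const E = enum(u8) { a, b, _ };
    //     switch (e) { .a, .b => {}, _ => {} }
    //
    // On an exhaustive enum the check above rejects `_` and suggests `else`.
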
    // Validate for duplicate items, missing else prong, and invalid range.
    switch (operand_ty.zigTypeTag(mod)) {
        .Union => unreachable, // handled in switchCond
        .Enum => {
            seen_enum_fields = try gpa.alloc(?Module.SwitchProngSrc, operand_ty.enumFieldCount(mod));
            empty_enum = seen_enum_fields.len == 0 and !operand_ty.isNonexhaustiveEnum(mod);
            @memset(seen_enum_fields, null);
            // `range_set` is used for non-exhaustive enum values that do not correspond to any tags.

            var extra_index: usize = special.end;
            {
                var scalar_i: u32 = 0;
                while (scalar_i < scalar_cases_len) : (scalar_i += 1) {
                    const item_ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_index]));
                    extra_index += 1;
                    const info = @as(Zir.Inst.SwitchBlock.ProngInfo, @bitCast(sema.code.extra[extra_index]));
                    extra_index += 1 + info.body_len;

                    case_vals.appendAssumeCapacity(try sema.validateSwitchItemEnum(
                        block,
                        seen_enum_fields,
                        &range_set,
                        item_ref,
                        operand_ty,
                        src_node_offset,
                        .{ .scalar = scalar_i },
                    ));
                }
            }
            {
                var multi_i: u32 = 0;
                while (multi_i < multi_cases_len) : (multi_i += 1) {
                    const items_len = sema.code.extra[extra_index];
                    extra_index += 1;
                    const ranges_len = sema.code.extra[extra_index];
                    extra_index += 1;
                    const info = @as(Zir.Inst.SwitchBlock.ProngInfo, @bitCast(sema.code.extra[extra_index]));
                    extra_index += 1;
                    const items = sema.code.refSlice(extra_index, items_len);
                    extra_index += items_len + info.body_len;

                    try case_vals.ensureUnusedCapacity(gpa, items.len);
                    for (items, 0..) |item_ref, item_i| {
                        case_vals.appendAssumeCapacity(try sema.validateSwitchItemEnum(
                            block,
                            seen_enum_fields,
                            &range_set,
                            item_ref,
                            operand_ty,
                            src_node_offset,
                            .{ .multi = .{ .prong = multi_i, .item = @as(u32, @intCast(item_i)) } },
                        ));
                    }

                    try sema.validateSwitchNoRange(block, ranges_len, operand_ty, src_node_offset);
                }
            }
            const all_tags_handled = for (seen_enum_fields) |seen_src| {
                if (seen_src == null) break false;
            } else true;

            if (special_prong == .@"else") {
                if (all_tags_handled and !operand_ty.isNonexhaustiveEnum(mod)) return sema.fail(
                    block,
                    special_prong_src,
                    "unreachable else prong; all cases already handled",
                    .{},
                );
            } else if (!all_tags_handled) {
                const msg = msg: {
                    const msg = try sema.errMsg(
                        block,
                        src,
                        "switch must handle all possibilities",
                        .{},
                    );
                    errdefer msg.destroy(sema.gpa);
                    for (seen_enum_fields, 0..) |seen_src, i| {
                        if (seen_src != null) continue;

                        const field_name = operand_ty.enumFieldName(i, mod);
                        try sema.addFieldErrNote(
                            operand_ty,
                            i,
                            msg,
                            "unhandled enumeration value: '{}'",
                            .{field_name.fmt(&mod.intern_pool)},
                        );
                    }
                    try mod.errNoteNonLazy(
                        operand_ty.declSrcLoc(mod),
                        msg,
                        "enum '{}' declared here",
                        .{operand_ty.fmt(mod)},
                    );
                    break :msg msg;
                };
                return sema.failWithOwnedErrorMsg(msg);
            } else if (special_prong == .none and operand_ty.isNonexhaustiveEnum(mod) and !union_originally) {
                return sema.fail(
                    block,
                    src,
                    "switch on non-exhaustive enum must include 'else' or '_' prong",
                    .{},
                );
            }
        },
        .ErrorSet => {
            var extra_index: usize = special.end;
            {
                var scalar_i: u32 = 0;
                while (scalar_i < scalar_cases_len) : (scalar_i += 1) {
                    const item_ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_index]));
                    extra_index += 1;
                    const info = @as(Zir.Inst.SwitchBlock.ProngInfo, @bitCast(sema.code.extra[extra_index]));
                    extra_index += 1 + info.body_len;

                    case_vals.appendAssumeCapacity(try sema.validateSwitchItemError(
                        block,
                        &seen_errors,
                        item_ref,
                        operand_ty,
                        src_node_offset,
                        .{ .scalar = scalar_i },
                    ));
                }
            }
            {
                var multi_i: u32 = 0;
                while (multi_i < multi_cases_len) : (multi_i += 1) {
                    const items_len = sema.code.extra[extra_index];
                    extra_index += 1;
                    const ranges_len = sema.code.extra[extra_index];
                    extra_index += 1;
                    const info = @as(Zir.Inst.SwitchBlock.ProngInfo, @bitCast(sema.code.extra[extra_index]));
                    extra_index += 1;
                    const items = sema.code.refSlice(extra_index, items_len);
                    extra_index += items_len + info.body_len;

                    try case_vals.ensureUnusedCapacity(gpa, items.len);
                    for (items, 0..) |item_ref, item_i| {
                        case_vals.appendAssumeCapacity(try sema.validateSwitchItemError(
                            block,
                            &seen_errors,
                            item_ref,
                            operand_ty,
                            src_node_offset,
                            .{ .multi = .{ .prong = multi_i, .item = @as(u32, @intCast(item_i)) } },
                        ));
                    }

                    try sema.validateSwitchNoRange(block, ranges_len, operand_ty, src_node_offset);
                }
            }

            switch (try sema.resolveInferredErrorSetTy(block, src, operand_ty.toIntern())) {
                .anyerror_type => {
                    if (special_prong != .@"else") {
                        return sema.fail(
                            block,
                            src,
                            "else prong required when switching on type 'anyerror'",
                            .{},
                        );
                    }
                    else_error_ty = Type.anyerror;
                },
                else => |err_set_ty_index| else_validation: {
                    const error_names = ip.indexToKey(err_set_ty_index).error_set_type.names;
                    var maybe_msg: ?*Module.ErrorMsg = null;
                    errdefer if (maybe_msg) |msg| msg.destroy(sema.gpa);

                    for (error_names.get(ip)) |error_name| {
                        if (!seen_errors.contains(error_name) and special_prong != .@"else") {
                            const msg = maybe_msg orelse blk: {
                                maybe_msg = try sema.errMsg(
                                    block,
                                    src,
                                    "switch must handle all possibilities",
                                    .{},
                                );
                                break :blk maybe_msg.?;
                            };

                            try sema.errNote(
                                block,
                                src,
                                msg,
                                "unhandled error value: 'error.{}'",
                                .{error_name.fmt(ip)},
                            );
                        }
                    }

                    if (maybe_msg) |msg| {
                        maybe_msg = null;
                        try sema.addDeclaredHereNote(msg, operand_ty);
                        return sema.failWithOwnedErrorMsg(msg);
                    }

                    if (special_prong == .@"else" and
                        seen_errors.count() == error_names.len)
                    {
                        // In order to enable common patterns for generic code allow simple else bodies
                        // else => unreachable,
                        // else => return,
                        // else => |e| return e,
                        // even if all the possible errors were already handled.
                        const tags = sema.code.instructions.items(.tag);
                        for (special.body) |else_inst| switch (tags[else_inst]) {
                            .dbg_block_begin,
                            .dbg_block_end,
                            .dbg_stmt,
                            .dbg_var_val,
                            .ret_type,
                            .as_node,
                            .ret_node,
                            .@"unreachable",
                            .@"defer",
                            .defer_err_code,
                            .err_union_code,
                            .ret_err_value_code,
                            .restore_err_ret_index,
                            .is_non_err,
                            .ret_is_non_err,
                            .condbr,
                            => {},
                            else => break,
                        } else break :else_validation;

                        return sema.fail(
                            block,
                            special_prong_src,
                            "unreachable else prong; all cases already handled",
                            .{},
                        );
                    }

                    var names: InferredErrorSet.NameMap = .{};
                    try names.ensureUnusedCapacity(sema.arena, error_names.len);
                    for (error_names.get(ip)) |error_name| {
                        if (seen_errors.contains(error_name)) continue;

                        names.putAssumeCapacityNoClobber(error_name, {});
                    }
                    // No need to keep the hash map metadata correct; here we
                    // extract the (sorted) keys only.
                    else_error_ty = try mod.errorSetFromUnsortedNames(names.keys());
                },
            }
        },
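        // Note on the error-set case above: an `else` body like
        // `else => |e| return e` is tolerated even when every error is already
        // listed, and `else_error_ty` is narrowed to only the errors that no
        // other prong handles.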
        .Int, .ComptimeInt => {
            var extra_index: usize = special.end;
            {
                var scalar_i: u32 = 0;
                while (scalar_i < scalar_cases_len) : (scalar_i += 1) {
                    const item_ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_index]));
                    extra_index += 1;
                    const info = @as(Zir.Inst.SwitchBlock.ProngInfo, @bitCast(sema.code.extra[extra_index]));
                    extra_index += 1 + info.body_len;

                    case_vals.appendAssumeCapacity(try sema.validateSwitchItemInt(
                        block,
                        &range_set,
                        item_ref,
                        operand_ty,
                        src_node_offset,
                        .{ .scalar = scalar_i },
                    ));
                }
            }
            {
                var multi_i: u32 = 0;
                while (multi_i < multi_cases_len) : (multi_i += 1) {
                    const items_len = sema.code.extra[extra_index];
                    extra_index += 1;
                    const ranges_len = sema.code.extra[extra_index];
                    extra_index += 1;
                    const info = @as(Zir.Inst.SwitchBlock.ProngInfo, @bitCast(sema.code.extra[extra_index]));
                    extra_index += 1;
                    const items = sema.code.refSlice(extra_index, items_len);
                    extra_index += items_len;

                    try case_vals.ensureUnusedCapacity(gpa, items.len);
                    for (items, 0..) |item_ref, item_i| {
                        case_vals.appendAssumeCapacity(try sema.validateSwitchItemInt(
                            block,
                            &range_set,
                            item_ref,
                            operand_ty,
                            src_node_offset,
                            .{ .multi = .{ .prong = multi_i, .item = @as(u32, @intCast(item_i)) } },
                        ));
                    }

                    try case_vals.ensureUnusedCapacity(gpa, 2 * ranges_len);
                    var range_i: u32 = 0;
                    while (range_i < ranges_len) : (range_i += 1) {
                        const item_first = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_index]));
                        extra_index += 1;
                        const item_last = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_index]));
                        extra_index += 1;

                        const vals = try sema.validateSwitchRange(
                            block,
                            &range_set,
                            item_first,
                            item_last,
                            operand_ty,
                            src_node_offset,
                            .{ .range = .{ .prong = multi_i, .item = range_i } },
                        );
                        case_vals.appendAssumeCapacity(vals[0]);
                        case_vals.appendAssumeCapacity(vals[1]);
                    }

                    extra_index += info.body_len;
                }
            }

            check_range: {
                if (operand_ty.zigTypeTag(mod) == .Int) {
                    const min_int = try operand_ty.minInt(mod, operand_ty);
                    const max_int = try operand_ty.maxInt(mod, operand_ty);
                    if (try range_set.spans(min_int.toIntern(), max_int.toIntern())) {
                        if (special_prong == .@"else") {
                            return sema.fail(
                                block,
                                special_prong_src,
                                "unreachable else prong; all cases already handled",
                                .{},
                            );
                        }
                        break :check_range;
                    }
                }
                if (special_prong != .@"else") {
                    return sema.fail(
                        block,
                        src,
                        "switch must handle all possibilities",
                        .{},
                    );
                }
            }
        },
        .Bool => {
            var extra_index: usize = special.end;
            {
                var scalar_i: u32 = 0;
                while (scalar_i < scalar_cases_len) : (scalar_i += 1) {
                    const item_ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_index]));
                    extra_index += 1;
                    const info = @as(Zir.Inst.SwitchBlock.ProngInfo, @bitCast(sema.code.extra[extra_index]));
                    extra_index += 1 + info.body_len;

                    case_vals.appendAssumeCapacity(try sema.validateSwitchItemBool(
                        block,
                        &true_count,
                        &false_count,
                        item_ref,
                        src_node_offset,
                        .{ .scalar = scalar_i },
                    ));
                }
            }
            {
                var multi_i: u32 = 0;
                while (multi_i < multi_cases_len) : (multi_i += 1) {
                    const items_len = sema.code.extra[extra_index];
                    extra_index += 1;
                    const ranges_len = sema.code.extra[extra_index];
                    extra_index += 1;
                    const info = @as(Zir.Inst.SwitchBlock.ProngInfo, @bitCast(sema.code.extra[extra_index]));
                    extra_index += 1;
                    const items = sema.code.refSlice(extra_index, items_len);
                    extra_index += items_len + info.body_len;

                    try case_vals.ensureUnusedCapacity(gpa, items.len);
                    for (items, 0..) |item_ref, item_i| {
                        case_vals.appendAssumeCapacity(try sema.validateSwitchItemBool(
                            block,
                            &true_count,
                            &false_count,
                            item_ref,
                            src_node_offset,
                            .{ .multi = .{ .prong = multi_i, .item = @as(u32, @intCast(item_i)) } },
                        ));
                    }

                    try sema.validateSwitchNoRange(block, ranges_len, operand_ty, src_node_offset);
                }
            }
            switch (special_prong) {
                .@"else" => {
                    if (true_count + false_count == 2) {
                        return sema.fail(
                            block,
                            special_prong_src,
                            "unreachable else prong; all cases already handled",
                            .{},
                        );
                    }
                },
                .under, .none => {
                    if (true_count + false_count < 2) {
                        return sema.fail(
                            block,
                            src,
                            "switch must handle all possibilities",
                            .{},
                        );
                    }
                },
            }
        },
        .EnumLiteral, .Void, .Fn, .Pointer, .Type => {
            if (special_prong != .@"else") {
                return sema.fail(
                    block,
                    src,
                    "else prong required when switching on type '{}'",
                    .{operand_ty.fmt(mod)},
                );
            }

            var seen_values = ValueSrcMap{};
            defer seen_values.deinit(gpa);

            var extra_index: usize = special.end;
            {
                var scalar_i: u32 = 0;
                while (scalar_i < scalar_cases_len) : (scalar_i += 1) {
                    const item_ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_index]));
                    extra_index += 1;
                    const info = @as(Zir.Inst.SwitchBlock.ProngInfo, @bitCast(sema.code.extra[extra_index]));
                    extra_index += 1;
                    extra_index += info.body_len;

                    case_vals.appendAssumeCapacity(try sema.validateSwitchItemSparse(
                        block,
                        &seen_values,
                        item_ref,
                        operand_ty,
                        src_node_offset,
                        .{ .scalar = scalar_i },
                    ));
                }
            }
            {
                var multi_i: u32 = 0;
                while (multi_i < multi_cases_len) : (multi_i += 1) {
                    const items_len = sema.code.extra[extra_index];
                    extra_index += 1;
                    const ranges_len = sema.code.extra[extra_index];
                    extra_index += 1;
                    const info = @as(Zir.Inst.SwitchBlock.ProngInfo, @bitCast(sema.code.extra[extra_index]));
                    extra_index += 1;
                    const items = sema.code.refSlice(extra_index, items_len);
                    extra_index += items_len + info.body_len;

                    try case_vals.ensureUnusedCapacity(gpa, items.len);
                    for (items, 0..) |item_ref, item_i| {
                        case_vals.appendAssumeCapacity(try sema.validateSwitchItemSparse(
                            block,
                            &seen_values,
                            item_ref,
                            operand_ty,
                            src_node_offset,
                            .{ .multi = .{ .prong = multi_i, .item = @as(u32, @intCast(item_i)) } },
                        ));
                    }

                    try sema.validateSwitchNoRange(block, ranges_len, operand_ty, src_node_offset);
                }
            }
        },

        .ErrorUnion,
        .NoReturn,
        .Array,
        .Struct,
        .Undefined,
        .Null,
        .Optional,
        .Opaque,
        .Vector,
        .Frame,
        .AnyFrame,
        .ComptimeFloat,
        .Float,
        => return sema.fail(block, operand_src, "invalid switch operand type '{}'", .{
            operand_ty.fmt(mod),
        }),
    }

    const spa: SwitchProngAnalysis = .{
        .sema = sema,
        .parent_block = block,
        .operand = raw_operand.val,
        .operand_ptr = raw_operand.ptr,
        .cond = operand,
        .else_error_ty = else_error_ty,
        .switch_block_inst = inst,
        .tag_capture_inst = tag_capture_inst,
    };

    const block_inst = @as(Air.Inst.Index, @intCast(sema.air_instructions.len));
    try sema.air_instructions.append(gpa, .{
        .tag = .block,
        .data = undefined,
    });
    var label: Block.Label = .{
        .zir_block = inst,
        .merges = .{
            .src_locs = .{},
            .results = .{},
            .br_list = .{},
            .block_inst = block_inst,
        },
    };

    var child_block: Block = .{
        .parent = block,
        .sema = sema,
        .src_decl = block.src_decl,
        .namespace = block.namespace,
        .wip_capture_scope = block.wip_capture_scope,
        .instructions = .{},
        .label = &label,
        .inlining = block.inlining,
        .is_comptime = block.is_comptime,
        .comptime_reason = block.comptime_reason,
        .is_typeof = block.is_typeof,
        .c_import_buf = block.c_import_buf,
        .runtime_cond = block.runtime_cond,
        .runtime_loop = block.runtime_loop,
        .runtime_index = block.runtime_index,
        .error_return_trace_index = block.error_return_trace_index,
    };
    const merges = &child_block.label.?.merges;
    defer child_block.instructions.deinit(gpa);
    defer merges.deinit(gpa);

    if (try sema.resolveDefinedValue(&child_block, src, operand)) |operand_val| {
        const resolved_operand_val = try sema.resolveLazyValue(operand_val);
        var extra_index: usize = special.end;
        {
            var scalar_i: usize = 0;
            while (scalar_i < scalar_cases_len) : (scalar_i += 1) {
                extra_index += 1;
                const info = @as(Zir.Inst.SwitchBlock.ProngInfo, @bitCast(sema.code.extra[extra_index]));
                extra_index += 1;
                const body = sema.code.extra[extra_index..][0..info.body_len];
                extra_index += info.body_len;

                const item = case_vals.items[scalar_i];
                const item_val = sema.resolveConstValue(&child_block, .unneeded, item, "") catch unreachable;
                if (operand_val.eql(item_val, operand_ty, sema.mod)) {
                    if (err_set) try sema.maybeErrorUnwrapComptime(&child_block, body, operand);
                    return spa.resolveProngComptime(
                        &child_block,
                        .normal,
                        body,
                        info.capture,
                        .{ .scalar_capture = @as(u32, @intCast(scalar_i)) },
                        &.{item},
                        if (info.is_inline) operand else .none,
                        info.has_tag_capture,
                        merges,
                    );
                }
            }
        }
        {
            var multi_i: usize = 0;
            var case_val_idx: usize = scalar_cases_len;
            while (multi_i < multi_cases_len) : (multi_i += 1) {
                const items_len = sema.code.extra[extra_index];
                extra_index += 1;
                const ranges_len = sema.code.extra[extra_index];
                extra_index += 1;
                const info = @as(Zir.Inst.SwitchBlock.ProngInfo, @bitCast(sema.code.extra[extra_index]));
                extra_index += 1 + items_len;
                const body = sema.code.extra[extra_index + 2 * ranges_len ..][0..info.body_len];

                const items = case_vals.items[case_val_idx..][0..items_len];
                case_val_idx += items_len;

                for (items) |item| {
                    // Validation above ensured these will succeed.
                    const item_val = sema.resolveConstValue(&child_block, .unneeded, item, "") catch unreachable;
                    if (operand_val.eql(item_val, operand_ty, sema.mod)) {
                        if (err_set) try sema.maybeErrorUnwrapComptime(&child_block, body, operand);
                        return spa.resolveProngComptime(
                            &child_block,
                            .normal,
                            body,
                            info.capture,
                            .{ .multi_capture = @as(u32, @intCast(multi_i)) },
                            items,
                            if (info.is_inline) operand else .none,
                            info.has_tag_capture,
                            merges,
                        );
                    }
                }

                var range_i: usize = 0;
                while (range_i < ranges_len) : (range_i += 1) {
                    const range_items = case_vals.items[case_val_idx..][0..2];
                    extra_index += 2;
                    case_val_idx += 2;

                    // Validation above ensured these will succeed.
                    const first_val = sema.resolveConstValue(&child_block, .unneeded, range_items[0], "") catch unreachable;
                    const last_val = sema.resolveConstValue(&child_block, .unneeded, range_items[1], "") catch unreachable;
                    if ((try sema.compareAll(resolved_operand_val, .gte, first_val, operand_ty)) and
                        (try sema.compareAll(resolved_operand_val, .lte, last_val, operand_ty)))
                    {
                        if (err_set) try sema.maybeErrorUnwrapComptime(&child_block, body, operand);
                        return spa.resolveProngComptime(
                            &child_block,
                            .normal,
                            body,
                            info.capture,
                            .{ .multi_capture = @as(u32, @intCast(multi_i)) },
                            undefined, // case_vals may be undefined for ranges
                            if (info.is_inline) operand else .none,
                            info.has_tag_capture,
                            merges,
                        );
                    }
                }

                extra_index += info.body_len;
            }
        }
        if (err_set) try sema.maybeErrorUnwrapComptime(&child_block, special.body, operand);
        if (empty_enum) {
            return Air.Inst.Ref.void_value;
        }

        return spa.resolveProngComptime(
            &child_block,
            .special,
            special.body,
            special.capture,
            .special_capture,
            undefined, // case_vals may be undefined for special prongs
            if (special.is_inline) operand else .none,
            special.has_tag_capture,
            merges,
        );
    }

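    // Runtime lowering starts here. Scalar items each become one case of the
    // AIR `switch_br`; prongs that contain ranges cannot be expressed that way
    // and are instead lowered below as a chain of `cond_br` comparisons whose
    // final `else` becomes the switch's else body.
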
    if (scalar_cases_len + multi_cases_len == 0 and !special.is_inline) {
        if (empty_enum) {
            return Air.Inst.Ref.void_value;
        }
        if (special_prong == .none) {
            return sema.fail(block, src, "switch must handle all possibilities", .{});
        }
        if (err_set and try sema.maybeErrorUnwrap(block, special.body, operand, operand_src)) {
            return Air.Inst.Ref.unreachable_value;
        }
        if (mod.backendSupportsFeature(.is_named_enum_value) and block.wantSafety() and operand_ty.zigTypeTag(mod) == .Enum and
            (!operand_ty.isNonexhaustiveEnum(mod) or union_originally))
        {
            try sema.zirDbgStmt(block, cond_dbg_node_index);
            const ok = try block.addUnOp(.is_named_enum_value, operand);
            try sema.addSafetyCheck(block, src, ok, .corrupt_switch);
        }

        return spa.resolveProngComptime(
            &child_block,
            .special,
            special.body,
            special.capture,
            .special_capture,
            undefined, // case_vals may be undefined for special prongs
            .none,
            false,
            merges,
        );
    }

    if (child_block.is_comptime) {
        _ = sema.resolveConstValue(&child_block, operand_src, operand, "condition in comptime switch must be comptime-known") catch |err| {
            if (err == error.AnalysisFail and child_block.comptime_reason != null) try child_block.comptime_reason.?.explain(sema, sema.err);
            return err;
        };
        unreachable;
    }

    const estimated_cases_extra = (scalar_cases_len + multi_cases_len) *
        @typeInfo(Air.SwitchBr.Case).Struct.fields.len + 2;
    var cases_extra = try std.ArrayListUnmanaged(u32).initCapacity(gpa, estimated_cases_extra);
    defer cases_extra.deinit(gpa);

    var case_block = child_block.makeSubBlock();
    case_block.runtime_loop = null;
    case_block.runtime_cond = operand_src;
    case_block.runtime_index.increment();
    defer case_block.instructions.deinit(gpa);

    var extra_index: usize = special.end;

    var scalar_i: usize = 0;
    while (scalar_i < scalar_cases_len) : (scalar_i += 1) {
        extra_index += 1;
        const info = @as(Zir.Inst.SwitchBlock.ProngInfo, @bitCast(sema.code.extra[extra_index]));
        extra_index += 1;
        const body = sema.code.extra[extra_index..][0..info.body_len];
        extra_index += info.body_len;

        var wip_captures = try WipCaptureScope.init(gpa, child_block.wip_capture_scope);
        defer wip_captures.deinit();

        case_block.instructions.shrinkRetainingCapacity(0);
        case_block.wip_capture_scope = wip_captures.scope;

        const item = case_vals.items[scalar_i];
        // `item` is already guaranteed to be comptime-known.

        const analyze_body = if (union_originally) blk: {
            const item_val = sema.resolveConstLazyValue(block, .unneeded, item, "") catch unreachable;
            const field_ty = maybe_union_ty.unionFieldType(item_val, mod);
            break :blk field_ty.zigTypeTag(mod) != .NoReturn;
        } else true;

        if (err_set and try sema.maybeErrorUnwrap(&case_block, body, operand, operand_src)) {
            // nothing to do here
        } else if (analyze_body) {
            try spa.analyzeProngRuntime(
                &case_block,
                .normal,
                body,
                info.capture,
                .{ .scalar_capture = @as(u32, @intCast(scalar_i)) },
                &.{item},
                if (info.is_inline) item else .none,
                info.has_tag_capture,
            );
        } else {
            _ = try case_block.addNoOp(.unreach);
        }

        try wip_captures.finalize();

        try cases_extra.ensureUnusedCapacity(gpa, 3 + case_block.instructions.items.len);
        cases_extra.appendAssumeCapacity(1); // items_len
        cases_extra.appendAssumeCapacity(@as(u32, @intCast(case_block.instructions.items.len)));
        cases_extra.appendAssumeCapacity(@intFromEnum(item));
        cases_extra.appendSliceAssumeCapacity(case_block.instructions.items);
    }

    var is_first = true;
    var prev_cond_br: Air.Inst.Index = undefined;
    var first_else_body: []const Air.Inst.Index = &.{};
    defer gpa.free(first_else_body);
    var prev_then_body: []const Air.Inst.Index = &.{};
    defer gpa.free(prev_then_body);

    var cases_len = scalar_cases_len;
    var case_val_idx: usize = scalar_cases_len;
    var multi_i: u32 = 0;
    while (multi_i < multi_cases_len) : (multi_i += 1) {
        const items_len = sema.code.extra[extra_index];
        extra_index += 1;
        const ranges_len = sema.code.extra[extra_index];
        extra_index += 1;
        const info = @as(Zir.Inst.SwitchBlock.ProngInfo, @bitCast(sema.code.extra[extra_index]));
        extra_index += 1 + items_len;

        const items = case_vals.items[case_val_idx..][0..items_len];
        case_val_idx += items_len;

        case_block.instructions.shrinkRetainingCapacity(0);
        case_block.wip_capture_scope = child_block.wip_capture_scope;

        // Generate all possible cases as scalar prongs.
        if (info.is_inline) {
            const body_start = extra_index + 2 * ranges_len;
            const body = sema.code.extra[body_start..][0..info.body_len];
            var emit_bb = false;

            var range_i: u32 = 0;
            while (range_i < ranges_len) : (range_i += 1) {
                const range_items = case_vals.items[case_val_idx..][0..2];
                extra_index += 2;
                case_val_idx += 2;

                const item_first_ref = range_items[0];
                const item_last_ref = range_items[1];

                var item = sema.resolveConstValue(block, .unneeded, item_first_ref, undefined) catch unreachable;
                const item_last = sema.resolveConstValue(block, .unneeded, item_last_ref, undefined) catch unreachable;

                while (item.compareScalar(.lte, item_last, operand_ty, mod)) : ({
                    // Previous validation has resolved any possible lazy values.
                    item = sema.intAddScalar(item, try mod.intValue(operand_ty, 1), operand_ty) catch |err| switch (err) {
                        error.Overflow => unreachable,
                        else => |e| return e,
                    };
                }) {
                    cases_len += 1;

                    const item_ref = Air.internedToRef(item.toIntern());

                    case_block.instructions.shrinkRetainingCapacity(0);
                    case_block.wip_capture_scope = child_block.wip_capture_scope;

                    if (emit_bb) sema.emitBackwardBranch(block, .unneeded) catch |err| switch (err) {
                        error.NeededSourceLocation => {
                            const case_src = Module.SwitchProngSrc{ .range = .{ .prong = multi_i, .item = range_i } };
                            const decl = mod.declPtr(case_block.src_decl);
                            try sema.emitBackwardBranch(block, case_src.resolve(mod, decl, src_node_offset, .none));
                            unreachable;
                        },
                        else => return err,
                    };
                    emit_bb = true;

                    try spa.analyzeProngRuntime(
                        &case_block,
                        .normal,
                        body,
                        info.capture,
                        .{ .multi_capture = multi_i },
                        undefined, // case_vals may be undefined for ranges
                        item_ref,
                        info.has_tag_capture,
                    );

                    try cases_extra.ensureUnusedCapacity(gpa, 3 + case_block.instructions.items.len);
                    cases_extra.appendAssumeCapacity(1); // items_len
                    cases_extra.appendAssumeCapacity(@as(u32, @intCast(case_block.instructions.items.len)));
                    cases_extra.appendAssumeCapacity(@intFromEnum(item_ref));
                    cases_extra.appendSliceAssumeCapacity(case_block.instructions.items);

                    if (item.compareScalar(.eq, item_last, operand_ty, mod)) break;
                }
            }

            for (items, 0..) |item, item_i| {
                cases_len += 1;

                case_block.instructions.shrinkRetainingCapacity(0);
                case_block.wip_capture_scope = child_block.wip_capture_scope;

                const analyze_body = if (union_originally) blk: {
                    const item_val = sema.resolveConstValue(block, .unneeded, item, undefined) catch unreachable;
                    const field_ty = maybe_union_ty.unionFieldType(item_val, mod);
                    break :blk field_ty.zigTypeTag(mod) != .NoReturn;
                } else true;

                if (emit_bb) sema.emitBackwardBranch(block, .unneeded) catch |err| switch (err) {
                    error.NeededSourceLocation => {
                        const case_src = Module.SwitchProngSrc{ .multi = .{ .prong = multi_i, .item = @as(u32, @intCast(item_i)) } };
                        const decl = mod.declPtr(case_block.src_decl);
                        try sema.emitBackwardBranch(block, case_src.resolve(mod, decl, src_node_offset, .none));
                        unreachable;
                    },
                    else => return err,
                };
                emit_bb = true;

                if (analyze_body) {
                    try spa.analyzeProngRuntime(
                        &case_block,
                        .normal,
                        body,
                        info.capture,
                        .{ .multi_capture = multi_i },
                        &.{item},
                        item,
                        info.has_tag_capture,
                    );
                } else {
                    _ = try case_block.addNoOp(.unreach);
                }

                try cases_extra.ensureUnusedCapacity(gpa, 3 + case_block.instructions.items.len);
                cases_extra.appendAssumeCapacity(1); // items_len
                cases_extra.appendAssumeCapacity(@as(u32, @intCast(case_block.instructions.items.len)));
                cases_extra.appendAssumeCapacity(@intFromEnum(item));
                cases_extra.appendSliceAssumeCapacity(case_block.instructions.items);
            }

            extra_index += info.body_len;
            continue;
        }

        var any_ok: Air.Inst.Ref = .none;

        // If there are any ranges, we have to put all the items into the
        // else prong. Otherwise, we can take advantage of multiple items
        // mapping to the same body.
        if (ranges_len == 0) {
            cases_len += 1;

            const analyze_body = if (union_originally)
                for (items) |item| {
                    const item_val = sema.resolveConstValue(block, .unneeded, item, "") catch unreachable;
                    const field_ty = maybe_union_ty.unionFieldType(item_val, mod);
                    if (field_ty.zigTypeTag(mod) != .NoReturn) break true;
                } else false
            else
                true;

            const body = sema.code.extra[extra_index..][0..info.body_len];
            extra_index += info.body_len;
            if (err_set and try sema.maybeErrorUnwrap(&case_block, body, operand, operand_src)) {
                // nothing to do here
            } else if (analyze_body) {
                try spa.analyzeProngRuntime(
                    &case_block,
                    .normal,
                    body,
                    info.capture,
                    .{ .multi_capture = multi_i },
                    items,
                    .none,
                    false,
                );
            } else {
                _ = try case_block.addNoOp(.unreach);
            }

            try cases_extra.ensureUnusedCapacity(gpa, 2 + items.len +
                case_block.instructions.items.len);

            cases_extra.appendAssumeCapacity(@as(u32, @intCast(items.len)));
            cases_extra.appendAssumeCapacity(@as(u32, @intCast(case_block.instructions.items.len)));

            for (items) |item| {
                cases_extra.appendAssumeCapacity(@intFromEnum(item));
            }

            cases_extra.appendSliceAssumeCapacity(case_block.instructions.items);
        } else {
            for (items) |item| {
                const cmp_ok = try case_block.addBinOp(if (case_block.float_mode == .Optimized) .cmp_eq_optimized else .cmp_eq, operand, item);
                if (any_ok != .none) {
                    any_ok = try case_block.addBinOp(.bool_or, any_ok, cmp_ok);
                } else {
                    any_ok = cmp_ok;
                }
            }

            var range_i: usize = 0;
            while (range_i < ranges_len) : (range_i += 1) {
                const range_items = case_vals.items[case_val_idx..][0..2];
                extra_index += 2;
                case_val_idx += 2;

                const item_first = range_items[0];
                const item_last = range_items[1];

                // operand >= first and operand <= last
                const range_first_ok = try case_block.addBinOp(
                    if (case_block.float_mode == .Optimized) .cmp_gte_optimized else .cmp_gte,
                    operand,
                    item_first,
                );
                const range_last_ok = try case_block.addBinOp(
                    if (case_block.float_mode == .Optimized) .cmp_lte_optimized else .cmp_lte,
                    operand,
                    item_last,
                );
                const range_ok = try case_block.addBinOp(
                    .bool_and,
                    range_first_ok,
                    range_last_ok,
                );
                if (any_ok != .none) {
                    any_ok = try case_block.addBinOp(.bool_or, any_ok, range_ok);
                } else {
                    any_ok = range_ok;
                }
            }

            const new_cond_br = try case_block.addInstAsIndex(.{ .tag = .cond_br, .data = .{
                .pl_op = .{
                    .operand = any_ok,
                    .payload = undefined,
                },
            } });
            var cond_body = try case_block.instructions.toOwnedSlice(gpa);
            defer gpa.free(cond_body);

            var wip_captures = try WipCaptureScope.init(gpa, child_block.wip_capture_scope);
            defer wip_captures.deinit();

            case_block.instructions.shrinkRetainingCapacity(0);
            case_block.wip_capture_scope = wip_captures.scope;

            const body = sema.code.extra[extra_index..][0..info.body_len];
            extra_index += info.body_len;
            if (err_set and try sema.maybeErrorUnwrap(&case_block, body, operand, operand_src)) {
                // nothing to do here
            } else {
                try spa.analyzeProngRuntime(
                    &case_block,
                    .normal,
                    body,
                    info.capture,
                    .{ .multi_capture = multi_i },
                    items,
                    .none,
                    false,
                );
            }

            try wip_captures.finalize();

            if (is_first) {
                is_first = false;
                first_else_body = cond_body;
                cond_body = &.{};
            } else {
                try sema.air_extra.ensureUnusedCapacity(
                    gpa,
                    @typeInfo(Air.CondBr).Struct.fields.len + prev_then_body.len + cond_body.len,
                );

                sema.air_instructions.items(.data)[prev_cond_br].pl_op.payload =
                    sema.addExtraAssumeCapacity(Air.CondBr{
                        .then_body_len = @as(u32, @intCast(prev_then_body.len)),
                        .else_body_len = @as(u32, @intCast(cond_body.len)),
                    });
                sema.air_extra.appendSliceAssumeCapacity(prev_then_body);
                sema.air_extra.appendSliceAssumeCapacity(cond_body);
            }
            gpa.free(prev_then_body);
            prev_then_body = try case_block.instructions.toOwnedSlice(gpa);
            prev_cond_br = new_cond_br;
        }
    }

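    // At this point every ranged/multi prong has been chained: each emitted
    // `cond_br` tests `operand >= first and operand <= last` (or equality for
    // plain items) and falls through to the next check, so the last `else` body
    // attached below acts as the switch's final else.
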
    var final_else_body: []const Air.Inst.Index = &.{};
    if (special.body.len != 0 or !is_first or case_block.wantSafety()) {
        var emit_bb = false;
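        // `inline else` expands the special prong into one concrete case per
        // unhandled value, e.g. `switch (e) { inline else => |v| f(v) }`
        // analyzes the body once per remaining tag with `v` comptime-known.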
        if (special.is_inline) switch (operand_ty.zigTypeTag(mod)) {
            .Enum => {
                if (operand_ty.isNonexhaustiveEnum(mod) and !union_originally) {
                    return sema.fail(block, special_prong_src, "cannot enumerate values of type '{}' for 'inline else'", .{
                        operand_ty.fmt(mod),
                    });
                }
                for (seen_enum_fields, 0..) |f, i| {
                    if (f != null) continue;
                    cases_len += 1;

                    const item_val = try mod.enumValueFieldIndex(operand_ty, @as(u32, @intCast(i)));
                    const item_ref = Air.internedToRef(item_val.toIntern());

                    case_block.instructions.shrinkRetainingCapacity(0);
                    case_block.wip_capture_scope = child_block.wip_capture_scope;

                    const analyze_body = if (union_originally) blk: {
                        const field_ty = maybe_union_ty.unionFieldType(item_val, mod);
                        break :blk field_ty.zigTypeTag(mod) != .NoReturn;
                    } else true;

                    if (emit_bb) try sema.emitBackwardBranch(block, special_prong_src);
                    emit_bb = true;

                    if (analyze_body) {
                        try spa.analyzeProngRuntime(
                            &case_block,
                            .special,
                            special.body,
                            special.capture,
                            .special_capture,
                            &.{item_ref},
                            item_ref,
                            special.has_tag_capture,
                        );
                    } else {
                        _ = try case_block.addNoOp(.unreach);
                    }

                    try cases_extra.ensureUnusedCapacity(gpa, 3 + case_block.instructions.items.len);
                    cases_extra.appendAssumeCapacity(1); // items_len
                    cases_extra.appendAssumeCapacity(@as(u32, @intCast(case_block.instructions.items.len)));
                    cases_extra.appendAssumeCapacity(@intFromEnum(item_ref));
                    cases_extra.appendSliceAssumeCapacity(case_block.instructions.items);
                }
            },
            .ErrorSet => {
                if (operand_ty.isAnyError(mod)) {
                    return sema.fail(block, special_prong_src, "cannot enumerate values of type '{}' for 'inline else'", .{
                        operand_ty.fmt(mod),
                    });
                }
                for (0..operand_ty.errorSetNames(mod).len) |i| {
                    const error_name = operand_ty.errorSetNames(mod)[i];
                    if (seen_errors.contains(error_name)) continue;
                    cases_len += 1;

                    const item_val = try mod.intern(.{ .err = .{
                        .ty = operand_ty.toIntern(),
                        .name = error_name,
                    } });
                    const item_ref = Air.internedToRef(item_val);

                    case_block.instructions.shrinkRetainingCapacity(0);
                    case_block.wip_capture_scope = child_block.wip_capture_scope;

                    if (emit_bb) try sema.emitBackwardBranch(block, special_prong_src);
                    emit_bb = true;

                    try spa.analyzeProngRuntime(
                        &case_block,
                        .special,
                        special.body,
                        special.capture,
                        .special_capture,
                        &.{item_ref},
                        item_ref,
                        special.has_tag_capture,
                    );

                    try cases_extra.ensureUnusedCapacity(gpa, 3 + case_block.instructions.items.len);
                    cases_extra.appendAssumeCapacity(1); // items_len
                    cases_extra.appendAssumeCapacity(@as(u32, @intCast(case_block.instructions.items.len)));
                    cases_extra.appendAssumeCapacity(@intFromEnum(item_ref));
                    cases_extra.appendSliceAssumeCapacity(case_block.instructions.items);
                }
            },
            .Int => {
                var it = try RangeSetUnhandledIterator.init(sema, operand_ty, range_set);
                while (try it.next()) |cur| {
                    cases_len += 1;

                    const item_ref = Air.internedToRef(cur);

                    case_block.instructions.shrinkRetainingCapacity(0);
                    case_block.wip_capture_scope = child_block.wip_capture_scope;

                    if (emit_bb) try sema.emitBackwardBranch(block, special_prong_src);
                    emit_bb = true;

                    try spa.analyzeProngRuntime(
                        &case_block,
                        .special,
                        special.body,
                        special.capture,
                        .special_capture,
                        &.{item_ref},
                        item_ref,
                        special.has_tag_capture,
                    );

                    try cases_extra.ensureUnusedCapacity(gpa, 3 + case_block.instructions.items.len);
                    cases_extra.appendAssumeCapacity(1); // items_len
                    cases_extra.appendAssumeCapacity(@as(u32, @intCast(case_block.instructions.items.len)));
                    cases_extra.appendAssumeCapacity(@intFromEnum(item_ref));
                    cases_extra.appendSliceAssumeCapacity(case_block.instructions.items);
                }
            },
            .Bool => {
                if (true_count == 0) {
                    cases_len += 1;

                    case_block.instructions.shrinkRetainingCapacity(0);
                    case_block.wip_capture_scope = child_block.wip_capture_scope;

                    if (emit_bb) try sema.emitBackwardBranch(block, special_prong_src);
                    emit_bb = true;

                    try spa.analyzeProngRuntime(
                        &case_block,
                        .special,
                        special.body,
|
|
special.capture,
|
|
.special_capture,
|
|
&.{Air.Inst.Ref.bool_true},
|
|
Air.Inst.Ref.bool_true,
|
|
special.has_tag_capture,
|
|
);
|
|
|
|
try cases_extra.ensureUnusedCapacity(gpa, 3 + case_block.instructions.items.len);
|
|
cases_extra.appendAssumeCapacity(1); // items_len
|
|
cases_extra.appendAssumeCapacity(@as(u32, @intCast(case_block.instructions.items.len)));
|
|
cases_extra.appendAssumeCapacity(@intFromEnum(Air.Inst.Ref.bool_true));
|
|
cases_extra.appendSliceAssumeCapacity(case_block.instructions.items);
|
|
}
|
|
if (false_count == 0) {
|
|
cases_len += 1;
|
|
|
|
case_block.instructions.shrinkRetainingCapacity(0);
|
|
case_block.wip_capture_scope = child_block.wip_capture_scope;
|
|
|
|
if (emit_bb) try sema.emitBackwardBranch(block, special_prong_src);
|
|
emit_bb = true;
|
|
|
|
try spa.analyzeProngRuntime(
|
|
&case_block,
|
|
.special,
|
|
special.body,
|
|
special.capture,
|
|
.special_capture,
|
|
&.{Air.Inst.Ref.bool_false},
|
|
Air.Inst.Ref.bool_false,
|
|
special.has_tag_capture,
|
|
);
|
|
|
|
try cases_extra.ensureUnusedCapacity(gpa, 3 + case_block.instructions.items.len);
|
|
cases_extra.appendAssumeCapacity(1); // items_len
|
|
cases_extra.appendAssumeCapacity(@as(u32, @intCast(case_block.instructions.items.len)));
|
|
cases_extra.appendAssumeCapacity(@intFromEnum(Air.Inst.Ref.bool_false));
|
|
cases_extra.appendSliceAssumeCapacity(case_block.instructions.items);
|
|
}
|
|
},
|
|
else => return sema.fail(block, special_prong_src, "cannot enumerate values of type '{}' for 'inline else'", .{
|
|
operand_ty.fmt(mod),
|
|
}),
|
|
};
|
|
|
|
var wip_captures = try WipCaptureScope.init(gpa, child_block.wip_capture_scope);
|
|
defer wip_captures.deinit();
|
|
|
|
case_block.instructions.shrinkRetainingCapacity(0);
|
|
case_block.wip_capture_scope = wip_captures.scope;
|
|
|
|
if (mod.backendSupportsFeature(.is_named_enum_value) and special.body.len != 0 and block.wantSafety() and
|
|
operand_ty.zigTypeTag(mod) == .Enum and (!operand_ty.isNonexhaustiveEnum(mod) or union_originally))
|
|
{
|
|
try sema.zirDbgStmt(&case_block, cond_dbg_node_index);
|
|
const ok = try case_block.addUnOp(.is_named_enum_value, operand);
|
|
try sema.addSafetyCheck(&case_block, src, ok, .corrupt_switch);
|
|
}
|
|
|
|
const analyze_body = if (union_originally and !special.is_inline)
|
|
for (seen_enum_fields, 0..) |seen_field, index| {
|
|
if (seen_field != null) continue;
|
|
const union_obj = mod.typeToUnion(maybe_union_ty).?;
|
|
const field_ty = union_obj.fields.values()[index].ty;
|
|
if (field_ty.zigTypeTag(mod) != .NoReturn) break true;
|
|
} else false
|
|
else
|
|
true;
|
|
if (special.body.len != 0 and err_set and
|
|
try sema.maybeErrorUnwrap(&case_block, special.body, operand, operand_src))
|
|
{
|
|
// nothing to do here
|
|
} else if (special.body.len != 0 and analyze_body and !special.is_inline) {
|
|
try spa.analyzeProngRuntime(
|
|
&case_block,
|
|
.special,
|
|
special.body,
|
|
special.capture,
|
|
.special_capture,
|
|
undefined, // case_vals may be undefined for special prongs
|
|
.none,
|
|
false,
|
|
);
|
|
} else {
|
|
// We still need a terminator in this block, but we have proven
|
|
// that it is unreachable.
|
|
if (case_block.wantSafety()) {
|
|
try sema.zirDbgStmt(&case_block, cond_dbg_node_index);
|
|
try sema.safetyPanic(&case_block, src, .corrupt_switch);
|
|
} else {
|
|
_ = try case_block.addNoOp(.unreach);
|
|
}
|
|
}
|
|
|
|
try wip_captures.finalize();
|
|
|
|
if (is_first) {
|
|
final_else_body = case_block.instructions.items;
|
|
} else {
|
|
try sema.air_extra.ensureUnusedCapacity(gpa, prev_then_body.len +
|
|
@typeInfo(Air.CondBr).Struct.fields.len + case_block.instructions.items.len);
|
|
|
|
sema.air_instructions.items(.data)[prev_cond_br].pl_op.payload =
|
|
sema.addExtraAssumeCapacity(Air.CondBr{
|
|
.then_body_len = @as(u32, @intCast(prev_then_body.len)),
|
|
.else_body_len = @as(u32, @intCast(case_block.instructions.items.len)),
|
|
});
|
|
sema.air_extra.appendSliceAssumeCapacity(prev_then_body);
|
|
sema.air_extra.appendSliceAssumeCapacity(case_block.instructions.items);
|
|
final_else_body = first_else_body;
|
|
}
|
|
}
|
|
|
|
try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.SwitchBr).Struct.fields.len +
|
|
cases_extra.items.len + final_else_body.len);
|
|
|
|
_ = try child_block.addInst(.{ .tag = .switch_br, .data = .{ .pl_op = .{
|
|
.operand = operand,
|
|
.payload = sema.addExtraAssumeCapacity(Air.SwitchBr{
|
|
.cases_len = @as(u32, @intCast(cases_len)),
|
|
.else_body_len = @as(u32, @intCast(final_else_body.len)),
|
|
}),
|
|
} } });
|
|
sema.air_extra.appendSliceAssumeCapacity(cases_extra.items);
|
|
sema.air_extra.appendSliceAssumeCapacity(final_else_body);
|
|
|
|
return sema.analyzeBlockBody(block, src, &child_block, merges);
|
|
}
|
|
|
|
const RangeSetUnhandledIterator = struct {
    mod: *Module,
    cur: ?InternPool.Index,
    max: InternPool.Index,
    range_i: usize,
    ranges: []const RangeSet.Range,
    limbs: []math.big.Limb,

    const preallocated_limbs = math.big.int.calcTwosCompLimbCount(128);

    fn init(sema: *Sema, ty: Type, range_set: RangeSet) !RangeSetUnhandledIterator {
        const mod = sema.mod;
        const int_type = mod.intern_pool.indexToKey(ty.toIntern()).int_type;
        const needed_limbs = math.big.int.calcTwosCompLimbCount(int_type.bits);
        return .{
            .mod = mod,
            .cur = (try ty.minInt(mod, ty)).toIntern(),
            .max = (try ty.maxInt(mod, ty)).toIntern(),
            .range_i = 0,
            .ranges = range_set.ranges.items,
            .limbs = if (needed_limbs > preallocated_limbs)
                try sema.arena.alloc(math.big.Limb, needed_limbs)
            else
                &.{},
        };
    }

    fn addOne(it: *const RangeSetUnhandledIterator, val: InternPool.Index) !?InternPool.Index {
        if (val == it.max) return null;
        const int = it.mod.intern_pool.indexToKey(val).int;

        switch (int.storage) {
            inline .u64, .i64 => |val_int| {
                const next_int = @addWithOverflow(val_int, 1);
                if (next_int[1] == 0)
                    return (try it.mod.intValue(int.ty.toType(), next_int[0])).toIntern();
            },
            .big_int => {},
            .lazy_align, .lazy_size => unreachable,
        }

        var val_space: InternPool.Key.Int.Storage.BigIntSpace = undefined;
        const val_bigint = int.storage.toBigInt(&val_space);

        var result_limbs: [preallocated_limbs]math.big.Limb = undefined;
        var result_bigint = math.big.int.Mutable.init(
            if (it.limbs.len > 0) it.limbs else &result_limbs,
            0,
        );

        result_bigint.addScalar(val_bigint, 1);
        return (try it.mod.intValue_big(int.ty.toType(), result_bigint.toConst())).toIntern();
    }

    fn next(it: *RangeSetUnhandledIterator) !?InternPool.Index {
        var cur = it.cur orelse return null;
        while (it.range_i < it.ranges.len and cur == it.ranges[it.range_i].first) {
            defer it.range_i += 1;
            cur = (try it.addOne(it.ranges[it.range_i].last)) orelse {
                it.cur = null;
                return null;
            };
        }
        it.cur = try it.addOne(cur);
        return cur;
    }
};

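// Illustrative sketch (hypothetical user code, not compiler logic): the
// iterator above yields every integer value that no explicit prong handled,
// so that `inline else` can materialize one scalar prong per remaining value.
// Assuming `x: u2`:
//
//     switch (x) {
//         1 => one(),
//         inline else => |v| other(v), // the iterator yields 0, 2, 3
//     }
//
// `init` seeds `cur` with the type's minimum value; `next` skips each handled
// range by advancing from `range.first` past `range.last`, and returns null
// once `max` has been produced.
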
const ResolvedSwitchItem = struct {
    ref: Air.Inst.Ref,
    val: InternPool.Index,
};
fn resolveSwitchItemVal(
    sema: *Sema,
    block: *Block,
    item_ref: Zir.Inst.Ref,
    /// Coerce `item_ref` to this type.
    coerce_ty: Type,
    switch_node_offset: i32,
    switch_prong_src: Module.SwitchProngSrc,
    range_expand: Module.SwitchProngSrc.RangeExpand,
) CompileError!ResolvedSwitchItem {
    const mod = sema.mod;
    const uncoerced_item = try sema.resolveInst(item_ref);

    // Constructing a LazySrcLoc is costly because we only have the switch AST node.
    // Only if we know for sure we need to report a compile error do we resolve the
    // full source locations.

    const item = sema.coerce(block, coerce_ty, uncoerced_item, .unneeded) catch |err| switch (err) {
        error.NeededSourceLocation => {
            const src = switch_prong_src.resolve(mod, mod.declPtr(block.src_decl), switch_node_offset, range_expand);
            _ = try sema.coerce(block, coerce_ty, uncoerced_item, src);
            unreachable;
        },
        else => |e| return e,
    };

    const maybe_lazy = sema.resolveConstValue(block, .unneeded, item, "") catch |err| switch (err) {
        error.NeededSourceLocation => {
            const src = switch_prong_src.resolve(mod, mod.declPtr(block.src_decl), switch_node_offset, range_expand);
            _ = try sema.resolveConstValue(block, src, item, "switch prong values must be comptime-known");
            unreachable;
        },
        else => |e| return e,
    };

    const val = try sema.resolveLazyValue(maybe_lazy);
    const new_item = if (val.toIntern() != maybe_lazy.toIntern()) blk: {
        break :blk Air.internedToRef(val.toIntern());
    } else item;

    return .{ .ref = new_item, .val = val.toIntern() };
}

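// For example (illustrative user code): every switch item must resolve to a
// comptime-known value of the operand type, so the `resolveConstValue` call
// above rejects this:
//
//     var limit: u8 = 10; // runtime-known
//     switch (byte) {
//         limit => {}, // error: switch prong values must be comptime-known
//         else => {},
//     }
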
fn validateSwitchRange(
    sema: *Sema,
    block: *Block,
    range_set: *RangeSet,
    first_ref: Zir.Inst.Ref,
    last_ref: Zir.Inst.Ref,
    operand_ty: Type,
    src_node_offset: i32,
    switch_prong_src: Module.SwitchProngSrc,
) CompileError![2]Air.Inst.Ref {
    const mod = sema.mod;
    const first = try sema.resolveSwitchItemVal(block, first_ref, operand_ty, src_node_offset, switch_prong_src, .first);
    const last = try sema.resolveSwitchItemVal(block, last_ref, operand_ty, src_node_offset, switch_prong_src, .last);
    if (try first.val.toValue().compareAll(.gt, last.val.toValue(), operand_ty, mod)) {
        const src = switch_prong_src.resolve(mod, mod.declPtr(block.src_decl), src_node_offset, .first);
        return sema.fail(block, src, "range start value is greater than the end value", .{});
    }
    const maybe_prev_src = try range_set.add(first.val, last.val, switch_prong_src);
    try sema.validateSwitchDupe(block, maybe_prev_src, switch_prong_src, src_node_offset);
    return .{ first.ref, last.ref };
}

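// Illustrative user code for the checks above: a range whose start exceeds
// its end is a compile error, and overlap with an earlier prong is reported
// as a duplicate via `validateSwitchDupe`:
//
//     switch (byte) {
//         20...10 => {}, // error: range start value is greater than the end value
//         else => {},
//     }
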
fn validateSwitchItemInt(
    sema: *Sema,
    block: *Block,
    range_set: *RangeSet,
    item_ref: Zir.Inst.Ref,
    operand_ty: Type,
    src_node_offset: i32,
    switch_prong_src: Module.SwitchProngSrc,
) CompileError!Air.Inst.Ref {
    const item = try sema.resolveSwitchItemVal(block, item_ref, operand_ty, src_node_offset, switch_prong_src, .none);
    const maybe_prev_src = try range_set.add(item.val, item.val, switch_prong_src);
    try sema.validateSwitchDupe(block, maybe_prev_src, switch_prong_src, src_node_offset);
    return item.ref;
}

fn validateSwitchItemEnum(
    sema: *Sema,
    block: *Block,
    seen_fields: []?Module.SwitchProngSrc,
    range_set: *RangeSet,
    item_ref: Zir.Inst.Ref,
    operand_ty: Type,
    src_node_offset: i32,
    switch_prong_src: Module.SwitchProngSrc,
) CompileError!Air.Inst.Ref {
    const ip = &sema.mod.intern_pool;
    const item = try sema.resolveSwitchItemVal(block, item_ref, operand_ty, src_node_offset, switch_prong_src, .none);
    const int = ip.indexToKey(item.val).enum_tag.int;
    const field_index = ip.indexToKey(ip.typeOf(item.val)).enum_type.tagValueIndex(ip, int) orelse {
        const maybe_prev_src = try range_set.add(int, int, switch_prong_src);
        try sema.validateSwitchDupe(block, maybe_prev_src, switch_prong_src, src_node_offset);
        return item.ref;
    };
    const maybe_prev_src = seen_fields[field_index];
    seen_fields[field_index] = switch_prong_src;
    try sema.validateSwitchDupe(block, maybe_prev_src, switch_prong_src, src_node_offset);
    return item.ref;
}

fn validateSwitchItemError(
    sema: *Sema,
    block: *Block,
    seen_errors: *SwitchErrorSet,
    item_ref: Zir.Inst.Ref,
    operand_ty: Type,
    src_node_offset: i32,
    switch_prong_src: Module.SwitchProngSrc,
) CompileError!Air.Inst.Ref {
    const ip = &sema.mod.intern_pool;
    const item = try sema.resolveSwitchItemVal(block, item_ref, operand_ty, src_node_offset, switch_prong_src, .none);
    const error_name = ip.indexToKey(item.val).err.name;
    const maybe_prev_src = if (try seen_errors.fetchPut(error_name, switch_prong_src)) |prev|
        prev.value
    else
        null;
    try sema.validateSwitchDupe(block, maybe_prev_src, switch_prong_src, src_node_offset);
    return item.ref;
}

fn validateSwitchDupe(
    sema: *Sema,
    block: *Block,
    maybe_prev_src: ?Module.SwitchProngSrc,
    switch_prong_src: Module.SwitchProngSrc,
    src_node_offset: i32,
) CompileError!void {
    const prev_prong_src = maybe_prev_src orelse return;
    const mod = sema.mod;
    const block_src_decl = mod.declPtr(block.src_decl);
    const src = switch_prong_src.resolve(mod, block_src_decl, src_node_offset, .none);
    const prev_src = prev_prong_src.resolve(mod, block_src_decl, src_node_offset, .none);
    const msg = msg: {
        const msg = try sema.errMsg(
            block,
            src,
            "duplicate switch value",
            .{},
        );
        errdefer msg.destroy(sema.gpa);
        try sema.errNote(
            block,
            prev_src,
            msg,
            "previous value here",
            .{},
        );
        break :msg msg;
    };
    return sema.failWithOwnedErrorMsg(msg);
}

fn validateSwitchItemBool(
    sema: *Sema,
    block: *Block,
    true_count: *u8,
    false_count: *u8,
    item_ref: Zir.Inst.Ref,
    src_node_offset: i32,
    switch_prong_src: Module.SwitchProngSrc,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const item = try sema.resolveSwitchItemVal(block, item_ref, Type.bool, src_node_offset, switch_prong_src, .none);
    if (item.val.toValue().toBool()) {
        true_count.* += 1;
    } else {
        false_count.* += 1;
    }
    if (true_count.* > 1 or false_count.* > 1) {
        const block_src_decl = sema.mod.declPtr(block.src_decl);
        const src = switch_prong_src.resolve(mod, block_src_decl, src_node_offset, .none);
        return sema.fail(block, src, "duplicate switch value", .{});
    }
    return item.ref;
}

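// Illustrative user code: a `bool` switch has only two values, so a repeated
// prong trips the counters above:
//
//     switch (flag) {
//         true => {},
//         true => {}, // error: duplicate switch value
//         false => {},
//     }
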
const ValueSrcMap = std.AutoHashMapUnmanaged(InternPool.Index, Module.SwitchProngSrc);

fn validateSwitchItemSparse(
    sema: *Sema,
    block: *Block,
    seen_values: *ValueSrcMap,
    item_ref: Zir.Inst.Ref,
    operand_ty: Type,
    src_node_offset: i32,
    switch_prong_src: Module.SwitchProngSrc,
) CompileError!Air.Inst.Ref {
    const item = try sema.resolveSwitchItemVal(block, item_ref, operand_ty, src_node_offset, switch_prong_src, .none);
    const kv = (try seen_values.fetchPut(sema.gpa, item.val, switch_prong_src)) orelse return item.ref;
    try sema.validateSwitchDupe(block, kv.value, switch_prong_src, src_node_offset);
    unreachable;
}

fn validateSwitchNoRange(
    sema: *Sema,
    block: *Block,
    ranges_len: u32,
    operand_ty: Type,
    src_node_offset: i32,
) CompileError!void {
    if (ranges_len == 0)
        return;

    const operand_src: LazySrcLoc = .{ .node_offset_switch_operand = src_node_offset };
    const range_src: LazySrcLoc = .{ .node_offset_switch_range = src_node_offset };

    const msg = msg: {
        const msg = try sema.errMsg(
            block,
            operand_src,
            "ranges not allowed when switching on type '{}'",
            .{operand_ty.fmt(sema.mod)},
        );
        errdefer msg.destroy(sema.gpa);
        try sema.errNote(
            block,
            range_src,
            msg,
            "range here",
            .{},
        );
        break :msg msg;
    };
    return sema.failWithOwnedErrorMsg(msg);
}

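// Illustrative user code (hypothetical `Color` enum): range prongs are only
// meaningful for integer operands, so a range in a switch over any other
// operand type is rejected here:
//
//     switch (color) {
//         .red...inline .blue => {}, // error: ranges not allowed when switching on type 'Color'
//         else => {},
//     }
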
fn maybeErrorUnwrap(sema: *Sema, block: *Block, body: []const Zir.Inst.Index, operand: Air.Inst.Ref, operand_src: LazySrcLoc) !bool {
    const mod = sema.mod;
    if (!mod.backendSupportsFeature(.panic_unwrap_error)) return false;

    const tags = sema.code.instructions.items(.tag);
    for (body) |inst| {
        switch (tags[inst]) {
            .@"unreachable" => if (!block.wantSafety()) return false,
            .save_err_ret_index,
            .dbg_block_begin,
            .dbg_block_end,
            .dbg_stmt,
            .str,
            .as_node,
            .panic,
            .field_val,
            => {},
            else => return false,
        }
    }

    for (body) |inst| {
        const air_inst = switch (tags[inst]) {
            .dbg_block_begin,
            .dbg_block_end,
            => continue,
            .dbg_stmt => {
                try sema.zirDbgStmt(block, inst);
                continue;
            },
            .save_err_ret_index => {
                try sema.zirSaveErrRetIndex(block, inst);
                continue;
            },
            .str => try sema.zirStr(block, inst),
            .as_node => try sema.zirAsNode(block, inst),
            .field_val => try sema.zirFieldVal(block, inst),
            .@"unreachable" => {
                if (!mod.comp.formatted_panics) {
                    try sema.safetyPanic(block, operand_src, .unwrap_error);
                    return true;
                }

                const panic_fn = try sema.getBuiltin("panicUnwrapError");
                const err_return_trace = try sema.getErrorReturnTrace(block);
                const args: [2]Air.Inst.Ref = .{ err_return_trace, operand };
                try sema.callBuiltin(block, operand_src, panic_fn, .auto, &args, .@"safety check");
                return true;
            },
            .panic => {
                const inst_data = sema.code.instructions.items(.data)[inst].un_node;
                const msg_inst = try sema.resolveInst(inst_data.operand);

                const panic_fn = try sema.getBuiltin("panic");
                const err_return_trace = try sema.getErrorReturnTrace(block);
                const args: [3]Air.Inst.Ref = .{ msg_inst, err_return_trace, .null_value };
                try sema.callBuiltin(block, operand_src, panic_fn, .auto, &args, .@"safety check");
                return true;
            },
            else => unreachable,
        };
        if (sema.typeOf(air_inst).isNoReturn(mod))
            return true;
        sema.inst_map.putAssumeCapacity(inst, air_inst);
    }
    unreachable;
}

fn maybeErrorUnwrapCondbr(sema: *Sema, block: *Block, body: []const Zir.Inst.Index, cond: Zir.Inst.Ref, cond_src: LazySrcLoc) !void {
    const mod = sema.mod;
    const index = Zir.refToIndex(cond) orelse return;
    if (sema.code.instructions.items(.tag)[index] != .is_non_err) return;

    const err_inst_data = sema.code.instructions.items(.data)[index].un_node;
    const err_operand = try sema.resolveInst(err_inst_data.operand);
    const operand_ty = sema.typeOf(err_operand);
    if (operand_ty.zigTypeTag(mod) == .ErrorSet) {
        try sema.maybeErrorUnwrapComptime(block, body, err_operand);
        return;
    }
    if (try sema.resolveDefinedValue(block, cond_src, err_operand)) |val| {
        if (!operand_ty.isError(mod)) return;
        if (val.getErrorName(mod) == .none) return;
        try sema.maybeErrorUnwrapComptime(block, body, err_operand);
    }
}

fn maybeErrorUnwrapComptime(sema: *Sema, block: *Block, body: []const Zir.Inst.Index, operand: Air.Inst.Ref) !void {
    const tags = sema.code.instructions.items(.tag);
    const inst = for (body) |inst| {
        switch (tags[inst]) {
            .dbg_block_begin,
            .dbg_block_end,
            .dbg_stmt,
            .save_err_ret_index,
            => {},
            .@"unreachable" => break inst,
            else => return,
        }
    } else return;
    const inst_data = sema.code.instructions.items(.data)[inst].@"unreachable";
    const src = inst_data.src();

    if (try sema.resolveDefinedValue(block, src, operand)) |val| {
        if (val.getErrorName(sema.mod).unwrap()) |name| {
            return sema.fail(block, src, "caught unexpected error '{}'", .{name.fmt(&sema.mod.intern_pool)});
        }
    }
}

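// Illustrative user code (hypothetical `failingFn`): when the unwrapped error
// operand is comptime-known, the `unreachable`-terminated else branch above
// is diagnosed directly instead of emitting a runtime panic:
//
//     const x = failingFn() catch unreachable;
//     // error: caught unexpected error 'SomeError'
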
fn zirHasField(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
    const ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    const name_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
    const ty = try sema.resolveType(block, ty_src, extra.lhs);
    const field_name = try sema.resolveConstStringIntern(block, name_src, extra.rhs, "field name must be comptime-known");
    try sema.resolveTypeFields(ty);
    const ip = &mod.intern_pool;

    const has_field = hf: {
        switch (ip.indexToKey(ty.toIntern())) {
            .ptr_type => |ptr_type| switch (ptr_type.flags.size) {
                .Slice => {
                    if (ip.stringEqlSlice(field_name, "ptr")) break :hf true;
                    if (ip.stringEqlSlice(field_name, "len")) break :hf true;
                    break :hf false;
                },
                else => {},
            },
            .anon_struct_type => |anon_struct| {
                if (anon_struct.names.len != 0) {
                    break :hf mem.indexOfScalar(InternPool.NullTerminatedString, anon_struct.names, field_name) != null;
                } else {
                    const field_index = field_name.toUnsigned(ip) orelse break :hf false;
                    break :hf field_index < ty.structFieldCount(mod);
                }
            },
            .struct_type => |struct_type| {
                const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse break :hf false;
                assert(struct_obj.haveFieldTypes());
                break :hf struct_obj.fields.contains(field_name);
            },
            .union_type => |union_type| {
                const union_obj = mod.unionPtr(union_type.index);
                assert(union_obj.haveFieldTypes());
                break :hf union_obj.fields.contains(field_name);
            },
            .enum_type => |enum_type| {
                break :hf enum_type.nameIndex(ip, field_name) != null;
            },
            .array_type => break :hf ip.stringEqlSlice(field_name, "len"),
            else => {},
        }
        return sema.fail(block, ty_src, "type '{}' does not support '@hasField'", .{
            ty.fmt(mod),
        });
    };
    if (has_field) {
        return Air.Inst.Ref.bool_true;
    } else {
        return Air.Inst.Ref.bool_false;
    }
}

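// Illustrative examples for the builtin handled above:
//
//     @hasField(struct { a: i32 }, "a") // true
//     @hasField([]u8, "len")            // true: slices expose `ptr` and `len`
//     @hasField(u32, "x")               // error: type 'u32' does not support '@hasField'
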
fn zirHasDecl(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
    const src = inst_data.src();
    const lhs_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    const rhs_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
    const container_type = try sema.resolveType(block, lhs_src, extra.lhs);
    const decl_name = try sema.resolveConstStringIntern(block, rhs_src, extra.rhs, "decl name must be comptime-known");

    try sema.checkNamespaceType(block, lhs_src, container_type);

    const namespace = container_type.getNamespaceIndex(mod).unwrap() orelse
        return Air.Inst.Ref.bool_false;
    if (try sema.lookupInNamespace(block, src, namespace, decl_name, true)) |decl_index| {
        const decl = mod.declPtr(decl_index);
        if (decl.is_pub or decl.getFileScope(mod) == block.getFileScope(mod)) {
            return Air.Inst.Ref.bool_true;
        }
    }
    return Air.Inst.Ref.bool_false;
}

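// Illustrative example: `@hasDecl` only reports declarations visible from the
// call site, i.e. `pub` decls or decls declared in the same file:
//
//     const S = struct { pub const version = 1; };
//     comptime assert(@hasDecl(S, "version"));  // true
//     comptime assert(!@hasDecl(S, "missing")); // false
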
fn zirImport(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[inst].str_tok;
    const operand_src = inst_data.src();
    const operand = inst_data.get(sema.code);

    const result = mod.importFile(block.getFileScope(mod), operand) catch |err| switch (err) {
        error.ImportOutsidePkgPath => {
            return sema.fail(block, operand_src, "import of file outside package path: '{s}'", .{operand});
        },
        error.PackageNotFound => {
            const name = try block.getFileScope(mod).pkg.getName(sema.gpa, mod.*);
            defer sema.gpa.free(name);
            return sema.fail(block, operand_src, "no package named '{s}' available within package '{s}'", .{ operand, name });
        },
        else => {
            // TODO: these errors are file system errors; make sure an update() will
            // retry this and not cache the file system error, which may be transient.
            return sema.fail(block, operand_src, "unable to open '{s}': {s}", .{ operand, @errorName(err) });
        },
    };
    try mod.semaFile(result.file);
    const file_root_decl_index = result.file.root_decl.unwrap().?;
    const file_root_decl = mod.declPtr(file_root_decl_index);
    try mod.declareDeclDependency(sema.owner_decl_index, file_root_decl_index);
    return Air.internedToRef(file_root_decl.val.toIntern());
}

fn zirEmbedFile(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
    const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    const name = try sema.resolveConstString(block, operand_src, inst_data.operand, "file path name must be comptime-known");

    if (name.len == 0) {
        return sema.fail(block, operand_src, "file path name cannot be empty", .{});
    }

    const embed_file = mod.embedFile(block.getFileScope(mod), name) catch |err| switch (err) {
        error.ImportOutsidePkgPath => {
            return sema.fail(block, operand_src, "embed of file outside package path: '{s}'", .{name});
        },
        else => {
            // TODO: these errors are file system errors; make sure an update() will
            // retry this and not cache the file system error, which may be transient.
            return sema.fail(block, operand_src, "unable to open '{s}': {s}", .{ name, @errorName(err) });
        },
    };

    var anon_decl = try block.startAnonDecl();
    defer anon_decl.deinit();

    // TODO instead of using `.bytes`, create a new value tag for pointing at
    // a `*Module.EmbedFile`. The purpose of this would be:
    // - If only the length is read and the bytes are not inspected by comptime code,
    //   there can be an optimization where the codegen backend does a copy_file_range
    //   into the final binary, and never loads the data into memory.
    // - When a Decl is destroyed, it can free the `*Module.EmbedFile`.
    const ty = try mod.arrayType(.{
        .len = embed_file.bytes.len,
        .sentinel = .zero_u8,
        .child = .u8_type,
    });
    embed_file.owner_decl = try anon_decl.finish(
        ty,
        (try mod.intern(.{ .aggregate = .{
            .ty = ty.toIntern(),
            .storage = .{ .bytes = embed_file.bytes },
        } })).toValue(),
        .none, // default alignment
    );

    return sema.analyzeDeclRef(embed_file.owner_decl);
}

fn zirRetErrValueCode(sema: *Sema, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[inst].str_tok;
    const name = try mod.intern_pool.getOrPutString(sema.gpa, inst_data.get(sema.code));
    _ = try mod.getErrorValue(name);
    const error_set_type = try mod.singleErrorSetType(name);
    return Air.internedToRef((try mod.intern(.{ .err = .{
        .ty = error_set_type.toIntern(),
        .name = name,
    } })));
}

fn zirShl(
    sema: *Sema,
    block: *Block,
    inst: Zir.Inst.Index,
    air_tag: Air.Inst.Tag,
) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const src = inst_data.src();
    sema.src = src;
    const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
    const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node };
    const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
    const lhs = try sema.resolveInst(extra.lhs);
    const rhs = try sema.resolveInst(extra.rhs);
    const lhs_ty = sema.typeOf(lhs);
    const rhs_ty = sema.typeOf(rhs);
    try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);

    const scalar_ty = lhs_ty.scalarType(mod);
    const scalar_rhs_ty = rhs_ty.scalarType(mod);

    // TODO coerce rhs if air_tag is not shl_sat
    const rhs_is_comptime_int = try sema.checkIntType(block, rhs_src, scalar_rhs_ty);

    const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(lhs);
    const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(rhs);

    if (maybe_rhs_val) |rhs_val| {
        if (rhs_val.isUndef(mod)) {
            return mod.undefRef(sema.typeOf(lhs));
        }
        // If rhs is 0, return lhs without doing any calculations.
        if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
            return lhs;
        }
        if (scalar_ty.zigTypeTag(mod) != .ComptimeInt and air_tag != .shl_sat) {
            const bit_value = try mod.intValue(Type.comptime_int, scalar_ty.intInfo(mod).bits);
            if (rhs_ty.zigTypeTag(mod) == .Vector) {
                var i: usize = 0;
                while (i < rhs_ty.vectorLen(mod)) : (i += 1) {
                    const rhs_elem = try rhs_val.elemValue(mod, i);
                    if (rhs_elem.compareHetero(.gte, bit_value, mod)) {
                        return sema.fail(block, rhs_src, "shift amount '{}' at index '{d}' is too large for operand type '{}'", .{
                            rhs_elem.fmtValue(scalar_ty, mod),
                            i,
                            scalar_ty.fmt(mod),
                        });
                    }
                }
            } else if (rhs_val.compareHetero(.gte, bit_value, mod)) {
                return sema.fail(block, rhs_src, "shift amount '{}' is too large for operand type '{}'", .{
                    rhs_val.fmtValue(scalar_ty, mod),
                    scalar_ty.fmt(mod),
                });
            }
        }
        if (rhs_ty.zigTypeTag(mod) == .Vector) {
            var i: usize = 0;
            while (i < rhs_ty.vectorLen(mod)) : (i += 1) {
                const rhs_elem = try rhs_val.elemValue(mod, i);
                if (rhs_elem.compareHetero(.lt, try mod.intValue(scalar_rhs_ty, 0), mod)) {
                    return sema.fail(block, rhs_src, "shift by negative amount '{}' at index '{d}'", .{
                        rhs_elem.fmtValue(scalar_ty, mod),
                        i,
                    });
                }
            }
        } else if (rhs_val.compareHetero(.lt, try mod.intValue(rhs_ty, 0), mod)) {
            return sema.fail(block, rhs_src, "shift by negative amount '{}'", .{
                rhs_val.fmtValue(scalar_ty, mod),
            });
        }
    }

    const runtime_src = if (maybe_lhs_val) |lhs_val| rs: {
        if (lhs_val.isUndef(mod)) return mod.undefRef(lhs_ty);
        const rhs_val = maybe_rhs_val orelse {
            if (scalar_ty.zigTypeTag(mod) == .ComptimeInt) {
                return sema.fail(block, src, "LHS of shift must be a fixed-width integer type, or RHS must be comptime-known", .{});
            }
            break :rs rhs_src;
        };

        const val = switch (air_tag) {
            .shl_exact => val: {
                const shifted = try lhs_val.shlWithOverflow(rhs_val, lhs_ty, sema.arena, mod);
                if (scalar_ty.zigTypeTag(mod) == .ComptimeInt) {
                    break :val shifted.wrapped_result;
                }
                if (shifted.overflow_bit.compareAllWithZero(.eq, mod)) {
                    break :val shifted.wrapped_result;
                }
                return sema.fail(block, src, "operation caused overflow", .{});
            },

            .shl_sat => if (scalar_ty.zigTypeTag(mod) == .ComptimeInt)
                try lhs_val.shl(rhs_val, lhs_ty, sema.arena, mod)
            else
                try lhs_val.shlSat(rhs_val, lhs_ty, sema.arena, mod),

            .shl => if (scalar_ty.zigTypeTag(mod) == .ComptimeInt)
                try lhs_val.shl(rhs_val, lhs_ty, sema.arena, mod)
            else
                try lhs_val.shlTrunc(rhs_val, lhs_ty, sema.arena, mod),

            else => unreachable,
        };

        return Air.internedToRef(val.toIntern());
    } else lhs_src;

    const new_rhs = if (air_tag == .shl_sat) rhs: {
        // Limit the RHS type for saturating shl to be an integer as small as the LHS.
        if (rhs_is_comptime_int or
            scalar_rhs_ty.intInfo(mod).bits > scalar_ty.intInfo(mod).bits)
        {
            const max_int = Air.internedToRef((try lhs_ty.maxInt(mod, lhs_ty)).toIntern());
            const rhs_limited = try sema.analyzeMinMax(block, rhs_src, .min, &.{ rhs, max_int }, &.{ rhs_src, rhs_src });
            break :rhs try sema.intCast(block, src, lhs_ty, rhs_src, rhs_limited, rhs_src, false);
        } else {
            break :rhs rhs;
        }
    } else rhs;

    try sema.requireRuntimeBlock(block, src, runtime_src);
    if (block.wantSafety()) {
        const bit_count = scalar_ty.intInfo(mod).bits;
        if (!std.math.isPowerOfTwo(bit_count)) {
            const bit_count_val = try mod.intValue(scalar_rhs_ty, bit_count);
            const ok = if (rhs_ty.zigTypeTag(mod) == .Vector) ok: {
                const bit_count_inst = Air.internedToRef((try sema.splat(rhs_ty, bit_count_val)).toIntern());
                const lt = try block.addCmpVector(rhs, bit_count_inst, .lt);
                break :ok try block.addInst(.{
                    .tag = .reduce,
                    .data = .{ .reduce = .{
                        .operand = lt,
                        .operation = .And,
                    } },
                });
            } else ok: {
                const bit_count_inst = Air.internedToRef(bit_count_val.toIntern());
                break :ok try block.addBinOp(.cmp_lt, rhs, bit_count_inst);
            };
            try sema.addSafetyCheck(block, src, ok, .shift_rhs_too_big);
        }

        if (air_tag == .shl_exact) {
            const op_ov_tuple_ty = try sema.overflowArithmeticTupleType(lhs_ty);
            const op_ov = try block.addInst(.{
                .tag = .shl_with_overflow,
                .data = .{ .ty_pl = .{
                    .ty = Air.internedToRef(op_ov_tuple_ty.toIntern()),
                    .payload = try sema.addExtra(Air.Bin{
                        .lhs = lhs,
                        .rhs = rhs,
                    }),
                } },
            });
            const ov_bit = try sema.tupleFieldValByIndex(block, src, op_ov, 1, op_ov_tuple_ty);
            const any_ov_bit = if (lhs_ty.zigTypeTag(mod) == .Vector)
                try block.addInst(.{
                    .tag = if (block.float_mode == .Optimized) .reduce_optimized else .reduce,
                    .data = .{ .reduce = .{
                        .operand = ov_bit,
                        .operation = .Or,
                    } },
                })
            else
                ov_bit;
            const zero_ov = Air.internedToRef((try mod.intValue(Type.u1, 0)).toIntern());
            const no_ov = try block.addBinOp(.cmp_eq, any_ov_bit, zero_ov);

            try sema.addSafetyCheck(block, src, no_ov, .shl_overflow);
            return sema.tupleFieldValByIndex(block, src, op_ov, 0, op_ov_tuple_ty);
        }
    }
    return block.addBinOp(air_tag, lhs, new_rhs);
}

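// Illustrative examples for the comptime paths above (hypothetical values):
//
//     1 << 3                      // comptime result: 8
//     @as(u8, 1) << 8             // error: shift amount '8' is too large for operand type 'u8'
//     @shlExact(@as(u8, 0x80), 1) // error: operation caused overflow
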
fn zirShr(
    sema: *Sema,
    block: *Block,
    inst: Zir.Inst.Index,
    air_tag: Air.Inst.Tag,
) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const src = inst_data.src();
    sema.src = src;
    const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
    const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node };
    const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
    const lhs = try sema.resolveInst(extra.lhs);
    const rhs = try sema.resolveInst(extra.rhs);
    const lhs_ty = sema.typeOf(lhs);
    const rhs_ty = sema.typeOf(rhs);
    try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
    const scalar_ty = lhs_ty.scalarType(mod);

    const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(lhs);
    const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(rhs);

    const runtime_src = if (maybe_rhs_val) |rhs_val| rs: {
        if (rhs_val.isUndef(mod)) {
            return mod.undefRef(lhs_ty);
        }
        // If rhs is 0, return lhs without doing any calculations.
        if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
            return lhs;
        }
        if (scalar_ty.zigTypeTag(mod) != .ComptimeInt) {
            const bit_value = try mod.intValue(Type.comptime_int, scalar_ty.intInfo(mod).bits);
            if (rhs_ty.zigTypeTag(mod) == .Vector) {
                var i: usize = 0;
                while (i < rhs_ty.vectorLen(mod)) : (i += 1) {
                    const rhs_elem = try rhs_val.elemValue(mod, i);
                    if (rhs_elem.compareHetero(.gte, bit_value, mod)) {
                        return sema.fail(block, rhs_src, "shift amount '{}' at index '{d}' is too large for operand type '{}'", .{
                            rhs_elem.fmtValue(scalar_ty, mod),
                            i,
                            scalar_ty.fmt(mod),
                        });
                    }
                }
            } else if (rhs_val.compareHetero(.gte, bit_value, mod)) {
                return sema.fail(block, rhs_src, "shift amount '{}' is too large for operand type '{}'", .{
                    rhs_val.fmtValue(scalar_ty, mod),
                    scalar_ty.fmt(mod),
                });
            }
        }
        if (rhs_ty.zigTypeTag(mod) == .Vector) {
            var i: usize = 0;
            while (i < rhs_ty.vectorLen(mod)) : (i += 1) {
                const rhs_elem = try rhs_val.elemValue(mod, i);
                if (rhs_elem.compareHetero(.lt, try mod.intValue(rhs_ty.childType(mod), 0), mod)) {
                    return sema.fail(block, rhs_src, "shift by negative amount '{}' at index '{d}'", .{
                        rhs_elem.fmtValue(scalar_ty, mod),
                        i,
                    });
                }
            }
        } else if (rhs_val.compareHetero(.lt, try mod.intValue(rhs_ty, 0), mod)) {
            return sema.fail(block, rhs_src, "shift by negative amount '{}'", .{
                rhs_val.fmtValue(scalar_ty, mod),
            });
        }
        if (maybe_lhs_val) |lhs_val| {
            if (lhs_val.isUndef(mod)) {
                return mod.undefRef(lhs_ty);
            }
            if (air_tag == .shr_exact) {
                // Detect if any ones would be shifted out.
                const truncated = try lhs_val.intTruncBitsAsValue(lhs_ty, sema.arena, .unsigned, rhs_val, mod);
                if (!(try truncated.compareAllWithZeroAdvanced(.eq, sema))) {
                    return sema.fail(block, src, "exact shift shifted out 1 bits", .{});
                }
            }
            const val = try lhs_val.shr(rhs_val, lhs_ty, sema.arena, mod);
            return Air.internedToRef(val.toIntern());
        } else {
            break :rs lhs_src;
        }
    } else rhs_src;

    if (maybe_rhs_val == null and scalar_ty.zigTypeTag(mod) == .ComptimeInt) {
        return sema.fail(block, src, "LHS of shift must be a fixed-width integer type, or RHS must be comptime-known", .{});
    }

    try sema.requireRuntimeBlock(block, src, runtime_src);
    const result = try block.addBinOp(air_tag, lhs, rhs);
    if (block.wantSafety()) {
        const bit_count = scalar_ty.intInfo(mod).bits;
        if (!std.math.isPowerOfTwo(bit_count)) {
            const bit_count_val = try mod.intValue(rhs_ty.scalarType(mod), bit_count);

            const ok = if (rhs_ty.zigTypeTag(mod) == .Vector) ok: {
                const bit_count_inst = Air.internedToRef((try sema.splat(rhs_ty, bit_count_val)).toIntern());
                const lt = try block.addCmpVector(rhs, bit_count_inst, .lt);
                break :ok try block.addInst(.{
                    .tag = .reduce,
                    .data = .{ .reduce = .{
                        .operand = lt,
                        .operation = .And,
                    } },
                });
            } else ok: {
                const bit_count_inst = Air.internedToRef(bit_count_val.toIntern());
                break :ok try block.addBinOp(.cmp_lt, rhs, bit_count_inst);
            };
            try sema.addSafetyCheck(block, src, ok, .shift_rhs_too_big);
        }

        if (air_tag == .shr_exact) {
            const back = try block.addBinOp(.shl, result, rhs);

            const ok = if (rhs_ty.zigTypeTag(mod) == .Vector) ok: {
                const eql = try block.addCmpVector(lhs, back, .eq);
                break :ok try block.addInst(.{
                    .tag = if (block.float_mode == .Optimized) .reduce_optimized else .reduce,
                    .data = .{ .reduce = .{
                        .operand = eql,
                        .operation = .And,
                    } },
                });
            } else try block.addBinOp(.cmp_eq, lhs, back);
            try sema.addSafetyCheck(block, src, ok, .shr_overflow);
        }
    }
    return result;
}

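// Illustrative example: `@shrExact` requires that no set bits are shifted
// out. At comptime that is the truncation check above; at runtime it becomes
// the shift-back-and-compare safety check:
//
//     @shrExact(@as(u8, 0b1010), 1) // ok: 0b101
//     @shrExact(@as(u8, 0b1011), 1) // error: exact shift shifted out 1 bits
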
fn zirBitwise(
    sema: *Sema,
    block: *Block,
    inst: Zir.Inst.Index,
    air_tag: Air.Inst.Tag,
) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node };
    sema.src = src;
    const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
    const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node };
    const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
    const lhs = try sema.resolveInst(extra.lhs);
    const rhs = try sema.resolveInst(extra.rhs);
    const lhs_ty = sema.typeOf(lhs);
    const rhs_ty = sema.typeOf(rhs);
    try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);

    const instructions = &[_]Air.Inst.Ref{ lhs, rhs };
    const resolved_type = try sema.resolvePeerTypes(block, src, instructions, .{ .override = &[_]?LazySrcLoc{ lhs_src, rhs_src } });
    const scalar_type = resolved_type.scalarType(mod);
    const scalar_tag = scalar_type.zigTypeTag(mod);

    const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src);
    const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src);

    const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt;

    if (!is_int) {
        return sema.fail(block, src, "invalid operands to binary bitwise expression: '{s}' and '{s}'", .{ @tagName(lhs_ty.zigTypeTag(mod)), @tagName(rhs_ty.zigTypeTag(mod)) });
    }

    const runtime_src = runtime: {
        // TODO: ask the linker what kind of relocations are available, and
        // in some cases emit a Value that means "this decl's address AND'd with this operand".
        if (try sema.resolveMaybeUndefValIntable(casted_lhs)) |lhs_val| {
            if (try sema.resolveMaybeUndefValIntable(casted_rhs)) |rhs_val| {
                const result_val = switch (air_tag) {
                    .bit_and => try lhs_val.bitwiseAnd(rhs_val, resolved_type, sema.arena, mod),
                    .bit_or => try lhs_val.bitwiseOr(rhs_val, resolved_type, sema.arena, mod),
                    .xor => try lhs_val.bitwiseXor(rhs_val, resolved_type, sema.arena, mod),
                    else => unreachable,
                };
                return Air.internedToRef(result_val.toIntern());
            } else {
                break :runtime rhs_src;
            }
        } else {
            break :runtime lhs_src;
        }
    };

    try sema.requireRuntimeBlock(block, src, runtime_src);
    return block.addBinOp(air_tag, casted_lhs, casted_rhs);
}

fn zirBitNot(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
    const src = inst_data.src();
    const operand_src: LazySrcLoc = .{ .node_offset_un_op = inst_data.src_node };

    const operand = try sema.resolveInst(inst_data.operand);
    const operand_type = sema.typeOf(operand);
    const scalar_type = operand_type.scalarType(mod);

    if (scalar_type.zigTypeTag(mod) != .Int) {
        return sema.fail(block, src, "unable to perform binary not operation on type '{}'", .{
            operand_type.fmt(mod),
        });
    }

    if (try sema.resolveMaybeUndefVal(operand)) |val| {
        if (val.isUndef(mod)) {
            return mod.undefRef(operand_type);
        } else if (operand_type.zigTypeTag(mod) == .Vector) {
            const vec_len = try sema.usizeCast(block, operand_src, operand_type.vectorLen(mod));
            const elems = try sema.arena.alloc(InternPool.Index, vec_len);
            for (elems, 0..) |*elem, i| {
                const elem_val = try val.elemValue(mod, i);
                elem.* = try (try elem_val.bitwiseNot(scalar_type, sema.arena, mod)).intern(scalar_type, mod);
            }
            return Air.internedToRef((try mod.intern(.{ .aggregate = .{
                .ty = operand_type.toIntern(),
                .storage = .{ .elems = elems },
            } })));
        } else {
            const result_val = try val.bitwiseNot(operand_type, sema.arena, mod);
            return Air.internedToRef(result_val.toIntern());
        }
    }

    try sema.requireRuntimeBlock(block, src, null);
    return block.addTyOp(.not, operand_type, operand);
}

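// Illustrative example: at comptime, `~` on a vector is evaluated
// element-wise by the loop above:
//
//     const v: @Vector(2, u8) = .{ 0x0f, 0xf0 };
//     comptime assert(@reduce(.And, ~v == @Vector(2, u8){ 0xf0, 0x0f }));
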
fn analyzeTupleCat(
    sema: *Sema,
    block: *Block,
    src_node: i32,
    lhs: Air.Inst.Ref,
    rhs: Air.Inst.Ref,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const lhs_ty = sema.typeOf(lhs);
    const rhs_ty = sema.typeOf(rhs);
    const src = LazySrcLoc.nodeOffset(src_node);
    const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = src_node };
    const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = src_node };

    const lhs_len = lhs_ty.structFieldCount(mod);
    const rhs_len = rhs_ty.structFieldCount(mod);
    const dest_fields = lhs_len + rhs_len;

    if (dest_fields == 0) {
        return Air.internedToRef(Value.empty_struct.toIntern());
    }
    if (lhs_len == 0) {
        return rhs;
    }
    if (rhs_len == 0) {
        return lhs;
    }
    const final_len = try sema.usizeCast(block, rhs_src, dest_fields);

    const types = try sema.arena.alloc(InternPool.Index, final_len);
    const values = try sema.arena.alloc(InternPool.Index, final_len);

    const opt_runtime_src = rs: {
        var runtime_src: ?LazySrcLoc = null;
        var i: u32 = 0;
        while (i < lhs_len) : (i += 1) {
            types[i] = lhs_ty.structFieldType(i, mod).toIntern();
            const default_val = lhs_ty.structFieldDefaultValue(i, mod);
            values[i] = default_val.toIntern();
            const operand_src = lhs_src; // TODO better source location
            if (default_val.toIntern() == .unreachable_value) {
                runtime_src = operand_src;
                values[i] = .none;
            }
        }
        i = 0;
        while (i < rhs_len) : (i += 1) {
            types[i + lhs_len] = rhs_ty.structFieldType(i, mod).toIntern();
            const default_val = rhs_ty.structFieldDefaultValue(i, mod);
            values[i + lhs_len] = default_val.toIntern();
            const operand_src = rhs_src; // TODO better source location
            if (default_val.toIntern() == .unreachable_value) {
                runtime_src = operand_src;
                values[i + lhs_len] = .none;
            }
        }
        break :rs runtime_src;
    };

    const tuple_ty = try mod.intern(.{ .anon_struct_type = .{
        .types = types,
        .values = values,
        .names = &.{},
    } });

    const runtime_src = opt_runtime_src orelse {
        const tuple_val = try mod.intern(.{ .aggregate = .{
            .ty = tuple_ty,
            .storage = .{ .elems = values },
        } });
        return Air.internedToRef(tuple_val);
    };

    try sema.requireRuntimeBlock(block, src, runtime_src);

    const element_refs = try sema.arena.alloc(Air.Inst.Ref, final_len);
    var i: u32 = 0;
    while (i < lhs_len) : (i += 1) {
        const operand_src = lhs_src; // TODO better source location
        element_refs[i] = try sema.tupleFieldValByIndex(block, operand_src, lhs, i, lhs_ty);
    }
    i = 0;
    while (i < rhs_len) : (i += 1) {
        const operand_src = rhs_src; // TODO better source location
        element_refs[i + lhs_len] =
            try sema.tupleFieldValByIndex(block, operand_src, rhs, i, rhs_ty);
    }

    return block.addAggregateInit(tuple_ty.toType(), element_refs);
}

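// Illustrative example: `++` on two tuples produces a new tuple whose fields
// are the operands' fields in order. With all-comptime fields the result is
// a constant; otherwise an `aggregate_init` instruction is emitted:
//
//     const t = .{ 1, "a" } ++ .{true};
//     // @TypeOf(t) is a tuple with fields comptime_int, *const [1:0]u8, bool
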
fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
    const lhs = try sema.resolveInst(extra.lhs);
    const rhs = try sema.resolveInst(extra.rhs);
    const lhs_ty = sema.typeOf(lhs);
    const rhs_ty = sema.typeOf(rhs);
    const src = inst_data.src();

    const lhs_is_tuple = lhs_ty.isTuple(mod);
    const rhs_is_tuple = rhs_ty.isTuple(mod);
    if (lhs_is_tuple and rhs_is_tuple) {
        return sema.analyzeTupleCat(block, inst_data.src_node, lhs, rhs);
    }

    const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
    const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node };

    const lhs_info = try sema.getArrayCatInfo(block, lhs_src, lhs, rhs_ty) orelse lhs_info: {
        if (lhs_is_tuple) break :lhs_info @as(Type.ArrayInfo, undefined);
        return sema.fail(block, lhs_src, "expected indexable; found '{}'", .{lhs_ty.fmt(mod)});
    };
    const rhs_info = try sema.getArrayCatInfo(block, rhs_src, rhs, lhs_ty) orelse {
        assert(!rhs_is_tuple);
        return sema.fail(block, rhs_src, "expected indexable; found '{}'", .{rhs_ty.fmt(mod)});
    };

    const resolved_elem_ty = t: {
        var trash_block = block.makeSubBlock();
        trash_block.is_comptime = false;
        defer trash_block.instructions.deinit(sema.gpa);

        const instructions = [_]Air.Inst.Ref{
            try trash_block.addBitCast(lhs_info.elem_type, .void_value),
            try trash_block.addBitCast(rhs_info.elem_type, .void_value),
        };
        break :t try sema.resolvePeerTypes(block, src, &instructions, .{
            .override = &[_]?LazySrcLoc{ lhs_src, rhs_src },
        });
    };

    // When there is a sentinel mismatch, no sentinel on the result.
    // Otherwise, use the sentinel value provided by either operand,
    // coercing it to the peer-resolved element type.
    const res_sent_val: ?Value = s: {
        if (lhs_info.sentinel) |lhs_sent_val| {
            const lhs_sent = Air.internedToRef(lhs_sent_val.toIntern());
            if (rhs_info.sentinel) |rhs_sent_val| {
                const rhs_sent = Air.internedToRef(rhs_sent_val.toIntern());
                const lhs_sent_casted = try sema.coerce(block, resolved_elem_ty, lhs_sent, lhs_src);
                const rhs_sent_casted = try sema.coerce(block, resolved_elem_ty, rhs_sent, rhs_src);
                const lhs_sent_casted_val = try sema.resolveConstValue(block, lhs_src, lhs_sent_casted, "array sentinel value must be comptime-known");
                const rhs_sent_casted_val = try sema.resolveConstValue(block, rhs_src, rhs_sent_casted, "array sentinel value must be comptime-known");
                if (try sema.valuesEqual(lhs_sent_casted_val, rhs_sent_casted_val, resolved_elem_ty)) {
                    break :s lhs_sent_casted_val;
                } else {
                    break :s null;
                }
            } else {
                const lhs_sent_casted = try sema.coerce(block, resolved_elem_ty, lhs_sent, lhs_src);
                const lhs_sent_casted_val = try sema.resolveConstValue(block, lhs_src, lhs_sent_casted, "array sentinel value must be comptime-known");
                break :s lhs_sent_casted_val;
            }
        } else {
            if (rhs_info.sentinel) |rhs_sent_val| {
                const rhs_sent = Air.internedToRef(rhs_sent_val.toIntern());
                const rhs_sent_casted = try sema.coerce(block, resolved_elem_ty, rhs_sent, rhs_src);
                const rhs_sent_casted_val = try sema.resolveConstValue(block, rhs_src, rhs_sent_casted, "array sentinel value must be comptime-known");
                break :s rhs_sent_casted_val;
            } else {
                break :s null;
            }
        }
    };

    const lhs_len = try sema.usizeCast(block, lhs_src, lhs_info.len);
    const rhs_len = try sema.usizeCast(block, lhs_src, rhs_info.len);
    const result_len = std.math.add(usize, lhs_len, rhs_len) catch |err| switch (err) {
        error.Overflow => return sema.fail(
            block,
            src,
            "concatenating arrays of length {d} and {d} produces an array too large for this compiler implementation to handle",
            .{ lhs_len, rhs_len },
        ),
    };

    const result_ty = try mod.arrayType(.{
        .len = result_len,
        .sentinel = if (res_sent_val) |v| v.toIntern() else .none,
        .child = resolved_elem_ty.toIntern(),
    });
    const ptr_addrspace = p: {
        if (lhs_ty.zigTypeTag(mod) == .Pointer) break :p lhs_ty.ptrAddressSpace(mod);
        if (rhs_ty.zigTypeTag(mod) == .Pointer) break :p rhs_ty.ptrAddressSpace(mod);
        break :p null;
    };

    const runtime_src = if (switch (lhs_ty.zigTypeTag(mod)) {
        .Array, .Struct => try sema.resolveMaybeUndefVal(lhs),
        .Pointer => try sema.resolveDefinedValue(block, lhs_src, lhs),
        else => unreachable,
    }) |lhs_val| rs: {
        if (switch (rhs_ty.zigTypeTag(mod)) {
            .Array, .Struct => try sema.resolveMaybeUndefVal(rhs),
            .Pointer => try sema.resolveDefinedValue(block, rhs_src, rhs),
            else => unreachable,
        }) |rhs_val| {
            const lhs_sub_val = if (lhs_ty.isSinglePointer(mod))
                (try sema.pointerDeref(block, lhs_src, lhs_val, lhs_ty)).?
            else
                lhs_val;

            const rhs_sub_val = if (rhs_ty.isSinglePointer(mod))
                (try sema.pointerDeref(block, rhs_src, rhs_val, rhs_ty)).?
            else
                rhs_val;

            const element_vals = try sema.arena.alloc(InternPool.Index, result_len);
            var elem_i: usize = 0;
            while (elem_i < lhs_len) : (elem_i += 1) {
                const lhs_elem_i = elem_i;
                const elem_default_val = if (lhs_is_tuple) lhs_ty.structFieldDefaultValue(lhs_elem_i, mod) else Value.@"unreachable";
                const elem_val = if (elem_default_val.toIntern() == .unreachable_value) try lhs_sub_val.elemValue(mod, lhs_elem_i) else elem_default_val;
                const elem_val_inst = Air.internedToRef(elem_val.toIntern());
                const coerced_elem_val_inst = try sema.coerce(block, resolved_elem_ty, elem_val_inst, .unneeded);
                const coerced_elem_val = try sema.resolveConstMaybeUndefVal(block, .unneeded, coerced_elem_val_inst, "");
                element_vals[elem_i] = try coerced_elem_val.intern(resolved_elem_ty, mod);
            }
            while (elem_i < result_len) : (elem_i += 1) {
                const rhs_elem_i = elem_i - lhs_len;
                const elem_default_val = if (rhs_is_tuple) rhs_ty.structFieldDefaultValue(rhs_elem_i, mod) else Value.@"unreachable";
                const elem_val = if (elem_default_val.toIntern() == .unreachable_value) try rhs_sub_val.elemValue(mod, rhs_elem_i) else elem_default_val;
                const elem_val_inst = Air.internedToRef(elem_val.toIntern());
                const coerced_elem_val_inst = try sema.coerce(block, resolved_elem_ty, elem_val_inst, .unneeded);
                const coerced_elem_val = try sema.resolveConstMaybeUndefVal(block, .unneeded, coerced_elem_val_inst, "");
                element_vals[elem_i] = try coerced_elem_val.intern(resolved_elem_ty, mod);
            }
            return sema.addConstantMaybeRef(block, result_ty, (try mod.intern(.{ .aggregate = .{
                .ty = result_ty.toIntern(),
                .storage = .{ .elems = element_vals },
            } })).toValue(), ptr_addrspace != null);
        } else break :rs rhs_src;
    } else lhs_src;

    try sema.requireRuntimeBlock(block, src, runtime_src);

    if (ptr_addrspace) |ptr_as| {
        const alloc_ty = try mod.ptrType(.{
            .child = result_ty.toIntern(),
            .flags = .{ .address_space = ptr_as },
        });
        const alloc = try block.addTy(.alloc, alloc_ty);
        const elem_ptr_ty = try mod.ptrType(.{
            .child = resolved_elem_ty.toIntern(),
            .flags = .{ .address_space = ptr_as },
        });

        var elem_i: usize = 0;
        while (elem_i < lhs_len) : (elem_i += 1) {
            const elem_index = try mod.intRef(Type.usize, elem_i);
            const elem_ptr = try block.addPtrElemPtr(alloc, elem_index, elem_ptr_ty);
            const init = try sema.elemVal(block, lhs_src, lhs, elem_index, src, true);
            try sema.storePtr2(block, src, elem_ptr, src, init, lhs_src, .store);
        }
        while (elem_i < result_len) : (elem_i += 1) {
            const elem_index = try mod.intRef(Type.usize, elem_i);
            const rhs_index = try mod.intRef(Type.usize, elem_i - lhs_len);
            const elem_ptr = try block.addPtrElemPtr(alloc, elem_index, elem_ptr_ty);
            const init = try sema.elemVal(block, rhs_src, rhs, rhs_index, src, true);
            try sema.storePtr2(block, src, elem_ptr, src, init, rhs_src, .store);
        }
        if (res_sent_val) |sent_val| {
            const elem_index = try mod.intRef(Type.usize, result_len);
            const elem_ptr = try block.addPtrElemPtr(alloc, elem_index, elem_ptr_ty);
            const init = Air.internedToRef((try mod.getCoerced(sent_val, lhs_info.elem_type)).toIntern());
            try sema.storePtr2(block, src, elem_ptr, src, init, lhs_src, .store);
        }

        return alloc;
    }

    const element_refs = try sema.arena.alloc(Air.Inst.Ref, result_len);
    {
        var elem_i: usize = 0;
        while (elem_i < lhs_len) : (elem_i += 1) {
            const index = try mod.intRef(Type.usize, elem_i);
            const init = try sema.elemVal(block, lhs_src, lhs, index, src, true);
            element_refs[elem_i] = try sema.coerce(block, resolved_elem_ty, init, lhs_src);
        }
        while (elem_i < result_len) : (elem_i += 1) {
            const index = try mod.intRef(Type.usize, elem_i - lhs_len);
            const init = try sema.elemVal(block, rhs_src, rhs, index, src, true);
            element_refs[elem_i] = try sema.coerce(block, resolved_elem_ty, init, rhs_src);
        }
    }

    return block.addAggregateInit(result_ty, element_refs);
}

fn getArrayCatInfo(sema: *Sema, block: *Block, src: LazySrcLoc, operand: Air.Inst.Ref, peer_ty: Type) !?Type.ArrayInfo {
    const mod = sema.mod;
    const operand_ty = sema.typeOf(operand);
    switch (operand_ty.zigTypeTag(mod)) {
        .Array => return operand_ty.arrayInfo(mod),
        .Pointer => {
            const ptr_info = operand_ty.ptrInfo(mod);
            switch (ptr_info.flags.size) {
                // TODO: in the Many case here this should only work if the type
                // has a sentinel, and this code should compute the length based
                // on the sentinel value.
                .Slice, .Many => {
                    const val = try sema.resolveConstValue(block, src, operand, "slice value being concatenated must be comptime-known");
                    return Type.ArrayInfo{
                        .elem_type = ptr_info.child.toType(),
                        .sentinel = switch (ptr_info.sentinel) {
                            .none => null,
                            else => ptr_info.sentinel.toValue(),
                        },
                        .len = val.sliceLen(mod),
                    };
                },
                .One => {
                    if (ptr_info.child.toType().zigTypeTag(mod) == .Array) {
                        return ptr_info.child.toType().arrayInfo(mod);
                    }
                },
                .C => {},
            }
        },
        .Struct => {
            if (operand_ty.isTuple(mod) and peer_ty.isIndexable(mod)) {
                assert(!peer_ty.isTuple(mod));
                return .{
                    .elem_type = peer_ty.elemType2(mod),
                    .sentinel = null,
                    .len = operand_ty.arrayLen(mod),
                };
            }
        },
        else => {},
    }
    return null;
}

fn analyzeTupleMul(
    sema: *Sema,
    block: *Block,
    src_node: i32,
    operand: Air.Inst.Ref,
    factor: usize,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const operand_ty = sema.typeOf(operand);
    const src = LazySrcLoc.nodeOffset(src_node);
    const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = src_node };
    const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = src_node };

    const tuple_len = operand_ty.structFieldCount(mod);
    const final_len = std.math.mul(usize, tuple_len, factor) catch
        return sema.fail(block, rhs_src, "operation results in overflow", .{});

    if (final_len == 0) {
        return Air.internedToRef(Value.empty_struct.toIntern());
    }
    const types = try sema.arena.alloc(InternPool.Index, final_len);
    const values = try sema.arena.alloc(InternPool.Index, final_len);

    const opt_runtime_src = rs: {
        var runtime_src: ?LazySrcLoc = null;
        for (0..tuple_len) |i| {
            types[i] = operand_ty.structFieldType(i, mod).toIntern();
            values[i] = operand_ty.structFieldDefaultValue(i, mod).toIntern();
            const operand_src = lhs_src; // TODO better source location
            if (values[i] == .unreachable_value) {
                runtime_src = operand_src;
                values[i] = .none; // TODO don't treat unreachable_value as special
            }
        }
        for (0..factor) |i| {
            mem.copyForwards(InternPool.Index, types[tuple_len * i ..], types[0..tuple_len]);
            mem.copyForwards(InternPool.Index, values[tuple_len * i ..], values[0..tuple_len]);
        }
        break :rs runtime_src;
    };

    const tuple_ty = try mod.intern(.{ .anon_struct_type = .{
        .types = types,
        .values = values,
        .names = &.{},
    } });

    const runtime_src = opt_runtime_src orelse {
        const tuple_val = try mod.intern(.{ .aggregate = .{
            .ty = tuple_ty,
            .storage = .{ .elems = values },
        } });
        return Air.internedToRef(tuple_val);
    };

    try sema.requireRuntimeBlock(block, src, runtime_src);

    const element_refs = try sema.arena.alloc(Air.Inst.Ref, final_len);
    var i: u32 = 0;
    while (i < tuple_len) : (i += 1) {
        const operand_src = lhs_src; // TODO better source location
        element_refs[i] = try sema.tupleFieldValByIndex(block, operand_src, operand, @as(u32, @intCast(i)), operand_ty);
    }
    i = 1;
    while (i < factor) : (i += 1) {
        @memcpy(element_refs[tuple_len * i ..][0..tuple_len], element_refs[0..tuple_len]);
    }

    return block.addAggregateInit(tuple_ty.toType(), element_refs);
}

fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
    const lhs = try sema.resolveInst(extra.lhs);
    const lhs_ty = sema.typeOf(lhs);
    const src: LazySrcLoc = inst_data.src();
    const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
    const operator_src: LazySrcLoc = .{ .node_offset_main_token = inst_data.src_node };
    const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node };

    if (lhs_ty.isTuple(mod)) {
        // In `**` rhs must be comptime-known, but lhs can be runtime-known
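        // (illustrative: `.{ runtime_int, 2 } ** 3` is accepted, whereas
        // `.{ 1, 2 } ** runtime_int` hits the comptime-known error below).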
        const factor = try sema.resolveInt(block, rhs_src, extra.rhs, Type.usize, "array multiplication factor must be comptime-known");
        const factor_casted = try sema.usizeCast(block, rhs_src, factor);
        return sema.analyzeTupleMul(block, inst_data.src_node, lhs, factor_casted);
    }

    // Analyze the lhs first, to catch the case that someone tried to do exponentiation
    const lhs_info = try sema.getArrayCatInfo(block, lhs_src, lhs, lhs_ty) orelse {
        const msg = msg: {
            const msg = try sema.errMsg(block, lhs_src, "expected indexable; found '{}'", .{lhs_ty.fmt(mod)});
            errdefer msg.destroy(sema.gpa);
            switch (lhs_ty.zigTypeTag(mod)) {
                .Int, .Float, .ComptimeFloat, .ComptimeInt, .Vector => {
                    try sema.errNote(block, operator_src, msg, "this operator multiplies arrays; use std.math.pow for exponentiation", .{});
                },
                else => {},
            }
            break :msg msg;
        };
        return sema.failWithOwnedErrorMsg(msg);
    };

    // In `**` rhs must be comptime-known, but lhs can be runtime-known
    const factor = try sema.resolveInt(block, rhs_src, extra.rhs, Type.usize, "array multiplication factor must be comptime-known");

    const result_len_u64 = std.math.mul(u64, lhs_info.len, factor) catch
        return sema.fail(block, rhs_src, "operation results in overflow", .{});
    const result_len = try sema.usizeCast(block, src, result_len_u64);

    const result_ty = try mod.arrayType(.{
        .len = result_len,
        .sentinel = if (lhs_info.sentinel) |s| s.toIntern() else .none,
        .child = lhs_info.elem_type.toIntern(),
    });

    const ptr_addrspace = if (lhs_ty.zigTypeTag(mod) == .Pointer) lhs_ty.ptrAddressSpace(mod) else null;
    const lhs_len = try sema.usizeCast(block, lhs_src, lhs_info.len);

    if (try sema.resolveDefinedValue(block, lhs_src, lhs)) |lhs_val| {
        const lhs_sub_val = if (lhs_ty.isSinglePointer(mod))
            (try sema.pointerDeref(block, lhs_src, lhs_val, lhs_ty)).?
        else
            lhs_val;

        const val = v: {
            // Optimization for the common pattern of a single element repeated N times, such
            // as zero-filling a byte array.
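            // For example, `[_]u8{0} ** 4096` interns one `repeated_elem`
            // aggregate below instead of 4096 identical element values.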
            if (lhs_len == 1 and lhs_info.sentinel == null) {
                const elem_val = try lhs_sub_val.elemValue(mod, 0);
                break :v try mod.intern(.{ .aggregate = .{
                    .ty = result_ty.toIntern(),
                    .storage = .{ .repeated_elem = elem_val.toIntern() },
                } });
            }

            const element_vals = try sema.arena.alloc(InternPool.Index, result_len);
            var elem_i: usize = 0;
            while (elem_i < result_len) {
                var lhs_i: usize = 0;
                while (lhs_i < lhs_len) : (lhs_i += 1) {
                    const elem_val = try lhs_sub_val.elemValue(mod, lhs_i);
                    element_vals[elem_i] = elem_val.toIntern();
                    elem_i += 1;
                }
            }
            break :v try mod.intern(.{ .aggregate = .{
                .ty = result_ty.toIntern(),
                .storage = .{ .elems = element_vals },
            } });
        };
        return sema.addConstantMaybeRef(block, result_ty, val.toValue(), ptr_addrspace != null);
    }

    try sema.requireRuntimeBlock(block, src, lhs_src);

    if (ptr_addrspace) |ptr_as| {
        const alloc_ty = try mod.ptrType(.{
            .child = result_ty.toIntern(),
            .flags = .{ .address_space = ptr_as },
        });
        const alloc = try block.addTy(.alloc, alloc_ty);
        const elem_ptr_ty = try mod.ptrType(.{
            .child = lhs_info.elem_type.toIntern(),
            .flags = .{ .address_space = ptr_as },
        });

        var elem_i: usize = 0;
        while (elem_i < result_len) {
            var lhs_i: usize = 0;
            while (lhs_i < lhs_len) : (lhs_i += 1) {
                const elem_index = try mod.intRef(Type.usize, elem_i);
                elem_i += 1;
                const lhs_index = try mod.intRef(Type.usize, lhs_i);
                const elem_ptr = try block.addPtrElemPtr(alloc, elem_index, elem_ptr_ty);
                const init = try sema.elemVal(block, lhs_src, lhs, lhs_index, src, true);
                try sema.storePtr2(block, src, elem_ptr, src, init, lhs_src, .store);
            }
        }
        if (lhs_info.sentinel) |sent_val| {
            const elem_index = try mod.intRef(Type.usize, result_len);
            const elem_ptr = try block.addPtrElemPtr(alloc, elem_index, elem_ptr_ty);
            const init = Air.internedToRef(sent_val.toIntern());
            try sema.storePtr2(block, src, elem_ptr, src, init, lhs_src, .store);
        }

        return alloc;
    }

    const element_refs = try sema.arena.alloc(Air.Inst.Ref, result_len);
    var elem_i: usize = 0;
    while (elem_i < result_len) {
        var lhs_i: usize = 0;
        while (lhs_i < lhs_len) : (lhs_i += 1) {
            const lhs_index = try mod.intRef(Type.usize, lhs_i);
            const init = try sema.elemVal(block, lhs_src, lhs, lhs_index, src, true);
            element_refs[elem_i] = init;
            elem_i += 1;
        }
    }

    return block.addAggregateInit(result_ty, element_refs);
}

fn zirNegate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
    const src = inst_data.src();
    const lhs_src = src;
    const rhs_src: LazySrcLoc = .{ .node_offset_un_op = inst_data.src_node };

    const rhs = try sema.resolveInst(inst_data.operand);
    const rhs_ty = sema.typeOf(rhs);
    const rhs_scalar_ty = rhs_ty.scalarType(mod);

    if (rhs_scalar_ty.isUnsignedInt(mod) or switch (rhs_scalar_ty.zigTypeTag(mod)) {
        .Int, .ComptimeInt, .Float, .ComptimeFloat => false,
        else => true,
    }) {
        return sema.fail(block, src, "negation of type '{}'", .{rhs_ty.fmt(mod)});
    }

    if (rhs_scalar_ty.isAnyFloat()) {
        // We handle float negation here to ensure negative zero is represented in the bits.
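        // For example, `-@as(f32, 0.0)` must yield a value whose sign bit is
        // set; lowering it as `0.0 - rhs` (the integer path below) would
        // produce +0.0 under IEEE round-to-nearest instead of -0.0.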
        if (try sema.resolveMaybeUndefVal(rhs)) |rhs_val| {
            if (rhs_val.isUndef(mod)) return mod.undefRef(rhs_ty);
            return Air.internedToRef((try rhs_val.floatNeg(rhs_ty, sema.arena, mod)).toIntern());
        }
        try sema.requireRuntimeBlock(block, src, null);
        return block.addUnOp(if (block.float_mode == .Optimized) .neg_optimized else .neg, rhs);
    }

    const lhs = Air.internedToRef((try sema.splat(rhs_ty, try mod.intValue(rhs_scalar_ty, 0))).toIntern());
    return sema.analyzeArithmetic(block, .sub, lhs, rhs, src, lhs_src, rhs_src, true);
}

fn zirNegateWrap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
    const src = inst_data.src();
    const lhs_src = src;
    const rhs_src: LazySrcLoc = .{ .node_offset_un_op = inst_data.src_node };

    const rhs = try sema.resolveInst(inst_data.operand);
    const rhs_ty = sema.typeOf(rhs);
    const rhs_scalar_ty = rhs_ty.scalarType(mod);

    switch (rhs_scalar_ty.zigTypeTag(mod)) {
        .Int, .ComptimeInt, .Float, .ComptimeFloat => {},
        else => return sema.fail(block, src, "negation of type '{}'", .{rhs_ty.fmt(mod)}),
    }

    const lhs = Air.internedToRef((try sema.splat(rhs_ty, try mod.intValue(rhs_scalar_ty, 0))).toIntern());
    return sema.analyzeArithmetic(block, .subwrap, lhs, rhs, src, lhs_src, rhs_src, true);
}

fn zirArithmetic(
    sema: *Sema,
    block: *Block,
    inst: Zir.Inst.Index,
    zir_tag: Zir.Inst.Tag,
    safety: bool,
) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    sema.src = .{ .node_offset_bin_op = inst_data.src_node };
    const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
    const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node };
    const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
    const lhs = try sema.resolveInst(extra.lhs);
    const rhs = try sema.resolveInst(extra.rhs);

    return sema.analyzeArithmetic(block, zir_tag, lhs, rhs, sema.src, lhs_src, rhs_src, safety);
}

fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node };
    sema.src = src;
    const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
    const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node };
    const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
    const lhs = try sema.resolveInst(extra.lhs);
    const rhs = try sema.resolveInst(extra.rhs);
    const lhs_ty = sema.typeOf(lhs);
    const rhs_ty = sema.typeOf(rhs);
    const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(mod);
    const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(mod);
    try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
    try sema.checkInvalidPtrArithmetic(block, src, lhs_ty);

    const instructions = &[_]Air.Inst.Ref{ lhs, rhs };
    const resolved_type = try sema.resolvePeerTypes(block, src, instructions, .{
        .override = &[_]?LazySrcLoc{ lhs_src, rhs_src },
    });

    const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src);
    const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src);

    const lhs_scalar_ty = lhs_ty.scalarType(mod);
    const rhs_scalar_ty = rhs_ty.scalarType(mod);
    const scalar_tag = resolved_type.scalarType(mod).zigTypeTag(mod);

    const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt;

    try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, .div);

    const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(casted_lhs);
    const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(casted_rhs);

    if ((lhs_ty.zigTypeTag(mod) == .ComptimeFloat and rhs_ty.zigTypeTag(mod) == .ComptimeInt) or
        (lhs_ty.zigTypeTag(mod) == .ComptimeInt and rhs_ty.zigTypeTag(mod) == .ComptimeFloat))
    {
        // If it makes a difference whether we coerce to ints or floats before doing the division, error.
        // If lhs % rhs is 0, it doesn't matter.
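        // For example, `7.0 / 2` is ambiguous: 3 if coerced to ints, 3.5 if
        // coerced to floats, so it is rejected; `8.0 / 2` is 4 either way.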
        const lhs_val = maybe_lhs_val orelse unreachable;
        const rhs_val = maybe_rhs_val orelse unreachable;
        const rem = lhs_val.floatRem(rhs_val, resolved_type, sema.arena, mod) catch unreachable;
        if (!rem.compareAllWithZero(.eq, mod)) {
            return sema.fail(
                block,
                src,
                "ambiguous coercion of division operands '{}' and '{}'; non-zero remainder '{}'",
                .{ lhs_ty.fmt(mod), rhs_ty.fmt(mod), rem.fmtValue(resolved_type, mod) },
            );
        }
    }

    // TODO: emit compile error when .div is used on integers and there would be an
    // ambiguous result between div_floor and div_trunc.

    // For integers:
    // If the lhs is zero, then zero is returned regardless of rhs.
    // If the rhs is zero, compile error for division by zero.
    // If the rhs is undefined, compile error because there is a possible
    // value (zero) for which the division would be illegal behavior.
    // If the lhs is undefined:
    //   * if lhs type is signed:
    //     * if rhs is comptime-known and not -1, result is undefined
    //     * if rhs is -1 or runtime-known, compile error because there is a
    //       possible value (-min_int / -1) for which division would be
    //       illegal behavior.
    //   * if lhs type is unsigned, undef is returned regardless of rhs.
    //
    // For floats:
    // If the rhs is zero:
    //   * comptime_float: compile error for division by zero.
    //   * other float type:
    //     * if the lhs is zero: QNaN
    //     * otherwise: +Inf or -Inf depending on lhs sign
    // If the rhs is undefined:
    //   * comptime_float: compile error because there is a possible
    //     value (zero) for which the division would be illegal behavior.
    //   * other float type: result is undefined
    // If the lhs is undefined, result is undefined.
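    // To illustrate with signed comptime integer operands: `0 / rhs` folds to
    // 0, `lhs / 0` and `lhs / undef` are compile errors, and `undef / 2`
    // folds to undef because 2 can never be -1.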
    switch (scalar_tag) {
        .Int, .ComptimeInt, .ComptimeFloat => {
            if (maybe_lhs_val) |lhs_val| {
                if (!lhs_val.isUndef(mod)) {
                    if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
                        const scalar_zero = switch (scalar_tag) {
                            .ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0.0),
                            .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0),
                            else => unreachable,
                        };
                        const zero_val = try sema.splat(resolved_type, scalar_zero);
                        return Air.internedToRef(zero_val.toIntern());
                    }
                }
            }
            if (maybe_rhs_val) |rhs_val| {
                if (rhs_val.isUndef(mod)) {
                    return sema.failWithUseOfUndef(block, rhs_src);
                }
                if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) {
                    return sema.failWithDivideByZero(block, rhs_src);
                }
                // TODO: if the RHS is one, return the LHS directly
            }
        },
        else => {},
    }

    const runtime_src = rs: {
        if (maybe_lhs_val) |lhs_val| {
            if (lhs_val.isUndef(mod)) {
                if (lhs_scalar_ty.isSignedInt(mod) and rhs_scalar_ty.isSignedInt(mod)) {
                    if (maybe_rhs_val) |rhs_val| {
                        if (try sema.compareAll(rhs_val, .neq, try mod.intValue(resolved_type, -1), resolved_type)) {
                            return mod.undefRef(resolved_type);
                        }
                    }
                    return sema.failWithUseOfUndef(block, rhs_src);
                }
                return mod.undefRef(resolved_type);
            }

            if (maybe_rhs_val) |rhs_val| {
                if (is_int) {
                    var overflow_idx: ?usize = null;
                    const res = try lhs_val.intDiv(rhs_val, resolved_type, &overflow_idx, sema.arena, mod);
                    if (overflow_idx) |vec_idx| {
                        return sema.failWithIntegerOverflow(block, src, resolved_type, res, vec_idx);
                    }
                    return Air.internedToRef(res.toIntern());
                } else {
                    return Air.internedToRef((try lhs_val.floatDiv(rhs_val, resolved_type, sema.arena, mod)).toIntern());
                }
            } else {
                break :rs rhs_src;
            }
        } else {
            break :rs lhs_src;
        }
    };

    try sema.requireRuntimeBlock(block, src, runtime_src);

    if (block.wantSafety()) {
        try sema.addDivIntOverflowSafety(block, src, resolved_type, lhs_scalar_ty, maybe_lhs_val, maybe_rhs_val, casted_lhs, casted_rhs, is_int);
        try sema.addDivByZeroSafety(block, src, resolved_type, maybe_rhs_val, casted_rhs, is_int);
    }

    const air_tag = if (is_int) blk: {
        if (lhs_ty.isSignedInt(mod) or rhs_ty.isSignedInt(mod)) {
            return sema.fail(
                block,
                src,
                "division with '{}' and '{}': signed integers must use @divTrunc, @divFloor, or @divExact",
                .{ lhs_ty.fmt(mod), rhs_ty.fmt(mod) },
            );
        }
        break :blk Air.Inst.Tag.div_trunc;
    } else switch (block.float_mode) {
        .Optimized => Air.Inst.Tag.div_float_optimized,
        .Strict => Air.Inst.Tag.div_float,
    };
    return block.addBinOp(air_tag, casted_lhs, casted_rhs);
}

fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node };
    sema.src = src;
    const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
    const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node };
    const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
    const lhs = try sema.resolveInst(extra.lhs);
    const rhs = try sema.resolveInst(extra.rhs);
    const lhs_ty = sema.typeOf(lhs);
    const rhs_ty = sema.typeOf(rhs);
    const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(mod);
    const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(mod);
    try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
    try sema.checkInvalidPtrArithmetic(block, src, lhs_ty);

    const instructions = &[_]Air.Inst.Ref{ lhs, rhs };
    const resolved_type = try sema.resolvePeerTypes(block, src, instructions, .{
        .override = &[_]?LazySrcLoc{ lhs_src, rhs_src },
    });

    const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src);
    const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src);

    const lhs_scalar_ty = lhs_ty.scalarType(mod);
    const scalar_tag = resolved_type.scalarType(mod).zigTypeTag(mod);

    const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt;

    try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, .div_exact);

    const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(casted_lhs);
    const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(casted_rhs);

    const runtime_src = rs: {
        // For integers:
        // If the lhs is zero, then zero is returned regardless of rhs.
        // If the rhs is zero, compile error for division by zero.
        // If the rhs is undefined, compile error because there is a possible
        // value (zero) for which the division would be illegal behavior.
        // If the lhs is undefined, compile error because there is a possible
        // value for which the division would result in a remainder.
        // TODO: emit runtime safety for if there is a remainder
        // TODO: emit runtime safety for division by zero
        //
        // For floats:
        // If the rhs is zero, compile error for division by zero.
        // If the rhs is undefined, compile error because there is a possible
        // value (zero) for which the division would be illegal behavior.
        // If the lhs is undefined, compile error because there is a possible
        // value for which the division would result in a remainder.
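        // For example, at comptime `@divExact(6, 3)` folds to 2, while
        // `@divExact(7, 3)` is a compile error because 7 % 3 != 0.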
        if (maybe_lhs_val) |lhs_val| {
            if (lhs_val.isUndef(mod)) {
                return sema.failWithUseOfUndef(block, rhs_src);
            } else {
                if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
                    const scalar_zero = switch (scalar_tag) {
                        .ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0.0),
                        .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0),
                        else => unreachable,
                    };
                    const zero_val = try sema.splat(resolved_type, scalar_zero);
                    return Air.internedToRef(zero_val.toIntern());
                }
            }
        }
        if (maybe_rhs_val) |rhs_val| {
            if (rhs_val.isUndef(mod)) {
                return sema.failWithUseOfUndef(block, rhs_src);
            }
            if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) {
                return sema.failWithDivideByZero(block, rhs_src);
            }
            // TODO: if the RHS is one, return the LHS directly
        }
        if (maybe_lhs_val) |lhs_val| {
            if (maybe_rhs_val) |rhs_val| {
                if (is_int) {
                    const modulus_val = try lhs_val.intMod(rhs_val, resolved_type, sema.arena, mod);
                    if (!(modulus_val.compareAllWithZero(.eq, mod))) {
                        return sema.fail(block, src, "exact division produced remainder", .{});
                    }
                    var overflow_idx: ?usize = null;
                    const res = try lhs_val.intDiv(rhs_val, resolved_type, &overflow_idx, sema.arena, mod);
                    if (overflow_idx) |vec_idx| {
                        return sema.failWithIntegerOverflow(block, src, resolved_type, res, vec_idx);
                    }
                    return Air.internedToRef(res.toIntern());
                } else {
                    const modulus_val = try lhs_val.floatMod(rhs_val, resolved_type, sema.arena, mod);
                    if (!(modulus_val.compareAllWithZero(.eq, mod))) {
                        return sema.fail(block, src, "exact division produced remainder", .{});
                    }
                    return Air.internedToRef((try lhs_val.floatDiv(rhs_val, resolved_type, sema.arena, mod)).toIntern());
                }
            } else break :rs rhs_src;
        } else break :rs lhs_src;
    };

    try sema.requireRuntimeBlock(block, src, runtime_src);

    // Depending on whether safety is enabled, we will have a slightly different strategy
    // here. The `div_exact` AIR instruction causes undefined behavior if a remainder
    // is produced, so in the safety check case, it cannot be used. Instead we do a
    // div_trunc and check for remainder.
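    // That is, with safety on: lower to `div_trunc`, then for ints verify
    // `rem(lhs, rhs) == 0`, and for floats compare the quotient against its
    // floor, tripping the `exact_division_remainder` panic otherwise.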
    if (block.wantSafety()) {
        try sema.addDivIntOverflowSafety(block, src, resolved_type, lhs_scalar_ty, maybe_lhs_val, maybe_rhs_val, casted_lhs, casted_rhs, is_int);
        try sema.addDivByZeroSafety(block, src, resolved_type, maybe_rhs_val, casted_rhs, is_int);

        const result = try block.addBinOp(.div_trunc, casted_lhs, casted_rhs);
        const ok = if (!is_int) ok: {
            const floored = try block.addUnOp(.floor, result);

            if (resolved_type.zigTypeTag(mod) == .Vector) {
                const eql = try block.addCmpVector(result, floored, .eq);
                break :ok try block.addInst(.{
                    .tag = switch (block.float_mode) {
                        .Strict => .reduce,
                        .Optimized => .reduce_optimized,
                    },
                    .data = .{ .reduce = .{
                        .operand = eql,
                        .operation = .And,
                    } },
                });
            } else {
                const is_in_range = try block.addBinOp(switch (block.float_mode) {
                    .Strict => .cmp_eq,
                    .Optimized => .cmp_eq_optimized,
                }, result, floored);
                break :ok is_in_range;
            }
        } else ok: {
            const remainder = try block.addBinOp(.rem, casted_lhs, casted_rhs);

            const scalar_zero = switch (scalar_tag) {
                .ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0.0),
                .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0),
                else => unreachable,
            };
            if (resolved_type.zigTypeTag(mod) == .Vector) {
                const zero_val = try sema.splat(resolved_type, scalar_zero);
                const zero = Air.internedToRef(zero_val.toIntern());
                const eql = try block.addCmpVector(remainder, zero, .eq);
                break :ok try block.addInst(.{
                    .tag = .reduce,
                    .data = .{ .reduce = .{
                        .operand = eql,
                        .operation = .And,
                    } },
                });
            } else {
                const zero = Air.internedToRef(scalar_zero.toIntern());
                const is_in_range = try block.addBinOp(.cmp_eq, remainder, zero);
                break :ok is_in_range;
            }
        };
        try sema.addSafetyCheck(block, src, ok, .exact_division_remainder);
        return result;
    }

    return block.addBinOp(airTag(block, is_int, .div_exact, .div_exact_optimized), casted_lhs, casted_rhs);
}

fn zirDivFloor(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node };
    sema.src = src;
    const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
    const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node };
    const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
    const lhs = try sema.resolveInst(extra.lhs);
    const rhs = try sema.resolveInst(extra.rhs);
    const lhs_ty = sema.typeOf(lhs);
    const rhs_ty = sema.typeOf(rhs);
    const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(mod);
    const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(mod);
    try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
    try sema.checkInvalidPtrArithmetic(block, src, lhs_ty);

    const instructions = &[_]Air.Inst.Ref{ lhs, rhs };
    const resolved_type = try sema.resolvePeerTypes(block, src, instructions, .{
        .override = &[_]?LazySrcLoc{ lhs_src, rhs_src },
    });

    const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src);
    const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src);

    const lhs_scalar_ty = lhs_ty.scalarType(mod);
    const rhs_scalar_ty = rhs_ty.scalarType(mod);
    const scalar_tag = resolved_type.scalarType(mod).zigTypeTag(mod);

    const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt;

    try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, .div_floor);

    const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(casted_lhs);
    const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(casted_rhs);

    const runtime_src = rs: {
        // For integers:
        // If the lhs is zero, then zero is returned regardless of rhs.
        // If the rhs is zero, compile error for division by zero.
        // If the rhs is undefined, compile error because there is a possible
        // value (zero) for which the division would be illegal behavior.
        // If the lhs is undefined:
        //   * if lhs type is signed:
        //     * if rhs is comptime-known and not -1, result is undefined
        //     * if rhs is -1 or runtime-known, compile error because there is a
        //       possible value (-min_int / -1) for which division would be
        //       illegal behavior.
        //   * if lhs type is unsigned, undef is returned regardless of rhs.
        // TODO: emit runtime safety for division by zero
        //
        // For floats:
        // If the rhs is zero, compile error for division by zero.
        // If the rhs is undefined, compile error because there is a possible
        // value (zero) for which the division would be illegal behavior.
        // If the lhs is undefined, result is undefined.
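        // For example, `@divFloor(-5, 3)` is -2 (rounds toward negative
        // infinity), whereas `@divTrunc(-5, 3)` would be -1.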
        if (maybe_lhs_val) |lhs_val| {
            if (!lhs_val.isUndef(mod)) {
                if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
                    const scalar_zero = switch (scalar_tag) {
                        .ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0.0),
                        .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0),
                        else => unreachable,
                    };
                    const zero_val = try sema.splat(resolved_type, scalar_zero);
                    return Air.internedToRef(zero_val.toIntern());
                }
            }
        }
        if (maybe_rhs_val) |rhs_val| {
            if (rhs_val.isUndef(mod)) {
                return sema.failWithUseOfUndef(block, rhs_src);
            }
            if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) {
                return sema.failWithDivideByZero(block, rhs_src);
            }
            // TODO: if the RHS is one, return the LHS directly
        }
        if (maybe_lhs_val) |lhs_val| {
            if (lhs_val.isUndef(mod)) {
                if (lhs_scalar_ty.isSignedInt(mod) and rhs_scalar_ty.isSignedInt(mod)) {
                    if (maybe_rhs_val) |rhs_val| {
                        if (try sema.compareAll(rhs_val, .neq, try mod.intValue(resolved_type, -1), resolved_type)) {
                            return mod.undefRef(resolved_type);
                        }
                    }
                    return sema.failWithUseOfUndef(block, rhs_src);
                }
                return mod.undefRef(resolved_type);
            }

            if (maybe_rhs_val) |rhs_val| {
                if (is_int) {
                    return Air.internedToRef((try lhs_val.intDivFloor(rhs_val, resolved_type, sema.arena, mod)).toIntern());
                } else {
                    return Air.internedToRef((try lhs_val.floatDivFloor(rhs_val, resolved_type, sema.arena, mod)).toIntern());
                }
            } else break :rs rhs_src;
        } else break :rs lhs_src;
    };

    try sema.requireRuntimeBlock(block, src, runtime_src);

    if (block.wantSafety()) {
        try sema.addDivIntOverflowSafety(block, src, resolved_type, lhs_scalar_ty, maybe_lhs_val, maybe_rhs_val, casted_lhs, casted_rhs, is_int);
        try sema.addDivByZeroSafety(block, src, resolved_type, maybe_rhs_val, casted_rhs, is_int);
    }

    return block.addBinOp(airTag(block, is_int, .div_floor, .div_floor_optimized), casted_lhs, casted_rhs);
}

fn zirDivTrunc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node };
    sema.src = src;
    const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
    const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node };
    const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
    const lhs = try sema.resolveInst(extra.lhs);
    const rhs = try sema.resolveInst(extra.rhs);
    const lhs_ty = sema.typeOf(lhs);
    const rhs_ty = sema.typeOf(rhs);
    const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(mod);
    const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(mod);
    try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
    try sema.checkInvalidPtrArithmetic(block, src, lhs_ty);

    const instructions = &[_]Air.Inst.Ref{ lhs, rhs };
    const resolved_type = try sema.resolvePeerTypes(block, src, instructions, .{
        .override = &[_]?LazySrcLoc{ lhs_src, rhs_src },
    });

    const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src);
    const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src);

    const lhs_scalar_ty = lhs_ty.scalarType(mod);
    const rhs_scalar_ty = rhs_ty.scalarType(mod);
    const scalar_tag = resolved_type.scalarType(mod).zigTypeTag(mod);

    const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt;

    try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, .div_trunc);

    const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(casted_lhs);
    const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(casted_rhs);

    const runtime_src = rs: {
        // For integers:
        // If the lhs is zero, then zero is returned regardless of rhs.
        // If the rhs is zero, compile error for division by zero.
        // If the rhs is undefined, compile error because there is a possible
        // value (zero) for which the division would be illegal behavior.
        // If the lhs is undefined:
        //   * if lhs type is signed:
        //     * if rhs is comptime-known and not -1, result is undefined
        //     * if rhs is -1 or runtime-known, compile error because there is a
        //       possible value (-min_int / -1) for which division would be
        //       illegal behavior.
        //   * if lhs type is unsigned, undef is returned regardless of rhs.
        // TODO: emit runtime safety for division by zero
        //
        // For floats:
        // If the rhs is zero, compile error for division by zero.
        // If the rhs is undefined, compile error because there is a possible
        // value (zero) for which the division would be illegal behavior.
        // If the lhs is undefined, result is undefined.
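        // For example, `@divTrunc(-5, 3)` is -1 (rounds toward zero); the one
        // overflowing case, `minInt / -1`, is reported as integer overflow below.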
        if (maybe_lhs_val) |lhs_val| {
            if (!lhs_val.isUndef(mod)) {
                if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
                    const scalar_zero = switch (scalar_tag) {
                        .ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0.0),
                        .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0),
                        else => unreachable,
                    };
                    const zero_val = try sema.splat(resolved_type, scalar_zero);
                    return Air.internedToRef(zero_val.toIntern());
                }
            }
        }
        if (maybe_rhs_val) |rhs_val| {
            if (rhs_val.isUndef(mod)) {
                return sema.failWithUseOfUndef(block, rhs_src);
            }
            if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) {
                return sema.failWithDivideByZero(block, rhs_src);
            }
        }
        if (maybe_lhs_val) |lhs_val| {
            if (lhs_val.isUndef(mod)) {
                if (lhs_scalar_ty.isSignedInt(mod) and rhs_scalar_ty.isSignedInt(mod)) {
                    if (maybe_rhs_val) |rhs_val| {
                        if (try sema.compareAll(rhs_val, .neq, try mod.intValue(resolved_type, -1), resolved_type)) {
                            return mod.undefRef(resolved_type);
                        }
                    }
                    return sema.failWithUseOfUndef(block, rhs_src);
                }
                return mod.undefRef(resolved_type);
            }

            if (maybe_rhs_val) |rhs_val| {
                if (is_int) {
                    var overflow_idx: ?usize = null;
                    const res = try lhs_val.intDiv(rhs_val, resolved_type, &overflow_idx, sema.arena, mod);
                    if (overflow_idx) |vec_idx| {
                        return sema.failWithIntegerOverflow(block, src, resolved_type, res, vec_idx);
                    }
                    return Air.internedToRef(res.toIntern());
                } else {
                    return Air.internedToRef((try lhs_val.floatDivTrunc(rhs_val, resolved_type, sema.arena, mod)).toIntern());
                }
            } else break :rs rhs_src;
        } else break :rs lhs_src;
    };

    try sema.requireRuntimeBlock(block, src, runtime_src);

    if (block.wantSafety()) {
        try sema.addDivIntOverflowSafety(block, src, resolved_type, lhs_scalar_ty, maybe_lhs_val, maybe_rhs_val, casted_lhs, casted_rhs, is_int);
        try sema.addDivByZeroSafety(block, src, resolved_type, maybe_rhs_val, casted_rhs, is_int);
    }

    return block.addBinOp(airTag(block, is_int, .div_trunc, .div_trunc_optimized), casted_lhs, casted_rhs);
}

fn addDivIntOverflowSafety(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    resolved_type: Type,
    lhs_scalar_ty: Type,
    maybe_lhs_val: ?Value,
    maybe_rhs_val: ?Value,
    casted_lhs: Air.Inst.Ref,
    casted_rhs: Air.Inst.Ref,
    is_int: bool,
) CompileError!void {
    const mod = sema.mod;
    if (!is_int) return;
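    // The only way integer division can overflow is `minInt(T) / -1`; e.g.
    // for i8, `-128 / -1` would be 128, which does not fit in i8. The checks
    // below either prove that case impossible or emit a runtime guard for it.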

    // If the LHS is unsigned, it cannot cause overflow.
    if (!lhs_scalar_ty.isSignedInt(mod)) return;

    // If the LHS is widened to a larger integer type, no overflow is possible.
    if (lhs_scalar_ty.intInfo(mod).bits < resolved_type.intInfo(mod).bits) {
        return;
    }

    const min_int = try resolved_type.minInt(mod, resolved_type);
    const neg_one_scalar = try mod.intValue(lhs_scalar_ty, -1);
    const neg_one = try sema.splat(resolved_type, neg_one_scalar);

    // If the LHS is comptime-known to be not equal to the min int,
    // no overflow is possible.
    if (maybe_lhs_val) |lhs_val| {
        if (try lhs_val.compareAll(.neq, min_int, resolved_type, mod)) return;
    }

    // If the RHS is comptime-known to not be equal to -1, no overflow is possible.
    if (maybe_rhs_val) |rhs_val| {
        if (try rhs_val.compareAll(.neq, neg_one, resolved_type, mod)) return;
    }

    var ok: Air.Inst.Ref = .none;
    if (resolved_type.zigTypeTag(mod) == .Vector) {
        if (maybe_lhs_val == null) {
            const min_int_ref = Air.internedToRef(min_int.toIntern());
            ok = try block.addCmpVector(casted_lhs, min_int_ref, .neq);
        }
        if (maybe_rhs_val == null) {
            const neg_one_ref = Air.internedToRef(neg_one.toIntern());
            const rhs_ok = try block.addCmpVector(casted_rhs, neg_one_ref, .neq);
            if (ok == .none) {
                ok = rhs_ok;
            } else {
                ok = try block.addBinOp(.bool_or, ok, rhs_ok);
            }
        }
        assert(ok != .none);
        ok = try block.addInst(.{
            .tag = .reduce,
            .data = .{ .reduce = .{
                .operand = ok,
                .operation = .And,
            } },
        });
    } else {
        if (maybe_lhs_val == null) {
            const min_int_ref = Air.internedToRef(min_int.toIntern());
            ok = try block.addBinOp(.cmp_neq, casted_lhs, min_int_ref);
        }
        if (maybe_rhs_val == null) {
            const neg_one_ref = Air.internedToRef(neg_one.toIntern());
            const rhs_ok = try block.addBinOp(.cmp_neq, casted_rhs, neg_one_ref);
            if (ok == .none) {
                ok = rhs_ok;
            } else {
                ok = try block.addBinOp(.bool_or, ok, rhs_ok);
            }
        }
        assert(ok != .none);
    }
    try sema.addSafetyCheck(block, src, ok, .integer_overflow);
}

fn addDivByZeroSafety(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    resolved_type: Type,
    maybe_rhs_val: ?Value,
    casted_rhs: Air.Inst.Ref,
    is_int: bool,
) CompileError!void {
    // Strict IEEE floats have well-defined division by zero.
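    // For example, `1.0 / 0.0` is +inf and `0.0 / 0.0` is NaN under strict
    // float semantics, so no runtime check is needed for floats in that mode.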
    if (!is_int and block.float_mode == .Strict) return;

    // If rhs was comptime-known to be zero a compile error would have been
    // emitted above.
    if (maybe_rhs_val != null) return;

    const mod = sema.mod;
    const scalar_zero = if (is_int)
        try mod.intValue(resolved_type.scalarType(mod), 0)
    else
        try mod.floatValue(resolved_type.scalarType(mod), 0.0);
    const ok = if (resolved_type.zigTypeTag(mod) == .Vector) ok: {
        const zero_val = try sema.splat(resolved_type, scalar_zero);
        const zero = Air.internedToRef(zero_val.toIntern());
        const ok = try block.addCmpVector(casted_rhs, zero, .neq);
        break :ok try block.addInst(.{
            .tag = if (is_int) .reduce else .reduce_optimized,
            .data = .{ .reduce = .{
                .operand = ok,
                .operation = .And,
            } },
        });
    } else ok: {
        const zero = Air.internedToRef(scalar_zero.toIntern());
        break :ok try block.addBinOp(if (is_int) .cmp_neq else .cmp_neq_optimized, casted_rhs, zero);
    };
    try sema.addSafetyCheck(block, src, ok, .divide_by_zero);
}

fn airTag(block: *Block, is_int: bool, normal: Air.Inst.Tag, optimized: Air.Inst.Tag) Air.Inst.Tag {
    if (is_int) return normal;
    return switch (block.float_mode) {
        .Strict => normal,
        .Optimized => optimized,
    };
}

fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node };
    sema.src = src;
    const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
    const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node };
    const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
    const lhs = try sema.resolveInst(extra.lhs);
    const rhs = try sema.resolveInst(extra.rhs);
    const lhs_ty = sema.typeOf(lhs);
    const rhs_ty = sema.typeOf(rhs);
    const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(mod);
    const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(mod);
    try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
    try sema.checkInvalidPtrArithmetic(block, src, lhs_ty);

    const instructions = &[_]Air.Inst.Ref{ lhs, rhs };
    const resolved_type = try sema.resolvePeerTypes(block, src, instructions, .{
        .override = &[_]?LazySrcLoc{ lhs_src, rhs_src },
    });

    const is_vector = resolved_type.zigTypeTag(mod) == .Vector;

    const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src);
    const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src);

    const lhs_scalar_ty = lhs_ty.scalarType(mod);
    const rhs_scalar_ty = rhs_ty.scalarType(mod);
    const scalar_tag = resolved_type.scalarType(mod).zigTypeTag(mod);

    const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt;

    try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, .mod_rem);

    const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(casted_lhs);
    const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(casted_rhs);

    const runtime_src = rs: {
        // For integers:
        // Either operand being undef is a compile error because there exists
        // a possible value (TODO what is it?) that would invoke illegal behavior.
        // TODO: can lhs undef be handled better?
        //
        // For floats:
        // If the rhs is zero, compile error for division by zero.
        // If the rhs is undefined, compile error because there is a possible
        // value (zero) for which the division would be illegal behavior.
        // If the lhs is undefined, result is undefined.
        //
        // For either one: if the result would be different between @mod and @rem,
        // then emit a compile error saying you have to pick one.
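        // For example, `-7 % 3` is rejected: `@rem(-7, 3)` is -1 while
        // `@mod(-7, 3)` is 2, so `%` requires operands known to be non-negative.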
        if (is_int) {
            if (maybe_lhs_val) |lhs_val| {
                if (lhs_val.isUndef(mod)) {
                    return sema.failWithUseOfUndef(block, lhs_src);
                }
                if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
                    const scalar_zero = switch (scalar_tag) {
                        .ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0.0),
                        .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0),
                        else => unreachable,
                    };
                    const zero_val = if (is_vector) (try mod.intern(.{ .aggregate = .{
                        .ty = resolved_type.toIntern(),
                        .storage = .{ .repeated_elem = scalar_zero.toIntern() },
                    } })).toValue() else scalar_zero;
                    return Air.internedToRef(zero_val.toIntern());
                }
            } else if (lhs_scalar_ty.isSignedInt(mod)) {
                return sema.failWithModRemNegative(block, lhs_src, lhs_ty, rhs_ty);
            }
            if (maybe_rhs_val) |rhs_val| {
                if (rhs_val.isUndef(mod)) {
                    return sema.failWithUseOfUndef(block, rhs_src);
                }
                if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) {
                    return sema.failWithDivideByZero(block, rhs_src);
                }
                if (!(try rhs_val.compareAllWithZeroAdvanced(.gte, sema))) {
                    return sema.failWithModRemNegative(block, rhs_src, lhs_ty, rhs_ty);
                }
                if (maybe_lhs_val) |lhs_val| {
                    const rem_result = try sema.intRem(resolved_type, lhs_val, rhs_val);
                    // If this answer could possibly be different by doing `intMod`,
                    // we must emit a compile error. Otherwise, it's OK.
                    if (!(try lhs_val.compareAllWithZeroAdvanced(.gte, sema)) and
                        !(try rem_result.compareAllWithZeroAdvanced(.eq, sema)))
                    {
                        return sema.failWithModRemNegative(block, lhs_src, lhs_ty, rhs_ty);
                    }
                    return Air.internedToRef(rem_result.toIntern());
                }
                break :rs lhs_src;
            } else if (rhs_scalar_ty.isSignedInt(mod)) {
                return sema.failWithModRemNegative(block, rhs_src, lhs_ty, rhs_ty);
            } else {
                break :rs rhs_src;
            }
        }
        // float operands
        if (maybe_rhs_val) |rhs_val| {
            if (rhs_val.isUndef(mod)) {
                return sema.failWithUseOfUndef(block, rhs_src);
            }
            if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) {
                return sema.failWithDivideByZero(block, rhs_src);
            }
            if (!(try rhs_val.compareAllWithZeroAdvanced(.gte, sema))) {
                return sema.failWithModRemNegative(block, rhs_src, lhs_ty, rhs_ty);
            }
            if (maybe_lhs_val) |lhs_val| {
                if (lhs_val.isUndef(mod) or !(try lhs_val.compareAllWithZeroAdvanced(.gte, sema))) {
                    return sema.failWithModRemNegative(block, lhs_src, lhs_ty, rhs_ty);
                }
                return Air.internedToRef((try lhs_val.floatRem(rhs_val, resolved_type, sema.arena, mod)).toIntern());
            } else {
                return sema.failWithModRemNegative(block, lhs_src, lhs_ty, rhs_ty);
            }
        } else {
            return sema.failWithModRemNegative(block, rhs_src, lhs_ty, rhs_ty);
        }
    };

    try sema.requireRuntimeBlock(block, src, runtime_src);

    if (block.wantSafety()) {
        try sema.addDivByZeroSafety(block, src, resolved_type, maybe_rhs_val, casted_rhs, is_int);
    }

    const air_tag = airTag(block, is_int, .rem, .rem_optimized);
    return block.addBinOp(air_tag, casted_lhs, casted_rhs);
}

fn intRem(
    sema: *Sema,
    ty: Type,
    lhs: Value,
    rhs: Value,
) CompileError!Value {
    const mod = sema.mod;
    if (ty.zigTypeTag(mod) == .Vector) {
        const result_data = try sema.arena.alloc(InternPool.Index, ty.vectorLen(mod));
        const scalar_ty = ty.scalarType(mod);
        for (result_data, 0..) |*scalar, i| {
            const lhs_elem = try lhs.elemValue(mod, i);
            const rhs_elem = try rhs.elemValue(mod, i);
            scalar.* = try (try sema.intRemScalar(lhs_elem, rhs_elem, scalar_ty)).intern(scalar_ty, mod);
        }
        return (try mod.intern(.{ .aggregate = .{
            .ty = ty.toIntern(),
            .storage = .{ .elems = result_data },
        } })).toValue();
    }
    return sema.intRemScalar(lhs, rhs, ty);
}

fn intRemScalar(sema: *Sema, lhs: Value, rhs: Value, scalar_ty: Type) CompileError!Value {
    const mod = sema.mod;
    // TODO is this a performance issue? maybe we should try the operation without
    // resorting to BigInt first.
    var lhs_space: Value.BigIntSpace = undefined;
    var rhs_space: Value.BigIntSpace = undefined;
    const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, mod, sema);
    const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, sema);
    const limbs_q = try sema.arena.alloc(
        math.big.Limb,
        lhs_bigint.limbs.len,
    );
    const limbs_r = try sema.arena.alloc(
        math.big.Limb,
        // TODO: consider reworking Sema to re-use Values rather than
        // always producing new Value objects.
        rhs_bigint.limbs.len,
    );
    const limbs_buffer = try sema.arena.alloc(
        math.big.Limb,
        math.big.int.calcDivLimbsBufferLen(lhs_bigint.limbs.len, rhs_bigint.limbs.len),
    );
    var result_q = math.big.int.Mutable{ .limbs = limbs_q, .positive = undefined, .len = undefined };
    var result_r = math.big.int.Mutable{ .limbs = limbs_r, .positive = undefined, .len = undefined };
    result_q.divTrunc(&result_r, lhs_bigint, rhs_bigint, limbs_buffer);
    return mod.intValue_big(scalar_ty, result_r.toConst());
}

fn zirMod(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node };
    sema.src = src;
    const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
    const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node };
    const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
    const lhs = try sema.resolveInst(extra.lhs);
    const rhs = try sema.resolveInst(extra.rhs);
    const lhs_ty = sema.typeOf(lhs);
    const rhs_ty = sema.typeOf(rhs);
    const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(mod);
    const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(mod);
    try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
    try sema.checkInvalidPtrArithmetic(block, src, lhs_ty);

    const instructions = &[_]Air.Inst.Ref{ lhs, rhs };
    const resolved_type = try sema.resolvePeerTypes(block, src, instructions, .{
        .override = &[_]?LazySrcLoc{ lhs_src, rhs_src },
    });

    const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src);
    const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src);

    const scalar_tag = resolved_type.scalarType(mod).zigTypeTag(mod);

    const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt;

    try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, .mod);

    const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(casted_lhs);
    const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(casted_rhs);

    const runtime_src = rs: {
        // For integers:
        // Either operand being undef is a compile error because there exists
        // a possible value (TODO what is it?) that would invoke illegal behavior.
        // TODO: can lhs zero be handled better?
        // TODO: can lhs undef be handled better?
        //
        // For floats:
        // If the rhs is zero, compile error for division by zero.
        // If the rhs is undefined, compile error because there is a possible
        // value (zero) for which the division would be illegal behavior.
        // If the lhs is undefined, result is undefined.
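        // For example, `@mod(-7, 3)` is 2 and `@mod(7, -3)` is -2: the result
        // takes the sign of the divisor, unlike `@rem`.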
        if (is_int) {
            if (maybe_lhs_val) |lhs_val| {
                if (lhs_val.isUndef(mod)) {
                    return sema.failWithUseOfUndef(block, lhs_src);
                }
            }
            if (maybe_rhs_val) |rhs_val| {
                if (rhs_val.isUndef(mod)) {
                    return sema.failWithUseOfUndef(block, rhs_src);
                }
                if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) {
                    return sema.failWithDivideByZero(block, rhs_src);
                }
                if (maybe_lhs_val) |lhs_val| {
                    return Air.internedToRef((try lhs_val.intMod(rhs_val, resolved_type, sema.arena, mod)).toIntern());
                }
                break :rs lhs_src;
            } else {
                break :rs rhs_src;
            }
        }
        // float operands
        if (maybe_rhs_val) |rhs_val| {
            if (rhs_val.isUndef(mod)) {
                return sema.failWithUseOfUndef(block, rhs_src);
            }
            if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) {
                return sema.failWithDivideByZero(block, rhs_src);
            }
        }
        if (maybe_lhs_val) |lhs_val| {
            if (lhs_val.isUndef(mod)) {
                return mod.undefRef(resolved_type);
            }
            if (maybe_rhs_val) |rhs_val| {
                return Air.internedToRef((try lhs_val.floatMod(rhs_val, resolved_type, sema.arena, mod)).toIntern());
            } else break :rs rhs_src;
        } else break :rs lhs_src;
    };

    try sema.requireRuntimeBlock(block, src, runtime_src);

    if (block.wantSafety()) {
        try sema.addDivByZeroSafety(block, src, resolved_type, maybe_rhs_val, casted_rhs, is_int);
    }

    const air_tag = airTag(block, is_int, .mod, .mod_optimized);
    return block.addBinOp(air_tag, casted_lhs, casted_rhs);
}

fn zirRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node };
    sema.src = src;
    const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
    const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node };
    const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
    const lhs = try sema.resolveInst(extra.lhs);
    const rhs = try sema.resolveInst(extra.rhs);
    const lhs_ty = sema.typeOf(lhs);
    const rhs_ty = sema.typeOf(rhs);
    const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(mod);
    const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(mod);
    try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
    try sema.checkInvalidPtrArithmetic(block, src, lhs_ty);

    const instructions = &[_]Air.Inst.Ref{ lhs, rhs };
    const resolved_type = try sema.resolvePeerTypes(block, src, instructions, .{
        .override = &[_]?LazySrcLoc{ lhs_src, rhs_src },
    });

    const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src);
    const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src);

    const scalar_tag = resolved_type.scalarType(mod).zigTypeTag(mod);

    const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt;

    try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, .rem);

    const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(casted_lhs);
    const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(casted_rhs);

    const runtime_src = rs: {
        // For integers:
        // Either operand being undef is a compile error because there exists
        // a possible value (TODO what is it?) that would invoke illegal behavior.
        // TODO: can lhs zero be handled better?
        // TODO: can lhs undef be handled better?
        //
        // For floats:
        // If the rhs is zero, compile error for division by zero.
        // If the rhs is undefined, compile error because there is a possible
        // value (zero) for which the division would be illegal behavior.
        // If the lhs is undefined, result is undefined.
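        // For example, `@rem(-7, 3)` is -1 and `@rem(7, -3)` is 1: the result
        // takes the sign of the dividend, unlike `@mod`.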
|
|
if (is_int) {
|
|
if (maybe_lhs_val) |lhs_val| {
|
|
if (lhs_val.isUndef(mod)) {
|
|
return sema.failWithUseOfUndef(block, lhs_src);
|
|
}
|
|
}
|
|
if (maybe_rhs_val) |rhs_val| {
|
|
if (rhs_val.isUndef(mod)) {
|
|
return sema.failWithUseOfUndef(block, rhs_src);
|
|
}
|
|
if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) {
|
|
return sema.failWithDivideByZero(block, rhs_src);
|
|
}
|
|
if (maybe_lhs_val) |lhs_val| {
|
|
return Air.internedToRef((try sema.intRem(resolved_type, lhs_val, rhs_val)).toIntern());
|
|
}
|
|
break :rs lhs_src;
|
|
} else {
|
|
break :rs rhs_src;
|
|
}
|
|
}
|
|
// float operands
|
|
if (maybe_rhs_val) |rhs_val| {
|
|
if (rhs_val.isUndef(mod)) {
|
|
return sema.failWithUseOfUndef(block, rhs_src);
|
|
}
|
|
if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) {
|
|
return sema.failWithDivideByZero(block, rhs_src);
|
|
}
|
|
}
|
|
if (maybe_lhs_val) |lhs_val| {
|
|
if (lhs_val.isUndef(mod)) {
|
|
return mod.undefRef(resolved_type);
|
|
}
|
|
if (maybe_rhs_val) |rhs_val| {
|
|
return Air.internedToRef((try lhs_val.floatRem(rhs_val, resolved_type, sema.arena, mod)).toIntern());
|
|
} else break :rs rhs_src;
|
|
} else break :rs lhs_src;
|
|
};
|
|
|
|
try sema.requireRuntimeBlock(block, src, runtime_src);
|
|
|
|
if (block.wantSafety()) {
|
|
try sema.addDivByZeroSafety(block, src, resolved_type, maybe_rhs_val, casted_rhs, is_int);
|
|
}
|
|
|
|
const air_tag = airTag(block, is_int, .rem, .rem_optimized);
|
|
return block.addBinOp(air_tag, casted_lhs, casted_rhs);
|
|
}
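
// Illustrative test (example values only, not used by the compiler): unlike
// `@mod` above, `@rem` takes the sign of the dividend.
test "illustrative @rem semantics" {
    try std.testing.expect(@rem(-7, @as(i32, 3)) == -1);
    try std.testing.expect(@rem(7, @as(i32, 3)) == 1);
}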

fn zirOverflowArithmetic(
    sema: *Sema,
    block: *Block,
    extended: Zir.Inst.Extended.InstData,
    zir_tag: Zir.Inst.Extended,
) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data;
    const src = LazySrcLoc.nodeOffset(extra.node);

    const lhs_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
    const rhs_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = extra.node };

    const uncasted_lhs = try sema.resolveInst(extra.lhs);
    const uncasted_rhs = try sema.resolveInst(extra.rhs);

    const lhs_ty = sema.typeOf(uncasted_lhs);
    const rhs_ty = sema.typeOf(uncasted_rhs);
    const mod = sema.mod;

    try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);

    const instructions = &[_]Air.Inst.Ref{ uncasted_lhs, uncasted_rhs };
    const dest_ty = if (zir_tag == .shl_with_overflow)
        lhs_ty
    else
        try sema.resolvePeerTypes(block, src, instructions, .{
            .override = &[_]?LazySrcLoc{ lhs_src, rhs_src },
        });

    const rhs_dest_ty = if (zir_tag == .shl_with_overflow)
        try sema.log2IntType(block, lhs_ty, src)
    else
        dest_ty;

    const lhs = try sema.coerce(block, dest_ty, uncasted_lhs, lhs_src);
    const rhs = try sema.coerce(block, rhs_dest_ty, uncasted_rhs, rhs_src);

    if (dest_ty.scalarType(mod).zigTypeTag(mod) != .Int) {
        return sema.fail(block, src, "expected vector of integers or integer tag type, found '{}'", .{dest_ty.fmt(mod)});
    }

    const maybe_lhs_val = try sema.resolveMaybeUndefVal(lhs);
    const maybe_rhs_val = try sema.resolveMaybeUndefVal(rhs);

    const tuple_ty = try sema.overflowArithmeticTupleType(dest_ty);
    const overflow_ty = mod.intern_pool.indexToKey(tuple_ty.toIntern()).anon_struct_type.types[1].toType();

    var result: struct {
        inst: Air.Inst.Ref = .none,
        wrapped: Value = Value.@"unreachable",
        overflow_bit: Value,
    } = result: {
        const zero_bit = try mod.intValue(Type.u1, 0);
        switch (zir_tag) {
            .add_with_overflow => {
                // If either of the arguments is zero, `false` is returned and the other is stored
                // to the result, even if it is undefined.
                // Otherwise, if either of the arguments is undefined, undefined is returned.
                if (maybe_lhs_val) |lhs_val| {
                    if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) {
                        break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = rhs };
                    }
                }
                if (maybe_rhs_val) |rhs_val| {
                    if (!rhs_val.isUndef(mod) and (try rhs_val.compareAllWithZeroAdvanced(.eq, sema))) {
                        break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = lhs };
                    }
                }
                if (maybe_lhs_val) |lhs_val| {
                    if (maybe_rhs_val) |rhs_val| {
                        if (lhs_val.isUndef(mod) or rhs_val.isUndef(mod)) {
                            break :result .{ .overflow_bit = Value.undef, .wrapped = Value.undef };
                        }

                        const result = try sema.intAddWithOverflow(lhs_val, rhs_val, dest_ty);
                        break :result .{ .overflow_bit = result.overflow_bit, .wrapped = result.wrapped_result };
                    }
                }
            },
            .sub_with_overflow => {
                // If the rhs is zero, then the result is lhs and no overflow occurred.
                // Otherwise, if either operand is undefined, both results are undefined.
                if (maybe_rhs_val) |rhs_val| {
                    if (rhs_val.isUndef(mod)) {
                        break :result .{ .overflow_bit = Value.undef, .wrapped = Value.undef };
                    } else if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
                        break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = lhs };
                    } else if (maybe_lhs_val) |lhs_val| {
                        if (lhs_val.isUndef(mod)) {
                            break :result .{ .overflow_bit = Value.undef, .wrapped = Value.undef };
                        }

                        const result = try sema.intSubWithOverflow(lhs_val, rhs_val, dest_ty);
                        break :result .{ .overflow_bit = result.overflow_bit, .wrapped = result.wrapped_result };
                    }
                }
            },
            .mul_with_overflow => {
                // If either of the arguments is zero, the result is zero and no overflow occurred.
                // If either of the arguments is one, the result is the other and no overflow occurred.
                // Otherwise, if either of the arguments is undefined, both results are undefined.
                const scalar_one = try mod.intValue(dest_ty.scalarType(mod), 1);
                if (maybe_lhs_val) |lhs_val| {
                    if (!lhs_val.isUndef(mod)) {
                        if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
                            break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = lhs };
                        } else if (try sema.compareAll(lhs_val, .eq, try sema.splat(dest_ty, scalar_one), dest_ty)) {
                            break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = rhs };
                        }
                    }
                }

                if (maybe_rhs_val) |rhs_val| {
                    if (!rhs_val.isUndef(mod)) {
                        if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
                            break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = rhs };
                        } else if (try sema.compareAll(rhs_val, .eq, try sema.splat(dest_ty, scalar_one), dest_ty)) {
                            break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = lhs };
                        }
                    }
                }

                if (maybe_lhs_val) |lhs_val| {
                    if (maybe_rhs_val) |rhs_val| {
                        if (lhs_val.isUndef(mod) or rhs_val.isUndef(mod)) {
                            break :result .{ .overflow_bit = Value.undef, .wrapped = Value.undef };
                        }

                        const result = try lhs_val.intMulWithOverflow(rhs_val, dest_ty, sema.arena, mod);
                        break :result .{ .overflow_bit = result.overflow_bit, .wrapped = result.wrapped_result };
                    }
                }
            },
            .shl_with_overflow => {
                // If lhs is zero, the result is zero and no overflow occurred.
                // If rhs is zero, the result is lhs (even if undefined) and no overflow occurred.
                // Otherwise, if either of the arguments is undefined, both results are undefined.
                if (maybe_lhs_val) |lhs_val| {
                    if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) {
                        break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = lhs };
                    }
                }
                if (maybe_rhs_val) |rhs_val| {
                    if (!rhs_val.isUndef(mod) and (try rhs_val.compareAllWithZeroAdvanced(.eq, sema))) {
                        break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = lhs };
                    }
                }
                if (maybe_lhs_val) |lhs_val| {
                    if (maybe_rhs_val) |rhs_val| {
                        if (lhs_val.isUndef(mod) or rhs_val.isUndef(mod)) {
                            break :result .{ .overflow_bit = Value.undef, .wrapped = Value.undef };
                        }

                        const result = try lhs_val.shlWithOverflow(rhs_val, dest_ty, sema.arena, mod);
                        break :result .{ .overflow_bit = result.overflow_bit, .wrapped = result.wrapped_result };
                    }
                }
            },
            else => unreachable,
        }

        const air_tag: Air.Inst.Tag = switch (zir_tag) {
            .add_with_overflow => .add_with_overflow,
            .mul_with_overflow => .mul_with_overflow,
            .sub_with_overflow => .sub_with_overflow,
            .shl_with_overflow => .shl_with_overflow,
            else => unreachable,
        };

        const runtime_src = if (maybe_lhs_val == null) lhs_src else rhs_src;
        try sema.requireRuntimeBlock(block, src, runtime_src);

        return block.addInst(.{
            .tag = air_tag,
            .data = .{ .ty_pl = .{
                .ty = Air.internedToRef(tuple_ty.toIntern()),
                .payload = try block.sema.addExtra(Air.Bin{
                    .lhs = lhs,
                    .rhs = rhs,
                }),
            } },
        });
    };

    if (result.inst != .none) {
        if (try sema.resolveMaybeUndefVal(result.inst)) |some| {
            result.wrapped = some;
            result.inst = .none;
        }
    }

    if (result.inst == .none) {
        return Air.internedToRef((try mod.intern(.{ .aggregate = .{
            .ty = tuple_ty.toIntern(),
            .storage = .{ .elems = &.{
                result.wrapped.toIntern(),
                result.overflow_bit.toIntern(),
            } },
        } })));
    }

    const element_refs = try sema.arena.alloc(Air.Inst.Ref, 2);
    element_refs[0] = result.inst;
    element_refs[1] = Air.internedToRef(result.overflow_bit.toIntern());
    return block.addAggregateInit(tuple_ty, element_refs);
}
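
// Illustrative test (example values only, not used by the compiler) of the
// tuple produced above: element 0 is the wrapped result, element 1 the u1
// overflow bit.
test "illustrative @addWithOverflow semantics" {
    const r = @addWithOverflow(@as(u8, 250), @as(u8, 10));
    try std.testing.expect(r[0] == 4);
    try std.testing.expect(r[1] == 1);
}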

fn splat(sema: *Sema, ty: Type, val: Value) !Value {
    const mod = sema.mod;
    if (ty.zigTypeTag(mod) != .Vector) return val;
    const repeated = try mod.intern(.{ .aggregate = .{
        .ty = ty.toIntern(),
        .storage = .{ .repeated_elem = val.toIntern() },
    } });
    return repeated.toValue();
}
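
// For example (illustrative description), `splat` with `ty == @Vector(4, u1)`
// and a scalar zero yields an all-zero vector value via `repeated_elem`
// storage, while a scalar `ty` returns `val` unchanged; see the
// overflow-arithmetic callers above.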

fn overflowArithmeticTupleType(sema: *Sema, ty: Type) !Type {
    const mod = sema.mod;
    const ov_ty = if (ty.zigTypeTag(mod) == .Vector) try mod.vectorType(.{
        .len = ty.vectorLen(mod),
        .child = .u1_type,
    }) else Type.u1;

    const types = [2]InternPool.Index{ ty.toIntern(), ov_ty.toIntern() };
    const values = [2]InternPool.Index{ .none, .none };
    const tuple_ty = try mod.intern(.{ .anon_struct_type = .{
        .types = &types,
        .values = &values,
        .names = &.{},
    } });
    return tuple_ty.toType();
}
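
// The resulting tuple is shaped like `struct { T, u1 }` for a scalar operand
// type `T`, or `struct { @Vector(n, T), @Vector(n, u1) }` for a vector
// operand, mirroring the value the overflow builtins produce at the language
// level.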

fn analyzeArithmetic(
    sema: *Sema,
    block: *Block,
    /// TODO performance investigation: make this comptime?
    zir_tag: Zir.Inst.Tag,
    lhs: Air.Inst.Ref,
    rhs: Air.Inst.Ref,
    src: LazySrcLoc,
    lhs_src: LazySrcLoc,
    rhs_src: LazySrcLoc,
    want_safety: bool,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const lhs_ty = sema.typeOf(lhs);
    const rhs_ty = sema.typeOf(rhs);
    const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(mod);
    const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(mod);
    try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);

    if (lhs_zig_ty_tag == .Pointer) switch (lhs_ty.ptrSize(mod)) {
        .One, .Slice => {},
        .Many, .C => {
            const air_tag: Air.Inst.Tag = switch (zir_tag) {
                .add => .ptr_add,
                .sub => .ptr_sub,
                else => return sema.fail(block, src, "invalid pointer arithmetic operator", .{}),
            };
            return sema.analyzePtrArithmetic(block, src, lhs, rhs, air_tag, lhs_src, rhs_src);
        },
    };

    const instructions = &[_]Air.Inst.Ref{ lhs, rhs };
    const resolved_type = try sema.resolvePeerTypes(block, src, instructions, .{
        .override = &[_]?LazySrcLoc{ lhs_src, rhs_src },
    });

    const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src);
    const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src);

    const scalar_type = resolved_type.scalarType(mod);
    const scalar_tag = scalar_type.zigTypeTag(mod);

    const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt;

    try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, zir_tag);

    const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(casted_lhs);
    const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(casted_rhs);
    const rs: struct {
        src: LazySrcLoc,
        air_tag: Air.Inst.Tag,
        air_tag_safe: Air.Inst.Tag,
    } = rs: {
        switch (zir_tag) {
            .add, .add_unsafe => {
                // For integers:
                // If either of the operands are zero, then the other operand is
                // returned, even if it is undefined.
                // If either of the operands are undefined, it's a compile error
                // because there is a possible value for which the addition would
                // overflow (max_int), causing illegal behavior.
                // For floats: either operand being undef makes the result undef.
                if (maybe_lhs_val) |lhs_val| {
                    if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) {
                        return casted_rhs;
                    }
                }
                if (maybe_rhs_val) |rhs_val| {
                    if (rhs_val.isUndef(mod)) {
                        if (is_int) {
                            return sema.failWithUseOfUndef(block, rhs_src);
                        } else {
                            return mod.undefRef(resolved_type);
                        }
                    }
                    if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
                        return casted_lhs;
                    }
                }
                const air_tag: Air.Inst.Tag = if (block.float_mode == .Optimized) .add_optimized else .add;
                if (maybe_lhs_val) |lhs_val| {
                    if (lhs_val.isUndef(mod)) {
                        if (is_int) {
                            return sema.failWithUseOfUndef(block, lhs_src);
                        } else {
                            return mod.undefRef(resolved_type);
                        }
                    }
                    if (maybe_rhs_val) |rhs_val| {
                        if (is_int) {
                            var overflow_idx: ?usize = null;
                            const sum = try sema.intAdd(lhs_val, rhs_val, resolved_type, &overflow_idx);
                            if (overflow_idx) |vec_idx| {
                                return sema.failWithIntegerOverflow(block, src, resolved_type, sum, vec_idx);
                            }
                            return Air.internedToRef(sum.toIntern());
                        } else {
                            return Air.internedToRef((try Value.floatAdd(lhs_val, rhs_val, resolved_type, sema.arena, mod)).toIntern());
                        }
                    } else break :rs .{ .src = rhs_src, .air_tag = air_tag, .air_tag_safe = .add_safe };
                } else break :rs .{ .src = lhs_src, .air_tag = air_tag, .air_tag_safe = .add_safe };
            },
            .addwrap => {
                // Integers only; floats are checked above.
                // If either of the operands are zero, the other operand is returned.
                // If either of the operands are undefined, the result is undefined.
                if (maybe_lhs_val) |lhs_val| {
                    if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) {
                        return casted_rhs;
                    }
                }
                if (maybe_rhs_val) |rhs_val| {
                    if (rhs_val.isUndef(mod)) {
                        return mod.undefRef(resolved_type);
                    }
                    if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
                        return casted_lhs;
                    }
                    if (maybe_lhs_val) |lhs_val| {
                        return Air.internedToRef((try sema.numberAddWrapScalar(lhs_val, rhs_val, resolved_type)).toIntern());
                    } else break :rs .{ .src = lhs_src, .air_tag = .add_wrap, .air_tag_safe = .add_wrap };
                } else break :rs .{ .src = rhs_src, .air_tag = .add_wrap, .air_tag_safe = .add_wrap };
            },
            .add_sat => {
                // Integers only; floats are checked above.
                // If either of the operands are zero, then the other operand is returned.
                // If either of the operands are undefined, the result is undefined.
                if (maybe_lhs_val) |lhs_val| {
                    if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) {
                        return casted_rhs;
                    }
                }
                if (maybe_rhs_val) |rhs_val| {
                    if (rhs_val.isUndef(mod)) {
                        return mod.undefRef(resolved_type);
                    }
                    if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
                        return casted_lhs;
                    }
                    if (maybe_lhs_val) |lhs_val| {
                        const val = if (scalar_tag == .ComptimeInt)
                            try sema.intAdd(lhs_val, rhs_val, resolved_type, undefined)
                        else
                            try lhs_val.intAddSat(rhs_val, resolved_type, sema.arena, mod);

                        return Air.internedToRef(val.toIntern());
                    } else break :rs .{
                        .src = lhs_src,
                        .air_tag = .add_sat,
                        .air_tag_safe = .add_sat,
                    };
                } else break :rs .{
                    .src = rhs_src,
                    .air_tag = .add_sat,
                    .air_tag_safe = .add_sat,
                };
            },
            .sub => {
                // For integers:
                // If the rhs is zero, then the other operand is
                // returned, even if it is undefined.
                // If either of the operands are undefined, it's a compile error
                // because there is a possible value for which the subtraction would
                // overflow, causing illegal behavior.
                // For floats: either operand being undef makes the result undef.
                if (maybe_rhs_val) |rhs_val| {
                    if (rhs_val.isUndef(mod)) {
                        if (is_int) {
                            return sema.failWithUseOfUndef(block, rhs_src);
                        } else {
                            return mod.undefRef(resolved_type);
                        }
                    }
                    if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
                        return casted_lhs;
                    }
                }
                const air_tag: Air.Inst.Tag = if (block.float_mode == .Optimized) .sub_optimized else .sub;
                if (maybe_lhs_val) |lhs_val| {
                    if (lhs_val.isUndef(mod)) {
                        if (is_int) {
                            return sema.failWithUseOfUndef(block, lhs_src);
                        } else {
                            return mod.undefRef(resolved_type);
                        }
                    }
                    if (maybe_rhs_val) |rhs_val| {
                        if (is_int) {
                            var overflow_idx: ?usize = null;
                            const diff = try sema.intSub(lhs_val, rhs_val, resolved_type, &overflow_idx);
                            if (overflow_idx) |vec_idx| {
                                return sema.failWithIntegerOverflow(block, src, resolved_type, diff, vec_idx);
                            }
                            return Air.internedToRef(diff.toIntern());
                        } else {
                            return Air.internedToRef((try Value.floatSub(lhs_val, rhs_val, resolved_type, sema.arena, mod)).toIntern());
                        }
                    } else break :rs .{ .src = rhs_src, .air_tag = air_tag, .air_tag_safe = .sub_safe };
                } else break :rs .{ .src = lhs_src, .air_tag = air_tag, .air_tag_safe = .sub_safe };
            },
            .subwrap => {
                // Integers only; floats are checked above.
                // If the RHS is zero, then the other operand is returned, even if it is undefined.
                // If either of the operands are undefined, the result is undefined.
                if (maybe_rhs_val) |rhs_val| {
                    if (rhs_val.isUndef(mod)) {
                        return mod.undefRef(resolved_type);
                    }
                    if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
                        return casted_lhs;
                    }
                }
                if (maybe_lhs_val) |lhs_val| {
                    if (lhs_val.isUndef(mod)) {
                        return mod.undefRef(resolved_type);
                    }
                    if (maybe_rhs_val) |rhs_val| {
                        return Air.internedToRef((try sema.numberSubWrapScalar(lhs_val, rhs_val, resolved_type)).toIntern());
                    } else break :rs .{ .src = rhs_src, .air_tag = .sub_wrap, .air_tag_safe = .sub_wrap };
                } else break :rs .{ .src = lhs_src, .air_tag = .sub_wrap, .air_tag_safe = .sub_wrap };
            },
            .sub_sat => {
                // Integers only; floats are checked above.
                // If the RHS is zero, result is LHS.
                // If either of the operands are undefined, result is undefined.
                if (maybe_rhs_val) |rhs_val| {
                    if (rhs_val.isUndef(mod)) {
                        return mod.undefRef(resolved_type);
                    }
                    if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
                        return casted_lhs;
                    }
                }
                if (maybe_lhs_val) |lhs_val| {
                    if (lhs_val.isUndef(mod)) {
                        return mod.undefRef(resolved_type);
                    }
                    if (maybe_rhs_val) |rhs_val| {
                        const val = if (scalar_tag == .ComptimeInt)
                            try sema.intSub(lhs_val, rhs_val, resolved_type, undefined)
                        else
                            try lhs_val.intSubSat(rhs_val, resolved_type, sema.arena, mod);

                        return Air.internedToRef(val.toIntern());
                    } else break :rs .{ .src = rhs_src, .air_tag = .sub_sat, .air_tag_safe = .sub_sat };
                } else break :rs .{ .src = lhs_src, .air_tag = .sub_sat, .air_tag_safe = .sub_sat };
            },
            .mul => {
                // For integers:
                // If either of the operands are zero, the result is zero.
                // If either of the operands are one, the result is the other
                // operand, even if it is undefined.
                // If either of the operands are undefined, it's a compile error
                // because there is a possible value for which the multiplication would
                // overflow (max_int), causing illegal behavior.
                // For floats: either operand being undef makes the result undef.
                // If either of the operands are inf, and the other operand is zero,
                // the result is nan.
                // If either of the operands are nan, the result is nan.
                const scalar_zero = switch (scalar_tag) {
                    .ComptimeFloat, .Float => try mod.floatValue(scalar_type, 0.0),
                    .ComptimeInt, .Int => try mod.intValue(scalar_type, 0),
                    else => unreachable,
                };
                const scalar_one = switch (scalar_tag) {
                    .ComptimeFloat, .Float => try mod.floatValue(scalar_type, 1.0),
                    .ComptimeInt, .Int => try mod.intValue(scalar_type, 1),
                    else => unreachable,
                };
                if (maybe_lhs_val) |lhs_val| {
                    if (!lhs_val.isUndef(mod)) {
                        if (lhs_val.isNan(mod)) {
                            return Air.internedToRef(lhs_val.toIntern());
                        }
                        if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) lz: {
                            if (maybe_rhs_val) |rhs_val| {
                                if (rhs_val.isNan(mod)) {
                                    return Air.internedToRef(rhs_val.toIntern());
                                }
                                if (rhs_val.isInf(mod)) {
                                    return Air.internedToRef((try mod.floatValue(resolved_type, std.math.nan(f128))).toIntern());
                                }
                            } else if (resolved_type.isAnyFloat()) {
                                break :lz;
                            }
                            const zero_val = try sema.splat(resolved_type, scalar_zero);
                            return Air.internedToRef(zero_val.toIntern());
                        }
                        if (try sema.compareAll(lhs_val, .eq, try sema.splat(resolved_type, scalar_one), resolved_type)) {
                            return casted_rhs;
                        }
                    }
                }
                const air_tag: Air.Inst.Tag = if (block.float_mode == .Optimized) .mul_optimized else .mul;
                if (maybe_rhs_val) |rhs_val| {
                    if (rhs_val.isUndef(mod)) {
                        if (is_int) {
                            return sema.failWithUseOfUndef(block, rhs_src);
                        } else {
                            return mod.undefRef(resolved_type);
                        }
                    }
                    if (rhs_val.isNan(mod)) {
                        return Air.internedToRef(rhs_val.toIntern());
                    }
                    if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) rz: {
                        if (maybe_lhs_val) |lhs_val| {
                            if (lhs_val.isInf(mod)) {
                                return Air.internedToRef((try mod.floatValue(resolved_type, std.math.nan(f128))).toIntern());
                            }
                        } else if (resolved_type.isAnyFloat()) {
                            break :rz;
                        }
                        const zero_val = try sema.splat(resolved_type, scalar_zero);
                        return Air.internedToRef(zero_val.toIntern());
                    }
                    if (try sema.compareAll(rhs_val, .eq, try sema.splat(resolved_type, scalar_one), resolved_type)) {
                        return casted_lhs;
                    }
                    if (maybe_lhs_val) |lhs_val| {
                        if (lhs_val.isUndef(mod)) {
                            if (is_int) {
                                return sema.failWithUseOfUndef(block, lhs_src);
                            } else {
                                return mod.undefRef(resolved_type);
                            }
                        }
                        if (is_int) {
                            var overflow_idx: ?usize = null;
                            const product = try lhs_val.intMul(rhs_val, resolved_type, &overflow_idx, sema.arena, mod);
                            if (overflow_idx) |vec_idx| {
                                return sema.failWithIntegerOverflow(block, src, resolved_type, product, vec_idx);
                            }
                            return Air.internedToRef(product.toIntern());
                        } else {
                            return Air.internedToRef((try lhs_val.floatMul(rhs_val, resolved_type, sema.arena, mod)).toIntern());
                        }
                    } else break :rs .{ .src = lhs_src, .air_tag = air_tag, .air_tag_safe = .mul_safe };
                } else break :rs .{ .src = rhs_src, .air_tag = air_tag, .air_tag_safe = .mul_safe };
            },
            .mulwrap => {
                // Integers only; floats are handled above.
                // If either of the operands are zero, result is zero.
                // If either of the operands are one, result is the other operand.
                // If either of the operands are undefined, result is undefined.
                const scalar_zero = switch (scalar_tag) {
                    .ComptimeFloat, .Float => try mod.floatValue(scalar_type, 0.0),
                    .ComptimeInt, .Int => try mod.intValue(scalar_type, 0),
                    else => unreachable,
                };
                const scalar_one = switch (scalar_tag) {
                    .ComptimeFloat, .Float => try mod.floatValue(scalar_type, 1.0),
                    .ComptimeInt, .Int => try mod.intValue(scalar_type, 1),
                    else => unreachable,
                };
                if (maybe_lhs_val) |lhs_val| {
                    if (!lhs_val.isUndef(mod)) {
                        if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
                            const zero_val = try sema.splat(resolved_type, scalar_zero);
                            return Air.internedToRef(zero_val.toIntern());
                        }
                        if (try sema.compareAll(lhs_val, .eq, try sema.splat(resolved_type, scalar_one), resolved_type)) {
                            return casted_rhs;
                        }
                    }
                }
                if (maybe_rhs_val) |rhs_val| {
                    if (rhs_val.isUndef(mod)) {
                        return mod.undefRef(resolved_type);
                    }
                    if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
                        const zero_val = try sema.splat(resolved_type, scalar_zero);
                        return Air.internedToRef(zero_val.toIntern());
                    }
                    if (try sema.compareAll(rhs_val, .eq, try sema.splat(resolved_type, scalar_one), resolved_type)) {
                        return casted_lhs;
                    }
                    if (maybe_lhs_val) |lhs_val| {
                        if (lhs_val.isUndef(mod)) {
                            return mod.undefRef(resolved_type);
                        }
                        return Air.internedToRef((try lhs_val.numberMulWrap(rhs_val, resolved_type, sema.arena, mod)).toIntern());
                    } else break :rs .{ .src = lhs_src, .air_tag = .mul_wrap, .air_tag_safe = .mul_wrap };
                } else break :rs .{ .src = rhs_src, .air_tag = .mul_wrap, .air_tag_safe = .mul_wrap };
            },
            .mul_sat => {
                // Integers only; floats are checked above.
                // If either of the operands are zero, result is zero.
                // If either of the operands are one, result is the other operand.
                // If either of the operands are undefined, result is undefined.
                const scalar_zero = switch (scalar_tag) {
                    .ComptimeFloat, .Float => try mod.floatValue(scalar_type, 0.0),
                    .ComptimeInt, .Int => try mod.intValue(scalar_type, 0),
                    else => unreachable,
                };
                const scalar_one = switch (scalar_tag) {
                    .ComptimeFloat, .Float => try mod.floatValue(scalar_type, 1.0),
                    .ComptimeInt, .Int => try mod.intValue(scalar_type, 1),
                    else => unreachable,
                };
                if (maybe_lhs_val) |lhs_val| {
                    if (!lhs_val.isUndef(mod)) {
                        if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
                            const zero_val = try sema.splat(resolved_type, scalar_zero);
                            return Air.internedToRef(zero_val.toIntern());
                        }
                        if (try sema.compareAll(lhs_val, .eq, try sema.splat(resolved_type, scalar_one), resolved_type)) {
                            return casted_rhs;
                        }
                    }
                }
                if (maybe_rhs_val) |rhs_val| {
                    if (rhs_val.isUndef(mod)) {
                        return mod.undefRef(resolved_type);
                    }
                    if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
                        const zero_val = try sema.splat(resolved_type, scalar_zero);
                        return Air.internedToRef(zero_val.toIntern());
                    }
                    if (try sema.compareAll(rhs_val, .eq, try sema.splat(resolved_type, scalar_one), resolved_type)) {
                        return casted_lhs;
                    }
                    if (maybe_lhs_val) |lhs_val| {
                        if (lhs_val.isUndef(mod)) {
                            return mod.undefRef(resolved_type);
                        }

                        const val = if (scalar_tag == .ComptimeInt)
                            try lhs_val.intMul(rhs_val, resolved_type, undefined, sema.arena, mod)
                        else
                            try lhs_val.intMulSat(rhs_val, resolved_type, sema.arena, mod);

                        return Air.internedToRef(val.toIntern());
                    } else break :rs .{ .src = lhs_src, .air_tag = .mul_sat, .air_tag_safe = .mul_sat };
                } else break :rs .{ .src = rhs_src, .air_tag = .mul_sat, .air_tag_safe = .mul_sat };
            },
            else => unreachable,
        }
    };

    try sema.requireRuntimeBlock(block, src, rs.src);
    if (block.wantSafety() and want_safety and scalar_tag == .Int) {
        if (mod.backendSupportsFeature(.safety_checked_instructions)) {
            _ = try sema.preparePanicId(block, .integer_overflow);
            return block.addBinOp(rs.air_tag_safe, casted_lhs, casted_rhs);
        } else {
            const maybe_op_ov: ?Air.Inst.Tag = switch (rs.air_tag) {
                .add => .add_with_overflow,
                .sub => .sub_with_overflow,
                .mul => .mul_with_overflow,
                else => null,
            };
            if (maybe_op_ov) |op_ov_tag| {
                const op_ov_tuple_ty = try sema.overflowArithmeticTupleType(resolved_type);
                const op_ov = try block.addInst(.{
                    .tag = op_ov_tag,
                    .data = .{ .ty_pl = .{
                        .ty = Air.internedToRef(op_ov_tuple_ty.toIntern()),
                        .payload = try sema.addExtra(Air.Bin{
                            .lhs = casted_lhs,
                            .rhs = casted_rhs,
                        }),
                    } },
                });
                const ov_bit = try sema.tupleFieldValByIndex(block, src, op_ov, 1, op_ov_tuple_ty);
                const any_ov_bit = if (resolved_type.zigTypeTag(mod) == .Vector)
                    try block.addInst(.{
                        .tag = if (block.float_mode == .Optimized) .reduce_optimized else .reduce,
                        .data = .{ .reduce = .{
                            .operand = ov_bit,
                            .operation = .Or,
                        } },
                    })
                else
                    ov_bit;
                const zero_ov = Air.internedToRef((try mod.intValue(Type.u1, 0)).toIntern());
                const no_ov = try block.addBinOp(.cmp_eq, any_ov_bit, zero_ov);

                try sema.addSafetyCheck(block, src, no_ov, .integer_overflow);
                return sema.tupleFieldValByIndex(block, src, op_ov, 0, op_ov_tuple_ty);
            }
        }
    }
    return block.addBinOp(rs.air_tag, casted_lhs, casted_rhs);
}
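
// Illustrative summary (example values only) of the comptime folds above:
// `0 + x` and `x * 1` return `x` directly, even when `x` is undefined, while
// `undefined + 1` on an integer type is a compile error because some possible
// value (e.g. the maximum integer) would make the addition illegal behavior.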

fn analyzePtrArithmetic(
    sema: *Sema,
    block: *Block,
    op_src: LazySrcLoc,
    ptr: Air.Inst.Ref,
    uncasted_offset: Air.Inst.Ref,
    air_tag: Air.Inst.Tag,
    ptr_src: LazySrcLoc,
    offset_src: LazySrcLoc,
) CompileError!Air.Inst.Ref {
    // TODO if the operand is comptime-known to be negative, or is a negative int,
    // coerce to isize instead of usize.
    const offset = try sema.coerce(block, Type.usize, uncasted_offset, offset_src);
    const mod = sema.mod;
    const opt_ptr_val = try sema.resolveMaybeUndefVal(ptr);
    const opt_off_val = try sema.resolveDefinedValue(block, offset_src, offset);
    const ptr_ty = sema.typeOf(ptr);
    const ptr_info = ptr_ty.ptrInfo(mod);
    assert(ptr_info.flags.size == .Many or ptr_info.flags.size == .C);

    const new_ptr_ty = t: {
        // Calculate the new pointer alignment.
        // This code is duplicated in `elemPtrType`.
        if (ptr_info.flags.alignment == .none) {
            // ABI-aligned pointer. Any pointer arithmetic maintains the same ABI-alignedness.
            break :t ptr_ty;
        }
        // If the addend is not a comptime-known value we can still count on
        // it being a multiple of the type size.
        const elem_size = ptr_info.child.toType().abiSize(mod);
        const addend = if (opt_off_val) |off_val| a: {
            const off_int = try sema.usizeCast(block, offset_src, off_val.toUnsignedInt(mod));
            break :a elem_size * off_int;
        } else elem_size;

        // The resulting pointer is aligned to the gcd of the offset (an
        // arbitrary number) and the alignment factor (always a power of two,
        // non-zero).
        const new_align = @as(Alignment, @enumFromInt(@min(
            @ctz(addend),
            @intFromEnum(ptr_info.flags.alignment),
        )));
        assert(new_align != .none);

        break :t try mod.ptrType(.{
            .child = ptr_info.child,
            .sentinel = ptr_info.sentinel,
            .flags = .{
                .size = ptr_info.flags.size,
                .alignment = new_align,
                .is_const = ptr_info.flags.is_const,
                .is_volatile = ptr_info.flags.is_volatile,
                .is_allowzero = ptr_info.flags.is_allowzero,
                .address_space = ptr_info.flags.address_space,
            },
        });
    };

    const runtime_src = rs: {
        if (opt_ptr_val) |ptr_val| {
            if (opt_off_val) |offset_val| {
                if (ptr_val.isUndef(mod)) return mod.undefRef(new_ptr_ty);

                const offset_int = try sema.usizeCast(block, offset_src, offset_val.toUnsignedInt(mod));
                if (offset_int == 0) return ptr;
                if (try ptr_val.getUnsignedIntAdvanced(mod, sema)) |addr| {
                    const elem_size = ptr_info.child.toType().abiSize(mod);
                    const new_addr = switch (air_tag) {
                        .ptr_add => addr + elem_size * offset_int,
                        .ptr_sub => addr - elem_size * offset_int,
                        else => unreachable,
                    };
                    const new_ptr_val = try mod.ptrIntValue(new_ptr_ty, new_addr);
                    return Air.internedToRef(new_ptr_val.toIntern());
                }
                if (air_tag == .ptr_sub) {
                    return sema.fail(block, op_src, "TODO implement Sema comptime pointer subtraction", .{});
                }
                const new_ptr_val = try ptr_val.elemPtr(new_ptr_ty, offset_int, mod);
                return Air.internedToRef(new_ptr_val.toIntern());
            } else break :rs offset_src;
        } else break :rs ptr_src;
    };

    try sema.requireRuntimeBlock(block, op_src, runtime_src);
    return block.addInst(.{
        .tag = air_tag,
        .data = .{ .ty_pl = .{
            .ty = Air.internedToRef(new_ptr_ty.toIntern()),
            .payload = try sema.addExtra(Air.Bin{
                .lhs = ptr,
                .rhs = offset,
            }),
        } },
    });
}
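
// Illustrative example (not tied to a specific caller) of the alignment rule
// above: adding a comptime-known offset of 1 to a `[*]align(8) u16` yields a
// `[*]align(2) u16`, since gcd(1 * @sizeOf(u16), 8) == 2. For a runtime
// offset, only the element-size factor of the addend is known.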

fn zirLoad(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
    const src = inst_data.src();
    const ptr_src = src; // TODO better source location
    const ptr = try sema.resolveInst(inst_data.operand);
    return sema.analyzeLoad(block, src, ptr, ptr_src);
}

fn zirAsm(
    sema: *Sema,
    block: *Block,
    extended: Zir.Inst.Extended.InstData,
    tmpl_is_expr: bool,
) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const extra = sema.code.extraData(Zir.Inst.Asm, extended.operand);
    const src = LazySrcLoc.nodeOffset(extra.data.src_node);
    const ret_ty_src: LazySrcLoc = .{ .node_offset_asm_ret_ty = extra.data.src_node };
    const outputs_len = @as(u5, @truncate(extended.small));
    const inputs_len = @as(u5, @truncate(extended.small >> 5));
    const clobbers_len = @as(u5, @truncate(extended.small >> 10));
    const is_volatile = @as(u1, @truncate(extended.small >> 15)) != 0;
    const is_global_assembly = sema.func_index == .none;

    const asm_source: []const u8 = if (tmpl_is_expr) blk: {
        const tmpl = @as(Zir.Inst.Ref, @enumFromInt(extra.data.asm_source));
        const s: []const u8 = try sema.resolveConstString(block, src, tmpl, "assembly code must be comptime-known");
        break :blk s;
    } else sema.code.nullTerminatedString(extra.data.asm_source);

    if (is_global_assembly) {
        if (outputs_len != 0) {
            return sema.fail(block, src, "module-level assembly does not support outputs", .{});
        }
        if (inputs_len != 0) {
            return sema.fail(block, src, "module-level assembly does not support inputs", .{});
        }
        if (clobbers_len != 0) {
            return sema.fail(block, src, "module-level assembly does not support clobbers", .{});
        }
        if (is_volatile) {
            return sema.fail(block, src, "volatile keyword is redundant on module-level assembly", .{});
        }
        try sema.mod.addGlobalAssembly(sema.owner_decl_index, asm_source);
        return Air.Inst.Ref.void_value;
    }

    if (block.is_comptime) {
        try sema.requireRuntimeBlock(block, src, null);
    }

    var extra_i = extra.end;
    var output_type_bits = extra.data.output_type_bits;
    var needed_capacity: usize = @typeInfo(Air.Asm).Struct.fields.len + outputs_len + inputs_len;

    const ConstraintName = struct { c: []const u8, n: []const u8 };
    const out_args = try sema.arena.alloc(Air.Inst.Ref, outputs_len);
    const outputs = try sema.arena.alloc(ConstraintName, outputs_len);
    var expr_ty = Air.Inst.Ref.void_type;

    for (out_args, 0..) |*arg, out_i| {
        const output = sema.code.extraData(Zir.Inst.Asm.Output, extra_i);
        extra_i = output.end;

        const is_type = @as(u1, @truncate(output_type_bits)) != 0;
        output_type_bits >>= 1;

        if (is_type) {
            // Indicate the output is the asm instruction return value.
            arg.* = .none;
            const out_ty = try sema.resolveType(block, ret_ty_src, output.data.operand);
            try sema.queueFullTypeResolution(out_ty);
            expr_ty = Air.internedToRef(out_ty.toIntern());
        } else {
            arg.* = try sema.resolveInst(output.data.operand);
        }

        const constraint = sema.code.nullTerminatedString(output.data.constraint);
        const name = sema.code.nullTerminatedString(output.data.name);
        needed_capacity += (constraint.len + name.len + (2 + 3)) / 4;

        outputs[out_i] = .{ .c = constraint, .n = name };
    }

    const args = try sema.arena.alloc(Air.Inst.Ref, inputs_len);
    const inputs = try sema.arena.alloc(ConstraintName, inputs_len);
    const mod = sema.mod;

    for (args, 0..) |*arg, arg_i| {
        const input = sema.code.extraData(Zir.Inst.Asm.Input, extra_i);
        extra_i = input.end;

        const uncasted_arg = try sema.resolveInst(input.data.operand);
        const uncasted_arg_ty = sema.typeOf(uncasted_arg);
        switch (uncasted_arg_ty.zigTypeTag(mod)) {
            .ComptimeInt => arg.* = try sema.coerce(block, Type.usize, uncasted_arg, src),
            .ComptimeFloat => arg.* = try sema.coerce(block, Type.f64, uncasted_arg, src),
            else => {
                arg.* = uncasted_arg;
                try sema.queueFullTypeResolution(uncasted_arg_ty);
            },
        }

        const constraint = sema.code.nullTerminatedString(input.data.constraint);
        const name = sema.code.nullTerminatedString(input.data.name);
        needed_capacity += (constraint.len + name.len + (2 + 3)) / 4;
        inputs[arg_i] = .{ .c = constraint, .n = name };
    }

    const clobbers = try sema.arena.alloc([]const u8, clobbers_len);
    for (clobbers) |*name| {
        name.* = sema.code.nullTerminatedString(sema.code.extra[extra_i]);
        extra_i += 1;

        needed_capacity += name.*.len / 4 + 1;
    }

    needed_capacity += (asm_source.len + 3) / 4;

    const gpa = sema.gpa;
    try sema.air_extra.ensureUnusedCapacity(gpa, needed_capacity);
    const asm_air = try block.addInst(.{
        .tag = .assembly,
        .data = .{ .ty_pl = .{
            .ty = expr_ty,
            .payload = sema.addExtraAssumeCapacity(Air.Asm{
                .source_len = @as(u32, @intCast(asm_source.len)),
                .outputs_len = outputs_len,
                .inputs_len = @as(u32, @intCast(args.len)),
                .flags = (@as(u32, @intFromBool(is_volatile)) << 31) | @as(u32, @intCast(clobbers.len)),
            }),
        } },
    });
    sema.appendRefsAssumeCapacity(out_args);
    sema.appendRefsAssumeCapacity(args);
    for (outputs) |o| {
        const buffer = mem.sliceAsBytes(sema.air_extra.unusedCapacitySlice());
        @memcpy(buffer[0..o.c.len], o.c);
        buffer[o.c.len] = 0;
        @memcpy(buffer[o.c.len + 1 ..][0..o.n.len], o.n);
        buffer[o.c.len + 1 + o.n.len] = 0;
        sema.air_extra.items.len += (o.c.len + o.n.len + (2 + 3)) / 4;
    }
    for (inputs) |input| {
        const buffer = mem.sliceAsBytes(sema.air_extra.unusedCapacitySlice());
        @memcpy(buffer[0..input.c.len], input.c);
        buffer[input.c.len] = 0;
        @memcpy(buffer[input.c.len + 1 ..][0..input.n.len], input.n);
        buffer[input.c.len + 1 + input.n.len] = 0;
        sema.air_extra.items.len += (input.c.len + input.n.len + (2 + 3)) / 4;
    }
    for (clobbers) |clobber| {
        const buffer = mem.sliceAsBytes(sema.air_extra.unusedCapacitySlice());
        @memcpy(buffer[0..clobber.len], clobber);
        buffer[clobber.len] = 0;
        sema.air_extra.items.len += clobber.len / 4 + 1;
    }
    {
        const buffer = mem.sliceAsBytes(sema.air_extra.unusedCapacitySlice());
        @memcpy(buffer[0..asm_source.len], asm_source);
        sema.air_extra.items.len += (asm_source.len + 3) / 4;
    }
    return asm_air;
}
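
// Illustrative example (hypothetical register names) of the surface syntax
// whose outputs, inputs, and clobbers are serialized above:
//
//     const ret = asm volatile ("syscall"
//         : [ret] "={rax}" (-> usize),
//         : [number] "{rax}" (number),
//         : "rcx", "r11"
//     );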

/// Only called for equality operators. See also `zirCmp`.
fn zirCmpEq(
    sema: *Sema,
    block: *Block,
    inst: Zir.Inst.Index,
    op: std.math.CompareOperator,
    air_tag: Air.Inst.Tag,
) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
    const src: LazySrcLoc = inst_data.src();
    const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
    const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node };
    const lhs = try sema.resolveInst(extra.lhs);
    const rhs = try sema.resolveInst(extra.rhs);

    const lhs_ty = sema.typeOf(lhs);
    const rhs_ty = sema.typeOf(rhs);
    const lhs_ty_tag = lhs_ty.zigTypeTag(mod);
    const rhs_ty_tag = rhs_ty.zigTypeTag(mod);
    if (lhs_ty_tag == .Null and rhs_ty_tag == .Null) {
        // null == null, null != null
        if (op == .eq) {
            return Air.Inst.Ref.bool_true;
        } else {
            return Air.Inst.Ref.bool_false;
        }
    }

    // comparing null with optionals or C pointers
    if (lhs_ty_tag == .Null and (rhs_ty_tag == .Optional or rhs_ty.isCPtr(mod))) {
        return sema.analyzeIsNull(block, src, rhs, op == .neq);
    }
    if (rhs_ty_tag == .Null and (lhs_ty_tag == .Optional or lhs_ty.isCPtr(mod))) {
        return sema.analyzeIsNull(block, src, lhs, op == .neq);
    }

    if (lhs_ty_tag == .Null or rhs_ty_tag == .Null) {
        const non_null_type = if (lhs_ty_tag == .Null) rhs_ty else lhs_ty;
        return sema.fail(block, src, "comparison of '{}' with null", .{non_null_type.fmt(mod)});
    }

    if (lhs_ty_tag == .Union and (rhs_ty_tag == .EnumLiteral or rhs_ty_tag == .Enum)) {
        return sema.analyzeCmpUnionTag(block, src, lhs, lhs_src, rhs, rhs_src, op);
    }
    if (rhs_ty_tag == .Union and (lhs_ty_tag == .EnumLiteral or lhs_ty_tag == .Enum)) {
        return sema.analyzeCmpUnionTag(block, src, rhs, rhs_src, lhs, lhs_src, op);
    }

    if (lhs_ty_tag == .ErrorSet and rhs_ty_tag == .ErrorSet) {
        const runtime_src: LazySrcLoc = src: {
            if (try sema.resolveMaybeUndefVal(lhs)) |lval| {
                if (try sema.resolveMaybeUndefVal(rhs)) |rval| {
                    if (lval.isUndef(mod) or rval.isUndef(mod)) {
                        return mod.undefRef(Type.bool);
                    }
                    const lkey = mod.intern_pool.indexToKey(lval.toIntern());
                    const rkey = mod.intern_pool.indexToKey(rval.toIntern());
                    if ((lkey.err.name == rkey.err.name) == (op == .eq)) {
                        return Air.Inst.Ref.bool_true;
                    } else {
                        return Air.Inst.Ref.bool_false;
                    }
                } else {
                    break :src rhs_src;
                }
            } else {
                break :src lhs_src;
            }
        };
        try sema.requireRuntimeBlock(block, src, runtime_src);
        return block.addBinOp(air_tag, lhs, rhs);
    }
    if (lhs_ty_tag == .Type and rhs_ty_tag == .Type) {
        const lhs_as_type = try sema.analyzeAsType(block, lhs_src, lhs);
        const rhs_as_type = try sema.analyzeAsType(block, rhs_src, rhs);
        if (lhs_as_type.eql(rhs_as_type, mod) == (op == .eq)) {
            return Air.Inst.Ref.bool_true;
        } else {
            return Air.Inst.Ref.bool_false;
        }
    }
    return sema.analyzeCmp(block, src, lhs, rhs, op, lhs_src, rhs_src, true);
}
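
// Illustrative test (example values only, not used by the compiler) of two
// comptime cases above: error values compare by name, and types compare by
// identity.
test "illustrative equality comparisons" {
    try std.testing.expect(error.Foo == error.Foo);
    try std.testing.expect(u8 != u16);
}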

fn analyzeCmpUnionTag(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    un: Air.Inst.Ref,
    un_src: LazySrcLoc,
    tag: Air.Inst.Ref,
    tag_src: LazySrcLoc,
    op: std.math.CompareOperator,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const union_ty = sema.typeOf(un);
    try sema.resolveTypeFields(union_ty);
    const union_tag_ty = union_ty.unionTagType(mod) orelse {
        const msg = msg: {
            const msg = try sema.errMsg(block, un_src, "comparison of union and enum literal is only valid for tagged union types", .{});
            errdefer msg.destroy(sema.gpa);
            try mod.errNoteNonLazy(union_ty.declSrcLoc(mod), msg, "union '{}' is not a tagged union", .{union_ty.fmt(mod)});
            break :msg msg;
        };
        return sema.failWithOwnedErrorMsg(msg);
    };
    // Coerce both the union and the tag to the union's tag type, and then execute the
    // enum comparison codepath.
    const coerced_tag = try sema.coerce(block, union_tag_ty, tag, tag_src);
    const coerced_union = try sema.coerce(block, union_tag_ty, un, un_src);

    if (try sema.resolveMaybeUndefVal(coerced_tag)) |enum_val| {
        if (enum_val.isUndef(mod)) return mod.undefRef(Type.bool);
        const field_ty = union_ty.unionFieldType(enum_val, mod);
        if (field_ty.zigTypeTag(mod) == .NoReturn) {
            return Air.Inst.Ref.bool_false;
        }
    }

    return sema.cmpSelf(block, src, coerced_union, coerced_tag, op, un_src, tag_src);
}

/// Only called for non-equality operators. See also `zirCmpEq`.
fn zirCmp(
    sema: *Sema,
    block: *Block,
    inst: Zir.Inst.Index,
    op: std.math.CompareOperator,
) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
    const src: LazySrcLoc = inst_data.src();
    const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
    const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node };
    const lhs = try sema.resolveInst(extra.lhs);
    const rhs = try sema.resolveInst(extra.rhs);
    return sema.analyzeCmp(block, src, lhs, rhs, op, lhs_src, rhs_src, false);
}

fn analyzeCmp(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    lhs: Air.Inst.Ref,
    rhs: Air.Inst.Ref,
    op: std.math.CompareOperator,
    lhs_src: LazySrcLoc,
    rhs_src: LazySrcLoc,
    is_equality_cmp: bool,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const lhs_ty = sema.typeOf(lhs);
    const rhs_ty = sema.typeOf(rhs);
    if (lhs_ty.zigTypeTag(mod) != .Optional and rhs_ty.zigTypeTag(mod) != .Optional) {
        try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
    }

    if (lhs_ty.zigTypeTag(mod) == .Vector and rhs_ty.zigTypeTag(mod) == .Vector) {
        return sema.cmpVector(block, src, lhs, rhs, op, lhs_src, rhs_src);
    }
    if (lhs_ty.isNumeric(mod) and rhs_ty.isNumeric(mod)) {
        // This operation allows any combination of integer and float types, regardless of the
        // signed-ness, comptime-ness, and bit-width. So peer type resolution is incorrect for
        // numeric types.
        return sema.cmpNumeric(block, src, lhs, rhs, op, lhs_src, rhs_src);
    }
    if (is_equality_cmp and lhs_ty.zigTypeTag(mod) == .ErrorUnion and rhs_ty.zigTypeTag(mod) == .ErrorSet) {
        const casted_lhs = try sema.analyzeErrUnionCode(block, lhs_src, lhs);
        return sema.cmpSelf(block, src, casted_lhs, rhs, op, lhs_src, rhs_src);
    }
    if (is_equality_cmp and lhs_ty.zigTypeTag(mod) == .ErrorSet and rhs_ty.zigTypeTag(mod) == .ErrorUnion) {
        const casted_rhs = try sema.analyzeErrUnionCode(block, rhs_src, rhs);
        return sema.cmpSelf(block, src, lhs, casted_rhs, op, lhs_src, rhs_src);
    }
    const instructions = &[_]Air.Inst.Ref{ lhs, rhs };
    const resolved_type = try sema.resolvePeerTypes(block, src, instructions, .{ .override = &[_]?LazySrcLoc{ lhs_src, rhs_src } });
    if (!resolved_type.isSelfComparable(mod, is_equality_cmp)) {
        return sema.fail(block, src, "operator {s} not allowed for type '{}'", .{
            compareOperatorName(op), resolved_type.fmt(mod),
        });
    }
    const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src);
    const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src);
    return sema.cmpSelf(block, src, casted_lhs, casted_rhs, op, lhs_src, rhs_src);
}

fn compareOperatorName(comp: std.math.CompareOperator) []const u8 {
    return switch (comp) {
        .lt => "<",
        .lte => "<=",
        .eq => "==",
        .gte => ">=",
        .gt => ">",
        .neq => "!=",
    };
}

fn cmpSelf(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    casted_lhs: Air.Inst.Ref,
    casted_rhs: Air.Inst.Ref,
    op: std.math.CompareOperator,
    lhs_src: LazySrcLoc,
    rhs_src: LazySrcLoc,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const resolved_type = sema.typeOf(casted_lhs);
    const runtime_src: LazySrcLoc = src: {
        if (try sema.resolveMaybeUndefVal(casted_lhs)) |lhs_val| {
            if (lhs_val.isUndef(mod)) return mod.undefRef(Type.bool);
            if (try sema.resolveMaybeUndefVal(casted_rhs)) |rhs_val| {
                if (rhs_val.isUndef(mod)) return mod.undefRef(Type.bool);

                if (resolved_type.zigTypeTag(mod) == .Vector) {
                    const cmp_val = try sema.compareVector(lhs_val, op, rhs_val, resolved_type);
                    return Air.internedToRef(cmp_val.toIntern());
                }

                if (try sema.compareAll(lhs_val, op, rhs_val, resolved_type)) {
                    return Air.Inst.Ref.bool_true;
                } else {
                    return Air.Inst.Ref.bool_false;
                }
            } else {
                if (resolved_type.zigTypeTag(mod) == .Bool) {
                    // We can lower bool eq/neq more efficiently.
                    return sema.runtimeBoolCmp(block, src, op, casted_rhs, lhs_val.toBool(), rhs_src);
                }
                break :src rhs_src;
            }
        } else {
            // For bools, we still check the other operand, because we can lower
            // bool eq/neq more efficiently.
            if (resolved_type.zigTypeTag(mod) == .Bool) {
                if (try sema.resolveMaybeUndefVal(casted_rhs)) |rhs_val| {
                    if (rhs_val.isUndef(mod)) return mod.undefRef(Type.bool);
                    return sema.runtimeBoolCmp(block, src, op, casted_lhs, rhs_val.toBool(), lhs_src);
                }
            }
            break :src lhs_src;
        }
    };
    try sema.requireRuntimeBlock(block, src, runtime_src);
    if (resolved_type.zigTypeTag(mod) == .Vector) {
        return block.addCmpVector(casted_lhs, casted_rhs, op);
    }
    const tag = Air.Inst.Tag.fromCmpOp(op, block.float_mode == .Optimized);
    return block.addBinOp(tag, casted_lhs, casted_rhs);
}

/// cmp_eq (x, false) => not(x)
/// cmp_eq (x, true ) => x
/// cmp_neq(x, false) => x
/// cmp_neq(x, true ) => not(x)
fn runtimeBoolCmp(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    op: std.math.CompareOperator,
    lhs: Air.Inst.Ref,
    rhs: bool,
    runtime_src: LazySrcLoc,
) CompileError!Air.Inst.Ref {
    if ((op == .neq) == rhs) {
        try sema.requireRuntimeBlock(block, src, runtime_src);
        return block.addTyOp(.not, Type.bool, lhs);
    } else {
        return lhs;
    }
}
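
// Restating the table above illustratively: for a runtime `x: bool`,
// `x == true` lowers to `x` itself and `x != true` lowers to `not(x)`, so no
// compare instruction is ever emitted for bool equality.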

fn zirSizeOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
    const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    const ty = try sema.resolveType(block, operand_src, inst_data.operand);
    switch (ty.zigTypeTag(mod)) {
        .Fn,
        .NoReturn,
        .Undefined,
        .Null,
        .Opaque,
        => return sema.fail(block, operand_src, "no size available for type '{}'", .{ty.fmt(mod)}),

        .Type,
        .EnumLiteral,
        .ComptimeFloat,
        .ComptimeInt,
        .Void,
        => return mod.intRef(Type.comptime_int, 0),

        .Bool,
        .Int,
        .Float,
        .Pointer,
        .Array,
        .Struct,
        .Optional,
        .ErrorUnion,
        .ErrorSet,
        .Enum,
        .Union,
        .Vector,
        .Frame,
        .AnyFrame,
        => {},
    }
    const val = try ty.lazyAbiSize(mod);
    if (val.isLazySize(mod)) {
        try sema.queueFullTypeResolution(ty);
    }
    return Air.internedToRef(val.toIntern());
}
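
// Illustrative test (example values only, not used by the compiler):
// comptime-only types report a size of zero above, while sized types use
// their ABI size.
test "illustrative @sizeOf results" {
    try std.testing.expect(@sizeOf(comptime_int) == 0);
    try std.testing.expect(@sizeOf(u8) == 1);
}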

fn zirBitSizeOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
    const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    const operand_ty = try sema.resolveType(block, operand_src, inst_data.operand);
    switch (operand_ty.zigTypeTag(mod)) {
        .Fn,
        .NoReturn,
        .Undefined,
        .Null,
        .Opaque,
        => return sema.fail(block, operand_src, "no size available for type '{}'", .{operand_ty.fmt(mod)}),

        .Type,
        .EnumLiteral,
        .ComptimeFloat,
        .ComptimeInt,
        .Void,
        => return mod.intRef(Type.comptime_int, 0),

        .Bool,
        .Int,
        .Float,
        .Pointer,
        .Array,
        .Struct,
        .Optional,
        .ErrorUnion,
        .ErrorSet,
        .Enum,
        .Union,
        .Vector,
        .Frame,
        .AnyFrame,
        => {},
    }
    const bit_size = try operand_ty.bitSizeAdvanced(mod, sema);
    return mod.intRef(Type.comptime_int, bit_size);
}

fn zirThis(
    sema: *Sema,
    block: *Block,
    extended: Zir.Inst.Extended.InstData,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const this_decl_index = mod.namespaceDeclIndex(block.namespace);
    const src = LazySrcLoc.nodeOffset(@as(i32, @bitCast(extended.operand)));
    return sema.analyzeDeclVal(block, src, this_decl_index);
}

fn zirClosureCapture(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
    const inst_data = sema.code.instructions.items(.data)[inst].un_tok;
    // Closures are not necessarily constant values. For example, the
    // code might do something like this:
    // fn foo(x: anytype) void { const S = struct {field: @TypeOf(x)}; }
    // ...in which case the closure_capture instruction has access to a runtime
    // value only. In such a case we preserve the type and use a dummy runtime value.
    const operand = try sema.resolveInst(inst_data.operand);
    const ty = sema.typeOf(operand);
    const capture: CaptureScope.Capture = blk: {
        if (try sema.resolveMaybeUndefValAllowVariables(operand)) |val| {
            const ip_index = try val.intern(ty, sema.mod);
            break :blk .{ .comptime_val = ip_index };
        }
        break :blk .{ .runtime_val = ty.toIntern() };
    };
    try block.wip_capture_scope.captures.putNoClobber(sema.gpa, inst, capture);
}

fn zirClosureGet(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const ip = &mod.intern_pool;
    const inst_data = sema.code.instructions.items(.data)[inst].inst_node;
    var scope: *CaptureScope = mod.declPtr(block.src_decl).src_scope.?;
    // Note: The target closure must be in this scope list.
    // If it's not here, the zir is invalid, or the list is broken.
    const capture = while (true) {
        // Note: We don't need to add a dependency here, because
        // decls always depend on their lexical parents.

        // Fail this decl if a scope it depended on failed.
        if (scope.failed()) {
            if (sema.owner_func_index != .none) {
                ip.funcAnalysis(sema.owner_func_index).state = .dependency_failure;
            } else {
                sema.owner_decl.analysis = .dependency_failure;
            }
            return error.AnalysisFail;
        }
        if (scope.captures.get(inst_data.inst)) |capture| {
            break capture;
        }
        scope = scope.parent.?;
    };

    if (capture == .runtime_val and !block.is_typeof and sema.func_index == .none) {
        const msg = msg: {
            const name = name: {
                const file = sema.owner_decl.getFileScope(mod);
                const tree = file.getTree(sema.gpa) catch |err| {
                    // In this case we emit a warning + a less precise source location.
                    log.warn("unable to load {s}: {s}", .{
                        file.sub_file_path, @errorName(err),
                    });
                    break :name null;
                };
                const node = sema.owner_decl.relativeToNodeIndex(inst_data.src_node);
                const token = tree.nodes.items(.main_token)[node];
                break :name tree.tokenSlice(token);
            };

            const msg = if (name) |some|
                try sema.errMsg(block, inst_data.src(), "'{s}' not accessible outside function scope", .{some})
            else
                try sema.errMsg(block, inst_data.src(), "variable not accessible outside function scope", .{});
            errdefer msg.destroy(sema.gpa);

            // TODO add "declared here" note
            break :msg msg;
        };
        return sema.failWithOwnedErrorMsg(msg);
    }

    if (capture == .runtime_val and !block.is_typeof and !block.is_comptime and sema.func_index != .none) {
        const msg = msg: {
            const name = name: {
                const file = sema.owner_decl.getFileScope(mod);
                const tree = file.getTree(sema.gpa) catch |err| {
                    // In this case we emit a warning + a less precise source location.
                    log.warn("unable to load {s}: {s}", .{
                        file.sub_file_path, @errorName(err),
                    });
                    break :name null;
                };
                const node = sema.owner_decl.relativeToNodeIndex(inst_data.src_node);
                const token = tree.nodes.items(.main_token)[node];
                break :name tree.tokenSlice(token);
            };

            const msg = if (name) |some|
                try sema.errMsg(block, inst_data.src(), "'{s}' not accessible from inner function", .{some})
            else
                try sema.errMsg(block, inst_data.src(), "variable not accessible from inner function", .{});
            errdefer msg.destroy(sema.gpa);

            try sema.errNote(block, LazySrcLoc.nodeOffset(0), msg, "crossed function definition here", .{});

            // TODO add "declared here" note
            break :msg msg;
        };
        return sema.failWithOwnedErrorMsg(msg);
    }

    switch (capture) {
        .runtime_val => |ty_ip_index| {
            assert(block.is_typeof);
            // We need a dummy runtime instruction with the correct type.
            return block.addTy(.alloc, ty_ip_index.toType());
        },
        .comptime_val => |val_ip_index| {
            return Air.internedToRef(val_ip_index);
        },
    }
}

fn zirRetAddr(
    sema: *Sema,
    block: *Block,
    extended: Zir.Inst.Extended.InstData,
) CompileError!Air.Inst.Ref {
    _ = extended;
    if (block.is_comptime) {
        // TODO: we could give a meaningful lazy value here. #14938
        return sema.mod.intRef(Type.usize, 0);
    } else {
        return block.addNoOp(.ret_addr);
    }
}

fn zirFrameAddress(
    sema: *Sema,
    block: *Block,
    extended: Zir.Inst.Extended.InstData,
) CompileError!Air.Inst.Ref {
    const src = LazySrcLoc.nodeOffset(@as(i32, @bitCast(extended.operand)));
    try sema.requireRuntimeBlock(block, src, null);
    return try block.addNoOp(.frame_addr);
}

fn zirBuiltinSrc(
    sema: *Sema,
    block: *Block,
    extended: Zir.Inst.Extended.InstData,
) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const mod = sema.mod;
    const extra = sema.code.extraData(Zir.Inst.Src, extended.operand).data;
    const src = LazySrcLoc.nodeOffset(extra.node);
    if (sema.func_index == .none) return sema.fail(block, src, "@src outside function", .{});
    const fn_owner_decl = mod.funcOwnerDeclPtr(sema.func_index);

    const func_name_val = blk: {
        var anon_decl = try block.startAnonDecl();
        defer anon_decl.deinit();
        // TODO: write something like getCoercedInts to avoid needing to dupe
        const name = try sema.arena.dupe(u8, mod.intern_pool.stringToSlice(fn_owner_decl.name));
        const new_decl_ty = try mod.arrayType(.{
            .len = name.len,
            .sentinel = .zero_u8,
            .child = .u8_type,
        });
        const new_decl = try anon_decl.finish(
            new_decl_ty,
            (try mod.intern(.{ .aggregate = .{
                .ty = new_decl_ty.toIntern(),
                .storage = .{ .bytes = name },
            } })).toValue(),
            .none, // default alignment
        );
        break :blk try mod.intern(.{ .ptr = .{
            .ty = .slice_const_u8_sentinel_0_type,
            .addr = .{ .decl = new_decl },
            .len = (try mod.intValue(Type.usize, name.len)).toIntern(),
        } });
    };

    const file_name_val = blk: {
        var anon_decl = try block.startAnonDecl();
        defer anon_decl.deinit();
        // The compiler must not call realpath anywhere.
        const name = try fn_owner_decl.getFileScope(mod).fullPathZ(sema.arena);
        const new_decl_ty = try mod.arrayType(.{
            .len = name.len,
            .sentinel = .zero_u8,
            .child = .u8_type,
        });
        const new_decl = try anon_decl.finish(
            new_decl_ty,
            (try mod.intern(.{ .aggregate = .{
                .ty = new_decl_ty.toIntern(),
                .storage = .{ .bytes = name },
            } })).toValue(),
            .none, // default alignment
        );
        break :blk try mod.intern(.{ .ptr = .{
            .ty = .slice_const_u8_sentinel_0_type,
            .addr = .{ .decl = new_decl },
            .len = (try mod.intValue(Type.usize, name.len)).toIntern(),
        } });
    };

    const src_loc_ty = try sema.getBuiltinType("SourceLocation");
    const fields = .{
        // file: [:0]const u8,
        file_name_val,
        // fn_name: [:0]const u8,
        func_name_val,
        // line: u32,
        try mod.intern(.{ .runtime_value = .{
            .ty = .u32_type,
            .val = (try mod.intValue(Type.u32, extra.line + 1)).toIntern(),
        } }),
        // column: u32,
        (try mod.intValue(Type.u32, extra.column + 1)).toIntern(),
    };
    return Air.internedToRef((try mod.intern(.{ .aggregate = .{
        .ty = src_loc_ty.toIntern(),
        .storage = .{ .elems = &fields },
    } })));
}
|
|
|
|
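/// Analyzes `@typeInfo`: reifies the operand type into a comptime-known
/// `std.builtin.Type` union value. Each tag's payload type (`Type.Fn`,
/// `Type.Int`, ...) is looked up by name in the `std.builtin.Type`
/// namespace, and slice-typed fields (params, fields, decls) point at
/// anonymous array decls constructed on the fly.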
fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const gpa = sema.gpa;
    const ip = &mod.intern_pool;
    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
    const src = inst_data.src();
    const ty = try sema.resolveType(block, src, inst_data.operand);
    const type_info_ty = try sema.getBuiltinType("Type");
    const type_info_tag_ty = type_info_ty.unionTagType(mod).?;

    switch (ty.zigTypeTag(mod)) {
        .Type,
        .Void,
        .Bool,
        .NoReturn,
        .ComptimeFloat,
        .ComptimeInt,
        .Undefined,
        .Null,
        .EnumLiteral,
        => |type_info_tag| return Air.internedToRef((try mod.intern(.{ .un = .{
            .ty = type_info_ty.toIntern(),
            .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(type_info_tag))).toIntern(),
            .val = .void_value,
        } }))),
        .Fn => {
            // TODO: look into memoizing this result.
            var params_anon_decl = try block.startAnonDecl();
            defer params_anon_decl.deinit();

            const fn_info_decl_index = (try sema.namespaceLookup(
                block,
                src,
                type_info_ty.getNamespaceIndex(mod).unwrap().?,
                try ip.getOrPutString(gpa, "Fn"),
            )).?;
            try mod.declareDeclDependency(sema.owner_decl_index, fn_info_decl_index);
            try sema.ensureDeclAnalyzed(fn_info_decl_index);
            const fn_info_decl = mod.declPtr(fn_info_decl_index);
            const fn_info_ty = fn_info_decl.val.toType();

            const param_info_decl_index = (try sema.namespaceLookup(
                block,
                src,
                fn_info_ty.getNamespaceIndex(mod).unwrap().?,
                try ip.getOrPutString(gpa, "Param"),
            )).?;
            try mod.declareDeclDependency(sema.owner_decl_index, param_info_decl_index);
            try sema.ensureDeclAnalyzed(param_info_decl_index);
            const param_info_decl = mod.declPtr(param_info_decl_index);
            const param_info_ty = param_info_decl.val.toType();

            const func_ty_info = mod.typeToFunc(ty).?;
            const param_vals = try sema.arena.alloc(InternPool.Index, func_ty_info.param_types.len);
            for (param_vals, 0..) |*param_val, i| {
                const param_ty = func_ty_info.param_types.get(ip)[i];
                const is_generic = param_ty == .generic_poison_type;
                const param_ty_val = try ip.get(gpa, .{ .opt = .{
                    .ty = try ip.get(gpa, .{ .opt_type = .type_type }),
                    .val = if (is_generic) .none else param_ty,
                } });

                const is_noalias = blk: {
                    const index = std.math.cast(u5, i) orelse break :blk false;
                    break :blk @as(u1, @truncate(func_ty_info.noalias_bits >> index)) != 0;
                };

                const param_fields = .{
                    // is_generic: bool,
                    Value.makeBool(is_generic).toIntern(),
                    // is_noalias: bool,
                    Value.makeBool(is_noalias).toIntern(),
                    // type: ?type,
                    param_ty_val,
                };
                param_val.* = try mod.intern(.{ .aggregate = .{
                    .ty = param_info_ty.toIntern(),
                    .storage = .{ .elems = &param_fields },
                } });
            }

            const args_val = v: {
                const new_decl_ty = try mod.arrayType(.{
                    .len = param_vals.len,
                    .child = param_info_ty.toIntern(),
                });
                const new_decl = try params_anon_decl.finish(
                    new_decl_ty,
                    (try mod.intern(.{ .aggregate = .{
                        .ty = new_decl_ty.toIntern(),
                        .storage = .{ .elems = param_vals },
                    } })).toValue(),
                    .none, // default alignment
                );
                break :v try mod.intern(.{ .ptr = .{
                    .ty = (try mod.ptrType(.{
                        .child = param_info_ty.toIntern(),
                        .flags = .{
                            .size = .Slice,
                            .is_const = true,
                        },
                    })).toIntern(),
                    .addr = .{ .decl = new_decl },
                    .len = (try mod.intValue(Type.usize, param_vals.len)).toIntern(),
                } });
            };

            const ret_ty_opt = try mod.intern(.{ .opt = .{
                .ty = try ip.get(gpa, .{ .opt_type = .type_type }),
                .val = if (func_ty_info.return_type == .generic_poison_type)
                    .none
                else
                    func_ty_info.return_type,
            } });

            const callconv_ty = try sema.getBuiltinType("CallingConvention");

            const field_values = .{
                // calling_convention: CallingConvention,
                (try mod.enumValueFieldIndex(callconv_ty, @intFromEnum(func_ty_info.cc))).toIntern(),
                // alignment: comptime_int,
                (try mod.intValue(Type.comptime_int, ty.abiAlignment(mod))).toIntern(),
                // is_generic: bool,
                Value.makeBool(func_ty_info.is_generic).toIntern(),
                // is_var_args: bool,
                Value.makeBool(func_ty_info.is_var_args).toIntern(),
                // return_type: ?type,
                ret_ty_opt,
                // args: []const Fn.Param,
                args_val,
            };
            return Air.internedToRef((try mod.intern(.{ .un = .{
                .ty = type_info_ty.toIntern(),
                .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.Fn))).toIntern(),
                .val = try mod.intern(.{ .aggregate = .{
                    .ty = fn_info_ty.toIntern(),
                    .storage = .{ .elems = &field_values },
                } }),
            } })));
        },
        .Int => {
            const int_info_decl_index = (try sema.namespaceLookup(
                block,
                src,
                type_info_ty.getNamespaceIndex(mod).unwrap().?,
                try ip.getOrPutString(gpa, "Int"),
            )).?;
            try mod.declareDeclDependency(sema.owner_decl_index, int_info_decl_index);
            try sema.ensureDeclAnalyzed(int_info_decl_index);
            const int_info_decl = mod.declPtr(int_info_decl_index);
            const int_info_ty = int_info_decl.val.toType();

            const signedness_ty = try sema.getBuiltinType("Signedness");
            const info = ty.intInfo(mod);
            const field_values = .{
                // signedness: Signedness,
                try (try mod.enumValueFieldIndex(signedness_ty, @intFromEnum(info.signedness))).intern(signedness_ty, mod),
                // bits: u16,
                (try mod.intValue(Type.u16, info.bits)).toIntern(),
            };
            return Air.internedToRef((try mod.intern(.{ .un = .{
                .ty = type_info_ty.toIntern(),
                .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.Int))).toIntern(),
                .val = try mod.intern(.{ .aggregate = .{
                    .ty = int_info_ty.toIntern(),
                    .storage = .{ .elems = &field_values },
                } }),
            } })));
        },
        .Float => {
            const float_info_decl_index = (try sema.namespaceLookup(
                block,
                src,
                type_info_ty.getNamespaceIndex(mod).unwrap().?,
                try ip.getOrPutString(gpa, "Float"),
            )).?;
            try mod.declareDeclDependency(sema.owner_decl_index, float_info_decl_index);
            try sema.ensureDeclAnalyzed(float_info_decl_index);
            const float_info_decl = mod.declPtr(float_info_decl_index);
            const float_info_ty = float_info_decl.val.toType();

            const field_vals = .{
                // bits: u16,
                (try mod.intValue(Type.u16, ty.bitSize(mod))).toIntern(),
            };
            return Air.internedToRef((try mod.intern(.{ .un = .{
                .ty = type_info_ty.toIntern(),
                .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.Float))).toIntern(),
                .val = try mod.intern(.{ .aggregate = .{
                    .ty = float_info_ty.toIntern(),
                    .storage = .{ .elems = &field_vals },
                } }),
            } })));
        },
        .Pointer => {
            const info = ty.ptrInfo(mod);
            const alignment = if (info.flags.alignment.toByteUnitsOptional()) |alignment|
                try mod.intValue(Type.comptime_int, alignment)
            else
                try info.child.toType().lazyAbiAlignment(mod);

            const addrspace_ty = try sema.getBuiltinType("AddressSpace");
            const pointer_ty = t: {
                const decl_index = (try sema.namespaceLookup(
                    block,
                    src,
                    (try sema.getBuiltinType("Type")).getNamespaceIndex(mod).unwrap().?,
                    try ip.getOrPutString(gpa, "Pointer"),
                )).?;
                try mod.declareDeclDependency(sema.owner_decl_index, decl_index);
                try sema.ensureDeclAnalyzed(decl_index);
                const decl = mod.declPtr(decl_index);
                break :t decl.val.toType();
            };
            const ptr_size_ty = t: {
                const decl_index = (try sema.namespaceLookup(
                    block,
                    src,
                    pointer_ty.getNamespaceIndex(mod).unwrap().?,
                    try ip.getOrPutString(gpa, "Size"),
                )).?;
                try mod.declareDeclDependency(sema.owner_decl_index, decl_index);
                try sema.ensureDeclAnalyzed(decl_index);
                const decl = mod.declPtr(decl_index);
                break :t decl.val.toType();
            };

            const field_values = .{
                // size: Size,
                try (try mod.enumValueFieldIndex(ptr_size_ty, @intFromEnum(info.flags.size))).intern(ptr_size_ty, mod),
                // is_const: bool,
                Value.makeBool(info.flags.is_const).toIntern(),
                // is_volatile: bool,
                Value.makeBool(info.flags.is_volatile).toIntern(),
                // alignment: comptime_int,
                alignment.toIntern(),
                // address_space: AddressSpace
                try (try mod.enumValueFieldIndex(addrspace_ty, @intFromEnum(info.flags.address_space))).intern(addrspace_ty, mod),
                // child: type,
                info.child,
                // is_allowzero: bool,
                Value.makeBool(info.flags.is_allowzero).toIntern(),
                // sentinel: ?*const anyopaque,
                (try sema.optRefValue(block, info.child.toType(), switch (info.sentinel) {
                    .none => null,
                    else => info.sentinel.toValue(),
                })).toIntern(),
            };
            return Air.internedToRef((try mod.intern(.{ .un = .{
                .ty = type_info_ty.toIntern(),
                .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.Pointer))).toIntern(),
                .val = try mod.intern(.{ .aggregate = .{
                    .ty = pointer_ty.toIntern(),
                    .storage = .{ .elems = &field_values },
                } }),
            } })));
        },
        .Array => {
            const array_field_ty = t: {
                const array_field_ty_decl_index = (try sema.namespaceLookup(
                    block,
                    src,
                    type_info_ty.getNamespaceIndex(mod).unwrap().?,
                    try ip.getOrPutString(gpa, "Array"),
                )).?;
                try mod.declareDeclDependency(sema.owner_decl_index, array_field_ty_decl_index);
                try sema.ensureDeclAnalyzed(array_field_ty_decl_index);
                const array_field_ty_decl = mod.declPtr(array_field_ty_decl_index);
                break :t array_field_ty_decl.val.toType();
            };

            const info = ty.arrayInfo(mod);
            const field_values = .{
                // len: comptime_int,
                (try mod.intValue(Type.comptime_int, info.len)).toIntern(),
                // child: type,
                info.elem_type.toIntern(),
                // sentinel: ?*const anyopaque,
                (try sema.optRefValue(block, info.elem_type, info.sentinel)).toIntern(),
            };
            return Air.internedToRef((try mod.intern(.{ .un = .{
                .ty = type_info_ty.toIntern(),
                .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.Array))).toIntern(),
                .val = try mod.intern(.{ .aggregate = .{
                    .ty = array_field_ty.toIntern(),
                    .storage = .{ .elems = &field_values },
                } }),
            } })));
        },
        .Vector => {
            const vector_field_ty = t: {
                const vector_field_ty_decl_index = (try sema.namespaceLookup(
                    block,
                    src,
                    type_info_ty.getNamespaceIndex(mod).unwrap().?,
                    try ip.getOrPutString(gpa, "Vector"),
                )).?;
                try mod.declareDeclDependency(sema.owner_decl_index, vector_field_ty_decl_index);
                try sema.ensureDeclAnalyzed(vector_field_ty_decl_index);
                const vector_field_ty_decl = mod.declPtr(vector_field_ty_decl_index);
                break :t vector_field_ty_decl.val.toType();
            };

            const info = ty.arrayInfo(mod);
            const field_values = .{
                // len: comptime_int,
                (try mod.intValue(Type.comptime_int, info.len)).toIntern(),
                // child: type,
                info.elem_type.toIntern(),
            };
            return Air.internedToRef((try mod.intern(.{ .un = .{
                .ty = type_info_ty.toIntern(),
                .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.Vector))).toIntern(),
                .val = try mod.intern(.{ .aggregate = .{
                    .ty = vector_field_ty.toIntern(),
                    .storage = .{ .elems = &field_values },
                } }),
            } })));
        },
        .Optional => {
            const optional_field_ty = t: {
                const optional_field_ty_decl_index = (try sema.namespaceLookup(
                    block,
                    src,
                    type_info_ty.getNamespaceIndex(mod).unwrap().?,
                    try ip.getOrPutString(gpa, "Optional"),
                )).?;
                try mod.declareDeclDependency(sema.owner_decl_index, optional_field_ty_decl_index);
                try sema.ensureDeclAnalyzed(optional_field_ty_decl_index);
                const optional_field_ty_decl = mod.declPtr(optional_field_ty_decl_index);
                break :t optional_field_ty_decl.val.toType();
            };

            const field_values = .{
                // child: type,
                ty.optionalChild(mod).toIntern(),
            };
            return Air.internedToRef((try mod.intern(.{ .un = .{
                .ty = type_info_ty.toIntern(),
                .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.Optional))).toIntern(),
                .val = try mod.intern(.{ .aggregate = .{
                    .ty = optional_field_ty.toIntern(),
                    .storage = .{ .elems = &field_values },
                } }),
            } })));
        },
        .ErrorSet => {
            var fields_anon_decl = try block.startAnonDecl();
            defer fields_anon_decl.deinit();

            // Get the Error type
            const error_field_ty = t: {
                const set_field_ty_decl_index = (try sema.namespaceLookup(
                    block,
                    src,
                    type_info_ty.getNamespaceIndex(mod).unwrap().?,
                    try ip.getOrPutString(gpa, "Error"),
                )).?;
                try mod.declareDeclDependency(sema.owner_decl_index, set_field_ty_decl_index);
                try sema.ensureDeclAnalyzed(set_field_ty_decl_index);
                const set_field_ty_decl = mod.declPtr(set_field_ty_decl_index);
                break :t set_field_ty_decl.val.toType();
            };

            try sema.queueFullTypeResolution(error_field_ty);

            // Build our list of Error values
            // Optional value is only null if anyerror
            // Value can be zero-length slice otherwise
            const error_field_vals = switch (try sema.resolveInferredErrorSetTy(block, src, ty.toIntern())) {
                .anyerror_type => null,
                else => |err_set_ty_index| blk: {
                    const names = ip.indexToKey(err_set_ty_index).error_set_type.names;
                    const vals = try sema.arena.alloc(InternPool.Index, names.len);
                    for (vals, 0..) |*field_val, i| {
                        // TODO: write something like getCoercedInts to avoid needing to dupe
                        const name = try sema.arena.dupe(u8, ip.stringToSlice(names.get(ip)[i]));
                        const name_val = v: {
                            var anon_decl = try block.startAnonDecl();
                            defer anon_decl.deinit();
                            const new_decl_ty = try mod.arrayType(.{
                                .len = name.len,
                                .child = .u8_type,
                            });
                            const new_decl = try anon_decl.finish(
                                new_decl_ty,
                                (try mod.intern(.{ .aggregate = .{
                                    .ty = new_decl_ty.toIntern(),
                                    .storage = .{ .bytes = name },
                                } })).toValue(),
                                .none, // default alignment
                            );
                            break :v try mod.intern(.{ .ptr = .{
                                .ty = .slice_const_u8_type,
                                .addr = .{ .decl = new_decl },
                                .len = (try mod.intValue(Type.usize, name.len)).toIntern(),
                            } });
                        };

                        const error_field_fields = .{
                            // name: []const u8,
                            name_val,
                        };
                        field_val.* = try mod.intern(.{ .aggregate = .{
                            .ty = error_field_ty.toIntern(),
                            .storage = .{ .elems = &error_field_fields },
                        } });
                    }

                    break :blk vals;
                },
            };

            // Build our ?[]const Error value
            const slice_errors_ty = try mod.ptrType(.{
                .child = error_field_ty.toIntern(),
                .flags = .{
                    .size = .Slice,
                    .is_const = true,
                },
            });
            const opt_slice_errors_ty = try mod.optionalType(slice_errors_ty.toIntern());
            const errors_payload_val: InternPool.Index = if (error_field_vals) |vals| v: {
                const array_errors_ty = try mod.arrayType(.{
                    .len = vals.len,
                    .child = error_field_ty.toIntern(),
                });
                const new_decl = try fields_anon_decl.finish(
                    array_errors_ty,
                    (try mod.intern(.{ .aggregate = .{
                        .ty = array_errors_ty.toIntern(),
                        .storage = .{ .elems = vals },
                    } })).toValue(),
                    .none, // default alignment
                );
                break :v try mod.intern(.{ .ptr = .{
                    .ty = slice_errors_ty.toIntern(),
                    .addr = .{ .decl = new_decl },
                    .len = (try mod.intValue(Type.usize, vals.len)).toIntern(),
                } });
            } else .none;
            const errors_val = try mod.intern(.{ .opt = .{
                .ty = opt_slice_errors_ty.toIntern(),
                .val = errors_payload_val,
            } });

            // Construct Type{ .ErrorSet = errors_val }
            return Air.internedToRef((try mod.intern(.{ .un = .{
                .ty = type_info_ty.toIntern(),
                .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.ErrorSet))).toIntern(),
                .val = errors_val,
            } })));
        },
        .ErrorUnion => {
            const error_union_field_ty = t: {
                const error_union_field_ty_decl_index = (try sema.namespaceLookup(
                    block,
                    src,
                    type_info_ty.getNamespaceIndex(mod).unwrap().?,
                    try ip.getOrPutString(gpa, "ErrorUnion"),
                )).?;
                try mod.declareDeclDependency(sema.owner_decl_index, error_union_field_ty_decl_index);
                try sema.ensureDeclAnalyzed(error_union_field_ty_decl_index);
                const error_union_field_ty_decl = mod.declPtr(error_union_field_ty_decl_index);
                break :t error_union_field_ty_decl.val.toType();
            };

            const field_values = .{
                // error_set: type,
                ty.errorUnionSet(mod).toIntern(),
                // payload: type,
                ty.errorUnionPayload(mod).toIntern(),
            };
            return Air.internedToRef((try mod.intern(.{ .un = .{
                .ty = type_info_ty.toIntern(),
                .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.ErrorUnion))).toIntern(),
                .val = try mod.intern(.{ .aggregate = .{
                    .ty = error_union_field_ty.toIntern(),
                    .storage = .{ .elems = &field_values },
                } }),
            } })));
        },
        .Enum => {
            // TODO: look into memoizing this result.
            const is_exhaustive = Value.makeBool(ip.indexToKey(ty.toIntern()).enum_type.tag_mode != .nonexhaustive);

            var fields_anon_decl = try block.startAnonDecl();
            defer fields_anon_decl.deinit();

            const enum_field_ty = t: {
                const enum_field_ty_decl_index = (try sema.namespaceLookup(
                    block,
                    src,
                    type_info_ty.getNamespaceIndex(mod).unwrap().?,
                    try ip.getOrPutString(gpa, "EnumField"),
                )).?;
                try mod.declareDeclDependency(sema.owner_decl_index, enum_field_ty_decl_index);
                try sema.ensureDeclAnalyzed(enum_field_ty_decl_index);
                const enum_field_ty_decl = mod.declPtr(enum_field_ty_decl_index);
                break :t enum_field_ty_decl.val.toType();
            };

            const enum_field_vals = try sema.arena.alloc(InternPool.Index, ip.indexToKey(ty.toIntern()).enum_type.names.len);
            for (enum_field_vals, 0..) |*field_val, i| {
                const enum_type = ip.indexToKey(ty.toIntern()).enum_type;
                const value_val = if (enum_type.values.len > 0)
                    try mod.intern_pool.getCoerced(gpa, enum_type.values.get(ip)[i], .comptime_int_type)
                else
                    try mod.intern(.{ .int = .{
                        .ty = .comptime_int_type,
                        .storage = .{ .u64 = @as(u64, @intCast(i)) },
                    } });
                // TODO: write something like getCoercedInts to avoid needing to dupe
                const name = try sema.arena.dupe(u8, ip.stringToSlice(enum_type.names.get(ip)[i]));
                const name_val = v: {
                    var anon_decl = try block.startAnonDecl();
                    defer anon_decl.deinit();
                    const new_decl_ty = try mod.arrayType(.{
                        .len = name.len,
                        .child = .u8_type,
                    });
                    const new_decl = try anon_decl.finish(
                        new_decl_ty,
                        (try mod.intern(.{ .aggregate = .{
                            .ty = new_decl_ty.toIntern(),
                            .storage = .{ .bytes = name },
                        } })).toValue(),
                        .none, // default alignment
                    );
                    break :v try mod.intern(.{ .ptr = .{
                        .ty = .slice_const_u8_type,
                        .addr = .{ .decl = new_decl },
                        .len = (try mod.intValue(Type.usize, name.len)).toIntern(),
                    } });
                };

                const enum_field_fields = .{
                    // name: []const u8,
                    name_val,
                    // value: comptime_int,
                    value_val,
                };
                field_val.* = try mod.intern(.{ .aggregate = .{
                    .ty = enum_field_ty.toIntern(),
                    .storage = .{ .elems = &enum_field_fields },
                } });
            }

            const fields_val = v: {
                const fields_array_ty = try mod.arrayType(.{
                    .len = enum_field_vals.len,
                    .child = enum_field_ty.toIntern(),
                });
                const new_decl = try fields_anon_decl.finish(
                    fields_array_ty,
                    (try mod.intern(.{ .aggregate = .{
                        .ty = fields_array_ty.toIntern(),
                        .storage = .{ .elems = enum_field_vals },
                    } })).toValue(),
                    .none, // default alignment
                );
                break :v try mod.intern(.{ .ptr = .{
                    .ty = (try mod.ptrType(.{
                        .child = enum_field_ty.toIntern(),
                        .flags = .{
                            .size = .Slice,
                            .is_const = true,
                        },
                    })).toIntern(),
                    .addr = .{ .decl = new_decl },
                    .len = (try mod.intValue(Type.usize, enum_field_vals.len)).toIntern(),
                } });
            };

            const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, ip.indexToKey(ty.toIntern()).enum_type.namespace);

            const type_enum_ty = t: {
                const type_enum_ty_decl_index = (try sema.namespaceLookup(
                    block,
                    src,
                    type_info_ty.getNamespaceIndex(mod).unwrap().?,
                    try ip.getOrPutString(gpa, "Enum"),
                )).?;
                try mod.declareDeclDependency(sema.owner_decl_index, type_enum_ty_decl_index);
                try sema.ensureDeclAnalyzed(type_enum_ty_decl_index);
                const type_enum_ty_decl = mod.declPtr(type_enum_ty_decl_index);
                break :t type_enum_ty_decl.val.toType();
            };

            const field_values = .{
                // tag_type: type,
                ip.indexToKey(ty.toIntern()).enum_type.tag_ty,
                // fields: []const EnumField,
                fields_val,
                // decls: []const Declaration,
                decls_val,
                // is_exhaustive: bool,
                is_exhaustive.toIntern(),
            };
            return Air.internedToRef((try mod.intern(.{ .un = .{
                .ty = type_info_ty.toIntern(),
                .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.Enum))).toIntern(),
                .val = try mod.intern(.{ .aggregate = .{
                    .ty = type_enum_ty.toIntern(),
                    .storage = .{ .elems = &field_values },
                } }),
            } })));
        },
        .Union => {
            // TODO: look into memoizing this result.

            var fields_anon_decl = try block.startAnonDecl();
            defer fields_anon_decl.deinit();

            const type_union_ty = t: {
                const type_union_ty_decl_index = (try sema.namespaceLookup(
                    block,
                    src,
                    type_info_ty.getNamespaceIndex(mod).unwrap().?,
                    try ip.getOrPutString(gpa, "Union"),
                )).?;
                try mod.declareDeclDependency(sema.owner_decl_index, type_union_ty_decl_index);
                try sema.ensureDeclAnalyzed(type_union_ty_decl_index);
                const type_union_ty_decl = mod.declPtr(type_union_ty_decl_index);
                break :t type_union_ty_decl.val.toType();
            };

            const union_field_ty = t: {
                const union_field_ty_decl_index = (try sema.namespaceLookup(
                    block,
                    src,
                    type_info_ty.getNamespaceIndex(mod).unwrap().?,
                    try ip.getOrPutString(gpa, "UnionField"),
                )).?;
                try mod.declareDeclDependency(sema.owner_decl_index, union_field_ty_decl_index);
                try sema.ensureDeclAnalyzed(union_field_ty_decl_index);
                const union_field_ty_decl = mod.declPtr(union_field_ty_decl_index);
                break :t union_field_ty_decl.val.toType();
            };

            try sema.resolveTypeLayout(ty); // Getting alignment requires type layout
            const layout = ty.containerLayout(mod);

            const union_fields = ty.unionFields(mod);
            const union_field_vals = try gpa.alloc(InternPool.Index, union_fields.count());
            defer gpa.free(union_field_vals);

            for (union_field_vals, 0..) |*field_val, i| {
                const field = union_fields.values()[i];
                // TODO: write something like getCoercedInts to avoid needing to dupe
                const name = try sema.arena.dupe(u8, ip.stringToSlice(union_fields.keys()[i]));
                const name_val = v: {
                    var anon_decl = try block.startAnonDecl();
                    defer anon_decl.deinit();
                    const new_decl_ty = try mod.arrayType(.{
                        .len = name.len,
                        .child = .u8_type,
                    });
                    const new_decl = try anon_decl.finish(
                        new_decl_ty,
                        (try mod.intern(.{ .aggregate = .{
                            .ty = new_decl_ty.toIntern(),
                            .storage = .{ .bytes = name },
                        } })).toValue(),
                        .none, // default alignment
                    );
                    break :v try mod.intern(.{ .ptr = .{
                        .ty = .slice_const_u8_type,
                        .addr = .{ .decl = new_decl },
                        .len = (try mod.intValue(Type.usize, name.len)).toIntern(),
                    } });
                };

                const alignment = switch (layout) {
                    .Auto, .Extern => try sema.unionFieldAlignment(field),
                    .Packed => 0,
                };

                const union_field_fields = .{
                    // name: []const u8,
                    name_val,
                    // type: type,
                    field.ty.toIntern(),
                    // alignment: comptime_int,
                    (try mod.intValue(Type.comptime_int, alignment)).toIntern(),
                };
                field_val.* = try mod.intern(.{ .aggregate = .{
                    .ty = union_field_ty.toIntern(),
                    .storage = .{ .elems = &union_field_fields },
                } });
            }

            const fields_val = v: {
                const array_fields_ty = try mod.arrayType(.{
                    .len = union_field_vals.len,
                    .child = union_field_ty.toIntern(),
                });
                const new_decl = try fields_anon_decl.finish(
                    array_fields_ty,
                    (try mod.intern(.{ .aggregate = .{
                        .ty = array_fields_ty.toIntern(),
                        .storage = .{ .elems = union_field_vals },
                    } })).toValue(),
                    .none, // default alignment
                );
                break :v try mod.intern(.{ .ptr = .{
                    .ty = (try mod.ptrType(.{
                        .child = union_field_ty.toIntern(),
                        .flags = .{
                            .size = .Slice,
                            .is_const = true,
                        },
                    })).toIntern(),
                    .addr = .{ .decl = new_decl },
                    .len = (try mod.intValue(Type.usize, union_field_vals.len)).toIntern(),
                } });
            };

            const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, ty.getNamespaceIndex(mod));

            const enum_tag_ty_val = try mod.intern(.{ .opt = .{
                .ty = (try mod.optionalType(.type_type)).toIntern(),
                .val = if (ty.unionTagType(mod)) |tag_ty| tag_ty.toIntern() else .none,
            } });

            const container_layout_ty = t: {
                const decl_index = (try sema.namespaceLookup(
                    block,
                    src,
                    (try sema.getBuiltinType("Type")).getNamespaceIndex(mod).unwrap().?,
                    try ip.getOrPutString(gpa, "ContainerLayout"),
                )).?;
                try mod.declareDeclDependency(sema.owner_decl_index, decl_index);
                try sema.ensureDeclAnalyzed(decl_index);
                const decl = mod.declPtr(decl_index);
                break :t decl.val.toType();
            };

            const field_values = .{
                // layout: ContainerLayout,
                (try mod.enumValueFieldIndex(container_layout_ty, @intFromEnum(layout))).toIntern(),

                // tag_type: ?type,
                enum_tag_ty_val,
                // fields: []const UnionField,
                fields_val,
                // decls: []const Declaration,
                decls_val,
            };
            return Air.internedToRef((try mod.intern(.{ .un = .{
                .ty = type_info_ty.toIntern(),
                .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.Union))).toIntern(),
                .val = try mod.intern(.{ .aggregate = .{
                    .ty = type_union_ty.toIntern(),
                    .storage = .{ .elems = &field_values },
                } }),
            } })));
        },
        .Struct => {
            // TODO: look into memoizing this result.

            var fields_anon_decl = try block.startAnonDecl();
            defer fields_anon_decl.deinit();

            const type_struct_ty = t: {
                const type_struct_ty_decl_index = (try sema.namespaceLookup(
                    block,
                    src,
                    type_info_ty.getNamespaceIndex(mod).unwrap().?,
                    try ip.getOrPutString(gpa, "Struct"),
                )).?;
                try mod.declareDeclDependency(sema.owner_decl_index, type_struct_ty_decl_index);
                try sema.ensureDeclAnalyzed(type_struct_ty_decl_index);
                const type_struct_ty_decl = mod.declPtr(type_struct_ty_decl_index);
                break :t type_struct_ty_decl.val.toType();
            };

            const struct_field_ty = t: {
                const struct_field_ty_decl_index = (try sema.namespaceLookup(
                    block,
                    src,
                    type_info_ty.getNamespaceIndex(mod).unwrap().?,
                    try ip.getOrPutString(gpa, "StructField"),
                )).?;
                try mod.declareDeclDependency(sema.owner_decl_index, struct_field_ty_decl_index);
                try sema.ensureDeclAnalyzed(struct_field_ty_decl_index);
                const struct_field_ty_decl = mod.declPtr(struct_field_ty_decl_index);
                break :t struct_field_ty_decl.val.toType();
            };

            try sema.resolveTypeLayout(ty); // Getting alignment requires type layout
            const layout = ty.containerLayout(mod);

            var struct_field_vals: []InternPool.Index = &.{};
            defer gpa.free(struct_field_vals);
            fv: {
                const struct_type = switch (ip.indexToKey(ty.toIntern())) {
                    .anon_struct_type => |tuple| {
                        struct_field_vals = try gpa.alloc(InternPool.Index, tuple.types.len);
                        for (struct_field_vals, 0..) |*struct_field_val, i| {
                            const anon_struct_type = ip.indexToKey(ty.toIntern()).anon_struct_type;
                            const field_ty = anon_struct_type.types[i];
                            const field_val = anon_struct_type.values[i];
                            const name_val = v: {
                                var anon_decl = try block.startAnonDecl();
                                defer anon_decl.deinit();
                                // TODO: write something like getCoercedInts to avoid needing to dupe
                                const bytes = if (tuple.names.len != 0)
                                    // https://github.com/ziglang/zig/issues/15709
                                    try sema.arena.dupe(u8, ip.stringToSlice(ip.indexToKey(ty.toIntern()).anon_struct_type.names[i]))
                                else
                                    try std.fmt.allocPrint(sema.arena, "{d}", .{i});
                                const new_decl_ty = try mod.arrayType(.{
                                    .len = bytes.len,
                                    .child = .u8_type,
                                });
                                const new_decl = try anon_decl.finish(
                                    new_decl_ty,
                                    (try mod.intern(.{ .aggregate = .{
                                        .ty = new_decl_ty.toIntern(),
                                        .storage = .{ .bytes = bytes },
                                    } })).toValue(),
                                    .none, // default alignment
                                );
                                break :v try mod.intern(.{ .ptr = .{
                                    .ty = .slice_const_u8_type,
                                    .addr = .{ .decl = new_decl },
                                    .len = (try mod.intValue(Type.usize, bytes.len)).toIntern(),
                                } });
                            };

                            try sema.resolveTypeLayout(field_ty.toType());

                            const is_comptime = field_val != .none;
                            const opt_default_val = if (is_comptime) field_val.toValue() else null;
                            const default_val_ptr = try sema.optRefValue(block, field_ty.toType(), opt_default_val);
                            const struct_field_fields = .{
                                // name: []const u8,
                                name_val,
                                // type: type,
                                field_ty,
                                // default_value: ?*const anyopaque,
                                default_val_ptr.toIntern(),
                                // is_comptime: bool,
                                Value.makeBool(is_comptime).toIntern(),
                                // alignment: comptime_int,
                                (try mod.intValue(Type.comptime_int, field_ty.toType().abiAlignment(mod))).toIntern(),
                            };
                            struct_field_val.* = try mod.intern(.{ .aggregate = .{
                                .ty = struct_field_ty.toIntern(),
                                .storage = .{ .elems = &struct_field_fields },
                            } });
                        }
                        break :fv;
                    },
                    .struct_type => |s| s,
                    else => unreachable,
                };
                const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse break :fv;
                struct_field_vals = try gpa.alloc(InternPool.Index, struct_obj.fields.count());

                for (
                    struct_field_vals,
                    struct_obj.fields.keys(),
                    struct_obj.fields.values(),
                ) |*field_val, name_nts, field| {
                    // TODO: write something like getCoercedInts to avoid needing to dupe
                    const name = try sema.arena.dupe(u8, ip.stringToSlice(name_nts));
                    const name_val = v: {
                        var anon_decl = try block.startAnonDecl();
                        defer anon_decl.deinit();
                        const new_decl_ty = try mod.arrayType(.{
                            .len = name.len,
                            .child = .u8_type,
                        });
                        const new_decl = try anon_decl.finish(
                            new_decl_ty,
                            (try mod.intern(.{ .aggregate = .{
                                .ty = new_decl_ty.toIntern(),
                                .storage = .{ .bytes = name },
                            } })).toValue(),
                            .none, // default alignment
                        );
                        break :v try mod.intern(.{ .ptr = .{
                            .ty = .slice_const_u8_type,
                            .addr = .{ .decl = new_decl },
                            .len = (try mod.intValue(Type.usize, name.len)).toIntern(),
                        } });
                    };

                    const opt_default_val = if (field.default_val == .none)
                        null
                    else
                        field.default_val.toValue();
                    const default_val_ptr = try sema.optRefValue(block, field.ty, opt_default_val);
                    const alignment = field.alignment(mod, layout);

                    const struct_field_fields = .{
                        // name: []const u8,
                        name_val,
                        // type: type,
                        field.ty.toIntern(),
                        // default_value: ?*const anyopaque,
                        default_val_ptr.toIntern(),
                        // is_comptime: bool,
                        Value.makeBool(field.is_comptime).toIntern(),
                        // alignment: comptime_int,
                        (try mod.intValue(Type.comptime_int, alignment)).toIntern(),
                    };
                    field_val.* = try mod.intern(.{ .aggregate = .{
                        .ty = struct_field_ty.toIntern(),
                        .storage = .{ .elems = &struct_field_fields },
                    } });
                }
            }

            const fields_val = v: {
                const array_fields_ty = try mod.arrayType(.{
                    .len = struct_field_vals.len,
                    .child = struct_field_ty.toIntern(),
                });
                const new_decl = try fields_anon_decl.finish(
                    array_fields_ty,
                    (try mod.intern(.{ .aggregate = .{
                        .ty = array_fields_ty.toIntern(),
                        .storage = .{ .elems = struct_field_vals },
                    } })).toValue(),
                    .none, // default alignment
                );
                break :v try mod.intern(.{ .ptr = .{
                    .ty = (try mod.ptrType(.{
                        .child = struct_field_ty.toIntern(),
                        .flags = .{
                            .size = .Slice,
                            .is_const = true,
                        },
                    })).toIntern(),
                    .addr = .{ .decl = new_decl },
                    .len = (try mod.intValue(Type.usize, struct_field_vals.len)).toIntern(),
                } });
            };

            const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, ty.getNamespaceIndex(mod));

            const backing_integer_val = try mod.intern(.{ .opt = .{
                .ty = (try mod.optionalType(.type_type)).toIntern(),
                .val = if (layout == .Packed) val: {
                    const struct_obj = mod.typeToStruct(ty).?;
                    assert(struct_obj.haveLayout());
                    assert(struct_obj.backing_int_ty.isInt(mod));
                    break :val struct_obj.backing_int_ty.toIntern();
                } else .none,
            } });

            const container_layout_ty = t: {
                const decl_index = (try sema.namespaceLookup(
                    block,
                    src,
                    (try sema.getBuiltinType("Type")).getNamespaceIndex(mod).unwrap().?,
                    try ip.getOrPutString(gpa, "ContainerLayout"),
                )).?;
                try mod.declareDeclDependency(sema.owner_decl_index, decl_index);
                try sema.ensureDeclAnalyzed(decl_index);
                const decl = mod.declPtr(decl_index);
                break :t decl.val.toType();
            };

            const field_values = [_]InternPool.Index{
                // layout: ContainerLayout,
                (try mod.enumValueFieldIndex(container_layout_ty, @intFromEnum(layout))).toIntern(),
                // backing_integer: ?type,
                backing_integer_val,
                // fields: []const StructField,
                fields_val,
                // decls: []const Declaration,
                decls_val,
                // is_tuple: bool,
                Value.makeBool(ty.isTuple(mod)).toIntern(),
            };
            return Air.internedToRef((try mod.intern(.{ .un = .{
                .ty = type_info_ty.toIntern(),
                .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.Struct))).toIntern(),
                .val = try mod.intern(.{ .aggregate = .{
                    .ty = type_struct_ty.toIntern(),
                    .storage = .{ .elems = &field_values },
                } }),
            } })));
        },
        .Opaque => {
            // TODO: look into memoizing this result.

            const type_opaque_ty = t: {
                const type_opaque_ty_decl_index = (try sema.namespaceLookup(
                    block,
                    src,
                    type_info_ty.getNamespaceIndex(mod).unwrap().?,
                    try ip.getOrPutString(gpa, "Opaque"),
                )).?;
                try mod.declareDeclDependency(sema.owner_decl_index, type_opaque_ty_decl_index);
                try sema.ensureDeclAnalyzed(type_opaque_ty_decl_index);
                const type_opaque_ty_decl = mod.declPtr(type_opaque_ty_decl_index);
                break :t type_opaque_ty_decl.val.toType();
            };

            try sema.resolveTypeFields(ty);
            const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, ty.getNamespaceIndex(mod));

            const field_values = .{
                // decls: []const Declaration,
                decls_val,
            };
            return Air.internedToRef((try mod.intern(.{ .un = .{
                .ty = type_info_ty.toIntern(),
                .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.Opaque))).toIntern(),
                .val = try mod.intern(.{ .aggregate = .{
                    .ty = type_opaque_ty.toIntern(),
                    .storage = .{ .elems = &field_values },
                } }),
            } })));
        },
        .Frame => return sema.failWithUseOfAsync(block, src),
        .AnyFrame => return sema.failWithUseOfAsync(block, src),
    }
}

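/// Builds the `decls: []const std.builtin.Type.Declaration` value for
/// `@typeInfo` of a container type: collects the public named decls of
/// `opt_namespace` (if any) into an anonymous array decl and returns an
/// interned `[]const Declaration` slice pointing at it.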
fn typeInfoDecls(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    type_info_ty: Type,
    opt_namespace: Module.Namespace.OptionalIndex,
) CompileError!InternPool.Index {
    const mod = sema.mod;
    const gpa = sema.gpa;

    var decls_anon_decl = try block.startAnonDecl();
    defer decls_anon_decl.deinit();

    const declaration_ty = t: {
        const declaration_ty_decl_index = (try sema.namespaceLookup(
            block,
            src,
            type_info_ty.getNamespaceIndex(mod).unwrap().?,
            try mod.intern_pool.getOrPutString(gpa, "Declaration"),
        )).?;
        try mod.declareDeclDependency(sema.owner_decl_index, declaration_ty_decl_index);
        try sema.ensureDeclAnalyzed(declaration_ty_decl_index);
        const declaration_ty_decl = mod.declPtr(declaration_ty_decl_index);
        break :t declaration_ty_decl.val.toType();
    };
    try sema.queueFullTypeResolution(declaration_ty);

    var decl_vals = std.ArrayList(InternPool.Index).init(gpa);
    defer decl_vals.deinit();

    var seen_namespaces = std.AutoHashMap(*Namespace, void).init(gpa);
    defer seen_namespaces.deinit();

    if (opt_namespace.unwrap()) |namespace_index| {
        const namespace = mod.namespacePtr(namespace_index);
        try sema.typeInfoNamespaceDecls(block, namespace, declaration_ty, &decl_vals, &seen_namespaces);
    }

    const array_decl_ty = try mod.arrayType(.{
        .len = decl_vals.items.len,
        .child = declaration_ty.toIntern(),
    });
    const new_decl = try decls_anon_decl.finish(
        array_decl_ty,
        (try mod.intern(.{ .aggregate = .{
            .ty = array_decl_ty.toIntern(),
            .storage = .{ .elems = decl_vals.items },
        } })).toValue(),
        .none, // default alignment
    );
    return try mod.intern(.{ .ptr = .{
        .ty = (try mod.ptrType(.{
            .child = declaration_ty.toIntern(),
            .flags = .{
                .size = .Slice,
                .is_const = true,
            },
        })).toIntern(),
        .addr = .{ .decl = new_decl },
        .len = (try mod.intValue(Type.usize, decl_vals.items.len)).toIntern(),
    } });
}

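/// Recursively collects one `Declaration` value per public named decl in
/// `namespace`, following `usingnamespace` decls into their target
/// namespaces. `seen_namespaces` guards against visiting a namespace twice,
/// e.g. through mutually recursive `usingnamespace` decls.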
fn typeInfoNamespaceDecls(
    sema: *Sema,
    block: *Block,
    namespace: *Namespace,
    declaration_ty: Type,
    decl_vals: *std.ArrayList(InternPool.Index),
    seen_namespaces: *std.AutoHashMap(*Namespace, void),
) !void {
    const mod = sema.mod;
    const ip = &mod.intern_pool;
    const gop = try seen_namespaces.getOrPut(namespace);
    if (gop.found_existing) return;
    const decls = namespace.decls.keys();
    for (decls) |decl_index| {
        const decl = mod.declPtr(decl_index);
        if (decl.kind == .@"usingnamespace") {
            if (decl.analysis == .in_progress) continue;
            try mod.ensureDeclAnalyzed(decl_index);
            const new_ns = decl.val.toType().getNamespace(mod).?;
            try sema.typeInfoNamespaceDecls(block, new_ns, declaration_ty, decl_vals, seen_namespaces);
            continue;
        }
        if (decl.kind != .named or !decl.is_pub) continue;
        const name_val = v: {
            var anon_decl = try block.startAnonDecl();
            defer anon_decl.deinit();
            // TODO: write something like getCoercedInts to avoid needing to dupe
            const name = try sema.arena.dupe(u8, ip.stringToSlice(decl.name));
            const new_decl_ty = try mod.arrayType(.{
                .len = name.len,
                .child = .u8_type,
            });
            const new_decl = try anon_decl.finish(
                new_decl_ty,
                (try mod.intern(.{ .aggregate = .{
                    .ty = new_decl_ty.toIntern(),
                    .storage = .{ .bytes = name },
                } })).toValue(),
                .none, // default alignment
            );
            break :v try mod.intern(.{ .ptr = .{
                .ty = .slice_const_u8_type,
                .addr = .{ .decl = new_decl },
                .len = (try mod.intValue(Type.usize, name.len)).toIntern(),
            } });
        };

        const fields = .{
            // name: []const u8,
            name_val,
        };
        try decl_vals.append(try mod.intern(.{ .aggregate = .{
            .ty = declaration_ty.toIntern(),
            .storage = .{ .elems = &fields },
        } }));
    }
}

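/// Analyzes the single-operand form of `@TypeOf`: the operand has already
/// been analyzed, so this simply returns its type as a comptime value.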
fn zirTypeof(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    _ = block;
    const zir_datas = sema.code.instructions.items(.data);
    const inst_data = zir_datas[inst].un_node;
    const operand = try sema.resolveInst(inst_data.operand);
    const operand_ty = sema.typeOf(operand);
    return Air.internedToRef(operand_ty.toIntern());
}

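/// Analyzes the block form of `@TypeOf`: the operand body is evaluated in a
/// throwaway child block with `is_typeof` set, its instructions are
/// discarded, and only the resulting type is kept.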
fn zirTypeofBuiltin(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const pl_node = sema.code.instructions.items(.data)[inst].pl_node;
    const extra = sema.code.extraData(Zir.Inst.Block, pl_node.payload_index);
    const body = sema.code.extra[extra.end..][0..extra.data.body_len];

    var child_block: Block = .{
        .parent = block,
        .sema = sema,
        .src_decl = block.src_decl,
        .namespace = block.namespace,
        .wip_capture_scope = block.wip_capture_scope,
        .instructions = .{},
        .inlining = block.inlining,
        .is_comptime = false,
        .is_typeof = true,
        .want_safety = false,
        .error_return_trace_index = block.error_return_trace_index,
    };
    defer child_block.instructions.deinit(sema.gpa);

    const operand = try sema.resolveBody(&child_block, body, inst);
    const operand_ty = sema.typeOf(operand);
    if (operand_ty.isGenericPoison()) return error.GenericPoison;
    return Air.internedToRef(operand_ty.toIntern());
}

fn zirTypeofLog2IntType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
    const src = inst_data.src();
    const operand = try sema.resolveInst(inst_data.operand);
    const operand_ty = sema.typeOf(operand);
    const res_ty = try sema.log2IntType(block, operand_ty, src);
    return Air.internedToRef(res_ty.toIntern());
}

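/// Computes the type used for shift amounts on `operand`: an unsigned
/// integer just wide enough to hold `bits - 1`, i.e. ceil(log2(bits)) bits.
/// For example: u8 -> u3, u9 -> u4, u1 -> u0. `comptime_int` stays
/// `comptime_int`, and vectors are handled elementwise.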
fn log2IntType(sema: *Sema, block: *Block, operand: Type, src: LazySrcLoc) CompileError!Type {
    const mod = sema.mod;
    switch (operand.zigTypeTag(mod)) {
        .ComptimeInt => return Type.comptime_int,
        .Int => {
            const bits = operand.bitSize(mod);
            const count = if (bits == 0)
                0
            else blk: {
                var count: u16 = 0;
                var s = bits - 1;
                while (s != 0) : (s >>= 1) {
                    count += 1;
                }
                break :blk count;
            };
            return mod.intType(.unsigned, count);
        },
        .Vector => {
            const elem_ty = operand.elemType2(mod);
            const log2_elem_ty = try sema.log2IntType(block, elem_ty, src);
            return mod.vectorType(.{
                .len = operand.vectorLen(mod),
                .child = log2_elem_ty.toIntern(),
            });
        },
        else => {},
    }
    return sema.fail(
        block,
        src,
        "bit shifting operation expected integer type, found '{}'",
        .{operand.fmt(mod)},
    );
}

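/// Analyzes the multi-operand form of `@TypeOf`: the argument body is
/// evaluated in a throwaway typeof block (only the resolved `args` refs are
/// kept), then peer type resolution over those values produces the result.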
fn zirTypeofPeer(
    sema: *Sema,
    block: *Block,
    extended: Zir.Inst.Extended.InstData,
) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const extra = sema.code.extraData(Zir.Inst.TypeOfPeer, extended.operand);
    const src = LazySrcLoc.nodeOffset(extra.data.src_node);
    const body = sema.code.extra[extra.data.body_index..][0..extra.data.body_len];

    var child_block: Block = .{
        .parent = block,
        .sema = sema,
        .src_decl = block.src_decl,
        .namespace = block.namespace,
        .wip_capture_scope = block.wip_capture_scope,
        .instructions = .{},
        .inlining = block.inlining,
        .is_comptime = false,
        .is_typeof = true,
        .runtime_cond = block.runtime_cond,
        .runtime_loop = block.runtime_loop,
        .runtime_index = block.runtime_index,
    };
    defer child_block.instructions.deinit(sema.gpa);
    // Ignore the result, we only care about the instructions in `args`.
    _ = try sema.analyzeBodyBreak(&child_block, body);

    const args = sema.code.refSlice(extra.end, extended.small);

    const inst_list = try sema.gpa.alloc(Air.Inst.Ref, args.len);
    defer sema.gpa.free(inst_list);

    for (args, 0..) |arg_ref, i| {
        inst_list[i] = try sema.resolveInst(arg_ref);
    }

    const result_type = try sema.resolvePeerTypes(block, src, inst_list, .{ .typeof_builtin_call_node_offset = extra.data.src_node });
    return Air.internedToRef(result_type.toIntern());
}

fn zirBoolNot(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
    const src = inst_data.src();
    const operand_src: LazySrcLoc = .{ .node_offset_un_op = inst_data.src_node };
    const uncasted_operand = try sema.resolveInst(inst_data.operand);

    const operand = try sema.coerce(block, Type.bool, uncasted_operand, operand_src);
    if (try sema.resolveMaybeUndefVal(operand)) |val| {
        return if (val.isUndef(mod))
            mod.undefRef(Type.bool)
        else if (val.toBool())
            Air.Inst.Ref.bool_false
        else
            Air.Inst.Ref.bool_true;
    }
    try sema.requireRuntimeBlock(block, src, null);
    return block.addTyOp(.not, Type.bool, operand);
}

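/// Analyzes short-circuiting `and`/`or`. If the left-hand side is
/// comptime-known, the result is either the short-circuit constant or
/// simply the rhs body. Otherwise this lowers to a `block` containing a
/// `cond_br` that takes the short-circuit constant on one side and
/// evaluates the rhs body on the other.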
fn zirBoolBr(
    sema: *Sema,
    parent_block: *Block,
    inst: Zir.Inst.Index,
    is_bool_or: bool,
) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const mod = sema.mod;
    const datas = sema.code.instructions.items(.data);
    const inst_data = datas[inst].bool_br;
    const lhs = try sema.resolveInst(inst_data.lhs);
    const lhs_src = sema.src;
    const extra = sema.code.extraData(Zir.Inst.Block, inst_data.payload_index);
    const body = sema.code.extra[extra.end..][0..extra.data.body_len];
    const gpa = sema.gpa;

    if (try sema.resolveDefinedValue(parent_block, lhs_src, lhs)) |lhs_val| {
        if (is_bool_or and lhs_val.toBool()) {
            return Air.Inst.Ref.bool_true;
        } else if (!is_bool_or and !lhs_val.toBool()) {
            return Air.Inst.Ref.bool_false;
        }
        // comptime-known left-hand side. No need for a block here; the result
        // is simply the rhs expression. Here we rely on there only being 1
        // break instruction (`break_inline`).
        return sema.resolveBody(parent_block, body, inst);
    }

    const block_inst = @as(Air.Inst.Index, @intCast(sema.air_instructions.len));
    try sema.air_instructions.append(gpa, .{
        .tag = .block,
        .data = .{ .ty_pl = .{
            .ty = .bool_type,
            .payload = undefined,
        } },
    });

    var child_block = parent_block.makeSubBlock();
    child_block.runtime_loop = null;
    child_block.runtime_cond = lhs_src;
    child_block.runtime_index.increment();
    defer child_block.instructions.deinit(gpa);

    var then_block = child_block.makeSubBlock();
    defer then_block.instructions.deinit(gpa);

    var else_block = child_block.makeSubBlock();
    defer else_block.instructions.deinit(gpa);

    const lhs_block = if (is_bool_or) &then_block else &else_block;
    const rhs_block = if (is_bool_or) &else_block else &then_block;

    const lhs_result: Air.Inst.Ref = if (is_bool_or) .bool_true else .bool_false;
    _ = try lhs_block.addBr(block_inst, lhs_result);

    const rhs_result = try sema.resolveBody(rhs_block, body, inst);
    if (!sema.typeOf(rhs_result).isNoReturn(mod)) {
        _ = try rhs_block.addBr(block_inst, rhs_result);
    }

    const result = sema.finishCondBr(parent_block, &child_block, &then_block, &else_block, lhs, block_inst);
    if (!sema.typeOf(rhs_result).isNoReturn(mod)) {
        if (try sema.resolveDefinedValue(rhs_block, sema.src, rhs_result)) |rhs_val| {
            if (is_bool_or and rhs_val.toBool()) {
                return Air.Inst.Ref.bool_true;
            } else if (!is_bool_or and !rhs_val.toBool()) {
                return Air.Inst.Ref.bool_false;
            }
        }
    }

    return result;
}

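/// Finishes the lowering started in `zirBoolBr`: copies the then/else bodies
/// into `air_extra`, appends the `cond_br` to `child_block`, wraps the
/// result up as the body of `block_inst`, and appends that block to
/// `parent_block`.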
fn finishCondBr(
    sema: *Sema,
    parent_block: *Block,
    child_block: *Block,
    then_block: *Block,
    else_block: *Block,
    cond: Air.Inst.Ref,
    block_inst: Air.Inst.Index,
) !Air.Inst.Ref {
    const gpa = sema.gpa;

    try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.CondBr).Struct.fields.len +
        then_block.instructions.items.len + else_block.instructions.items.len +
        @typeInfo(Air.Block).Struct.fields.len + child_block.instructions.items.len + 1);

    const cond_br_payload = sema.addExtraAssumeCapacity(Air.CondBr{
        .then_body_len = @as(u32, @intCast(then_block.instructions.items.len)),
        .else_body_len = @as(u32, @intCast(else_block.instructions.items.len)),
    });
    sema.air_extra.appendSliceAssumeCapacity(then_block.instructions.items);
    sema.air_extra.appendSliceAssumeCapacity(else_block.instructions.items);

    _ = try child_block.addInst(.{ .tag = .cond_br, .data = .{ .pl_op = .{
        .operand = cond,
        .payload = cond_br_payload,
    } } });

    sema.air_instructions.items(.data)[block_inst].ty_pl.payload = sema.addExtraAssumeCapacity(
        Air.Block{ .body_len = @as(u32, @intCast(child_block.instructions.items.len)) },
    );
    sema.air_extra.appendSliceAssumeCapacity(child_block.instructions.items);

    try parent_block.instructions.append(gpa, block_inst);
    return Air.indexToRef(block_inst);
}

fn checkNullableType(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) !void {
    const mod = sema.mod;
    switch (ty.zigTypeTag(mod)) {
        .Optional, .Null, .Undefined => return,
        .Pointer => if (ty.isPtrLikeOptional(mod)) return,
        else => {},
    }
    return sema.failWithExpectedOptionalType(block, src, ty);
}

fn zirIsNonNull(
    sema: *Sema,
    block: *Block,
    inst: Zir.Inst.Index,
) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
    const src = inst_data.src();
    const operand = try sema.resolveInst(inst_data.operand);
    try sema.checkNullableType(block, src, sema.typeOf(operand));
    return sema.analyzeIsNull(block, src, operand, true);
}

fn zirIsNonNullPtr(
    sema: *Sema,
    block: *Block,
    inst: Zir.Inst.Index,
) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
    const src = inst_data.src();
    const ptr = try sema.resolveInst(inst_data.operand);
    try sema.checkNullableType(block, src, sema.typeOf(ptr).elemType2(mod));
    if ((try sema.resolveMaybeUndefVal(ptr)) == null) {
        return block.addUnOp(.is_non_null_ptr, ptr);
    }
    const loaded = try sema.analyzeLoad(block, src, ptr, src);
    return sema.analyzeIsNull(block, src, loaded, true);
}

fn checkErrorType(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) !void {
    const mod = sema.mod;
    switch (ty.zigTypeTag(mod)) {
        .ErrorSet, .ErrorUnion, .Undefined => return,
        else => return sema.fail(block, src, "expected error union type, found '{}'", .{
            ty.fmt(mod),
        }),
    }
}

fn zirIsNonErr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
    const src = inst_data.src();
    const operand = try sema.resolveInst(inst_data.operand);
    try sema.checkErrorType(block, src, sema.typeOf(operand));
    return sema.analyzeIsNonErr(block, src, operand);
}

fn zirIsNonErrPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
    const src = inst_data.src();
    const ptr = try sema.resolveInst(inst_data.operand);
    try sema.checkErrorType(block, src, sema.typeOf(ptr).elemType2(mod));
    const loaded = try sema.analyzeLoad(block, src, ptr, src);
    return sema.analyzeIsNonErr(block, src, loaded);
}

fn zirRetIsNonErr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
    const src = inst_data.src();
    const operand = try sema.resolveInst(inst_data.operand);
    return sema.analyzeIsNonErr(block, src, operand);
}

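/// Analyzes a ZIR `condbr`. With a comptime-known condition only the taken
/// branch is analyzed, inline via `analyzeBodyInner`. Otherwise both bodies
/// are analyzed into a runtime `cond_br` AIR instruction; when the condition
/// is an `is_non_err` check, the error code is extracted with
/// `unwrap_errunion_err` and offered to `maybeErrorUnwrap` for the else body.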
fn zirCondbr(
    sema: *Sema,
    parent_block: *Block,
    inst: Zir.Inst.Index,
) CompileError!Zir.Inst.Index {
    const tracy = trace(@src());
    defer tracy.end();

    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const cond_src: LazySrcLoc = .{ .node_offset_if_cond = inst_data.src_node };
    const extra = sema.code.extraData(Zir.Inst.CondBr, inst_data.payload_index);

    const then_body = sema.code.extra[extra.end..][0..extra.data.then_body_len];
    const else_body = sema.code.extra[extra.end + then_body.len ..][0..extra.data.else_body_len];

    const uncasted_cond = try sema.resolveInst(extra.data.condition);
    const cond = try sema.coerce(parent_block, Type.bool, uncasted_cond, cond_src);

    if (try sema.resolveDefinedValue(parent_block, cond_src, cond)) |cond_val| {
        const body = if (cond_val.toBool()) then_body else else_body;

        try sema.maybeErrorUnwrapCondbr(parent_block, body, extra.data.condition, cond_src);
        // We use `analyzeBodyInner` since we want to propagate any possible
        // `error.ComptimeBreak` to the caller.
        return sema.analyzeBodyInner(parent_block, body);
    }

    const gpa = sema.gpa;

    // We'll re-use the sub block to save on memory bandwidth, and yank out the
    // instructions array in between using it for the then block and else block.
    var sub_block = parent_block.makeSubBlock();
    sub_block.runtime_loop = null;
    sub_block.runtime_cond = cond_src;
    sub_block.runtime_index.increment();
    defer sub_block.instructions.deinit(gpa);

    try sema.analyzeBodyRuntimeBreak(&sub_block, then_body);
    const true_instructions = try sub_block.instructions.toOwnedSlice(gpa);
    defer gpa.free(true_instructions);

    const err_cond = blk: {
        const index = Zir.refToIndex(extra.data.condition) orelse break :blk null;
        if (sema.code.instructions.items(.tag)[index] != .is_non_err) break :blk null;

        const err_inst_data = sema.code.instructions.items(.data)[index].un_node;
        const err_operand = try sema.resolveInst(err_inst_data.operand);
        const operand_ty = sema.typeOf(err_operand);
        assert(operand_ty.zigTypeTag(mod) == .ErrorUnion);
        const result_ty = operand_ty.errorUnionSet(mod);
        break :blk try sub_block.addTyOp(.unwrap_errunion_err, result_ty, err_operand);
    };

    if (err_cond != null and try sema.maybeErrorUnwrap(&sub_block, else_body, err_cond.?, cond_src)) {
        // nothing to do
    } else {
        try sema.analyzeBodyRuntimeBreak(&sub_block, else_body);
    }
    try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.CondBr).Struct.fields.len +
        true_instructions.len + sub_block.instructions.items.len);
    _ = try parent_block.addInst(.{
        .tag = .cond_br,
        .data = .{ .pl_op = .{
            .operand = cond,
            .payload = sema.addExtraAssumeCapacity(Air.CondBr{
                .then_body_len = @as(u32, @intCast(true_instructions.len)),
                .else_body_len = @as(u32, @intCast(sub_block.instructions.items.len)),
            }),
        } },
    });
    sema.air_extra.appendSliceAssumeCapacity(true_instructions);
    sema.air_extra.appendSliceAssumeCapacity(sub_block.instructions.items);
    return always_noreturn;
}

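/// Analyzes a `try` ZIR instruction operating on a value. When the error union
/// is comptime-known, this resolves directly to the payload or the error body;
/// otherwise it emits a runtime `try` AIR instruction.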
fn zirTry(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const src = inst_data.src();
    const operand_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
    const extra = sema.code.extraData(Zir.Inst.Try, inst_data.payload_index);
    const body = sema.code.extra[extra.end..][0..extra.data.body_len];
    const err_union = try sema.resolveInst(extra.data.operand);
    const err_union_ty = sema.typeOf(err_union);
    const mod = sema.mod;
    if (err_union_ty.zigTypeTag(mod) != .ErrorUnion) {
        return sema.fail(parent_block, operand_src, "expected error union type, found '{}'", .{
            err_union_ty.fmt(mod),
        });
    }
    const is_non_err = try sema.analyzeIsNonErrComptimeOnly(parent_block, operand_src, err_union);
    if (is_non_err != .none) {
        const is_non_err_val = (try sema.resolveDefinedValue(parent_block, operand_src, is_non_err)).?;
        if (is_non_err_val.toBool()) {
            return sema.analyzeErrUnionPayload(parent_block, src, err_union_ty, err_union, operand_src, false);
        }
        // We can analyze the body directly in the parent block because we know there are
        // no breaks from the body possible, and that the body is noreturn.
        return sema.resolveBody(parent_block, body, inst);
    }

    var sub_block = parent_block.makeSubBlock();
    defer sub_block.instructions.deinit(sema.gpa);

    // This body is guaranteed to end with noreturn and has no breaks.
    _ = try sema.analyzeBodyInner(&sub_block, body);

    try sema.air_extra.ensureUnusedCapacity(sema.gpa, @typeInfo(Air.Try).Struct.fields.len +
        sub_block.instructions.items.len);
    const try_inst = try parent_block.addInst(.{
        .tag = .@"try",
        .data = .{ .pl_op = .{
            .operand = err_union,
            .payload = sema.addExtraAssumeCapacity(Air.Try{
                .body_len = @as(u32, @intCast(sub_block.instructions.items.len)),
            }),
        } },
    });
    sema.air_extra.appendSliceAssumeCapacity(sub_block.instructions.items);
    return try_inst;
}

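/// Analyzes a `try` ZIR instruction operating through a pointer, emitting a
/// `try_ptr` AIR instruction whose result is a pointer to the payload.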
fn zirTryPtr(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const src = inst_data.src();
    const operand_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
    const extra = sema.code.extraData(Zir.Inst.Try, inst_data.payload_index);
    const body = sema.code.extra[extra.end..][0..extra.data.body_len];
    const operand = try sema.resolveInst(extra.data.operand);
    const err_union = try sema.analyzeLoad(parent_block, src, operand, operand_src);
    const err_union_ty = sema.typeOf(err_union);
    const mod = sema.mod;
    if (err_union_ty.zigTypeTag(mod) != .ErrorUnion) {
        return sema.fail(parent_block, operand_src, "expected error union type, found '{}'", .{
            err_union_ty.fmt(mod),
        });
    }
    const is_non_err = try sema.analyzeIsNonErrComptimeOnly(parent_block, operand_src, err_union);
    if (is_non_err != .none) {
        const is_non_err_val = (try sema.resolveDefinedValue(parent_block, operand_src, is_non_err)).?;
        if (is_non_err_val.toBool()) {
            return sema.analyzeErrUnionPayloadPtr(parent_block, src, operand, false, false);
        }
        // We can analyze the body directly in the parent block because we know there are
        // no breaks from the body possible, and that the body is noreturn.
        return sema.resolveBody(parent_block, body, inst);
    }

    var sub_block = parent_block.makeSubBlock();
    defer sub_block.instructions.deinit(sema.gpa);

    // This body is guaranteed to end with noreturn and has no breaks.
    _ = try sema.analyzeBodyInner(&sub_block, body);

    const operand_ty = sema.typeOf(operand);
    const ptr_info = operand_ty.ptrInfo(mod);
    const res_ty = try mod.ptrType(.{
        .child = err_union_ty.errorUnionPayload(mod).toIntern(),
        .flags = .{
            .is_const = ptr_info.flags.is_const,
            .is_volatile = ptr_info.flags.is_volatile,
            .is_allowzero = ptr_info.flags.is_allowzero,
            .address_space = ptr_info.flags.address_space,
        },
    });
    const res_ty_ref = Air.internedToRef(res_ty.toIntern());
    try sema.air_extra.ensureUnusedCapacity(sema.gpa, @typeInfo(Air.TryPtr).Struct.fields.len +
        sub_block.instructions.items.len);
    const try_inst = try parent_block.addInst(.{
        .tag = .try_ptr,
        .data = .{ .ty_pl = .{
            .ty = res_ty_ref,
            .payload = sema.addExtraAssumeCapacity(Air.TryPtr{
                .ptr = operand,
                .body_len = @as(u32, @intCast(sub_block.instructions.items.len)),
            }),
        } },
    });
    sema.air_extra.appendSliceAssumeCapacity(sub_block.instructions.items);
    return try_inst;
}

// A `break` statement is inside a runtime condition, but is trying to
// break from an inline loop. In such a case, we must convert it to
// a runtime break.
fn addRuntimeBreak(sema: *Sema, child_block: *Block, break_data: BreakData) !void {
    const gop = sema.inst_map.getOrPutAssumeCapacity(break_data.block_inst);
    const labeled_block = if (!gop.found_existing) blk: {
        try sema.post_hoc_blocks.ensureUnusedCapacity(sema.gpa, 1);

        const new_block_inst = @as(Air.Inst.Index, @intCast(sema.air_instructions.len));
        gop.value_ptr.* = Air.indexToRef(new_block_inst);
        try sema.air_instructions.append(sema.gpa, .{
            .tag = .block,
            .data = undefined,
        });
        const labeled_block = try sema.gpa.create(LabeledBlock);
        labeled_block.* = .{
            .label = .{
                .zir_block = break_data.block_inst,
                .merges = .{
                    .src_locs = .{},
                    .results = .{},
                    .br_list = .{},
                    .block_inst = new_block_inst,
                },
            },
            .block = .{
                .parent = child_block,
                .sema = sema,
                .src_decl = child_block.src_decl,
                .namespace = child_block.namespace,
                .wip_capture_scope = child_block.wip_capture_scope,
                .instructions = .{},
                .label = &labeled_block.label,
                .inlining = child_block.inlining,
                .is_comptime = child_block.is_comptime,
            },
        };
        sema.post_hoc_blocks.putAssumeCapacityNoClobber(new_block_inst, labeled_block);
        break :blk labeled_block;
    } else blk: {
        const new_block_inst = Air.refToIndex(gop.value_ptr.*).?;
        const labeled_block = sema.post_hoc_blocks.get(new_block_inst).?;
        break :blk labeled_block;
    };

    const operand = try sema.resolveInst(break_data.operand);
    const br_ref = try child_block.addBr(labeled_block.label.merges.block_inst, operand);
    try labeled_block.label.merges.results.append(sema.gpa, operand);
    try labeled_block.label.merges.br_list.append(sema.gpa, Air.refToIndex(br_ref).?);
    labeled_block.block.runtime_index.increment();
    if (labeled_block.block.runtime_cond == null and labeled_block.block.runtime_loop == null) {
        labeled_block.block.runtime_cond = child_block.runtime_cond orelse child_block.runtime_loop;
        labeled_block.block.runtime_loop = child_block.runtime_loop;
    }
}

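/// Analyzes an `unreachable` ZIR instruction, failing at comptime and emitting
/// a (possibly safety-checked) unreachable AIR instruction at runtime.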
fn zirUnreachable(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Zir.Inst.Index {
    const inst_data = sema.code.instructions.items(.data)[inst].@"unreachable";
    const src = inst_data.src();

    if (block.is_comptime) {
        return sema.fail(block, src, "reached unreachable code", .{});
    }
    // TODO Add compile error for @optimizeFor occurring too late in a scope.
    block.addUnreachable(src, true) catch |err| switch (err) {
        error.AnalysisFail => {
            const msg = sema.err orelse return err;
            if (!mem.eql(u8, msg.msg, "runtime safety check not allowed in naked function")) return err;
            try sema.errNote(block, src, msg, "the end of a naked function is implicitly unreachable", .{});
            return err;
        },
        else => |e| return e,
    };
    return always_noreturn;
}

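/// Analyzes a `ret_err_value` ZIR instruction: returns a single error value
/// from the function, interning it into a single-error error set type.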
fn zirRetErrValue(
    sema: *Sema,
    block: *Block,
    inst: Zir.Inst.Index,
) CompileError!Zir.Inst.Index {
    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[inst].str_tok;
    const err_name = try mod.intern_pool.getOrPutString(sema.gpa, inst_data.get(sema.code));
    _ = try mod.getErrorValue(err_name);
    const src = inst_data.src();
    // Return the error code from the function.
    const error_set_type = try mod.singleErrorSetType(err_name);
    const result_inst = Air.internedToRef((try mod.intern(.{ .err = .{
        .ty = error_set_type.toIntern(),
        .name = err_name,
    } })));
    return sema.analyzeRet(block, result_inst, src);
}

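/// Analyzes the implicit return at the end of a function body, emitting a
/// compile error when control flow reaches the end of a non-void function.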
fn zirRetImplicit(
    sema: *Sema,
    block: *Block,
    inst: Zir.Inst.Index,
) CompileError!Zir.Inst.Index {
    const tracy = trace(@src());
    defer tracy.end();

    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[inst].un_tok;
    const r_brace_src = inst_data.src();
    if (block.inlining == null and sema.func_is_naked) {
        assert(!block.is_comptime);
        if (block.wantSafety()) {
            // Calling a safety function from a naked function would not be legal.
            _ = try block.addNoOp(.trap);
        } else {
            try block.addUnreachable(r_brace_src, false);
        }
        return always_noreturn;
    }

    const operand = try sema.resolveInst(inst_data.operand);
    const ret_ty_src: LazySrcLoc = .{ .node_offset_fn_type_ret_ty = 0 };
    const base_tag = sema.fn_ret_ty.baseZigTypeTag(mod);
    if (base_tag == .NoReturn) {
        const msg = msg: {
            const msg = try sema.errMsg(block, ret_ty_src, "function declared '{}' implicitly returns", .{
                sema.fn_ret_ty.fmt(mod),
            });
            errdefer msg.destroy(sema.gpa);
            try sema.errNote(block, r_brace_src, msg, "control flow reaches end of body here", .{});
            break :msg msg;
        };
        return sema.failWithOwnedErrorMsg(msg);
    } else if (base_tag != .Void) {
        const msg = msg: {
            const msg = try sema.errMsg(block, ret_ty_src, "function with non-void return type '{}' implicitly returns", .{
                sema.fn_ret_ty.fmt(mod),
            });
            errdefer msg.destroy(sema.gpa);
            try sema.errNote(block, r_brace_src, msg, "control flow reaches end of body here", .{});
            break :msg msg;
        };
        return sema.failWithOwnedErrorMsg(msg);
    }

    return sema.analyzeRet(block, operand, r_brace_src);
}

fn zirRetNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Zir.Inst.Index {
    const tracy = trace(@src());
    defer tracy.end();

    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
    const operand = try sema.resolveInst(inst_data.operand);
    const src = inst_data.src();

    return sema.analyzeRet(block, operand, src);
}

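/// Analyzes a `ret_load` ZIR instruction: returns by loading from the result
/// pointer, or by emitting a `ret_load` AIR instruction at runtime.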
fn zirRetLoad(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Zir.Inst.Index {
    const tracy = trace(@src());
    defer tracy.end();

    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
    const src = inst_data.src();
    const ret_ptr = try sema.resolveInst(inst_data.operand);

    if (block.is_comptime or block.inlining != null or sema.func_is_naked) {
        const operand = try sema.analyzeLoad(block, src, ret_ptr, src);
        return sema.analyzeRet(block, operand, src);
    }

    if (sema.wantErrorReturnTracing(sema.fn_ret_ty)) {
        const is_non_err = try sema.analyzePtrIsNonErr(block, src, ret_ptr);
        return sema.retWithErrTracing(block, src, is_non_err, .ret_load, ret_ptr);
    }

    _ = try block.addUnOp(.ret_load, ret_ptr);
    return always_noreturn;
}

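/// Emits a return, calling the `returnError` builtin to record a frame in the
/// error return trace when the returned value is an error. Branches at runtime
/// unless `is_non_err` is comptime-known.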
fn retWithErrTracing(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    is_non_err: Air.Inst.Ref,
    ret_tag: Air.Inst.Tag,
    operand: Air.Inst.Ref,
) CompileError!Zir.Inst.Index {
    const mod = sema.mod;
    const need_check = switch (is_non_err) {
        .bool_true => {
            _ = try block.addUnOp(ret_tag, operand);
            return always_noreturn;
        },
        .bool_false => false,
        else => true,
    };
    const gpa = sema.gpa;
    const stack_trace_ty = try sema.getBuiltinType("StackTrace");
    try sema.resolveTypeFields(stack_trace_ty);
    const ptr_stack_trace_ty = try mod.singleMutPtrType(stack_trace_ty);
    const err_return_trace = try block.addTy(.err_return_trace, ptr_stack_trace_ty);
    const return_err_fn = try sema.getBuiltin("returnError");
    const args: [1]Air.Inst.Ref = .{err_return_trace};

    if (!need_check) {
        try sema.callBuiltin(block, src, return_err_fn, .never_inline, &args, .@"error return");
        _ = try block.addUnOp(ret_tag, operand);
        return always_noreturn;
    }

    var then_block = block.makeSubBlock();
    defer then_block.instructions.deinit(gpa);
    _ = try then_block.addUnOp(ret_tag, operand);

    var else_block = block.makeSubBlock();
    defer else_block.instructions.deinit(gpa);
    try sema.callBuiltin(&else_block, src, return_err_fn, .never_inline, &args, .@"error return");
    _ = try else_block.addUnOp(ret_tag, operand);

    try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.CondBr).Struct.fields.len +
        then_block.instructions.items.len + else_block.instructions.items.len +
        @typeInfo(Air.Block).Struct.fields.len + 1);

    const cond_br_payload = sema.addExtraAssumeCapacity(Air.CondBr{
        .then_body_len = @as(u32, @intCast(then_block.instructions.items.len)),
        .else_body_len = @as(u32, @intCast(else_block.instructions.items.len)),
    });
    sema.air_extra.appendSliceAssumeCapacity(then_block.instructions.items);
    sema.air_extra.appendSliceAssumeCapacity(else_block.instructions.items);

    _ = try block.addInst(.{ .tag = .cond_br, .data = .{ .pl_op = .{
        .operand = is_non_err,
        .payload = cond_br_payload,
    } } });

    return always_noreturn;
}

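/// Returns whether returns from a function with return type `fn_ret_ty` should
/// maintain the error return trace, given the backend and build options.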
fn wantErrorReturnTracing(sema: *Sema, fn_ret_ty: Type) bool {
    const mod = sema.mod;
    if (!mod.backendSupportsFeature(.error_return_trace)) return false;

    return fn_ret_ty.isError(mod) and
        mod.comp.bin_file.options.error_return_tracing;
}

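/// Analyzes a `save_err_ret_index` ZIR instruction, capturing the current
/// error return trace index into the block when error tracing is active.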
fn zirSaveErrRetIndex(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[inst].save_err_ret_index;

    if (!mod.backendSupportsFeature(.error_return_trace)) return;
    if (!mod.comp.bin_file.options.error_return_tracing) return;

    // This is only relevant at runtime.
    if (block.is_comptime or block.is_typeof) return;

    const save_index = inst_data.operand == .none or b: {
        const operand = try sema.resolveInst(inst_data.operand);
        const operand_ty = sema.typeOf(operand);
        break :b operand_ty.isError(mod);
    };

    if (save_index)
        block.error_return_trace_index = try sema.analyzeSaveErrRetIndex(block);
}

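/// Analyzes a `restore_err_ret_index` ZIR instruction, popping the error
/// return trace back to the index saved by the target block (or by the
/// function entry) when no error is being propagated.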
fn zirRestoreErrRetIndex(sema: *Sema, start_block: *Block, inst: Zir.Inst.Index) CompileError!void {
    const inst_data = sema.code.instructions.items(.data)[inst].restore_err_ret_index;
    const src = sema.src; // TODO

    // This is only relevant at runtime.
    if (start_block.is_comptime or start_block.is_typeof) return;

    const mod = sema.mod;
    const ip = &mod.intern_pool;

    if (!mod.backendSupportsFeature(.error_return_trace)) return;
    if (!ip.funcAnalysis(sema.owner_func_index).calls_or_awaits_errorable_fn) return;
    if (!mod.comp.bin_file.options.error_return_tracing) return;

    const tracy = trace(@src());
    defer tracy.end();

    const saved_index = if (Zir.refToIndexAllowNone(inst_data.block)) |zir_block| b: {
        var block = start_block;
        while (true) {
            if (block.label) |label| {
                if (label.zir_block == zir_block) {
                    const target_trace_index = if (block.parent) |parent_block| tgt: {
                        break :tgt parent_block.error_return_trace_index;
                    } else sema.error_return_trace_index_on_fn_entry;

                    if (start_block.error_return_trace_index != target_trace_index)
                        break :b target_trace_index;

                    return; // No need to restore
                }
            }
            block = block.parent.?;
        }
    } else b: {
        if (start_block.error_return_trace_index != sema.error_return_trace_index_on_fn_entry)
            break :b sema.error_return_trace_index_on_fn_entry;

        return; // No need to restore
    };

    assert(saved_index != .none); // The .error_return_trace_index field was dropped somewhere

    const operand = try sema.resolveInstAllowNone(inst_data.operand);
    return sema.popErrorReturnTrace(start_block, src, operand, saved_index);
}

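/// Adds the error set of `uncasted_operand` to the inferred error set of the
/// function currently being analyzed. Asserts the return type is an error union.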
fn addToInferredErrorSet(sema: *Sema, uncasted_operand: Air.Inst.Ref) !void {
    const mod = sema.mod;
    const ip = &mod.intern_pool;
    assert(sema.fn_ret_ty.zigTypeTag(mod) == .ErrorUnion);
    const err_set_ty = sema.fn_ret_ty.errorUnionSet(mod).toIntern();
    switch (err_set_ty) {
        .adhoc_inferred_error_set_type => {
            const ies = sema.fn_ret_ty_ies.?;
            assert(ies.func == .none);
            try addToInferredErrorSetPtr(mod, ies, sema.typeOf(uncasted_operand));
        },
        else => if (ip.isInferredErrorSetType(err_set_ty)) {
            const ies = sema.fn_ret_ty_ies.?;
            assert(ies.func == sema.func_index);
            try addToInferredErrorSetPtr(mod, ies, sema.typeOf(uncasted_operand));
        },
    }
}

fn addToInferredErrorSetPtr(mod: *Module, ies: *InferredErrorSet, op_ty: Type) !void {
    const gpa = mod.gpa;
    const ip = &mod.intern_pool;
    switch (op_ty.zigTypeTag(mod)) {
        .ErrorSet => try ies.addErrorSet(op_ty, ip, gpa),
        .ErrorUnion => try ies.addErrorSet(op_ty.errorUnionSet(mod), ip, gpa),
        else => {},
    }
}

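/// Common implementation of the `ret_*` ZIR instructions: coerces the operand
/// to the return type and lowers the return for inline calls, comptime calls,
/// naked functions, and normal runtime returns.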
fn analyzeRet(
    sema: *Sema,
    block: *Block,
    uncasted_operand: Air.Inst.Ref,
    src: LazySrcLoc,
) CompileError!Zir.Inst.Index {
    // Special case for returning an error to an inferred error set; we need to
    // add the error tag to the inferred error set of the in-scope function, so
    // that the coercion below works correctly.
    const mod = sema.mod;
    if (sema.fn_ret_ty_ies != null and sema.fn_ret_ty.zigTypeTag(mod) == .ErrorUnion) {
        try sema.addToInferredErrorSet(uncasted_operand);
    }
    const operand = sema.coerceExtra(block, sema.fn_ret_ty, uncasted_operand, src, .{ .is_ret = true }) catch |err| switch (err) {
        error.NotCoercible => unreachable,
        else => |e| return e,
    };

    if (block.inlining) |inlining| {
        if (block.is_comptime) {
            _ = try sema.resolveConstMaybeUndefVal(block, src, operand, "value being returned at comptime must be comptime-known");
            inlining.comptime_result = operand;
            return error.ComptimeReturn;
        }
        // We are inlining a function call; rewrite the `ret` as a `break`.
        try inlining.merges.results.append(sema.gpa, operand);
        _ = try block.addBr(inlining.merges.block_inst, operand);
        return always_noreturn;
    } else if (block.is_comptime) {
        return sema.fail(block, src, "function called at runtime cannot return value at comptime", .{});
    } else if (sema.func_is_naked) {
        const msg = msg: {
            const msg = try sema.errMsg(block, src, "cannot return from naked function", .{});
            errdefer msg.destroy(sema.gpa);

            try sema.errNote(block, src, msg, "can only return using assembly", .{});
            break :msg msg;
        };
        return sema.failWithOwnedErrorMsg(msg);
    }

    try sema.resolveTypeLayout(sema.fn_ret_ty);

    if (sema.wantErrorReturnTracing(sema.fn_ret_ty)) {
        // Avoid adding a frame to the error return trace in case the value is comptime-known
        // to be not an error.
        const is_non_err = try sema.analyzeIsNonErr(block, src, operand);
        return sema.retWithErrTracing(block, src, is_non_err, .ret, operand);
    }

    _ = try block.addUnOp(.ret, operand);

    return always_noreturn;
}

fn floatOpAllowed(tag: Zir.Inst.Tag) bool {
    // Extend this switch as additional operators are implemented.
    return switch (tag) {
        .add, .sub, .mul, .div, .div_exact, .div_trunc, .div_floor, .mod, .rem, .mod_rem => true,
        else => false,
    };
}

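/// Analyzes a `ptr_type` ZIR instruction, resolving the element type, sentinel,
/// alignment, address space, and bit range, and validating the combination
/// before interning the resulting pointer type.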
fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[inst].ptr_type;
    const extra = sema.code.extraData(Zir.Inst.PtrType, inst_data.payload_index);
    const elem_ty_src: LazySrcLoc = .{ .node_offset_ptr_elem = extra.data.src_node };
    const sentinel_src: LazySrcLoc = .{ .node_offset_ptr_sentinel = extra.data.src_node };
    const align_src: LazySrcLoc = .{ .node_offset_ptr_align = extra.data.src_node };
    const addrspace_src: LazySrcLoc = .{ .node_offset_ptr_addrspace = extra.data.src_node };
    const bitoffset_src: LazySrcLoc = .{ .node_offset_ptr_bitoffset = extra.data.src_node };
    const hostsize_src: LazySrcLoc = .{ .node_offset_ptr_hostsize = extra.data.src_node };

    const elem_ty = blk: {
        const air_inst = try sema.resolveInst(extra.data.elem_type);
        const ty = sema.analyzeAsType(block, elem_ty_src, air_inst) catch |err| {
            if (err == error.AnalysisFail and sema.err != null and sema.typeOf(air_inst).isSinglePointer(mod)) {
                try sema.errNote(block, elem_ty_src, sema.err.?, "use '.*' to dereference pointer", .{});
            }
            return err;
        };
        if (ty.isGenericPoison()) return error.GenericPoison;
        break :blk ty;
    };

    if (elem_ty.zigTypeTag(mod) == .NoReturn)
        return sema.fail(block, elem_ty_src, "pointer to noreturn not allowed", .{});

    const target = mod.getTarget();

    var extra_i = extra.end;

    const sentinel = if (inst_data.flags.has_sentinel) blk: {
        const ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_i]));
        extra_i += 1;
        const coerced = try sema.coerce(block, elem_ty, try sema.resolveInst(ref), sentinel_src);
        const val = try sema.resolveConstValue(block, sentinel_src, coerced, "pointer sentinel value must be comptime-known");
        break :blk val.toIntern();
    } else .none;

    const abi_align: Alignment = if (inst_data.flags.has_align) blk: {
        const ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_i]));
        extra_i += 1;
        const coerced = try sema.coerce(block, Type.u32, try sema.resolveInst(ref), align_src);
        const val = try sema.resolveConstValue(block, align_src, coerced, "pointer alignment must be comptime-known");
        // Check if this happens to be the lazy alignment of our element type, in
        // which case we can make this 0 without resolving it.
        switch (mod.intern_pool.indexToKey(val.toIntern())) {
            .int => |int| switch (int.storage) {
                .lazy_align => |lazy_ty| if (lazy_ty == elem_ty.toIntern()) break :blk .none,
                else => {},
            },
            else => {},
        }
        const abi_align = @as(u32, @intCast((try val.getUnsignedIntAdvanced(mod, sema)).?));
        try sema.validateAlign(block, align_src, abi_align);
        break :blk Alignment.fromByteUnits(abi_align);
    } else .none;

    const address_space: std.builtin.AddressSpace = if (inst_data.flags.has_addrspace) blk: {
        const ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_i]));
        extra_i += 1;
        break :blk try sema.analyzeAddressSpace(block, addrspace_src, ref, .pointer);
    } else if (elem_ty.zigTypeTag(mod) == .Fn and target.cpu.arch == .avr) .flash else .generic;

    const bit_offset = if (inst_data.flags.has_bit_range) blk: {
        const ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_i]));
        extra_i += 1;
        const bit_offset = try sema.resolveInt(block, bitoffset_src, ref, Type.u16, "pointer bit-offset must be comptime-known");
        break :blk @as(u16, @intCast(bit_offset));
    } else 0;

    const host_size: u16 = if (inst_data.flags.has_bit_range) blk: {
        const ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_i]));
        extra_i += 1;
        const host_size = try sema.resolveInt(block, hostsize_src, ref, Type.u16, "pointer host size must be comptime-known");
        break :blk @as(u16, @intCast(host_size));
    } else 0;

    if (host_size != 0 and bit_offset >= host_size * 8) {
        return sema.fail(block, bitoffset_src, "bit offset starts after end of host integer", .{});
    }

    if (elem_ty.zigTypeTag(mod) == .Fn) {
        if (inst_data.size != .One) {
            return sema.fail(block, elem_ty_src, "function pointers must be single pointers", .{});
        }
        const fn_align = mod.typeToFunc(elem_ty).?.alignment;
        if (inst_data.flags.has_align and abi_align != .none and fn_align != .none and
            abi_align != fn_align)
        {
            return sema.fail(block, align_src, "function pointer alignment disagrees with function alignment", .{});
        }
    } else if (inst_data.size == .Many and elem_ty.zigTypeTag(mod) == .Opaque) {
        return sema.fail(block, elem_ty_src, "unknown-length pointer to opaque not allowed", .{});
    } else if (inst_data.size == .C) {
        if (!try sema.validateExternType(elem_ty, .other)) {
            const msg = msg: {
                const msg = try sema.errMsg(block, elem_ty_src, "C pointers cannot point to non-C-ABI-compatible type '{}'", .{elem_ty.fmt(mod)});
                errdefer msg.destroy(sema.gpa);

                const src_decl = mod.declPtr(block.src_decl);
                try sema.explainWhyTypeIsNotExtern(msg, elem_ty_src.toSrcLoc(src_decl, mod), elem_ty, .other);

                try sema.addDeclaredHereNote(msg, elem_ty);
                break :msg msg;
            };
            return sema.failWithOwnedErrorMsg(msg);
        }
        if (elem_ty.zigTypeTag(mod) == .Opaque) {
            return sema.fail(block, elem_ty_src, "C pointers cannot point to opaque types", .{});
        }
    }

    const ty = try mod.ptrType(.{
        .child = elem_ty.toIntern(),
        .sentinel = sentinel,
        .flags = .{
            .alignment = abi_align,
            .address_space = address_space,
            .is_const = !inst_data.flags.is_mutable,
            .is_allowzero = inst_data.flags.is_allowzero,
            .is_volatile = inst_data.flags.is_volatile,
            .size = inst_data.size,
        },
        .packed_offset = .{
            .bit_offset = bit_offset,
            .host_size = host_size,
        },
    });
    return Air.internedToRef(ty.toIntern());
}

fn zirStructInitEmpty(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
    const src = inst_data.src();
    const obj_ty = try sema.resolveType(block, src, inst_data.operand);
    const mod = sema.mod;

    switch (obj_ty.zigTypeTag(mod)) {
        .Struct => return sema.structInitEmpty(block, obj_ty, src, src),
        .Array, .Vector => return sema.arrayInitEmpty(block, src, obj_ty),
        .Void => return Air.internedToRef(Value.void.toIntern()),
        .Union => return sema.fail(block, src, "union initializer must initialize one field", .{}),
        else => return sema.failWithArrayInitNotSupported(block, src, obj_ty),
    }
}

fn structInitEmpty(
    sema: *Sema,
    block: *Block,
    struct_ty: Type,
    dest_src: LazySrcLoc,
    init_src: LazySrcLoc,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const gpa = sema.gpa;
    // This logic must be synchronized with that in `zirStructInit`.
    try sema.resolveTypeFields(struct_ty);

    // The init values to use for the struct instance.
    const field_inits = try gpa.alloc(Air.Inst.Ref, struct_ty.structFieldCount(mod));
    defer gpa.free(field_inits);
    @memset(field_inits, .none);

    return sema.finishStructInit(block, init_src, dest_src, field_inits, struct_ty, false);
}

fn arrayInitEmpty(sema: *Sema, block: *Block, src: LazySrcLoc, obj_ty: Type) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const arr_len = obj_ty.arrayLen(mod);
    if (arr_len != 0) {
        if (obj_ty.zigTypeTag(mod) == .Array) {
            return sema.fail(block, src, "expected {d} array elements; found 0", .{arr_len});
        } else {
            return sema.fail(block, src, "expected {d} vector elements; found 0", .{arr_len});
        }
    }
    return Air.internedToRef((try mod.intern(.{ .aggregate = .{
        .ty = obj_ty.toIntern(),
        .storage = .{ .elems = &.{} },
    } })));
}

fn zirUnionInit(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    const field_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
    const init_src: LazySrcLoc = .{ .node_offset_builtin_call_arg2 = inst_data.src_node };
    const extra = sema.code.extraData(Zir.Inst.UnionInit, inst_data.payload_index).data;
    const union_ty = try sema.resolveType(block, ty_src, extra.union_type);
    if (union_ty.zigTypeTag(sema.mod) != .Union) {
        const msg = msg: {
            const msg = try sema.errMsg(block, ty_src, "expected union type, found '{}'", .{union_ty.fmt(sema.mod)});
            errdefer msg.destroy(sema.gpa);
            try sema.addDeclaredHereNote(msg, union_ty);
            break :msg msg;
        };
        return sema.failWithOwnedErrorMsg(msg);
    }
    const field_name = try sema.resolveConstStringIntern(block, field_src, extra.field_name, "name of field being initialized must be comptime-known");
    const init = try sema.resolveInst(extra.init);
    return sema.unionInit(block, init, init_src, union_ty, ty_src, field_name, field_src);
}

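/// Coerces the initializer to the field type and produces either a
/// comptime-known union value or a runtime union initialization instruction.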
fn unionInit(
    sema: *Sema,
    block: *Block,
    uncasted_init: Air.Inst.Ref,
    init_src: LazySrcLoc,
    union_ty: Type,
    union_ty_src: LazySrcLoc,
    field_name: InternPool.NullTerminatedString,
    field_src: LazySrcLoc,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const field_index = try sema.unionFieldIndex(block, union_ty, field_name, field_src);
    const field = union_ty.unionFields(mod).values()[field_index];
    const init = try sema.coerce(block, field.ty, uncasted_init, init_src);

    if (try sema.resolveMaybeUndefVal(init)) |init_val| {
        const tag_ty = union_ty.unionTagTypeHypothetical(mod);
        const enum_field_index = @as(u32, @intCast(tag_ty.enumFieldIndex(field_name, mod).?));
        const tag_val = try mod.enumValueFieldIndex(tag_ty, enum_field_index);
        return Air.internedToRef((try mod.intern(.{ .un = .{
            .ty = union_ty.toIntern(),
            .tag = try tag_val.intern(tag_ty, mod),
            .val = try init_val.intern(field.ty, mod),
        } })));
    }

    try sema.requireRuntimeBlock(block, init_src, null);
    _ = union_ty_src;
    try sema.queueFullTypeResolution(union_ty);
    return block.addUnionInit(union_ty, field_index, init);
}

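/// Analyzes a `struct_init` ZIR instruction for both struct and union types,
/// checking for duplicate and comptime fields, then delegating to
/// `finishStructInit` or union initialization.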
fn zirStructInit(
    sema: *Sema,
    block: *Block,
    inst: Zir.Inst.Index,
    is_ref: bool,
) CompileError!Air.Inst.Ref {
    const gpa = sema.gpa;
    const zir_datas = sema.code.instructions.items(.data);
    const inst_data = zir_datas[inst].pl_node;
    const extra = sema.code.extraData(Zir.Inst.StructInit, inst_data.payload_index);
    const src = inst_data.src();

    const mod = sema.mod;
    const first_item = sema.code.extraData(Zir.Inst.StructInit.Item, extra.end).data;
    const first_field_type_data = zir_datas[first_item.field_type].pl_node;
    const first_field_type_extra = sema.code.extraData(Zir.Inst.FieldType, first_field_type_data.payload_index).data;
    const resolved_ty = sema.resolveType(block, src, first_field_type_extra.container_type) catch |err| switch (err) {
        error.GenericPoison => {
            // The type wasn't actually known, so treat this as an anon struct init.
            return sema.structInitAnon(block, src, .typed_init, extra.data, extra.end, is_ref);
        },
        else => |e| return e,
    };
    try sema.resolveTypeLayout(resolved_ty);

    if (resolved_ty.zigTypeTag(mod) == .Struct) {
        // This logic must be synchronized with that in `zirStructInitEmpty`.

        // Maps field index to field_type index of where it was already initialized.
        // For making sure all fields are accounted for and no fields are duplicated.
        const found_fields = try gpa.alloc(Zir.Inst.Index, resolved_ty.structFieldCount(mod));
        defer gpa.free(found_fields);

        // The init values to use for the struct instance.
        const field_inits = try gpa.alloc(Air.Inst.Ref, resolved_ty.structFieldCount(mod));
        defer gpa.free(field_inits);
        @memset(field_inits, .none);

        var field_i: u32 = 0;
        var extra_index = extra.end;

        const is_packed = resolved_ty.containerLayout(mod) == .Packed;
        while (field_i < extra.data.fields_len) : (field_i += 1) {
            const item = sema.code.extraData(Zir.Inst.StructInit.Item, extra_index);
            extra_index = item.end;

            const field_type_data = zir_datas[item.data.field_type].pl_node;
            const field_src: LazySrcLoc = .{ .node_offset_initializer = field_type_data.src_node };
            const field_type_extra = sema.code.extraData(Zir.Inst.FieldType, field_type_data.payload_index).data;
            const field_name = try mod.intern_pool.getOrPutString(gpa, sema.code.nullTerminatedString(field_type_extra.name_start));
            const field_index = if (resolved_ty.isTuple(mod))
                try sema.tupleFieldIndex(block, resolved_ty, field_name, field_src)
            else
                try sema.structFieldIndex(block, resolved_ty, field_name, field_src);
            if (field_inits[field_index] != .none) {
                const other_field_type = found_fields[field_index];
                const other_field_type_data = zir_datas[other_field_type].pl_node;
                const other_field_src: LazySrcLoc = .{ .node_offset_initializer = other_field_type_data.src_node };
                const msg = msg: {
                    const msg = try sema.errMsg(block, field_src, "duplicate field", .{});
                    errdefer msg.destroy(gpa);
                    try sema.errNote(block, other_field_src, msg, "other field here", .{});
                    break :msg msg;
                };
                return sema.failWithOwnedErrorMsg(msg);
            }
            found_fields[field_index] = item.data.field_type;
            field_inits[field_index] = try sema.resolveInst(item.data.init);
            if (!is_packed) if (try resolved_ty.structFieldValueComptime(mod, field_index)) |default_value| {
                const init_val = (try sema.resolveMaybeUndefVal(field_inits[field_index])) orelse {
                    return sema.failWithNeededComptime(block, field_src, "value stored in comptime field must be comptime-known");
                };

                if (!init_val.eql(default_value, resolved_ty.structFieldType(field_index, mod), mod)) {
                    return sema.failWithInvalidComptimeFieldStore(block, field_src, resolved_ty, field_index);
                }
            };
        }

        return sema.finishStructInit(block, src, src, field_inits, resolved_ty, is_ref);
    } else if (resolved_ty.zigTypeTag(mod) == .Union) {
        if (extra.data.fields_len != 1) {
            return sema.fail(block, src, "union initialization expects exactly one field", .{});
        }

        const item = sema.code.extraData(Zir.Inst.StructInit.Item, extra.end);

        const field_type_data = zir_datas[item.data.field_type].pl_node;
        const field_src: LazySrcLoc = .{ .node_offset_initializer = field_type_data.src_node };
        const field_type_extra = sema.code.extraData(Zir.Inst.FieldType, field_type_data.payload_index).data;
        const field_name = try mod.intern_pool.getOrPutString(gpa, sema.code.nullTerminatedString(field_type_extra.name_start));
        const field_index = try sema.unionFieldIndex(block, resolved_ty, field_name, field_src);
        const tag_ty = resolved_ty.unionTagTypeHypothetical(mod);
        const enum_field_index = @as(u32, @intCast(tag_ty.enumFieldIndex(field_name, mod).?));
        const tag_val = try mod.enumValueFieldIndex(tag_ty, enum_field_index);

        const init_inst = try sema.resolveInst(item.data.init);
        if (try sema.resolveMaybeUndefVal(init_inst)) |val| {
            const field = resolved_ty.unionFields(mod).values()[field_index];
            return sema.addConstantMaybeRef(block, resolved_ty, (try mod.intern(.{ .un = .{
                .ty = resolved_ty.toIntern(),
                .tag = try tag_val.intern(tag_ty, mod),
                .val = try val.intern(field.ty, mod),
            } })).toValue(), is_ref);
        }

        if (is_ref) {
            const target = mod.getTarget();
            const alloc_ty = try mod.ptrType(.{
                .child = resolved_ty.toIntern(),
                .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) },
            });
            const alloc = try block.addTy(.alloc, alloc_ty);
            const field_ptr = try sema.unionFieldPtr(block, field_src, alloc, field_name, field_src, resolved_ty, true);
            try sema.storePtr(block, src, field_ptr, init_inst);
            const new_tag = Air.internedToRef(tag_val.toIntern());
            _ = try block.addBinOp(.set_union_tag, alloc, new_tag);
            return sema.makePtrConst(block, alloc);
        }

        try sema.requireRuntimeBlock(block, src, null);
        try sema.queueFullTypeResolution(resolved_ty);
        return block.addUnionInit(resolved_ty, field_index, init_inst);
    } else if (resolved_ty.isAnonStruct(mod)) {
        return sema.fail(block, src, "TODO anon struct init validation", .{});
    }
    unreachable;
}

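/// Finishes a struct initialization after field inits are collected: applies
/// default values, reports missing fields, and produces either a comptime
/// aggregate value or runtime stores / an aggregate init instruction.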
fn finishStructInit(
    sema: *Sema,
    block: *Block,
    init_src: LazySrcLoc,
    dest_src: LazySrcLoc,
    field_inits: []Air.Inst.Ref,
    struct_ty: Type,
    is_ref: bool,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const ip = &mod.intern_pool;

    var root_msg: ?*Module.ErrorMsg = null;
    errdefer if (root_msg) |msg| msg.destroy(sema.gpa);

    switch (ip.indexToKey(struct_ty.toIntern())) {
        .anon_struct_type => |anon_struct| {
            for (anon_struct.values, 0..) |default_val, i| {
                if (field_inits[i] != .none) continue;

                if (default_val == .none) {
                    if (anon_struct.names.len == 0) {
                        const template = "missing tuple field with index {d}";
                        if (root_msg) |msg| {
                            try sema.errNote(block, init_src, msg, template, .{i});
                        } else {
                            root_msg = try sema.errMsg(block, init_src, template, .{i});
                        }
                    } else {
                        const field_name = anon_struct.names[i];
                        const template = "missing struct field: {}";
                        const args = .{field_name.fmt(ip)};
                        if (root_msg) |msg| {
                            try sema.errNote(block, init_src, msg, template, args);
                        } else {
                            root_msg = try sema.errMsg(block, init_src, template, args);
                        }
                    }
                } else {
                    field_inits[i] = Air.internedToRef(default_val);
                }
            }
        },
        .struct_type => |struct_type| {
            const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
            for (struct_obj.fields.values(), 0..) |field, i| {
                if (field_inits[i] != .none) continue;

                if (field.default_val == .none) {
                    const field_name = struct_obj.fields.keys()[i];
                    const template = "missing struct field: {}";
                    const args = .{field_name.fmt(ip)};
                    if (root_msg) |msg| {
                        try sema.errNote(block, init_src, msg, template, args);
                    } else {
                        root_msg = try sema.errMsg(block, init_src, template, args);
                    }
                } else {
                    field_inits[i] = Air.internedToRef(field.default_val);
                }
            }
        },
        else => unreachable,
    }

    if (root_msg) |msg| {
        if (mod.typeToStruct(struct_ty)) |struct_obj| {
            const fqn = try struct_obj.getFullyQualifiedName(mod);
            try mod.errNoteNonLazy(
                struct_obj.srcLoc(mod),
                msg,
                "struct '{}' declared here",
                .{fqn.fmt(ip)},
            );
        }
        root_msg = null;
        return sema.failWithOwnedErrorMsg(msg);
    }

    // Find which field forces the expression to be runtime, if any.
    const opt_runtime_index = for (field_inits, 0..) |field_init, i| {
        if (!(try sema.isComptimeKnown(field_init))) {
            break i;
        }
    } else null;

    const runtime_index = opt_runtime_index orelse {
        const elems = try sema.arena.alloc(InternPool.Index, field_inits.len);
        for (elems, field_inits, 0..) |*elem, field_init, field_i| {
            elem.* = try (sema.resolveMaybeUndefVal(field_init) catch unreachable).?
                .intern(struct_ty.structFieldType(field_i, mod), mod);
        }
        const struct_val = try mod.intern(.{ .aggregate = .{
            .ty = struct_ty.toIntern(),
            .storage = .{ .elems = elems },
        } });
        return sema.addConstantMaybeRef(block, struct_ty, struct_val.toValue(), is_ref);
    };

    if (is_ref) {
        try sema.resolveStructLayout(struct_ty);
        const target = sema.mod.getTarget();
        const alloc_ty = try mod.ptrType(.{
            .child = struct_ty.toIntern(),
            .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) },
        });
        const alloc = try block.addTy(.alloc, alloc_ty);
        for (field_inits, 0..) |field_init, i_usize| {
            const i = @as(u32, @intCast(i_usize));
            const field_src = dest_src;
            const field_ptr = try sema.structFieldPtrByIndex(block, dest_src, alloc, i, field_src, struct_ty, true);
            try sema.storePtr(block, dest_src, field_ptr, field_init);
        }

        return sema.makePtrConst(block, alloc);
    }

    sema.requireRuntimeBlock(block, .unneeded, null) catch |err| switch (err) {
        error.NeededSourceLocation => {
            const decl = mod.declPtr(block.src_decl);
            const field_src = mod.initSrc(dest_src.node_offset.x, decl, runtime_index);
            try sema.requireRuntimeBlock(block, dest_src, field_src);
            unreachable;
        },
        else => |e| return e,
    };
    try sema.queueFullTypeResolution(struct_ty);
    return block.addAggregateInit(struct_ty, field_inits);
}

fn zirStructInitAnon(
    sema: *Sema,
    block: *Block,
    inst: Zir.Inst.Index,
    is_ref: bool,
) CompileError!Air.Inst.Ref {
    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const src = inst_data.src();
    const extra = sema.code.extraData(Zir.Inst.StructInitAnon, inst_data.payload_index);
    return sema.structInitAnon(block, src, .anon_init, extra.data, extra.end, is_ref);
}

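/// Analyzes an anonymous struct initialization, inferring an anonymous struct
/// type from the field initializers. Also used for typed inits whose container
/// type turned out to be generic poison.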
fn structInitAnon(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    /// It is possible for a typed struct_init to be downgraded to an anonymous init due to a
    /// generic poison type. In this case, we need to know to interpret the extra data differently.
    comptime kind: enum { anon_init, typed_init },
    extra_data: switch (kind) {
        .anon_init => Zir.Inst.StructInitAnon,
        .typed_init => Zir.Inst.StructInit,
    },
    extra_end: usize,
    is_ref: bool,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const gpa = sema.gpa;
    const zir_datas = sema.code.instructions.items(.data);

    const types = try sema.arena.alloc(InternPool.Index, extra_data.fields_len);
    const values = try sema.arena.alloc(InternPool.Index, types.len);

    var fields = std.AutoArrayHashMap(InternPool.NullTerminatedString, u32).init(sema.arena);
    try fields.ensureUnusedCapacity(types.len);

    // Find which field forces the expression to be runtime, if any.
    const opt_runtime_index = rs: {
        var runtime_index: ?usize = null;
        var extra_index = extra_end;
        for (types, 0..) |*field_ty, i_usize| {
            const i: u32 = @intCast(i_usize);
            const item = switch (kind) {
                .anon_init => sema.code.extraData(Zir.Inst.StructInitAnon.Item, extra_index),
                .typed_init => sema.code.extraData(Zir.Inst.StructInit.Item, extra_index),
            };
            extra_index = item.end;

            const name = switch (kind) {
                .anon_init => sema.code.nullTerminatedString(item.data.field_name),
                .typed_init => name: {
                    // `item.data.field_type` references a `field_type` instruction
                    const field_type_data = zir_datas[item.data.field_type].pl_node;
                    const field_type_extra = sema.code.extraData(Zir.Inst.FieldType, field_type_data.payload_index);
                    break :name sema.code.nullTerminatedString(field_type_extra.data.name_start);
                },
            };
            const name_ip = try mod.intern_pool.getOrPutString(gpa, name);
            const gop = fields.getOrPutAssumeCapacity(name_ip);
            if (gop.found_existing) {
                const msg = msg: {
                    const decl = mod.declPtr(block.src_decl);
                    const field_src = mod.initSrc(src.node_offset.x, decl, i);
                    const msg = try sema.errMsg(block, field_src, "duplicate field", .{});
                    errdefer msg.destroy(gpa);

                    const prev_source = mod.initSrc(src.node_offset.x, decl, gop.value_ptr.*);
                    try sema.errNote(block, prev_source, msg, "other field here", .{});
                    break :msg msg;
                };
                return sema.failWithOwnedErrorMsg(msg);
            }
            gop.value_ptr.* = i;

            const init = try sema.resolveInst(item.data.init);
            field_ty.* = sema.typeOf(init).toIntern();
            if (field_ty.toType().zigTypeTag(mod) == .Opaque) {
                const msg = msg: {
                    const decl = mod.declPtr(block.src_decl);
                    const field_src = mod.initSrc(src.node_offset.x, decl, i);
                    const msg = try sema.errMsg(block, field_src, "opaque types have unknown size and therefore cannot be directly embedded in structs", .{});
                    errdefer msg.destroy(sema.gpa);

                    try sema.addDeclaredHereNote(msg, field_ty.toType());
                    break :msg msg;
                };
                return sema.failWithOwnedErrorMsg(msg);
            }
            if (try sema.resolveMaybeUndefVal(init)) |init_val| {
                values[i] = try init_val.intern(field_ty.toType(), mod);
            } else {
                values[i] = .none;
                runtime_index = i;
            }
        }
        break :rs runtime_index;
    };

    const tuple_ty = try mod.intern(.{ .anon_struct_type = .{
        .names = fields.keys(),
        .types = types,
        .values = values,
    } });

    const runtime_index = opt_runtime_index orelse {
        const tuple_val = try mod.intern(.{ .aggregate = .{
            .ty = tuple_ty,
            .storage = .{ .elems = values },
        } });
        return sema.addConstantMaybeRef(block, tuple_ty.toType(), tuple_val.toValue(), is_ref);
    };

    sema.requireRuntimeBlock(block, .unneeded, null) catch |err| switch (err) {
        error.NeededSourceLocation => {
            const decl = mod.declPtr(block.src_decl);
            const field_src = mod.initSrc(src.node_offset.x, decl, runtime_index);
            try sema.requireRuntimeBlock(block, src, field_src);
            unreachable;
        },
        else => |e| return e,
    };

    if (is_ref) {
        const target = mod.getTarget();
        const alloc_ty = try mod.ptrType(.{
            .child = tuple_ty,
            .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) },
        });
        const alloc = try block.addTy(.alloc, alloc_ty);
        var extra_index = extra_end;
        for (types, 0..) |field_ty, i_usize| {
            const i = @as(u32, @intCast(i_usize));
            const item = switch (kind) {
                .anon_init => sema.code.extraData(Zir.Inst.StructInitAnon.Item, extra_index),
                .typed_init => sema.code.extraData(Zir.Inst.StructInit.Item, extra_index),
            };
            extra_index = item.end;

            const field_ptr_ty = try mod.ptrType(.{
                .child = field_ty,
                .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) },
            });
            if (values[i] == .none) {
                const init = try sema.resolveInst(item.data.init);
                const field_ptr = try block.addStructFieldPtr(alloc, i, field_ptr_ty);
                _ = try block.addBinOp(.store, field_ptr, init);
            }
        }

        return sema.makePtrConst(block, alloc);
    }

    const element_refs = try sema.arena.alloc(Air.Inst.Ref, types.len);
    var extra_index = extra_end;
    for (types, 0..) |_, i| {
        const item = switch (kind) {
            .anon_init => sema.code.extraData(Zir.Inst.StructInitAnon.Item, extra_index),
            .typed_init => sema.code.extraData(Zir.Inst.StructInit.Item, extra_index),
        };
        extra_index = item.end;
        element_refs[i] = try sema.resolveInst(item.data.init);
    }

    return block.addAggregateInit(tuple_ty.toType(), element_refs);
}

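/// Analyzes an `array_init` ZIR instruction, coercing each element to the
/// element (or tuple field) type and producing either a comptime aggregate
/// or runtime element stores / an aggregate init instruction.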
fn zirArrayInit(
    sema: *Sema,
    block: *Block,
    inst: Zir.Inst.Index,
    is_ref: bool,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const gpa = sema.gpa;
    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const src = inst_data.src();

    const extra = sema.code.extraData(Zir.Inst.MultiOp, inst_data.payload_index);
    const args = sema.code.refSlice(extra.end, extra.data.operands_len);
    assert(args.len >= 2); // array_ty + at least one element

    const array_ty = sema.resolveType(block, src, args[0]) catch |err| switch (err) {
        error.GenericPoison => {
            // The type wasn't actually known, so treat this as an anon array init.
            return sema.arrayInitAnon(block, src, args[1..], is_ref);
        },
        else => |e| return e,
    };
    const is_tuple = array_ty.zigTypeTag(mod) == .Struct;
    const sentinel_val = array_ty.sentinel(mod);

    const resolved_args = try gpa.alloc(Air.Inst.Ref, args.len - 1 + @intFromBool(sentinel_val != null));
    defer gpa.free(resolved_args);
    for (args[1..], 0..) |arg, i| {
        const resolved_arg = try sema.resolveInst(arg);
        const elem_ty = if (is_tuple)
            array_ty.structFieldType(i, mod)
        else
            array_ty.elemType2(mod);
        resolved_args[i] = sema.coerce(block, elem_ty, resolved_arg, .unneeded) catch |err| switch (err) {
            error.NeededSourceLocation => {
                const decl = mod.declPtr(block.src_decl);
                const elem_src = mod.initSrc(src.node_offset.x, decl, i);
                _ = try sema.coerce(block, elem_ty, resolved_arg, elem_src);
                unreachable;
            },
            else => return err,
        };
        if (is_tuple) if (try array_ty.structFieldValueComptime(mod, i)) |field_val| {
            const init_val = try sema.resolveMaybeUndefVal(resolved_args[i]) orelse {
                const decl = mod.declPtr(block.src_decl);
                const elem_src = mod.initSrc(src.node_offset.x, decl, i);
                return sema.failWithNeededComptime(block, elem_src, "value stored in comptime field must be comptime-known");
            };
            if (!field_val.eql(init_val, elem_ty, mod)) {
                const decl = mod.declPtr(block.src_decl);
                const elem_src = mod.initSrc(src.node_offset.x, decl, i);
                return sema.failWithInvalidComptimeFieldStore(block, elem_src, array_ty, i);
            }
        };
    }

    if (sentinel_val) |some| {
        resolved_args[resolved_args.len - 1] = Air.internedToRef(some.toIntern());
    }

    const opt_runtime_index: ?u32 = for (resolved_args, 0..) |arg, i| {
        const comptime_known = try sema.isComptimeKnown(arg);
        if (!comptime_known) break @as(u32, @intCast(i));
    } else null;

    const runtime_index = opt_runtime_index orelse {
        const elem_vals = try sema.arena.alloc(InternPool.Index, resolved_args.len);
        for (elem_vals, resolved_args, 0..) |*val, arg, i| {
            const elem_ty = if (array_ty.zigTypeTag(mod) == .Struct)
                array_ty.structFieldType(i, mod)
            else
                array_ty.elemType2(mod);
            // We checked that all args are comptime above.
            val.* = try ((sema.resolveMaybeUndefVal(arg) catch unreachable).?).intern(elem_ty, mod);
        }
        return sema.addConstantMaybeRef(block, array_ty, (try mod.intern(.{ .aggregate = .{
            .ty = array_ty.toIntern(),
            .storage = .{ .elems = elem_vals },
        } })).toValue(), is_ref);
    };

    sema.requireRuntimeBlock(block, .unneeded, null) catch |err| switch (err) {
        error.NeededSourceLocation => {
            const decl = mod.declPtr(block.src_decl);
            const elem_src = mod.initSrc(src.node_offset.x, decl, runtime_index);
            try sema.requireRuntimeBlock(block, src, elem_src);
            unreachable;
        },
        else => return err,
    };
    try sema.queueFullTypeResolution(array_ty);

    if (is_ref) {
        const target = mod.getTarget();
        const alloc_ty = try mod.ptrType(.{
            .child = array_ty.toIntern(),
            .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) },
        });
        const alloc = try block.addTy(.alloc, alloc_ty);

        if (array_ty.isTuple(mod)) {
            for (resolved_args, 0..) |arg, i| {
                const elem_ptr_ty = try mod.ptrType(.{
                    .child = array_ty.structFieldType(i, mod).toIntern(),
                    .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) },
                });
                const elem_ptr_ty_ref = Air.internedToRef(elem_ptr_ty.toIntern());

                const index = try mod.intRef(Type.usize, i);
                const elem_ptr = try block.addPtrElemPtrTypeRef(alloc, index, elem_ptr_ty_ref);
                _ = try block.addBinOp(.store, elem_ptr, arg);
            }
            return sema.makePtrConst(block, alloc);
        }

        const elem_ptr_ty = try mod.ptrType(.{
            .child = array_ty.elemType2(mod).toIntern(),
            .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) },
        });
        const elem_ptr_ty_ref = Air.internedToRef(elem_ptr_ty.toIntern());

        for (resolved_args, 0..) |arg, i| {
            const index = try mod.intRef(Type.usize, i);
            const elem_ptr = try block.addPtrElemPtrTypeRef(alloc, index, elem_ptr_ty_ref);
            _ = try block.addBinOp(.store, elem_ptr, arg);
        }
        return sema.makePtrConst(block, alloc);
    }

    return block.addAggregateInit(array_ty, resolved_args);
}

fn zirArrayInitAnon(
    sema: *Sema,
    block: *Block,
    inst: Zir.Inst.Index,
    is_ref: bool,
) CompileError!Air.Inst.Ref {
    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const src = inst_data.src();
    const extra = sema.code.extraData(Zir.Inst.MultiOp, inst_data.payload_index);
    const operands = sema.code.refSlice(extra.end, extra.data.operands_len);
    return sema.arrayInitAnon(block, src, operands, is_ref);
}

fn arrayInitAnon(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    operands: []const Zir.Inst.Ref,
    is_ref: bool,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;

    const types = try sema.arena.alloc(InternPool.Index, operands.len);
    const values = try sema.arena.alloc(InternPool.Index, operands.len);

    const opt_runtime_src = rs: {
        var runtime_src: ?LazySrcLoc = null;
        for (operands, 0..) |operand, i| {
            const operand_src = src; // TODO better source location
            const elem = try sema.resolveInst(operand);
            types[i] = sema.typeOf(elem).toIntern();
            if (types[i].toType().zigTypeTag(mod) == .Opaque) {
                const msg = msg: {
                    const msg = try sema.errMsg(block, operand_src, "opaque types have unknown size and therefore cannot be directly embedded in structs", .{});
                    errdefer msg.destroy(sema.gpa);

                    try sema.addDeclaredHereNote(msg, types[i].toType());
                    break :msg msg;
                };
                return sema.failWithOwnedErrorMsg(msg);
            }
            if (try sema.resolveMaybeUndefVal(elem)) |val| {
                values[i] = val.toIntern();
            } else {
                values[i] = .none;
                runtime_src = operand_src;
            }
        }
        break :rs runtime_src;
    };

    const tuple_ty = try mod.intern(.{ .anon_struct_type = .{
        .types = types,
        .values = values,
        .names = &.{},
    } });

    const runtime_src = opt_runtime_src orelse {
        const tuple_val = try mod.intern(.{ .aggregate = .{
            .ty = tuple_ty,
            .storage = .{ .elems = values },
        } });
        return sema.addConstantMaybeRef(block, tuple_ty.toType(), tuple_val.toValue(), is_ref);
    };

    try sema.requireRuntimeBlock(block, src, runtime_src);

    if (is_ref) {
        const target = sema.mod.getTarget();
        const alloc_ty = try mod.ptrType(.{
            .child = tuple_ty,
            .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) },
        });
        const alloc = try block.addTy(.alloc, alloc_ty);
        for (operands, 0..) |operand, i_usize| {
            const i = @as(u32, @intCast(i_usize));
            const field_ptr_ty = try mod.ptrType(.{
                .child = types[i],
                .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) },
            });
            if (values[i] == .none) {
                const field_ptr = try block.addStructFieldPtr(alloc, i, field_ptr_ty);
                _ = try block.addBinOp(.store, field_ptr, try sema.resolveInst(operand));
            }
        }

        return sema.makePtrConst(block, alloc);
    }

    const element_refs = try sema.arena.alloc(Air.Inst.Ref, operands.len);
    for (operands, 0..) |operand, i| {
        element_refs[i] = try sema.resolveInst(operand);
    }

    return block.addAggregateInit(tuple_ty.toType(), element_refs);
}

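/// Materializes a comptime-known value either directly as a constant, or, when
/// `is_ref` is set, as a pointer to an anonymous decl containing the value.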
fn addConstantMaybeRef(
    sema: *Sema,
    block: *Block,
    ty: Type,
    val: Value,
    is_ref: bool,
) !Air.Inst.Ref {
    if (!is_ref) return Air.internedToRef(val.toIntern());

    var anon_decl = try block.startAnonDecl();
    defer anon_decl.deinit();
    const decl = try anon_decl.finish(
        ty,
        val,
        .none, // default alignment
    );
    return sema.analyzeDeclRef(decl);
}

fn zirFieldTypeRef(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const extra = sema.code.extraData(Zir.Inst.FieldTypeRef, inst_data.payload_index).data;
    const ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    const field_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
    const aggregate_ty = try sema.resolveType(block, ty_src, extra.container_type);
    const field_name = try sema.resolveConstStringIntern(block, field_src, extra.field_name, "field name must be comptime-known");
    return sema.fieldType(block, aggregate_ty, field_name, field_src, ty_src);
}

fn zirFieldType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const ip = &mod.intern_pool;
    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const extra = sema.code.extraData(Zir.Inst.FieldType, inst_data.payload_index).data;
    const ty_src = inst_data.src();
    const field_name_src: LazySrcLoc = .{ .node_offset_field_name = inst_data.src_node };
    const aggregate_ty = sema.resolveType(block, ty_src, extra.container_type) catch |err| switch (err) {
        // Since this is a ZIR instruction that returns a type, encountering
        // generic poison should not result in a failed compilation, but the
        // generic poison type. This prevents unnecessary failures when
        // constructing types at compile-time.
        error.GenericPoison => return Air.Inst.Ref.generic_poison_type,
        else => |e| return e,
    };
    const zir_field_name = sema.code.nullTerminatedString(extra.name_start);
    const field_name = try ip.getOrPutString(sema.gpa, zir_field_name);
    return sema.fieldType(block, aggregate_ty, field_name, field_name_src, ty_src);
}

fn fieldType(
    sema: *Sema,
    block: *Block,
    aggregate_ty: Type,
    field_name: InternPool.NullTerminatedString,
    field_src: LazySrcLoc,
    ty_src: LazySrcLoc,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    var cur_ty = aggregate_ty;
    while (true) {
        try sema.resolveTypeFields(cur_ty);
        switch (cur_ty.zigTypeTag(mod)) {
            .Struct => switch (mod.intern_pool.indexToKey(cur_ty.toIntern())) {
                .anon_struct_type => |anon_struct| {
                    const field_index = try sema.anonStructFieldIndex(block, cur_ty, field_name, field_src);
                    return Air.internedToRef(anon_struct.types[field_index]);
                },
                .struct_type => |struct_type| {
                    const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
                    const field = struct_obj.fields.get(field_name) orelse
                        return sema.failWithBadStructFieldAccess(block, struct_obj, field_src, field_name);
                    return Air.internedToRef(field.ty.toIntern());
                },
                else => unreachable,
            },
            .Union => {
                const union_obj = mod.typeToUnion(cur_ty).?;
                const field = union_obj.fields.get(field_name) orelse
                    return sema.failWithBadUnionFieldAccess(block, union_obj, field_src, field_name);
                return Air.internedToRef(field.ty.toIntern());
            },
            .Optional => {
                // Struct/array init through optional requires the child type to not be a pointer.
                // If the child of .optional is a pointer it'll error on the next loop.
                cur_ty = mod.intern_pool.indexToKey(cur_ty.toIntern()).opt_type.toType();
                continue;
            },
            .ErrorUnion => {
                cur_ty = cur_ty.errorUnionPayload(mod);
                continue;
            },
            else => {},
        }
        return sema.fail(block, ty_src, "expected struct or union; found '{}'", .{
            cur_ty.fmt(sema.mod),
        });
    }
}

fn zirErrorReturnTrace(sema: *Sema, block: *Block) CompileError!Air.Inst.Ref {
    return sema.getErrorReturnTrace(block);
}

fn getErrorReturnTrace(sema: *Sema, block: *Block) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const ip = &mod.intern_pool;
    const stack_trace_ty = try sema.getBuiltinType("StackTrace");
    try sema.resolveTypeFields(stack_trace_ty);
    const ptr_stack_trace_ty = try mod.singleMutPtrType(stack_trace_ty);
    const opt_ptr_stack_trace_ty = try mod.optionalType(ptr_stack_trace_ty.toIntern());

    if (sema.owner_func_index != .none and
        ip.funcAnalysis(sema.owner_func_index).calls_or_awaits_errorable_fn and
        mod.comp.bin_file.options.error_return_tracing and
        mod.backendSupportsFeature(.error_return_trace))
    {
        return block.addTy(.err_return_trace, opt_ptr_stack_trace_ty);
    }
    return Air.internedToRef((try mod.intern(.{ .opt = .{
        .ty = opt_ptr_stack_trace_ty.toIntern(),
        .val = .none,
    } })));
}

fn zirFrame(
    sema: *Sema,
    block: *Block,
    extended: Zir.Inst.Extended.InstData,
) CompileError!Air.Inst.Ref {
    const src = LazySrcLoc.nodeOffset(@as(i32, @bitCast(extended.operand)));
    return sema.failWithUseOfAsync(block, src);
}

fn zirAlignOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
    const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    const ty = try sema.resolveType(block, operand_src, inst_data.operand);
    if (ty.isNoReturn(mod)) {
        return sema.fail(block, operand_src, "no align available for type '{}'", .{ty.fmt(sema.mod)});
    }
    const val = try ty.lazyAbiAlignment(mod);
    if (val.isLazyAlign(mod)) {
        try sema.queueFullTypeResolution(ty);
    }
    return Air.internedToRef(val.toIntern());
}

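// Illustrative (commentary, not from the original source):
//   @intFromBool(true) == @as(u1, 1) and @intFromBool(false) == @as(u1, 0);
// runtime operands lower to the `int_from_bool` AIR instruction below.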
fn zirIntFromBool(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
    const operand = try sema.resolveInst(inst_data.operand);
    if (try sema.resolveMaybeUndefVal(operand)) |val| {
        if (val.isUndef(mod)) return mod.undefRef(Type.u1);
        if (val.toBool()) return Air.internedToRef((try mod.intValue(Type.u1, 1)).toIntern());
        return Air.internedToRef((try mod.intValue(Type.u1, 0)).toIntern());
    }
    return block.addUnOp(.int_from_bool, operand);
}

fn zirErrorName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
    const operand = try sema.resolveInst(inst_data.operand);
    const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };

    if (try sema.resolveDefinedValue(block, operand_src, operand)) |val| {
        const err_name = sema.mod.intern_pool.indexToKey(val.toIntern()).err.name;
        return sema.addStrLit(block, sema.mod.intern_pool.stringToSlice(err_name));
    }

    // Similar to zirTagName, we have a special AIR instruction for the error name in case
    // an optimization pass might be able to resolve the result at compile time.
    return block.addUnOp(.error_name, operand);
}

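// Commentary (not from the original source): this handles float builtins such
// as `@sqrt`, `@sin`, and `@floor`. Illustrative: `@sqrt(@as(f32, 4.0)) == 2.0`
// is folded at comptime; runtime operands lower to the given `air_tag`.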
fn zirUnaryMath(
    sema: *Sema,
    block: *Block,
    inst: Zir.Inst.Index,
    air_tag: Air.Inst.Tag,
    comptime eval: fn (Value, Type, Allocator, *Module) Allocator.Error!Value,
) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
    const operand = try sema.resolveInst(inst_data.operand);
    const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    const operand_ty = sema.typeOf(operand);

    switch (operand_ty.zigTypeTag(mod)) {
        .ComptimeFloat, .Float => {},
        .Vector => {
            const scalar_ty = operand_ty.scalarType(mod);
            switch (scalar_ty.zigTypeTag(mod)) {
                .ComptimeFloat, .Float => {},
                else => return sema.fail(block, operand_src, "expected vector of floats or float type, found '{}'", .{scalar_ty.fmt(sema.mod)}),
            }
        },
        else => return sema.fail(block, operand_src, "expected vector of floats or float type, found '{}'", .{operand_ty.fmt(sema.mod)}),
    }

    switch (operand_ty.zigTypeTag(mod)) {
        .Vector => {
            const scalar_ty = operand_ty.scalarType(mod);
            const vec_len = operand_ty.vectorLen(mod);
            const result_ty = try mod.vectorType(.{
                .len = vec_len,
                .child = scalar_ty.toIntern(),
            });
            if (try sema.resolveMaybeUndefVal(operand)) |val| {
                if (val.isUndef(mod))
                    return mod.undefRef(result_ty);

                const elems = try sema.arena.alloc(InternPool.Index, vec_len);
                for (elems, 0..) |*elem, i| {
                    const elem_val = try val.elemValue(sema.mod, i);
                    elem.* = try (try eval(elem_val, scalar_ty, sema.arena, sema.mod)).intern(scalar_ty, mod);
                }
                return Air.internedToRef((try mod.intern(.{ .aggregate = .{
                    .ty = result_ty.toIntern(),
                    .storage = .{ .elems = elems },
                } })));
            }

            try sema.requireRuntimeBlock(block, operand_src, null);
            return block.addUnOp(air_tag, operand);
        },
        .ComptimeFloat, .Float => {
            if (try sema.resolveMaybeUndefVal(operand)) |operand_val| {
                if (operand_val.isUndef(mod))
                    return mod.undefRef(operand_ty);
                const result_val = try eval(operand_val, operand_ty, sema.arena, sema.mod);
                return Air.internedToRef(result_val.toIntern());
            }

            try sema.requireRuntimeBlock(block, operand_src, null);
            return block.addUnOp(air_tag, operand);
        },
        else => unreachable,
    }
}

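// Illustrative (commentary, not from the original source): for
//   const E = enum { a, b };
// `@tagName(E.a)` is the comptime string "a"; a runtime operand instead emits
// the `tag_name` AIR instruction, optionally guarded by a safety check that
// the value is a named tag.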
fn zirTagName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
    const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    const src = inst_data.src();
    const operand = try sema.resolveInst(inst_data.operand);
    const operand_ty = sema.typeOf(operand);
    const mod = sema.mod;
    const ip = &mod.intern_pool;

    try sema.resolveTypeLayout(operand_ty);
    const enum_ty = switch (operand_ty.zigTypeTag(mod)) {
        .EnumLiteral => {
            const val = try sema.resolveConstValue(block, .unneeded, operand, "");
            const tag_name = ip.indexToKey(val.toIntern()).enum_literal;
            return sema.addStrLit(block, ip.stringToSlice(tag_name));
        },
        .Enum => operand_ty,
        .Union => operand_ty.unionTagType(mod) orelse {
            const msg = msg: {
                const msg = try sema.errMsg(block, src, "union '{}' is untagged", .{
                    operand_ty.fmt(sema.mod),
                });
                errdefer msg.destroy(sema.gpa);
                try sema.addDeclaredHereNote(msg, operand_ty);
                break :msg msg;
            };
            return sema.failWithOwnedErrorMsg(msg);
        },
        else => return sema.fail(block, operand_src, "expected enum or union; found '{}'", .{
            operand_ty.fmt(mod),
        }),
    };
    if (enum_ty.enumFieldCount(mod) == 0) {
        // TODO I don't think this is the correct way to handle this but
        // it prevents a crash.
        return sema.fail(block, operand_src, "cannot get @tagName of empty enum '{}'", .{
            enum_ty.fmt(mod),
        });
    }
    const enum_decl_index = enum_ty.getOwnerDecl(mod);
    const casted_operand = try sema.coerce(block, enum_ty, operand, operand_src);
    if (try sema.resolveDefinedValue(block, operand_src, casted_operand)) |val| {
        const field_index = enum_ty.enumTagFieldIndex(val, mod) orelse {
            const enum_decl = mod.declPtr(enum_decl_index);
            const msg = msg: {
                const msg = try sema.errMsg(block, src, "no field with value '{}' in enum '{}'", .{
                    val.fmtValue(enum_ty, sema.mod), enum_decl.name.fmt(ip),
                });
                errdefer msg.destroy(sema.gpa);
                try mod.errNoteNonLazy(enum_decl.srcLoc(mod), msg, "declared here", .{});
                break :msg msg;
            };
            return sema.failWithOwnedErrorMsg(msg);
        };
        // TODO: write something like getCoercedInts to avoid needing to dupe
        const field_name = enum_ty.enumFieldName(field_index, mod);
        return sema.addStrLit(block, ip.stringToSlice(field_name));
    }
    try sema.requireRuntimeBlock(block, src, operand_src);
    if (block.wantSafety() and sema.mod.backendSupportsFeature(.is_named_enum_value)) {
        const ok = try block.addUnOp(.is_named_enum_value, casted_operand);
        try sema.addSafetyCheck(block, src, ok, .invalid_enum_value);
    }
    // In case the value is runtime-known, we have an AIR instruction for this instead
    // of trying to lower it in Sema because an optimization pass may result in the operand
    // being comptime-known, which would let us elide the `tag_name` AIR instruction.
    return block.addUnOp(.tag_name, casted_operand);
}

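// Illustrative use of `@Type` (commentary, not from the original source):
//   const U7 = @Type(.{ .Int = .{ .signedness = .unsigned, .bits = 7 } });
//   comptime std.debug.assert(U7 == u7);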
fn zirReify(
    sema: *Sema,
    block: *Block,
    extended: Zir.Inst.Extended.InstData,
    inst: Zir.Inst.Index,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const gpa = sema.gpa;
    const ip = &mod.intern_pool;
    const name_strategy = @as(Zir.Inst.NameStrategy, @enumFromInt(extended.small));
    const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
    const src = LazySrcLoc.nodeOffset(extra.node);
    const type_info_ty = try sema.getBuiltinType("Type");
    const uncasted_operand = try sema.resolveInst(extra.operand);
    const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
    const type_info = try sema.coerce(block, type_info_ty, uncasted_operand, operand_src);
    const val = try sema.resolveConstValue(block, operand_src, type_info, "operand to @Type must be comptime-known");
    const union_val = ip.indexToKey(val.toIntern()).un;
    const target = mod.getTarget();
    if (try union_val.val.toValue().anyUndef(mod)) return sema.failWithUseOfUndef(block, src);
    const tag_index = type_info_ty.unionTagFieldIndex(union_val.tag.toValue(), mod).?;
    switch (@as(std.builtin.TypeId, @enumFromInt(tag_index))) {
        .Type => return Air.Inst.Ref.type_type,
        .Void => return Air.Inst.Ref.void_type,
        .Bool => return Air.Inst.Ref.bool_type,
        .NoReturn => return Air.Inst.Ref.noreturn_type,
        .ComptimeFloat => return Air.Inst.Ref.comptime_float_type,
        .ComptimeInt => return Air.Inst.Ref.comptime_int_type,
        .Undefined => return Air.Inst.Ref.undefined_type,
        .Null => return Air.Inst.Ref.null_type,
        .AnyFrame => return sema.failWithUseOfAsync(block, src),
        .EnumLiteral => return Air.Inst.Ref.enum_literal_type,
        .Int => {
            const fields = ip.typeOf(union_val.val).toType().structFields(mod);
            const signedness_val = try union_val.val.toValue().fieldValue(
                mod,
                fields.getIndex(try ip.getOrPutString(gpa, "signedness")).?,
            );
            const bits_val = try union_val.val.toValue().fieldValue(
                mod,
                fields.getIndex(try ip.getOrPutString(gpa, "bits")).?,
            );

            const signedness = mod.toEnum(std.builtin.Signedness, signedness_val);
            const bits = @as(u16, @intCast(bits_val.toUnsignedInt(mod)));
            const ty = try mod.intType(signedness, bits);
            return Air.internedToRef(ty.toIntern());
        },
        .Vector => {
            const fields = ip.typeOf(union_val.val).toType().structFields(mod);
            const len_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
                try ip.getOrPutString(gpa, "len"),
            ).?);
            const child_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
                try ip.getOrPutString(gpa, "child"),
            ).?);

            const len = @as(u32, @intCast(len_val.toUnsignedInt(mod)));
            const child_ty = child_val.toType();

            try sema.checkVectorElemType(block, src, child_ty);

            const ty = try mod.vectorType(.{
                .len = len,
                .child = child_ty.toIntern(),
            });
            return Air.internedToRef(ty.toIntern());
        },
        .Float => {
            const fields = ip.typeOf(union_val.val).toType().structFields(mod);
            const bits_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
                try ip.getOrPutString(gpa, "bits"),
            ).?);

            const bits = @as(u16, @intCast(bits_val.toUnsignedInt(mod)));
            const ty = switch (bits) {
                16 => Type.f16,
                32 => Type.f32,
                64 => Type.f64,
                80 => Type.f80,
                128 => Type.f128,
                else => return sema.fail(block, src, "{}-bit float unsupported", .{bits}),
            };
            return Air.internedToRef(ty.toIntern());
        },
        .Pointer => {
            const fields = ip.typeOf(union_val.val).toType().structFields(mod);
            const size_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
                try ip.getOrPutString(gpa, "size"),
            ).?);
            const is_const_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
                try ip.getOrPutString(gpa, "is_const"),
            ).?);
            const is_volatile_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
                try ip.getOrPutString(gpa, "is_volatile"),
            ).?);
            const alignment_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
                try ip.getOrPutString(gpa, "alignment"),
            ).?);
            const address_space_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
                try ip.getOrPutString(gpa, "address_space"),
            ).?);
            const child_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
                try ip.getOrPutString(gpa, "child"),
            ).?);
            const is_allowzero_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
                try ip.getOrPutString(gpa, "is_allowzero"),
            ).?);
            const sentinel_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
                try ip.getOrPutString(gpa, "sentinel"),
            ).?);

            if (!try sema.intFitsInType(alignment_val, Type.u32, null)) {
                return sema.fail(block, src, "alignment must fit in 'u32'", .{});
            }

            const abi_align = Alignment.fromByteUnits(
                (try alignment_val.getUnsignedIntAdvanced(mod, sema)).?,
            );

            const elem_ty = child_val.toType();
            if (abi_align != .none) {
                try sema.resolveTypeLayout(elem_ty);
            }

            const ptr_size = mod.toEnum(std.builtin.Type.Pointer.Size, size_val);

            const actual_sentinel: InternPool.Index = s: {
                if (!sentinel_val.isNull(mod)) {
                    if (ptr_size == .One or ptr_size == .C) {
                        return sema.fail(block, src, "sentinels are only allowed on slices and unknown-length pointers", .{});
                    }
                    const sentinel_ptr_val = sentinel_val.optionalValue(mod).?;
                    const ptr_ty = try mod.singleMutPtrType(elem_ty);
                    const sent_val = (try sema.pointerDeref(block, src, sentinel_ptr_val, ptr_ty)).?;
                    break :s sent_val.toIntern();
                }
                break :s .none;
            };

            if (elem_ty.zigTypeTag(mod) == .NoReturn) {
                return sema.fail(block, src, "pointer to noreturn not allowed", .{});
            } else if (elem_ty.zigTypeTag(mod) == .Fn) {
                if (ptr_size != .One) {
                    return sema.fail(block, src, "function pointers must be single pointers", .{});
                }
                const fn_align = mod.typeToFunc(elem_ty).?.alignment;
                if (abi_align != .none and fn_align != .none and
                    abi_align != fn_align)
                {
                    return sema.fail(block, src, "function pointer alignment disagrees with function alignment", .{});
                }
            } else if (ptr_size == .Many and elem_ty.zigTypeTag(mod) == .Opaque) {
                return sema.fail(block, src, "unknown-length pointer to opaque not allowed", .{});
            } else if (ptr_size == .C) {
                if (!try sema.validateExternType(elem_ty, .other)) {
                    const msg = msg: {
                        const msg = try sema.errMsg(block, src, "C pointers cannot point to non-C-ABI-compatible type '{}'", .{elem_ty.fmt(mod)});
                        errdefer msg.destroy(gpa);

                        const src_decl = mod.declPtr(block.src_decl);
                        try sema.explainWhyTypeIsNotExtern(msg, src.toSrcLoc(src_decl, mod), elem_ty, .other);

                        try sema.addDeclaredHereNote(msg, elem_ty);
                        break :msg msg;
                    };
                    return sema.failWithOwnedErrorMsg(msg);
                }
                if (elem_ty.zigTypeTag(mod) == .Opaque) {
                    return sema.fail(block, src, "C pointers cannot point to opaque types", .{});
                }
            }

            const ty = try mod.ptrType(.{
                .child = elem_ty.toIntern(),
                .sentinel = actual_sentinel,
                .flags = .{
                    .size = ptr_size,
                    .is_const = is_const_val.toBool(),
                    .is_volatile = is_volatile_val.toBool(),
                    .alignment = abi_align,
                    .address_space = mod.toEnum(std.builtin.AddressSpace, address_space_val),
                    .is_allowzero = is_allowzero_val.toBool(),
                },
            });
            return Air.internedToRef(ty.toIntern());
        },
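        // Illustrative (commentary, not from the original source):
        //   @Type(.{ .Array = .{ .len = 4, .child = u8, .sentinel = null } })
        // reifies `[4]u8`; a non-null sentinel pointer is dereferenced below.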
        .Array => {
            const fields = ip.typeOf(union_val.val).toType().structFields(mod);
            const len_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
                try ip.getOrPutString(gpa, "len"),
            ).?);
            const child_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
                try ip.getOrPutString(gpa, "child"),
            ).?);
            const sentinel_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
                try ip.getOrPutString(gpa, "sentinel"),
            ).?);

            const len = len_val.toUnsignedInt(mod);
            const child_ty = child_val.toType();
            const sentinel = if (sentinel_val.optionalValue(mod)) |p| blk: {
                const ptr_ty = try mod.singleMutPtrType(child_ty);
                break :blk (try sema.pointerDeref(block, src, p, ptr_ty)).?;
            } else null;

            const ty = try mod.arrayType(.{
                .len = len,
                .sentinel = if (sentinel) |s| s.toIntern() else .none,
                .child = child_ty.toIntern(),
            });
            return Air.internedToRef(ty.toIntern());
        },
        .Optional => {
            const fields = ip.typeOf(union_val.val).toType().structFields(mod);
            const child_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
                try ip.getOrPutString(gpa, "child"),
            ).?);

            const child_ty = child_val.toType();

            const ty = try mod.optionalType(child_ty.toIntern());
            return Air.internedToRef(ty.toIntern());
        },
        .ErrorUnion => {
            const fields = ip.typeOf(union_val.val).toType().structFields(mod);
            const error_set_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
                try ip.getOrPutString(gpa, "error_set"),
            ).?);
            const payload_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
                try ip.getOrPutString(gpa, "payload"),
            ).?);

            const error_set_ty = error_set_val.toType();
            const payload_ty = payload_val.toType();

            if (error_set_ty.zigTypeTag(mod) != .ErrorSet) {
                return sema.fail(block, src, "Type.ErrorUnion.error_set must be an error set type", .{});
            }

            const ty = try mod.errorUnionType(error_set_ty, payload_ty);
            return Air.internedToRef(ty.toIntern());
        },
        .ErrorSet => {
            const payload_val = union_val.val.toValue().optionalValue(mod) orelse
                return Air.internedToRef(Type.anyerror.toIntern());

            const len = try sema.usizeCast(block, src, payload_val.sliceLen(mod));
            var names: InferredErrorSet.NameMap = .{};
            try names.ensureUnusedCapacity(sema.arena, len);
            for (0..len) |i| {
                const elem_val = try payload_val.elemValue(mod, i);
                const elem_fields = ip.typeOf(elem_val.toIntern()).toType().structFields(mod);
                const name_val = try elem_val.fieldValue(mod, elem_fields.getIndex(
                    try ip.getOrPutString(gpa, "name"),
                ).?);

                const name = try name_val.toIpString(Type.slice_const_u8, mod);
                _ = try mod.getErrorValue(name);
                const gop = names.getOrPutAssumeCapacity(name);
                if (gop.found_existing) {
                    return sema.fail(block, src, "duplicate error '{}'", .{
                        name.fmt(ip),
                    });
                }
            }

            const ty = try mod.errorSetFromUnsortedNames(names.keys());
            return Air.internedToRef(ty.toIntern());
        },
        .Struct => {
            const fields = ip.typeOf(union_val.val).toType().structFields(mod);
            const layout_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
                try ip.getOrPutString(gpa, "layout"),
            ).?);
            const backing_integer_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
                try ip.getOrPutString(gpa, "backing_integer"),
            ).?);
            const fields_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
                try ip.getOrPutString(gpa, "fields"),
            ).?);
            const decls_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
                try ip.getOrPutString(gpa, "decls"),
            ).?);
            const is_tuple_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
                try ip.getOrPutString(gpa, "is_tuple"),
            ).?);

            const layout = mod.toEnum(std.builtin.Type.ContainerLayout, layout_val);

            // Decls
            if (decls_val.sliceLen(mod) > 0) {
                return sema.fail(block, src, "reified structs must have no decls", .{});
            }

            if (layout != .Packed and !backing_integer_val.isNull(mod)) {
                return sema.fail(block, src, "non-packed struct does not support backing integer type", .{});
            }

            return try sema.reifyStruct(block, inst, src, layout, backing_integer_val, fields_val, name_strategy, is_tuple_val.toBool());
        },
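        // Illustrative reified enum (commentary, not from the original source):
        //   const E = @Type(.{ .Enum = .{
        //       .tag_type = u8,
        //       .fields = &.{.{ .name = "a", .value = 0 }},
        //       .decls = &.{},
        //       .is_exhaustive = true,
        //   } });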
        .Enum => {
            const fields = ip.typeOf(union_val.val).toType().structFields(mod);
            const tag_type_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
                try ip.getOrPutString(gpa, "tag_type"),
            ).?);
            const fields_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
                try ip.getOrPutString(gpa, "fields"),
            ).?);
            const decls_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
                try ip.getOrPutString(gpa, "decls"),
            ).?);
            const is_exhaustive_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
                try ip.getOrPutString(gpa, "is_exhaustive"),
            ).?);

            // Decls
            if (decls_val.sliceLen(mod) > 0) {
                return sema.fail(block, src, "reified enums must have no decls", .{});
            }

            const int_tag_ty = tag_type_val.toType();
            if (int_tag_ty.zigTypeTag(mod) != .Int) {
                return sema.fail(block, src, "Type.Enum.tag_type must be an integer type", .{});
            }

            // Because these things each reference each other, `undefined`
            // placeholders are used before being set after the enum type gains
            // an InternPool index.

            const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{
                .ty = Type.noreturn,
                .val = Value.@"unreachable",
            }, name_strategy, "enum", inst);
            const new_decl = mod.declPtr(new_decl_index);
            new_decl.owns_tv = true;
            errdefer {
                new_decl.has_tv = false; // namespace and val were destroyed by later errdefers
                mod.abortAnonDecl(new_decl_index);
            }

            // Define our empty enum decl
            const fields_len = @as(u32, @intCast(try sema.usizeCast(block, src, fields_val.sliceLen(mod))));
            const incomplete_enum = try ip.getIncompleteEnum(gpa, .{
                .decl = new_decl_index,
                .namespace = .none,
                .fields_len = fields_len,
                .has_values = true,
                .tag_mode = if (!is_exhaustive_val.toBool())
                    .nonexhaustive
                else
                    .explicit,
                .tag_ty = int_tag_ty.toIntern(),
            });
            // TODO: figure out InternPool removals for incremental compilation
            //errdefer ip.remove(incomplete_enum.index);

            new_decl.ty = Type.type;
            new_decl.val = incomplete_enum.index.toValue();

            for (0..fields_len) |field_i| {
                const elem_val = try fields_val.elemValue(mod, field_i);
                const elem_fields = ip.typeOf(elem_val.toIntern()).toType().structFields(mod);
                const name_val = try elem_val.fieldValue(mod, elem_fields.getIndex(
                    try ip.getOrPutString(gpa, "name"),
                ).?);
                const value_val = try elem_val.fieldValue(mod, elem_fields.getIndex(
                    try ip.getOrPutString(gpa, "value"),
                ).?);

                const field_name = try name_val.toIpString(Type.slice_const_u8, mod);

                if (!try sema.intFitsInType(value_val, int_tag_ty, null)) {
                    // TODO: better source location
                    return sema.fail(block, src, "field '{}' with enumeration value '{}' is too large for backing int type '{}'", .{
                        field_name.fmt(ip),
                        value_val.fmtValue(Type.comptime_int, mod),
                        int_tag_ty.fmt(mod),
                    });
                }

                if (try incomplete_enum.addFieldName(ip, gpa, field_name)) |other_index| {
                    const msg = msg: {
                        const msg = try sema.errMsg(block, src, "duplicate enum field '{}'", .{
                            field_name.fmt(ip),
                        });
                        errdefer msg.destroy(gpa);
                        _ = other_index; // TODO: this note is incorrect
                        try sema.errNote(block, src, msg, "other field here", .{});
                        break :msg msg;
                    };
                    return sema.failWithOwnedErrorMsg(msg);
                }

                if (try incomplete_enum.addFieldValue(ip, gpa, (try mod.getCoerced(value_val, int_tag_ty)).toIntern())) |other| {
                    const msg = msg: {
                        const msg = try sema.errMsg(block, src, "enum tag value {} already taken", .{value_val.fmtValue(Type.comptime_int, mod)});
                        errdefer msg.destroy(gpa);
                        _ = other; // TODO: this note is incorrect
                        try sema.errNote(block, src, msg, "other enum tag value here", .{});
                        break :msg msg;
                    };
                    return sema.failWithOwnedErrorMsg(msg);
                }
            }

            const decl_val = sema.analyzeDeclVal(block, src, new_decl_index);
            try mod.finalizeAnonDecl(new_decl_index);
            return decl_val;
        },
        .Opaque => {
            const fields = ip.typeOf(union_val.val).toType().structFields(mod);
            const decls_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
                try ip.getOrPutString(gpa, "decls"),
            ).?);

            // Decls
            if (decls_val.sliceLen(mod) > 0) {
                return sema.fail(block, src, "reified opaque must have no decls", .{});
            }

            // Because these three things each reference each other,
            // `undefined` placeholders are used in two places before being set
            // after the opaque type gains an InternPool index.

            const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{
                .ty = Type.noreturn,
                .val = Value.@"unreachable",
            }, name_strategy, "opaque", inst);
            const new_decl = mod.declPtr(new_decl_index);
            new_decl.owns_tv = true;
            errdefer {
                new_decl.has_tv = false; // namespace and val were destroyed by later errdefers
                mod.abortAnonDecl(new_decl_index);
            }

            const new_namespace_index = try mod.createNamespace(.{
                .parent = block.namespace.toOptional(),
                .ty = undefined,
                .file_scope = block.getFileScope(mod),
            });
            const new_namespace = mod.namespacePtr(new_namespace_index);
            errdefer mod.destroyNamespace(new_namespace_index);

            const opaque_ty = try mod.intern(.{ .opaque_type = .{
                .decl = new_decl_index,
                .namespace = new_namespace_index,
            } });
            // TODO: figure out InternPool removals for incremental compilation
            //errdefer ip.remove(opaque_ty);

            new_decl.ty = Type.type;
            new_decl.val = opaque_ty.toValue();
            new_namespace.ty = opaque_ty.toType();

            const decl_val = sema.analyzeDeclVal(block, src, new_decl_index);
            try mod.finalizeAnonDecl(new_decl_index);
            return decl_val;
        },
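        // Commentary (not from the original source): a non-null `tag_type`
        // reifies the equivalent of `union(E) { ... }`; with a null tag type,
        // `runtime_tag` below decides whether a safety tag is kept, based on
        // layout and optimization mode.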
        .Union => {
            const fields = ip.typeOf(union_val.val).toType().structFields(mod);
            const layout_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
                try ip.getOrPutString(gpa, "layout"),
            ).?);
            const tag_type_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
                try ip.getOrPutString(gpa, "tag_type"),
            ).?);
            const fields_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
                try ip.getOrPutString(gpa, "fields"),
            ).?);
            const decls_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
                try ip.getOrPutString(gpa, "decls"),
            ).?);

            // Decls
            if (decls_val.sliceLen(mod) > 0) {
                return sema.fail(block, src, "reified unions must have no decls", .{});
            }
            const layout = mod.toEnum(std.builtin.Type.ContainerLayout, layout_val);

            // Because these three things each reference each other, `undefined`
            // placeholders are used before being set after the union type gains an
            // InternPool index.

            const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{
                .ty = Type.noreturn,
                .val = Value.@"unreachable",
            }, name_strategy, "union", inst);
            const new_decl = mod.declPtr(new_decl_index);
            new_decl.owns_tv = true;
            errdefer {
                new_decl.has_tv = false; // namespace and val were destroyed by later errdefers
                mod.abortAnonDecl(new_decl_index);
            }

            const new_namespace_index = try mod.createNamespace(.{
                .parent = block.namespace.toOptional(),
                .ty = undefined,
                .file_scope = block.getFileScope(mod),
            });
            const new_namespace = mod.namespacePtr(new_namespace_index);
            errdefer mod.destroyNamespace(new_namespace_index);

            const union_index = try mod.createUnion(.{
                .owner_decl = new_decl_index,
                .tag_ty = Type.null,
                .fields = .{},
                .zir_index = inst,
                .layout = layout,
                .status = .have_field_types,
                .namespace = new_namespace_index,
            });
            const union_obj = mod.unionPtr(union_index);
            errdefer mod.destroyUnion(union_index);

            const union_ty = try ip.get(gpa, .{ .union_type = .{
                .index = union_index,
                .runtime_tag = if (!tag_type_val.isNull(mod))
                    .tagged
                else if (layout != .Auto)
                    .none
                else switch (mod.optimizeMode()) {
                    .Debug, .ReleaseSafe => .safety,
                    .ReleaseFast, .ReleaseSmall => .none,
                },
            } });
            // TODO: figure out InternPool removals for incremental compilation
            //errdefer ip.remove(union_ty);

            new_decl.ty = Type.type;
            new_decl.val = union_ty.toValue();
            new_namespace.ty = union_ty.toType();

            // Tag type
            const fields_len = try sema.usizeCast(block, src, fields_val.sliceLen(mod));
            var explicit_tags_seen: []bool = &.{};
            var enum_field_names: []InternPool.NullTerminatedString = &.{};
            if (tag_type_val.optionalValue(mod)) |payload_val| {
                union_obj.tag_ty = payload_val.toType();

                const enum_type = switch (ip.indexToKey(union_obj.tag_ty.toIntern())) {
                    .enum_type => |x| x,
                    else => return sema.fail(block, src, "Type.Union.tag_type must be an enum type", .{}),
                };

                explicit_tags_seen = try sema.arena.alloc(bool, enum_type.names.len);
                @memset(explicit_tags_seen, false);
            } else {
                enum_field_names = try sema.arena.alloc(InternPool.NullTerminatedString, fields_len);
            }

            // Fields
            try union_obj.fields.ensureTotalCapacity(mod.tmp_hack_arena.allocator(), fields_len);

            for (0..fields_len) |i| {
                const elem_val = try fields_val.elemValue(mod, i);
                const elem_fields = ip.typeOf(elem_val.toIntern()).toType().structFields(mod);
                const name_val = try elem_val.fieldValue(mod, elem_fields.getIndex(
                    try ip.getOrPutString(gpa, "name"),
                ).?);
                const type_val = try elem_val.fieldValue(mod, elem_fields.getIndex(
                    try ip.getOrPutString(gpa, "type"),
                ).?);
                const alignment_val = try elem_val.fieldValue(mod, elem_fields.getIndex(
                    try ip.getOrPutString(gpa, "alignment"),
                ).?);

                const field_name = try name_val.toIpString(Type.slice_const_u8, mod);

                if (enum_field_names.len != 0) {
                    enum_field_names[i] = field_name;
                }

                if (explicit_tags_seen.len > 0) {
                    const tag_info = ip.indexToKey(union_obj.tag_ty.toIntern()).enum_type;
                    const enum_index = tag_info.nameIndex(ip, field_name) orelse {
                        const msg = msg: {
                            const msg = try sema.errMsg(block, src, "no field named '{}' in enum '{}'", .{
                                field_name.fmt(ip),
                                union_obj.tag_ty.fmt(mod),
                            });
                            errdefer msg.destroy(gpa);
                            try sema.addDeclaredHereNote(msg, union_obj.tag_ty);
                            break :msg msg;
                        };
                        return sema.failWithOwnedErrorMsg(msg);
                    };
                    // No check for duplicate because the check already happened in order
                    // to create the enum type in the first place.
                    assert(!explicit_tags_seen[enum_index]);
                    explicit_tags_seen[enum_index] = true;
                }

                const gop = union_obj.fields.getOrPutAssumeCapacity(field_name);
                if (gop.found_existing) {
                    // TODO: better source location
                    return sema.fail(block, src, "duplicate union field {}", .{field_name.fmt(ip)});
                }

                const field_ty = type_val.toType();
                gop.value_ptr.* = .{
                    .ty = field_ty,
                    .abi_align = Alignment.fromByteUnits((try alignment_val.getUnsignedIntAdvanced(mod, sema)).?),
                };

                if (field_ty.zigTypeTag(mod) == .Opaque) {
                    const msg = msg: {
                        const msg = try sema.errMsg(block, src, "opaque types have unknown size and therefore cannot be directly embedded in unions", .{});
                        errdefer msg.destroy(gpa);

                        try sema.addDeclaredHereNote(msg, field_ty);
                        break :msg msg;
                    };
                    return sema.failWithOwnedErrorMsg(msg);
                }
                if (union_obj.layout == .Extern and !try sema.validateExternType(field_ty, .union_field)) {
                    const msg = msg: {
                        const msg = try sema.errMsg(block, src, "extern unions cannot contain fields of type '{}'", .{field_ty.fmt(mod)});
                        errdefer msg.destroy(gpa);

                        const src_decl = mod.declPtr(block.src_decl);
                        try sema.explainWhyTypeIsNotExtern(msg, src.toSrcLoc(src_decl, mod), field_ty, .union_field);

                        try sema.addDeclaredHereNote(msg, field_ty);
                        break :msg msg;
                    };
                    return sema.failWithOwnedErrorMsg(msg);
                } else if (union_obj.layout == .Packed and !(validatePackedType(field_ty, mod))) {
                    const msg = msg: {
                        const msg = try sema.errMsg(block, src, "packed unions cannot contain fields of type '{}'", .{field_ty.fmt(mod)});
                        errdefer msg.destroy(gpa);

                        const src_decl = mod.declPtr(block.src_decl);
                        try sema.explainWhyTypeIsNotPacked(msg, src.toSrcLoc(src_decl, mod), field_ty);

                        try sema.addDeclaredHereNote(msg, field_ty);
                        break :msg msg;
                    };
                    return sema.failWithOwnedErrorMsg(msg);
                }
            }

            if (explicit_tags_seen.len > 0) {
                const tag_info = ip.indexToKey(union_obj.tag_ty.toIntern()).enum_type;
                if (tag_info.names.len > fields_len) {
                    const msg = msg: {
                        const msg = try sema.errMsg(block, src, "enum field(s) missing in union", .{});
                        errdefer msg.destroy(gpa);

                        const enum_ty = union_obj.tag_ty;
                        for (tag_info.names.get(ip), 0..) |field_name, field_index| {
                            if (explicit_tags_seen[field_index]) continue;
                            try sema.addFieldErrNote(enum_ty, field_index, msg, "field '{}' missing, declared here", .{
                                field_name.fmt(ip),
                            });
                        }
                        try sema.addDeclaredHereNote(msg, union_obj.tag_ty);
                        break :msg msg;
                    };
                    return sema.failWithOwnedErrorMsg(msg);
                }
            } else {
                union_obj.tag_ty = try sema.generateUnionTagTypeSimple(block, enum_field_names, null);
            }

            const decl_val = sema.analyzeDeclVal(block, src, new_decl_index);
            try mod.finalizeAnonDecl(new_decl_index);
            return decl_val;
        },
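        // Illustrative (commentary, not from the original source): a params
        // slice with one non-generic, non-noalias `u8` parameter and a `void`
        // return type reifies `fn (u8) void`.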
        .Fn => {
            const fields = ip.typeOf(union_val.val).toType().structFields(mod);
            const calling_convention_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
                try ip.getOrPutString(gpa, "calling_convention"),
            ).?);
            const alignment_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
                try ip.getOrPutString(gpa, "alignment"),
            ).?);
            const is_generic_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
                try ip.getOrPutString(gpa, "is_generic"),
            ).?);
            const is_var_args_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
                try ip.getOrPutString(gpa, "is_var_args"),
            ).?);
            const return_type_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
                try ip.getOrPutString(gpa, "return_type"),
            ).?);
            const params_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
                try ip.getOrPutString(gpa, "params"),
            ).?);

            const is_generic = is_generic_val.toBool();
            if (is_generic) {
                return sema.fail(block, src, "Type.Fn.is_generic must be false for @Type", .{});
            }

            const is_var_args = is_var_args_val.toBool();
            const cc = mod.toEnum(std.builtin.CallingConvention, calling_convention_val);
            if (is_var_args) {
                try sema.checkCallConvSupportsVarArgs(block, src, cc);
            }

            const alignment = alignment: {
                if (!try sema.intFitsInType(alignment_val, Type.u32, null)) {
                    return sema.fail(block, src, "alignment must fit in 'u32'", .{});
                }
                const alignment = @as(u29, @intCast(alignment_val.toUnsignedInt(mod)));
                if (alignment == target_util.defaultFunctionAlignment(target)) {
                    break :alignment .none;
                } else {
                    break :alignment Alignment.fromByteUnits(alignment);
                }
            };
            const return_type = return_type_val.optionalValue(mod) orelse
                return sema.fail(block, src, "Type.Fn.return_type must be non-null for @Type", .{});

            const args_len = try sema.usizeCast(block, src, params_val.sliceLen(mod));
            const param_types = try sema.arena.alloc(InternPool.Index, args_len);

            var noalias_bits: u32 = 0;
            for (param_types, 0..) |*param_type, i| {
                const elem_val = try params_val.elemValue(mod, i);
                const elem_fields = ip.typeOf(elem_val.toIntern()).toType().structFields(mod);
                const param_is_generic_val = try elem_val.fieldValue(mod, elem_fields.getIndex(
                    try ip.getOrPutString(gpa, "is_generic"),
                ).?);
                const param_is_noalias_val = try elem_val.fieldValue(mod, elem_fields.getIndex(
                    try ip.getOrPutString(gpa, "is_noalias"),
                ).?);
                const opt_param_type_val = try elem_val.fieldValue(mod, elem_fields.getIndex(
                    try ip.getOrPutString(gpa, "type"),
                ).?);

                if (param_is_generic_val.toBool()) {
                    return sema.fail(block, src, "Type.Fn.Param.is_generic must be false for @Type", .{});
                }

                const param_type_val = opt_param_type_val.optionalValue(mod) orelse
                    return sema.fail(block, src, "Type.Fn.Param.arg_type must be non-null for @Type", .{});
                param_type.* = param_type_val.toIntern();

                if (param_is_noalias_val.toBool()) {
                    if (!param_type.toType().isPtrAtRuntime(mod)) {
                        return sema.fail(block, src, "non-pointer parameter declared noalias", .{});
                    }
                    noalias_bits |= @as(u32, 1) << (std.math.cast(u5, i) orelse
                        return sema.fail(block, src, "this compiler implementation only supports 'noalias' on the first 32 parameters", .{}));
                }
            }

            const ty = try mod.funcType(.{
                .param_types = param_types,
                .comptime_bits = 0,
                .noalias_bits = noalias_bits,
                .return_type = return_type.toIntern(),
                .alignment = alignment,
                .cc = cc,
                .is_var_args = is_var_args,
                .is_generic = false,
                .is_noinline = false,
                .section_is_generic = false,
                .addrspace_is_generic = false,
            });
            return Air.internedToRef(ty.toIntern());
        },
        .Frame => return sema.failWithUseOfAsync(block, src),
    }
}

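// Illustrative reified struct (commentary, not from the original source):
//   const S = @Type(.{ .Struct = .{
//       .layout = .Auto,
//       .backing_integer = null,
//       .fields = &.{.{
//           .name = "x",
//           .type = u32,
//           .default_value = null,
//           .is_comptime = false,
//           .alignment = @alignOf(u32),
//       }},
//       .decls = &.{},
//       .is_tuple = false,
//   } });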
fn reifyStruct(
    sema: *Sema,
    block: *Block,
    inst: Zir.Inst.Index,
    src: LazySrcLoc,
    layout: std.builtin.Type.ContainerLayout,
    backing_int_val: Value,
    fields_val: Value,
    name_strategy: Zir.Inst.NameStrategy,
    is_tuple: bool,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const gpa = sema.gpa;
    const ip = &mod.intern_pool;

    if (is_tuple) switch (layout) {
        .Extern => return sema.fail(block, src, "extern tuples are not supported", .{}),
        .Packed => return sema.fail(block, src, "packed tuples are not supported", .{}),
        .Auto => {},
    };

    // Because these three things each reference each other, `undefined`
    // placeholders are used before being set after the struct type gains an
    // InternPool index.

    const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{
        .ty = Type.noreturn,
        .val = Value.@"unreachable",
    }, name_strategy, "struct", inst);
    const new_decl = mod.declPtr(new_decl_index);
    new_decl.owns_tv = true;
    errdefer {
        new_decl.has_tv = false; // namespace and val were destroyed by later errdefers
        mod.abortAnonDecl(new_decl_index);
    }

    const new_namespace_index = try mod.createNamespace(.{
        .parent = block.namespace.toOptional(),
        .ty = undefined,
        .file_scope = block.getFileScope(mod),
    });
    const new_namespace = mod.namespacePtr(new_namespace_index);
    errdefer mod.destroyNamespace(new_namespace_index);

    const struct_index = try mod.createStruct(.{
        .owner_decl = new_decl_index,
        .fields = .{},
        .zir_index = inst,
        .layout = layout,
        .status = .have_field_types,
        .known_non_opv = false,
        .is_tuple = is_tuple,
        .namespace = new_namespace_index,
    });
    const struct_obj = mod.structPtr(struct_index);
    errdefer mod.destroyStruct(struct_index);

    const struct_ty = try ip.get(gpa, .{ .struct_type = .{
        .index = struct_index.toOptional(),
        .namespace = new_namespace_index.toOptional(),
    } });
    // TODO: figure out InternPool removals for incremental compilation
    //errdefer ip.remove(struct_ty);

    new_decl.ty = Type.type;
    new_decl.val = struct_ty.toValue();
    new_namespace.ty = struct_ty.toType();

    // Fields
    const fields_len = try sema.usizeCast(block, src, fields_val.sliceLen(mod));
    try struct_obj.fields.ensureTotalCapacity(mod.tmp_hack_arena.allocator(), fields_len);
    var i: usize = 0;
    while (i < fields_len) : (i += 1) {
        const elem_val = try fields_val.elemValue(mod, i);
        const elem_fields = ip.typeOf(elem_val.toIntern()).toType().structFields(mod);
        const name_val = try elem_val.fieldValue(mod, elem_fields.getIndex(
            try ip.getOrPutString(gpa, "name"),
        ).?);
        const type_val = try elem_val.fieldValue(mod, elem_fields.getIndex(
            try ip.getOrPutString(gpa, "type"),
        ).?);
        const default_value_val = try elem_val.fieldValue(mod, elem_fields.getIndex(
            try ip.getOrPutString(gpa, "default_value"),
        ).?);
        const is_comptime_val = try elem_val.fieldValue(mod, elem_fields.getIndex(
            try ip.getOrPutString(gpa, "is_comptime"),
        ).?);
        const alignment_val = try elem_val.fieldValue(mod, elem_fields.getIndex(
            try ip.getOrPutString(gpa, "alignment"),
        ).?);

        if (!try sema.intFitsInType(alignment_val, Type.u32, null)) {
            return sema.fail(block, src, "alignment must fit in 'u32'", .{});
        }
        const abi_align = (try alignment_val.getUnsignedIntAdvanced(mod, sema)).?;

        if (layout == .Packed) {
            if (abi_align != 0) return sema.fail(block, src, "alignment in a packed struct field must be set to 0", .{});
            if (is_comptime_val.toBool()) return sema.fail(block, src, "packed struct fields cannot be marked comptime", .{});
        }
        if (layout == .Extern and is_comptime_val.toBool()) {
            return sema.fail(block, src, "extern struct fields cannot be marked comptime", .{});
        }

        const field_name = try name_val.toIpString(Type.slice_const_u8, mod);

        if (is_tuple) {
            const field_index = field_name.toUnsigned(ip) orelse return sema.fail(
                block,
                src,
                "tuple cannot have non-numeric field '{}'",
                .{field_name.fmt(ip)},
            );

            if (field_index >= fields_len) {
                return sema.fail(
                    block,
                    src,
                    "tuple field {} exceeds tuple field count",
                    .{field_index},
                );
            }
        }
        const gop = struct_obj.fields.getOrPutAssumeCapacity(field_name);
        if (gop.found_existing) {
            // TODO: better source location
            return sema.fail(block, src, "duplicate struct field {}", .{field_name.fmt(ip)});
        }

        const field_ty = type_val.toType();
        const default_val = if (default_value_val.optionalValue(mod)) |opt_val|
            (try sema.pointerDeref(block, src, opt_val, try mod.singleConstPtrType(field_ty)) orelse
                return sema.failWithNeededComptime(block, src, "struct field default value must be comptime-known")).toIntern()
        else
            .none;
        if (is_comptime_val.toBool() and default_val == .none) {
            return sema.fail(block, src, "comptime field without default initialization value", .{});
        }

        gop.value_ptr.* = .{
            .ty = field_ty,
            .abi_align = Alignment.fromByteUnits(abi_align),
            .default_val = default_val,
            .is_comptime = is_comptime_val.toBool(),
            .offset = undefined,
        };

        if (field_ty.zigTypeTag(mod) == .Opaque) {
            const msg = msg: {
                const msg = try sema.errMsg(block, src, "opaque types have unknown size and therefore cannot be directly embedded in structs", .{});
                errdefer msg.destroy(gpa);

                try sema.addDeclaredHereNote(msg, field_ty);
                break :msg msg;
            };
            return sema.failWithOwnedErrorMsg(msg);
        }
        if (field_ty.zigTypeTag(mod) == .NoReturn) {
            const msg = msg: {
                const msg = try sema.errMsg(block, src, "struct fields cannot be 'noreturn'", .{});
                errdefer msg.destroy(gpa);

                try sema.addDeclaredHereNote(msg, field_ty);
                break :msg msg;
            };
            return sema.failWithOwnedErrorMsg(msg);
        }
        if (struct_obj.layout == .Extern and !try sema.validateExternType(field_ty, .struct_field)) {
            const msg = msg: {
                const msg = try sema.errMsg(block, src, "extern structs cannot contain fields of type '{}'", .{field_ty.fmt(sema.mod)});
                errdefer msg.destroy(gpa);

                const src_decl = sema.mod.declPtr(block.src_decl);
                try sema.explainWhyTypeIsNotExtern(msg, src.toSrcLoc(src_decl, mod), field_ty, .struct_field);

                try sema.addDeclaredHereNote(msg, field_ty);
                break :msg msg;
            };
            return sema.failWithOwnedErrorMsg(msg);
        } else if (struct_obj.layout == .Packed and !(validatePackedType(field_ty, mod))) {
            const msg = msg: {
                const msg = try sema.errMsg(block, src, "packed structs cannot contain fields of type '{}'", .{field_ty.fmt(sema.mod)});
                errdefer msg.destroy(gpa);

                const src_decl = sema.mod.declPtr(block.src_decl);
                try sema.explainWhyTypeIsNotPacked(msg, src.toSrcLoc(src_decl, mod), field_ty);

                try sema.addDeclaredHereNote(msg, field_ty);
                break :msg msg;
            };
            return sema.failWithOwnedErrorMsg(msg);
        }
    }

    if (layout == .Packed) {
        struct_obj.status = .layout_wip;

        for (struct_obj.fields.values(), 0..) |field, index| {
            sema.resolveTypeLayout(field.ty) catch |err| switch (err) {
                error.AnalysisFail => {
                    const msg = sema.err orelse return err;
                    try sema.addFieldErrNote(struct_ty.toType(), index, msg, "while checking this field", .{});
                    return err;
                },
                else => return err,
            };
        }

        var fields_bit_sum: u64 = 0;
        for (struct_obj.fields.values()) |field| {
            fields_bit_sum += field.ty.bitSize(mod);
        }

        if (backing_int_val.optionalValue(mod)) |payload| {
            const backing_int_ty = payload.toType();
            try sema.checkBackingIntType(block, src, backing_int_ty, fields_bit_sum);
            struct_obj.backing_int_ty = backing_int_ty;
        } else {
            struct_obj.backing_int_ty = try mod.intType(.unsigned, @as(u16, @intCast(fields_bit_sum)));
        }

        struct_obj.status = .have_layout;
    }

    const decl_val = sema.analyzeDeclVal(block, src, new_decl_index);
    try mod.finalizeAnonDecl(new_decl_index);
    return decl_val;
}

fn resolveVaListRef(sema: *Sema, block: *Block, src: LazySrcLoc, zir_ref: Zir.Inst.Ref) CompileError!Air.Inst.Ref {
    const va_list_ty = try sema.getBuiltinType("VaList");
    const va_list_ptr = try sema.mod.singleMutPtrType(va_list_ty);

    const inst = try sema.resolveInst(zir_ref);
    return sema.coerce(block, va_list_ptr, inst, src);
}

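// Illustrative (commentary, not from the original source): inside a variadic
// `callconv(.C)` function, `@cVaArg(&va_list, c_int)` pops the next `c_int`
// argument from the `std.builtin.VaList`.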
fn zirCVaArg(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data;
    const src = LazySrcLoc.nodeOffset(extra.node);
    const va_list_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
    const ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = extra.node };

    const va_list_ref = try sema.resolveVaListRef(block, va_list_src, extra.lhs);
    const arg_ty = try sema.resolveType(block, ty_src, extra.rhs);

    if (!try sema.validateExternType(arg_ty, .param_ty)) {
        const msg = msg: {
            const msg = try sema.errMsg(block, ty_src, "cannot get '{}' from variadic argument", .{arg_ty.fmt(sema.mod)});
            errdefer msg.destroy(sema.gpa);

            const src_decl = sema.mod.declPtr(block.src_decl);
            try sema.explainWhyTypeIsNotExtern(msg, ty_src.toSrcLoc(src_decl, mod), arg_ty, .param_ty);

            try sema.addDeclaredHereNote(msg, arg_ty);
            break :msg msg;
        };
        return sema.failWithOwnedErrorMsg(msg);
    }

    try sema.requireRuntimeBlock(block, src, null);
    return block.addTyOp(.c_va_arg, arg_ty, va_list_ref);
}

fn zirCVaCopy(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref {
    const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
    const src = LazySrcLoc.nodeOffset(extra.node);
    const va_list_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };

    const va_list_ref = try sema.resolveVaListRef(block, va_list_src, extra.operand);
    const va_list_ty = try sema.getBuiltinType("VaList");

    try sema.requireRuntimeBlock(block, src, null);
    return block.addTyOp(.c_va_copy, va_list_ty, va_list_ref);
}

fn zirCVaEnd(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref {
    const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
    const src = LazySrcLoc.nodeOffset(extra.node);
    const va_list_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };

    const va_list_ref = try sema.resolveVaListRef(block, va_list_src, extra.operand);

    try sema.requireRuntimeBlock(block, src, null);
    return block.addUnOp(.c_va_end, va_list_ref);
}

fn zirCVaStart(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref {
    const src = LazySrcLoc.nodeOffset(@as(i32, @bitCast(extended.operand)));

    const va_list_ty = try sema.getBuiltinType("VaList");
    try sema.requireRuntimeBlock(block, src, null);
    return block.addInst(.{
        .tag = .c_va_start,
        .data = .{ .ty = va_list_ty },
    });
}

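// Illustrative (commentary, not from the original source): `@typeName(?u8)`
// yields the null-terminated string "?u8", materialized as an anonymous decl.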
fn zirTypeName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
    const ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    const ty = try sema.resolveType(block, ty_src, inst_data.operand);

    var anon_decl = try block.startAnonDecl();
    defer anon_decl.deinit();

    var bytes = std.ArrayList(u8).init(sema.arena);
    defer bytes.deinit();
    try ty.print(bytes.writer(), mod);

    const decl_ty = try mod.arrayType(.{
        .len = bytes.items.len,
        .sentinel = .zero_u8,
        .child = .u8_type,
    });
    const new_decl = try anon_decl.finish(
        decl_ty,
        (try mod.intern(.{ .aggregate = .{
            .ty = decl_ty.toIntern(),
            .storage = .{ .bytes = bytes.items },
        } })).toValue(),
        .none, // default alignment
    );

    return sema.analyzeDeclRef(new_decl);
}

fn zirFrameType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
    const src = inst_data.src();
    return sema.failWithUseOfAsync(block, src);
}

fn zirFrameSize(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
    const src = inst_data.src();
    return sema.failWithUseOfAsync(block, src);
}

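// Illustrative (commentary, not from the original source):
//   const n: i32 = @intFromFloat(3.9); // n == 3, truncating toward zero
// Runtime operands get safety checks that the integer part fits the
// destination type.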
fn zirIntFromFloat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const src = inst_data.src();
    const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
    const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    const dest_ty = try sema.resolveDestType(block, src, extra.lhs, .remove_eu_opt, "@intFromFloat");
    const operand = try sema.resolveInst(extra.rhs);
    const operand_ty = sema.typeOf(operand);

    _ = try sema.checkIntType(block, src, dest_ty);
    try sema.checkFloatType(block, operand_src, operand_ty);

    if (try sema.resolveMaybeUndefVal(operand)) |val| {
        const result_val = try sema.intFromFloat(block, operand_src, val, operand_ty, dest_ty);
        return Air.internedToRef(result_val.toIntern());
    } else if (dest_ty.zigTypeTag(mod) == .ComptimeInt) {
        return sema.failWithNeededComptime(block, operand_src, "value being casted to 'comptime_int' must be comptime-known");
    }

    try sema.requireRuntimeBlock(block, inst_data.src(), operand_src);
    if (dest_ty.intInfo(mod).bits == 0) {
        if (block.wantSafety()) {
            const ok = try block.addBinOp(if (block.float_mode == .Optimized) .cmp_eq_optimized else .cmp_eq, operand, Air.internedToRef((try mod.floatValue(operand_ty, 0.0)).toIntern()));
            try sema.addSafetyCheck(block, src, ok, .integer_part_out_of_bounds);
        }
        return Air.internedToRef((try mod.intValue(dest_ty, 0)).toIntern());
    }
    const result = try block.addTyOp(if (block.float_mode == .Optimized) .int_from_float_optimized else .int_from_float, dest_ty, operand);
    if (block.wantSafety()) {
        const back = try block.addTyOp(.float_from_int, operand_ty, result);
        const diff = try block.addBinOp(.sub, operand, back);
        const ok_pos = try block.addBinOp(if (block.float_mode == .Optimized) .cmp_lt_optimized else .cmp_lt, diff, Air.internedToRef((try mod.floatValue(operand_ty, 1.0)).toIntern()));
        const ok_neg = try block.addBinOp(if (block.float_mode == .Optimized) .cmp_gt_optimized else .cmp_gt, diff, Air.internedToRef((try mod.floatValue(operand_ty, -1.0)).toIntern()));
        const ok = try block.addBinOp(.bool_and, ok_pos, ok_neg);
        try sema.addSafetyCheck(block, src, ok, .integer_part_out_of_bounds);
    }
    return result;
}

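// Implements `@floatFromInt`. Lowers to a single conversion instruction at
// runtime; only a destination type of `comptime_float` requires a
// comptime-known operand. For example:
//
//   const f: f64 = @floatFromInt(@as(u32, 7)); // f == 7.0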
fn zirFloatFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const src = inst_data.src();
    const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
    const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    const dest_ty = try sema.resolveDestType(block, src, extra.lhs, .remove_eu_opt, "@floatFromInt");
    const operand = try sema.resolveInst(extra.rhs);
    const operand_ty = sema.typeOf(operand);

    try sema.checkFloatType(block, src, dest_ty);
    _ = try sema.checkIntType(block, operand_src, operand_ty);

    if (try sema.resolveMaybeUndefVal(operand)) |val| {
        const result_val = try val.floatFromIntAdvanced(sema.arena, operand_ty, dest_ty, sema.mod, sema);
        return Air.internedToRef(result_val.toIntern());
    } else if (dest_ty.zigTypeTag(mod) == .ComptimeFloat) {
        return sema.failWithNeededComptime(block, operand_src, "value being casted to 'comptime_float' must be comptime-known");
    }

    try sema.requireRuntimeBlock(block, inst_data.src(), operand_src);
    return block.addTyOp(.float_from_int, dest_ty, operand);
}

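// Implements `@ptrFromInt`. A comptime-known address is validated against the
// destination pointer's allowzero and alignment constraints; otherwise the
// equivalent checks are emitted as runtime safety checks. For example:
//
//   const p: *align(4) u32 = @ptrFromInt(0x1000); // 0x1000 is 4-byte aligned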
fn zirPtrFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const src = inst_data.src();

    const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;

    const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    const operand_res = try sema.resolveInst(extra.rhs);
    const operand_coerced = try sema.coerce(block, Type.usize, operand_res, operand_src);

    const ptr_ty = try sema.resolveDestType(block, src, extra.lhs, .remove_eu, "@ptrFromInt");
    try sema.checkPtrType(block, src, ptr_ty);
    const elem_ty = ptr_ty.elemType2(mod);
    const ptr_align = try ptr_ty.ptrAlignmentAdvanced(mod, sema);

    if (ptr_ty.isSlice(mod)) {
        const msg = msg: {
            const msg = try sema.errMsg(block, src, "integer cannot be converted to slice type '{}'", .{ptr_ty.fmt(sema.mod)});
            errdefer msg.destroy(sema.gpa);
            try sema.errNote(block, src, msg, "slice length cannot be inferred from address", .{});
            break :msg msg;
        };
        return sema.failWithOwnedErrorMsg(msg);
    }

    if (try sema.resolveDefinedValue(block, operand_src, operand_coerced)) |val| {
        const addr = val.toUnsignedInt(mod);
        if (!ptr_ty.isAllowzeroPtr(mod) and addr == 0)
            return sema.fail(block, operand_src, "pointer type '{}' does not allow address zero", .{ptr_ty.fmt(sema.mod)});
        if (addr != 0 and ptr_align != 0 and addr % ptr_align != 0)
            return sema.fail(block, operand_src, "pointer type '{}' requires aligned address", .{ptr_ty.fmt(sema.mod)});

        const ptr_val = switch (ptr_ty.zigTypeTag(mod)) {
            .Optional => (try mod.intern(.{ .opt = .{
                .ty = ptr_ty.toIntern(),
                .val = if (addr == 0) .none else (try mod.ptrIntValue(ptr_ty.childType(mod), addr)).toIntern(),
            } })).toValue(),
            .Pointer => try mod.ptrIntValue(ptr_ty, addr),
            else => unreachable,
        };
        return Air.internedToRef(ptr_val.toIntern());
    }

    try sema.requireRuntimeBlock(block, src, operand_src);
    if (block.wantSafety() and (try sema.typeHasRuntimeBits(elem_ty) or elem_ty.zigTypeTag(mod) == .Fn)) {
        if (!ptr_ty.isAllowzeroPtr(mod)) {
            const is_non_zero = try block.addBinOp(.cmp_neq, operand_coerced, .zero_usize);
            try sema.addSafetyCheck(block, src, is_non_zero, .cast_to_null);
        }

        if (ptr_align > 1) {
            const align_minus_1 = Air.internedToRef((try mod.intValue(Type.usize, ptr_align - 1)).toIntern());
            const remainder = try block.addBinOp(.bit_and, operand_coerced, align_minus_1);
            const is_aligned = try block.addBinOp(.cmp_eq, remainder, .zero_usize);
            try sema.addSafetyCheck(block, src, is_aligned, .incorrect_alignment);
        }
    }
    return block.addBitCast(ptr_ty, operand_coerced);
}

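// Implements `@errSetCast`. Casting between provably disjoint error sets is a
// compile error, and a comptime-known error must be a member of the
// destination set; otherwise a runtime membership check is emitted when the
// backend supports it. Illustrative usage:
//
//   const E = error{ A, B };
//   fn narrow(err: anyerror) E {
//       return @errSetCast(err); // safety panic if err is not in E
//   }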
fn zirErrSetCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const ip = &mod.intern_pool;
    const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data;
    const src = LazySrcLoc.nodeOffset(extra.node);
    const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
    const dest_ty = try sema.resolveDestType(block, src, extra.lhs, .remove_eu_opt, "@errSetCast");
    const operand = try sema.resolveInst(extra.rhs);
    const operand_ty = sema.typeOf(operand);
    try sema.checkErrorSetType(block, src, dest_ty);
    try sema.checkErrorSetType(block, operand_src, operand_ty);

    // operand must be defined since it can be an invalid error value
    const maybe_operand_val = try sema.resolveDefinedValue(block, operand_src, operand);

    if (disjoint: {
        // Try avoiding resolving inferred error sets if we can
        if (!dest_ty.isAnyError(mod) and dest_ty.errorSetNames(mod).len == 0) break :disjoint true;
        if (!operand_ty.isAnyError(mod) and operand_ty.errorSetNames(mod).len == 0) break :disjoint true;
        if (dest_ty.isAnyError(mod)) break :disjoint false;
        if (operand_ty.isAnyError(mod)) break :disjoint false;
        for (dest_ty.errorSetNames(mod)) |dest_err_name| {
            if (Type.errorSetHasFieldIp(ip, operand_ty.toIntern(), dest_err_name))
                break :disjoint false;
        }

        if (!ip.isInferredErrorSetType(dest_ty.toIntern()) and
            !ip.isInferredErrorSetType(operand_ty.toIntern()))
        {
            break :disjoint true;
        }

        _ = try sema.resolveInferredErrorSetTy(block, src, dest_ty.toIntern());
        _ = try sema.resolveInferredErrorSetTy(block, operand_src, operand_ty.toIntern());
        for (dest_ty.errorSetNames(mod)) |dest_err_name| {
            if (Type.errorSetHasFieldIp(ip, operand_ty.toIntern(), dest_err_name))
                break :disjoint false;
        }

        break :disjoint true;
    }) {
        const msg = msg: {
            const msg = try sema.errMsg(
                block,
                src,
                "error sets '{}' and '{}' have no common errors",
                .{ operand_ty.fmt(sema.mod), dest_ty.fmt(sema.mod) },
            );
            errdefer msg.destroy(sema.gpa);
            try sema.addDeclaredHereNote(msg, operand_ty);
            try sema.addDeclaredHereNote(msg, dest_ty);
            break :msg msg;
        };
        return sema.failWithOwnedErrorMsg(msg);
    }

    if (maybe_operand_val) |val| {
        if (!dest_ty.isAnyError(mod)) {
            const error_name = mod.intern_pool.indexToKey(val.toIntern()).err.name;
            if (!Type.errorSetHasFieldIp(ip, dest_ty.toIntern(), error_name)) {
                const msg = msg: {
                    const msg = try sema.errMsg(
                        block,
                        src,
                        "'error.{}' not a member of error set '{}'",
                        .{ error_name.fmt(ip), dest_ty.fmt(sema.mod) },
                    );
                    errdefer msg.destroy(sema.gpa);
                    try sema.addDeclaredHereNote(msg, dest_ty);
                    break :msg msg;
                };
                return sema.failWithOwnedErrorMsg(msg);
            }
        }

        return Air.internedToRef((try mod.getCoerced(val, dest_ty)).toIntern());
    }

    try sema.requireRuntimeBlock(block, src, operand_src);
    if (block.wantSafety() and !dest_ty.isAnyError(mod) and sema.mod.backendSupportsFeature(.error_set_has_value)) {
        const err_int_inst = try block.addBitCast(Type.err_int, operand);
        const ok = try block.addTyOp(.error_set_has_value, dest_ty, err_int_inst);
        try sema.addSafetyCheck(block, src, ok, .invalid_error_code);
    }
    return block.addBitCast(dest_ty, operand);
}

fn zirPtrCastFull(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref {
    const flags: Zir.Inst.FullPtrCastFlags = @bitCast(@as(u5, @truncate(extended.small)));
    const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data;
    const src = LazySrcLoc.nodeOffset(extra.node);
    const operand_src: LazySrcLoc = .{ .node_offset_ptrcast_operand = extra.node };
    const operand = try sema.resolveInst(extra.rhs);
    const dest_ty = try sema.resolveDestType(block, src, extra.lhs, .remove_eu, flags.needResultTypeBuiltinName());
    return sema.ptrCastFull(
        block,
        flags,
        src,
        operand,
        operand_src,
        dest_ty,
    );
}

fn zirPtrCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const src = inst_data.src();
    const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
    const dest_ty = try sema.resolveDestType(block, src, extra.lhs, .remove_eu, "@ptrCast");
    const operand = try sema.resolveInst(extra.rhs);

    return sema.ptrCastFull(
        block,
        .{ .ptr_cast = true },
        src,
        operand,
        operand_src,
        dest_ty,
    );
}

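// Shared implementation of `@ptrCast`, `@alignCast`, `@addrSpaceCast`,
// `@constCast`, and `@volatileCast`. Each flag in `flags` waives exactly one
// class of check below, which is what lets these builtins be composed in a
// single cast expression. Illustrative example (not part of this function):
//
//   var x: u8 = 0;
//   const p: *volatile u8 = &x;
//   const q: *u8 = @volatileCast(p); // without the flag, this cast is an error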
fn ptrCastFull(
    sema: *Sema,
    block: *Block,
    flags: Zir.Inst.FullPtrCastFlags,
    src: LazySrcLoc,
    operand: Air.Inst.Ref,
    operand_src: LazySrcLoc,
    dest_ty: Type,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const operand_ty = sema.typeOf(operand);

    try sema.checkPtrType(block, src, dest_ty);
    try sema.checkPtrOperand(block, operand_src, operand_ty);

    const src_info = operand_ty.ptrInfo(mod);
    const dest_info = dest_ty.ptrInfo(mod);

    try sema.resolveTypeLayout(src_info.child.toType());
    try sema.resolveTypeLayout(dest_info.child.toType());

    const src_slice_like = src_info.flags.size == .Slice or
        (src_info.flags.size == .One and src_info.child.toType().zigTypeTag(mod) == .Array);

    const dest_slice_like = dest_info.flags.size == .Slice or
        (dest_info.flags.size == .One and dest_info.child.toType().zigTypeTag(mod) == .Array);

    if (dest_info.flags.size == .Slice and !src_slice_like) {
        return sema.fail(block, src, "illegal pointer cast to slice", .{});
    }

    if (dest_info.flags.size == .Slice) {
        const src_elem_size = switch (src_info.flags.size) {
            .Slice => src_info.child.toType().abiSize(mod),
            // pointer to array
            .One => src_info.child.toType().childType(mod).abiSize(mod),
            else => unreachable,
        };
        const dest_elem_size = dest_info.child.toType().abiSize(mod);
        if (src_elem_size != dest_elem_size) {
            return sema.fail(block, src, "TODO: implement @ptrCast between slices changing the length", .{});
        }
    }

    // The checking logic in this function must stay in sync with Sema.coerceInMemoryAllowedPtrs

    if (!flags.ptr_cast) {
        check_size: {
            if (src_info.flags.size == dest_info.flags.size) break :check_size;
            if (src_slice_like and dest_slice_like) break :check_size;
            if (src_info.flags.size == .C) break :check_size;
            if (dest_info.flags.size == .C) break :check_size;
            return sema.failWithOwnedErrorMsg(msg: {
                const msg = try sema.errMsg(block, src, "cannot implicitly convert {s} pointer to {s} pointer", .{
                    pointerSizeString(src_info.flags.size),
                    pointerSizeString(dest_info.flags.size),
                });
                errdefer msg.destroy(sema.gpa);
                if (dest_info.flags.size == .Many and
                    (src_info.flags.size == .Slice or
                    (src_info.flags.size == .One and src_info.child.toType().zigTypeTag(mod) == .Array)))
                {
                    try sema.errNote(block, src, msg, "use 'ptr' field to convert slice to many pointer", .{});
                } else {
                    try sema.errNote(block, src, msg, "use @ptrCast to change pointer size", .{});
                }
                break :msg msg;
            });
        }

        check_child: {
            const src_child = if (dest_info.flags.size == .Slice and src_info.flags.size == .One) blk: {
                // *[n]T -> []T
                break :blk src_info.child.toType().childType(mod);
            } else src_info.child.toType();

            const dest_child = dest_info.child.toType();

            const imc_res = try sema.coerceInMemoryAllowed(
                block,
                dest_child,
                src_child,
                !dest_info.flags.is_const,
                mod.getTarget(),
                src,
                operand_src,
            );
            if (imc_res == .ok) break :check_child;
            return sema.failWithOwnedErrorMsg(msg: {
                const msg = try sema.errMsg(block, src, "pointer element type '{}' cannot coerce into element type '{}'", .{
                    src_child.fmt(mod),
                    dest_child.fmt(mod),
                });
                errdefer msg.destroy(sema.gpa);
                try imc_res.report(sema, block, src, msg);
                try sema.errNote(block, src, msg, "use @ptrCast to cast pointer element type", .{});
                break :msg msg;
            });
        }

        check_sent: {
            if (dest_info.sentinel == .none) break :check_sent;
            if (src_info.flags.size == .C) break :check_sent;
            if (src_info.sentinel != .none) {
                const coerced_sent = try mod.intern_pool.getCoerced(sema.gpa, src_info.sentinel, dest_info.child);
                if (dest_info.sentinel == coerced_sent) break :check_sent;
            }
            if (src_slice_like and src_info.flags.size == .One and dest_info.flags.size == .Slice) {
                // *[n:s]T -> [:s]T
                const arr_ty = src_info.child.toType();
                if (arr_ty.sentinel(mod)) |src_sentinel| {
                    const coerced_sent = try mod.intern_pool.getCoerced(sema.gpa, src_sentinel.toIntern(), dest_info.child);
                    if (dest_info.sentinel == coerced_sent) break :check_sent;
                }
            }
            return sema.failWithOwnedErrorMsg(msg: {
                const msg = if (src_info.sentinel == .none) blk: {
                    break :blk try sema.errMsg(block, src, "destination pointer requires '{}' sentinel", .{
                        dest_info.sentinel.toValue().fmtValue(dest_info.child.toType(), mod),
                    });
                } else blk: {
                    break :blk try sema.errMsg(block, src, "pointer sentinel '{}' cannot coerce into pointer sentinel '{}'", .{
                        src_info.sentinel.toValue().fmtValue(src_info.child.toType(), mod),
                        dest_info.sentinel.toValue().fmtValue(dest_info.child.toType(), mod),
                    });
                };
                errdefer msg.destroy(sema.gpa);
                try sema.errNote(block, src, msg, "use @ptrCast to cast pointer sentinel", .{});
                break :msg msg;
            });
        }

        if (src_info.packed_offset.host_size != dest_info.packed_offset.host_size) {
            return sema.failWithOwnedErrorMsg(msg: {
                const msg = try sema.errMsg(block, src, "pointer host size '{}' cannot coerce into pointer host size '{}'", .{
                    src_info.packed_offset.host_size,
                    dest_info.packed_offset.host_size,
                });
                errdefer msg.destroy(sema.gpa);
                try sema.errNote(block, src, msg, "use @ptrCast to cast pointer host size", .{});
                break :msg msg;
            });
        }

        if (src_info.packed_offset.bit_offset != dest_info.packed_offset.bit_offset) {
            return sema.failWithOwnedErrorMsg(msg: {
                const msg = try sema.errMsg(block, src, "pointer bit offset '{}' cannot coerce into pointer bit offset '{}'", .{
                    src_info.packed_offset.bit_offset,
                    dest_info.packed_offset.bit_offset,
                });
                errdefer msg.destroy(sema.gpa);
                try sema.errNote(block, src, msg, "use @ptrCast to cast pointer bit offset", .{});
                break :msg msg;
            });
        }

        check_allowzero: {
            const src_allows_zero = operand_ty.ptrAllowsZero(mod);
            const dest_allows_zero = dest_ty.ptrAllowsZero(mod);
            if (!src_allows_zero) break :check_allowzero;
            if (dest_allows_zero) break :check_allowzero;

            return sema.failWithOwnedErrorMsg(msg: {
                const msg = try sema.errMsg(block, src, "'{}' could have null values which are illegal in type '{}'", .{
                    operand_ty.fmt(mod),
                    dest_ty.fmt(mod),
                });
                errdefer msg.destroy(sema.gpa);
                try sema.errNote(block, src, msg, "use @ptrCast to assert the pointer is not null", .{});
                break :msg msg;
            });
        }

        // TODO: vector index?
    }

    const src_align = src_info.flags.alignment.toByteUnitsOptional() orelse src_info.child.toType().abiAlignment(mod);
    const dest_align = dest_info.flags.alignment.toByteUnitsOptional() orelse dest_info.child.toType().abiAlignment(mod);
    if (!flags.align_cast) {
        if (dest_align > src_align) {
            return sema.failWithOwnedErrorMsg(msg: {
                const msg = try sema.errMsg(block, src, "cast increases pointer alignment", .{});
                errdefer msg.destroy(sema.gpa);
                try sema.errNote(block, operand_src, msg, "'{}' has alignment '{d}'", .{
                    operand_ty.fmt(mod), src_align,
                });
                try sema.errNote(block, src, msg, "'{}' has alignment '{d}'", .{
                    dest_ty.fmt(mod), dest_align,
                });
                try sema.errNote(block, src, msg, "use @alignCast to assert pointer alignment", .{});
                break :msg msg;
            });
        }
    }

    if (!flags.addrspace_cast) {
        if (src_info.flags.address_space != dest_info.flags.address_space) {
            return sema.failWithOwnedErrorMsg(msg: {
                const msg = try sema.errMsg(block, src, "cast changes pointer address space", .{});
                errdefer msg.destroy(sema.gpa);
                try sema.errNote(block, operand_src, msg, "'{}' has address space '{s}'", .{
                    operand_ty.fmt(mod), @tagName(src_info.flags.address_space),
                });
                try sema.errNote(block, src, msg, "'{}' has address space '{s}'", .{
                    dest_ty.fmt(mod), @tagName(dest_info.flags.address_space),
                });
                try sema.errNote(block, src, msg, "use @addrSpaceCast to cast pointer address space", .{});
                break :msg msg;
            });
        }
    } else {
        // Some address space casts are always disallowed
        if (!target_util.addrSpaceCastIsValid(mod.getTarget(), src_info.flags.address_space, dest_info.flags.address_space)) {
            return sema.failWithOwnedErrorMsg(msg: {
                const msg = try sema.errMsg(block, src, "invalid address space cast", .{});
                errdefer msg.destroy(sema.gpa);
                try sema.errNote(block, operand_src, msg, "address space '{s}' is not compatible with address space '{s}'", .{
                    @tagName(src_info.flags.address_space),
                    @tagName(dest_info.flags.address_space),
                });
                break :msg msg;
            });
        }
    }

    if (!flags.const_cast) {
        if (src_info.flags.is_const and !dest_info.flags.is_const) {
            return sema.failWithOwnedErrorMsg(msg: {
                const msg = try sema.errMsg(block, src, "cast discards const qualifier", .{});
                errdefer msg.destroy(sema.gpa);
                try sema.errNote(block, src, msg, "use @constCast to discard const qualifier", .{});
                break :msg msg;
            });
        }
    }

    if (!flags.volatile_cast) {
        if (src_info.flags.is_volatile and !dest_info.flags.is_volatile) {
            return sema.failWithOwnedErrorMsg(msg: {
                const msg = try sema.errMsg(block, src, "cast discards volatile qualifier", .{});
                errdefer msg.destroy(sema.gpa);
                try sema.errNote(block, src, msg, "use @volatileCast to discard volatile qualifier", .{});
                break :msg msg;
            });
        }
    }

    const ptr = if (src_info.flags.size == .Slice and dest_info.flags.size != .Slice) ptr: {
        break :ptr try sema.analyzeSlicePtr(block, operand_src, operand, operand_ty);
    } else operand;

    const dest_ptr_ty = if (dest_info.flags.size == .Slice and src_info.flags.size != .Slice) blk: {
        // Only convert to a many-pointer at first
        var info = dest_info;
        info.flags.size = .Many;
        const ty = try mod.ptrType(info);
        if (dest_ty.zigTypeTag(mod) == .Optional) {
            break :blk try mod.optionalType(ty.toIntern());
        } else {
            break :blk ty;
        }
    } else dest_ty;

    // Cannot do @addrSpaceCast at comptime
    if (!flags.addrspace_cast) {
        if (try sema.resolveMaybeUndefVal(ptr)) |ptr_val| {
            if (!dest_ty.ptrAllowsZero(mod) and ptr_val.isUndef(mod)) {
                return sema.failWithUseOfUndef(block, operand_src);
            }
            if (!dest_ty.ptrAllowsZero(mod) and ptr_val.isNull(mod)) {
                return sema.fail(block, operand_src, "null pointer casted to type '{}'", .{dest_ty.fmt(mod)});
            }
            if (dest_align > src_align) {
                if (try ptr_val.getUnsignedIntAdvanced(mod, null)) |addr| {
                    if (addr % dest_align != 0) {
                        return sema.fail(block, operand_src, "pointer address 0x{X} is not aligned to {d} bytes", .{ addr, dest_align });
                    }
                }
            }
            if (dest_info.flags.size == .Slice and src_info.flags.size != .Slice) {
                if (ptr_val.isUndef(mod)) return mod.undefRef(dest_ty);
                const arr_len = try mod.intValue(Type.usize, src_info.child.toType().arrayLen(mod));
                return Air.internedToRef((try mod.intern(.{ .ptr = .{
                    .ty = dest_ty.toIntern(),
                    .addr = mod.intern_pool.indexToKey(ptr_val.toIntern()).ptr.addr,
                    .len = arr_len.toIntern(),
                } })));
            } else {
                assert(dest_ptr_ty.eql(dest_ty, mod));
                return Air.internedToRef((try mod.getCoerced(ptr_val, dest_ty)).toIntern());
            }
        }
    }

    try sema.requireRuntimeBlock(block, src, null);

    if (block.wantSafety() and operand_ty.ptrAllowsZero(mod) and !dest_ty.ptrAllowsZero(mod) and
        (try sema.typeHasRuntimeBits(dest_info.child.toType()) or dest_info.child.toType().zigTypeTag(mod) == .Fn))
    {
        const ptr_int = try block.addUnOp(.int_from_ptr, ptr);
        const is_non_zero = try block.addBinOp(.cmp_neq, ptr_int, .zero_usize);
        const ok = if (src_info.flags.size == .Slice and dest_info.flags.size == .Slice) ok: {
            const len = try sema.analyzeSliceLen(block, operand_src, ptr);
            const len_zero = try block.addBinOp(.cmp_eq, len, .zero_usize);
            break :ok try block.addBinOp(.bit_or, len_zero, is_non_zero);
        } else is_non_zero;
        try sema.addSafetyCheck(block, src, ok, .cast_to_null);
    }

    if (block.wantSafety() and dest_align > src_align and try sema.typeHasRuntimeBits(dest_info.child.toType())) {
        const align_minus_1 = Air.internedToRef((try mod.intValue(Type.usize, dest_align - 1)).toIntern());
        const ptr_int = try block.addUnOp(.int_from_ptr, ptr);
        const remainder = try block.addBinOp(.bit_and, ptr_int, align_minus_1);
        const is_aligned = try block.addBinOp(.cmp_eq, remainder, .zero_usize);
        const ok = if (src_info.flags.size == .Slice and dest_info.flags.size == .Slice) ok: {
            const len = try sema.analyzeSliceLen(block, operand_src, ptr);
            const len_zero = try block.addBinOp(.cmp_eq, len, .zero_usize);
            break :ok try block.addBinOp(.bit_or, len_zero, is_aligned);
        } else is_aligned;
        try sema.addSafetyCheck(block, src, ok, .incorrect_alignment);
    }

    // If we're going from an array pointer to a slice, this will only be the pointer part!
    const result_ptr = if (flags.addrspace_cast) ptr: {
        // We can't change address spaces with a bitcast, so this requires two instructions
        var intermediate_info = src_info;
        intermediate_info.flags.address_space = dest_info.flags.address_space;
        const intermediate_ptr_ty = try mod.ptrType(intermediate_info);
        const intermediate_ty = if (dest_ptr_ty.zigTypeTag(mod) == .Optional) blk: {
            break :blk try mod.optionalType(intermediate_ptr_ty.toIntern());
        } else intermediate_ptr_ty;
        const intermediate = try block.addInst(.{
            .tag = .addrspace_cast,
            .data = .{ .ty_op = .{
                .ty = Air.internedToRef(intermediate_ty.toIntern()),
                .operand = ptr,
            } },
        });
        if (intermediate_ty.eql(dest_ptr_ty, mod)) {
            // We only changed the address space, so no need for a bitcast
            break :ptr intermediate;
        }
        break :ptr try block.addBitCast(dest_ptr_ty, intermediate);
    } else ptr: {
        break :ptr try block.addBitCast(dest_ptr_ty, ptr);
    };

    if (dest_info.flags.size == .Slice and src_info.flags.size != .Slice) {
        // We have to construct a slice using the operand's child's array length
        // Note that we know from the check at the start of the function that operand_ty is slice-like
        const arr_len = Air.internedToRef((try mod.intValue(Type.usize, src_info.child.toType().arrayLen(mod))).toIntern());
        return block.addInst(.{
            .tag = .slice,
            .data = .{ .ty_pl = .{
                .ty = Air.internedToRef(dest_ty.toIntern()),
                .payload = try sema.addExtra(Air.Bin{
                    .lhs = result_ptr,
                    .rhs = arr_len,
                }),
            } },
        });
    } else {
        assert(dest_ptr_ty.eql(dest_ty, mod));
        return result_ptr;
    }
}

fn zirPtrCastNoDest(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const flags = @as(Zir.Inst.FullPtrCastFlags, @bitCast(@as(u5, @truncate(extended.small))));
    const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
    const src = LazySrcLoc.nodeOffset(extra.node);
    const operand_src: LazySrcLoc = .{ .node_offset_ptrcast_operand = extra.node };
    const operand = try sema.resolveInst(extra.operand);
    const operand_ty = sema.typeOf(operand);
    try sema.checkPtrOperand(block, operand_src, operand_ty);

    var ptr_info = operand_ty.ptrInfo(mod);
    if (flags.const_cast) ptr_info.flags.is_const = false;
    if (flags.volatile_cast) ptr_info.flags.is_volatile = false;
    const dest_ty = try mod.ptrType(ptr_info);

    if (try sema.resolveMaybeUndefVal(operand)) |operand_val| {
        return Air.internedToRef((try mod.getCoerced(operand_val, dest_ty)).toIntern());
    }

    try sema.requireRuntimeBlock(block, src, null);
    return block.addBitCast(dest_ty, operand);
}

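// Implements `@truncate`: narrows an integer (or vector of integers) by
// discarding the most significant bits. Signedness must match and the
// destination may not have more bits than the operand. For example:
//
//   const wide: u16 = 0xabcd;
//   const narrow: u8 = @truncate(wide); // narrow == 0xcd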
fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const src = inst_data.src();
    const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
    const dest_ty = try sema.resolveDestType(block, src, extra.lhs, .remove_eu_opt, "@truncate");
    const dest_scalar_ty = try sema.checkIntOrVectorAllowComptime(block, dest_ty, src);
    const operand = try sema.resolveInst(extra.rhs);
    const operand_ty = sema.typeOf(operand);
    const operand_scalar_ty = try sema.checkIntOrVectorAllowComptime(block, operand_ty, operand_src);

    const operand_is_vector = operand_ty.zigTypeTag(mod) == .Vector;
    const dest_is_vector = dest_ty.zigTypeTag(mod) == .Vector;
    if (operand_is_vector != dest_is_vector) {
        return sema.fail(block, operand_src, "expected type '{}', found '{}'", .{ dest_ty.fmt(mod), operand_ty.fmt(mod) });
    }

    if (dest_scalar_ty.zigTypeTag(mod) == .ComptimeInt) {
        return sema.coerce(block, dest_ty, operand, operand_src);
    }

    const dest_info = dest_scalar_ty.intInfo(mod);

    if (try sema.typeHasOnePossibleValue(dest_ty)) |val| {
        return Air.internedToRef(val.toIntern());
    }

    if (operand_scalar_ty.zigTypeTag(mod) != .ComptimeInt) {
        const operand_info = operand_ty.intInfo(mod);
        if (try sema.typeHasOnePossibleValue(operand_ty)) |val| {
            return Air.internedToRef(val.toIntern());
        }

        if (operand_info.signedness != dest_info.signedness) {
            return sema.fail(block, operand_src, "expected {s} integer type, found '{}'", .{
                @tagName(dest_info.signedness), operand_ty.fmt(mod),
            });
        }
        if (operand_info.bits < dest_info.bits) {
            const msg = msg: {
                const msg = try sema.errMsg(
                    block,
                    src,
                    "destination type '{}' has more bits than source type '{}'",
                    .{ dest_ty.fmt(mod), operand_ty.fmt(mod) },
                );
                errdefer msg.destroy(sema.gpa);
                try sema.errNote(block, src, msg, "destination type has {d} bits", .{
                    dest_info.bits,
                });
                try sema.errNote(block, operand_src, msg, "operand type has {d} bits", .{
                    operand_info.bits,
                });
                break :msg msg;
            };
            return sema.failWithOwnedErrorMsg(msg);
        }
    }

    if (try sema.resolveMaybeUndefValIntable(operand)) |val| {
        if (val.isUndef(mod)) return mod.undefRef(dest_ty);
        if (!dest_is_vector) {
            return Air.internedToRef((try mod.getCoerced(
                try val.intTrunc(operand_ty, sema.arena, dest_info.signedness, dest_info.bits, mod),
                dest_ty,
            )).toIntern());
        }
        const elems = try sema.arena.alloc(InternPool.Index, operand_ty.vectorLen(mod));
        for (elems, 0..) |*elem, i| {
            const elem_val = try val.elemValue(mod, i);
            elem.* = try (try elem_val.intTrunc(operand_scalar_ty, sema.arena, dest_info.signedness, dest_info.bits, mod)).intern(dest_scalar_ty, mod);
        }
        return Air.internedToRef((try mod.intern(.{ .aggregate = .{
            .ty = dest_ty.toIntern(),
            .storage = .{ .elems = elems },
        } })));
    }

    try sema.requireRuntimeBlock(block, src, operand_src);
    return block.addTyOp(.trunc, dest_ty, operand);
}

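// Shared implementation of the bit-counting builtins such as `@popCount`,
// `@clz`, and `@ctz`; `comptimeOp` performs the comptime evaluation and
// `air_tag` selects the runtime instruction. The result type is the smallest
// unsigned integer that can hold the operand's bit count. For example:
//
//   const x: u8 = 0b0010_1100;
//   const ones = @popCount(x); // ones == 3, of type u4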
fn zirBitCount(
    sema: *Sema,
    block: *Block,
    inst: Zir.Inst.Index,
    air_tag: Air.Inst.Tag,
    comptime comptimeOp: fn (val: Value, ty: Type, mod: *Module) u64,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
    const src = inst_data.src();
    const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    const operand = try sema.resolveInst(inst_data.operand);
    const operand_ty = sema.typeOf(operand);
    _ = try sema.checkIntOrVector(block, operand, operand_src);
    const bits = operand_ty.intInfo(mod).bits;

    if (try sema.typeHasOnePossibleValue(operand_ty)) |val| {
        return Air.internedToRef(val.toIntern());
    }

    const result_scalar_ty = try mod.smallestUnsignedInt(bits);
    switch (operand_ty.zigTypeTag(mod)) {
        .Vector => {
            const vec_len = operand_ty.vectorLen(mod);
            const result_ty = try mod.vectorType(.{
                .len = vec_len,
                .child = result_scalar_ty.toIntern(),
            });
            if (try sema.resolveMaybeUndefVal(operand)) |val| {
                if (val.isUndef(mod)) return mod.undefRef(result_ty);

                const elems = try sema.arena.alloc(InternPool.Index, vec_len);
                const scalar_ty = operand_ty.scalarType(mod);
                for (elems, 0..) |*elem, i| {
                    const elem_val = try val.elemValue(mod, i);
                    const count = comptimeOp(elem_val, scalar_ty, mod);
                    elem.* = (try mod.intValue(result_scalar_ty, count)).toIntern();
                }
                return Air.internedToRef((try mod.intern(.{ .aggregate = .{
                    .ty = result_ty.toIntern(),
                    .storage = .{ .elems = elems },
                } })));
            } else {
                try sema.requireRuntimeBlock(block, src, operand_src);
                return block.addTyOp(air_tag, result_ty, operand);
            }
        },
        .Int => {
            if (try sema.resolveMaybeUndefLazyVal(operand)) |val| {
                if (val.isUndef(mod)) return mod.undefRef(result_scalar_ty);
                return mod.intRef(result_scalar_ty, comptimeOp(val, operand_ty, mod));
            } else {
                try sema.requireRuntimeBlock(block, src, operand_src);
                return block.addTyOp(air_tag, result_scalar_ty, operand);
            }
        },
        else => unreachable,
    }
}

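// Implements `@byteSwap`, which reverses the byte order of an integer (or
// vector of integers) whose bit width is a multiple of 8. For example:
//
//   const n: u32 = 0x12345678;
//   const m = @byteSwap(n); // m == 0x78563412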
fn zirByteSwap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
    const src = inst_data.src();
    const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    const operand = try sema.resolveInst(inst_data.operand);
    const operand_ty = sema.typeOf(operand);
    const scalar_ty = try sema.checkIntOrVector(block, operand, operand_src);
    const bits = scalar_ty.intInfo(mod).bits;
    if (bits % 8 != 0) {
        return sema.fail(
            block,
            operand_src,
            "@byteSwap requires the number of bits to be evenly divisible by 8, but {} has {} bits",
            .{ scalar_ty.fmt(mod), bits },
        );
    }

    if (try sema.typeHasOnePossibleValue(operand_ty)) |val| {
        return Air.internedToRef(val.toIntern());
    }

    switch (operand_ty.zigTypeTag(mod)) {
        .Int => {
            const runtime_src = if (try sema.resolveMaybeUndefVal(operand)) |val| {
                if (val.isUndef(mod)) return mod.undefRef(operand_ty);
                const result_val = try val.byteSwap(operand_ty, mod, sema.arena);
                return Air.internedToRef(result_val.toIntern());
            } else operand_src;

            try sema.requireRuntimeBlock(block, src, runtime_src);
            return block.addTyOp(.byte_swap, operand_ty, operand);
        },
        .Vector => {
            const runtime_src = if (try sema.resolveMaybeUndefVal(operand)) |val| {
                if (val.isUndef(mod))
                    return mod.undefRef(operand_ty);

                const vec_len = operand_ty.vectorLen(mod);
                const elems = try sema.arena.alloc(InternPool.Index, vec_len);
                for (elems, 0..) |*elem, i| {
                    const elem_val = try val.elemValue(mod, i);
                    elem.* = try (try elem_val.byteSwap(scalar_ty, mod, sema.arena)).intern(scalar_ty, mod);
                }
                return Air.internedToRef((try mod.intern(.{ .aggregate = .{
                    .ty = operand_ty.toIntern(),
                    .storage = .{ .elems = elems },
                } })));
            } else operand_src;

            try sema.requireRuntimeBlock(block, src, runtime_src);
            return block.addTyOp(.byte_swap, operand_ty, operand);
        },
        else => unreachable,
    }
}

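// Implements `@bitReverse`, which mirrors the bit pattern of an integer of
// any bit width. For example:
//
//   const n: u4 = 0b1101;
//   const r = @bitReverse(n); // r == 0b1011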
fn zirBitReverse(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
    const src = inst_data.src();
    const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    const operand = try sema.resolveInst(inst_data.operand);
    const operand_ty = sema.typeOf(operand);
    const scalar_ty = try sema.checkIntOrVector(block, operand, operand_src);

    if (try sema.typeHasOnePossibleValue(operand_ty)) |val| {
        return Air.internedToRef(val.toIntern());
    }

    const mod = sema.mod;
    switch (operand_ty.zigTypeTag(mod)) {
        .Int => {
            const runtime_src = if (try sema.resolveMaybeUndefVal(operand)) |val| {
                if (val.isUndef(mod)) return mod.undefRef(operand_ty);
                const result_val = try val.bitReverse(operand_ty, mod, sema.arena);
                return Air.internedToRef(result_val.toIntern());
            } else operand_src;

            try sema.requireRuntimeBlock(block, src, runtime_src);
            return block.addTyOp(.bit_reverse, operand_ty, operand);
        },
        .Vector => {
            const runtime_src = if (try sema.resolveMaybeUndefVal(operand)) |val| {
                if (val.isUndef(mod))
                    return mod.undefRef(operand_ty);

                const vec_len = operand_ty.vectorLen(mod);
                const elems = try sema.arena.alloc(InternPool.Index, vec_len);
                for (elems, 0..) |*elem, i| {
                    const elem_val = try val.elemValue(mod, i);
                    elem.* = try (try elem_val.bitReverse(scalar_ty, mod, sema.arena)).intern(scalar_ty, mod);
                }
                return Air.internedToRef((try mod.intern(.{ .aggregate = .{
                    .ty = operand_ty.toIntern(),
                    .storage = .{ .elems = elems },
                } })));
            } else operand_src;

            try sema.requireRuntimeBlock(block, src, runtime_src);
            return block.addTyOp(.bit_reverse, operand_ty, operand);
        },
        else => unreachable,
    }
}

fn zirBitOffsetOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const offset = try sema.bitOffsetOf(block, inst);
    return sema.mod.intRef(Type.comptime_int, offset);
}

fn zirOffsetOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const offset = try sema.bitOffsetOf(block, inst);
    // TODO reminder to make this a compile error for packed structs
    return sema.mod.intRef(Type.comptime_int, offset / 8);
}

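// Shared helper for `@bitOffsetOf` and `@offsetOf`. For a packed struct the
// offset is the running sum of the bit sizes of the preceding fields; for
// other layouts it is derived from the resolved field offset. For example:
//
//   const S = packed struct { a: u3, b: u5 };
//   comptime {
//       std.debug.assert(@bitOffsetOf(S, "b") == 3);
//   }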
fn bitOffsetOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!u64 {
    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node };
    sema.src = src;
    const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
    const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node };
    const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;

    const ty = try sema.resolveType(block, lhs_src, extra.lhs);
    const field_name = try sema.resolveConstStringIntern(block, rhs_src, extra.rhs, "name of field must be comptime-known");

    const mod = sema.mod;
    try sema.resolveTypeLayout(ty);
    switch (ty.zigTypeTag(mod)) {
        .Struct => {},
        else => {
            const msg = msg: {
                const msg = try sema.errMsg(block, lhs_src, "expected struct type, found '{}'", .{ty.fmt(mod)});
                errdefer msg.destroy(sema.gpa);
                try sema.addDeclaredHereNote(msg, ty);
                break :msg msg;
            };
            return sema.failWithOwnedErrorMsg(msg);
        },
    }

    const field_index = if (ty.isTuple(mod)) blk: {
        if (mod.intern_pool.stringEqlSlice(field_name, "len")) {
            return sema.fail(block, src, "no offset available for 'len' field of tuple", .{});
        }
        break :blk try sema.tupleFieldIndex(block, ty, field_name, rhs_src);
    } else try sema.structFieldIndex(block, ty, field_name, rhs_src);

    if (ty.structFieldIsComptime(field_index, mod)) {
        return sema.fail(block, src, "no offset available for comptime field", .{});
    }

    switch (ty.containerLayout(mod)) {
        .Packed => {
            var bit_sum: u64 = 0;
            const fields = ty.structFields(mod);
            for (fields.values(), 0..) |field, i| {
                if (i == field_index) {
                    return bit_sum;
                }
                bit_sum += field.ty.bitSize(mod);
            } else unreachable;
        },
        else => return ty.structFieldOffset(field_index, mod) * 8,
    }
}

fn checkNamespaceType(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError!void {
    const mod = sema.mod;
    switch (ty.zigTypeTag(mod)) {
        .Struct, .Enum, .Union, .Opaque => return,
        else => return sema.fail(block, src, "expected struct, enum, union, or opaque; found '{}'", .{ty.fmt(mod)}),
    }
}

/// Returns `true` if the type was a comptime_int.
fn checkIntType(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError!bool {
    const mod = sema.mod;
    switch (try ty.zigTypeTagOrPoison(mod)) {
        .ComptimeInt => return true,
        .Int => return false,
        else => return sema.fail(block, src, "expected integer type, found '{}'", .{ty.fmt(mod)}),
    }
}

fn checkInvalidPtrArithmetic(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    ty: Type,
) CompileError!void {
    const mod = sema.mod;
    switch (try ty.zigTypeTagOrPoison(mod)) {
        .Pointer => switch (ty.ptrSize(mod)) {
            .One, .Slice => return,
            .Many, .C => return sema.fail(
                block,
                src,
                "invalid pointer arithmetic operator",
                .{},
            ),
        },
        else => return,
    }
}

fn checkArithmeticOp(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    scalar_tag: std.builtin.TypeId,
    lhs_zig_ty_tag: std.builtin.TypeId,
    rhs_zig_ty_tag: std.builtin.TypeId,
    zir_tag: Zir.Inst.Tag,
) CompileError!void {
    const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt;
    const is_float = scalar_tag == .Float or scalar_tag == .ComptimeFloat;

    if (!is_int and !(is_float and floatOpAllowed(zir_tag))) {
        return sema.fail(block, src, "invalid operands to binary expression: '{s}' and '{s}'", .{
            @tagName(lhs_zig_ty_tag), @tagName(rhs_zig_ty_tag),
        });
    }
}

fn checkPtrOperand(
    sema: *Sema,
    block: *Block,
    ty_src: LazySrcLoc,
    ty: Type,
) CompileError!void {
    const mod = sema.mod;
    switch (ty.zigTypeTag(mod)) {
        .Pointer => return,
        .Fn => {
            const msg = msg: {
                const msg = try sema.errMsg(
                    block,
                    ty_src,
                    "expected pointer, found '{}'",
                    .{ty.fmt(mod)},
                );
                errdefer msg.destroy(sema.gpa);

                try sema.errNote(block, ty_src, msg, "use '&' to obtain a function pointer", .{});

                break :msg msg;
            };
            return sema.failWithOwnedErrorMsg(msg);
        },
        .Optional => if (ty.childType(mod).zigTypeTag(mod) == .Pointer) return,
        else => {},
    }
    return sema.fail(block, ty_src, "expected pointer type, found '{}'", .{ty.fmt(mod)});
}

fn checkPtrType(
    sema: *Sema,
    block: *Block,
    ty_src: LazySrcLoc,
    ty: Type,
) CompileError!void {
    const mod = sema.mod;
    switch (ty.zigTypeTag(mod)) {
        .Pointer => return,
        .Fn => {
            const msg = msg: {
                const msg = try sema.errMsg(
                    block,
                    ty_src,
                    "expected pointer type, found '{}'",
                    .{ty.fmt(mod)},
                );
                errdefer msg.destroy(sema.gpa);

                try sema.errNote(block, ty_src, msg, "use '*const ' to make a function pointer type", .{});

                break :msg msg;
            };
            return sema.failWithOwnedErrorMsg(msg);
        },
        .Optional => if (ty.childType(mod).zigTypeTag(mod) == .Pointer) return,
        else => {},
    }
    return sema.fail(block, ty_src, "expected pointer type, found '{}'", .{ty.fmt(mod)});
}

fn checkVectorElemType(
    sema: *Sema,
    block: *Block,
    ty_src: LazySrcLoc,
    ty: Type,
) CompileError!void {
    const mod = sema.mod;
    switch (ty.zigTypeTag(mod)) {
        .Int, .Float, .Bool => return,
        else => if (ty.isPtrAtRuntime(mod)) return,
    }
    return sema.fail(block, ty_src, "expected integer, float, bool, or pointer for the vector element type; found '{}'", .{ty.fmt(mod)});
}

fn checkFloatType(
    sema: *Sema,
    block: *Block,
    ty_src: LazySrcLoc,
    ty: Type,
) CompileError!void {
    const mod = sema.mod;
    switch (ty.zigTypeTag(mod)) {
        .ComptimeInt, .ComptimeFloat, .Float => {},
        else => return sema.fail(block, ty_src, "expected float type, found '{}'", .{ty.fmt(mod)}),
    }
}

fn checkNumericType(
    sema: *Sema,
    block: *Block,
    ty_src: LazySrcLoc,
    ty: Type,
) CompileError!void {
    const mod = sema.mod;
    switch (ty.zigTypeTag(mod)) {
        .ComptimeFloat, .Float, .ComptimeInt, .Int => {},
        .Vector => switch (ty.childType(mod).zigTypeTag(mod)) {
            .ComptimeFloat, .Float, .ComptimeInt, .Int => {},
            else => |t| return sema.fail(block, ty_src, "expected number, found '{}'", .{t}),
        },
        else => return sema.fail(block, ty_src, "expected number, found '{}'", .{ty.fmt(mod)}),
    }
}

/// Returns the coerced pointer.
fn checkAtomicPtrOperand(
    sema: *Sema,
    block: *Block,
    elem_ty: Type,
    elem_ty_src: LazySrcLoc,
    ptr: Air.Inst.Ref,
    ptr_src: LazySrcLoc,
    ptr_const: bool,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    var diag: Module.AtomicPtrAlignmentDiagnostics = .{};
    const alignment = mod.atomicPtrAlignment(elem_ty, &diag) catch |err| switch (err) {
        error.OutOfMemory => return error.OutOfMemory,
        error.FloatTooBig => return sema.fail(
            block,
            elem_ty_src,
            "expected {d}-bit float type or smaller; found {d}-bit float type",
            .{ diag.max_bits, diag.bits },
        ),
        error.IntTooBig => return sema.fail(
            block,
            elem_ty_src,
            "expected {d}-bit integer type or smaller; found {d}-bit integer type",
            .{ diag.max_bits, diag.bits },
        ),
        error.BadType => return sema.fail(
            block,
            elem_ty_src,
            "expected bool, integer, float, enum, or pointer type; found '{}'",
            .{elem_ty.fmt(mod)},
        ),
    };

    var wanted_ptr_data: InternPool.Key.PtrType = .{
        .child = elem_ty.toIntern(),
        .flags = .{
            .alignment = alignment,
            .is_const = ptr_const,
        },
    };

    const ptr_ty = sema.typeOf(ptr);
    const ptr_data = switch (try ptr_ty.zigTypeTagOrPoison(mod)) {
        .Pointer => ptr_ty.ptrInfo(mod),
        else => {
            const wanted_ptr_ty = try mod.ptrType(wanted_ptr_data);
            _ = try sema.coerce(block, wanted_ptr_ty, ptr, ptr_src);
            unreachable;
        },
    };

    wanted_ptr_data.flags.address_space = ptr_data.flags.address_space;
    wanted_ptr_data.flags.is_allowzero = ptr_data.flags.is_allowzero;
    wanted_ptr_data.flags.is_volatile = ptr_data.flags.is_volatile;

    const wanted_ptr_ty = try mod.ptrType(wanted_ptr_data);
    const casted_ptr = try sema.coerce(block, wanted_ptr_ty, ptr, ptr_src);

    return casted_ptr;
}

fn checkPtrIsNotComptimeMutable(
    sema: *Sema,
    block: *Block,
    ptr_val: Value,
    ptr_src: LazySrcLoc,
    operand_src: LazySrcLoc,
) CompileError!void {
    _ = operand_src;
    if (ptr_val.isComptimeMutablePtr(sema.mod)) {
        return sema.fail(block, ptr_src, "cannot store runtime value in compile time variable", .{});
    }
}

fn checkComptimeVarStore(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    decl_ref_mut: InternPool.Key.Ptr.Addr.MutDecl,
) CompileError!void {
    if (@intFromEnum(decl_ref_mut.runtime_index) < @intFromEnum(block.runtime_index)) {
        if (block.runtime_cond) |cond_src| {
            const msg = msg: {
                const msg = try sema.errMsg(block, src, "store to comptime variable depends on runtime condition", .{});
                errdefer msg.destroy(sema.gpa);
                try sema.errNote(block, cond_src, msg, "runtime condition here", .{});
                break :msg msg;
            };
            return sema.failWithOwnedErrorMsg(msg);
        }
        if (block.runtime_loop) |loop_src| {
            const msg = msg: {
                const msg = try sema.errMsg(block, src, "cannot store to comptime variable in non-inline loop", .{});
                errdefer msg.destroy(sema.gpa);
                try sema.errNote(block, loop_src, msg, "non-inline loop here", .{});
                break :msg msg;
            };
            return sema.failWithOwnedErrorMsg(msg);
        }
        unreachable;
    }
}

fn checkIntOrVector(
    sema: *Sema,
    block: *Block,
    operand: Air.Inst.Ref,
    operand_src: LazySrcLoc,
) CompileError!Type {
    const mod = sema.mod;
    const operand_ty = sema.typeOf(operand);
    switch (try operand_ty.zigTypeTagOrPoison(mod)) {
        .Int => return operand_ty,
        .Vector => {
            const elem_ty = operand_ty.childType(mod);
            switch (try elem_ty.zigTypeTagOrPoison(mod)) {
                .Int => return elem_ty,
                else => return sema.fail(block, operand_src, "expected vector of integers; found vector of '{}'", .{
                    elem_ty.fmt(mod),
                }),
            }
        },
        else => return sema.fail(block, operand_src, "expected integer or vector, found '{}'", .{
            operand_ty.fmt(mod),
        }),
    }
}

fn checkIntOrVectorAllowComptime(
    sema: *Sema,
    block: *Block,
    operand_ty: Type,
    operand_src: LazySrcLoc,
) CompileError!Type {
    const mod = sema.mod;
    switch (try operand_ty.zigTypeTagOrPoison(mod)) {
        .Int, .ComptimeInt => return operand_ty,
        .Vector => {
            const elem_ty = operand_ty.childType(mod);
            switch (try elem_ty.zigTypeTagOrPoison(mod)) {
                .Int, .ComptimeInt => return elem_ty,
                else => return sema.fail(block, operand_src, "expected vector of integers; found vector of '{}'", .{
                    elem_ty.fmt(mod),
                }),
            }
        },
        else => return sema.fail(block, operand_src, "expected integer or vector, found '{}'", .{
            operand_ty.fmt(mod),
        }),
    }
}

fn checkErrorSetType(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError!void {
    const mod = sema.mod;
    switch (ty.zigTypeTag(mod)) {
        .ErrorSet => return,
        else => return sema.fail(block, src, "expected error set type, found '{}'", .{ty.fmt(mod)}),
    }
}

const SimdBinOp = struct {
    len: ?usize,
    /// Coerced to `result_ty`.
    lhs: Air.Inst.Ref,
    /// Coerced to `result_ty`.
    rhs: Air.Inst.Ref,
    lhs_val: ?Value,
    rhs_val: ?Value,
    /// Only different than `scalar_ty` when it is a vector operation.
    result_ty: Type,
    scalar_ty: Type,
};

fn checkSimdBinOp(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    uncasted_lhs: Air.Inst.Ref,
    uncasted_rhs: Air.Inst.Ref,
    lhs_src: LazySrcLoc,
    rhs_src: LazySrcLoc,
) CompileError!SimdBinOp {
    const mod = sema.mod;
    const lhs_ty = sema.typeOf(uncasted_lhs);
    const rhs_ty = sema.typeOf(uncasted_rhs);

    try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
    const vec_len: ?usize = if (lhs_ty.zigTypeTag(mod) == .Vector) lhs_ty.vectorLen(mod) else null;
    const result_ty = try sema.resolvePeerTypes(block, src, &.{ uncasted_lhs, uncasted_rhs }, .{
        .override = &[_]?LazySrcLoc{ lhs_src, rhs_src },
    });
    const lhs = try sema.coerce(block, result_ty, uncasted_lhs, lhs_src);
    const rhs = try sema.coerce(block, result_ty, uncasted_rhs, rhs_src);

    return SimdBinOp{
        .len = vec_len,
        .lhs = lhs,
        .rhs = rhs,
        .lhs_val = try sema.resolveMaybeUndefVal(lhs),
        .rhs_val = try sema.resolveMaybeUndefVal(rhs),
        .result_ty = result_ty,
        .scalar_ty = result_ty.scalarType(mod),
    };
}

fn checkVectorizableBinaryOperands(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    lhs_ty: Type,
    rhs_ty: Type,
    lhs_src: LazySrcLoc,
    rhs_src: LazySrcLoc,
) CompileError!void {
    const mod = sema.mod;
    const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(mod);
    const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(mod);
    if (lhs_zig_ty_tag != .Vector and rhs_zig_ty_tag != .Vector) return;

    const lhs_is_vector = switch (lhs_zig_ty_tag) {
        .Vector, .Array => true,
        else => false,
    };
    const rhs_is_vector = switch (rhs_zig_ty_tag) {
        .Vector, .Array => true,
        else => false,
    };

    if (lhs_is_vector and rhs_is_vector) {
        const lhs_len = lhs_ty.arrayLen(mod);
        const rhs_len = rhs_ty.arrayLen(mod);
        if (lhs_len != rhs_len) {
            const msg = msg: {
                const msg = try sema.errMsg(block, src, "vector length mismatch", .{});
                errdefer msg.destroy(sema.gpa);
                try sema.errNote(block, lhs_src, msg, "length {d} here", .{lhs_len});
                try sema.errNote(block, rhs_src, msg, "length {d} here", .{rhs_len});
                break :msg msg;
            };
            return sema.failWithOwnedErrorMsg(msg);
        }
    } else {
        const msg = msg: {
            const msg = try sema.errMsg(block, src, "mixed scalar and vector operands: '{}' and '{}'", .{
                lhs_ty.fmt(mod), rhs_ty.fmt(mod),
            });
            errdefer msg.destroy(sema.gpa);
            if (lhs_is_vector) {
                try sema.errNote(block, lhs_src, msg, "vector here", .{});
                try sema.errNote(block, rhs_src, msg, "scalar here", .{});
            } else {
                try sema.errNote(block, lhs_src, msg, "scalar here", .{});
                try sema.errNote(block, rhs_src, msg, "vector here", .{});
            }
            break :msg msg;
        };
        return sema.failWithOwnedErrorMsg(msg);
    }
}

fn maybeOptionsSrc(sema: *Sema, block: *Block, base_src: LazySrcLoc, wanted: []const u8) LazySrcLoc {
    if (base_src == .unneeded) return .unneeded;
    const mod = sema.mod;
    return mod.optionsSrc(mod.declPtr(block.src_decl), base_src, wanted);
}

fn resolveExportOptions(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    zir_ref: Zir.Inst.Ref,
) CompileError!Module.Export.Options {
    const mod = sema.mod;
    const gpa = sema.gpa;
    const ip = &mod.intern_pool;
    const export_options_ty = try sema.getBuiltinType("ExportOptions");
    const air_ref = try sema.resolveInst(zir_ref);
    const options = try sema.coerce(block, export_options_ty, air_ref, src);

    const name_src = sema.maybeOptionsSrc(block, src, "name");
    const linkage_src = sema.maybeOptionsSrc(block, src, "linkage");
    const section_src = sema.maybeOptionsSrc(block, src, "section");
    const visibility_src = sema.maybeOptionsSrc(block, src, "visibility");

    const name_operand = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, "name"), name_src);
    const name_val = try sema.resolveConstValue(block, name_src, name_operand, "name of exported value must be comptime-known");
    const name_ty = Type.slice_const_u8;
    const name = try name_val.toAllocatedBytes(name_ty, sema.arena, mod);

    const linkage_operand = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, "linkage"), linkage_src);
    const linkage_val = try sema.resolveConstValue(block, linkage_src, linkage_operand, "linkage of exported value must be comptime-known");
    const linkage = mod.toEnum(std.builtin.GlobalLinkage, linkage_val);

    const section_operand = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, "section"), section_src);
    const section_opt_val = try sema.resolveConstValue(block, section_src, section_operand, "linksection of exported value must be comptime-known");
    const section_ty = Type.slice_const_u8;
    const section = if (section_opt_val.optionalValue(mod)) |section_val|
        try section_val.toAllocatedBytes(section_ty, sema.arena, mod)
    else
        null;

    const visibility_operand = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, "visibility"), visibility_src);
    const visibility_val = try sema.resolveConstValue(block, visibility_src, visibility_operand, "visibility of exported value must be comptime-known");
    const visibility = mod.toEnum(std.builtin.SymbolVisibility, visibility_val);

    if (name.len < 1) {
        return sema.fail(block, name_src, "exported symbol name cannot be empty", .{});
    }

    if (visibility != .default and linkage == .Internal) {
        return sema.fail(block, visibility_src, "symbol '{s}' exported with internal linkage has non-default visibility {s}", .{
            name, @tagName(visibility),
        });
    }

    return .{
        .name = try ip.getOrPutString(gpa, name),
        .linkage = linkage,
        .section = try ip.getOrPutStringOpt(gpa, section),
        .visibility = visibility,
    };
}

fn resolveBuiltinEnum(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    zir_ref: Zir.Inst.Ref,
    comptime name: []const u8,
    reason: []const u8,
) CompileError!@field(std.builtin, name) {
    const mod = sema.mod;
    const ty = try sema.getBuiltinType(name);
    const air_ref = try sema.resolveInst(zir_ref);
    const coerced = try sema.coerce(block, ty, air_ref, src);
    const val = try sema.resolveConstValue(block, src, coerced, reason);
    return mod.toEnum(@field(std.builtin, name), val);
}

fn resolveAtomicOrder(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    zir_ref: Zir.Inst.Ref,
    reason: []const u8,
) CompileError!std.builtin.AtomicOrder {
    return sema.resolveBuiltinEnum(block, src, zir_ref, "AtomicOrder", reason);
}

fn resolveAtomicRmwOp(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    zir_ref: Zir.Inst.Ref,
) CompileError!std.builtin.AtomicRmwOp {
    return sema.resolveBuiltinEnum(block, src, zir_ref, "AtomicRmwOp", "@atomicRmw operation must be comptime-known");
}

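// Implements `@cmpxchgWeak` and `@cmpxchgStrong` (selected by `extended.small`).
// The result is an optional: null when the exchange happened, otherwise the
// value observed at the pointer. Illustrative usage:
//
//   var state: u32 = 0;
//   if (@cmpxchgStrong(u32, &state, 0, 1, .SeqCst, .SeqCst)) |observed| {
//       // exchange failed; `observed` holds the current value
//   }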
fn zirCmpxchg(
    sema: *Sema,
    block: *Block,
    extended: Zir.Inst.Extended.InstData,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const extra = sema.code.extraData(Zir.Inst.Cmpxchg, extended.operand).data;
    const air_tag: Air.Inst.Tag = switch (extended.small) {
        0 => .cmpxchg_weak,
        1 => .cmpxchg_strong,
        else => unreachable,
    };
    const src = LazySrcLoc.nodeOffset(extra.node);
    // zig fmt: off
    const elem_ty_src      : LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
    const ptr_src          : LazySrcLoc = .{ .node_offset_builtin_call_arg1 = extra.node };
    const expected_src     : LazySrcLoc = .{ .node_offset_builtin_call_arg2 = extra.node };
    const new_value_src    : LazySrcLoc = .{ .node_offset_builtin_call_arg3 = extra.node };
    const success_order_src: LazySrcLoc = .{ .node_offset_builtin_call_arg4 = extra.node };
    const failure_order_src: LazySrcLoc = .{ .node_offset_builtin_call_arg5 = extra.node };
    // zig fmt: on
    const expected_value = try sema.resolveInst(extra.expected_value);
    const elem_ty = sema.typeOf(expected_value);
    if (elem_ty.zigTypeTag(mod) == .Float) {
        return sema.fail(
            block,
            elem_ty_src,
            "expected bool, integer, enum, or pointer type; found '{}'",
            .{elem_ty.fmt(mod)},
        );
    }
    const uncasted_ptr = try sema.resolveInst(extra.ptr);
    const ptr = try sema.checkAtomicPtrOperand(block, elem_ty, elem_ty_src, uncasted_ptr, ptr_src, false);
    const new_value = try sema.coerce(block, elem_ty, try sema.resolveInst(extra.new_value), new_value_src);
    const success_order = try sema.resolveAtomicOrder(block, success_order_src, extra.success_order, "atomic order of cmpxchg success must be comptime-known");
    const failure_order = try sema.resolveAtomicOrder(block, failure_order_src, extra.failure_order, "atomic order of cmpxchg failure must be comptime-known");

    if (@intFromEnum(success_order) < @intFromEnum(std.builtin.AtomicOrder.Monotonic)) {
        return sema.fail(block, success_order_src, "success atomic ordering must be Monotonic or stricter", .{});
    }
    if (@intFromEnum(failure_order) < @intFromEnum(std.builtin.AtomicOrder.Monotonic)) {
        return sema.fail(block, failure_order_src, "failure atomic ordering must be Monotonic or stricter", .{});
    }
    if (@intFromEnum(failure_order) > @intFromEnum(success_order)) {
        return sema.fail(block, failure_order_src, "failure atomic ordering must be no stricter than success", .{});
    }
    if (failure_order == .Release or failure_order == .AcqRel) {
        return sema.fail(block, failure_order_src, "failure atomic ordering must not be Release or AcqRel", .{});
    }

    const result_ty = try mod.optionalType(elem_ty.toIntern());

    // special case zero bit types
    if ((try sema.typeHasOnePossibleValue(elem_ty)) != null) {
        return Air.internedToRef((try mod.intern(.{ .opt = .{
            .ty = result_ty.toIntern(),
            .val = .none,
        } })));
    }

    const runtime_src = if (try sema.resolveDefinedValue(block, ptr_src, ptr)) |ptr_val| rs: {
        if (try sema.resolveMaybeUndefVal(expected_value)) |expected_val| {
            if (try sema.resolveMaybeUndefVal(new_value)) |new_val| {
                if (expected_val.isUndef(mod) or new_val.isUndef(mod)) {
                    // TODO: this should probably cause the memory stored at the pointer
                    // to become undef as well
                    return mod.undefRef(result_ty);
                }
                const ptr_ty = sema.typeOf(ptr);
                const stored_val = (try sema.pointerDeref(block, ptr_src, ptr_val, ptr_ty)) orelse break :rs ptr_src;
                const result_val = try mod.intern(.{ .opt = .{
                    .ty = result_ty.toIntern(),
                    .val = if (stored_val.eql(expected_val, elem_ty, mod)) blk: {
                        try sema.storePtr(block, src, ptr, new_value);
                        break :blk .none;
                    } else stored_val.toIntern(),
                } });
                return Air.internedToRef(result_val);
            } else break :rs new_value_src;
        } else break :rs expected_src;
    } else ptr_src;

    const flags: u32 = @as(u32, @intFromEnum(success_order)) |
        (@as(u32, @intFromEnum(failure_order)) << 3);

    try sema.requireRuntimeBlock(block, src, runtime_src);
    return block.addInst(.{
        .tag = air_tag,
        .data = .{ .ty_pl = .{
            .ty = Air.internedToRef(result_ty.toIntern()),
            .payload = try sema.addExtra(Air.Cmpxchg{
                .ptr = ptr,
                .expected_value = expected_value,
                .new_value = new_value,
                .flags = flags,
            }),
        } },
    });
}

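/// Analyzes `@splat`: coerces the scalar operand to the destination vector's
/// element type, folding comptime-known scalars into a comptime aggregate.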
fn zirSplat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
    const src = inst_data.src();
    const scalar_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    const dest_ty = try sema.resolveDestType(block, src, extra.lhs, .remove_eu_opt, "@splat");

    if (!dest_ty.isVector(mod)) return sema.fail(block, src, "expected vector type, found '{}'", .{dest_ty.fmt(mod)});

    const operand = try sema.resolveInst(extra.rhs);
    const scalar_ty = dest_ty.childType(mod);
    const scalar = try sema.coerce(block, scalar_ty, operand, scalar_src);
    if (try sema.resolveMaybeUndefVal(scalar)) |scalar_val| {
        if (scalar_val.isUndef(mod)) return mod.undefRef(dest_ty);
        return Air.internedToRef((try sema.splat(dest_ty, scalar_val)).toIntern());
    }

    try sema.requireRuntimeBlock(block, inst_data.src(), scalar_src);
    return block.addTyOp(.splat, dest_ty, scalar);
}

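/// Analyzes `@reduce`: checks the operation against the vector's scalar type,
/// folds comptime-known vectors element by element, and otherwise emits a
/// runtime `reduce` (or `reduce_optimized`) instruction.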
fn zirReduce(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
    const op_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
    const operation = try sema.resolveBuiltinEnum(block, op_src, extra.lhs, "ReduceOp", "@reduce operation must be comptime-known");
    const operand = try sema.resolveInst(extra.rhs);
    const operand_ty = sema.typeOf(operand);
    const mod = sema.mod;

    if (operand_ty.zigTypeTag(mod) != .Vector) {
        return sema.fail(block, operand_src, "expected vector, found '{}'", .{operand_ty.fmt(mod)});
    }

    const scalar_ty = operand_ty.childType(mod);

    // Type-check depending on operation.
    switch (operation) {
        .And, .Or, .Xor => switch (scalar_ty.zigTypeTag(mod)) {
            .Int, .Bool => {},
            else => return sema.fail(block, operand_src, "@reduce operation '{s}' requires integer or boolean operand; found '{}'", .{
                @tagName(operation), operand_ty.fmt(mod),
            }),
        },
        .Min, .Max, .Add, .Mul => switch (scalar_ty.zigTypeTag(mod)) {
            .Int, .Float => {},
            else => return sema.fail(block, operand_src, "@reduce operation '{s}' requires integer or float operand; found '{}'", .{
                @tagName(operation), operand_ty.fmt(mod),
            }),
        },
    }

    const vec_len = operand_ty.vectorLen(mod);
    if (vec_len == 0) {
        // TODO re-evaluate if we should introduce a "neutral value" for some operations,
        // e.g. zero for add and one for mul.
        return sema.fail(block, operand_src, "@reduce operation requires a vector with nonzero length", .{});
    }

    if (try sema.resolveMaybeUndefVal(operand)) |operand_val| {
        if (operand_val.isUndef(mod)) return mod.undefRef(scalar_ty);

        var accum: Value = try operand_val.elemValue(mod, 0);
        var i: u32 = 1;
        while (i < vec_len) : (i += 1) {
            const elem_val = try operand_val.elemValue(mod, i);
            switch (operation) {
                .And => accum = try accum.bitwiseAnd(elem_val, scalar_ty, sema.arena, mod),
                .Or => accum = try accum.bitwiseOr(elem_val, scalar_ty, sema.arena, mod),
                .Xor => accum = try accum.bitwiseXor(elem_val, scalar_ty, sema.arena, mod),
                .Min => accum = accum.numberMin(elem_val, mod),
                .Max => accum = accum.numberMax(elem_val, mod),
                .Add => accum = try sema.numberAddWrapScalar(accum, elem_val, scalar_ty),
                .Mul => accum = try accum.numberMulWrap(elem_val, scalar_ty, sema.arena, mod),
            }
        }
        return Air.internedToRef(accum.toIntern());
    }

    try sema.requireRuntimeBlock(block, inst_data.src(), operand_src);
    return block.addInst(.{
        .tag = if (block.float_mode == .Optimized) .reduce_optimized else .reduce,
        .data = .{ .reduce = .{
            .operand = operand,
            .operation = operation,
        } },
    });
}

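/// Analyzes `@shuffle`: coerces the mask to a vector of `i32` and delegates
/// to `analyzeShuffle` with the comptime-known mask value.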
fn zirShuffle(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const extra = sema.code.extraData(Zir.Inst.Shuffle, inst_data.payload_index).data;
    const elem_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    const mask_src: LazySrcLoc = .{ .node_offset_builtin_call_arg3 = inst_data.src_node };

    const elem_ty = try sema.resolveType(block, elem_ty_src, extra.elem_type);
    try sema.checkVectorElemType(block, elem_ty_src, elem_ty);
    var a = try sema.resolveInst(extra.a);
    var b = try sema.resolveInst(extra.b);
    var mask = try sema.resolveInst(extra.mask);
    var mask_ty = sema.typeOf(mask);

    const mask_len = switch (sema.typeOf(mask).zigTypeTag(mod)) {
        .Array, .Vector => sema.typeOf(mask).arrayLen(mod),
        else => return sema.fail(block, mask_src, "expected vector or array, found '{}'", .{sema.typeOf(mask).fmt(sema.mod)}),
    };
    mask_ty = try mod.vectorType(.{
        .len = @as(u32, @intCast(mask_len)),
        .child = .i32_type,
    });
    mask = try sema.coerce(block, mask_ty, mask, mask_src);
    const mask_val = try sema.resolveConstMaybeUndefVal(block, mask_src, mask, "shuffle mask must be comptime-known");
    return sema.analyzeShuffle(block, inst_data.src_node, elem_ty, a, b, mask_val, @as(u32, @intCast(mask_len)));
}

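/// Implements `@shuffle` given a comptime-known mask. Non-negative mask
/// elements index into `a`; negative elements index into `b` via `~int`.
/// Operands of differing lengths are padded with undef elements through a
/// recursive self-call before the runtime `shuffle` instruction is emitted.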
fn analyzeShuffle(
    sema: *Sema,
    block: *Block,
    src_node: i32,
    elem_ty: Type,
    a_arg: Air.Inst.Ref,
    b_arg: Air.Inst.Ref,
    mask: Value,
    mask_len: u32,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const a_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = src_node };
    const b_src: LazySrcLoc = .{ .node_offset_builtin_call_arg2 = src_node };
    const mask_src: LazySrcLoc = .{ .node_offset_builtin_call_arg3 = src_node };
    var a = a_arg;
    var b = b_arg;

    const res_ty = try mod.vectorType(.{
        .len = mask_len,
        .child = elem_ty.toIntern(),
    });

    var maybe_a_len = switch (sema.typeOf(a).zigTypeTag(mod)) {
        .Array, .Vector => sema.typeOf(a).arrayLen(mod),
        .Undefined => null,
        else => return sema.fail(block, a_src, "expected vector or array with element type '{}', found '{}'", .{
            elem_ty.fmt(sema.mod),
            sema.typeOf(a).fmt(sema.mod),
        }),
    };
    var maybe_b_len = switch (sema.typeOf(b).zigTypeTag(mod)) {
        .Array, .Vector => sema.typeOf(b).arrayLen(mod),
        .Undefined => null,
        else => return sema.fail(block, b_src, "expected vector or array with element type '{}', found '{}'", .{
            elem_ty.fmt(sema.mod),
            sema.typeOf(b).fmt(sema.mod),
        }),
    };
    if (maybe_a_len == null and maybe_b_len == null) {
        return mod.undefRef(res_ty);
    }
    const a_len = @as(u32, @intCast(maybe_a_len orelse maybe_b_len.?));
    const b_len = @as(u32, @intCast(maybe_b_len orelse a_len));

    const a_ty = try mod.vectorType(.{
        .len = a_len,
        .child = elem_ty.toIntern(),
    });
    const b_ty = try mod.vectorType(.{
        .len = b_len,
        .child = elem_ty.toIntern(),
    });

    if (maybe_a_len == null) a = try mod.undefRef(a_ty) else a = try sema.coerce(block, a_ty, a, a_src);
    if (maybe_b_len == null) b = try mod.undefRef(b_ty) else b = try sema.coerce(block, b_ty, b, b_src);

    const operand_info = [2]std.meta.Tuple(&.{ u64, LazySrcLoc, Type }){
        .{ a_len, a_src, a_ty },
        .{ b_len, b_src, b_ty },
    };

    for (0..@as(usize, @intCast(mask_len))) |i| {
        const elem = try mask.elemValue(sema.mod, i);
        if (elem.isUndef(mod)) continue;
        const int = elem.toSignedInt(mod);
        var unsigned: u32 = undefined;
        var chosen: u32 = undefined;
        if (int >= 0) {
            unsigned = @as(u32, @intCast(int));
            chosen = 0;
        } else {
            unsigned = @as(u32, @intCast(~int));
            chosen = 1;
        }
        if (unsigned >= operand_info[chosen][0]) {
            const msg = msg: {
                const msg = try sema.errMsg(block, mask_src, "mask index '{d}' has out-of-bounds selection", .{i});
                errdefer msg.destroy(sema.gpa);

                try sema.errNote(block, operand_info[chosen][1], msg, "selected index '{d}' out of bounds of '{}'", .{
                    unsigned,
                    operand_info[chosen][2].fmt(sema.mod),
                });

                if (chosen == 0) {
                    try sema.errNote(block, b_src, msg, "selections from the second vector are specified with negative numbers", .{});
                }

                break :msg msg;
            };
            return sema.failWithOwnedErrorMsg(msg);
        }
    }

    if (try sema.resolveMaybeUndefVal(a)) |a_val| {
        if (try sema.resolveMaybeUndefVal(b)) |b_val| {
            const values = try sema.arena.alloc(InternPool.Index, mask_len);
            for (values, 0..) |*value, i| {
                const mask_elem_val = try mask.elemValue(sema.mod, i);
                if (mask_elem_val.isUndef(mod)) {
                    value.* = try mod.intern(.{ .undef = elem_ty.toIntern() });
                    continue;
                }
                const int = mask_elem_val.toSignedInt(mod);
                const unsigned = if (int >= 0) @as(u32, @intCast(int)) else @as(u32, @intCast(~int));
                values[i] = try (try (if (int >= 0) a_val else b_val).elemValue(mod, unsigned)).intern(elem_ty, mod);
            }
            return Air.internedToRef((try mod.intern(.{ .aggregate = .{
                .ty = res_ty.toIntern(),
                .storage = .{ .elems = values },
            } })));
        }
    }

    // All static analysis passed, and not comptime.
    // For runtime codegen, vectors a and b must be the same length. Here we
    // recursively @shuffle the smaller vector to append undefined elements
    // to it up to the length of the longer vector. This recursion terminates
    // in 1 call because these calls to analyzeShuffle guarantee a_len == b_len.
    if (a_len != b_len) {
        const min_len = @min(a_len, b_len);
        const max_src = if (a_len > b_len) a_src else b_src;
        const max_len = try sema.usizeCast(block, max_src, @max(a_len, b_len));

        const expand_mask_values = try sema.arena.alloc(InternPool.Index, max_len);
        for (@as(usize, @intCast(0))..@as(usize, @intCast(min_len))) |i| {
            expand_mask_values[i] = (try mod.intValue(Type.comptime_int, i)).toIntern();
        }
        for (@as(usize, @intCast(min_len))..@as(usize, @intCast(max_len))) |i| {
            expand_mask_values[i] = (try mod.intValue(Type.comptime_int, -1)).toIntern();
        }
        const expand_mask = try mod.intern(.{ .aggregate = .{
            .ty = (try mod.vectorType(.{ .len = @as(u32, @intCast(max_len)), .child = .comptime_int_type })).toIntern(),
            .storage = .{ .elems = expand_mask_values },
        } });

        if (a_len < b_len) {
            const undef = try mod.undefRef(a_ty);
            a = try sema.analyzeShuffle(block, src_node, elem_ty, a, undef, expand_mask.toValue(), @as(u32, @intCast(max_len)));
        } else {
            const undef = try mod.undefRef(b_ty);
            b = try sema.analyzeShuffle(block, src_node, elem_ty, b, undef, expand_mask.toValue(), @as(u32, @intCast(max_len)));
        }
    }

    return block.addInst(.{
        .tag = .shuffle,
        .data = .{ .ty_pl = .{
            .ty = Air.internedToRef(res_ty.toIntern()),
            .payload = try block.sema.addExtra(Air.Shuffle{
                .a = a,
                .b = b,
                .mask = mask.toIntern(),
                .mask_len = mask_len,
            }),
        } },
    });
}

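/// Analyzes `@select`: coerces the predicate to a vector of `bool` and both
/// operands to the result vector type, folding when all are comptime-known.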
fn zirSelect(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const extra = sema.code.extraData(Zir.Inst.Select, extended.operand).data;

    const src = LazySrcLoc.nodeOffset(extra.node);
    const elem_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
    const pred_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = extra.node };
    const a_src: LazySrcLoc = .{ .node_offset_builtin_call_arg2 = extra.node };
    const b_src: LazySrcLoc = .{ .node_offset_builtin_call_arg3 = extra.node };

    const elem_ty = try sema.resolveType(block, elem_ty_src, extra.elem_type);
    try sema.checkVectorElemType(block, elem_ty_src, elem_ty);
    const pred_uncoerced = try sema.resolveInst(extra.pred);
    const pred_ty = sema.typeOf(pred_uncoerced);

    const vec_len_u64 = switch (try pred_ty.zigTypeTagOrPoison(mod)) {
        .Vector, .Array => pred_ty.arrayLen(mod),
        else => return sema.fail(block, pred_src, "expected vector or array, found '{}'", .{pred_ty.fmt(mod)}),
    };
    const vec_len = @as(u32, @intCast(try sema.usizeCast(block, pred_src, vec_len_u64)));

    const bool_vec_ty = try mod.vectorType(.{
        .len = vec_len,
        .child = .bool_type,
    });
    const pred = try sema.coerce(block, bool_vec_ty, pred_uncoerced, pred_src);

    const vec_ty = try mod.vectorType(.{
        .len = vec_len,
        .child = elem_ty.toIntern(),
    });
    const a = try sema.coerce(block, vec_ty, try sema.resolveInst(extra.a), a_src);
    const b = try sema.coerce(block, vec_ty, try sema.resolveInst(extra.b), b_src);

    const maybe_pred = try sema.resolveMaybeUndefVal(pred);
    const maybe_a = try sema.resolveMaybeUndefVal(a);
    const maybe_b = try sema.resolveMaybeUndefVal(b);

    const runtime_src = if (maybe_pred) |pred_val| rs: {
        if (pred_val.isUndef(mod)) return mod.undefRef(vec_ty);

        if (maybe_a) |a_val| {
            if (a_val.isUndef(mod)) return mod.undefRef(vec_ty);

            if (maybe_b) |b_val| {
                if (b_val.isUndef(mod)) return mod.undefRef(vec_ty);

                const elems = try sema.gpa.alloc(InternPool.Index, vec_len);
                for (elems, 0..) |*elem, i| {
                    const pred_elem_val = try pred_val.elemValue(mod, i);
                    const should_choose_a = pred_elem_val.toBool();
                    elem.* = try (try (if (should_choose_a) a_val else b_val).elemValue(mod, i)).intern(elem_ty, mod);
                }

                return Air.internedToRef((try mod.intern(.{ .aggregate = .{
                    .ty = vec_ty.toIntern(),
                    .storage = .{ .elems = elems },
                } })));
            } else {
                break :rs b_src;
            }
        } else {
            if (maybe_b) |b_val| {
                if (b_val.isUndef(mod)) return mod.undefRef(vec_ty);
            }
            break :rs a_src;
        }
    } else rs: {
        if (maybe_a) |a_val| {
            if (a_val.isUndef(mod)) return mod.undefRef(vec_ty);
        }
        if (maybe_b) |b_val| {
            if (b_val.isUndef(mod)) return mod.undefRef(vec_ty);
        }
        break :rs pred_src;
    };

    try sema.requireRuntimeBlock(block, src, runtime_src);
    return block.addInst(.{
        .tag = .select,
        .data = .{ .pl_op = .{
            .operand = pred,
            .payload = try block.sema.addExtra(Air.Bin{
                .lhs = a,
                .rhs = b,
            }),
        } },
    });
}

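/// Analyzes `@atomicLoad`: rejects Release/AcqRel orderings and folds loads
/// through comptime-known pointers.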
fn zirAtomicLoad(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const extra = sema.code.extraData(Zir.Inst.AtomicLoad, inst_data.payload_index).data;
    // zig fmt: off
    const elem_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    const ptr_src    : LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
    const order_src  : LazySrcLoc = .{ .node_offset_builtin_call_arg2 = inst_data.src_node };
    // zig fmt: on
    const elem_ty = try sema.resolveType(block, elem_ty_src, extra.elem_type);
    const uncasted_ptr = try sema.resolveInst(extra.ptr);
    const ptr = try sema.checkAtomicPtrOperand(block, elem_ty, elem_ty_src, uncasted_ptr, ptr_src, true);
    const order = try sema.resolveAtomicOrder(block, order_src, extra.ordering, "atomic order of @atomicLoad must be comptime-known");

    switch (order) {
        .Release, .AcqRel => {
            return sema.fail(
                block,
                order_src,
                "@atomicLoad atomic ordering must not be Release or AcqRel",
                .{},
            );
        },
        else => {},
    }

    if (try sema.typeHasOnePossibleValue(elem_ty)) |val| {
        return Air.internedToRef(val.toIntern());
    }

    if (try sema.resolveDefinedValue(block, ptr_src, ptr)) |ptr_val| {
        if (try sema.pointerDeref(block, ptr_src, ptr_val, sema.typeOf(ptr))) |elem_val| {
            return Air.internedToRef(elem_val.toIntern());
        }
    }

    try sema.requireRuntimeBlock(block, inst_data.src(), ptr_src);
    return block.addInst(.{
        .tag = .atomic_load,
        .data = .{ .atomic_load = .{
            .ptr = ptr,
            .order = order,
        } },
    });
}

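/// Analyzes `@atomicRmw`: restricts the operation set for enum, bool, and
/// float operands, folds the operation through comptime-mutable pointers, and
/// otherwise emits an `atomic_rmw` instruction whose `flags` pack the ordering
/// and the operation (operation shifted left by 3).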
fn zirAtomicRmw(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const extra = sema.code.extraData(Zir.Inst.AtomicRmw, inst_data.payload_index).data;
    const src = inst_data.src();
    // zig fmt: off
    const elem_ty_src : LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    const ptr_src     : LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
    const op_src      : LazySrcLoc = .{ .node_offset_builtin_call_arg2 = inst_data.src_node };
    const operand_src : LazySrcLoc = .{ .node_offset_builtin_call_arg3 = inst_data.src_node };
    const order_src   : LazySrcLoc = .{ .node_offset_builtin_call_arg4 = inst_data.src_node };
    // zig fmt: on
    const operand = try sema.resolveInst(extra.operand);
    const elem_ty = sema.typeOf(operand);
    const uncasted_ptr = try sema.resolveInst(extra.ptr);
    const ptr = try sema.checkAtomicPtrOperand(block, elem_ty, elem_ty_src, uncasted_ptr, ptr_src, false);
    const op = try sema.resolveAtomicRmwOp(block, op_src, extra.operation);

    switch (elem_ty.zigTypeTag(mod)) {
        .Enum => if (op != .Xchg) {
            return sema.fail(block, op_src, "@atomicRmw with enum only allowed with .Xchg", .{});
        },
        .Bool => if (op != .Xchg) {
            return sema.fail(block, op_src, "@atomicRmw with bool only allowed with .Xchg", .{});
        },
        .Float => switch (op) {
            .Xchg, .Add, .Sub, .Max, .Min => {},
            else => return sema.fail(block, op_src, "@atomicRmw with float only allowed with .Xchg, .Add, .Sub, .Max, and .Min", .{}),
        },
        else => {},
    }
    const order = try sema.resolveAtomicOrder(block, order_src, extra.ordering, "atomic order of @atomicRmw must be comptime-known");

    if (order == .Unordered) {
        return sema.fail(block, order_src, "@atomicRmw atomic ordering must not be Unordered", .{});
    }

    // special case zero bit types
    if (try sema.typeHasOnePossibleValue(elem_ty)) |val| {
        return Air.internedToRef(val.toIntern());
    }

    const runtime_src = if (try sema.resolveDefinedValue(block, ptr_src, ptr)) |ptr_val| rs: {
        const maybe_operand_val = try sema.resolveMaybeUndefVal(operand);
        const operand_val = maybe_operand_val orelse {
            try sema.checkPtrIsNotComptimeMutable(block, ptr_val, ptr_src, operand_src);
            break :rs operand_src;
        };
        if (ptr_val.isComptimeMutablePtr(mod)) {
            const ptr_ty = sema.typeOf(ptr);
            const stored_val = (try sema.pointerDeref(block, ptr_src, ptr_val, ptr_ty)) orelse break :rs ptr_src;
            const new_val = switch (op) {
                // zig fmt: off
                .Xchg => operand_val,
                .Add  => try sema.numberAddWrapScalar(stored_val, operand_val, elem_ty),
                .Sub  => try sema.numberSubWrapScalar(stored_val, operand_val, elem_ty),
                .And  => try stored_val.bitwiseAnd   (operand_val, elem_ty, sema.arena, mod),
                .Nand => try stored_val.bitwiseNand  (operand_val, elem_ty, sema.arena, mod),
                .Or   => try stored_val.bitwiseOr    (operand_val, elem_ty, sema.arena, mod),
                .Xor  => try stored_val.bitwiseXor   (operand_val, elem_ty, sema.arena, mod),
                .Max  => stored_val.numberMax        (operand_val, mod),
                .Min  => stored_val.numberMin        (operand_val, mod),
                // zig fmt: on
            };
            try sema.storePtrVal(block, src, ptr_val, new_val, elem_ty);
            return Air.internedToRef(stored_val.toIntern());
        } else break :rs ptr_src;
    } else ptr_src;

    const flags: u32 = @as(u32, @intFromEnum(order)) | (@as(u32, @intFromEnum(op)) << 3);

    try sema.requireRuntimeBlock(block, src, runtime_src);
    return block.addInst(.{
        .tag = .atomic_rmw,
        .data = .{ .pl_op = .{
            .operand = ptr,
            .payload = try sema.addExtra(Air.AtomicRmw{
                .operand = operand,
                .flags = flags,
            }),
        } },
    });
}

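/// Analyzes `@atomicStore`: rejects Acquire/AcqRel orderings and lowers each
/// remaining ordering to its dedicated AIR store instruction.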
fn zirAtomicStore(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const extra = sema.code.extraData(Zir.Inst.AtomicStore, inst_data.payload_index).data;
    const src = inst_data.src();
    // zig fmt: off
    const elem_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    const ptr_src    : LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
    const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg2 = inst_data.src_node };
    const order_src  : LazySrcLoc = .{ .node_offset_builtin_call_arg3 = inst_data.src_node };
    // zig fmt: on
    const operand = try sema.resolveInst(extra.operand);
    const elem_ty = sema.typeOf(operand);
    const uncasted_ptr = try sema.resolveInst(extra.ptr);
    const ptr = try sema.checkAtomicPtrOperand(block, elem_ty, elem_ty_src, uncasted_ptr, ptr_src, false);
    const order = try sema.resolveAtomicOrder(block, order_src, extra.ordering, "atomic order of @atomicStore must be comptime-known");

    const air_tag: Air.Inst.Tag = switch (order) {
        .Acquire, .AcqRel => {
            return sema.fail(
                block,
                order_src,
                "@atomicStore atomic ordering must not be Acquire or AcqRel",
                .{},
            );
        },
        .Unordered => .atomic_store_unordered,
        .Monotonic => .atomic_store_monotonic,
        .Release => .atomic_store_release,
        .SeqCst => .atomic_store_seq_cst,
    };

    return sema.storePtr2(block, src, ptr, ptr_src, operand, operand_src, air_tag);
}

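/// Analyzes `@mulAdd`: coerces both mulends to the addend's float (or vector
/// of float) type and folds when all three operands are comptime-known.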
fn zirMulAdd(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const extra = sema.code.extraData(Zir.Inst.MulAdd, inst_data.payload_index).data;
    const src = inst_data.src();

    const mulend1_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
    const mulend2_src: LazySrcLoc = .{ .node_offset_builtin_call_arg2 = inst_data.src_node };
    const addend_src: LazySrcLoc = .{ .node_offset_builtin_call_arg3 = inst_data.src_node };

    const addend = try sema.resolveInst(extra.addend);
    const ty = sema.typeOf(addend);
    const mulend1 = try sema.coerce(block, ty, try sema.resolveInst(extra.mulend1), mulend1_src);
    const mulend2 = try sema.coerce(block, ty, try sema.resolveInst(extra.mulend2), mulend2_src);

    const maybe_mulend1 = try sema.resolveMaybeUndefVal(mulend1);
    const maybe_mulend2 = try sema.resolveMaybeUndefVal(mulend2);
    const maybe_addend = try sema.resolveMaybeUndefVal(addend);
    const mod = sema.mod;

    switch (ty.scalarType(mod).zigTypeTag(mod)) {
        .ComptimeFloat, .Float => {},
        else => return sema.fail(block, src, "expected vector of floats or float type, found '{}'", .{ty.fmt(sema.mod)}),
    }

    const runtime_src = if (maybe_mulend1) |mulend1_val| rs: {
        if (maybe_mulend2) |mulend2_val| {
            if (mulend2_val.isUndef(mod)) return mod.undefRef(ty);

            if (maybe_addend) |addend_val| {
                if (addend_val.isUndef(mod)) return mod.undefRef(ty);
                const result_val = try Value.mulAdd(ty, mulend1_val, mulend2_val, addend_val, sema.arena, sema.mod);
                return Air.internedToRef(result_val.toIntern());
            } else {
                break :rs addend_src;
            }
        } else {
            if (maybe_addend) |addend_val| {
                if (addend_val.isUndef(mod)) return mod.undefRef(ty);
            }
            break :rs mulend2_src;
        }
    } else rs: {
        if (maybe_mulend2) |mulend2_val| {
            if (mulend2_val.isUndef(mod)) return mod.undefRef(ty);
        }
        if (maybe_addend) |addend_val| {
            if (addend_val.isUndef(mod)) return mod.undefRef(ty);
        }
        break :rs mulend1_src;
    };

    try sema.requireRuntimeBlock(block, src, runtime_src);
    return block.addInst(.{
        .tag = .mul_add,
        .data = .{ .pl_op = .{
            .operand = addend,
            .payload = try sema.addExtra(Air.Bin{
                .lhs = mulend1,
                .rhs = mulend2,
            }),
        } },
    });
}

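/// Analyzes `@call`: resolves the comptime-known call modifier, upgrading or
/// rejecting it based on the enclosing block, unpacks the argument tuple, and
/// dispatches to `analyzeCall`.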
fn zirBuiltinCall(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const modifier_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    const func_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
    const args_src: LazySrcLoc = .{ .node_offset_builtin_call_arg2 = inst_data.src_node };
    const call_src = inst_data.src();

    const extra = sema.code.extraData(Zir.Inst.BuiltinCall, inst_data.payload_index).data;
    var func = try sema.resolveInst(extra.callee);

    const modifier_ty = try sema.getBuiltinType("CallModifier");
    const air_ref = try sema.resolveInst(extra.modifier);
    const modifier_ref = try sema.coerce(block, modifier_ty, air_ref, modifier_src);
    const modifier_val = try sema.resolveConstValue(block, modifier_src, modifier_ref, "call modifier must be comptime-known");
    var modifier = mod.toEnum(std.builtin.CallModifier, modifier_val);
    switch (modifier) {
        // These can be upgraded to comptime or nosuspend calls.
        .auto, .never_tail, .no_async => {
            if (block.is_comptime) {
                if (modifier == .never_tail) {
                    return sema.fail(block, modifier_src, "unable to perform 'never_tail' call at compile-time", .{});
                }
                modifier = .compile_time;
            } else if (extra.flags.is_nosuspend) {
                modifier = .no_async;
            }
        },
        // These can be upgraded to comptime. nosuspend bit can be safely ignored.
        .always_inline, .compile_time => {
            _ = (try sema.resolveDefinedValue(block, func_src, func)) orelse {
                return sema.fail(block, func_src, "modifier '{s}' requires a comptime-known function", .{@tagName(modifier)});
            };

            if (block.is_comptime) {
                modifier = .compile_time;
            }
        },
        .always_tail => {
            if (block.is_comptime) {
                modifier = .compile_time;
            }
        },
        .async_kw => {
            if (extra.flags.is_nosuspend) {
                return sema.fail(block, modifier_src, "modifier 'async_kw' cannot be used inside nosuspend block", .{});
            }
            if (block.is_comptime) {
                return sema.fail(block, modifier_src, "modifier 'async_kw' cannot be used in combination with comptime function call", .{});
            }
        },
        .never_inline => {
            if (block.is_comptime) {
                return sema.fail(block, modifier_src, "unable to perform 'never_inline' call at compile-time", .{});
            }
        },
    }

    const args = try sema.resolveInst(extra.args);

    const args_ty = sema.typeOf(args);
    if (!args_ty.isTuple(mod) and args_ty.toIntern() != .empty_struct_type) {
        return sema.fail(block, args_src, "expected a tuple, found '{}'", .{args_ty.fmt(sema.mod)});
    }

    var resolved_args: []Air.Inst.Ref = try sema.arena.alloc(Air.Inst.Ref, args_ty.structFieldCount(mod));
    for (resolved_args, 0..) |*resolved, i| {
        resolved.* = try sema.tupleFieldValByIndex(block, args_src, args, @as(u32, @intCast(i)), args_ty);
    }

    const callee_ty = sema.typeOf(func);
    const func_ty = try sema.checkCallArgumentCount(block, func, func_src, callee_ty, resolved_args.len, false);
    const ensure_result_used = extra.flags.ensure_result_used;
    return sema.analyzeCall(
        block,
        func,
        func_ty,
        func_src,
        call_src,
        modifier,
        ensure_result_used,
        .{ .call_builtin = .{
            .call_node_offset = inst_data.src_node,
            .args = resolved_args,
        } },
        null,
        .@"@call",
    );
}

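/// Analyzes `@fieldParentPtr`: validates the parent struct or union type and
/// field name, coerces the field pointer to the expected field pointer type
/// (carrying alignment, constness, and address space), and recovers the parent
/// pointer either at comptime or via a `field_parent_ptr` instruction.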
fn zirFieldParentPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const extra = sema.code.extraData(Zir.Inst.FieldParentPtr, inst_data.payload_index).data;
    const src = inst_data.src();
    const ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    const name_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
    const ptr_src: LazySrcLoc = .{ .node_offset_builtin_call_arg2 = inst_data.src_node };

    const parent_ty = try sema.resolveType(block, ty_src, extra.parent_type);
    const field_name = try sema.resolveConstStringIntern(block, name_src, extra.field_name, "field name must be comptime-known");
    const field_ptr = try sema.resolveInst(extra.field_ptr);
    const field_ptr_ty = sema.typeOf(field_ptr);
    const mod = sema.mod;
    const ip = &mod.intern_pool;

    if (parent_ty.zigTypeTag(mod) != .Struct and parent_ty.zigTypeTag(mod) != .Union) {
        return sema.fail(block, ty_src, "expected struct or union type, found '{}'", .{parent_ty.fmt(sema.mod)});
    }
    try sema.resolveTypeLayout(parent_ty);

    const field_index = switch (parent_ty.zigTypeTag(mod)) {
        .Struct => blk: {
            if (parent_ty.isTuple(mod)) {
                if (ip.stringEqlSlice(field_name, "len")) {
                    return sema.fail(block, src, "cannot get @fieldParentPtr of 'len' field of tuple", .{});
                }
                break :blk try sema.tupleFieldIndex(block, parent_ty, field_name, name_src);
            } else {
                break :blk try sema.structFieldIndex(block, parent_ty, field_name, name_src);
            }
        },
        .Union => try sema.unionFieldIndex(block, parent_ty, field_name, name_src),
        else => unreachable,
    };

    if (parent_ty.zigTypeTag(mod) == .Struct and parent_ty.structFieldIsComptime(field_index, mod)) {
        return sema.fail(block, src, "cannot get @fieldParentPtr of a comptime field", .{});
    }

    try sema.checkPtrOperand(block, ptr_src, field_ptr_ty);
    const field_ptr_ty_info = field_ptr_ty.ptrInfo(mod);

    var ptr_ty_data: InternPool.Key.PtrType = .{
        .child = parent_ty.structFieldType(field_index, mod).toIntern(),
        .flags = .{
            .address_space = field_ptr_ty_info.flags.address_space,
            .is_const = field_ptr_ty_info.flags.is_const,
        },
    };

    if (parent_ty.containerLayout(mod) == .Packed) {
        return sema.fail(block, src, "TODO handle packed structs/unions with @fieldParentPtr", .{});
    } else {
        ptr_ty_data.flags.alignment = blk: {
            if (mod.typeToStruct(parent_ty)) |struct_obj| {
                break :blk struct_obj.fields.values()[field_index].abi_align;
            } else if (mod.typeToUnion(parent_ty)) |union_obj| {
                break :blk union_obj.fields.values()[field_index].abi_align;
            } else {
                break :blk .none;
            }
        };
    }

    const actual_field_ptr_ty = try mod.ptrType(ptr_ty_data);
    const casted_field_ptr = try sema.coerce(block, actual_field_ptr_ty, field_ptr, ptr_src);

    ptr_ty_data.child = parent_ty.toIntern();
    const result_ptr = try mod.ptrType(ptr_ty_data);

    if (try sema.resolveDefinedValue(block, src, casted_field_ptr)) |field_ptr_val| {
        const field = switch (ip.indexToKey(field_ptr_val.toIntern())) {
            .ptr => |ptr| switch (ptr.addr) {
                .field => |field| field,
                else => null,
            },
            else => null,
        } orelse return sema.fail(block, ptr_src, "pointer value not based on parent struct", .{});

        if (field.index != field_index) {
            const msg = msg: {
                const msg = try sema.errMsg(
                    block,
                    src,
                    "field '{}' has index '{d}' but pointer value is index '{d}' of struct '{}'",
                    .{
                        field_name.fmt(ip),
                        field_index,
                        field.index,
                        parent_ty.fmt(sema.mod),
                    },
                );
                errdefer msg.destroy(sema.gpa);
                try sema.addDeclaredHereNote(msg, parent_ty);
                break :msg msg;
            };
            return sema.failWithOwnedErrorMsg(msg);
        }
        return Air.internedToRef(field.base);
    }

    try sema.requireRuntimeBlock(block, src, ptr_src);
    try sema.queueFullTypeResolution(result_ptr);
    return block.addInst(.{
        .tag = .field_parent_ptr,
        .data = .{ .ty_pl = .{
            .ty = Air.internedToRef(result_ptr.toIntern()),
            .payload = try block.sema.addExtra(Air.FieldParentPtr{
                .field_ptr = casted_field_ptr,
                .field_index = @as(u32, @intCast(field_index)),
            }),
        } },
    });
}

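/// Analyzes the two-argument form of `@min`/`@max`; `air_tag` selects which.
/// Checks that both operands are numeric, then forwards to `analyzeMinMax`.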
fn zirMinMax(
    sema: *Sema,
    block: *Block,
    inst: Zir.Inst.Index,
    comptime air_tag: Air.Inst.Tag,
) CompileError!Air.Inst.Ref {
    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
    const src = inst_data.src();
    const lhs_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    const rhs_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
    const lhs = try sema.resolveInst(extra.lhs);
    const rhs = try sema.resolveInst(extra.rhs);
    try sema.checkNumericType(block, lhs_src, sema.typeOf(lhs));
    try sema.checkNumericType(block, rhs_src, sema.typeOf(rhs));
    return sema.analyzeMinMax(block, src, air_tag, &.{ lhs, rhs }, &.{ lhs_src, rhs_src });
}

fn zirMinMaxMulti(
    sema: *Sema,
    block: *Block,
    extended: Zir.Inst.Extended.InstData,
    comptime air_tag: Air.Inst.Tag,
) CompileError!Air.Inst.Ref {
    const extra = sema.code.extraData(Zir.Inst.NodeMultiOp, extended.operand);
    const src_node = extra.data.src_node;
    const src = LazySrcLoc.nodeOffset(src_node);
    const operands = sema.code.refSlice(extra.end, extended.small);

    const air_refs = try sema.arena.alloc(Air.Inst.Ref, operands.len);
    const operand_srcs = try sema.arena.alloc(LazySrcLoc, operands.len);

    for (operands, air_refs, operand_srcs, 0..) |zir_ref, *air_ref, *op_src, i| {
        op_src.* = switch (i) {
            0 => .{ .node_offset_builtin_call_arg0 = src_node },
            1 => .{ .node_offset_builtin_call_arg1 = src_node },
            2 => .{ .node_offset_builtin_call_arg2 = src_node },
            3 => .{ .node_offset_builtin_call_arg3 = src_node },
            4 => .{ .node_offset_builtin_call_arg4 = src_node },
            5 => .{ .node_offset_builtin_call_arg5 = src_node },
            else => src, // TODO: better source location
        };
        air_ref.* = try sema.resolveInst(zir_ref);
        try sema.checkNumericType(block, op_src.*, sema.typeOf(air_ref.*));
    }

    return sema.analyzeMinMax(block, src, air_tag, air_refs, operand_srcs);
}

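/// Shared implementation of `@min` and `@max`: folds all comptime-known
/// operands first, then chains binary min/max instructions over the runtime
/// operands, and finally refines integer result types to the smallest type
/// that fits the known scalar bounds.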
fn analyzeMinMax(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    comptime air_tag: Air.Inst.Tag,
    operands: []const Air.Inst.Ref,
    operand_srcs: []const LazySrcLoc,
) CompileError!Air.Inst.Ref {
    assert(operands.len == operand_srcs.len);
    assert(operands.len > 0);
    const mod = sema.mod;

    if (operands.len == 1) return operands[0];

    const opFunc = switch (air_tag) {
        .min => Value.numberMin,
        .max => Value.numberMax,
        else => @compileError("unreachable"),
    };

    // The set of runtime-known operands. Set up in the loop below.
    var runtime_known = try std.DynamicBitSet.initFull(sema.arena, operands.len);
    // The current minmax value - initially this will always be comptime-known, then we'll add
    // runtime values into the mix later.
    var cur_minmax: ?Air.Inst.Ref = null;
    var cur_minmax_src: LazySrcLoc = undefined; // defined if cur_minmax not null
    // The current known scalar bounds of the value.
    var bounds_status: enum {
        unknown, // We've only seen undef comptime_ints so far, so do not know the bounds.
        defined, // We've seen only integers, so the bounds are defined.
        non_integral, // There are floats in the mix, so the bounds aren't defined.
    } = .unknown;
    var cur_min_scalar: Value = undefined;
    var cur_max_scalar: Value = undefined;

    // First, find all comptime-known arguments, and get their min/max

    for (operands, operand_srcs, 0..) |operand, operand_src, operand_idx| {
        // Resolve the value now to avoid redundant calls to `checkSimdBinOp` - we'll have to call
        // it in the runtime path anyway since the result type may have been refined
        const unresolved_uncoerced_val = try sema.resolveMaybeUndefVal(operand) orelse continue;
        const uncoerced_val = try sema.resolveLazyValue(unresolved_uncoerced_val);

        runtime_known.unset(operand_idx);

        switch (bounds_status) {
            .unknown, .defined => refine_bounds: {
                const ty = sema.typeOf(operand);
                if (!ty.scalarType(mod).isInt(mod) and !ty.scalarType(mod).eql(Type.comptime_int, mod)) {
                    bounds_status = .non_integral;
                    break :refine_bounds;
                }
                const scalar_bounds: ?[2]Value = bounds: {
                    if (!ty.isVector(mod)) break :bounds try uncoerced_val.intValueBounds(mod);
                    var cur_bounds: [2]Value = try Value.intValueBounds(try uncoerced_val.elemValue(mod, 0), mod) orelse break :bounds null;
                    const len = try sema.usizeCast(block, src, ty.vectorLen(mod));
                    for (1..len) |i| {
                        const elem = try uncoerced_val.elemValue(mod, i);
                        const elem_bounds = try elem.intValueBounds(mod) orelse break :bounds null;
                        cur_bounds = .{
                            Value.numberMin(elem_bounds[0], cur_bounds[0], mod),
                            Value.numberMax(elem_bounds[1], cur_bounds[1], mod),
                        };
                    }
                    break :bounds cur_bounds;
                };
                if (scalar_bounds) |bounds| {
                    if (bounds_status == .unknown) {
                        cur_min_scalar = bounds[0];
                        cur_max_scalar = bounds[1];
                        bounds_status = .defined;
                    } else {
                        cur_min_scalar = opFunc(cur_min_scalar, bounds[0], mod);
                        cur_max_scalar = opFunc(cur_max_scalar, bounds[1], mod);
                    }
                }
            },
            .non_integral => {},
        }

        const cur = cur_minmax orelse {
            cur_minmax = operand;
            cur_minmax_src = operand_src;
            continue;
        };

        const simd_op = try sema.checkSimdBinOp(block, src, cur, operand, cur_minmax_src, operand_src);
        const cur_val = try sema.resolveLazyValue(simd_op.lhs_val.?); // cur_minmax is comptime-known
        const operand_val = try sema.resolveLazyValue(simd_op.rhs_val.?); // we checked the operand was resolvable above

        const vec_len = simd_op.len orelse {
            const result_val = opFunc(cur_val, operand_val, mod);
            cur_minmax = Air.internedToRef(result_val.toIntern());
            continue;
        };
        const elems = try sema.arena.alloc(InternPool.Index, vec_len);
        for (elems, 0..) |*elem, i| {
            const lhs_elem_val = try cur_val.elemValue(mod, i);
            const rhs_elem_val = try operand_val.elemValue(mod, i);
            const uncoerced_elem = opFunc(lhs_elem_val, rhs_elem_val, mod);
            elem.* = (try mod.getCoerced(uncoerced_elem, simd_op.scalar_ty)).toIntern();
        }
        cur_minmax = Air.internedToRef((try mod.intern(.{ .aggregate = .{
            .ty = simd_op.result_ty.toIntern(),
            .storage = .{ .elems = elems },
        } })));
    }

    const opt_runtime_idx = runtime_known.findFirstSet();

    if (cur_minmax) |ct_minmax_ref| refine: {
        // Refine the comptime-known result type based on the bounds. This isn't strictly necessary
        // in the runtime case, since we'll refine the type again later, but keeping things as small
        // as possible will allow us to emit more optimal AIR (if all the runtime operands have
        // smaller types than the non-refined comptime type).

        const val = (try sema.resolveMaybeUndefVal(ct_minmax_ref)).?;
        const orig_ty = sema.typeOf(ct_minmax_ref);

        if (opt_runtime_idx == null and orig_ty.scalarType(mod).eql(Type.comptime_int, mod)) {
            // If all arguments were `comptime_int`, and there are no runtime args, we'll preserve that type
            break :refine;
        }

        // We can't refine float types
        if (orig_ty.scalarType(mod).isAnyFloat()) break :refine;

        assert(bounds_status == .defined); // there was a non-comptime-int integral comptime-known arg

        const refined_scalar_ty = try mod.intFittingRange(cur_min_scalar, cur_max_scalar);
        const refined_ty = if (orig_ty.isVector(mod)) try mod.vectorType(.{
            .len = orig_ty.vectorLen(mod),
            .child = refined_scalar_ty.toIntern(),
        }) else refined_scalar_ty;

        // Apply the refined type to the current value
        if (std.debug.runtime_safety) {
            assert(try sema.intFitsInType(val, refined_ty, null));
        }
        cur_minmax = try sema.coerceInMemory(val, refined_ty);
    }

    const runtime_idx = opt_runtime_idx orelse return cur_minmax.?;
    const runtime_src = operand_srcs[runtime_idx];
    try sema.requireRuntimeBlock(block, src, runtime_src);

    // Now, iterate over runtime operands, emitting a min/max instruction for each. We'll refine the
    // type again at the end, based on the comptime-known bound.

    // If the comptime-known part is undef we can avoid emitting actual instructions later
    const known_undef = if (cur_minmax) |operand| blk: {
        const val = (try sema.resolveMaybeUndefVal(operand)).?;
        break :blk val.isUndef(mod);
    } else false;

    if (cur_minmax == null) {
        // No comptime operands - use the first operand as the starting value
        assert(bounds_status == .unknown);
        assert(runtime_idx == 0);
        cur_minmax = operands[0];
        cur_minmax_src = runtime_src;
        runtime_known.unset(0); // don't look at this operand in the loop below
        const scalar_ty = sema.typeOf(cur_minmax.?).scalarType(mod);
        if (scalar_ty.isInt(mod)) {
            cur_min_scalar = try scalar_ty.minInt(mod, scalar_ty);
            cur_max_scalar = try scalar_ty.maxInt(mod, scalar_ty);
            bounds_status = .defined;
        } else {
            bounds_status = .non_integral;
        }
    }

    var it = runtime_known.iterator(.{});
    while (it.next()) |idx| {
        const lhs = cur_minmax.?;
        const lhs_src = cur_minmax_src;
        const rhs = operands[idx];
        const rhs_src = operand_srcs[idx];
        const simd_op = try sema.checkSimdBinOp(block, src, lhs, rhs, lhs_src, rhs_src);
        if (known_undef) {
            cur_minmax = try mod.undefRef(simd_op.result_ty);
        } else {
            cur_minmax = try block.addBinOp(air_tag, simd_op.lhs, simd_op.rhs);
        }
        // Compute the bounds of this type
        switch (bounds_status) {
            .unknown, .defined => refine_bounds: {
                const scalar_ty = sema.typeOf(rhs).scalarType(mod);
                if (scalar_ty.isAnyFloat()) {
                    bounds_status = .non_integral;
                    break :refine_bounds;
                }
                const scalar_min = try scalar_ty.minInt(mod, scalar_ty);
                const scalar_max = try scalar_ty.maxInt(mod, scalar_ty);
                if (bounds_status == .unknown) {
                    cur_min_scalar = scalar_min;
                    cur_max_scalar = scalar_max;
                    bounds_status = .defined;
                } else {
                    cur_min_scalar = opFunc(cur_min_scalar, scalar_min, mod);
                    cur_max_scalar = opFunc(cur_max_scalar, scalar_max, mod);
                }
            },
            .non_integral => {},
        }
    }

    // Finally, refine the type based on the known bounds.
    const unrefined_ty = sema.typeOf(cur_minmax.?);
    if (unrefined_ty.scalarType(mod).isAnyFloat()) {
        // We can't refine floats, so we're done.
        return cur_minmax.?;
    }
    assert(bounds_status == .defined); // there were integral runtime operands
    const refined_scalar_ty = try mod.intFittingRange(cur_min_scalar, cur_max_scalar);
    const refined_ty = if (unrefined_ty.isVector(mod)) try mod.vectorType(.{
        .len = unrefined_ty.vectorLen(mod),
        .child = refined_scalar_ty.toIntern(),
    }) else refined_scalar_ty;

    if (!refined_ty.eql(unrefined_ty, mod)) {
        // We've reduced the type - cast the result down
        return block.addTyOp(.intcast, refined_ty, cur_minmax.?);
    }

    return cur_minmax.?;
}

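/// Converts a slice or many-pointer into a pointer to an array of the given
/// comptime-known length, preserving the original pointer attributes.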
fn upgradeToArrayPtr(sema: *Sema, block: *Block, ptr: Air.Inst.Ref, len: u64) !Air.Inst.Ref {
    const mod = sema.mod;
    const info = sema.typeOf(ptr).ptrInfo(mod);
    if (info.flags.size == .One) {
        // Already an array pointer.
        return ptr;
    }
    const new_ty = try mod.ptrType(.{
        .child = (try mod.arrayType(.{
            .len = len,
            .sentinel = info.sentinel,
            .child = info.child,
        })).toIntern(),
        .flags = .{
            .alignment = info.flags.alignment,
            .is_const = info.flags.is_const,
            .is_volatile = info.flags.is_volatile,
            .is_allowzero = info.flags.is_allowzero,
            .address_space = info.flags.address_space,
        },
    });
    if (info.flags.size == .Slice) {
        return block.addTyOp(.slice_ptr, new_ty, ptr);
    }
    return block.addBitCast(new_ty, ptr);
}

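/// Analyzes `@memcpy`: requires at least one operand to provide a length,
/// verifies matching lengths (at comptime when possible, with a runtime safety
/// check otherwise), performs comptime element-wise copies through
/// comptime-mutable pointers, and otherwise emits an aliasing safety check
/// followed by a `memcpy` instruction.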
fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
    const src = inst_data.src();
    const dest_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    const src_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
    const dest_ptr = try sema.resolveInst(extra.lhs);
    const src_ptr = try sema.resolveInst(extra.rhs);
    const dest_ty = sema.typeOf(dest_ptr);
    const src_ty = sema.typeOf(src_ptr);
    const dest_len = try indexablePtrLenOrNone(sema, block, dest_src, dest_ptr);
    const src_len = try indexablePtrLenOrNone(sema, block, src_src, src_ptr);
    const target = sema.mod.getTarget();
    const mod = sema.mod;

    if (dest_ty.isConstPtr(mod)) {
        return sema.fail(block, dest_src, "cannot memcpy to constant pointer", .{});
    }

    if (dest_len == .none and src_len == .none) {
        const msg = msg: {
            const msg = try sema.errMsg(block, src, "unknown @memcpy length", .{});
            errdefer msg.destroy(sema.gpa);
            try sema.errNote(block, dest_src, msg, "destination type '{}' provides no length", .{
                dest_ty.fmt(sema.mod),
            });
            try sema.errNote(block, src_src, msg, "source type '{}' provides no length", .{
                src_ty.fmt(sema.mod),
            });
            break :msg msg;
        };
        return sema.failWithOwnedErrorMsg(msg);
    }

    var len_val: ?Value = null;

    if (dest_len != .none and src_len != .none) check: {
        // If we can check at compile-time, no need for runtime safety.
        if (try sema.resolveDefinedValue(block, dest_src, dest_len)) |dest_len_val| {
            len_val = dest_len_val;
            if (try sema.resolveDefinedValue(block, src_src, src_len)) |src_len_val| {
                if (!(try sema.valuesEqual(dest_len_val, src_len_val, Type.usize))) {
                    const msg = msg: {
                        const msg = try sema.errMsg(block, src, "non-matching @memcpy lengths", .{});
                        errdefer msg.destroy(sema.gpa);
                        try sema.errNote(block, dest_src, msg, "length {} here", .{
                            dest_len_val.fmtValue(Type.usize, sema.mod),
                        });
                        try sema.errNote(block, src_src, msg, "length {} here", .{
                            src_len_val.fmtValue(Type.usize, sema.mod),
                        });
                        break :msg msg;
                    };
                    return sema.failWithOwnedErrorMsg(msg);
                }
                break :check;
            }
        } else if (try sema.resolveDefinedValue(block, src_src, src_len)) |src_len_val| {
            len_val = src_len_val;
        }

        if (block.wantSafety()) {
            const ok = try block.addBinOp(.cmp_eq, dest_len, src_len);
            try sema.addSafetyCheck(block, src, ok, .memcpy_len_mismatch);
        }
    } else if (dest_len != .none) {
        if (try sema.resolveDefinedValue(block, dest_src, dest_len)) |dest_len_val| {
            len_val = dest_len_val;
        }
    } else if (src_len != .none) {
        if (try sema.resolveDefinedValue(block, src_src, src_len)) |src_len_val| {
            len_val = src_len_val;
        }
    }

    const runtime_src = if (try sema.resolveDefinedValue(block, dest_src, dest_ptr)) |dest_ptr_val| rs: {
        if (!dest_ptr_val.isComptimeMutablePtr(mod)) break :rs dest_src;
        if (try sema.resolveDefinedValue(block, src_src, src_ptr)) |_| {
            const len_u64 = (try len_val.?.getUnsignedIntAdvanced(mod, sema)).?;
            const len = try sema.usizeCast(block, dest_src, len_u64);
            for (0..len) |i| {
                const elem_index = try mod.intRef(Type.usize, i);
                const dest_elem_ptr = try sema.elemPtrOneLayerOnly(
                    block,
                    src,
                    dest_ptr,
                    elem_index,
                    src,
                    true, // init
                    false, // oob_safety
                );
                const src_elem_ptr = try sema.elemPtrOneLayerOnly(
                    block,
                    src,
                    src_ptr,
                    elem_index,
                    src,
                    false, // init
                    false, // oob_safety
                );
                const uncoerced_elem = try sema.analyzeLoad(block, src, src_elem_ptr, src_src);
                try sema.storePtr2(
                    block,
                    src,
                    dest_elem_ptr,
                    dest_src,
                    uncoerced_elem,
                    src_src,
                    .store,
                );
            }
            return;
        } else break :rs src_src;
    } else dest_src;

    // If in-memory coercion is not allowed, explode this memcpy call into a
    // for loop that copies element-wise.
    // Likewise if this is an iterable rather than a pointer, do the same
    // lowering. The AIR instruction requires pointers with element types of
    // equal ABI size.

    if (dest_ty.zigTypeTag(mod) != .Pointer or src_ty.zigTypeTag(mod) != .Pointer) {
        return sema.fail(block, src, "TODO: lower @memcpy to a for loop because the source or destination iterable is a tuple", .{});
    }

    const dest_elem_ty = dest_ty.elemType2(mod);
    const src_elem_ty = src_ty.elemType2(mod);
    if (.ok != try sema.coerceInMemoryAllowed(block, dest_elem_ty, src_elem_ty, true, target, dest_src, src_src)) {
        return sema.fail(block, src, "TODO: lower @memcpy to a for loop because the element types have different ABI sizes", .{});
    }

    // If the length is comptime-known, then upgrade src and destination types
    // into pointer-to-array. At this point we know they are both pointers
    // already.
    var new_dest_ptr = dest_ptr;
    var new_src_ptr = src_ptr;
    if (len_val) |val| {
        const len = val.toUnsignedInt(mod);
        if (len == 0) {
            // This AIR instruction guarantees length > 0 if it is comptime-known.
            return;
        }
        new_dest_ptr = try upgradeToArrayPtr(sema, block, dest_ptr, len);
        new_src_ptr = try upgradeToArrayPtr(sema, block, src_ptr, len);
    }

    if (dest_len != .none) {
        // Change the src from slice to a many pointer, to avoid multiple ptr
        // slice extractions in AIR instructions.
        const new_src_ptr_ty = sema.typeOf(new_src_ptr);
        if (new_src_ptr_ty.isSlice(mod)) {
            new_src_ptr = try sema.analyzeSlicePtr(block, src_src, new_src_ptr, new_src_ptr_ty);
        }
    } else if (dest_len == .none and len_val == null) {
        // Change the dest to a slice, since its type must have the length.
        const dest_ptr_ptr = try sema.analyzeRef(block, dest_src, new_dest_ptr);
        new_dest_ptr = try sema.analyzeSlice(block, dest_src, dest_ptr_ptr, .zero, src_len, .none, .unneeded, dest_src, dest_src, dest_src, false);
        const new_src_ptr_ty = sema.typeOf(new_src_ptr);
        if (new_src_ptr_ty.isSlice(mod)) {
            new_src_ptr = try sema.analyzeSlicePtr(block, src_src, new_src_ptr, new_src_ptr_ty);
        }
    }

    try sema.requireRuntimeBlock(block, src, runtime_src);

    // Aliasing safety check.
    if (block.wantSafety()) {
        const len = if (len_val) |v|
            Air.internedToRef(v.toIntern())
        else if (dest_len != .none)
            dest_len
        else
            src_len;

        // Extract raw pointer from dest slice. The AIR instructions could support them, but
        // it would cause redundant machine code instructions.
        const new_dest_ptr_ty = sema.typeOf(new_dest_ptr);
        const raw_dest_ptr = if (new_dest_ptr_ty.isSlice(mod))
            try sema.analyzeSlicePtr(block, dest_src, new_dest_ptr, new_dest_ptr_ty)
        else if (new_dest_ptr_ty.ptrSize(mod) == .One) ptr: {
            var dest_manyptr_ty_key = mod.intern_pool.indexToKey(new_dest_ptr_ty.toIntern()).ptr_type;
            assert(dest_manyptr_ty_key.flags.size == .One);
            dest_manyptr_ty_key.child = dest_elem_ty.toIntern();
            dest_manyptr_ty_key.flags.size = .Many;
            break :ptr try sema.coerceCompatiblePtrs(block, try mod.ptrType(dest_manyptr_ty_key), new_dest_ptr, dest_src);
        } else new_dest_ptr;

        const new_src_ptr_ty = sema.typeOf(new_src_ptr);
        const raw_src_ptr = if (new_src_ptr_ty.isSlice(mod))
            try sema.analyzeSlicePtr(block, src_src, new_src_ptr, new_src_ptr_ty)
        else if (new_src_ptr_ty.ptrSize(mod) == .One) ptr: {
            var src_manyptr_ty_key = mod.intern_pool.indexToKey(new_src_ptr_ty.toIntern()).ptr_type;
            assert(src_manyptr_ty_key.flags.size == .One);
            src_manyptr_ty_key.child = src_elem_ty.toIntern();
            src_manyptr_ty_key.flags.size = .Many;
            break :ptr try sema.coerceCompatiblePtrs(block, try mod.ptrType(src_manyptr_ty_key), new_src_ptr, src_src);
        } else new_src_ptr;

        // ok1: dest >= src + len
        // ok2: src >= dest + len
        const src_plus_len = try sema.analyzePtrArithmetic(block, src, raw_src_ptr, len, .ptr_add, src_src, src);
        const dest_plus_len = try sema.analyzePtrArithmetic(block, src, raw_dest_ptr, len, .ptr_add, dest_src, src);
        const ok1 = try block.addBinOp(.cmp_gte, raw_dest_ptr, src_plus_len);
        const ok2 = try block.addBinOp(.cmp_gte, new_src_ptr, dest_plus_len);
        const ok = try block.addBinOp(.bit_or, ok1, ok2);
        try sema.addSafetyCheck(block, src, ok, .memcpy_alias);
    }

    _ = try block.addInst(.{
        .tag = .memcpy,
        .data = .{ .bin_op = .{
            .lhs = new_dest_ptr,
            .rhs = new_src_ptr,
        } },
    });
}

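/// Analyzes `@memset`: rejects constant destination pointers, stores
/// element-wise at comptime when the destination is comptime-mutable with a
/// comptime-known length, and otherwise emits `memset`/`memset_safe`.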
fn zirMemset(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
|
|
const mod = sema.mod;
|
|
const gpa = sema.gpa;
|
|
const ip = &mod.intern_pool;
|
|
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
|
|
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
|
|
const src = inst_data.src();
|
|
const dest_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
|
|
const value_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
|
|
const dest_ptr = try sema.resolveInst(extra.lhs);
|
|
const uncoerced_elem = try sema.resolveInst(extra.rhs);
|
|
const dest_ptr_ty = sema.typeOf(dest_ptr);
|
|
try checkMemOperand(sema, block, dest_src, dest_ptr_ty);
|
|
|
|
if (dest_ptr_ty.isConstPtr(mod)) {
|
|
return sema.fail(block, dest_src, "cannot memset constant pointer", .{});
|
|
}
|
|
|
|
const dest_elem_ty = dest_ptr_ty.elemType2(mod);
|
|
|
|
const runtime_src = if (try sema.resolveDefinedValue(block, dest_src, dest_ptr)) |ptr_val| rs: {
|
|
const len_air_ref = try sema.fieldVal(block, src, dest_ptr, try ip.getOrPutString(gpa, "len"), dest_src);
|
|
const len_val = (try sema.resolveDefinedValue(block, dest_src, len_air_ref)) orelse
|
|
break :rs dest_src;
|
|
const len_u64 = (try len_val.getUnsignedIntAdvanced(mod, sema)).?;
|
|
const len = try sema.usizeCast(block, dest_src, len_u64);
|
|
if (len == 0) {
|
|
// This AIR instruction guarantees length > 0 if it is comptime-known.
|
|
return;
|
|
}
|
|
|
|
if (!ptr_val.isComptimeMutablePtr(mod)) break :rs dest_src;
|
|
if (try sema.resolveMaybeUndefVal(uncoerced_elem)) |_| {
|
|
for (0..len) |i| {
|
|
const elem_index = try mod.intRef(Type.usize, i);
|
|
const elem_ptr = try sema.elemPtrOneLayerOnly(
|
|
block,
|
|
src,
|
|
dest_ptr,
|
|
elem_index,
|
|
src,
|
|
true, // init
|
|
false, // oob_safety
|
|
);
|
|
try sema.storePtr2(
|
|
block,
|
|
src,
|
|
elem_ptr,
|
|
dest_src,
|
|
uncoerced_elem,
|
|
value_src,
|
|
.store,
|
|
);
|
|
}
|
|
return;
|
|
} else break :rs value_src;
|
|
} else dest_src;
|
|
|
|
const elem = try sema.coerce(block, dest_elem_ty, uncoerced_elem, value_src);
|
|
|
|
try sema.requireRuntimeBlock(block, src, runtime_src);
|
|
_ = try block.addInst(.{
|
|
.tag = if (block.wantSafety()) .memset_safe else .memset,
|
|
.data = .{ .bin_op = .{
|
|
.lhs = dest_ptr,
|
|
.rhs = elem,
|
|
} },
|
|
});
|
|
}
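
// Illustrative sketch of both paths handled above: a comptime-known,
// comptime-mutable destination is filled element-by-element at compile
// time, while a runtime destination lowers to a single `memset` (or
// `memset_safe`) AIR instruction. Assumes the `std` import of this file.
test "@memset at comptime and runtime sketch" {
    comptime {
        var buf: [4]u8 = undefined;
        @memset(&buf, 0xaa); // comptime loop in zirMemset; no AIR emitted
        for (buf) |b| std.debug.assert(b == 0xaa);
    }
    var rt_buf: [4]u8 = undefined;
    @memset(&rt_buf, 0); // lowers to one memset AIR instruction
    try std.testing.expectEqualSlices(u8, &[_]u8{ 0, 0, 0, 0 }, &rt_buf);
}
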
fn zirBuiltinAsyncCall(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref {
|
|
const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
|
|
const src = LazySrcLoc.nodeOffset(extra.node);
|
|
return sema.failWithUseOfAsync(block, src);
|
|
}
|
|
|
|
fn zirResume(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
|
|
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
|
|
const src = inst_data.src();
|
|
return sema.failWithUseOfAsync(block, src);
|
|
}
|
|
|
|
fn zirAwait(
|
|
sema: *Sema,
|
|
block: *Block,
|
|
inst: Zir.Inst.Index,
|
|
) CompileError!Air.Inst.Ref {
|
|
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
|
|
const src = inst_data.src();
|
|
|
|
return sema.failWithUseOfAsync(block, src);
|
|
}
|
|
|
|
fn zirAwaitNosuspend(
|
|
sema: *Sema,
|
|
block: *Block,
|
|
extended: Zir.Inst.Extended.InstData,
|
|
) CompileError!Air.Inst.Ref {
|
|
const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
|
|
const src = LazySrcLoc.nodeOffset(extra.node);
|
|
|
|
return sema.failWithUseOfAsync(block, src);
|
|
}
|
|
|
|
fn zirVarExtended(
|
|
sema: *Sema,
|
|
block: *Block,
|
|
extended: Zir.Inst.Extended.InstData,
|
|
) CompileError!Air.Inst.Ref {
|
|
const mod = sema.mod;
|
|
const extra = sema.code.extraData(Zir.Inst.ExtendedVar, extended.operand);
|
|
const ty_src: LazySrcLoc = .{ .node_offset_var_decl_ty = 0 };
|
|
const init_src: LazySrcLoc = .{ .node_offset_var_decl_init = 0 };
|
|
const small = @as(Zir.Inst.ExtendedVar.Small, @bitCast(extended.small));
|
|
|
|
var extra_index: usize = extra.end;
|
|
|
|
const lib_name: ?[]const u8 = if (small.has_lib_name) blk: {
|
|
const lib_name = sema.code.nullTerminatedString(sema.code.extra[extra_index]);
|
|
extra_index += 1;
|
|
break :blk lib_name;
|
|
} else null;
|
|
|
|
// ZIR supports encoding this information but it is not used; the information
|
|
// is encoded via the Decl entry.
|
|
assert(!small.has_align);
|
|
|
|
const uncasted_init: Air.Inst.Ref = if (small.has_init) blk: {
|
|
const init_ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_index]));
|
|
extra_index += 1;
|
|
break :blk try sema.resolveInst(init_ref);
|
|
} else .none;
|
|
|
|
const have_ty = extra.data.var_type != .none;
|
|
const var_ty = if (have_ty)
|
|
try sema.resolveType(block, ty_src, extra.data.var_type)
|
|
else
|
|
sema.typeOf(uncasted_init);
|
|
|
|
const init_val = if (uncasted_init != .none) blk: {
|
|
const init = if (have_ty)
|
|
try sema.coerce(block, var_ty, uncasted_init, init_src)
|
|
else
|
|
uncasted_init;
|
|
|
|
break :blk ((try sema.resolveMaybeUndefVal(init)) orelse
|
|
return sema.failWithNeededComptime(block, init_src, "container level variable initializers must be comptime-known")).toIntern();
|
|
} else .none;
|
|
|
|
try sema.validateVarType(block, ty_src, var_ty, small.is_extern);
|
|
|
|
return Air.internedToRef((try mod.intern(.{ .variable = .{
|
|
.ty = var_ty.toIntern(),
|
|
.init = init_val,
|
|
.decl = sema.owner_decl_index,
|
|
.lib_name = if (lib_name) |lname| (try mod.intern_pool.getOrPutString(
|
|
sema.gpa,
|
|
try sema.handleExternLibName(block, ty_src, lname),
|
|
)).toOptional() else .none,
|
|
.is_extern = small.is_extern,
|
|
.is_threadlocal = small.is_threadlocal,
|
|
} })));
|
|
}
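
// Illustrative sketch of the rule enforced above: the initializer of a
// container-level variable must be comptime-known. Assumes the `std`
// import of this file.
test "container-level variable initializer sketch" {
    const S = struct {
        var counter: u32 = 10; // `10` is comptime-known, so this is accepted
    };
    S.counter += 1;
    try std.testing.expect(S.counter == 11);
}
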
fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
|
|
const tracy = trace(@src());
|
|
defer tracy.end();
|
|
|
|
const mod = sema.mod;
|
|
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
|
|
const extra = sema.code.extraData(Zir.Inst.FuncFancy, inst_data.payload_index);
|
|
const target = mod.getTarget();
|
|
|
|
const align_src: LazySrcLoc = .{ .node_offset_fn_type_align = inst_data.src_node };
|
|
const addrspace_src: LazySrcLoc = .{ .node_offset_fn_type_addrspace = inst_data.src_node };
|
|
const section_src: LazySrcLoc = .{ .node_offset_fn_type_section = inst_data.src_node };
|
|
const cc_src: LazySrcLoc = .{ .node_offset_fn_type_cc = inst_data.src_node };
|
|
const ret_src: LazySrcLoc = .{ .node_offset_fn_type_ret_ty = inst_data.src_node };
|
|
const has_body = extra.data.body_len != 0;
|
|
|
|
var extra_index: usize = extra.end;
|
|
|
|
const lib_name: ?[]const u8 = if (extra.data.bits.has_lib_name) blk: {
|
|
const lib_name = sema.code.nullTerminatedString(sema.code.extra[extra_index]);
|
|
extra_index += 1;
|
|
break :blk lib_name;
|
|
} else null;
|
|
|
|
if (has_body and
|
|
(extra.data.bits.has_align_body or extra.data.bits.has_align_ref) and
|
|
!target_util.supportsFunctionAlignment(target))
|
|
{
|
|
return sema.fail(block, align_src, "target does not support function alignment", .{});
|
|
}
|
|
|
|
const @"align": ?Alignment = if (extra.data.bits.has_align_body) blk: {
|
|
const body_len = sema.code.extra[extra_index];
|
|
extra_index += 1;
|
|
const body = sema.code.extra[extra_index..][0..body_len];
|
|
extra_index += body.len;
|
|
|
|
const val = try sema.resolveGenericBody(block, align_src, body, inst, Type.u29, "alignment must be comptime-known");
|
|
if (val.isGenericPoison()) {
|
|
break :blk null;
|
|
}
|
|
const alignment = @as(u32, @intCast(val.toUnsignedInt(mod)));
|
|
try sema.validateAlign(block, align_src, alignment);
|
|
if (alignment == target_util.defaultFunctionAlignment(target)) {
|
|
break :blk .none;
|
|
} else {
|
|
break :blk Alignment.fromNonzeroByteUnits(alignment);
|
|
}
|
|
} else if (extra.data.bits.has_align_ref) blk: {
|
|
const align_ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_index]));
|
|
extra_index += 1;
|
|
const align_tv = sema.resolveInstConst(block, align_src, align_ref, "alignment must be comptime-known") catch |err| switch (err) {
|
|
error.GenericPoison => {
|
|
break :blk null;
|
|
},
|
|
else => |e| return e,
|
|
};
|
|
const alignment = @as(u32, @intCast(align_tv.val.toUnsignedInt(mod)));
|
|
try sema.validateAlign(block, align_src, alignment);
|
|
if (alignment == target_util.defaultFunctionAlignment(target)) {
|
|
break :blk .none;
|
|
} else {
|
|
break :blk Alignment.fromNonzeroByteUnits(alignment);
|
|
}
|
|
} else .none;
|
|
|
|
const @"addrspace": ?std.builtin.AddressSpace = if (extra.data.bits.has_addrspace_body) blk: {
|
|
const body_len = sema.code.extra[extra_index];
|
|
extra_index += 1;
|
|
const body = sema.code.extra[extra_index..][0..body_len];
|
|
extra_index += body.len;
|
|
|
|
const addrspace_ty = try sema.getBuiltinType("AddressSpace");
|
|
const val = try sema.resolveGenericBody(block, addrspace_src, body, inst, addrspace_ty, "addrspace must be comptime-known");
|
|
if (val.isGenericPoison()) {
|
|
break :blk null;
|
|
}
|
|
break :blk mod.toEnum(std.builtin.AddressSpace, val);
|
|
} else if (extra.data.bits.has_addrspace_ref) blk: {
|
|
const addrspace_ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_index]));
|
|
extra_index += 1;
|
|
const addrspace_tv = sema.resolveInstConst(block, addrspace_src, addrspace_ref, "addrspace must be comptime-known") catch |err| switch (err) {
|
|
error.GenericPoison => {
|
|
break :blk null;
|
|
},
|
|
else => |e| return e,
|
|
};
|
|
break :blk mod.toEnum(std.builtin.AddressSpace, addrspace_tv.val);
|
|
} else target_util.defaultAddressSpace(target, .function);
|
|
|
|
const section: Section = if (extra.data.bits.has_section_body) blk: {
|
|
const body_len = sema.code.extra[extra_index];
|
|
extra_index += 1;
|
|
const body = sema.code.extra[extra_index..][0..body_len];
|
|
extra_index += body.len;
|
|
|
|
const ty = Type.slice_const_u8;
|
|
const val = try sema.resolveGenericBody(block, section_src, body, inst, ty, "linksection must be comptime-known");
|
|
if (val.isGenericPoison()) {
|
|
break :blk .generic;
|
|
}
|
|
break :blk .{ .explicit = try val.toIpString(ty, mod) };
|
|
} else if (extra.data.bits.has_section_ref) blk: {
|
|
const section_ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_index]));
|
|
extra_index += 1;
|
|
const section_name = sema.resolveConstStringIntern(block, section_src, section_ref, "linksection must be comptime-known") catch |err| switch (err) {
|
|
error.GenericPoison => {
|
|
break :blk .generic;
|
|
},
|
|
else => |e| return e,
|
|
};
|
|
break :blk .{ .explicit = section_name };
|
|
} else .default;
|
|
|
|
const cc: ?std.builtin.CallingConvention = if (extra.data.bits.has_cc_body) blk: {
|
|
const body_len = sema.code.extra[extra_index];
|
|
extra_index += 1;
|
|
const body = sema.code.extra[extra_index..][0..body_len];
|
|
extra_index += body.len;
|
|
|
|
const cc_ty = try sema.getBuiltinType("CallingConvention");
|
|
const val = try sema.resolveGenericBody(block, cc_src, body, inst, cc_ty, "calling convention must be comptime-known");
|
|
if (val.isGenericPoison()) {
|
|
break :blk null;
|
|
}
|
|
break :blk mod.toEnum(std.builtin.CallingConvention, val);
|
|
} else if (extra.data.bits.has_cc_ref) blk: {
|
|
const cc_ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_index]));
|
|
extra_index += 1;
|
|
const cc_tv = sema.resolveInstConst(block, cc_src, cc_ref, "calling convention must be comptime-known") catch |err| switch (err) {
|
|
error.GenericPoison => {
|
|
break :blk null;
|
|
},
|
|
else => |e| return e,
|
|
};
|
|
break :blk mod.toEnum(std.builtin.CallingConvention, cc_tv.val);
|
|
} else if (sema.owner_decl.is_exported and has_body)
|
|
.C
|
|
else
|
|
.Unspecified;
|
|
|
|
const ret_ty: Type = if (extra.data.bits.has_ret_ty_body) blk: {
|
|
const body_len = sema.code.extra[extra_index];
|
|
extra_index += 1;
|
|
const body = sema.code.extra[extra_index..][0..body_len];
|
|
extra_index += body.len;
|
|
|
|
const val = try sema.resolveGenericBody(block, ret_src, body, inst, Type.type, "return type must be comptime-known");
|
|
const ty = val.toType();
|
|
break :blk ty;
|
|
} else if (extra.data.bits.has_ret_ty_ref) blk: {
|
|
const ret_ty_ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_index]));
|
|
extra_index += 1;
|
|
const ret_ty_tv = sema.resolveInstConst(block, ret_src, ret_ty_ref, "return type must be comptime-known") catch |err| switch (err) {
|
|
error.GenericPoison => {
|
|
break :blk Type.generic_poison;
|
|
},
|
|
else => |e| return e,
|
|
};
|
|
const ty = ret_ty_tv.val.toType();
|
|
break :blk ty;
|
|
} else Type.void;
|
|
|
|
const noalias_bits: u32 = if (extra.data.bits.has_any_noalias) blk: {
|
|
const x = sema.code.extra[extra_index];
|
|
extra_index += 1;
|
|
break :blk x;
|
|
} else 0;
|
|
|
|
var src_locs: Zir.Inst.Func.SrcLocs = undefined;
|
|
if (has_body) {
|
|
extra_index += extra.data.body_len;
|
|
src_locs = sema.code.extraData(Zir.Inst.Func.SrcLocs, extra_index).data;
|
|
}
|
|
|
|
const is_var_args = extra.data.bits.is_var_args;
|
|
const is_inferred_error = extra.data.bits.is_inferred_error;
|
|
const is_extern = extra.data.bits.is_extern;
|
|
const is_noinline = extra.data.bits.is_noinline;
|
|
|
|
return sema.funcCommon(
|
|
block,
|
|
inst_data.src_node,
|
|
inst,
|
|
@"align",
|
|
@"addrspace",
|
|
section,
|
|
cc,
|
|
ret_ty,
|
|
is_var_args,
|
|
is_inferred_error,
|
|
is_extern,
|
|
has_body,
|
|
src_locs,
|
|
lib_name,
|
|
noalias_bits,
|
|
is_noinline,
|
|
);
|
|
}
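
// Illustrative sketch of a function using the attribute clauses resolved
// above (alignment, calling convention, explicit return type). Function
// alignment is target-dependent per the check above, so this sketch
// assumes a target that supports it. Assumes the `std` import of this file.
test "function attribute clauses sketch" {
    const S = struct {
        fn answer() align(16) callconv(.C) u32 {
            return 42;
        }
    };
    try std.testing.expect(S.answer() == 42);
}
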
fn zirCUndef(
|
|
sema: *Sema,
|
|
block: *Block,
|
|
extended: Zir.Inst.Extended.InstData,
|
|
) CompileError!Air.Inst.Ref {
|
|
const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
|
|
const src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
|
|
|
|
const name = try sema.resolveConstString(block, src, extra.operand, "name of macro being undefined must be comptime-known");
|
|
try block.c_import_buf.?.writer().print("#undef {s}\n", .{name});
|
|
return Air.Inst.Ref.void_value;
|
|
}
|
|
|
|
fn zirCInclude(
|
|
sema: *Sema,
|
|
block: *Block,
|
|
extended: Zir.Inst.Extended.InstData,
|
|
) CompileError!Air.Inst.Ref {
|
|
const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
|
|
const src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
|
|
|
|
const name = try sema.resolveConstString(block, src, extra.operand, "path being included must be comptime-known");
|
|
try block.c_import_buf.?.writer().print("#include <{s}>\n", .{name});
|
|
return Air.Inst.Ref.void_value;
|
|
}
|
|
|
|
fn zirCDefine(
|
|
sema: *Sema,
|
|
block: *Block,
|
|
extended: Zir.Inst.Extended.InstData,
|
|
) CompileError!Air.Inst.Ref {
|
|
const mod = sema.mod;
|
|
const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data;
|
|
const name_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
|
|
const val_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = extra.node };
|
|
|
|
const name = try sema.resolveConstString(block, name_src, extra.lhs, "name of macro being defined must be comptime-known");
|
|
const rhs = try sema.resolveInst(extra.rhs);
|
|
if (sema.typeOf(rhs).zigTypeTag(mod) != .Void) {
|
|
const value = try sema.resolveConstString(block, val_src, extra.rhs, "value of macro being defined must be comptime-known");
|
|
try block.c_import_buf.?.writer().print("#define {s} {s}\n", .{ name, value });
|
|
} else {
|
|
try block.c_import_buf.?.writer().print("#define {s}\n", .{name});
|
|
}
|
|
return Air.Inst.Ref.void_value;
|
|
}
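
// Usage sketch for the three builtins above. This is hypothetical and is
// only valid inside an `@cImport` block (which requires C-import support
// when building):
//
// const c = @cImport({
//     @cDefine("FOO", "1"); // emits `#define FOO 1`
//     @cDefine("BAR", {});  // a void value emits `#define BAR`
//     @cUndef("BAZ");       // emits `#undef BAZ`
//     @cInclude("stdio.h"); // emits `#include <stdio.h>`
// });
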
fn zirWasmMemorySize(
|
|
sema: *Sema,
|
|
block: *Block,
|
|
extended: Zir.Inst.Extended.InstData,
|
|
) CompileError!Air.Inst.Ref {
|
|
const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
|
|
const index_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
|
|
const builtin_src = LazySrcLoc.nodeOffset(extra.node);
|
|
const target = sema.mod.getTarget();
|
|
if (!target.isWasm()) {
|
|
return sema.fail(block, builtin_src, "builtin @wasmMemorySize is available when targeting WebAssembly; targeted CPU architecture is {s}", .{@tagName(target.cpu.arch)});
|
|
}
|
|
|
|
const index = @as(u32, @intCast(try sema.resolveInt(block, index_src, extra.operand, Type.u32, "wasm memory size index must be comptime-known")));
|
|
try sema.requireRuntimeBlock(block, builtin_src, null);
|
|
return block.addInst(.{
|
|
.tag = .wasm_memory_size,
|
|
.data = .{ .pl_op = .{
|
|
.operand = .none,
|
|
.payload = index,
|
|
} },
|
|
});
|
|
}
|
|
|
|
fn zirWasmMemoryGrow(
|
|
sema: *Sema,
|
|
block: *Block,
|
|
extended: Zir.Inst.Extended.InstData,
|
|
) CompileError!Air.Inst.Ref {
|
|
const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data;
|
|
const builtin_src = LazySrcLoc.nodeOffset(extra.node);
|
|
const index_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
|
|
const delta_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = extra.node };
|
|
const target = sema.mod.getTarget();
|
|
if (!target.isWasm()) {
|
|
return sema.fail(block, builtin_src, "builtin @wasmMemoryGrow is available when targeting WebAssembly; targeted CPU architecture is {s}", .{@tagName(target.cpu.arch)});
|
|
}
|
|
|
|
const index = @as(u32, @intCast(try sema.resolveInt(block, index_src, extra.lhs, Type.u32, "wasm memory size index must be comptime-known")));
|
|
const delta = try sema.coerce(block, Type.u32, try sema.resolveInst(extra.rhs), delta_src);
|
|
|
|
try sema.requireRuntimeBlock(block, builtin_src, null);
|
|
return block.addInst(.{
|
|
.tag = .wasm_memory_grow,
|
|
.data = .{ .pl_op = .{
|
|
.operand = delta,
|
|
.payload = index,
|
|
} },
|
|
});
|
|
}
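
// Usage sketch for the two builtins above. This only compiles when
// targeting WebAssembly, per the target checks, and the memory index must
// be comptime-known:
//
// const pages = @wasmMemorySize(0);
// const old_page_count = @wasmMemoryGrow(0, 1);
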
fn resolvePrefetchOptions(
|
|
sema: *Sema,
|
|
block: *Block,
|
|
src: LazySrcLoc,
|
|
zir_ref: Zir.Inst.Ref,
|
|
) CompileError!std.builtin.PrefetchOptions {
|
|
const mod = sema.mod;
|
|
const gpa = sema.gpa;
|
|
const ip = &mod.intern_pool;
|
|
const options_ty = try sema.getBuiltinType("PrefetchOptions");
|
|
const options = try sema.coerce(block, options_ty, try sema.resolveInst(zir_ref), src);
|
|
|
|
const rw_src = sema.maybeOptionsSrc(block, src, "rw");
|
|
const locality_src = sema.maybeOptionsSrc(block, src, "locality");
|
|
const cache_src = sema.maybeOptionsSrc(block, src, "cache");
|
|
|
|
const rw = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, "rw"), rw_src);
|
|
const rw_val = try sema.resolveConstValue(block, rw_src, rw, "prefetch read/write must be comptime-known");
|
|
|
|
const locality = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, "locality"), locality_src);
|
|
const locality_val = try sema.resolveConstValue(block, locality_src, locality, "prefetch locality must be comptime-known");
|
|
|
|
const cache = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, "cache"), cache_src);
|
|
const cache_val = try sema.resolveConstValue(block, cache_src, cache, "prefetch cache must be comptime-known");
|
|
|
|
return std.builtin.PrefetchOptions{
|
|
.rw = mod.toEnum(std.builtin.PrefetchOptions.Rw, rw_val),
|
|
.locality = @as(u2, @intCast(locality_val.toUnsignedInt(mod))),
|
|
.cache = mod.toEnum(std.builtin.PrefetchOptions.Cache, cache_val),
|
|
};
|
|
}
|
|
|
|
fn zirPrefetch(
|
|
sema: *Sema,
|
|
block: *Block,
|
|
extended: Zir.Inst.Extended.InstData,
|
|
) CompileError!Air.Inst.Ref {
|
|
const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data;
|
|
const ptr_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
|
|
const opts_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = extra.node };
|
|
const ptr = try sema.resolveInst(extra.lhs);
|
|
try sema.checkPtrOperand(block, ptr_src, sema.typeOf(ptr));
|
|
|
|
const options = sema.resolvePrefetchOptions(block, .unneeded, extra.rhs) catch |err| switch (err) {
|
|
error.NeededSourceLocation => {
|
|
_ = try sema.resolvePrefetchOptions(block, opts_src, extra.rhs);
|
|
unreachable;
|
|
},
|
|
else => |e| return e,
|
|
};
|
|
|
|
if (!block.is_comptime) {
|
|
_ = try block.addInst(.{
|
|
.tag = .prefetch,
|
|
.data = .{ .prefetch = .{
|
|
.ptr = ptr,
|
|
.rw = options.rw,
|
|
.locality = options.locality,
|
|
.cache = options.cache,
|
|
} },
|
|
});
|
|
}
|
|
|
|
return Air.Inst.Ref.void_value;
|
|
}
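
// Illustrative sketch of the builtin analyzed above: all three options
// must be comptime-known, and in a comptime scope no instruction is
// emitted at all. Assumes the `std` import of this file.
test "@prefetch options sketch" {
    var x: u32 = 0;
    @prefetch(&x, .{ .rw = .read, .locality = 3, .cache = .data });
    x += 1;
    try std.testing.expect(x == 1);
}
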
fn resolveExternOptions(
|
|
sema: *Sema,
|
|
block: *Block,
|
|
src: LazySrcLoc,
|
|
zir_ref: Zir.Inst.Ref,
|
|
) CompileError!struct {
|
|
name: InternPool.NullTerminatedString,
|
|
library_name: InternPool.OptionalNullTerminatedString = .none,
|
|
linkage: std.builtin.GlobalLinkage = .Strong,
|
|
is_thread_local: bool = false,
|
|
} {
|
|
const mod = sema.mod;
|
|
const gpa = sema.gpa;
|
|
const ip = &mod.intern_pool;
|
|
const options_inst = try sema.resolveInst(zir_ref);
|
|
const extern_options_ty = try sema.getBuiltinType("ExternOptions");
|
|
const options = try sema.coerce(block, extern_options_ty, options_inst, src);
|
|
|
|
const name_src = sema.maybeOptionsSrc(block, src, "name");
|
|
const library_src = sema.maybeOptionsSrc(block, src, "library");
|
|
const linkage_src = sema.maybeOptionsSrc(block, src, "linkage");
|
|
const thread_local_src = sema.maybeOptionsSrc(block, src, "thread_local");
|
|
|
|
const name_ref = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, "name"), name_src);
|
|
const name_val = try sema.resolveConstValue(block, name_src, name_ref, "name of the extern symbol must be comptime-known");
|
|
const name = try name_val.toAllocatedBytes(Type.slice_const_u8, sema.arena, mod);
|
|
|
|
const library_name_inst = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, "library_name"), library_src);
|
|
const library_name_val = try sema.resolveConstValue(block, library_src, library_name_inst, "library in which the extern symbol is located must be comptime-known");
|
|
|
|
const linkage_ref = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, "linkage"), linkage_src);
|
|
const linkage_val = try sema.resolveConstValue(block, linkage_src, linkage_ref, "linkage of the extern symbol must be comptime-known");
|
|
const linkage = mod.toEnum(std.builtin.GlobalLinkage, linkage_val);
|
|
|
|
const is_thread_local = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, "is_thread_local"), thread_local_src);
|
|
const is_thread_local_val = try sema.resolveConstValue(block, thread_local_src, is_thread_local, "thread locality of the extern symbol must be comptime-known");
|
|
|
|
const library_name = if (library_name_val.optionalValue(mod)) |payload| blk: {
|
|
const library_name = try payload.toAllocatedBytes(Type.slice_const_u8, sema.arena, mod);
|
|
if (library_name.len == 0) {
|
|
return sema.fail(block, library_src, "library name cannot be empty", .{});
|
|
}
|
|
break :blk try sema.handleExternLibName(block, library_src, library_name);
|
|
} else null;
|
|
|
|
if (name.len == 0) {
|
|
return sema.fail(block, name_src, "extern symbol name cannot be empty", .{});
|
|
}
|
|
|
|
if (linkage != .Weak and linkage != .Strong) {
|
|
return sema.fail(block, linkage_src, "extern symbol must use strong or weak linkage", .{});
|
|
}
|
|
|
|
return .{
|
|
.name = try ip.getOrPutString(gpa, name),
|
|
.library_name = try ip.getOrPutStringOpt(gpa, library_name),
|
|
.linkage = linkage,
|
|
.is_thread_local = is_thread_local_val.toBool(),
|
|
};
|
|
}
|
|
|
|
fn zirBuiltinExtern(
|
|
sema: *Sema,
|
|
block: *Block,
|
|
extended: Zir.Inst.Extended.InstData,
|
|
) CompileError!Air.Inst.Ref {
|
|
const mod = sema.mod;
|
|
const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data;
|
|
const ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
|
|
const options_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = extra.node };
|
|
|
|
var ty = try sema.resolveType(block, ty_src, extra.lhs);
|
|
if (!ty.isPtrAtRuntime(mod)) {
|
|
return sema.fail(block, ty_src, "expected (optional) pointer", .{});
|
|
}
|
|
if (!try sema.validateExternType(ty.childType(mod), .other)) {
|
|
const msg = msg: {
|
|
const msg = try sema.errMsg(block, ty_src, "extern symbol cannot have type '{}'", .{ty.fmt(mod)});
|
|
errdefer msg.destroy(sema.gpa);
|
|
const src_decl = sema.mod.declPtr(block.src_decl);
|
|
try sema.explainWhyTypeIsNotExtern(msg, ty_src.toSrcLoc(src_decl, mod), ty, .other);
|
|
break :msg msg;
|
|
};
|
|
return sema.failWithOwnedErrorMsg(msg);
|
|
}
|
|
|
|
const options = sema.resolveExternOptions(block, .unneeded, extra.rhs) catch |err| switch (err) {
|
|
error.NeededSourceLocation => {
|
|
_ = try sema.resolveExternOptions(block, options_src, extra.rhs);
|
|
unreachable;
|
|
},
|
|
else => |e| return e,
|
|
};
|
|
|
|
if (options.linkage == .Weak and !ty.ptrAllowsZero(mod)) {
|
|
ty = try mod.optionalType(ty.toIntern());
|
|
}
|
|
|
|
// TODO check duplicate extern
|
|
|
|
const new_decl_index = try mod.allocateNewDecl(sema.owner_decl.src_namespace, sema.owner_decl.src_node, null);
|
|
errdefer mod.destroyDecl(new_decl_index);
|
|
const new_decl = mod.declPtr(new_decl_index);
|
|
new_decl.name = options.name;
|
|
|
|
{
|
|
const new_var = try mod.intern(.{ .variable = .{
|
|
.ty = ty.toIntern(),
|
|
.init = .none,
|
|
.decl = sema.owner_decl_index,
|
|
.is_extern = true,
|
|
.is_const = true,
|
|
.is_threadlocal = options.is_thread_local,
|
|
.is_weak_linkage = options.linkage == .Weak,
|
|
} });
|
|
|
|
new_decl.src_line = sema.owner_decl.src_line;
|
|
// We only access this decl through the decl_ref with the correct type created
|
|
// below, so this type doesn't matter
|
|
new_decl.ty = ty;
|
|
new_decl.val = new_var.toValue();
|
|
new_decl.alignment = .none;
|
|
new_decl.@"linksection" = .none;
|
|
new_decl.has_tv = true;
|
|
new_decl.analysis = .complete;
|
|
new_decl.generation = mod.generation;
|
|
}
|
|
|
|
try mod.declareDeclDependency(sema.owner_decl_index, new_decl_index);
|
|
try sema.ensureDeclAnalyzed(new_decl_index);
|
|
|
|
return Air.internedToRef((try mod.getCoerced((try mod.intern(.{ .ptr = .{
|
|
.ty = switch (mod.intern_pool.indexToKey(ty.toIntern())) {
|
|
.ptr_type => ty.toIntern(),
|
|
.opt_type => |child_type| child_type,
|
|
else => unreachable,
|
|
},
|
|
.addr = .{ .decl = new_decl_index },
|
|
} })).toValue(), ty)).toIntern());
|
|
}
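
// Usage sketch for `@extern` as analyzed above. The symbol name here is
// hypothetical; the program only links if the symbol actually exists:
//
// const malloc = @extern(*const fn (usize) callconv(.C) ?*anyopaque, .{
//     .name = "malloc",
//     .linkage = .Weak, // weak linkage wraps the pointer in an optional, as above
// });
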
fn zirWorkItem(
|
|
sema: *Sema,
|
|
block: *Block,
|
|
extended: Zir.Inst.Extended.InstData,
|
|
zir_tag: Zir.Inst.Extended,
|
|
) CompileError!Air.Inst.Ref {
|
|
const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
|
|
const dimension_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
|
|
const builtin_src = LazySrcLoc.nodeOffset(extra.node);
|
|
const target = sema.mod.getTarget();
|
|
|
|
switch (target.cpu.arch) {
|
|
// TODO: Allow for other GPU targets.
|
|
.amdgcn => {},
|
|
else => {
|
|
return sema.fail(block, builtin_src, "builtin only available on GPU targets; targeted architecture is {s}", .{@tagName(target.cpu.arch)});
|
|
},
|
|
}
|
|
|
|
const dimension = @as(u32, @intCast(try sema.resolveInt(block, dimension_src, extra.operand, Type.u32, "dimension must be comptime-known")));
|
|
try sema.requireRuntimeBlock(block, builtin_src, null);
|
|
|
|
return block.addInst(.{
|
|
.tag = switch (zir_tag) {
|
|
.work_item_id => .work_item_id,
|
|
.work_group_size => .work_group_size,
|
|
.work_group_id => .work_group_id,
|
|
else => unreachable,
|
|
},
|
|
.data = .{ .pl_op = .{
|
|
.operand = .none,
|
|
.payload = dimension,
|
|
} },
|
|
});
|
|
}
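
// Usage sketch for the work-item builtins dispatched above. These only
// compile for GPU targets (currently amdgcn, per the check), and the
// dimension argument must be comptime-known:
//
// const x = @workItemId(0);
// const size = @workGroupSize(0);
// const group = @workGroupId(0);
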
fn zirInComptime(
|
|
sema: *Sema,
|
|
block: *Block,
|
|
) CompileError!Air.Inst.Ref {
|
|
_ = sema;
|
|
if (block.is_comptime) {
|
|
return Air.Inst.Ref.bool_true;
|
|
} else {
|
|
return Air.Inst.Ref.bool_false;
|
|
}
|
|
}
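
// Illustrative sketch of the builtin above. Assumes the `std` import of
// this file.
test "@inComptime sketch" {
    try std.testing.expect(!@inComptime()); // test body is runtime-analyzed
    comptime std.debug.assert(@inComptime()); // comptime scope reports true
}
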
fn requireRuntimeBlock(sema: *Sema, block: *Block, src: LazySrcLoc, runtime_src: ?LazySrcLoc) !void {
|
|
if (block.is_comptime) {
|
|
const msg = msg: {
|
|
const msg = try sema.errMsg(block, src, "unable to evaluate comptime expression", .{});
|
|
errdefer msg.destroy(sema.gpa);
|
|
|
|
if (runtime_src) |some| {
|
|
try sema.errNote(block, some, msg, "operation is runtime due to this operand", .{});
|
|
}
|
|
if (block.comptime_reason) |some| {
|
|
try some.explain(sema, msg);
|
|
}
|
|
break :msg msg;
|
|
};
|
|
return sema.failWithOwnedErrorMsg(msg);
|
|
}
|
|
}
|
|
|
|
/// Emit a compile error if type cannot be used for a runtime variable.
|
|
fn validateVarType(
|
|
sema: *Sema,
|
|
block: *Block,
|
|
src: LazySrcLoc,
|
|
var_ty: Type,
|
|
is_extern: bool,
|
|
) CompileError!void {
|
|
const mod = sema.mod;
|
|
if (is_extern and !try sema.validateExternType(var_ty, .other)) {
|
|
const msg = msg: {
|
|
const msg = try sema.errMsg(block, src, "extern variable cannot have type '{}'", .{var_ty.fmt(mod)});
|
|
errdefer msg.destroy(sema.gpa);
|
|
const src_decl = mod.declPtr(block.src_decl);
|
|
try sema.explainWhyTypeIsNotExtern(msg, src.toSrcLoc(src_decl, mod), var_ty, .other);
|
|
break :msg msg;
|
|
};
|
|
return sema.failWithOwnedErrorMsg(msg);
|
|
}
|
|
|
|
if (is_extern and var_ty.zigTypeTag(mod) == .Opaque) return;
|
|
if (!try sema.typeRequiresComptime(var_ty)) return;
|
|
|
|
const msg = msg: {
|
|
const msg = try sema.errMsg(block, src, "variable of type '{}' must be const or comptime", .{var_ty.fmt(mod)});
|
|
errdefer msg.destroy(sema.gpa);
|
|
|
|
const src_decl = mod.declPtr(block.src_decl);
|
|
try sema.explainWhyTypeIsComptime(msg, src.toSrcLoc(src_decl, mod), var_ty);
|
|
if (var_ty.zigTypeTag(mod) == .ComptimeInt or var_ty.zigTypeTag(mod) == .ComptimeFloat) {
|
|
try sema.errNote(block, src, msg, "to modify this variable at runtime, it must be given an explicit fixed-size number type", .{});
|
|
}
|
|
|
|
break :msg msg;
|
|
};
|
|
return sema.failWithOwnedErrorMsg(msg);
|
|
}
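
// Illustrative sketch of the rule enforced above: a runtime `var` of a
// comptime-only type (such as `type`) is rejected, while a comptime
// variable is accepted. Assumes the `std` import of this file.
test "comptime-only variable type sketch" {
    comptime {
        var T: type = u8; // accepted: comptime scope
        T = u16;
        std.debug.assert(T == u16);
    }
}
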
const TypeSet = std.AutoHashMapUnmanaged(InternPool.Index, void);
|
|
|
|
fn explainWhyTypeIsComptime(
|
|
sema: *Sema,
|
|
msg: *Module.ErrorMsg,
|
|
src_loc: Module.SrcLoc,
|
|
ty: Type,
|
|
) CompileError!void {
|
|
var type_set = TypeSet{};
|
|
defer type_set.deinit(sema.gpa);
|
|
|
|
try sema.resolveTypeFully(ty);
|
|
return sema.explainWhyTypeIsComptimeInner(msg, src_loc, ty, &type_set);
|
|
}
|
|
|
|
fn explainWhyTypeIsComptimeInner(
|
|
sema: *Sema,
|
|
msg: *Module.ErrorMsg,
|
|
src_loc: Module.SrcLoc,
|
|
ty: Type,
|
|
type_set: *TypeSet,
|
|
) CompileError!void {
|
|
const mod = sema.mod;
|
|
switch (ty.zigTypeTag(mod)) {
|
|
.Bool,
|
|
.Int,
|
|
.Float,
|
|
.ErrorSet,
|
|
.Enum,
|
|
.Frame,
|
|
.AnyFrame,
|
|
.Void,
|
|
=> return,
|
|
|
|
.Fn => {
|
|
try mod.errNoteNonLazy(src_loc, msg, "use '*const {}' for a function pointer type", .{
|
|
ty.fmt(sema.mod),
|
|
});
|
|
},
|
|
|
|
.Type => {
|
|
try mod.errNoteNonLazy(src_loc, msg, "types are not available at runtime", .{});
|
|
},
|
|
|
|
.ComptimeFloat,
|
|
.ComptimeInt,
|
|
.EnumLiteral,
|
|
.NoReturn,
|
|
.Undefined,
|
|
.Null,
|
|
=> return,
|
|
|
|
.Opaque => {
|
|
try mod.errNoteNonLazy(src_loc, msg, "opaque type '{}' has undefined size", .{ty.fmt(sema.mod)});
|
|
},
|
|
|
|
.Array, .Vector => {
|
|
try sema.explainWhyTypeIsComptimeInner(msg, src_loc, ty.childType(mod), type_set);
|
|
},
|
|
.Pointer => {
|
|
const elem_ty = ty.elemType2(mod);
|
|
if (elem_ty.zigTypeTag(mod) == .Fn) {
|
|
const fn_info = mod.typeToFunc(elem_ty).?;
|
|
if (fn_info.is_generic) {
|
|
try mod.errNoteNonLazy(src_loc, msg, "function is generic", .{});
|
|
}
|
|
switch (fn_info.cc) {
|
|
.Inline => try mod.errNoteNonLazy(src_loc, msg, "function has inline calling convention", .{}),
|
|
else => {},
|
|
}
|
|
if (fn_info.return_type.toType().comptimeOnly(mod)) {
|
|
try mod.errNoteNonLazy(src_loc, msg, "function has a comptime-only return type", .{});
|
|
}
|
|
return;
|
|
}
|
|
try sema.explainWhyTypeIsComptimeInner(msg, src_loc, ty.childType(mod), type_set);
|
|
},
|
|
|
|
.Optional => {
|
|
try sema.explainWhyTypeIsComptimeInner(msg, src_loc, ty.optionalChild(mod), type_set);
|
|
},
|
|
.ErrorUnion => {
|
|
try sema.explainWhyTypeIsComptimeInner(msg, src_loc, ty.errorUnionPayload(mod), type_set);
|
|
},
|
|
|
|
.Struct => {
|
|
if ((try type_set.getOrPut(sema.gpa, ty.toIntern())).found_existing) return;
|
|
|
|
if (mod.typeToStruct(ty)) |struct_obj| {
|
|
for (struct_obj.fields.values(), 0..) |field, i| {
|
|
const field_src_loc = mod.fieldSrcLoc(struct_obj.owner_decl, .{
|
|
.index = i,
|
|
.range = .type,
|
|
});
|
|
|
|
if (try sema.typeRequiresComptime(field.ty)) {
|
|
try mod.errNoteNonLazy(field_src_loc, msg, "struct requires comptime because of this field", .{});
|
|
try sema.explainWhyTypeIsComptimeInner(msg, field_src_loc, field.ty, type_set);
|
|
}
|
|
}
|
|
}
|
|
// TODO tuples
|
|
},
|
|
|
|
.Union => {
|
|
if ((try type_set.getOrPut(sema.gpa, ty.toIntern())).found_existing) return;
|
|
|
|
if (mod.typeToUnion(ty)) |union_obj| {
|
|
for (union_obj.fields.values(), 0..) |field, i| {
|
|
const field_src_loc = mod.fieldSrcLoc(union_obj.owner_decl, .{
|
|
.index = i,
|
|
.range = .type,
|
|
});
|
|
|
|
if (try sema.typeRequiresComptime(field.ty)) {
|
|
try mod.errNoteNonLazy(field_src_loc, msg, "union requires comptime because of this field", .{});
|
|
try sema.explainWhyTypeIsComptimeInner(msg, field_src_loc, field.ty, type_set);
|
|
}
|
|
}
|
|
}
|
|
},
|
|
}
|
|
}
|
|
|
|
const ExternPosition = enum {
|
|
ret_ty,
|
|
param_ty,
|
|
union_field,
|
|
struct_field,
|
|
element,
|
|
other,
|
|
};
|
|
|
|
/// Returns true if `ty` is allowed in extern types.
|
|
/// Does *NOT* require `ty` to be resolved in any way.
|
|
/// Calls `resolveTypeLayout` for packed containers.
|
|
fn validateExternType(
|
|
sema: *Sema,
|
|
ty: Type,
|
|
position: ExternPosition,
|
|
) !bool {
|
|
const mod = sema.mod;
|
|
switch (ty.zigTypeTag(mod)) {
|
|
.Type,
|
|
.ComptimeFloat,
|
|
.ComptimeInt,
|
|
.EnumLiteral,
|
|
.Undefined,
|
|
.Null,
|
|
.ErrorUnion,
|
|
.ErrorSet,
|
|
.Frame,
|
|
=> return false,
|
|
.Void => return position == .union_field or position == .ret_ty or position == .struct_field or position == .element,
|
|
.NoReturn => return position == .ret_ty,
|
|
.Opaque,
|
|
.Bool,
|
|
.Float,
|
|
.AnyFrame,
|
|
=> return true,
|
|
.Pointer => return !(ty.isSlice(mod) or try sema.typeRequiresComptime(ty)),
|
|
.Int => switch (ty.intInfo(mod).bits) {
|
|
0, 8, 16, 32, 64, 128 => return true,
|
|
else => return false,
|
|
},
|
|
.Fn => {
|
|
if (position != .other) return false;
|
|
const target = sema.mod.getTarget();
|
|
// For now, we allow PTX kernels to use Zig objects, even though this ends up exposing the ABI.
// The goal is to experiment with more integrated CPU/GPU code.
|
|
if (ty.fnCallingConvention(mod) == .Kernel and (target.cpu.arch == .nvptx or target.cpu.arch == .nvptx64)) {
|
|
return true;
|
|
}
|
|
return !target_util.fnCallConvAllowsZigTypes(target, ty.fnCallingConvention(mod));
|
|
},
|
|
.Enum => {
|
|
return sema.validateExternType(ty.intTagType(mod), position);
|
|
},
|
|
.Struct, .Union => switch (ty.containerLayout(mod)) {
|
|
.Extern => return true,
|
|
.Packed => {
|
|
const bit_size = try ty.bitSizeAdvanced(mod, sema);
|
|
switch (bit_size) {
|
|
0, 8, 16, 32, 64, 128 => return true,
|
|
else => return false,
|
|
}
|
|
},
|
|
.Auto => return !(try sema.typeHasRuntimeBits(ty)),
|
|
},
|
|
.Array => {
|
|
if (position == .ret_ty or position == .param_ty) return false;
|
|
return sema.validateExternType(ty.elemType2(mod), .element);
|
|
},
|
|
.Vector => return sema.validateExternType(ty.elemType2(mod), .element),
|
|
.Optional => return ty.isPtrLikeOptional(mod),
|
|
}
|
|
}
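
// Illustrative sketch of types accepted by the validation above. Assumes
// the `std` import of this file.
test "extern-compatible field types sketch" {
    const S = extern struct {
        a: u32, // fixed-width int of 0/8/16/32/64/128 bits: allowed
        b: [4]u8, // array: element type is validated with `.element`
        c: ?*anyopaque, // pointer-like optional: allowed
    };
    const s = S{ .a = 1, .b = .{ 0, 0, 0, 0 }, .c = null };
    try std.testing.expect(s.a == 1);
}
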
fn explainWhyTypeIsNotExtern(
|
|
sema: *Sema,
|
|
msg: *Module.ErrorMsg,
|
|
src_loc: Module.SrcLoc,
|
|
ty: Type,
|
|
position: ExternPosition,
|
|
) CompileError!void {
|
|
const mod = sema.mod;
|
|
switch (ty.zigTypeTag(mod)) {
|
|
.Opaque,
|
|
.Bool,
|
|
.Float,
|
|
.AnyFrame,
|
|
=> return,
|
|
|
|
.Type,
|
|
.ComptimeFloat,
|
|
.ComptimeInt,
|
|
.EnumLiteral,
|
|
.Undefined,
|
|
.Null,
|
|
.ErrorUnion,
|
|
.ErrorSet,
|
|
.Frame,
|
|
=> return,
|
|
|
|
.Pointer => {
|
|
if (ty.isSlice(mod)) {
|
|
try mod.errNoteNonLazy(src_loc, msg, "slices have no guaranteed in-memory representation", .{});
|
|
} else {
|
|
const pointee_ty = ty.childType(mod);
|
|
try mod.errNoteNonLazy(src_loc, msg, "pointer to comptime-only type '{}'", .{pointee_ty.fmt(sema.mod)});
|
|
try sema.explainWhyTypeIsComptime(msg, src_loc, pointee_ty);
|
|
}
|
|
},
|
|
.Void => try mod.errNoteNonLazy(src_loc, msg, "'void' is a zero bit type; for C 'void' use 'anyopaque'", .{}),
|
|
.NoReturn => try mod.errNoteNonLazy(src_loc, msg, "'noreturn' is only allowed as a return type", .{}),
|
|
.Int => if (!std.math.isPowerOfTwo(ty.intInfo(mod).bits)) {
|
|
try mod.errNoteNonLazy(src_loc, msg, "only integers with 0 or power of two bits are extern compatible", .{});
|
|
} else {
|
|
try mod.errNoteNonLazy(src_loc, msg, "only integers with 0, 8, 16, 32, 64 and 128 bits are extern compatible", .{});
|
|
},
|
|
.Fn => {
|
|
if (position != .other) {
|
|
try mod.errNoteNonLazy(src_loc, msg, "type has no guaranteed in-memory representation", .{});
|
|
try mod.errNoteNonLazy(src_loc, msg, "use '*const ' to make a function pointer type", .{});
|
|
return;
|
|
}
|
|
switch (ty.fnCallingConvention(mod)) {
|
|
.Unspecified => try mod.errNoteNonLazy(src_loc, msg, "extern function must specify calling convention", .{}),
|
|
.Async => try mod.errNoteNonLazy(src_loc, msg, "async function cannot be extern", .{}),
|
|
.Inline => try mod.errNoteNonLazy(src_loc, msg, "inline function cannot be extern", .{}),
|
|
else => return,
|
|
}
|
|
},
|
|
.Enum => {
|
|
const tag_ty = ty.intTagType(mod);
|
|
try mod.errNoteNonLazy(src_loc, msg, "enum tag type '{}' is not extern compatible", .{tag_ty.fmt(sema.mod)});
|
|
try sema.explainWhyTypeIsNotExtern(msg, src_loc, tag_ty, position);
|
|
},
|
|
.Struct => try mod.errNoteNonLazy(src_loc, msg, "only extern structs and ABI sized packed structs are extern compatible", .{}),
|
|
.Union => try mod.errNoteNonLazy(src_loc, msg, "only extern unions and ABI sized packed unions are extern compatible", .{}),
|
|
.Array => {
|
|
if (position == .ret_ty) {
|
|
return mod.errNoteNonLazy(src_loc, msg, "arrays are not allowed as a return type", .{});
|
|
} else if (position == .param_ty) {
|
|
return mod.errNoteNonLazy(src_loc, msg, "arrays are not allowed as a parameter type", .{});
|
|
}
|
|
try sema.explainWhyTypeIsNotExtern(msg, src_loc, ty.elemType2(mod), .element);
|
|
},
|
|
.Vector => try sema.explainWhyTypeIsNotExtern(msg, src_loc, ty.elemType2(mod), .element),
|
|
.Optional => try mod.errNoteNonLazy(src_loc, msg, "only pointer like optionals are extern compatible", .{}),
|
|
}
|
|
}
|
|
|
|
/// Returns true if `ty` is allowed in packed types.
|
|
/// Does *NOT* require `ty` to be resolved in any way.
|
|
fn validatePackedType(ty: Type, mod: *Module) bool {
|
|
switch (ty.zigTypeTag(mod)) {
|
|
.Type,
|
|
.ComptimeFloat,
|
|
.ComptimeInt,
|
|
.EnumLiteral,
|
|
.Undefined,
|
|
.Null,
|
|
.ErrorUnion,
|
|
.ErrorSet,
|
|
.Frame,
|
|
.NoReturn,
|
|
.Opaque,
|
|
.AnyFrame,
|
|
.Fn,
|
|
.Array,
|
|
=> return false,
|
|
.Optional => return ty.isPtrLikeOptional(mod),
|
|
.Void,
|
|
.Bool,
|
|
.Float,
|
|
.Int,
|
|
.Vector,
|
|
.Enum,
|
|
=> return true,
|
|
.Pointer => return !ty.isSlice(mod),
|
|
.Struct, .Union => return ty.containerLayout(mod) == .Packed,
|
|
}
|
|
}
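
// Illustrative sketch of types accepted by the validation above. Assumes
// the `std` import of this file.
test "packed-compatible field types sketch" {
    const P = packed struct {
        flag: bool, // bools are allowed in packed layout
        n: u7, // any fixed-width integer is allowed
    };
    try std.testing.expect(@bitSizeOf(P) == 8);
}
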
fn explainWhyTypeIsNotPacked(
|
|
sema: *Sema,
|
|
msg: *Module.ErrorMsg,
|
|
src_loc: Module.SrcLoc,
|
|
ty: Type,
|
|
) CompileError!void {
|
|
const mod = sema.mod;
|
|
switch (ty.zigTypeTag(mod)) {
|
|
.Void,
|
|
.Bool,
|
|
.Float,
|
|
.Int,
|
|
.Vector,
|
|
.Enum,
|
|
=> return,
|
|
.Type,
|
|
.ComptimeFloat,
|
|
.ComptimeInt,
|
|
.EnumLiteral,
|
|
.Undefined,
|
|
.Null,
|
|
.Frame,
|
|
.NoReturn,
|
|
.Opaque,
|
|
.ErrorUnion,
|
|
.ErrorSet,
|
|
.AnyFrame,
|
|
.Optional,
|
|
.Array,
|
|
=> try mod.errNoteNonLazy(src_loc, msg, "type has no guaranteed in-memory representation", .{}),
|
|
.Pointer => try mod.errNoteNonLazy(src_loc, msg, "slices have no guaranteed in-memory representation", .{}),
|
|
.Fn => {
|
|
try mod.errNoteNonLazy(src_loc, msg, "type has no guaranteed in-memory representation", .{});
|
|
try mod.errNoteNonLazy(src_loc, msg, "use '*const ' to make a function pointer type", .{});
|
|
},
|
|
.Struct => try mod.errNoteNonLazy(src_loc, msg, "only structs with packed layout are allowed in packed types", .{}),
.Union => try mod.errNoteNonLazy(src_loc, msg, "only unions with packed layout are allowed in packed types", .{}),
|
|
}
|
|
}
|
|
|
|
fn prepareSimplePanic(sema: *Sema, block: *Block) !void {
|
|
const mod = sema.mod;
|
|
|
|
if (mod.panic_func_index == .none) {
|
|
const decl_index = (try sema.getBuiltinDecl(block, "panic"));
|
|
// decl_index may be an alias; we must find the decl that actually
|
|
// owns the function.
|
|
try sema.ensureDeclAnalyzed(decl_index);
|
|
const tv = try mod.declPtr(decl_index).typedValue();
|
|
assert(tv.ty.zigTypeTag(mod) == .Fn);
|
|
assert(try sema.fnHasRuntimeBits(tv.ty));
|
|
const func_index = tv.val.toIntern();
|
|
try mod.ensureFuncBodyAnalysisQueued(func_index);
|
|
mod.panic_func_index = func_index;
|
|
}
|
|
|
|
if (mod.null_stack_trace == .none) {
|
|
const stack_trace_ty = try sema.getBuiltinType("StackTrace");
|
|
try sema.resolveTypeFields(stack_trace_ty);
|
|
const target = mod.getTarget();
|
|
const ptr_stack_trace_ty = try mod.ptrType(.{
|
|
.child = stack_trace_ty.toIntern(),
|
|
.flags = .{
|
|
.address_space = target_util.defaultAddressSpace(target, .global_constant),
|
|
},
|
|
});
|
|
const opt_ptr_stack_trace_ty = try mod.optionalType(ptr_stack_trace_ty.toIntern());
|
|
mod.null_stack_trace = try mod.intern(.{ .opt = .{
|
|
.ty = opt_ptr_stack_trace_ty.toIntern(),
|
|
.val = .none,
|
|
} });
|
|
}
|
|
}
|
|
|
|
/// Backends depend on panic decls being available when lowering safety-checked
|
|
/// instructions. This function ensures the panic function will be available to
|
|
/// be called during that time.
|
|
fn preparePanicId(sema: *Sema, block: *Block, panic_id: Module.PanicId) !Module.Decl.Index {
|
|
const mod = sema.mod;
|
|
const gpa = sema.gpa;
|
|
if (mod.panic_messages[@intFromEnum(panic_id)].unwrap()) |x| return x;
|
|
|
|
try sema.prepareSimplePanic(block);
|
|
|
|
const panic_messages_ty = try sema.getBuiltinType("panic_messages");
|
|
const msg_decl_index = (try sema.namespaceLookup(
|
|
block,
|
|
sema.src,
|
|
panic_messages_ty.getNamespaceIndex(mod).unwrap().?,
|
|
try mod.intern_pool.getOrPutString(gpa, @tagName(panic_id)),
|
|
)).?;
|
|
try sema.ensureDeclAnalyzed(msg_decl_index);
|
|
mod.panic_messages[@intFromEnum(panic_id)] = msg_decl_index.toOptional();
|
|
return msg_decl_index;
|
|
}
|
|
|
|
fn addSafetyCheck(
|
|
sema: *Sema,
|
|
parent_block: *Block,
|
|
src: LazySrcLoc,
|
|
ok: Air.Inst.Ref,
|
|
panic_id: Module.PanicId,
|
|
) !void {
|
|
const gpa = sema.gpa;
|
|
assert(!parent_block.is_comptime);
|
|
|
|
var fail_block: Block = .{
|
|
.parent = parent_block,
|
|
.sema = sema,
|
|
.src_decl = parent_block.src_decl,
|
|
.namespace = parent_block.namespace,
|
|
.wip_capture_scope = parent_block.wip_capture_scope,
|
|
.instructions = .{},
|
|
.inlining = parent_block.inlining,
|
|
.is_comptime = false,
|
|
};
|
|
|
|
defer fail_block.instructions.deinit(gpa);
|
|
|
|
try sema.safetyPanic(&fail_block, src, panic_id);
|
|
try sema.addSafetyCheckExtra(parent_block, ok, &fail_block);
|
|
}
|
|
|
|
fn addSafetyCheckExtra(
|
|
sema: *Sema,
|
|
parent_block: *Block,
|
|
ok: Air.Inst.Ref,
|
|
fail_block: *Block,
|
|
) !void {
|
|
const gpa = sema.gpa;
|
|
|
|
try parent_block.instructions.ensureUnusedCapacity(gpa, 1);
|
|
|
|
try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Block).Struct.fields.len +
|
|
1 + // The main block only needs space for the cond_br.
|
|
@typeInfo(Air.CondBr).Struct.fields.len +
|
|
1 + // The ok branch of the cond_br only needs space for the br.
|
|
fail_block.instructions.items.len);
|
|
|
|
try sema.air_instructions.ensureUnusedCapacity(gpa, 3);
|
|
const block_inst = @as(Air.Inst.Index, @intCast(sema.air_instructions.len));
|
|
const cond_br_inst = block_inst + 1;
|
|
const br_inst = cond_br_inst + 1;
|
|
sema.air_instructions.appendAssumeCapacity(.{
|
|
.tag = .block,
|
|
.data = .{ .ty_pl = .{
|
|
.ty = .void_type,
|
|
.payload = sema.addExtraAssumeCapacity(Air.Block{
|
|
.body_len = 1,
|
|
}),
|
|
} },
|
|
});
|
|
sema.air_extra.appendAssumeCapacity(cond_br_inst);
|
|
|
|
sema.air_instructions.appendAssumeCapacity(.{
|
|
.tag = .cond_br,
|
|
.data = .{ .pl_op = .{
|
|
.operand = ok,
|
|
.payload = sema.addExtraAssumeCapacity(Air.CondBr{
|
|
.then_body_len = 1,
|
|
.else_body_len = @as(u32, @intCast(fail_block.instructions.items.len)),
|
|
}),
|
|
} },
|
|
});
|
|
sema.air_extra.appendAssumeCapacity(br_inst);
|
|
sema.air_extra.appendSliceAssumeCapacity(fail_block.instructions.items);
|
|
|
|
sema.air_instructions.appendAssumeCapacity(.{
|
|
.tag = .br,
|
|
.data = .{ .br = .{
|
|
.block_inst = block_inst,
|
|
.operand = .void_value,
|
|
} },
|
|
});
|
|
|
|
parent_block.instructions.appendAssumeCapacity(block_inst);
|
|
}
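
// The AIR emitted by `addSafetyCheckExtra` has this shape (a sketch in
// informal notation, not real AIR syntax):
//
//   %block = block(void, {
//     %cond_br = cond_br(%ok,
//       then = { %br(%block, @void_value) }       // check passed: fall through
//       else = { ...fail_block instructions... }  // panic call or trap
//     )
//   })
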
fn panicWithMsg(sema: *Sema, block: *Block, src: LazySrcLoc, msg_inst: Air.Inst.Ref, operation: CallOperation) !void {
|
|
const mod = sema.mod;
|
|
|
|
if (!mod.backendSupportsFeature(.panic_fn)) {
|
|
_ = try block.addNoOp(.trap);
|
|
return;
|
|
}
|
|
|
|
try sema.prepareSimplePanic(block);
|
|
|
|
const panic_func = mod.funcInfo(mod.panic_func_index);
|
|
const panic_fn = try sema.analyzeDeclVal(block, src, panic_func.owner_decl);
|
|
const null_stack_trace = Air.internedToRef(mod.null_stack_trace);
|
|
|
|
const opt_usize_ty = try mod.optionalType(.usize_type);
|
|
const null_ret_addr = Air.internedToRef((try mod.intern(.{ .opt = .{
|
|
.ty = opt_usize_ty.toIntern(),
|
|
.val = .none,
|
|
} })));
|
|
try sema.callBuiltin(block, src, panic_fn, .auto, &.{ msg_inst, null_stack_trace, null_ret_addr }, operation);
|
|
}
|
|
|
|
fn panicUnwrapError(
|
|
sema: *Sema,
|
|
parent_block: *Block,
|
|
src: LazySrcLoc,
|
|
operand: Air.Inst.Ref,
|
|
unwrap_err_tag: Air.Inst.Tag,
|
|
is_non_err_tag: Air.Inst.Tag,
|
|
) !void {
|
|
assert(!parent_block.is_comptime);
|
|
const ok = try parent_block.addUnOp(is_non_err_tag, operand);
|
|
if (!sema.mod.comp.formatted_panics) {
|
|
return sema.addSafetyCheck(parent_block, src, ok, .unwrap_error);
|
|
}
|
|
const gpa = sema.gpa;
|
|
|
|
var fail_block: Block = .{
|
|
.parent = parent_block,
|
|
.sema = sema,
|
|
.src_decl = parent_block.src_decl,
|
|
.namespace = parent_block.namespace,
|
|
.wip_capture_scope = parent_block.wip_capture_scope,
|
|
.instructions = .{},
|
|
.inlining = parent_block.inlining,
|
|
.is_comptime = false,
|
|
};
|
|
|
|
defer fail_block.instructions.deinit(gpa);
|
|
|
|
{
|
|
if (!sema.mod.backendSupportsFeature(.panic_unwrap_error)) {
|
|
_ = try fail_block.addNoOp(.trap);
|
|
} else {
|
|
const panic_fn = try sema.getBuiltin("panicUnwrapError");
|
|
const err = try fail_block.addTyOp(unwrap_err_tag, Type.anyerror, operand);
|
|
const err_return_trace = try sema.getErrorReturnTrace(&fail_block);
|
|
const args: [2]Air.Inst.Ref = .{ err_return_trace, err };
|
|
try sema.callBuiltin(&fail_block, src, panic_fn, .auto, &args, .@"safety check");
|
|
}
|
|
}
|
|
try sema.addSafetyCheckExtra(parent_block, ok, &fail_block);
|
|
}
|
|
|
|
fn panicIndexOutOfBounds(
|
|
sema: *Sema,
|
|
parent_block: *Block,
|
|
src: LazySrcLoc,
|
|
index: Air.Inst.Ref,
|
|
len: Air.Inst.Ref,
|
|
cmp_op: Air.Inst.Tag,
|
|
) !void {
|
|
assert(!parent_block.is_comptime);
|
|
const ok = try parent_block.addBinOp(cmp_op, index, len);
|
|
if (!sema.mod.comp.formatted_panics) {
|
|
return sema.addSafetyCheck(parent_block, src, ok, .index_out_of_bounds);
|
|
}
|
|
try sema.safetyCheckFormatted(parent_block, src, ok, "panicOutOfBounds", &.{ index, len });
|
|
}
|
|
|
|
fn panicInactiveUnionField(
|
|
sema: *Sema,
|
|
parent_block: *Block,
|
|
src: LazySrcLoc,
|
|
active_tag: Air.Inst.Ref,
|
|
wanted_tag: Air.Inst.Ref,
|
|
) !void {
|
|
assert(!parent_block.is_comptime);
|
|
const ok = try parent_block.addBinOp(.cmp_eq, active_tag, wanted_tag);
|
|
if (!sema.mod.comp.formatted_panics) {
|
|
return sema.addSafetyCheck(parent_block, src, ok, .inactive_union_field);
|
|
}
|
|
try sema.safetyCheckFormatted(parent_block, src, ok, "panicInactiveUnionField", &.{ active_tag, wanted_tag });
|
|
}
|
|
|
|
fn panicSentinelMismatch(
|
|
sema: *Sema,
|
|
parent_block: *Block,
|
|
src: LazySrcLoc,
|
|
maybe_sentinel: ?Value,
|
|
sentinel_ty: Type,
|
|
ptr: Air.Inst.Ref,
|
|
sentinel_index: Air.Inst.Ref,
|
|
) !void {
|
|
assert(!parent_block.is_comptime);
|
|
const mod = sema.mod;
|
|
const expected_sentinel_val = maybe_sentinel orelse return;
|
|
const expected_sentinel = Air.internedToRef(expected_sentinel_val.toIntern());
|
|
|
|
const ptr_ty = sema.typeOf(ptr);
|
|
const actual_sentinel = if (ptr_ty.isSlice(mod))
|
|
try parent_block.addBinOp(.slice_elem_val, ptr, sentinel_index)
|
|
else blk: {
|
|
const elem_ptr_ty = try sema.elemPtrType(ptr_ty, null);
|
|
const sentinel_ptr = try parent_block.addPtrElemPtr(ptr, sentinel_index, elem_ptr_ty);
|
|
break :blk try parent_block.addTyOp(.load, sentinel_ty, sentinel_ptr);
|
|
};
|
|
|
|
const ok = if (sentinel_ty.zigTypeTag(mod) == .Vector) ok: {
|
|
const eql =
|
|
try parent_block.addCmpVector(expected_sentinel, actual_sentinel, .eq);
|
|
break :ok try parent_block.addInst(.{
|
|
.tag = .reduce,
|
|
.data = .{ .reduce = .{
|
|
.operand = eql,
|
|
.operation = .And,
|
|
} },
|
|
});
|
|
} else if (sentinel_ty.isSelfComparable(mod, true))
|
|
try parent_block.addBinOp(.cmp_eq, expected_sentinel, actual_sentinel)
|
|
else {
|
|
const panic_fn = try sema.getBuiltin("checkNonScalarSentinel");
|
|
const args: [2]Air.Inst.Ref = .{ expected_sentinel, actual_sentinel };
|
|
try sema.callBuiltin(parent_block, src, panic_fn, .auto, &args, .@"safety check");
|
|
return;
|
|
};
|
|
|
|
if (!sema.mod.comp.formatted_panics) {
|
|
return sema.addSafetyCheck(parent_block, src, ok, .sentinel_mismatch);
|
|
}
|
|
try sema.safetyCheckFormatted(parent_block, src, ok, "panicSentinelMismatch", &.{ expected_sentinel, actual_sentinel });
|
|
}
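
// Illustrative sketch of the user-facing check above: slicing with an
// explicit sentinel verifies that the element at the sentinel index has
// the expected value. Assumes the `std` import of this file.
test "sentinel-checked slicing sketch" {
    var buf = [_:0]u8{ 'h', 'i' };
    const s = buf[0..2 :0]; // buf[2] == 0, so the sentinel check passes
    try std.testing.expect(s.len == 2);
}
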
fn safetyCheckFormatted(
|
|
sema: *Sema,
|
|
parent_block: *Block,
|
|
src: LazySrcLoc,
|
|
ok: Air.Inst.Ref,
|
|
func: []const u8,
|
|
args: []const Air.Inst.Ref,
|
|
) CompileError!void {
|
|
assert(sema.mod.comp.formatted_panics);
|
|
const gpa = sema.gpa;
|
|
|
|
var fail_block: Block = .{
|
|
.parent = parent_block,
|
|
.sema = sema,
|
|
.src_decl = parent_block.src_decl,
|
|
.namespace = parent_block.namespace,
|
|
.wip_capture_scope = parent_block.wip_capture_scope,
|
|
.instructions = .{},
|
|
.inlining = parent_block.inlining,
|
|
.is_comptime = false,
|
|
};
|
|
|
|
defer fail_block.instructions.deinit(gpa);
|
|
|
|
if (!sema.mod.backendSupportsFeature(.safety_check_formatted)) {
|
|
_ = try fail_block.addNoOp(.trap);
|
|
} else {
|
|
const panic_fn = try sema.getBuiltin(func);
|
|
try sema.callBuiltin(&fail_block, src, panic_fn, .auto, args, .@"safety check");
|
|
}
|
|
try sema.addSafetyCheckExtra(parent_block, ok, &fail_block);
|
|
}
|
|
|
|
fn safetyPanic(sema: *Sema, block: *Block, src: LazySrcLoc, panic_id: Module.PanicId) CompileError!void {
|
|
const msg_decl_index = try sema.preparePanicId(block, panic_id);
|
|
const msg_inst = try sema.analyzeDeclVal(block, src, msg_decl_index);
|
|
try sema.panicWithMsg(block, src, msg_inst, .@"safety check");
|
|
}
|
|
|
|
fn emitBackwardBranch(sema: *Sema, block: *Block, src: LazySrcLoc) !void {
|
|
sema.branch_count += 1;
|
|
if (sema.branch_count > sema.branch_quota) {
|
|
const msg = try sema.errMsg(
|
|
block,
|
|
src,
|
|
"evaluation exceeded {d} backwards branches",
|
|
.{sema.branch_quota},
|
|
);
|
|
try sema.errNote(
|
|
block,
|
|
src,
|
|
msg,
|
|
"use @setEvalBranchQuota() to raise the branch limit from {d}",
|
|
.{sema.branch_quota},
|
|
);
|
|
return sema.failWithOwnedErrorMsg(msg);
|
|
}
|
|
}
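
// Illustrative sketch of the quota enforced above: the default limit of
// 1000 backward branches can be raised with `@setEvalBranchQuota`.
test "eval branch quota sketch" {
    comptime {
        @setEvalBranchQuota(2000); // without this, 1500 iterations would fail
        var i: u32 = 0;
        while (i < 1500) : (i += 1) {}
    }
}
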
fn fieldVal(
|
|
sema: *Sema,
|
|
block: *Block,
|
|
src: LazySrcLoc,
|
|
object: Air.Inst.Ref,
|
|
field_name: InternPool.NullTerminatedString,
|
|
field_name_src: LazySrcLoc,
|
|
) CompileError!Air.Inst.Ref {
|
|
// When editing this function, note that there is corresponding logic to be edited
|
|
// in `fieldPtr`. This function takes a value and returns a value.
|
|
|
|
const mod = sema.mod;
|
|
const ip = &mod.intern_pool;
|
|
const object_src = src; // TODO better source location
|
|
const object_ty = sema.typeOf(object);
|
|
|
|
// Zig allows dereferencing a single pointer during field lookup. Note that
// we don't actually need to generate the dereference for some field lookups,
// like the length of arrays and other comptime operations.
|
|
const is_pointer_to = object_ty.isSinglePointer(mod);
|
|
|
|
const inner_ty = if (is_pointer_to)
|
|
object_ty.childType(mod)
|
|
else
|
|
object_ty;
|
|
|
|
switch (inner_ty.zigTypeTag(mod)) {
|
|
.Array => {
|
|
if (ip.stringEqlSlice(field_name, "len")) {
|
|
return Air.internedToRef((try mod.intValue(Type.usize, inner_ty.arrayLen(mod))).toIntern());
|
|
} else if (ip.stringEqlSlice(field_name, "ptr") and is_pointer_to) {
|
|
const ptr_info = object_ty.ptrInfo(mod);
|
|
const result_ty = try mod.ptrType(.{
|
|
.child = ptr_info.child.toType().childType(mod).toIntern(),
|
|
.sentinel = ptr_info.sentinel,
|
|
.flags = .{
|
|
.size = .Many,
|
|
.alignment = ptr_info.flags.alignment,
|
|
.is_const = ptr_info.flags.is_const,
|
|
.is_volatile = ptr_info.flags.is_volatile,
|
|
.is_allowzero = ptr_info.flags.is_allowzero,
|
|
.address_space = ptr_info.flags.address_space,
|
|
.vector_index = ptr_info.flags.vector_index,
|
|
},
|
|
.packed_offset = ptr_info.packed_offset,
|
|
});
|
|
return sema.coerce(block, result_ty, object, src);
|
|
} else {
|
|
return sema.fail(
|
|
block,
|
|
field_name_src,
|
|
"no member named '{}' in '{}'",
|
|
.{ field_name.fmt(ip), object_ty.fmt(mod) },
|
|
);
|
|
}
|
|
},
|
|
.Pointer => {
|
|
const ptr_info = inner_ty.ptrInfo(mod);
|
|
if (ptr_info.flags.size == .Slice) {
|
|
if (ip.stringEqlSlice(field_name, "ptr")) {
|
|
const slice = if (is_pointer_to)
|
|
try sema.analyzeLoad(block, src, object, object_src)
|
|
else
|
|
object;
|
|
return sema.analyzeSlicePtr(block, object_src, slice, inner_ty);
|
|
} else if (ip.stringEqlSlice(field_name, "len")) {
|
|
const slice = if (is_pointer_to)
|
|
try sema.analyzeLoad(block, src, object, object_src)
|
|
else
|
|
object;
|
|
return sema.analyzeSliceLen(block, src, slice);
|
|
} else {
|
|
return sema.fail(
|
|
block,
|
|
field_name_src,
|
|
"no member named '{}' in '{}'",
|
|
.{ field_name.fmt(ip), object_ty.fmt(mod) },
|
|
);
|
|
}
|
|
}
|
|
},
|
|
.Type => {
|
|
const dereffed_type = if (is_pointer_to)
|
|
try sema.analyzeLoad(block, src, object, object_src)
|
|
else
|
|
object;
|
|
|
|
const val = (try sema.resolveDefinedValue(block, object_src, dereffed_type)).?;
|
|
const child_type = val.toType();
|
|
|
|
switch (try child_type.zigTypeTagOrPoison(mod)) {
|
|
.ErrorSet => {
|
|
switch (ip.indexToKey(child_type.toIntern())) {
|
|
.error_set_type => |error_set_type| blk: {
|
|
if (error_set_type.nameIndex(ip, field_name) != null) break :blk;
|
|
const msg = msg: {
|
|
const msg = try sema.errMsg(block, src, "no error named '{}' in '{}'", .{
|
|
field_name.fmt(ip), child_type.fmt(mod),
|
|
});
|
|
errdefer msg.destroy(sema.gpa);
|
|
try sema.addDeclaredHereNote(msg, child_type);
|
|
break :msg msg;
|
|
};
|
|
return sema.failWithOwnedErrorMsg(msg);
|
|
},
|
|
.inferred_error_set_type => {
|
|
return sema.fail(block, src, "TODO handle inferred error sets here", .{});
|
|
},
|
|
.simple_type => |t| {
|
|
assert(t == .anyerror);
|
|
_ = try mod.getErrorValue(field_name);
|
|
},
|
|
else => unreachable,
|
|
}
|
|
|
|
const error_set_type = if (!child_type.isAnyError(mod))
|
|
child_type
|
|
else
|
|
try mod.singleErrorSetType(field_name);
|
|
return Air.internedToRef((try mod.intern(.{ .err = .{
|
|
.ty = error_set_type.toIntern(),
|
|
.name = field_name,
|
|
} })));
|
|
},
|
|
.Union => {
|
|
if (child_type.getNamespaceIndex(mod).unwrap()) |namespace| {
|
|
if (try sema.namespaceLookupVal(block, src, namespace, field_name)) |inst| {
|
|
return inst;
|
|
}
|
|
}
|
|
try sema.resolveTypeFields(child_type);
|
|
if (child_type.unionTagType(mod)) |enum_ty| {
|
|
if (enum_ty.enumFieldIndex(field_name, mod)) |field_index_usize| {
|
|
const field_index = @as(u32, @intCast(field_index_usize));
|
|
return Air.internedToRef((try mod.enumValueFieldIndex(enum_ty, field_index)).toIntern());
|
|
}
|
|
}
|
|
return sema.failWithBadMemberAccess(block, child_type, field_name_src, field_name);
|
|
},
|
|
.Enum => {
|
|
if (child_type.getNamespaceIndex(mod).unwrap()) |namespace| {
|
|
if (try sema.namespaceLookupVal(block, src, namespace, field_name)) |inst| {
|
|
return inst;
|
|
}
|
|
}
|
|
const field_index_usize = child_type.enumFieldIndex(field_name, mod) orelse
|
|
return sema.failWithBadMemberAccess(block, child_type, field_name_src, field_name);
|
|
const field_index = @as(u32, @intCast(field_index_usize));
|
|
const enum_val = try mod.enumValueFieldIndex(child_type, field_index);
|
|
return Air.internedToRef(enum_val.toIntern());
|
|
},
|
|
.Struct, .Opaque => {
|
|
if (child_type.getNamespaceIndex(mod).unwrap()) |namespace| {
|
|
if (try sema.namespaceLookupVal(block, src, namespace, field_name)) |inst| {
|
|
return inst;
|
|
}
|
|
}
|
|
return sema.failWithBadMemberAccess(block, child_type, src, field_name);
|
|
},
|
|
else => {
|
|
const msg = msg: {
|
|
const msg = try sema.errMsg(block, src, "type '{}' has no members", .{child_type.fmt(mod)});
|
|
errdefer msg.destroy(sema.gpa);
|
|
if (child_type.isSlice(mod)) try sema.errNote(block, src, msg, "slice values have 'len' and 'ptr' members", .{});
|
|
if (child_type.zigTypeTag(mod) == .Array) try sema.errNote(block, src, msg, "array values have a 'len' member", .{});
|
|
break :msg msg;
|
|
};
|
|
return sema.failWithOwnedErrorMsg(msg);
|
|
},
|
|
}
|
|
},
|
|
.Struct => if (is_pointer_to) {
|
|
// Avoid loading the entire struct by fetching a pointer and loading that
|
|
const field_ptr = try sema.structFieldPtr(block, src, object, field_name, field_name_src, inner_ty, false);
|
|
return sema.analyzeLoad(block, src, field_ptr, object_src);
|
|
} else {
|
|
return sema.structFieldVal(block, src, object, field_name, field_name_src, inner_ty);
|
|
},
|
|
.Union => if (is_pointer_to) {
|
|
// Avoid loading the entire union by fetching a pointer and loading that
|
|
const field_ptr = try sema.unionFieldPtr(block, src, object, field_name, field_name_src, inner_ty, false);
|
|
return sema.analyzeLoad(block, src, field_ptr, object_src);
|
|
} else {
|
|
return sema.unionFieldVal(block, src, object, field_name, field_name_src, inner_ty);
|
|
},
|
|
else => {},
|
|
}
|
|
return sema.failWithInvalidFieldAccess(block, src, object_ty, field_name);
|
|
}
|
|
|
|
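/// Analyzes a field access that produces a pointer, e.g. the store target in
/// hypothetical user code like:
///
///     var s: struct { x: u32 } = .{ .x = 1 };
///     s.x = 2;
///
/// Here `s.x` is resolved to a `*u32` pointing at the field.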
fn fieldPtr(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    object_ptr: Air.Inst.Ref,
    field_name: InternPool.NullTerminatedString,
    field_name_src: LazySrcLoc,
    initializing: bool,
) CompileError!Air.Inst.Ref {
    // When editing this function, note that there is corresponding logic to be edited
    // in `fieldVal`. This function takes a pointer and returns a pointer.

    const mod = sema.mod;
    const ip = &mod.intern_pool;
    const object_ptr_src = src; // TODO better source location
    const object_ptr_ty = sema.typeOf(object_ptr);
    const object_ty = switch (object_ptr_ty.zigTypeTag(mod)) {
        .Pointer => object_ptr_ty.childType(mod),
        else => return sema.fail(block, object_ptr_src, "expected pointer, found '{}'", .{object_ptr_ty.fmt(mod)}),
    };

    // Zig allows dereferencing a single pointer during field lookup. Note that
    // we don't actually need to generate the dereference for some field lookups, like the
    // length of arrays and other comptime operations.
    const is_pointer_to = object_ty.isSinglePointer(mod);

    const inner_ty = if (is_pointer_to)
        object_ty.childType(mod)
    else
        object_ty;

    switch (inner_ty.zigTypeTag(mod)) {
        .Array => {
            if (ip.stringEqlSlice(field_name, "len")) {
                var anon_decl = try block.startAnonDecl();
                defer anon_decl.deinit();
                return sema.analyzeDeclRef(try anon_decl.finish(
                    Type.usize,
                    try mod.intValue(Type.usize, inner_ty.arrayLen(mod)),
                    .none, // default alignment
                ));
            } else {
                return sema.fail(
                    block,
                    field_name_src,
                    "no member named '{}' in '{}'",
                    .{ field_name.fmt(ip), object_ty.fmt(mod) },
                );
            }
        },
        .Pointer => if (inner_ty.isSlice(mod)) {
            const inner_ptr = if (is_pointer_to)
                try sema.analyzeLoad(block, src, object_ptr, object_ptr_src)
            else
                object_ptr;

            const attr_ptr_ty = if (is_pointer_to) object_ty else object_ptr_ty;

            if (ip.stringEqlSlice(field_name, "ptr")) {
                const slice_ptr_ty = inner_ty.slicePtrFieldType(mod);

                const result_ty = try mod.ptrType(.{
                    .child = slice_ptr_ty.toIntern(),
                    .flags = .{
                        .is_const = !attr_ptr_ty.ptrIsMutable(mod),
                        .is_volatile = attr_ptr_ty.isVolatilePtr(mod),
                        .address_space = attr_ptr_ty.ptrAddressSpace(mod),
                    },
                });

                if (try sema.resolveDefinedValue(block, object_ptr_src, inner_ptr)) |val| {
                    return Air.internedToRef((try mod.intern(.{ .ptr = .{
                        .ty = result_ty.toIntern(),
                        .addr = .{ .field = .{
                            .base = val.toIntern(),
                            .index = Value.slice_ptr_index,
                        } },
                    } })));
                }
                try sema.requireRuntimeBlock(block, src, null);

                return block.addTyOp(.ptr_slice_ptr_ptr, result_ty, inner_ptr);
            } else if (ip.stringEqlSlice(field_name, "len")) {
                const result_ty = try mod.ptrType(.{
                    .child = .usize_type,
                    .flags = .{
                        .is_const = !attr_ptr_ty.ptrIsMutable(mod),
                        .is_volatile = attr_ptr_ty.isVolatilePtr(mod),
                        .address_space = attr_ptr_ty.ptrAddressSpace(mod),
                    },
                });

                if (try sema.resolveDefinedValue(block, object_ptr_src, inner_ptr)) |val| {
                    return Air.internedToRef((try mod.intern(.{ .ptr = .{
                        .ty = result_ty.toIntern(),
                        .addr = .{ .field = .{
                            .base = val.toIntern(),
                            .index = Value.slice_len_index,
                        } },
                    } })));
                }
                try sema.requireRuntimeBlock(block, src, null);

                return block.addTyOp(.ptr_slice_len_ptr, result_ty, inner_ptr);
            } else {
                return sema.fail(
                    block,
                    field_name_src,
                    "no member named '{}' in '{}'",
                    .{ field_name.fmt(ip), object_ty.fmt(mod) },
                );
            }
        },
        .Type => {
            _ = try sema.resolveConstValue(block, .unneeded, object_ptr, "");
            const result = try sema.analyzeLoad(block, src, object_ptr, object_ptr_src);
            const inner = if (is_pointer_to)
                try sema.analyzeLoad(block, src, result, object_ptr_src)
            else
                result;

            const val = (sema.resolveDefinedValue(block, src, inner) catch unreachable).?;
            const child_type = val.toType();

            switch (child_type.zigTypeTag(mod)) {
                .ErrorSet => {
                    switch (ip.indexToKey(child_type.toIntern())) {
                        .error_set_type => |error_set_type| blk: {
                            if (error_set_type.nameIndex(ip, field_name) != null) {
                                break :blk;
                            }
                            return sema.fail(block, src, "no error named '{}' in '{}'", .{
                                field_name.fmt(ip), child_type.fmt(mod),
                            });
                        },
                        .inferred_error_set_type => {
                            return sema.fail(block, src, "TODO handle inferred error sets here", .{});
                        },
                        .simple_type => |t| {
                            assert(t == .anyerror);
                            _ = try mod.getErrorValue(field_name);
                        },
                        else => unreachable,
                    }

                    var anon_decl = try block.startAnonDecl();
                    defer anon_decl.deinit();
                    const error_set_type = if (!child_type.isAnyError(mod))
                        child_type
                    else
                        try mod.singleErrorSetType(field_name);
                    return sema.analyzeDeclRef(try anon_decl.finish(
                        error_set_type,
                        (try mod.intern(.{ .err = .{
                            .ty = error_set_type.toIntern(),
                            .name = field_name,
                        } })).toValue(),
                        .none, // default alignment
                    ));
                },
                .Union => {
                    if (child_type.getNamespaceIndex(mod).unwrap()) |namespace| {
                        if (try sema.namespaceLookupRef(block, src, namespace, field_name)) |inst| {
                            return inst;
                        }
                    }
                    try sema.resolveTypeFields(child_type);
                    if (child_type.unionTagType(mod)) |enum_ty| {
                        if (enum_ty.enumFieldIndex(field_name, mod)) |field_index| {
                            const field_index_u32 = @as(u32, @intCast(field_index));
                            var anon_decl = try block.startAnonDecl();
                            defer anon_decl.deinit();
                            return sema.analyzeDeclRef(try anon_decl.finish(
                                enum_ty,
                                try mod.enumValueFieldIndex(enum_ty, field_index_u32),
                                .none, // default alignment
                            ));
                        }
                    }
                    return sema.failWithBadMemberAccess(block, child_type, field_name_src, field_name);
                },
                .Enum => {
                    if (child_type.getNamespaceIndex(mod).unwrap()) |namespace| {
                        if (try sema.namespaceLookupRef(block, src, namespace, field_name)) |inst| {
                            return inst;
                        }
                    }
                    const field_index = child_type.enumFieldIndex(field_name, mod) orelse {
                        return sema.failWithBadMemberAccess(block, child_type, field_name_src, field_name);
                    };
                    const field_index_u32 = @as(u32, @intCast(field_index));
                    var anon_decl = try block.startAnonDecl();
                    defer anon_decl.deinit();
                    return sema.analyzeDeclRef(try anon_decl.finish(
                        child_type,
                        try mod.enumValueFieldIndex(child_type, field_index_u32),
                        .none, // default alignment
                    ));
                },
                .Struct, .Opaque => {
                    if (child_type.getNamespaceIndex(mod).unwrap()) |namespace| {
                        if (try sema.namespaceLookupRef(block, src, namespace, field_name)) |inst| {
                            return inst;
                        }
                    }
                    return sema.failWithBadMemberAccess(block, child_type, field_name_src, field_name);
                },
                else => return sema.fail(block, src, "type '{}' has no members", .{child_type.fmt(mod)}),
            }
        },
        .Struct => {
            const inner_ptr = if (is_pointer_to)
                try sema.analyzeLoad(block, src, object_ptr, object_ptr_src)
            else
                object_ptr;
            return sema.structFieldPtr(block, src, inner_ptr, field_name, field_name_src, inner_ty, initializing);
        },
        .Union => {
            const inner_ptr = if (is_pointer_to)
                try sema.analyzeLoad(block, src, object_ptr, object_ptr_src)
            else
                object_ptr;
            return sema.unionFieldPtr(block, src, inner_ptr, field_name, field_name_src, inner_ty, initializing);
        },
        else => {},
    }
    return sema.failWithInvalidFieldAccess(block, src, object_ty, field_name);
}

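/// The result of resolving the callee of an `lhs.name(...)` call; produced by
/// `fieldCallBind` below.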
const ResolvedFieldCallee = union(enum) {
    /// The LHS of the call was an actual field with this value.
    direct: Air.Inst.Ref,
    /// This is a method call, with the function and first argument given.
    method: struct {
        func_inst: Air.Inst.Ref,
        arg0_inst: Air.Inst.Ref,
    },
};

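/// Resolves the callee of a field call such as `obj.method(arg)`. For a
/// hypothetical method call:
///
///     const S = struct {
///         fn method(self: *S) void { _ = self; }
///     };
///     var s: S = .{};
///     s.method();
///
/// this returns `.method` with `S.method` as the function and `&s` as the
/// first argument. If `method` were instead a field of `S` holding a function
/// pointer, it would return `.direct` with the loaded field value.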
fn fieldCallBind(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    raw_ptr: Air.Inst.Ref,
    field_name: InternPool.NullTerminatedString,
    field_name_src: LazySrcLoc,
) CompileError!ResolvedFieldCallee {
    // When editing this function, note that there is corresponding logic to be edited
    // in `fieldVal`. This function takes a pointer and returns the resolved callee.

    const mod = sema.mod;
    const ip = &mod.intern_pool;
    const raw_ptr_src = src; // TODO better source location
    const raw_ptr_ty = sema.typeOf(raw_ptr);
    const inner_ty = if (raw_ptr_ty.zigTypeTag(mod) == .Pointer and (raw_ptr_ty.ptrSize(mod) == .One or raw_ptr_ty.ptrSize(mod) == .C))
        raw_ptr_ty.childType(mod)
    else
        return sema.fail(block, raw_ptr_src, "expected single pointer, found '{}'", .{raw_ptr_ty.fmt(mod)});

    // Optionally dereference a second pointer to get the concrete type.
    const is_double_ptr = inner_ty.zigTypeTag(mod) == .Pointer and inner_ty.ptrSize(mod) == .One;
    const concrete_ty = if (is_double_ptr) inner_ty.childType(mod) else inner_ty;
    const ptr_ty = if (is_double_ptr) inner_ty else raw_ptr_ty;
    const object_ptr = if (is_double_ptr)
        try sema.analyzeLoad(block, src, raw_ptr, src)
    else
        raw_ptr;

    find_field: {
        switch (concrete_ty.zigTypeTag(mod)) {
            .Struct => {
                try sema.resolveTypeFields(concrete_ty);
                if (mod.typeToStruct(concrete_ty)) |struct_obj| {
                    const field_index_usize = struct_obj.fields.getIndex(field_name) orelse
                        break :find_field;
                    const field_index = @as(u32, @intCast(field_index_usize));
                    const field = struct_obj.fields.values()[field_index];

                    return sema.finishFieldCallBind(block, src, ptr_ty, field.ty, field_index, object_ptr);
                } else if (concrete_ty.isTuple(mod)) {
                    if (ip.stringEqlSlice(field_name, "len")) {
                        return .{ .direct = try mod.intRef(Type.usize, concrete_ty.structFieldCount(mod)) };
                    }
                    if (field_name.toUnsigned(ip)) |field_index| {
                        if (field_index >= concrete_ty.structFieldCount(mod)) break :find_field;
                        return sema.finishFieldCallBind(block, src, ptr_ty, concrete_ty.structFieldType(field_index, mod), field_index, object_ptr);
                    }
                } else {
                    const max = concrete_ty.structFieldCount(mod);
                    for (0..max) |i_usize| {
                        const i = @as(u32, @intCast(i_usize));
                        if (field_name == concrete_ty.structFieldName(i, mod)) {
                            return sema.finishFieldCallBind(block, src, ptr_ty, concrete_ty.structFieldType(i, mod), i, object_ptr);
                        }
                    }
                }
            },
            .Union => {
                try sema.resolveTypeFields(concrete_ty);
                const fields = concrete_ty.unionFields(mod);
                const field_index_usize = fields.getIndex(field_name) orelse break :find_field;
                const field_index = @as(u32, @intCast(field_index_usize));
                const field = fields.values()[field_index];

                return sema.finishFieldCallBind(block, src, ptr_ty, field.ty, field_index, object_ptr);
            },
            .Type => {
                const namespace = try sema.analyzeLoad(block, src, object_ptr, src);
                return .{ .direct = try sema.fieldVal(block, src, namespace, field_name, field_name_src) };
            },
            else => {},
        }
    }

    // If we get here, we need to look for a decl in the type's namespace instead.
    const found_decl = switch (concrete_ty.zigTypeTag(mod)) {
        .Struct, .Opaque, .Union, .Enum => found_decl: {
            if (concrete_ty.getNamespaceIndex(mod).unwrap()) |namespace| {
                if (try sema.namespaceLookup(block, src, namespace, field_name)) |decl_idx| {
                    try sema.addReferencedBy(block, src, decl_idx);
                    const decl_val = try sema.analyzeDeclVal(block, src, decl_idx);
                    const decl_type = sema.typeOf(decl_val);
                    if (mod.typeToFunc(decl_type)) |func_type| f: {
                        if (func_type.param_types.len == 0) break :f;

                        const first_param_type = func_type.param_types.get(ip)[0].toType();
                        // zig fmt: off
                        if (first_param_type.isGenericPoison() or (
                                first_param_type.zigTypeTag(mod) == .Pointer and
                                (first_param_type.ptrSize(mod) == .One or
                                first_param_type.ptrSize(mod) == .C) and
                                first_param_type.childType(mod).eql(concrete_ty, mod)))
                        {
                            // zig fmt: on
                            // Note that if the param type is generic poison, we know that it must
                            // specifically be `anytype` since it's the first parameter, meaning we
                            // can safely assume it can be a pointer.
                            // TODO: bound fn calls on rvalues should probably
                            // generate a by-value argument somehow.
                            return .{ .method = .{
                                .func_inst = decl_val,
                                .arg0_inst = object_ptr,
                            } };
                        } else if (first_param_type.eql(concrete_ty, mod)) {
                            const deref = try sema.analyzeLoad(block, src, object_ptr, src);
                            return .{ .method = .{
                                .func_inst = decl_val,
                                .arg0_inst = deref,
                            } };
                        } else if (first_param_type.zigTypeTag(mod) == .Optional) {
                            const child = first_param_type.optionalChild(mod);
                            if (child.eql(concrete_ty, mod)) {
                                const deref = try sema.analyzeLoad(block, src, object_ptr, src);
                                return .{ .method = .{
                                    .func_inst = decl_val,
                                    .arg0_inst = deref,
                                } };
                            } else if (child.zigTypeTag(mod) == .Pointer and
                                child.ptrSize(mod) == .One and
                                child.childType(mod).eql(concrete_ty, mod))
                            {
                                return .{ .method = .{
                                    .func_inst = decl_val,
                                    .arg0_inst = object_ptr,
                                } };
                            }
                        } else if (first_param_type.zigTypeTag(mod) == .ErrorUnion and
                            first_param_type.errorUnionPayload(mod).eql(concrete_ty, mod))
                        {
                            const deref = try sema.analyzeLoad(block, src, object_ptr, src);
                            return .{ .method = .{
                                .func_inst = decl_val,
                                .arg0_inst = deref,
                            } };
                        }
                    }
                    break :found_decl decl_idx;
                }
            }
            break :found_decl null;
        },
        else => null,
    };

    const msg = msg: {
        const msg = try sema.errMsg(block, src, "no field or member function named '{}' in '{}'", .{
            field_name.fmt(ip),
            concrete_ty.fmt(mod),
        });
        errdefer msg.destroy(sema.gpa);
        try sema.addDeclaredHereNote(msg, concrete_ty);
        if (found_decl) |decl_idx| {
            const decl = mod.declPtr(decl_idx);
            try mod.errNoteNonLazy(decl.srcLoc(mod), msg, "'{}' is not a member function", .{field_name.fmt(ip)});
        }
        break :msg msg;
    };
    return sema.failWithOwnedErrorMsg(msg);
}

fn finishFieldCallBind(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    ptr_ty: Type,
    field_ty: Type,
    field_index: u32,
    object_ptr: Air.Inst.Ref,
) CompileError!ResolvedFieldCallee {
    const mod = sema.mod;
    const ptr_field_ty = try mod.ptrType(.{
        .child = field_ty.toIntern(),
        .flags = .{
            .is_const = !ptr_ty.ptrIsMutable(mod),
            .address_space = ptr_ty.ptrAddressSpace(mod),
        },
    });

    const container_ty = ptr_ty.childType(mod);
    if (container_ty.zigTypeTag(mod) == .Struct) {
        if (try container_ty.structFieldValueComptime(mod, field_index)) |default_val| {
            return .{ .direct = Air.internedToRef(default_val.toIntern()) };
        }
    }

    if (try sema.resolveDefinedValue(block, src, object_ptr)) |struct_ptr_val| {
        const pointer = Air.internedToRef((try mod.intern(.{ .ptr = .{
            .ty = ptr_field_ty.toIntern(),
            .addr = .{ .field = .{
                .base = struct_ptr_val.toIntern(),
                .index = field_index,
            } },
        } })));
        return .{ .direct = try sema.analyzeLoad(block, src, pointer, src) };
    }

    try sema.requireRuntimeBlock(block, src, null);
    const ptr_inst = try block.addStructFieldPtr(object_ptr, field_index, ptr_field_ty);
    return .{ .direct = try sema.analyzeLoad(block, src, ptr_inst, src) };
}

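/// Looks up `decl_name` in `namespace`, enforcing that a decl referenced from
/// another file must be marked `pub`. Returns `null` if no such decl exists.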
fn namespaceLookup(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    namespace: Namespace.Index,
    decl_name: InternPool.NullTerminatedString,
) CompileError!?Decl.Index {
    const mod = sema.mod;
    const gpa = sema.gpa;
    if (try sema.lookupInNamespace(block, src, namespace, decl_name, true)) |decl_index| {
        const decl = mod.declPtr(decl_index);
        if (!decl.is_pub and decl.getFileScope(mod) != block.getFileScope(mod)) {
            const msg = msg: {
                const msg = try sema.errMsg(block, src, "'{}' is not marked 'pub'", .{
                    decl_name.fmt(&mod.intern_pool),
                });
                errdefer msg.destroy(gpa);
                try mod.errNoteNonLazy(decl.srcLoc(mod), msg, "declared here", .{});
                break :msg msg;
            };
            return sema.failWithOwnedErrorMsg(msg);
        }
        return decl_index;
    }
    return null;
}

fn namespaceLookupRef(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    namespace: Namespace.Index,
    decl_name: InternPool.NullTerminatedString,
) CompileError!?Air.Inst.Ref {
    const decl = (try sema.namespaceLookup(block, src, namespace, decl_name)) orelse return null;
    try sema.addReferencedBy(block, src, decl);
    return try sema.analyzeDeclRef(decl);
}

fn namespaceLookupVal(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    namespace: Namespace.Index,
    decl_name: InternPool.NullTerminatedString,
) CompileError!?Air.Inst.Ref {
    const decl = (try sema.namespaceLookup(block, src, namespace, decl_name)) orelse return null;
    return try sema.analyzeDeclVal(block, src, decl);
}

fn structFieldPtr(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    struct_ptr: Air.Inst.Ref,
    field_name: InternPool.NullTerminatedString,
    field_name_src: LazySrcLoc,
    struct_ty: Type,
    initializing: bool,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    assert(struct_ty.zigTypeTag(mod) == .Struct);

    try sema.resolveTypeFields(struct_ty);
    try sema.resolveStructLayout(struct_ty);

    if (struct_ty.isTuple(mod)) {
        if (mod.intern_pool.stringEqlSlice(field_name, "len")) {
            const len_inst = try mod.intRef(Type.usize, struct_ty.structFieldCount(mod));
            return sema.analyzeRef(block, src, len_inst);
        }
        const field_index = try sema.tupleFieldIndex(block, struct_ty, field_name, field_name_src);
        return sema.tupleFieldPtr(block, src, struct_ptr, field_name_src, field_index, initializing);
    } else if (struct_ty.isAnonStruct(mod)) {
        const field_index = try sema.anonStructFieldIndex(block, struct_ty, field_name, field_name_src);
        return sema.tupleFieldPtr(block, src, struct_ptr, field_name_src, field_index, initializing);
    }

    const struct_obj = mod.typeToStruct(struct_ty).?;

    const field_index_big = struct_obj.fields.getIndex(field_name) orelse
        return sema.failWithBadStructFieldAccess(block, struct_obj, field_name_src, field_name);
    const field_index = @as(u32, @intCast(field_index_big));

    return sema.structFieldPtrByIndex(block, src, struct_ptr, field_index, field_name_src, struct_ty, initializing);
}

fn structFieldPtrByIndex(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    struct_ptr: Air.Inst.Ref,
    field_index: u32,
    field_src: LazySrcLoc,
    struct_ty: Type,
    initializing: bool,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    if (struct_ty.isAnonStruct(mod)) {
        return sema.tupleFieldPtr(block, src, struct_ptr, field_src, field_index, initializing);
    }

    const struct_obj = mod.typeToStruct(struct_ty).?;
    const field = struct_obj.fields.values()[field_index];
    const struct_ptr_ty = sema.typeOf(struct_ptr);
    const struct_ptr_ty_info = struct_ptr_ty.ptrInfo(mod);

    var ptr_ty_data: InternPool.Key.PtrType = .{
        .child = field.ty.toIntern(),
        .flags = .{
            .is_const = struct_ptr_ty_info.flags.is_const,
            .is_volatile = struct_ptr_ty_info.flags.is_volatile,
            .address_space = struct_ptr_ty_info.flags.address_space,
        },
    };

    const target = mod.getTarget();

    const parent_align = struct_ptr_ty_info.flags.alignment.toByteUnitsOptional() orelse
        try sema.typeAbiAlignment(struct_ptr_ty_info.child.toType());

    if (struct_obj.layout == .Packed) {
        comptime assert(Type.packed_struct_layout_version == 2);

        var running_bits: u16 = 0;
        for (struct_obj.fields.values(), 0..) |f, i| {
            if (!(try sema.typeHasRuntimeBits(f.ty))) continue;

            if (i == field_index) {
                ptr_ty_data.packed_offset.bit_offset = running_bits;
            }
            running_bits += @as(u16, @intCast(f.ty.bitSize(mod)));
        }
        ptr_ty_data.packed_offset.host_size = (running_bits + 7) / 8;
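        // For example (a hypothetical layout, assuming every field has runtime
        // bits): in `packed struct { a: u3, b: u7, c: u6 }`, `b` gets
        // bit_offset 3, `c` gets bit_offset 10, and host_size is
        // (16 + 7) / 8 = 2 bytes.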

        // If this is a packed struct embedded in another one, we need to offset
        // the bits against each other.
        if (struct_ptr_ty_info.packed_offset.host_size != 0) {
            ptr_ty_data.packed_offset.host_size = struct_ptr_ty_info.packed_offset.host_size;
            ptr_ty_data.packed_offset.bit_offset += struct_ptr_ty_info.packed_offset.bit_offset;
        }

        ptr_ty_data.flags.alignment = Alignment.fromByteUnits(parent_align);

        // If the field happens to be byte-aligned, simplify the pointer type.
        // The pointee type bit size must match its ABI byte size so that loads and stores
        // do not interfere with the surrounding packed bits.
        // We do not attempt this with big-endian targets yet because of nested
        // structs and floats. I need to double-check the desired behavior for big endian
        // targets before adding the necessary complications to this code. This will not
        // cause miscompilations; it only means the field pointer uses bit masking when it
        // might not be strictly necessary.
        if (parent_align != 0 and ptr_ty_data.packed_offset.bit_offset % 8 == 0 and
            target.cpu.arch.endian() == .Little)
        {
            const elem_size_bytes = ptr_ty_data.child.toType().abiSize(mod);
            const elem_size_bits = ptr_ty_data.child.toType().bitSize(mod);
            if (elem_size_bytes * 8 == elem_size_bits) {
                const byte_offset = ptr_ty_data.packed_offset.bit_offset / 8;
                const new_align = @as(Alignment, @enumFromInt(@ctz(byte_offset | parent_align)));
                assert(new_align != .none);
                ptr_ty_data.flags.alignment = new_align;
                ptr_ty_data.packed_offset = .{ .host_size = 0, .bit_offset = 0 };
            }
        }
    } else if (struct_obj.layout == .Extern) {
        // For extern structs, field alignment might be bigger than the type's natural
        // alignment. E.g., in `extern struct { x: u32, y: u16 }` the second field is
        // aligned as u32.
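        // As a hypothetical worked case: with a parent alignment of 4, `y`
        // above sits at byte offset 4, so gcd(4, 4) = 4 and the field pointer
        // is `*align(4) u16`.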
        const field_offset = struct_ty.structFieldOffset(field_index, mod);
        ptr_ty_data.flags.alignment = Alignment.fromByteUnits(
            if (parent_align == 0) 0 else std.math.gcd(field_offset, parent_align),
        );
    } else {
        // Our alignment is capped at the field alignment
        const field_align = try sema.structFieldAlignment(field, struct_obj.layout);
        ptr_ty_data.flags.alignment = Alignment.fromByteUnits(@min(field_align, parent_align));
    }

    const ptr_field_ty = try mod.ptrType(ptr_ty_data);

    if (field.is_comptime) {
        const val = try mod.intern(.{ .ptr = .{
            .ty = ptr_field_ty.toIntern(),
            .addr = .{ .comptime_field = field.default_val },
        } });
        return Air.internedToRef(val);
    }

    if (try sema.resolveDefinedValue(block, src, struct_ptr)) |struct_ptr_val| {
        const val = try mod.intern(.{ .ptr = .{
            .ty = ptr_field_ty.toIntern(),
            .addr = .{ .field = .{
                .base = try struct_ptr_val.intern(struct_ptr_ty, mod),
                .index = field_index,
            } },
        } });
        return Air.internedToRef(val);
    }

    try sema.requireRuntimeBlock(block, src, null);
    return block.addStructFieldPtr(struct_ptr, field_index, ptr_field_ty);
}

fn structFieldVal(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    struct_byval: Air.Inst.Ref,
    field_name: InternPool.NullTerminatedString,
    field_name_src: LazySrcLoc,
    struct_ty: Type,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    assert(struct_ty.zigTypeTag(mod) == .Struct);

    try sema.resolveTypeFields(struct_ty);
    switch (mod.intern_pool.indexToKey(struct_ty.toIntern())) {
        .struct_type => |struct_type| {
            const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
            if (struct_obj.is_tuple) return sema.tupleFieldVal(block, src, struct_byval, field_name, field_name_src, struct_ty);

            const field_index_usize = struct_obj.fields.getIndex(field_name) orelse
                return sema.failWithBadStructFieldAccess(block, struct_obj, field_name_src, field_name);
            const field_index = @as(u32, @intCast(field_index_usize));
            const field = struct_obj.fields.values()[field_index];

            if (field.is_comptime) {
                return Air.internedToRef(field.default_val);
            }

            if (try sema.resolveMaybeUndefVal(struct_byval)) |struct_val| {
                if (struct_val.isUndef(mod)) return mod.undefRef(field.ty);
                if ((try sema.typeHasOnePossibleValue(field.ty))) |opv| {
                    return Air.internedToRef(opv.toIntern());
                }
                return Air.internedToRef((try struct_val.fieldValue(mod, field_index)).toIntern());
            }

            try sema.requireRuntimeBlock(block, src, null);
            return block.addStructFieldVal(struct_byval, field_index, field.ty);
        },
        .anon_struct_type => |anon_struct| {
            if (anon_struct.names.len == 0) {
                return sema.tupleFieldVal(block, src, struct_byval, field_name, field_name_src, struct_ty);
            } else {
                const field_index = try sema.anonStructFieldIndex(block, struct_ty, field_name, field_name_src);
                return sema.tupleFieldValByIndex(block, src, struct_byval, field_index, struct_ty);
            }
        },
        else => unreachable,
    }
}

fn tupleFieldVal(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    tuple_byval: Air.Inst.Ref,
    field_name: InternPool.NullTerminatedString,
    field_name_src: LazySrcLoc,
    tuple_ty: Type,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    if (mod.intern_pool.stringEqlSlice(field_name, "len")) {
        return mod.intRef(Type.usize, tuple_ty.structFieldCount(mod));
    }
    const field_index = try sema.tupleFieldIndex(block, tuple_ty, field_name, field_name_src);
    return sema.tupleFieldValByIndex(block, src, tuple_byval, field_index, tuple_ty);
}

/// Asserts that `field_name` is not "len".
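/// Tuple fields are named by decimal index, so a hypothetical `tup.@"1"`
/// access reaches here with `field_name` "1" and resolves to field index 1,
/// provided the tuple has at least two fields.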
fn tupleFieldIndex(
    sema: *Sema,
    block: *Block,
    tuple_ty: Type,
    field_name: InternPool.NullTerminatedString,
    field_name_src: LazySrcLoc,
) CompileError!u32 {
    const mod = sema.mod;
    assert(!mod.intern_pool.stringEqlSlice(field_name, "len"));
    if (field_name.toUnsigned(&mod.intern_pool)) |field_index| {
        if (field_index < tuple_ty.structFieldCount(mod)) return field_index;
        return sema.fail(block, field_name_src, "index '{}' out of bounds of tuple '{}'", .{
            field_name.fmt(&mod.intern_pool), tuple_ty.fmt(mod),
        });
    }

    return sema.fail(block, field_name_src, "no field named '{}' in tuple '{}'", .{
        field_name.fmt(&mod.intern_pool), tuple_ty.fmt(mod),
    });
}

fn tupleFieldValByIndex(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    tuple_byval: Air.Inst.Ref,
    field_index: u32,
    tuple_ty: Type,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const field_ty = tuple_ty.structFieldType(field_index, mod);

    if (try tuple_ty.structFieldValueComptime(mod, field_index)) |default_value| {
        return Air.internedToRef(default_value.toIntern());
    }

    if (try sema.resolveMaybeUndefVal(tuple_byval)) |tuple_val| {
        if ((try sema.typeHasOnePossibleValue(field_ty))) |opv| {
            return Air.internedToRef(opv.toIntern());
        }
        return switch (mod.intern_pool.indexToKey(tuple_val.toIntern())) {
            .undef => mod.undefRef(field_ty),
            .aggregate => |aggregate| Air.internedToRef(switch (aggregate.storage) {
                .bytes => |bytes| try mod.intValue(Type.u8, bytes[0]),
                .elems => |elems| elems[field_index].toValue(),
                .repeated_elem => |elem| elem.toValue(),
            }.toIntern()),
            else => unreachable,
        };
    }

    if (try tuple_ty.structFieldValueComptime(mod, field_index)) |default_val| {
        return Air.internedToRef(default_val.toIntern());
    }

    try sema.requireRuntimeBlock(block, src, null);
    return block.addStructFieldVal(tuple_byval, field_index, field_ty);
}

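/// Analyzes a pointer to a union field, e.g. the access in hypothetical code
/// like:
///
///     var u: union(enum) { x: u32, y: f32 } = .{ .x = 1 };
///     u.x += 1;
///
/// For safety-checked unions, a runtime access of an inactive field panics;
/// when the union value is comptime-known, it is a compile error instead.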
fn unionFieldPtr(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    union_ptr: Air.Inst.Ref,
    field_name: InternPool.NullTerminatedString,
    field_name_src: LazySrcLoc,
    union_ty: Type,
    initializing: bool,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const ip = &mod.intern_pool;

    assert(union_ty.zigTypeTag(mod) == .Union);

    const union_ptr_ty = sema.typeOf(union_ptr);
    const union_ptr_info = union_ptr_ty.ptrInfo(mod);
    try sema.resolveTypeFields(union_ty);
    const union_obj = mod.typeToUnion(union_ty).?;
    const field_index = try sema.unionFieldIndex(block, union_ty, field_name, field_name_src);
    const field = union_obj.fields.values()[field_index];
    const ptr_field_ty = try mod.ptrType(.{
        .child = field.ty.toIntern(),
        .flags = .{
            .is_const = union_ptr_info.flags.is_const,
            .is_volatile = union_ptr_info.flags.is_volatile,
            .address_space = union_ptr_info.flags.address_space,
            .alignment = if (union_obj.layout == .Auto) blk: {
                const union_align = union_ptr_info.flags.alignment.toByteUnitsOptional() orelse try sema.typeAbiAlignment(union_ty);
                const field_align = try sema.unionFieldAlignment(field);
                break :blk InternPool.Alignment.fromByteUnits(@min(union_align, field_align));
            } else union_ptr_info.flags.alignment,
        },
        .packed_offset = union_ptr_info.packed_offset,
    });
    const enum_field_index = @as(u32, @intCast(union_obj.tag_ty.enumFieldIndex(field_name, mod).?));

    if (initializing and field.ty.zigTypeTag(mod) == .NoReturn) {
        const msg = msg: {
            const msg = try sema.errMsg(block, src, "cannot initialize 'noreturn' field of union", .{});
            errdefer msg.destroy(sema.gpa);

            try sema.addFieldErrNote(union_ty, field_index, msg, "field '{}' declared here", .{
                field_name.fmt(ip),
            });
            try sema.addDeclaredHereNote(msg, union_ty);
            break :msg msg;
        };
        return sema.failWithOwnedErrorMsg(msg);
    }

    if (try sema.resolveDefinedValue(block, src, union_ptr)) |union_ptr_val| ct: {
        switch (union_obj.layout) {
            .Auto => if (!initializing) {
                const union_val = (try sema.pointerDeref(block, src, union_ptr_val, union_ptr_ty)) orelse
                    break :ct;
                if (union_val.isUndef(mod)) {
                    return sema.failWithUseOfUndef(block, src);
                }
                const un = ip.indexToKey(union_val.toIntern()).un;
                const field_tag = try mod.enumValueFieldIndex(union_obj.tag_ty, enum_field_index);
                const tag_matches = un.tag == field_tag.toIntern();
                if (!tag_matches) {
                    const msg = msg: {
                        const active_index = union_obj.tag_ty.enumTagFieldIndex(un.tag.toValue(), mod).?;
                        const active_field_name = union_obj.tag_ty.enumFieldName(active_index, mod);
                        const msg = try sema.errMsg(block, src, "access of union field '{}' while field '{}' is active", .{
                            field_name.fmt(ip),
                            active_field_name.fmt(ip),
                        });
                        errdefer msg.destroy(sema.gpa);
                        try sema.addDeclaredHereNote(msg, union_ty);
                        break :msg msg;
                    };
                    return sema.failWithOwnedErrorMsg(msg);
                }
            },
            .Packed, .Extern => {},
        }
        return Air.internedToRef((try mod.intern(.{ .ptr = .{
            .ty = ptr_field_ty.toIntern(),
            .addr = .{ .field = .{
                .base = union_ptr_val.toIntern(),
                .index = field_index,
            } },
        } })));
    }

    try sema.requireRuntimeBlock(block, src, null);
    if (!initializing and union_obj.layout == .Auto and block.wantSafety() and
        union_ty.unionTagTypeSafety(mod) != null and union_obj.fields.count() > 1)
    {
        const wanted_tag_val = try mod.enumValueFieldIndex(union_obj.tag_ty, enum_field_index);
        const wanted_tag = Air.internedToRef(wanted_tag_val.toIntern());
        // TODO would it be better if get_union_tag supported pointers to unions?
        const union_val = try block.addTyOp(.load, union_ty, union_ptr);
        const active_tag = try block.addTyOp(.get_union_tag, union_obj.tag_ty, union_val);
        try sema.panicInactiveUnionField(block, src, active_tag, wanted_tag);
    }
    if (field.ty.zigTypeTag(mod) == .NoReturn) {
        _ = try block.addNoOp(.unreach);
        return Air.Inst.Ref.unreachable_value;
    }
    return block.addStructFieldPtr(union_ptr, field_index, ptr_field_ty);
}

fn unionFieldVal(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    union_byval: Air.Inst.Ref,
    field_name: InternPool.NullTerminatedString,
    field_name_src: LazySrcLoc,
    union_ty: Type,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const ip = &mod.intern_pool;
    assert(union_ty.zigTypeTag(mod) == .Union);

    try sema.resolveTypeFields(union_ty);
    const union_obj = mod.typeToUnion(union_ty).?;
    const field_index = try sema.unionFieldIndex(block, union_ty, field_name, field_name_src);
    const field = union_obj.fields.values()[field_index];
    const enum_field_index = @as(u32, @intCast(union_obj.tag_ty.enumFieldIndex(field_name, mod).?));

    if (try sema.resolveMaybeUndefVal(union_byval)) |union_val| {
        if (union_val.isUndef(mod)) return mod.undefRef(field.ty);

        const un = ip.indexToKey(union_val.toIntern()).un;
        const field_tag = try mod.enumValueFieldIndex(union_obj.tag_ty, enum_field_index);
        const tag_matches = un.tag == field_tag.toIntern();
        switch (union_obj.layout) {
            .Auto => {
                if (tag_matches) {
                    return Air.internedToRef(un.val);
                } else {
                    const msg = msg: {
                        const active_index = union_obj.tag_ty.enumTagFieldIndex(un.tag.toValue(), mod).?;
                        const active_field_name = union_obj.tag_ty.enumFieldName(active_index, mod);
                        const msg = try sema.errMsg(block, src, "access of union field '{}' while field '{}' is active", .{
                            field_name.fmt(ip), active_field_name.fmt(ip),
                        });
                        errdefer msg.destroy(sema.gpa);
                        try sema.addDeclaredHereNote(msg, union_ty);
                        break :msg msg;
                    };
                    return sema.failWithOwnedErrorMsg(msg);
                }
            },
            .Packed, .Extern => {
                if (tag_matches) {
                    return Air.internedToRef(un.val);
                } else {
                    const old_ty = union_ty.unionFieldType(un.tag.toValue(), mod);
                    if (try sema.bitCastVal(block, src, un.val.toValue(), old_ty, field.ty, 0)) |new_val| {
                        return Air.internedToRef(new_val.toIntern());
                    }
                }
            },
        }
    }

    try sema.requireRuntimeBlock(block, src, null);
    if (union_obj.layout == .Auto and block.wantSafety() and
        union_ty.unionTagTypeSafety(mod) != null and union_obj.fields.count() > 1)
    {
        const wanted_tag_val = try mod.enumValueFieldIndex(union_obj.tag_ty, enum_field_index);
        const wanted_tag = Air.internedToRef(wanted_tag_val.toIntern());
        const active_tag = try block.addTyOp(.get_union_tag, union_obj.tag_ty, union_byval);
        try sema.panicInactiveUnionField(block, src, active_tag, wanted_tag);
    }
    if (field.ty.zigTypeTag(mod) == .NoReturn) {
        _ = try block.addNoOp(.unreach);
        return Air.Inst.Ref.unreachable_value;
    }
    return block.addStructFieldVal(union_byval, field_index, field.ty);
}

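/// Analyzes an element pointer, e.g. the `a[2]` in hypothetical code like:
///
///     var a: [4]u8 = .{ 0, 1, 2, 3 };
///     a[2] = 9;
///
/// `indexable_ptr` is a pointer to the indexable (here `*[4]u8`), and the
/// result is a pointer to the selected element (here `*u8`).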
fn elemPtr(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    indexable_ptr: Air.Inst.Ref,
    elem_index: Air.Inst.Ref,
    elem_index_src: LazySrcLoc,
    init: bool,
    oob_safety: bool,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const indexable_ptr_src = src; // TODO better source location
    const indexable_ptr_ty = sema.typeOf(indexable_ptr);

    const indexable_ty = switch (indexable_ptr_ty.zigTypeTag(mod)) {
        .Pointer => indexable_ptr_ty.childType(mod),
        else => return sema.fail(block, indexable_ptr_src, "expected pointer, found '{}'", .{indexable_ptr_ty.fmt(mod)}),
    };
    try checkIndexable(sema, block, src, indexable_ty);

    switch (indexable_ty.zigTypeTag(mod)) {
        .Array, .Vector => return sema.elemPtrArray(block, src, indexable_ptr_src, indexable_ptr, elem_index_src, elem_index, init, oob_safety),
        .Struct => {
            // Tuple field access.
            const index_val = try sema.resolveConstValue(block, elem_index_src, elem_index, "tuple field access index must be comptime-known");
            const index = @as(u32, @intCast(index_val.toUnsignedInt(mod)));
            return sema.tupleFieldPtr(block, src, indexable_ptr, elem_index_src, index, init);
        },
        else => {
            const indexable = try sema.analyzeLoad(block, indexable_ptr_src, indexable_ptr, indexable_ptr_src);
            return elemPtrOneLayerOnly(sema, block, src, indexable, elem_index, elem_index_src, init, oob_safety);
        },
    }
}

/// Asserts that the type of `indexable` is a pointer.
fn elemPtrOneLayerOnly(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    indexable: Air.Inst.Ref,
    elem_index: Air.Inst.Ref,
    elem_index_src: LazySrcLoc,
    init: bool,
    oob_safety: bool,
) CompileError!Air.Inst.Ref {
    const indexable_src = src; // TODO better source location
    const indexable_ty = sema.typeOf(indexable);
    const mod = sema.mod;

    try checkIndexable(sema, block, src, indexable_ty);

    switch (indexable_ty.ptrSize(mod)) {
        .Slice => return sema.elemPtrSlice(block, src, indexable_src, indexable, elem_index_src, elem_index, oob_safety),
        .Many, .C => {
            const maybe_ptr_val = try sema.resolveDefinedValue(block, indexable_src, indexable);
            const maybe_index_val = try sema.resolveDefinedValue(block, elem_index_src, elem_index);
            const runtime_src = rs: {
                const ptr_val = maybe_ptr_val orelse break :rs indexable_src;
                const index_val = maybe_index_val orelse break :rs elem_index_src;
                const index = @as(usize, @intCast(index_val.toUnsignedInt(mod)));
                const result_ty = try sema.elemPtrType(indexable_ty, index);
                const elem_ptr = try ptr_val.elemPtr(result_ty, index, mod);
                return Air.internedToRef(elem_ptr.toIntern());
            };
            const result_ty = try sema.elemPtrType(indexable_ty, null);

            try sema.requireRuntimeBlock(block, src, runtime_src);
            return block.addPtrElemPtr(indexable, elem_index, result_ty);
        },
        .One => {
            const child_ty = indexable_ty.childType(mod);
            switch (child_ty.zigTypeTag(mod)) {
                .Array, .Vector => {
                    return sema.elemPtrArray(block, src, indexable_src, indexable, elem_index_src, elem_index, init, oob_safety);
                },
                .Struct => {
                    assert(child_ty.isTuple(mod));
                    const index_val = try sema.resolveConstValue(block, elem_index_src, elem_index, "tuple field access index must be comptime-known");
                    const index = @as(u32, @intCast(index_val.toUnsignedInt(mod)));
                    return sema.tupleFieldPtr(block, indexable_src, indexable, elem_index_src, index, false);
                },
                else => unreachable, // Guaranteed by checkIndexable
            }
        },
    }
}

fn elemVal(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    indexable: Air.Inst.Ref,
    elem_index_uncasted: Air.Inst.Ref,
    elem_index_src: LazySrcLoc,
    oob_safety: bool,
) CompileError!Air.Inst.Ref {
    const indexable_src = src; // TODO better source location
    const indexable_ty = sema.typeOf(indexable);
    const mod = sema.mod;

    try checkIndexable(sema, block, src, indexable_ty);

    // TODO in case of a vector of pointers, we need to detect whether the element
    // index is a scalar or vector instead of unconditionally casting to usize.
    const elem_index = try sema.coerce(block, Type.usize, elem_index_uncasted, elem_index_src);

    switch (indexable_ty.zigTypeTag(mod)) {
        .Pointer => switch (indexable_ty.ptrSize(mod)) {
            .Slice => return sema.elemValSlice(block, src, indexable_src, indexable, elem_index_src, elem_index, oob_safety),
            .Many, .C => {
                const maybe_indexable_val = try sema.resolveDefinedValue(block, indexable_src, indexable);
                const maybe_index_val = try sema.resolveDefinedValue(block, elem_index_src, elem_index);

                const runtime_src = rs: {
                    const indexable_val = maybe_indexable_val orelse break :rs indexable_src;
                    const index_val = maybe_index_val orelse break :rs elem_index_src;
                    const index = @as(usize, @intCast(index_val.toUnsignedInt(mod)));
                    const elem_ty = indexable_ty.elemType2(mod);
                    const many_ptr_ty = try mod.manyConstPtrType(elem_ty);
                    const many_ptr_val = try mod.getCoerced(indexable_val, many_ptr_ty);
                    const elem_ptr_ty = try mod.singleConstPtrType(elem_ty);
                    const elem_ptr_val = try many_ptr_val.elemPtr(elem_ptr_ty, index, mod);
                    if (try sema.pointerDeref(block, indexable_src, elem_ptr_val, elem_ptr_ty)) |elem_val| {
                        return Air.internedToRef((try mod.getCoerced(elem_val, elem_ty)).toIntern());
                    }
                    break :rs indexable_src;
                };

                try sema.requireRuntimeBlock(block, src, runtime_src);
                return block.addBinOp(.ptr_elem_val, indexable, elem_index);
            },
            .One => {
                arr_sent: {
                    const inner_ty = indexable_ty.childType(mod);
                    if (inner_ty.zigTypeTag(mod) != .Array) break :arr_sent;
                    const sentinel = inner_ty.sentinel(mod) orelse break :arr_sent;
                    const index_val = try sema.resolveDefinedValue(block, elem_index_src, elem_index) orelse break :arr_sent;
                    const index = try sema.usizeCast(block, src, index_val.toUnsignedInt(mod));
                    if (index != inner_ty.arrayLen(mod)) break :arr_sent;
                    return Air.internedToRef(sentinel.toIntern());
                }
                const elem_ptr = try sema.elemPtr(block, indexable_src, indexable, elem_index, elem_index_src, false, oob_safety);
                return sema.analyzeLoad(block, indexable_src, elem_ptr, elem_index_src);
            },
        },
        .Array => return sema.elemValArray(block, src, indexable_src, indexable, elem_index_src, elem_index, oob_safety),
        .Vector => {
            // TODO: If the index is a vector, the result should be a vector.
            return sema.elemValArray(block, src, indexable_src, indexable, elem_index_src, elem_index, oob_safety);
        },
        .Struct => {
            // Tuple field access.
            const index_val = try sema.resolveConstValue(block, elem_index_src, elem_index, "tuple field access index must be comptime-known");
            const index = @as(u32, @intCast(index_val.toUnsignedInt(mod)));
            return sema.tupleField(block, indexable_src, indexable, elem_index_src, index);
        },
        else => unreachable,
    }
}

fn validateRuntimeElemAccess(
    sema: *Sema,
    block: *Block,
    elem_index_src: LazySrcLoc,
    elem_ty: Type,
    parent_ty: Type,
    parent_src: LazySrcLoc,
) CompileError!void {
    const mod = sema.mod;
    if (try sema.typeRequiresComptime(elem_ty)) {
        const msg = msg: {
            const msg = try sema.errMsg(
                block,
                elem_index_src,
                "values of type '{}' must be comptime-known, but index value is runtime-known",
                .{parent_ty.fmt(mod)},
            );
            errdefer msg.destroy(sema.gpa);

            const src_decl = mod.declPtr(block.src_decl);
            try sema.explainWhyTypeIsComptime(msg, parent_src.toSrcLoc(src_decl, mod), parent_ty);

            break :msg msg;
        };
        return sema.failWithOwnedErrorMsg(msg);
    }
}

fn tupleFieldPtr(
    sema: *Sema,
    block: *Block,
    tuple_ptr_src: LazySrcLoc,
    tuple_ptr: Air.Inst.Ref,
    field_index_src: LazySrcLoc,
    field_index: u32,
    init: bool,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const tuple_ptr_ty = sema.typeOf(tuple_ptr);
    const tuple_ty = tuple_ptr_ty.childType(mod);
    try sema.resolveTypeFields(tuple_ty);
    const field_count = tuple_ty.structFieldCount(mod);

    if (field_count == 0) {
        return sema.fail(block, tuple_ptr_src, "indexing into empty tuple is not allowed", .{});
    }

    if (field_index >= field_count) {
        return sema.fail(block, field_index_src, "index {d} outside tuple of length {d}", .{
            field_index, field_count,
        });
    }

    const field_ty = tuple_ty.structFieldType(field_index, mod);
    const ptr_field_ty = try mod.ptrType(.{
        .child = field_ty.toIntern(),
        .flags = .{
            .is_const = !tuple_ptr_ty.ptrIsMutable(mod),
            .is_volatile = tuple_ptr_ty.isVolatilePtr(mod),
            .address_space = tuple_ptr_ty.ptrAddressSpace(mod),
        },
    });

    if (try tuple_ty.structFieldValueComptime(mod, field_index)) |default_val| {
        return Air.internedToRef((try mod.intern(.{ .ptr = .{
            .ty = ptr_field_ty.toIntern(),
            .addr = .{ .comptime_field = default_val.toIntern() },
        } })));
    }

    if (try sema.resolveMaybeUndefVal(tuple_ptr)) |tuple_ptr_val| {
        return Air.internedToRef((try mod.intern(.{ .ptr = .{
            .ty = ptr_field_ty.toIntern(),
            .addr = .{ .field = .{
                .base = tuple_ptr_val.toIntern(),
                .index = field_index,
            } },
        } })));
    }

    if (!init) {
        try sema.validateRuntimeElemAccess(block, field_index_src, field_ty, tuple_ty, tuple_ptr_src);
    }

    try sema.requireRuntimeBlock(block, tuple_ptr_src, null);
    return block.addStructFieldPtr(tuple_ptr, field_index, ptr_field_ty);
}

fn tupleField(
    sema: *Sema,
    block: *Block,
    tuple_src: LazySrcLoc,
    tuple: Air.Inst.Ref,
    field_index_src: LazySrcLoc,
    field_index: u32,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const tuple_ty = sema.typeOf(tuple);
    try sema.resolveTypeFields(tuple_ty);
    const field_count = tuple_ty.structFieldCount(mod);

    if (field_count == 0) {
        return sema.fail(block, tuple_src, "indexing into empty tuple is not allowed", .{});
    }

    if (field_index >= field_count) {
        return sema.fail(block, field_index_src, "index {d} outside tuple of length {d}", .{
            field_index, field_count,
        });
    }

    const field_ty = tuple_ty.structFieldType(field_index, mod);

    if (try tuple_ty.structFieldValueComptime(mod, field_index)) |default_value| {
        return Air.internedToRef(default_value.toIntern()); // comptime field
    }

    if (try sema.resolveMaybeUndefVal(tuple)) |tuple_val| {
        if (tuple_val.isUndef(mod)) return mod.undefRef(field_ty);
        return Air.internedToRef((try tuple_val.fieldValue(mod, field_index)).toIntern());
    }

    try sema.validateRuntimeElemAccess(block, field_index_src, field_ty, tuple_ty, tuple_src);

    try sema.requireRuntimeBlock(block, tuple_src, null);
    return block.addStructFieldVal(tuple, field_index, field_ty);
}

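/// Loads an element of an array value. Sentinel-terminated arrays may be
/// indexed one past the end to read the sentinel; e.g., for a hypothetical
/// `const a: [2:0]u8 = .{ 1, 2 };`, `a[2]` is 0, while `a[3]` is a compile
/// error when the index is comptime-known.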
fn elemValArray(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    array_src: LazySrcLoc,
    array: Air.Inst.Ref,
    elem_index_src: LazySrcLoc,
    elem_index: Air.Inst.Ref,
    oob_safety: bool,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const array_ty = sema.typeOf(array);
    const array_sent = array_ty.sentinel(mod);
    const array_len = array_ty.arrayLen(mod);
    const array_len_s = array_len + @intFromBool(array_sent != null);
    const elem_ty = array_ty.childType(mod);

    if (array_len_s == 0) {
        return sema.fail(block, array_src, "indexing into empty array is not allowed", .{});
    }

    const maybe_undef_array_val = try sema.resolveMaybeUndefVal(array);
    // The index must be defined since it could be out of bounds.
    const maybe_index_val = try sema.resolveDefinedValue(block, elem_index_src, elem_index);

    if (maybe_index_val) |index_val| {
        const index = @as(usize, @intCast(index_val.toUnsignedInt(mod)));
        if (array_sent) |s| {
            if (index == array_len) {
                return Air.internedToRef(s.toIntern());
            }
        }
        if (index >= array_len_s) {
            const sentinel_label: []const u8 = if (array_sent != null) " +1 (sentinel)" else "";
            return sema.fail(block, elem_index_src, "index {d} outside array of length {d}{s}", .{ index, array_len, sentinel_label });
        }
    }
    if (maybe_undef_array_val) |array_val| {
        if (array_val.isUndef(mod)) {
            return mod.undefRef(elem_ty);
        }
        if (maybe_index_val) |index_val| {
            const index = @as(usize, @intCast(index_val.toUnsignedInt(mod)));
            const elem_val = try array_val.elemValue(mod, index);
            return Air.internedToRef(elem_val.toIntern());
        }
    }

    try sema.validateRuntimeElemAccess(block, elem_index_src, elem_ty, array_ty, array_src);

    const runtime_src = if (maybe_undef_array_val != null) elem_index_src else array_src;
    try sema.requireRuntimeBlock(block, src, runtime_src);
    try sema.queueFullTypeResolution(array_ty);
    if (oob_safety and block.wantSafety()) {
        // A runtime check is only needed when the bounds could not be checked at comptime.
        if (maybe_index_val == null) {
            const len_inst = try mod.intRef(Type.usize, array_len);
            const cmp_op: Air.Inst.Tag = if (array_sent != null) .cmp_lte else .cmp_lt;
            try sema.panicIndexOutOfBounds(block, src, elem_index, len_inst, cmp_op);
        }
    }
    return block.addBinOp(.array_elem_val, array, elem_index);
}

fn elemPtrArray(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    array_ptr_src: LazySrcLoc,
    array_ptr: Air.Inst.Ref,
    elem_index_src: LazySrcLoc,
    elem_index: Air.Inst.Ref,
    init: bool,
    oob_safety: bool,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const array_ptr_ty = sema.typeOf(array_ptr);
    const array_ty = array_ptr_ty.childType(mod);
    const array_sent = array_ty.sentinel(mod) != null;
    const array_len = array_ty.arrayLen(mod);
    const array_len_s = array_len + @intFromBool(array_sent);

    if (array_len_s == 0) {
        return sema.fail(block, array_ptr_src, "indexing into empty array is not allowed", .{});
    }

    const maybe_undef_array_ptr_val = try sema.resolveMaybeUndefVal(array_ptr);
    // The index must not be undefined since it can be out of bounds.
    const offset: ?usize = if (try sema.resolveDefinedValue(block, elem_index_src, elem_index)) |index_val| o: {
        const index = try sema.usizeCast(block, elem_index_src, index_val.toUnsignedInt(mod));
        if (index >= array_len_s) {
            const sentinel_label: []const u8 = if (array_sent) " +1 (sentinel)" else "";
            return sema.fail(block, elem_index_src, "index {d} outside array of length {d}{s}", .{ index, array_len, sentinel_label });
        }
        break :o index;
    } else null;

    const elem_ptr_ty = try sema.elemPtrType(array_ptr_ty, offset);

    if (maybe_undef_array_ptr_val) |array_ptr_val| {
        if (array_ptr_val.isUndef(mod)) {
            return mod.undefRef(elem_ptr_ty);
        }
        if (offset) |index| {
            const elem_ptr = try array_ptr_val.elemPtr(elem_ptr_ty, index, mod);
            return Air.internedToRef(elem_ptr.toIntern());
        }
    }

    if (!init) {
        try sema.validateRuntimeElemAccess(block, elem_index_src, array_ty.elemType2(mod), array_ty, array_ptr_src);
    }

    const runtime_src = if (maybe_undef_array_ptr_val != null) elem_index_src else array_ptr_src;
    try sema.requireRuntimeBlock(block, src, runtime_src);

    // A runtime check is only needed when the bounds could not be checked at comptime.
    if (oob_safety and block.wantSafety() and offset == null) {
        const len_inst = try mod.intRef(Type.usize, array_len);
        const cmp_op: Air.Inst.Tag = if (array_sent) .cmp_lte else .cmp_lt;
        try sema.panicIndexOutOfBounds(block, src, elem_index, len_inst, cmp_op);
    }

    return block.addPtrElemPtr(array_ptr, elem_index, elem_ptr_ty);
}

fn elemValSlice(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    slice_src: LazySrcLoc,
    slice: Air.Inst.Ref,
    elem_index_src: LazySrcLoc,
    elem_index: Air.Inst.Ref,
    oob_safety: bool,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const slice_ty = sema.typeOf(slice);
    const slice_sent = slice_ty.sentinel(mod) != null;
    const elem_ty = slice_ty.elemType2(mod);
    var runtime_src = slice_src;

    // The slice must be defined since it could be dereferenced as null.
    const maybe_slice_val = try sema.resolveDefinedValue(block, slice_src, slice);
    // The index must be defined since it could index out of bounds.
    const maybe_index_val = try sema.resolveDefinedValue(block, elem_index_src, elem_index);

    if (maybe_slice_val) |slice_val| {
        runtime_src = elem_index_src;
        const slice_len = slice_val.sliceLen(mod);
        const slice_len_s = slice_len + @intFromBool(slice_sent);
        if (slice_len_s == 0) {
            return sema.fail(block, slice_src, "indexing into empty slice is not allowed", .{});
        }
        if (maybe_index_val) |index_val| {
            const index = @as(usize, @intCast(index_val.toUnsignedInt(mod)));
            if (index >= slice_len_s) {
                const sentinel_label: []const u8 = if (slice_sent) " +1 (sentinel)" else "";
                return sema.fail(block, elem_index_src, "index {d} outside slice of length {d}{s}", .{ index, slice_len, sentinel_label });
            }
            const elem_ptr_ty = try sema.elemPtrType(slice_ty, index);
            const elem_ptr_val = try slice_val.elemPtr(elem_ptr_ty, index, mod);
            if (try sema.pointerDeref(block, slice_src, elem_ptr_val, elem_ptr_ty)) |elem_val| {
                return Air.internedToRef(elem_val.toIntern());
            }
            runtime_src = slice_src;
        }
    }

    try sema.validateRuntimeElemAccess(block, elem_index_src, elem_ty, slice_ty, slice_src);

    try sema.requireRuntimeBlock(block, src, runtime_src);
    if (oob_safety and block.wantSafety()) {
        const len_inst = if (maybe_slice_val) |slice_val|
            try mod.intRef(Type.usize, slice_val.sliceLen(mod))
        else
            try block.addTyOp(.slice_len, Type.usize, slice);
        const cmp_op: Air.Inst.Tag = if (slice_sent) .cmp_lte else .cmp_lt;
        try sema.panicIndexOutOfBounds(block, src, elem_index, len_inst, cmp_op);
    }
    try sema.queueFullTypeResolution(sema.typeOf(slice));
    return block.addBinOp(.slice_elem_val, slice, elem_index);
}

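/// Analyzes a pointer to a slice element, e.g. the `&s[1]` in hypothetical
/// code like `const p = &s[1];`. Bounds are checked at comptime when both the
/// slice and index are comptime-known; otherwise a safety check is emitted.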
fn elemPtrSlice(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    slice_src: LazySrcLoc,
    slice: Air.Inst.Ref,
    elem_index_src: LazySrcLoc,
    elem_index: Air.Inst.Ref,
    oob_safety: bool,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const slice_ty = sema.typeOf(slice);
    const slice_sent = slice_ty.sentinel(mod) != null;

    const maybe_undef_slice_val = try sema.resolveMaybeUndefVal(slice);
    // The index must not be undefined since it can be out of bounds.
    const offset: ?usize = if (try sema.resolveDefinedValue(block, elem_index_src, elem_index)) |index_val| o: {
        const index = try sema.usizeCast(block, elem_index_src, index_val.toUnsignedInt(mod));
        break :o index;
    } else null;

    const elem_ptr_ty = try sema.elemPtrType(slice_ty, offset);

    if (maybe_undef_slice_val) |slice_val| {
        if (slice_val.isUndef(mod)) {
            return mod.undefRef(elem_ptr_ty);
        }
        const slice_len = slice_val.sliceLen(mod);
        const slice_len_s = slice_len + @intFromBool(slice_sent);
        if (slice_len_s == 0) {
            return sema.fail(block, slice_src, "indexing into empty slice is not allowed", .{});
        }
        if (offset) |index| {
            if (index >= slice_len_s) {
                const sentinel_label: []const u8 = if (slice_sent) " +1 (sentinel)" else "";
                return sema.fail(block, elem_index_src, "index {d} outside slice of length {d}{s}", .{ index, slice_len, sentinel_label });
            }
            const elem_ptr_val = try slice_val.elemPtr(elem_ptr_ty, index, mod);
            return Air.internedToRef(elem_ptr_val.toIntern());
        }
    }

    try sema.validateRuntimeElemAccess(block, elem_index_src, elem_ptr_ty, slice_ty, slice_src);

    const runtime_src = if (maybe_undef_slice_val != null) elem_index_src else slice_src;
    try sema.requireRuntimeBlock(block, src, runtime_src);
    if (oob_safety and block.wantSafety()) {
        const len_inst = len: {
            if (maybe_undef_slice_val) |slice_val|
                if (!slice_val.isUndef(mod))
                    break :len try mod.intRef(Type.usize, slice_val.sliceLen(mod));
            break :len try block.addTyOp(.slice_len, Type.usize, slice);
        };
        const cmp_op: Air.Inst.Tag = if (slice_sent) .cmp_lte else .cmp_lt;
        try sema.panicIndexOutOfBounds(block, src, elem_index, len_inst, cmp_op);
    }
    return block.addSliceElemPtr(slice, elem_index, elem_ptr_ty);
}

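/// Performs an implicit coercion of `inst` to `dest_ty_unresolved`, e.g. in
/// hypothetical code like:
///
///     const x: u32 = 1;
///     const y: u64 = x; // coerced from u32 to u64
///
/// Reports a compile error if no coercion is possible.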
fn coerce(
|
|
sema: *Sema,
|
|
block: *Block,
|
|
dest_ty_unresolved: Type,
|
|
inst: Air.Inst.Ref,
|
|
inst_src: LazySrcLoc,
|
|
) CompileError!Air.Inst.Ref {
|
|
return sema.coerceExtra(block, dest_ty_unresolved, inst, inst_src, .{}) catch |err| switch (err) {
|
|
error.NotCoercible => unreachable,
|
|
else => |e| return e,
|
|
};
|
|
}
|
|
|
|
const CoersionError = CompileError || error{
    /// When coerce is called recursively, this error should be returned instead of using `fail`
    /// to ensure correct types in compile errors.
    NotCoercible,
};

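/// Options that adjust `coerceExtra`'s error reporting behavior.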
const CoerceOpts = struct {
    /// Should coerceExtra emit error messages.
    report_err: bool = true,
    /// Ignored if `report_err == false`.
    is_ret: bool = false,
    /// Should coercion to comptime_int emit an error message.
    no_cast_to_comptime_int: bool = false,

    param_src: struct {
        func_inst: Air.Inst.Ref = .none,
        param_i: u32 = undefined,

        fn get(info: @This(), sema: *Sema) !?Module.SrcLoc {
            if (info.func_inst == .none) return null;
            const mod = sema.mod;
            const fn_decl = (try sema.funcDeclSrc(info.func_inst)) orelse return null;
            const param_src = Module.paramSrc(0, mod, fn_decl, info.param_i);
            if (param_src == .node_offset_param) {
                return Module.SrcLoc{
                    .file_scope = fn_decl.getFileScope(mod),
                    .parent_decl_node = fn_decl.src_node,
                    .lazy = LazySrcLoc.nodeOffset(param_src.node_offset_param),
                };
            }
            return param_src.toSrcLoc(fn_decl, mod);
        }
    } = .{},
};

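/// The workhorse of type coercion. First tries an in-memory coercion (an
/// interned value or a bitcast), then falls back to the type-pair-specific
/// rules in the big switch below. When `opts.report_err` is false, failure is
/// signaled with `error.NotCoercible` instead of a compile error, so callers
/// can probe several candidate coercions. For example, coercing a runtime
/// `u8` operand to `u16` takes the integer-widening path and emits an
/// `intcast` instruction, while coercing the comptime-known value `5` to
/// `u16` interns the constant directly.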
fn coerceExtra(
    sema: *Sema,
    block: *Block,
    dest_ty: Type,
    inst: Air.Inst.Ref,
    inst_src: LazySrcLoc,
    opts: CoerceOpts,
) CoersionError!Air.Inst.Ref {
    if (dest_ty.isGenericPoison()) return inst;
    const mod = sema.mod;
    const dest_ty_src = inst_src; // TODO better source location
    try sema.resolveTypeFields(dest_ty);
    const inst_ty = sema.typeOf(inst);
    try sema.resolveTypeFields(inst_ty);
    const target = mod.getTarget();
    // If the types are the same, we can return the operand.
    if (dest_ty.eql(inst_ty, mod))
        return inst;

    const maybe_inst_val = try sema.resolveMaybeUndefVal(inst);

    var in_memory_result = try sema.coerceInMemoryAllowed(block, dest_ty, inst_ty, false, target, dest_ty_src, inst_src);
    if (in_memory_result == .ok) {
        if (maybe_inst_val) |val| {
            return sema.coerceInMemory(val, dest_ty);
        }
        try sema.requireRuntimeBlock(block, inst_src, null);
        return block.addBitCast(dest_ty, inst);
    }

    const is_undef = inst_ty.zigTypeTag(mod) == .Undefined;

    switch (dest_ty.zigTypeTag(mod)) {
        .Optional => optional: {
            // undefined sets the optional bit also to undefined.
            if (is_undef) {
                return mod.undefRef(dest_ty);
            }

            // null to ?T
            if (inst_ty.zigTypeTag(mod) == .Null) {
                return Air.internedToRef((try mod.intern(.{ .opt = .{
                    .ty = dest_ty.toIntern(),
                    .val = .none,
                } })));
            }

            // cast from ?*T and ?[*]T to ?*anyopaque
            // but don't do it if the source type is a double pointer
            if (dest_ty.isPtrLikeOptional(mod) and
                dest_ty.elemType2(mod).toIntern() == .anyopaque_type and
                inst_ty.isPtrAtRuntime(mod))
            anyopaque_check: {
                if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :optional;
                const elem_ty = inst_ty.elemType2(mod);
                if (elem_ty.zigTypeTag(mod) == .Pointer or elem_ty.isPtrLikeOptional(mod)) {
                    in_memory_result = .{ .double_ptr_to_anyopaque = .{
                        .actual = inst_ty,
                        .wanted = dest_ty,
                    } };
                    break :optional;
                }
                // Let the logic below handle wrapping the optional now that
                // it has been checked to correctly coerce.
                if (!inst_ty.isPtrLikeOptional(mod)) break :anyopaque_check;
                return sema.coerceCompatiblePtrs(block, dest_ty, inst, inst_src);
            }

            // T to ?T
            const child_type = dest_ty.optionalChild(mod);
            const intermediate = sema.coerceExtra(block, child_type, inst, inst_src, .{ .report_err = false }) catch |err| switch (err) {
                error.NotCoercible => {
                    if (in_memory_result == .no_match) {
                        // Try to give more useful notes
                        in_memory_result = try sema.coerceInMemoryAllowed(block, child_type, inst_ty, false, target, dest_ty_src, inst_src);
                    }
                    break :optional;
                },
                else => |e| return e,
            };
            return try sema.wrapOptional(block, dest_ty, intermediate, inst_src);
        },
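        // Pointer destinations: handles function bodies becoming function
        // pointers, pointer-to-array relationships, C pointers, and casts
        // to `*anyopaque`.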
        .Pointer => pointer: {
            const dest_info = dest_ty.ptrInfo(mod);

            // Function body to function pointer.
            if (inst_ty.zigTypeTag(mod) == .Fn) {
                const fn_val = try sema.resolveConstValue(block, .unneeded, inst, "");
                const fn_decl = fn_val.pointerDecl(mod).?;
                const inst_as_ptr = try sema.analyzeDeclRef(fn_decl);
                return sema.coerce(block, dest_ty, inst_as_ptr, inst_src);
            }

            // *T to *[1]T
            single_item: {
                if (dest_info.flags.size != .One) break :single_item;
                if (!inst_ty.isSinglePointer(mod)) break :single_item;
                if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :pointer;
                const ptr_elem_ty = inst_ty.childType(mod);
                const array_ty = dest_info.child.toType();
                if (array_ty.zigTypeTag(mod) != .Array) break :single_item;
                const array_elem_ty = array_ty.childType(mod);
                if (array_ty.arrayLen(mod) != 1) break :single_item;
                const dest_is_mut = !dest_info.flags.is_const;
                switch (try sema.coerceInMemoryAllowed(block, array_elem_ty, ptr_elem_ty, dest_is_mut, target, dest_ty_src, inst_src)) {
                    .ok => {},
                    else => break :single_item,
                }
                return sema.coerceCompatiblePtrs(block, dest_ty, inst, inst_src);
            }

            // Coercions where the source is a single pointer to an array.
            src_array_ptr: {
                if (!inst_ty.isSinglePointer(mod)) break :src_array_ptr;
                if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :pointer;
                const array_ty = inst_ty.childType(mod);
                if (array_ty.zigTypeTag(mod) != .Array) break :src_array_ptr;
                const array_elem_type = array_ty.childType(mod);
                const dest_is_mut = !dest_info.flags.is_const;

                const dst_elem_type = dest_info.child.toType();
                const elem_res = try sema.coerceInMemoryAllowed(block, dst_elem_type, array_elem_type, dest_is_mut, target, dest_ty_src, inst_src);
                switch (elem_res) {
                    .ok => {},
                    else => {
                        in_memory_result = .{ .ptr_child = .{
                            .child = try elem_res.dupe(sema.arena),
                            .actual = array_elem_type,
                            .wanted = dst_elem_type,
                        } };
                        break :src_array_ptr;
                    },
                }

                if (dest_info.sentinel != .none) {
                    if (array_ty.sentinel(mod)) |inst_sent| {
                        if (dest_info.sentinel != (try mod.getCoerced(inst_sent, dst_elem_type)).toIntern()) {
                            in_memory_result = .{ .ptr_sentinel = .{
                                .actual = inst_sent,
                                .wanted = dest_info.sentinel.toValue(),
                                .ty = dst_elem_type,
                            } };
                            break :src_array_ptr;
                        }
                    } else {
                        in_memory_result = .{ .ptr_sentinel = .{
                            .actual = Value.@"unreachable",
                            .wanted = dest_info.sentinel.toValue(),
                            .ty = dst_elem_type,
                        } };
                        break :src_array_ptr;
                    }
                }

                switch (dest_info.flags.size) {
                    .Slice => {
                        // *[N]T to []T
                        return sema.coerceArrayPtrToSlice(block, dest_ty, inst, inst_src);
                    },
                    .C => {
                        // *[N]T to [*c]T
                        return sema.coerceCompatiblePtrs(block, dest_ty, inst, inst_src);
                    },
                    .Many => {
                        // *[N]T to [*]T
                        return sema.coerceCompatiblePtrs(block, dest_ty, inst, inst_src);
                    },
                    .One => {},
                }
            }

            // coercion from C pointer
            if (inst_ty.isCPtr(mod)) src_c_ptr: {
                if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :src_c_ptr;
                // In this case we must add a safety check because the C pointer
                // could be null.
                const src_elem_ty = inst_ty.childType(mod);
                const dest_is_mut = !dest_info.flags.is_const;
                const dst_elem_type = dest_info.child.toType();
                switch (try sema.coerceInMemoryAllowed(block, dst_elem_type, src_elem_ty, dest_is_mut, target, dest_ty_src, inst_src)) {
                    .ok => {},
                    else => break :src_c_ptr,
                }
                return sema.coerceCompatiblePtrs(block, dest_ty, inst, inst_src);
            }

            // cast from *T and [*]T to *anyopaque
            // but don't do it if the source type is a double pointer
            if (dest_info.child == .anyopaque_type and inst_ty.zigTypeTag(mod) == .Pointer) to_anyopaque: {
                if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :pointer;
                const elem_ty = inst_ty.elemType2(mod);
                if (elem_ty.zigTypeTag(mod) == .Pointer or elem_ty.isPtrLikeOptional(mod)) {
                    in_memory_result = .{ .double_ptr_to_anyopaque = .{
                        .actual = inst_ty,
                        .wanted = dest_ty,
                    } };
                    break :pointer;
                }
                if (dest_ty.isSlice(mod)) break :to_anyopaque;
                if (inst_ty.isSlice(mod)) {
                    in_memory_result = .{ .slice_to_anyopaque = .{
                        .actual = inst_ty,
                        .wanted = dest_ty,
                    } };
                    break :pointer;
                }
                return sema.coerceCompatiblePtrs(block, dest_ty, inst, inst_src);
            }

            switch (dest_info.flags.size) {
                // coercion to C pointer
                .C => switch (inst_ty.zigTypeTag(mod)) {
                    .Null => {
                        return Air.internedToRef((try mod.getCoerced(Value.null, dest_ty)).toIntern());
                    },
                    .ComptimeInt => {
                        const addr = sema.coerceExtra(block, Type.usize, inst, inst_src, .{ .report_err = false }) catch |err| switch (err) {
                            error.NotCoercible => break :pointer,
                            else => |e| return e,
                        };
                        return try sema.coerceCompatiblePtrs(block, dest_ty, addr, inst_src);
                    },
                    .Int => {
                        const ptr_size_ty = switch (inst_ty.intInfo(mod).signedness) {
                            .signed => Type.isize,
                            .unsigned => Type.usize,
                        };
                        const addr = sema.coerceExtra(block, ptr_size_ty, inst, inst_src, .{ .report_err = false }) catch |err| switch (err) {
                            error.NotCoercible => {
                                // Try to give more useful notes
                                in_memory_result = try sema.coerceInMemoryAllowed(block, ptr_size_ty, inst_ty, false, target, dest_ty_src, inst_src);
                                break :pointer;
                            },
                            else => |e| return e,
                        };
                        return try sema.coerceCompatiblePtrs(block, dest_ty, addr, inst_src);
                    },
                    .Pointer => p: {
                        if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :p;
                        const inst_info = inst_ty.ptrInfo(mod);
                        switch (try sema.coerceInMemoryAllowed(
                            block,
                            dest_info.child.toType(),
                            inst_info.child.toType(),
                            !dest_info.flags.is_const,
                            target,
                            dest_ty_src,
                            inst_src,
                        )) {
                            .ok => {},
                            else => break :p,
                        }
                        if (inst_info.flags.size == .Slice) {
                            assert(dest_info.sentinel == .none);
                            if (inst_info.sentinel == .none or
                                inst_info.sentinel != (try mod.intValue(inst_info.child.toType(), 0)).toIntern())
                                break :p;

                            const slice_ptr = try sema.analyzeSlicePtr(block, inst_src, inst, inst_ty);
                            return sema.coerceCompatiblePtrs(block, dest_ty, slice_ptr, inst_src);
                        }
                        return sema.coerceCompatiblePtrs(block, dest_ty, inst, inst_src);
                    },
                    else => {},
                },
                .One => switch (dest_info.child.toType().zigTypeTag(mod)) {
                    .Union => {
                        // pointer to anonymous struct to pointer to union
                        if (inst_ty.isSinglePointer(mod) and
                            inst_ty.childType(mod).isAnonStruct(mod) and
                            sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result))
                        {
                            return sema.coerceAnonStructToUnionPtrs(block, dest_ty, dest_ty_src, inst, inst_src);
                        }
                    },
                    .Struct => {
                        // pointer to anonymous struct to pointer to struct
                        if (inst_ty.isSinglePointer(mod) and
                            inst_ty.childType(mod).isAnonStruct(mod) and
                            sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result))
                        {
                            return sema.coerceAnonStructToStructPtrs(block, dest_ty, dest_ty_src, inst, inst_src) catch |err| switch (err) {
                                error.NotCoercible => break :pointer,
                                else => |e| return e,
                            };
                        }
                    },
                    .Array => {
                        // pointer to tuple to pointer to array
                        if (inst_ty.isSinglePointer(mod) and
                            inst_ty.childType(mod).isTuple(mod) and
                            sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result))
                        {
                            return sema.coerceTupleToArrayPtrs(block, dest_ty, dest_ty_src, inst, inst_src);
                        }
                    },
                    else => {},
                },
                .Slice => to_slice: {
                    if (inst_ty.zigTypeTag(mod) == .Array) {
                        return sema.fail(
                            block,
                            inst_src,
                            "array literal requires address-of operator (&) to coerce to slice type '{}'",
                            .{dest_ty.fmt(mod)},
                        );
                    }

                    if (!inst_ty.isSinglePointer(mod)) break :to_slice;
                    const inst_child_ty = inst_ty.childType(mod);
                    if (!inst_child_ty.isTuple(mod)) break :to_slice;

                    // empty tuple to zero-length slice
                    // note that this allows coercing to a mutable slice.
                    if (inst_child_ty.structFieldCount(mod) == 0) {
                        // Optional slice is represented with a null pointer so
                        // we use a dummy pointer value with the required alignment.
                        return Air.internedToRef((try mod.intern(.{ .ptr = .{
                            .ty = dest_ty.toIntern(),
                            .addr = .{ .int = (if (dest_info.flags.alignment != .none)
                                try mod.intValue(Type.usize, dest_info.flags.alignment.toByteUnitsOptional().?)
                            else
                                try mod.getCoerced(try dest_info.child.toType().lazyAbiAlignment(mod), Type.usize)).toIntern() },
                            .len = (try mod.intValue(Type.usize, 0)).toIntern(),
                        } })));
                    }

                    // pointer to tuple to slice
                    if (!dest_info.flags.is_const) {
                        const err_msg = err_msg: {
                            const err_msg = try sema.errMsg(block, inst_src, "cannot cast pointer to tuple to '{}'", .{dest_ty.fmt(mod)});
                            errdefer err_msg.deinit(sema.gpa);
                            try sema.errNote(block, dest_ty_src, err_msg, "pointers to tuples can only coerce to constant pointers", .{});
                            break :err_msg err_msg;
                        };
                        return sema.failWithOwnedErrorMsg(err_msg);
                    }
                    return sema.coerceTupleToSlicePtrs(block, dest_ty, dest_ty_src, inst, inst_src);
                },
                .Many => p: {
                    if (!inst_ty.isSlice(mod)) break :p;
                    if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :p;
                    const inst_info = inst_ty.ptrInfo(mod);

                    switch (try sema.coerceInMemoryAllowed(
                        block,
                        dest_info.child.toType(),
                        inst_info.child.toType(),
                        !dest_info.flags.is_const,
                        target,
                        dest_ty_src,
                        inst_src,
                    )) {
                        .ok => {},
                        else => break :p,
                    }

                    if (dest_info.sentinel == .none or inst_info.sentinel == .none or
                        dest_info.sentinel !=
                        try mod.intern_pool.getCoerced(sema.gpa, inst_info.sentinel, dest_info.child))
                        break :p;

                    const slice_ptr = try sema.analyzeSlicePtr(block, inst_src, inst, inst_ty);
                    return sema.coerceCompatiblePtrs(block, dest_ty, slice_ptr, inst_src);
                },
            }
        },
        .Int, .ComptimeInt => switch (inst_ty.zigTypeTag(mod)) {
            .Float, .ComptimeFloat => float: {
                if (is_undef) {
                    return mod.undefRef(dest_ty);
                }
                const val = (try sema.resolveMaybeUndefVal(inst)) orelse {
                    if (dest_ty.zigTypeTag(mod) == .ComptimeInt) {
                        if (!opts.report_err) return error.NotCoercible;
                        return sema.failWithNeededComptime(block, inst_src, "value being casted to 'comptime_int' must be comptime-known");
                    }
                    break :float;
                };

                if (val.floatHasFraction(mod)) {
                    return sema.fail(
                        block,
                        inst_src,
                        "fractional component prevents float value '{}' from coercion to type '{}'",
                        .{ val.fmtValue(inst_ty, mod), dest_ty.fmt(mod) },
                    );
                }
                const result_val = try sema.intFromFloat(block, inst_src, val, inst_ty, dest_ty);
                return Air.internedToRef(result_val.toIntern());
            },
            .Int, .ComptimeInt => {
                if (is_undef) {
                    return mod.undefRef(dest_ty);
                }
                if (try sema.resolveMaybeUndefVal(inst)) |val| {
                    // comptime-known integer to other number
                    if (!(try sema.intFitsInType(val, dest_ty, null))) {
                        if (!opts.report_err) return error.NotCoercible;
                        return sema.fail(block, inst_src, "type '{}' cannot represent integer value '{}'", .{ dest_ty.fmt(mod), val.fmtValue(inst_ty, mod) });
                    }
                    return Air.internedToRef((try mod.getCoerced(val, dest_ty)).toIntern());
                }
                if (dest_ty.zigTypeTag(mod) == .ComptimeInt) {
                    if (!opts.report_err) return error.NotCoercible;
                    if (opts.no_cast_to_comptime_int) return inst;
                    return sema.failWithNeededComptime(block, inst_src, "value being casted to 'comptime_int' must be comptime-known");
                }

                // integer widening
                const dst_info = dest_ty.intInfo(mod);
                const src_info = inst_ty.intInfo(mod);
                if ((src_info.signedness == dst_info.signedness and dst_info.bits >= src_info.bits) or
                    // small enough unsigned ints can be cast to large enough signed ints
                    (dst_info.signedness == .signed and dst_info.bits > src_info.bits))
                {
                    try sema.requireRuntimeBlock(block, inst_src, null);
                    return block.addTyOp(.intcast, dest_ty, inst);
                }
            },
            .Undefined => {
                return mod.undefRef(dest_ty);
            },
            else => {},
        },
        .Float, .ComptimeFloat => switch (inst_ty.zigTypeTag(mod)) {
            .ComptimeFloat => {
                const val = try sema.resolveConstValue(block, .unneeded, inst, "");
                const result_val = try val.floatCast(dest_ty, mod);
                return Air.internedToRef(result_val.toIntern());
            },
            .Float => {
                if (is_undef) {
                    return mod.undefRef(dest_ty);
                }
                if (try sema.resolveMaybeUndefVal(inst)) |val| {
                    const result_val = try val.floatCast(dest_ty, mod);
                    if (!val.eql(try result_val.floatCast(inst_ty, mod), inst_ty, mod)) {
                        return sema.fail(
                            block,
                            inst_src,
                            "type '{}' cannot represent float value '{}'",
                            .{ dest_ty.fmt(mod), val.fmtValue(inst_ty, mod) },
                        );
                    }
                    return Air.internedToRef(result_val.toIntern());
                } else if (dest_ty.zigTypeTag(mod) == .ComptimeFloat) {
                    if (!opts.report_err) return error.NotCoercible;
                    return sema.failWithNeededComptime(block, inst_src, "value being casted to 'comptime_float' must be comptime-known");
                }

                // float widening
                const src_bits = inst_ty.floatBits(target);
                const dst_bits = dest_ty.floatBits(target);
                if (dst_bits >= src_bits) {
                    try sema.requireRuntimeBlock(block, inst_src, null);
                    return block.addTyOp(.fpext, dest_ty, inst);
                }
            },
            .Int, .ComptimeInt => int: {
                if (is_undef) {
                    return mod.undefRef(dest_ty);
                }
                const val = (try sema.resolveMaybeUndefVal(inst)) orelse {
                    if (dest_ty.zigTypeTag(mod) == .ComptimeFloat) {
                        if (!opts.report_err) return error.NotCoercible;
                        return sema.failWithNeededComptime(block, inst_src, "value being casted to 'comptime_float' must be comptime-known");
                    }
                    break :int;
                };
                const result_val = try val.floatFromIntAdvanced(sema.arena, inst_ty, dest_ty, mod, sema);
                // TODO implement this compile error
                //const int_again_val = try result_val.intFromFloat(sema.arena, inst_ty);
                //if (!int_again_val.eql(val, inst_ty, mod)) {
                //    return sema.fail(
                //        block,
                //        inst_src,
                //        "type '{}' cannot represent integer value '{}'",
                //        .{ dest_ty.fmt(mod), val },
                //    );
                //}
                return Air.internedToRef(result_val.toIntern());
            },
            .Undefined => {
                return mod.undefRef(dest_ty);
            },
            else => {},
        },
        .Enum => switch (inst_ty.zigTypeTag(mod)) {
            .EnumLiteral => {
                // enum literal to enum
                const val = try sema.resolveConstValue(block, .unneeded, inst, "");
                const string = mod.intern_pool.indexToKey(val.toIntern()).enum_literal;
                const field_index = dest_ty.enumFieldIndex(string, mod) orelse {
                    const msg = msg: {
                        const msg = try sema.errMsg(
                            block,
                            inst_src,
                            "no field named '{}' in enum '{}'",
                            .{ string.fmt(&mod.intern_pool), dest_ty.fmt(mod) },
                        );
                        errdefer msg.destroy(sema.gpa);
                        try sema.addDeclaredHereNote(msg, dest_ty);
                        break :msg msg;
                    };
                    return sema.failWithOwnedErrorMsg(msg);
                };
                return Air.internedToRef((try mod.enumValueFieldIndex(dest_ty, @as(u32, @intCast(field_index)))).toIntern());
            },
            .Union => blk: {
                // union to its own tag type
                const union_tag_ty = inst_ty.unionTagType(mod) orelse break :blk;
                if (union_tag_ty.eql(dest_ty, mod)) {
                    return sema.unionToTag(block, dest_ty, inst, inst_src);
                }
            },
            .Undefined => {
                return mod.undefRef(dest_ty);
            },
            else => {},
        },
        .ErrorUnion => switch (inst_ty.zigTypeTag(mod)) {
            .ErrorUnion => eu: {
                if (maybe_inst_val) |inst_val| {
                    switch (inst_val.toIntern()) {
                        .undef => return mod.undefRef(dest_ty),
                        else => switch (mod.intern_pool.indexToKey(inst_val.toIntern())) {
                            .error_union => |error_union| switch (error_union.val) {
                                .err_name => |err_name| {
                                    const error_set_ty = inst_ty.errorUnionSet(mod);
                                    const error_set_val = Air.internedToRef((try mod.intern(.{ .err = .{
                                        .ty = error_set_ty.toIntern(),
                                        .name = err_name,
                                    } })));
                                    return sema.wrapErrorUnionSet(block, dest_ty, error_set_val, inst_src);
                                },
                                .payload => |payload| {
                                    const payload_val = Air.internedToRef(payload);
                                    return sema.wrapErrorUnionPayload(block, dest_ty, payload_val, inst_src) catch |err| switch (err) {
                                        error.NotCoercible => break :eu,
                                        else => |e| return e,
                                    };
                                },
                            },
                            else => unreachable,
                        },
                    }
                }
            },
            .ErrorSet => {
                // E to E!T
                return sema.wrapErrorUnionSet(block, dest_ty, inst, inst_src);
            },
            .Undefined => {
                return mod.undefRef(dest_ty);
            },
            else => eu: {
                // T to E!T
                return sema.wrapErrorUnionPayload(block, dest_ty, inst, inst_src) catch |err| switch (err) {
                    error.NotCoercible => break :eu,
                    else => |e| return e,
                };
            },
        },
        .Union => switch (inst_ty.zigTypeTag(mod)) {
            .Enum, .EnumLiteral => return sema.coerceEnumToUnion(block, dest_ty, dest_ty_src, inst, inst_src),
            .Struct => {
                if (inst_ty.isAnonStruct(mod)) {
                    return sema.coerceAnonStructToUnion(block, dest_ty, dest_ty_src, inst, inst_src);
                }
            },
            .Undefined => {
                return mod.undefRef(dest_ty);
            },
            else => {},
        },
        .Array => switch (inst_ty.zigTypeTag(mod)) {
            .Vector => return sema.coerceArrayLike(block, dest_ty, dest_ty_src, inst, inst_src),
            .Struct => {
                if (inst == .empty_struct) {
                    return sema.arrayInitEmpty(block, inst_src, dest_ty);
                }
                if (inst_ty.isTuple(mod)) {
                    return sema.coerceTupleToArray(block, dest_ty, dest_ty_src, inst, inst_src);
                }
            },
            .Undefined => {
                return mod.undefRef(dest_ty);
            },
            else => {},
        },
        .Vector => switch (inst_ty.zigTypeTag(mod)) {
            .Array, .Vector => return sema.coerceArrayLike(block, dest_ty, dest_ty_src, inst, inst_src),
            .Struct => {
                if (inst_ty.isTuple(mod)) {
                    return sema.coerceTupleToArray(block, dest_ty, dest_ty_src, inst, inst_src);
                }
            },
            .Undefined => {
                return mod.undefRef(dest_ty);
            },
            else => {},
        },
        .Struct => blk: {
            if (inst == .empty_struct) {
                return sema.structInitEmpty(block, dest_ty, dest_ty_src, inst_src);
            }
            if (inst_ty.isTupleOrAnonStruct(mod)) {
                return sema.coerceTupleToStruct(block, dest_ty, inst, inst_src) catch |err| switch (err) {
                    error.NotCoercible => break :blk,
                    else => |e| return e,
                };
            }
        },
        else => {},
    }

    // undefined to anything. We do this after the big switch above so that
    // special logic has a chance to run first, such as `*[N]T` to `[]T` which
    // should initialize the length field of the slice.
    if (is_undef) {
        return mod.undefRef(dest_ty);
    }

    if (!opts.report_err) return error.NotCoercible;

    if (opts.is_ret and dest_ty.zigTypeTag(mod) == .NoReturn) {
        const msg = msg: {
            const msg = try sema.errMsg(block, inst_src, "function declared 'noreturn' returns", .{});
            errdefer msg.destroy(sema.gpa);

            const ret_ty_src: LazySrcLoc = .{ .node_offset_fn_type_ret_ty = 0 };
            const src_decl = mod.funcOwnerDeclPtr(sema.func_index);
            try mod.errNoteNonLazy(ret_ty_src.toSrcLoc(src_decl, mod), msg, "'noreturn' declared here", .{});
            break :msg msg;
        };
        return sema.failWithOwnedErrorMsg(msg);
    }

    const msg = msg: {
        const msg = try sema.errMsg(block, inst_src, "expected type '{}', found '{}'", .{ dest_ty.fmt(mod), inst_ty.fmt(mod) });
        errdefer msg.destroy(sema.gpa);

        // E!T to T
        if (inst_ty.zigTypeTag(mod) == .ErrorUnion and
            (try sema.coerceInMemoryAllowed(block, inst_ty.errorUnionPayload(mod), dest_ty, false, target, dest_ty_src, inst_src)) == .ok)
        {
            try sema.errNote(block, inst_src, msg, "cannot convert error union to payload type", .{});
            try sema.errNote(block, inst_src, msg, "consider using 'try', 'catch', or 'if'", .{});
        }

        // ?T to T
        if (inst_ty.zigTypeTag(mod) == .Optional and
            (try sema.coerceInMemoryAllowed(block, inst_ty.optionalChild(mod), dest_ty, false, target, dest_ty_src, inst_src)) == .ok)
        {
            try sema.errNote(block, inst_src, msg, "cannot convert optional to payload type", .{});
            try sema.errNote(block, inst_src, msg, "consider using '.?', 'orelse', or 'if'", .{});
        }

        try in_memory_result.report(sema, block, inst_src, msg);

        // Add notes about function return type
        if (opts.is_ret and
            mod.test_functions.get(mod.funcOwnerDeclIndex(sema.func_index)) == null)
        {
            const ret_ty_src: LazySrcLoc = .{ .node_offset_fn_type_ret_ty = 0 };
            const src_decl = mod.funcOwnerDeclPtr(sema.func_index);
            if (inst_ty.isError(mod) and !dest_ty.isError(mod)) {
                try mod.errNoteNonLazy(ret_ty_src.toSrcLoc(src_decl, mod), msg, "function cannot return an error", .{});
            } else {
                try mod.errNoteNonLazy(ret_ty_src.toSrcLoc(src_decl, mod), msg, "function return type declared here", .{});
            }
        }

        if (try opts.param_src.get(sema)) |param_src| {
            try mod.errNoteNonLazy(param_src, msg, "parameter type declared here", .{});
        }

        // TODO maybe add "cannot store an error in type '{}'" note

        break :msg msg;
    };
    return sema.failWithOwnedErrorMsg(msg);
}

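/// Performs an in-memory coercion of a comptime-known value: the value is
/// simply re-interned at the destination type, with no runtime instruction.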
fn coerceInMemory(
    sema: *Sema,
    val: Value,
    dst_ty: Type,
) CompileError!Air.Inst.Ref {
    return Air.internedToRef((try sema.mod.getCoerced(val, dst_ty)).toIntern());
}

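/// Result of an in-memory coercion query. `.ok` means the coercion is a pure
/// bitcast; every other variant records why it failed, forming a chain (via
/// the `child` pointers) that `report` walks to emit error notes.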
const InMemoryCoercionResult = union(enum) {
    ok,
    no_match: Pair,
    int_not_coercible: Int,
    error_union_payload: PairAndChild,
    array_len: IntPair,
    array_sentinel: Sentinel,
    array_elem: PairAndChild,
    vector_len: IntPair,
    vector_elem: PairAndChild,
    optional_shape: Pair,
    optional_child: PairAndChild,
    from_anyerror,
    missing_error: []const InternPool.NullTerminatedString,
    /// true if wanted is var args
    fn_var_args: bool,
    /// true if wanted is generic
    fn_generic: bool,
    fn_param_count: IntPair,
    fn_param_noalias: IntPair,
    fn_param_comptime: ComptimeParam,
    fn_param: Param,
    fn_cc: CC,
    fn_return_type: PairAndChild,
    ptr_child: PairAndChild,
    ptr_addrspace: AddressSpace,
    ptr_sentinel: Sentinel,
    ptr_size: Size,
    ptr_qualifiers: Qualifiers,
    ptr_allowzero: Pair,
    ptr_bit_range: BitRange,
    ptr_alignment: IntPair,
    double_ptr_to_anyopaque: Pair,
    slice_to_anyopaque: Pair,

    const Pair = struct {
        actual: Type,
        wanted: Type,
    };

    const PairAndChild = struct {
        child: *InMemoryCoercionResult,
        actual: Type,
        wanted: Type,
    };

    const Param = struct {
        child: *InMemoryCoercionResult,
        actual: Type,
        wanted: Type,
        index: u64,
    };

    const ComptimeParam = struct {
        index: u64,
        wanted: bool,
    };

    const Sentinel = struct {
        // unreachable_value indicates no sentinel
        actual: Value,
        wanted: Value,
        ty: Type,
    };

    const Int = struct {
        actual_signedness: std.builtin.Signedness,
        wanted_signedness: std.builtin.Signedness,
        actual_bits: u16,
        wanted_bits: u16,
    };

    const IntPair = struct {
        actual: u64,
        wanted: u64,
    };

    const Size = struct {
        actual: std.builtin.Type.Pointer.Size,
        wanted: std.builtin.Type.Pointer.Size,
    };

    const Qualifiers = struct {
        actual_const: bool,
        wanted_const: bool,
        actual_volatile: bool,
        wanted_volatile: bool,
    };

    const AddressSpace = struct {
        actual: std.builtin.AddressSpace,
        wanted: std.builtin.AddressSpace,
    };

    const CC = struct {
        actual: std.builtin.CallingConvention,
        wanted: std.builtin.CallingConvention,
    };

    const BitRange = struct {
        actual_host: u16,
        wanted_host: u16,
        actual_offset: u16,
        wanted_offset: u16,
    };

    fn dupe(child: *const InMemoryCoercionResult, arena: Allocator) !*InMemoryCoercionResult {
        const res = try arena.create(InMemoryCoercionResult);
        res.* = child.*;
        return res;
    }

    fn report(res: *const InMemoryCoercionResult, sema: *Sema, block: *Block, src: LazySrcLoc, msg: *Module.ErrorMsg) !void {
        const mod = sema.mod;
        var cur = res;
        while (true) switch (cur.*) {
            .ok => unreachable,
            .no_match => |types| {
                try sema.addDeclaredHereNote(msg, types.wanted);
                try sema.addDeclaredHereNote(msg, types.actual);
                break;
            },
            .int_not_coercible => |int| {
                try sema.errNote(block, src, msg, "{s} {d}-bit int cannot represent all possible {s} {d}-bit values", .{
                    @tagName(int.wanted_signedness), int.wanted_bits, @tagName(int.actual_signedness), int.actual_bits,
                });
                break;
            },
            .error_union_payload => |pair| {
                try sema.errNote(block, src, msg, "error union payload '{}' cannot cast into error union payload '{}'", .{
                    pair.actual.fmt(mod), pair.wanted.fmt(mod),
                });
                cur = pair.child;
            },
            .array_len => |lens| {
                try sema.errNote(block, src, msg, "array of length {d} cannot cast into an array of length {d}", .{
                    lens.actual, lens.wanted,
                });
                break;
            },
            .array_sentinel => |sentinel| {
                if (sentinel.actual.toIntern() != .unreachable_value) {
                    try sema.errNote(block, src, msg, "array sentinel '{}' cannot cast into array sentinel '{}'", .{
                        sentinel.actual.fmtValue(sentinel.ty, mod), sentinel.wanted.fmtValue(sentinel.ty, mod),
                    });
                } else {
                    try sema.errNote(block, src, msg, "destination array requires '{}' sentinel", .{
                        sentinel.wanted.fmtValue(sentinel.ty, mod),
                    });
                }
                break;
            },
            .array_elem => |pair| {
                try sema.errNote(block, src, msg, "array element type '{}' cannot cast into array element type '{}'", .{
                    pair.actual.fmt(mod), pair.wanted.fmt(mod),
                });
                cur = pair.child;
            },
            .vector_len => |lens| {
                try sema.errNote(block, src, msg, "vector of length {d} cannot cast into a vector of length {d}", .{
                    lens.actual, lens.wanted,
                });
                break;
            },
            .vector_elem => |pair| {
                try sema.errNote(block, src, msg, "vector element type '{}' cannot cast into vector element type '{}'", .{
                    pair.actual.fmt(mod), pair.wanted.fmt(mod),
                });
                cur = pair.child;
            },
            .optional_shape => |pair| {
                try sema.errNote(block, src, msg, "optional type child '{}' cannot cast into optional type child '{}'", .{
                    pair.actual.optionalChild(mod).fmt(mod), pair.wanted.optionalChild(mod).fmt(mod),
                });
                break;
            },
            .optional_child => |pair| {
                try sema.errNote(block, src, msg, "optional type child '{}' cannot cast into optional type child '{}'", .{
                    pair.actual.fmt(mod), pair.wanted.fmt(mod),
                });
                cur = pair.child;
            },
            .from_anyerror => {
                try sema.errNote(block, src, msg, "global error set cannot cast into a smaller set", .{});
                break;
            },
            .missing_error => |missing_errors| {
                for (missing_errors) |err| {
                    try sema.errNote(block, src, msg, "'error.{}' not a member of destination error set", .{err.fmt(&mod.intern_pool)});
                }
                break;
            },
            .fn_var_args => |wanted_var_args| {
                if (wanted_var_args) {
                    try sema.errNote(block, src, msg, "non-variadic function cannot cast into a variadic function", .{});
                } else {
                    try sema.errNote(block, src, msg, "variadic function cannot cast into a non-variadic function", .{});
                }
                break;
            },
            .fn_generic => |wanted_generic| {
                if (wanted_generic) {
                    try sema.errNote(block, src, msg, "non-generic function cannot cast into a generic function", .{});
                } else {
                    try sema.errNote(block, src, msg, "generic function cannot cast into a non-generic function", .{});
                }
                break;
            },
            .fn_param_count => |lens| {
                try sema.errNote(block, src, msg, "function with {d} parameters cannot cast into a function with {d} parameters", .{
                    lens.actual, lens.wanted,
                });
                break;
            },
            .fn_param_noalias => |param| {
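                // noalias_bits is a per-parameter bitmask; scan for the first
                // bit where the two function types disagree.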
                var index: u6 = 0;
                var actual_noalias = false;
                while (true) : (index += 1) {
                    const actual = @as(u1, @truncate(param.actual >> index));
                    const wanted = @as(u1, @truncate(param.wanted >> index));
                    if (actual != wanted) {
                        actual_noalias = actual == 1;
                        break;
                    }
                }
                if (!actual_noalias) {
                    try sema.errNote(block, src, msg, "regular parameter {d} cannot cast into a noalias parameter", .{index});
                } else {
                    try sema.errNote(block, src, msg, "noalias parameter {d} cannot cast into a regular parameter", .{index});
                }
                break;
            },
            .fn_param_comptime => |param| {
                if (param.wanted) {
                    try sema.errNote(block, src, msg, "non-comptime parameter {d} cannot cast into a comptime parameter", .{param.index});
                } else {
                    try sema.errNote(block, src, msg, "comptime parameter {d} cannot cast into a non-comptime parameter", .{param.index});
                }
                break;
            },
            .fn_param => |param| {
                try sema.errNote(block, src, msg, "parameter {d} '{}' cannot cast into '{}'", .{
                    param.index, param.actual.fmt(mod), param.wanted.fmt(mod),
                });
                cur = param.child;
            },
            .fn_cc => |cc| {
                try sema.errNote(block, src, msg, "calling convention '{s}' cannot cast into calling convention '{s}'", .{ @tagName(cc.actual), @tagName(cc.wanted) });
                break;
            },
            .fn_return_type => |pair| {
                try sema.errNote(block, src, msg, "return type '{}' cannot cast into return type '{}'", .{
                    pair.actual.fmt(mod), pair.wanted.fmt(mod),
                });
                cur = pair.child;
            },
            .ptr_child => |pair| {
                try sema.errNote(block, src, msg, "pointer type child '{}' cannot cast into pointer type child '{}'", .{
                    pair.actual.fmt(mod), pair.wanted.fmt(mod),
                });
                cur = pair.child;
            },
            .ptr_addrspace => |@"addrspace"| {
                try sema.errNote(block, src, msg, "address space '{s}' cannot cast into address space '{s}'", .{ @tagName(@"addrspace".actual), @tagName(@"addrspace".wanted) });
                break;
            },
            .ptr_sentinel => |sentinel| {
                if (sentinel.actual.toIntern() != .unreachable_value) {
                    try sema.errNote(block, src, msg, "pointer sentinel '{}' cannot cast into pointer sentinel '{}'", .{
                        sentinel.actual.fmtValue(sentinel.ty, mod), sentinel.wanted.fmtValue(sentinel.ty, mod),
                    });
                } else {
                    try sema.errNote(block, src, msg, "destination pointer requires '{}' sentinel", .{
                        sentinel.wanted.fmtValue(sentinel.ty, mod),
                    });
                }
                break;
            },
            .ptr_size => |size| {
                try sema.errNote(block, src, msg, "a {s} pointer cannot cast into a {s} pointer", .{ pointerSizeString(size.actual), pointerSizeString(size.wanted) });
                break;
            },
            .ptr_qualifiers => |qualifiers| {
                const ok_const = !qualifiers.actual_const or qualifiers.wanted_const;
                const ok_volatile = !qualifiers.actual_volatile or qualifiers.wanted_volatile;
                if (!ok_const) {
                    try sema.errNote(block, src, msg, "cast discards const qualifier", .{});
                } else if (!ok_volatile) {
                    try sema.errNote(block, src, msg, "cast discards volatile qualifier", .{});
                }
                break;
            },
            .ptr_allowzero => |pair| {
                const wanted_allow_zero = pair.wanted.ptrAllowsZero(mod);
                const actual_allow_zero = pair.actual.ptrAllowsZero(mod);
                if (actual_allow_zero and !wanted_allow_zero) {
                    try sema.errNote(block, src, msg, "'{}' could have null values which are illegal in type '{}'", .{
                        pair.actual.fmt(mod), pair.wanted.fmt(mod),
                    });
                } else {
                    try sema.errNote(block, src, msg, "mutable '{}' allows illegal null values stored to type '{}'", .{
                        pair.actual.fmt(mod), pair.wanted.fmt(mod),
                    });
                }
                break;
            },
            .ptr_bit_range => |bit_range| {
                if (bit_range.actual_host != bit_range.wanted_host) {
                    try sema.errNote(block, src, msg, "pointer host size '{}' cannot cast into pointer host size '{}'", .{
                        bit_range.actual_host, bit_range.wanted_host,
                    });
                }
                if (bit_range.actual_offset != bit_range.wanted_offset) {
                    try sema.errNote(block, src, msg, "pointer bit offset '{}' cannot cast into pointer bit offset '{}'", .{
                        bit_range.actual_offset, bit_range.wanted_offset,
                    });
                }
                break;
            },
            .ptr_alignment => |pair| {
                try sema.errNote(block, src, msg, "pointer alignment '{}' cannot cast into pointer alignment '{}'", .{
                    pair.actual, pair.wanted,
                });
                break;
            },
            .double_ptr_to_anyopaque => |pair| {
                try sema.errNote(block, src, msg, "cannot implicitly cast double pointer '{}' to anyopaque pointer '{}'", .{
                    pair.actual.fmt(mod), pair.wanted.fmt(mod),
                });
                break;
            },
            .slice_to_anyopaque => |pair| {
                try sema.errNote(block, src, msg, "cannot implicitly cast slice '{}' to anyopaque pointer '{}'", .{
                    pair.actual.fmt(mod), pair.wanted.fmt(mod),
                });
                try sema.errNote(block, src, msg, "consider using '.ptr'", .{});
                break;
            },
        };
    }
};

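/// Human-readable pointer-size adjective used in coercion error notes.
/// Slices are unreachable here; they are reported through other variants.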
fn pointerSizeString(size: std.builtin.Type.Pointer.Size) []const u8 {
    return switch (size) {
        .One => "single",
        .Many => "many",
        .C => "C",
        .Slice => unreachable,
    };
}

/// If pointers have the same representation in runtime memory, a bitcast AIR instruction
/// may be used for the coercion.
/// * `const` attribute can be gained
/// * `volatile` attribute can be gained
/// * `allowzero` attribute can be gained (whether from explicit attribute, C pointer, or optional pointer) but only if !dest_is_mut
/// * alignment can be decreased
/// * bit offset attributes must match exactly
/// * `*`/`[*]` must match exactly, but `[*c]` matches either one
/// * sentinel-terminated pointers can coerce into `[*]`
fn coerceInMemoryAllowed(
    sema: *Sema,
    block: *Block,
    dest_ty: Type,
    src_ty: Type,
    dest_is_mut: bool,
    target: std.Target,
    dest_src: LazySrcLoc,
    src_src: LazySrcLoc,
) CompileError!InMemoryCoercionResult {
    const mod = sema.mod;

    if (dest_ty.eql(src_ty, mod))
        return .ok;

    const dest_tag = dest_ty.zigTypeTag(mod);
    const src_tag = src_ty.zigTypeTag(mod);

    // Differently-named integers with the same number of bits.
    if (dest_tag == .Int and src_tag == .Int) {
        const dest_info = dest_ty.intInfo(mod);
        const src_info = src_ty.intInfo(mod);

        if (dest_info.signedness == src_info.signedness and
            dest_info.bits == src_info.bits)
        {
            return .ok;
        }

        if ((src_info.signedness == dest_info.signedness and dest_info.bits < src_info.bits) or
            // small enough unsigned ints can be cast to large enough signed ints
            (dest_info.signedness == .signed and (src_info.signedness == .unsigned or dest_info.bits <= src_info.bits)) or
            (dest_info.signedness == .unsigned and src_info.signedness == .signed))
        {
            return InMemoryCoercionResult{ .int_not_coercible = .{
                .actual_signedness = src_info.signedness,
                .wanted_signedness = dest_info.signedness,
                .actual_bits = src_info.bits,
                .wanted_bits = dest_info.bits,
            } };
        }
    }

    // Differently-named floats with the same number of bits.
    if (dest_tag == .Float and src_tag == .Float) {
        const dest_bits = dest_ty.floatBits(target);
        const src_bits = src_ty.floatBits(target);
        if (dest_bits == src_bits) {
            return .ok;
        }
    }

    // Pointers / Pointer-like Optionals
    const maybe_dest_ptr_ty = try sema.typePtrOrOptionalPtrTy(dest_ty);
    const maybe_src_ptr_ty = try sema.typePtrOrOptionalPtrTy(src_ty);
    if (maybe_dest_ptr_ty) |dest_ptr_ty| {
        if (maybe_src_ptr_ty) |src_ptr_ty| {
            return try sema.coerceInMemoryAllowedPtrs(block, dest_ty, src_ty, dest_ptr_ty, src_ptr_ty, dest_is_mut, target, dest_src, src_src);
        }
    }

    // Slices
    if (dest_ty.isSlice(mod) and src_ty.isSlice(mod)) {
        return try sema.coerceInMemoryAllowedPtrs(block, dest_ty, src_ty, dest_ty, src_ty, dest_is_mut, target, dest_src, src_src);
    }

    // Functions
    if (dest_tag == .Fn and src_tag == .Fn) {
        return try sema.coerceInMemoryAllowedFns(block, dest_ty, src_ty, target, dest_src, src_src);
    }

    // Error Unions
    if (dest_tag == .ErrorUnion and src_tag == .ErrorUnion) {
        const dest_payload = dest_ty.errorUnionPayload(mod);
        const src_payload = src_ty.errorUnionPayload(mod);
        const child = try sema.coerceInMemoryAllowed(block, dest_payload, src_payload, dest_is_mut, target, dest_src, src_src);
        if (child != .ok) {
            return InMemoryCoercionResult{ .error_union_payload = .{
                .child = try child.dupe(sema.arena),
                .actual = src_payload,
                .wanted = dest_payload,
            } };
        }
        return try sema.coerceInMemoryAllowed(block, dest_ty.errorUnionSet(mod), src_ty.errorUnionSet(mod), dest_is_mut, target, dest_src, src_src);
    }

    // Error Sets
    if (dest_tag == .ErrorSet and src_tag == .ErrorSet) {
        return try sema.coerceInMemoryAllowedErrorSets(block, dest_ty, src_ty, dest_src, src_src);
    }

    // Arrays
    if (dest_tag == .Array and src_tag == .Array) {
        const dest_info = dest_ty.arrayInfo(mod);
        const src_info = src_ty.arrayInfo(mod);
        if (dest_info.len != src_info.len) {
            return InMemoryCoercionResult{ .array_len = .{
                .actual = src_info.len,
                .wanted = dest_info.len,
            } };
        }

        const child = try sema.coerceInMemoryAllowed(block, dest_info.elem_type, src_info.elem_type, dest_is_mut, target, dest_src, src_src);
        if (child != .ok) {
            return InMemoryCoercionResult{ .array_elem = .{
                .child = try child.dupe(sema.arena),
                .actual = src_info.elem_type,
                .wanted = dest_info.elem_type,
            } };
        }
        const ok_sent = dest_info.sentinel == null or
            (src_info.sentinel != null and
            dest_info.sentinel.?.eql(
                try mod.getCoerced(src_info.sentinel.?, dest_info.elem_type),
                dest_info.elem_type,
                mod,
            ));
        if (!ok_sent) {
            return InMemoryCoercionResult{ .array_sentinel = .{
                .actual = src_info.sentinel orelse Value.@"unreachable",
                .wanted = dest_info.sentinel orelse Value.@"unreachable",
                .ty = dest_info.elem_type,
            } };
        }
        return .ok;
    }

    // Vectors
    if (dest_tag == .Vector and src_tag == .Vector) {
        const dest_len = dest_ty.vectorLen(mod);
        const src_len = src_ty.vectorLen(mod);
        if (dest_len != src_len) {
            return InMemoryCoercionResult{ .vector_len = .{
                .actual = src_len,
                .wanted = dest_len,
            } };
        }

        const dest_elem_ty = dest_ty.scalarType(mod);
        const src_elem_ty = src_ty.scalarType(mod);
        const child = try sema.coerceInMemoryAllowed(block, dest_elem_ty, src_elem_ty, dest_is_mut, target, dest_src, src_src);
        if (child != .ok) {
            return InMemoryCoercionResult{ .vector_elem = .{
                .child = try child.dupe(sema.arena),
                .actual = src_elem_ty,
                .wanted = dest_elem_ty,
            } };
        }

        return .ok;
    }

    // Arrays <-> Vectors
    if ((dest_tag == .Vector and src_tag == .Array) or
        (dest_tag == .Array and src_tag == .Vector))
    {
        const dest_len = dest_ty.arrayLen(mod);
        const src_len = src_ty.arrayLen(mod);
        if (dest_len != src_len) {
            return InMemoryCoercionResult{ .array_len = .{
                .actual = src_len,
                .wanted = dest_len,
            } };
        }

        const dest_elem_ty = dest_ty.childType(mod);
        const src_elem_ty = src_ty.childType(mod);
        const child = try sema.coerceInMemoryAllowed(block, dest_elem_ty, src_elem_ty, dest_is_mut, target, dest_src, src_src);
        if (child != .ok) {
            return InMemoryCoercionResult{ .array_elem = .{
                .child = try child.dupe(sema.arena),
                .actual = src_elem_ty,
                .wanted = dest_elem_ty,
            } };
        }

        if (dest_tag == .Array) {
            const dest_info = dest_ty.arrayInfo(mod);
            if (dest_info.sentinel != null) {
                return InMemoryCoercionResult{ .array_sentinel = .{
                    .actual = Value.@"unreachable",
                    .wanted = dest_info.sentinel.?,
                    .ty = dest_info.elem_type,
                } };
            }
        }

        // The memory layout of @Vector(N, iM) is the same as the integer type i(N*M),
        // that is to say, the padding bits are not in the same place as the array [N]iM.
        // If there's no padding, the bitcast is possible.
        const elem_bit_size = dest_elem_ty.bitSize(mod);
        const elem_abi_byte_size = dest_elem_ty.abiSize(mod);
        if (elem_abi_byte_size * 8 == elem_bit_size)
            return .ok;
    }

    // Optionals
    if (dest_tag == .Optional and src_tag == .Optional) {
        if ((maybe_dest_ptr_ty != null) != (maybe_src_ptr_ty != null)) {
            return InMemoryCoercionResult{ .optional_shape = .{
                .actual = src_ty,
                .wanted = dest_ty,
            } };
        }
        const dest_child_type = dest_ty.optionalChild(mod);
        const src_child_type = src_ty.optionalChild(mod);

        const child = try sema.coerceInMemoryAllowed(block, dest_child_type, src_child_type, dest_is_mut, target, dest_src, src_src);
        if (child != .ok) {
            return InMemoryCoercionResult{ .optional_child = .{
                .child = try child.dupe(sema.arena),
                .actual = src_child_type,
                .wanted = dest_child_type,
            } };
        }

        return .ok;
    }

    // Tuples (with in-memory-coercible fields)
    if (dest_ty.isTuple(mod) and src_ty.isTuple(mod)) tuple: {
        if (dest_ty.containerLayout(mod) != src_ty.containerLayout(mod)) break :tuple;
        if (dest_ty.structFieldCount(mod) != src_ty.structFieldCount(mod)) break :tuple;
        const field_count = dest_ty.structFieldCount(mod);
        for (0..field_count) |field_idx| {
            if (dest_ty.structFieldIsComptime(field_idx, mod) != src_ty.structFieldIsComptime(field_idx, mod)) break :tuple;
            if (dest_ty.structFieldAlign(field_idx, mod) != src_ty.structFieldAlign(field_idx, mod)) break :tuple;
            const dest_field_ty = dest_ty.structFieldType(field_idx, mod);
            const src_field_ty = src_ty.structFieldType(field_idx, mod);
            const field = try sema.coerceInMemoryAllowed(block, dest_field_ty, src_field_ty, dest_is_mut, target, dest_src, src_src);
            if (field != .ok) break :tuple;
        }
        return .ok;
    }

    return InMemoryCoercionResult{ .no_match = .{
        .actual = dest_ty,
        .wanted = src_ty,
    } };
}

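/// Whether `src_ty` (an error set) may coerce in memory to the error set
/// `dest_ty`. Coercion to `anyerror` always succeeds; coercion to an inferred
/// error set of the function currently being analyzed adds the source errors
/// to that set; otherwise every error in the source set must already be a
/// member of the destination set.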
fn coerceInMemoryAllowedErrorSets(
    sema: *Sema,
    block: *Block,
    dest_ty: Type,
    src_ty: Type,
    dest_src: LazySrcLoc,
    src_src: LazySrcLoc,
) !InMemoryCoercionResult {
    const mod = sema.mod;
    const gpa = sema.gpa;
    const ip = &mod.intern_pool;

    // Coercion to `anyerror`. Note that this check can return false negatives
    // in case the error sets did not get resolved.
    if (dest_ty.isAnyError(mod)) {
        return .ok;
    }

    if (dest_ty.toIntern() == .adhoc_inferred_error_set_type) {
        // We are trying to coerce an error set to the current function's
        // inferred error set.
        const dst_ies = sema.fn_ret_ty_ies.?;
        try dst_ies.addErrorSet(src_ty, ip, gpa);
        return .ok;
    }

    if (ip.isInferredErrorSetType(dest_ty.toIntern())) {
        const dst_ies_func_index = ip.iesFuncIndex(dest_ty.toIntern());
        if (sema.fn_ret_ty_ies) |dst_ies| {
            if (dst_ies.func == dst_ies_func_index) {
                // We are trying to coerce an error set to the current function's
                // inferred error set.
                try dst_ies.addErrorSet(src_ty, ip, gpa);
                return .ok;
            }
        }
        switch (try sema.resolveInferredErrorSet(block, dest_src, dest_ty.toIntern())) {
            // isAnyError might have changed from a false negative to a true
            // positive after resolution.
            .anyerror_type => return .ok,
            else => {},
        }
    }

    var missing_error_buf = std.ArrayList(InternPool.NullTerminatedString).init(gpa);
    defer missing_error_buf.deinit();

    switch (src_ty.toIntern()) {
        .anyerror_type => switch (ip.indexToKey(dest_ty.toIntern())) {
            .simple_type => unreachable, // filtered out above
            .error_set_type, .inferred_error_set_type => return .from_anyerror,
            else => unreachable,
        },

        else => switch (ip.indexToKey(src_ty.toIntern())) {
            .inferred_error_set_type => {
                const resolved_src_ty = try sema.resolveInferredErrorSet(block, src_src, src_ty.toIntern());
                // src anyerror status might have changed after the resolution.
                if (resolved_src_ty == .anyerror_type) {
                    // dest_ty.isAnyError(mod) == true is already checked for at this point.
                    return .from_anyerror;
                }

                for (ip.indexToKey(resolved_src_ty).error_set_type.names.get(ip)) |key| {
                    if (!Type.errorSetHasFieldIp(ip, dest_ty.toIntern(), key)) {
                        try missing_error_buf.append(key);
                    }
                }

                if (missing_error_buf.items.len != 0) {
                    return InMemoryCoercionResult{
                        .missing_error = try sema.arena.dupe(InternPool.NullTerminatedString, missing_error_buf.items),
                    };
                }

                return .ok;
            },
            .error_set_type => |error_set_type| {
                for (error_set_type.names.get(ip)) |name| {
                    if (!Type.errorSetHasFieldIp(ip, dest_ty.toIntern(), name)) {
                        try missing_error_buf.append(name);
                    }
                }

                if (missing_error_buf.items.len != 0) {
                    return InMemoryCoercionResult{
                        .missing_error = try sema.arena.dupe(InternPool.NullTerminatedString, missing_error_buf.items),
                    };
                }

                return .ok;
            },
            else => unreachable,
        },
    }
}

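/// Whether two function types are in-memory coercible: var-args-ness,
/// generic-ness, and calling convention must match exactly, the return type
/// must coerce, and each parameter type must coerce in the reverse direction
/// (see the "cast direction is reversed" note below).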
fn coerceInMemoryAllowedFns(
    sema: *Sema,
    block: *Block,
    dest_ty: Type,
    src_ty: Type,
    target: std.Target,
    dest_src: LazySrcLoc,
    src_src: LazySrcLoc,
) !InMemoryCoercionResult {
    const mod = sema.mod;
    const ip = &mod.intern_pool;

    const dest_info = mod.typeToFunc(dest_ty).?;
    const src_info = mod.typeToFunc(src_ty).?;

    {
        if (dest_info.is_var_args != src_info.is_var_args) {
            return InMemoryCoercionResult{ .fn_var_args = dest_info.is_var_args };
        }

        if (dest_info.is_generic != src_info.is_generic) {
            return InMemoryCoercionResult{ .fn_generic = dest_info.is_generic };
        }

        if (dest_info.cc != src_info.cc) {
            return InMemoryCoercionResult{ .fn_cc = .{
                .actual = src_info.cc,
                .wanted = dest_info.cc,
            } };
        }

        switch (src_info.return_type) {
            .noreturn_type, .generic_poison_type => {},
            else => {
                const dest_return_type = dest_info.return_type.toType();
                const src_return_type = src_info.return_type.toType();
                const rt = try sema.coerceInMemoryAllowed(block, dest_return_type, src_return_type, false, target, dest_src, src_src);
                if (rt != .ok) {
                    return InMemoryCoercionResult{ .fn_return_type = .{
                        .child = try rt.dupe(sema.arena),
                        .actual = src_return_type,
                        .wanted = dest_return_type,
                    } };
                }
            },
        }
    }

    const params_len = params_len: {
        if (dest_info.param_types.len != src_info.param_types.len) {
            return InMemoryCoercionResult{ .fn_param_count = .{
                .actual = src_info.param_types.len,
                .wanted = dest_info.param_types.len,
            } };
        }

        if (dest_info.noalias_bits != src_info.noalias_bits) {
            return InMemoryCoercionResult{ .fn_param_noalias = .{
                .actual = src_info.noalias_bits,
                .wanted = dest_info.noalias_bits,
            } };
        }

        break :params_len dest_info.param_types.len;
    };

    for (0..params_len) |param_i| {
        const dest_param_ty = dest_info.param_types.get(ip)[param_i].toType();
        const src_param_ty = src_info.param_types.get(ip)[param_i].toType();

        const param_i_small: u5 = @intCast(param_i);
        if (dest_info.paramIsComptime(param_i_small) != src_info.paramIsComptime(param_i_small)) {
            return InMemoryCoercionResult{ .fn_param_comptime = .{
                .index = param_i,
                .wanted = dest_info.paramIsComptime(param_i_small),
            } };
        }

        switch (src_param_ty.toIntern()) {
            .generic_poison_type => {},
            else => {
                // Note: Cast direction is reversed here.
                const param = try sema.coerceInMemoryAllowed(block, src_param_ty, dest_param_ty, false, target, dest_src, src_src);
                if (param != .ok) {
                    return InMemoryCoercionResult{ .fn_param = .{
                        .child = try param.dupe(sema.arena),
                        .actual = src_param_ty,
                        .wanted = dest_param_ty,
                        .index = param_i,
                    } };
                }
            },
        }
    }

    return .ok;
}

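/// Whether one pointer type may coerce in memory to another: size, const and
/// volatile qualifiers, address space, child type, allowzero, packed bit
/// range, sentinel, and alignment are all checked, producing a detailed
/// `InMemoryCoercionResult` on mismatch.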
fn coerceInMemoryAllowedPtrs(
    sema: *Sema,
    block: *Block,
    dest_ty: Type,
    src_ty: Type,
    dest_ptr_ty: Type,
    src_ptr_ty: Type,
    dest_is_mut: bool,
    target: std.Target,
    dest_src: LazySrcLoc,
    src_src: LazySrcLoc,
) !InMemoryCoercionResult {
    const mod = sema.mod;
    const dest_info = dest_ptr_ty.ptrInfo(mod);
    const src_info = src_ptr_ty.ptrInfo(mod);

    const ok_ptr_size = src_info.flags.size == dest_info.flags.size or
        src_info.flags.size == .C or dest_info.flags.size == .C;
    if (!ok_ptr_size) {
        return InMemoryCoercionResult{ .ptr_size = .{
            .actual = src_info.flags.size,
            .wanted = dest_info.flags.size,
        } };
    }

    const ok_cv_qualifiers =
        (!src_info.flags.is_const or dest_info.flags.is_const) and
        (!src_info.flags.is_volatile or dest_info.flags.is_volatile);

    if (!ok_cv_qualifiers) {
        return InMemoryCoercionResult{ .ptr_qualifiers = .{
            .actual_const = src_info.flags.is_const,
            .wanted_const = dest_info.flags.is_const,
            .actual_volatile = src_info.flags.is_volatile,
            .wanted_volatile = dest_info.flags.is_volatile,
        } };
    }

    if (dest_info.flags.address_space != src_info.flags.address_space) {
        return InMemoryCoercionResult{ .ptr_addrspace = .{
            .actual = src_info.flags.address_space,
            .wanted = dest_info.flags.address_space,
        } };
    }

    const child = try sema.coerceInMemoryAllowed(block, dest_info.child.toType(), src_info.child.toType(), !dest_info.flags.is_const, target, dest_src, src_src);
    if (child != .ok) {
        return InMemoryCoercionResult{ .ptr_child = .{
            .child = try child.dupe(sema.arena),
            .actual = src_info.child.toType(),
            .wanted = dest_info.child.toType(),
        } };
    }

    const dest_allow_zero = dest_ty.ptrAllowsZero(mod);
    const src_allow_zero = src_ty.ptrAllowsZero(mod);

    const ok_allows_zero = (dest_allow_zero and
        (src_allow_zero or !dest_is_mut)) or
        (!dest_allow_zero and !src_allow_zero);
    if (!ok_allows_zero) {
        return InMemoryCoercionResult{ .ptr_allowzero = .{
            .actual = src_ty,
            .wanted = dest_ty,
        } };
    }

    if (src_info.packed_offset.host_size != dest_info.packed_offset.host_size or
        src_info.packed_offset.bit_offset != dest_info.packed_offset.bit_offset)
    {
        return InMemoryCoercionResult{ .ptr_bit_range = .{
            .actual_host = src_info.packed_offset.host_size,
            .wanted_host = dest_info.packed_offset.host_size,
            .actual_offset = src_info.packed_offset.bit_offset,
            .wanted_offset = dest_info.packed_offset.bit_offset,
        } };
    }

    const ok_sent = dest_info.sentinel == .none or src_info.flags.size == .C or
        (src_info.sentinel != .none and
        dest_info.sentinel == try mod.intern_pool.getCoerced(sema.gpa, src_info.sentinel, dest_info.child));
    if (!ok_sent) {
        return InMemoryCoercionResult{ .ptr_sentinel = .{
            .actual = switch (src_info.sentinel) {
                .none => Value.@"unreachable",
                else => src_info.sentinel.toValue(),
            },
            .wanted = switch (dest_info.sentinel) {
                .none => Value.@"unreachable",
                else => dest_info.sentinel.toValue(),
            },
            .ty = dest_info.child.toType(),
        } };
    }

    // If both pointers have alignment 0, it means they both want ABI alignment.
    // In this case, if they share the same child type, no need to resolve
    // pointee type alignment. Otherwise both pointee types must have their alignment
    // resolved and we compare the alignment numerically.
    if (src_info.flags.alignment != .none or dest_info.flags.alignment != .none or
        dest_info.child != src_info.child)
    {
        const src_align = src_info.flags.alignment.toByteUnitsOptional() orelse
            src_info.child.toType().abiAlignment(mod);

        const dest_align = dest_info.flags.alignment.toByteUnitsOptional() orelse
            dest_info.child.toType().abiAlignment(mod);

        if (dest_align > src_align) {
            return InMemoryCoercionResult{ .ptr_alignment = .{
                .actual = src_align,
                .wanted = dest_align,
            } };
        }
    }

    return .ok;
}

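/// Adjusts an argument passed to a variadic function: comptime int/float
/// literals are rejected, function bodies become function pointers, arrays
/// are rejected (they must be passed by reference), and floats narrower than
/// C `double` are promoted to the type matching `double`'s bit width. The
/// result must also be a valid extern type.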
fn coerceVarArgParam(
    sema: *Sema,
    block: *Block,
    inst: Air.Inst.Ref,
    inst_src: LazySrcLoc,
) !Air.Inst.Ref {
    if (block.is_typeof) return inst;

    const mod = sema.mod;
    const uncasted_ty = sema.typeOf(inst);
    const coerced = switch (uncasted_ty.zigTypeTag(mod)) {
        // TODO consider casting to c_int/f64 if they fit
        .ComptimeInt, .ComptimeFloat => return sema.fail(
            block,
            inst_src,
            "integer and float literals passed to variadic function must be casted to a fixed-size number type",
            .{},
        ),
        .Fn => blk: {
            const fn_val = try sema.resolveConstValue(block, .unneeded, inst, "");
            const fn_decl = fn_val.pointerDecl(mod).?;
            break :blk try sema.analyzeDeclRef(fn_decl);
        },
        .Array => return sema.fail(block, inst_src, "arrays must be passed by reference to variadic function", .{}),
        .Float => float: {
            const target = sema.mod.getTarget();
            const double_bits = target.c_type_bit_size(.double);
            const inst_bits = uncasted_ty.floatBits(sema.mod.getTarget());
            if (inst_bits >= double_bits) break :float inst;
            switch (double_bits) {
                32 => break :float try sema.coerce(block, Type.f32, inst, inst_src),
                64 => break :float try sema.coerce(block, Type.f64, inst, inst_src),
                else => unreachable,
            }
        },
        else => inst,
    };

    const coerced_ty = sema.typeOf(coerced);
    if (!try sema.validateExternType(coerced_ty, .param_ty)) {
        const msg = msg: {
            const msg = try sema.errMsg(block, inst_src, "cannot pass '{}' to variadic function", .{coerced_ty.fmt(sema.mod)});
            errdefer msg.destroy(sema.gpa);

            const src_decl = sema.mod.declPtr(block.src_decl);
            try sema.explainWhyTypeIsNotExtern(msg, inst_src.toSrcLoc(src_decl, mod), coerced_ty, .param_ty);

            try sema.addDeclaredHereNote(msg, coerced_ty);
            break :msg msg;
        };
        return sema.failWithOwnedErrorMsg(msg);
    }
    return coerced;
}

// TODO migrate callsites to use storePtr2 instead.
fn storePtr(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    ptr: Air.Inst.Ref,
    uncasted_operand: Air.Inst.Ref,
) CompileError!void {
    const air_tag: Air.Inst.Tag = if (block.wantSafety()) .store_safe else .store;
    return sema.storePtr2(block, src, ptr, src, uncasted_operand, src, air_tag);
}

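/// Lowers a store of `uncasted_operand` through `ptr`: coerces the operand to
/// the pointee type, performs the store at comptime when the pointer is
/// comptime-mutable, and otherwise emits the requested `air_tag` instruction.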
fn storePtr2(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    ptr: Air.Inst.Ref,
    ptr_src: LazySrcLoc,
    uncasted_operand: Air.Inst.Ref,
    operand_src: LazySrcLoc,
    air_tag: Air.Inst.Tag,
) CompileError!void {
    const mod = sema.mod;
    const ptr_ty = sema.typeOf(ptr);
    if (ptr_ty.isConstPtr(mod))
        return sema.fail(block, ptr_src, "cannot assign to constant", .{});

    const elem_ty = ptr_ty.childType(mod);

    // To generate better code for tuples, we detect a tuple operand here, and
    // analyze field loads and stores directly. This avoids an extra allocation + memcpy
    // which would occur if we used `coerce`.
    // However, we avoid this mechanism if the destination element type is a tuple,
    // because the regular store will be better for this case.
    // If the destination type is a struct we don't want this mechanism to trigger, because
    // this code does not handle tuple-to-struct coercion which requires dealing with missing
    // fields.
    const operand_ty = sema.typeOf(uncasted_operand);
    if (operand_ty.isTuple(mod) and elem_ty.zigTypeTag(mod) == .Array) {
        const field_count = operand_ty.structFieldCount(mod);
        var i: u32 = 0;
        while (i < field_count) : (i += 1) {
            const elem_src = operand_src; // TODO better source location
            const elem = try sema.tupleField(block, operand_src, uncasted_operand, elem_src, i);
            const elem_index = try mod.intRef(Type.usize, i);
            const elem_ptr = try sema.elemPtr(block, ptr_src, ptr, elem_index, elem_src, false, true);
            try sema.storePtr2(block, src, elem_ptr, elem_src, elem, elem_src, .store);
        }
        return;
    }

    // TODO do the same thing for anon structs as for tuples above.
    // However, beware of the need to handle missing/extra fields.

    const is_ret = air_tag == .ret_ptr;

    // Detect if we are storing an array operand to a bitcasted vector pointer.
    // If so, we instead reach through the bitcasted pointer to the vector pointer,
    // bitcast the array operand to a vector, and then lower this as a store of
    // a vector value to a vector pointer. This generally results in better code,
    // as well as working around an LLVM bug:
    // https://github.com/ziglang/zig/issues/11154
    if (sema.obtainBitCastedVectorPtr(ptr)) |vector_ptr| {
        const vector_ty = sema.typeOf(vector_ptr).childType(mod);
        const vector = sema.coerceExtra(block, vector_ty, uncasted_operand, operand_src, .{ .is_ret = is_ret }) catch |err| switch (err) {
            error.NotCoercible => unreachable,
            else => |e| return e,
        };
        try sema.storePtr2(block, src, vector_ptr, ptr_src, vector, operand_src, .store);
        return;
    }

    const operand = sema.coerceExtra(block, elem_ty, uncasted_operand, operand_src, .{ .is_ret = is_ret }) catch |err| switch (err) {
        error.NotCoercible => unreachable,
        else => |e| return e,
    };
    const maybe_operand_val = try sema.resolveMaybeUndefVal(operand);

    const runtime_src = if (try sema.resolveDefinedValue(block, ptr_src, ptr)) |ptr_val| rs: {
        const operand_val = maybe_operand_val orelse {
            try sema.checkPtrIsNotComptimeMutable(block, ptr_val, ptr_src, operand_src);
            break :rs operand_src;
        };
        if (ptr_val.isComptimeMutablePtr(mod)) {
            try sema.storePtrVal(block, src, ptr_val, operand_val, elem_ty);
            return;
        } else break :rs ptr_src;
    } else ptr_src;

    // We do this after the possible comptime store above, for the case of field_ptr stores
    // to unions because we want the comptime tag to be set, even if the field type is void.
    if ((try sema.typeHasOnePossibleValue(elem_ty)) != null)
        return;

    if (air_tag == .bitcast) {
        // `air_tag == .bitcast` is used as a special case for `zirCoerceResultPtr`
        // to avoid calling `requireRuntimeBlock` for the dummy block.
        _ = try block.addBinOp(.store, ptr, operand);
        return;
    }

    try sema.requireRuntimeBlock(block, src, runtime_src);
    try sema.queueFullTypeResolution(elem_ty);

    if (ptr_ty.ptrInfo(mod).flags.vector_index == .runtime) {
        const ptr_inst = Air.refToIndex(ptr).?;
        const air_tags = sema.air_instructions.items(.tag);
        if (air_tags[ptr_inst] == .ptr_elem_ptr) {
            const ty_pl = sema.air_instructions.items(.data)[ptr_inst].ty_pl;
            const bin_op = sema.getTmpAir().extraData(Air.Bin, ty_pl.payload).data;
            _ = try block.addInst(.{
                .tag = .vector_store_elem,
                .data = .{ .vector_store_elem = .{
                    .vector_ptr = bin_op.lhs,
                    .payload = try block.sema.addExtra(Air.Bin{
                        .lhs = bin_op.rhs,
                        .rhs = operand,
                    }),
                } },
            });
            return;
        }
        return sema.fail(block, ptr_src, "unable to determine vector element index of type '{}'", .{
            ptr_ty.fmt(sema.mod),
        });
    }

    if (is_ret) {
        _ = try block.addBinOp(.store, ptr, operand);
    } else {
        _ = try block.addBinOp(air_tag, ptr, operand);
    }
}

/// Traverse an arbitrary number of bitcasted pointers and return the underlying vector
/// pointer. The result is non-null only if the final element type matches the vector
/// element type and the lengths match.
fn obtainBitCastedVectorPtr(sema: *Sema, ptr: Air.Inst.Ref) ?Air.Inst.Ref {
    const mod = sema.mod;
    const array_ty = sema.typeOf(ptr).childType(mod);
    if (array_ty.zigTypeTag(mod) != .Array) return null;
    var ptr_ref = ptr;
    var ptr_inst = Air.refToIndex(ptr_ref) orelse return null;
    const air_datas = sema.air_instructions.items(.data);
    const air_tags = sema.air_instructions.items(.tag);
    const vector_ty = while (air_tags[ptr_inst] == .bitcast) {
        ptr_ref = air_datas[ptr_inst].ty_op.operand;
        if (!sema.isKnownZigType(ptr_ref, .Pointer)) return null;
        const child_ty = sema.typeOf(ptr_ref).childType(mod);
        if (child_ty.zigTypeTag(mod) == .Vector) break child_ty;
        ptr_inst = Air.refToIndex(ptr_ref) orelse return null;
    } else return null;

    // We have a pointer-to-array and a pointer-to-vector. If the elements and
    // lengths match, return the result.
    if (array_ty.childType(mod).eql(vector_ty.childType(mod), sema.mod) and
        array_ty.arrayLen(mod) == vector_ty.vectorLen(mod))
    {
        return ptr_ref;
    } else {
        return null;
    }
}

/// Call when you have Value objects rather than Air instructions, and you want to
/// assert the store must be done at comptime.
fn storePtrVal(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    ptr_val: Value,
    operand_val: Value,
    operand_ty: Type,
) !void {
    const mod = sema.mod;
    var mut_kit = try sema.beginComptimePtrMutation(block, src, ptr_val, operand_ty);
    try sema.checkComptimeVarStore(block, src, mut_kit.mut_decl);

    switch (mut_kit.pointee) {
        .direct => |val_ptr| {
            if (mut_kit.mut_decl.runtime_index == .comptime_field_ptr) {
                if (!operand_val.eql(val_ptr.*, operand_ty, mod)) {
                    // TODO use failWithInvalidComptimeFieldStore
                    return sema.fail(block, src, "value stored in comptime field does not match the default value of the field", .{});
                }
                return;
            }
            val_ptr.* = (try operand_val.intern(operand_ty, mod)).toValue();
        },
        .reinterpret => |reinterpret| {
            const abi_size = try sema.usizeCast(block, src, mut_kit.ty.abiSize(mod));
            const buffer = try sema.gpa.alloc(u8, abi_size);
            defer sema.gpa.free(buffer);
            reinterpret.val_ptr.*.writeToMemory(mut_kit.ty, mod, buffer) catch |err| switch (err) {
                error.OutOfMemory => return error.OutOfMemory,
                error.ReinterpretDeclRef => unreachable,
                error.IllDefinedMemoryLayout => unreachable, // Sema was supposed to emit a compile error already
                error.Unimplemented => return sema.fail(block, src, "TODO: implement writeToMemory for type '{}'", .{mut_kit.ty.fmt(mod)}),
            };
            operand_val.writeToMemory(operand_ty, mod, buffer[reinterpret.byte_offset..]) catch |err| switch (err) {
                error.OutOfMemory => return error.OutOfMemory,
                error.ReinterpretDeclRef => unreachable,
                error.IllDefinedMemoryLayout => unreachable, // Sema was supposed to emit a compile error already
                error.Unimplemented => return sema.fail(block, src, "TODO: implement writeToMemory for type '{}'", .{mut_kit.ty.fmt(mod)}),
            };

            reinterpret.val_ptr.* = (try (try Value.readFromMemory(mut_kit.ty, mod, buffer, sema.arena)).intern(mut_kit.ty, mod)).toValue();
        },
        .bad_decl_ty, .bad_ptr_ty => {
            // TODO show the decl declaration site in a note and explain whether the decl
            // or the pointer is the problematic type
            return sema.fail(
                block,
                src,
                "comptime mutation of a reinterpreted pointer requires type '{}' to have a well-defined memory layout",
                .{mut_kit.ty.fmt(mod)},
            );
        },
    }
}

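/// Describes how a comptime store through a pointer can mutate the underlying
/// Value: either directly, by reinterpreting bytes at an offset within a
/// parent Value, or not at all because a type lacks a well-defined layout.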
const ComptimePtrMutationKit = struct {
    mut_decl: InternPool.Key.Ptr.Addr.MutDecl,
    pointee: union(enum) {
        /// The pointer type matches the actual comptime Value so a direct
        /// modification is possible.
        direct: *Value,
        /// The largest parent Value containing the pointee and having a well-defined memory layout.
        /// This is used for bitcasting, if direct dereferencing failed.
        reinterpret: struct {
            val_ptr: *Value,
            byte_offset: usize,
        },
        /// If the root decl could not be used as a parent, this means `ty` is the type that
        /// caused that by not having a well-defined layout.
        /// This one means the Decl that owns the value trying to be modified does not
        /// have a well-defined memory layout.
        bad_decl_ty,
        /// If the root decl could not be used as a parent, this means `ty` is the type that
        /// caused that by not having a well-defined layout.
        /// This one means the pointer type that is being stored through does not
        /// have a well-defined memory layout.
        bad_ptr_ty,
    },
    ty: Type,
};

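/// Recursively follows `ptr_val` down to the Value it addresses, rewriting
/// parent Values into mutable representations (for example from `undef` to
/// `aggregate`) as needed so that a comptime store can then be performed.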
fn beginComptimePtrMutation(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    ptr_val: Value,
    ptr_elem_ty: Type,
) CompileError!ComptimePtrMutationKit {
    const mod = sema.mod;
    const ptr = mod.intern_pool.indexToKey(ptr_val.toIntern()).ptr;
    switch (ptr.addr) {
        .decl, .int => unreachable, // isComptimeMutablePtr has been checked already
        .mut_decl => |mut_decl| {
            const decl = mod.declPtr(mut_decl.decl);
            return sema.beginComptimePtrMutationInner(block, src, decl.ty, &decl.val, ptr_elem_ty, mut_decl);
        },
        .comptime_field => |comptime_field| {
            const duped = try sema.arena.create(Value);
            duped.* = comptime_field.toValue();
            return sema.beginComptimePtrMutationInner(block, src, mod.intern_pool.typeOf(comptime_field).toType(), duped, ptr_elem_ty, .{
                .decl = undefined,
                .runtime_index = .comptime_field_ptr,
            });
        },
        .eu_payload => |eu_ptr| {
            const eu_ty = mod.intern_pool.typeOf(eu_ptr).toType().childType(mod);
            var parent = try sema.beginComptimePtrMutation(block, src, eu_ptr.toValue(), eu_ty);
            switch (parent.pointee) {
                .direct => |val_ptr| {
                    const payload_ty = parent.ty.errorUnionPayload(mod);
                    if (val_ptr.ip_index == .none and val_ptr.tag() == .eu_payload) {
                        return ComptimePtrMutationKit{
                            .mut_decl = parent.mut_decl,
                            .pointee = .{ .direct = &val_ptr.castTag(.eu_payload).?.data },
                            .ty = payload_ty,
                        };
                    } else {
                        // An error union has been initialized to undefined at comptime and now we
                        // are for the first time setting the payload. We must change the
                        // representation of the error union from `undef` to `eu_payload`.

                        const payload = try sema.arena.create(Value.Payload.SubValue);
                        payload.* = .{
                            .base = .{ .tag = .eu_payload },
                            .data = (try mod.intern(.{ .undef = payload_ty.toIntern() })).toValue(),
                        };

                        val_ptr.* = Value.initPayload(&payload.base);

                        return ComptimePtrMutationKit{
                            .mut_decl = parent.mut_decl,
                            .pointee = .{ .direct = &payload.data },
                            .ty = payload_ty,
                        };
                    }
                },
                .bad_decl_ty, .bad_ptr_ty => return parent,
                // Even though the parent value type has well-defined memory layout, our
                // pointer type does not.
                .reinterpret => return ComptimePtrMutationKit{
                    .mut_decl = parent.mut_decl,
                    .pointee = .bad_ptr_ty,
                    .ty = eu_ty,
                },
            }
        },
        .opt_payload => |opt_ptr| {
            const opt_ty = mod.intern_pool.typeOf(opt_ptr).toType().childType(mod);
            var parent = try sema.beginComptimePtrMutation(block, src, opt_ptr.toValue(), opt_ty);
            switch (parent.pointee) {
                .direct => |val_ptr| {
                    const payload_ty = parent.ty.optionalChild(mod);
                    switch (val_ptr.ip_index) {
                        .none => return ComptimePtrMutationKit{
                            .mut_decl = parent.mut_decl,
                            .pointee = .{ .direct = &val_ptr.castTag(.opt_payload).?.data },
                            .ty = payload_ty,
                        },
                        else => {
                            const payload_val = switch (mod.intern_pool.indexToKey(val_ptr.ip_index)) {
                                .undef => try mod.intern(.{ .undef = payload_ty.toIntern() }),
                                .opt => |opt| switch (opt.val) {
                                    .none => try mod.intern(.{ .undef = payload_ty.toIntern() }),
                                    else => |payload| payload,
                                },
                                else => unreachable,
                            };

                            // An optional has been initialized to undefined at comptime and now we
                            // are for the first time setting the payload. We must change the
                            // representation of the optional from `undef` to `opt_payload`.

                            const payload = try sema.arena.create(Value.Payload.SubValue);
                            payload.* = .{
                                .base = .{ .tag = .opt_payload },
                                .data = payload_val.toValue(),
                            };

                            val_ptr.* = Value.initPayload(&payload.base);

                            return ComptimePtrMutationKit{
                                .mut_decl = parent.mut_decl,
                                .pointee = .{ .direct = &payload.data },
                                .ty = payload_ty,
                            };
                        },
                    }
                },
                .bad_decl_ty, .bad_ptr_ty => return parent,
                // Even though the parent value type has well-defined memory layout, our
                // pointer type does not.
                .reinterpret => return ComptimePtrMutationKit{
                    .mut_decl = parent.mut_decl,
                    .pointee = .bad_ptr_ty,
                    .ty = opt_ty,
                },
            }
        },
        .elem => |elem_ptr| {
            const base_elem_ty = mod.intern_pool.typeOf(elem_ptr.base).toType().elemType2(mod);
            var parent = try sema.beginComptimePtrMutation(block, src, elem_ptr.base.toValue(), base_elem_ty);

            switch (parent.pointee) {
                .direct => |val_ptr| switch (parent.ty.zigTypeTag(mod)) {
                    .Array, .Vector => {
                        const check_len = parent.ty.arrayLenIncludingSentinel(mod);
                        if (elem_ptr.index >= check_len) {
                            // TODO have the parent include the decl so we can say "declared here"
                            return sema.fail(block, src, "comptime store of index {d} out of bounds of array length {d}", .{
                                elem_ptr.index, check_len,
                            });
                        }
                        const elem_ty = parent.ty.childType(mod);

                        // We might have a pointer to multiple elements of the array (e.g. a pointer
                        // to a sub-array). In this case, we just have to reinterpret the relevant
                        // bytes of the whole array rather than any single element.
                        const elem_abi_size_u64 = try sema.typeAbiSize(base_elem_ty);
                        if (elem_abi_size_u64 < try sema.typeAbiSize(ptr_elem_ty)) {
                            const elem_abi_size = try sema.usizeCast(block, src, elem_abi_size_u64);
                            const elem_idx = try sema.usizeCast(block, src, elem_ptr.index);
                            return .{
                                .mut_decl = parent.mut_decl,
                                .pointee = .{ .reinterpret = .{
                                    .val_ptr = val_ptr,
                                    .byte_offset = elem_abi_size * elem_idx,
                                } },
                                .ty = parent.ty,
                            };
                        }

                        switch (val_ptr.ip_index) {
                            .none => switch (val_ptr.tag()) {
                                .bytes => {
                                    // An array is memory-optimized to store a slice of bytes, but we are about
                                    // to modify an individual field and the representation has to change.
                                    // If we wanted to avoid this, there would need to be special detection
                                    // elsewhere to identify when writing a value to an array element that is stored
                                    // using the `bytes` tag, and handle it without making a call to this function.
                                    const arena = mod.tmp_hack_arena.allocator();

                                    const bytes = val_ptr.castTag(.bytes).?.data;
                                    const dest_len = parent.ty.arrayLenIncludingSentinel(mod);
                                    // bytes.len may be one greater than dest_len because of the case when
                                    // assigning `[N:S]T` to `[N]T`. This is allowed; the sentinel is omitted.
                                    assert(bytes.len >= dest_len);
                                    const elems = try arena.alloc(Value, @as(usize, @intCast(dest_len)));
                                    for (elems, 0..) |*elem, i| {
                                        elem.* = try mod.intValue(elem_ty, bytes[i]);
                                    }

                                    val_ptr.* = try Value.Tag.aggregate.create(arena, elems);

                                    return beginComptimePtrMutationInner(
                                        sema,
                                        block,
                                        src,
                                        elem_ty,
                                        &elems[@as(usize, @intCast(elem_ptr.index))],
                                        ptr_elem_ty,
                                        parent.mut_decl,
                                    );
                                },
                                .repeated => {
                                    // An array is memory-optimized to store only a single element value, and
                                    // that value is understood to be the same for the entire length of the array.
                                    // However, now we want to modify an individual field and so the
                                    // representation has to change. If we wanted to avoid this, there would
                                    // need to be special detection elsewhere to identify when writing a value to an
                                    // array element that is stored using the `repeated` tag, and handle it
                                    // without making a call to this function.
                                    const arena = mod.tmp_hack_arena.allocator();

                                    const repeated_val = try val_ptr.castTag(.repeated).?.data.intern(parent.ty.childType(mod), mod);
                                    const array_len_including_sentinel =
                                        try sema.usizeCast(block, src, parent.ty.arrayLenIncludingSentinel(mod));
                                    const elems = try arena.alloc(Value, array_len_including_sentinel);
                                    @memset(elems, repeated_val.toValue());

                                    val_ptr.* = try Value.Tag.aggregate.create(arena, elems);

                                    return beginComptimePtrMutationInner(
                                        sema,
                                        block,
                                        src,
                                        elem_ty,
                                        &elems[@as(usize, @intCast(elem_ptr.index))],
                                        ptr_elem_ty,
                                        parent.mut_decl,
                                    );
                                },

                                .aggregate => return beginComptimePtrMutationInner(
                                    sema,
                                    block,
                                    src,
                                    elem_ty,
                                    &val_ptr.castTag(.aggregate).?.data[@as(usize, @intCast(elem_ptr.index))],
                                    ptr_elem_ty,
                                    parent.mut_decl,
                                ),

                                else => unreachable,
                            },
                            else => switch (mod.intern_pool.indexToKey(val_ptr.toIntern())) {
                                .undef => {
                                    // An array has been initialized to undefined at comptime and now we
                                    // are for the first time setting an element. We must change the representation
                                    // of the array from `undef` to `array`.
                                    const arena = mod.tmp_hack_arena.allocator();

                                    const array_len_including_sentinel =
                                        try sema.usizeCast(block, src, parent.ty.arrayLenIncludingSentinel(mod));
                                    const elems = try arena.alloc(Value, array_len_including_sentinel);
                                    @memset(elems, (try mod.intern(.{ .undef = elem_ty.toIntern() })).toValue());

                                    val_ptr.* = try Value.Tag.aggregate.create(arena, elems);

                                    return beginComptimePtrMutationInner(
                                        sema,
                                        block,
                                        src,
                                        elem_ty,
                                        &elems[@as(usize, @intCast(elem_ptr.index))],
                                        ptr_elem_ty,
                                        parent.mut_decl,
                                    );
                                },
                                else => unreachable,
                            },
                        }
                    },
                    else => {
                        if (elem_ptr.index != 0) {
                            // TODO include a "declared here" note for the decl
                            return sema.fail(block, src, "out of bounds comptime store of index {d}", .{
                                elem_ptr.index,
                            });
                        }
                        return beginComptimePtrMutationInner(
                            sema,
                            block,
                            src,
                            parent.ty,
                            val_ptr,
                            ptr_elem_ty,
                            parent.mut_decl,
                        );
                    },
                },
                .reinterpret => |reinterpret| {
                    if (!base_elem_ty.hasWellDefinedLayout(mod)) {
                        // Even though the parent value type has well-defined memory layout, our
                        // pointer type does not.
                        return ComptimePtrMutationKit{
                            .mut_decl = parent.mut_decl,
                            .pointee = .bad_ptr_ty,
                            .ty = base_elem_ty,
                        };
                    }

                    const elem_abi_size_u64 = try sema.typeAbiSize(base_elem_ty);
                    const elem_abi_size = try sema.usizeCast(block, src, elem_abi_size_u64);
                    const elem_idx = try sema.usizeCast(block, src, elem_ptr.index);
                    return ComptimePtrMutationKit{
                        .mut_decl = parent.mut_decl,
                        .pointee = .{ .reinterpret = .{
                            .val_ptr = reinterpret.val_ptr,
                            .byte_offset = reinterpret.byte_offset + elem_abi_size * elem_idx,
                        } },
                        .ty = parent.ty,
                    };
                },
                .bad_decl_ty, .bad_ptr_ty => return parent,
            }
        },
        .field => |field_ptr| {
            const base_child_ty = mod.intern_pool.typeOf(field_ptr.base).toType().childType(mod);
            const field_index = @as(u32, @intCast(field_ptr.index));

            var parent = try sema.beginComptimePtrMutation(block, src, field_ptr.base.toValue(), base_child_ty);
            switch (parent.pointee) {
                .direct => |val_ptr| switch (val_ptr.ip_index) {
                    .empty_struct => {
                        const duped = try sema.arena.create(Value);
                        duped.* = val_ptr.*;
                        return beginComptimePtrMutationInner(
                            sema,
                            block,
                            src,
                            parent.ty.structFieldType(field_index, mod),
                            duped,
                            ptr_elem_ty,
                            parent.mut_decl,
                        );
                    },
                    .none => switch (val_ptr.tag()) {
                        .aggregate => return beginComptimePtrMutationInner(
                            sema,
                            block,
                            src,
                            parent.ty.structFieldType(field_index, mod),
                            &val_ptr.castTag(.aggregate).?.data[field_index],
                            ptr_elem_ty,
                            parent.mut_decl,
                        ),
                        .repeated => {
                            const arena = mod.tmp_hack_arena.allocator();

                            const elems = try arena.alloc(Value, parent.ty.structFieldCount(mod));
                            @memset(elems, val_ptr.castTag(.repeated).?.data);
                            val_ptr.* = try Value.Tag.aggregate.create(arena, elems);

                            return beginComptimePtrMutationInner(
                                sema,
                                block,
                                src,
                                parent.ty.structFieldType(field_index, mod),
                                &elems[field_index],
                                ptr_elem_ty,
                                parent.mut_decl,
                            );
                        },
                        .@"union" => {
                            // We need to set the active field of the union.
                            const union_tag_ty = base_child_ty.unionTagTypeHypothetical(mod);

                            const payload = &val_ptr.castTag(.@"union").?.data;
                            payload.tag = try mod.enumValueFieldIndex(union_tag_ty, field_index);

                            return beginComptimePtrMutationInner(
                                sema,
                                block,
                                src,
                                parent.ty.structFieldType(field_index, mod),
                                &payload.val,
                                ptr_elem_ty,
                                parent.mut_decl,
                            );
                        },
                        .slice => switch (field_index) {
                            Value.slice_ptr_index => return beginComptimePtrMutationInner(
                                sema,
                                block,
                                src,
                                parent.ty.slicePtrFieldType(mod),
                                &val_ptr.castTag(.slice).?.data.ptr,
                                ptr_elem_ty,
                                parent.mut_decl,
                            ),

                            Value.slice_len_index => return beginComptimePtrMutationInner(
                                sema,
                                block,
                                src,
                                Type.usize,
                                &val_ptr.castTag(.slice).?.data.len,
                                ptr_elem_ty,
                                parent.mut_decl,
                            ),

                            else => unreachable,
                        },
                        else => unreachable,
                    },
                    else => switch (mod.intern_pool.indexToKey(val_ptr.toIntern())) {
                        .undef => {
                            // A struct or union has been initialized to undefined at comptime and now we
                            // are for the first time setting a field. We must change the representation
                            // of the struct/union from `undef` to `struct`/`union`.
                            const arena = mod.tmp_hack_arena.allocator();

                            switch (parent.ty.zigTypeTag(mod)) {
                                .Struct => {
                                    const fields = try arena.alloc(Value, parent.ty.structFieldCount(mod));
                                    for (fields, 0..) |*field, i| field.* = (try mod.intern(.{
                                        .undef = parent.ty.structFieldType(i, mod).toIntern(),
                                    })).toValue();

                                    val_ptr.* = try Value.Tag.aggregate.create(arena, fields);

                                    return beginComptimePtrMutationInner(
                                        sema,
                                        block,
                                        src,
                                        parent.ty.structFieldType(field_index, mod),
                                        &fields[field_index],
                                        ptr_elem_ty,
                                        parent.mut_decl,
                                    );
                                },
                                .Union => {
                                    const payload = try arena.create(Value.Payload.Union);
                                    const tag_ty = parent.ty.unionTagTypeHypothetical(mod);
                                    const payload_ty = parent.ty.structFieldType(field_index, mod);
                                    payload.* = .{ .data = .{
                                        .tag = try mod.enumValueFieldIndex(tag_ty, field_index),
                                        .val = (try mod.intern(.{ .undef = payload_ty.toIntern() })).toValue(),
                                    } };

                                    val_ptr.* = Value.initPayload(&payload.base);

                                    return beginComptimePtrMutationInner(
                                        sema,
                                        block,
                                        src,
                                        payload_ty,
                                        &payload.data.val,
                                        ptr_elem_ty,
                                        parent.mut_decl,
                                    );
                                },
                                .Pointer => {
                                    assert(parent.ty.isSlice(mod));
                                    const ptr_ty = parent.ty.slicePtrFieldType(mod);
                                    val_ptr.* = try Value.Tag.slice.create(arena, .{
                                        .ptr = (try mod.intern(.{ .undef = ptr_ty.toIntern() })).toValue(),
                                        .len = (try mod.intern(.{ .undef = .usize_type })).toValue(),
                                    });

                                    switch (field_index) {
                                        Value.slice_ptr_index => return beginComptimePtrMutationInner(
                                            sema,
                                            block,
                                            src,
                                            ptr_ty,
                                            &val_ptr.castTag(.slice).?.data.ptr,
                                            ptr_elem_ty,
                                            parent.mut_decl,
                                        ),
                                        Value.slice_len_index => return beginComptimePtrMutationInner(
                                            sema,
                                            block,
                                            src,
                                            Type.usize,
                                            &val_ptr.castTag(.slice).?.data.len,
                                            ptr_elem_ty,
                                            parent.mut_decl,
                                        ),

                                        else => unreachable,
                                    }
                                },
                                else => unreachable,
                            }
                        },
                        else => unreachable,
                    },
                },
                .reinterpret => |reinterpret| {
                    const field_offset_u64 = base_child_ty.structFieldOffset(field_index, mod);
                    const field_offset = try sema.usizeCast(block, src, field_offset_u64);
                    return ComptimePtrMutationKit{
                        .mut_decl = parent.mut_decl,
                        .pointee = .{ .reinterpret = .{
                            .val_ptr = reinterpret.val_ptr,
                            .byte_offset = reinterpret.byte_offset + field_offset,
                        } },
                        .ty = parent.ty,
                    };
                },
                .bad_decl_ty, .bad_ptr_ty => return parent,
            }
        },
    }
}

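/// Final step of `beginComptimePtrMutation`: decides whether the mutation can
/// be applied directly to `decl_val`, must go through a byte-level
/// reinterpretation, or is impossible because a type lacks a well-defined layout.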
fn beginComptimePtrMutationInner(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    decl_ty: Type,
    decl_val: *Value,
    ptr_elem_ty: Type,
    mut_decl: InternPool.Key.Ptr.Addr.MutDecl,
) CompileError!ComptimePtrMutationKit {
    const mod = sema.mod;
    const target = mod.getTarget();
    const coerce_ok = (try sema.coerceInMemoryAllowed(block, ptr_elem_ty, decl_ty, true, target, src, src)) == .ok;

    decl_val.* = try decl_val.unintern(sema.arena, mod);

    if (coerce_ok) {
        return ComptimePtrMutationKit{
            .mut_decl = mut_decl,
            .pointee = .{ .direct = decl_val },
            .ty = decl_ty,
        };
    }

    // Handle the case that the decl is an array and we're actually trying to point to an element.
    if (decl_ty.isArrayOrVector(mod)) {
        const decl_elem_ty = decl_ty.childType(mod);
        if ((try sema.coerceInMemoryAllowed(block, ptr_elem_ty, decl_elem_ty, true, target, src, src)) == .ok) {
            return ComptimePtrMutationKit{
                .mut_decl = mut_decl,
                .pointee = .{ .direct = decl_val },
                .ty = decl_ty,
            };
        }
    }

    if (!decl_ty.hasWellDefinedLayout(mod)) {
        return ComptimePtrMutationKit{
            .mut_decl = mut_decl,
            .pointee = .bad_decl_ty,
            .ty = decl_ty,
        };
    }
    if (!ptr_elem_ty.hasWellDefinedLayout(mod)) {
        return ComptimePtrMutationKit{
            .mut_decl = mut_decl,
            .pointee = .bad_ptr_ty,
            .ty = ptr_elem_ty,
        };
    }
    return ComptimePtrMutationKit{
        .mut_decl = mut_decl,
        .pointee = .{ .reinterpret = .{
            .val_ptr = decl_val,
            .byte_offset = 0,
        } },
        .ty = decl_ty,
    };
}

const TypedValueAndOffset = struct {
    tv: TypedValue,
    byte_offset: usize,
};

const ComptimePtrLoadKit = struct {
    /// The Value and Type corresponding to the pointee of the provided pointer.
    /// If a direct dereference is not possible, this is null.
    pointee: ?TypedValue,
    /// The largest parent Value containing `pointee` and having a well-defined memory layout.
    /// This is used for bitcasting, if direct dereferencing failed (i.e. `pointee` is null).
    parent: ?TypedValueAndOffset,
    /// Whether the `pointee` could be mutated by further
    /// semantic analysis and a copy must be performed.
    is_mutable: bool,
    /// If the root decl could not be used as `parent`, this is the type that
    /// caused that by not having a well-defined layout.
    ty_without_well_defined_layout: ?Type,
};

const ComptimePtrLoadError = CompileError || error{
    RuntimeLoad,
};

/// If `maybe_array_ty` is provided, it will be used to directly dereference an
/// .elem_ptr of type T to a value of [N]T, if necessary.
fn beginComptimePtrLoad(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    ptr_val: Value,
    maybe_array_ty: ?Type,
) ComptimePtrLoadError!ComptimePtrLoadKit {
    const mod = sema.mod;
    const target = mod.getTarget();

    var deref: ComptimePtrLoadKit = switch (mod.intern_pool.indexToKey(ptr_val.toIntern())) {
        .ptr => |ptr| switch (ptr.addr) {
            .decl, .mut_decl => blk: {
                const decl_index = switch (ptr.addr) {
                    .decl => |decl| decl,
                    .mut_decl => |mut_decl| mut_decl.decl,
                    else => unreachable,
                };
                const is_mutable = ptr.addr == .mut_decl;
                const decl = mod.declPtr(decl_index);
                const decl_tv = try decl.typedValue();
                if (decl.val.getVariable(mod) != null) return error.RuntimeLoad;

                const layout_defined = decl.ty.hasWellDefinedLayout(mod);
                break :blk ComptimePtrLoadKit{
                    .parent = if (layout_defined) .{ .tv = decl_tv, .byte_offset = 0 } else null,
                    .pointee = decl_tv,
                    .is_mutable = is_mutable,
                    .ty_without_well_defined_layout = if (!layout_defined) decl.ty else null,
                };
            },
            .int => return error.RuntimeLoad,
            .eu_payload, .opt_payload => |container_ptr| blk: {
                const container_ty = mod.intern_pool.typeOf(container_ptr).toType().childType(mod);
                const payload_ty = switch (ptr.addr) {
                    .eu_payload => container_ty.errorUnionPayload(mod),
                    .opt_payload => container_ty.optionalChild(mod),
                    else => unreachable,
                };
                var deref = try sema.beginComptimePtrLoad(block, src, container_ptr.toValue(), container_ty);

                // eu_payload and opt_payload never have a well-defined layout
                if (deref.parent != null) {
                    deref.parent = null;
                    deref.ty_without_well_defined_layout = container_ty;
                }

                if (deref.pointee) |*tv| {
                    const coerce_in_mem_ok =
                        (try sema.coerceInMemoryAllowed(block, container_ty, tv.ty, false, target, src, src)) == .ok or
                        (try sema.coerceInMemoryAllowed(block, tv.ty, container_ty, false, target, src, src)) == .ok;
                    if (coerce_in_mem_ok) {
                        const payload_val = switch (tv.val.ip_index) {
                            .none => tv.val.cast(Value.Payload.SubValue).?.data,
                            .null_value => return sema.fail(block, src, "attempt to use null value", .{}),
                            else => switch (mod.intern_pool.indexToKey(tv.val.toIntern())) {
                                .error_union => |error_union| switch (error_union.val) {
                                    .err_name => |err_name| return sema.fail(
                                        block,
                                        src,
                                        "attempt to unwrap error: {}",
                                        .{err_name.fmt(&mod.intern_pool)},
                                    ),
                                    .payload => |payload| payload,
                                },
                                .opt => |opt| switch (opt.val) {
                                    .none => return sema.fail(block, src, "attempt to use null value", .{}),
                                    else => |payload| payload,
                                },
                                else => unreachable,
                            }.toValue(),
                        };
                        tv.* = TypedValue{ .ty = payload_ty, .val = payload_val };
                        break :blk deref;
                    }
                }
                deref.pointee = null;
                break :blk deref;
            },
            .comptime_field => |comptime_field| blk: {
                const field_ty = mod.intern_pool.typeOf(comptime_field).toType();
                break :blk ComptimePtrLoadKit{
                    .parent = null,
                    .pointee = .{ .ty = field_ty, .val = comptime_field.toValue() },
                    .is_mutable = false,
                    .ty_without_well_defined_layout = field_ty,
                };
            },
            .elem => |elem_ptr| blk: {
                const elem_ty = mod.intern_pool.typeOf(elem_ptr.base).toType().elemType2(mod);
                var deref = try sema.beginComptimePtrLoad(block, src, elem_ptr.base.toValue(), null);

                // This code assumes that elem_ptrs have been "flattened" in order for direct dereference
                // to succeed, meaning that elem ptrs of the same elem_ty are coalesced. Here we check that
                // our parent is not an elem_ptr with the same elem_ty, since that would be "unflattened"
                switch (mod.intern_pool.indexToKey(elem_ptr.base)) {
                    .ptr => |base_ptr| switch (base_ptr.addr) {
                        .elem => |base_elem| assert(!mod.intern_pool.typeOf(base_elem.base).toType().elemType2(mod).eql(elem_ty, mod)),
                        else => {},
                    },
                    else => {},
                }

                if (elem_ptr.index != 0) {
                    if (elem_ty.hasWellDefinedLayout(mod)) {
                        if (deref.parent) |*parent| {
                            // Update the byte offset (in-place)
                            const elem_size = try sema.typeAbiSize(elem_ty);
                            const offset = parent.byte_offset + elem_size * elem_ptr.index;
                            parent.byte_offset = try sema.usizeCast(block, src, offset);
                        }
                    } else {
                        deref.parent = null;
                        deref.ty_without_well_defined_layout = elem_ty;
                    }
                }

                // If we're loading an elem that was derived from a different type
                // than the true type of the underlying decl, we cannot deref directly
                const ty_matches = if (deref.pointee != null and deref.pointee.?.ty.isArrayOrVector(mod)) x: {
                    const deref_elem_ty = deref.pointee.?.ty.childType(mod);
                    break :x (try sema.coerceInMemoryAllowed(block, deref_elem_ty, elem_ty, false, target, src, src)) == .ok or
                        (try sema.coerceInMemoryAllowed(block, elem_ty, deref_elem_ty, false, target, src, src)) == .ok;
                } else false;
                if (!ty_matches) {
                    deref.pointee = null;
                    break :blk deref;
                }

                var array_tv = deref.pointee.?;
                const check_len = array_tv.ty.arrayLenIncludingSentinel(mod);
                if (maybe_array_ty) |load_ty| {
                    // It's possible that we're loading a [N]T, in which case we'd like to slice
                    // the pointee array directly from our parent array.
                    if (load_ty.isArrayOrVector(mod) and load_ty.childType(mod).eql(elem_ty, mod)) {
                        const len = try sema.usizeCast(block, src, load_ty.arrayLenIncludingSentinel(mod));
                        const elem_idx = try sema.usizeCast(block, src, elem_ptr.index);
                        deref.pointee = if (elem_ptr.index + len <= check_len) TypedValue{
                            .ty = try mod.arrayType(.{
                                .len = len,
                                .child = elem_ty.toIntern(),
                            }),
                            .val = try array_tv.val.sliceArray(mod, sema.arena, elem_idx, elem_idx + len),
                        } else null;
                        break :blk deref;
                    }
                }

                if (elem_ptr.index >= check_len) {
                    deref.pointee = null;
                    break :blk deref;
                }
                if (elem_ptr.index == check_len - 1) {
                    if (array_tv.ty.sentinel(mod)) |sent| {
                        deref.pointee = TypedValue{
                            .ty = elem_ty,
                            .val = sent,
                        };
                        break :blk deref;
                    }
                }
                deref.pointee = TypedValue{
                    .ty = elem_ty,
                    .val = try array_tv.val.elemValue(mod, @as(usize, @intCast(elem_ptr.index))),
                };
                break :blk deref;
            },
            .field => |field_ptr| blk: {
                const field_index = @as(u32, @intCast(field_ptr.index));
                const container_ty = mod.intern_pool.typeOf(field_ptr.base).toType().childType(mod);
                var deref = try sema.beginComptimePtrLoad(block, src, field_ptr.base.toValue(), container_ty);

                if (container_ty.hasWellDefinedLayout(mod)) {
                    const struct_obj = mod.typeToStruct(container_ty);
                    if (struct_obj != null and struct_obj.?.layout == .Packed) {
                        // packed structs are not byte addressable
                        deref.parent = null;
                    } else if (deref.parent) |*parent| {
                        // Update the byte offset (in-place)
                        try sema.resolveTypeLayout(container_ty);
                        const field_offset = container_ty.structFieldOffset(field_index, mod);
                        parent.byte_offset = try sema.usizeCast(block, src, parent.byte_offset + field_offset);
                    }
                } else {
                    deref.parent = null;
                    deref.ty_without_well_defined_layout = container_ty;
                }

                const tv = deref.pointee orelse {
                    deref.pointee = null;
                    break :blk deref;
                };
                const coerce_in_mem_ok =
                    (try sema.coerceInMemoryAllowed(block, container_ty, tv.ty, false, target, src, src)) == .ok or
                    (try sema.coerceInMemoryAllowed(block, tv.ty, container_ty, false, target, src, src)) == .ok;
                if (!coerce_in_mem_ok) {
                    deref.pointee = null;
                    break :blk deref;
                }

                if (container_ty.isSlice(mod)) {
                    deref.pointee = switch (field_index) {
                        Value.slice_ptr_index => TypedValue{
                            .ty = container_ty.slicePtrFieldType(mod),
                            .val = tv.val.slicePtr(mod),
                        },
                        Value.slice_len_index => TypedValue{
                            .ty = Type.usize,
                            .val = mod.intern_pool.indexToKey(try tv.val.intern(tv.ty, mod)).ptr.len.toValue(),
                        },
                        else => unreachable,
                    };
                } else {
                    const field_ty = container_ty.structFieldType(field_index, mod);
                    deref.pointee = TypedValue{
                        .ty = field_ty,
                        .val = try tv.val.fieldValue(mod, field_index),
                    };
                }
                break :blk deref;
            },
        },
        .opt => |opt| switch (opt.val) {
            .none => return sema.fail(block, src, "attempt to use null value", .{}),
            else => |payload| try sema.beginComptimePtrLoad(block, src, payload.toValue(), null),
        },
        else => unreachable,
    };

    if (deref.pointee) |tv| {
        if (deref.parent == null and tv.ty.hasWellDefinedLayout(mod)) {
            deref.parent = .{ .tv = tv, .byte_offset = 0 };
        }
    }
    return deref;
}

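/// Semantic analysis of a bitcast: verifies that source and destination types
/// have the same bit size, evaluates the cast at comptime when the operand
/// value is known, and otherwise emits a runtime bitcast instruction.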
fn bitCast(
    sema: *Sema,
    block: *Block,
    dest_ty: Type,
    inst: Air.Inst.Ref,
    inst_src: LazySrcLoc,
    operand_src: ?LazySrcLoc,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    try sema.resolveTypeLayout(dest_ty);

    const old_ty = sema.typeOf(inst);
    try sema.resolveTypeLayout(old_ty);

    const dest_bits = dest_ty.bitSize(mod);
    const old_bits = old_ty.bitSize(mod);

    if (old_bits != dest_bits) {
        return sema.fail(block, inst_src, "@bitCast size mismatch: destination type '{}' has {d} bits but source type '{}' has {d} bits", .{
            dest_ty.fmt(mod),
            dest_bits,
            old_ty.fmt(mod),
            old_bits,
        });
    }

    if (try sema.resolveMaybeUndefVal(inst)) |val| {
        if (try sema.bitCastVal(block, inst_src, val, old_ty, dest_ty, 0)) |result_val| {
            return Air.internedToRef(result_val.toIntern());
        }
    }
    try sema.requireRuntimeBlock(block, inst_src, operand_src);
    return block.addBitCast(dest_ty, inst);
}

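/// Attempts to bitcast `val` from `old_ty` to `new_ty` at comptime by writing
/// it to an in-memory byte buffer and reading it back. Returns null if the
/// value cannot be serialized, e.g. because it contains a decl reference.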
fn bitCastVal(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    val: Value,
    old_ty: Type,
    new_ty: Type,
    buffer_offset: usize,
) !?Value {
    const mod = sema.mod;
    if (old_ty.eql(new_ty, mod)) return val;

    // For types with well-defined memory layouts, we serialize them into a byte buffer,
    // then deserialize to the new type.
    const abi_size = try sema.usizeCast(block, src, old_ty.abiSize(mod));
    const buffer = try sema.gpa.alloc(u8, abi_size);
    defer sema.gpa.free(buffer);
    val.writeToMemory(old_ty, mod, buffer) catch |err| switch (err) {
        error.OutOfMemory => return error.OutOfMemory,
        error.ReinterpretDeclRef => return null,
        error.IllDefinedMemoryLayout => unreachable, // Sema was supposed to emit a compile error already
        error.Unimplemented => return sema.fail(block, src, "TODO: implement writeToMemory for type '{}'", .{old_ty.fmt(mod)}),
    };
    return try Value.readFromMemory(new_ty, mod, buffer[buffer_offset..], sema.arena);
}

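/// Coerces a pointer-to-array to a slice, building the slice value at comptime
/// when the operand is known and otherwise emitting an `array_to_slice`
/// instruction.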
fn coerceArrayPtrToSlice(
    sema: *Sema,
    block: *Block,
    dest_ty: Type,
    inst: Air.Inst.Ref,
    inst_src: LazySrcLoc,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    if (try sema.resolveMaybeUndefVal(inst)) |val| {
        const ptr_array_ty = sema.typeOf(inst);
        const array_ty = ptr_array_ty.childType(mod);
        const slice_val = try mod.intern(.{ .ptr = .{
            .ty = dest_ty.toIntern(),
            .addr = switch (mod.intern_pool.indexToKey(val.toIntern())) {
                .undef => .{ .int = try mod.intern(.{ .undef = .usize_type }) },
                .ptr => |ptr| ptr.addr,
                else => unreachable,
            },
            .len = (try mod.intValue(Type.usize, array_ty.arrayLen(mod))).toIntern(),
        } });
        return Air.internedToRef(slice_val);
    }
    try sema.requireRuntimeBlock(block, inst_src, null);
    return block.addTyOp(.array_to_slice, dest_ty, inst);
}

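/// Returns true if the const/volatile qualifiers, address space, and alignment
/// of `inst_ty` are compatible with `dest_ty`; otherwise records the mismatch
/// in `in_memory_result` and returns false.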
fn checkPtrAttributes(sema: *Sema, dest_ty: Type, inst_ty: Type, in_memory_result: *InMemoryCoercionResult) bool {
    const mod = sema.mod;
    const dest_info = dest_ty.ptrInfo(mod);
    const inst_info = inst_ty.ptrInfo(mod);
    const len0 = (inst_info.child.toType().zigTypeTag(mod) == .Array and (inst_info.child.toType().arrayLenIncludingSentinel(mod) == 0 or
        (inst_info.child.toType().arrayLen(mod) == 0 and dest_info.sentinel == .none and dest_info.flags.size != .C and dest_info.flags.size != .Many))) or
        (inst_info.child.toType().isTuple(mod) and inst_info.child.toType().structFieldCount(mod) == 0);

    const ok_cv_qualifiers =
        ((!inst_info.flags.is_const or dest_info.flags.is_const) or len0) and
        (!inst_info.flags.is_volatile or dest_info.flags.is_volatile);

    if (!ok_cv_qualifiers) {
        in_memory_result.* = .{ .ptr_qualifiers = .{
            .actual_const = inst_info.flags.is_const,
            .wanted_const = dest_info.flags.is_const,
            .actual_volatile = inst_info.flags.is_volatile,
            .wanted_volatile = dest_info.flags.is_volatile,
        } };
        return false;
    }
    if (dest_info.flags.address_space != inst_info.flags.address_space) {
        in_memory_result.* = .{ .ptr_addrspace = .{
            .actual = inst_info.flags.address_space,
            .wanted = dest_info.flags.address_space,
        } };
        return false;
    }
    if (inst_info.flags.alignment == .none and dest_info.flags.alignment == .none) return true;
    if (len0) return true;

    const inst_align = inst_info.flags.alignment.toByteUnitsOptional() orelse
        inst_info.child.toType().abiAlignment(mod);

    const dest_align = dest_info.flags.alignment.toByteUnitsOptional() orelse
        dest_info.child.toType().abiAlignment(mod);

    if (dest_align > inst_align) {
        in_memory_result.* = .{ .ptr_alignment = .{
            .actual = inst_align,
            .wanted = dest_align,
        } };
        return false;
    }
    return true;
}

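/// Coerces between pointer types already known to be in-memory compatible,
/// inserting a runtime safety check when a possibly-zero pointer is cast to a
/// non-allowzero pointer type.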
fn coerceCompatiblePtrs(
    sema: *Sema,
    block: *Block,
    dest_ty: Type,
    inst: Air.Inst.Ref,
    inst_src: LazySrcLoc,
) !Air.Inst.Ref {
    const mod = sema.mod;
    const inst_ty = sema.typeOf(inst);
    if (try sema.resolveMaybeUndefVal(inst)) |val| {
        if (!val.isUndef(mod) and val.isNull(mod) and !dest_ty.isAllowzeroPtr(mod)) {
            return sema.fail(block, inst_src, "null pointer casted to type '{}'", .{dest_ty.fmt(sema.mod)});
        }
        // The comptime Value representation is compatible with both types.
        return Air.internedToRef(
            (try mod.getCoerced((try val.intern(inst_ty, mod)).toValue(), dest_ty)).toIntern(),
        );
    }
    try sema.requireRuntimeBlock(block, inst_src, null);
    const inst_allows_zero = inst_ty.zigTypeTag(mod) != .Pointer or inst_ty.ptrAllowsZero(mod);
    if (block.wantSafety() and inst_allows_zero and !dest_ty.ptrAllowsZero(mod) and
        (try sema.typeHasRuntimeBits(dest_ty.elemType2(mod)) or dest_ty.elemType2(mod).zigTypeTag(mod) == .Fn))
    {
        const actual_ptr = if (inst_ty.isSlice(mod))
            try sema.analyzeSlicePtr(block, inst_src, inst, inst_ty)
        else
            inst;
        const ptr_int = try block.addUnOp(.int_from_ptr, actual_ptr);
        const is_non_zero = try block.addBinOp(.cmp_neq, ptr_int, .zero_usize);
        const ok = if (inst_ty.isSlice(mod)) ok: {
            const len = try sema.analyzeSliceLen(block, inst_src, inst);
            const len_zero = try block.addBinOp(.cmp_eq, len, .zero_usize);
            break :ok try block.addBinOp(.bit_or, len_zero, is_non_zero);
        } else is_non_zero;
        try sema.addSafetyCheck(block, inst_src, ok, .cast_to_null);
    }
    return sema.bitCast(block, dest_ty, inst, inst_src, null);
}

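/// Coerces an enum value to a tagged union. At comptime the selected field
/// must have only one possible value; at runtime the coercion is only allowed
/// when every union field is zero-bit.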
fn coerceEnumToUnion(
    sema: *Sema,
    block: *Block,
    union_ty: Type,
    union_ty_src: LazySrcLoc,
    inst: Air.Inst.Ref,
    inst_src: LazySrcLoc,
) !Air.Inst.Ref {
    const mod = sema.mod;
    const ip = &mod.intern_pool;
    const inst_ty = sema.typeOf(inst);

    const tag_ty = union_ty.unionTagType(mod) orelse {
        const msg = msg: {
            const msg = try sema.errMsg(block, inst_src, "expected type '{}', found '{}'", .{
                union_ty.fmt(sema.mod), inst_ty.fmt(sema.mod),
            });
            errdefer msg.destroy(sema.gpa);
            try sema.errNote(block, union_ty_src, msg, "cannot coerce enum to untagged union", .{});
            try sema.addDeclaredHereNote(msg, union_ty);
            break :msg msg;
        };
        return sema.failWithOwnedErrorMsg(msg);
    };

    const enum_tag = try sema.coerce(block, tag_ty, inst, inst_src);
    if (try sema.resolveDefinedValue(block, inst_src, enum_tag)) |val| {
        const field_index = union_ty.unionTagFieldIndex(val, sema.mod) orelse {
            const msg = msg: {
                const msg = try sema.errMsg(block, inst_src, "union '{}' has no tag with value '{}'", .{
                    union_ty.fmt(sema.mod), val.fmtValue(tag_ty, sema.mod),
                });
                errdefer msg.destroy(sema.gpa);
                try sema.addDeclaredHereNote(msg, union_ty);
                break :msg msg;
            };
            return sema.failWithOwnedErrorMsg(msg);
        };

        const union_obj = mod.typeToUnion(union_ty).?;
        const field = union_obj.fields.values()[field_index];
        try sema.resolveTypeFields(field.ty);
        if (field.ty.zigTypeTag(mod) == .NoReturn) {
            const msg = msg: {
                const msg = try sema.errMsg(block, inst_src, "cannot initialize 'noreturn' field of union", .{});
                errdefer msg.destroy(sema.gpa);

                const field_name = union_obj.fields.keys()[field_index];
                try sema.addFieldErrNote(union_ty, field_index, msg, "field '{}' declared here", .{
                    field_name.fmt(ip),
                });
                try sema.addDeclaredHereNote(msg, union_ty);
                break :msg msg;
            };
            return sema.failWithOwnedErrorMsg(msg);
        }
        const opv = (try sema.typeHasOnePossibleValue(field.ty)) orelse {
            const msg = msg: {
                const field_name = union_obj.fields.keys()[field_index];
                const msg = try sema.errMsg(block, inst_src, "coercion from enum '{}' to union '{}' must initialize '{}' field '{}'", .{
                    inst_ty.fmt(sema.mod), union_ty.fmt(sema.mod),
                    field.ty.fmt(sema.mod), field_name.fmt(ip),
                });
                errdefer msg.destroy(sema.gpa);

                try sema.addFieldErrNote(union_ty, field_index, msg, "field '{}' declared here", .{
                    field_name.fmt(ip),
                });
                try sema.addDeclaredHereNote(msg, union_ty);
                break :msg msg;
            };
            return sema.failWithOwnedErrorMsg(msg);
        };

        return Air.internedToRef((try mod.unionValue(union_ty, val, opv)).toIntern());
    }

    try sema.requireRuntimeBlock(block, inst_src, null);

    if (tag_ty.isNonexhaustiveEnum(mod)) {
        const msg = msg: {
            const msg = try sema.errMsg(block, inst_src, "runtime coercion to union '{}' from non-exhaustive enum", .{
                union_ty.fmt(sema.mod),
            });
            errdefer msg.destroy(sema.gpa);
            try sema.addDeclaredHereNote(msg, tag_ty);
            break :msg msg;
        };
        return sema.failWithOwnedErrorMsg(msg);
    }

    const union_obj = mod.typeToUnion(union_ty).?;
    {
        var msg: ?*Module.ErrorMsg = null;
        errdefer if (msg) |some| some.destroy(sema.gpa);

        for (union_obj.fields.values(), 0..) |field, i| {
            if (field.ty.zigTypeTag(mod) == .NoReturn) {
                const err_msg = msg orelse try sema.errMsg(
                    block,
                    inst_src,
                    "runtime coercion from enum '{}' to union '{}' which has a 'noreturn' field",
                    .{ tag_ty.fmt(sema.mod), union_ty.fmt(sema.mod) },
                );
                msg = err_msg;

                try sema.addFieldErrNote(union_ty, i, err_msg, "'noreturn' field here", .{});
            }
        }
        if (msg) |some| {
            msg = null;
            try sema.addDeclaredHereNote(some, union_ty);
            return sema.failWithOwnedErrorMsg(some);
        }
    }

    // If the union has all fields 0 bits, the union value is just the enum value.
    if (union_ty.unionHasAllZeroBitFieldTypes(mod)) {
        return block.addBitCast(union_ty, enum_tag);
    }

    const msg = msg: {
        const msg = try sema.errMsg(
            block,
            inst_src,
            "runtime coercion from enum '{}' to union '{}' which has non-void fields",
            .{ tag_ty.fmt(sema.mod), union_ty.fmt(sema.mod) },
        );
        errdefer msg.destroy(sema.gpa);

        var it = union_obj.fields.iterator();
        var field_index: usize = 0;
        while (it.next()) |field| : (field_index += 1) {
            const field_name = field.key_ptr.*;
            const field_ty = field.value_ptr.ty;
            if (!(try sema.typeHasRuntimeBits(field_ty))) continue;
            try sema.addFieldErrNote(union_ty, field_index, msg, "field '{}' has type '{}'", .{
                field_name.fmt(ip),
                field_ty.fmt(sema.mod),
            });
        }
        try sema.addDeclaredHereNote(msg, union_ty);
        break :msg msg;
    };
    return sema.failWithOwnedErrorMsg(msg);
}

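/// Coerces an anonymous struct or tuple literal with exactly one field to a
/// union by initializing the union field of the same name; zero or multiple
/// fields is a compile error.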
fn coerceAnonStructToUnion(
    sema: *Sema,
    block: *Block,
    union_ty: Type,
    union_ty_src: LazySrcLoc,
    inst: Air.Inst.Ref,
    inst_src: LazySrcLoc,
) !Air.Inst.Ref {
    const mod = sema.mod;
    const inst_ty = sema.typeOf(inst);
    const field_info: union(enum) {
        name: InternPool.NullTerminatedString,
        count: usize,
    } = switch (mod.intern_pool.indexToKey(inst_ty.toIntern())) {
        .anon_struct_type => |anon_struct_type| if (anon_struct_type.names.len == 1)
            .{ .name = anon_struct_type.names[0] }
        else
            .{ .count = anon_struct_type.names.len },
        .struct_type => |struct_type| name: {
            const field_names = mod.structPtrUnwrap(struct_type.index).?.fields.keys();
            break :name if (field_names.len == 1)
                .{ .name = field_names[0] }
            else
                .{ .count = field_names.len };
        },
        else => unreachable,
    };
    switch (field_info) {
        .name => |field_name| {
            const init = try sema.structFieldVal(block, inst_src, inst, field_name, inst_src, inst_ty);
            return sema.unionInit(block, init, inst_src, union_ty, union_ty_src, field_name, inst_src);
        },
        .count => |field_count| {
            assert(field_count != 1);
            const msg = msg: {
                const msg = if (field_count > 1) try sema.errMsg(
                    block,
                    inst_src,
                    "cannot initialize multiple union fields at once; unions can only have one active field",
                    .{},
                ) else try sema.errMsg(
                    block,
                    inst_src,
                    "union initializer must initialize one field",
                    .{},
                );
                errdefer msg.destroy(sema.gpa);

                // TODO add notes for where the anon struct was created to point out
                // the extra fields.

                try sema.addDeclaredHereNote(msg, union_ty);
                break :msg msg;
            };
            return sema.failWithOwnedErrorMsg(msg);
        },
    }
}

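/// Pointer variant of `coerceAnonStructToUnion`: loads the anon struct through
/// the pointer, coerces it, and takes a reference to the result.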
fn coerceAnonStructToUnionPtrs(
    sema: *Sema,
    block: *Block,
    ptr_union_ty: Type,
    union_ty_src: LazySrcLoc,
    ptr_anon_struct: Air.Inst.Ref,
    anon_struct_src: LazySrcLoc,
) !Air.Inst.Ref {
    const mod = sema.mod;
    const union_ty = ptr_union_ty.childType(mod);
    const anon_struct = try sema.analyzeLoad(block, anon_struct_src, ptr_anon_struct, anon_struct_src);
    const union_inst = try sema.coerceAnonStructToUnion(block, union_ty, union_ty_src, anon_struct, anon_struct_src);
    return sema.analyzeRef(block, union_ty_src, union_inst);
}

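/// Pointer variant of tuple/anon-struct to struct coercion: loads through the
/// pointer, coerces via `coerceTupleToStruct`, and takes a reference to the
/// result.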
fn coerceAnonStructToStructPtrs(
    sema: *Sema,
    block: *Block,
    ptr_struct_ty: Type,
    struct_ty_src: LazySrcLoc,
    ptr_anon_struct: Air.Inst.Ref,
    anon_struct_src: LazySrcLoc,
) !Air.Inst.Ref {
    const mod = sema.mod;
    const struct_ty = ptr_struct_ty.childType(mod);
    const anon_struct = try sema.analyzeLoad(block, anon_struct_src, ptr_anon_struct, anon_struct_src);
    const struct_inst = try sema.coerceTupleToStruct(block, struct_ty, anon_struct, anon_struct_src);
    return sema.analyzeRef(block, struct_ty_src, struct_inst);
}

/// If the lengths match, coerces element-wise.
fn coerceArrayLike(
    sema: *Sema,
    block: *Block,
    dest_ty: Type,
    dest_ty_src: LazySrcLoc,
    inst: Air.Inst.Ref,
    inst_src: LazySrcLoc,
) !Air.Inst.Ref {
    const mod = sema.mod;
    const inst_ty = sema.typeOf(inst);
    const target = mod.getTarget();

    // try coercion of the whole array
    const in_memory_result = try sema.coerceInMemoryAllowed(block, dest_ty, inst_ty, false, target, dest_ty_src, inst_src);
    if (in_memory_result == .ok) {
        if (try sema.resolveMaybeUndefVal(inst)) |inst_val| {
            // These types share the same comptime value representation.
            return sema.coerceInMemory(inst_val, dest_ty);
        }
        try sema.requireRuntimeBlock(block, inst_src, null);
        return block.addBitCast(dest_ty, inst);
    }

    // otherwise, try element by element
    const inst_len = inst_ty.arrayLen(mod);
    const dest_len = try sema.usizeCast(block, dest_ty_src, dest_ty.arrayLen(mod));
    if (dest_len != inst_len) {
        const msg = msg: {
            const msg = try sema.errMsg(block, inst_src, "expected type '{}', found '{}'", .{
                dest_ty.fmt(mod), inst_ty.fmt(mod),
            });
            errdefer msg.destroy(sema.gpa);
            try sema.errNote(block, dest_ty_src, msg, "destination has length {d}", .{dest_len});
            try sema.errNote(block, inst_src, msg, "source has length {d}", .{inst_len});
            break :msg msg;
        };
        return sema.failWithOwnedErrorMsg(msg);
    }

    const dest_elem_ty = dest_ty.childType(mod);
    const element_vals = try sema.arena.alloc(InternPool.Index, dest_len);
    const element_refs = try sema.arena.alloc(Air.Inst.Ref, dest_len);
    var runtime_src: ?LazySrcLoc = null;

    for (element_vals, element_refs, 0..) |*val, *ref, i| {
        const index_ref = Air.internedToRef((try mod.intValue(Type.usize, i)).toIntern());
        const src = inst_src; // TODO better source location
        const elem_src = inst_src; // TODO better source location
        const elem_ref = try sema.elemValArray(block, src, inst_src, inst, elem_src, index_ref, true);
        const coerced = try sema.coerce(block, dest_elem_ty, elem_ref, elem_src);
        ref.* = coerced;
        if (runtime_src == null) {
            if (try sema.resolveMaybeUndefVal(coerced)) |elem_val| {
                val.* = try elem_val.intern(dest_elem_ty, mod);
            } else {
                runtime_src = elem_src;
            }
        }
    }

    if (runtime_src) |rs| {
        try sema.requireRuntimeBlock(block, inst_src, rs);
        return block.addAggregateInit(dest_ty, element_refs);
    }

    return Air.internedToRef((try mod.intern(.{ .aggregate = .{
        .ty = dest_ty.toIntern(),
        .storage = .{ .elems = element_vals },
    } })));
}

/// If the lengths match, coerces element-wise.
|
|
fn coerceTupleToArray(
|
|
sema: *Sema,
|
|
block: *Block,
|
|
dest_ty: Type,
|
|
dest_ty_src: LazySrcLoc,
|
|
inst: Air.Inst.Ref,
|
|
inst_src: LazySrcLoc,
|
|
) !Air.Inst.Ref {
|
|
const mod = sema.mod;
|
|
const inst_ty = sema.typeOf(inst);
|
|
const inst_len = inst_ty.arrayLen(mod);
|
|
const dest_len = dest_ty.arrayLen(mod);
|
|
|
|
if (dest_len != inst_len) {
|
|
const msg = msg: {
|
|
const msg = try sema.errMsg(block, inst_src, "expected type '{}', found '{}'", .{
|
|
dest_ty.fmt(sema.mod), inst_ty.fmt(sema.mod),
|
|
});
|
|
errdefer msg.destroy(sema.gpa);
|
|
try sema.errNote(block, dest_ty_src, msg, "destination has length {d}", .{dest_len});
|
|
try sema.errNote(block, inst_src, msg, "source has length {d}", .{inst_len});
|
|
break :msg msg;
|
|
};
|
|
return sema.failWithOwnedErrorMsg(msg);
|
|
}
|
|
|
|
const dest_elems = try sema.usizeCast(block, dest_ty_src, dest_len);
|
|
const element_vals = try sema.arena.alloc(InternPool.Index, dest_elems);
|
|
const element_refs = try sema.arena.alloc(Air.Inst.Ref, dest_elems);
|
|
const dest_elem_ty = dest_ty.childType(mod);
|
|
|
|
var runtime_src: ?LazySrcLoc = null;
|
|
for (element_vals, element_refs, 0..) |*val, *ref, i_usize| {
|
|
const i = @as(u32, @intCast(i_usize));
|
|
if (i_usize == inst_len) {
|
|
const sentinel_val = dest_ty.sentinel(mod).?;
|
|
val.* = sentinel_val.toIntern();
|
|
ref.* = Air.internedToRef(sentinel_val.toIntern());
|
|
break;
|
|
}
|
|
const elem_src = inst_src; // TODO better source location
|
|
const elem_ref = try sema.tupleField(block, inst_src, inst, elem_src, i);
|
|
const coerced = try sema.coerce(block, dest_elem_ty, elem_ref, elem_src);
|
|
ref.* = coerced;
|
|
if (runtime_src == null) {
|
|
if (try sema.resolveMaybeUndefVal(coerced)) |elem_val| {
|
|
val.* = try elem_val.intern(dest_elem_ty, mod);
|
|
} else {
|
|
runtime_src = elem_src;
|
|
}
|
|
}
|
|
}
|
|
|
|
if (runtime_src) |rs| {
|
|
try sema.requireRuntimeBlock(block, inst_src, rs);
|
|
return block.addAggregateInit(dest_ty, element_refs);
|
|
}
|
|
|
|
return Air.internedToRef((try mod.intern(.{ .aggregate = .{
|
|
.ty = dest_ty.toIntern(),
|
|
.storage = .{ .elems = element_vals },
|
|
} })));
|
|
}
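
// For example, this handles a tuple literal being used where an array is
// expected:
//     const arr: [2]u32 = .{ 1, 2 }; // tuple coerced element-wise
// When the destination array has a sentinel, the trailing element is filled
// in from the destination type, as in the loop above.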

/// If the lengths match, coerces element-wise.
fn coerceTupleToSlicePtrs(
    sema: *Sema,
    block: *Block,
    slice_ty: Type,
    slice_ty_src: LazySrcLoc,
    ptr_tuple: Air.Inst.Ref,
    tuple_src: LazySrcLoc,
) !Air.Inst.Ref {
    const mod = sema.mod;
    const tuple_ty = sema.typeOf(ptr_tuple).childType(mod);
    const tuple = try sema.analyzeLoad(block, tuple_src, ptr_tuple, tuple_src);
    const slice_info = slice_ty.ptrInfo(mod);
    const array_ty = try mod.arrayType(.{
        .len = tuple_ty.structFieldCount(mod),
        .sentinel = slice_info.sentinel,
        .child = slice_info.child,
    });
    const array_inst = try sema.coerceTupleToArray(block, array_ty, slice_ty_src, tuple, tuple_src);
    if (slice_info.flags.alignment != .none) {
        return sema.fail(block, slice_ty_src, "TODO: override the alignment of the array decl we create here", .{});
    }
    const ptr_array = try sema.analyzeRef(block, slice_ty_src, array_inst);
    return sema.coerceArrayPtrToSlice(block, slice_ty, ptr_array, slice_ty_src);
}
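
// For example, taking the address of a tuple literal where a slice is
// expected:
//     const s: []const u8 = &.{ 1, 2, 3 };
// goes through an intermediate array of matching length, whose address is
// then coerced from array pointer to slice.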

/// If the lengths match, coerces element-wise.
fn coerceTupleToArrayPtrs(
    sema: *Sema,
    block: *Block,
    ptr_array_ty: Type,
    array_ty_src: LazySrcLoc,
    ptr_tuple: Air.Inst.Ref,
    tuple_src: LazySrcLoc,
) !Air.Inst.Ref {
    const mod = sema.mod;
    const tuple = try sema.analyzeLoad(block, tuple_src, ptr_tuple, tuple_src);
    const ptr_info = ptr_array_ty.ptrInfo(mod);
    const array_ty = ptr_info.child.toType();
    const array_inst = try sema.coerceTupleToArray(block, array_ty, array_ty_src, tuple, tuple_src);
    if (ptr_info.flags.alignment != .none) {
        return sema.fail(block, array_ty_src, "TODO: override the alignment of the array decl we create here", .{});
    }
    const ptr_array = try sema.analyzeRef(block, array_ty_src, array_inst);
    return ptr_array;
}
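
// Similarly, a pointer to a tuple can coerce to a pointer to an array, e.g.:
//     const p: *const [2]u8 = &.{ 1, 2 };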

/// Handles both tuples and anon struct literals. Coerces field-wise. Reports
/// errors for both extra fields and missing fields.
fn coerceTupleToStruct(
    sema: *Sema,
    block: *Block,
    struct_ty: Type,
    inst: Air.Inst.Ref,
    inst_src: LazySrcLoc,
) !Air.Inst.Ref {
    const mod = sema.mod;
    const ip = &mod.intern_pool;
    try sema.resolveTypeFields(struct_ty);

    if (struct_ty.isTupleOrAnonStruct(mod)) {
        return sema.coerceTupleToTuple(block, struct_ty, inst, inst_src);
    }

    const fields = struct_ty.structFields(mod);
    const field_vals = try sema.arena.alloc(InternPool.Index, fields.count());
    const field_refs = try sema.arena.alloc(Air.Inst.Ref, field_vals.len);
    @memset(field_refs, .none);

    const inst_ty = sema.typeOf(inst);
    var runtime_src: ?LazySrcLoc = null;
    const field_count = switch (ip.indexToKey(inst_ty.toIntern())) {
        .anon_struct_type => |anon_struct_type| anon_struct_type.types.len,
        .struct_type => |struct_type| if (mod.structPtrUnwrap(struct_type.index)) |struct_obj|
            struct_obj.fields.count()
        else
            0,
        else => unreachable,
    };
    for (0..field_count) |field_index_usize| {
        const field_i = @as(u32, @intCast(field_index_usize));
        const field_src = inst_src; // TODO better source location
        // https://github.com/ziglang/zig/issues/15709
        const field_name: InternPool.NullTerminatedString = switch (ip.indexToKey(inst_ty.toIntern())) {
            .anon_struct_type => |anon_struct_type| if (anon_struct_type.names.len > 0)
                anon_struct_type.names[field_i]
            else
                try ip.getOrPutStringFmt(sema.gpa, "{d}", .{field_i}),
            .struct_type => |struct_type| mod.structPtrUnwrap(struct_type.index).?.fields.keys()[field_i],
            else => unreachable,
        };
        const field_index = try sema.structFieldIndex(block, struct_ty, field_name, field_src);
        const field = fields.values()[field_index];
        const elem_ref = try sema.tupleField(block, inst_src, inst, field_src, field_i);
        const coerced = try sema.coerce(block, field.ty, elem_ref, field_src);
        field_refs[field_index] = coerced;
        if (field.is_comptime) {
            const init_val = (try sema.resolveMaybeUndefVal(coerced)) orelse {
                return sema.failWithNeededComptime(block, field_src, "value stored in comptime field must be comptime-known");
            };

            if (!init_val.eql(field.default_val.toValue(), field.ty, sema.mod)) {
                return sema.failWithInvalidComptimeFieldStore(block, field_src, inst_ty, field_i);
            }
        }
        if (runtime_src == null) {
            if (try sema.resolveMaybeUndefVal(coerced)) |field_val| {
                field_vals[field_index] = field_val.toIntern();
            } else {
                runtime_src = field_src;
            }
        }
    }

    // Populate default field values and report errors for missing fields.
    var root_msg: ?*Module.ErrorMsg = null;
    errdefer if (root_msg) |msg| msg.destroy(sema.gpa);

    for (field_refs, 0..) |*field_ref, i| {
        if (field_ref.* != .none) continue;

        const field_name = fields.keys()[i];
        const field = fields.values()[i];
        const field_src = inst_src; // TODO better source location
        if (field.default_val == .none) {
            const template = "missing struct field: {}";
            const args = .{field_name.fmt(ip)};
            if (root_msg) |msg| {
                try sema.errNote(block, field_src, msg, template, args);
            } else {
                root_msg = try sema.errMsg(block, field_src, template, args);
            }
            continue;
        }
        if (runtime_src == null) {
            field_vals[i] = field.default_val;
        } else {
            field_ref.* = Air.internedToRef(field.default_val);
        }
    }

    if (root_msg) |msg| {
        try sema.addDeclaredHereNote(msg, struct_ty);
        root_msg = null;
        return sema.failWithOwnedErrorMsg(msg);
    }

    if (runtime_src) |rs| {
        try sema.requireRuntimeBlock(block, inst_src, rs);
        return block.addAggregateInit(struct_ty, field_refs);
    }

    const struct_val = try mod.intern(.{ .aggregate = .{
        .ty = struct_ty.toIntern(),
        .storage = .{ .elems = field_vals },
    } });
    // TODO: figure out InternPool removals for incremental compilation
    //errdefer ip.remove(struct_val);

    return Air.internedToRef(struct_val);
}
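
// For example, an anonymous struct literal coercing to a concrete struct
// type:
//     const Point = struct { x: i32, y: i32 = 0 };
//     const p: Point = .{ .x = 1 }; // .y is filled from its default value
// Extra fields and missing non-defaulted fields are compile errors, as above.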

fn coerceTupleToTuple(
    sema: *Sema,
    block: *Block,
    tuple_ty: Type,
    inst: Air.Inst.Ref,
    inst_src: LazySrcLoc,
) !Air.Inst.Ref {
    const mod = sema.mod;
    const ip = &mod.intern_pool;
    const dest_field_count = switch (ip.indexToKey(tuple_ty.toIntern())) {
        .anon_struct_type => |anon_struct_type| anon_struct_type.types.len,
        .struct_type => |struct_type| if (mod.structPtrUnwrap(struct_type.index)) |struct_obj|
            struct_obj.fields.count()
        else
            0,
        else => unreachable,
    };
    const field_vals = try sema.arena.alloc(InternPool.Index, dest_field_count);
    const field_refs = try sema.arena.alloc(Air.Inst.Ref, field_vals.len);
    @memset(field_refs, .none);

    const inst_ty = sema.typeOf(inst);
    const src_field_count = switch (ip.indexToKey(inst_ty.toIntern())) {
        .anon_struct_type => |anon_struct_type| anon_struct_type.types.len,
        .struct_type => |struct_type| if (mod.structPtrUnwrap(struct_type.index)) |struct_obj|
            struct_obj.fields.count()
        else
            0,
        else => unreachable,
    };
    if (src_field_count > dest_field_count) return error.NotCoercible;

    var runtime_src: ?LazySrcLoc = null;
    for (0..dest_field_count) |field_index_usize| {
        const field_i = @as(u32, @intCast(field_index_usize));
        const field_src = inst_src; // TODO better source location
        // https://github.com/ziglang/zig/issues/15709
        const field_name: InternPool.NullTerminatedString = switch (ip.indexToKey(inst_ty.toIntern())) {
            .anon_struct_type => |anon_struct_type| if (anon_struct_type.names.len > 0)
                anon_struct_type.names[field_i]
            else
                try ip.getOrPutStringFmt(sema.gpa, "{d}", .{field_i}),
            .struct_type => |struct_type| mod.structPtrUnwrap(struct_type.index).?.fields.keys()[field_i],
            else => unreachable,
        };

        if (ip.stringEqlSlice(field_name, "len"))
            return sema.fail(block, field_src, "cannot assign to 'len' field of tuple", .{});

        const field_ty = switch (ip.indexToKey(tuple_ty.toIntern())) {
            .anon_struct_type => |anon_struct_type| anon_struct_type.types[field_index_usize].toType(),
            .struct_type => |struct_type| mod.structPtrUnwrap(struct_type.index).?.fields.values()[field_index_usize].ty,
            else => unreachable,
        };
        const default_val = switch (ip.indexToKey(tuple_ty.toIntern())) {
            .anon_struct_type => |anon_struct_type| anon_struct_type.values[field_index_usize],
            .struct_type => |struct_type| mod.structPtrUnwrap(struct_type.index).?.fields.values()[field_index_usize].default_val,
            else => unreachable,
        };

        const field_index = try sema.tupleFieldIndex(block, tuple_ty, field_name, field_src);

        const elem_ref = try sema.tupleField(block, inst_src, inst, field_src, field_i);
        const coerced = try sema.coerce(block, field_ty, elem_ref, field_src);
        field_refs[field_index] = coerced;
        if (default_val != .none) {
            const init_val = (try sema.resolveMaybeUndefVal(coerced)) orelse {
                return sema.failWithNeededComptime(block, field_src, "value stored in comptime field must be comptime-known");
            };

            if (!init_val.eql(default_val.toValue(), field_ty, sema.mod)) {
                return sema.failWithInvalidComptimeFieldStore(block, field_src, inst_ty, field_i);
            }
        }
        if (runtime_src == null) {
            if (try sema.resolveMaybeUndefVal(coerced)) |field_val| {
                field_vals[field_index] = field_val.toIntern();
            } else {
                runtime_src = field_src;
            }
        }
    }

    // Populate default field values and report errors for missing fields.
    var root_msg: ?*Module.ErrorMsg = null;
    errdefer if (root_msg) |msg| msg.destroy(sema.gpa);

    for (field_refs, 0..) |*field_ref, i| {
        if (field_ref.* != .none) continue;

        const default_val = switch (ip.indexToKey(tuple_ty.toIntern())) {
            .anon_struct_type => |anon_struct_type| anon_struct_type.values[i],
            .struct_type => |struct_type| mod.structPtrUnwrap(struct_type.index).?.fields.values()[i].default_val,
            else => unreachable,
        };

        const field_src = inst_src; // TODO better source location
        if (default_val == .none) {
            if (tuple_ty.isTuple(mod)) {
                const template = "missing tuple field: {d}";
                if (root_msg) |msg| {
                    try sema.errNote(block, field_src, msg, template, .{i});
                } else {
                    root_msg = try sema.errMsg(block, field_src, template, .{i});
                }
                continue;
            }
            const template = "missing struct field: {}";
            const args = .{tuple_ty.structFieldName(i, mod).fmt(ip)};
            if (root_msg) |msg| {
                try sema.errNote(block, field_src, msg, template, args);
            } else {
                root_msg = try sema.errMsg(block, field_src, template, args);
            }
            continue;
        }
        if (runtime_src == null) {
            field_vals[i] = default_val;
        } else {
            field_ref.* = Air.internedToRef(default_val);
        }
    }

    if (root_msg) |msg| {
        try sema.addDeclaredHereNote(msg, tuple_ty);
        root_msg = null;
        return sema.failWithOwnedErrorMsg(msg);
    }

    if (runtime_src) |rs| {
        try sema.requireRuntimeBlock(block, inst_src, rs);
        return block.addAggregateInit(tuple_ty, field_refs);
    }

    return Air.internedToRef((try mod.intern(.{ .aggregate = .{
        .ty = tuple_ty.toIntern(),
        .storage = .{ .elems = field_vals },
    } })));
}
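
// For example (illustrative), one tuple coercing to another tuple type whose
// fields coerce individually:
//     const src = .{ @as(u8, 1), @as(u16, 2) };
//     const dst: struct { u32, u32 } = src; // field-wise coercion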

fn analyzeDeclVal(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    decl_index: Decl.Index,
) CompileError!Air.Inst.Ref {
    try sema.addReferencedBy(block, src, decl_index);
    if (sema.decl_val_table.get(decl_index)) |result| {
        return result;
    }
    const decl_ref = try sema.analyzeDeclRefInner(decl_index, false);
    const result = try sema.analyzeLoad(block, src, decl_ref, src);
    if (Air.refToInterned(result) != null) {
        if (!block.is_typeof) {
            try sema.decl_val_table.put(sema.gpa, decl_index, result);
        }
    }
    return result;
}

fn addReferencedBy(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    decl_index: Decl.Index,
) !void {
    if (sema.mod.comp.reference_trace == 0) return;
    if (src == .unneeded) {
        // We can't use NeededSourceLocation, since sites handling that assume it means a compile
        // error. Our long-term strategy here is to gradually transition from NeededSourceLocation
        // into having more LazySrcLoc tags. In the meantime, let release compilers just ignore this
        // reference (a slightly-incomplete error is better than a crash!), but trigger a panic in
        // debug so we can fix this case.
        if (std.debug.runtime_safety) unreachable else return;
    }
    try sema.mod.reference_table.put(sema.gpa, decl_index, .{
        .referencer = block.src_decl,
        .src = src,
    });
}

fn ensureDeclAnalyzed(sema: *Sema, decl_index: Decl.Index) CompileError!void {
    const mod = sema.mod;
    const ip = &mod.intern_pool;
    const decl = mod.declPtr(decl_index);
    if (decl.analysis == .in_progress) {
        const msg = try Module.ErrorMsg.create(sema.gpa, decl.srcLoc(mod), "dependency loop detected", .{});
        return sema.failWithOwnedErrorMsg(msg);
    }

    mod.ensureDeclAnalyzed(decl_index) catch |err| {
        if (sema.owner_func_index != .none) {
            ip.funcAnalysis(sema.owner_func_index).state = .dependency_failure;
        } else {
            sema.owner_decl.analysis = .dependency_failure;
        }
        return err;
    };
}

fn ensureFuncBodyAnalyzed(sema: *Sema, func: InternPool.Index) CompileError!void {
    const mod = sema.mod;
    const ip = &mod.intern_pool;
    mod.ensureFuncBodyAnalyzed(func) catch |err| {
        if (sema.owner_func_index != .none) {
            ip.funcAnalysis(sema.owner_func_index).state = .dependency_failure;
        } else {
            sema.owner_decl.analysis = .dependency_failure;
        }
        return err;
    };
}

fn refValue(sema: *Sema, block: *Block, ty: Type, val: Value) !Value {
    const mod = sema.mod;
    var anon_decl = try block.startAnonDecl();
    defer anon_decl.deinit();
    const decl = try anon_decl.finish(
        ty,
        val,
        .none, // default alignment
    );
    try sema.maybeQueueFuncBodyAnalysis(decl);
    try mod.declareDeclDependency(sema.owner_decl_index, decl);
    const result = try mod.intern(.{ .ptr = .{
        .ty = (try mod.singleConstPtrType(ty)).toIntern(),
        .addr = .{ .decl = decl },
    } });
    return result.toValue();
}

fn optRefValue(sema: *Sema, block: *Block, ty: Type, opt_val: ?Value) !Value {
    const mod = sema.mod;
    const ptr_anyopaque_ty = try mod.singleConstPtrType(Type.anyopaque);
    return (try mod.intern(.{ .opt = .{
        .ty = (try mod.optionalType(ptr_anyopaque_ty.toIntern())).toIntern(),
        .val = if (opt_val) |val| (try mod.getCoerced(
            try sema.refValue(block, ty, val),
            ptr_anyopaque_ty,
        )).toIntern() else .none,
    } })).toValue();
}

fn analyzeDeclRef(sema: *Sema, decl_index: Decl.Index) CompileError!Air.Inst.Ref {
    return sema.analyzeDeclRefInner(decl_index, true);
}

/// Analyze a reference to the decl at the given index. Ensures the underlying decl is analyzed, but
/// only triggers analysis for function bodies if `analyze_fn_body` is true. If it's possible for a
/// decl_ref to end up in runtime code, the function body must be analyzed: `analyzeDeclRef` wraps
/// this function with `analyze_fn_body` set to true.
fn analyzeDeclRefInner(sema: *Sema, decl_index: Decl.Index, analyze_fn_body: bool) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    try mod.declareDeclDependency(sema.owner_decl_index, decl_index);
    try sema.ensureDeclAnalyzed(decl_index);

    const decl = mod.declPtr(decl_index);
    const decl_tv = try decl.typedValue();
    const ptr_ty = try mod.ptrType(.{
        .child = decl_tv.ty.toIntern(),
        .flags = .{
            .alignment = decl.alignment,
            .is_const = if (decl.val.getVariable(mod)) |variable| variable.is_const else true,
            .address_space = decl.@"addrspace",
        },
    });
    if (analyze_fn_body) {
        try sema.maybeQueueFuncBodyAnalysis(decl_index);
    }
    return Air.internedToRef((try mod.intern(.{ .ptr = .{
        .ty = ptr_ty.toIntern(),
        .addr = .{ .decl = decl_index },
    } })));
}

fn maybeQueueFuncBodyAnalysis(sema: *Sema, decl_index: Decl.Index) !void {
    const mod = sema.mod;
    const decl = mod.declPtr(decl_index);
    const tv = try decl.typedValue();
    if (tv.ty.zigTypeTag(mod) != .Fn) return;
    if (!try sema.fnHasRuntimeBits(tv.ty)) return;
    const func_index = tv.val.toIntern();
    if (!mod.intern_pool.isFuncBody(func_index)) return; // undef or extern function
    try mod.ensureFuncBodyAnalysisQueued(func_index);
}

fn analyzeRef(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    operand: Air.Inst.Ref,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const operand_ty = sema.typeOf(operand);

    if (try sema.resolveMaybeUndefVal(operand)) |val| {
        switch (mod.intern_pool.indexToKey(val.toIntern())) {
            .extern_func => |extern_func| return sema.analyzeDeclRef(extern_func.decl),
            .func => |func| return sema.analyzeDeclRef(func.owner_decl),
            else => {},
        }
        var anon_decl = try block.startAnonDecl();
        defer anon_decl.deinit();
        return sema.analyzeDeclRef(try anon_decl.finish(
            operand_ty,
            val,
            .none, // default alignment
        ));
    }

    try sema.requireRuntimeBlock(block, src, null);
    const address_space = target_util.defaultAddressSpace(mod.getTarget(), .local);
    const ptr_type = try mod.ptrType(.{
        .child = operand_ty.toIntern(),
        .flags = .{
            .is_const = true,
            .address_space = address_space,
        },
    });
    const mut_ptr_type = try mod.ptrType(.{
        .child = operand_ty.toIntern(),
        .flags = .{ .address_space = address_space },
    });
    const alloc = try block.addTy(.alloc, mut_ptr_type);
    try sema.storePtr(block, src, alloc, operand);

    // TODO: Replace with sema.coerce when that supports adding pointer constness.
    return sema.bitCast(block, ptr_type, alloc, src, null);
}
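
// For example, `&x` on a comptime-known value is lowered to a pointer into an
// anonymous decl, while `&x` on a runtime value becomes a stack allocation
// plus a store:
//     const p = &@as(u32, 123); // comptime-known: pointer to an anonymous decl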

fn analyzeLoad(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    ptr: Air.Inst.Ref,
    ptr_src: LazySrcLoc,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const ptr_ty = sema.typeOf(ptr);
    const elem_ty = switch (ptr_ty.zigTypeTag(mod)) {
        .Pointer => ptr_ty.childType(mod),
        else => return sema.fail(block, ptr_src, "expected pointer, found '{}'", .{ptr_ty.fmt(sema.mod)}),
    };

    if (try sema.typeHasOnePossibleValue(elem_ty)) |opv| {
        return Air.internedToRef(opv.toIntern());
    }

    if (try sema.resolveDefinedValue(block, ptr_src, ptr)) |ptr_val| {
        if (try sema.pointerDeref(block, src, ptr_val, ptr_ty)) |elem_val| {
            return Air.internedToRef(elem_val.toIntern());
        }
    }

    if (ptr_ty.ptrInfo(mod).flags.vector_index == .runtime) {
        const ptr_inst = Air.refToIndex(ptr).?;
        const air_tags = sema.air_instructions.items(.tag);
        if (air_tags[ptr_inst] == .ptr_elem_ptr) {
            const ty_pl = sema.air_instructions.items(.data)[ptr_inst].ty_pl;
            const bin_op = sema.getTmpAir().extraData(Air.Bin, ty_pl.payload).data;
            return block.addBinOp(.ptr_elem_val, bin_op.lhs, bin_op.rhs);
        }
        return sema.fail(block, ptr_src, "unable to determine vector element index of type '{}'", .{
            ptr_ty.fmt(sema.mod),
        });
    }

    return block.addTyOp(.load, elem_ty, ptr);
}
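
// This is the analysis behind pointer dereference. For a comptime-known
// pointer the load folds to a value; otherwise a runtime `load` instruction
// is emitted:
//     const x = ptr.*; // folds at comptime when `ptr` is comptime-known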

fn analyzeSlicePtr(
    sema: *Sema,
    block: *Block,
    slice_src: LazySrcLoc,
    slice: Air.Inst.Ref,
    slice_ty: Type,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const result_ty = slice_ty.slicePtrFieldType(mod);
    if (try sema.resolveMaybeUndefVal(slice)) |val| {
        if (val.isUndef(mod)) return mod.undefRef(result_ty);
        return Air.internedToRef(val.slicePtr(mod).toIntern());
    }
    try sema.requireRuntimeBlock(block, slice_src, null);
    return block.addTyOp(.slice_ptr, result_ty, slice);
}

fn analyzeSliceLen(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    slice_inst: Air.Inst.Ref,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    if (try sema.resolveMaybeUndefVal(slice_inst)) |slice_val| {
        if (slice_val.isUndef(mod)) {
            return mod.undefRef(Type.usize);
        }
        return mod.intRef(Type.usize, slice_val.sliceLen(sema.mod));
    }
    try sema.requireRuntimeBlock(block, src, null);
    return block.addTyOp(.slice_len, Type.usize, slice_inst);
}

fn analyzeIsNull(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    operand: Air.Inst.Ref,
    invert_logic: bool,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const result_ty = Type.bool;
    if (try sema.resolveMaybeUndefVal(operand)) |opt_val| {
        if (opt_val.isUndef(mod)) {
            return mod.undefRef(result_ty);
        }
        const is_null = opt_val.isNull(mod);
        const bool_value = if (invert_logic) !is_null else is_null;
        if (bool_value) {
            return Air.Inst.Ref.bool_true;
        } else {
            return Air.Inst.Ref.bool_false;
        }
    }

    const inverted_non_null_res = if (invert_logic) Air.Inst.Ref.bool_true else Air.Inst.Ref.bool_false;
    const operand_ty = sema.typeOf(operand);
    if (operand_ty.zigTypeTag(mod) == .Optional and operand_ty.optionalChild(mod).zigTypeTag(mod) == .NoReturn) {
        return inverted_non_null_res;
    }
    if (operand_ty.zigTypeTag(mod) != .Optional and !operand_ty.isPtrLikeOptional(mod)) {
        return inverted_non_null_res;
    }
    try sema.requireRuntimeBlock(block, src, null);
    const air_tag: Air.Inst.Tag = if (invert_logic) .is_non_null else .is_null;
    return block.addUnOp(air_tag, operand);
}
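
// This implements `x == null` and `x != null` (selected via `invert_logic`),
// for example:
//     if (opt != null) { ... } // comptime-known when `opt` is comptime-known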

fn analyzePtrIsNonErrComptimeOnly(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    operand: Air.Inst.Ref,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const ptr_ty = sema.typeOf(operand);
    assert(ptr_ty.zigTypeTag(mod) == .Pointer);
    const child_ty = ptr_ty.childType(mod);

    const child_tag = child_ty.zigTypeTag(mod);
    if (child_tag != .ErrorSet and child_tag != .ErrorUnion) return Air.Inst.Ref.bool_true;
    if (child_tag == .ErrorSet) return Air.Inst.Ref.bool_false;
    assert(child_tag == .ErrorUnion);

    _ = block;
    _ = src;

    return Air.Inst.Ref.none;
}

fn analyzeIsNonErrComptimeOnly(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    operand: Air.Inst.Ref,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const ip = &mod.intern_pool;
    const operand_ty = sema.typeOf(operand);
    const ot = operand_ty.zigTypeTag(mod);
    if (ot != .ErrorSet and ot != .ErrorUnion) return .bool_true;
    if (ot == .ErrorSet) return .bool_false;
    assert(ot == .ErrorUnion);

    const payload_ty = operand_ty.errorUnionPayload(mod);
    if (payload_ty.zigTypeTag(mod) == .NoReturn) {
        return .bool_false;
    }

    if (Air.refToIndex(operand)) |operand_inst| {
        switch (sema.air_instructions.items(.tag)[operand_inst]) {
            .wrap_errunion_payload => return .bool_true,
            .wrap_errunion_err => return .bool_false,
            else => {},
        }
    } else if (operand == .undef) {
        return mod.undefRef(Type.bool);
    } else if (@intFromEnum(operand) < InternPool.static_len) {
        // None of the ref tags can be errors.
        return .bool_true;
    }

    const maybe_operand_val = try sema.resolveMaybeUndefVal(operand);

    // Exception: if the error union's error set is known to be empty,
    // we allow the comparison but always make it comptime-known.
    const set_ty = ip.errorUnionSet(operand_ty.toIntern());
    switch (set_ty) {
        .anyerror_type => {},
        .adhoc_inferred_error_set_type => if (sema.fn_ret_ty_ies) |ies| blk: {
            // If the error set is empty, we must return a comptime true or false.
            // However we want to avoid unnecessarily resolving an inferred error set
            // in case it is already non-empty.
            switch (ies.resolved) {
                .anyerror_type => break :blk,
                .none => {},
                else => |i| if (ip.indexToKey(i).error_set_type.names.len != 0) break :blk,
            }

            if (maybe_operand_val != null) break :blk;

            // Try to avoid resolving inferred error set if possible.
            if (ies.errors.count() != 0) return .none;
            switch (ies.resolved) {
                .anyerror_type => return .none,
                .none => {},
                else => switch (ip.indexToKey(ies.resolved).error_set_type.names.len) {
                    0 => return .bool_true,
                    else => return .none,
                },
            }
            // We do not have a comptime answer because this inferred error
            // set is not resolved, and an instruction later in this function
            // body may or may not cause an error to be added to this set.
            return .none;
        },
        else => switch (ip.indexToKey(set_ty)) {
            .error_set_type => |error_set_type| {
                if (error_set_type.names.len == 0) return .bool_true;
            },
            .inferred_error_set_type => |func_index| blk: {
                // If the error set is empty, we must return a comptime true or false.
                // However we want to avoid unnecessarily resolving an inferred error set
                // in case it is already non-empty.
                switch (ip.funcIesResolved(func_index).*) {
                    .anyerror_type => break :blk,
                    .none => {},
                    else => |i| if (ip.indexToKey(i).error_set_type.names.len != 0) break :blk,
                }
                if (maybe_operand_val != null) break :blk;
                if (sema.fn_ret_ty_ies) |ies| {
                    if (ies.func == func_index) {
                        // Try to avoid resolving inferred error set if possible.
                        if (ies.errors.count() != 0) return .none;
                        switch (ies.resolved) {
                            .anyerror_type => return .none,
                            .none => {},
                            else => switch (ip.indexToKey(ies.resolved).error_set_type.names.len) {
                                0 => return .bool_true,
                                else => return .none,
                            },
                        }
                        // We do not have a comptime answer because this inferred error
                        // set is not resolved, and an instruction later in this function
                        // body may or may not cause an error to be added to this set.
                        return .none;
                    }
                }
                const resolved_ty = try sema.resolveInferredErrorSet(block, src, set_ty);
                if (resolved_ty == .anyerror_type)
                    break :blk;
                if (ip.indexToKey(resolved_ty).error_set_type.names.len == 0)
                    return .bool_true;
            },
            else => unreachable,
        },
    }

    if (maybe_operand_val) |err_union| {
        if (err_union.isUndef(mod)) {
            return mod.undefRef(Type.bool);
        }
        if (err_union.getErrorName(mod) == .none) {
            return .bool_true;
        } else {
            return .bool_false;
        }
    }
    return .none;
}

fn analyzeIsNonErr(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    operand: Air.Inst.Ref,
) CompileError!Air.Inst.Ref {
    const result = try sema.analyzeIsNonErrComptimeOnly(block, src, operand);
    if (result == .none) {
        try sema.requireRuntimeBlock(block, src, null);
        return block.addUnOp(.is_non_err, operand);
    } else {
        return result;
    }
}
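
// This is the check behind `if (error_union) |payload| ... else |err| ...`
// and `catch`: the `ComptimeOnly` helper above returns `.none` when only a
// runtime check can decide, and the answer is comptime-known whenever the
// error set is known to be empty.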

fn analyzePtrIsNonErr(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    operand: Air.Inst.Ref,
) CompileError!Air.Inst.Ref {
    const result = try sema.analyzePtrIsNonErrComptimeOnly(block, src, operand);
    if (result == .none) {
        try sema.requireRuntimeBlock(block, src, null);
        return block.addUnOp(.is_non_err_ptr, operand);
    } else {
        return result;
    }
}

fn analyzeSlice(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    ptr_ptr: Air.Inst.Ref,
    uncasted_start: Air.Inst.Ref,
    uncasted_end_opt: Air.Inst.Ref,
    sentinel_opt: Air.Inst.Ref,
    sentinel_src: LazySrcLoc,
    ptr_src: LazySrcLoc,
    start_src: LazySrcLoc,
    end_src: LazySrcLoc,
    by_length: bool,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    // Slice expressions can operate on a variable whose type is an array. This requires
    // the slice operand to be a pointer. In the case of a non-array, it will be a double pointer.
    const ptr_ptr_ty = sema.typeOf(ptr_ptr);
    const ptr_ptr_child_ty = switch (ptr_ptr_ty.zigTypeTag(mod)) {
        .Pointer => ptr_ptr_ty.childType(mod),
        else => return sema.fail(block, ptr_src, "expected pointer, found '{}'", .{ptr_ptr_ty.fmt(mod)}),
    };

    var array_ty = ptr_ptr_child_ty;
    var slice_ty = ptr_ptr_ty;
    var ptr_or_slice = ptr_ptr;
    var elem_ty: Type = undefined;
    var ptr_sentinel: ?Value = null;
    switch (ptr_ptr_child_ty.zigTypeTag(mod)) {
        .Array => {
            ptr_sentinel = ptr_ptr_child_ty.sentinel(mod);
            elem_ty = ptr_ptr_child_ty.childType(mod);
        },
        .Pointer => switch (ptr_ptr_child_ty.ptrSize(mod)) {
            .One => {
                const double_child_ty = ptr_ptr_child_ty.childType(mod);
                if (double_child_ty.zigTypeTag(mod) == .Array) {
                    ptr_sentinel = double_child_ty.sentinel(mod);
                    ptr_or_slice = try sema.analyzeLoad(block, src, ptr_ptr, ptr_src);
                    slice_ty = ptr_ptr_child_ty;
                    array_ty = double_child_ty;
                    elem_ty = double_child_ty.childType(mod);
                } else {
                    return sema.fail(block, src, "slice of single-item pointer", .{});
                }
            },
            .Many, .C => {
                ptr_sentinel = ptr_ptr_child_ty.sentinel(mod);
                ptr_or_slice = try sema.analyzeLoad(block, src, ptr_ptr, ptr_src);
                slice_ty = ptr_ptr_child_ty;
                array_ty = ptr_ptr_child_ty;
                elem_ty = ptr_ptr_child_ty.childType(mod);

                if (ptr_ptr_child_ty.ptrSize(mod) == .C) {
                    if (try sema.resolveDefinedValue(block, ptr_src, ptr_or_slice)) |ptr_val| {
                        if (ptr_val.isNull(mod)) {
                            return sema.fail(block, src, "slice of null pointer", .{});
                        }
                    }
                }
            },
            .Slice => {
                ptr_sentinel = ptr_ptr_child_ty.sentinel(mod);
                ptr_or_slice = try sema.analyzeLoad(block, src, ptr_ptr, ptr_src);
                slice_ty = ptr_ptr_child_ty;
                array_ty = ptr_ptr_child_ty;
                elem_ty = ptr_ptr_child_ty.childType(mod);
            },
        },
        else => return sema.fail(block, src, "slice of non-array type '{}'", .{ptr_ptr_child_ty.fmt(mod)}),
    }

    const ptr = if (slice_ty.isSlice(mod))
        try sema.analyzeSlicePtr(block, ptr_src, ptr_or_slice, slice_ty)
    else if (array_ty.zigTypeTag(mod) == .Array) ptr: {
        var manyptr_ty_key = mod.intern_pool.indexToKey(slice_ty.toIntern()).ptr_type;
        assert(manyptr_ty_key.child == array_ty.toIntern());
        assert(manyptr_ty_key.flags.size == .One);
        manyptr_ty_key.child = elem_ty.toIntern();
        manyptr_ty_key.flags.size = .Many;
        break :ptr try sema.coerceCompatiblePtrs(block, try mod.ptrType(manyptr_ty_key), ptr_or_slice, ptr_src);
    } else ptr_or_slice;

    const start = try sema.coerce(block, Type.usize, uncasted_start, start_src);
    const new_ptr = try sema.analyzePtrArithmetic(block, src, ptr, start, .ptr_add, ptr_src, start_src);
    const new_ptr_ty = sema.typeOf(new_ptr);

    // true if and only if the end index of the slice, implicitly or explicitly, equals
    // the length of the underlying object being sliced. we might learn the length of the
    // underlying object because it is an array (which has the length in the type), or
    // we might learn of the length because it is a comptime-known slice value.
    var end_is_len = uncasted_end_opt == .none;
    const end = e: {
        if (array_ty.zigTypeTag(mod) == .Array) {
            const len_val = try mod.intValue(Type.usize, array_ty.arrayLen(mod));

            if (!end_is_len) {
                const end = if (by_length) end: {
                    const len = try sema.coerce(block, Type.usize, uncasted_end_opt, end_src);
                    const uncasted_end = try sema.analyzeArithmetic(block, .add, start, len, src, start_src, end_src, false);
                    break :end try sema.coerce(block, Type.usize, uncasted_end, end_src);
                } else try sema.coerce(block, Type.usize, uncasted_end_opt, end_src);
                if (try sema.resolveMaybeUndefVal(end)) |end_val| {
                    const len_s_val = try mod.intValue(
                        Type.usize,
                        array_ty.arrayLenIncludingSentinel(mod),
                    );
                    if (!(try sema.compareAll(end_val, .lte, len_s_val, Type.usize))) {
                        const sentinel_label: []const u8 = if (array_ty.sentinel(mod) != null)
                            " +1 (sentinel)"
                        else
                            "";

                        return sema.fail(
                            block,
                            end_src,
                            "end index {} out of bounds for array of length {}{s}",
                            .{
                                end_val.fmtValue(Type.usize, mod),
                                len_val.fmtValue(Type.usize, mod),
                                sentinel_label,
                            },
                        );
                    }

                    // end_is_len is only true if we are NOT using the sentinel
                    // length. For sentinel-length, we don't want the type to
                    // contain the sentinel.
                    if (end_val.eql(len_val, Type.usize, mod)) {
                        end_is_len = true;
                    }
                }
                break :e end;
            }

            break :e Air.internedToRef(len_val.toIntern());
        } else if (slice_ty.isSlice(mod)) {
            if (!end_is_len) {
                const end = if (by_length) end: {
                    const len = try sema.coerce(block, Type.usize, uncasted_end_opt, end_src);
                    const uncasted_end = try sema.analyzeArithmetic(block, .add, start, len, src, start_src, end_src, false);
                    break :end try sema.coerce(block, Type.usize, uncasted_end, end_src);
                } else try sema.coerce(block, Type.usize, uncasted_end_opt, end_src);
                if (try sema.resolveDefinedValue(block, end_src, end)) |end_val| {
                    if (try sema.resolveMaybeUndefVal(ptr_or_slice)) |slice_val| {
                        if (slice_val.isUndef(mod)) {
                            return sema.fail(block, src, "slice of undefined", .{});
                        }
                        const has_sentinel = slice_ty.sentinel(mod) != null;
                        const slice_len = slice_val.sliceLen(mod);
                        const len_plus_sent = slice_len + @intFromBool(has_sentinel);
                        const slice_len_val_with_sentinel = try mod.intValue(Type.usize, len_plus_sent);
                        if (!(try sema.compareAll(end_val, .lte, slice_len_val_with_sentinel, Type.usize))) {
                            const sentinel_label: []const u8 = if (has_sentinel)
                                " +1 (sentinel)"
                            else
                                "";

                            return sema.fail(
                                block,
                                end_src,
                                "end index {} out of bounds for slice of length {d}{s}",
                                .{
                                    end_val.fmtValue(Type.usize, mod),
                                    slice_val.sliceLen(mod),
                                    sentinel_label,
                                },
                            );
                        }

                        // If the slice has a sentinel, end_is_len is only true
                        // when the end equals the length WITHOUT the sentinel,
                        // so that we don't add a sentinel to the result type.
                        const slice_len_val = try mod.intValue(Type.usize, slice_len);
                        if (end_val.eql(slice_len_val, Type.usize, mod)) {
                            end_is_len = true;
                        }
                    }
                }
                break :e end;
            }
            break :e try sema.analyzeSliceLen(block, src, ptr_or_slice);
        }
        if (!end_is_len) {
            if (by_length) {
                const len = try sema.coerce(block, Type.usize, uncasted_end_opt, end_src);
                const uncasted_end = try sema.analyzeArithmetic(block, .add, start, len, src, start_src, end_src, false);
                break :e try sema.coerce(block, Type.usize, uncasted_end, end_src);
            } else break :e try sema.coerce(block, Type.usize, uncasted_end_opt, end_src);
        }
        return sema.fail(block, src, "slice of pointer must include end value", .{});
    };

    const sentinel = s: {
        if (sentinel_opt != .none) {
            const casted = try sema.coerce(block, elem_ty, sentinel_opt, sentinel_src);
            break :s try sema.resolveConstValue(block, sentinel_src, casted, "slice sentinel must be comptime-known");
        }
        // If we are slicing to the end of something that is sentinel-terminated
        // then the resulting slice type is also sentinel-terminated.
        if (end_is_len) {
            if (ptr_sentinel) |sent| {
                break :s sent;
            }
        }
        break :s null;
    };
    const slice_sentinel = if (sentinel_opt != .none) sentinel else null;

    var checked_start_lte_end = by_length;
    var runtime_src: ?LazySrcLoc = null;

    // requirement: start <= end
    if (try sema.resolveDefinedValue(block, end_src, end)) |end_val| {
        if (try sema.resolveDefinedValue(block, start_src, start)) |start_val| {
            if (!by_length and !(try sema.compareAll(start_val, .lte, end_val, Type.usize))) {
                return sema.fail(
                    block,
                    start_src,
                    "start index {} is larger than end index {}",
                    .{
                        start_val.fmtValue(Type.usize, mod),
                        end_val.fmtValue(Type.usize, mod),
                    },
                );
            }
            checked_start_lte_end = true;
            if (try sema.resolveMaybeUndefVal(new_ptr)) |ptr_val| sentinel_check: {
                const expected_sentinel = sentinel orelse break :sentinel_check;
                const start_int = start_val.getUnsignedInt(mod).?;
                const end_int = end_val.getUnsignedInt(mod).?;
                const sentinel_index = try sema.usizeCast(block, end_src, end_int - start_int);

                const many_ptr_ty = try mod.manyConstPtrType(elem_ty);
                const many_ptr_val = try mod.getCoerced(ptr_val, many_ptr_ty);
                const elem_ptr_ty = try mod.singleConstPtrType(elem_ty);
                const elem_ptr = try many_ptr_val.elemPtr(elem_ptr_ty, sentinel_index, mod);
                const res = try sema.pointerDerefExtra(block, src, elem_ptr, elem_ty);
                const actual_sentinel = switch (res) {
                    .runtime_load => break :sentinel_check,
                    .val => |v| v,
                    .needed_well_defined => |ty| return sema.fail(
                        block,
                        src,
                        "comptime dereference requires '{}' to have a well-defined layout, but it does not.",
                        .{ty.fmt(mod)},
                    ),
                    .out_of_bounds => |ty| return sema.fail(
                        block,
                        end_src,
                        "slice end index {d} exceeds bounds of containing decl of type '{}'",
                        .{ end_int, ty.fmt(mod) },
                    ),
                };

                if (!actual_sentinel.eql(expected_sentinel, elem_ty, mod)) {
                    const msg = msg: {
                        const msg = try sema.errMsg(block, src, "value in memory does not match slice sentinel", .{});
                        errdefer msg.destroy(sema.gpa);
                        try sema.errNote(block, src, msg, "expected '{}', found '{}'", .{
                            expected_sentinel.fmtValue(elem_ty, mod),
                            actual_sentinel.fmtValue(elem_ty, mod),
                        });

                        break :msg msg;
                    };
                    return sema.failWithOwnedErrorMsg(msg);
                }
            } else {
                runtime_src = ptr_src;
            }
        } else {
            runtime_src = start_src;
        }
    } else {
        runtime_src = end_src;
    }

    if (!checked_start_lte_end and block.wantSafety() and !block.is_comptime) {
        // requirement: start <= end
        assert(!block.is_comptime);
        try sema.requireRuntimeBlock(block, src, runtime_src.?);
        const ok = try block.addBinOp(.cmp_lte, start, end);
        if (!sema.mod.comp.formatted_panics) {
            try sema.addSafetyCheck(block, src, ok, .start_index_greater_than_end);
        } else {
            try sema.safetyCheckFormatted(block, src, ok, "panicStartGreaterThanEnd", &.{ start, end });
        }
    }
    const new_len = if (by_length)
        try sema.coerce(block, Type.usize, uncasted_end_opt, end_src)
    else
        try sema.analyzeArithmetic(block, .sub, end, start, src, end_src, start_src, false);
    const opt_new_len_val = try sema.resolveDefinedValue(block, src, new_len);

    const new_ptr_ty_info = new_ptr_ty.ptrInfo(mod);
    const new_allowzero = new_ptr_ty_info.flags.is_allowzero and sema.typeOf(ptr).ptrSize(mod) != .C;

    if (opt_new_len_val) |new_len_val| {
        const new_len_int = new_len_val.toUnsignedInt(mod);

        const return_ty = try mod.ptrType(.{
            .child = (try mod.arrayType(.{
                .len = new_len_int,
                .sentinel = if (sentinel) |s| s.toIntern() else .none,
                .child = elem_ty.toIntern(),
            })).toIntern(),
            .flags = .{
                .alignment = new_ptr_ty_info.flags.alignment,
                .is_const = new_ptr_ty_info.flags.is_const,
                .is_allowzero = new_allowzero,
                .is_volatile = new_ptr_ty_info.flags.is_volatile,
                .address_space = new_ptr_ty_info.flags.address_space,
            },
        });

        const opt_new_ptr_val = try sema.resolveMaybeUndefVal(new_ptr);
        const new_ptr_val = opt_new_ptr_val orelse {
            const result = try block.addBitCast(return_ty, new_ptr);
            if (block.wantSafety()) {
                // requirement: slicing C ptr is non-null
                if (ptr_ptr_child_ty.isCPtr(mod)) {
                    const is_non_null = try sema.analyzeIsNull(block, ptr_src, ptr, true);
                    try sema.addSafetyCheck(block, src, is_non_null, .unwrap_null);
                }

                if (slice_ty.isSlice(mod)) {
                    const slice_len_inst = try block.addTyOp(.slice_len, Type.usize, ptr_or_slice);
                    const actual_len = if (slice_ty.sentinel(mod) == null)
                        slice_len_inst
                    else
                        try sema.analyzeArithmetic(block, .add, slice_len_inst, .one, src, end_src, end_src, true);

                    const actual_end = if (slice_sentinel != null)
                        try sema.analyzeArithmetic(block, .add, end, .one, src, end_src, end_src, true)
                    else
                        end;

                    try sema.panicIndexOutOfBounds(block, src, actual_end, actual_len, .cmp_lte);
                }

                // requirement: result[new_len] == slice_sentinel
                try sema.panicSentinelMismatch(block, src, slice_sentinel, elem_ty, result, new_len);
            }
            return result;
        };

        if (!new_ptr_val.isUndef(mod)) {
            return Air.internedToRef((try mod.getCoerced(
                (try new_ptr_val.intern(new_ptr_ty, mod)).toValue(),
                return_ty,
            )).toIntern());
        }

        // Special case: @as([]i32, undefined)[x..x]
        if (new_len_int == 0) {
            return mod.undefRef(return_ty);
        }

        return sema.fail(block, src, "non-zero length slice of undefined pointer", .{});
    }

    const return_ty = try mod.ptrType(.{
        .child = elem_ty.toIntern(),
        .sentinel = if (sentinel) |s| s.toIntern() else .none,
        .flags = .{
            .size = .Slice,
            .alignment = new_ptr_ty_info.flags.alignment,
            .is_const = new_ptr_ty_info.flags.is_const,
            .is_volatile = new_ptr_ty_info.flags.is_volatile,
            .is_allowzero = new_allowzero,
            .address_space = new_ptr_ty_info.flags.address_space,
        },
    });

    try sema.requireRuntimeBlock(block, src, runtime_src.?);
    if (block.wantSafety()) {
        // requirement: slicing C ptr is non-null
        if (ptr_ptr_child_ty.isCPtr(mod)) {
            const is_non_null = try sema.analyzeIsNull(block, ptr_src, ptr, true);
            try sema.addSafetyCheck(block, src, is_non_null, .unwrap_null);
        }

        // requirement: end <= len
        const opt_len_inst = if (array_ty.zigTypeTag(mod) == .Array)
            try mod.intRef(Type.usize, array_ty.arrayLenIncludingSentinel(mod))
        else if (slice_ty.isSlice(mod)) blk: {
            if (try sema.resolveDefinedValue(block, src, ptr_or_slice)) |slice_val| {
                // we don't need to add one for sentinels because the
                // underlying value data includes the sentinel
                break :blk try mod.intRef(Type.usize, slice_val.sliceLen(mod));
            }

            const slice_len_inst = try block.addTyOp(.slice_len, Type.usize, ptr_or_slice);
            if (slice_ty.sentinel(mod) == null) break :blk slice_len_inst;

            // we have to add one because slice lengths don't include the sentinel
            break :blk try sema.analyzeArithmetic(block, .add, slice_len_inst, .one, src, end_src, end_src, true);
        } else null;
        if (opt_len_inst) |len_inst| {
            const actual_end = if (slice_sentinel != null)
                try sema.analyzeArithmetic(block, .add, end, .one, src, end_src, end_src, true)
            else
                end;
            try sema.panicIndexOutOfBounds(block, src, actual_end, len_inst, .cmp_lte);
        }

        // requirement: start <= end
        try sema.panicIndexOutOfBounds(block, src, start, end, .cmp_lte);
    }
    const result = try block.addInst(.{
        .tag = .slice,
        .data = .{ .ty_pl = .{
            .ty = Air.internedToRef(return_ty.toIntern()),
            .payload = try sema.addExtra(Air.Bin{
                .lhs = new_ptr,
                .rhs = new_len,
            }),
        } },
    });
    if (block.wantSafety()) {
        // requirement: result[new_len] == slice_sentinel
        try sema.panicSentinelMismatch(block, src, slice_sentinel, elem_ty, result, new_len);
    }
    return result;
}
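
// This analyzes the slice expression forms, for example:
//     buf[1..3]        // explicit end index
//     buf[1..]         // end defaults to the length, when known
//     buf[1..3 :0]     // with a comptime-known sentinel check
//     many_ptr[0..len] // many-pointers must provide an explicit end
// Out-of-bounds and sentinel mismatches are reported at comptime where
// possible, and checked at runtime where safety is enabled.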

/// Asserts that lhs and rhs types are both numeric.
fn cmpNumeric(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    uncasted_lhs: Air.Inst.Ref,
    uncasted_rhs: Air.Inst.Ref,
    op: std.math.CompareOperator,
    lhs_src: LazySrcLoc,
    rhs_src: LazySrcLoc,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const lhs_ty = sema.typeOf(uncasted_lhs);
    const rhs_ty = sema.typeOf(uncasted_rhs);

    assert(lhs_ty.isNumeric(mod));
    assert(rhs_ty.isNumeric(mod));

    const lhs_ty_tag = lhs_ty.zigTypeTag(mod);
    const rhs_ty_tag = rhs_ty.zigTypeTag(mod);
    const target = mod.getTarget();

    // One exception to heterogeneous comparison: comptime_float needs to
    // coerce to fixed-width float.

    const lhs = if (lhs_ty_tag == .ComptimeFloat and rhs_ty_tag == .Float)
        try sema.coerce(block, rhs_ty, uncasted_lhs, lhs_src)
    else
        uncasted_lhs;

    const rhs = if (lhs_ty_tag == .Float and rhs_ty_tag == .ComptimeFloat)
        try sema.coerce(block, lhs_ty, uncasted_rhs, rhs_src)
    else
        uncasted_rhs;

    const runtime_src: LazySrcLoc = src: {
        if (try sema.resolveMaybeUndefVal(lhs)) |lhs_val| {
            if (try sema.resolveMaybeUndefVal(rhs)) |rhs_val| {
                // Compare ints: const vs. undefined (or vice versa)
                if (!lhs_val.isUndef(mod) and (lhs_ty.isInt(mod) or lhs_ty_tag == .ComptimeInt) and rhs_ty.isInt(mod) and rhs_val.isUndef(mod)) {
                    if (try sema.compareIntsOnlyPossibleResult(try sema.resolveLazyValue(lhs_val), op, rhs_ty)) |res| {
                        return if (res) Air.Inst.Ref.bool_true else Air.Inst.Ref.bool_false;
                    }
                } else if (!rhs_val.isUndef(mod) and (rhs_ty.isInt(mod) or rhs_ty_tag == .ComptimeInt) and lhs_ty.isInt(mod) and lhs_val.isUndef(mod)) {
                    if (try sema.compareIntsOnlyPossibleResult(try sema.resolveLazyValue(rhs_val), op.reverse(), lhs_ty)) |res| {
                        return if (res) Air.Inst.Ref.bool_true else Air.Inst.Ref.bool_false;
                    }
                }

                if (lhs_val.isUndef(mod) or rhs_val.isUndef(mod)) {
                    return mod.undefRef(Type.bool);
                }
                if (lhs_val.isNan(mod) or rhs_val.isNan(mod)) {
                    if (op == std.math.CompareOperator.neq) {
                        return Air.Inst.Ref.bool_true;
                    } else {
                        return Air.Inst.Ref.bool_false;
                    }
                }
                if (try Value.compareHeteroAdvanced(lhs_val, op, rhs_val, mod, sema)) {
                    return Air.Inst.Ref.bool_true;
                } else {
                    return Air.Inst.Ref.bool_false;
                }
            } else {
                if (!lhs_val.isUndef(mod) and (lhs_ty.isInt(mod) or lhs_ty_tag == .ComptimeInt) and rhs_ty.isInt(mod)) {
                    // Compare ints: const vs. var
                    if (try sema.compareIntsOnlyPossibleResult(try sema.resolveLazyValue(lhs_val), op, rhs_ty)) |res| {
                        return if (res) Air.Inst.Ref.bool_true else Air.Inst.Ref.bool_false;
                    }
                }
                break :src rhs_src;
            }
        } else {
            if (try sema.resolveMaybeUndefLazyVal(rhs)) |rhs_val| {
                if (!rhs_val.isUndef(mod) and (rhs_ty.isInt(mod) or rhs_ty_tag == .ComptimeInt) and lhs_ty.isInt(mod)) {
                    // Compare ints: var vs. const
                    if (try sema.compareIntsOnlyPossibleResult(try sema.resolveLazyValue(rhs_val), op.reverse(), lhs_ty)) |res| {
                        return if (res) Air.Inst.Ref.bool_true else Air.Inst.Ref.bool_false;
                    }
                }
            }
            break :src lhs_src;
        }
    };

    // TODO handle comparisons against lazy zero values
    // Some values can be compared against zero without being runtime-known or without forcing
    // a full resolution of their value, for example `@sizeOf(@Frame(function))` is known to
    // always be nonzero, and we benefit from not forcing the full evaluation and stack frame layout
    // of this function if we don't need to.
    try sema.requireRuntimeBlock(block, src, runtime_src);

    // For floats, emit a float comparison instruction.
    const lhs_is_float = switch (lhs_ty_tag) {
        .Float, .ComptimeFloat => true,
        else => false,
    };
    const rhs_is_float = switch (rhs_ty_tag) {
        .Float, .ComptimeFloat => true,
        else => false,
    };

    if (lhs_is_float and rhs_is_float) {
        // Smaller fixed-width floats coerce to larger fixed-width floats.
        // comptime_float coerces to fixed-width float.
        const dest_ty = x: {
            if (lhs_ty_tag == .ComptimeFloat) {
                break :x rhs_ty;
            } else if (rhs_ty_tag == .ComptimeFloat) {
                break :x lhs_ty;
            }
            if (lhs_ty.floatBits(target) >= rhs_ty.floatBits(target)) {
                break :x lhs_ty;
            } else {
                break :x rhs_ty;
            }
        };
        const casted_lhs = try sema.coerce(block, dest_ty, lhs, lhs_src);
        const casted_rhs = try sema.coerce(block, dest_ty, rhs, rhs_src);
        return block.addBinOp(Air.Inst.Tag.fromCmpOp(op, block.float_mode == .Optimized), casted_lhs, casted_rhs);
    }
    // For mixed unsigned integer sizes, implicit cast both operands to the larger integer.
    // For mixed signed and unsigned integers, implicit cast both operands to a signed
    // integer with + 1 bit.
    // For mixed floats and integers, extract the integer part from the float, cast that to
    // a signed integer with mantissa bits + 1, and if there was any non-integral part of the float,
    // add/subtract 1.
    const lhs_is_signed = if (try sema.resolveDefinedValue(block, lhs_src, lhs)) |lhs_val|
        !(try lhs_val.compareAllWithZeroAdvanced(.gte, sema))
    else
        (lhs_ty.isRuntimeFloat() or lhs_ty.isSignedInt(mod));
    const rhs_is_signed = if (try sema.resolveDefinedValue(block, rhs_src, rhs)) |rhs_val|
        !(try rhs_val.compareAllWithZeroAdvanced(.gte, sema))
    else
        (rhs_ty.isRuntimeFloat() or rhs_ty.isSignedInt(mod));
    const dest_int_is_signed = lhs_is_signed or rhs_is_signed;

    var dest_float_type: ?Type = null;

    var lhs_bits: usize = undefined;
    if (try sema.resolveMaybeUndefLazyVal(lhs)) |lhs_val| {
        if (lhs_val.isUndef(mod))
            return mod.undefRef(Type.bool);
        if (lhs_val.isNan(mod)) switch (op) {
            .neq => return Air.Inst.Ref.bool_true,
            else => return Air.Inst.Ref.bool_false,
        };
        if (lhs_val.isInf(mod)) switch (op) {
            .neq => return Air.Inst.Ref.bool_true,
            .eq => return Air.Inst.Ref.bool_false,
            .gt, .gte => return if (lhs_val.isNegativeInf(mod)) Air.Inst.Ref.bool_false else Air.Inst.Ref.bool_true,
            .lt, .lte => return if (lhs_val.isNegativeInf(mod)) Air.Inst.Ref.bool_true else Air.Inst.Ref.bool_false,
        };
        if (!rhs_is_signed) {
            switch (lhs_val.orderAgainstZero(mod)) {
                .gt => {},
                .eq => switch (op) { // LHS = 0, RHS is unsigned
                    .lte => return Air.Inst.Ref.bool_true,
                    .gt => return Air.Inst.Ref.bool_false,
                    else => {},
                },
                .lt => switch (op) { // LHS < 0, RHS is unsigned
                    .neq, .lt, .lte => return Air.Inst.Ref.bool_true,
                    .eq, .gt, .gte => return Air.Inst.Ref.bool_false,
                },
            }
        }
        if (lhs_is_float) {
            if (lhs_val.floatHasFraction(mod)) {
                switch (op) {
                    .eq => return Air.Inst.Ref.bool_false,
                    .neq => return Air.Inst.Ref.bool_true,
                    else => {},
                }
            }

            var bigint = try float128IntPartToBigInt(sema.gpa, lhs_val.toFloat(f128, mod));
            defer bigint.deinit();
            if (lhs_val.floatHasFraction(mod)) {
                if (lhs_is_signed) {
                    try bigint.addScalar(&bigint, -1);
                } else {
                    try bigint.addScalar(&bigint, 1);
                }
            }
            lhs_bits = bigint.toConst().bitCountTwosComp();
        } else {
            lhs_bits = lhs_val.intBitCountTwosComp(mod);
        }
        lhs_bits += @intFromBool(!lhs_is_signed and dest_int_is_signed);
    } else if (lhs_is_float) {
        dest_float_type = lhs_ty;
    } else {
        const int_info = lhs_ty.intInfo(mod);
        lhs_bits = int_info.bits + @intFromBool(int_info.signedness == .unsigned and dest_int_is_signed);
    }

    var rhs_bits: usize = undefined;
    if (try sema.resolveMaybeUndefLazyVal(rhs)) |rhs_val| {
        if (rhs_val.isUndef(mod))
            return mod.undefRef(Type.bool);
        if (rhs_val.isNan(mod)) switch (op) {
            .neq => return Air.Inst.Ref.bool_true,
            else => return Air.Inst.Ref.bool_false,
        };
        if (rhs_val.isInf(mod)) switch (op) {
            .neq => return Air.Inst.Ref.bool_true,
            .eq => return Air.Inst.Ref.bool_false,
            .gt, .gte => return if (rhs_val.isNegativeInf(mod)) Air.Inst.Ref.bool_true else Air.Inst.Ref.bool_false,
            .lt, .lte => return if (rhs_val.isNegativeInf(mod)) Air.Inst.Ref.bool_false else Air.Inst.Ref.bool_true,
        };
        if (!lhs_is_signed) {
            switch (rhs_val.orderAgainstZero(mod)) {
                .gt => {},
                .eq => switch (op) { // RHS = 0, LHS is unsigned
                    .gte => return Air.Inst.Ref.bool_true,
                    .lt => return Air.Inst.Ref.bool_false,
                    else => {},
                },
                .lt => switch (op) { // RHS < 0, LHS is unsigned
                    .neq, .gt, .gte => return Air.Inst.Ref.bool_true,
                    .eq, .lt, .lte => return Air.Inst.Ref.bool_false,
                },
            }
        }
        if (rhs_is_float) {
            if (rhs_val.floatHasFraction(mod)) {
                switch (op) {
                    .eq => return Air.Inst.Ref.bool_false,
                    .neq => return Air.Inst.Ref.bool_true,
                    else => {},
                }
            }

            var bigint = try float128IntPartToBigInt(sema.gpa, rhs_val.toFloat(f128, mod));
            defer bigint.deinit();
            if (rhs_val.floatHasFraction(mod)) {
                if (rhs_is_signed) {
                    try bigint.addScalar(&bigint, -1);
                } else {
                    try bigint.addScalar(&bigint, 1);
                }
            }
            rhs_bits = bigint.toConst().bitCountTwosComp();
        } else {
            rhs_bits = rhs_val.intBitCountTwosComp(mod);
        }
        rhs_bits += @intFromBool(!rhs_is_signed and dest_int_is_signed);
    } else if (rhs_is_float) {
        dest_float_type = rhs_ty;
    } else {
        const int_info = rhs_ty.intInfo(mod);
        rhs_bits = int_info.bits + @intFromBool(int_info.signedness == .unsigned and dest_int_is_signed);
    }

    const dest_ty = if (dest_float_type) |ft| ft else blk: {
        const max_bits = @max(lhs_bits, rhs_bits);
        const casted_bits = std.math.cast(u16, max_bits) orelse return sema.fail(block, src, "{d} exceeds maximum integer bit count", .{max_bits});
        const signedness: std.builtin.Signedness = if (dest_int_is_signed) .signed else .unsigned;
        break :blk try mod.intType(signedness, casted_bits);
    };
    const casted_lhs = try sema.coerce(block, dest_ty, lhs, lhs_src);
    const casted_rhs = try sema.coerce(block, dest_ty, rhs, rhs_src);

    return block.addBinOp(Air.Inst.Tag.fromCmpOp(op, block.float_mode == .Optimized), casted_lhs, casted_rhs);
}
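
// For example, comparing across numeric types does not require the user to
// pick a common type up front:
//     var a: u8 = 200;
//     var b: i8 = -1;
//     const ok = a > b; // lowered via a signed integer wide enough for both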
|
|
|
|
/// Asserts that LHS value is an int or comptime int and not undefined, and
|
|
/// that RHS type is an int. Given a const LHS and an unknown RHS, attempt to
|
|
/// determine whether `op` has a guaranteed result.
|
|
/// If it cannot be determined, returns null.
|
|
/// Otherwise returns a bool for the guaranteed comparison operation.
|
|
fn compareIntsOnlyPossibleResult(
|
|
sema: *Sema,
|
|
lhs_val: Value,
|
|
op: std.math.CompareOperator,
|
|
rhs_ty: Type,
|
|
) Allocator.Error!?bool {
|
|
const mod = sema.mod;
|
|
const rhs_info = rhs_ty.intInfo(mod);
|
|
const vs_zero = lhs_val.orderAgainstZeroAdvanced(mod, sema) catch unreachable;
|
|
const is_zero = vs_zero == .eq;
|
|
const is_negative = vs_zero == .lt;
|
|
const is_positive = vs_zero == .gt;
|
|
|
|
// Anything vs. zero-sized type has guaranteed outcome.
|
|
if (rhs_info.bits == 0) return switch (op) {
|
|
.eq, .lte, .gte => is_zero,
|
|
.neq, .lt, .gt => !is_zero,
|
|
};
|
|
|
|
// Special case for i1, which can only be 0 or -1.
|
|
// Zero and positive ints have guaranteed outcome.
|
|
if (rhs_info.bits == 1 and rhs_info.signedness == .signed) {
|
|
if (is_positive) return switch (op) {
|
|
.gt, .gte, .neq => true,
|
|
.lt, .lte, .eq => false,
|
|
};
|
|
if (is_zero) return switch (op) {
|
|
.gte => true,
|
|
.lt => false,
|
|
.gt, .lte, .eq, .neq => null,
|
|
};
|
|
}
|
|
|
|
// Negative vs. unsigned has guaranteed outcome.
|
|
if (rhs_info.signedness == .unsigned and is_negative) return switch (op) {
|
|
.eq, .gt, .gte => false,
|
|
.neq, .lt, .lte => true,
|
|
};
    const sign_adj = @intFromBool(!is_negative and rhs_info.signedness == .signed);
    const req_bits = lhs_val.intBitCountTwosComp(mod) + sign_adj;

    // No sized type can have more than 65535 bits.
    // The RHS operand is either a runtime value or a sized (but undefined) constant.
    if (req_bits > 65535) return switch (op) {
        .lt, .lte => is_negative,
        .gt, .gte => is_positive,
        .eq => false,
        .neq => true,
    };
    const fits = req_bits <= rhs_info.bits;

    // Oversized int has guaranteed outcome.
    switch (op) {
        .eq => return if (!fits) false else null,
        .neq => return if (!fits) true else null,
        .lt, .lte => if (!fits) return is_negative,
        .gt, .gte => if (!fits) return !is_negative,
    }

    // For any other comparison, we need to know if the LHS value is
    // equal to the maximum or minimum possible value of the RHS type.
    const edge: struct { min: bool, max: bool } = edge: {
        if (is_zero and rhs_info.signedness == .unsigned) break :edge .{
            .min = true,
            .max = false,
        };

        if (req_bits != rhs_info.bits) break :edge .{
            .min = false,
            .max = false,
        };

        const ty = try mod.intType(
            if (is_negative) .signed else .unsigned,
            @as(u16, @intCast(req_bits)),
        );
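        // popCount on the twos-complement representation: a negative value is the
        // minimum of this type iff only its sign bit is set, and a non-negative
        // value is the maximum iff all of its value bits are set.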
        const pop_count = lhs_val.popCount(ty, mod);

        if (is_negative) {
            break :edge .{
                .min = pop_count == 1,
                .max = false,
            };
        } else {
            break :edge .{
                .min = false,
                .max = pop_count == req_bits - sign_adj,
            };
        }
    };

    assert(fits);
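    // `eq` and `neq` always return from the switch above (either a definite
    // answer or null), so only the ordering operators can reach this point.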
    return switch (op) {
        .lt => if (edge.max) false else null,
        .lte => if (edge.min) true else null,
        .gt => if (edge.min) false else null,
        .gte => if (edge.max) true else null,
        .eq, .neq => unreachable,
    };
}

/// Asserts that lhs and rhs types are both vectors.
fn cmpVector(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    lhs: Air.Inst.Ref,
    rhs: Air.Inst.Ref,
    op: std.math.CompareOperator,
    lhs_src: LazySrcLoc,
    rhs_src: LazySrcLoc,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const lhs_ty = sema.typeOf(lhs);
    const rhs_ty = sema.typeOf(rhs);
    assert(lhs_ty.zigTypeTag(mod) == .Vector);
    assert(rhs_ty.zigTypeTag(mod) == .Vector);
    try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);

    const resolved_ty = try sema.resolvePeerTypes(block, src, &.{ lhs, rhs }, .{ .override = &.{ lhs_src, rhs_src } });
    const casted_lhs = try sema.coerce(block, resolved_ty, lhs, lhs_src);
    const casted_rhs = try sema.coerce(block, resolved_ty, rhs, rhs_src);

    const result_ty = try mod.vectorType(.{
        .len = lhs_ty.vectorLen(mod),
        .child = .bool_type,
    });
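
    // If both vectors are comptime-known, the comparison is done at comptime;
    // otherwise record which operand is responsible for the runtime dispatch.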
    const runtime_src: LazySrcLoc = src: {
        if (try sema.resolveMaybeUndefVal(casted_lhs)) |lhs_val| {
            if (try sema.resolveMaybeUndefVal(casted_rhs)) |rhs_val| {
                if (lhs_val.isUndef(mod) or rhs_val.isUndef(mod)) {
                    return mod.undefRef(result_ty);
                }
                const cmp_val = try sema.compareVector(lhs_val, op, rhs_val, resolved_ty);
                return Air.internedToRef(cmp_val.toIntern());
            } else {
                break :src rhs_src;
            }
        } else {
            break :src lhs_src;
        }
    };

    try sema.requireRuntimeBlock(block, src, runtime_src);
    return block.addCmpVector(casted_lhs, casted_rhs, op);
}

fn wrapOptional(
    sema: *Sema,
    block: *Block,
    dest_ty: Type,
    inst: Air.Inst.Ref,
    inst_src: LazySrcLoc,
) !Air.Inst.Ref {
    if (try sema.resolveMaybeUndefVal(inst)) |val| {
        return Air.internedToRef((try sema.mod.intern(.{ .opt = .{
            .ty = dest_ty.toIntern(),
            .val = val.toIntern(),
        } })));
    }

    try sema.requireRuntimeBlock(block, inst_src, null);
    return block.addTyOp(.wrap_optional, dest_ty, inst);
}

fn wrapErrorUnionPayload(
    sema: *Sema,
    block: *Block,
    dest_ty: Type,
    inst: Air.Inst.Ref,
    inst_src: LazySrcLoc,
) !Air.Inst.Ref {
    const mod = sema.mod;
    const dest_payload_ty = dest_ty.errorUnionPayload(mod);
    const coerced = try sema.coerceExtra(block, dest_payload_ty, inst, inst_src, .{ .report_err = false });
    if (try sema.resolveMaybeUndefVal(coerced)) |val| {
        return Air.internedToRef((try mod.intern(.{ .error_union = .{
            .ty = dest_ty.toIntern(),
            .val = .{ .payload = try val.intern(dest_payload_ty, mod) },
        } })));
    }
    try sema.requireRuntimeBlock(block, inst_src, null);
    try sema.queueFullTypeResolution(dest_payload_ty);
    return block.addTyOp(.wrap_errunion_payload, dest_ty, coerced);
}

fn wrapErrorUnionSet(
    sema: *Sema,
    block: *Block,
    dest_ty: Type,
    inst: Air.Inst.Ref,
    inst_src: LazySrcLoc,
) !Air.Inst.Ref {
    const mod = sema.mod;
    const ip = &mod.intern_pool;
    const inst_ty = sema.typeOf(inst);
    const dest_err_set_ty = dest_ty.errorUnionSet(mod);
    if (try sema.resolveMaybeUndefVal(inst)) |val| {
        const expected_name = mod.intern_pool.indexToKey(val.toIntern()).err.name;
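        // The error value is comptime-known, so verify that its name is a member
        // of the destination error set before building the result value.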
        switch (dest_err_set_ty.toIntern()) {
            .anyerror_type => {},
            .adhoc_inferred_error_set_type => ok: {
                const ies = sema.fn_ret_ty_ies.?;
                switch (ies.resolved) {
                    .anyerror_type => break :ok,
                    .none => if (.ok == try sema.coerceInMemoryAllowedErrorSets(block, dest_err_set_ty, inst_ty, inst_src, inst_src)) {
                        break :ok;
                    },
                    else => |i| if (ip.indexToKey(i).error_set_type.nameIndex(ip, expected_name) != null) {
                        break :ok;
                    },
                }
                return sema.failWithErrorSetCodeMissing(block, inst_src, dest_err_set_ty, inst_ty);
            },
            else => switch (ip.indexToKey(dest_err_set_ty.toIntern())) {
                .error_set_type => |error_set_type| ok: {
                    if (error_set_type.nameIndex(ip, expected_name) != null) break :ok;
                    return sema.failWithErrorSetCodeMissing(block, inst_src, dest_err_set_ty, inst_ty);
                },
                .inferred_error_set_type => |func_index| ok: {
                    // We carefully do this in an order that avoids unnecessarily
                    // resolving the destination error set type.
                    switch (ip.funcIesResolved(func_index).*) {
                        .anyerror_type => break :ok,
                        .none => if (.ok == try sema.coerceInMemoryAllowedErrorSets(block, dest_err_set_ty, inst_ty, inst_src, inst_src)) {
                            break :ok;
                        },
                        else => |i| if (ip.indexToKey(i).error_set_type.nameIndex(ip, expected_name) != null) {
                            break :ok;
                        },
                    }

                    return sema.failWithErrorSetCodeMissing(block, inst_src, dest_err_set_ty, inst_ty);
                },
                else => unreachable,
            },
        }
        return Air.internedToRef((try mod.intern(.{ .error_union = .{
            .ty = dest_ty.toIntern(),
            .val = .{ .err_name = expected_name },
        } })));
    }

    try sema.requireRuntimeBlock(block, inst_src, null);
    const coerced = try sema.coerce(block, dest_err_set_ty, inst, inst_src);
    return block.addTyOp(.wrap_errunion_err, dest_ty, coerced);
}

fn unionToTag(
    sema: *Sema,
    block: *Block,
    enum_ty: Type,
    un: Air.Inst.Ref,
    un_src: LazySrcLoc,
) !Air.Inst.Ref {
    const mod = sema.mod;
    if (try sema.typeHasOnePossibleValue(enum_ty)) |opv| {
        return Air.internedToRef(opv.toIntern());
    }
    if (try sema.resolveMaybeUndefVal(un)) |un_val| {
        return Air.internedToRef(un_val.unionTag(mod).toIntern());
    }
    try sema.requireRuntimeBlock(block, un_src, null);
    return block.addTyOp(.get_union_tag, enum_ty, un);
}

const PeerResolveStrategy = enum {
    /// The type is not known.
    /// If refined no further, this is equivalent to `exact`.
    unknown,
    /// The type may be an error set or error union.
    /// If refined no further, it is an error set.
    error_set,
    /// The type must be some error union.
    error_union,
    /// The type may be `@TypeOf(null)`, an optional, or a C pointer.
    /// If refined no further, it is `@TypeOf(null)`.
    nullable,
    /// The type must be some optional or a C pointer.
    /// If refined no further, it is an optional.
    optional,
    /// The type must be either an array or a vector.
    /// If refined no further, it is an array.
    array,
    /// The type must be a vector.
    vector,
    /// The type must be a C pointer.
    c_ptr,
    /// The type must be a pointer (C or not).
    /// If refined no further, it is a non-C pointer.
    ptr,
    /// The type must be a function or a pointer to a function.
    /// If refined no further, it is a function.
    func,
    /// The type must be an enum literal, or some specific enum or union. Which one is decided
    /// afterwards based on the types in question.
    enum_or_union,
    /// The type must be some integer or float type.
    /// If refined no further, it is `comptime_int`.
    comptime_int,
    /// The type must be some float type.
    /// If refined no further, it is `comptime_float`.
    comptime_float,
    /// The type must be some float or fixed-width integer type.
    /// If refined no further, it is some fixed-width integer type.
    fixed_int,
    /// The type must be some fixed-width float type.
    fixed_float,
    /// The type must be a struct literal or tuple type.
    coercible_struct,
    /// The peers must all be of the same type.
    exact,

    /// Given two strategies, find a strategy that satisfies both, if one exists. If no such
    /// strategy exists, any strategy may be returned; an error will be emitted when the caller
    /// attempts to use the strategy to resolve the type.
    /// Strategy `a` comes from the peer at index `reason_peer.*`, while strategy `b` comes
    /// from the peer at index `b_peer_idx`. `reason_peer` is updated to reflect the reason
    /// for the new strategy.
    fn merge(a: PeerResolveStrategy, b: PeerResolveStrategy, reason_peer: *usize, b_peer_idx: usize) PeerResolveStrategy {
        // Our merging should be order-independent. Thus, even though the enum order is arbitrary,
        // by sorting the tags and switching first on the smaller, we have half as many cases to
        // worry about (since we avoid the duplicates).
        const s0_is_a = @intFromEnum(a) <= @intFromEnum(b);
        const s0 = if (s0_is_a) a else b;
        const s1 = if (s0_is_a) b else a;

        const ReasonMethod = enum {
            all_s0,
            all_s1,
            either,
        };

        const res: struct { ReasonMethod, PeerResolveStrategy } = switch (s0) {
            .unknown => .{ .all_s1, s1 },
            .error_set => switch (s1) {
                .error_set => .{ .either, .error_set },
                else => .{ .all_s0, .error_union },
            },
            .error_union => switch (s1) {
                .error_union => .{ .either, .error_union },
                else => .{ .all_s0, .error_union },
            },
            .nullable => switch (s1) {
                .nullable => .{ .either, .nullable },
                .c_ptr => .{ .all_s1, .c_ptr },
                else => .{ .all_s0, .optional },
            },
            .optional => switch (s1) {
                .optional => .{ .either, .optional },
                .c_ptr => .{ .all_s1, .c_ptr },
                else => .{ .all_s0, .optional },
            },
            .array => switch (s1) {
                .array => .{ .either, .array },
                .vector => .{ .all_s1, .vector },
                else => .{ .all_s0, .array },
            },
            .vector => switch (s1) {
                .vector => .{ .either, .vector },
                else => .{ .all_s0, .vector },
            },
            .c_ptr => switch (s1) {
                .c_ptr => .{ .either, .c_ptr },
                else => .{ .all_s0, .c_ptr },
            },
            .ptr => switch (s1) {
                .ptr => .{ .either, .ptr },
                else => .{ .all_s0, .ptr },
            },
            .func => switch (s1) {
                .func => .{ .either, .func },
                else => .{ .all_s1, s1 }, // doesn't override anything later
            },
            .enum_or_union => switch (s1) {
                .enum_or_union => .{ .either, .enum_or_union },
                else => .{ .all_s0, .enum_or_union },
            },
            .comptime_int => switch (s1) {
                .comptime_int => .{ .either, .comptime_int },
                else => .{ .all_s1, s1 }, // doesn't override anything later
            },
            .comptime_float => switch (s1) {
                .comptime_float => .{ .either, .comptime_float },
                else => .{ .all_s1, s1 }, // doesn't override anything later
            },
            .fixed_int => switch (s1) {
                .fixed_int => .{ .either, .fixed_int },
                else => .{ .all_s1, s1 }, // doesn't override anything later
            },
            .fixed_float => switch (s1) {
                .fixed_float => .{ .either, .fixed_float },
                else => .{ .all_s1, s1 }, // doesn't override anything later
            },
            .coercible_struct => switch (s1) {
                .exact => .{ .all_s1, .exact },
                else => .{ .all_s0, .coercible_struct },
            },
            .exact => .{ .all_s0, .exact },
        };

        switch (res[0]) {
            .all_s0 => {
                if (!s0_is_a) {
                    reason_peer.* = b_peer_idx;
                }
            },
            .all_s1 => {
                if (s0_is_a) {
                    reason_peer.* = b_peer_idx;
                }
            },
            .either => {
                // Prefer the earliest peer
                reason_peer.* = @min(reason_peer.*, b_peer_idx);
            },
        }

        return res[1];
    }

    fn select(ty: Type, mod: *Module) PeerResolveStrategy {
        return switch (ty.zigTypeTag(mod)) {
            .Type, .Void, .Bool, .Opaque, .Frame, .AnyFrame => .exact,
            .NoReturn, .Undefined => .unknown,
            .Null => .nullable,
            .ComptimeInt => .comptime_int,
            .Int => .fixed_int,
            .ComptimeFloat => .comptime_float,
            .Float => .fixed_float,
            .Pointer => if (ty.ptrInfo(mod).flags.size == .C) .c_ptr else .ptr,
            .Array => .array,
            .Vector => .vector,
            .Optional => .optional,
            .ErrorSet => .error_set,
            .ErrorUnion => .error_union,
            .EnumLiteral, .Enum, .Union => .enum_or_union,
            .Struct => if (ty.isTupleOrAnonStruct(mod)) .coercible_struct else .exact,
            .Fn => .func,
        };
    }
};

const PeerResolveResult = union(enum) {
    /// The peer type resolution was successful, and resulted in the given type.
    success: Type,
    /// There was some generic conflict between two peers.
    conflict: struct {
        peer_idx_a: usize,
        peer_idx_b: usize,
    },
    /// There was an error when resolving the type of a struct or tuple field.
    field_error: struct {
        /// The name of the field which caused the failure.
        field_name: []const u8,
        /// The type of this field in each peer.
        field_types: []Type,
        /// The error from resolving the field type. Guaranteed not to be `success`.
        sub_result: *PeerResolveResult,
    },

    fn report(
        result: PeerResolveResult,
        sema: *Sema,
        block: *Block,
        src: LazySrcLoc,
        instructions: []const Air.Inst.Ref,
        candidate_srcs: Module.PeerTypeCandidateSrc,
    ) !*Module.ErrorMsg {
        const mod = sema.mod;
        const decl_ptr = mod.declPtr(block.src_decl);

        var opt_msg: ?*Module.ErrorMsg = null;
        errdefer if (opt_msg) |msg| msg.destroy(sema.gpa);

        // If we mention fields we'll want to include field types, so put peer types in a buffer
        var peer_tys = try sema.arena.alloc(Type, instructions.len);
        for (peer_tys, instructions) |*ty, inst| {
            ty.* = sema.typeOf(inst);
        }

        var cur = result;
        while (true) {
            var conflict_idx: [2]usize = undefined;

            switch (cur) {
                .success => unreachable,
                .conflict => |conflict| {
                    // Fall through to two-peer conflict handling below
                    conflict_idx = .{
                        conflict.peer_idx_a,
                        conflict.peer_idx_b,
                    };
                },
                .field_error => |field_error| {
                    const fmt = "struct field '{s}' has conflicting types";
                    const args = .{field_error.field_name};
                    if (opt_msg) |msg| {
                        try sema.errNote(block, src, msg, fmt, args);
                    } else {
                        opt_msg = try sema.errMsg(block, src, fmt, args);
                    }

                    // Continue on to child error
                    cur = field_error.sub_result.*;
                    peer_tys = field_error.field_types;
                    continue;
                },
            }

            // This is the path for reporting a generic conflict between two peers.

            if (conflict_idx[1] < conflict_idx[0]) {
                // b comes first in source, so it's better if it comes first in the error
                std.mem.swap(usize, &conflict_idx[0], &conflict_idx[1]);
            }

            const conflict_tys: [2]Type = .{
                peer_tys[conflict_idx[0]],
                peer_tys[conflict_idx[1]],
            };
            const conflict_srcs: [2]?LazySrcLoc = .{
                candidate_srcs.resolve(mod, decl_ptr, conflict_idx[0]),
                candidate_srcs.resolve(mod, decl_ptr, conflict_idx[1]),
            };

            const fmt = "incompatible types: '{}' and '{}'";
            const args = .{
                conflict_tys[0].fmt(mod),
                conflict_tys[1].fmt(mod),
            };
            const msg = if (opt_msg) |msg| msg: {
                try sema.errNote(block, src, msg, fmt, args);
                break :msg msg;
            } else msg: {
                const msg = try sema.errMsg(block, src, fmt, args);
                opt_msg = msg;
                break :msg msg;
            };

            if (conflict_srcs[0]) |src_loc| try sema.errNote(block, src_loc, msg, "type '{}' here", .{conflict_tys[0].fmt(mod)});
            if (conflict_srcs[1]) |src_loc| try sema.errNote(block, src_loc, msg, "type '{}' here", .{conflict_tys[1].fmt(mod)});

            // No child error
            break;
        }

        return opt_msg.?;
    }
};

fn resolvePeerTypes(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    instructions: []const Air.Inst.Ref,
    candidate_srcs: Module.PeerTypeCandidateSrc,
) !Type {
    switch (instructions.len) {
        0 => return Type.noreturn,
        1 => return sema.typeOf(instructions[0]),
        else => {},
    }

    var peer_tys = try sema.arena.alloc(?Type, instructions.len);
    var peer_vals = try sema.arena.alloc(?Value, instructions.len);

    for (instructions, peer_tys, peer_vals) |inst, *ty, *val| {
        ty.* = sema.typeOf(inst);
        val.* = try sema.resolveMaybeUndefVal(inst);
    }

    switch (try sema.resolvePeerTypesInner(block, src, peer_tys, peer_vals)) {
        .success => |ty| return ty,
        else => |result| {
            const msg = try result.report(sema, block, src, instructions, candidate_srcs);
            return sema.failWithOwnedErrorMsg(msg);
        },
    }
}
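
/// Inner implementation of peer type resolution, used recursively for composite
/// types. `peer_tys` and `peer_vals` are scratch buffers that are mutated in
/// place as wrapper types (error unions, optionals, etc.) are peeled off; a
/// `null` entry means the corresponding peer no longer participates at the
/// current level.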
fn resolvePeerTypesInner(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    peer_tys: []?Type,
    peer_vals: []?Value,
) !PeerResolveResult {
    const mod = sema.mod;

    var strat_reason: usize = 0;
    var s: PeerResolveStrategy = .unknown;
    for (peer_tys, 0..) |opt_ty, i| {
        const ty = opt_ty orelse continue;
        s = s.merge(PeerResolveStrategy.select(ty, mod), &strat_reason, i);
    }

    if (s == .unknown) {
        // The whole thing was noreturn or undefined - try to do an exact match
        s = .exact;
    } else {
        // There was something other than noreturn and undefined, so we can ignore those peers
        for (peer_tys) |*ty_ptr| {
            const ty = ty_ptr.* orelse continue;
            switch (ty.zigTypeTag(mod)) {
                .NoReturn, .Undefined => ty_ptr.* = null,
                else => {},
            }
        }
    }

    const target = mod.getTarget();

    switch (s) {
        .unknown => unreachable,

        .error_set => {
            var final_set: ?Type = null;
            for (peer_tys, 0..) |opt_ty, i| {
                const ty = opt_ty orelse continue;
                if (ty.zigTypeTag(mod) != .ErrorSet) return .{ .conflict = .{
                    .peer_idx_a = strat_reason,
                    .peer_idx_b = i,
                } };
                if (final_set) |cur_set| {
                    final_set = try sema.maybeMergeErrorSets(block, src, cur_set, ty);
                } else {
                    final_set = ty;
                }
            }
            return .{ .success = final_set.? };
        },

        .error_union => {
            var final_set: ?Type = null;
            for (peer_tys, peer_vals) |*ty_ptr, *val_ptr| {
                const ty = ty_ptr.* orelse continue;
                const set_ty = switch (ty.zigTypeTag(mod)) {
                    .ErrorSet => blk: {
                        ty_ptr.* = null; // no payload to decide on
                        val_ptr.* = null;
                        break :blk ty;
                    },
                    .ErrorUnion => blk: {
                        const set_ty = ty.errorUnionSet(mod);
                        ty_ptr.* = ty.errorUnionPayload(mod);
                        if (val_ptr.*) |eu_val| switch (mod.intern_pool.indexToKey(eu_val.toIntern())) {
                            .error_union => |eu| switch (eu.val) {
                                .payload => |payload_ip| val_ptr.* = payload_ip.toValue(),
                                .err_name => val_ptr.* = null,
                            },
                            .undef => val_ptr.* = (try sema.mod.intern(.{ .undef = ty_ptr.*.?.toIntern() })).toValue(),
                            else => unreachable,
                        };
                        break :blk set_ty;
                    },
                    else => continue, // whole type is the payload
                };
                if (final_set) |cur_set| {
                    final_set = try sema.maybeMergeErrorSets(block, src, cur_set, set_ty);
                } else {
                    final_set = set_ty;
                }
            }
            assert(final_set != null);
            const final_payload = switch (try sema.resolvePeerTypesInner(
                block,
                src,
                peer_tys,
                peer_vals,
            )) {
                .success => |ty| ty,
                else => |result| return result,
            };
            return .{ .success = try mod.errorUnionType(final_set.?, final_payload) };
        },

        .nullable => {
            for (peer_tys, 0..) |opt_ty, i| {
                const ty = opt_ty orelse continue;
                if (!ty.eql(Type.null, mod)) return .{ .conflict = .{
                    .peer_idx_a = strat_reason,
                    .peer_idx_b = i,
                } };
            }
            return .{ .success = Type.null };
        },

        .optional => {
            for (peer_tys, peer_vals) |*ty_ptr, *val_ptr| {
                const ty = ty_ptr.* orelse continue;
                switch (ty.zigTypeTag(mod)) {
                    .Null => {
                        ty_ptr.* = null;
                        val_ptr.* = null;
                    },
                    .Optional => {
                        ty_ptr.* = ty.optionalChild(mod);
                        if (val_ptr.*) |opt_val| val_ptr.* = if (!opt_val.isUndef(mod)) opt_val.optionalValue(mod) else null;
                    },
                    else => {},
                }
            }
            const child_ty = switch (try sema.resolvePeerTypesInner(
                block,
                src,
                peer_tys,
                peer_vals,
            )) {
                .success => |ty| ty,
                else => |result| return result,
            };
            return .{ .success = try mod.optionalType(child_ty.toIntern()) };
        },

        .array => {
            // Index of the first non-null peer
            var opt_first_idx: ?usize = null;
            // Index of the first array or vector peer (i.e. not a tuple)
            var opt_first_arr_idx: ?usize = null;
            // Set to non-null once we see any peer, even a tuple
            var len: u64 = undefined;
            var sentinel: ?Value = undefined;
            // Only set once we see a non-tuple peer
            var elem_ty: Type = undefined;

            for (peer_tys, 0..) |*ty_ptr, i| {
                const ty = ty_ptr.* orelse continue;

                if (!ty.isArrayOrVector(mod)) {
                    // We allow tuples of the correct length. We won't validate their elem type, since the elements can be coerced.
                    const arr_like = sema.typeIsArrayLike(ty) orelse return .{ .conflict = .{
                        .peer_idx_a = strat_reason,
                        .peer_idx_b = i,
                    } };

                    if (opt_first_idx) |first_idx| {
                        if (arr_like.len != len) return .{ .conflict = .{
                            .peer_idx_a = first_idx,
                            .peer_idx_b = i,
                        } };
                    } else {
                        opt_first_idx = i;
                        len = arr_like.len;
                    }

                    sentinel = null;

                    continue;
                }

                const first_arr_idx = opt_first_arr_idx orelse {
                    if (opt_first_idx == null) {
                        opt_first_idx = i;
                        len = ty.arrayLen(mod);
                        sentinel = ty.sentinel(mod);
                    }
                    opt_first_arr_idx = i;
                    elem_ty = ty.childType(mod);
                    continue;
                };

                if (ty.arrayLen(mod) != len) return .{ .conflict = .{
                    .peer_idx_a = first_arr_idx,
                    .peer_idx_b = i,
                } };

                if (!ty.childType(mod).eql(elem_ty, mod)) {
                    return .{ .conflict = .{
                        .peer_idx_a = first_arr_idx,
                        .peer_idx_b = i,
                    } };
                }

                if (sentinel) |cur_sent| {
                    if (ty.sentinel(mod)) |peer_sent| {
                        if (!peer_sent.eql(cur_sent, elem_ty, mod)) sentinel = null;
                    } else {
                        sentinel = null;
                    }
                }
            }

            // There should always be at least one array or vector peer
            assert(opt_first_arr_idx != null);

            return .{ .success = try mod.arrayType(.{
                .len = len,
                .child = elem_ty.toIntern(),
                .sentinel = if (sentinel) |sent_val| sent_val.toIntern() else .none,
            }) };
        },

        .vector => {
            var len: ?u64 = null;
            var first_idx: usize = undefined;
            for (peer_tys, peer_vals, 0..) |*ty_ptr, *val_ptr, i| {
                const ty = ty_ptr.* orelse continue;

                if (!ty.isArrayOrVector(mod)) {
                    // Allow tuples of the correct length
                    const arr_like = sema.typeIsArrayLike(ty) orelse return .{ .conflict = .{
                        .peer_idx_a = strat_reason,
                        .peer_idx_b = i,
                    } };

                    if (len) |expect_len| {
                        if (arr_like.len != expect_len) return .{ .conflict = .{
                            .peer_idx_a = first_idx,
                            .peer_idx_b = i,
                        } };
                    } else {
                        len = arr_like.len;
                        first_idx = i;
                    }

                    // Tuples won't participate in the child type resolution. We'll resolve without
                    // them, and if the tuples have a bad type, we'll get a coercion error later.
                    ty_ptr.* = null;
                    val_ptr.* = null;

                    continue;
                }

                if (len) |expect_len| {
                    if (ty.arrayLen(mod) != expect_len) return .{ .conflict = .{
                        .peer_idx_a = first_idx,
                        .peer_idx_b = i,
                    } };
                } else {
                    len = ty.arrayLen(mod);
                    first_idx = i;
                }

                ty_ptr.* = ty.childType(mod);
                val_ptr.* = null; // multiple child vals, so we can't easily use them in PTR
            }

            const child_ty = switch (try sema.resolvePeerTypesInner(
                block,
                src,
                peer_tys,
                peer_vals,
            )) {
                .success => |ty| ty,
                else => |result| return result,
            };

            return .{ .success = try mod.vectorType(.{
                .len = @as(u32, @intCast(len.?)),
                .child = child_ty.toIntern(),
            }) };
        },

        .c_ptr => {
            var opt_ptr_info: ?InternPool.Key.PtrType = null;
            var first_idx: usize = undefined;
            for (peer_tys, peer_vals, 0..) |opt_ty, opt_val, i| {
                const ty = opt_ty orelse continue;
                switch (ty.zigTypeTag(mod)) {
                    .ComptimeInt => continue, // comptime-known integers can always coerce to C pointers
                    .Int => {
                        if (opt_val != null) {
                            // Always allow the coercion for comptime-known ints
                            continue;
                        } else {
                            // Runtime-known, so check if the type is no bigger than a usize
                            const ptr_bits = target.ptrBitWidth();
                            const bits = ty.intInfo(mod).bits;
                            if (bits <= ptr_bits) continue;
                        }
                    },
                    .Null => continue,
                    else => {},
                }

                if (!ty.isPtrAtRuntime(mod)) return .{ .conflict = .{
                    .peer_idx_a = strat_reason,
                    .peer_idx_b = i,
                } };

                // Goes through optionals
                const peer_info = ty.ptrInfo(mod);

                var ptr_info = opt_ptr_info orelse {
                    opt_ptr_info = peer_info;
                    opt_ptr_info.?.flags.size = .C;
                    first_idx = i;
                    continue;
                };

                // Try peer -> cur, then cur -> peer
                ptr_info.child = ((try sema.resolvePairInMemoryCoercible(block, src, ptr_info.child.toType(), peer_info.child.toType())) orelse {
                    return .{ .conflict = .{
                        .peer_idx_a = first_idx,
                        .peer_idx_b = i,
                    } };
                }).toIntern();

                if (ptr_info.sentinel != .none and peer_info.sentinel != .none) {
                    const peer_sent = try mod.intern_pool.getCoerced(sema.gpa, ptr_info.sentinel, ptr_info.child);
                    const ptr_sent = try mod.intern_pool.getCoerced(sema.gpa, peer_info.sentinel, ptr_info.child);
                    if (ptr_sent == peer_sent) {
                        ptr_info.sentinel = ptr_sent;
                    } else {
                        ptr_info.sentinel = .none;
                    }
                } else {
                    ptr_info.sentinel = .none;
                }

                // The alignment can safely always be non-zero here; Module.ptrType will
                // canonicalize it back to zero when it matches the ABI alignment.
                ptr_info.flags.alignment = Alignment.fromByteUnits(@min(
                    ptr_info.flags.alignment.toByteUnitsOptional() orelse
                        ptr_info.child.toType().abiAlignment(mod),
                    peer_info.flags.alignment.toByteUnitsOptional() orelse
                        peer_info.child.toType().abiAlignment(mod),
                ));
                if (ptr_info.flags.address_space != peer_info.flags.address_space) {
                    return .{ .conflict = .{
                        .peer_idx_a = first_idx,
                        .peer_idx_b = i,
                    } };
                }

                if (ptr_info.packed_offset.bit_offset != peer_info.packed_offset.bit_offset or
                    ptr_info.packed_offset.host_size != peer_info.packed_offset.host_size)
                {
                    return .{ .conflict = .{
                        .peer_idx_a = first_idx,
                        .peer_idx_b = i,
                    } };
                }

                ptr_info.flags.is_const = ptr_info.flags.is_const or peer_info.flags.is_const;
                ptr_info.flags.is_volatile = ptr_info.flags.is_volatile or peer_info.flags.is_volatile;

                opt_ptr_info = ptr_info;
            }
            return .{ .success = try mod.ptrType(opt_ptr_info.?) };
        },

        .ptr => {
            // If we've resolved to a `[]T` but then see a `[*]T`, we can resolve to a `[*]T` only
            // if there were no actual slices. Else, we want the slice index to report a conflict.
            var opt_slice_idx: ?usize = null;

            var opt_ptr_info: ?InternPool.Key.PtrType = null;
            var first_idx: usize = undefined;
            var other_idx: usize = undefined; // We sometimes need a second peer index to report a generic error

            for (peer_tys, 0..) |opt_ty, i| {
                const ty = opt_ty orelse continue;
                const peer_info: InternPool.Key.PtrType = switch (ty.zigTypeTag(mod)) {
                    .Pointer => ty.ptrInfo(mod),
                    .Fn => .{
                        .child = ty.toIntern(),
                        .flags = .{
                            .address_space = target_util.defaultAddressSpace(target, .global_constant),
                        },
                    },
                    else => return .{ .conflict = .{
                        .peer_idx_a = strat_reason,
                        .peer_idx_b = i,
                    } },
                };

                switch (peer_info.flags.size) {
                    .One, .Many => {},
                    .Slice => opt_slice_idx = i,
                    .C => return .{ .conflict = .{
                        .peer_idx_a = strat_reason,
                        .peer_idx_b = i,
                    } },
                }

                var ptr_info = opt_ptr_info orelse {
                    opt_ptr_info = peer_info;
                    first_idx = i;
                    continue;
                };

                other_idx = i;

                // We want to return this in a lot of cases, so alias it here for convenience
                const generic_err: PeerResolveResult = .{ .conflict = .{
                    .peer_idx_a = first_idx,
                    .peer_idx_b = i,
                } };

                // The alignment can safely always be non-zero here; Module.ptrType will
                // canonicalize it back to zero when it matches the ABI alignment.
                ptr_info.flags.alignment = Alignment.fromByteUnits(@min(
                    ptr_info.flags.alignment.toByteUnitsOptional() orelse
                        ptr_info.child.toType().abiAlignment(mod),
                    peer_info.flags.alignment.toByteUnitsOptional() orelse
                        peer_info.child.toType().abiAlignment(mod),
                ));

                if (ptr_info.flags.address_space != peer_info.flags.address_space) {
                    return generic_err;
                }

                if (ptr_info.packed_offset.bit_offset != peer_info.packed_offset.bit_offset or
                    ptr_info.packed_offset.host_size != peer_info.packed_offset.host_size)
                {
                    return generic_err;
                }

                ptr_info.flags.is_const = ptr_info.flags.is_const or peer_info.flags.is_const;
                ptr_info.flags.is_volatile = ptr_info.flags.is_volatile or peer_info.flags.is_volatile;

                const peer_sentinel: InternPool.Index = switch (peer_info.flags.size) {
                    .One => switch (mod.intern_pool.indexToKey(peer_info.child)) {
                        .array_type => |array_type| array_type.sentinel,
                        else => .none,
                    },
                    .Many, .Slice => peer_info.sentinel,
                    .C => unreachable,
                };

                const cur_sentinel: InternPool.Index = switch (ptr_info.flags.size) {
                    .One => switch (mod.intern_pool.indexToKey(ptr_info.child)) {
                        .array_type => |array_type| array_type.sentinel,
                        else => .none,
                    },
                    .Many, .Slice => ptr_info.sentinel,
                    .C => unreachable,
                };

                // We abstract array handling slightly so that tuple pointers can work like array pointers
                const peer_pointee_array = sema.typeIsArrayLike(peer_info.child.toType());
                const cur_pointee_array = sema.typeIsArrayLike(ptr_info.child.toType());

                // This switch is just responsible for deciding the size and pointee (not including
                // single-pointer array sentinel).
                good: {
                    switch (peer_info.flags.size) {
                        .One => switch (ptr_info.flags.size) {
                            .One => {
                                if (try sema.resolvePairInMemoryCoercible(block, src, ptr_info.child.toType(), peer_info.child.toType())) |pointee| {
                                    ptr_info.child = pointee.toIntern();
                                    break :good;
                                }

                                const cur_arr = cur_pointee_array orelse return generic_err;
                                const peer_arr = peer_pointee_array orelse return generic_err;

                                if (try sema.resolvePairInMemoryCoercible(block, src, cur_arr.elem_ty, peer_arr.elem_ty)) |elem_ty| {
                                    // *[n:x]T + *[n:y]T = *[n]T
                                    if (cur_arr.len == peer_arr.len) {
                                        ptr_info.child = (try mod.arrayType(.{
                                            .len = cur_arr.len,
                                            .child = elem_ty.toIntern(),
                                        })).toIntern();
                                        break :good;
                                    }
                                    // *[a]T + *[b]T = []T
                                    ptr_info.flags.size = .Slice;
                                    ptr_info.child = elem_ty.toIntern();
                                    break :good;
                                }

                                if (peer_arr.elem_ty.toIntern() == .noreturn_type) {
                                    // *struct{} + *[a]T = []T
                                    ptr_info.flags.size = .Slice;
                                    ptr_info.child = cur_arr.elem_ty.toIntern();
                                    break :good;
                                }

                                if (cur_arr.elem_ty.toIntern() == .noreturn_type) {
                                    // *[a]T + *struct{} = []T
                                    ptr_info.flags.size = .Slice;
                                    ptr_info.child = peer_arr.elem_ty.toIntern();
                                    break :good;
                                }

                                return generic_err;
                            },
                            .Many => {
                                // Only works for *[n]T + [*]T -> [*]T
                                const arr = peer_pointee_array orelse return generic_err;
                                if (try sema.resolvePairInMemoryCoercible(block, src, ptr_info.child.toType(), arr.elem_ty)) |pointee| {
                                    ptr_info.child = pointee.toIntern();
                                    break :good;
                                }
                                if (arr.elem_ty.toIntern() == .noreturn_type) {
                                    // *struct{} + [*]T -> [*]T
                                    break :good;
                                }
                                return generic_err;
                            },
                            .Slice => {
                                // Only works for *[n]T + []T -> []T
                                const arr = peer_pointee_array orelse return generic_err;
                                if (try sema.resolvePairInMemoryCoercible(block, src, ptr_info.child.toType(), arr.elem_ty)) |pointee| {
                                    ptr_info.child = pointee.toIntern();
                                    break :good;
                                }
                                if (arr.elem_ty.toIntern() == .noreturn_type) {
                                    // *struct{} + []T -> []T
                                    break :good;
                                }
                                return generic_err;
                            },
                            .C => unreachable,
                        },
                        .Many => switch (ptr_info.flags.size) {
                            .One => {
                                // Only works for [*]T + *[n]T -> [*]T
                                const arr = cur_pointee_array orelse return generic_err;
                                if (try sema.resolvePairInMemoryCoercible(block, src, arr.elem_ty, peer_info.child.toType())) |pointee| {
                                    ptr_info.flags.size = .Many;
                                    ptr_info.child = pointee.toIntern();
                                    break :good;
                                }
                                if (arr.elem_ty.toIntern() == .noreturn_type) {
                                    // [*]T + *struct{} -> [*]T
                                    ptr_info.flags.size = .Many;
                                    ptr_info.child = peer_info.child;
                                    break :good;
                                }
                                return generic_err;
                            },
                            .Many => {
                                if (try sema.resolvePairInMemoryCoercible(block, src, ptr_info.child.toType(), peer_info.child.toType())) |pointee| {
                                    ptr_info.child = pointee.toIntern();
                                    break :good;
                                }
                                return generic_err;
                            },
                            .Slice => {
                                // Only works if no peers are actually slices
                                if (opt_slice_idx) |slice_idx| {
                                    return .{ .conflict = .{
                                        .peer_idx_a = slice_idx,
                                        .peer_idx_b = i,
                                    } };
                                }
                                // Okay, then works for [*]T + "[]T" -> [*]T
                                if (try sema.resolvePairInMemoryCoercible(block, src, ptr_info.child.toType(), peer_info.child.toType())) |pointee| {
                                    ptr_info.flags.size = .Many;
                                    ptr_info.child = pointee.toIntern();
                                    break :good;
                                }
                                return generic_err;
                            },
                            .C => unreachable,
                        },
                        .Slice => switch (ptr_info.flags.size) {
                            .One => {
                                // Only works for []T + *[n]T -> []T
                                const arr = cur_pointee_array orelse return generic_err;
                                if (try sema.resolvePairInMemoryCoercible(block, src, arr.elem_ty, peer_info.child.toType())) |pointee| {
                                    ptr_info.flags.size = .Slice;
                                    ptr_info.child = pointee.toIntern();
                                    break :good;
                                }
                                if (arr.elem_ty.toIntern() == .noreturn_type) {
                                    // []T + *struct{} -> []T
                                    ptr_info.flags.size = .Slice;
                                    ptr_info.child = peer_info.child;
                                    break :good;
                                }
                                return generic_err;
                            },
                            .Many => {
                                // Impossible! (current peer is an actual slice)
                                return generic_err;
                            },
                            .Slice => {
                                if (try sema.resolvePairInMemoryCoercible(block, src, ptr_info.child.toType(), peer_info.child.toType())) |pointee| {
                                    ptr_info.child = pointee.toIntern();
                                    break :good;
                                }
                                return generic_err;
                            },
                            .C => unreachable,
                        },
                        .C => unreachable,
                    }
                }
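
                // The type at which sentinels are compared: for a single pointer to an
                // array the sentinel lives in the array type, so use the array element
                // type; otherwise use the pointee type directly.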
                const sentinel_ty = switch (ptr_info.flags.size) {
                    .One => switch (mod.intern_pool.indexToKey(ptr_info.child)) {
                        .array_type => |array_type| array_type.child,
                        else => ptr_info.child,
                    },
                    .Many, .Slice, .C => ptr_info.child,
                };

                sentinel: {
                    no_sentinel: {
                        if (peer_sentinel == .none) break :no_sentinel;
                        if (cur_sentinel == .none) break :no_sentinel;
                        const peer_sent_coerced = try mod.intern_pool.getCoerced(sema.gpa, peer_sentinel, sentinel_ty);
                        const cur_sent_coerced = try mod.intern_pool.getCoerced(sema.gpa, cur_sentinel, sentinel_ty);
                        if (peer_sent_coerced != cur_sent_coerced) break :no_sentinel;
                        // Sentinels match
                        if (ptr_info.flags.size == .One) switch (mod.intern_pool.indexToKey(ptr_info.child)) {
                            .array_type => |array_type| ptr_info.child = (try mod.arrayType(.{
                                .len = array_type.len,
                                .child = array_type.child,
                                .sentinel = cur_sent_coerced,
                            })).toIntern(),
                            else => unreachable,
                        } else {
                            ptr_info.sentinel = cur_sent_coerced;
                        }
                        break :sentinel;
                    }
                    // Clear existing sentinel
                    ptr_info.sentinel = .none;
                    switch (mod.intern_pool.indexToKey(ptr_info.child)) {
                        .array_type => |array_type| ptr_info.child = (try mod.arrayType(.{
                            .len = array_type.len,
                            .child = array_type.child,
                            .sentinel = .none,
                        })).toIntern(),
                        else => {},
                    }
                }

                opt_ptr_info = ptr_info;
            }

            // Before we succeed, check the pointee type. If we tried to apply PTR to (for instance)
            // &.{} and &.{}, we'll currently have a pointer type of `*[0]noreturn` - we wanted to
            // coerce the empty struct to a specific type, but no peer provided one. We need to
            // detect this case and emit an error.
            const pointee = opt_ptr_info.?.child;
            switch (pointee) {
                .noreturn_type => return .{ .conflict = .{
                    .peer_idx_a = first_idx,
                    .peer_idx_b = other_idx,
                } },
                else => switch (mod.intern_pool.indexToKey(pointee)) {
                    .array_type => |array_type| if (array_type.child == .noreturn_type) return .{ .conflict = .{
                        .peer_idx_a = first_idx,
                        .peer_idx_b = other_idx,
                    } },
                    else => {},
                },
            }

            return .{ .success = try mod.ptrType(opt_ptr_info.?) };
        },

        .func => {
            var opt_cur_ty: ?Type = null;
            var first_idx: usize = undefined;
            for (peer_tys, 0..) |opt_ty, i| {
                const ty = opt_ty orelse continue;
                const cur_ty = opt_cur_ty orelse {
                    opt_cur_ty = ty;
                    first_idx = i;
                    continue;
                };
                if (ty.zigTypeTag(mod) != .Fn) return .{ .conflict = .{
                    .peer_idx_a = strat_reason,
                    .peer_idx_b = i,
                } };
                // ty -> cur_ty
                if (.ok == try sema.coerceInMemoryAllowedFns(block, cur_ty, ty, target, src, src)) {
                    continue;
                }
                // cur_ty -> ty
                if (.ok == try sema.coerceInMemoryAllowedFns(block, ty, cur_ty, target, src, src)) {
                    opt_cur_ty = ty;
                    continue;
                }
                return .{ .conflict = .{
                    .peer_idx_a = first_idx,
                    .peer_idx_b = i,
                } };
            }
            return .{ .success = opt_cur_ty.? };
        },

        .enum_or_union => {
            var opt_cur_ty: ?Type = null;
            // The peer index which gave the current type
            var cur_ty_idx: usize = undefined;

            for (peer_tys, 0..) |opt_ty, i| {
                const ty = opt_ty orelse continue;
                switch (ty.zigTypeTag(mod)) {
                    .EnumLiteral, .Enum, .Union => {},
                    else => return .{ .conflict = .{
                        .peer_idx_a = strat_reason,
                        .peer_idx_b = i,
                    } },
                }
                const cur_ty = opt_cur_ty orelse {
                    opt_cur_ty = ty;
                    cur_ty_idx = i;
                    continue;
                };

                // We want to return this in a lot of cases, so alias it here for convenience
                const generic_err: PeerResolveResult = .{ .conflict = .{
                    .peer_idx_a = cur_ty_idx,
                    .peer_idx_b = i,
                } };

                switch (cur_ty.zigTypeTag(mod)) {
                    .EnumLiteral => {
                        opt_cur_ty = ty;
                        cur_ty_idx = i;
                    },
                    .Enum => switch (ty.zigTypeTag(mod)) {
                        .EnumLiteral => {},
                        .Enum => {
                            if (!ty.eql(cur_ty, mod)) return generic_err;
                        },
                        .Union => {
                            const tag_ty = ty.unionTagTypeHypothetical(mod);
                            if (!tag_ty.eql(cur_ty, mod)) return generic_err;
                            opt_cur_ty = ty;
                            cur_ty_idx = i;
                        },
                        else => unreachable,
                    },
                    .Union => switch (ty.zigTypeTag(mod)) {
                        .EnumLiteral => {},
                        .Enum => {
                            const cur_tag_ty = cur_ty.unionTagTypeHypothetical(mod);
                            if (!ty.eql(cur_tag_ty, mod)) return generic_err;
                        },
                        .Union => {
                            if (!ty.eql(cur_ty, mod)) return generic_err;
                        },
                        else => unreachable,
                    },
                    else => unreachable,
                }
            }
            return .{ .success = opt_cur_ty.? };
        },

        .comptime_int => {
            for (peer_tys, 0..) |opt_ty, i| {
                const ty = opt_ty orelse continue;
                switch (ty.zigTypeTag(mod)) {
                    .ComptimeInt => {},
                    else => return .{ .conflict = .{
                        .peer_idx_a = strat_reason,
                        .peer_idx_b = i,
                    } },
                }
            }
            return .{ .success = Type.comptime_int };
        },

        .comptime_float => {
            for (peer_tys, 0..) |opt_ty, i| {
                const ty = opt_ty orelse continue;
                switch (ty.zigTypeTag(mod)) {
                    .ComptimeInt, .ComptimeFloat => {},
                    else => return .{ .conflict = .{
                        .peer_idx_a = strat_reason,
                        .peer_idx_b = i,
                    } },
                }
            }
            return .{ .success = Type.comptime_float };
        },

        .fixed_int => {
            var idx_unsigned: ?usize = null;
            var idx_signed: ?usize = null;

            // TODO: this is for compatibility with legacy behavior. See beneath the loop.
            var any_comptime_known = false;

            for (peer_tys, peer_vals, 0..) |opt_ty, *ptr_opt_val, i| {
                const ty = opt_ty orelse continue;
                const opt_val = ptr_opt_val.*;

                const peer_tag = ty.zigTypeTag(mod);
                switch (peer_tag) {
                    .ComptimeInt => {
                        // If the value is undefined, we can't refine to a fixed-width int
                        if (opt_val == null or opt_val.?.isUndef(mod)) return .{ .conflict = .{
                            .peer_idx_a = strat_reason,
                            .peer_idx_b = i,
                        } };
                        any_comptime_known = true;
                        ptr_opt_val.* = try sema.resolveLazyValue(opt_val.?);
                        continue;
                    },
                    .Int => {},
                    else => return .{ .conflict = .{
                        .peer_idx_a = strat_reason,
                        .peer_idx_b = i,
                    } },
                }

                if (opt_val != null) any_comptime_known = true;

                const info = ty.intInfo(mod);
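
                // Track the widest unsigned peer and the widest signed peer separately;
                // the two candidates are reconciled below once all peers are seen.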
                const idx_ptr = switch (info.signedness) {
                    .unsigned => &idx_unsigned,
                    .signed => &idx_signed,
                };

                const largest_idx = idx_ptr.* orelse {
                    idx_ptr.* = i;
                    continue;
                };

                const cur_info = peer_tys[largest_idx].?.intInfo(mod);
                if (info.bits > cur_info.bits) {
                    idx_ptr.* = i;
                }
            }

            if (idx_signed == null) {
                return .{ .success = peer_tys[idx_unsigned.?].? };
            }

            if (idx_unsigned == null) {
                return .{ .success = peer_tys[idx_signed.?].? };
            }

            const unsigned_info = peer_tys[idx_unsigned.?].?.intInfo(mod);
            const signed_info = peer_tys[idx_signed.?].?.intInfo(mod);
            if (signed_info.bits > unsigned_info.bits) {
                return .{ .success = peer_tys[idx_signed.?].? };
            }

            // TODO: this is for compatibility with legacy behavior. Before this version of PTR was
            // implemented, the algorithm very often returned false positives, with the expectation
            // that you'd just hit a coercion error later. One of these was that for integers, the
            // largest type would always be returned, even if it couldn't fit everything. This had
            // an unintentional consequence to semantics, which is that if values were known at
            // comptime, they would be coerced down to the smallest type where possible. This
            // behavior is unintuitive and order-dependent, so in my opinion should be eliminated,
            // but for now we'll retain compatibility.
            if (any_comptime_known) {
                if (unsigned_info.bits > signed_info.bits) {
                    return .{ .success = peer_tys[idx_unsigned.?].? };
                }
                const idx = @min(idx_unsigned.?, idx_signed.?);
                return .{ .success = peer_tys[idx].? };
            }

            return .{ .conflict = .{
                .peer_idx_a = idx_unsigned.?,
                .peer_idx_b = idx_signed.?,
            } };
        },

        .fixed_float => {
            var opt_cur_ty: ?Type = null;

            for (peer_tys, peer_vals, 0..) |opt_ty, opt_val, i| {
                const ty = opt_ty orelse continue;
                switch (ty.zigTypeTag(mod)) {
                    .ComptimeFloat, .ComptimeInt => {},
                    .Int => {
                        if (opt_val == null) return .{ .conflict = .{
                            .peer_idx_a = strat_reason,
                            .peer_idx_b = i,
                        } };
                    },
                    .Float => {
                        if (opt_cur_ty) |cur_ty| {
                            if (cur_ty.eql(ty, mod)) continue;
                            // Recreate the type so we eliminate any c_longdouble
                            const bits = @max(cur_ty.floatBits(target), ty.floatBits(target));
                            opt_cur_ty = switch (bits) {
                                16 => Type.f16,
                                32 => Type.f32,
                                64 => Type.f64,
                                80 => Type.f80,
                                128 => Type.f128,
                                else => unreachable,
                            };
                        } else {
                            opt_cur_ty = ty;
                        }
                    },
                    else => return .{ .conflict = .{
                        .peer_idx_a = strat_reason,
                        .peer_idx_b = i,
                    } },
                }
            }

            // Note that fixed_float is only chosen if there is at least one fixed-width float peer,
            // so opt_cur_ty must be non-null.
            return .{ .success = opt_cur_ty.? };
        },

        .coercible_struct => {
            // First, check that every peer has the same approximate structure (field count and names)

            var opt_first_idx: ?usize = null;
            var is_tuple: bool = undefined;
            var field_count: usize = undefined;
            // Only defined for non-tuples.
            var field_names: []InternPool.NullTerminatedString = undefined;

            for (peer_tys, 0..) |opt_ty, i| {
                const ty = opt_ty orelse continue;

                if (!ty.isTupleOrAnonStruct(mod)) {
                    return .{ .conflict = .{
                        .peer_idx_a = strat_reason,
                        .peer_idx_b = i,
                    } };
                }

                const first_idx = opt_first_idx orelse {
                    opt_first_idx = i;
                    is_tuple = ty.isTuple(mod);
                    field_count = ty.structFieldCount(mod);
                    if (!is_tuple) {
                        const names = mod.intern_pool.indexToKey(ty.toIntern()).anon_struct_type.names;
                        field_names = try sema.arena.dupe(InternPool.NullTerminatedString, names);
                    }
                    continue;
                };

                if (ty.isTuple(mod) != is_tuple or ty.structFieldCount(mod) != field_count) {
                    return .{ .conflict = .{
                        .peer_idx_a = first_idx,
                        .peer_idx_b = i,
                    } };
                }

                if (!is_tuple) {
                    for (field_names, 0..) |expected, field_idx| {
                        const actual = ty.structFieldName(field_idx, mod);
                        if (actual == expected) continue;
                        return .{ .conflict = .{
                            .peer_idx_a = first_idx,
                            .peer_idx_b = i,
                        } };
                    }
                }
            }

            assert(opt_first_idx != null);

            // Now, we'll recursively resolve the field types
            const field_types = try sema.arena.alloc(InternPool.Index, field_count);
            // Values for `comptime` fields - `.none` used for non-comptime fields
            const field_vals = try sema.arena.alloc(InternPool.Index, field_count);
            const sub_peer_tys = try sema.arena.alloc(?Type, peer_tys.len);
            const sub_peer_vals = try sema.arena.alloc(?Value, peer_vals.len);

            for (field_types, field_vals, 0..) |*field_ty, *field_val, field_idx| {
                // Fill buffers with types and values of the field
                for (peer_tys, peer_vals, sub_peer_tys, sub_peer_vals) |opt_ty, opt_val, *peer_field_ty, *peer_field_val| {
                    const ty = opt_ty orelse {
                        peer_field_ty.* = null;
                        peer_field_val.* = null;
                        continue;
                    };
                    peer_field_ty.* = ty.structFieldType(field_idx, mod);
                    peer_field_val.* = if (opt_val) |val| try val.fieldValue(mod, field_idx) else null;
                }

                // Resolve field type recursively
                field_ty.* = switch (try sema.resolvePeerTypesInner(block, src, sub_peer_tys, sub_peer_vals)) {
                    .success => |ty| ty.toIntern(),
                    else => |result| {
                        const result_buf = try sema.arena.create(PeerResolveResult);
                        result_buf.* = result;
                        const field_name = if (is_tuple) name: {
                            break :name try std.fmt.allocPrint(sema.arena, "{d}", .{field_idx});
                        } else try sema.arena.dupe(u8, mod.intern_pool.stringToSlice(field_names[field_idx]));

                        // The error info needs the field types, but we can't reuse sub_peer_tys
                        // since the recursive call may have clobbered it.
                        const peer_field_tys = try sema.arena.alloc(Type, peer_tys.len);
                        for (peer_tys, peer_field_tys) |opt_ty, *peer_field_ty| {
                            // Already-resolved types won't be referenced by the error so it's fine
                            // to leave them undefined.
                            const ty = opt_ty orelse continue;
                            peer_field_ty.* = ty.structFieldType(field_idx, mod);
                        }

                        return .{ .field_error = .{
                            .field_name = field_name,
                            .field_types = peer_field_tys,
                            .sub_result = result_buf,
                        } };
                    },
                };

                // Decide if this is a comptime field. If it is comptime in all peers, and the
                // coerced comptime values are all the same, we say it is comptime, else not.

                var comptime_val: ?Value = null;
                for (peer_tys) |opt_ty| {
                    const struct_ty = opt_ty orelse continue;
                    const uncoerced_field_val = try struct_ty.structFieldValueComptime(mod, field_idx) orelse {
                        comptime_val = null;
                        break;
                    };
                    const uncoerced_field = Air.internedToRef(uncoerced_field_val.toIntern());
                    const coerced_inst = sema.coerceExtra(block, field_ty.toType(), uncoerced_field, src, .{ .report_err = false }) catch |err| switch (err) {
                        // It's possible for PTR to give false positives. Just give up on making this a comptime field, we'll get an error later anyway
                        error.NotCoercible => {
                            comptime_val = null;
                            break;
                        },
                        else => |e| return e,
                    };
                    const coerced_val = (try sema.resolveMaybeUndefVal(coerced_inst)) orelse continue;
                    const existing = comptime_val orelse {
                        comptime_val = coerced_val;
                        continue;
                    };
                    if (!coerced_val.eql(existing, field_ty.toType(), mod)) {
                        comptime_val = null;
                        break;
                    }
                }

                field_val.* = if (comptime_val) |v| v.toIntern() else .none;
            }

            const final_ty = try mod.intern(.{ .anon_struct_type = .{
                .types = field_types,
                .names = if (is_tuple) &.{} else field_names,
                .values = field_vals,
            } });

            return .{ .success = final_ty.toType() };
        },

        .exact => {
            var expect_ty: ?Type = null;
            var first_idx: usize = undefined;
            for (peer_tys, 0..) |opt_ty, i| {
                const ty = opt_ty orelse continue;
                if (expect_ty) |expect| {
                    if (!ty.eql(expect, mod)) return .{ .conflict = .{
                        .peer_idx_a = first_idx,
                        .peer_idx_b = i,
                    } };
                } else {
                    expect_ty = ty;
                    first_idx = i;
                }
            }
            return .{ .success = expect_ty.? };
        },
    }
}

fn maybeMergeErrorSets(sema: *Sema, block: *Block, src: LazySrcLoc, e0: Type, e1: Type) !Type {
    // e0 -> e1
    if (.ok == try sema.coerceInMemoryAllowedErrorSets(block, e1, e0, src, src)) {
        return e1;
    }

    // e1 -> e0
    if (.ok == try sema.coerceInMemoryAllowedErrorSets(block, e0, e1, src, src)) {
        return e0;
    }

    return sema.errorSetMerge(e0, e1);
}

fn resolvePairInMemoryCoercible(sema: *Sema, block: *Block, src: LazySrcLoc, ty_a: Type, ty_b: Type) !?Type {
    // ty_b -> ty_a
    if (.ok == try sema.coerceInMemoryAllowed(block, ty_a, ty_b, true, sema.mod.getTarget(), src, src)) {
        return ty_a;
    }

    // ty_a -> ty_b
    if (.ok == try sema.coerceInMemoryAllowed(block, ty_b, ty_a, true, sema.mod.getTarget(), src, src)) {
        return ty_b;
    }

    return null;
}

const ArrayLike = struct {
    len: u64,
    /// `noreturn` indicates that this type is `struct{}`, so it can coerce to anything.
    elem_ty: Type,
};
fn typeIsArrayLike(sema: *Sema, ty: Type) ?ArrayLike {
    const mod = sema.mod;
    return switch (ty.zigTypeTag(mod)) {
        .Array => .{
            .len = ty.arrayLen(mod),
            .elem_ty = ty.childType(mod),
        },
        .Struct => {
            const field_count = ty.structFieldCount(mod);
            if (field_count == 0) return .{
                .len = 0,
                .elem_ty = Type.noreturn,
            };
            if (!ty.isTuple(mod)) return null;
            const elem_ty = ty.structFieldType(0, mod);
            for (1..field_count) |i| {
                if (!ty.structFieldType(i, mod).eql(elem_ty, mod)) {
                    return null;
                }
            }
            return .{
                .len = field_count,
                .elem_ty = elem_ty,
            };
        },
        else => null,
    };
}

pub fn resolveIes(sema: *Sema, block: *Block, src: LazySrcLoc) CompileError!void {
    const mod = sema.mod;
    const ip = &mod.intern_pool;

    if (sema.fn_ret_ty_ies) |ies| {
        try sema.resolveInferredErrorSetPtr(block, src, ies);
        assert(ies.resolved != .none);
        ip.funcIesResolved(sema.func_index).* = ies.resolved;
    }
}

pub fn resolveFnTypes(sema: *Sema, fn_ty: Type) CompileError!void {
    const mod = sema.mod;
    const ip = &mod.intern_pool;
    const fn_ty_info = mod.typeToFunc(fn_ty).?;

    try sema.resolveTypeFully(fn_ty_info.return_type.toType());

    if (mod.comp.bin_file.options.error_return_tracing and fn_ty_info.return_type.toType().isError(mod)) {
        // Ensure the type exists so that backends can assume that.
        _ = try sema.getBuiltinType("StackTrace");
    }

    for (0..fn_ty_info.param_types.len) |i| {
        try sema.resolveTypeFully(fn_ty_info.param_types.get(ip)[i].toType());
    }
}

/// Make it so that calling hash() and eql() on `val` will not assert due
/// to a type not having its layout resolved.
fn resolveLazyValue(sema: *Sema, val: Value) CompileError!Value {
    const mod = sema.mod;
    switch (mod.intern_pool.indexToKey(val.toIntern())) {
        .int => |int| switch (int.storage) {
            .u64, .i64, .big_int => return val,
            .lazy_align, .lazy_size => return (try mod.intern(.{ .int = .{
                .ty = int.ty,
                .storage = .{ .u64 = (try val.getUnsignedIntAdvanced(mod, sema)).? },
            } })).toValue(),
        },
        .ptr => |ptr| {
            const resolved_len = switch (ptr.len) {
                .none => .none,
                else => (try sema.resolveLazyValue(ptr.len.toValue())).toIntern(),
            };
            switch (ptr.addr) {
                .decl, .mut_decl => return if (resolved_len == ptr.len)
                    val
                else
                    (try mod.intern(.{ .ptr = .{
                        .ty = ptr.ty,
                        .addr = switch (ptr.addr) {
                            .decl => |decl| .{ .decl = decl },
                            .mut_decl => |mut_decl| .{ .mut_decl = mut_decl },
                            else => unreachable,
                        },
                        .len = resolved_len,
                    } })).toValue(),
                .comptime_field => |field_val| {
                    const resolved_field_val =
                        (try sema.resolveLazyValue(field_val.toValue())).toIntern();
                    return if (resolved_field_val == field_val and resolved_len == ptr.len)
                        val
                    else
                        (try mod.intern(.{ .ptr = .{
                            .ty = ptr.ty,
                            .addr = .{ .comptime_field = resolved_field_val },
                            .len = resolved_len,
                        } })).toValue();
                },
                .int => |int| {
                    const resolved_int = (try sema.resolveLazyValue(int.toValue())).toIntern();
                    return if (resolved_int == int and resolved_len == ptr.len)
                        val
                    else
                        (try mod.intern(.{ .ptr = .{
                            .ty = ptr.ty,
                            .addr = .{ .int = resolved_int },
                            .len = resolved_len,
                        } })).toValue();
                },
                .eu_payload, .opt_payload => |base| {
                    const resolved_base = (try sema.resolveLazyValue(base.toValue())).toIntern();
                    return if (resolved_base == base and resolved_len == ptr.len)
                        val
                    else
                        (try mod.intern(.{ .ptr = .{
                            .ty = ptr.ty,
                            .addr = switch (ptr.addr) {
                                .eu_payload => .{ .eu_payload = resolved_base },
                                .opt_payload => .{ .opt_payload = resolved_base },
                                else => unreachable,
                            },
                            .len = ptr.len,
                        } })).toValue();
                },
                .elem, .field => |base_index| {
                    const resolved_base = (try sema.resolveLazyValue(base_index.base.toValue())).toIntern();
                    return if (resolved_base == base_index.base and resolved_len == ptr.len)
                        val
                    else
                        (try mod.intern(.{ .ptr = .{
                            .ty = ptr.ty,
                            .addr = switch (ptr.addr) {
                                .elem => .{ .elem = .{
                                    .base = resolved_base,
                                    .index = base_index.index,
                                } },
                                .field => .{ .field = .{
                                    .base = resolved_base,
                                    .index = base_index.index,
                                } },
                                else => unreachable,
                            },
                            .len = ptr.len,
                        } })).toValue();
                },
            }
        },
        .aggregate => |aggregate| switch (aggregate.storage) {
            .bytes => return val,
            .elems => |elems| {
                var resolved_elems: []InternPool.Index = &.{};
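                // Copy-on-write: the element buffer is only allocated once a resolved
                // element actually differs; an empty slice means no change so far.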
|
|
for (elems, 0..) |elem, i| {
|
|
const resolved_elem = (try sema.resolveLazyValue(elem.toValue())).toIntern();
|
|
if (resolved_elems.len == 0 and resolved_elem != elem) {
|
|
resolved_elems = try sema.arena.alloc(InternPool.Index, elems.len);
|
|
@memcpy(resolved_elems[0..i], elems[0..i]);
|
|
}
|
|
if (resolved_elems.len > 0) resolved_elems[i] = resolved_elem;
|
|
}
|
|
return if (resolved_elems.len == 0) val else (try mod.intern(.{ .aggregate = .{
|
|
.ty = aggregate.ty,
|
|
.storage = .{ .elems = resolved_elems },
|
|
} })).toValue();
|
|
},
|
|
.repeated_elem => |elem| {
|
|
const resolved_elem = (try sema.resolveLazyValue(elem.toValue())).toIntern();
|
|
return if (resolved_elem == elem) val else (try mod.intern(.{ .aggregate = .{
|
|
.ty = aggregate.ty,
|
|
.storage = .{ .repeated_elem = resolved_elem },
|
|
} })).toValue();
|
|
},
|
|
},
|
|
.un => |un| {
|
|
const resolved_tag = (try sema.resolveLazyValue(un.tag.toValue())).toIntern();
|
|
const resolved_val = (try sema.resolveLazyValue(un.val.toValue())).toIntern();
|
|
return if (resolved_tag == un.tag and resolved_val == un.val)
|
|
val
|
|
else
|
|
(try mod.intern(.{ .un = .{
|
|
.ty = un.ty,
|
|
.tag = resolved_tag,
|
|
.val = resolved_val,
|
|
} })).toValue();
|
|
},
|
|
else => return val,
|
|
}
|
|
}
|
|
|
|
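/// Resolves the memory layout of `ty`, recursing into aggregate field types,
/// optional and error union payloads, and non-generic function signatures.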
pub fn resolveTypeLayout(sema: *Sema, ty: Type) CompileError!void {
    const mod = sema.mod;
    switch (mod.intern_pool.indexToKey(ty.toIntern())) {
        .simple_type => |simple_type| return sema.resolveSimpleType(simple_type),
        else => {},
    }
    switch (ty.zigTypeTag(mod)) {
        .Struct => return sema.resolveStructLayout(ty),
        .Union => return sema.resolveUnionLayout(ty),
        .Array => {
            if (ty.arrayLenIncludingSentinel(mod) == 0) return;
            const elem_ty = ty.childType(mod);
            return sema.resolveTypeLayout(elem_ty);
        },
        .Optional => {
            const payload_ty = ty.optionalChild(mod);
            // In case of querying the ABI alignment of this optional, we will ask
            // for hasRuntimeBits() of the payload type, so we need "requires comptime"
            // to be known already before this function returns.
            _ = try sema.typeRequiresComptime(payload_ty);
            return sema.resolveTypeLayout(payload_ty);
        },
        .ErrorUnion => {
            const payload_ty = ty.errorUnionPayload(mod);
            return sema.resolveTypeLayout(payload_ty);
        },
        .Fn => {
            const info = mod.typeToFunc(ty).?;
            if (info.is_generic) {
                // Resolving of generic function types is deferred to when
                // the function is instantiated.
                return;
            }
            const ip = &mod.intern_pool;
            for (0..info.param_types.len) |i| {
                const param_ty = info.param_types.get(ip)[i];
                try sema.resolveTypeLayout(param_ty.toType());
            }
            try sema.resolveTypeLayout(info.return_type.toType());
        },
        else => {},
    }
}

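/// Resolves the layout of a struct type: field layouts first, then the backing
/// integer for packed structs, reporting an error on layout dependency cycles.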
fn resolveStructLayout(sema: *Sema, ty: Type) CompileError!void {
    const mod = sema.mod;
    try sema.resolveTypeFields(ty);
    if (mod.typeToStruct(ty)) |struct_obj| {
        switch (struct_obj.status) {
            .none, .have_field_types => {},
            .field_types_wip, .layout_wip => {
                const msg = try Module.ErrorMsg.create(
                    sema.gpa,
                    struct_obj.srcLoc(mod),
                    "struct '{}' depends on itself",
                    .{ty.fmt(mod)},
                );
                return sema.failWithOwnedErrorMsg(msg);
            },
            .have_layout, .fully_resolved_wip, .fully_resolved => return,
        }
        const prev_status = struct_obj.status;
        errdefer if (struct_obj.status == .layout_wip) {
            struct_obj.status = prev_status;
        };

        struct_obj.status = .layout_wip;
        for (struct_obj.fields.values(), 0..) |field, i| {
            sema.resolveTypeLayout(field.ty) catch |err| switch (err) {
                error.AnalysisFail => {
                    const msg = sema.err orelse return err;
                    try sema.addFieldErrNote(ty, i, msg, "while checking this field", .{});
                    return err;
                },
                else => return err,
            };
        }

        if (struct_obj.layout == .Packed) {
            try semaBackingIntType(mod, struct_obj);
        }

        struct_obj.status = .have_layout;
        _ = try sema.resolveTypeRequiresComptime(ty);

        if (struct_obj.assumed_runtime_bits and !(try sema.typeHasRuntimeBits(ty))) {
            const msg = try Module.ErrorMsg.create(
                sema.gpa,
                struct_obj.srcLoc(mod),
                "struct layout depends on it having runtime bits",
                .{},
            );
            return sema.failWithOwnedErrorMsg(msg);
        }

        if (struct_obj.layout == .Auto and !struct_obj.is_tuple and
            mod.backendSupportsFeature(.field_reordering))
        {
            const optimized_order = try mod.tmp_hack_arena.allocator().alloc(u32, struct_obj.fields.count());

            for (struct_obj.fields.values(), 0..) |field, i| {
                optimized_order[i] = if (try sema.typeHasRuntimeBits(field.ty))
                    @as(u32, @intCast(i))
                else
                    Module.Struct.omitted_field;
            }

            const AlignSortContext = struct {
                struct_obj: *Module.Struct,
                sema: *Sema,

                fn lessThan(ctx: @This(), a: u32, b: u32) bool {
                    const m = ctx.sema.mod;
                    if (a == Module.Struct.omitted_field) return false;
                    if (b == Module.Struct.omitted_field) return true;
                    return ctx.struct_obj.fields.values()[a].ty.abiAlignment(m) >
                        ctx.struct_obj.fields.values()[b].ty.abiAlignment(m);
                }
            };
            mem.sort(u32, optimized_order, AlignSortContext{
                .struct_obj = struct_obj,
                .sema = sema,
            }, AlignSortContext.lessThan);
            struct_obj.optimized_order = optimized_order.ptr;
        }
    }
    // otherwise it's a tuple; no need to resolve anything
}

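/// Computes and validates the backing integer type of a packed struct. When no
/// explicit backing integer is declared, an unsigned integer type exactly as
/// wide as the sum of the field bit sizes is used.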
fn semaBackingIntType(mod: *Module, struct_obj: *Module.Struct) CompileError!void {
    const gpa = mod.gpa;

    var fields_bit_sum: u64 = 0;
    for (struct_obj.fields.values()) |field| {
        fields_bit_sum += field.ty.bitSize(mod);
    }

    const decl_index = struct_obj.owner_decl;
    const decl = mod.declPtr(decl_index);

    const zir = mod.namespacePtr(struct_obj.namespace).file_scope.zir;
    const extended = zir.instructions.items(.data)[struct_obj.zir_index].extended;
    assert(extended.opcode == .struct_decl);
    const small = @as(Zir.Inst.StructDecl.Small, @bitCast(extended.small));

    if (small.has_backing_int) {
        var extra_index: usize = extended.operand;
        extra_index += @intFromBool(small.has_src_node);
        extra_index += @intFromBool(small.has_fields_len);
        extra_index += @intFromBool(small.has_decls_len);

        const backing_int_body_len = zir.extra[extra_index];
        extra_index += 1;

        var analysis_arena = std.heap.ArenaAllocator.init(gpa);
        defer analysis_arena.deinit();

        var comptime_mutable_decls = std.ArrayList(Decl.Index).init(gpa);
        defer comptime_mutable_decls.deinit();

        var sema: Sema = .{
            .mod = mod,
            .gpa = gpa,
            .arena = analysis_arena.allocator(),
            .code = zir,
            .owner_decl = decl,
            .owner_decl_index = decl_index,
            .func_index = .none,
            .func_is_naked = false,
            .fn_ret_ty = Type.void,
            .fn_ret_ty_ies = null,
            .owner_func_index = .none,
            .comptime_mutable_decls = &comptime_mutable_decls,
        };
        defer sema.deinit();

        var wip_captures = try WipCaptureScope.init(gpa, decl.src_scope);
        defer wip_captures.deinit();

        var block: Block = .{
            .parent = null,
            .sema = &sema,
            .src_decl = decl_index,
            .namespace = struct_obj.namespace,
            .wip_capture_scope = wip_captures.scope,
            .instructions = .{},
            .inlining = null,
            .is_comptime = true,
        };
        defer assert(block.instructions.items.len == 0);

        const backing_int_src: LazySrcLoc = .{ .node_offset_container_tag = 0 };
        const backing_int_ty = blk: {
            if (backing_int_body_len == 0) {
                const backing_int_ref = @as(Zir.Inst.Ref, @enumFromInt(zir.extra[extra_index]));
                break :blk try sema.resolveType(&block, backing_int_src, backing_int_ref);
            } else {
                const body = zir.extra[extra_index..][0..backing_int_body_len];
                const ty_ref = try sema.resolveBody(&block, body, struct_obj.zir_index);
                break :blk try sema.analyzeAsType(&block, backing_int_src, ty_ref);
            }
        };

        try sema.checkBackingIntType(&block, backing_int_src, backing_int_ty, fields_bit_sum);
        struct_obj.backing_int_ty = backing_int_ty;
        try wip_captures.finalize();
        for (comptime_mutable_decls.items) |ct_decl_index| {
            const ct_decl = mod.declPtr(ct_decl_index);
            _ = try ct_decl.internValue(mod);
        }
    } else {
        if (fields_bit_sum > std.math.maxInt(u16)) {
            var sema: Sema = .{
                .mod = mod,
                .gpa = gpa,
                .arena = undefined,
                .code = zir,
                .owner_decl = decl,
                .owner_decl_index = decl_index,
                .func_index = .none,
                .func_is_naked = false,
                .fn_ret_ty = Type.void,
                .fn_ret_ty_ies = null,
                .owner_func_index = .none,
                .comptime_mutable_decls = undefined,
            };
            defer sema.deinit();

            var block: Block = .{
                .parent = null,
                .sema = &sema,
                .src_decl = decl_index,
                .namespace = struct_obj.namespace,
                .wip_capture_scope = undefined,
                .instructions = .{},
                .inlining = null,
                .is_comptime = true,
            };
            return sema.fail(&block, LazySrcLoc.nodeOffset(0), "size of packed struct '{d}' exceeds maximum bit width of 65535", .{fields_bit_sum});
        }
        struct_obj.backing_int_ty = try mod.intType(.unsigned, @as(u16, @intCast(fields_bit_sum)));
    }
}

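/// Emits a compile error unless `backing_int_ty` is an integer type whose bit
/// size equals the total bit size of the packed struct fields.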
fn checkBackingIntType(sema: *Sema, block: *Block, src: LazySrcLoc, backing_int_ty: Type, fields_bit_sum: u64) CompileError!void {
    const mod = sema.mod;

    if (!backing_int_ty.isInt(mod)) {
        return sema.fail(block, src, "expected backing integer type, found '{}'", .{backing_int_ty.fmt(sema.mod)});
    }
    if (backing_int_ty.bitSize(mod) != fields_bit_sum) {
        return sema.fail(
            block,
            src,
            "backing integer type '{}' has bit size {} but the struct fields have a total bit size of {}",
            .{ backing_int_ty.fmt(sema.mod), backing_int_ty.bitSize(mod), fields_bit_sum },
        );
    }
}

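/// Emits a compile error unless `ty` supports indexing.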
fn checkIndexable(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) !void {
    const mod = sema.mod;
    if (!ty.isIndexable(mod)) {
        const msg = msg: {
            const msg = try sema.errMsg(block, src, "type '{}' does not support indexing", .{ty.fmt(sema.mod)});
            errdefer msg.destroy(sema.gpa);
            try sema.errNote(block, src, msg, "operand must be an array, slice, tuple, or vector", .{});
            break :msg msg;
        };
        return sema.failWithOwnedErrorMsg(msg);
    }
}

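/// Emits a compile error unless `ty` is an indexable pointer: a slice, a many
/// pointer, a C pointer, or a pointer to an array.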
fn checkMemOperand(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) !void {
    const mod = sema.mod;
    if (ty.zigTypeTag(mod) == .Pointer) {
        switch (ty.ptrSize(mod)) {
            .Slice, .Many, .C => return,
            .One => {
                const elem_ty = ty.childType(mod);
                if (elem_ty.zigTypeTag(mod) == .Array) return;
                // TODO https://github.com/ziglang/zig/issues/15479
                // if (elem_ty.isTuple()) return;
            },
        }
    }
    const msg = msg: {
        const msg = try sema.errMsg(block, src, "type '{}' is not an indexable pointer", .{ty.fmt(sema.mod)});
        errdefer msg.destroy(sema.gpa);
        try sema.errNote(block, src, msg, "operand must be a slice, a many pointer or a pointer to an array", .{});
        break :msg msg;
    };
    return sema.failWithOwnedErrorMsg(msg);
}

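/// Resolves the layout of a union type, detecting layout dependency cycles in
/// the same way as `resolveStructLayout`.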
fn resolveUnionLayout(sema: *Sema, ty: Type) CompileError!void {
    const mod = sema.mod;
    try sema.resolveTypeFields(ty);
    const union_obj = mod.typeToUnion(ty).?;
    switch (union_obj.status) {
        .none, .have_field_types => {},
        .field_types_wip, .layout_wip => {
            const msg = try Module.ErrorMsg.create(
                sema.gpa,
                union_obj.srcLoc(sema.mod),
                "union '{}' depends on itself",
                .{ty.fmt(sema.mod)},
            );
            return sema.failWithOwnedErrorMsg(msg);
        },
        .have_layout, .fully_resolved_wip, .fully_resolved => return,
    }
    const prev_status = union_obj.status;
    errdefer if (union_obj.status == .layout_wip) {
        union_obj.status = prev_status;
    };

    union_obj.status = .layout_wip;
    for (union_obj.fields.values(), 0..) |field, i| {
        sema.resolveTypeLayout(field.ty) catch |err| switch (err) {
            error.AnalysisFail => {
                const msg = sema.err orelse return err;
                try sema.addFieldErrNote(ty, i, msg, "while checking this field", .{});
                return err;
            },
            else => return err,
        };
    }
    union_obj.status = .have_layout;
    _ = try sema.resolveTypeRequiresComptime(ty);

    if (union_obj.assumed_runtime_bits and !(try sema.typeHasRuntimeBits(ty))) {
        const msg = try Module.ErrorMsg.create(
            sema.gpa,
            union_obj.srcLoc(sema.mod),
            "union layout depends on it having runtime bits",
            .{},
        );
        return sema.failWithOwnedErrorMsg(msg);
    }
}

// In case of querying the ABI alignment of this struct, we will ask
// for hasRuntimeBits() of each field, so we need "requires comptime"
// to be known already before this function returns.
pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool {
    const mod = sema.mod;

    return switch (ty.toIntern()) {
        .empty_struct_type => false,
        else => switch (mod.intern_pool.indexToKey(ty.toIntern())) {
            .int_type => false,
            .ptr_type => |ptr_type| {
                const child_ty = ptr_type.child.toType();
                if (child_ty.zigTypeTag(mod) == .Fn) {
                    return mod.typeToFunc(child_ty).?.is_generic;
                } else {
                    return sema.resolveTypeRequiresComptime(child_ty);
                }
            },
            .anyframe_type => |child| {
                if (child == .none) return false;
                return sema.resolveTypeRequiresComptime(child.toType());
            },
            .array_type => |array_type| return sema.resolveTypeRequiresComptime(array_type.child.toType()),
            .vector_type => |vector_type| return sema.resolveTypeRequiresComptime(vector_type.child.toType()),
            .opt_type => |child| return sema.resolveTypeRequiresComptime(child.toType()),
            .error_union_type => |error_union_type| return sema.resolveTypeRequiresComptime(error_union_type.payload_type.toType()),
            .error_set_type, .inferred_error_set_type => false,

            .func_type => true,

            .simple_type => |t| switch (t) {
                .f16,
                .f32,
                .f64,
                .f80,
                .f128,
                .usize,
                .isize,
                .c_char,
                .c_short,
                .c_ushort,
                .c_int,
                .c_uint,
                .c_long,
                .c_ulong,
                .c_longlong,
                .c_ulonglong,
                .c_longdouble,
                .anyopaque,
                .bool,
                .void,
                .anyerror,
                .adhoc_inferred_error_set,
                .noreturn,
                .generic_poison,
                .atomic_order,
                .atomic_rmw_op,
                .calling_convention,
                .address_space,
                .float_mode,
                .reduce_op,
                .call_modifier,
                .prefetch_options,
                .export_options,
                .extern_options,
                => false,

                .type,
                .comptime_int,
                .comptime_float,
                .null,
                .undefined,
                .enum_literal,
                .type_info,
                => true,
            },
            .struct_type => |struct_type| {
                const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return false;
                switch (struct_obj.requires_comptime) {
                    .no, .wip => return false,
                    .yes => return true,
                    .unknown => {
                        var requires_comptime = false;
                        struct_obj.requires_comptime = .wip;
                        for (struct_obj.fields.values()) |field| {
                            if (try sema.resolveTypeRequiresComptime(field.ty)) requires_comptime = true;
                        }
                        if (requires_comptime) {
                            struct_obj.requires_comptime = .yes;
                        } else {
                            struct_obj.requires_comptime = .no;
                        }
                        return requires_comptime;
                    },
                }
            },

            .anon_struct_type => |tuple| {
                for (tuple.types, tuple.values) |field_ty, field_val| {
                    const have_comptime_val = field_val != .none;
                    if (!have_comptime_val and try sema.resolveTypeRequiresComptime(field_ty.toType())) {
                        return true;
                    }
                }
                return false;
            },

            .union_type => |union_type| {
                const union_obj = mod.unionPtr(union_type.index);
                switch (union_obj.requires_comptime) {
                    .no, .wip => return false,
                    .yes => return true,
                    .unknown => {
                        var requires_comptime = false;
                        union_obj.requires_comptime = .wip;
                        for (union_obj.fields.values()) |field| {
                            if (try sema.resolveTypeRequiresComptime(field.ty)) requires_comptime = true;
                        }
                        if (requires_comptime) {
                            union_obj.requires_comptime = .yes;
                        } else {
                            union_obj.requires_comptime = .no;
                        }
                        return requires_comptime;
                    },
                }
            },

            .opaque_type => false,

            .enum_type => |enum_type| try sema.resolveTypeRequiresComptime(enum_type.tag_ty.toType()),

            // values, not types
            .undef,
            .runtime_value,
            .simple_value,
            .variable,
            .extern_func,
            .func,
            .int,
            .err,
            .error_union,
            .enum_literal,
            .enum_tag,
            .empty_enum_value,
            .float,
            .ptr,
            .opt,
            .aggregate,
            .un,
            // memoization, not types
            .memoized_call,
            => unreachable,
        },
    };
}

/// Returns `error.AnalysisFail` if any of the types (recursively) failed to
/// be resolved.
pub fn resolveTypeFully(sema: *Sema, ty: Type) CompileError!void {
    const mod = sema.mod;
    switch (ty.zigTypeTag(mod)) {
        .Pointer => {
            return sema.resolveTypeFully(ty.childType(mod));
        },
        .Struct => switch (mod.intern_pool.indexToKey(ty.toIntern())) {
            .struct_type => return sema.resolveStructFully(ty),
            .anon_struct_type => |tuple| {
                for (tuple.types) |field_ty| {
                    try sema.resolveTypeFully(field_ty.toType());
                }
            },
            .simple_type => |simple_type| try sema.resolveSimpleType(simple_type),
            else => {},
        },
        .Union => return sema.resolveUnionFully(ty),
        .Array => return sema.resolveTypeFully(ty.childType(mod)),
        .Optional => {
            return sema.resolveTypeFully(ty.optionalChild(mod));
        },
        .ErrorUnion => return sema.resolveTypeFully(ty.errorUnionPayload(mod)),
        .Fn => {
            const info = mod.typeToFunc(ty).?;
            if (info.is_generic) {
                // Resolving of generic function types is deferred to when
                // the function is instantiated.
                return;
            }
            const ip = &mod.intern_pool;
            for (0..info.param_types.len) |i| {
                const param_ty = info.param_types.get(ip)[i];
                try sema.resolveTypeFully(param_ty.toType());
            }
            try sema.resolveTypeFully(info.return_type.toType());
        },
        else => {},
    }
}

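/// Fully resolves a struct type: the layout first, then every field type
/// recursively, guarded by the `fully_resolved_wip` status.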
fn resolveStructFully(sema: *Sema, ty: Type) CompileError!void {
    try sema.resolveStructLayout(ty);

    const mod = sema.mod;
    try sema.resolveTypeFields(ty);
    const struct_obj = mod.typeToStruct(ty).?;

    switch (struct_obj.status) {
        .none, .have_field_types, .field_types_wip, .layout_wip, .have_layout => {},
        .fully_resolved_wip, .fully_resolved => return,
    }

    {
        // After we have resolved the struct layout, we have to go over the fields
        // again to make sure pointer fields get their child types resolved as well.
        // See also similar code for unions.
        const prev_status = struct_obj.status;
        errdefer struct_obj.status = prev_status;

        struct_obj.status = .fully_resolved_wip;
        for (struct_obj.fields.values()) |field| {
            try sema.resolveTypeFully(field.ty);
        }
        struct_obj.status = .fully_resolved;
    }

    // And let's not forget comptime-only status.
    _ = try sema.typeRequiresComptime(ty);
}

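/// Fully resolves a union type; the union counterpart of `resolveStructFully`.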
fn resolveUnionFully(sema: *Sema, ty: Type) CompileError!void {
    try sema.resolveUnionLayout(ty);

    const mod = sema.mod;
    try sema.resolveTypeFields(ty);
    const union_obj = mod.typeToUnion(ty).?;
    switch (union_obj.status) {
        .none, .have_field_types, .field_types_wip, .layout_wip, .have_layout => {},
        .fully_resolved_wip, .fully_resolved => return,
    }

    {
        // After we have resolved the union layout, we have to go over the fields
        // again to make sure pointer fields get their child types resolved as well.
        // See also similar code for structs.
        const prev_status = union_obj.status;
        errdefer union_obj.status = prev_status;

        union_obj.status = .fully_resolved_wip;
        for (union_obj.fields.values()) |field| {
            try sema.resolveTypeFully(field.ty);
        }
        union_obj.status = .fully_resolved;
    }

    // And let's not forget comptime-only status.
    _ = try sema.typeRequiresComptime(ty);
}

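/// Resolves the field types (but not the layout) of a struct, union, or
/// special builtin type. A no-op for all other types.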
pub fn resolveTypeFields(sema: *Sema, ty: Type) CompileError!void {
    const mod = sema.mod;

    switch (ty.toIntern()) {
        .var_args_param_type => unreachable,

        .none => unreachable,

        .u0_type,
        .i0_type,
        .u1_type,
        .u8_type,
        .i8_type,
        .u16_type,
        .i16_type,
        .u29_type,
        .u32_type,
        .i32_type,
        .u64_type,
        .i64_type,
        .u80_type,
        .u128_type,
        .i128_type,
        .usize_type,
        .isize_type,
        .c_char_type,
        .c_short_type,
        .c_ushort_type,
        .c_int_type,
        .c_uint_type,
        .c_long_type,
        .c_ulong_type,
        .c_longlong_type,
        .c_ulonglong_type,
        .c_longdouble_type,
        .f16_type,
        .f32_type,
        .f64_type,
        .f80_type,
        .f128_type,
        .anyopaque_type,
        .bool_type,
        .void_type,
        .type_type,
        .anyerror_type,
        .adhoc_inferred_error_set_type,
        .comptime_int_type,
        .comptime_float_type,
        .noreturn_type,
        .anyframe_type,
        .null_type,
        .undefined_type,
        .enum_literal_type,
        .manyptr_u8_type,
        .manyptr_const_u8_type,
        .manyptr_const_u8_sentinel_0_type,
        .single_const_pointer_to_comptime_int_type,
        .slice_const_u8_type,
        .slice_const_u8_sentinel_0_type,
        .optional_noreturn_type,
        .anyerror_void_error_union_type,
        .generic_poison_type,
        .empty_struct_type,
        => {},

        .undef => unreachable,
        .zero => unreachable,
        .zero_usize => unreachable,
        .zero_u8 => unreachable,
        .one => unreachable,
        .one_usize => unreachable,
        .one_u8 => unreachable,
        .four_u8 => unreachable,
        .negative_one => unreachable,
        .calling_convention_c => unreachable,
        .calling_convention_inline => unreachable,
        .void_value => unreachable,
        .unreachable_value => unreachable,
        .null_value => unreachable,
        .bool_true => unreachable,
        .bool_false => unreachable,
        .empty_struct => unreachable,
        .generic_poison => unreachable,

        else => switch (mod.intern_pool.items.items(.tag)[@intFromEnum(ty.toIntern())]) {
            .type_struct,
            .type_struct_ns,
            .type_union_tagged,
            .type_union_untagged,
            .type_union_safety,
            .simple_type,
            => switch (mod.intern_pool.indexToKey(ty.toIntern())) {
                .struct_type => |struct_type| {
                    const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return;
                    try sema.resolveTypeFieldsStruct(ty, struct_obj);
                },
                .union_type => |union_type| {
                    const union_obj = mod.unionPtr(union_type.index);
                    try sema.resolveTypeFieldsUnion(ty, union_obj);
                },
                .simple_type => |simple_type| try sema.resolveSimpleType(simple_type),
                else => unreachable,
            },
            else => {},
        },
    }
}

/// Fully resolves a simple type. This is usually a nop, but for builtin types with
/// special InternPool indices (such as std.builtin.Type) it will analyze and fully
/// resolve the container type.
fn resolveSimpleType(sema: *Sema, simple_type: InternPool.SimpleType) CompileError!void {
    const builtin_type_name: []const u8 = switch (simple_type) {
        .atomic_order => "AtomicOrder",
        .atomic_rmw_op => "AtomicRmwOp",
        .calling_convention => "CallingConvention",
        .address_space => "AddressSpace",
        .float_mode => "FloatMode",
        .reduce_op => "ReduceOp",
        .call_modifier => "CallModifier",
        .prefetch_options => "PrefetchOptions",
        .export_options => "ExportOptions",
        .extern_options => "ExternOptions",
        .type_info => "Type",
        else => return,
    };
    // This will fully resolve the type.
    _ = try sema.getBuiltinType(builtin_type_name);
}

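/// Runs field type resolution on a struct, failing with a "depends on itself"
/// error when a field type dependency cycle is detected.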
fn resolveTypeFieldsStruct(
    sema: *Sema,
    ty: Type,
    struct_obj: *Module.Struct,
) CompileError!void {
    switch (sema.mod.declPtr(struct_obj.owner_decl).analysis) {
        .file_failure,
        .dependency_failure,
        .sema_failure,
        .sema_failure_retryable,
        => {
            sema.owner_decl.analysis = .dependency_failure;
            sema.owner_decl.generation = sema.mod.generation;
            return error.AnalysisFail;
        },
        else => {},
    }
    switch (struct_obj.status) {
        .none => {},
        .field_types_wip => {
            const msg = try Module.ErrorMsg.create(
                sema.gpa,
                struct_obj.srcLoc(sema.mod),
                "struct '{}' depends on itself",
                .{ty.fmt(sema.mod)},
            );
            return sema.failWithOwnedErrorMsg(msg);
        },
        .have_field_types,
        .have_layout,
        .layout_wip,
        .fully_resolved_wip,
        .fully_resolved,
        => return,
    }

    struct_obj.status = .field_types_wip;
    errdefer struct_obj.status = .none;
    try semaStructFields(sema.mod, struct_obj);
}

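/// Runs field type resolution on a union, failing with a "depends on itself"
/// error when a field type dependency cycle is detected.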
fn resolveTypeFieldsUnion(sema: *Sema, ty: Type, union_obj: *Module.Union) CompileError!void {
    switch (sema.mod.declPtr(union_obj.owner_decl).analysis) {
        .file_failure,
        .dependency_failure,
        .sema_failure,
        .sema_failure_retryable,
        => {
            sema.owner_decl.analysis = .dependency_failure;
            sema.owner_decl.generation = sema.mod.generation;
            return error.AnalysisFail;
        },
        else => {},
    }
    switch (union_obj.status) {
        .none => {},
        .field_types_wip => {
            const msg = try Module.ErrorMsg.create(
                sema.gpa,
                union_obj.srcLoc(sema.mod),
                "union '{}' depends on itself",
                .{ty.fmt(sema.mod)},
            );
            return sema.failWithOwnedErrorMsg(msg);
        },
        .have_field_types,
        .have_layout,
        .layout_wip,
        .fully_resolved_wip,
        .fully_resolved,
        => return,
    }

    union_obj.status = .field_types_wip;
    errdefer union_obj.status = .none;
    try semaUnionFields(sema.mod, union_obj);
    union_obj.status = .have_field_types;
}

/// Returns a normal error set corresponding to the fully populated inferred
/// error set.
fn resolveInferredErrorSet(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    ies_index: InternPool.Index,
) CompileError!InternPool.Index {
    const mod = sema.mod;
    const ip = &mod.intern_pool;
    const func_index = ip.iesFuncIndex(ies_index);
    const func = mod.funcInfo(func_index);
    const resolved_ty = func.resolvedErrorSet(ip).*;
    if (resolved_ty != .none) return resolved_ty;
    if (func.analysis(ip).state == .in_progress)
        return sema.fail(block, src, "unable to resolve inferred error set", .{});

    // In order to ensure that all dependencies are properly added to the set,
    // we need to ensure that the body of the function which owns the inferred
    // error set is analyzed. However, in the case of comptime/inline function
    // calls with inferred error sets, each call gets an adhoc InferredErrorSet
    // object, which has no corresponding function body.
    const ies_func_owner_decl = mod.declPtr(func.owner_decl);
    const ies_func_info = mod.typeToFunc(ies_func_owner_decl.ty).?;
    // If the IES is declared by an inline function with a generic return type,
    // the return type will be generic_poison, because an inline function does
    // not create a new declaration and its IES has already been filled in by
    // analyzeCall, so we can simply skip this case.
    if (ies_func_info.return_type == .generic_poison_type) {
        assert(ies_func_info.cc == .Inline);
    } else if (ip.errorUnionSet(ies_func_info.return_type) == ies_index) {
        if (ies_func_info.is_generic) {
            const msg = msg: {
                const msg = try sema.errMsg(block, src, "unable to resolve inferred error set of generic function", .{});
                errdefer msg.destroy(sema.gpa);

                try sema.mod.errNoteNonLazy(ies_func_owner_decl.srcLoc(mod), msg, "generic function declared here", .{});
                break :msg msg;
            };
            return sema.failWithOwnedErrorMsg(msg);
        }
        // In this case we are dealing with the actual InferredErrorSet object that
        // corresponds to the function, not one created to track an inline/comptime call.
        try sema.ensureFuncBodyAnalyzed(func_index);
    }

    // This will now have been resolved by the logic at the end of `Module.analyzeFnBody`
    // which calls `resolveInferredErrorSetPtr`.
    const final_resolved_ty = func.resolvedErrorSet(ip).*;
    assert(final_resolved_ty != .none);
    return final_resolved_ty;
}

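/// Resolves the inferred error set of the function currently being analyzed,
/// unioning in the resolved errors of every inferred error set it depends on.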
pub fn resolveInferredErrorSetPtr(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    ies: *InferredErrorSet,
) CompileError!void {
    const mod = sema.mod;
    const ip = &mod.intern_pool;

    if (ies.resolved != .none) return;

    const ies_index = ip.errorUnionSet(sema.fn_ret_ty.toIntern());

    for (ies.inferred_error_sets.keys()) |other_ies_index| {
        if (ies_index == other_ies_index) continue;
        switch (try sema.resolveInferredErrorSet(block, src, other_ies_index)) {
            .anyerror_type => {
                ies.resolved = .anyerror_type;
                return;
            },
            else => |error_set_ty_index| {
                const names = ip.indexToKey(error_set_ty_index).error_set_type.names;
                for (names.get(ip)) |name| {
                    try ies.errors.put(sema.arena, name, {});
                }
            },
        }
    }

    const resolved_error_set_ty = try mod.errorSetFromUnsortedNames(ies.errors.keys());
    ies.resolved = resolved_error_set_ty.toIntern();
}

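/// If `value` has an error union type using the ad hoc inferred error set,
/// returns `value` coerced to the resolved error set; otherwise returns it
/// unchanged.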
fn resolveAdHocInferredErrorSet(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    value: InternPool.Index,
) CompileError!InternPool.Index {
    const mod = sema.mod;
    const gpa = sema.gpa;
    const ip = &mod.intern_pool;
    const new_ty = try resolveAdHocInferredErrorSetTy(sema, block, src, ip.typeOf(value));
    if (new_ty == .none) return value;
    return ip.getCoerced(gpa, value, new_ty);
}

fn resolveAdHocInferredErrorSetTy(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    ty: InternPool.Index,
) CompileError!InternPool.Index {
    const ies = sema.fn_ret_ty_ies orelse return .none;
    const mod = sema.mod;
    const gpa = sema.gpa;
    const ip = &mod.intern_pool;
    const error_union_info = switch (ip.indexToKey(ty)) {
        .error_union_type => |x| x,
        else => return .none,
    };
    if (error_union_info.error_set_type != .adhoc_inferred_error_set_type)
        return .none;

    try sema.resolveInferredErrorSetPtr(block, src, ies);
    const new_ty = try ip.get(gpa, .{ .error_union_type = .{
        .error_set_type = ies.resolved,
        .payload_type = error_union_info.payload_type,
    } });
    return new_ty;
}

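/// Resolves `ty` when it is an inferred error set type; `anyerror` and
/// explicit error sets are returned unchanged.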
fn resolveInferredErrorSetTy(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    ty: InternPool.Index,
) CompileError!InternPool.Index {
    const mod = sema.mod;
    const ip = &mod.intern_pool;
    if (ty == .anyerror_type) return ty;
    switch (ip.indexToKey(ty)) {
        .error_set_type => return ty,
        .inferred_error_set_type => return sema.resolveInferredErrorSet(block, src, ty),
        else => unreachable,
    }
}

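/// Performs semantic analysis of the fields of a struct declaration: names,
/// types, and alignments first, then default value initializers in a second
/// pass so that they may depend on the type layout.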
fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void {
    const gpa = mod.gpa;
    const ip = &mod.intern_pool;
    const decl_index = struct_obj.owner_decl;
    const zir = mod.namespacePtr(struct_obj.namespace).file_scope.zir;
    const extended = zir.instructions.items(.data)[struct_obj.zir_index].extended;
    assert(extended.opcode == .struct_decl);
    const small = @as(Zir.Inst.StructDecl.Small, @bitCast(extended.small));
    var extra_index: usize = extended.operand;

    const src = LazySrcLoc.nodeOffset(0);
    extra_index += @intFromBool(small.has_src_node);

    const fields_len = if (small.has_fields_len) blk: {
        const fields_len = zir.extra[extra_index];
        extra_index += 1;
        break :blk fields_len;
    } else 0;

    const decls_len = if (small.has_decls_len) decls_len: {
        const decls_len = zir.extra[extra_index];
        extra_index += 1;
        break :decls_len decls_len;
    } else 0;

    // The backing integer cannot be handled until `resolveStructLayout()`.
    if (small.has_backing_int) {
        const backing_int_body_len = zir.extra[extra_index];
        extra_index += 1; // backing_int_body_len
        if (backing_int_body_len == 0) {
            extra_index += 1; // backing_int_ref
        } else {
            extra_index += backing_int_body_len; // backing_int_body_inst
        }
    }

    // Skip over decls.
    var decls_it = zir.declIteratorInner(extra_index, decls_len);
    while (decls_it.next()) |_| {}
    extra_index = decls_it.extra_index;

    if (fields_len == 0) {
        if (struct_obj.layout == .Packed) {
            try semaBackingIntType(mod, struct_obj);
        }
        struct_obj.status = .have_layout;
        return;
    }

    const decl = mod.declPtr(decl_index);

    var analysis_arena = std.heap.ArenaAllocator.init(gpa);
    defer analysis_arena.deinit();

    var comptime_mutable_decls = std.ArrayList(Decl.Index).init(gpa);
    defer comptime_mutable_decls.deinit();

    var sema: Sema = .{
        .mod = mod,
        .gpa = gpa,
        .arena = analysis_arena.allocator(),
        .code = zir,
        .owner_decl = decl,
        .owner_decl_index = decl_index,
        .func_index = .none,
        .func_is_naked = false,
        .fn_ret_ty = Type.void,
        .fn_ret_ty_ies = null,
        .owner_func_index = .none,
        .comptime_mutable_decls = &comptime_mutable_decls,
    };
    defer sema.deinit();

    var wip_captures = try WipCaptureScope.init(gpa, decl.src_scope);
    defer wip_captures.deinit();

    var block_scope: Block = .{
        .parent = null,
        .sema = &sema,
        .src_decl = decl_index,
        .namespace = struct_obj.namespace,
        .wip_capture_scope = wip_captures.scope,
        .instructions = .{},
        .inlining = null,
        .is_comptime = true,
    };
    defer assert(block_scope.instructions.items.len == 0);

    struct_obj.fields = .{};
    try struct_obj.fields.ensureTotalCapacity(mod.tmp_hack_arena.allocator(), fields_len);

    const Field = struct {
        type_body_len: u32 = 0,
        align_body_len: u32 = 0,
        init_body_len: u32 = 0,
        type_ref: Zir.Inst.Ref = .none,
    };
    const fields = try sema.arena.alloc(Field, fields_len);
    var any_inits = false;

    {
        const bits_per_field = 4;
        const fields_per_u32 = 32 / bits_per_field;
        const bit_bags_count = std.math.divCeil(usize, fields_len, fields_per_u32) catch unreachable;
        const flags_index = extra_index;
        var bit_bag_index: usize = flags_index;
        extra_index += bit_bags_count;
        var cur_bit_bag: u32 = undefined;
        var field_i: u32 = 0;
        while (field_i < fields_len) : (field_i += 1) {
            if (field_i % fields_per_u32 == 0) {
                cur_bit_bag = zir.extra[bit_bag_index];
                bit_bag_index += 1;
            }
            const has_align = @as(u1, @truncate(cur_bit_bag)) != 0;
            cur_bit_bag >>= 1;
            const has_init = @as(u1, @truncate(cur_bit_bag)) != 0;
            cur_bit_bag >>= 1;
            const is_comptime = @as(u1, @truncate(cur_bit_bag)) != 0;
            cur_bit_bag >>= 1;
            const has_type_body = @as(u1, @truncate(cur_bit_bag)) != 0;
            cur_bit_bag >>= 1;

            var field_name_zir: ?[:0]const u8 = null;
            if (!small.is_tuple) {
                field_name_zir = zir.nullTerminatedString(zir.extra[extra_index]);
                extra_index += 1;
            }
            extra_index += 1; // doc_comment

            fields[field_i] = .{};

            if (has_type_body) {
                fields[field_i].type_body_len = zir.extra[extra_index];
            } else {
                fields[field_i].type_ref = @as(Zir.Inst.Ref, @enumFromInt(zir.extra[extra_index]));
            }
            extra_index += 1;

            // This string needs to outlive the ZIR code.
            const field_name = try ip.getOrPutString(gpa, if (field_name_zir) |s|
                s
            else
                try std.fmt.allocPrint(sema.arena, "{d}", .{field_i}));

            const gop = struct_obj.fields.getOrPutAssumeCapacity(field_name);
            if (gop.found_existing) {
                const msg = msg: {
                    const field_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{ .index = field_i }).lazy;
                    const msg = try sema.errMsg(&block_scope, field_src, "duplicate struct field: '{}'", .{field_name.fmt(ip)});
                    errdefer msg.destroy(gpa);

                    const prev_field_index = struct_obj.fields.getIndex(field_name).?;
                    const prev_field_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{ .index = prev_field_index });
                    try mod.errNoteNonLazy(prev_field_src, msg, "other field here", .{});
                    try sema.errNote(&block_scope, src, msg, "struct declared here", .{});
                    break :msg msg;
                };
                return sema.failWithOwnedErrorMsg(msg);
            }
            gop.value_ptr.* = .{
                .ty = Type.noreturn,
                .abi_align = .none,
                .default_val = .none,
                .is_comptime = is_comptime,
                .offset = undefined,
            };

            if (has_align) {
                fields[field_i].align_body_len = zir.extra[extra_index];
                extra_index += 1;
            }
            if (has_init) {
                fields[field_i].init_body_len = zir.extra[extra_index];
                extra_index += 1;
                any_inits = true;
            }
        }
    }

    // Next we do only types and alignments, saving the inits for a second pass,
    // so that init values may depend on type layout.
    const bodies_index = extra_index;

    for (fields, 0..) |zir_field, field_i| {
        const field_ty: Type = ty: {
            if (zir_field.type_ref != .none) {
                break :ty sema.resolveType(&block_scope, .unneeded, zir_field.type_ref) catch |err| switch (err) {
                    error.NeededSourceLocation => {
                        const ty_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{
                            .index = field_i,
                            .range = .type,
                        }).lazy;
                        _ = try sema.resolveType(&block_scope, ty_src, zir_field.type_ref);
                        unreachable;
                    },
                    else => |e| return e,
                };
            }
            assert(zir_field.type_body_len != 0);
            const body = zir.extra[extra_index..][0..zir_field.type_body_len];
            extra_index += body.len;
            const ty_ref = try sema.resolveBody(&block_scope, body, struct_obj.zir_index);
            break :ty sema.analyzeAsType(&block_scope, .unneeded, ty_ref) catch |err| switch (err) {
                error.NeededSourceLocation => {
                    const ty_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{
                        .index = field_i,
                        .range = .type,
                    }).lazy;
                    _ = try sema.analyzeAsType(&block_scope, ty_src, ty_ref);
                    unreachable;
                },
                else => |e| return e,
            };
        };
        if (field_ty.isGenericPoison()) {
            return error.GenericPoison;
        }

        const field = &struct_obj.fields.values()[field_i];
        field.ty = field_ty;

        if (field_ty.zigTypeTag(mod) == .Opaque) {
            const msg = msg: {
                const ty_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{
                    .index = field_i,
                    .range = .type,
                }).lazy;
                const msg = try sema.errMsg(&block_scope, ty_src, "opaque types have unknown size and therefore cannot be directly embedded in structs", .{});
                errdefer msg.destroy(sema.gpa);

                try sema.addDeclaredHereNote(msg, field_ty);
                break :msg msg;
            };
            return sema.failWithOwnedErrorMsg(msg);
        }
        if (field_ty.zigTypeTag(mod) == .NoReturn) {
            const msg = msg: {
                const ty_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{
                    .index = field_i,
                    .range = .type,
                }).lazy;
                const msg = try sema.errMsg(&block_scope, ty_src, "struct fields cannot be 'noreturn'", .{});
                errdefer msg.destroy(sema.gpa);

                try sema.addDeclaredHereNote(msg, field_ty);
                break :msg msg;
            };
            return sema.failWithOwnedErrorMsg(msg);
        }
        if (struct_obj.layout == .Extern and !try sema.validateExternType(field.ty, .struct_field)) {
            const msg = msg: {
                const ty_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{
                    .index = field_i,
                    .range = .type,
                });
                const msg = try sema.errMsg(&block_scope, ty_src.lazy, "extern structs cannot contain fields of type '{}'", .{field.ty.fmt(mod)});
                errdefer msg.destroy(sema.gpa);

                try sema.explainWhyTypeIsNotExtern(msg, ty_src, field.ty, .struct_field);

                try sema.addDeclaredHereNote(msg, field.ty);
                break :msg msg;
            };
            return sema.failWithOwnedErrorMsg(msg);
        } else if (struct_obj.layout == .Packed and !(validatePackedType(field.ty, mod))) {
            const msg = msg: {
                const ty_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{
                    .index = field_i,
                    .range = .type,
                });
                const msg = try sema.errMsg(&block_scope, ty_src.lazy, "packed structs cannot contain fields of type '{}'", .{field.ty.fmt(mod)});
                errdefer msg.destroy(sema.gpa);

                try sema.explainWhyTypeIsNotPacked(msg, ty_src, field.ty);

                try sema.addDeclaredHereNote(msg, field.ty);
                break :msg msg;
            };
            return sema.failWithOwnedErrorMsg(msg);
        }

        if (zir_field.align_body_len > 0) {
            const body = zir.extra[extra_index..][0..zir_field.align_body_len];
            extra_index += body.len;
            const align_ref = try sema.resolveBody(&block_scope, body, struct_obj.zir_index);
            field.abi_align = sema.analyzeAsAlign(&block_scope, .unneeded, align_ref) catch |err| switch (err) {
                error.NeededSourceLocation => {
                    const align_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{
                        .index = field_i,
                        .range = .alignment,
                    }).lazy;
                    _ = try sema.analyzeAsAlign(&block_scope, align_src, align_ref);
                    unreachable;
                },
                else => |e| return e,
            };
        }

        extra_index += zir_field.init_body_len;
    }

    struct_obj.status = .have_field_types;

    if (any_inits) {
        extra_index = bodies_index;
        for (fields, 0..) |zir_field, field_i| {
            extra_index += zir_field.type_body_len;
            extra_index += zir_field.align_body_len;
            if (zir_field.init_body_len > 0) {
                const body = zir.extra[extra_index..][0..zir_field.init_body_len];
                extra_index += body.len;
                const init = try sema.resolveBody(&block_scope, body, struct_obj.zir_index);
                const field = &struct_obj.fields.values()[field_i];
                const coerced = sema.coerce(&block_scope, field.ty, init, .unneeded) catch |err| switch (err) {
                    error.NeededSourceLocation => {
                        const init_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{
                            .index = field_i,
                            .range = .value,
                        }).lazy;
                        _ = try sema.coerce(&block_scope, field.ty, init, init_src);
                        unreachable;
                    },
                    else => |e| return e,
                };
                const default_val = (try sema.resolveMaybeUndefVal(coerced)) orelse {
                    const init_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{
                        .index = field_i,
                        .range = .value,
                    }).lazy;
                    return sema.failWithNeededComptime(&block_scope, init_src, "struct field default value must be comptime-known");
                };
                field.default_val = try default_val.intern(field.ty, mod);
            }
        }
    }
    try wip_captures.finalize();
    for (comptime_mutable_decls.items) |ct_decl_index| {
        const ct_decl = mod.declPtr(ct_decl_index);
        _ = try ct_decl.internValue(mod);
    }

    struct_obj.have_field_inits = true;
}

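/// Performs semantic analysis of the fields of a union declaration, including
/// validation of an explicit tag type or generation of an implicit one.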
fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void {
    const tracy = trace(@src());
    defer tracy.end();

    const gpa = mod.gpa;
    const ip = &mod.intern_pool;
    const decl_index = union_obj.owner_decl;
    const zir = mod.namespacePtr(union_obj.namespace).file_scope.zir;
    const extended = zir.instructions.items(.data)[union_obj.zir_index].extended;
    assert(extended.opcode == .union_decl);
    const small = @as(Zir.Inst.UnionDecl.Small, @bitCast(extended.small));
    var extra_index: usize = extended.operand;

    const src = LazySrcLoc.nodeOffset(0);
    extra_index += @intFromBool(small.has_src_node);

    const tag_type_ref: Zir.Inst.Ref = if (small.has_tag_type) blk: {
        const ty_ref = @as(Zir.Inst.Ref, @enumFromInt(zir.extra[extra_index]));
        extra_index += 1;
        break :blk ty_ref;
    } else .none;

    const body_len = if (small.has_body_len) blk: {
        const body_len = zir.extra[extra_index];
        extra_index += 1;
        break :blk body_len;
    } else 0;

    const fields_len = if (small.has_fields_len) blk: {
        const fields_len = zir.extra[extra_index];
        extra_index += 1;
        break :blk fields_len;
    } else 0;

    const decls_len = if (small.has_decls_len) decls_len: {
        const decls_len = zir.extra[extra_index];
        extra_index += 1;
        break :decls_len decls_len;
    } else 0;

    // Skip over decls.
    var decls_it = zir.declIteratorInner(extra_index, decls_len);
    while (decls_it.next()) |_| {}
    extra_index = decls_it.extra_index;

    const body = zir.extra[extra_index..][0..body_len];
    extra_index += body.len;

    const decl = mod.declPtr(decl_index);

    var analysis_arena = std.heap.ArenaAllocator.init(gpa);
    defer analysis_arena.deinit();

    var comptime_mutable_decls = std.ArrayList(Decl.Index).init(gpa);
    defer comptime_mutable_decls.deinit();

    var sema: Sema = .{
        .mod = mod,
        .gpa = gpa,
        .arena = analysis_arena.allocator(),
        .code = zir,
        .owner_decl = decl,
        .owner_decl_index = decl_index,
        .func_index = .none,
        .func_is_naked = false,
        .fn_ret_ty = Type.void,
        .fn_ret_ty_ies = null,
        .owner_func_index = .none,
        .comptime_mutable_decls = &comptime_mutable_decls,
    };
    defer sema.deinit();

    var wip_captures = try WipCaptureScope.init(gpa, decl.src_scope);
    defer wip_captures.deinit();

    var block_scope: Block = .{
        .parent = null,
        .sema = &sema,
        .src_decl = decl_index,
        .namespace = union_obj.namespace,
        .wip_capture_scope = wip_captures.scope,
        .instructions = .{},
        .inlining = null,
        .is_comptime = true,
    };
    defer assert(block_scope.instructions.items.len == 0);

    if (body.len != 0) {
        try sema.analyzeBody(&block_scope, body);
    }

    try wip_captures.finalize();
    for (comptime_mutable_decls.items) |ct_decl_index| {
        const ct_decl = mod.declPtr(ct_decl_index);
        _ = try ct_decl.internValue(mod);
    }

    try union_obj.fields.ensureTotalCapacity(mod.tmp_hack_arena.allocator(), fields_len);

    var int_tag_ty: Type = undefined;
    var enum_field_names: []InternPool.NullTerminatedString = &.{};
    var enum_field_vals: std.AutoArrayHashMapUnmanaged(InternPool.Index, void) = .{};
    var explicit_tags_seen: []bool = &.{};
    if (tag_type_ref != .none) {
        const tag_ty_src: LazySrcLoc = .{ .node_offset_container_tag = src.node_offset.x };
        const provided_ty = try sema.resolveType(&block_scope, tag_ty_src, tag_type_ref);
        if (small.auto_enum_tag) {
            // The provided type is an integer type and we must construct the enum tag type here.
            int_tag_ty = provided_ty;
            if (int_tag_ty.zigTypeTag(mod) != .Int and int_tag_ty.zigTypeTag(mod) != .ComptimeInt) {
                return sema.fail(&block_scope, tag_ty_src, "expected integer tag type, found '{}'", .{int_tag_ty.fmt(mod)});
            }

            if (fields_len > 0) {
                const field_count_val = try mod.intValue(Type.comptime_int, fields_len - 1);
                if (!(try sema.intFitsInType(field_count_val, int_tag_ty, null))) {
                    const msg = msg: {
                        const msg = try sema.errMsg(&block_scope, tag_ty_src, "specified integer tag type cannot represent every field", .{});
                        errdefer msg.destroy(sema.gpa);
                        try sema.errNote(&block_scope, tag_ty_src, msg, "type '{}' cannot fit values in range 0...{d}", .{
                            int_tag_ty.fmt(mod),
                            fields_len - 1,
                        });
                        break :msg msg;
                    };
                    return sema.failWithOwnedErrorMsg(msg);
                }
                enum_field_names = try sema.arena.alloc(InternPool.NullTerminatedString, fields_len);
                try enum_field_vals.ensureTotalCapacity(sema.arena, fields_len);
            }
        } else {
            // The provided type is the enum tag type.
            union_obj.tag_ty = provided_ty;
            const enum_type = switch (ip.indexToKey(union_obj.tag_ty.toIntern())) {
                .enum_type => |x| x,
                else => return sema.fail(&block_scope, tag_ty_src, "expected enum tag type, found '{}'", .{union_obj.tag_ty.fmt(mod)}),
            };
            // The fields of the union must match the enum exactly.
            // A flag per field is used to check for missing and extraneous fields.
            explicit_tags_seen = try sema.arena.alloc(bool, enum_type.names.len);
            @memset(explicit_tags_seen, false);
        }
    } else {
        // If auto_enum_tag is false, this is an untagged union. However, for semantic analysis
        // purposes, we still auto-generate an enum tag type the same way. That the union is
        // untagged is represented by the Type tag (union vs union_tagged).
        enum_field_names = try sema.arena.alloc(InternPool.NullTerminatedString, fields_len);
    }

    const bits_per_field = 4;
    const fields_per_u32 = 32 / bits_per_field;
    const bit_bags_count = std.math.divCeil(usize, fields_len, fields_per_u32) catch unreachable;
    var bit_bag_index: usize = extra_index;
    extra_index += bit_bags_count;
    var cur_bit_bag: u32 = undefined;
    var field_i: u32 = 0;
    var last_tag_val: ?Value = null;
    while (field_i < fields_len) : (field_i += 1) {
        if (field_i % fields_per_u32 == 0) {
            cur_bit_bag = zir.extra[bit_bag_index];
            bit_bag_index += 1;
        }
        const has_type = @as(u1, @truncate(cur_bit_bag)) != 0;
        cur_bit_bag >>= 1;
        const has_align = @as(u1, @truncate(cur_bit_bag)) != 0;
        cur_bit_bag >>= 1;
        const has_tag = @as(u1, @truncate(cur_bit_bag)) != 0;
        cur_bit_bag >>= 1;
        const unused = @as(u1, @truncate(cur_bit_bag)) != 0;
        cur_bit_bag >>= 1;
        _ = unused;

        const field_name_zir = zir.nullTerminatedString(zir.extra[extra_index]);
        extra_index += 1;

        // doc_comment
        extra_index += 1;

        const field_type_ref: Zir.Inst.Ref = if (has_type) blk: {
            const field_type_ref = @as(Zir.Inst.Ref, @enumFromInt(zir.extra[extra_index]));
            extra_index += 1;
            break :blk field_type_ref;
        } else .none;

        const align_ref: Zir.Inst.Ref = if (has_align) blk: {
            const align_ref = @as(Zir.Inst.Ref, @enumFromInt(zir.extra[extra_index]));
            extra_index += 1;
            break :blk align_ref;
        } else .none;

        const tag_ref: Air.Inst.Ref = if (has_tag) blk: {
            const tag_ref = @as(Zir.Inst.Ref, @enumFromInt(zir.extra[extra_index]));
            extra_index += 1;
            break :blk try sema.resolveInst(tag_ref);
        } else .none;

        if (enum_field_vals.capacity() > 0) {
            const enum_tag_val = if (tag_ref != .none) blk: {
                const val = sema.semaUnionFieldVal(&block_scope, .unneeded, int_tag_ty, tag_ref) catch |err| switch (err) {
                    error.NeededSourceLocation => {
                        const val_src = mod.fieldSrcLoc(union_obj.owner_decl, .{
                            .index = field_i,
                            .range = .value,
                        }).lazy;
                        _ = try sema.semaUnionFieldVal(&block_scope, val_src, int_tag_ty, tag_ref);
                        unreachable;
                    },
                    else => |e| return e,
                };
                last_tag_val = val;

                break :blk val;
            } else blk: {
                const val = if (last_tag_val) |val|
                    try sema.intAdd(val, Value.one_comptime_int, int_tag_ty, undefined)
                else
                    try mod.intValue(int_tag_ty, 0);
                last_tag_val = val;

                break :blk val;
            };
            const gop = enum_field_vals.getOrPutAssumeCapacity(enum_tag_val.toIntern());
            if (gop.found_existing) {
                const field_src = mod.fieldSrcLoc(union_obj.owner_decl, .{ .index = field_i }).lazy;
                const other_field_src = mod.fieldSrcLoc(union_obj.owner_decl, .{ .index = gop.index }).lazy;
                const msg = msg: {
                    const msg = try sema.errMsg(&block_scope, field_src, "enum tag value {} already taken", .{enum_tag_val.fmtValue(int_tag_ty, mod)});
                    errdefer msg.destroy(gpa);
                    try sema.errNote(&block_scope, other_field_src, msg, "other occurrence here", .{});
                    break :msg msg;
                };
                return sema.failWithOwnedErrorMsg(msg);
            }
        }

        // This string needs to outlive the ZIR code.
        const field_name = try ip.getOrPutString(gpa, field_name_zir);
        if (enum_field_names.len != 0) {
            enum_field_names[field_i] = field_name;
        }

        const field_ty: Type = if (!has_type)
            Type.void
        else if (field_type_ref == .none)
            Type.noreturn
        else
            sema.resolveType(&block_scope, .unneeded, field_type_ref) catch |err| switch (err) {
                error.NeededSourceLocation => {
                    const ty_src = mod.fieldSrcLoc(union_obj.owner_decl, .{
                        .index = field_i,
                        .range = .type,
                    }).lazy;
                    _ = try sema.resolveType(&block_scope, ty_src, field_type_ref);
                    unreachable;
                },
                else => |e| return e,
            };

        if (field_ty.isGenericPoison()) {
            return error.GenericPoison;
        }

        const gop = union_obj.fields.getOrPutAssumeCapacity(field_name);
        if (gop.found_existing) {
            const msg = msg: {
                const field_src = mod.fieldSrcLoc(union_obj.owner_decl, .{ .index = field_i }).lazy;
                const msg = try sema.errMsg(&block_scope, field_src, "duplicate union field: '{}'", .{
                    field_name.fmt(ip),
                });
                errdefer msg.destroy(gpa);

                const prev_field_index = union_obj.fields.getIndex(field_name).?;
                const prev_field_src = mod.fieldSrcLoc(union_obj.owner_decl, .{ .index = prev_field_index }).lazy;
                try mod.errNoteNonLazy(prev_field_src.toSrcLoc(decl, mod), msg, "other field here", .{});
                try sema.errNote(&block_scope, src, msg, "union declared here", .{});
                break :msg msg;
            };
            return sema.failWithOwnedErrorMsg(msg);
        }

        if (explicit_tags_seen.len > 0) {
            const tag_info = ip.indexToKey(union_obj.tag_ty.toIntern()).enum_type;
            const enum_index = tag_info.nameIndex(ip, field_name) orelse {
                const msg = msg: {
                    const ty_src = mod.fieldSrcLoc(union_obj.owner_decl, .{
                        .index = field_i,
                        .range = .type,
                    }).lazy;
                    const msg = try sema.errMsg(&block_scope, ty_src, "no field named '{}' in enum '{}'", .{
                        field_name.fmt(ip), union_obj.tag_ty.fmt(mod),
                    });
                    errdefer msg.destroy(sema.gpa);
                    try sema.addDeclaredHereNote(msg, union_obj.tag_ty);
                    break :msg msg;
                };
                return sema.failWithOwnedErrorMsg(msg);
            };
            // No check for duplicate because the check already happened in order
            // to create the enum type in the first place.
            assert(!explicit_tags_seen[enum_index]);
            explicit_tags_seen[enum_index] = true;
        }

        if (field_ty.zigTypeTag(mod) == .Opaque) {
            const msg = msg: {
                const ty_src = mod.fieldSrcLoc(union_obj.owner_decl, .{
                    .index = field_i,
                    .range = .type,
                }).lazy;
                const msg = try sema.errMsg(&block_scope, ty_src, "opaque types have unknown size and therefore cannot be directly embedded in unions", .{});
                errdefer msg.destroy(sema.gpa);

                try sema.addDeclaredHereNote(msg, field_ty);
                break :msg msg;
            };
            return sema.failWithOwnedErrorMsg(msg);
        }
        if (union_obj.layout == .Extern and !try sema.validateExternType(field_ty, .union_field)) {
            const msg = msg: {
                const ty_src = mod.fieldSrcLoc(union_obj.owner_decl, .{
                    .index = field_i,
                    .range = .type,
                });
                const msg = try sema.errMsg(&block_scope, ty_src.lazy, "extern unions cannot contain fields of type '{}'", .{field_ty.fmt(mod)});
                errdefer msg.destroy(sema.gpa);

                try sema.explainWhyTypeIsNotExtern(msg, ty_src, field_ty, .union_field);

                try sema.addDeclaredHereNote(msg, field_ty);
                break :msg msg;
            };
            return sema.failWithOwnedErrorMsg(msg);
        } else if (union_obj.layout == .Packed and !(validatePackedType(field_ty, mod))) {
            const msg = msg: {
                const ty_src = mod.fieldSrcLoc(union_obj.owner_decl, .{
                    .index = field_i,
                    .range = .type,
                });
                const msg = try sema.errMsg(&block_scope, ty_src.lazy, "packed unions cannot contain fields of type '{}'", .{field_ty.fmt(mod)});
                errdefer msg.destroy(sema.gpa);

                try sema.explainWhyTypeIsNotPacked(msg, ty_src, field_ty);

                try sema.addDeclaredHereNote(msg, field_ty);
                break :msg msg;
            };
            return sema.failWithOwnedErrorMsg(msg);
        }

        gop.value_ptr.* = .{
            .ty = field_ty,
            .abi_align = .none,
        };

        if (align_ref != .none) {
            gop.value_ptr.abi_align = sema.resolveAlign(&block_scope, .unneeded, align_ref) catch |err| switch (err) {
                error.NeededSourceLocation => {
                    const align_src = mod.fieldSrcLoc(union_obj.owner_decl, .{
                        .index = field_i,
                        .range = .alignment,
                    }).lazy;
                    _ = try sema.resolveAlign(&block_scope, align_src, align_ref);
                    unreachable;
                },
                else => |e| return e,
            };
        } else {
            gop.value_ptr.abi_align = .none;
        }
    }

    if (explicit_tags_seen.len > 0) {
        const tag_info = ip.indexToKey(union_obj.tag_ty.toIntern()).enum_type;
        if (tag_info.names.len > fields_len) {
            const msg = msg: {
                const msg = try sema.errMsg(&block_scope, src, "enum field(s) missing in union", .{});
                errdefer msg.destroy(sema.gpa);

                const enum_ty = union_obj.tag_ty;
                for (tag_info.names.get(ip), 0..) |field_name, field_index| {
                    if (explicit_tags_seen[field_index]) continue;
                    try sema.addFieldErrNote(enum_ty, field_index, msg, "field '{}' missing, declared here", .{
                        field_name.fmt(ip),
                    });
                }
                try sema.addDeclaredHereNote(msg, union_obj.tag_ty);
                break :msg msg;
            };
            return sema.failWithOwnedErrorMsg(msg);
        }
    } else if (enum_field_vals.count() > 0) {
        union_obj.tag_ty = try sema.generateUnionTagTypeNumbered(&block_scope, enum_field_names, enum_field_vals.keys(), union_obj);
    } else {
        union_obj.tag_ty = try sema.generateUnionTagTypeSimple(&block_scope, enum_field_names, union_obj);
    }
}

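/// Coerces an explicit union tag value to the integer tag type, requiring it
/// to be comptime-known.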
fn semaUnionFieldVal(sema: *Sema, block: *Block, src: LazySrcLoc, int_tag_ty: Type, tag_ref: Air.Inst.Ref) CompileError!Value {
    const coerced = try sema.coerce(block, int_tag_ty, tag_ref, src);
    return sema.resolveConstValue(block, src, coerced, "enum tag value must be comptime-known");
}

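/// Creates an anonymous decl holding the generated enum tag type of a union
/// whose fields carry explicit tag values.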
fn generateUnionTagTypeNumbered(
    sema: *Sema,
    block: *Block,
    enum_field_names: []const InternPool.NullTerminatedString,
    enum_field_vals: []const InternPool.Index,
    union_obj: *Module.Union,
) !Type {
    const mod = sema.mod;
    const gpa = sema.gpa;
    const ip = &mod.intern_pool;

    const src_decl = mod.declPtr(block.src_decl);
    const new_decl_index = try mod.allocateNewDecl(block.namespace, src_decl.src_node, block.wip_capture_scope);
    errdefer mod.destroyDecl(new_decl_index);
    const fqn = try union_obj.getFullyQualifiedName(mod);
    const name = try ip.getOrPutStringFmt(gpa, "@typeInfo({}).Union.tag_type.?", .{fqn.fmt(ip)});
    try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, .{
        .ty = Type.noreturn,
        .val = Value.@"unreachable",
    }, name);
    errdefer mod.abortAnonDecl(new_decl_index);

    const new_decl = mod.declPtr(new_decl_index);
    new_decl.name_fully_qualified = true;
    new_decl.owns_tv = true;
new_decl.name_fully_qualified = true;
|
|
|
|
const enum_ty = try ip.getEnum(gpa, .{
|
|
.decl = new_decl_index,
|
|
.namespace = .none,
|
|
.tag_ty = if (enum_field_vals.len == 0)
|
|
(try mod.intType(.unsigned, 0)).toIntern()
|
|
else
|
|
ip.typeOf(enum_field_vals[0]),
|
|
.names = enum_field_names,
|
|
.values = enum_field_vals,
|
|
.tag_mode = .explicit,
|
|
});
|
|
|
|
new_decl.ty = Type.type;
|
|
new_decl.val = enum_ty.toValue();
|
|
|
|
try mod.finalizeAnonDecl(new_decl_index);
|
|
return enum_ty.toType();
|
|
}
|
|
|
|
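/// Creates the implicit, auto-numbered enum tag type for a union with no
/// explicit tag values. When `maybe_union_obj` is null, the backing decl is
/// fully anonymous instead of being named after the union.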
fn generateUnionTagTypeSimple(
    sema: *Sema,
    block: *Block,
    enum_field_names: []const InternPool.NullTerminatedString,
    maybe_union_obj: ?*Module.Union,
) !Type {
    const mod = sema.mod;
    const ip = &mod.intern_pool;
    const gpa = sema.gpa;

    const new_decl_index = new_decl_index: {
        const union_obj = maybe_union_obj orelse {
            break :new_decl_index try mod.createAnonymousDecl(block, .{
                .ty = Type.noreturn,
                .val = Value.@"unreachable",
            });
        };
        const src_decl = mod.declPtr(block.src_decl);
        const new_decl_index = try mod.allocateNewDecl(block.namespace, src_decl.src_node, block.wip_capture_scope);
        errdefer mod.destroyDecl(new_decl_index);
        const fqn = try union_obj.getFullyQualifiedName(mod);
        const name = try ip.getOrPutStringFmt(gpa, "@typeInfo({}).Union.tag_type.?", .{fqn.fmt(ip)});
        try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, .{
            .ty = Type.noreturn,
            .val = Value.@"unreachable",
        }, name);
        mod.declPtr(new_decl_index).name_fully_qualified = true;
        break :new_decl_index new_decl_index;
    };
    errdefer mod.abortAnonDecl(new_decl_index);

    const enum_ty = try ip.getEnum(gpa, .{
        .decl = new_decl_index,
        .namespace = .none,
        .tag_ty = if (enum_field_names.len == 0)
            (try mod.intType(.unsigned, 0)).toIntern()
        else
            (try mod.smallestUnsignedInt(enum_field_names.len - 1)).toIntern(),
        .names = enum_field_names,
        .values = &.{},
        .tag_mode = .auto,
    });

    const new_decl = mod.declPtr(new_decl_index);
    new_decl.owns_tv = true;
    new_decl.ty = Type.type;
    new_decl.val = enum_ty.toValue();

    try mod.finalizeAnonDecl(new_decl_index);
    return enum_ty.toType();
}

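/// Looks up `name` in `std.builtin` and returns it as a value reference,
/// analyzed inside a temporary comptime block.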
fn getBuiltin(sema: *Sema, name: []const u8) CompileError!Air.Inst.Ref {
    const gpa = sema.gpa;
    const src = LazySrcLoc.nodeOffset(0);

    var wip_captures = try WipCaptureScope.init(gpa, sema.owner_decl.src_scope);
    defer wip_captures.deinit();

    var block: Block = .{
        .parent = null,
        .sema = sema,
        .src_decl = sema.owner_decl_index,
        .namespace = sema.owner_decl.src_namespace,
        .wip_capture_scope = wip_captures.scope,
        .instructions = .{},
        .inlining = null,
        .is_comptime = true,
    };
    defer block.instructions.deinit(gpa);

    const decl_index = try getBuiltinDecl(sema, &block, name);
    return sema.analyzeDeclVal(&block, src, decl_index);
}

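/// Resolves the `Decl` for `name` inside `std.builtin`, panicking if the
/// standard library is corrupt rather than emitting a compile error.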
fn getBuiltinDecl(sema: *Sema, block: *Block, name: []const u8) CompileError!Module.Decl.Index {
    const gpa = sema.gpa;

    const src = LazySrcLoc.nodeOffset(0);

    const mod = sema.mod;
    const ip = &mod.intern_pool;
    const std_pkg = mod.main_pkg.table.get("std").?;
    const std_file = (mod.importPkg(std_pkg) catch unreachable).file;
    const opt_builtin_inst = (try sema.namespaceLookupRef(
        block,
        src,
        mod.declPtr(std_file.root_decl.unwrap().?).src_namespace,
        try ip.getOrPutString(gpa, "builtin"),
    )) orelse @panic("lib/std.zig is corrupt and missing 'builtin'");
    const builtin_inst = try sema.analyzeLoad(block, src, opt_builtin_inst, src);
    const builtin_ty = sema.analyzeAsType(block, src, builtin_inst) catch |err| switch (err) {
        error.AnalysisFail => std.debug.panic("std.builtin is corrupt", .{}),
        else => |e| return e,
    };
    const decl_index = (try sema.namespaceLookup(
        block,
        src,
        builtin_ty.getNamespaceIndex(mod).unwrap().?,
        try ip.getOrPutString(gpa, name),
    )) orelse std.debug.panic("lib/std/builtin.zig is corrupt and missing '{s}'", .{name});
    return decl_index;
}

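/// Like `getBuiltin`, but additionally analyzes the declaration as a type and
/// fully resolves it before returning.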
fn getBuiltinType(sema: *Sema, name: []const u8) CompileError!Type {
    const ty_inst = try sema.getBuiltin(name);

    var wip_captures = try WipCaptureScope.init(sema.gpa, sema.owner_decl.src_scope);
    defer wip_captures.deinit();

    var block: Block = .{
        .parent = null,
        .sema = sema,
        .src_decl = sema.owner_decl_index,
        .namespace = sema.owner_decl.src_namespace,
        .wip_capture_scope = wip_captures.scope,
        .instructions = .{},
        .inlining = null,
        .is_comptime = true,
    };
    defer block.instructions.deinit(sema.gpa);
    const src = LazySrcLoc.nodeOffset(0);

    const result_ty = sema.analyzeAsType(&block, src, ty_inst) catch |err| switch (err) {
        error.AnalysisFail => std.debug.panic("std.builtin.{s} is corrupt", .{name}),
        else => |e| return e,
    };
    try sema.resolveTypeFully(result_ty); // Should not fail
    return result_ty;
}

/// There is another implementation of this in `Type.onePossibleValue`. This one
/// in `Sema` is for calling during semantic analysis, and performs field resolution
/// to get the answer. The one in `Type` is for calling during codegen and asserts
/// that the types are already resolved.
/// TODO assert the return value matches `ty.onePossibleValue`
pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
    const mod = sema.mod;
    const ip = &mod.intern_pool;
    return switch (ty.toIntern()) {
        .u0_type,
        .i0_type,
        => try mod.intValue(ty, 0),
        .u1_type,
        .u8_type,
        .i8_type,
        .u16_type,
        .i16_type,
        .u29_type,
        .u32_type,
        .i32_type,
        .u64_type,
        .i64_type,
        .u80_type,
        .u128_type,
        .i128_type,
        .usize_type,
        .isize_type,
        .c_char_type,
        .c_short_type,
        .c_ushort_type,
        .c_int_type,
        .c_uint_type,
        .c_long_type,
        .c_ulong_type,
        .c_longlong_type,
        .c_ulonglong_type,
        .c_longdouble_type,
        .f16_type,
        .f32_type,
        .f64_type,
        .f80_type,
        .f128_type,
        .anyopaque_type,
        .bool_type,
        .type_type,
        .anyerror_type,
        .adhoc_inferred_error_set_type,
        .comptime_int_type,
        .comptime_float_type,
        .enum_literal_type,
        .atomic_order_type,
        .atomic_rmw_op_type,
        .calling_convention_type,
        .address_space_type,
        .float_mode_type,
        .reduce_op_type,
        .call_modifier_type,
        .prefetch_options_type,
        .export_options_type,
        .extern_options_type,
        .type_info_type,
        .manyptr_u8_type,
        .manyptr_const_u8_type,
        .manyptr_const_u8_sentinel_0_type,
        .single_const_pointer_to_comptime_int_type,
        .slice_const_u8_type,
        .slice_const_u8_sentinel_0_type,
        .anyerror_void_error_union_type,
        => null,
        .void_type => Value.void,
        .noreturn_type => Value.@"unreachable",
        .anyframe_type => unreachable,
        .null_type => Value.null,
        .undefined_type => Value.undef,
        .optional_noreturn_type => try mod.nullValue(ty),
        .generic_poison_type => error.GenericPoison,
        .empty_struct_type => Value.empty_struct,
        // values, not types
        .undef,
        .zero,
        .zero_usize,
        .zero_u8,
        .one,
        .one_usize,
        .one_u8,
        .four_u8,
        .negative_one,
        .calling_convention_c,
        .calling_convention_inline,
        .void_value,
        .unreachable_value,
        .null_value,
        .bool_true,
        .bool_false,
        .empty_struct,
        .generic_poison,
        // invalid
        .var_args_param_type,
        .none,
        => unreachable,

        _ => switch (ip.items.items(.tag)[@intFromEnum(ty.toIntern())]) {
            .type_int_signed, // i0 handled above
            .type_int_unsigned, // u0 handled above
            .type_pointer,
            .type_slice,
            .type_optional, // ?noreturn handled above
            .type_anyframe,
            .type_error_union,
            .type_anyerror_union,
            .type_error_set,
            .type_inferred_error_set,
            .type_opaque,
            .type_function,
            => null,

            .simple_type, // handled above
            // values, not types
            .undef,
            .runtime_value,
            .simple_value,
            .ptr_decl,
            .ptr_mut_decl,
            .ptr_comptime_field,
            .ptr_int,
            .ptr_eu_payload,
            .ptr_opt_payload,
            .ptr_elem,
            .ptr_field,
            .ptr_slice,
            .opt_payload,
            .opt_null,
            .int_u8,
            .int_u16,
            .int_u32,
            .int_i32,
            .int_usize,
            .int_comptime_int_u32,
            .int_comptime_int_i32,
            .int_small,
            .int_positive,
            .int_negative,
            .int_lazy_align,
            .int_lazy_size,
            .error_set_error,
            .error_union_error,
            .error_union_payload,
            .enum_literal,
            .enum_tag,
            .float_f16,
            .float_f32,
            .float_f64,
            .float_f80,
            .float_f128,
            .float_c_longdouble_f80,
            .float_c_longdouble_f128,
            .float_comptime_float,
            .variable,
            .extern_func,
            .func_decl,
            .func_instance,
            .func_coerced,
            .only_possible_value,
            .union_value,
            .bytes,
            .aggregate,
            .repeated,
            // memoized value, not types
            .memoized_call,
            => unreachable,

            .type_array_big,
            .type_array_small,
            .type_vector,
            .type_enum_auto,
            .type_enum_explicit,
            .type_enum_nonexhaustive,
            .type_struct,
            .type_struct_ns,
            .type_struct_anon,
            .type_tuple_anon,
            .type_union_tagged,
            .type_union_untagged,
            .type_union_safety,
            => switch (ip.indexToKey(ty.toIntern())) {
                inline .array_type, .vector_type => |seq_type, seq_tag| {
                    const has_sentinel = seq_tag == .array_type and seq_type.sentinel != .none;
                    if (seq_type.len + @intFromBool(has_sentinel) == 0) return (try mod.intern(.{ .aggregate = .{
                        .ty = ty.toIntern(),
                        .storage = .{ .elems = &.{} },
                    } })).toValue();

                    if (try sema.typeHasOnePossibleValue(seq_type.child.toType())) |opv| {
                        return (try mod.intern(.{ .aggregate = .{
                            .ty = ty.toIntern(),
                            .storage = .{ .repeated_elem = opv.toIntern() },
                        } })).toValue();
                    }
                    return null;
                },

                .struct_type => |struct_type| {
                    try sema.resolveTypeFields(ty);
                    if (mod.structPtrUnwrap(struct_type.index)) |s| {
                        const field_vals = try sema.arena.alloc(InternPool.Index, s.fields.count());
                        for (field_vals, s.fields.values(), 0..) |*field_val, field, i| {
                            if (field.is_comptime) {
                                field_val.* = field.default_val;
                                continue;
                            }
                            if (field.ty.eql(ty, sema.mod)) {
                                const msg = try Module.ErrorMsg.create(
                                    sema.gpa,
                                    s.srcLoc(sema.mod),
                                    "struct '{}' depends on itself",
                                    .{ty.fmt(sema.mod)},
                                );
                                try sema.addFieldErrNote(ty, i, msg, "while checking this field", .{});
                                return sema.failWithOwnedErrorMsg(msg);
                            }
                            if (try sema.typeHasOnePossibleValue(field.ty)) |field_opv| {
                                field_val.* = try field_opv.intern(field.ty, mod);
                            } else return null;
                        }

                        // In this case the struct has no runtime-known fields and
                        // therefore has one possible value.
                        return (try mod.intern(.{ .aggregate = .{
                            .ty = ty.toIntern(),
                            .storage = .{ .elems = field_vals },
                        } })).toValue();
                    }

                    // In this case the struct has no fields at all and
                    // therefore has one possible value.
                    return (try mod.intern(.{ .aggregate = .{
                        .ty = ty.toIntern(),
                        .storage = .{ .elems = &.{} },
                    } })).toValue();
                },

                .anon_struct_type => |tuple| {
                    for (tuple.values) |val| {
                        if (val == .none) return null;
                    }
                    // In this case the struct has all comptime-known fields and
                    // therefore has one possible value.
                    // TODO: write something like getCoercedInts to avoid needing to dupe
                    return (try mod.intern(.{ .aggregate = .{
                        .ty = ty.toIntern(),
                        .storage = .{ .elems = try sema.arena.dupe(InternPool.Index, tuple.values) },
                    } })).toValue();
                },

                .union_type => |union_type| {
                    try sema.resolveTypeFields(ty);
                    const union_obj = mod.unionPtr(union_type.index);
                    const tag_val = (try sema.typeHasOnePossibleValue(union_obj.tag_ty)) orelse
                        return null;
                    const fields = union_obj.fields.values();
                    if (fields.len == 0) {
                        const only = try mod.intern(.{ .empty_enum_value = ty.toIntern() });
                        return only.toValue();
                    }
                    const only_field = fields[0];
                    if (only_field.ty.eql(ty, sema.mod)) {
                        const msg = try Module.ErrorMsg.create(
                            sema.gpa,
                            union_obj.srcLoc(sema.mod),
                            "union '{}' depends on itself",
                            .{ty.fmt(sema.mod)},
                        );
                        try sema.addFieldErrNote(ty, 0, msg, "while checking this field", .{});
                        return sema.failWithOwnedErrorMsg(msg);
                    }
                    const val_val = (try sema.typeHasOnePossibleValue(only_field.ty)) orelse
                        return null;
                    const only = try mod.intern(.{ .un = .{
                        .ty = ty.toIntern(),
                        .tag = tag_val.toIntern(),
                        .val = val_val.toIntern(),
                    } });
                    return only.toValue();
                },

                .enum_type => |enum_type| switch (enum_type.tag_mode) {
                    .nonexhaustive => {
                        if (enum_type.tag_ty == .comptime_int_type) return null;

                        if (try sema.typeHasOnePossibleValue(enum_type.tag_ty.toType())) |int_opv| {
                            const only = try mod.intern(.{ .enum_tag = .{
                                .ty = ty.toIntern(),
                                .int = int_opv.toIntern(),
                            } });
                            return only.toValue();
                        }

                        return null;
                    },
                    .auto, .explicit => {
                        if (enum_type.tag_ty.toType().hasRuntimeBits(mod)) return null;

                        switch (enum_type.names.len) {
                            0 => {
                                const only = try mod.intern(.{ .empty_enum_value = ty.toIntern() });
                                return only.toValue();
                            },
                            1 => return try mod.getCoerced((if (enum_type.values.len == 0)
                                try mod.intern(.{ .int = .{
                                    .ty = enum_type.tag_ty,
                                    .storage = .{ .u64 = 0 },
                                } })
                            else
                                enum_type.values.get(ip)[0]).toValue(), ty),
                            else => return null,
                        }
                    },
                },

                else => unreachable,
            },
        },
    };
}

/// Returns the type of the AIR instruction.
fn typeOf(sema: *Sema, inst: Air.Inst.Ref) Type {
    return sema.getTmpAir().typeOf(inst, &sema.mod.intern_pool);
}

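/// Constructs a temporary `Air` view over the instructions generated so far,
/// without transferring ownership of them.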
pub fn getTmpAir(sema: Sema) Air {
    return .{
        .instructions = sema.air_instructions.slice(),
        .extra = sema.air_extra.items,
    };
}

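/// Appends the fields of `extra` to the AIR extra array, growing it as needed,
/// and returns the index of the first appended element.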
pub fn addExtra(sema: *Sema, extra: anytype) Allocator.Error!u32 {
    const fields = std.meta.fields(@TypeOf(extra));
    try sema.air_extra.ensureUnusedCapacity(sema.gpa, fields.len);
    return sema.addExtraAssumeCapacity(extra);
}

pub fn addExtraAssumeCapacity(sema: *Sema, extra: anytype) u32 {
    const fields = std.meta.fields(@TypeOf(extra));
    const result = @as(u32, @intCast(sema.air_extra.items.len));
    inline for (fields) |field| {
        sema.air_extra.appendAssumeCapacity(switch (field.type) {
            u32 => @field(extra, field.name),
            Air.Inst.Ref => @intFromEnum(@field(extra, field.name)),
            i32 => @as(u32, @bitCast(@field(extra, field.name))),
            InternPool.Index => @intFromEnum(@field(extra, field.name)),
            else => @compileError("bad field type: " ++ @typeName(field.type)),
        });
    }
    return result;
}

fn appendRefsAssumeCapacity(sema: *Sema, refs: []const Air.Inst.Ref) void {
    const coerced = @as([]const u32, @ptrCast(refs));
    sema.air_extra.appendSliceAssumeCapacity(coerced);
}

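/// If `inst_index` is a `br` instruction, returns the block it targets;
/// otherwise returns null.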
fn getBreakBlock(sema: *Sema, inst_index: Air.Inst.Index) ?Air.Inst.Index {
    const air_datas = sema.air_instructions.items(.data);
    const air_tags = sema.air_instructions.items(.tag);
    switch (air_tags[inst_index]) {
        .br => return air_datas[inst_index].br.block_inst,
        else => return null,
    }
}

fn isComptimeKnown(
    sema: *Sema,
    inst: Air.Inst.Ref,
) !bool {
    return (try sema.resolveMaybeUndefVal(inst)) != null;
}

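/// Creates an anonymous, comptime-mutable `Decl` initialized to undef and
/// returns a comptime-known pointer to it.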
fn analyzeComptimeAlloc(
    sema: *Sema,
    block: *Block,
    var_type: Type,
    alignment: Alignment,
) CompileError!Air.Inst.Ref {
    const mod = sema.mod;

    // Needed to make an anon decl with type `var_type` (the `finish()` call below).
    _ = try sema.typeHasOnePossibleValue(var_type);

    const ptr_type = try mod.ptrType(.{
        .child = var_type.toIntern(),
        .flags = .{
            .alignment = alignment,
            .address_space = target_util.defaultAddressSpace(mod.getTarget(), .global_constant),
        },
    });

    var anon_decl = try block.startAnonDecl();
    defer anon_decl.deinit();

    const decl_index = try anon_decl.finish(
        var_type,
        // There will be stores before the first load, but they may be to sub-elements or
        // sub-fields. So we need to initialize with undef to allow the mechanism to expand
        // into fields/elements and have those overridden with stored values.
        (try mod.intern(.{ .undef = var_type.toIntern() })).toValue(),
        alignment,
    );
    const decl = mod.declPtr(decl_index);
    decl.alignment = alignment;

    try sema.comptime_mutable_decls.append(decl_index);
    try mod.declareDeclDependency(sema.owner_decl_index, decl_index);
    return Air.internedToRef((try mod.intern(.{ .ptr = .{
        .ty = ptr_type.toIntern(),
        .addr = .{ .mut_decl = .{
            .decl = decl_index,
            .runtime_index = block.runtime_index,
        } },
    } })));
}

/// The places where a user can specify an address space attribute
pub const AddressSpaceContext = enum {
    /// A function is specified to be placed in a certain address space.
    function,

    /// A (global) variable is specified to be placed in a certain address space.
    /// In contrast to .constant, these values (and thus the address space they will be
    /// placed in) are required to be mutable.
    variable,

    /// A (global) constant value is specified to be placed in a certain address space.
    /// In contrast to .variable, values placed in this address space are not required to be mutable.
    constant,

    /// A pointer is ascribed to point into a certain address space.
    pointer,
};

pub fn analyzeAddressSpace(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    zir_ref: Zir.Inst.Ref,
    ctx: AddressSpaceContext,
) !std.builtin.AddressSpace {
    const mod = sema.mod;
    const addrspace_tv = try sema.resolveInstConst(block, src, zir_ref, "address space must be comptime-known");
    const address_space = mod.toEnum(std.builtin.AddressSpace, addrspace_tv.val);
    const target = sema.mod.getTarget();
    const arch = target.cpu.arch;

    const is_nv = arch == .nvptx or arch == .nvptx64;
    const is_amd = arch == .amdgcn;
    const is_spirv = arch == .spirv32 or arch == .spirv64;
    const is_gpu = is_nv or is_amd or is_spirv;

    const supported = switch (address_space) {
        // TODO: on spir-v only when os is opencl.
        .generic => true,
        .gs, .fs, .ss => (arch == .x86 or arch == .x86_64) and ctx == .pointer,
        // TODO: check that .shared and .local are left uninitialized
        .param => is_nv,
        .global, .shared, .local => is_gpu,
        .constant => is_gpu and (ctx == .constant),
        // TODO this should also check how many flash banks the cpu has
        .flash, .flash1, .flash2, .flash3, .flash4, .flash5 => arch == .avr,
    };

    if (!supported) {
        // TODO error messages could be made more elaborate here
        const entity = switch (ctx) {
            .function => "functions",
            .variable => "mutable values",
            .constant => "constant values",
            .pointer => "pointers",
        };
        return sema.fail(
            block,
            src,
            "{s} with address space '{s}' are not supported on {s}",
            .{ entity, @tagName(address_space), arch.genericName() },
        );
    }

    return address_space;
}

/// Asserts the value is a pointer and dereferences it.
/// Returns `null` if the pointer contents cannot be loaded at comptime.
fn pointerDeref(sema: *Sema, block: *Block, src: LazySrcLoc, ptr_val: Value, ptr_ty: Type) CompileError!?Value {
    const mod = sema.mod;
    const load_ty = ptr_ty.childType(mod);
    const res = try sema.pointerDerefExtra(block, src, ptr_val, load_ty);
    switch (res) {
        .runtime_load => return null,
        .val => |v| return v,
        .needed_well_defined => |ty| return sema.fail(
            block,
            src,
            "comptime dereference requires '{}' to have a well-defined layout, but it does not.",
            .{ty.fmt(sema.mod)},
        ),
        .out_of_bounds => |ty| return sema.fail(
            block,
            src,
            "dereference of '{}' exceeds bounds of containing decl of type '{}'",
            .{ ptr_ty.fmt(sema.mod), ty.fmt(sema.mod) },
        ),
    }
}

const DerefResult = union(enum) {
    runtime_load,
    val: Value,
    needed_well_defined: Type,
    out_of_bounds: Type,
};

fn pointerDerefExtra(sema: *Sema, block: *Block, src: LazySrcLoc, ptr_val: Value, load_ty: Type) CompileError!DerefResult {
    const mod = sema.mod;
    const target = mod.getTarget();
    const deref = sema.beginComptimePtrLoad(block, src, ptr_val, load_ty) catch |err| switch (err) {
        error.RuntimeLoad => return DerefResult{ .runtime_load = {} },
        else => |e| return e,
    };

    if (deref.pointee) |tv| {
        const coerce_in_mem_ok =
            (try sema.coerceInMemoryAllowed(block, load_ty, tv.ty, false, target, src, src)) == .ok or
            (try sema.coerceInMemoryAllowed(block, tv.ty, load_ty, false, target, src, src)) == .ok;
        if (coerce_in_mem_ok) {
            // We have a Value that lines up in virtual memory exactly with what we want to load,
            // and it is in-memory coercible to load_ty. It may be returned without modifications.
            // Move mutable decl values to the InternPool and assert other decls are already in
            // the InternPool.
            const uncoerced_val = if (deref.is_mutable) try tv.val.intern(tv.ty, mod) else tv.val.toIntern();
            const coerced_val = try mod.getCoerced(uncoerced_val.toValue(), load_ty);
            return .{ .val = coerced_val };
        }
    }

    // The type is not in-memory coercible or the direct dereference failed, so it must
    // be bitcast according to the pointer type we are performing the load through.
    if (!load_ty.hasWellDefinedLayout(mod)) {
        return DerefResult{ .needed_well_defined = load_ty };
    }

    const load_sz = try sema.typeAbiSize(load_ty);

    // Try the smaller bit-cast first, since that's more efficient than using the larger `parent`
    if (deref.pointee) |tv| if (load_sz <= try sema.typeAbiSize(tv.ty))
        return DerefResult{ .val = (try sema.bitCastVal(block, src, tv.val, tv.ty, load_ty, 0)) orelse return .runtime_load };

    // If that fails, try to bit-cast from the largest parent value with a well-defined layout
    if (deref.parent) |parent| if (load_sz + parent.byte_offset <= try sema.typeAbiSize(parent.tv.ty))
        return DerefResult{ .val = (try sema.bitCastVal(block, src, parent.tv.val, parent.tv.ty, load_ty, parent.byte_offset)) orelse return .runtime_load };

    if (deref.ty_without_well_defined_layout) |bad_ty| {
        // We got no parent for bit-casting, or the parent we got was too small. Either way, the problem
        // is that some type we encountered when de-referencing does not have a well-defined layout.
        return DerefResult{ .needed_well_defined = bad_ty };
    } else {
        // If all encountered types had well-defined layouts, the parent is the root decl and it just
        // wasn't big enough for the load.
        return DerefResult{ .out_of_bounds = deref.parent.?.tv.ty };
    }
}

/// Used to convert a u64 value to a usize value, emitting a compile error if the number
/// is too big to fit.
fn usizeCast(sema: *Sema, block: *Block, src: LazySrcLoc, int: u64) CompileError!usize {
    if (@bitSizeOf(u64) <= @bitSizeOf(usize)) return int;
    return std.math.cast(usize, int) orelse return sema.fail(block, src, "expression produces integer value '{d}' which is too big for this compiler implementation to handle", .{int});
}

/// For pointer-like optionals, it returns the pointer type. For pointers,
/// the type is returned unmodified.
/// This can return `error.AnalysisFail` because it sometimes requires resolving whether
/// a type has zero bits, which can cause a "foo depends on itself" compile error.
/// This logic must be kept in sync with `Type.isPtrLikeOptional`.
fn typePtrOrOptionalPtrTy(sema: *Sema, ty: Type) !?Type {
    const mod = sema.mod;
    return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
        .ptr_type => |ptr_type| switch (ptr_type.flags.size) {
            .One, .Many, .C => ty,
            .Slice => null,
        },
        .opt_type => |opt_child| switch (mod.intern_pool.indexToKey(opt_child)) {
            .ptr_type => |ptr_type| switch (ptr_type.flags.size) {
                .Slice, .C => null,
                .Many, .One => {
                    if (ptr_type.flags.is_allowzero) return null;

                    // optionals of zero sized types behave like bools, not pointers
                    const payload_ty = opt_child.toType();
                    if ((try sema.typeHasOnePossibleValue(payload_ty)) != null) {
                        return null;
                    }

                    return payload_ty;
                },
            },
            else => null,
        },
        else => null,
    };
}

/// `generic_poison` will return false.
/// This function returns false negatives when structs and unions are having their
/// field types resolved.
/// TODO assert the return value matches `ty.comptimeOnly`
/// TODO merge these implementations together with the "advanced"/opt_sema pattern seen
/// elsewhere in value.zig
pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool {
    const mod = sema.mod;
    return switch (ty.toIntern()) {
        .empty_struct_type => false,

        else => switch (mod.intern_pool.indexToKey(ty.toIntern())) {
            .int_type => return false,
            .ptr_type => |ptr_type| {
                const child_ty = ptr_type.child.toType();
                switch (child_ty.zigTypeTag(mod)) {
                    .Fn => return mod.typeToFunc(child_ty).?.is_generic,
                    .Opaque => return false,
                    else => return sema.typeRequiresComptime(child_ty),
                }
            },
            .anyframe_type => |child| {
                if (child == .none) return false;
                return sema.typeRequiresComptime(child.toType());
            },
            .array_type => |array_type| return sema.typeRequiresComptime(array_type.child.toType()),
            .vector_type => |vector_type| return sema.typeRequiresComptime(vector_type.child.toType()),
            .opt_type => |child| return sema.typeRequiresComptime(child.toType()),

            .error_union_type => |error_union_type| {
                return sema.typeRequiresComptime(error_union_type.payload_type.toType());
            },

            .error_set_type, .inferred_error_set_type => false,

            .func_type => true,

            .simple_type => |t| return switch (t) {
                .f16,
                .f32,
                .f64,
                .f80,
                .f128,
                .usize,
                .isize,
                .c_char,
                .c_short,
                .c_ushort,
                .c_int,
                .c_uint,
                .c_long,
                .c_ulong,
                .c_longlong,
                .c_ulonglong,
                .c_longdouble,
                .bool,
                .void,
                .anyerror,
                .noreturn,
                .generic_poison,
                .atomic_order,
                .atomic_rmw_op,
                .calling_convention,
                .address_space,
                .float_mode,
                .reduce_op,
                .call_modifier,
                .prefetch_options,
                .export_options,
                .extern_options,
                .adhoc_inferred_error_set,
                => false,

                .anyopaque,
                .type,
                .comptime_int,
                .comptime_float,
                .null,
                .undefined,
                .enum_literal,
                .type_info,
                => true,
            },
            .struct_type => |struct_type| {
                const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return false;
                switch (struct_obj.requires_comptime) {
                    .no, .wip => return false,
                    .yes => return true,
                    .unknown => {
                        if (struct_obj.status == .field_types_wip)
                            return false;

                        try sema.resolveTypeFieldsStruct(ty, struct_obj);

                        struct_obj.requires_comptime = .wip;
                        for (struct_obj.fields.values()) |field| {
                            if (field.is_comptime) continue;
                            if (try sema.typeRequiresComptime(field.ty)) {
                                struct_obj.requires_comptime = .yes;
                                return true;
                            }
                        }
                        struct_obj.requires_comptime = .no;
                        return false;
                    },
                }
            },
            .anon_struct_type => |tuple| {
                for (tuple.types, tuple.values) |field_ty, val| {
                    const have_comptime_val = val != .none;
                    if (!have_comptime_val and try sema.typeRequiresComptime(field_ty.toType())) {
                        return true;
                    }
                }
                return false;
            },

            .union_type => |union_type| {
                const union_obj = mod.unionPtr(union_type.index);
                switch (union_obj.requires_comptime) {
                    .no, .wip => return false,
                    .yes => return true,
                    .unknown => {
                        if (union_obj.status == .field_types_wip)
                            return false;

                        try sema.resolveTypeFieldsUnion(ty, union_obj);

                        union_obj.requires_comptime = .wip;
                        for (union_obj.fields.values()) |field| {
                            if (try sema.typeRequiresComptime(field.ty)) {
                                union_obj.requires_comptime = .yes;
                                return true;
                            }
                        }
                        union_obj.requires_comptime = .no;
                        return false;
                    },
                }
            },

            .opaque_type => true,
            .enum_type => |enum_type| try sema.typeRequiresComptime(enum_type.tag_ty.toType()),

            // values, not types
            .undef,
            .runtime_value,
            .simple_value,
            .variable,
            .extern_func,
            .func,
            .int,
            .err,
            .error_union,
            .enum_literal,
            .enum_tag,
            .empty_enum_value,
            .float,
            .ptr,
            .opt,
            .aggregate,
            .un,
            // memoization, not types
            .memoized_call,
            => unreachable,
        },
    };
}

pub fn typeHasRuntimeBits(sema: *Sema, ty: Type) CompileError!bool {
    const mod = sema.mod;
    return ty.hasRuntimeBitsAdvanced(mod, false, .{ .sema = sema }) catch |err| switch (err) {
        error.NeedLazy => unreachable,
        else => |e| return e,
    };
}

fn typeAbiSize(sema: *Sema, ty: Type) !u64 {
    try sema.resolveTypeLayout(ty);
    return ty.abiSize(sema.mod);
}

fn typeAbiAlignment(sema: *Sema, ty: Type) CompileError!u32 {
    return (try ty.abiAlignmentAdvanced(sema.mod, .{ .sema = sema })).scalar;
}

/// Not valid to call for packed unions.
/// Keep implementation in sync with `Module.Union.Field.normalAlignment`.
fn unionFieldAlignment(sema: *Sema, field: Module.Union.Field) !u32 {
    return @as(u32, @intCast(if (field.ty.isNoReturn(sema.mod))
        0
    else
        field.abi_align.toByteUnitsOptional() orelse try sema.typeAbiAlignment(field.ty)));
}

/// Keep implementation in sync with `Module.Struct.Field.alignment`.
fn structFieldAlignment(sema: *Sema, field: Module.Struct.Field, layout: std.builtin.Type.ContainerLayout) !u32 {
    const mod = sema.mod;
    if (field.abi_align.toByteUnitsOptional()) |a| {
        assert(layout != .Packed);
        return @as(u32, @intCast(a));
    }
    switch (layout) {
        .Packed => return 0,
        .Auto => if (mod.getTarget().ofmt != .c) {
            return sema.typeAbiAlignment(field.ty);
        },
        .Extern => {},
    }
    // extern
    const ty_abi_align = try sema.typeAbiAlignment(field.ty);
    if (field.ty.isAbiInt(mod) and field.ty.intInfo(mod).bits >= 128) {
        return @max(ty_abi_align, 16);
    }
    return ty_abi_align;
}

/// Synchronize logic with `Type.isFnOrHasRuntimeBits`.
pub fn fnHasRuntimeBits(sema: *Sema, ty: Type) CompileError!bool {
    const mod = sema.mod;
    const fn_info = mod.typeToFunc(ty).?;
    if (fn_info.is_generic) return false;
    if (fn_info.is_var_args) return true;
    switch (fn_info.cc) {
        // If there was a comptime calling convention, it should also return false here.
        .Inline => return false,
        else => {},
    }
    if (try sema.typeRequiresComptime(fn_info.return_type.toType())) {
        return false;
    }
    return true;
}

fn unionFieldIndex(
    sema: *Sema,
    block: *Block,
    union_ty: Type,
    field_name: InternPool.NullTerminatedString,
    field_src: LazySrcLoc,
) !u32 {
    const mod = sema.mod;
    try sema.resolveTypeFields(union_ty);
    const union_obj = mod.typeToUnion(union_ty).?;
    const field_index_usize = union_obj.fields.getIndex(field_name) orelse
        return sema.failWithBadUnionFieldAccess(block, union_obj, field_src, field_name);
    return @as(u32, @intCast(field_index_usize));
}

fn structFieldIndex(
    sema: *Sema,
    block: *Block,
    struct_ty: Type,
    field_name: InternPool.NullTerminatedString,
    field_src: LazySrcLoc,
) !u32 {
    const mod = sema.mod;
    try sema.resolveTypeFields(struct_ty);
    if (struct_ty.isAnonStruct(mod)) {
        return sema.anonStructFieldIndex(block, struct_ty, field_name, field_src);
    } else {
        const struct_obj = mod.typeToStruct(struct_ty).?;
        const field_index_usize = struct_obj.fields.getIndex(field_name) orelse
            return sema.failWithBadStructFieldAccess(block, struct_obj, field_src, field_name);
        return @as(u32, @intCast(field_index_usize));
    }
}

fn anonStructFieldIndex(
    sema: *Sema,
    block: *Block,
    struct_ty: Type,
    field_name: InternPool.NullTerminatedString,
    field_src: LazySrcLoc,
) !u32 {
    const mod = sema.mod;
    switch (mod.intern_pool.indexToKey(struct_ty.toIntern())) {
        .anon_struct_type => |anon_struct_type| for (anon_struct_type.names, 0..) |name, i| {
            if (name == field_name) return @as(u32, @intCast(i));
        },
        .struct_type => |struct_type| if (mod.structPtrUnwrap(struct_type.index)) |struct_obj| {
            for (struct_obj.fields.keys(), 0..) |name, i| {
                if (name == field_name) {
                    return @as(u32, @intCast(i));
                }
            }
        },
        else => unreachable,
    }
    return sema.fail(block, field_src, "no field named '{}' in anonymous struct '{}'", .{
        field_name.fmt(&mod.intern_pool), struct_ty.fmt(sema.mod),
    });
}

fn queueFullTypeResolution(sema: *Sema, ty: Type) !void {
    try sema.types_to_resolve.put(sema.gpa, ty.toIntern(), {});
}

/// If the value overflowed the type, returns a comptime_int (or vector thereof) instead, setting
/// overflow_idx to the vector index the overflow was at (or 0 for a scalar).
fn intAdd(sema: *Sema, lhs: Value, rhs: Value, ty: Type, overflow_idx: *?usize) !Value {
    var overflow: usize = undefined;
    return sema.intAddInner(lhs, rhs, ty, &overflow) catch |err| switch (err) {
        error.Overflow => {
            const is_vec = ty.isVector(sema.mod);
            overflow_idx.* = if (is_vec) overflow else 0;
            const safe_ty = if (is_vec) try sema.mod.vectorType(.{
                .len = ty.vectorLen(sema.mod),
                .child = .comptime_int_type,
            }) else Type.comptime_int;
            return sema.intAddInner(lhs, rhs, safe_ty, undefined) catch |err1| switch (err1) {
                error.Overflow => unreachable,
                else => |e| return e,
            };
        },
        else => |e| return e,
    };
}

fn intAddInner(sema: *Sema, lhs: Value, rhs: Value, ty: Type, overflow_idx: *usize) !Value {
    const mod = sema.mod;
    if (ty.zigTypeTag(mod) == .Vector) {
        const result_data = try sema.arena.alloc(InternPool.Index, ty.vectorLen(mod));
        const scalar_ty = ty.scalarType(mod);
        for (result_data, 0..) |*scalar, i| {
            const lhs_elem = try lhs.elemValue(mod, i);
            const rhs_elem = try rhs.elemValue(mod, i);
            const val = sema.intAddScalar(lhs_elem, rhs_elem, scalar_ty) catch |err| switch (err) {
                error.Overflow => {
                    overflow_idx.* = i;
                    return error.Overflow;
                },
                else => |e| return e,
            };
            scalar.* = try val.intern(scalar_ty, mod);
        }
        return (try mod.intern(.{ .aggregate = .{
            .ty = ty.toIntern(),
            .storage = .{ .elems = result_data },
        } })).toValue();
    }
    return sema.intAddScalar(lhs, rhs, ty);
}

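/// Scalar integer addition. For sized integer types this returns
/// `error.Overflow` when the result does not fit; `comptime_int` is computed
/// exactly using big integer arithmetic.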
fn intAddScalar(sema: *Sema, lhs: Value, rhs: Value, scalar_ty: Type) !Value {
    const mod = sema.mod;
    if (scalar_ty.toIntern() != .comptime_int_type) {
        const res = try sema.intAddWithOverflowScalar(lhs, rhs, scalar_ty);
        if (res.overflow_bit.compareAllWithZero(.neq, mod)) return error.Overflow;
        return res.wrapped_result;
    }
    // TODO is this a performance issue? maybe we should try the operation without
    // resorting to BigInt first.
    var lhs_space: Value.BigIntSpace = undefined;
    var rhs_space: Value.BigIntSpace = undefined;
    const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, mod, sema);
    const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, sema);
    const limbs = try sema.arena.alloc(
        std.math.big.Limb,
        @max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1,
    );
    var result_bigint = std.math.big.int.Mutable{ .limbs = limbs, .positive = undefined, .len = undefined };
    result_bigint.add(lhs_bigint, rhs_bigint);
    return mod.intValue_big(scalar_ty, result_bigint.toConst());
}

/// Supports both floats and ints; handles undefined.
fn numberAddWrapScalar(
    sema: *Sema,
    lhs: Value,
    rhs: Value,
    ty: Type,
) !Value {
    const mod = sema.mod;
    if (lhs.isUndef(mod) or rhs.isUndef(mod)) return Value.undef;

    if (ty.zigTypeTag(mod) == .ComptimeInt) {
        return sema.intAdd(lhs, rhs, ty, undefined);
    }

    if (ty.isAnyFloat()) {
        return Value.floatAdd(lhs, rhs, ty, sema.arena, mod);
    }

    const overflow_result = try sema.intAddWithOverflow(lhs, rhs, ty);
    return overflow_result.wrapped_result;
}

/// If the value overflowed the type, returns a comptime_int (or vector thereof) instead, setting
/// overflow_idx to the vector index the overflow was at (or 0 for a scalar).
fn intSub(sema: *Sema, lhs: Value, rhs: Value, ty: Type, overflow_idx: *?usize) !Value {
    var overflow: usize = undefined;
    return sema.intSubInner(lhs, rhs, ty, &overflow) catch |err| switch (err) {
        error.Overflow => {
            const is_vec = ty.isVector(sema.mod);
            overflow_idx.* = if (is_vec) overflow else 0;
            const safe_ty = if (is_vec) try sema.mod.vectorType(.{
                .len = ty.vectorLen(sema.mod),
                .child = .comptime_int_type,
            }) else Type.comptime_int;
            return sema.intSubInner(lhs, rhs, safe_ty, undefined) catch |err1| switch (err1) {
                error.Overflow => unreachable,
                else => |e| return e,
            };
        },
        else => |e| return e,
    };
}

fn intSubInner(sema: *Sema, lhs: Value, rhs: Value, ty: Type, overflow_idx: *usize) !Value {
    const mod = sema.mod;
    if (ty.zigTypeTag(mod) == .Vector) {
        const result_data = try sema.arena.alloc(InternPool.Index, ty.vectorLen(mod));
        const scalar_ty = ty.scalarType(mod);
        for (result_data, 0..) |*scalar, i| {
            const lhs_elem = try lhs.elemValue(sema.mod, i);
            const rhs_elem = try rhs.elemValue(sema.mod, i);
            const val = sema.intSubScalar(lhs_elem, rhs_elem, scalar_ty) catch |err| switch (err) {
                error.Overflow => {
                    overflow_idx.* = i;
                    return error.Overflow;
                },
                else => |e| return e,
            };
            scalar.* = try val.intern(scalar_ty, mod);
        }
        return (try mod.intern(.{ .aggregate = .{
            .ty = ty.toIntern(),
            .storage = .{ .elems = result_data },
        } })).toValue();
    }
    return sema.intSubScalar(lhs, rhs, ty);
}

fn intSubScalar(sema: *Sema, lhs: Value, rhs: Value, scalar_ty: Type) !Value {
    const mod = sema.mod;
    if (scalar_ty.toIntern() != .comptime_int_type) {
        const res = try sema.intSubWithOverflowScalar(lhs, rhs, scalar_ty);
        if (res.overflow_bit.compareAllWithZero(.neq, mod)) return error.Overflow;
        return res.wrapped_result;
    }
    // TODO is this a performance issue? maybe we should try the operation without
    // resorting to BigInt first.
    var lhs_space: Value.BigIntSpace = undefined;
    var rhs_space: Value.BigIntSpace = undefined;
    const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, mod, sema);
    const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, sema);
    const limbs = try sema.arena.alloc(
        std.math.big.Limb,
        @max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1,
    );
    var result_bigint = std.math.big.int.Mutable{ .limbs = limbs, .positive = undefined, .len = undefined };
    result_bigint.sub(lhs_bigint, rhs_bigint);
    return mod.intValue_big(scalar_ty, result_bigint.toConst());
}

/// Supports both floats and ints; handles undefined.
fn numberSubWrapScalar(
    sema: *Sema,
    lhs: Value,
    rhs: Value,
    ty: Type,
) !Value {
    const mod = sema.mod;
    if (lhs.isUndef(mod) or rhs.isUndef(mod)) return Value.undef;

    if (ty.zigTypeTag(mod) == .ComptimeInt) {
        return sema.intSub(lhs, rhs, ty, undefined);
    }

    if (ty.isAnyFloat()) {
        return Value.floatSub(lhs, rhs, ty, sema.arena, mod);
    }

    const overflow_result = try sema.intSubWithOverflow(lhs, rhs, ty);
    return overflow_result.wrapped_result;
}

fn intSubWithOverflow(
    sema: *Sema,
    lhs: Value,
    rhs: Value,
    ty: Type,
) !Value.OverflowArithmeticResult {
    const mod = sema.mod;
    if (ty.zigTypeTag(mod) == .Vector) {
        const vec_len = ty.vectorLen(mod);
        const overflowed_data = try sema.arena.alloc(InternPool.Index, vec_len);
        const result_data = try sema.arena.alloc(InternPool.Index, vec_len);
        const scalar_ty = ty.scalarType(mod);
        for (overflowed_data, result_data, 0..) |*of, *scalar, i| {
            const lhs_elem = try lhs.elemValue(sema.mod, i);
            const rhs_elem = try rhs.elemValue(sema.mod, i);
            const of_math_result = try sema.intSubWithOverflowScalar(lhs_elem, rhs_elem, scalar_ty);
            of.* = try of_math_result.overflow_bit.intern(Type.u1, mod);
            scalar.* = try of_math_result.wrapped_result.intern(scalar_ty, mod);
        }
        return Value.OverflowArithmeticResult{
            .overflow_bit = (try mod.intern(.{ .aggregate = .{
                .ty = (try mod.vectorType(.{ .len = vec_len, .child = .u1_type })).toIntern(),
                .storage = .{ .elems = overflowed_data },
            } })).toValue(),
            .wrapped_result = (try mod.intern(.{ .aggregate = .{
                .ty = ty.toIntern(),
                .storage = .{ .elems = result_data },
            } })).toValue(),
        };
    }
    return sema.intSubWithOverflowScalar(lhs, rhs, ty);
}

fn intSubWithOverflowScalar(
    sema: *Sema,
    lhs: Value,
    rhs: Value,
    ty: Type,
) !Value.OverflowArithmeticResult {
    const mod = sema.mod;
    const info = ty.intInfo(mod);

    var lhs_space: Value.BigIntSpace = undefined;
    var rhs_space: Value.BigIntSpace = undefined;
    const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, mod, sema);
    const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, sema);
    const limbs = try sema.arena.alloc(
        std.math.big.Limb,
        std.math.big.int.calcTwosCompLimbCount(info.bits),
    );
    var result_bigint = std.math.big.int.Mutable{ .limbs = limbs, .positive = undefined, .len = undefined };
    const overflowed = result_bigint.subWrap(lhs_bigint, rhs_bigint, info.signedness, info.bits);
    const wrapped_result = try mod.intValue_big(ty, result_bigint.toConst());
    return Value.OverflowArithmeticResult{
        .overflow_bit = try mod.intValue(Type.u1, @intFromBool(overflowed)),
        .wrapped_result = wrapped_result,
    };
}

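/// Converts a comptime-known float value (or vector thereof) to `int_ty`,
/// emitting a compile error for NaN, infinity, or out-of-range values.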
fn intFromFloat(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    val: Value,
    float_ty: Type,
    int_ty: Type,
) CompileError!Value {
    const mod = sema.mod;
    if (float_ty.zigTypeTag(mod) == .Vector) {
        const elem_ty = float_ty.scalarType(mod);
        const result_data = try sema.arena.alloc(InternPool.Index, float_ty.vectorLen(mod));
        const scalar_ty = int_ty.scalarType(mod);
        for (result_data, 0..) |*scalar, i| {
            const elem_val = try val.elemValue(sema.mod, i);
            scalar.* = try (try sema.intFromFloatScalar(block, src, elem_val, elem_ty, int_ty.scalarType(mod))).intern(scalar_ty, mod);
        }
        return (try mod.intern(.{ .aggregate = .{
            .ty = int_ty.toIntern(),
            .storage = .{ .elems = result_data },
        } })).toValue();
    }
    return sema.intFromFloatScalar(block, src, val, float_ty, int_ty);
}

// float is expected to be finite and non-NaN
fn float128IntPartToBigInt(
    arena: Allocator,
    float: f128,
) !std.math.big.int.Managed {
    const is_negative = std.math.signbit(float);
    const floored = @floor(@fabs(float));

    var rational = try std.math.big.Rational.init(arena);
    defer rational.q.deinit();
    rational.setFloat(f128, floored) catch |err| switch (err) {
        error.NonFiniteFloat => unreachable,
        error.OutOfMemory => return error.OutOfMemory,
    };

    // The float is reduced in rational.setFloat, so we assert that the denominator is equal to one.
    const big_one = std.math.big.int.Const{ .limbs = &.{1}, .positive = true };
    assert(rational.q.toConst().eqlAbs(big_one));

    if (is_negative) {
        rational.negate();
    }
    return rational.p;
}

fn intFromFloatScalar(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    val: Value,
    float_ty: Type,
    int_ty: Type,
) CompileError!Value {
    const mod = sema.mod;

    const float = val.toFloat(f128, mod);
    if (std.math.isNan(float)) {
        return sema.fail(block, src, "float value NaN cannot be stored in integer type '{}'", .{
            int_ty.fmt(sema.mod),
        });
    }
    if (std.math.isInf(float)) {
        return sema.fail(block, src, "float value Inf cannot be stored in integer type '{}'", .{
            int_ty.fmt(sema.mod),
        });
    }

    var big_int = try float128IntPartToBigInt(sema.arena, float);
    defer big_int.deinit();

    const cti_result = try mod.intValue_big(Type.comptime_int, big_int.toConst());

    if (!(try sema.intFitsInType(cti_result, int_ty, null))) {
        return sema.fail(block, src, "float value '{}' cannot be stored in integer type '{}'", .{
            val.fmtValue(float_ty, sema.mod), int_ty.fmt(sema.mod),
        });
    }
    return mod.getCoerced(cti_result, int_ty);
}

/// Asserts the value is an integer, and the destination type is ComptimeInt or Int.
/// Vectors are also accepted. Vector results are reduced with AND.
///
/// If provided, `vector_index` reports the first element that failed the range check.
fn intFitsInType(
    sema: *Sema,
    val: Value,
    ty: Type,
    vector_index: ?*usize,
) CompileError!bool {
    const mod = sema.mod;
    if (ty.toIntern() == .comptime_int_type) return true;
    const info = ty.intInfo(mod);
    switch (val.toIntern()) {
        .zero_usize, .zero_u8 => return true,
        else => switch (mod.intern_pool.indexToKey(val.toIntern())) {
            .undef => return true,
            .variable, .extern_func, .func, .ptr => {
                const target = mod.getTarget();
                const ptr_bits = target.ptrBitWidth();
                return switch (info.signedness) {
                    .signed => info.bits > ptr_bits,
                    .unsigned => info.bits >= ptr_bits,
                };
            },
            .int => |int| switch (int.storage) {
                .u64, .i64, .big_int => {
                    var buffer: InternPool.Key.Int.Storage.BigIntSpace = undefined;
                    const big_int = int.storage.toBigInt(&buffer);
                    return big_int.fitsInTwosComp(info.signedness, info.bits);
                },
                .lazy_align => |lazy_ty| {
                    const max_needed_bits = @as(u16, 16) + @intFromBool(info.signedness == .signed);
                    // If it is u16 or bigger we know the alignment fits without resolving it.
                    if (info.bits >= max_needed_bits) return true;
                    const x = try sema.typeAbiAlignment(lazy_ty.toType());
                    if (x == 0) return true;
                    const actual_needed_bits = std.math.log2(x) + 1 + @intFromBool(info.signedness == .signed);
                    return info.bits >= actual_needed_bits;
                },
                .lazy_size => |lazy_ty| {
                    const max_needed_bits = @as(u16, 64) + @intFromBool(info.signedness == .signed);
                    // If it is u64 or bigger we know the size fits without resolving it.
                    if (info.bits >= max_needed_bits) return true;
                    const x = try sema.typeAbiSize(lazy_ty.toType());
                    if (x == 0) return true;
                    const actual_needed_bits = std.math.log2(x) + 1 + @intFromBool(info.signedness == .signed);
                    return info.bits >= actual_needed_bits;
                },
            },
            .aggregate => |aggregate| {
                assert(ty.zigTypeTag(mod) == .Vector);
                return switch (aggregate.storage) {
                    .bytes => |bytes| for (bytes, 0..) |byte, i| {
                        if (byte == 0) continue;
                        const actual_needed_bits = std.math.log2(byte) + 1 + @intFromBool(info.signedness == .signed);
                        if (info.bits >= actual_needed_bits) continue;
                        if (vector_index) |vi| vi.* = i;
                        break false;
                    } else true,
                    .elems, .repeated_elem => for (switch (aggregate.storage) {
                        .bytes => unreachable,
                        .elems => |elems| elems,
                        .repeated_elem => |elem| @as(*const [1]InternPool.Index, &elem),
                    }, 0..) |elem, i| {
                        if (try sema.intFitsInType(elem.toValue(), ty.scalarType(mod), null)) continue;
                        if (vector_index) |vi| vi.* = i;
                        break false;
                    } else true,
                };
            },
            else => unreachable,
        },
    }
}

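/// Returns true if `0 <= int_val < end`, with the comparison done in `tag_ty`.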
fn intInRange(sema: *Sema, tag_ty: Type, int_val: Value, end: usize) !bool {
    const mod = sema.mod;
    if (!(try int_val.compareAllWithZeroAdvanced(.gte, sema))) return false;
    const end_val = try mod.intValue(tag_ty, end);
    if (!(try sema.compareAll(int_val, .lt, end_val, tag_ty))) return false;
    return true;
}

/// Asserts the type is an enum.
fn enumHasInt(sema: *Sema, ty: Type, int: Value) CompileError!bool {
    const mod = sema.mod;
    const enum_type = mod.intern_pool.indexToKey(ty.toIntern()).enum_type;
    assert(enum_type.tag_mode != .nonexhaustive);
    // The `tagValueIndex` function call below relies on the type being the integer tag type.
    // `getCoerced` assumes the value will fit the new type.
    if (!(try sema.intFitsInType(int, enum_type.tag_ty.toType(), null))) return false;
    const int_coerced = try mod.getCoerced(int, enum_type.tag_ty.toType());

    return enum_type.tagValueIndex(&mod.intern_pool, int_coerced.toIntern()) != null;
}

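/// Wrapping addition that also reports an overflow bit; operates elementwise
/// on vectors.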
fn intAddWithOverflow(
    sema: *Sema,
    lhs: Value,
    rhs: Value,
    ty: Type,
) !Value.OverflowArithmeticResult {
    const mod = sema.mod;
    if (ty.zigTypeTag(mod) == .Vector) {
        const vec_len = ty.vectorLen(mod);
        const overflowed_data = try sema.arena.alloc(InternPool.Index, vec_len);
        const result_data = try sema.arena.alloc(InternPool.Index, vec_len);
        const scalar_ty = ty.scalarType(mod);
        for (overflowed_data, result_data, 0..) |*of, *scalar, i| {
            const lhs_elem = try lhs.elemValue(sema.mod, i);
            const rhs_elem = try rhs.elemValue(sema.mod, i);
            const of_math_result = try sema.intAddWithOverflowScalar(lhs_elem, rhs_elem, scalar_ty);
            of.* = try of_math_result.overflow_bit.intern(Type.u1, mod);
            scalar.* = try of_math_result.wrapped_result.intern(scalar_ty, mod);
        }
        return Value.OverflowArithmeticResult{
            .overflow_bit = (try mod.intern(.{ .aggregate = .{
                .ty = (try mod.vectorType(.{ .len = vec_len, .child = .u1_type })).toIntern(),
                .storage = .{ .elems = overflowed_data },
            } })).toValue(),
            .wrapped_result = (try mod.intern(.{ .aggregate = .{
                .ty = ty.toIntern(),
                .storage = .{ .elems = result_data },
            } })).toValue(),
        };
    }
    return sema.intAddWithOverflowScalar(lhs, rhs, ty);
}

fn intAddWithOverflowScalar(
    sema: *Sema,
    lhs: Value,
    rhs: Value,
    ty: Type,
) !Value.OverflowArithmeticResult {
    const mod = sema.mod;
    const info = ty.intInfo(mod);

    var lhs_space: Value.BigIntSpace = undefined;
    var rhs_space: Value.BigIntSpace = undefined;
    const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, mod, sema);
    const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, sema);
    const limbs = try sema.arena.alloc(
        std.math.big.Limb,
        std.math.big.int.calcTwosCompLimbCount(info.bits),
    );
    var result_bigint = std.math.big.int.Mutable{ .limbs = limbs, .positive = undefined, .len = undefined };
    const overflowed = result_bigint.addWrap(lhs_bigint, rhs_bigint, info.signedness, info.bits);
    const result = try mod.intValue_big(ty, result_bigint.toConst());
    return Value.OverflowArithmeticResult{
        .overflow_bit = try mod.intValue(Type.u1, @intFromBool(overflowed)),
        .wrapped_result = result,
    };
}

/// Asserts the values are comparable. Both operands have type `ty`.
/// For vectors, returns true if the comparison is true for ALL elements.
///
/// Note that `!compareAll(.eq, ...) != compareAll(.neq, ...)`
fn compareAll(
    sema: *Sema,
    lhs: Value,
    op: std.math.CompareOperator,
    rhs: Value,
    ty: Type,
) CompileError!bool {
    const mod = sema.mod;
    if (ty.zigTypeTag(mod) == .Vector) {
        var i: usize = 0;
        while (i < ty.vectorLen(mod)) : (i += 1) {
            const lhs_elem = try lhs.elemValue(sema.mod, i);
            const rhs_elem = try rhs.elemValue(sema.mod, i);
            if (!(try sema.compareScalar(lhs_elem, op, rhs_elem, ty.scalarType(mod)))) {
                return false;
            }
        }
        return true;
    }
    return sema.compareScalar(lhs, op, rhs, ty);
}

/// Asserts the values are comparable. Both operands have type `ty`.
fn compareScalar(
    sema: *Sema,
    lhs: Value,
    op: std.math.CompareOperator,
    rhs: Value,
    ty: Type,
) CompileError!bool {
    const mod = sema.mod;
    const coerced_lhs = try mod.getCoerced(lhs, ty);
    const coerced_rhs = try mod.getCoerced(rhs, ty);
    switch (op) {
        .eq => return sema.valuesEqual(coerced_lhs, coerced_rhs, ty),
        .neq => return !(try sema.valuesEqual(coerced_lhs, coerced_rhs, ty)),
        else => return Value.compareHeteroAdvanced(coerced_lhs, op, coerced_rhs, mod, sema),
    }
}

fn valuesEqual(
    sema: *Sema,
    lhs: Value,
    rhs: Value,
    ty: Type,
) CompileError!bool {
    return lhs.eql(rhs, ty, sema.mod);
}

/// Asserts the values are comparable vectors of type `ty`.
fn compareVector(
    sema: *Sema,
    lhs: Value,
    op: std.math.CompareOperator,
    rhs: Value,
    ty: Type,
) !Value {
    const mod = sema.mod;
    assert(ty.zigTypeTag(mod) == .Vector);
    const result_data = try sema.arena.alloc(InternPool.Index, ty.vectorLen(mod));
    for (result_data, 0..) |*scalar, i| {
        const lhs_elem = try lhs.elemValue(sema.mod, i);
        const rhs_elem = try rhs.elemValue(sema.mod, i);
        const res_bool = try sema.compareScalar(lhs_elem, op, rhs_elem, ty.scalarType(mod));
        scalar.* = try Value.makeBool(res_bool).intern(Type.bool, mod);
    }
    return (try mod.intern(.{ .aggregate = .{
        .ty = (try mod.vectorType(.{ .len = ty.vectorLen(mod), .child = .bool_type })).toIntern(),
        .storage = .{ .elems = result_data },
    } })).toValue();
}

/// Returns the type of a pointer to an element.
/// Asserts that the type is a pointer, and that the element type is indexable.
/// For *[N]T, returns *T
/// For [*]T, returns *T
/// For []T, returns *T
/// Handles const-ness and address spaces in particular.
/// This code is duplicated in `analyzePtrArithmetic`.
fn elemPtrType(sema: *Sema, ptr_ty: Type, offset: ?usize) !Type {
    const mod = sema.mod;
    const ptr_info = ptr_ty.ptrInfo(mod);
    const elem_ty = ptr_ty.elemType2(mod);
    const is_allowzero = ptr_info.flags.is_allowzero and (offset orelse 0) == 0;
    const parent_ty = ptr_ty.childType(mod);

    const VI = InternPool.Key.PtrType.VectorIndex;

    const vector_info: struct {
        host_size: u16 = 0,
        alignment: u32 = 0,
        vector_index: VI = .none,
    } = if (parent_ty.isVector(mod) and ptr_info.flags.size == .One) blk: {
        const elem_bits = elem_ty.bitSize(mod);
        if (elem_bits == 0) break :blk .{};
        const is_packed = elem_bits < 8 or !std.math.isPowerOfTwo(elem_bits);
        if (!is_packed) break :blk .{};

        break :blk .{
            .host_size = @as(u16, @intCast(parent_ty.arrayLen(mod))),
            .alignment = @as(u32, @intCast(parent_ty.abiAlignment(mod))),
            .vector_index = if (offset) |some| @as(VI, @enumFromInt(some)) else .runtime,
        };
    } else .{};

    const alignment: Alignment = a: {
        // Calculate the new pointer alignment.
        if (ptr_info.flags.alignment == .none) {
            if (vector_info.alignment != 0) break :a Alignment.fromNonzeroByteUnits(vector_info.alignment);
            // ABI-aligned pointer. Any pointer arithmetic maintains the same ABI-alignedness.
            break :a .none;
        }
        // If the addend is not a comptime-known value we can still count on
        // it being a multiple of the type size.
        const elem_size = try sema.typeAbiSize(elem_ty);
        const addend = if (offset) |off| elem_size * off else elem_size;

        // The resulting pointer is aligned to the gcd of the offset (an
        // arbitrary number) and the alignment factor (always a power of two,
        // non-zero).
        const new_align = @as(Alignment, @enumFromInt(@min(
            @ctz(addend),
            @intFromEnum(ptr_info.flags.alignment),
        )));
        assert(new_align != .none);
        break :a new_align;
    };
    return mod.ptrType(.{
        .child = elem_ty.toIntern(),
        .flags = .{
            .alignment = alignment,
            .is_const = ptr_info.flags.is_const,
            .is_volatile = ptr_info.flags.is_volatile,
            .is_allowzero = is_allowzero,
            .address_space = ptr_info.flags.address_space,
            .vector_index = vector_info.vector_index,
        },
        .packed_offset = .{
            .host_size = vector_info.host_size,
            .bit_offset = 0,
        },
    });
}

/// Merge lhs with rhs.
/// Asserts that lhs and rhs are both error sets and are resolved.
fn errorSetMerge(sema: *Sema, lhs: Type, rhs: Type) !Type {
    const mod = sema.mod;
    const arena = sema.arena;
    const lhs_names = lhs.errorSetNames(mod);
    const rhs_names = rhs.errorSetNames(mod);
    var names: InferredErrorSet.NameMap = .{};
    try names.ensureUnusedCapacity(arena, lhs_names.len);

    for (lhs_names) |name| {
        names.putAssumeCapacityNoClobber(name, {});
    }
    for (rhs_names) |name| {
        try names.put(arena, name, {});
    }

    return mod.errorSetFromUnsortedNames(names.keys());
}

/// Avoids crashing the compiler when asking if inferred allocations are noreturn.
fn isNoReturn(sema: *Sema, ref: Air.Inst.Ref) bool {
    if (ref == .unreachable_value) return true;
    if (Air.refToIndex(ref)) |inst| switch (sema.air_instructions.items(.tag)[inst]) {
        .inferred_alloc, .inferred_alloc_comptime => return false,
        else => {},
    };
    return sema.typeOf(ref).isNoReturn(sema.mod);
}

/// Avoids crashing the compiler when asking if inferred allocations are known to be a certain zig type.
fn isKnownZigType(sema: *Sema, ref: Air.Inst.Ref, tag: std.builtin.TypeId) bool {
    if (Air.refToIndex(ref)) |inst| switch (sema.air_instructions.items(.tag)[inst]) {
        .inferred_alloc, .inferred_alloc_comptime => return false,
        else => {},
    };
    return sema.typeOf(ref).zigTypeTag(sema.mod) == tag;
}