zig/src/Sema.zig

//! Semantic analysis of ZIR instructions.
//! Shared by all Blocks; stored on the stack.
//! State used for compiling ZIR into AIR.
//! Transforms untyped ZIR instructions into semantically-analyzed AIR instructions.
//! Does type checking, comptime control flow, and safety-check generation.
//! This is the heart of the Zig compiler.
mod: *Module,
/// Alias to `mod.gpa`.
gpa: Allocator,
/// Points to the temporary arena allocator of the Sema.
/// This arena will be cleared when the sema is destroyed.
arena: Allocator,
/// Points to the arena allocator for the owner_decl.
/// This arena will persist until the decl is invalidated.
perm_arena: Allocator,
code: Zir,
air_instructions: std.MultiArrayList(Air.Inst) = .{},
air_extra: std.ArrayListUnmanaged(u32) = .{},
air_values: std.ArrayListUnmanaged(Value) = .{},
/// Maps ZIR to AIR.
inst_map: InstMap = .{},
/// When analyzing an inline function call, owner_decl is the Decl of the caller
/// and `src_decl` of `Block` is the `Decl` of the callee.
/// This `Decl` owns the arena memory of this `Sema`.
owner_decl: *Decl,
owner_decl_index: Decl.Index,
/// For an inline or comptime function call, this will be the root parent function
/// which contains the callsite. Corresponds to `owner_decl`.
owner_func: ?*Module.Fn,
/// The function this ZIR code is the body of, according to the source code.
/// This starts out the same as `owner_func` and then diverges in the case of
/// an inline or comptime function call.
func: ?*Module.Fn,
/// Used to restore the error return trace when returning a non-error from a function.
error_return_trace_index_on_fn_entry: Air.Inst.Ref = .none,
/// When semantic analysis needs to know the return type of the function whose body
/// is being analyzed, this `Type` should be used instead of going through `func`.
/// This will correctly handle the case of a comptime/inline function call of a
/// generic function which uses a type expression for the return type.
/// The type will be `void` in the case that `func` is `null`.
fn_ret_ty: Type,
branch_quota: u32 = default_branch_quota,
branch_count: u32 = 0,
/// Populated when returning `error.ComptimeBreak`. Used to communicate the
/// break instruction up the stack to find the corresponding Block.
comptime_break_inst: Zir.Inst.Index = undefined,
/// This field is updated when a new source location becomes active, so that
/// instructions which do not have explicitly mapped source locations still have
/// access to the source location set by the previous instruction which did
/// contain a mapped source location.
src: LazySrcLoc = .{ .token_offset = 0 },
decl_val_table: std.AutoHashMapUnmanaged(Decl.Index, Air.Inst.Ref) = .{},
/// When doing a generic function instantiation, this array collects a
/// `Value` object for each parameter that is comptime-known and thus elided
/// from the generated function. This memory is allocated by a parent `Sema` and
/// owned by the values arena of the Sema owner_decl.
comptime_args: []TypedValue = &.{},
/// Marks the function instruction that `comptime_args` applies to so that we
/// don't accidentally apply it to a function prototype which is used in the
/// type expression of a generic function parameter.
comptime_args_fn_inst: Zir.Inst.Index = 0,
/// When `comptime_args` is provided, this field is also provided. It was used as
/// the key in the `monomorphed_funcs` set. The `func` instruction is supposed
/// to use this instead of allocating a fresh one. This avoids an unnecessary
/// extra hash table lookup in the `monomorphed_funcs` set.
/// Sema will set this to null when it takes ownership.
preallocated_new_func: ?*Module.Fn = null,
/// A list of `constant` AIR instructions whose types must be fully resolved
/// after the current function body analysis is done.
/// TODO: after upgrading to use InternPool change the key here to be an
/// InternPool value index.
types_to_resolve: std.ArrayListUnmanaged(Air.Inst.Ref) = .{},
/// These are runtime blocks created lazily from `block_inline` instructions.
/// They are created when a `break_inline` passes through a runtime condition, because
/// Sema must convert comptime control flow to runtime control flow, which means
/// breaking from a block.
post_hoc_blocks: std.AutoHashMapUnmanaged(Air.Inst.Index, *LabeledBlock) = .{},
/// Populated with the last compile error created.
err: ?*Module.ErrorMsg = null,
/// True when analyzing a generic instantiation. Used to suppress some errors.
is_generic_instantiation: bool = false,
/// Set to true when analyzing a func type instruction so that nested generic
/// function types will emit generic poison instead of a partial type.
no_partial_func_ty: bool = false,
unresolved_inferred_allocs: std.AutoHashMapUnmanaged(Air.Inst.Index, void) = .{},
const std = @import("std");
const math = std.math;
const mem = std.mem;
const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
const log = std.log.scoped(.sema);
const Sema = @This();
const Value = @import("value.zig").Value;
const Type = @import("type.zig").Type;
const TypedValue = @import("TypedValue.zig");
const Air = @import("Air.zig");
const Zir = @import("Zir.zig");
const Module = @import("Module.zig");
const trace = @import("tracy.zig").trace;
const Namespace = Module.Namespace;
const CompileError = Module.CompileError;
const SemaError = Module.SemaError;
const Decl = Module.Decl;
const CaptureScope = Module.CaptureScope;
const WipCaptureScope = Module.WipCaptureScope;
const LazySrcLoc = Module.LazySrcLoc;
const RangeSet = @import("RangeSet.zig");
const target_util = @import("target.zig");
const Package = @import("Package.zig");
const crash_report = @import("crash_report.zig");
const build_options = @import("build_options");
const Compilation = @import("Compilation.zig");
const InternPool = @import("InternPool.zig");
pub const default_branch_quota = 1000;
pub const default_reference_trace_len = 2;
/// Stores the mapping from `Zir.Inst.Index -> Air.Inst.Ref`, which is used by sema to resolve
/// instructions during analysis.
/// Instead of a hash table approach, InstMap is simply a slice that is indexed into using the
/// ZIR instruction index and a start offset. An index is not present in the map if the value
/// at the index is `Air.Inst.Ref.none`.
/// `ensureSpaceForInstructions` can be called to force InstMap to have a mapped range that
/// includes all instructions in a slice. After calling this function, `putAssumeCapacity*` can
/// be called safely for any of the instructions passed in.
pub const InstMap = struct {
items: []Air.Inst.Ref = &[_]Air.Inst.Ref{},
start: Zir.Inst.Index = 0,
pub fn deinit(map: InstMap, allocator: mem.Allocator) void {
allocator.free(map.items);
}
pub fn get(map: InstMap, key: Zir.Inst.Index) ?Air.Inst.Ref {
if (!map.contains(key)) return null;
return map.items[key - map.start];
}
pub fn putAssumeCapacity(
map: *InstMap,
key: Zir.Inst.Index,
ref: Air.Inst.Ref,
) void {
map.items[key - map.start] = ref;
}
pub fn putAssumeCapacityNoClobber(
map: *InstMap,
key: Zir.Inst.Index,
ref: Air.Inst.Ref,
) void {
assert(!map.contains(key));
map.putAssumeCapacity(key, ref);
}
pub const GetOrPutResult = struct {
value_ptr: *Air.Inst.Ref,
found_existing: bool,
};
pub fn getOrPutAssumeCapacity(
map: *InstMap,
key: Zir.Inst.Index,
) GetOrPutResult {
const index = key - map.start;
return GetOrPutResult{
.value_ptr = &map.items[index],
.found_existing = map.items[index] != .none,
};
}
pub fn remove(map: InstMap, key: Zir.Inst.Index) bool {
if (!map.contains(key)) return false;
map.items[key - map.start] = .none;
return true;
}
pub fn contains(map: InstMap, key: Zir.Inst.Index) bool {
return map.items[key - map.start] != .none;
}
pub fn ensureSpaceForInstructions(
map: *InstMap,
allocator: mem.Allocator,
insts: []const Zir.Inst.Index,
) !void {
const min_max = mem.minMax(Zir.Inst.Index, insts);
const start = min_max.min;
const end = min_max.max;
if (map.start <= start and end < map.items.len + map.start)
return;
const old_start = if (map.items.len == 0) start else map.start;
var better_capacity = map.items.len;
var better_start = old_start;
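// Grow geometrically (each iteration adds half the current capacity plus a
// constant) and spend half of each increase on lowering `start`, saturating
// at zero, so that indices just below the current window also become mapped.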
while (true) {
const extra_capacity = better_capacity / 2 + 16;
better_capacity += extra_capacity;
better_start -|= @intCast(Zir.Inst.Index, extra_capacity / 2);
if (better_start <= start and end < better_capacity + better_start)
break;
}
const start_diff = old_start - better_start;
const new_items = try allocator.alloc(Air.Inst.Ref, better_capacity);
@memset(new_items[0..start_diff], .none);
@memcpy(new_items[start_diff..][0..map.items.len], map.items);
@memset(new_items[start_diff + map.items.len ..], .none);
allocator.free(map.items);
map.items = new_items;
map.start = @intCast(Zir.Inst.Index, better_start);
}
};
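// An illustrative test of the InstMap API (a sketch added for clarity; not
// part of the upstream file). It relies only on the invariants documented
// above: `.none` marks an absent entry, and `ensureSpaceForInstructions`
// makes `putAssumeCapacity*` safe for the instruction indices passed in.
test "InstMap basic usage" {
const gpa = std.testing.allocator;
var map: InstMap = .{};
defer map.deinit(gpa);
const insts = [_]Zir.Inst.Index{ 10, 11, 12 };
try map.ensureSpaceForInstructions(gpa, &insts);
map.putAssumeCapacityNoClobber(11, .void_value);
try std.testing.expect(map.contains(11));
try std.testing.expect(map.get(11).? == .void_value);
try std.testing.expect(map.get(10) == null);
try std.testing.expect(map.remove(11));
try std.testing.expect(!map.contains(11));
}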
/// This is the context needed to semantically analyze ZIR instructions and
/// produce AIR instructions.
/// This is a temporary structure stored on the stack; references to it are valid only
/// during semantic analysis of the block.
pub const Block = struct {
parent: ?*Block,
/// Shared among all child blocks.
sema: *Sema,
/// The namespace to use for lookups from this source block.
/// When analyzing fields, this is different from src_decl.src_namespace.
namespace: Namespace.Index,
/// The AIR instructions generated for this block.
instructions: std.ArrayListUnmanaged(Air.Inst.Index),
/// `param` instructions are collected here to be used by the `func` instruction.
params: std.ArrayListUnmanaged(Param) = .{},
wip_capture_scope: *CaptureScope,
label: ?*Label = null,
inlining: ?*Inlining,
/// If runtime_index is not 0 then one of these is guaranteed to be non-null.
runtime_cond: ?LazySrcLoc = null,
runtime_loop: ?LazySrcLoc = null,
/// This Decl is the Decl according to the Zig source code corresponding to this Block.
/// This can vary during inline or comptime function calls. See `Sema.owner_decl`
/// for the one that will be the same for all Block instances.
src_decl: Decl.Index,
/// Non-zero if a non-inline loop or a runtime conditional has been encountered.
/// Stores to comptime variables are only allowed when var.runtime_index <= runtime_index.
runtime_index: Value.RuntimeIndex = .zero,
inline_block: Zir.Inst.Index = 0,
comptime_reason: ?*const ComptimeReason = null,
// TODO is_comptime and comptime_reason should probably be merged together.
is_comptime: bool,
is_typeof: bool = false,
/// Keep track of the active error return trace index around blocks so that we can correctly
/// pop the error trace upon block exit.
error_return_trace_index: Air.Inst.Ref = .none,
/// When null, safety is determined by the build mode; changed by `@setRuntimeSafety`.
want_safety: ?bool = null,
/// What mode to generate float operations in, set by @setFloatMode
float_mode: std.builtin.FloatMode = .Strict,
c_import_buf: ?*std.ArrayList(u8) = null,
/// type of `err` in `else => |err|`
switch_else_err_ty: ?Type = null,
/// Value for switch_capture in an inline case
inline_case_capture: Air.Inst.Ref = .none,
const ComptimeReason = union(enum) {
c_import: struct {
block: *Block,
src: LazySrcLoc,
},
comptime_ret_ty: struct {
block: *Block,
func: Air.Inst.Ref,
func_src: LazySrcLoc,
return_ty: Type,
},
fn explain(cr: ComptimeReason, sema: *Sema, msg: ?*Module.ErrorMsg) !void {
const parent = msg orelse return;
const mod = sema.mod;
const prefix = "expression is evaluated at comptime because ";
switch (cr) {
.c_import => |ci| {
try sema.errNote(ci.block, ci.src, parent, prefix ++ "it is inside a @cImport", .{});
},
.comptime_ret_ty => |rt| {
const src_loc = if (try sema.funcDeclSrc(rt.func)) |fn_decl| blk: {
var src_loc = fn_decl.srcLoc(mod);
src_loc.lazy = .{ .node_offset_fn_type_ret_ty = 0 };
break :blk src_loc;
} else blk: {
const src_decl = sema.mod.declPtr(rt.block.src_decl);
break :blk rt.func_src.toSrcLoc(src_decl, mod);
};
if (rt.return_ty.isGenericPoison()) {
return sema.mod.errNoteNonLazy(src_loc, parent, prefix ++ "the generic function was instantiated with a comptime-only return type", .{});
}
try sema.mod.errNoteNonLazy(
src_loc,
parent,
prefix ++ "the function returns a comptime-only type '{}'",
.{rt.return_ty.fmt(sema.mod)},
);
try sema.explainWhyTypeIsComptime(parent, src_loc, rt.return_ty);
},
}
}
};
const Param = struct {
/// `noreturn` means `anytype`.
ty: Type,
is_comptime: bool,
name: []const u8,
};
/// This `Block` maps a block ZIR instruction to the corresponding
/// AIR instruction for break instruction analysis.
pub const Label = struct {
zir_block: Zir.Inst.Index,
merges: Merges,
};
/// This `Block` indicates that an inline function call is happening
/// and return instructions should be analyzed as a break instruction
/// to this AIR block instruction.
/// It is shared among all the blocks in an inline or comptime called
/// function.
pub const Inlining = struct {
func: ?*Module.Fn,
comptime_result: Air.Inst.Ref,
merges: Merges,
};
pub const Merges = struct {
block_inst: Air.Inst.Index,
/// Separate array list from `br_list` so that it can be passed directly
/// to resolvePeerTypes.
results: std.ArrayListUnmanaged(Air.Inst.Ref),
/// Keeps track of the break instructions so that the operand can be replaced
/// if we need to add type coercion at the end of block analysis.
/// Same indexes, capacity, length as `results`.
br_list: std.ArrayListUnmanaged(Air.Inst.Index),
/// Keeps the source location of the rhs operand of the break instruction,
/// to enable more precise compile errors.
/// Same indexes, capacity, length as `results`.
src_locs: std.ArrayListUnmanaged(?LazySrcLoc),
pub fn deinit(merges: *@This(), allocator: mem.Allocator) void {
merges.results.deinit(allocator);
merges.br_list.deinit(allocator);
merges.src_locs.deinit(allocator);
}
};
/// For debugging purposes.
pub fn dump(block: *Block, mod: Module) void {
Zir.dumpBlock(mod, block);
}
pub fn makeSubBlock(parent: *Block) Block {
return .{
.parent = parent,
.sema = parent.sema,
.src_decl = parent.src_decl,
.namespace = parent.namespace,
.instructions = .{},
.wip_capture_scope = parent.wip_capture_scope,
.label = null,
.inlining = parent.inlining,
.is_comptime = parent.is_comptime,
.comptime_reason = parent.comptime_reason,
.is_typeof = parent.is_typeof,
.runtime_cond = parent.runtime_cond,
.runtime_loop = parent.runtime_loop,
.runtime_index = parent.runtime_index,
.want_safety = parent.want_safety,
.float_mode = parent.float_mode,
.c_import_buf = parent.c_import_buf,
.switch_else_err_ty = parent.switch_else_err_ty,
.error_return_trace_index = parent.error_return_trace_index,
};
}
pub fn wantSafety(block: *const Block) bool {
return block.want_safety orelse switch (block.sema.mod.optimizeMode()) {
.Debug => true,
.ReleaseSafe => true,
.ReleaseFast => false,
.ReleaseSmall => false,
};
}
pub fn getFileScope(block: *Block, mod: *Module) *Module.File {
return mod.namespacePtr(block.namespace).file_scope;
}
fn addTy(
block: *Block,
tag: Air.Inst.Tag,
ty: Type,
) error{OutOfMemory}!Air.Inst.Ref {
return block.addInst(.{
.tag = tag,
.data = .{ .ty = ty },
});
}
fn addTyOp(
block: *Block,
tag: Air.Inst.Tag,
ty: Type,
operand: Air.Inst.Ref,
) error{OutOfMemory}!Air.Inst.Ref {
return block.addInst(.{
.tag = tag,
.data = .{ .ty_op = .{
.ty = try block.sema.addType(ty),
.operand = operand,
} },
});
}
fn addBitCast(block: *Block, ty: Type, operand: Air.Inst.Ref) Allocator.Error!Air.Inst.Ref {
return block.addInst(.{
.tag = .bitcast,
.data = .{ .ty_op = .{
.ty = try block.sema.addType(ty),
.operand = operand,
} },
});
}
fn addNoOp(block: *Block, tag: Air.Inst.Tag) error{OutOfMemory}!Air.Inst.Ref {
return block.addInst(.{
.tag = tag,
.data = .{ .no_op = {} },
});
}
fn addUnOp(
block: *Block,
tag: Air.Inst.Tag,
operand: Air.Inst.Ref,
) error{OutOfMemory}!Air.Inst.Ref {
return block.addInst(.{
.tag = tag,
.data = .{ .un_op = operand },
});
}
fn addBr(
block: *Block,
target_block: Air.Inst.Index,
operand: Air.Inst.Ref,
) error{OutOfMemory}!Air.Inst.Ref {
return block.addInst(.{
.tag = .br,
.data = .{ .br = .{
.block_inst = target_block,
.operand = operand,
} },
});
}
fn addBinOp(
block: *Block,
tag: Air.Inst.Tag,
lhs: Air.Inst.Ref,
rhs: Air.Inst.Ref,
) error{OutOfMemory}!Air.Inst.Ref {
return block.addInst(.{
.tag = tag,
.data = .{ .bin_op = .{
.lhs = lhs,
.rhs = rhs,
} },
});
}
fn addStructFieldPtr(
block: *Block,
struct_ptr: Air.Inst.Ref,
field_index: u32,
ptr_field_ty: Type,
) !Air.Inst.Ref {
const ty = try block.sema.addType(ptr_field_ty);
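// Field indices 0 through 3 have dedicated payload-free AIR tags; larger
// indices fall back to the generic `struct_field_ptr` form, which carries
// the field index in an extra payload.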
const tag: Air.Inst.Tag = switch (field_index) {
0 => .struct_field_ptr_index_0,
1 => .struct_field_ptr_index_1,
2 => .struct_field_ptr_index_2,
3 => .struct_field_ptr_index_3,
else => {
return block.addInst(.{
.tag = .struct_field_ptr,
.data = .{ .ty_pl = .{
.ty = ty,
.payload = try block.sema.addExtra(Air.StructField{
.struct_operand = struct_ptr,
.field_index = field_index,
}),
} },
});
},
};
return block.addInst(.{
.tag = tag,
.data = .{ .ty_op = .{
.ty = ty,
.operand = struct_ptr,
} },
});
}
fn addStructFieldVal(
block: *Block,
struct_val: Air.Inst.Ref,
field_index: u32,
field_ty: Type,
) !Air.Inst.Ref {
return block.addInst(.{
.tag = .struct_field_val,
.data = .{ .ty_pl = .{
.ty = try block.sema.addType(field_ty),
.payload = try block.sema.addExtra(Air.StructField{
.struct_operand = struct_val,
.field_index = field_index,
}),
} },
});
}
fn addSliceElemPtr(
block: *Block,
slice: Air.Inst.Ref,
elem_index: Air.Inst.Ref,
elem_ptr_ty: Type,
) !Air.Inst.Ref {
return block.addInst(.{
.tag = .slice_elem_ptr,
.data = .{ .ty_pl = .{
.ty = try block.sema.addType(elem_ptr_ty),
.payload = try block.sema.addExtra(Air.Bin{
.lhs = slice,
.rhs = elem_index,
}),
} },
});
}
fn addPtrElemPtr(
block: *Block,
array_ptr: Air.Inst.Ref,
elem_index: Air.Inst.Ref,
elem_ptr_ty: Type,
) !Air.Inst.Ref {
const ty_ref = try block.sema.addType(elem_ptr_ty);
return block.addPtrElemPtrTypeRef(array_ptr, elem_index, ty_ref);
}
fn addPtrElemPtrTypeRef(
block: *Block,
array_ptr: Air.Inst.Ref,
elem_index: Air.Inst.Ref,
elem_ptr_ty: Air.Inst.Ref,
) !Air.Inst.Ref {
return block.addInst(.{
.tag = .ptr_elem_ptr,
.data = .{ .ty_pl = .{
.ty = elem_ptr_ty,
.payload = try block.sema.addExtra(Air.Bin{
.lhs = array_ptr,
.rhs = elem_index,
}),
} },
});
}
fn addCmpVector(block: *Block, lhs: Air.Inst.Ref, rhs: Air.Inst.Ref, cmp_op: std.math.CompareOperator) !Air.Inst.Ref {
const sema = block.sema;
const mod = sema.mod;
return block.addInst(.{
.tag = if (block.float_mode == .Optimized) .cmp_vector_optimized else .cmp_vector,
.data = .{ .ty_pl = .{
.ty = try sema.addType(
try mod.vectorType(.{
.len = sema.typeOf(lhs).vectorLen(mod),
.child = .bool_type,
}),
),
.payload = try sema.addExtra(Air.VectorCmp{
.lhs = lhs,
.rhs = rhs,
.op = Air.VectorCmp.encodeOp(cmp_op),
}),
} },
});
}
fn addAggregateInit(
block: *Block,
aggregate_ty: Type,
elements: []const Air.Inst.Ref,
) !Air.Inst.Ref {
const sema = block.sema;
const ty_ref = try sema.addType(aggregate_ty);
try sema.air_extra.ensureUnusedCapacity(sema.gpa, elements.len);
const extra_index = @intCast(u32, sema.air_extra.items.len);
sema.appendRefsAssumeCapacity(elements);
return block.addInst(.{
.tag = .aggregate_init,
.data = .{ .ty_pl = .{
.ty = ty_ref,
.payload = extra_index,
} },
});
}
fn addUnionInit(
block: *Block,
union_ty: Type,
field_index: u32,
init: Air.Inst.Ref,
) !Air.Inst.Ref {
return block.addInst(.{
.tag = .union_init,
.data = .{ .ty_pl = .{
.ty = try block.sema.addType(union_ty),
.payload = try block.sema.addExtra(Air.UnionInit{
.field_index = field_index,
.init = init,
}),
} },
});
}
pub fn addInst(block: *Block, inst: Air.Inst) error{OutOfMemory}!Air.Inst.Ref {
return Air.indexToRef(try block.addInstAsIndex(inst));
}
pub fn addInstAsIndex(block: *Block, inst: Air.Inst) error{OutOfMemory}!Air.Inst.Index {
const sema = block.sema;
const gpa = sema.gpa;
try sema.air_instructions.ensureUnusedCapacity(gpa, 1);
try block.instructions.ensureUnusedCapacity(gpa, 1);
const result_index = @intCast(Air.Inst.Index, sema.air_instructions.len);
sema.air_instructions.appendAssumeCapacity(inst);
block.instructions.appendAssumeCapacity(result_index);
return result_index;
}
/// Insert an instruction into the block at `index`. Moves all following
/// instructions forward in the block to make room. Operation is O(N).
pub fn insertInst(block: *Block, index: Air.Inst.Index, inst: Air.Inst) error{OutOfMemory}!Air.Inst.Ref {
return Air.indexToRef(try block.insertInstAsIndex(index, inst));
}
pub fn insertInstAsIndex(block: *Block, index: Air.Inst.Index, inst: Air.Inst) error{OutOfMemory}!Air.Inst.Index {
const sema = block.sema;
const gpa = sema.gpa;
try sema.air_instructions.ensureUnusedCapacity(gpa, 1);
const result_index = @intCast(Air.Inst.Index, sema.air_instructions.len);
sema.air_instructions.appendAssumeCapacity(inst);
try block.instructions.insert(gpa, index, result_index);
return result_index;
}
fn addUnreachable(block: *Block, safety_check: bool) !void {
if (safety_check and block.wantSafety()) {
try block.sema.safetyPanic(block, .unreach);
} else {
_ = try block.addNoOp(.unreach);
}
}
pub fn startAnonDecl(block: *Block) !WipAnonDecl {
return WipAnonDecl{
.block = block,
.new_decl_arena = std.heap.ArenaAllocator.init(block.sema.gpa),
.finished = false,
};
}
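/// Typical lifecycle (an illustrative sketch, not a compilable excerpt;
/// `ty` and `val` stand in for a caller-provided `Type` and `Value`):
///
///     var wad = try block.startAnonDecl();
///     defer wad.deinit();
///     const decl_index = try wad.finish(ty, val, 0);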
pub const WipAnonDecl = struct {
block: *Block,
new_decl_arena: std.heap.ArenaAllocator,
finished: bool,
pub fn arena(wad: *WipAnonDecl) Allocator {
return wad.new_decl_arena.allocator();
}
pub fn deinit(wad: *WipAnonDecl) void {
if (!wad.finished) {
wad.new_decl_arena.deinit();
}
wad.* = undefined;
}
/// `alignment` value of 0 means to use ABI alignment.
pub fn finish(wad: *WipAnonDecl, ty: Type, val: Value, alignment: u32) !Decl.Index {
const sema = wad.block.sema;
// Do this ahead of time because `createAnonymousDecl` depends on calling
// `type.hasRuntimeBits()`.
_ = try sema.typeHasRuntimeBits(ty);
const new_decl_index = try sema.mod.createAnonymousDecl(wad.block, .{
.ty = ty,
.val = val,
});
const new_decl = sema.mod.declPtr(new_decl_index);
new_decl.@"align" = alignment;
errdefer sema.mod.abortAnonDecl(new_decl_index);
try new_decl.finalizeNewArena(&wad.new_decl_arena);
wad.finished = true;
return new_decl_index;
}
};
};
const LabeledBlock = struct {
block: Block,
label: Block.Label,
fn destroy(lb: *LabeledBlock, gpa: Allocator) void {
lb.block.instructions.deinit(gpa);
lb.label.merges.deinit(gpa);
gpa.destroy(lb);
}
};
pub fn deinit(sema: *Sema) void {
const gpa = sema.gpa;
sema.air_instructions.deinit(gpa);
sema.air_extra.deinit(gpa);
sema.air_values.deinit(gpa);
sema.inst_map.deinit(gpa);
sema.decl_val_table.deinit(gpa);
sema.types_to_resolve.deinit(gpa);
{
var it = sema.post_hoc_blocks.iterator();
while (it.next()) |entry| {
const labeled_block = entry.value_ptr.*;
labeled_block.destroy(gpa);
}
sema.post_hoc_blocks.deinit(gpa);
}
sema.unresolved_inferred_allocs.deinit(gpa);
sema.* = undefined;
}
/// Returns only the result from the specified body.
/// Only appropriate to call when it is determined at comptime that this body
/// has no peers.
fn resolveBody(
sema: *Sema,
block: *Block,
body: []const Zir.Inst.Index,
/// This is the instruction that a break instruction within `body` can
/// use to return from the body.
body_inst: Zir.Inst.Index,
) CompileError!Air.Inst.Ref {
const break_data = (try sema.analyzeBodyBreak(block, body)) orelse
return Air.Inst.Ref.unreachable_value;
// For comptime control flow, we need to detect when `analyzeBody` reports
// that we need to break from an outer block. In such a case we
// use Zig's error mechanism to send control flow up the stack until
// we find the block corresponding to this break.
if (block.is_comptime and break_data.block_inst != body_inst) {
sema.comptime_break_inst = break_data.inst;
return error.ComptimeBreak;
}
return try sema.resolveInst(break_data.operand);
}
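/// Analyzes `body`, converting any comptime break that escapes it into a
/// runtime break instruction appended to `block`.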
fn analyzeBodyRuntimeBreak(sema: *Sema, block: *Block, body: []const Zir.Inst.Index) !void {
_ = sema.analyzeBodyInner(block, body) catch |err| switch (err) {
error.ComptimeBreak => {
const zir_datas = sema.code.instructions.items(.data);
const break_data = zir_datas[sema.comptime_break_inst].@"break";
const extra = sema.code.extraData(Zir.Inst.Break, break_data.payload_index).data;
try sema.addRuntimeBreak(block, .{
.block_inst = extra.block_inst,
.operand = break_data.operand,
.inst = sema.comptime_break_inst,
});
},
else => |e| return e,
};
}
pub fn analyzeBody(
sema: *Sema,
block: *Block,
body: []const Zir.Inst.Index,
) !void {
_ = sema.analyzeBodyInner(block, body) catch |err| switch (err) {
error.ComptimeBreak => unreachable, // unexpected comptime control flow
else => |e| return e,
};
}
const BreakData = struct {
block_inst: Zir.Inst.Index,
operand: Zir.Inst.Ref,
inst: Zir.Inst.Index,
};
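/// Analyzes `body` and returns the data of the break instruction that
/// terminated it, or null if the body ended in a `noreturn` instruction.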
pub fn analyzeBodyBreak(
sema: *Sema,
block: *Block,
body: []const Zir.Inst.Index,
) CompileError!?BreakData {
const mod = sema.mod;
const break_inst = sema.analyzeBodyInner(block, body) catch |err| switch (err) {
error.ComptimeBreak => sema.comptime_break_inst,
else => |e| return e,
};
if (block.instructions.items.len != 0 and
sema.typeOf(Air.indexToRef(block.instructions.items[block.instructions.items.len - 1])).isNoReturn(mod))
return null;
const break_data = sema.code.instructions.items(.data)[break_inst].@"break";
const extra = sema.code.extraData(Zir.Inst.Break, break_data.payload_index).data;
return BreakData{
.block_inst = extra.block_inst,
.operand = break_data.operand,
.inst = break_inst,
};
}
/// ZIR instructions which are always `noreturn` return this. This matches the
/// return type of `analyzeBodyInner` so that we can tail call them.
/// Only appropriate to return when the instruction is known to be NoReturn
/// solely based on the ZIR tag.
const always_noreturn: CompileError!Zir.Inst.Index = @as(Zir.Inst.Index, undefined);
/// This function is the main loop of `Sema` and it can be used in two different ways:
/// * The traditional way where there are N breaks out of the block and peer type
/// resolution is done on the break operands. In this case, the `Zir.Inst.Index`
/// part of the return value will be `undefined`, and callsites should ignore it,
/// finding the block result value via the block scope.
/// * The "flat" way. There is only 1 break out of the block, and it is with a `break_inline`
/// instruction. In this case, the `Zir.Inst.Index` part of the return value will be
/// the break instruction. This communicates both which block the break applies to, as
/// well as the operand. No block scope needs to be created for this strategy.
fn analyzeBodyInner(
sema: *Sema,
block: *Block,
body: []const Zir.Inst.Index,
) CompileError!Zir.Inst.Index {
// No tracy calls here, to avoid interfering with the tail call mechanism.
try sema.inst_map.ensureSpaceForInstructions(sema.gpa, body);
const parent_capture_scope = block.wip_capture_scope;
var wip_captures = WipCaptureScope{
.finalized = true,
.scope = parent_capture_scope,
.perm_arena = sema.perm_arena,
.gpa = sema.gpa,
};
defer if (wip_captures.scope != parent_capture_scope) {
wip_captures.deinit();
};
const mod = sema.mod;
const map = &sema.inst_map;
const tags = sema.code.instructions.items(.tag);
const datas = sema.code.instructions.items(.data);
var orig_captures: usize = parent_capture_scope.captures.count();
var crash_info = crash_report.prepAnalyzeBody(sema, block, body);
crash_info.push();
defer crash_info.pop();
var dbg_block_begins: u32 = 0;
// We use a `while (true)` loop here to avoid providing a redundant way of
// breaking out of the loop. The only way to break out of the loop is with a
// `noreturn` instruction.
var i: usize = 0;
const result = while (true) {
crash_info.setBodyIndex(i);
const inst = body[i];
std.log.scoped(.sema_zir).debug("sema ZIR {s} %{d}", .{
mod.namespacePtr(mod.declPtr(block.src_decl).src_namespace).file_scope.sub_file_path, inst,
});
const air_inst: Air.Inst.Ref = switch (tags[inst]) {
// zig fmt: off
.alloc => try sema.zirAlloc(block, inst),
.alloc_inferred => try sema.zirAllocInferred(block, inst, Type.initTag(.inferred_alloc_const)),
.alloc_inferred_mut => try sema.zirAllocInferred(block, inst, Type.initTag(.inferred_alloc_mut)),
.alloc_inferred_comptime => try sema.zirAllocInferredComptime(inst, Type.initTag(.inferred_alloc_const)),
.alloc_inferred_comptime_mut => try sema.zirAllocInferredComptime(inst, Type.initTag(.inferred_alloc_mut)),
.alloc_mut => try sema.zirAllocMut(block, inst),
.alloc_comptime_mut => try sema.zirAllocComptime(block, inst),
.make_ptr_const => try sema.zirMakePtrConst(block, inst),
.anyframe_type => try sema.zirAnyframeType(block, inst),
.array_cat => try sema.zirArrayCat(block, inst),
.array_mul => try sema.zirArrayMul(block, inst),
.array_type => try sema.zirArrayType(block, inst),
.array_type_sentinel => try sema.zirArrayTypeSentinel(block, inst),
.vector_type => try sema.zirVectorType(block, inst),
.as => try sema.zirAs(block, inst),
.as_node => try sema.zirAsNode(block, inst),
.as_shift_operand => try sema.zirAsShiftOperand(block, inst),
.bit_and => try sema.zirBitwise(block, inst, .bit_and),
.bit_not => try sema.zirBitNot(block, inst),
.bit_or => try sema.zirBitwise(block, inst, .bit_or),
.bitcast => try sema.zirBitcast(block, inst),
.suspend_block => try sema.zirSuspendBlock(block, inst),
.bool_not => try sema.zirBoolNot(block, inst),
.bool_br_and => try sema.zirBoolBr(block, inst, false),
.bool_br_or => try sema.zirBoolBr(block, inst, true),
.c_import => try sema.zirCImport(block, inst),
.call => try sema.zirCall(block, inst, .direct),
.field_call => try sema.zirCall(block, inst, .field),
.closure_get => try sema.zirClosureGet(block, inst),
.cmp_lt => try sema.zirCmp(block, inst, .lt),
.cmp_lte => try sema.zirCmp(block, inst, .lte),
.cmp_eq => try sema.zirCmpEq(block, inst, .eq, Air.Inst.Tag.fromCmpOp(.eq, block.float_mode == .Optimized)),
.cmp_gte => try sema.zirCmp(block, inst, .gte),
.cmp_gt => try sema.zirCmp(block, inst, .gt),
.cmp_neq => try sema.zirCmpEq(block, inst, .neq, Air.Inst.Tag.fromCmpOp(.neq, block.float_mode == .Optimized)),
.coerce_result_ptr => try sema.zirCoerceResultPtr(block, inst),
.decl_ref => try sema.zirDeclRef(block, inst),
.decl_val => try sema.zirDeclVal(block, inst),
.load => try sema.zirLoad(block, inst),
.elem_ptr => try sema.zirElemPtr(block, inst),
.elem_ptr_node => try sema.zirElemPtrNode(block, inst),
.elem_ptr_imm => try sema.zirElemPtrImm(block, inst),
.elem_val => try sema.zirElemVal(block, inst),
.elem_val_node => try sema.zirElemValNode(block, inst),
.elem_type_index => try sema.zirElemTypeIndex(block, inst),
.enum_literal => try sema.zirEnumLiteral(block, inst),
.enum_to_int => try sema.zirEnumToInt(block, inst),
.int_to_enum => try sema.zirIntToEnum(block, inst),
.err_union_code => try sema.zirErrUnionCode(block, inst),
.err_union_code_ptr => try sema.zirErrUnionCodePtr(block, inst),
.err_union_payload_unsafe => try sema.zirErrUnionPayload(block, inst),
.err_union_payload_unsafe_ptr => try sema.zirErrUnionPayloadPtr(block, inst),
.error_union_type => try sema.zirErrorUnionType(block, inst),
.error_value => try sema.zirErrorValue(block, inst),
.field_ptr => try sema.zirFieldPtr(block, inst, false),
.field_ptr_init => try sema.zirFieldPtr(block, inst, true),
.field_ptr_named => try sema.zirFieldPtrNamed(block, inst),
.field_val => try sema.zirFieldVal(block, inst),
.field_val_named => try sema.zirFieldValNamed(block, inst),
.func => try sema.zirFunc(block, inst, false),
.func_inferred => try sema.zirFunc(block, inst, true),
.func_fancy => try sema.zirFuncFancy(block, inst),
.import => try sema.zirImport(block, inst),
.indexable_ptr_len => try sema.zirIndexablePtrLen(block, inst),
.int => try sema.zirInt(block, inst),
.int_big => try sema.zirIntBig(block, inst),
.float => try sema.zirFloat(block, inst),
.float128 => try sema.zirFloat128(block, inst),
.int_type => try sema.zirIntType(inst),
.is_non_err => try sema.zirIsNonErr(block, inst),
.is_non_err_ptr => try sema.zirIsNonErrPtr(block, inst),
.ret_is_non_err => try sema.zirRetIsNonErr(block, inst),
.is_non_null => try sema.zirIsNonNull(block, inst),
.is_non_null_ptr => try sema.zirIsNonNullPtr(block, inst),
.merge_error_sets => try sema.zirMergeErrorSets(block, inst),
.negate => try sema.zirNegate(block, inst),
.negate_wrap => try sema.zirNegateWrap(block, inst),
.optional_payload_safe => try sema.zirOptionalPayload(block, inst, true),
.optional_payload_safe_ptr => try sema.zirOptionalPayloadPtr(block, inst, true),
.optional_payload_unsafe => try sema.zirOptionalPayload(block, inst, false),
.optional_payload_unsafe_ptr => try sema.zirOptionalPayloadPtr(block, inst, false),
.optional_type => try sema.zirOptionalType(block, inst),
.ptr_type => try sema.zirPtrType(block, inst),
.ref => try sema.zirRef(block, inst),
.ret_err_value_code => try sema.zirRetErrValueCode(inst),
.shr => try sema.zirShr(block, inst, .shr),
.shr_exact => try sema.zirShr(block, inst, .shr_exact),
.slice_end => try sema.zirSliceEnd(block, inst),
.slice_sentinel => try sema.zirSliceSentinel(block, inst),
.slice_start => try sema.zirSliceStart(block, inst),
.slice_length => try sema.zirSliceLength(block, inst),
.str => try sema.zirStr(block, inst),
.switch_block => try sema.zirSwitchBlock(block, inst),
.switch_cond => try sema.zirSwitchCond(block, inst, false),
.switch_cond_ref => try sema.zirSwitchCond(block, inst, true),
.switch_capture => try sema.zirSwitchCapture(block, inst, false, false),
.switch_capture_ref => try sema.zirSwitchCapture(block, inst, false, true),
.switch_capture_multi => try sema.zirSwitchCapture(block, inst, true, false),
.switch_capture_multi_ref => try sema.zirSwitchCapture(block, inst, true, true),
.switch_capture_tag => try sema.zirSwitchCaptureTag(block, inst),
.type_info => try sema.zirTypeInfo(block, inst),
.size_of => try sema.zirSizeOf(block, inst),
.bit_size_of => try sema.zirBitSizeOf(block, inst),
.typeof => try sema.zirTypeof(block, inst),
.typeof_builtin => try sema.zirTypeofBuiltin(block, inst),
.typeof_log2_int_type => try sema.zirTypeofLog2IntType(block, inst),
.xor => try sema.zirBitwise(block, inst, .xor),
.struct_init_empty => try sema.zirStructInitEmpty(block, inst),
.struct_init => try sema.zirStructInit(block, inst, false),
.struct_init_ref => try sema.zirStructInit(block, inst, true),
.struct_init_anon => try sema.zirStructInitAnon(block, inst, false),
.struct_init_anon_ref => try sema.zirStructInitAnon(block, inst, true),
.array_init => try sema.zirArrayInit(block, inst, false),
.array_init_ref => try sema.zirArrayInit(block, inst, true),
.array_init_anon => try sema.zirArrayInitAnon(block, inst, false),
.array_init_anon_ref => try sema.zirArrayInitAnon(block, inst, true),
.union_init => try sema.zirUnionInit(block, inst),
.field_type => try sema.zirFieldType(block, inst),
.field_type_ref => try sema.zirFieldTypeRef(block, inst),
.ptr_to_int => try sema.zirPtrToInt(block, inst),
.align_of => try sema.zirAlignOf(block, inst),
.bool_to_int => try sema.zirBoolToInt(block, inst),
.embed_file => try sema.zirEmbedFile(block, inst),
.error_name => try sema.zirErrorName(block, inst),
.tag_name => try sema.zirTagName(block, inst),
.type_name => try sema.zirTypeName(block, inst),
.frame_type => try sema.zirFrameType(block, inst),
.frame_size => try sema.zirFrameSize(block, inst),
.float_to_int => try sema.zirFloatToInt(block, inst),
.int_to_float => try sema.zirIntToFloat(block, inst),
.int_to_ptr => try sema.zirIntToPtr(block, inst),
.float_cast => try sema.zirFloatCast(block, inst),
.int_cast => try sema.zirIntCast(block, inst),
.ptr_cast => try sema.zirPtrCast(block, inst),
.truncate => try sema.zirTruncate(block, inst),
.align_cast => try sema.zirAlignCast(block, inst),
.has_decl => try sema.zirHasDecl(block, inst),
.has_field => try sema.zirHasField(block, inst),
.byte_swap => try sema.zirByteSwap(block, inst),
.bit_reverse => try sema.zirBitReverse(block, inst),
.bit_offset_of => try sema.zirBitOffsetOf(block, inst),
.offset_of => try sema.zirOffsetOf(block, inst),
.splat => try sema.zirSplat(block, inst),
.reduce => try sema.zirReduce(block, inst),
.shuffle => try sema.zirShuffle(block, inst),
.atomic_load => try sema.zirAtomicLoad(block, inst),
.atomic_rmw => try sema.zirAtomicRmw(block, inst),
.mul_add => try sema.zirMulAdd(block, inst),
.builtin_call => try sema.zirBuiltinCall(block, inst),
.field_parent_ptr => try sema.zirFieldParentPtr(block, inst),
.@"resume" => try sema.zirResume(block, inst),
.@"await" => try sema.zirAwait(block, inst),
.array_base_ptr => try sema.zirArrayBasePtr(block, inst),
.field_base_ptr => try sema.zirFieldBasePtr(block, inst),
.for_len => try sema.zirForLen(block, inst),
.clz => try sema.zirBitCount(block, inst, .clz, Value.clz),
.ctz => try sema.zirBitCount(block, inst, .ctz, Value.ctz),
.pop_count => try sema.zirBitCount(block, inst, .popcount, Value.popCount),
.sqrt => try sema.zirUnaryMath(block, inst, .sqrt, Value.sqrt),
.sin => try sema.zirUnaryMath(block, inst, .sin, Value.sin),
.cos => try sema.zirUnaryMath(block, inst, .cos, Value.cos),
.tan => try sema.zirUnaryMath(block, inst, .tan, Value.tan),
.exp => try sema.zirUnaryMath(block, inst, .exp, Value.exp),
.exp2 => try sema.zirUnaryMath(block, inst, .exp2, Value.exp2),
.log => try sema.zirUnaryMath(block, inst, .log, Value.log),
.log2 => try sema.zirUnaryMath(block, inst, .log2, Value.log2),
.log10 => try sema.zirUnaryMath(block, inst, .log10, Value.log10),
.fabs => try sema.zirUnaryMath(block, inst, .fabs, Value.fabs),
.floor => try sema.zirUnaryMath(block, inst, .floor, Value.floor),
.ceil => try sema.zirUnaryMath(block, inst, .ceil, Value.ceil),
.round => try sema.zirUnaryMath(block, inst, .round, Value.round),
.trunc => try sema.zirUnaryMath(block, inst, .trunc_float, Value.trunc),
.error_set_decl => try sema.zirErrorSetDecl(block, inst, .parent),
.error_set_decl_anon => try sema.zirErrorSetDecl(block, inst, .anon),
.error_set_decl_func => try sema.zirErrorSetDecl(block, inst, .func),
.add => try sema.zirArithmetic(block, inst, .add, true),
.addwrap => try sema.zirArithmetic(block, inst, .addwrap, true),
.add_sat => try sema.zirArithmetic(block, inst, .add_sat, true),
.add_unsafe=> try sema.zirArithmetic(block, inst, .add_unsafe, false),
.mul => try sema.zirArithmetic(block, inst, .mul, true),
.mulwrap => try sema.zirArithmetic(block, inst, .mulwrap, true),
.mul_sat => try sema.zirArithmetic(block, inst, .mul_sat, true),
.sub => try sema.zirArithmetic(block, inst, .sub, true),
.subwrap => try sema.zirArithmetic(block, inst, .subwrap, true),
.sub_sat => try sema.zirArithmetic(block, inst, .sub_sat, true),
.div => try sema.zirDiv(block, inst),
.div_exact => try sema.zirDivExact(block, inst),
.div_floor => try sema.zirDivFloor(block, inst),
.div_trunc => try sema.zirDivTrunc(block, inst),
.mod_rem => try sema.zirModRem(block, inst),
.mod => try sema.zirMod(block, inst),
.rem => try sema.zirRem(block, inst),
.max => try sema.zirMinMax(block, inst, .max),
.min => try sema.zirMinMax(block, inst, .min),
.shl => try sema.zirShl(block, inst, .shl),
.shl_exact => try sema.zirShl(block, inst, .shl_exact),
.shl_sat => try sema.zirShl(block, inst, .shl_sat),
.ret_ptr => try sema.zirRetPtr(block),
.ret_type => try sema.addType(sema.fn_ret_ty),
// Instructions that we know to *always* be noreturn based solely on their tag.
// These functions match the return type of `analyzeBodyInner` so that we can
// tail call them here.
.compile_error => break sema.zirCompileError(block, inst),
.ret_implicit => break sema.zirRetImplicit(block, inst),
.ret_node => break sema.zirRetNode(block, inst),
.ret_load => break sema.zirRetLoad(block, inst),
.ret_err_value => break sema.zirRetErrValue(block, inst),
.@"unreachable" => break sema.zirUnreachable(block, inst),
.panic => break sema.zirPanic(block, inst),
.trap => break sema.zirTrap(block, inst),
// zig fmt: on
.extended => ext: {
const extended = datas[inst].extended;
break :ext switch (extended.opcode) {
// zig fmt: off
.variable => try sema.zirVarExtended( block, extended),
.struct_decl => try sema.zirStructDecl( block, extended, inst),
.enum_decl => try sema.zirEnumDecl( block, extended, inst),
.union_decl => try sema.zirUnionDecl( block, extended, inst),
.opaque_decl => try sema.zirOpaqueDecl( block, extended, inst),
.this => try sema.zirThis( block, extended),
.ret_addr => try sema.zirRetAddr( block, extended),
.builtin_src => try sema.zirBuiltinSrc( block, extended),
.error_return_trace => try sema.zirErrorReturnTrace( block),
.frame => try sema.zirFrame( block, extended),
.frame_address => try sema.zirFrameAddress( block, extended),
.alloc => try sema.zirAllocExtended( block, extended),
.builtin_extern => try sema.zirBuiltinExtern( block, extended),
.@"asm" => try sema.zirAsm( block, extended, false),
.asm_expr => try sema.zirAsm( block, extended, true),
.typeof_peer => try sema.zirTypeofPeer( block, extended),
.compile_log => try sema.zirCompileLog( extended),
.min_multi => try sema.zirMinMaxMulti( block, extended, .min),
.max_multi => try sema.zirMinMaxMulti( block, extended, .max),
.add_with_overflow => try sema.zirOverflowArithmetic(block, extended, extended.opcode),
.sub_with_overflow => try sema.zirOverflowArithmetic(block, extended, extended.opcode),
.mul_with_overflow => try sema.zirOverflowArithmetic(block, extended, extended.opcode),
.shl_with_overflow => try sema.zirOverflowArithmetic(block, extended, extended.opcode),
.c_undef => try sema.zirCUndef( block, extended),
.c_include => try sema.zirCInclude( block, extended),
.c_define => try sema.zirCDefine( block, extended),
.wasm_memory_size => try sema.zirWasmMemorySize( block, extended),
.wasm_memory_grow => try sema.zirWasmMemoryGrow( block, extended),
.prefetch => try sema.zirPrefetch( block, extended),
.err_set_cast => try sema.zirErrSetCast( block, extended),
.await_nosuspend => try sema.zirAwaitNosuspend( block, extended),
.select => try sema.zirSelect( block, extended),
.error_to_int => try sema.zirErrorToInt( block, extended),
.int_to_error => try sema.zirIntToError( block, extended),
.reify => try sema.zirReify( block, extended, inst),
.builtin_async_call => try sema.zirBuiltinAsyncCall( block, extended),
.cmpxchg => try sema.zirCmpxchg( block, extended),
.addrspace_cast => try sema.zirAddrSpaceCast( block, extended),
.c_va_arg => try sema.zirCVaArg( block, extended),
.c_va_copy => try sema.zirCVaCopy( block, extended),
.c_va_end => try sema.zirCVaEnd( block, extended),
.c_va_start => try sema.zirCVaStart( block, extended),
.const_cast => try sema.zirConstCast( block, extended),
.volatile_cast => try sema.zirVolatileCast( block, extended),
.work_item_id => try sema.zirWorkItem( block, extended, extended.opcode),
.work_group_size => try sema.zirWorkItem( block, extended, extended.opcode),
.work_group_id => try sema.zirWorkItem( block, extended, extended.opcode),
.in_comptime => try sema.zirInComptime( block),
// zig fmt: on
.fence => {
try sema.zirFence(block, extended);
i += 1;
continue;
},
.set_float_mode => {
try sema.zirSetFloatMode(block, extended);
i += 1;
continue;
},
.set_align_stack => {
try sema.zirSetAlignStack(block, extended);
i += 1;
continue;
},
.set_cold => {
try sema.zirSetCold(block, extended);
i += 1;
continue;
},
.breakpoint => {
if (!block.is_comptime) {
_ = try block.addNoOp(.breakpoint);
}
i += 1;
continue;
},
.errdefer_err_code => unreachable, // never appears in a body
};
},
// Instructions that we know can *never* be noreturn based solely on
// their tag. We avoid needlessly checking if they are noreturn and
// continue the loop.
// We also know that they cannot be referenced later, so we avoid
// putting them into the map.
.dbg_stmt => {
try sema.zirDbgStmt(block, inst);
i += 1;
continue;
},
.dbg_var_ptr => {
try sema.zirDbgVar(block, inst, .dbg_var_ptr);
i += 1;
continue;
},
.dbg_var_val => {
try sema.zirDbgVar(block, inst, .dbg_var_val);
i += 1;
continue;
},
.dbg_block_begin => {
dbg_block_begins += 1;
try sema.zirDbgBlockBegin(block);
i += 1;
continue;
},
.dbg_block_end => {
dbg_block_begins -= 1;
try sema.zirDbgBlockEnd(block);
i += 1;
continue;
},
.ensure_err_union_payload_void => {
try sema.zirEnsureErrUnionPayloadVoid(block, inst);
i += 1;
continue;
},
.ensure_result_non_error => {
try sema.zirEnsureResultNonError(block, inst);
i += 1;
continue;
},
.ensure_result_used => {
try sema.zirEnsureResultUsed(block, inst);
i += 1;
continue;
},
.set_eval_branch_quota => {
try sema.zirSetEvalBranchQuota(block, inst);
i += 1;
continue;
},
.atomic_store => {
try sema.zirAtomicStore(block, inst);
i += 1;
continue;
},
.store => {
try sema.zirStore(block, inst);
i += 1;
continue;
},
.store_node => {
try sema.zirStoreNode(block, inst);
i += 1;
continue;
},
.store_to_block_ptr => {
try sema.zirStoreToBlockPtr(block, inst);
i += 1;
continue;
},
.store_to_inferred_ptr => {
try sema.zirStoreToInferredPtr(block, inst);
i += 1;
continue;
},
.resolve_inferred_alloc => {
try sema.zirResolveInferredAlloc(block, inst);
i += 1;
continue;
},
.validate_array_init_ty => {
try sema.validateArrayInitTy(block, inst);
i += 1;
continue;
},
.validate_struct_init_ty => {
try sema.validateStructInitTy(block, inst);
i += 1;
continue;
},
.validate_struct_init => {
try sema.zirValidateStructInit(block, inst);
i += 1;
continue;
},
.validate_array_init => {
try sema.zirValidateArrayInit(block, inst);
i += 1;
continue;
},
.validate_deref => {
try sema.zirValidateDeref(block, inst);
i += 1;
continue;
},
.@"export" => {
try sema.zirExport(block, inst);
i += 1;
continue;
},
.export_value => {
try sema.zirExportValue(block, inst);
i += 1;
continue;
},
.set_runtime_safety => {
try sema.zirSetRuntimeSafety(block, inst);
i += 1;
continue;
},
.param => {
try sema.zirParam(block, inst, false);
i += 1;
continue;
},
.param_comptime => {
try sema.zirParam(block, inst, true);
i += 1;
continue;
},
.param_anytype => {
try sema.zirParamAnytype(block, inst, false);
i += 1;
continue;
},
.param_anytype_comptime => {
try sema.zirParamAnytype(block, inst, true);
i += 1;
continue;
},
.closure_capture => {
try sema.zirClosureCapture(block, inst);
i += 1;
continue;
},
.memcpy => {
try sema.zirMemcpy(block, inst);
i += 1;
continue;
},
.memset => {
try sema.zirMemset(block, inst);
i += 1;
continue;
},
.check_comptime_control_flow => {
if (!block.is_comptime) {
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const src = inst_data.src();
const inline_block = Zir.refToIndex(inst_data.operand).?;
var check_block = block;
const target_runtime_index = while (true) {
if (check_block.inline_block == inline_block) {
break check_block.runtime_index;
}
check_block = check_block.parent.?;
};
if (@enumToInt(target_runtime_index) < @enumToInt(block.runtime_index)) {
const runtime_src = block.runtime_cond orelse block.runtime_loop.?;
const msg = msg: {
const msg = try sema.errMsg(block, src, "comptime control flow inside runtime block", .{});
errdefer msg.destroy(sema.gpa);
try sema.errNote(block, runtime_src, msg, "runtime control flow here", .{});
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
}
i += 1;
continue;
},
.save_err_ret_index => {
try sema.zirSaveErrRetIndex(block, inst);
i += 1;
continue;
},
.restore_err_ret_index => {
try sema.zirRestoreErrRetIndex(block, inst);
i += 1;
continue;
},
// Special case instructions to handle comptime control flow.
.@"break" => {
if (block.is_comptime) {
break inst; // same as break_inline
} else {
break sema.zirBreak(block, inst);
}
},
.break_inline => {
if (block.is_comptime) {
break inst;
} else {
sema.comptime_break_inst = inst;
return error.ComptimeBreak;
}
},
.repeat => {
if (block.is_comptime) {
// Send comptime control flow back to the beginning of this block.
const src = LazySrcLoc.nodeOffset(datas[inst].node);
try sema.emitBackwardBranch(block, src);
if (wip_captures.scope.captures.count() != orig_captures) {
try wip_captures.reset(parent_capture_scope);
block.wip_capture_scope = wip_captures.scope;
orig_captures = 0;
}
i = 0;
continue;
} else {
break always_noreturn;
}
},
.repeat_inline => {
// Send comptime control flow back to the beginning of this block.
const src = LazySrcLoc.nodeOffset(datas[inst].node);
try sema.emitBackwardBranch(block, src);
if (wip_captures.scope.captures.count() != orig_captures) {
try wip_captures.reset(parent_capture_scope);
block.wip_capture_scope = wip_captures.scope;
orig_captures = 0;
}
i = 0;
continue;
},
.loop => blk: {
if (!block.is_comptime) break :blk try sema.zirLoop(block, inst);
// Same as `block_inline`. TODO https://github.com/ziglang/zig/issues/8220
const inst_data = datas[inst].pl_node;
const extra = sema.code.extraData(Zir.Inst.Block, inst_data.payload_index);
const inline_body = sema.code.extra[extra.end..][0..extra.data.body_len];
const break_data = (try sema.analyzeBodyBreak(block, inline_body)) orelse
break always_noreturn;
if (inst == break_data.block_inst) {
break :blk try sema.resolveInst(break_data.operand);
} else {
break break_data.inst;
}
},
.block, .block_comptime => blk: {
if (!block.is_comptime) {
break :blk try sema.zirBlock(block, inst, tags[inst] == .block_comptime);
}
// Same as `block_inline`. TODO https://github.com/ziglang/zig/issues/8220
const inst_data = datas[inst].pl_node;
const extra = sema.code.extraData(Zir.Inst.Block, inst_data.payload_index);
const inline_body = sema.code.extra[extra.end..][0..extra.data.body_len];
// If this block contains a function prototype, we need to reset the
// current list of parameters and restore it later.
// Note: this probably needs to be resolved in a more general manner.
const prev_params = block.params;
block.params = .{};
defer {
block.params.deinit(sema.gpa);
block.params = prev_params;
}
const break_data = (try sema.analyzeBodyBreak(block, inline_body)) orelse
break always_noreturn;
if (inst == break_data.block_inst) {
break :blk try sema.resolveInst(break_data.operand);
} else {
break break_data.inst;
}
},
.block_inline => blk: {
// Directly analyze the block body without introducing a new block.
// However, in the case of a corresponding break_inline which reaches
// through a runtime conditional branch, we must retroactively emit
// a block, so we remember the block index here just in case.
const block_index = block.instructions.items.len;
const inst_data = datas[inst].pl_node;
const extra = sema.code.extraData(Zir.Inst.Block, inst_data.payload_index);
const inline_body = sema.code.extra[extra.end..][0..extra.data.body_len];
const gpa = sema.gpa;
const opt_break_data = b: {
// Create a temporary child block so that this inline block is properly
// labeled for any .restore_err_ret_index instructions
var child_block = block.makeSubBlock();
// If this block contains a function prototype, we need to reset the
// current list of parameters and restore it later.
// Note: this probably needs to be resolved in a more general manner.
child_block.inline_block =
if (tags[inline_body[inline_body.len - 1]] == .repeat_inline) inline_body[0] else inst;
var label: Block.Label = .{
.zir_block = inst,
.merges = undefined,
};
child_block.label = &label;
defer child_block.params.deinit(gpa);
// Write these instructions directly into the parent block
child_block.instructions = block.instructions;
defer block.instructions = child_block.instructions;
break :b try sema.analyzeBodyBreak(&child_block, inline_body);
};
// A runtime conditional branch that needs a post-hoc block to be
// emitted communicates this by mapping the block index into the inst map.
if (map.get(inst)) |new_block_ref| ph: {
// Comptime control flow populates the map, so we don't actually know
// if this is a post-hoc runtime block until we check the
// post_hoc_block map.
const new_block_inst = Air.refToIndex(new_block_ref) orelse break :ph;
const labeled_block = sema.post_hoc_blocks.get(new_block_inst) orelse
break :ph;
// In this case we need to move all the instructions starting at
// block_index from the current block into this new one.
if (opt_break_data) |break_data| {
// This is a comptime break which we now change to a runtime break
// since it crosses a runtime branch.
// It may pass through our currently being analyzed block_inline or it
// may point directly to it. In the latter case, this modifies the
// block that we are about to look up in the post_hoc_blocks map below.
try sema.addRuntimeBreak(block, break_data);
} else {
// Here the comptime control flow ends with noreturn; however
// we have runtime control flow continuing after this block.
// This branch is therefore handled by the `i += 1; continue;`
// logic below.
}
try labeled_block.block.instructions.appendSlice(gpa, block.instructions.items[block_index..]);
block.instructions.items.len = block_index;
const block_result = try sema.analyzeBlockBody(block, inst_data.src(), &labeled_block.block, &labeled_block.label.merges);
{
// Destroy the ad-hoc block entry so that it does not interfere with
// the next iteration of comptime control flow, if any.
labeled_block.destroy(gpa);
assert(sema.post_hoc_blocks.remove(new_block_inst));
}
map.putAssumeCapacity(inst, block_result);
i += 1;
continue;
}
const break_data = opt_break_data orelse break always_noreturn;
if (inst == break_data.block_inst) {
break :blk try sema.resolveInst(break_data.operand);
} else {
break break_data.inst;
}
},
.condbr => blk: {
if (!block.is_comptime) break sema.zirCondbr(block, inst);
// Same as condbr_inline. TODO https://github.com/ziglang/zig/issues/8220
const inst_data = datas[inst].pl_node;
const cond_src: LazySrcLoc = .{ .node_offset_if_cond = inst_data.src_node };
const extra = sema.code.extraData(Zir.Inst.CondBr, inst_data.payload_index);
const then_body = sema.code.extra[extra.end..][0..extra.data.then_body_len];
const else_body = sema.code.extra[extra.end + then_body.len ..][0..extra.data.else_body_len];
const cond = sema.resolveInstConst(block, cond_src, extra.data.condition, "condition in comptime branch must be comptime-known") catch |err| {
if (err == error.AnalysisFail and block.comptime_reason != null) try block.comptime_reason.?.explain(sema, sema.err);
return err;
};
const inline_body = if (cond.val.toBool(mod)) then_body else else_body;
try sema.maybeErrorUnwrapCondbr(block, inline_body, extra.data.condition, cond_src);
const break_data = (try sema.analyzeBodyBreak(block, inline_body)) orelse
break always_noreturn;
if (inst == break_data.block_inst) {
break :blk try sema.resolveInst(break_data.operand);
} else {
break break_data.inst;
}
},
.condbr_inline => blk: {
const inst_data = datas[inst].pl_node;
const cond_src: LazySrcLoc = .{ .node_offset_if_cond = inst_data.src_node };
const extra = sema.code.extraData(Zir.Inst.CondBr, inst_data.payload_index);
const then_body = sema.code.extra[extra.end..][0..extra.data.then_body_len];
const else_body = sema.code.extra[extra.end + then_body.len ..][0..extra.data.else_body_len];
const cond = sema.resolveInstConst(block, cond_src, extra.data.condition, "condition in comptime branch must be comptime-known") catch |err| {
if (err == error.AnalysisFail and block.comptime_reason != null) try block.comptime_reason.?.explain(sema, sema.err);
return err;
};
const inline_body = if (cond.val.toBool(mod)) then_body else else_body;
try sema.maybeErrorUnwrapCondbr(block, inline_body, extra.data.condition, cond_src);
const old_runtime_index = block.runtime_index;
defer block.runtime_index = old_runtime_index;
const break_data = (try sema.analyzeBodyBreak(block, inline_body)) orelse
break always_noreturn;
if (inst == break_data.block_inst) {
break :blk try sema.resolveInst(break_data.operand);
} else {
break break_data.inst;
}
},
.@"try" => blk: {
if (!block.is_comptime) break :blk try sema.zirTry(block, inst);
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
const operand_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
const extra = sema.code.extraData(Zir.Inst.Try, inst_data.payload_index);
const inline_body = sema.code.extra[extra.end..][0..extra.data.body_len];
const err_union = try sema.resolveInst(extra.data.operand);
const err_union_ty = sema.typeOf(err_union);
if (err_union_ty.zigTypeTag(mod) != .ErrorUnion) {
return sema.fail(block, operand_src, "expected error union type, found '{}'", .{
err_union_ty.fmt(mod),
});
}
const is_non_err = try sema.analyzeIsNonErrComptimeOnly(block, operand_src, err_union);
assert(is_non_err != .none);
const is_non_err_val = sema.resolveConstValue(block, operand_src, is_non_err, "try operand inside comptime block must be comptime-known") catch |err| {
if (err == error.AnalysisFail and block.comptime_reason != null) try block.comptime_reason.?.explain(sema, sema.err);
return err;
};
if (is_non_err_val.toBool(mod)) {
break :blk try sema.analyzeErrUnionPayload(block, src, err_union_ty, err_union, operand_src, false);
}
const break_data = (try sema.analyzeBodyBreak(block, inline_body)) orelse
break always_noreturn;
if (inst == break_data.block_inst) {
break :blk try sema.resolveInst(break_data.operand);
} else {
break break_data.inst;
}
},
.try_ptr => blk: {
if (!block.is_comptime) break :blk try sema.zirTryPtr(block, inst);
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
const operand_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
const extra = sema.code.extraData(Zir.Inst.Try, inst_data.payload_index);
const inline_body = sema.code.extra[extra.end..][0..extra.data.body_len];
const operand = try sema.resolveInst(extra.data.operand);
const err_union = try sema.analyzeLoad(block, src, operand, operand_src);
const is_non_err = try sema.analyzeIsNonErrComptimeOnly(block, operand_src, err_union);
assert(is_non_err != .none);
const is_non_err_val = sema.resolveConstValue(block, operand_src, is_non_err, "try operand inside comptime block must be comptime-known") catch |err| {
if (err == error.AnalysisFail and block.comptime_reason != null) try block.comptime_reason.?.explain(sema, sema.err);
return err;
};
if (is_non_err_val.toBool(mod)) {
break :blk try sema.analyzeErrUnionPayloadPtr(block, src, operand, false, false);
}
const break_data = (try sema.analyzeBodyBreak(block, inline_body)) orelse
break always_noreturn;
if (inst == break_data.block_inst) {
break :blk try sema.resolveInst(break_data.operand);
} else {
break break_data.inst;
}
},
.@"defer" => blk: {
const inst_data = sema.code.instructions.items(.data)[inst].@"defer";
const defer_body = sema.code.extra[inst_data.index..][0..inst_data.len];
const break_inst = sema.analyzeBodyInner(block, defer_body) catch |err| switch (err) {
error.ComptimeBreak => sema.comptime_break_inst,
else => |e| return e,
};
if (break_inst != defer_body[defer_body.len - 1]) break always_noreturn;
break :blk Air.Inst.Ref.void_value;
},
.defer_err_code => blk: {
const inst_data = sema.code.instructions.items(.data)[inst].defer_err_code;
const extra = sema.code.extraData(Zir.Inst.DeferErrCode, inst_data.payload_index).data;
const defer_body = sema.code.extra[extra.index..][0..extra.len];
const err_code = try sema.resolveInst(inst_data.err_code);
map.putAssumeCapacity(extra.remapped_err_code, err_code);
const break_inst = sema.analyzeBodyInner(block, defer_body) catch |err| switch (err) {
error.ComptimeBreak => sema.comptime_break_inst,
else => |e| return e,
};
if (break_inst != defer_body[defer_body.len - 1]) break always_noreturn;
break :blk Air.Inst.Ref.void_value;
},
};
if (sema.typeOf(air_inst).isNoReturn(mod))
break always_noreturn;
map.putAssumeCapacity(inst, air_inst);
i += 1;
};
// Balance out `dbg_block_begins` in case of an early noreturn.
const noreturn_inst = block.instructions.popOrNull();
while (dbg_block_begins > 0) {
dbg_block_begins -= 1;
if (block.is_comptime or mod.comp.bin_file.options.strip) continue;
_ = try block.addInst(.{
.tag = .dbg_block_end,
.data = undefined,
});
}
if (noreturn_inst) |some| try block.instructions.append(sema.gpa, some);
if (!wip_captures.finalized) {
try wip_captures.finalize();
block.wip_capture_scope = parent_capture_scope;
}
return result;
}
pub fn resolveInstAllowNone(sema: *Sema, zir_ref: Zir.Inst.Ref) !Air.Inst.Ref {
if (zir_ref == .none) {
return .none;
} else {
return resolveInst(sema, zir_ref);
}
}
pub fn resolveInst(sema: *Sema, zir_ref: Zir.Inst.Ref) !Air.Inst.Ref {
assert(zir_ref != .none);
const i = @enumToInt(zir_ref);
// The first section of indexes corresponds to a fixed set of constant values.
// We intentionally map the same indexes to the same values between ZIR and AIR.
if (i < InternPool.static_len) return @intToEnum(Air.Inst.Ref, i);
// The last section of indexes refers to the map of ZIR => AIR.
const inst = sema.inst_map.get(i - InternPool.static_len).?;
if (inst == .generic_poison) return error.GenericPoison;
const ty = sema.typeOf(inst);
assert(!ty.isGenericPoison());
return inst;
}
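// For illustration (a sketch of the mapping, not an exhaustive description):
// a ZIR ref whose index is below `InternPool.static_len` (a statically-known
// constant such as a primitive type) maps directly to the AIR ref with the
// same index, i.e.
//
//     resolveInst(zir_ref) == @intToEnum(Air.Inst.Ref, @enumToInt(zir_ref))
//
// while any higher index is looked up through `inst_map`, which is populated
// as each ZIR instruction is analyzed.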
fn resolveConstBool(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
zir_ref: Zir.Inst.Ref,
reason: []const u8,
) !bool {
const mod = sema.mod;
const air_inst = try sema.resolveInst(zir_ref);
const wanted_type = Type.bool;
const coerced_inst = try sema.coerce(block, wanted_type, air_inst, src);
const val = try sema.resolveConstValue(block, src, coerced_inst, reason);
return val.toBool(mod);
}
pub fn resolveConstString(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
zir_ref: Zir.Inst.Ref,
reason: []const u8,
) ![]u8 {
const air_inst = try sema.resolveInst(zir_ref);
const wanted_type = Type.const_slice_u8;
const coerced_inst = try sema.coerce(block, wanted_type, air_inst, src);
const val = try sema.resolveConstValue(block, src, coerced_inst, reason);
return val.toAllocatedBytes(wanted_type, sema.arena, sema.mod);
}
pub fn resolveType(sema: *Sema, block: *Block, src: LazySrcLoc, zir_ref: Zir.Inst.Ref) !Type {
const air_inst = try sema.resolveInst(zir_ref);
assert(air_inst != .var_args_param_type);
const ty = try sema.analyzeAsType(block, src, air_inst);
if (ty.isGenericPoison()) return error.GenericPoison;
return ty;
}
fn analyzeAsType(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
air_inst: Air.Inst.Ref,
) !Type {
const wanted_type = Type.type;
const coerced_inst = try sema.coerce(block, wanted_type, air_inst, src);
const val = try sema.resolveConstValue(block, src, coerced_inst, "types must be comptime-known");
return val.toType();
}
pub fn setupErrorReturnTrace(sema: *Sema, block: *Block, last_arg_index: usize) !void {
const mod = sema.mod;
if (!mod.backendSupportsFeature(.error_return_trace)) return;
assert(!block.is_comptime);
var err_trace_block = block.makeSubBlock();
defer err_trace_block.instructions.deinit(sema.gpa);
const src: LazySrcLoc = .unneeded;
// var addrs: [err_return_trace_addr_count]usize = undefined;
const err_return_trace_addr_count = 32;
const addr_arr_ty = try Type.array(sema.arena, err_return_trace_addr_count, null, Type.usize, mod);
const addrs_ptr = try err_trace_block.addTy(.alloc, try mod.singleMutPtrType(addr_arr_ty));
// var st: StackTrace = undefined;
const unresolved_stack_trace_ty = try sema.getBuiltinType("StackTrace");
const stack_trace_ty = try sema.resolveTypeFields(unresolved_stack_trace_ty);
const st_ptr = try err_trace_block.addTy(.alloc, try mod.singleMutPtrType(stack_trace_ty));
// st.instruction_addresses = &addrs;
const addr_field_ptr = try sema.fieldPtr(&err_trace_block, src, st_ptr, "instruction_addresses", src, true);
try sema.storePtr2(&err_trace_block, src, addr_field_ptr, src, addrs_ptr, src, .store);
// st.index = 0;
const index_field_ptr = try sema.fieldPtr(&err_trace_block, src, st_ptr, "index", src, true);
try sema.storePtr2(&err_trace_block, src, index_field_ptr, src, .zero_usize, src, .store);
// @errorReturnTrace() = &st;
_ = try err_trace_block.addUnOp(.set_err_return_trace, st_ptr);
try block.instructions.insertSlice(sema.gpa, last_arg_index, err_trace_block.instructions.items);
}
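// Taken together, the instructions emitted above correspond roughly to this
// Zig code (a sketch; `StackTrace` is the std.builtin type fetched via
// `getBuiltinType` above):
//
//     var addrs: [32]usize = undefined;
//     var st: StackTrace = undefined;
//     st.instruction_addresses = &addrs;
//     st.index = 0;
//     // ...then the error return trace pointer is set to &st.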
/// May return Value Tags: `variable`, `undef`.
/// See `resolveConstValue` for an alternative.
/// Value Tag `generic_poison` causes `error.GenericPoison` to be returned.
fn resolveValue(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
air_ref: Air.Inst.Ref,
reason: []const u8,
) CompileError!Value {
if (try sema.resolveMaybeUndefValAllowVariables(air_ref)) |val| {
if (val.isGenericPoison()) return error.GenericPoison;
return val;
}
return sema.failWithNeededComptime(block, src, reason);
}
/// Value Tag `variable` will cause a compile error.
/// Value Tag `undef` may be returned.
fn resolveConstMaybeUndefVal(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
inst: Air.Inst.Ref,
reason: []const u8,
) CompileError!Value {
if (try sema.resolveMaybeUndefValAllowVariables(inst)) |val| {
switch (val.ip_index) {
.generic_poison => return error.GenericPoison,
.none => switch (val.tag()) {
.variable => return sema.failWithNeededComptime(block, src, reason),
else => return val,
},
else => return val,
}
}
return sema.failWithNeededComptime(block, src, reason);
}
/// Will not return Value Tags `variable` or `undef`; these instead emit compile errors.
/// See `resolveValue` for an alternative.
fn resolveConstValue(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
air_ref: Air.Inst.Ref,
reason: []const u8,
) CompileError!Value {
if (try sema.resolveMaybeUndefValAllowVariables(air_ref)) |val| {
switch (val.ip_index) {
.generic_poison => return error.GenericPoison,
.undef => return sema.failWithUseOfUndef(block, src),
.none => switch (val.tag()) {
.variable => return sema.failWithNeededComptime(block, src, reason),
else => return val,
},
else => return val,
}
}
return sema.failWithNeededComptime(block, src, reason);
}
/// Value Tag `variable` causes this function to return `null`.
/// Value Tag `undef` causes this function to return a compile error.
fn resolveDefinedValue(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
air_ref: Air.Inst.Ref,
) CompileError!?Value {
const mod = sema.mod;
if (try sema.resolveMaybeUndefVal(air_ref)) |val| {
if (val.isUndef(mod)) {
if (block.is_typeof) return null;
return sema.failWithUseOfUndef(block, src);
}
return val;
}
return null;
}
/// Value Tag `variable` causes this function to return `null`.
/// Value Tag `undef` causes this function to return the Value.
/// Value Tag `generic_poison` causes `error.GenericPoison` to be returned.
fn resolveMaybeUndefVal(
sema: *Sema,
inst: Air.Inst.Ref,
) CompileError!?Value {
const val = (try sema.resolveMaybeUndefValAllowVariables(inst)) orelse return null;
switch (val.ip_index) {
.generic_poison => return error.GenericPoison,
.none => switch (val.tag()) {
.variable => return null,
else => return val,
},
else => return val,
}
}
/// Value Tag `variable` results in `null`.
/// Value Tag `undef` results in the Value.
/// Value Tag `generic_poison` causes `error.GenericPoison` to be returned.
/// Value Tags `decl_ref` and `decl_ref_mut`, or any value nesting them, result in `null`.
fn resolveMaybeUndefValIntable(
sema: *Sema,
inst: Air.Inst.Ref,
) CompileError!?Value {
const val = (try sema.resolveMaybeUndefValAllowVariables(inst)) orelse return null;
var check = val;
while (true) switch (check.ip_index) {
.generic_poison => return error.GenericPoison,
.none => switch (check.tag()) {
.variable, .decl_ref, .decl_ref_mut, .comptime_field_ptr => return null,
.field_ptr => check = check.castTag(.field_ptr).?.data.container_ptr,
.elem_ptr => check = check.castTag(.elem_ptr).?.data.array_ptr,
.eu_payload_ptr, .opt_payload_ptr => check = check.cast(Value.Payload.PayloadPtr).?.data.container_ptr,
else => {
try sema.resolveLazyValue(val);
return val;
},
},
else => {
try sema.resolveLazyValue(val);
return val;
},
};
}
/// Returns all Value tags including `variable` and `undef`.
fn resolveMaybeUndefValAllowVariables(sema: *Sema, inst: Air.Inst.Ref) CompileError!?Value {
var make_runtime = false;
if (try sema.resolveMaybeUndefValAllowVariablesMaybeRuntime(inst, &make_runtime)) |val| {
if (make_runtime) return null;
return val;
}
return null;
}
/// Returns all Value tags including `variable`, `undef`, and `runtime_value`.
fn resolveMaybeUndefValAllowVariablesMaybeRuntime(
sema: *Sema,
inst: Air.Inst.Ref,
make_runtime: *bool,
) CompileError!?Value {
assert(inst != .none);
// The first section of indexes corresponds to a fixed set of constant values.
const int = @enumToInt(inst);
if (int < InternPool.static_len) {
return @intToEnum(InternPool.Index, int).toValue();
}
const i = int - InternPool.static_len;
const air_tags = sema.air_instructions.items(.tag);
if (try sema.typeHasOnePossibleValue(sema.typeOf(inst))) |opv| {
if (air_tags[i] == .constant) {
const ty_pl = sema.air_instructions.items(.data)[i].ty_pl;
const val = sema.air_values.items[ty_pl.payload];
if (val.tagIsVariable()) return val;
}
return opv;
}
const air_datas = sema.air_instructions.items(.data);
switch (air_tags[i]) {
.constant => {
const ty_pl = air_datas[i].ty_pl;
const val = sema.air_values.items[ty_pl.payload];
if (val.isRuntimeValue()) make_runtime.* = true;
if (val.isPtrToThreadLocal(sema.mod)) make_runtime.* = true;
return val;
},
.const_ty => return air_datas[i].ty.toValue(),
.interned => return air_datas[i].interned.toValue(),
else => return null,
}
}
fn failWithNeededComptime(sema: *Sema, block: *Block, src: LazySrcLoc, reason: []const u8) CompileError {
const msg = msg: {
const msg = try sema.errMsg(block, src, "unable to resolve comptime value", .{});
errdefer msg.destroy(sema.gpa);
try sema.errNote(block, src, msg, "{s}", .{reason});
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
fn failWithUseOfUndef(sema: *Sema, block: *Block, src: LazySrcLoc) CompileError {
return sema.fail(block, src, "use of undefined value here causes undefined behavior", .{});
}
fn failWithDivideByZero(sema: *Sema, block: *Block, src: LazySrcLoc) CompileError {
return sema.fail(block, src, "division by zero here causes undefined behavior", .{});
}
fn failWithModRemNegative(sema: *Sema, block: *Block, src: LazySrcLoc, lhs_ty: Type, rhs_ty: Type) CompileError {
return sema.fail(block, src, "remainder division with '{}' and '{}': signed integers and floats must use @rem or @mod", .{
lhs_ty.fmt(sema.mod), rhs_ty.fmt(sema.mod),
});
}
fn failWithExpectedOptionalType(sema: *Sema, block: *Block, src: LazySrcLoc, optional_ty: Type) CompileError {
return sema.fail(block, src, "expected optional type, found '{}'", .{optional_ty.fmt(sema.mod)});
}
fn failWithArrayInitNotSupported(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError {
const mod = sema.mod;
const msg = msg: {
const msg = try sema.errMsg(block, src, "type '{}' does not support array initialization syntax", .{
ty.fmt(mod),
});
errdefer msg.destroy(sema.gpa);
if (ty.isSlice(mod)) {
try sema.errNote(block, src, msg, "inferred array length is specified with an underscore: '[_]{}'", .{ty.elemType2(mod).fmt(mod)});
}
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
fn failWithStructInitNotSupported(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError {
return sema.fail(block, src, "type '{}' does not support struct initialization syntax", .{
ty.fmt(sema.mod),
});
}
fn failWithErrorSetCodeMissing(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
dest_err_set_ty: Type,
src_err_set_ty: Type,
) CompileError {
return sema.fail(block, src, "expected type '{}', found type '{}'", .{
dest_err_set_ty.fmt(sema.mod), src_err_set_ty.fmt(sema.mod),
});
}
fn failWithIntegerOverflow(sema: *Sema, block: *Block, src: LazySrcLoc, int_ty: Type, val: Value, vector_index: usize) CompileError {
const mod = sema.mod;
if (int_ty.zigTypeTag(mod) == .Vector) {
const msg = msg: {
const msg = try sema.errMsg(block, src, "overflow of vector type '{}' with value '{}'", .{
int_ty.fmt(sema.mod), val.fmtValue(int_ty, sema.mod),
});
errdefer msg.destroy(sema.gpa);
try sema.errNote(block, src, msg, "when computing vector element at index '{d}'", .{vector_index});
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
return sema.fail(block, src, "overflow of integer type '{}' with value '{}'", .{
int_ty.fmt(sema.mod), val.fmtValue(int_ty, sema.mod),
});
}
fn failWithInvalidComptimeFieldStore(sema: *Sema, block: *Block, init_src: LazySrcLoc, container_ty: Type, field_index: usize) CompileError {
const mod = sema.mod;
const msg = msg: {
const msg = try sema.errMsg(block, init_src, "value stored in comptime field does not match the default value of the field", .{});
errdefer msg.destroy(sema.gpa);
const struct_ty = mod.typeToStruct(container_ty) orelse break :msg msg;
const default_value_src = mod.fieldSrcLoc(struct_ty.owner_decl, .{
.index = field_index,
.range = .value,
});
try mod.errNoteNonLazy(default_value_src, msg, "default value set here", .{});
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
fn failWithUseOfAsync(sema: *Sema, block: *Block, src: LazySrcLoc) CompileError {
const msg = msg: {
const msg = try sema.errMsg(block, src, "async has not been implemented in the self-hosted compiler yet", .{});
errdefer msg.destroy(sema.gpa);
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
fn failWithInvalidFieldAccess(sema: *Sema, block: *Block, src: LazySrcLoc, object_ty: Type, field_name: []const u8) CompileError {
const mod = sema.mod;
const inner_ty = if (object_ty.isSinglePointer(mod)) object_ty.childType(mod) else object_ty;
if (inner_ty.zigTypeTag(mod) == .Optional) opt: {
const child_ty = inner_ty.optionalChild(mod);
if (!typeSupportsFieldAccess(mod, child_ty, field_name)) break :opt;
const msg = msg: {
const msg = try sema.errMsg(block, src, "optional type '{}' does not support field access", .{object_ty.fmt(sema.mod)});
errdefer msg.destroy(sema.gpa);
try sema.errNote(block, src, msg, "consider using '.?', 'orelse', or 'if'", .{});
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
} else if (inner_ty.zigTypeTag(mod) == .ErrorUnion) err: {
const child_ty = inner_ty.errorUnionPayload(mod);
if (!typeSupportsFieldAccess(mod, child_ty, field_name)) break :err;
const msg = msg: {
const msg = try sema.errMsg(block, src, "error union type '{}' does not support field access", .{object_ty.fmt(sema.mod)});
errdefer msg.destroy(sema.gpa);
try sema.errNote(block, src, msg, "consider using 'try', 'catch', or 'if'", .{});
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
return sema.fail(block, src, "type '{}' does not support field access", .{object_ty.fmt(sema.mod)});
}
fn typeSupportsFieldAccess(mod: *const Module, ty: Type, field_name: []const u8) bool {
switch (ty.zigTypeTag(mod)) {
.Array => return mem.eql(u8, field_name, "len"),
.Pointer => {
const ptr_info = ty.ptrInfo(mod);
if (ptr_info.size == .Slice) {
return mem.eql(u8, field_name, "ptr") or mem.eql(u8, field_name, "len");
} else if (ptr_info.pointee_type.zigTypeTag(mod) == .Array) {
return mem.eql(u8, field_name, "len");
} else return false;
},
.Type, .Struct, .Union => return true,
else => return false,
}
}
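// For illustration (user-code sketch): with `var a: [3]u8` only `a.len` is
// supported; with `var s: []u8` both `s.ptr` and `s.len` are; with
// `var p: *[3]u8` only `p.len` is. Types, structs, and unions accept any
// field name here, since actual membership is validated elsewhere.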
/// We don't return a pointer to the new error note because the pointer
/// becomes invalid when you add another one.
fn errNote(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
parent: *Module.ErrorMsg,
comptime format: []const u8,
args: anytype,
) error{OutOfMemory}!void {
const mod = sema.mod;
const src_decl = mod.declPtr(block.src_decl);
return mod.errNoteNonLazy(src.toSrcLoc(src_decl, mod), parent, format, args);
}
fn addFieldErrNote(
sema: *Sema,
container_ty: Type,
field_index: usize,
parent: *Module.ErrorMsg,
comptime format: []const u8,
args: anytype,
) !void {
@setCold(true);
const mod = sema.mod;
const decl_index = container_ty.getOwnerDecl(mod);
const decl = mod.declPtr(decl_index);
const field_src = blk: {
const tree = decl.getFileScope(mod).getTree(sema.gpa) catch |err| {
log.err("unable to load AST to report compile error: {s}", .{@errorName(err)});
break :blk decl.srcLoc(mod);
};
const container_node = decl.relativeToNodeIndex(0);
const node_tags = tree.nodes.items(.tag);
var buf: [2]std.zig.Ast.Node.Index = undefined;
const container_decl = tree.fullContainerDecl(&buf, container_node) orelse break :blk decl.srcLoc(mod);
var it_index: usize = 0;
for (container_decl.ast.members) |member_node| {
switch (node_tags[member_node]) {
.container_field_init,
.container_field_align,
.container_field,
=> {
if (it_index == field_index) {
break :blk decl.nodeOffsetSrcLoc(decl.nodeIndexToRelative(member_node), mod);
}
it_index += 1;
},
else => continue,
}
}
unreachable;
};
try mod.errNoteNonLazy(field_src, parent, format, args);
}
fn errMsg(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
comptime format: []const u8,
args: anytype,
) error{OutOfMemory}!*Module.ErrorMsg {
const mod = sema.mod;
const src_decl = mod.declPtr(block.src_decl);
return Module.ErrorMsg.create(sema.gpa, src.toSrcLoc(src_decl, mod), format, args);
}
pub fn fail(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
comptime format: []const u8,
args: anytype,
) CompileError {
const err_msg = try sema.errMsg(block, src, format, args);
return sema.failWithOwnedErrorMsg(err_msg);
}
fn failWithOwnedErrorMsg(sema: *Sema, err_msg: *Module.ErrorMsg) CompileError {
@setCold(true);
const gpa = sema.gpa;
if (crash_report.is_enabled and sema.mod.comp.debug_compile_errors) {
if (err_msg.src_loc.lazy == .unneeded) return error.NeededSourceLocation;
var wip_errors: std.zig.ErrorBundle.Wip = undefined;
wip_errors.init(gpa) catch unreachable;
Compilation.addModuleErrorMsg(&wip_errors, err_msg.*) catch unreachable;
std.debug.print("compile error during Sema:\n", .{});
var error_bundle = wip_errors.toOwnedBundle("") catch unreachable;
error_bundle.renderToStdErr(.{ .ttyconf = .no_color });
crash_report.compilerPanic("unexpected compile error occurred", null, null);
}
const mod = sema.mod;
ref: {
errdefer err_msg.destroy(gpa);
if (err_msg.src_loc.lazy == .unneeded) {
return error.NeededSourceLocation;
}
try mod.failed_decls.ensureUnusedCapacity(gpa, 1);
try mod.failed_files.ensureUnusedCapacity(gpa, 1);
const max_references = blk: {
if (sema.mod.comp.reference_trace) |num| break :blk num;
// Do not add multiple traces without explicit request.
if (sema.mod.failed_decls.count() != 0) break :ref;
break :blk default_reference_trace_len;
};
var referenced_by = if (sema.func) |some| some.owner_decl else sema.owner_decl_index;
var reference_stack = std.ArrayList(Module.ErrorMsg.Trace).init(gpa);
defer reference_stack.deinit();
// Avoid infinite loops.
var seen = std.AutoHashMap(Module.Decl.Index, void).init(gpa);
defer seen.deinit();
var cur_reference_trace: u32 = 0;
while (sema.mod.reference_table.get(referenced_by)) |ref| : (cur_reference_trace += 1) {
const gop = try seen.getOrPut(ref.referencer);
if (gop.found_existing) break;
if (cur_reference_trace < max_references) {
const decl = sema.mod.declPtr(ref.referencer);
try reference_stack.append(.{ .decl = decl.name, .src_loc = ref.src.toSrcLoc(decl, mod) });
}
referenced_by = ref.referencer;
}
if (sema.mod.comp.reference_trace == null and cur_reference_trace > 0) {
try reference_stack.append(.{
.decl = null,
.src_loc = undefined,
.hidden = 0,
});
} else if (cur_reference_trace > max_references) {
try reference_stack.append(.{
.decl = undefined,
.src_loc = undefined,
.hidden = cur_reference_trace - max_references,
});
}
err_msg.reference_trace = try reference_stack.toOwnedSlice();
}
if (sema.owner_func) |func| {
func.state = .sema_failure;
} else {
sema.owner_decl.analysis = .sema_failure;
sema.owner_decl.generation = mod.generation;
}
if (sema.func) |func| {
func.state = .sema_failure;
}
const gop = mod.failed_decls.getOrPutAssumeCapacity(sema.owner_decl_index);
if (gop.found_existing) {
// If there are multiple errors for the same Decl, prefer the first one added.
sema.err = null;
err_msg.destroy(gpa);
} else {
sema.err = err_msg;
gop.value_ptr.* = err_msg;
}
return error.AnalysisFail;
}
const align_ty = Type.u29;
fn analyzeAsAlign(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
air_ref: Air.Inst.Ref,
) !u32 {
const alignment_big = try sema.analyzeAsInt(block, src, air_ref, align_ty, "alignment must be comptime-known");
const alignment = @intCast(u32, alignment_big); // We coerce to u29 in the prev line.
try sema.validateAlign(block, src, alignment);
return alignment;
}
fn validateAlign(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
alignment: u32,
) !void {
if (alignment == 0) return sema.fail(block, src, "alignment must be >= 1", .{});
if (!std.math.isPowerOfTwo(alignment)) {
return sema.fail(block, src, "alignment value '{d}' is not a power of two", .{
alignment,
});
}
}
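// For example, alignments 1, 2, 4, and 1 << 28 all validate, while 0 fails
// the ">= 1" check and 3 fails the power-of-two check.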
pub fn resolveAlign(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
zir_ref: Zir.Inst.Ref,
) !u32 {
const air_ref = try sema.resolveInst(zir_ref);
return sema.analyzeAsAlign(block, src, air_ref);
}
fn resolveInt(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
zir_ref: Zir.Inst.Ref,
dest_ty: Type,
reason: []const u8,
) !u64 {
const air_ref = try sema.resolveInst(zir_ref);
return sema.analyzeAsInt(block, src, air_ref, dest_ty, reason);
}
fn analyzeAsInt(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
air_ref: Air.Inst.Ref,
dest_ty: Type,
reason: []const u8,
) !u64 {
const mod = sema.mod;
const coerced = try sema.coerce(block, dest_ty, air_ref, src);
const val = try sema.resolveConstValue(block, src, coerced, reason);
return (try val.getUnsignedIntAdvanced(mod, sema)).?;
}
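// E.g. with a comptime-known operand of `1000` and a `dest_ty` of `u32`, this
// coerces and returns `1000` as a `u64`; a runtime-known operand instead
// fails with the supplied `reason`.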
/// Returns a compile error if the value has tag `variable`. See `resolveInstValue` for
/// a function that does not.
pub fn resolveInstConst(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
zir_ref: Zir.Inst.Ref,
reason: []const u8,
) CompileError!TypedValue {
const air_ref = try sema.resolveInst(zir_ref);
const val = try sema.resolveConstValue(block, src, air_ref, reason);
return TypedValue{
.ty = sema.typeOf(air_ref),
.val = val,
};
}
/// Value Tag may be `undef` or `variable`.
/// See `resolveInstConst` for an alternative.
pub fn resolveInstValue(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
zir_ref: Zir.Inst.Ref,
reason: []const u8,
) CompileError!TypedValue {
const air_ref = try sema.resolveInst(zir_ref);
const val = try sema.resolveValue(block, src, air_ref, reason);
return TypedValue{
.ty = sema.typeOf(air_ref),
.val = val,
};
}
fn zirCoerceResultPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const pointee_ty = try sema.resolveType(block, src, extra.lhs);
const ptr = try sema.resolveInst(extra.rhs);
const target = sema.mod.getTarget();
const addr_space = target_util.defaultAddressSpace(target, .local);
if (Air.refToIndex(ptr)) |ptr_inst| {
if (sema.air_instructions.items(.tag)[ptr_inst] == .constant) {
const air_datas = sema.air_instructions.items(.data);
const ptr_val = sema.air_values.items[air_datas[ptr_inst].ty_pl.payload];
switch (ptr_val.tag()) {
.inferred_alloc => {
const inferred_alloc = &ptr_val.castTag(.inferred_alloc).?.data;
// Add the stored instruction to the set we will use to resolve peer types
// for the inferred allocation.
// This instruction will not make it to codegen; it only participates
// in the `prongs` of the `inferred_alloc`.
var trash_block = block.makeSubBlock();
defer trash_block.instructions.deinit(sema.gpa);
const operand = try trash_block.addBitCast(pointee_ty, .void_value);
const ptr_ty = try Type.ptr(sema.arena, sema.mod, .{
.pointee_type = pointee_ty,
.@"align" = inferred_alloc.alignment,
.@"addrspace" = addr_space,
});
const bitcasted_ptr = try block.addBitCast(ptr_ty, ptr);
try inferred_alloc.prongs.append(sema.arena, .{
.stored_inst = operand,
.placeholder = Air.refToIndex(bitcasted_ptr).?,
});
return bitcasted_ptr;
},
.inferred_alloc_comptime => {
const iac = ptr_val.castTag(.inferred_alloc_comptime).?;
// There will be only one coerce_result_ptr because we are running at comptime.
// The alloc will turn into a Decl.
var anon_decl = try block.startAnonDecl();
defer anon_decl.deinit();
iac.data.decl_index = try anon_decl.finish(
pointee_ty,
Value.undef,
iac.data.alignment,
);
if (iac.data.alignment != 0) {
try sema.resolveTypeLayout(pointee_ty);
}
const ptr_ty = try Type.ptr(sema.arena, sema.mod, .{
.pointee_type = pointee_ty,
.@"align" = iac.data.alignment,
.@"addrspace" = addr_space,
});
try sema.maybeQueueFuncBodyAnalysis(iac.data.decl_index);
return sema.addConstant(
ptr_ty,
try Value.Tag.decl_ref_mut.create(sema.arena, .{
.decl_index = iac.data.decl_index,
.runtime_index = block.runtime_index,
}),
);
},
else => {},
}
}
}
// Make a dummy store through the pointer to test the coercion.
// We will then use the generated instructions to decide what
// kind of transformations to make on the result pointer.
var trash_block = block.makeSubBlock();
trash_block.is_comptime = false;
defer trash_block.instructions.deinit(sema.gpa);
const dummy_ptr = try trash_block.addTy(.alloc, sema.typeOf(ptr));
const dummy_operand = try trash_block.addBitCast(pointee_ty, .void_value);
return sema.coerceResultPtr(block, src, ptr, dummy_ptr, dummy_operand, &trash_block);
}
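// Why the dummy store is needed, as a sketch: for `const x: ?u32 = y;` where
// `y: u32`, storing through the `*?u32` result pointer requires a
// `wrap_optional`; replaying the instructions generated by the dummy store
// lets `coerceResultPtr` below apply the matching pointer transformation
// (here, `analyzeOptionalPayloadPtr`).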
fn coerceResultPtr(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
ptr: Air.Inst.Ref,
dummy_ptr: Air.Inst.Ref,
dummy_operand: Air.Inst.Ref,
trash_block: *Block,
) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const target = sema.mod.getTarget();
const addr_space = target_util.defaultAddressSpace(target, .local);
const pointee_ty = sema.typeOf(dummy_operand);
const prev_trash_len = trash_block.instructions.items.len;
try sema.storePtr2(trash_block, src, dummy_ptr, src, dummy_operand, src, .bitcast);
{
const air_tags = sema.air_instructions.items(.tag);
//std.debug.print("dummy storePtr instructions:\n", .{});
//for (trash_block.instructions.items) |item| {
// std.debug.print(" {s}\n", .{@tagName(air_tags[item])});
//}
// The last instruction is the `store`, unless the type is zero-sized.
const trash_inst = trash_block.instructions.items[trash_block.instructions.items.len - 1];
if (air_tags[trash_inst] != .store and air_tags[trash_inst] != .store_safe) {
// No store instruction is generated for zero-sized types.
assert((try sema.typeHasOnePossibleValue(pointee_ty)) != null);
} else {
trash_block.instructions.items.len -= 1;
assert(trash_inst == sema.air_instructions.len - 1);
sema.air_instructions.len -= 1;
}
}
const ptr_ty = try Type.ptr(sema.arena, sema.mod, .{
.pointee_type = pointee_ty,
.@"addrspace" = addr_space,
});
var new_ptr = ptr;
while (true) {
const air_tags = sema.air_instructions.items(.tag);
const air_datas = sema.air_instructions.items(.data);
if (trash_block.instructions.items.len == prev_trash_len) {
if (try sema.resolveDefinedValue(block, src, new_ptr)) |ptr_val| {
return sema.addConstant(ptr_ty, ptr_val);
}
if (pointee_ty.eql(Type.null, sema.mod)) {
const opt_ty = sema.typeOf(new_ptr).childType(mod);
const null_inst = try sema.addConstant(opt_ty, Value.null);
_ = try block.addBinOp(.store, new_ptr, null_inst);
return Air.Inst.Ref.void_value;
}
return sema.bitCast(block, ptr_ty, new_ptr, src, null);
}
const trash_inst = trash_block.instructions.pop();
switch (air_tags[trash_inst]) {
// An array coerced to a vector whose element type differs but is coercible.
.aggregate_init => {
const ty_pl = air_datas[trash_inst].ty_pl;
const ptr_operand_ty = try Type.ptr(sema.arena, sema.mod, .{
.pointee_type = try sema.analyzeAsType(block, src, ty_pl.ty),
.@"addrspace" = addr_space,
});
if (try sema.resolveDefinedValue(block, src, new_ptr)) |ptr_val| {
return sema.addConstant(ptr_operand_ty, ptr_val);
} else {
return sema.bitCast(block, ptr_operand_ty, new_ptr, src, null);
}
},
.bitcast => {
const ty_op = air_datas[trash_inst].ty_op;
const operand_ty = sema.typeOf(ty_op.operand);
const ptr_operand_ty = try Type.ptr(sema.arena, sema.mod, .{
.pointee_type = operand_ty,
.@"addrspace" = addr_space,
});
if (try sema.resolveDefinedValue(block, src, new_ptr)) |ptr_val| {
new_ptr = try sema.addConstant(ptr_operand_ty, ptr_val);
} else {
new_ptr = try sema.bitCast(block, ptr_operand_ty, new_ptr, src, null);
}
},
.wrap_optional => {
new_ptr = try sema.analyzeOptionalPayloadPtr(block, src, new_ptr, false, true);
},
.wrap_errunion_err => {
return sema.fail(block, src, "TODO coerce_result_ptr wrap_errunion_err", .{});
},
.wrap_errunion_payload => {
new_ptr = try sema.analyzeErrUnionPayloadPtr(block, src, new_ptr, false, true);
},
.array_to_slice => {
return sema.fail(block, src, "TODO coerce_result_ptr array_to_slice", .{});
},
.get_union_tag => {
return sema.fail(block, src, "TODO coerce_result_ptr get_union_tag", .{});
},
else => {
if (std.debug.runtime_safety) {
std.debug.panic("unexpected AIR tag for coerce_result_ptr: {}", .{
air_tags[trash_inst],
});
} else {
unreachable;
}
},
}
}
}
pub fn analyzeStructDecl(
sema: *Sema,
new_decl: *Decl,
inst: Zir.Inst.Index,
struct_index: Module.Struct.Index,
) SemaError!void {
const mod = sema.mod;
const struct_obj = mod.structPtr(struct_index);
const extended = sema.code.instructions.items(.data)[inst].extended;
assert(extended.opcode == .struct_decl);
const small = @bitCast(Zir.Inst.StructDecl.Small, extended.small);
struct_obj.known_non_opv = small.known_non_opv;
if (small.known_comptime_only) {
struct_obj.requires_comptime = .yes;
}
var extra_index: usize = extended.operand;
extra_index += @boolToInt(small.has_src_node);
extra_index += @boolToInt(small.has_fields_len);
const decls_len = if (small.has_decls_len) blk: {
const decls_len = sema.code.extra[extra_index];
extra_index += 1;
break :blk decls_len;
} else 0;
if (small.has_backing_int) {
const backing_int_body_len = sema.code.extra[extra_index];
extra_index += 1; // backing_int_body_len
if (backing_int_body_len == 0) {
extra_index += 1; // backing_int_ref
} else {
extra_index += backing_int_body_len; // backing_int_body_inst
}
}
_ = try mod.scanNamespace(struct_obj.namespace, extra_index, decls_len, new_decl);
}
fn zirStructDecl(
sema: *Sema,
block: *Block,
extended: Zir.Inst.Extended.InstData,
inst: Zir.Inst.Index,
) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const gpa = sema.gpa;
const small = @bitCast(Zir.Inst.StructDecl.Small, extended.small);
const src: LazySrcLoc = if (small.has_src_node) blk: {
const node_offset = @bitCast(i32, sema.code.extra[extended.operand]);
break :blk LazySrcLoc.nodeOffset(node_offset);
} else sema.src;
var new_decl_arena = std.heap.ArenaAllocator.init(gpa);
errdefer new_decl_arena.deinit();
// Because the new decl, namespace, and struct type all reference each other,
// `undefined` placeholders are used here and then patched once the struct
// type gains an InternPool index.
const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{
.ty = Type.type,
.val = undefined,
}, small.name_strategy, "struct", inst);
const new_decl = mod.declPtr(new_decl_index);
new_decl.owns_tv = true;
errdefer mod.abortAnonDecl(new_decl_index);
const new_namespace_index = try mod.createNamespace(.{
.parent = block.namespace.toOptional(),
.ty = undefined,
.file_scope = block.getFileScope(mod),
});
const new_namespace = mod.namespacePtr(new_namespace_index);
errdefer mod.destroyNamespace(new_namespace_index);
const struct_index = try mod.createStruct(.{
.owner_decl = new_decl_index,
.fields = .{},
.zir_index = inst,
.layout = small.layout,
.status = .none,
.known_non_opv = undefined,
.is_tuple = small.is_tuple,
.namespace = new_namespace_index,
});
errdefer mod.destroyStruct(struct_index);
const struct_ty = try mod.intern_pool.get(gpa, .{ .struct_type = .{
.index = struct_index.toOptional(),
.namespace = new_namespace_index.toOptional(),
} });
errdefer mod.intern_pool.remove(struct_ty);
new_decl.val = struct_ty.toValue();
new_namespace.ty = struct_ty.toType();
try sema.analyzeStructDecl(new_decl, inst, struct_index);
try new_decl.finalizeNewArena(&new_decl_arena);
return sema.analyzeDeclVal(block, src, new_decl_index);
}
fn createAnonymousDeclTypeNamed(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
typed_value: TypedValue,
name_strategy: Zir.Inst.NameStrategy,
anon_prefix: []const u8,
inst: ?Zir.Inst.Index,
) !Decl.Index {
const mod = sema.mod;
const gpa = sema.gpa;
const namespace = block.namespace;
const src_scope = block.wip_capture_scope;
const src_decl = mod.declPtr(block.src_decl);
const src_node = src_decl.relativeToNodeIndex(src.node_offset.x);
const new_decl_index = try mod.allocateNewDecl(namespace, src_node, src_scope);
errdefer mod.destroyDecl(new_decl_index);
switch (name_strategy) {
.anon => {
// It would be neat to have "struct:line:column", but this name has to
// survive incremental updates, where the declaration may have been shifted
// up or down to a different line while remaining otherwise unchanged; a
// stable name avoids unnecessary re-analysis.
// This name is also used as the key in the parent namespace, so it cannot
// be renamed.
const name = try std.fmt.allocPrintZ(gpa, "{s}__{s}_{d}", .{
src_decl.name, anon_prefix, @enumToInt(new_decl_index),
});
errdefer gpa.free(name);
try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, namespace, typed_value, name);
return new_decl_index;
},
.parent => {
const name = try gpa.dupeZ(u8, mem.sliceTo(sema.mod.declPtr(block.src_decl).name, 0));
errdefer gpa.free(name);
try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, namespace, typed_value, name);
return new_decl_index;
},
.func => {
const fn_info = sema.code.getFnInfo(sema.func.?.zir_body_inst);
const zir_tags = sema.code.instructions.items(.tag);
var buf = std.ArrayList(u8).init(gpa);
defer buf.deinit();
try buf.appendSlice(mem.sliceTo(sema.mod.declPtr(block.src_decl).name, 0));
try buf.appendSlice("(");
var arg_i: usize = 0;
for (fn_info.param_body) |zir_inst| switch (zir_tags[zir_inst]) {
.param, .param_comptime, .param_anytype, .param_anytype_comptime => {
const arg = sema.inst_map.get(zir_inst).?;
// If this is being called in a generic function, then analyzeCall will
// already have resolved the args and this will work.
// If not, then this is a struct type being returned from a non-generic
// function, and the name doesn't matter since it will later result in a
// compile error anyway.
const arg_val = sema.resolveConstMaybeUndefVal(block, .unneeded, arg, "") catch
return sema.createAnonymousDeclTypeNamed(block, src, typed_value, .anon, anon_prefix, null);
if (arg_i != 0) try buf.appendSlice(",");
try buf.writer().print("{}", .{arg_val.fmtValue(sema.typeOf(arg), sema.mod)});
arg_i += 1;
continue;
},
else => continue,
};
try buf.appendSlice(")");
const name = try buf.toOwnedSliceSentinel(0);
errdefer gpa.free(name);
try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, namespace, typed_value, name);
return new_decl_index;
},
.dbg_var => {
const ref = Zir.indexToRef(inst.?);
const zir_tags = sema.code.instructions.items(.tag);
const zir_data = sema.code.instructions.items(.data);
var i = inst.?;
while (i < zir_tags.len) : (i += 1) switch (zir_tags[i]) {
.dbg_var_ptr, .dbg_var_val => {
if (zir_data[i].str_op.operand != ref) continue;
const name = try std.fmt.allocPrintZ(gpa, "{s}.{s}", .{
src_decl.name, zir_data[i].str_op.getStr(sema.code),
});
errdefer gpa.free(name);
try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, namespace, typed_value, name);
return new_decl_index;
},
else => {},
};
return sema.createAnonymousDeclTypeNamed(block, src, typed_value, .anon, anon_prefix, null);
},
}
}
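// Naming illustration (hypothetical decl names): inside a decl `List`, the
// `.anon` strategy produces something like "List__struct_142"; for
// `fn List(comptime T: type) type` called as `List(u32)`, the `.func`
// strategy produces "List(u32)".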
fn zirEnumDecl(
sema: *Sema,
block: *Block,
extended: Zir.Inst.Extended.InstData,
inst: Zir.Inst.Index,
) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
const mod = sema.mod;
const gpa = sema.gpa;
const small = @bitCast(Zir.Inst.EnumDecl.Small, extended.small);
var extra_index: usize = extended.operand;
const src: LazySrcLoc = if (small.has_src_node) blk: {
const node_offset = @bitCast(i32, sema.code.extra[extra_index]);
extra_index += 1;
break :blk LazySrcLoc.nodeOffset(node_offset);
} else sema.src;
const tag_ty_src: LazySrcLoc = .{ .node_offset_container_tag = src.node_offset.x };
const tag_type_ref = if (small.has_tag_type) blk: {
const tag_type_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]);
extra_index += 1;
break :blk tag_type_ref;
} else .none;
const body_len = if (small.has_body_len) blk: {
const body_len = sema.code.extra[extra_index];
extra_index += 1;
break :blk body_len;
} else 0;
const fields_len = if (small.has_fields_len) blk: {
const fields_len = sema.code.extra[extra_index];
extra_index += 1;
break :blk fields_len;
} else 0;
const decls_len = if (small.has_decls_len) blk: {
const decls_len = sema.code.extra[extra_index];
extra_index += 1;
break :blk decls_len;
} else 0;
// Because the new decl, namespace, and enum type all reference each other,
// `undefined` placeholders are used here and then patched once the enum
// type gains an InternPool index.
var done = false;
const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{
.ty = Type.type,
.val = undefined,
}, small.name_strategy, "enum", inst);
const new_decl = mod.declPtr(new_decl_index);
new_decl.owns_tv = true;
errdefer if (!done) mod.abortAnonDecl(new_decl_index);
const new_namespace_index = try mod.createNamespace(.{
.parent = block.namespace.toOptional(),
.ty = undefined,
.file_scope = block.getFileScope(mod),
});
const new_namespace = mod.namespacePtr(new_namespace_index);
errdefer if (!done) mod.destroyNamespace(new_namespace_index);
extra_index = try mod.scanNamespace(new_namespace_index, extra_index, decls_len, new_decl);
const body = sema.code.extra[extra_index..][0..body_len];
extra_index += body.len;
const bit_bags_count = std.math.divCeil(usize, fields_len, 32) catch unreachable;
const body_end = extra_index;
extra_index += bit_bags_count;
const any_values = for (sema.code.extra[body_end..][0..bit_bags_count]) |bag| {
if (bag != 0) break true;
} else false;
const incomplete_enum = try mod.intern_pool.getIncompleteEnum(gpa, .{
.decl = new_decl_index,
.namespace = new_namespace_index.toOptional(),
.fields_len = fields_len,
.has_values = any_values,
.tag_mode = if (small.nonexhaustive)
.nonexhaustive
else if (tag_type_ref == .none)
.auto
else
.explicit,
});
errdefer if (!done) mod.intern_pool.remove(incomplete_enum.index);
new_decl.val = incomplete_enum.index.toValue();
new_namespace.ty = incomplete_enum.index.toType();
const decl_val = try sema.analyzeDeclVal(block, src, new_decl_index);
done = true;
const int_tag_ty = ty: {
// We create a block for the field type instructions because they
// may need to reference Decls from inside the enum namespace.
// Within the field type, default value, and alignment expressions, the "owner decl"
// should be the enum itself.
const prev_owner_decl = sema.owner_decl;
const prev_owner_decl_index = sema.owner_decl_index;
sema.owner_decl = new_decl;
sema.owner_decl_index = new_decl_index;
defer {
sema.owner_decl = prev_owner_decl;
sema.owner_decl_index = prev_owner_decl_index;
}
const prev_owner_func = sema.owner_func;
sema.owner_func = null;
defer sema.owner_func = prev_owner_func;
const prev_func = sema.func;
sema.func = null;
defer sema.func = prev_func;
var wip_captures = try WipCaptureScope.init(gpa, sema.perm_arena, new_decl.src_scope);
defer wip_captures.deinit();
var enum_block: Block = .{
.parent = null,
.sema = sema,
.src_decl = new_decl_index,
.namespace = new_namespace_index,
.wip_capture_scope = wip_captures.scope,
.instructions = .{},
.inlining = null,
.is_comptime = true,
};
defer enum_block.instructions.deinit(sema.gpa);
if (body.len != 0) {
try sema.analyzeBody(&enum_block, body);
}
try wip_captures.finalize();
if (tag_type_ref != .none) {
const ty = try sema.resolveType(block, tag_ty_src, tag_type_ref);
if (ty.zigTypeTag(mod) != .Int and ty.zigTypeTag(mod) != .ComptimeInt) {
return sema.fail(block, tag_ty_src, "expected integer tag type, found '{}'", .{ty.fmt(sema.mod)});
}
incomplete_enum.setTagType(&mod.intern_pool, ty.ip_index);
break :ty ty;
} else if (fields_len == 0) {
break :ty try mod.intType(.unsigned, 0);
} else {
const bits = std.math.log2_int_ceil(usize, fields_len);
break :ty try mod.intType(.unsigned, bits);
}
};
if (small.nonexhaustive and int_tag_ty.ip_index != .comptime_int_type) {
if (fields_len > 1 and std.math.log2_int(u64, fields_len) == int_tag_ty.bitSize(mod)) {
return sema.fail(block, src, "non-exhaustive enum specifies every value", .{});
}
}
var bit_bag_index: usize = body_end;
var cur_bit_bag: u32 = undefined;
var field_i: u32 = 0;
var last_tag_val: ?Value = null;
while (field_i < fields_len) : (field_i += 1) {
if (field_i % 32 == 0) {
cur_bit_bag = sema.code.extra[bit_bag_index];
bit_bag_index += 1;
}
const has_tag_value = @truncate(u1, cur_bit_bag) != 0;
cur_bit_bag >>= 1;
const field_name_zir = sema.code.nullTerminatedString(sema.code.extra[extra_index]);
extra_index += 1;
// Skip over the doc comment.
extra_index += 1;
const field_name = try mod.intern_pool.getOrPutString(gpa, field_name_zir);
if (try incomplete_enum.addFieldName(&mod.intern_pool, gpa, field_name)) |other_index| {
const field_src = mod.fieldSrcLoc(new_decl_index, .{ .index = field_i }).lazy;
const other_field_src = mod.fieldSrcLoc(new_decl_index, .{ .index = other_index }).lazy;
const msg = msg: {
const msg = try sema.errMsg(block, field_src, "duplicate enum field '{s}'", .{field_name_zir});
errdefer msg.destroy(gpa);
try sema.errNote(block, other_field_src, msg, "other field here", .{});
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
if (has_tag_value) {
const tag_val_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]);
extra_index += 1;
const tag_inst = try sema.resolveInst(tag_val_ref);
const tag_val = sema.resolveConstValue(block, .unneeded, tag_inst, "") catch |err| switch (err) {
error.NeededSourceLocation => {
const value_src = mod.fieldSrcLoc(new_decl_index, .{
.index = field_i,
.range = .value,
}).lazy;
_ = try sema.resolveConstValue(block, value_src, tag_inst, "enum tag value must be comptime-known");
unreachable;
},
else => |e| return e,
};
last_tag_val = tag_val;
if (try incomplete_enum.addFieldValue(&mod.intern_pool, gpa, tag_val.ip_index)) |other_index| {
const value_src = mod.fieldSrcLoc(new_decl_index, .{
.index = field_i,
.range = .value,
}).lazy;
const other_field_src = mod.fieldSrcLoc(new_decl_index, .{ .index = other_index }).lazy;
const msg = msg: {
const msg = try sema.errMsg(block, value_src, "enum tag value {} already taken", .{tag_val.fmtValue(int_tag_ty, sema.mod)});
errdefer msg.destroy(gpa);
try sema.errNote(block, other_field_src, msg, "other occurrence here", .{});
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
} else if (any_values) {
const tag_val = if (last_tag_val) |val|
try sema.intAdd(val, try mod.intValue(int_tag_ty, 1), int_tag_ty)
else
try mod.intValue(int_tag_ty, 0);
last_tag_val = tag_val;
if (try incomplete_enum.addFieldValue(&mod.intern_pool, gpa, tag_val.ip_index)) |other_index| {
const field_src = mod.fieldSrcLoc(new_decl_index, .{ .index = field_i }).lazy;
const other_field_src = mod.fieldSrcLoc(new_decl_index, .{ .index = other_index }).lazy;
const msg = msg: {
const msg = try sema.errMsg(block, field_src, "enum tag value {} already taken", .{tag_val.fmtValue(int_tag_ty, sema.mod)});
errdefer msg.destroy(gpa);
try sema.errNote(block, other_field_src, msg, "other occurrence here", .{});
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
} else {
last_tag_val = try mod.intValue(int_tag_ty, field_i);
}
if (!(try sema.intFitsInType(last_tag_val.?, int_tag_ty, null))) {
const value_src = mod.fieldSrcLoc(new_decl_index, .{
.index = field_i,
.range = if (has_tag_value) .value else .name,
}).lazy;
const msg = try sema.errMsg(block, value_src, "enumeration value '{}' too large for type '{}'", .{
last_tag_val.?.fmtValue(int_tag_ty, mod), int_tag_ty.fmt(mod),
});
return sema.failWithOwnedErrorMsg(msg);
}
}
return decl_val;
}
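// For example (user-code sketch): in `enum(u8) { a = 5, b, c }`, `a` takes 5
// explicitly while `b` and `c` take 6 and 7 via the `last_tag_val` increment
// path above; `enum { a, b }` with no explicit values gets an inferred `u1`
// tag type from `log2_int_ceil`.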
fn zirUnionDecl(
sema: *Sema,
block: *Block,
extended: Zir.Inst.Extended.InstData,
inst: Zir.Inst.Index,
) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
const mod = sema.mod;
const gpa = sema.gpa;
const small = @bitCast(Zir.Inst.UnionDecl.Small, extended.small);
var extra_index: usize = extended.operand;
const src: LazySrcLoc = if (small.has_src_node) blk: {
const node_offset = @bitCast(i32, sema.code.extra[extra_index]);
extra_index += 1;
break :blk LazySrcLoc.nodeOffset(node_offset);
} else sema.src;
extra_index += @boolToInt(small.has_tag_type);
extra_index += @boolToInt(small.has_body_len);
extra_index += @boolToInt(small.has_fields_len);
const decls_len = if (small.has_decls_len) blk: {
const decls_len = sema.code.extra[extra_index];
extra_index += 1;
break :blk decls_len;
} else 0;
var new_decl_arena = std.heap.ArenaAllocator.init(gpa);
errdefer new_decl_arena.deinit();
// Because the new decl, namespace, and union type all reference each other,
// `undefined` placeholders are used here and then patched once the union
// type gains an InternPool index.
const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{
.ty = Type.type,
.val = undefined,
}, small.name_strategy, "union", inst);
const new_decl = mod.declPtr(new_decl_index);
new_decl.owns_tv = true;
errdefer mod.abortAnonDecl(new_decl_index);
const new_namespace_index = try mod.createNamespace(.{
.parent = block.namespace.toOptional(),
.ty = undefined,
.file_scope = block.getFileScope(mod),
});
const new_namespace = mod.namespacePtr(new_namespace_index);
errdefer mod.destroyNamespace(new_namespace_index);
const union_index = try mod.createUnion(.{
.owner_decl = new_decl_index,
.tag_ty = Type.null,
.fields = .{},
.zir_index = inst,
.layout = small.layout,
.status = .none,
.namespace = new_namespace_index,
});
errdefer mod.destroyUnion(union_index);
const union_ty = try mod.intern_pool.get(gpa, .{ .union_type = .{
.index = union_index,
.runtime_tag = if (small.has_tag_type or small.auto_enum_tag)
.tagged
else if (small.layout != .Auto)
.none
else switch (block.sema.mod.optimizeMode()) {
.Debug, .ReleaseSafe => .safety,
.ReleaseFast, .ReleaseSmall => .none,
},
} });
errdefer mod.intern_pool.remove(union_ty);
new_decl.val = union_ty.toValue();
new_namespace.ty = union_ty.toType();
_ = try mod.scanNamespace(new_namespace_index, extra_index, decls_len, new_decl);
try new_decl.finalizeNewArena(&new_decl_arena);
return sema.analyzeDeclVal(block, src, new_decl_index);
}
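// Sketch of the `runtime_tag` selection above: `union(enum) {...}` and
// `union(SomeEnum) {...}` become `.tagged`; `extern union` and `packed union`
// become `.none`; a bare `union {...}` gets `.safety` in Debug and
// ReleaseSafe builds and `.none` in ReleaseFast and ReleaseSmall.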
fn zirOpaqueDecl(
sema: *Sema,
block: *Block,
extended: Zir.Inst.Extended.InstData,
inst: Zir.Inst.Index,
) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
const mod = sema.mod;
const gpa = sema.gpa;
const small = @bitCast(Zir.Inst.OpaqueDecl.Small, extended.small);
var extra_index: usize = extended.operand;
const src: LazySrcLoc = if (small.has_src_node) blk: {
const node_offset = @bitCast(i32, sema.code.extra[extra_index]);
extra_index += 1;
break :blk LazySrcLoc.nodeOffset(node_offset);
} else sema.src;
const decls_len = if (small.has_decls_len) blk: {
const decls_len = sema.code.extra[extra_index];
extra_index += 1;
break :blk decls_len;
} else 0;
var new_decl_arena = std.heap.ArenaAllocator.init(gpa);
errdefer new_decl_arena.deinit();
// Because the new decl, namespace, and opaque type all reference each other,
// `undefined` placeholders are used in two places and then patched once the
// opaque type gains an InternPool index.
const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{
.ty = Type.type,
.val = undefined,
}, small.name_strategy, "opaque", inst);
const new_decl = mod.declPtr(new_decl_index);
new_decl.owns_tv = true;
errdefer mod.abortAnonDecl(new_decl_index);
const new_namespace_index = try mod.createNamespace(.{
.parent = block.namespace.toOptional(),
.ty = undefined,
.file_scope = block.getFileScope(mod),
});
const new_namespace = mod.namespacePtr(new_namespace_index);
errdefer mod.destroyNamespace(new_namespace_index);
const opaque_ty = try mod.intern(.{ .opaque_type = .{
.decl = new_decl_index,
.namespace = new_namespace_index,
} });
errdefer mod.intern_pool.remove(opaque_ty);
new_decl.val = opaque_ty.toValue();
new_namespace.ty = opaque_ty.toType();
extra_index = try mod.scanNamespace(new_namespace_index, extra_index, decls_len, new_decl);
try new_decl.finalizeNewArena(&new_decl_arena);
return sema.analyzeDeclVal(block, src, new_decl_index);
}
fn zirErrorSetDecl(
sema: *Sema,
block: *Block,
inst: Zir.Inst.Index,
name_strategy: Zir.Inst.NameStrategy,
) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
const mod = sema.mod;
const gpa = sema.gpa;
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
const extra = sema.code.extraData(Zir.Inst.ErrorSetDecl, inst_data.payload_index);
var names: Module.Fn.InferredErrorSet.NameMap = .{};
try names.ensureUnusedCapacity(sema.arena, extra.data.fields_len);
var extra_index = @intCast(u32, extra.end);
const extra_index_end = extra_index + (extra.data.fields_len * 2);
while (extra_index < extra_index_end) : (extra_index += 2) { // +2 to skip over doc_string
const str_index = sema.code.extra[extra_index];
const name = sema.code.nullTerminatedString(str_index);
const name_ip = try mod.intern_pool.getOrPutString(gpa, name);
const result = names.getOrPutAssumeCapacity(name_ip);
assert(!result.found_existing); // verified in AstGen
}
const error_set_ty = try mod.errorSetFromUnsortedNames(names.keys());
const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{
.ty = Type.type,
.val = error_set_ty.toValue(),
}, name_strategy, "error", inst);
const new_decl = mod.declPtr(new_decl_index);
new_decl.owns_tv = true;
errdefer mod.abortAnonDecl(new_decl_index);
return sema.analyzeDeclVal(block, src, new_decl_index);
}
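// E.g. `error{ OutOfMemory, FileNotFound }` contributes two interned names to
// `names`; a duplicate such as `error{ A, A }` is already rejected during
// AstGen, which is what the `!found_existing` assertion above relies on.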
fn zirRetPtr(sema: *Sema, block: *Block) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
if (block.is_comptime or try sema.typeRequiresComptime(sema.fn_ret_ty)) {
const fn_ret_ty = try sema.resolveTypeFields(sema.fn_ret_ty);
return sema.analyzeComptimeAlloc(block, fn_ret_ty, 0);
}
const target = sema.mod.getTarget();
const ptr_type = try Type.ptr(sema.arena, sema.mod, .{
.pointee_type = sema.fn_ret_ty,
.@"addrspace" = target_util.defaultAddressSpace(target, .local),
});
if (block.inlining != null) {
// We are inlining a function call; this should be emitted as an alloc, not a ret_ptr.
// TODO when functions gain result location support, the inlining struct in
// Block should contain the return pointer, and we would pass that through here.
return block.addTy(.alloc, ptr_type);
}
return block.addTy(.ret_ptr, ptr_type);
}
fn zirRef(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
const inst_data = sema.code.instructions.items(.data)[inst].un_tok;
const operand = try sema.resolveInst(inst_data.operand);
return sema.analyzeRef(block, inst_data.src(), operand);
}
fn zirEnsureResultUsed(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
const tracy = trace(@src());
defer tracy.end();
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const operand = try sema.resolveInst(inst_data.operand);
const src = inst_data.src();
return sema.ensureResultUsed(block, sema.typeOf(operand), src);
}
fn ensureResultUsed(
sema: *Sema,
block: *Block,
ty: Type,
src: LazySrcLoc,
) CompileError!void {
const mod = sema.mod;
switch (ty.zigTypeTag(mod)) {
.Void, .NoReturn => return,
.ErrorSet, .ErrorUnion => {
const msg = msg: {
const msg = try sema.errMsg(block, src, "error is ignored", .{});
errdefer msg.destroy(sema.gpa);
try sema.errNote(block, src, msg, "consider using 'try', 'catch', or 'if'", .{});
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
},
else => {
const msg = msg: {
const msg = try sema.errMsg(block, src, "value of type '{}' ignored", .{ty.fmt(sema.mod)});
errdefer msg.destroy(sema.gpa);
try sema.errNote(block, src, msg, "all non-void values must be used", .{});
try sema.errNote(block, src, msg, "this error can be suppressed by assigning the value to '_'", .{});
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
},
}
}
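// For example, a statement `foo();` where `foo` returns `u32` produces
// "value of type 'u32' ignored", while `_ = foo();` is accepted; a `void` or
// `noreturn` result always passes.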
fn zirEnsureResultNonError(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
const tracy = trace(@src());
defer tracy.end();
const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const operand = try sema.resolveInst(inst_data.operand);
const src = inst_data.src();
const operand_ty = sema.typeOf(operand);
switch (operand_ty.zigTypeTag(mod)) {
.ErrorSet, .ErrorUnion => {
const msg = msg: {
const msg = try sema.errMsg(block, src, "error is discarded", .{});
errdefer msg.destroy(sema.gpa);
try sema.errNote(block, src, msg, "consider using 'try', 'catch', or 'if'", .{});
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
},
else => return,
}
}
fn zirEnsureErrUnionPayloadVoid(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
const tracy = trace(@src());
defer tracy.end();
const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const src = inst_data.src();
const operand = try sema.resolveInst(inst_data.operand);
const operand_ty = sema.typeOf(operand);
const err_union_ty = if (operand_ty.zigTypeTag(mod) == .Pointer)
operand_ty.childType(mod)
else
operand_ty;
if (err_union_ty.zigTypeTag(mod) != .ErrorUnion) return;
const payload_ty = err_union_ty.errorUnionPayload(mod).zigTypeTag(mod);
if (payload_ty != .Void and payload_ty != .NoReturn) {
const msg = msg: {
const msg = try sema.errMsg(block, src, "error union payload is ignored", .{});
errdefer msg.destroy(sema.gpa);
try sema.errNote(block, src, msg, "payload value can be explicitly ignored with '|_|'", .{});
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
}
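// For example (user-code sketch): `if (mayFail()) {} else |err| {}` where
// `mayFail` returns `anyerror!u32` silently drops the `u32` payload and is
// rejected here; `if (mayFail()) |_| {} else |err| {}` is accepted.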
fn zirIndexablePtrLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const src = inst_data.src();
const object = try sema.resolveInst(inst_data.operand);
return indexablePtrLen(sema, block, src, object);
}
fn indexablePtrLen(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
object: Air.Inst.Ref,
) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const object_ty = sema.typeOf(object);
const is_pointer_to = object_ty.isSinglePointer(mod);
const indexable_ty = if (is_pointer_to) object_ty.childType(mod) else object_ty;
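// e.g. a `*[4]u8` operand is looked through one level, so indexability is
// checked on `[4]u8` and `len` is taken through the pointer.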
try checkIndexable(sema, block, src, indexable_ty);
return sema.fieldVal(block, src, object, "len", src);
}
fn indexablePtrLenOrNone(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
operand: Air.Inst.Ref,
) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const operand_ty = sema.typeOf(operand);
try checkMemOperand(sema, block, src, operand_ty);
if (operand_ty.ptrSize(mod) == .Many) return .none;
return sema.fieldVal(block, src, operand, "len", src);
}
fn zirAllocExtended(
sema: *Sema,
block: *Block,
extended: Zir.Inst.Extended.InstData,
) CompileError!Air.Inst.Ref {
const extra = sema.code.extraData(Zir.Inst.AllocExtended, extended.operand);
const ty_src: LazySrcLoc = .{ .node_offset_var_decl_ty = extra.data.src_node };
const align_src: LazySrcLoc = .{ .node_offset_var_decl_align = extra.data.src_node };
const small = @bitCast(Zir.Inst.AllocExtended.Small, extended.small);
var extra_index: usize = extra.end;
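// Trailing operands in `extra`, gated by the `small` flags and read in this
// order: the type ref (if has_type), then the align ref (if has_align).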
const var_ty: Type = if (small.has_type) blk: {
const type_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]);
extra_index += 1;
break :blk try sema.resolveType(block, ty_src, type_ref);
} else undefined;
const alignment: u32 = if (small.has_align) blk: {
const align_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]);
extra_index += 1;
const alignment = try sema.resolveAlign(block, align_src, align_ref);
break :blk alignment;
} else 0;
const inferred_alloc_ty = if (small.is_const)
Type.initTag(.inferred_alloc_const)
else
Type.initTag(.inferred_alloc_mut);
if (block.is_comptime or small.is_comptime) {
if (small.has_type) {
return sema.analyzeComptimeAlloc(block, var_ty, alignment);
} else {
return sema.addConstant(
inferred_alloc_ty,
try Value.Tag.inferred_alloc_comptime.create(sema.arena, .{
.decl_index = undefined,
.alignment = alignment,
}),
);
}
}
if (small.has_type) {
if (!small.is_const) {
try sema.validateVarType(block, ty_src, var_ty, false);
}
const target = sema.mod.getTarget();
try sema.resolveTypeLayout(var_ty);
const ptr_type = try Type.ptr(sema.arena, sema.mod, .{
.pointee_type = var_ty,
.@"align" = alignment,
.@"addrspace" = target_util.defaultAddressSpace(target, .local),
});
return block.addTy(.alloc, ptr_type);
}
// `Sema.addConstant` does not add the instruction to the block because it is
// not needed in the case of constant values. However here, we plan to "downgrade"
// to a normal instruction when we hit `resolve_inferred_alloc`. So we append
// to the block even though it is currently a `.constant`.
const result = try sema.addConstant(
inferred_alloc_ty,
try Value.Tag.inferred_alloc.create(sema.arena, .{ .alignment = alignment }),
);
try block.instructions.append(sema.gpa, Air.refToIndex(result).?);
try sema.unresolved_inferred_allocs.putNoClobber(sema.gpa, Air.refToIndex(result).?, {});
return result;
}
fn zirAllocComptime(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const ty_src: LazySrcLoc = .{ .node_offset_var_decl_ty = inst_data.src_node };
const var_ty = try sema.resolveType(block, ty_src, inst_data.operand);
return sema.analyzeComptimeAlloc(block, var_ty, 0);
}
fn zirMakePtrConst(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const alloc = try sema.resolveInst(inst_data.operand);
const alloc_ty = sema.typeOf(alloc);
var ptr_info = alloc_ty.ptrInfo(mod);
const elem_ty = ptr_info.pointee_type;
// Detect if all stores to an `.alloc` were comptime-known.
ct: {
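// The expected tail of the block, ignoring dbg instructions, is:
//   %a = alloc(...)
//   %b = store(%a, %c)
// If %c is comptime-known, the alloc and store are dropped and replaced by a
// reference to an anonymous Decl holding the value.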
var search_index: usize = block.instructions.items.len;
const air_tags = sema.air_instructions.items(.tag);
const air_datas = sema.air_instructions.items(.data);
const store_inst = while (true) {
if (search_index == 0) break :ct;
search_index -= 1;
const candidate = block.instructions.items[search_index];
switch (air_tags[candidate]) {
.dbg_stmt, .dbg_block_begin, .dbg_block_end => continue,
.store, .store_safe => break candidate,
else => break :ct,
}
};
while (true) {
if (search_index == 0) break :ct;
search_index -= 1;
const candidate = block.instructions.items[search_index];
switch (air_tags[candidate]) {
.dbg_stmt, .dbg_block_begin, .dbg_block_end => continue,
.alloc => {
if (Air.indexToRef(candidate) != alloc) break :ct;
break;
},
else => break :ct,
}
}
const store_op = air_datas[store_inst].bin_op;
const store_val = (try sema.resolveMaybeUndefVal(store_op.rhs)) orelse break :ct;
if (store_op.lhs != alloc) break :ct;
// Remove all the unnecessary runtime instructions.
block.instructions.shrinkRetainingCapacity(search_index);
var anon_decl = try block.startAnonDecl();
defer anon_decl.deinit();
return sema.analyzeDeclRef(try anon_decl.finish(
elem_ty,
try store_val.copy(anon_decl.arena()),
ptr_info.@"align",
));
}
return sema.makePtrConst(block, alloc);
}
fn makePtrConst(sema: *Sema, block: *Block, alloc: Air.Inst.Ref) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const alloc_ty = sema.typeOf(alloc);
var ptr_info = alloc_ty.ptrInfo(mod);
ptr_info.mutable = false;
const const_ptr_ty = try Type.ptr(sema.arena, sema.mod, ptr_info);
// Detect if a comptime value simply needs to have its type changed.
if (try sema.resolveMaybeUndefVal(alloc)) |val| {
return sema.addConstant(const_ptr_ty, val);
}
return block.addBitCast(const_ptr_ty, alloc);
}
fn zirAllocInferredComptime(
sema: *Sema,
inst: Zir.Inst.Index,
inferred_alloc_ty: Type,
) CompileError!Air.Inst.Ref {
const src_node = sema.code.instructions.items(.data)[inst].node;
const src = LazySrcLoc.nodeOffset(src_node);
sema.src = src;
return sema.addConstant(
inferred_alloc_ty,
try Value.Tag.inferred_alloc_comptime.create(sema.arena, .{
.decl_index = undefined,
.alignment = 0,
}),
);
}
fn zirAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const ty_src: LazySrcLoc = .{ .node_offset_var_decl_ty = inst_data.src_node };
const var_ty = try sema.resolveType(block, ty_src, inst_data.operand);
if (block.is_comptime) {
return sema.analyzeComptimeAlloc(block, var_ty, 0);
}
const target = sema.mod.getTarget();
const ptr_type = try Type.ptr(sema.arena, sema.mod, .{
.pointee_type = var_ty,
.@"addrspace" = target_util.defaultAddressSpace(target, .local),
});
try sema.queueFullTypeResolution(var_ty);
return block.addTy(.alloc, ptr_type);
}
fn zirAllocMut(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const ty_src: LazySrcLoc = .{ .node_offset_var_decl_ty = inst_data.src_node };
const var_ty = try sema.resolveType(block, ty_src, inst_data.operand);
if (block.is_comptime) {
return sema.analyzeComptimeAlloc(block, var_ty, 0);
}
try sema.validateVarType(block, ty_src, var_ty, false);
const target = sema.mod.getTarget();
const ptr_type = try Type.ptr(sema.arena, sema.mod, .{
.pointee_type = var_ty,
.@"addrspace" = target_util.defaultAddressSpace(target, .local),
});
try sema.queueFullTypeResolution(var_ty);
return block.addTy(.alloc, ptr_type);
}
fn zirAllocInferred(
sema: *Sema,
block: *Block,
inst: Zir.Inst.Index,
inferred_alloc_ty: Type,
) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
const src_node = sema.code.instructions.items(.data)[inst].node;
const src = LazySrcLoc.nodeOffset(src_node);
sema.src = src;
if (block.is_comptime) {
return sema.addConstant(
inferred_alloc_ty,
try Value.Tag.inferred_alloc_comptime.create(sema.arena, .{
.decl_index = undefined,
.alignment = 0,
}),
);
}
// `Sema.addConstant` does not add the instruction to the block because it is
// not needed in the case of constant values. However here, we plan to "downgrade"
// to a normal instruction when we hit `resolve_inferred_alloc`. So we append
// to the block even though it is currently a `.constant`.
const result = try sema.addConstant(
inferred_alloc_ty,
try Value.Tag.inferred_alloc.create(sema.arena, .{ .alignment = 0 }),
);
try block.instructions.append(sema.gpa, Air.refToIndex(result).?);
try sema.unresolved_inferred_allocs.putNoClobber(sema.gpa, Air.refToIndex(result).?, {});
return result;
}
fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
const tracy = trace(@src());
defer tracy.end();
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const src = inst_data.src();
const ty_src: LazySrcLoc = .{ .node_offset_var_decl_ty = inst_data.src_node };
const ptr = try sema.resolveInst(inst_data.operand);
const ptr_inst = Air.refToIndex(ptr).?;
assert(sema.air_instructions.items(.tag)[ptr_inst] == .constant);
const value_index = sema.air_instructions.items(.data)[ptr_inst].ty_pl.payload;
const ptr_val = sema.air_values.items[value_index];
const var_is_mut = switch (sema.typeOf(ptr).tag()) {
.inferred_alloc_const => false,
.inferred_alloc_mut => true,
else => unreachable,
};
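// `inferred_alloc_const` corresponds to `const x = ...;` and
// `inferred_alloc_mut` to `var x = ...;`. Only the const case below attempts
// to promote a comptime-known value into an anonymous Decl.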
const target = sema.mod.getTarget();
switch (ptr_val.tag()) {
.inferred_alloc_comptime => {
const iac = ptr_val.castTag(.inferred_alloc_comptime).?;
const decl_index = iac.data.decl_index;
try sema.mod.declareDeclDependency(sema.owner_decl_index, decl_index);
const decl = sema.mod.declPtr(decl_index);
const final_elem_ty = decl.ty;
const final_ptr_ty = try Type.ptr(sema.arena, sema.mod, .{
.pointee_type = final_elem_ty,
.mutable = true,
.@"align" = iac.data.alignment,
.@"addrspace" = target_util.defaultAddressSpace(target, .local),
});
const final_ptr_ty_inst = try sema.addType(final_ptr_ty);
sema.air_instructions.items(.data)[ptr_inst].ty_pl.ty = final_ptr_ty_inst;
try sema.maybeQueueFuncBodyAnalysis(decl_index);
if (var_is_mut) {
sema.air_values.items[value_index] = try Value.Tag.decl_ref_mut.create(sema.arena, .{
.decl_index = decl_index,
.runtime_index = block.runtime_index,
});
} else {
sema.air_values.items[value_index] = try Value.Tag.decl_ref.create(sema.arena, decl_index);
}
},
.inferred_alloc => {
assert(sema.unresolved_inferred_allocs.remove(ptr_inst));
const inferred_alloc = ptr_val.castTag(.inferred_alloc).?;
const peer_inst_list = inferred_alloc.data.prongs.items(.stored_inst);
const final_elem_ty = try sema.resolvePeerTypes(block, ty_src, peer_inst_list, .none);
const final_ptr_ty = try Type.ptr(sema.arena, sema.mod, .{
.pointee_type = final_elem_ty,
.mutable = true,
.@"align" = inferred_alloc.data.alignment,
.@"addrspace" = target_util.defaultAddressSpace(target, .local),
});
if (var_is_mut) {
try sema.validateVarType(block, ty_src, final_elem_ty, false);
} else ct: {
// Detect if the value is comptime-known. If it is, the
// last 3 AIR instructions of the block will look like this:
//
// %a = constant
// %b = bitcast(%a)
// %c = store(%b, %d)
//
// If `%d` is comptime-known, then we want to store the value
// inside an anonymous Decl and then erase these three AIR
// instructions from the block, replacing the inst_map entry
// corresponding to the ZIR alloc instruction with a constant
// decl_ref pointing at our new Decl.
// dbg_stmt instructions may be interspersed into this pattern
// and must be skipped over.
if (block.instructions.items.len < 3) break :ct;
var search_index: usize = block.instructions.items.len;
const air_tags = sema.air_instructions.items(.tag);
const air_datas = sema.air_instructions.items(.data);
const store_inst = while (true) {
if (search_index == 0) break :ct;
search_index -= 1;
const candidate = block.instructions.items[search_index];
switch (air_tags[candidate]) {
.dbg_stmt, .dbg_block_begin, .dbg_block_end => continue,
.store, .store_safe => break candidate,
else => break :ct,
}
};
const bitcast_inst = while (true) {
if (search_index == 0) break :ct;
search_index -= 1;
const candidate = block.instructions.items[search_index];
switch (air_tags[candidate]) {
.dbg_stmt, .dbg_block_begin, .dbg_block_end => continue,
.bitcast => break candidate,
else => break :ct,
}
};
const const_inst = while (true) {
if (search_index == 0) break :ct;
search_index -= 1;
const candidate = block.instructions.items[search_index];
switch (air_tags[candidate]) {
.dbg_stmt, .dbg_block_begin, .dbg_block_end => continue,
.constant => break candidate,
else => break :ct,
}
};
const store_op = air_datas[store_inst].bin_op;
const store_val = (try sema.resolveMaybeUndefVal(store_op.rhs)) orelse break :ct;
if (store_op.lhs != Air.indexToRef(bitcast_inst)) break :ct;
if (air_datas[bitcast_inst].ty_op.operand != Air.indexToRef(const_inst)) break :ct;
const new_decl_index = d: {
var anon_decl = try block.startAnonDecl();
defer anon_decl.deinit();
const new_decl_index = try anon_decl.finish(
final_elem_ty,
try store_val.copy(anon_decl.arena()),
inferred_alloc.data.alignment,
);
break :d new_decl_index;
};
try sema.mod.declareDeclDependency(sema.owner_decl_index, new_decl_index);
// Even though we reuse the constant instruction, we still remove it from the
// block so that codegen does not see it.
block.instructions.shrinkRetainingCapacity(search_index);
try sema.maybeQueueFuncBodyAnalysis(new_decl_index);
sema.air_values.items[value_index] = try Value.Tag.decl_ref.create(sema.arena, new_decl_index);
// If the bitcast's ty ref needs to be made const, the make_ptr_const
// ZIR instruction handles that later, so we can just use the ty ref here.
air_datas[ptr_inst].ty_pl.ty = air_datas[bitcast_inst].ty_op.ty;
// Unless the block is comptime, `alloc_inferred` always produces
// a runtime constant. The final inferred type needs to be
// fully resolved so it can be lowered in codegen.
try sema.resolveTypeFully(final_elem_ty);
return;
}
try sema.queueFullTypeResolution(final_elem_ty);
// Change it to a normal alloc.
sema.air_instructions.set(ptr_inst, .{
.tag = .alloc,
.data = .{ .ty = final_ptr_ty },
});
// Now we need to go back over all the coerce_result_ptr instructions, which
// previously inserted a bitcast as a placeholder, and do the logic as if
// the new result ptr type was available.
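// (Illustrative) e.g. `var x = if (c) @as(u8, 1) else @as(u16, 2);` stores a
// `u8` and a `u16` through placeholders; once the peer type `u16` is known,
// each placeholder is rewritten below with a proper coercion or store.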
const placeholders = inferred_alloc.data.prongs.items(.placeholder);
const gpa = sema.gpa;
var trash_block = block.makeSubBlock();
trash_block.is_comptime = false;
defer trash_block.instructions.deinit(gpa);
const mut_final_ptr_ty = try Type.ptr(sema.arena, sema.mod, .{
.pointee_type = final_elem_ty,
.mutable = true,
.@"align" = inferred_alloc.data.alignment,
.@"addrspace" = target_util.defaultAddressSpace(target, .local),
});
const dummy_ptr = try trash_block.addTy(.alloc, mut_final_ptr_ty);
const empty_trash_count = trash_block.instructions.items.len;
for (peer_inst_list, placeholders) |peer_inst, placeholder_inst| {
const sub_ptr_ty = sema.typeOf(Air.indexToRef(placeholder_inst));
if (mut_final_ptr_ty.eql(sub_ptr_ty, sema.mod)) {
// New result location type is the same as the old one; nothing
// to do here.
continue;
}
var replacement_block = block.makeSubBlock();
defer replacement_block.instructions.deinit(gpa);
const result = switch (sema.air_instructions.items(.tag)[placeholder_inst]) {
.bitcast => result: {
trash_block.instructions.shrinkRetainingCapacity(empty_trash_count);
const sub_ptr = try sema.coerceResultPtr(&replacement_block, src, ptr, dummy_ptr, peer_inst, &trash_block);
assert(replacement_block.instructions.items.len > 0);
break :result sub_ptr;
},
.store, .store_safe => result: {
const bin_op = sema.air_instructions.items(.data)[placeholder_inst].bin_op;
try sema.storePtr2(&replacement_block, src, bin_op.lhs, src, bin_op.rhs, src, .bitcast);
break :result .void_value;
},
else => unreachable,
};
// If only one instruction is produced then we can replace the bitcast
// placeholder instruction with this instruction; no need for an entire block.
if (replacement_block.instructions.items.len == 1) {
const only_inst = replacement_block.instructions.items[0];
sema.air_instructions.set(placeholder_inst, sema.air_instructions.get(only_inst));
continue;
}
// Here we replace the placeholder bitcast instruction with a block
// that does the coerce_result_ptr logic.
_ = try replacement_block.addBr(placeholder_inst, result);
const ty_inst = if (result == .void_value)
.void_type
else
sema.air_instructions.items(.data)[placeholder_inst].ty_op.ty;
try sema.air_extra.ensureUnusedCapacity(
gpa,
@typeInfo(Air.Block).Struct.fields.len + replacement_block.instructions.items.len,
);
sema.air_instructions.set(placeholder_inst, .{
.tag = .block,
.data = .{ .ty_pl = .{
.ty = ty_inst,
.payload = sema.addExtraAssumeCapacity(Air.Block{
.body_len = @intCast(u32, replacement_block.instructions.items.len),
}),
} },
});
sema.air_extra.appendSliceAssumeCapacity(replacement_block.instructions.items);
}
},
else => unreachable,
}
}
fn zirArrayBasePtr(
sema: *Sema,
block: *Block,
inst: Zir.Inst.Index,
) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const src = inst_data.src();
const start_ptr = try sema.resolveInst(inst_data.operand);
var base_ptr = start_ptr;
while (true) switch (sema.typeOf(base_ptr).childType(mod).zigTypeTag(mod)) {
.ErrorUnion => base_ptr = try sema.analyzeErrUnionPayloadPtr(block, src, base_ptr, false, true),
.Optional => base_ptr = try sema.analyzeOptionalPayloadPtr(block, src, base_ptr, false, true),
else => break,
};
const elem_ty = sema.typeOf(base_ptr).childType(mod);
switch (elem_ty.zigTypeTag(mod)) {
.Array, .Vector => return base_ptr,
.Struct => if (elem_ty.isTuple(mod)) {
// TODO validate element count
return base_ptr;
},
else => {},
}
return sema.failWithArrayInitNotSupported(block, src, sema.typeOf(start_ptr).childType(mod));
}
fn zirFieldBasePtr(
sema: *Sema,
block: *Block,
inst: Zir.Inst.Index,
) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const src = inst_data.src();
const start_ptr = try sema.resolveInst(inst_data.operand);
var base_ptr = start_ptr;
while (true) switch (sema.typeOf(base_ptr).childType(mod).zigTypeTag(mod)) {
.ErrorUnion => base_ptr = try sema.analyzeErrUnionPayloadPtr(block, src, base_ptr, false, true),
.Optional => base_ptr = try sema.analyzeOptionalPayloadPtr(block, src, base_ptr, false, true),
else => break,
};
const elem_ty = sema.typeOf(base_ptr).childType(mod);
switch (elem_ty.zigTypeTag(mod)) {
.Struct, .Union => return base_ptr,
else => {},
}
return sema.failWithStructInitNotSupported(block, src, sema.typeOf(start_ptr).childType(mod));
}
fn zirForLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const gpa = sema.gpa;
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const extra = sema.code.extraData(Zir.Inst.MultiOp, inst_data.payload_index);
const args = sema.code.refSlice(extra.end, extra.data.operands_len);
const src = inst_data.src();
var len: Air.Inst.Ref = .none;
var len_val: ?Value = null;
var len_idx: u32 = undefined;
var any_runtime = false;
const runtime_arg_lens = try gpa.alloc(Air.Inst.Ref, args.len);
defer gpa.free(runtime_arg_lens);
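// A `.none` operand denotes an unbounded range (e.g. `0..`): it contributes
// no length and is skipped in both passes below.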
// First pass to look for comptime values.
for (args, 0..) |zir_arg, i_usize| {
const i = @intCast(u32, i_usize);
runtime_arg_lens[i] = .none;
if (zir_arg == .none) continue;
const object = try sema.resolveInst(zir_arg);
const object_ty = sema.typeOf(object);
// Each arg is either an indexable or a range; for a range, the length
// is passed directly as an integer.
const is_int = switch (object_ty.zigTypeTag(mod)) {
.Int, .ComptimeInt => true,
else => false,
};
const arg_src: LazySrcLoc = .{ .for_input = .{
.for_node_offset = inst_data.src_node,
.input_index = i,
} };
const arg_len_uncoerced = if (is_int) object else l: {
if (!object_ty.isIndexable(mod)) {
// Instead of using checkIndexable we customize this error.
const msg = msg: {
const msg = try sema.errMsg(block, arg_src, "type '{}' is not indexable and not a range", .{object_ty.fmt(sema.mod)});
errdefer msg.destroy(sema.gpa);
try sema.errNote(block, arg_src, msg, "for loop operand must be a range, array, slice, tuple, or vector", .{});
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
if (!object_ty.indexableHasLen(mod)) continue;
break :l try sema.fieldVal(block, arg_src, object, "len", arg_src);
};
const arg_len = try sema.coerce(block, Type.usize, arg_len_uncoerced, arg_src);
if (len == .none) {
len = arg_len;
len_idx = i;
}
if (try sema.resolveDefinedValue(block, src, arg_len)) |arg_val| {
if (len_val) |v| {
if (!(try sema.valuesEqual(arg_val, v, Type.usize))) {
const msg = msg: {
const msg = try sema.errMsg(block, src, "non-matching for loop lengths", .{});
errdefer msg.destroy(gpa);
const a_src: LazySrcLoc = .{ .for_input = .{
.for_node_offset = inst_data.src_node,
.input_index = len_idx,
} };
try sema.errNote(block, a_src, msg, "length {} here", .{
v.fmtValue(Type.usize, sema.mod),
});
try sema.errNote(block, arg_src, msg, "length {} here", .{
arg_val.fmtValue(Type.usize, sema.mod),
});
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
} else {
len = arg_len;
len_val = arg_val;
len_idx = i;
}
continue;
}
runtime_arg_lens[i] = arg_len;
any_runtime = true;
}
if (len == .none) {
const msg = msg: {
const msg = try sema.errMsg(block, src, "unbounded for loop", .{});
errdefer msg.destroy(gpa);
for (args, 0..) |zir_arg, i_usize| {
const i = @intCast(u32, i_usize);
if (zir_arg == .none) continue;
const object = try sema.resolveInst(zir_arg);
const object_ty = sema.typeOf(object);
// Each arg is either an indexable or a range; for a range, the length
// is passed directly as an integer.
switch (object_ty.zigTypeTag(mod)) {
.Int, .ComptimeInt => continue,
else => {},
}
const arg_src: LazySrcLoc = .{ .for_input = .{
.for_node_offset = inst_data.src_node,
.input_index = i,
} };
try sema.errNote(block, arg_src, msg, "type '{}' has no upper bound", .{
object_ty.fmt(sema.mod),
});
}
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
// Now for the runtime checks.
if (any_runtime and block.wantSafety()) {
for (runtime_arg_lens, 0..) |arg_len, i| {
if (arg_len == .none) continue;
if (i == len_idx) continue;
const ok = try block.addBinOp(.cmp_eq, len, arg_len);
try sema.addSafetyCheck(block, ok, .for_len_mismatch);
}
}
return len;
}
fn validateArrayInitTy(
sema: *Sema,
block: *Block,
inst: Zir.Inst.Index,
) CompileError!void {
const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
const ty_src: LazySrcLoc = .{ .node_offset_init_ty = inst_data.src_node };
const extra = sema.code.extraData(Zir.Inst.ArrayInit, inst_data.payload_index).data;
const ty = try sema.resolveType(block, ty_src, extra.ty);
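// e.g. `[3]u8{ 1, 2 }` is rejected below with
// "expected 3 array elements; found 2".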
switch (ty.zigTypeTag(mod)) {
.Array => {
const array_len = ty.arrayLen(mod);
if (extra.init_count != array_len) {
return sema.fail(block, src, "expected {d} array elements; found {d}", .{
array_len, extra.init_count,
});
}
return;
},
.Vector => {
const array_len = ty.arrayLen(mod);
if (extra.init_count != array_len) {
return sema.fail(block, src, "expected {d} vector elements; found {d}", .{
array_len, extra.init_count,
});
}
return;
},
.Struct => if (ty.isTuple(mod)) {
_ = try sema.resolveTypeFields(ty);
const array_len = ty.arrayLen(mod);
if (extra.init_count > array_len) {
return sema.fail(block, src, "expected at most {d} tuple fields; found {d}", .{
array_len, extra.init_count,
});
}
return;
},
else => {},
}
return sema.failWithArrayInitNotSupported(block, ty_src, ty);
}
fn validateStructInitTy(
sema: *Sema,
block: *Block,
inst: Zir.Inst.Index,
) CompileError!void {
const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const src = inst_data.src();
const ty = try sema.resolveType(block, src, inst_data.operand);
switch (ty.zigTypeTag(mod)) {
.Struct, .Union => return,
else => {},
}
return sema.failWithStructInitNotSupported(block, src, ty);
}
fn zirValidateStructInit(
sema: *Sema,
block: *Block,
inst: Zir.Inst.Index,
) CompileError!void {
const tracy = trace(@src());
defer tracy.end();
const mod = sema.mod;
const validate_inst = sema.code.instructions.items(.data)[inst].pl_node;
const init_src = validate_inst.src();
const validate_extra = sema.code.extraData(Zir.Inst.Block, validate_inst.payload_index);
const instrs = sema.code.extra[validate_extra.end..][0..validate_extra.data.body_len];
const field_ptr_data = sema.code.instructions.items(.data)[instrs[0]].pl_node;
const field_ptr_extra = sema.code.extraData(Zir.Inst.Field, field_ptr_data.payload_index).data;
const object_ptr = try sema.resolveInst(field_ptr_extra.lhs);
const agg_ty = sema.typeOf(object_ptr).childType(mod);
switch (agg_ty.zigTypeTag(mod)) {
.Struct => return sema.validateStructInit(
block,
agg_ty,
init_src,
instrs,
),
.Union => return sema.validateUnionInit(
block,
agg_ty,
init_src,
instrs,
object_ptr,
),
else => unreachable,
}
}
fn validateUnionInit(
sema: *Sema,
block: *Block,
union_ty: Type,
init_src: LazySrcLoc,
instrs: []const Zir.Inst.Index,
union_ptr: Air.Inst.Ref,
) CompileError!void {
const mod = sema.mod;
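// e.g. `U{ .x = 1, .y = 2 }` arrives here with two field_ptr instructions
// and is rejected below; a union can have only one active field.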
if (instrs.len != 1) {
const msg = msg: {
const msg = try sema.errMsg(
block,
init_src,
"cannot initialize multiple union fields at once; unions can only have one active field",
.{},
);
errdefer msg.destroy(sema.gpa);
for (instrs[1..]) |inst| {
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const inst_src: LazySrcLoc = .{ .node_offset_initializer = inst_data.src_node };
try sema.errNote(block, inst_src, msg, "additional initializer here", .{});
}
try sema.addDeclaredHereNote(msg, union_ty);
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
if (block.is_comptime and
(try sema.resolveDefinedValue(block, init_src, union_ptr)) != null)
{
// In this case, comptime machinery already did everything. No work to do here.
return;
}
const field_ptr = instrs[0];
const field_ptr_data = sema.code.instructions.items(.data)[field_ptr].pl_node;
const field_src: LazySrcLoc = .{ .node_offset_initializer = field_ptr_data.src_node };
const field_ptr_extra = sema.code.extraData(Zir.Inst.Field, field_ptr_data.payload_index).data;
const field_name = sema.code.nullTerminatedString(field_ptr_extra.field_name_start);
// Validate the field access but ignore the index since we want the tag enum field index.
_ = try sema.unionFieldIndex(block, union_ty, field_name, field_src);
const air_tags = sema.air_instructions.items(.tag);
const air_datas = sema.air_instructions.items(.data);
const field_ptr_air_ref = sema.inst_map.get(field_ptr).?;
const field_ptr_air_inst = Air.refToIndex(field_ptr_air_ref).?;
// Our task here is to determine if the union is comptime-known. If it is,
// we erase the runtime AIR instructions for initializing the union, and replace
// the mapping with the comptime value. Either way, we will need to populate the tag.
// We expect to see something like this in the current block AIR:
// %a = alloc(*const U)
// %b = bitcast(*U, %a)
// %c = field_ptr(..., %b)
// %e = store(%c, %d)
// If %d is a comptime operand, the union is comptime.
// If the union is comptime, we want `first_block_index`
// to point at %c so that the bitcast becomes the last instruction in the block.
//
// In the case of a comptime-known pointer to a union, the
// field_ptr instruction is missing, so we have to pattern-match
// based only on the store instructions.
// `first_block_index` needs to point to the `field_ptr` if it exists;
// the `store` otherwise.
//
// It's also possible for there to be no store instruction, in the case
// of nested `coerce_result_ptr` instructions. If we see the `field_ptr`
// but we have not found a `store`, treat as a runtime-known field.
var first_block_index = block.instructions.items.len;
var block_index = block.instructions.items.len - 1;
var init_val: ?Value = null;
var make_runtime = false;
while (block_index > 0) : (block_index -= 1) {
const store_inst = block.instructions.items[block_index];
if (store_inst == field_ptr_air_inst) break;
switch (air_tags[store_inst]) {
.store, .store_safe => {},
else => continue,
}
const bin_op = air_datas[store_inst].bin_op;
var lhs = bin_op.lhs;
if (Air.refToIndex(lhs)) |lhs_index| {
if (air_tags[lhs_index] == .bitcast) {
lhs = air_datas[lhs_index].ty_op.operand;
block_index -= 1;
}
}
if (lhs != field_ptr_air_ref) continue;
while (block_index > 0) : (block_index -= 1) {
const block_inst = block.instructions.items[block_index - 1];
if (air_tags[block_inst] != .dbg_stmt) break;
}
if (block_index > 0 and
field_ptr_air_inst == block.instructions.items[block_index - 1])
{
first_block_index = @min(first_block_index, block_index - 1);
} else {
first_block_index = @min(first_block_index, block_index);
}
init_val = try sema.resolveMaybeUndefValAllowVariablesMaybeRuntime(bin_op.rhs, &make_runtime);
break;
}
const tag_ty = union_ty.unionTagTypeHypothetical(mod);
const enum_field_index = @intCast(u32, tag_ty.enumFieldIndex(field_name, mod).?);
const tag_val = try mod.enumValueFieldIndex(tag_ty, enum_field_index);
if (init_val) |val| {
// Our task is to delete all the `field_ptr` and `store` instructions, and insert
// instead a single `store` to the result ptr with a comptime union value.
block.instructions.shrinkRetainingCapacity(first_block_index);
var union_val = try Value.Tag.@"union".create(sema.arena, .{
.tag = tag_val,
.val = val,
});
if (make_runtime) union_val = try Value.Tag.runtime_value.create(sema.arena, union_val);
const union_init = try sema.addConstant(union_ty, union_val);
try sema.storePtr2(block, init_src, union_ptr, init_src, union_init, init_src, .store);
return;
} else if (try sema.typeRequiresComptime(union_ty)) {
return sema.failWithNeededComptime(block, field_ptr_data.src(), "initializer of comptime only union must be comptime-known");
}
const new_tag = try sema.addConstant(tag_ty, tag_val);
_ = try block.addBinOp(.set_union_tag, union_ptr, new_tag);
}
fn validateStructInit(
sema: *Sema,
block: *Block,
struct_ty: Type,
init_src: LazySrcLoc,
instrs: []const Zir.Inst.Index,
) CompileError!void {
const mod = sema.mod;
const gpa = sema.gpa;
// Maps field index to the field_ptr ZIR instruction that initialized it.
const found_fields = try gpa.alloc(Zir.Inst.Index, struct_ty.structFieldCount(mod));
defer gpa.free(found_fields);
@memset(found_fields, 0);
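// 0 serves as the "not yet initialized" sentinel; ZIR instruction 0 can
// never be one of these field_ptr instructions, so the comparison below is
// unambiguous.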
var struct_ptr_zir_ref: Zir.Inst.Ref = undefined;
for (instrs) |field_ptr| {
const field_ptr_data = sema.code.instructions.items(.data)[field_ptr].pl_node;
const field_src: LazySrcLoc = .{ .node_offset_initializer = field_ptr_data.src_node };
const field_ptr_extra = sema.code.extraData(Zir.Inst.Field, field_ptr_data.payload_index).data;
struct_ptr_zir_ref = field_ptr_extra.lhs;
const field_name = sema.code.nullTerminatedString(field_ptr_extra.field_name_start);
const field_index = if (struct_ty.isTuple(mod))
try sema.tupleFieldIndex(block, struct_ty, field_name, field_src)
else
try sema.structFieldIndex(block, struct_ty, field_name, field_src);
if (found_fields[field_index] != 0) {
const other_field_ptr = found_fields[field_index];
const other_field_ptr_data = sema.code.instructions.items(.data)[other_field_ptr].pl_node;
const other_field_src: LazySrcLoc = .{ .node_offset_initializer = other_field_ptr_data.src_node };
const msg = msg: {
const msg = try sema.errMsg(block, field_src, "duplicate field", .{});
errdefer msg.destroy(gpa);
try sema.errNote(block, other_field_src, msg, "other field here", .{});
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
found_fields[field_index] = field_ptr;
}
var root_msg: ?*Module.ErrorMsg = null;
errdefer if (root_msg) |msg| msg.destroy(sema.gpa);
const struct_ptr = try sema.resolveInst(struct_ptr_zir_ref);
if (block.is_comptime and
(try sema.resolveDefinedValue(block, init_src, struct_ptr)) != null)
{
try sema.resolveStructLayout(struct_ty);
// In this case the only thing we need to do is evaluate the implicit
// store instructions for default field values, and report any missing fields.
// Avoid the cost of the extra machinery for detecting a comptime struct init value.
for (found_fields, 0..) |field_ptr, i| {
if (field_ptr != 0) continue;
const default_val = struct_ty.structFieldDefaultValue(i, mod);
if (default_val.ip_index == .unreachable_value) {
if (struct_ty.isTuple(mod)) {
const template = "missing tuple field with index {d}";
if (root_msg) |msg| {
try sema.errNote(block, init_src, msg, template, .{i});
} else {
root_msg = try sema.errMsg(block, init_src, template, .{i});
}
continue;
}
const field_name = struct_ty.structFieldName(i, mod);
const template = "missing struct field: {s}";
const args = .{field_name};
if (root_msg) |msg| {
try sema.errNote(block, init_src, msg, template, args);
} else {
root_msg = try sema.errMsg(block, init_src, template, args);
}
continue;
}
const field_src = init_src; // TODO better source location
const default_field_ptr = if (struct_ty.isTuple(mod))
try sema.tupleFieldPtr(block, init_src, struct_ptr, field_src, @intCast(u32, i), true)
else
try sema.structFieldPtrByIndex(block, init_src, struct_ptr, @intCast(u32, i), field_src, struct_ty, true);
const field_ty = sema.typeOf(default_field_ptr).childType(mod);
const init = try sema.addConstant(field_ty, default_val);
try sema.storePtr2(block, init_src, default_field_ptr, init_src, init, field_src, .store);
}
if (root_msg) |msg| {
if (mod.typeToStruct(struct_ty)) |struct_obj| {
const fqn = try struct_obj.getFullyQualifiedName(mod);
defer gpa.free(fqn);
try mod.errNoteNonLazy(
struct_obj.srcLoc(mod),
msg,
"struct '{s}' declared here",
.{fqn},
);
}
root_msg = null;
return sema.failWithOwnedErrorMsg(msg);
}
return;
}
var struct_is_comptime = true;
var first_block_index = block.instructions.items.len;
var make_runtime = false;
const require_comptime = try sema.typeRequiresComptime(struct_ty);
const air_tags = sema.air_instructions.items(.tag);
const air_datas = sema.air_instructions.items(.data);
// We collect the comptime field values in case the struct initialization
// ends up being comptime-known.
const field_values = try sema.arena.alloc(Value, struct_ty.structFieldCount(mod));
field: for (found_fields, 0..) |field_ptr, i| {
if (field_ptr != 0) {
// Determine whether the value stored to this pointer is comptime-known.
const field_ty = struct_ty.structFieldType(i, mod);
if (try sema.typeHasOnePossibleValue(field_ty)) |opv| {
field_values[i] = opv;
continue;
}
const field_ptr_air_ref = sema.inst_map.get(field_ptr).?;
const field_ptr_air_inst = Air.refToIndex(field_ptr_air_ref).?;
//std.debug.print("validateStructInit (field_ptr_air_inst=%{d}):\n", .{
// field_ptr_air_inst,
//});
//for (block.instructions.items) |item| {
// std.debug.print(" %{d} = {s}\n", .{item, @tagName(air_tags[item])});
//}
// We expect to see something like this in the current block AIR:
// %a = field_ptr(...)
// store(%a, %b)
// With an optional bitcast between the store and the field_ptr.
// If %b is a comptime operand, this field is comptime.
//
// However, in the case of a comptime-known pointer to a struct, the
// field_ptr instruction is missing, so we have to pattern-match
// based only on the store instructions.
// `first_block_index` needs to point to the `field_ptr` if it exists;
// the `store` otherwise.
//
// It's also possible for there to be no store instruction, in the case
// of nested `coerce_result_ptr` instructions. If we see the `field_ptr`
// but we have not found a `store`, treat as a runtime-known field.
// Possible performance enhancement: save the `block_index` between iterations
// of the for loop.
var block_index = block.instructions.items.len - 1;
while (block_index > 0) : (block_index -= 1) {
const store_inst = block.instructions.items[block_index];
if (store_inst == field_ptr_air_inst) {
struct_is_comptime = false;
continue :field;
}
switch (air_tags[store_inst]) {
.store, .store_safe => {},
else => continue,
}
const bin_op = air_datas[store_inst].bin_op;
var lhs = bin_op.lhs;
{
const lhs_index = Air.refToIndex(lhs) orelse continue;
if (air_tags[lhs_index] == .bitcast) {
lhs = air_datas[lhs_index].ty_op.operand;
block_index -= 1;
}
}
if (lhs != field_ptr_air_ref) continue;
while (block_index > 0) : (block_index -= 1) {
const block_inst = block.instructions.items[block_index - 1];
if (air_tags[block_inst] != .dbg_stmt) break;
}
if (block_index > 0 and
field_ptr_air_inst == block.instructions.items[block_index - 1])
{
first_block_index = @min(first_block_index, block_index - 1);
} else {
first_block_index = @min(first_block_index, block_index);
}
if (try sema.resolveMaybeUndefValAllowVariablesMaybeRuntime(bin_op.rhs, &make_runtime)) |val| {
field_values[i] = val;
} else if (require_comptime) {
const field_ptr_data = sema.code.instructions.items(.data)[field_ptr].pl_node;
return sema.failWithNeededComptime(block, field_ptr_data.src(), "initializer of comptime only struct must be comptime-known");
} else {
struct_is_comptime = false;
}
continue :field;
}
struct_is_comptime = false;
continue :field;
}
const default_val = struct_ty.structFieldDefaultValue(i, mod);
if (default_val.ip_index == .unreachable_value) {
if (struct_ty.isTuple(mod)) {
const template = "missing tuple field with index {d}";
if (root_msg) |msg| {
try sema.errNote(block, init_src, msg, template, .{i});
} else {
root_msg = try sema.errMsg(block, init_src, template, .{i});
}
continue;
}
const field_name = struct_ty.structFieldName(i, mod);
const template = "missing struct field: {s}";
const args = .{field_name};
if (root_msg) |msg| {
try sema.errNote(block, init_src, msg, template, args);
} else {
root_msg = try sema.errMsg(block, init_src, template, args);
}
continue;
}
field_values[i] = default_val;
}
if (root_msg) |msg| {
if (mod.typeToStruct(struct_ty)) |struct_obj| {
const fqn = try struct_obj.getFullyQualifiedName(sema.mod);
defer gpa.free(fqn);
try sema.mod.errNoteNonLazy(
struct_obj.srcLoc(sema.mod),
msg,
"struct '{s}' declared here",
.{fqn},
);
}
root_msg = null;
return sema.failWithOwnedErrorMsg(msg);
}
if (struct_is_comptime) {
// Our task is to delete all the `field_ptr` and `store` instructions, and insert
// instead a single `store` to the struct_ptr with a comptime struct value.
block.instructions.shrinkRetainingCapacity(first_block_index);
var struct_val = try Value.Tag.aggregate.create(sema.arena, field_values);
if (make_runtime) struct_val = try Value.Tag.runtime_value.create(sema.arena, struct_val);
const struct_init = try sema.addConstant(struct_ty, struct_val);
try sema.storePtr2(block, init_src, struct_ptr, init_src, struct_init, init_src, .store);
return;
}
try sema.resolveStructLayout(struct_ty);
// Our task is to insert `store` instructions for all the default field values.
for (found_fields, 0..) |field_ptr, i| {
if (field_ptr != 0) continue;
const field_src = init_src; // TODO better source location
const default_field_ptr = if (struct_ty.isTuple(mod))
try sema.tupleFieldPtr(block, init_src, struct_ptr, field_src, @intCast(u32, i), true)
else
try sema.structFieldPtrByIndex(block, init_src, struct_ptr, @intCast(u32, i), field_src, struct_ty, true);
const field_ty = sema.typeOf(default_field_ptr).childType(mod);
const init = try sema.addConstant(field_ty, field_values[i]);
try sema.storePtr2(block, init_src, default_field_ptr, init_src, init, field_src, .store);
}
}
fn zirValidateArrayInit(
sema: *Sema,
block: *Block,
inst: Zir.Inst.Index,
) CompileError!void {
const mod = sema.mod;
const validate_inst = sema.code.instructions.items(.data)[inst].pl_node;
const init_src = validate_inst.src();
const validate_extra = sema.code.extraData(Zir.Inst.Block, validate_inst.payload_index);
const instrs = sema.code.extra[validate_extra.end..][0..validate_extra.data.body_len];
const first_elem_ptr_data = sema.code.instructions.items(.data)[instrs[0]].pl_node;
const elem_ptr_extra = sema.code.extraData(Zir.Inst.ElemPtrImm, first_elem_ptr_data.payload_index).data;
const array_ptr = try sema.resolveInst(elem_ptr_extra.ptr);
const array_ty = sema.typeOf(array_ptr).childType(mod);
const array_len = array_ty.arrayLen(mod);
if (instrs.len != array_len) switch (array_ty.zigTypeTag(mod)) {
.Struct => {
var root_msg: ?*Module.ErrorMsg = null;
errdefer if (root_msg) |msg| msg.destroy(sema.gpa);
var i = instrs.len;
while (i < array_len) : (i += 1) {
const default_val = array_ty.structFieldDefaultValue(i, mod);
if (default_val.ip_index == .unreachable_value) {
const template = "missing tuple field with index {d}";
if (root_msg) |msg| {
try sema.errNote(block, init_src, msg, template, .{i});
} else {
root_msg = try sema.errMsg(block, init_src, template, .{i});
}
}
}
if (root_msg) |msg| {
root_msg = null;
return sema.failWithOwnedErrorMsg(msg);
}
},
.Array => {
return sema.fail(block, init_src, "expected {d} array elements; found {d}", .{
array_len, instrs.len,
});
},
.Vector => {
return sema.fail(block, init_src, "expected {d} vector elements; found {d}", .{
array_len, instrs.len,
});
},
else => unreachable,
};
if (block.is_comptime and
(try sema.resolveDefinedValue(block, init_src, array_ptr)) != null)
{
// In this case the comptime machinery will have evaluated the store
// instructions at comptime, so we have almost nothing to do here. However,
// in the case of a sentinel-terminated array, the sentinel will not have been
// populated by any ZIR instructions at comptime; we need to do that here.
if (array_ty.sentinel(mod)) |sentinel_val| {
const array_len_ref = try sema.addIntUnsigned(Type.usize, array_len);
const sentinel_ptr = try sema.elemPtrArray(block, init_src, init_src, array_ptr, init_src, array_len_ref, true, true);
const sentinel = try sema.addConstant(array_ty.childType(mod), sentinel_val);
try sema.storePtr2(block, init_src, sentinel_ptr, init_src, sentinel, init_src, .store);
}
return;
}
var array_is_comptime = true;
var first_block_index = block.instructions.items.len;
var make_runtime = false;
// Collect the comptime element values in case the array literal ends up
// being comptime-known.
const array_len_s = try sema.usizeCast(block, init_src, array_ty.arrayLenIncludingSentinel(mod));
const element_vals = try sema.arena.alloc(Value, array_len_s);
const opt_opv = try sema.typeHasOnePossibleValue(array_ty);
const air_tags = sema.air_instructions.items(.tag);
const air_datas = sema.air_instructions.items(.data);
outer: for (instrs, 0..) |elem_ptr, i| {
// Determine whether the value stored to this pointer is comptime-known.
if (array_ty.isTuple(mod)) {
if (try array_ty.structFieldValueComptime(mod, i)) |opv| {
element_vals[i] = opv;
continue;
}
} else {
// The array has one possible value, so the value is always comptime-known.
if (opt_opv) |opv| {
element_vals[i] = opv;
continue;
}
}
const elem_ptr_air_ref = sema.inst_map.get(elem_ptr).?;
const elem_ptr_air_inst = Air.refToIndex(elem_ptr_air_ref).?;
// We expect to see something like this in the current block AIR:
// %a = elem_ptr(...)
// store(%a, %b)
// With an optional bitcast between the store and the elem_ptr.
// If %b is a comptime operand, this element is comptime.
//
// However, in the case of a comptime-known pointer to an array, the
// elem_ptr instruction is missing, so we have to pattern-match
// based only on the store instructions.
// `first_block_index` needs to point to the `elem_ptr` if it exists;
// the `store` otherwise.
//
// It's also possible for there to be no store instruction, in the case
// of nested `coerce_result_ptr` instructions. If we see the `elem_ptr`
// but we have not found a `store`, treat as a runtime-known element.
//
// This is nearly identical to the logic in `validateStructInit`.
// Possible performance enhancement: save the `block_index` between iterations
// of the for loop.
var block_index = block.instructions.items.len - 1;
while (block_index > 0) : (block_index -= 1) {
const store_inst = block.instructions.items[block_index];
if (store_inst == elem_ptr_air_inst) {
array_is_comptime = false;
continue :outer;
}
switch (air_tags[store_inst]) {
.store, .store_safe => {},
else => continue,
}
const bin_op = air_datas[store_inst].bin_op;
var lhs = bin_op.lhs;
{
const lhs_index = Air.refToIndex(lhs) orelse continue;
if (air_tags[lhs_index] == .bitcast) {
lhs = air_datas[lhs_index].ty_op.operand;
block_index -= 1;
}
}
if (lhs != elem_ptr_air_ref) continue;
while (block_index > 0) : (block_index -= 1) {
const block_inst = block.instructions.items[block_index - 1];
if (air_tags[block_inst] != .dbg_stmt) break;
}
if (block_index > 0 and
elem_ptr_air_inst == block.instructions.items[block_index - 1])
{
first_block_index = @min(first_block_index, block_index - 1);
} else {
first_block_index = @min(first_block_index, block_index);
}
if (try sema.resolveMaybeUndefValAllowVariablesMaybeRuntime(bin_op.rhs, &make_runtime)) |val| {
element_vals[i] = val;
} else {
array_is_comptime = false;
}
continue :outer;
}
array_is_comptime = false;
continue :outer;
}
if (array_is_comptime) {
if (try sema.resolveDefinedValue(block, init_src, array_ptr)) |ptr_val| {
if (ptr_val.tag() == .comptime_field_ptr) {
// This store was validated by the individual elem ptrs.
return;
}
}
// Our task is to delete all the `elem_ptr` and `store` instructions, and insert
// instead a single `store` to the array_ptr with a comptime array value.
// Also to populate the sentinel value, if any.
if (array_ty.sentinel(mod)) |sentinel_val| {
element_vals[instrs.len] = sentinel_val;
}
block.instructions.shrinkRetainingCapacity(first_block_index);
var array_val = try Value.Tag.aggregate.create(sema.arena, element_vals);
if (make_runtime) array_val = try Value.Tag.runtime_value.create(sema.arena, array_val);
const array_init = try sema.addConstant(array_ty, array_val);
try sema.storePtr2(block, init_src, array_ptr, init_src, array_init, init_src, .store);
}
}
fn zirValidateDeref(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const src = inst_data.src();
const operand = try sema.resolveInst(inst_data.operand);
const operand_ty = sema.typeOf(operand);
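// e.g. `p.*` is valid below for `p: *u8` or `p: [*c]u8`, but `[*]u8` and
// slices require index syntax and are rejected.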
if (operand_ty.zigTypeTag(mod) != .Pointer) {
return sema.fail(block, src, "cannot dereference non-pointer type '{}'", .{operand_ty.fmt(sema.mod)});
} else switch (operand_ty.ptrSize(mod)) {
.One, .C => {},
.Many => return sema.fail(block, src, "index syntax required for unknown-length pointer type '{}'", .{operand_ty.fmt(sema.mod)}),
.Slice => return sema.fail(block, src, "index syntax required for slice type '{}'", .{operand_ty.fmt(sema.mod)}),
}
if ((try sema.typeHasOnePossibleValue(operand_ty.childType(mod))) != null) {
// No need to validate the actual pointer value, we don't need it!
return;
}
const elem_ty = operand_ty.elemType2(mod);
if (try sema.resolveMaybeUndefVal(operand)) |val| {
if (val.isUndef(mod)) {
return sema.fail(block, src, "cannot dereference undefined value", .{});
}
} else if (!(try sema.validateRunTimeType(elem_ty, false))) {
const msg = msg: {
const msg = try sema.errMsg(
block,
src,
"values of type '{}' must be comptime-known, but operand value is runtime-known",
.{elem_ty.fmt(sema.mod)},
);
errdefer msg.destroy(sema.gpa);
const src_decl = sema.mod.declPtr(block.src_decl);
try sema.explainWhyTypeIsComptime(msg, src.toSrcLoc(src_decl, mod), elem_ty);
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
}
fn failWithBadMemberAccess(
sema: *Sema,
block: *Block,
agg_ty: Type,
field_src: LazySrcLoc,
field_name: []const u8,
) CompileError {
const mod = sema.mod;
const kw_name = switch (agg_ty.zigTypeTag(mod)) {
.Union => "union",
.Struct => "struct",
.Opaque => "opaque",
.Enum => "enum",
else => unreachable,
};
if (agg_ty.getOwnerDeclOrNull(mod)) |some| if (sema.mod.declIsRoot(some)) {
return sema.fail(block, field_src, "root struct of file '{}' has no member named '{s}'", .{
agg_ty.fmt(sema.mod), field_name,
});
};
const msg = msg: {
const msg = try sema.errMsg(block, field_src, "{s} '{}' has no member named '{s}'", .{
kw_name, agg_ty.fmt(sema.mod), field_name,
});
errdefer msg.destroy(sema.gpa);
try sema.addDeclaredHereNote(msg, agg_ty);
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
fn failWithBadStructFieldAccess(
sema: *Sema,
block: *Block,
struct_obj: *Module.Struct,
field_src: LazySrcLoc,
field_name: []const u8,
) CompileError {
const gpa = sema.gpa;
const fqn = try struct_obj.getFullyQualifiedName(sema.mod);
defer gpa.free(fqn);
const msg = msg: {
const msg = try sema.errMsg(
block,
field_src,
"no field named '{s}' in struct '{s}'",
.{ field_name, fqn },
);
errdefer msg.destroy(gpa);
try sema.mod.errNoteNonLazy(struct_obj.srcLoc(sema.mod), msg, "struct declared here", .{});
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
fn failWithBadUnionFieldAccess(
sema: *Sema,
block: *Block,
union_obj: *Module.Union,
field_src: LazySrcLoc,
field_name: []const u8,
) CompileError {
const gpa = sema.gpa;
const fqn = try union_obj.getFullyQualifiedName(sema.mod);
defer gpa.free(fqn);
const msg = msg: {
const msg = try sema.errMsg(
block,
field_src,
"no field named '{s}' in union '{s}'",
.{ field_name, fqn },
);
errdefer msg.destroy(gpa);
try sema.mod.errNoteNonLazy(union_obj.srcLoc(sema.mod), msg, "union declared here", .{});
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
fn addDeclaredHereNote(sema: *Sema, parent: *Module.ErrorMsg, decl_ty: Type) !void {
const mod = sema.mod;
const src_loc = decl_ty.declSrcLocOrNull(mod) orelse return;
const category = switch (decl_ty.zigTypeTag(mod)) {
.Union => "union",
.Struct => "struct",
.Enum => "enum",
.Opaque => "opaque",
.ErrorSet => "error set",
else => unreachable,
};
try mod.errNoteNonLazy(src_loc, parent, "{s} declared here", .{category});
}
fn zirStoreToBlockPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
const tracy = trace(@src());
defer tracy.end();
const bin_inst = sema.code.instructions.items(.data)[inst].bin;
const ptr = sema.inst_map.get(Zir.refToIndex(bin_inst.lhs).?) orelse {
// This is an elided instruction, but AstGen was unable to omit it.
return;
};
const operand = try sema.resolveInst(bin_inst.rhs);
const src: LazySrcLoc = sema.src;
blk: {
const ptr_inst = Air.refToIndex(ptr) orelse break :blk;
if (sema.air_instructions.items(.tag)[ptr_inst] != .constant) break :blk;
const air_datas = sema.air_instructions.items(.data);
const ptr_val = sema.air_values.items[air_datas[ptr_inst].ty_pl.payload];
switch (ptr_val.tag()) {
.inferred_alloc_comptime => {
const iac = ptr_val.castTag(.inferred_alloc_comptime).?;
return sema.storeToInferredAllocComptime(block, src, operand, iac);
},
.inferred_alloc => {
const inferred_alloc = ptr_val.castTag(.inferred_alloc).?;
return sema.storeToInferredAlloc(block, ptr, operand, inferred_alloc);
},
else => break :blk,
}
}
return sema.storePtr(block, src, ptr, operand);
}
fn zirStoreToInferredPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
const tracy = trace(@src());
defer tracy.end();
const src: LazySrcLoc = sema.src;
const bin_inst = sema.code.instructions.items(.data)[inst].bin;
const ptr = try sema.resolveInst(bin_inst.lhs);
const operand = try sema.resolveInst(bin_inst.rhs);
const ptr_inst = Air.refToIndex(ptr).?;
assert(sema.air_instructions.items(.tag)[ptr_inst] == .constant);
const air_datas = sema.air_instructions.items(.data);
const ptr_val = sema.air_values.items[air_datas[ptr_inst].ty_pl.payload];
switch (ptr_val.tag()) {
.inferred_alloc_comptime => {
const iac = ptr_val.castTag(.inferred_alloc_comptime).?;
return sema.storeToInferredAllocComptime(block, src, operand, iac);
},
.inferred_alloc => {
const inferred_alloc = ptr_val.castTag(.inferred_alloc).?;
return sema.storeToInferredAlloc(block, ptr, operand, inferred_alloc);
},
else => unreachable,
}
}
fn storeToInferredAlloc(
sema: *Sema,
block: *Block,
ptr: Air.Inst.Ref,
operand: Air.Inst.Ref,
inferred_alloc: *Value.Payload.InferredAlloc,
) CompileError!void {
// Create a store instruction as a placeholder. This will be replaced by a
// proper store sequence once we know the stored type.
const dummy_store = try block.addBinOp(.store, ptr, operand);
// Add the stored instruction to the set we will use to resolve peer types
// for the inferred allocation.
try inferred_alloc.data.prongs.append(sema.arena, .{
.stored_inst = operand,
.placeholder = Air.refToIndex(dummy_store).?,
});
}
fn storeToInferredAllocComptime(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
operand: Air.Inst.Ref,
iac: *Value.Payload.InferredAllocComptime,
) CompileError!void {
const operand_ty = sema.typeOf(operand);
// There will be only one store_to_inferred_ptr because we are running at comptime.
// The alloc will turn into a Decl.
if (try sema.resolveMaybeUndefValAllowVariables(operand)) |operand_val| store: {
if (operand_val.tagIsVariable()) break :store;
var anon_decl = try block.startAnonDecl();
defer anon_decl.deinit();
iac.data.decl_index = try anon_decl.finish(
operand_ty,
try operand_val.copy(anon_decl.arena()),
iac.data.alignment,
);
return;
}
return sema.failWithNeededComptime(block, src, "value being stored to a comptime variable must be comptime-known");
}
fn zirSetEvalBranchQuota(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const src = inst_data.src();
const quota = @intCast(u32, try sema.resolveInt(block, src, inst_data.operand, Type.u32, "eval branch quota must be comptime-known"));
sema.branch_quota = @max(sema.branch_quota, quota);
}
fn zirStore(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
const tracy = trace(@src());
defer tracy.end();
const bin_inst = sema.code.instructions.items(.data)[inst].bin;
const ptr = try sema.resolveInst(bin_inst.lhs);
const value = try sema.resolveInst(bin_inst.rhs);
return sema.storePtr(block, sema.src, ptr, value);
}
fn zirStoreNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
const tracy = trace(@src());
defer tracy.end();
const mod = sema.mod;
const zir_tags = sema.code.instructions.items(.tag);
const zir_datas = sema.code.instructions.items(.data);
const inst_data = zir_datas[inst].pl_node;
const src = inst_data.src();
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const ptr = try sema.resolveInst(extra.lhs);
const operand = try sema.resolveInst(extra.rhs);
const is_ret = if (Zir.refToIndex(extra.lhs)) |ptr_index|
zir_tags[ptr_index] == .ret_ptr
else
false;
// Check for the possibility of this pattern:
// %a = ret_ptr
// %b = store(%a, %c)
// Where %c is an error union or error set. In such case we need to add
// to the current function's inferred error set, if any.
if (is_ret and (sema.typeOf(operand).zigTypeTag(mod) == .ErrorUnion or
sema.typeOf(operand).zigTypeTag(mod) == .ErrorSet) and
sema.fn_ret_ty.zigTypeTag(mod) == .ErrorUnion)
{
try sema.addToInferredErrorSet(operand);
}
const ptr_src: LazySrcLoc = .{ .node_offset_store_ptr = inst_data.src_node };
const operand_src: LazySrcLoc = .{ .node_offset_store_operand = inst_data.src_node };
const air_tag: Air.Inst.Tag = if (is_ret)
.ret_ptr
else if (block.wantSafety())
.store_safe
else
.store;
return sema.storePtr2(block, src, ptr, ptr_src, operand, operand_src, air_tag);
}
fn zirStr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
const bytes = sema.code.instructions.items(.data)[inst].str.get(sema.code);
return sema.addStrLit(block, bytes);
}
fn addStrLit(sema: *Sema, block: *Block, zir_bytes: []const u8) CompileError!Air.Inst.Ref {
// `zir_bytes` references memory inside the ZIR module, which can get deallocated
// after semantic analysis is complete, for example in the case of the initialization
// expression of a variable declaration.
const mod = sema.mod;
const gpa = sema.gpa;
const string_bytes = &mod.string_literal_bytes;
const StringLiteralAdapter = Module.StringLiteralAdapter;
const StringLiteralContext = Module.StringLiteralContext;
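// String literals are deduplicated: the table is keyed by the bytes, so two
// occurrences of the same literal share one anonymous Decl and one
// (index, len) slice of `string_bytes`.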
try string_bytes.ensureUnusedCapacity(gpa, zir_bytes.len);
const gop = try mod.string_literal_table.getOrPutContextAdapted(gpa, zir_bytes, StringLiteralAdapter{
.bytes = string_bytes,
}, StringLiteralContext{
.bytes = string_bytes,
});
if (!gop.found_existing) {
gop.key_ptr.* = .{
.index = @intCast(u32, string_bytes.items.len),
.len = @intCast(u32, zir_bytes.len),
};
string_bytes.appendSliceAssumeCapacity(zir_bytes);
gop.value_ptr.* = .none;
}
const decl_index = gop.value_ptr.unwrap() orelse di: {
var anon_decl = try block.startAnonDecl();
defer anon_decl.deinit();
const decl_index = try anon_decl.finish(
try Type.array(anon_decl.arena(), gop.key_ptr.len, try mod.intValue(Type.u8, 0), Type.u8, mod),
try Value.Tag.str_lit.create(anon_decl.arena(), gop.key_ptr.*),
0, // default alignment
);
// Needed so that `Decl.clearValues` will additionally set the corresponding
// string literal table value back to `Decl.OptionalIndex.none`.
mod.declPtr(decl_index).owns_tv = true;
gop.value_ptr.* = decl_index.toOptional();
break :di decl_index;
};
return sema.analyzeDeclRef(decl_index);
}
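// Illustrative only: because literals are interned through
// `mod.string_literal_table` above, identical literals share one anonymous
// decl:
//
//     const a = "hello";
//     const b = "hello"; // resolves to the same backing array decl as `a`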
fn zirInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
_ = block;
const tracy = trace(@src());
defer tracy.end();
const int = sema.code.instructions.items(.data)[inst].int;
return sema.addIntUnsigned(Type.comptime_int, int);
}
fn zirIntBig(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
_ = block;
const tracy = trace(@src());
defer tracy.end();
const mod = sema.mod;
const int = sema.code.instructions.items(.data)[inst].str;
const byte_count = int.len * @sizeOf(std.math.big.Limb);
const limb_bytes = sema.code.string_bytes[int.start..][0..byte_count];
// TODO: this allocation and copy is only needed because the limbs may be unaligned.
// If ZIR is adjusted so that big int limbs are guaranteed to be aligned, these
// two lines can be removed.
const limbs = try sema.arena.alloc(std.math.big.Limb, int.len);
@memcpy(mem.sliceAsBytes(limbs), limb_bytes);
return sema.addConstant(
Type.comptime_int,
try mod.intValue_big(Type.comptime_int, .{
.limbs = limbs,
.positive = true,
}),
);
}
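// Illustrative only: `zirIntBig` is reached for integer literals that do not
// fit in 64 bits, e.g.
//
//     const big = 340282366920938463463374607431768211455; // 2^128 - 1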
fn zirFloat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
_ = block;
const number = sema.code.instructions.items(.data)[inst].float;
return sema.addConstant(
Type.comptime_float,
try sema.mod.floatValue(Type.comptime_float, number),
);
}
fn zirFloat128(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
_ = block;
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const extra = sema.code.extraData(Zir.Inst.Float128, inst_data.payload_index).data;
const number = extra.get();
return sema.addConstant(
Type.comptime_float,
try sema.mod.floatValue(Type.comptime_float, number),
);
}
fn zirCompileError(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Zir.Inst.Index {
const tracy = trace(@src());
defer tracy.end();
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const src = inst_data.src();
const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
const msg = try sema.resolveConstString(block, operand_src, inst_data.operand, "compile error string must be comptime-known");
return sema.fail(block, src, "{s}", .{msg});
}
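// Illustrative only: the handler above implements
//
//     @compileError("unsupported target");
//
// where the message operand must be comptime-known.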
fn zirCompileLog(
sema: *Sema,
extended: Zir.Inst.Extended.InstData,
) CompileError!Air.Inst.Ref {
var managed = sema.mod.compile_log_text.toManaged(sema.gpa);
defer sema.mod.compile_log_text = managed.moveToUnmanaged();
const writer = managed.writer();
const extra = sema.code.extraData(Zir.Inst.NodeMultiOp, extended.operand);
const src_node = extra.data.src_node;
const args = sema.code.refSlice(extra.end, extended.small);
for (args, 0..) |arg_ref, i| {
if (i != 0) try writer.print(", ", .{});
const arg = try sema.resolveInst(arg_ref);
const arg_ty = sema.typeOf(arg);
if (try sema.resolveMaybeUndefVal(arg)) |val| {
try sema.resolveLazyValue(val);
try writer.print("@as({}, {})", .{
arg_ty.fmt(sema.mod), val.fmtValue(arg_ty, sema.mod),
});
} else {
try writer.print("@as({}, [runtime value])", .{arg_ty.fmt(sema.mod)});
}
}
try writer.print("\n", .{});
const decl_index = if (sema.func) |some| some.owner_decl else sema.owner_decl_index;
const gop = try sema.mod.compile_log_decls.getOrPut(sema.gpa, decl_index);
if (!gop.found_existing) {
gop.value_ptr.* = src_node;
}
return Air.Inst.Ref.void_value;
}
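// Illustrative only: for
//
//     @compileLog(x);
//
// a comptime-known `x: u32` is rendered as `@as(u32, 42)`, while a runtime
// value is rendered as `@as(u32, [runtime value])`, matching the two branches
// above.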
fn zirPanic(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Zir.Inst.Index {
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const src = inst_data.src();
const msg_inst = try sema.resolveInst(inst_data.operand);
if (block.is_comptime) {
return sema.fail(block, src, "encountered @panic at comptime", .{});
}
try sema.panicWithMsg(block, msg_inst);
return always_noreturn;
}
fn zirTrap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Zir.Inst.Index {
const src_node = sema.code.instructions.items(.data)[inst].node;
const src = LazySrcLoc.nodeOffset(src_node);
sema.src = src;
_ = try block.addNoOp(.trap);
return always_noreturn;
}
fn zirLoop(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
const extra = sema.code.extraData(Zir.Inst.Block, inst_data.payload_index);
const body = sema.code.extra[extra.end..][0..extra.data.body_len];
const gpa = sema.gpa;
// AIR expects a block outside the loop block too.
// Reserve space for a Loop instruction so that generated Break instructions can
// point to it, even if it doesn't end up being used because the code turns out
// to be comptime-evaluated.
const block_inst = @intCast(Air.Inst.Index, sema.air_instructions.len);
const loop_inst = block_inst + 1;
try sema.air_instructions.ensureUnusedCapacity(gpa, 2);
sema.air_instructions.appendAssumeCapacity(.{
.tag = .block,
.data = undefined,
});
sema.air_instructions.appendAssumeCapacity(.{
.tag = .loop,
.data = .{ .ty_pl = .{
.ty = .noreturn_type,
.payload = undefined,
} },
});
var label: Block.Label = .{
.zir_block = inst,
.merges = .{
.src_locs = .{},
.results = .{},
.br_list = .{},
.block_inst = block_inst,
},
};
var child_block = parent_block.makeSubBlock();
child_block.label = &label;
child_block.runtime_cond = null;
child_block.runtime_loop = src;
child_block.runtime_index.increment();
const merges = &child_block.label.?.merges;
defer child_block.instructions.deinit(gpa);
defer merges.deinit(gpa);
var loop_block = child_block.makeSubBlock();
defer loop_block.instructions.deinit(gpa);
try sema.analyzeBody(&loop_block, body);
const loop_block_len = loop_block.instructions.items.len;
if (loop_block_len > 0 and sema.typeOf(Air.indexToRef(loop_block.instructions.items[loop_block_len - 1])).isNoReturn(mod)) {
// If the loop ended with a noreturn terminator, then there is no way for it to loop,
// so we can just use the block instead.
try child_block.instructions.appendSlice(gpa, loop_block.instructions.items);
} else {
try child_block.instructions.append(gpa, loop_inst);
try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Block).Struct.fields.len + loop_block_len);
sema.air_instructions.items(.data)[loop_inst].ty_pl.payload = sema.addExtraAssumeCapacity(
Air.Block{ .body_len = @intCast(u32, loop_block_len) },
);
sema.air_extra.appendSliceAssumeCapacity(loop_block.instructions.items);
}
return sema.analyzeBlockBody(parent_block, src, &child_block, merges);
}
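// Illustrative only: in (roughly)
//
//     fn f() u32 {
//         while (true) {
//             return 1;
//         }
//     }
//
// the analyzed loop body ends in a noreturn instruction, so the reserved
// `.loop` instruction is dropped and only the surrounding block is kept.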
fn zirCImport(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
const pl_node = sema.code.instructions.items(.data)[inst].pl_node;
const src = pl_node.src();
const extra = sema.code.extraData(Zir.Inst.Block, pl_node.payload_index);
const body = sema.code.extra[extra.end..][0..extra.data.body_len];
// We check this here to avoid undefined symbols.
if (!@import("build_options").have_llvm)
return sema.fail(parent_block, src, "C import unavailable: Zig compiler was not built with LLVM extensions", .{});
var c_import_buf = std.ArrayList(u8).init(sema.gpa);
defer c_import_buf.deinit();
var comptime_reason: Block.ComptimeReason = .{ .c_import = .{
.block = parent_block,
.src = src,
} };
var child_block: Block = .{
.parent = parent_block,
.sema = sema,
.src_decl = parent_block.src_decl,
.namespace = parent_block.namespace,
.wip_capture_scope = parent_block.wip_capture_scope,
.instructions = .{},
.inlining = parent_block.inlining,
.is_comptime = true,
.comptime_reason = &comptime_reason,
.c_import_buf = &c_import_buf,
.runtime_cond = parent_block.runtime_cond,
.runtime_loop = parent_block.runtime_loop,
.runtime_index = parent_block.runtime_index,
};
defer child_block.instructions.deinit(sema.gpa);
// Ignore the result; all the relevant operations have already written to c_import_buf.
_ = try sema.analyzeBodyBreak(&child_block, body);
const mod = sema.mod;
const c_import_res = mod.comp.cImport(c_import_buf.items) catch |err|
return sema.fail(&child_block, src, "C import failed: {s}", .{@errorName(err)});
if (c_import_res.errors.len != 0) {
const msg = msg: {
defer @import("clang.zig").ErrorMsg.delete(c_import_res.errors.ptr, c_import_res.errors.len);
const msg = try sema.errMsg(&child_block, src, "C import failed", .{});
errdefer msg.destroy(sema.gpa);
if (!mod.comp.bin_file.options.link_libc)
try sema.errNote(&child_block, src, msg, "libc headers not available; compilation does not link against libc", .{});
const gop = try sema.mod.cimport_errors.getOrPut(sema.gpa, sema.owner_decl_index);
if (!gop.found_existing) {
var errs = try std.ArrayListUnmanaged(Module.CImportError).initCapacity(sema.gpa, c_import_res.errors.len);
errdefer {
for (errs.items) |err| err.deinit(sema.gpa);
errs.deinit(sema.gpa);
}
for (c_import_res.errors) |c_error| {
const path = if (c_error.filename_ptr) |some|
try sema.gpa.dupeZ(u8, some[0..c_error.filename_len])
else
null;
errdefer if (path) |some| sema.gpa.free(some);
const c_msg = try sema.gpa.dupeZ(u8, c_error.msg_ptr[0..c_error.msg_len]);
errdefer sema.gpa.free(c_msg);
const line = line: {
const source = c_error.source orelse break :line null;
var start = c_error.offset;
while (start > 0) : (start -= 1) {
if (source[start - 1] == '\n') break;
}
var end = c_error.offset;
while (true) : (end += 1) {
if (source[end] == 0) break;
if (source[end] == '\n') break;
}
break :line try sema.gpa.dupeZ(u8, source[start..end]);
};
errdefer if (line) |some| sema.gpa.free(some);
errs.appendAssumeCapacity(.{
.path = path orelse null,
.source_line = line orelse null,
.line = c_error.line,
.column = c_error.column,
.offset = c_error.offset,
.msg = c_msg,
});
}
gop.value_ptr.* = errs.items;
}
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
const c_import_pkg = Package.create(
sema.gpa,
null,
c_import_res.out_zig_path,
) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
else => unreachable, // we pass null for root_src_dir_path
};
const result = mod.importPkg(c_import_pkg) catch |err|
return sema.fail(&child_block, src, "C import failed: {s}", .{@errorName(err)});
mod.astGenFile(result.file) catch |err|
return sema.fail(&child_block, src, "C import failed: {s}", .{@errorName(err)});
try mod.semaFile(result.file);
const file_root_decl_index = result.file.root_decl.unwrap().?;
const file_root_decl = mod.declPtr(file_root_decl_index);
try mod.declareDeclDependency(sema.owner_decl_index, file_root_decl_index);
return sema.addConstant(file_root_decl.ty, file_root_decl.val);
}
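// Illustrative only: the pipeline above (translate-c, package import, AstGen,
// file-level Sema) backs user code such as
//
//     const c = @cImport({
//         @cInclude("stdio.h");
//     });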
fn zirSuspendBlock(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
return sema.failWithUseOfAsync(parent_block, src);
}
fn zirBlock(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index, force_comptime: bool) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
const pl_node = sema.code.instructions.items(.data)[inst].pl_node;
const src = pl_node.src();
const extra = sema.code.extraData(Zir.Inst.Block, pl_node.payload_index);
const body = sema.code.extra[extra.end..][0..extra.data.body_len];
const gpa = sema.gpa;
// Reserve space for a Block instruction so that generated Break instructions can
// point to it, even if it doesn't end up being used because the code turns out
// to be comptime-evaluated or is an unlabeled block.
const block_inst = @intCast(Air.Inst.Index, sema.air_instructions.len);
try sema.air_instructions.append(gpa, .{
.tag = .block,
.data = undefined,
});
var label: Block.Label = .{
.zir_block = inst,
.merges = .{
.src_locs = .{},
.results = .{},
.br_list = .{},
.block_inst = block_inst,
},
};
var child_block: Block = .{
.parent = parent_block,
.sema = sema,
.src_decl = parent_block.src_decl,
.namespace = parent_block.namespace,
.wip_capture_scope = parent_block.wip_capture_scope,
.instructions = .{},
.label = &label,
.inlining = parent_block.inlining,
.is_comptime = parent_block.is_comptime or force_comptime,
.comptime_reason = parent_block.comptime_reason,
.is_typeof = parent_block.is_typeof,
.want_safety = parent_block.want_safety,
.float_mode = parent_block.float_mode,
.c_import_buf = parent_block.c_import_buf,
.runtime_cond = parent_block.runtime_cond,
.runtime_loop = parent_block.runtime_loop,
.runtime_index = parent_block.runtime_index,
.error_return_trace_index = parent_block.error_return_trace_index,
};
defer child_block.instructions.deinit(gpa);
defer label.merges.deinit(gpa);
return sema.resolveBlockBody(parent_block, src, &child_block, body, inst, &label.merges);
}
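// Illustrative only (`cond` is a placeholder): a labeled block such as
//
//     const x = blk: {
//         if (cond) break :blk @as(u32, 1);
//         break :blk 2;
//     };
//
// flows through the reserved block instruction above, with each `break :blk`
// recorded in `label.merges`.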
fn resolveBlockBody(
sema: *Sema,
parent_block: *Block,
src: LazySrcLoc,
child_block: *Block,
body: []const Zir.Inst.Index,
/// This is the instruction that a break instruction within `body` can
/// use to return from the body.
body_inst: Zir.Inst.Index,
merges: *Block.Merges,
) CompileError!Air.Inst.Ref {
if (child_block.is_comptime) {
return sema.resolveBody(child_block, body, body_inst);
} else {
if (sema.analyzeBodyInner(child_block, body)) |_| {
return sema.analyzeBlockBody(parent_block, src, child_block, merges);
} else |err| switch (err) {
error.ComptimeBreak => {
// Comptime control flow is happening, however child_block may still contain
// runtime instructions which need to be copied to the parent block.
try parent_block.instructions.appendSlice(sema.gpa, child_block.instructions.items);
const break_inst = sema.comptime_break_inst;
const break_data = sema.code.instructions.items(.data)[break_inst].@"break";
const extra = sema.code.extraData(Zir.Inst.Break, break_data.payload_index).data;
if (extra.block_inst == body_inst) {
return try sema.resolveInst(break_data.operand);
} else {
return error.ComptimeBreak;
}
},
else => |e| return e,
}
}
}
fn analyzeBlockBody(
sema: *Sema,
parent_block: *Block,
src: LazySrcLoc,
child_block: *Block,
merges: *Block.Merges,
) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
const gpa = sema.gpa;
const mod = sema.mod;
// Blocks must terminate with a noreturn instruction.
assert(child_block.instructions.items.len != 0);
assert(sema.typeOf(Air.indexToRef(child_block.instructions.items[child_block.instructions.items.len - 1])).isNoReturn(mod));
if (merges.results.items.len == 0) {
// No need for a block instruction. We can put the new instructions
// directly into the parent block.
try parent_block.instructions.appendSlice(gpa, child_block.instructions.items);
return Air.indexToRef(child_block.instructions.items[child_block.instructions.items.len - 1]);
}
if (merges.results.items.len == 1) {
const last_inst_index = child_block.instructions.items.len - 1;
const last_inst = child_block.instructions.items[last_inst_index];
if (sema.getBreakBlock(last_inst)) |br_block| {
if (br_block == merges.block_inst) {
// No need for a block instruction. We can put the new instructions directly
// into the parent block. Here we omit the break instruction.
const without_break = child_block.instructions.items[0..last_inst_index];
try parent_block.instructions.appendSlice(gpa, without_break);
return merges.results.items[0];
}
}
}
// It is impossible for the number of results to be > 1 in a comptime scope.
assert(!child_block.is_comptime); // Should have already gotten a compile error in the condbr condition.
// Need to set the type and emit the Block instruction. This allows machine code generation
// to emit a jump instruction to after the block when it encounters the break.
try parent_block.instructions.append(gpa, merges.block_inst);
const resolved_ty = try sema.resolvePeerTypes(parent_block, src, merges.results.items, .{ .override = merges.src_locs.items });
// TODO add note "missing else causes void value"
const type_src = src; // TODO: better source location
const valid_rt = try sema.validateRunTimeType(resolved_ty, false);
if (!valid_rt) {
const msg = msg: {
const msg = try sema.errMsg(child_block, type_src, "value with comptime-only type '{}' depends on runtime control flow", .{resolved_ty.fmt(mod)});
errdefer msg.destroy(sema.gpa);
const runtime_src = child_block.runtime_cond orelse child_block.runtime_loop.?;
try sema.errNote(child_block, runtime_src, msg, "runtime control flow here", .{});
const child_src_decl = mod.declPtr(child_block.src_decl);
try sema.explainWhyTypeIsComptime(msg, type_src.toSrcLoc(child_src_decl, mod), resolved_ty);
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
const ty_inst = try sema.addType(resolved_ty);
try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Block).Struct.fields.len +
child_block.instructions.items.len);
sema.air_instructions.items(.data)[merges.block_inst] = .{ .ty_pl = .{
.ty = ty_inst,
.payload = sema.addExtraAssumeCapacity(Air.Block{
.body_len = @intCast(u32, child_block.instructions.items.len),
}),
} };
sema.air_extra.appendSliceAssumeCapacity(child_block.instructions.items);
// Now that the block has its type resolved, we need to go back into all the break
// instructions, and insert type coercion on the operands.
for (merges.br_list.items) |br| {
const br_operand = sema.air_instructions.items(.data)[br].br.operand;
const br_operand_src = src;
const br_operand_ty = sema.typeOf(br_operand);
if (br_operand_ty.eql(resolved_ty, mod)) {
// No type coercion needed.
continue;
}
var coerce_block = parent_block.makeSubBlock();
defer coerce_block.instructions.deinit(gpa);
const coerced_operand = try sema.coerce(&coerce_block, resolved_ty, br_operand, br_operand_src);
// If no instructions were produced, such as in the case of a coercion of a
// constant value to a new type, we can simply point the br operand to it.
if (coerce_block.instructions.items.len == 0) {
sema.air_instructions.items(.data)[br].br.operand = coerced_operand;
continue;
}
assert(coerce_block.instructions.items[coerce_block.instructions.items.len - 1] ==
Air.refToIndex(coerced_operand).?);
// Convert the br instruction to a block instruction that has the coercion
// and then a new br inside that returns the coerced instruction.
const sub_block_len = @intCast(u32, coerce_block.instructions.items.len + 1);
try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Block).Struct.fields.len +
sub_block_len);
try sema.air_instructions.ensureUnusedCapacity(gpa, 1);
const sub_br_inst = @intCast(Air.Inst.Index, sema.air_instructions.len);
sema.air_instructions.items(.tag)[br] = .block;
sema.air_instructions.items(.data)[br] = .{ .ty_pl = .{
.ty = Air.Inst.Ref.noreturn_type,
.payload = sema.addExtraAssumeCapacity(Air.Block{
.body_len = sub_block_len,
}),
} };
sema.air_extra.appendSliceAssumeCapacity(coerce_block.instructions.items);
sema.air_extra.appendAssumeCapacity(sub_br_inst);
sema.air_instructions.appendAssumeCapacity(.{
.tag = .br,
.data = .{ .br = .{
.block_inst = merges.block_inst,
.operand = coerced_operand,
} },
});
}
return Air.indexToRef(merges.block_inst);
}
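// Illustrative only (`cond` is a placeholder): the peer resolution and
// coercion rewrite above can be seen in
//
//     const x = blk: {
//         if (cond) break :blk @as(u8, 1);
//         break :blk @as(u16, 2);
//     };
//
// The block's type resolves to u16, so the `br` carrying the u8 operand is
// rewritten into a sub-block that coerces it to u16 before breaking.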
fn zirExport(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
const tracy = trace(@src());
defer tracy.end();
const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const extra = sema.code.extraData(Zir.Inst.Export, inst_data.payload_index).data;
const src = inst_data.src();
const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
const options_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
const decl_name = sema.code.nullTerminatedString(extra.decl_name);
const decl_index = if (extra.namespace != .none) index_blk: {
const container_ty = try sema.resolveType(block, operand_src, extra.namespace);
const container_namespace = container_ty.getNamespaceIndex(mod).unwrap().?;
const maybe_index = try sema.lookupInNamespace(block, operand_src, container_namespace, decl_name, false);
break :index_blk maybe_index orelse
return sema.failWithBadMemberAccess(block, container_ty, operand_src, decl_name);
} else try sema.lookupIdentifier(block, operand_src, decl_name);
const options = sema.resolveExportOptions(block, .unneeded, extra.options) catch |err| switch (err) {
error.NeededSourceLocation => {
_ = try sema.resolveExportOptions(block, options_src, extra.options);
unreachable;
},
else => |e| return e,
};
{
try mod.ensureDeclAnalyzed(decl_index);
const exported_decl = mod.declPtr(decl_index);
if (exported_decl.val.castTag(.function)) |some| {
return sema.analyzeExport(block, src, options, some.data.owner_decl);
}
}
try sema.analyzeExport(block, src, options, decl_index);
}
fn zirExportValue(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
const tracy = trace(@src());
defer tracy.end();
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const extra = sema.code.extraData(Zir.Inst.ExportValue, inst_data.payload_index).data;
const src = inst_data.src();
const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
const options_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
const operand = try sema.resolveInstConst(block, operand_src, extra.operand, "export target must be comptime-known");
const options = sema.resolveExportOptions(block, .unneeded, extra.options) catch |err| switch (err) {
error.NeededSourceLocation => {
_ = try sema.resolveExportOptions(block, options_src, extra.options);
unreachable;
},
else => |e| return e,
};
const decl_index = switch (operand.val.tag()) {
.function => operand.val.castTag(.function).?.data.owner_decl,
else => blk: {
var anon_decl = try block.startAnonDecl();
defer anon_decl.deinit();
break :blk try anon_decl.finish(
operand.ty,
try operand.val.copy(anon_decl.arena()),
0,
);
},
};
try sema.analyzeExport(block, src, options, decl_index);
}
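// Illustrative only (`foo` is a placeholder decl): `zirExport` and
// `zirExportValue` above implement the builtin, e.g.
//
//     comptime {
//         @export(foo, .{ .name = "foo", .linkage = .Strong });
//     }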
pub fn analyzeExport(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
borrowed_options: std.builtin.ExportOptions,
exported_decl_index: Decl.Index,
) !void {
const Export = Module.Export;
const mod = sema.mod;
if (borrowed_options.linkage == .Internal) {
return;
}
try mod.ensureDeclAnalyzed(exported_decl_index);
const exported_decl = mod.declPtr(exported_decl_index);
if (!try sema.validateExternType(exported_decl.ty, .other)) {
const msg = msg: {
const msg = try sema.errMsg(block, src, "unable to export type '{}'", .{exported_decl.ty.fmt(sema.mod)});
errdefer msg.destroy(sema.gpa);
const src_decl = sema.mod.declPtr(block.src_decl);
try sema.explainWhyTypeIsNotExtern(msg, src.toSrcLoc(src_decl, mod), exported_decl.ty, .other);
try sema.addDeclaredHereNote(msg, exported_decl.ty);
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
// TODO: some backends might support re-exporting extern decls
if (exported_decl.isExtern()) {
return sema.fail(block, src, "export target cannot be extern", .{});
}
// This decl is alive no matter what, since it's being exported
mod.markDeclAlive(exported_decl);
try sema.maybeQueueFuncBodyAnalysis(exported_decl_index);
const gpa = mod.gpa;
try mod.decl_exports.ensureUnusedCapacity(gpa, 1);
try mod.export_owners.ensureUnusedCapacity(gpa, 1);
const new_export = try gpa.create(Export);
errdefer gpa.destroy(new_export);
const symbol_name = try gpa.dupe(u8, borrowed_options.name);
errdefer gpa.free(symbol_name);
const section: ?[]const u8 = if (borrowed_options.section) |s| try gpa.dupe(u8, s) else null;
errdefer if (section) |s| gpa.free(s);
new_export.* = .{
.options = .{
.name = symbol_name,
.linkage = borrowed_options.linkage,
.section = section,
.visibility = borrowed_options.visibility,
},
.src = src,
.owner_decl = sema.owner_decl_index,
.src_decl = block.src_decl,
.exported_decl = exported_decl_index,
.status = .in_progress,
};
// Add to export_owners table.
const eo_gop = mod.export_owners.getOrPutAssumeCapacity(sema.owner_decl_index);
if (!eo_gop.found_existing) {
eo_gop.value_ptr.* = .{};
}
try eo_gop.value_ptr.append(gpa, new_export);
errdefer _ = eo_gop.value_ptr.pop();
// Add to exported_decl table.
const de_gop = mod.decl_exports.getOrPutAssumeCapacity(exported_decl_index);
if (!de_gop.found_existing) {
de_gop.value_ptr.* = .{};
}
try de_gop.value_ptr.append(gpa, new_export);
errdefer _ = de_gop.value_ptr.pop();
}
fn zirSetAlignStack(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!void {
const mod = sema.mod;
const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
const src = LazySrcLoc.nodeOffset(extra.node);
const alignment = try sema.resolveAlign(block, operand_src, extra.operand);
if (alignment > 256) {
return sema.fail(block, src, "attempt to @setAlignStack({d}); maximum is 256", .{
alignment,
});
}
const func = sema.func orelse
return sema.fail(block, src, "@setAlignStack outside function body", .{});
const fn_owner_decl = mod.declPtr(func.owner_decl);
switch (fn_owner_decl.ty.fnCallingConvention(mod)) {
.Naked => return sema.fail(block, src, "@setAlignStack in naked function", .{}),
.Inline => return sema.fail(block, src, "@setAlignStack in inline function", .{}),
else => if (block.inlining != null) {
return sema.fail(block, src, "@setAlignStack in inline call", .{});
},
}
const gop = try mod.align_stack_fns.getOrPut(mod.gpa, func);
if (gop.found_existing) {
const msg = msg: {
const msg = try sema.errMsg(block, src, "multiple @setAlignStack in the same function body", .{});
errdefer msg.destroy(sema.gpa);
try sema.errNote(block, gop.value_ptr.src, msg, "other instance here", .{});
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
gop.value_ptr.* = .{ .alignment = alignment, .src = src };
}
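// Illustrative only: the checks above allow at most one use per function body
// and a maximum alignment of 256:
//
//     fn f() void {
//         @setAlignStack(32); // ok; @setAlignStack(512) would be rejected
//     }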
fn zirSetCold(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!void {
const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
const is_cold = try sema.resolveConstBool(block, operand_src, extra.operand, "operand to @setCold must be comptime-known");
const func = sema.func orelse return; // does nothing outside a function
func.is_cold = is_cold;
}
fn zirSetFloatMode(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!void {
const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
const src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
block.float_mode = try sema.resolveBuiltinEnum(block, src, extra.operand, "FloatMode", "operand to @setFloatMode must be comptime-known");
}
fn zirSetRuntimeSafety(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
block.want_safety = try sema.resolveConstBool(block, operand_src, inst_data.operand, "operand to @setRuntimeSafety must be comptime-known");
}
fn zirFence(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!void {
if (block.is_comptime) return;
const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
const order_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
const order = try sema.resolveAtomicOrder(block, order_src, extra.operand, "atomic order of @fence must be comptime-known");
if (@enumToInt(order) < @enumToInt(std.builtin.AtomicOrder.Acquire)) {
return sema.fail(block, order_src, "atomic ordering must be Acquire or stricter", .{});
}
_ = try block.addInst(.{
.tag = .fence,
.data = .{ .fence = order },
});
}
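// Illustrative only: the ordering check above admits
//
//     @fence(.SeqCst);
//
// but rejects `@fence(.Monotonic)`, since the ordering must be at least
// `.Acquire`.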
fn zirBreak(sema: *Sema, start_block: *Block, inst: Zir.Inst.Index) CompileError!Zir.Inst.Index {
const tracy = trace(@src());
defer tracy.end();
const inst_data = sema.code.instructions.items(.data)[inst].@"break";
const extra = sema.code.extraData(Zir.Inst.Break, inst_data.payload_index).data;
const operand = try sema.resolveInst(inst_data.operand);
const zir_block = extra.block_inst;
var block = start_block;
while (true) {
if (block.label) |label| {
if (label.zir_block == zir_block) {
const br_ref = try start_block.addBr(label.merges.block_inst, operand);
const src_loc = if (extra.operand_src_node != Zir.Inst.Break.no_src_node)
LazySrcLoc.nodeOffset(extra.operand_src_node)
else
null;
try label.merges.src_locs.append(sema.gpa, src_loc);
try label.merges.results.append(sema.gpa, operand);
try label.merges.br_list.append(sema.gpa, Air.refToIndex(br_ref).?);
block.runtime_index.increment();
if (block.runtime_cond == null and block.runtime_loop == null) {
block.runtime_cond = start_block.runtime_cond orelse start_block.runtime_loop;
block.runtime_loop = start_block.runtime_loop;
}
return inst;
}
}
block = block.parent.?;
}
}
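// Illustrative only (`a` and `b` are placeholders): the parent walk above is
// what resolves a labeled break from a nested block:
//
//     outer: while (a) {
//         while (b) {
//             break :outer; // walks up until `label.zir_block` matches
//         }
//     }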
fn zirDbgStmt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
// We do not set sema.src here because dbg_stmt instructions are only emitted for
// ZIR code that possibly will need to generate runtime code. So error messages
// and other source locations must not rely on sema.src being set from dbg_stmt
// instructions.
if (block.is_comptime or sema.mod.comp.bin_file.options.strip) return;
const inst_data = sema.code.instructions.items(.data)[inst].dbg_stmt;
if (block.instructions.items.len != 0) {
const idx = block.instructions.items[block.instructions.items.len - 1];
if (sema.air_instructions.items(.tag)[idx] == .dbg_stmt) {
// The previous dbg_stmt didn't correspond to any actual code, so replace it.
sema.air_instructions.items(.data)[idx].dbg_stmt = .{
.line = inst_data.line,
.column = inst_data.column,
};
return;
}
}
_ = try block.addInst(.{
.tag = .dbg_stmt,
.data = .{ .dbg_stmt = .{
.line = inst_data.line,
.column = inst_data.column,
} },
});
}
fn zirDbgBlockBegin(sema: *Sema, block: *Block) CompileError!void {
if (block.is_comptime or sema.mod.comp.bin_file.options.strip) return;
_ = try block.addInst(.{
.tag = .dbg_block_begin,
.data = undefined,
});
}
fn zirDbgBlockEnd(sema: *Sema, block: *Block) CompileError!void {
if (block.is_comptime or sema.mod.comp.bin_file.options.strip) return;
_ = try block.addInst(.{
.tag = .dbg_block_end,
.data = undefined,
});
}
fn zirDbgVar(
sema: *Sema,
block: *Block,
inst: Zir.Inst.Index,
air_tag: Air.Inst.Tag,
) CompileError!void {
if (block.is_comptime or sema.mod.comp.bin_file.options.strip) return;
const str_op = sema.code.instructions.items(.data)[inst].str_op;
const operand = try sema.resolveInst(str_op.operand);
const name = str_op.getStr(sema.code);
try sema.addDbgVar(block, operand, air_tag, name);
}
fn addDbgVar(
sema: *Sema,
block: *Block,
operand: Air.Inst.Ref,
air_tag: Air.Inst.Tag,
name: []const u8,
) CompileError!void {
const mod = sema.mod;
const operand_ty = sema.typeOf(operand);
switch (air_tag) {
.dbg_var_ptr => {
if (!(try sema.typeHasRuntimeBits(operand_ty.childType(mod)))) return;
},
.dbg_var_val => {
if (!(try sema.typeHasRuntimeBits(operand_ty))) return;
},
else => unreachable,
}
try sema.queueFullTypeResolution(operand_ty);
// Add the name to the AIR.
const name_extra_index = @intCast(u32, sema.air_extra.items.len);
const elements_used = name.len / 4 + 1;
try sema.air_extra.ensureUnusedCapacity(sema.gpa, elements_used);
const buffer = mem.sliceAsBytes(sema.air_extra.unusedCapacitySlice());
@memcpy(buffer[0..name.len], name);
buffer[name.len] = 0;
sema.air_extra.items.len += elements_used;
_ = try block.addInst(.{
.tag = air_tag,
.data = .{ .pl_op = .{
.payload = name_extra_index,
.operand = operand,
} },
});
}
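// Worked example of the encoding above (illustrative only): a 5-byte name
// "hello" needs 5/4 + 1 = 2 u32 elements (8 bytes), which always leaves room
// for the terminating zero byte; an 8-byte name gets 8/4 + 1 = 3 elements.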
fn zirDeclRef(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const inst_data = sema.code.instructions.items(.data)[inst].str_tok;
const src = inst_data.src();
const decl_name = inst_data.get(sema.code);
const decl_index = try sema.lookupIdentifier(block, src, decl_name);
try sema.addReferencedBy(block, src, decl_index);
return sema.analyzeDeclRef(decl_index);
}
fn zirDeclVal(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const inst_data = sema.code.instructions.items(.data)[inst].str_tok;
const src = inst_data.src();
const decl_name = inst_data.get(sema.code);
const decl = try sema.lookupIdentifier(block, src, decl_name);
return sema.analyzeDeclVal(block, src, decl);
}
fn lookupIdentifier(sema: *Sema, block: *Block, src: LazySrcLoc, name: []const u8) !Decl.Index {
const mod = sema.mod;
var namespace = block.namespace;
while (true) {
if (try sema.lookupInNamespace(block, src, namespace, name, false)) |decl_index| {
return decl_index;
}
namespace = mod.namespacePtr(namespace).parent.unwrap() orelse break;
}
unreachable; // AstGen detects use of undeclared identifier errors.
}
/// This looks up a member of a specific namespace. It is affected by `usingnamespace`,
/// but only by `usingnamespace` declarations in the specified namespace.
fn lookupInNamespace(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
namespace_index: Namespace.Index,
ident_name: []const u8,
observe_usingnamespace: bool,
) CompileError!?Decl.Index {
const mod = sema.mod;
const namespace = mod.namespacePtr(namespace_index);
const namespace_decl_index = namespace.getDeclIndex(mod);
const namespace_decl = sema.mod.declPtr(namespace_decl_index);
if (namespace_decl.analysis == .file_failure) {
try mod.declareDeclDependency(sema.owner_decl_index, namespace_decl_index);
return error.AnalysisFail;
}
if (observe_usingnamespace and namespace.usingnamespace_set.count() != 0) {
const src_file = mod.namespacePtr(block.namespace).file_scope;
const gpa = sema.gpa;
var checked_namespaces: std.AutoArrayHashMapUnmanaged(*Namespace, bool) = .{};
defer checked_namespaces.deinit(gpa);
// Keep track of name conflicts for error notes.
var candidates: std.ArrayListUnmanaged(Decl.Index) = .{};
defer candidates.deinit(gpa);
try checked_namespaces.put(gpa, namespace, namespace.file_scope == src_file);
var check_i: usize = 0;
while (check_i < checked_namespaces.count()) : (check_i += 1) {
const check_ns = checked_namespaces.keys()[check_i];
if (check_ns.decls.getKeyAdapted(ident_name, Module.DeclAdapter{ .mod = mod })) |decl_index| {
// Skip decls which are not marked pub and which are in a different
// file than the `a.b`/`@hasDecl` syntax.
const decl = mod.declPtr(decl_index);
if (decl.is_pub or (src_file == decl.getFileScope(mod) and checked_namespaces.values()[check_i])) {
try candidates.append(gpa, decl_index);
}
}
var it = check_ns.usingnamespace_set.iterator();
while (it.next()) |entry| {
const sub_usingnamespace_decl_index = entry.key_ptr.*;
// Skip the decl we're currently analyzing.
if (sub_usingnamespace_decl_index == sema.owner_decl_index) continue;
const sub_usingnamespace_decl = mod.declPtr(sub_usingnamespace_decl_index);
const sub_is_pub = entry.value_ptr.*;
if (!sub_is_pub and src_file != sub_usingnamespace_decl.getFileScope(mod)) {
// Skip usingnamespace decls which are not marked pub and which are in
// a different file than the `a.b`/`@hasDecl` syntax.
continue;
}
try sema.ensureDeclAnalyzed(sub_usingnamespace_decl_index);
const ns_ty = sub_usingnamespace_decl.val.castTag(.ty).?.data;
const sub_ns = ns_ty.getNamespace(mod).?;
try checked_namespaces.put(gpa, sub_ns, src_file == sub_usingnamespace_decl.getFileScope(mod));
}
}
{
var i: usize = 0;
while (i < candidates.items.len) {
if (candidates.items[i] == sema.owner_decl_index) {
_ = candidates.orderedRemove(i);
} else {
i += 1;
}
}
}
switch (candidates.items.len) {
0 => {},
1 => {
const decl_index = candidates.items[0];
try mod.declareDeclDependency(sema.owner_decl_index, decl_index);
return decl_index;
},
else => {
const msg = msg: {
const msg = try sema.errMsg(block, src, "ambiguous reference", .{});
errdefer msg.destroy(gpa);
for (candidates.items) |candidate_index| {
const candidate = mod.declPtr(candidate_index);
const src_loc = candidate.srcLoc(mod);
try mod.errNoteNonLazy(src_loc, msg, "declared here", .{});
}
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
},
}
} else if (namespace.decls.getKeyAdapted(ident_name, Module.DeclAdapter{ .mod = mod })) |decl_index| {
try mod.declareDeclDependency(sema.owner_decl_index, decl_index);
return decl_index;
}
log.debug("{*} ({s}) depends on non-existence of '{s}' in {*} ({s})", .{
sema.owner_decl, sema.owner_decl.name, ident_name, namespace_decl, namespace_decl.name,
});
// TODO This dependency is too strong. Really, it should only be a dependency
// on the non-existence of `ident_name` in the namespace. We can lessen the number of
// outdated declarations by making this dependency more sophisticated.
try mod.declareDeclDependency(sema.owner_decl_index, namespace_decl_index);
return null;
}
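// Illustrative only: the candidate collection above is what produces the
// "ambiguous reference" error for
//
//     const S = struct {
//         pub usingnamespace struct { pub const x = 1; };
//         pub usingnamespace struct { pub const x = 2; };
//     };
//     comptime { _ = S.x; } // error: ambiguous reference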
fn funcDeclSrc(sema: *Sema, func_inst: Air.Inst.Ref) !?*Decl {
const mod = sema.mod;
const func_val = (try sema.resolveMaybeUndefVal(func_inst)) orelse return null;
if (func_val.isUndef(mod)) return null;
const owner_decl_index = switch (func_val.tag()) {
.extern_fn => func_val.castTag(.extern_fn).?.data.owner_decl,
.function => func_val.castTag(.function).?.data.owner_decl,
.decl_ref => mod.declPtr(func_val.castTag(.decl_ref).?.data).val.castTag(.function).?.data.owner_decl,
else => return null,
};
return mod.declPtr(owner_decl_index);
}
pub fn analyzeSaveErrRetIndex(sema: *Sema, block: *Block) SemaError!Air.Inst.Ref {
const src = sema.src;
if (!sema.mod.backendSupportsFeature(.error_return_trace)) return .none;
if (!sema.mod.comp.bin_file.options.error_return_tracing) return .none;
if (block.is_comptime)
return .none;
const unresolved_stack_trace_ty = sema.getBuiltinType("StackTrace") catch |err| switch (err) {
error.NeededSourceLocation, error.GenericPoison, error.ComptimeReturn, error.ComptimeBreak => unreachable,
else => |e| return e,
};
const stack_trace_ty = sema.resolveTypeFields(unresolved_stack_trace_ty) catch |err| switch (err) {
error.NeededSourceLocation, error.GenericPoison, error.ComptimeReturn, error.ComptimeBreak => unreachable,
else => |e| return e,
};
const field_index = sema.structFieldIndex(block, stack_trace_ty, "index", src) catch |err| switch (err) {
error.NeededSourceLocation, error.GenericPoison, error.ComptimeReturn, error.ComptimeBreak => unreachable,
else => |e| return e,
};
return try block.addInst(.{
.tag = .save_err_return_trace_index,
.data = .{ .ty_pl = .{
.ty = try sema.addType(stack_trace_ty),
.payload = @intCast(u32, field_index),
} },
});
}
/// Adds instructions to `block` to "pop" the error return trace.
/// If `operand` is provided, the trace is popped only when `operand` is non-error.
fn popErrorReturnTrace(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
operand: Air.Inst.Ref,
saved_error_trace_index: Air.Inst.Ref,
) CompileError!void {
const mod = sema.mod;
var is_non_error: ?bool = null;
var is_non_error_inst: Air.Inst.Ref = undefined;
if (operand != .none) {
is_non_error_inst = try sema.analyzeIsNonErr(block, src, operand);
if (try sema.resolveDefinedValue(block, src, is_non_error_inst)) |cond_val|
is_non_error = cond_val.toBool(mod);
} else is_non_error = true; // no operand means pop unconditionally
if (is_non_error == true) {
// AstGen determined this result does not go to an error-handling expr (try/catch/return etc.), or
// the result is comptime-known to be a non-error. Either way, pop unconditionally.
const unresolved_stack_trace_ty = try sema.getBuiltinType("StackTrace");
const stack_trace_ty = try sema.resolveTypeFields(unresolved_stack_trace_ty);
const ptr_stack_trace_ty = try mod.singleMutPtrType(stack_trace_ty);
const err_return_trace = try block.addTy(.err_return_trace, ptr_stack_trace_ty);
const field_ptr = try sema.structFieldPtr(block, src, err_return_trace, "index", src, stack_trace_ty, true);
try sema.storePtr2(block, src, field_ptr, src, saved_error_trace_index, src, .store);
} else if (is_non_error == null) {
// The result might be an error. If it is, we leave the error trace alone. If it isn't, we need
// to pop any error trace that may have been propagated from our arguments.
try sema.air_extra.ensureUnusedCapacity(sema.gpa, @typeInfo(Air.Block).Struct.fields.len);
const cond_block_inst = try block.addInstAsIndex(.{
.tag = .block,
.data = .{
.ty_pl = .{
.ty = Air.Inst.Ref.void_type,
.payload = undefined, // updated below
},
},
});
var then_block = block.makeSubBlock();
defer then_block.instructions.deinit(sema.gpa);
// If non-error, then pop the error return trace by restoring the index.
const unresolved_stack_trace_ty = try sema.getBuiltinType("StackTrace");
const stack_trace_ty = try sema.resolveTypeFields(unresolved_stack_trace_ty);
const ptr_stack_trace_ty = try mod.singleMutPtrType(stack_trace_ty);
const err_return_trace = try then_block.addTy(.err_return_trace, ptr_stack_trace_ty);
const field_ptr = try sema.structFieldPtr(&then_block, src, err_return_trace, "index", src, stack_trace_ty, true);
try sema.storePtr2(&then_block, src, field_ptr, src, saved_error_trace_index, src, .store);
_ = try then_block.addBr(cond_block_inst, Air.Inst.Ref.void_value);
// Otherwise, do nothing
var else_block = block.makeSubBlock();
defer else_block.instructions.deinit(sema.gpa);
_ = try else_block.addBr(cond_block_inst, Air.Inst.Ref.void_value);
try sema.air_extra.ensureUnusedCapacity(sema.gpa, @typeInfo(Air.CondBr).Struct.fields.len +
then_block.instructions.items.len + else_block.instructions.items.len +
@typeInfo(Air.Block).Struct.fields.len + 1); // +1 for the sole .cond_br instruction in the .block
const cond_br_inst = @intCast(Air.Inst.Index, sema.air_instructions.len);
try sema.air_instructions.append(sema.gpa, .{ .tag = .cond_br, .data = .{ .pl_op = .{
.operand = is_non_error_inst,
.payload = sema.addExtraAssumeCapacity(Air.CondBr{
.then_body_len = @intCast(u32, then_block.instructions.items.len),
.else_body_len = @intCast(u32, else_block.instructions.items.len),
}),
} } });
sema.air_extra.appendSliceAssumeCapacity(then_block.instructions.items);
sema.air_extra.appendSliceAssumeCapacity(else_block.instructions.items);
sema.air_instructions.items(.data)[cond_block_inst].ty_pl.payload = sema.addExtraAssumeCapacity(Air.Block{ .body_len = 1 });
sema.air_extra.appendAssumeCapacity(cond_br_inst);
}
}
fn zirCall(
sema: *Sema,
block: *Block,
inst: Zir.Inst.Index,
comptime kind: enum { direct, field },
) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const callee_src: LazySrcLoc = .{ .node_offset_call_func = inst_data.src_node };
const call_src = inst_data.src();
const ExtraType = switch (kind) {
.direct => Zir.Inst.Call,
.field => Zir.Inst.FieldCall,
};
const extra = sema.code.extraData(ExtraType, inst_data.payload_index);
const args_len = extra.data.flags.args_len;
const modifier = @intToEnum(std.builtin.CallModifier, extra.data.flags.packed_modifier);
const ensure_result_used = extra.data.flags.ensure_result_used;
const pop_error_return_trace = extra.data.flags.pop_error_return_trace;
const callee: ResolvedFieldCallee = switch (kind) {
.direct => .{ .direct = try sema.resolveInst(extra.data.callee) },
.field => blk: {
const object_ptr = try sema.resolveInst(extra.data.obj_ptr);
const field_name = sema.code.nullTerminatedString(extra.data.field_name_start);
const field_name_src: LazySrcLoc = .{ .node_offset_field_name = inst_data.src_node };
break :blk try sema.fieldCallBind(block, callee_src, object_ptr, field_name, field_name_src);
},
};
var resolved_args: []Air.Inst.Ref = undefined;
var bound_arg_src: ?LazySrcLoc = null;
var func: Air.Inst.Ref = undefined;
var arg_index: u32 = 0;
switch (callee) {
.direct => |func_inst| {
resolved_args = try sema.arena.alloc(Air.Inst.Ref, args_len);
func = func_inst;
},
.method => |method| {
resolved_args = try sema.arena.alloc(Air.Inst.Ref, args_len + 1);
func = method.func_inst;
resolved_args[0] = method.arg0_inst;
arg_index += 1;
bound_arg_src = callee_src;
},
}
const callee_ty = sema.typeOf(func);
const total_args = args_len + @boolToInt(bound_arg_src != null);
const func_ty = try sema.checkCallArgumentCount(block, func, callee_src, callee_ty, total_args, bound_arg_src != null);
const args_body = sema.code.extra[extra.end..];
var input_is_error = false;
const block_index = @intCast(Air.Inst.Index, block.instructions.items.len);
const func_ty_info = mod.typeToFunc(func_ty).?;
const fn_params_len = func_ty_info.param_types.len;
const parent_comptime = block.is_comptime;
// `extra_index` and `arg_index` are separate since the bound function is passed as the first argument.
var extra_index: usize = 0;
var arg_start: u32 = args_len;
while (extra_index < args_len) : ({
extra_index += 1;
arg_index += 1;
}) {
const arg_end = sema.code.extra[extra.end + extra_index];
defer arg_start = arg_end;
// Generate args to comptime params in comptime block.
defer block.is_comptime = parent_comptime;
if (arg_index < fn_params_len and func_ty_info.paramIsComptime(@intCast(u5, arg_index))) {
block.is_comptime = true;
// TODO set comptime_reason
}
sema.inst_map.putAssumeCapacity(inst, inst: {
if (arg_index >= fn_params_len)
break :inst Air.Inst.Ref.var_args_param_type;
if (func_ty_info.param_types[arg_index] == .generic_poison_type)
break :inst Air.Inst.Ref.generic_poison_type;
break :inst try sema.addType(func_ty_info.param_types[arg_index].toType());
});
const resolved = try sema.resolveBody(block, args_body[arg_start..arg_end], inst);
const resolved_ty = sema.typeOf(resolved);
if (resolved_ty.zigTypeTag(mod) == .NoReturn) {
return resolved;
}
if (resolved_ty.isError(mod)) {
input_is_error = true;
}
resolved_args[arg_index] = resolved;
}
if (sema.owner_func == null or !sema.owner_func.?.calls_or_awaits_errorable_fn) {
input_is_error = false; // input was an error type, but no errorable fns were actually called
}
// AstGen ensures that a call instruction is always preceded by a dbg_stmt instruction.
const call_dbg_node = inst - 1;
if (sema.mod.backendSupportsFeature(.error_return_trace) and sema.mod.comp.bin_file.options.error_return_tracing and
!block.is_comptime and !block.is_typeof and (input_is_error or pop_error_return_trace))
{
const call_inst: Air.Inst.Ref = if (modifier == .always_tail) undefined else b: {
break :b try sema.analyzeCall(block, func, func_ty, callee_src, call_src, modifier, ensure_result_used, resolved_args, bound_arg_src, call_dbg_node);
};
const return_ty = sema.typeOf(call_inst);
if (modifier != .always_tail and return_ty.isNoReturn(mod))
return call_inst; // call to "fn(...) noreturn", don't pop
// If any input is an error type, we might need to pop any trace it generated. Otherwise, we only
// need to clean up our own trace if we were passed to a non-error-handling expression.
if (input_is_error or (pop_error_return_trace and modifier != .always_tail and return_ty.isError(mod))) {
const unresolved_stack_trace_ty = try sema.getBuiltinType("StackTrace");
const stack_trace_ty = try sema.resolveTypeFields(unresolved_stack_trace_ty);
const field_index = try sema.structFieldIndex(block, stack_trace_ty, "index", call_src);
// Insert a save instruction before the arg resolution + call instructions we just generated
const save_inst = try block.insertInst(block_index, .{
.tag = .save_err_return_trace_index,
.data = .{ .ty_pl = .{
.ty = try sema.addType(stack_trace_ty),
.payload = @intCast(u32, field_index),
} },
});
// Pop the error return trace, testing the result for non-error if necessary
const operand = if (pop_error_return_trace or modifier == .always_tail) .none else call_inst;
try sema.popErrorReturnTrace(block, call_src, operand, save_inst);
}
if (modifier == .always_tail) // Perform the call *after* the restore, so that a tail call is possible.
return sema.analyzeCall(block, func, func_ty, callee_src, call_src, modifier, ensure_result_used, resolved_args, bound_arg_src, call_dbg_node);
return call_inst;
} else {
return sema.analyzeCall(block, func, func_ty, callee_src, call_src, modifier, ensure_result_used, resolved_args, bound_arg_src, call_dbg_node);
}
}
fn checkCallArgumentCount(
sema: *Sema,
block: *Block,
func: Air.Inst.Ref,
func_src: LazySrcLoc,
callee_ty: Type,
total_args: usize,
member_fn: bool,
) !Type {
const mod = sema.mod;
const func_ty = func_ty: {
switch (callee_ty.zigTypeTag(mod)) {
.Fn => break :func_ty callee_ty,
.Pointer => {
const ptr_info = callee_ty.ptrInfo(mod);
if (ptr_info.size == .One and ptr_info.pointee_type.zigTypeTag(mod) == .Fn) {
break :func_ty ptr_info.pointee_type;
}
},
.Optional => {
const opt_child = callee_ty.optionalChild(mod);
if (opt_child.zigTypeTag(mod) == .Fn or (opt_child.isSinglePointer(mod) and
opt_child.childType(mod).zigTypeTag(mod) == .Fn))
{
const msg = msg: {
const msg = try sema.errMsg(block, func_src, "cannot call optional type '{}'", .{
callee_ty.fmt(sema.mod),
});
errdefer msg.destroy(sema.gpa);
try sema.errNote(block, func_src, msg, "consider using '.?', 'orelse' or 'if'", .{});
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
},
else => {},
}
return sema.fail(block, func_src, "type '{}' not a function", .{callee_ty.fmt(sema.mod)});
};
const func_ty_info = mod.typeToFunc(func_ty).?;
const fn_params_len = func_ty_info.param_types.len;
const args_len = total_args - @boolToInt(member_fn);
if (func_ty_info.is_var_args) {
assert(func_ty_info.cc == .C);
if (total_args >= fn_params_len) return func_ty;
} else if (fn_params_len == total_args) {
return func_ty;
}
const maybe_decl = try sema.funcDeclSrc(func);
const member_str = if (member_fn) "member function " else "";
const variadic_str = if (func_ty_info.is_var_args) "at least " else "";
const msg = msg: {
const msg = try sema.errMsg(
block,
func_src,
"{s}expected {s}{d} argument(s), found {d}",
.{
member_str,
variadic_str,
fn_params_len - @boolToInt(member_fn),
args_len,
},
);
errdefer msg.destroy(sema.gpa);
if (maybe_decl) |fn_decl| try sema.mod.errNoteNonLazy(fn_decl.srcLoc(mod), msg, "function declared here", .{});
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
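// Illustrative only: for a member call the bound `self` argument counts toward
// `total_args`, so
//
//     const S = struct {
//         fn add(self: S, x: u32) u32 { return x; }
//     };
//
// calling `s.add()` reports "member function expected 1 argument(s), found 0".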
fn callBuiltin(
sema: *Sema,
block: *Block,
builtin_fn: Air.Inst.Ref,
modifier: std.builtin.CallModifier,
args: []const Air.Inst.Ref,
) !void {
const mod = sema.mod;
const callee_ty = sema.typeOf(builtin_fn);
const func_ty = func_ty: {
switch (callee_ty.zigTypeTag(mod)) {
.Fn => break :func_ty callee_ty,
.Pointer => {
const ptr_info = callee_ty.ptrInfo(mod);
if (ptr_info.size == .One and ptr_info.pointee_type.zigTypeTag(mod) == .Fn) {
break :func_ty ptr_info.pointee_type;
}
},
else => {},
}
std.debug.panic("type '{}' is not a function calling builtin fn", .{callee_ty.fmt(sema.mod)});
};
const func_ty_info = mod.typeToFunc(func_ty).?;
const fn_params_len = func_ty_info.param_types.len;
if (args.len != fn_params_len or (func_ty_info.is_var_args and args.len < fn_params_len)) {
std.debug.panic("parameter count mismatch calling builtin fn, expected {d}, found {d}", .{ fn_params_len, args.len });
}
_ = try sema.analyzeCall(block, builtin_fn, func_ty, sema.src, sema.src, modifier, false, args, null, null);
}
const GenericCallAdapter = struct {
generic_fn: *Module.Fn,
precomputed_hash: u64,
func_ty_info: InternPool.Key.FuncType,
args: []const Arg,
module: *Module,
const Arg = struct {
ty: Type,
val: Value,
is_anytype: bool,
};
pub fn eql(ctx: @This(), adapted_key: void, other_key: *Module.Fn) bool {
_ = adapted_key;
// Checking for equality may happen on an item that has been inserted
// into the map but is not yet fully initialized. In such a case, the
// two initialized fields are `hash` and `generic_owner_decl`.
if (ctx.generic_fn.owner_decl != other_key.generic_owner_decl.unwrap().?) return false;
const other_comptime_args = other_key.comptime_args.?;
for (other_comptime_args[0..ctx.func_ty_info.param_types.len], 0..) |other_arg, i| {
const this_arg = ctx.args[i];
const this_is_comptime = !this_arg.val.isGenericPoison();
const other_is_comptime = !other_arg.val.isGenericPoison();
const this_is_anytype = this_arg.is_anytype;
const other_is_anytype = other_key.isAnytypeParam(ctx.module, @intCast(u32, i));
if (other_is_anytype != this_is_anytype) return false;
if (other_is_comptime != this_is_comptime) return false;
if (this_is_anytype) {
// Both are anytype parameters.
if (!this_arg.ty.eql(other_arg.ty, ctx.module)) {
return false;
}
if (this_is_comptime) {
// Both are comptime and anytype parameters with matching types.
if (!this_arg.val.eql(other_arg.val, other_arg.ty, ctx.module)) {
return false;
}
}
} else if (this_is_comptime) {
// Both are comptime parameters but not anytype parameters.
// We assert no error is possible here because any lazy values must be resolved
// before inserting into the generic function hash map.
const is_eql = Value.eqlAdvanced(
this_arg.val,
this_arg.ty,
other_arg.val,
other_arg.ty,
ctx.module,
null,
) catch unreachable;
if (!is_eql) {
return false;
}
}
}
return true;
}
/// The implementation of the hash is in semantic analysis of function calls, so
/// that any errors when computing the hash can be properly reported.
pub fn hash(ctx: @This(), adapted_key: void) u64 {
_ = adapted_key;
return ctx.precomputed_hash;
}
};
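// Illustrative only: the adapter above makes the first two calls below hit the
// same memoized instantiation, while the third creates a new one:
//
//     fn id(comptime T: type, x: T) T { return x; }
//     _ = id(u32, 1);
//     _ = id(u32, 2); // same comptime args (T = u32) -> existing instantiation
//     _ = id(u8, 3);  // different comptime args -> new instantiation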
fn analyzeCall(
sema: *Sema,
block: *Block,
func: Air.Inst.Ref,
func_ty: Type,
func_src: LazySrcLoc,
call_src: LazySrcLoc,
modifier: std.builtin.CallModifier,
ensure_result_used: bool,
uncasted_args: []const Air.Inst.Ref,
bound_arg_src: ?LazySrcLoc,
call_dbg_node: ?Zir.Inst.Index,
) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const callee_ty = sema.typeOf(func);
const func_ty_info = mod.typeToFunc(func_ty).?;
const fn_params_len = func_ty_info.param_types.len;
const cc = func_ty_info.cc;
if (cc == .Naked) {
const maybe_decl = try sema.funcDeclSrc(func);
const msg = msg: {
const msg = try sema.errMsg(
block,
func_src,
"unable to call function with naked calling convention",
.{},
);
errdefer msg.destroy(sema.gpa);
if (maybe_decl) |fn_decl| try sema.mod.errNoteNonLazy(fn_decl.srcLoc(mod), msg, "function declared here", .{});
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
const call_tag: Air.Inst.Tag = switch (modifier) {
.auto,
.always_inline,
.compile_time,
.no_async,
=> Air.Inst.Tag.call,
.never_tail => Air.Inst.Tag.call_never_tail,
.never_inline => Air.Inst.Tag.call_never_inline,
.always_tail => Air.Inst.Tag.call_always_tail,
.async_kw => return sema.failWithUseOfAsync(block, call_src),
};
if (modifier == .never_inline and func_ty_info.cc == .Inline) {
return sema.fail(block, call_src, "'never_inline' call of inline function", .{});
}
if (modifier == .always_inline and func_ty_info.is_noinline) {
return sema.fail(block, call_src, "'always_inline' call of noinline function", .{});
}
const gpa = sema.gpa;
var is_generic_call = func_ty_info.is_generic;
var is_comptime_call = block.is_comptime or modifier == .compile_time;
var comptime_reason_buf: Block.ComptimeReason = undefined;
var comptime_reason: ?*const Block.ComptimeReason = null;
if (!is_comptime_call) {
if (sema.typeRequiresComptime(func_ty_info.return_type.toType())) |ct| {
is_comptime_call = ct;
if (ct) {
// stage1 can't handle doing this directly
comptime_reason_buf = .{ .comptime_ret_ty = .{
.block = block,
.func = func,
.func_src = func_src,
.return_ty = func_ty_info.return_type.toType(),
} };
comptime_reason = &comptime_reason_buf;
}
} else |err| switch (err) {
error.GenericPoison => is_generic_call = true,
else => |e| return e,
}
}
var is_inline_call = is_comptime_call or modifier == .always_inline or
func_ty_info.cc == .Inline;
if (!is_inline_call and is_generic_call) {
if (sema.instantiateGenericCall(
block,
func,
func_src,
call_src,
func_ty_info,
ensure_result_used,
uncasted_args,
call_tag,
bound_arg_src,
call_dbg_node,
)) |some| {
return some;
} else |err| switch (err) {
error.GenericPoison => {
is_inline_call = true;
},
error.ComptimeReturn => {
is_inline_call = true;
is_comptime_call = true;
// stage1 can't handle doing this directly
comptime_reason_buf = .{ .comptime_ret_ty = .{
.block = block,
.func = func,
.func_src = func_src,
.return_ty = func_ty_info.return_type.toType(),
} };
comptime_reason = &comptime_reason_buf;
},
else => |e| return e,
}
}
if (is_comptime_call and modifier == .never_inline) {
return sema.fail(block, call_src, "unable to perform 'never_inline' call at compile-time", .{});
}
const result: Air.Inst.Ref = if (is_inline_call) res: {
const func_val = sema.resolveConstValue(block, func_src, func, "function being called at comptime must be comptime-known") catch |err| {
if (err == error.AnalysisFail and comptime_reason != null) try comptime_reason.?.explain(sema, sema.err);
return err;
};
const module_fn = switch (func_val.tag()) {
.decl_ref => mod.declPtr(func_val.castTag(.decl_ref).?.data).val.castTag(.function).?.data,
.function => func_val.castTag(.function).?.data,
.extern_fn => return sema.fail(block, call_src, "{s} call of extern function", .{
@as([]const u8, if (is_comptime_call) "comptime" else "inline"),
}),
else => {
assert(callee_ty.isPtrAtRuntime(mod));
return sema.fail(block, call_src, "{s} call of function pointer", .{
@as([]const u8, if (is_comptime_call) "comptime" else "inline"),
});
},
};
if (func_ty_info.is_var_args) {
return sema.fail(block, call_src, "{s} call of variadic function", .{
@as([]const u8, if (is_comptime_call) "comptime" else "inline"),
});
}
// Analyze the ZIR. The same ZIR gets analyzed into a runtime function
// or an inlined call depending on what union tag the `label` field is
// set to in the `Block`.
// This block instruction will be used to capture the return value from the
// inlined function.
const block_inst = @intCast(Air.Inst.Index, sema.air_instructions.len);
try sema.air_instructions.append(gpa, .{
.tag = .block,
.data = undefined,
});
// This one is shared among sub-blocks within the same callee, but not
// shared among the entire inline/comptime call stack.
var inlining: Block.Inlining = .{
.func = null,
.comptime_result = undefined,
.merges = .{
.src_locs = .{},
.results = .{},
.br_list = .{},
.block_inst = block_inst,
},
};
// In order to save a bit of stack space, directly modify Sema rather
// than create a child one.
const parent_zir = sema.code;
const fn_owner_decl = mod.declPtr(module_fn.owner_decl);
sema.code = fn_owner_decl.getFileScope(mod).zir;
defer sema.code = parent_zir;
try mod.declareDeclDependencyType(sema.owner_decl_index, module_fn.owner_decl, .function_body);
const parent_inst_map = sema.inst_map;
sema.inst_map = .{};
defer {
sema.src = call_src;
sema.inst_map.deinit(gpa);
sema.inst_map = parent_inst_map;
}
const parent_func = sema.func;
sema.func = module_fn;
defer sema.func = parent_func;
const parent_err_ret_index = sema.error_return_trace_index_on_fn_entry;
sema.error_return_trace_index_on_fn_entry = block.error_return_trace_index;
defer sema.error_return_trace_index_on_fn_entry = parent_err_ret_index;
var wip_captures = try WipCaptureScope.init(gpa, sema.perm_arena, fn_owner_decl.src_scope);
defer wip_captures.deinit();
var child_block: Block = .{
.parent = null,
.sema = sema,
.src_decl = module_fn.owner_decl,
.namespace = fn_owner_decl.src_namespace,
.wip_capture_scope = wip_captures.scope,
.instructions = .{},
.label = null,
.inlining = &inlining,
.is_typeof = block.is_typeof,
.is_comptime = is_comptime_call,
.comptime_reason = comptime_reason,
.error_return_trace_index = block.error_return_trace_index,
};
const merges = &child_block.inlining.?.merges;
defer child_block.instructions.deinit(gpa);
defer merges.deinit(gpa);
// If it's a comptime function call, we need to memoize it as long as no external
// comptime memory is mutated.
var memoized_call_key: Module.MemoizedCall.Key = undefined;
var delete_memoized_call_key = false;
defer if (delete_memoized_call_key) gpa.free(memoized_call_key.args);
if (is_comptime_call) {
memoized_call_key = .{
.func = module_fn,
.args = try gpa.alloc(TypedValue, func_ty_info.param_types.len),
};
delete_memoized_call_key = true;
}
try sema.emitBackwardBranch(block, call_src);
// Whether this call should be memoized; set to false if the call can mutate
// comptime state.
var should_memoize = true;
var new_fn_info = mod.typeToFunc(fn_owner_decl.ty).?;
new_fn_info.param_types = try sema.arena.alloc(InternPool.Index, new_fn_info.param_types.len);
new_fn_info.comptime_bits = 0;
// This will have return instructions analyzed as break instructions to
// the block_inst above. Here we are performing "comptime/inline semantic analysis"
// for a function body, which means we must map the parameter ZIR instructions to
// the AIR instructions of the callsite. The callee could be a generic function
// which means its parameter type expressions must be resolved in order and used
// to successively coerce the arguments.
const fn_info = sema.code.getFnInfo(module_fn.zir_body_inst);
try sema.inst_map.ensureSpaceForInstructions(sema.gpa, fn_info.param_body);
var has_comptime_args = false;
var arg_i: usize = 0;
for (fn_info.param_body) |inst| {
sema.analyzeInlineCallArg(
block,
&child_block,
.unneeded,
inst,
new_fn_info,
&arg_i,
uncasted_args,
is_comptime_call,
&should_memoize,
memoized_call_key,
func_ty_info.param_types,
func,
&has_comptime_args,
) catch |err| switch (err) {
error.NeededSourceLocation => {
_ = sema.inst_map.remove(inst);
const decl = sema.mod.declPtr(block.src_decl);
try sema.analyzeInlineCallArg(
block,
&child_block,
mod.argSrc(call_src.node_offset.x, decl, arg_i, bound_arg_src),
inst,
new_fn_info,
&arg_i,
uncasted_args,
is_comptime_call,
&should_memoize,
memoized_call_key,
func_ty_info.param_types,
func,
&has_comptime_args,
);
unreachable;
},
else => |e| return e,
};
}
if (!has_comptime_args and module_fn.state == .sema_failure) return error.AnalysisFail;
const recursive_msg = "inline call is recursive";
var head = if (!has_comptime_args) block else null;
while (head) |some| {
const parent_inlining = some.inlining orelse break;
if (parent_inlining.func == module_fn) {
return sema.fail(block, call_src, recursive_msg, .{});
}
head = some.parent;
}
if (!has_comptime_args) inlining.func = module_fn;
// In case it is a generic function with an expression for the return type that depends
// on parameters, we must now do the same for the return type as we just did with
// each of the parameters, resolving the return type and providing it to the child
// `Sema` so that it can be used for the `ret_ptr` instruction.
const ret_ty_inst = if (fn_info.ret_ty_body.len != 0)
try sema.resolveBody(&child_block, fn_info.ret_ty_body, module_fn.zir_body_inst)
else
try sema.resolveInst(fn_info.ret_ty_ref);
const ret_ty_src: LazySrcLoc = .{ .node_offset_fn_type_ret_ty = 0 };
const bare_return_type = try sema.analyzeAsType(&child_block, ret_ty_src, ret_ty_inst);
// Create a fresh inferred error set type for inline/comptime calls.
const fn_ret_ty = blk: {
if (module_fn.hasInferredErrorSet(mod)) {
const ies_index = try mod.intern_pool.createInferredErrorSet(gpa, .{
.func = module_fn,
});
const error_set_ty = try mod.intern(.{ .inferred_error_set_type = ies_index });
break :blk try mod.errorUnionType(error_set_ty.toType(), bare_return_type);
}
break :blk bare_return_type;
};
new_fn_info.return_type = fn_ret_ty.ip_index;
const parent_fn_ret_ty = sema.fn_ret_ty;
sema.fn_ret_ty = fn_ret_ty;
defer sema.fn_ret_ty = parent_fn_ret_ty;
// This `res2` is here instead of directly breaking from `res` due to a stage1
// bug generating invalid LLVM IR.
const res2: Air.Inst.Ref = res2: {
if (should_memoize and is_comptime_call) {
if (mod.memoized_calls.getContext(memoized_call_key, .{ .module = mod })) |result| {
const ty_inst = try sema.addType(fn_ret_ty);
try sema.air_values.append(gpa, result.val);
sema.air_instructions.set(block_inst, .{
.tag = .constant,
.data = .{ .ty_pl = .{
.ty = ty_inst,
.payload = @intCast(u32, sema.air_values.items.len - 1),
} },
});
break :res2 Air.indexToRef(block_inst);
}
}
const new_func_resolved_ty = try mod.funcType(new_fn_info);
if (!is_comptime_call and !block.is_typeof) {
try sema.emitDbgInline(block, parent_func.?, module_fn, new_func_resolved_ty, .dbg_inline_begin);
const zir_tags = sema.code.instructions.items(.tag);
for (fn_info.param_body) |param| switch (zir_tags[param]) {
.param, .param_comptime => {
const inst_data = sema.code.instructions.items(.data)[param].pl_tok;
const extra = sema.code.extraData(Zir.Inst.Param, inst_data.payload_index);
const param_name = sema.code.nullTerminatedString(extra.data.name);
const inst = sema.inst_map.get(param).?;
try sema.addDbgVar(&child_block, inst, .dbg_var_val, param_name);
},
.param_anytype, .param_anytype_comptime => {
const inst_data = sema.code.instructions.items(.data)[param].str_tok;
const param_name = inst_data.get(sema.code);
const inst = sema.inst_map.get(param).?;
try sema.addDbgVar(&child_block, inst, .dbg_var_val, param_name);
},
else => continue,
};
}
if (is_comptime_call and ensure_result_used) {
try sema.ensureResultUsed(block, fn_ret_ty, call_src);
}
const result = result: {
sema.analyzeBody(&child_block, fn_info.body) catch |err| switch (err) {
error.ComptimeReturn => break :result inlining.comptime_result,
error.AnalysisFail => {
const err_msg = sema.err orelse return err;
if (std.mem.eql(u8, err_msg.msg, recursive_msg)) return err;
try sema.errNote(block, call_src, err_msg, "called from here", .{});
err_msg.clearTrace(sema.gpa);
return err;
},
else => |e| return e,
};
break :result try sema.analyzeBlockBody(block, call_src, &child_block, merges);
};
if (!is_comptime_call and !block.is_typeof and sema.typeOf(result).zigTypeTag(mod) != .NoReturn) {
try sema.emitDbgInline(
block,
module_fn,
parent_func.?,
mod.declPtr(parent_func.?.owner_decl).ty,
.dbg_inline_end,
);
}
if (should_memoize and is_comptime_call) {
const result_val = try sema.resolveConstMaybeUndefVal(block, .unneeded, result, "");
// TODO: check whether any external comptime memory was mutated by the
// comptime function call. If so, then do not memoize the call here.
// TODO: re-evaluate whether memoized_calls needs its own arena. I think
// it should be fine to use the Decl arena for the function.
{
var arena_allocator = std.heap.ArenaAllocator.init(gpa);
errdefer arena_allocator.deinit();
const arena = arena_allocator.allocator();
for (memoized_call_key.args) |*arg| {
arg.* = try arg.*.copy(arena);
}
try mod.memoized_calls.putContext(gpa, memoized_call_key, .{
.val = try result_val.copy(arena),
.arena = arena_allocator.state,
}, .{ .module = mod });
delete_memoized_call_key = false;
}
}
break :res2 result;
};
try wip_captures.finalize();
break :res res2;
} else res: {
assert(!func_ty_info.is_generic);
const args = try sema.arena.alloc(Air.Inst.Ref, uncasted_args.len);
const fn_info = mod.typeToFunc(func_ty).?;
for (uncasted_args, 0..) |uncasted_arg, i| {
if (i < fn_params_len) {
const opts: CoerceOpts = .{ .param_src = .{
.func_inst = func,
.param_i = @intCast(u32, i),
} };
const param_ty = fn_info.param_types[i].toType();
args[i] = sema.analyzeCallArg(
block,
.unneeded,
param_ty,
uncasted_arg,
opts,
) catch |err| switch (err) {
error.NeededSourceLocation => {
const decl = sema.mod.declPtr(block.src_decl);
_ = try sema.analyzeCallArg(
block,
mod.argSrc(call_src.node_offset.x, decl, i, bound_arg_src),
param_ty,
uncasted_arg,
opts,
);
unreachable;
},
else => |e| return e,
};
} else {
args[i] = sema.coerceVarArgParam(block, uncasted_arg, .unneeded) catch |err| switch (err) {
error.NeededSourceLocation => {
const decl = sema.mod.declPtr(block.src_decl);
_ = try sema.coerceVarArgParam(
block,
uncasted_arg,
mod.argSrc(call_src.node_offset.x, decl, i, bound_arg_src),
);
unreachable;
},
else => |e| return e,
};
}
}
if (call_dbg_node) |some| try sema.zirDbgStmt(block, some);
try sema.queueFullTypeResolution(func_ty_info.return_type.toType());
if (sema.owner_func != null and func_ty_info.return_type.toType().isError(mod)) {
sema.owner_func.?.calls_or_awaits_errorable_fn = true;
}
if (try sema.resolveMaybeUndefVal(func)) |func_val| {
if (func_val.castTag(.function)) |func_obj| {
try sema.mod.ensureFuncBodyAnalysisQueued(func_obj.data);
}
}
try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Call).Struct.fields.len +
args.len);
const func_inst = try block.addInst(.{
.tag = call_tag,
.data = .{ .pl_op = .{
.operand = func,
.payload = sema.addExtraAssumeCapacity(Air.Call{
.args_len = @intCast(u32, args.len),
}),
} },
});
sema.appendRefsAssumeCapacity(args);
if (call_tag == .call_always_tail) {
if (ensure_result_used) {
try sema.ensureResultUsed(block, sema.typeOf(func_inst), call_src);
}
return sema.handleTailCall(block, call_src, func_ty, func_inst);
} else if (block.wantSafety() and func_ty_info.return_type == .noreturn_type) {
// Function pointers and extern functions aren't guaranteed to
// actually be noreturn so we add a safety check for them.
check: {
var func_val = (try sema.resolveMaybeUndefVal(func)) orelse break :check;
switch (func_val.tag()) {
.function, .decl_ref => {
_ = try block.addNoOp(.unreach);
return Air.Inst.Ref.unreachable_value;
},
else => break :check,
}
}
try sema.safetyPanic(block, .noreturn_returned);
return Air.Inst.Ref.unreachable_value;
} else if (func_ty_info.return_type == .noreturn_type) {
_ = try block.addNoOp(.unreach);
return Air.Inst.Ref.unreachable_value;
}
break :res func_inst;
};
if (ensure_result_used) {
try sema.ensureResultUsed(block, sema.typeOf(result), call_src);
}
return result;
}
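/// Lowers `@call(.always_tail, ...)`: the tail call becomes a `ret` of the
/// call result, which requires that the backend support tail calls and that
/// the callee's function type match the calling function's.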
fn handleTailCall(sema: *Sema, block: *Block, call_src: LazySrcLoc, func_ty: Type, result: Air.Inst.Ref) !Air.Inst.Ref {
const target = sema.mod.getTarget();
const backend = sema.mod.comp.getZigBackend();
if (!target_util.supportsTailCall(target, backend)) {
return sema.fail(block, call_src, "unable to perform tail call: compiler backend '{s}' does not support tail calls on target architecture '{s}' with the selected CPU feature flags", .{
@tagName(backend), @tagName(target.cpu.arch),
});
}
const func_decl = sema.mod.declPtr(sema.owner_func.?.owner_decl);
if (!func_ty.eql(func_decl.ty, sema.mod)) {
return sema.fail(block, call_src, "unable to perform tail call: type of function being called '{}' does not match type of calling function '{}'", .{
func_ty.fmt(sema.mod), func_decl.ty.fmt(sema.mod),
});
}
_ = try block.addUnOp(.ret, result);
return Air.Inst.Ref.unreachable_value;
}
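/// Maps one parameter ZIR instruction of an inline/comptime call to the
/// corresponding callsite argument, coercing it to the parameter type (which
/// may itself have just been resolved). For comptime calls, the argument must
/// be comptime-known and is also recorded in `memoized_call_key`.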
fn analyzeInlineCallArg(
sema: *Sema,
arg_block: *Block,
param_block: *Block,
arg_src: LazySrcLoc,
inst: Zir.Inst.Index,
new_fn_info: InternPool.Key.FuncType,
arg_i: *usize,
uncasted_args: []const Air.Inst.Ref,
is_comptime_call: bool,
should_memoize: *bool,
memoized_call_key: Module.MemoizedCall.Key,
raw_param_types: []const InternPool.Index,
func_inst: Air.Inst.Ref,
has_comptime_args: *bool,
) !void {
const zir_tags = sema.code.instructions.items(.tag);
switch (zir_tags[inst]) {
.param_comptime, .param_anytype_comptime => has_comptime_args.* = true,
else => {},
}
switch (zir_tags[inst]) {
.param, .param_comptime => {
// Evaluate the parameter type expression now that previous ones have
// been mapped, and coerce the corresponding argument to it.
const pl_tok = sema.code.instructions.items(.data)[inst].pl_tok;
const param_src = pl_tok.src();
const extra = sema.code.extraData(Zir.Inst.Param, pl_tok.payload_index);
const param_body = sema.code.extra[extra.end..][0..extra.data.body_len];
const param_ty = param_ty: {
const raw_param_ty = raw_param_types[arg_i.*];
if (raw_param_ty != .generic_poison_type) break :param_ty raw_param_ty;
const param_ty_inst = try sema.resolveBody(param_block, param_body, inst);
const param_ty = try sema.analyzeAsType(param_block, param_src, param_ty_inst);
break :param_ty param_ty.toIntern();
};
new_fn_info.param_types[arg_i.*] = param_ty;
const uncasted_arg = uncasted_args[arg_i.*];
if (try sema.typeRequiresComptime(param_ty.toType())) {
_ = sema.resolveConstMaybeUndefVal(arg_block, arg_src, uncasted_arg, "argument to parameter with comptime-only type must be comptime-known") catch |err| {
if (err == error.AnalysisFail and param_block.comptime_reason != null) try param_block.comptime_reason.?.explain(sema, sema.err);
return err;
};
} else if (!is_comptime_call and zir_tags[inst] == .param_comptime) {
_ = try sema.resolveConstMaybeUndefVal(arg_block, arg_src, uncasted_arg, "parameter is comptime");
}
const casted_arg = sema.coerceExtra(arg_block, param_ty.toType(), uncasted_arg, arg_src, .{ .param_src = .{
.func_inst = func_inst,
.param_i = @intCast(u32, arg_i.*),
} }) catch |err| switch (err) {
error.NotCoercible => unreachable,
else => |e| return e,
};
if (is_comptime_call) {
sema.inst_map.putAssumeCapacityNoClobber(inst, casted_arg);
const arg_val = sema.resolveConstMaybeUndefVal(arg_block, arg_src, casted_arg, "argument to function being called at comptime must be comptime-known") catch |err| {
if (err == error.AnalysisFail and param_block.comptime_reason != null) try param_block.comptime_reason.?.explain(sema, sema.err);
return err;
};
switch (arg_val.ip_index) {
.generic_poison, .generic_poison_type => {
// This function is currently being evaluated as part of an as-yet
// unresolvable parameter or return type.
return error.GenericPoison;
},
else => {
// Needed so that lazy values do not trigger an
// assertion due to the type not being resolved
// when the hash function is called.
try sema.resolveLazyValue(arg_val);
},
}
should_memoize.* = should_memoize.* and !arg_val.canMutateComptimeVarState();
memoized_call_key.args[arg_i.*] = .{
.ty = param_ty.toType(),
.val = arg_val,
};
} else {
sema.inst_map.putAssumeCapacityNoClobber(inst, casted_arg);
}
if (try sema.resolveMaybeUndefVal(casted_arg)) |_| {
has_comptime_args.* = true;
}
arg_i.* += 1;
},
.param_anytype, .param_anytype_comptime => {
// No coercion needed.
const uncasted_arg = uncasted_args[arg_i.*];
new_fn_info.param_types[arg_i.*] = sema.typeOf(uncasted_arg).toIntern();
if (is_comptime_call) {
sema.inst_map.putAssumeCapacityNoClobber(inst, uncasted_arg);
const arg_val = sema.resolveConstMaybeUndefVal(arg_block, arg_src, uncasted_arg, "argument to function being called at comptime must be comptime-known") catch |err| {
if (err == error.AnalysisFail and param_block.comptime_reason != null) try param_block.comptime_reason.?.explain(sema, sema.err);
return err;
};
switch (arg_val.ip_index) {
.generic_poison, .generic_poison_type => {
// This function is currently being evaluated as part of an as-yet
// unresolvable parameter or return type.
return error.GenericPoison;
},
else => {
// Needed so that lazy values do not trigger an
// assertion due to the type not being resolved
// when the hash function is called.
try sema.resolveLazyValue(arg_val);
},
}
should_memoize.* = should_memoize.* and !arg_val.canMutateComptimeVarState();
memoized_call_key.args[arg_i.*] = .{
.ty = sema.typeOf(uncasted_arg),
.val = arg_val,
};
} else {
if (zir_tags[inst] == .param_anytype_comptime) {
_ = try sema.resolveConstMaybeUndefVal(arg_block, arg_src, uncasted_arg, "parameter is comptime");
}
sema.inst_map.putAssumeCapacityNoClobber(inst, uncasted_arg);
}
if (try sema.resolveMaybeUndefVal(uncasted_arg)) |_| {
has_comptime_args.* = true;
}
arg_i.* += 1;
},
else => {},
}
}
fn analyzeCallArg(
sema: *Sema,
block: *Block,
arg_src: LazySrcLoc,
param_ty: Type,
uncasted_arg: Air.Inst.Ref,
opts: CoerceOpts,
) !Air.Inst.Ref {
try sema.resolveTypeFully(param_ty);
return sema.coerceExtra(block, param_ty, uncasted_arg, arg_src, opts) catch |err| switch (err) {
error.NotCoercible => unreachable,
else => |e| return e,
};
}
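/// Decides whether one argument of a monomorphized call is passed at runtime.
/// An argument remains runtime only if its comptime value is generic poison,
/// its type has runtime bits, and the type is not comptime-only; elided
/// arguments whose type has exactly one possible value are still coerced so
/// that type errors are reported.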
fn analyzeGenericCallArg(
sema: *Sema,
block: *Block,
arg_src: LazySrcLoc,
uncasted_arg: Air.Inst.Ref,
comptime_arg: TypedValue,
runtime_args: []Air.Inst.Ref,
new_fn_info: InternPool.Key.FuncType,
runtime_i: *u32,
) !void {
const mod = sema.mod;
const is_runtime = comptime_arg.val.isGenericPoison() and
comptime_arg.ty.hasRuntimeBits(mod) and
!(try sema.typeRequiresComptime(comptime_arg.ty));
if (is_runtime) {
const param_ty = new_fn_info.param_types[runtime_i.*].toType();
const casted_arg = try sema.coerce(block, param_ty, uncasted_arg, arg_src);
try sema.queueFullTypeResolution(param_ty);
runtime_args[runtime_i.*] = casted_arg;
runtime_i.* += 1;
} else if (try sema.typeHasOnePossibleValue(comptime_arg.ty)) |_| {
_ = try sema.coerce(block, comptime_arg.ty, uncasted_arg, arg_src);
}
}
fn analyzeGenericCallArgVal(sema: *Sema, block: *Block, arg_src: LazySrcLoc, uncasted_arg: Air.Inst.Ref) !Value {
const arg_val = try sema.resolveValue(block, arg_src, uncasted_arg, "parameter is comptime");
try sema.resolveLazyValue(arg_val);
return arg_val;
}
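/// Monomorphizes a call to a generic function: hashes the comptime-known
/// arguments, looks up or creates the matching instantiation in
/// `mod.monomorphed_funcs`, and emits a runtime call passing only the runtime
/// arguments. For example, given `fn id(comptime T: type, x: T) T`, the call
/// `id(u32, y)` lowers to a call of an instantiation named like `id__anon_N`
/// that receives only the runtime argument `y`.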
fn instantiateGenericCall(
sema: *Sema,
block: *Block,
func: Air.Inst.Ref,
func_src: LazySrcLoc,
call_src: LazySrcLoc,
func_ty_info: InternPool.Key.FuncType,
ensure_result_used: bool,
uncasted_args: []const Air.Inst.Ref,
call_tag: Air.Inst.Tag,
bound_arg_src: ?LazySrcLoc,
call_dbg_node: ?Zir.Inst.Index,
) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const gpa = sema.gpa;
const func_val = try sema.resolveConstValue(block, func_src, func, "generic function being called must be comptime-known");
const module_fn = switch (func_val.tag()) {
.function => func_val.castTag(.function).?.data,
.decl_ref => mod.declPtr(func_val.castTag(.decl_ref).?.data).val.castTag(.function).?.data,
else => unreachable,
};
// Check the Module's generic function map with an adapted context, so that we
// can match against `uncasted_args` rather than doing the work below to create a
// generic Scope only to junk it if it matches an existing instantiation.
const fn_owner_decl = mod.declPtr(module_fn.owner_decl);
const namespace_index = fn_owner_decl.src_namespace;
const namespace = mod.namespacePtr(namespace_index);
const fn_zir = namespace.file_scope.zir;
const fn_info = fn_zir.getFnInfo(module_fn.zir_body_inst);
const zir_tags = fn_zir.instructions.items(.tag);
// This hash must match `Module.MonomorphedFuncsContext.hash`.
// For parameters explicitly marked comptime and simple parameter type expressions,
// we know whether a parameter is elided from a monomorphed function, and can
// use it in the hash here. However, for parameter type expressions that are not
// explicitly marked comptime and rely on previous parameter comptime values, we
// don't find out until after generating a monomorphed function whether the parameter
// type ended up being a "must-be-comptime-known" type.
var hasher = std.hash.Wyhash.init(0);
std.hash.autoHash(&hasher, module_fn.owner_decl);
const generic_args = try sema.arena.alloc(GenericCallAdapter.Arg, func_ty_info.param_types.len);
{
var i: usize = 0;
for (fn_info.param_body) |inst| {
var is_comptime = false;
var is_anytype = false;
switch (zir_tags[inst]) {
.param => {
is_comptime = func_ty_info.paramIsComptime(@intCast(u5, i));
},
.param_comptime => {
is_comptime = true;
},
.param_anytype => {
is_anytype = true;
is_comptime = func_ty_info.paramIsComptime(@intCast(u5, i));
},
.param_anytype_comptime => {
is_anytype = true;
is_comptime = true;
},
else => continue,
}
const arg_ty = sema.typeOf(uncasted_args[i]);
if (is_comptime or is_anytype) {
// Tuple default values are a part of the type and need to be
// resolved to hash the type.
try sema.resolveTupleLazyValues(block, call_src, arg_ty);
}
if (is_comptime) {
const arg_val = sema.analyzeGenericCallArgVal(block, .unneeded, uncasted_args[i]) catch |err| switch (err) {
error.NeededSourceLocation => {
const decl = sema.mod.declPtr(block.src_decl);
const arg_src = mod.argSrc(call_src.node_offset.x, decl, i, bound_arg_src);
_ = try sema.analyzeGenericCallArgVal(block, arg_src, uncasted_args[i]);
unreachable;
},
else => |e| return e,
};
arg_val.hashUncoerced(arg_ty, &hasher, mod);
if (is_anytype) {
arg_ty.hashWithHasher(&hasher, mod);
generic_args[i] = .{
.ty = arg_ty,
.val = arg_val,
.is_anytype = true,
};
} else {
generic_args[i] = .{
.ty = arg_ty,
.val = arg_val,
.is_anytype = false,
};
}
} else if (is_anytype) {
arg_ty.hashWithHasher(&hasher, mod);
generic_args[i] = .{
.ty = arg_ty,
.val = Value.generic_poison,
.is_anytype = true,
};
} else {
generic_args[i] = .{
.ty = arg_ty,
.val = Value.generic_poison,
.is_anytype = false,
};
}
i += 1;
}
}
const precomputed_hash = hasher.final();
const adapter: GenericCallAdapter = .{
.generic_fn = module_fn,
.precomputed_hash = precomputed_hash,
.func_ty_info = func_ty_info,
.args = generic_args,
.module = mod,
};
const gop = try mod.monomorphed_funcs.getOrPutAdapted(gpa, {}, adapter);
const callee = if (!gop.found_existing) callee: {
const new_module_func = try gpa.create(Module.Fn);
// This ensures that we can operate on the hash map before the Module.Fn
// struct is fully initialized.
new_module_func.hash = precomputed_hash;
new_module_func.generic_owner_decl = module_fn.owner_decl.toOptional();
new_module_func.comptime_args = null;
gop.key_ptr.* = new_module_func;
try namespace.anon_decls.ensureUnusedCapacity(gpa, 1);
// Create a Decl for the new function.
const src_decl_index = namespace.getDeclIndex(mod);
const src_decl = mod.declPtr(src_decl_index);
const new_decl_index = try mod.allocateNewDecl(namespace_index, fn_owner_decl.src_node, src_decl.src_scope);
const new_decl = mod.declPtr(new_decl_index);
// TODO better names for generic function instantiations
const decl_name = try std.fmt.allocPrintZ(gpa, "{s}__anon_{d}", .{
fn_owner_decl.name, @enumToInt(new_decl_index),
});
new_decl.name = decl_name;
new_decl.src_line = fn_owner_decl.src_line;
new_decl.is_pub = fn_owner_decl.is_pub;
new_decl.is_exported = fn_owner_decl.is_exported;
new_decl.has_align = fn_owner_decl.has_align;
new_decl.has_linksection_or_addrspace = fn_owner_decl.has_linksection_or_addrspace;
new_decl.@"linksection" = fn_owner_decl.@"linksection";
new_decl.@"addrspace" = fn_owner_decl.@"addrspace";
new_decl.zir_decl_index = fn_owner_decl.zir_decl_index;
new_decl.alive = true; // This Decl is called at runtime.
new_decl.analysis = .in_progress;
new_decl.generation = mod.generation;
namespace.anon_decls.putAssumeCapacityNoClobber(new_decl_index, {});
// The generic function Decl is guaranteed to be the first dependency
// of each of its instantiations.
assert(new_decl.dependencies.keys().len == 0);
try mod.declareDeclDependencyType(new_decl_index, module_fn.owner_decl, .function_body);
var new_decl_arena = std.heap.ArenaAllocator.init(sema.gpa);
const new_decl_arena_allocator = new_decl_arena.allocator();
const new_func = sema.resolveGenericInstantiationType(
block,
new_decl_arena_allocator,
fn_zir,
new_decl,
new_decl_index,
uncasted_args,
module_fn,
new_module_func,
namespace_index,
func_ty_info,
call_src,
bound_arg_src,
) catch |err| switch (err) {
error.GenericPoison, error.ComptimeReturn => {
new_decl_arena.deinit();
// Resolving the new function type below will possibly declare more decl dependencies
// and so we remove them all here in case of error.
for (new_decl.dependencies.keys()) |dep_index| {
const dep = mod.declPtr(dep_index);
dep.removeDependant(new_decl_index);
}
assert(namespace.anon_decls.orderedRemove(new_decl_index));
mod.destroyDecl(new_decl_index);
assert(mod.monomorphed_funcs.remove(new_module_func));
gpa.destroy(new_module_func);
return err;
},
else => {
assert(mod.monomorphed_funcs.remove(new_module_func));
{
errdefer new_decl_arena.deinit();
try new_decl.finalizeNewArena(&new_decl_arena);
}
// TODO look up the compile error that happened here and attach a note to it
// pointing here, at the generic instantiation callsite.
if (sema.owner_func) |owner_func| {
owner_func.state = .dependency_failure;
} else {
sema.owner_decl.analysis = .dependency_failure;
}
return err;
},
};
errdefer new_decl_arena.deinit();
try new_decl.finalizeNewArena(&new_decl_arena);
break :callee new_func;
} else gop.key_ptr.*;
callee.branch_quota = @max(callee.branch_quota, sema.branch_quota);
const callee_inst = try sema.analyzeDeclVal(block, func_src, callee.owner_decl);
// Make a runtime call to the new function, making sure to omit the comptime args.
const comptime_args = callee.comptime_args.?;
const func_ty = mod.declPtr(callee.owner_decl).ty;
const new_fn_info = mod.typeToFunc(func_ty).?;
const runtime_args_len = @intCast(u32, new_fn_info.param_types.len);
const runtime_args = try sema.arena.alloc(Air.Inst.Ref, runtime_args_len);
{
var runtime_i: u32 = 0;
var total_i: u32 = 0;
for (fn_info.param_body) |inst| {
switch (zir_tags[inst]) {
.param_comptime, .param_anytype_comptime, .param, .param_anytype => {},
else => continue,
}
sema.analyzeGenericCallArg(
block,
.unneeded,
uncasted_args[total_i],
comptime_args[total_i],
runtime_args,
new_fn_info,
&runtime_i,
) catch |err| switch (err) {
error.NeededSourceLocation => {
const decl = sema.mod.declPtr(block.src_decl);
_ = try sema.analyzeGenericCallArg(
block,
mod.argSrc(call_src.node_offset.x, decl, total_i, bound_arg_src),
uncasted_args[total_i],
comptime_args[total_i],
runtime_args,
new_fn_info,
&runtime_i,
);
unreachable;
},
else => |e| return e,
};
total_i += 1;
}
try sema.queueFullTypeResolution(new_fn_info.return_type.toType());
}
if (call_dbg_node) |some| try sema.zirDbgStmt(block, some);
if (sema.owner_func != null and new_fn_info.return_type.toType().isError(mod)) {
sema.owner_func.?.calls_or_awaits_errorable_fn = true;
}
try sema.mod.ensureFuncBodyAnalysisQueued(callee);
try sema.air_extra.ensureUnusedCapacity(sema.gpa, @typeInfo(Air.Call).Struct.fields.len +
runtime_args_len);
const result = try block.addInst(.{
.tag = call_tag,
.data = .{ .pl_op = .{
.operand = callee_inst,
.payload = sema.addExtraAssumeCapacity(Air.Call{
.args_len = runtime_args_len,
}),
} },
});
sema.appendRefsAssumeCapacity(runtime_args);
if (ensure_result_used) {
try sema.ensureResultUsed(block, sema.typeOf(result), call_src);
}
if (call_tag == .call_always_tail) {
return sema.handleTailCall(block, call_src, func_ty, result);
}
if (new_fn_info.return_type == .noreturn_type) {
_ = try block.addNoOp(.unreach);
return Air.Inst.Ref.unreachable_value;
}
return result;
}
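/// Re-analyzes the generic function's parameter block in a child `Sema` with
/// the comptime arguments pre-mapped, producing the monomorphized
/// `Module.Fn`. Returns `error.ComptimeReturn` if the resolved return type
/// turns out to be comptime-only, and `error.GenericPoison` if the resolved
/// function is still generic or has an inline calling convention.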
fn resolveGenericInstantiationType(
sema: *Sema,
block: *Block,
new_decl_arena_allocator: Allocator,
fn_zir: Zir,
new_decl: *Decl,
new_decl_index: Decl.Index,
uncasted_args: []const Air.Inst.Ref,
module_fn: *Module.Fn,
new_module_func: *Module.Fn,
namespace: Namespace.Index,
func_ty_info: InternPool.Key.FuncType,
call_src: LazySrcLoc,
bound_arg_src: ?LazySrcLoc,
) !*Module.Fn {
const mod = sema.mod;
const gpa = sema.gpa;
const zir_tags = fn_zir.instructions.items(.tag);
const fn_info = fn_zir.getFnInfo(module_fn.zir_body_inst);
// Re-run the block that creates the function, with the comptime parameters
// pre-populated inside `inst_map`. This causes `param_comptime` and
// `param_anytype_comptime` ZIR instructions to be ignored, resulting in a
// new, monomorphized function, with the comptime parameters elided.
var child_sema: Sema = .{
.mod = mod,
.gpa = gpa,
.arena = sema.arena,
.perm_arena = new_decl_arena_allocator,
.code = fn_zir,
.owner_decl = new_decl,
.owner_decl_index = new_decl_index,
.func = null,
.fn_ret_ty = Type.void,
.owner_func = null,
.comptime_args = try new_decl_arena_allocator.alloc(TypedValue, uncasted_args.len),
.comptime_args_fn_inst = module_fn.zir_body_inst,
.preallocated_new_func = new_module_func,
.is_generic_instantiation = true,
.branch_quota = sema.branch_quota,
.branch_count = sema.branch_count,
};
defer child_sema.deinit();
var wip_captures = try WipCaptureScope.init(gpa, sema.perm_arena, new_decl.src_scope);
defer wip_captures.deinit();
var child_block: Block = .{
.parent = null,
.sema = &child_sema,
.src_decl = new_decl_index,
.namespace = namespace,
.wip_capture_scope = wip_captures.scope,
.instructions = .{},
.inlining = null,
.is_comptime = true,
};
defer {
child_block.instructions.deinit(gpa);
child_block.params.deinit(gpa);
}
try child_sema.inst_map.ensureSpaceForInstructions(gpa, fn_info.param_body);
var arg_i: usize = 0;
for (fn_info.param_body) |inst| {
var is_comptime = false;
var is_anytype = false;
switch (zir_tags[inst]) {
.param => {
is_comptime = func_ty_info.paramIsComptime(@intCast(u5, arg_i));
},
.param_comptime => {
is_comptime = true;
},
.param_anytype => {
is_anytype = true;
is_comptime = func_ty_info.paramIsComptime(@intCast(u5, arg_i));
},
.param_anytype_comptime => {
is_anytype = true;
is_comptime = true;
},
else => continue,
}
const arg = uncasted_args[arg_i];
if (is_comptime) {
const arg_val = (try sema.resolveMaybeUndefVal(arg)).?;
const child_arg = try child_sema.addConstant(sema.typeOf(arg), arg_val);
child_sema.inst_map.putAssumeCapacityNoClobber(inst, child_arg);
} else if (is_anytype) {
const arg_ty = sema.typeOf(arg);
if (try sema.typeRequiresComptime(arg_ty)) {
const arg_val = sema.resolveConstValue(block, .unneeded, arg, "") catch |err| switch (err) {
error.NeededSourceLocation => {
const decl = sema.mod.declPtr(block.src_decl);
const arg_src = mod.argSrc(call_src.node_offset.x, decl, arg_i, bound_arg_src);
_ = try sema.resolveConstValue(block, arg_src, arg, "argument to parameter with comptime-only type must be comptime-known");
unreachable;
},
else => |e| return e,
};
const child_arg = try child_sema.addConstant(arg_ty, arg_val);
child_sema.inst_map.putAssumeCapacityNoClobber(inst, child_arg);
} else {
// We insert into the map an instruction which is runtime-known
// but has the type of the argument.
const child_arg = try child_block.addInst(.{
.tag = .arg,
.data = .{ .arg = .{
.ty = try child_sema.addType(arg_ty),
.src_index = @intCast(u32, arg_i),
} },
});
child_sema.inst_map.putAssumeCapacityNoClobber(inst, child_arg);
}
}
arg_i += 1;
}
// Save the error trace as our first action in the function.
// If this is unnecessary after all, Liveness will clean it up for us.
const error_return_trace_index = try sema.analyzeSaveErrRetIndex(&child_block);
child_sema.error_return_trace_index_on_fn_entry = error_return_trace_index;
child_block.error_return_trace_index = error_return_trace_index;
const new_func_inst = try child_sema.resolveBody(&child_block, fn_info.param_body, fn_info.param_body_inst);
const new_func_val = child_sema.resolveConstValue(&child_block, .unneeded, new_func_inst, undefined) catch unreachable;
const new_func = new_func_val.castTag(.function).?.data;
errdefer new_func.deinit(gpa);
assert(new_func == new_module_func);
arg_i = 0;
for (fn_info.param_body) |inst| {
var is_comptime = false;
switch (zir_tags[inst]) {
.param => {
is_comptime = func_ty_info.paramIsComptime(@intCast(u5, arg_i));
},
.param_comptime => {
is_comptime = true;
},
.param_anytype => {
is_comptime = func_ty_info.paramIsComptime(@intCast(u5, arg_i));
},
.param_anytype_comptime => {
is_comptime = true;
},
else => continue,
}
// We populate the Type here regardless because it is needed by
// `GenericCallAdapter.eql` as well as function body analysis.
// Whether it is anytype is communicated by `isAnytypeParam`.
const arg = child_sema.inst_map.get(inst).?;
const arg_ty = child_sema.typeOf(arg);
if (try sema.typeRequiresComptime(arg_ty)) {
is_comptime = true;
}
if (is_comptime) {
const arg_val = (child_sema.resolveMaybeUndefValAllowVariables(arg) catch unreachable).?;
child_sema.comptime_args[arg_i] = .{
.ty = arg_ty,
.val = try arg_val.copy(new_decl_arena_allocator),
};
} else {
child_sema.comptime_args[arg_i] = .{
.ty = arg_ty,
.val = Value.generic_poison,
};
}
arg_i += 1;
}
try wip_captures.finalize();
// Populate the Decl ty/val with the function and its type.
new_decl.ty = child_sema.typeOf(new_func_inst);
// If the call evaluated to a return type that requires comptime, never mind
// our generic instantiation. Instead we need to perform a comptime call.
const new_fn_info = mod.typeToFunc(new_decl.ty).?;
if (try sema.typeRequiresComptime(new_fn_info.return_type.toType())) {
return error.ComptimeReturn;
}
// Similarly, if the call evaluated to a generic type we need to instead
// call it inline.
if (new_fn_info.is_generic or new_fn_info.cc == .Inline) {
return error.GenericPoison;
}
new_decl.val = try Value.Tag.function.create(new_decl_arena_allocator, new_func);
new_decl.@"align" = 0;
new_decl.has_tv = true;
new_decl.owns_tv = true;
new_decl.analysis = .complete;
log.debug("generic function '{s}' instantiated with type {}", .{
new_decl.name, new_decl.ty.fmtDebug(),
});
// Queue up a `codegen_func` work item for the new Fn. The `comptime_args` field
// will be populated, ensuring it will have `analyzeBody` called with the ZIR
// parameters mapped appropriately.
try mod.comp.work_queue.writeItem(.{ .codegen_func = new_func });
return new_func;
}
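/// Recursively resolves lazy values in tuple field defaults; they are part of
/// the type and must be resolved before the type can be hashed.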
fn resolveTupleLazyValues(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError!void {
const mod = sema.mod;
const tuple = switch (mod.intern_pool.indexToKey(ty.ip_index)) {
.anon_struct_type => |tuple| tuple,
else => return,
};
for (tuple.types, tuple.values) |field_ty, field_val| {
try sema.resolveTupleLazyValues(block, src, field_ty.toType());
if (field_val == .none) continue;
try sema.resolveLazyValue(field_val.toValue());
}
}
fn emitDbgInline(
sema: *Sema,
block: *Block,
old_func: *Module.Fn,
new_func: *Module.Fn,
new_func_ty: Type,
tag: Air.Inst.Tag,
) CompileError!void {
if (sema.mod.comp.bin_file.options.strip) return;
// Recursive inline call; no dbg_inline needed.
if (old_func == new_func) return;
try sema.air_values.append(sema.gpa, try Value.Tag.function.create(sema.arena, new_func));
_ = try block.addInst(.{
.tag = tag,
.data = .{ .ty_pl = .{
.ty = try sema.addType(new_func_ty),
.payload = @intCast(u32, sema.air_values.items.len - 1),
} },
});
}
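/// Handles ZIR `int_type`, produced by arbitrary-width integer type syntax
/// such as `u7` or `i129`.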
fn zirIntType(sema: *Sema, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const int_type = sema.code.instructions.items(.data)[inst].int_type;
const ty = try mod.intType(int_type.signedness, int_type.bit_count);
return sema.addType(ty);
}
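/// Handles optional type syntax, e.g. `?u32`. Optionals of opaque types and
/// of `@TypeOf(null)` are rejected.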
fn zirOptionalType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const operand_src: LazySrcLoc = .{ .node_offset_un_op = inst_data.src_node };
const child_type = try sema.resolveType(block, operand_src, inst_data.operand);
if (child_type.zigTypeTag(mod) == .Opaque) {
return sema.fail(block, operand_src, "opaque type '{}' cannot be optional", .{child_type.fmt(sema.mod)});
} else if (child_type.zigTypeTag(mod) == .Null) {
return sema.fail(block, operand_src, "type '{}' cannot be optional", .{child_type.fmt(sema.mod)});
}
const opt_type = try Type.optional(sema.arena, child_type, mod);
return sema.addType(opt_type);
}
fn zirElemTypeIndex(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const bin = sema.code.instructions.items(.data)[inst].bin;
const indexable_ty = try sema.resolveType(block, .unneeded, bin.lhs);
assert(indexable_ty.isIndexable(mod)); // validated by a previous instruction
if (indexable_ty.zigTypeTag(mod) == .Struct) {
const elem_type = indexable_ty.structFieldType(@enumToInt(bin.rhs), mod);
return sema.addType(elem_type);
} else {
const elem_type = indexable_ty.elemType2(mod);
return sema.addType(elem_type);
}
}
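/// Handles `@Vector(len, Elem)`, e.g. `@Vector(4, f32)`; the length must be
/// comptime-known and the element type is validated by `checkVectorElemType`.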
fn zirVectorType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const elem_type_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
const len_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const len = @intCast(u32, try sema.resolveInt(block, len_src, extra.lhs, Type.u32, "vector length must be comptime-known"));
const elem_type = try sema.resolveType(block, elem_type_src, extra.rhs);
try sema.checkVectorElemType(block, elem_type_src, elem_type);
const vector_type = try mod.vectorType(.{
.len = len,
.child = elem_type.ip_index,
});
return sema.addType(vector_type);
}
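/// Handles array type syntax without a sentinel, e.g. `[3]u8`.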
fn zirArrayType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const len_src: LazySrcLoc = .{ .node_offset_array_type_len = inst_data.src_node };
const elem_src: LazySrcLoc = .{ .node_offset_array_type_elem = inst_data.src_node };
const len = try sema.resolveInt(block, len_src, extra.lhs, Type.usize, "array length must be comptime-known");
const elem_type = try sema.resolveType(block, elem_src, extra.rhs);
try sema.validateArrayElemType(block, elem_type, elem_src);
const array_ty = try Type.array(sema.arena, len, null, elem_type, sema.mod);
return sema.addType(array_ty);
}
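/// Handles sentinel-terminated array type syntax, e.g. `[3:0]u8`; the
/// sentinel is coerced to the element type and must be comptime-known.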
fn zirArrayTypeSentinel(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const extra = sema.code.extraData(Zir.Inst.ArrayTypeSentinel, inst_data.payload_index).data;
const len_src: LazySrcLoc = .{ .node_offset_array_type_len = inst_data.src_node };
const sentinel_src: LazySrcLoc = .{ .node_offset_array_type_sentinel = inst_data.src_node };
const elem_src: LazySrcLoc = .{ .node_offset_array_type_elem = inst_data.src_node };
const len = try sema.resolveInt(block, len_src, extra.len, Type.usize, "array length must be comptime-known");
const elem_type = try sema.resolveType(block, elem_src, extra.elem_type);
try sema.validateArrayElemType(block, elem_type, elem_src);
const uncasted_sentinel = try sema.resolveInst(extra.sentinel);
const sentinel = try sema.coerce(block, elem_type, uncasted_sentinel, sentinel_src);
const sentinel_val = try sema.resolveConstValue(block, sentinel_src, sentinel, "array sentinel value must be comptime-known");
const array_ty = try Type.array(sema.arena, len, sentinel_val, elem_type, sema.mod);
return sema.addType(array_ty);
}
fn validateArrayElemType(sema: *Sema, block: *Block, elem_type: Type, elem_src: LazySrcLoc) !void {
const mod = sema.mod;
if (elem_type.zigTypeTag(mod) == .Opaque) {
return sema.fail(block, elem_src, "array of opaque type '{}' not allowed", .{elem_type.fmt(sema.mod)});
} else if (elem_type.zigTypeTag(mod) == .NoReturn) {
return sema.fail(block, elem_src, "array of 'noreturn' not allowed", .{});
}
}
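/// Handles `anyframe->T` type syntax. Async is currently unsupported, so this
/// always reports a use-of-async error; the unreachable code below is
/// presumably kept for when async is reimplemented.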
fn zirAnyframeType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
if (true) {
return sema.failWithUseOfAsync(block, inst_data.src());
}
const mod = sema.mod;
const operand_src: LazySrcLoc = .{ .node_offset_anyframe_type = inst_data.src_node };
const return_type = try sema.resolveType(block, operand_src, inst_data.operand);
const anyframe_type = try mod.anyframeType(return_type);
return sema.addType(anyframe_type);
}
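/// Handles error union type syntax, e.g. `error{Overflow}!u32`; the left
/// operand must be an error set type and the payload may be neither opaque
/// nor an error set.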
fn zirErrorUnionType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node };
const error_set = try sema.resolveType(block, lhs_src, extra.lhs);
const payload = try sema.resolveType(block, rhs_src, extra.rhs);
if (error_set.zigTypeTag(mod) != .ErrorSet) {
return sema.fail(block, lhs_src, "expected error set type, found '{}'", .{
error_set.fmt(sema.mod),
});
}
try sema.validateErrorUnionPayloadType(block, payload, rhs_src);
const err_union_ty = try mod.errorUnionType(error_set, payload);
return sema.addType(err_union_ty);
}
fn validateErrorUnionPayloadType(sema: *Sema, block: *Block, payload_ty: Type, payload_src: LazySrcLoc) !void {
const mod = sema.mod;
if (payload_ty.zigTypeTag(mod) == .Opaque) {
return sema.fail(block, payload_src, "error union with payload of opaque type '{}' not allowed", .{
payload_ty.fmt(sema.mod),
});
} else if (payload_ty.zigTypeTag(mod) == .ErrorSet) {
return sema.fail(block, payload_src, "error union with payload of error set type '{}' not allowed", .{
payload_ty.fmt(sema.mod),
});
}
}
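/// Handles `error.Name` syntax, e.g. `error.OutOfMemory`: registers the name
/// in the global error set and returns a constant of the single-error set
/// type `error{Name}`.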
fn zirErrorValue(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
_ = block;
const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].str_tok;
const name = inst_data.get(sema.code);
// Create an error set type with only this error value, and return the value.
const kv = try sema.mod.getErrorValue(name);
return sema.addConstant(
try mod.singleErrorSetType(kv.key),
try Value.Tag.@"error".create(sema.arena, .{
.name = kv.key,
}),
);
}
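/// Implements the `@errorToInt` builtin. A comptime-known error becomes an
/// integer constant; otherwise, after resolving inferred error sets, small
/// error sets (zero or one possible value) still produce a constant, and the
/// general case is a bitcast to the error integer type.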
fn zirErrorToInt(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
const mod = sema.mod;
const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
const src = LazySrcLoc.nodeOffset(extra.node);
const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
const uncasted_operand = try sema.resolveInst(extra.operand);
const operand = try sema.coerce(block, Type.anyerror, uncasted_operand, operand_src);
if (try sema.resolveMaybeUndefVal(operand)) |val| {
if (val.isUndef(mod)) {
return sema.addConstUndef(Type.err_int);
}
switch (val.tag()) {
.@"error" => {
return sema.addConstant(
Type.err_int,
try mod.intValue(
Type.err_int,
(try sema.mod.getErrorValue(val.castTag(.@"error").?.data.name)).value,
),
);
},
// This is not a valid combination with the type `anyerror`.
.the_only_possible_value => unreachable,
// Assume it's already encoded as an integer.
else => return sema.addConstant(Type.err_int, val),
}
}
const op_ty = sema.typeOf(uncasted_operand);
try sema.resolveInferredErrorSetTy(block, src, op_ty);
if (!op_ty.isAnyError(mod)) {
const names = op_ty.errorSetNames(mod);
switch (names.len) {
0 => return sema.addConstant(Type.err_int, try mod.intValue(Type.err_int, 0)),
1 => {
const name = mod.intern_pool.stringToSlice(names[0]);
return sema.addIntUnsigned(Type.err_int, mod.global_error_set.get(name).?);
},
else => {},
}
}
try sema.requireRuntimeBlock(block, src, operand_src);
return block.addBitCast(Type.err_int, operand);
}
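/// Implements the `@intToError` builtin. A comptime-known integer is
/// validated against the global error list; at runtime, a safety check
/// verifies that the value is a nonzero, in-range error code.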
fn zirIntToError(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
const src = LazySrcLoc.nodeOffset(extra.node);
const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
const uncasted_operand = try sema.resolveInst(extra.operand);
const operand = try sema.coerce(block, Type.err_int, uncasted_operand, operand_src);
const mod = sema.mod;
if (try sema.resolveDefinedValue(block, operand_src, operand)) |value| {
const int = try sema.usizeCast(block, operand_src, value.toUnsignedInt(mod));
if (int > sema.mod.global_error_set.count() or int == 0)
return sema.fail(block, operand_src, "integer value '{d}' represents no error", .{int});
const payload = try sema.arena.create(Value.Payload.Error);
payload.* = .{
.base = .{ .tag = .@"error" },
.data = .{ .name = sema.mod.error_name_list.items[int] },
};
return sema.addConstant(Type.anyerror, Value.initPayload(&payload.base));
}
try sema.requireRuntimeBlock(block, src, operand_src);
if (block.wantSafety()) {
const is_lt_len = try block.addUnOp(.cmp_lt_errors_len, operand);
const zero_val = try sema.addConstant(Type.err_int, try mod.intValue(Type.err_int, 0));
const is_non_zero = try block.addBinOp(.cmp_neq, operand, zero_val);
const ok = try block.addBinOp(.bit_and, is_lt_len, is_non_zero);
try sema.addSafetyCheck(block, ok, .invalid_error_code);
}
return block.addInst(.{
.tag = .bitcast,
.data = .{ .ty_op = .{
.ty = Air.Inst.Ref.anyerror_type,
.operand = operand,
} },
});
}
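/// Handles the `||` error set merge operator, e.g. `error{A} || error{B}`.
/// Merging anything with `anyerror` yields `anyerror`; inferred error sets
/// are resolved before merging.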
fn zirMergeErrorSets(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node };
const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node };
const lhs = try sema.resolveInst(extra.lhs);
const rhs = try sema.resolveInst(extra.rhs);
if (sema.typeOf(lhs).zigTypeTag(mod) == .Bool and sema.typeOf(rhs).zigTypeTag(mod) == .Bool) {
const msg = msg: {
const msg = try sema.errMsg(block, lhs_src, "expected error set type, found 'bool'", .{});
errdefer msg.destroy(sema.gpa);
try sema.errNote(block, src, msg, "'||' merges error sets; 'or' performs boolean OR", .{});
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
const lhs_ty = try sema.analyzeAsType(block, lhs_src, lhs);
const rhs_ty = try sema.analyzeAsType(block, rhs_src, rhs);
if (lhs_ty.zigTypeTag(mod) != .ErrorSet)
return sema.fail(block, lhs_src, "expected error set type, found '{}'", .{lhs_ty.fmt(sema.mod)});
if (rhs_ty.zigTypeTag(mod) != .ErrorSet)
return sema.fail(block, rhs_src, "expected error set type, found '{}'", .{rhs_ty.fmt(sema.mod)});
// Anything merged with anyerror is anyerror.
if (lhs_ty.ip_index == .anyerror_type or rhs_ty.ip_index == .anyerror_type) {
return Air.Inst.Ref.anyerror_type;
}
if (mod.typeToInferredErrorSetIndex(lhs_ty).unwrap()) |ies_index| {
try sema.resolveInferredErrorSet(block, src, ies_index);
// isAnyError might have changed from a false negative to a true positive after resolution.
if (lhs_ty.isAnyError(mod)) {
return Air.Inst.Ref.anyerror_type;
}
}
if (mod.typeToInferredErrorSetIndex(rhs_ty).unwrap()) |ies_index| {
try sema.resolveInferredErrorSet(block, src, ies_index);
// isAnyError might have changed from a false negative to a true positive after resolution.
if (rhs_ty.isAnyError(mod)) {
return Air.Inst.Ref.anyerror_type;
}
}
const err_set_ty = try sema.errorSetMerge(lhs_ty, rhs_ty);
return sema.addType(err_set_ty);
}
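/// Handles enum literal syntax, e.g. `.foo`, producing a comptime-known value
/// of the enum literal type.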
fn zirEnumLiteral(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
_ = block;
const tracy = trace(@src());
defer tracy.end();
const inst_data = sema.code.instructions.items(.data)[inst].str_tok;
const duped_name = try sema.arena.dupe(u8, inst_data.get(sema.code));
return sema.addConstant(
.{ .ip_index = .enum_literal_type, .legacy = undefined },
try Value.Tag.enum_literal.create(sema.arena, duped_name),
);
}
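/// Implements the `@enumToInt` builtin, accepting an enum value or a tagged
/// union value, which is first converted to its tag.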
fn zirEnumToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const src = inst_data.src();
const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
const operand = try sema.resolveInst(inst_data.operand);
const operand_ty = sema.typeOf(operand);
const enum_tag: Air.Inst.Ref = switch (operand_ty.zigTypeTag(mod)) {
.Enum => operand,
.Union => blk: {
const union_ty = try sema.resolveTypeFields(operand_ty);
const tag_ty = union_ty.unionTagType(mod) orelse {
return sema.fail(
block,
operand_src,
"untagged union '{}' cannot be converted to integer",
.{union_ty.fmt(sema.mod)},
);
};
break :blk try sema.unionToTag(block, tag_ty, operand, operand_src);
},
else => {
return sema.fail(block, operand_src, "expected enum or tagged union, found '{}'", .{
operand_ty.fmt(sema.mod),
});
},
};
const enum_tag_ty = sema.typeOf(enum_tag);
const int_tag_ty = try enum_tag_ty.intTagType(mod);
if (try sema.typeHasOnePossibleValue(enum_tag_ty)) |opv| {
return sema.addConstant(int_tag_ty, opv);
}
if (try sema.resolveMaybeUndefVal(enum_tag)) |enum_tag_val| {
const val = try enum_tag_val.enumToInt(enum_tag_ty, mod);
return sema.addConstant(int_tag_ty, try val.copy(sema.arena));
}
try sema.requireRuntimeBlock(block, src, operand_src);
return block.addBitCast(int_tag_ty, enum_tag);
}
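/// Implements the `@intToEnum` builtin, e.g. `@intToEnum(E, 2)`. A
/// comptime-known operand must correspond to one of the enum's tags (for a
/// non-exhaustive enum it only needs to fit the tag's integer type); runtime
/// conversions get a safety check where the backend supports it.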
fn zirIntToEnum(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const src = inst_data.src();
const dest_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
const dest_ty = try sema.resolveType(block, dest_ty_src, extra.lhs);
const operand = try sema.resolveInst(extra.rhs);
if (dest_ty.zigTypeTag(mod) != .Enum) {
return sema.fail(block, dest_ty_src, "expected enum, found '{}'", .{dest_ty.fmt(sema.mod)});
}
_ = try sema.checkIntType(block, operand_src, sema.typeOf(operand));
if (try sema.resolveMaybeUndefVal(operand)) |int_val| {
if (dest_ty.isNonexhaustiveEnum(mod)) {
const int_tag_ty = try dest_ty.intTagType(mod);
if (try sema.intFitsInType(int_val, int_tag_ty, null)) {
return sema.addConstant(dest_ty, int_val);
}
const msg = msg: {
const msg = try sema.errMsg(
block,
src,
"int value '{}' out of range of non-exhaustive enum '{}'",
.{ int_val.fmtValue(sema.typeOf(operand), sema.mod), dest_ty.fmt(sema.mod) },
);
errdefer msg.destroy(sema.gpa);
try sema.addDeclaredHereNote(msg, dest_ty);
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
if (int_val.isUndef(mod)) {
return sema.failWithUseOfUndef(block, operand_src);
}
if (!(try sema.enumHasInt(dest_ty, int_val))) {
const msg = msg: {
const msg = try sema.errMsg(
block,
src,
"enum '{}' has no tag with value '{}'",
.{ dest_ty.fmt(sema.mod), int_val.fmtValue(sema.typeOf(operand), sema.mod) },
);
errdefer msg.destroy(sema.gpa);
try sema.addDeclaredHereNote(msg, dest_ty);
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
return sema.addConstant(dest_ty, int_val);
}
if (try sema.typeHasOnePossibleValue(dest_ty)) |opv| {
const result = try sema.addConstant(dest_ty, opv);
// The operand is runtime-known but the result is comptime-known. In
// this case we still need a safety check.
// TODO add a safety check here. we can't use is_named_enum_value -
// it needs to convert the enum back to int and make sure it equals the operand int.
return result;
}
try sema.requireRuntimeBlock(block, src, operand_src);
const result = try block.addTyOp(.intcast, dest_ty, operand);
if (block.wantSafety() and !dest_ty.isNonexhaustiveEnum(mod) and
sema.mod.backendSupportsFeature(.is_named_enum_value))
{
const ok = try block.addUnOp(.is_named_enum_value, result);
try sema.addSafetyCheck(block, ok, .invalid_enum_value);
}
return result;
}
/// Pointer in, pointer out.
fn zirOptionalPayloadPtr(
sema: *Sema,
block: *Block,
inst: Zir.Inst.Index,
safety_check: bool,
) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const optional_ptr = try sema.resolveInst(inst_data.operand);
const src = inst_data.src();
return sema.analyzeOptionalPayloadPtr(block, src, optional_ptr, safety_check, false);
}
fn analyzeOptionalPayloadPtr(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
optional_ptr: Air.Inst.Ref,
safety_check: bool,
initializing: bool,
) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const optional_ptr_ty = sema.typeOf(optional_ptr);
assert(optional_ptr_ty.zigTypeTag(mod) == .Pointer);
const opt_type = optional_ptr_ty.childType(mod);
if (opt_type.zigTypeTag(mod) != .Optional) {
return sema.fail(block, src, "expected optional type, found '{}'", .{opt_type.fmt(sema.mod)});
}
const child_type = opt_type.optionalChild(mod);
const child_pointer = try Type.ptr(sema.arena, sema.mod, .{
.pointee_type = child_type,
.mutable = !optional_ptr_ty.isConstPtr(mod),
.@"addrspace" = optional_ptr_ty.ptrAddressSpace(mod),
});
if (try sema.resolveDefinedValue(block, src, optional_ptr)) |ptr_val| {
if (initializing) {
if (!ptr_val.isComptimeMutablePtr()) {
// If the pointer resulting from this function was stored at comptime,
// the optional non-null bit would be set that way. But in this case,
// we need to emit a runtime instruction to do it.
_ = try block.addTyOp(.optional_payload_ptr_set, child_pointer, optional_ptr);
}
return sema.addConstant(
child_pointer,
try Value.Tag.opt_payload_ptr.create(sema.arena, .{
.container_ptr = ptr_val,
.container_ty = optional_ptr_ty.childType(mod),
}),
);
}
if (try sema.pointerDeref(block, src, ptr_val, optional_ptr_ty)) |val| {
if (val.isNull(mod)) {
return sema.fail(block, src, "unable to unwrap null", .{});
}
// The same Value represents the pointer to the optional and the payload.
return sema.addConstant(
child_pointer,
try Value.Tag.opt_payload_ptr.create(sema.arena, .{
.container_ptr = ptr_val,
.container_ty = optional_ptr_ty.childType(mod),
}),
);
}
}
try sema.requireRuntimeBlock(block, src, null);
if (safety_check and block.wantSafety()) {
const is_non_null = try block.addUnOp(.is_non_null_ptr, optional_ptr);
try sema.addSafetyCheck(block, is_non_null, .unwrap_null);
}
const air_tag: Air.Inst.Tag = if (initializing)
.optional_payload_ptr_set
else
.optional_payload_ptr;
return block.addTyOp(air_tag, child_pointer, optional_ptr);
}
/// Value in, value out.
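/// Implements optional unwrapping such as the `.?` operator; C pointers
/// (`[*c]T`) are also accepted since they may be null.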
fn zirOptionalPayload(
sema: *Sema,
block: *Block,
inst: Zir.Inst.Index,
safety_check: bool,
) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const src = inst_data.src();
const operand = try sema.resolveInst(inst_data.operand);
const operand_ty = sema.typeOf(operand);
const result_ty = switch (operand_ty.zigTypeTag(mod)) {
.Optional => operand_ty.optionalChild(mod),
.Pointer => t: {
if (operand_ty.ptrSize(mod) != .C) {
return sema.failWithExpectedOptionalType(block, src, operand_ty);
}
// TODO https://github.com/ziglang/zig/issues/6597
if (true) break :t operand_ty;
const ptr_info = operand_ty.ptrInfo(mod);
break :t try Type.ptr(sema.arena, sema.mod, .{
.pointee_type = ptr_info.pointee_type,
.@"align" = ptr_info.@"align",
.@"addrspace" = ptr_info.@"addrspace",
.mutable = ptr_info.mutable,
.@"allowzero" = ptr_info.@"allowzero",
.@"volatile" = ptr_info.@"volatile",
.size = .One,
});
},
else => return sema.failWithExpectedOptionalType(block, src, operand_ty),
};
if (try sema.resolveDefinedValue(block, src, operand)) |val| {
if (val.isNull(mod)) {
return sema.fail(block, src, "unable to unwrap null", .{});
}
if (val.castTag(.opt_payload)) |payload| {
return sema.addConstant(result_ty, payload.data);
}
return sema.addConstant(result_ty, val);
}
try sema.requireRuntimeBlock(block, src, null);
if (safety_check and block.wantSafety()) {
const is_non_null = try block.addUnOp(.is_non_null, operand);
try sema.addSafetyCheck(block, is_non_null, .unwrap_null);
}
return block.addTyOp(.optional_payload, result_ty, operand);
}
/// Value in, value out
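/// Illustrative example (hypothetical user code): an unwrap such as
/// `parse(s) catch unreachable`, or the success path of `try parse(s)`,
/// takes an `E!T` value in and yields the `T` payload value out.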
fn zirErrUnionPayload(
sema: *Sema,
block: *Block,
inst: Zir.Inst.Index,
) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const src = inst_data.src();
const operand = try sema.resolveInst(inst_data.operand);
const operand_src = src;
const err_union_ty = sema.typeOf(operand);
if (err_union_ty.zigTypeTag(mod) != .ErrorUnion) {
return sema.fail(block, operand_src, "expected error union type, found '{}'", .{
err_union_ty.fmt(sema.mod),
});
}
return sema.analyzeErrUnionPayload(block, src, err_union_ty, operand, operand_src, false);
}
fn analyzeErrUnionPayload(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
err_union_ty: Type,
operand: Air.Inst.Ref,
operand_src: LazySrcLoc,
safety_check: bool,
) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const payload_ty = err_union_ty.errorUnionPayload(mod);
if (try sema.resolveDefinedValue(block, operand_src, operand)) |val| {
if (val.getError()) |name| {
return sema.fail(block, src, "caught unexpected error '{s}'", .{name});
}
const data = val.castTag(.eu_payload).?.data;
return sema.addConstant(payload_ty, data);
}
try sema.requireRuntimeBlock(block, src, null);
// If the error set is empty, no safety check is needed.
if (safety_check and block.wantSafety() and
!err_union_ty.errorUnionSet(mod).errorSetIsEmpty(mod))
{
try sema.panicUnwrapError(block, operand, .unwrap_errunion_err, .is_non_err);
}
return block.addTyOp(.unwrap_errunion_payload, payload_ty, operand);
}
/// Pointer in, pointer out.
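/// Illustrative example (hypothetical user code): a by-reference payload
/// capture such as `if (eu) |*payload| ... else |err| ...` on an addressable
/// `E!T` takes a `*E!T` in and yields a `*T` out.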
fn zirErrUnionPayloadPtr(
sema: *Sema,
block: *Block,
inst: Zir.Inst.Index,
) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const operand = try sema.resolveInst(inst_data.operand);
const src = inst_data.src();
return sema.analyzeErrUnionPayloadPtr(block, src, operand, false, false);
}
fn analyzeErrUnionPayloadPtr(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
operand: Air.Inst.Ref,
safety_check: bool,
initializing: bool,
) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const operand_ty = sema.typeOf(operand);
assert(operand_ty.zigTypeTag(mod) == .Pointer);
if (operand_ty.childType(mod).zigTypeTag(mod) != .ErrorUnion) {
return sema.fail(block, src, "expected error union type, found '{}'", .{
operand_ty.childType(mod).fmt(sema.mod),
});
}
const err_union_ty = operand_ty.childType(mod);
const payload_ty = err_union_ty.errorUnionPayload(mod);
const operand_pointer_ty = try Type.ptr(sema.arena, sema.mod, .{
.pointee_type = payload_ty,
.mutable = !operand_ty.isConstPtr(mod),
.@"addrspace" = operand_ty.ptrAddressSpace(mod),
});
if (try sema.resolveDefinedValue(block, src, operand)) |ptr_val| {
if (initializing) {
if (!ptr_val.isComptimeMutablePtr()) {
// If the resulting pointer is comptime-mutable, a comptime store through
// it will set the error union error code accordingly. Otherwise, we need
// to emit a runtime instruction to do it here.
try sema.requireRuntimeBlock(block, src, null);
_ = try block.addTyOp(.errunion_payload_ptr_set, operand_pointer_ty, operand);
}
return sema.addConstant(
operand_pointer_ty,
try Value.Tag.eu_payload_ptr.create(sema.arena, .{
.container_ptr = ptr_val,
.container_ty = operand_ty.childType(mod),
}),
);
}
if (try sema.pointerDeref(block, src, ptr_val, operand_ty)) |val| {
if (val.getError()) |name| {
return sema.fail(block, src, "caught unexpected error '{s}'", .{name});
}
return sema.addConstant(
operand_pointer_ty,
try Value.Tag.eu_payload_ptr.create(sema.arena, .{
.container_ptr = ptr_val,
.container_ty = operand_ty.childType(mod),
}),
);
}
}
try sema.requireRuntimeBlock(block, src, null);
// If the error set is empty, no safety check is needed.
if (safety_check and block.wantSafety() and
!err_union_ty.errorUnionSet(mod).errorSetIsEmpty(mod))
{
try sema.panicUnwrapError(block, operand, .unwrap_errunion_err_ptr, .is_non_err_ptr);
}
const air_tag: Air.Inst.Tag = if (initializing)
.errunion_payload_ptr_set
else
.unwrap_errunion_payload_ptr;
return block.addTyOp(air_tag, operand_pointer_ty, operand);
}
/// Value in, value out
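/// Illustrative example (hypothetical user code): the `|err|` capture in
/// `parse(s) catch |err| handle(err)` extracts the error code from the
/// error union value.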
fn zirErrUnionCode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const src = inst_data.src();
const operand = try sema.resolveInst(inst_data.operand);
return sema.analyzeErrUnionCode(block, src, operand);
}
fn analyzeErrUnionCode(sema: *Sema, block: *Block, src: LazySrcLoc, operand: Air.Inst.Ref) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const operand_ty = sema.typeOf(operand);
if (operand_ty.zigTypeTag(mod) != .ErrorUnion) {
return sema.fail(block, src, "expected error union type, found '{}'", .{
operand_ty.fmt(sema.mod),
});
}
const result_ty = operand_ty.errorUnionSet(mod);
if (try sema.resolveDefinedValue(block, src, operand)) |val| {
assert(val.getError() != null);
return sema.addConstant(result_ty, val);
}
try sema.requireRuntimeBlock(block, src, null);
return block.addTyOp(.unwrap_errunion_err, result_ty, operand);
}
/// Pointer in, value out
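/// Illustrative example (hypothetical user code): the same `|err|` capture
/// when the error union is only available through a pointer; the error code
/// is extracted from a `*E!T` without first loading the whole union.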
fn zirErrUnionCodePtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const src = inst_data.src();
const operand = try sema.resolveInst(inst_data.operand);
const operand_ty = sema.typeOf(operand);
assert(operand_ty.zigTypeTag(mod) == .Pointer);
if (operand_ty.childType(mod).zigTypeTag(mod) != .ErrorUnion) {
return sema.fail(block, src, "expected error union type, found '{}'", .{
operand_ty.childType(mod).fmt(sema.mod),
});
}
const result_ty = operand_ty.childType(mod).errorUnionSet(mod);
if (try sema.resolveDefinedValue(block, src, operand)) |pointer_val| {
if (try sema.pointerDeref(block, src, pointer_val, operand_ty)) |val| {
assert(val.getError() != null);
return sema.addConstant(result_ty, val);
}
}
try sema.requireRuntimeBlock(block, src, null);
return block.addTyOp(.unwrap_errunion_err_ptr, result_ty, operand);
}
fn zirFunc(
sema: *Sema,
block: *Block,
inst: Zir.Inst.Index,
inferred_error_set: bool,
) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const extra = sema.code.extraData(Zir.Inst.Func, inst_data.payload_index);
const target = sema.mod.getTarget();
const ret_ty_src: LazySrcLoc = .{ .node_offset_fn_type_ret_ty = inst_data.src_node };
var extra_index = extra.end;
const ret_ty: Type = switch (extra.data.ret_body_len) {
0 => Type.void,
1 => blk: {
const ret_ty_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]);
extra_index += 1;
if (sema.resolveType(block, ret_ty_src, ret_ty_ref)) |ret_ty| {
break :blk ret_ty;
} else |err| switch (err) {
error.GenericPoison => {
break :blk Type.generic_poison;
},
else => |e| return e,
}
},
else => blk: {
const ret_ty_body = sema.code.extra[extra_index..][0..extra.data.ret_body_len];
extra_index += ret_ty_body.len;
const ret_ty_val = try sema.resolveGenericBody(block, ret_ty_src, ret_ty_body, inst, Type.type, "return type must be comptime-known");
break :blk ret_ty_val.toType();
},
};
var src_locs: Zir.Inst.Func.SrcLocs = undefined;
const has_body = extra.data.body_len != 0;
if (has_body) {
extra_index += extra.data.body_len;
src_locs = sema.code.extraData(Zir.Inst.Func.SrcLocs, extra_index).data;
}
// If this instruction has a body, it is the function type of the `owner_decl`;
// otherwise it is a function type expression without a `callconv` attribute
// and should never default to `.C`.
// NOTE: revisit when doing #1717
const cc: std.builtin.CallingConvention = if (sema.owner_decl.is_exported and has_body)
.C
else
.Unspecified;
return sema.funcCommon(
block,
inst_data.src_node,
inst,
0,
target_util.defaultAddressSpace(target, .function),
FuncLinkSection.default,
cc,
ret_ty,
false,
inferred_error_set,
false,
has_body,
src_locs,
null,
0,
false,
);
}
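/// Illustrative example (hypothetical user code): in
/// `fn make(comptime n: u16) std.meta.Int(.unsigned, n) { ... }`, the return
/// type is a multi-instruction body depending on a comptime parameter; it is
/// resolved here and yields a generic poison value until the function is
/// instantiated with a concrete `n`.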
fn resolveGenericBody(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
body: []const Zir.Inst.Index,
func_inst: Zir.Inst.Index,
dest_ty: Type,
reason: []const u8,
) !Value {
assert(body.len != 0);
const err = err: {
// Make sure any nested param instructions don't clobber our work.
const prev_params = block.params;
block.params = .{};
defer {
block.params.deinit(sema.gpa);
block.params = prev_params;
}
const uncasted = sema.resolveBody(block, body, func_inst) catch |err| break :err err;
const result = sema.coerce(block, dest_ty, uncasted, src) catch |err| break :err err;
const val = sema.resolveConstValue(block, src, result, reason) catch |err| break :err err;
return val;
};
switch (err) {
error.GenericPoison => {
if (dest_ty.ip_index == .type_type) {
return Value.generic_poison_type;
} else {
return Value.generic_poison;
}
},
else => |e| return e,
}
}
/// Given a library name, determines whether it should end up in the
/// `link.File.Options.system_libs` table (for example, libc is always
/// specified via the dedicated flag `link.File.Options.link_libc` instead),
/// and adds it there if it is not already present.
/// It also dupes the library name, which can then be saved as part of the
/// respective `Decl` (either `ExternFn` or `Var`).
/// The lifetime of the duped library name is tied to the lifetime of the `Module`.
/// To deallocate, call `deinit` on the respective `Decl` (`ExternFn` or `Var`).
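/// Illustrative example (hypothetical user code): `extern "m" fn cos(x: f64) f64;`
/// routes the library name "m" through here, while `extern "c" fn ...` is
/// redirected to the `link_libc` flag rather than the `system_libs` table.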
fn handleExternLibName(
sema: *Sema,
block: *Block,
src_loc: LazySrcLoc,
lib_name: []const u8,
) CompileError![:0]u8 {
blk: {
const mod = sema.mod;
const comp = mod.comp;
const target = mod.getTarget();
log.debug("extern fn symbol expected in lib '{s}'", .{lib_name});
if (target_util.is_libc_lib_name(target, lib_name)) {
if (!comp.bin_file.options.link_libc and !comp.bin_file.options.parent_compilation_link_libc) {
return sema.fail(
block,
src_loc,
"dependency on libc must be explicitly specified in the build command",
.{},
);
}
comp.bin_file.options.link_libc = true;
break :blk;
}
if (target_util.is_libcpp_lib_name(target, lib_name)) {
if (!comp.bin_file.options.link_libcpp) {
return sema.fail(
block,
src_loc,
"dependency on libc++ must be explicitly specified in the build command",
.{},
);
}
comp.bin_file.options.link_libcpp = true;
break :blk;
}
if (mem.eql(u8, lib_name, "unwind")) {
comp.bin_file.options.link_libunwind = true;
break :blk;
}
if (!target.isWasm() and !comp.bin_file.options.pic) {
return sema.fail(
block,
src_loc,
"dependency on dynamic library '{s}' requires enabling Position Independent Code. Fixed by '-l{s}' or '-fPIC'.",
.{ lib_name, lib_name },
);
}
comp.addLinkLib(lib_name) catch |err| {
return sema.fail(block, src_loc, "unable to add link lib '{s}': {s}", .{
lib_name, @errorName(err),
});
};
}
return sema.gpa.dupeZ(u8, lib_name);
}
const FuncLinkSection = union(enum) {
generic,
default,
explicit: []const u8,
};
fn funcCommon(
sema: *Sema,
block: *Block,
src_node_offset: i32,
func_inst: Zir.Inst.Index,
/// null means generic poison
alignment: ?u32,
/// null means generic poison
address_space: ?std.builtin.AddressSpace,
/// `.generic` means generic poison; `.default` means the default link section
section: FuncLinkSection,
/// null means generic poison
cc: ?std.builtin.CallingConvention,
/// this might be Type.generic_poison
bare_return_type: Type,
var_args: bool,
inferred_error_set: bool,
is_extern: bool,
has_body: bool,
src_locs: Zir.Inst.Func.SrcLocs,
opt_lib_name: ?[]const u8,
noalias_bits: u32,
is_noinline: bool,
) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const gpa = sema.gpa;
const ret_ty_src: LazySrcLoc = .{ .node_offset_fn_type_ret_ty = src_node_offset };
const cc_src: LazySrcLoc = .{ .node_offset_fn_type_cc = src_node_offset };
const func_src = LazySrcLoc.nodeOffset(src_node_offset);
var is_generic = bare_return_type.isGenericPoison() or
alignment == null or
address_space == null or
section == .generic or
cc == null;
if (var_args) {
if (is_generic) {
return sema.fail(block, func_src, "generic function cannot be variadic", .{});
}
if (cc.? != .C) {
return sema.fail(block, cc_src, "variadic function must have 'C' calling convention", .{});
}
}
var destroy_fn_on_error = false;
const new_func: *Module.Fn = new_func: {
if (!has_body) break :new_func undefined;
if (sema.comptime_args_fn_inst == func_inst) {
const new_func = sema.preallocated_new_func.?;
sema.preallocated_new_func = null; // take ownership
break :new_func new_func;
}
destroy_fn_on_error = true;
const new_func = try gpa.create(Module.Fn);
// Set this here so that the inferred return type can be printed correctly if it appears in an error.
new_func.owner_decl = sema.owner_decl_index;
break :new_func new_func;
};
errdefer if (destroy_fn_on_error) gpa.destroy(new_func);
const target = sema.mod.getTarget();
const fn_ty: Type = fn_ty: {
// In the case of a generic calling convention or generic alignment, we use
// default values, which are only meaningful for the generic function itself,
// *not* for the instantiation, which can depend on comptime parameters.
// Related proposal: https://github.com/ziglang/zig/issues/11834
const cc_resolved = cc orelse .Unspecified;
const param_types = try sema.arena.alloc(InternPool.Index, block.params.items.len);
var comptime_bits: u32 = 0;
for (param_types, block.params.items, 0..) |*dest_param_ty, param, i| {
const is_noalias = blk: {
const index = std.math.cast(u5, i) orelse break :blk false;
break :blk @truncate(u1, noalias_bits >> index) != 0;
};
dest_param_ty.* = param.ty.toIntern();
sema.analyzeParameter(
block,
.unneeded,
param,
&comptime_bits,
i,
&is_generic,
cc_resolved,
has_body,
is_noalias,
) catch |err| switch (err) {
error.NeededSourceLocation => {
const decl = sema.mod.declPtr(block.src_decl);
try sema.analyzeParameter(
block,
Module.paramSrc(src_node_offset, mod, decl, i),
param,
&comptime_bits,
i,
&is_generic,
cc_resolved,
has_body,
is_noalias,
);
unreachable;
},
else => |e| return e,
};
}
var ret_ty_requires_comptime = false;
const ret_poison = if (sema.typeRequiresComptime(bare_return_type)) |ret_comptime| rp: {
ret_ty_requires_comptime = ret_comptime;
break :rp bare_return_type.isGenericPoison();
} else |err| switch (err) {
error.GenericPoison => rp: {
is_generic = true;
break :rp true;
},
else => |e| return e,
};
const return_type: Type = if (!inferred_error_set or ret_poison)
bare_return_type
else blk: {
try sema.validateErrorUnionPayloadType(block, bare_return_type, ret_ty_src);
const ies_index = try mod.intern_pool.createInferredErrorSet(gpa, .{
.func = new_func,
});
const error_set_ty = try mod.intern(.{ .inferred_error_set_type = ies_index });
break :blk try mod.errorUnionType(error_set_ty.toType(), bare_return_type);
};
if (!return_type.isValidReturnType(mod)) {
const opaque_str = if (return_type.zigTypeTag(mod) == .Opaque) "opaque " else "";
const msg = msg: {
const msg = try sema.errMsg(block, ret_ty_src, "{s}return type '{}' not allowed", .{
opaque_str, return_type.fmt(sema.mod),
});
errdefer msg.destroy(gpa);
try sema.addDeclaredHereNote(msg, return_type);
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
if (!ret_poison and !target_util.fnCallConvAllowsZigTypes(target, cc_resolved) and
!try sema.validateExternType(return_type, .ret_ty))
{
const msg = msg: {
const msg = try sema.errMsg(block, ret_ty_src, "return type '{}' not allowed in function with calling convention '{s}'", .{
return_type.fmt(sema.mod), @tagName(cc_resolved),
});
errdefer msg.destroy(gpa);
const src_decl = sema.mod.declPtr(block.src_decl);
try sema.explainWhyTypeIsNotExtern(msg, ret_ty_src.toSrcLoc(src_decl, mod), return_type, .ret_ty);
try sema.addDeclaredHereNote(msg, return_type);
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
// If the return type is comptime-only but not dependent on parameters, then all parameters must also be comptime.
if (!sema.is_generic_instantiation and has_body and ret_ty_requires_comptime) comptime_check: {
for (block.params.items) |param| {
if (!param.is_comptime) break;
} else break :comptime_check;
const msg = try sema.errMsg(
block,
ret_ty_src,
"function with comptime-only return type '{}' requires all parameters to be comptime",
.{return_type.fmt(sema.mod)},
);
try sema.explainWhyTypeIsComptime(msg, ret_ty_src.toSrcLoc(sema.owner_decl, mod), return_type);
const tags = sema.code.instructions.items(.tag);
const data = sema.code.instructions.items(.data);
const param_body = sema.code.getParamBody(func_inst);
for (block.params.items, 0..) |param, i| {
if (!param.is_comptime) {
const param_index = param_body[i];
const param_src = switch (tags[param_index]) {
.param => data[param_index].pl_tok.src(),
.param_anytype => data[param_index].str_tok.src(),
else => unreachable,
};
if (param.name.len != 0) {
try sema.errNote(block, param_src, msg, "param '{s}' is required to be comptime", .{param.name});
} else {
try sema.errNote(block, param_src, msg, "param is required to be comptime", .{});
}
}
}
return sema.failWithOwnedErrorMsg(msg);
}
const arch = sema.mod.getTarget().cpu.arch;
if (switch (cc_resolved) {
.Unspecified, .C, .Naked, .Async, .Inline => null,
.Interrupt => switch (arch) {
.x86, .x86_64, .avr, .msp430 => null,
else => @as([]const u8, "x86, x86_64, AVR, and MSP430"),
},
.Signal => switch (arch) {
.avr => null,
else => @as([]const u8, "AVR"),
},
.Stdcall, .Fastcall, .Thiscall => switch (arch) {
.x86 => null,
else => @as([]const u8, "x86"),
},
.Vectorcall => switch (arch) {
.x86, .aarch64, .aarch64_be, .aarch64_32 => null,
else => @as([]const u8, "x86 and AArch64"),
},
.APCS, .AAPCS, .AAPCSVFP => switch (arch) {
.arm, .armeb, .aarch64, .aarch64_be, .aarch64_32, .thumb, .thumbeb => null,
else => @as([]const u8, "ARM"),
},
.SysV, .Win64 => switch (arch) {
.x86_64 => null,
else => @as([]const u8, "x86_64"),
},
.Kernel => switch (arch) {
.nvptx, .nvptx64, .amdgcn, .spirv32, .spirv64 => null,
else => @as([]const u8, "nvptx, amdgcn and SPIR-V"),
},
}) |allowed_platform| {
return sema.fail(block, cc_src, "callconv '{s}' is only available on {s}, not {s}", .{
@tagName(cc_resolved),
allowed_platform,
@tagName(arch),
});
}
if (cc_resolved == .Inline and is_noinline) {
return sema.fail(block, cc_src, "'noinline' function cannot have callconv 'Inline'", .{});
}
if (is_generic and sema.no_partial_func_ty) return error.GenericPoison;
is_generic = is_generic or comptime_bits != 0 or ret_ty_requires_comptime;
if (!is_generic and sema.wantErrorReturnTracing(return_type)) {
// Make sure that StackTrace's fields are resolved so that the backend can
// lower this fn type.
const unresolved_stack_trace_ty = try sema.getBuiltinType("StackTrace");
_ = try sema.resolveTypeFields(unresolved_stack_trace_ty);
}
break :fn_ty try mod.funcType(.{
.param_types = param_types,
.noalias_bits = noalias_bits,
.comptime_bits = comptime_bits,
.return_type = return_type.toIntern(),
.cc = cc_resolved,
.cc_is_generic = cc == null,
.alignment = if (alignment) |a| InternPool.Alignment.fromByteUnits(a) else .none,
.align_is_generic = alignment == null,
.section_is_generic = section == .generic,
.addrspace_is_generic = address_space == null,
.is_var_args = var_args,
.is_generic = is_generic,
.is_noinline = is_noinline,
});
};
sema.owner_decl.@"linksection" = switch (section) {
.generic => undefined,
.default => null,
.explicit => |section_name| try sema.perm_arena.dupeZ(u8, section_name),
};
sema.owner_decl.@"align" = alignment orelse 0;
sema.owner_decl.@"addrspace" = address_space orelse .generic;
if (is_extern) {
const new_extern_fn = try gpa.create(Module.ExternFn);
errdefer gpa.destroy(new_extern_fn);
new_extern_fn.* = Module.ExternFn{
.owner_decl = sema.owner_decl_index,
.lib_name = null,
};
if (opt_lib_name) |lib_name| {
new_extern_fn.lib_name = try sema.handleExternLibName(block, .{
.node_offset_lib_name = src_node_offset,
}, lib_name);
}
const extern_fn_payload = try sema.arena.create(Value.Payload.ExternFn);
extern_fn_payload.* = .{
.base = .{ .tag = .extern_fn },
.data = new_extern_fn,
};
return sema.addConstant(fn_ty, Value.initPayload(&extern_fn_payload.base));
}
if (!has_body) {
return sema.addType(fn_ty);
}
const is_inline = fn_ty.fnCallingConvention(mod) == .Inline;
const anal_state: Module.Fn.Analysis = if (is_inline) .inline_only else .none;
const comptime_args: ?[*]TypedValue = if (sema.comptime_args_fn_inst == func_inst) blk: {
break :blk if (sema.comptime_args.len == 0) null else sema.comptime_args.ptr;
} else null;
const hash = new_func.hash;
const generic_owner_decl = if (comptime_args == null) .none else new_func.generic_owner_decl;
const fn_payload = try sema.arena.create(Value.Payload.Function);
new_func.* = .{
.state = anal_state,
.zir_body_inst = func_inst,
.owner_decl = sema.owner_decl_index,
.generic_owner_decl = generic_owner_decl,
.comptime_args = comptime_args,
.hash = hash,
.lbrace_line = src_locs.lbrace_line,
.rbrace_line = src_locs.rbrace_line,
.lbrace_column = @truncate(u16, src_locs.columns),
.rbrace_column = @truncate(u16, src_locs.columns >> 16),
.branch_quota = default_branch_quota,
.is_noinline = is_noinline,
};
fn_payload.* = .{
.base = .{ .tag = .function },
.data = new_func,
};
return sema.addConstant(fn_ty, Value.initPayload(&fn_payload.base));
}
fn analyzeParameter(
sema: *Sema,
block: *Block,
param_src: LazySrcLoc,
param: Block.Param,
comptime_bits: *u32,
i: usize,
is_generic: *bool,
cc: std.builtin.CallingConvention,
has_body: bool,
is_noalias: bool,
) !void {
const mod = sema.mod;
const requires_comptime = try sema.typeRequiresComptime(param.ty);
if (param.is_comptime or requires_comptime) {
comptime_bits.* |= @as(u32, 1) << @intCast(u5, i); // TODO: handle cast error
}
const this_generic = param.ty.isGenericPoison();
is_generic.* = is_generic.* or this_generic;
const target = mod.getTarget();
if (param.is_comptime and !target_util.fnCallConvAllowsZigTypes(target, cc)) {
return sema.fail(block, param_src, "comptime parameters not allowed in function with calling convention '{s}'", .{@tagName(cc)});
}
if (this_generic and !sema.no_partial_func_ty and !target_util.fnCallConvAllowsZigTypes(target, cc)) {
return sema.fail(block, param_src, "generic parameters not allowed in function with calling convention '{s}'", .{@tagName(cc)});
}
if (!param.ty.isValidParamType(mod)) {
const opaque_str = if (param.ty.zigTypeTag(mod) == .Opaque) "opaque " else "";
const msg = msg: {
const msg = try sema.errMsg(block, param_src, "parameter of {s}type '{}' not allowed", .{
opaque_str, param.ty.fmt(mod),
});
errdefer msg.destroy(sema.gpa);
try sema.addDeclaredHereNote(msg, param.ty);
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
if (!this_generic and !target_util.fnCallConvAllowsZigTypes(target, cc) and !try sema.validateExternType(param.ty, .param_ty)) {
const msg = msg: {
const msg = try sema.errMsg(block, param_src, "parameter of type '{}' not allowed in function with calling convention '{s}'", .{
param.ty.fmt(mod), @tagName(cc),
});
errdefer msg.destroy(sema.gpa);
const src_decl = mod.declPtr(block.src_decl);
try sema.explainWhyTypeIsNotExtern(msg, param_src.toSrcLoc(src_decl, mod), param.ty, .param_ty);
try sema.addDeclaredHereNote(msg, param.ty);
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
if (!sema.is_generic_instantiation and requires_comptime and !param.is_comptime and has_body) {
const msg = msg: {
const msg = try sema.errMsg(block, param_src, "parameter of type '{}' must be declared comptime", .{
param.ty.fmt(mod),
});
errdefer msg.destroy(sema.gpa);
const src_decl = mod.declPtr(block.src_decl);
try sema.explainWhyTypeIsComptime(msg, param_src.toSrcLoc(src_decl, mod), param.ty);
try sema.addDeclaredHereNote(msg, param.ty);
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
if (!sema.is_generic_instantiation and !this_generic and is_noalias and
!(param.ty.zigTypeTag(mod) == .Pointer or param.ty.isPtrLikeOptional(mod)))
{
return sema.fail(block, param_src, "non-pointer parameter declared noalias", .{});
}
}
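/// Illustrative example (hypothetical user code): in
/// `fn f(comptime T: type, x: T) void { ... }`, the type body of `x` refers
/// to the earlier generic parameter `T`; before instantiation it resolves to
/// `error.GenericPoison`, which is handled below by recording an anytype
/// parameter instead.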
fn zirParam(
sema: *Sema,
block: *Block,
inst: Zir.Inst.Index,
comptime_syntax: bool,
) CompileError!void {
const inst_data = sema.code.instructions.items(.data)[inst].pl_tok;
const src = inst_data.src();
const extra = sema.code.extraData(Zir.Inst.Param, inst_data.payload_index);
const param_name = sema.code.nullTerminatedString(extra.data.name);
const body = sema.code.extra[extra.end..][0..extra.data.body_len];
// We could be in a generic function instantiation, or we could be evaluating a generic
// function without any comptime args provided.
const param_ty = param_ty: {
const err = err: {
// Make sure any nested param instructions don't clobber our work.
const prev_params = block.params;
const prev_preallocated_new_func = sema.preallocated_new_func;
const prev_no_partial_func_type = sema.no_partial_func_ty;
block.params = .{};
sema.preallocated_new_func = null;
sema.no_partial_func_ty = true;
defer {
block.params.deinit(sema.gpa);
block.params = prev_params;
sema.preallocated_new_func = prev_preallocated_new_func;
sema.no_partial_func_ty = prev_no_partial_func_type;
}
if (sema.resolveBody(block, body, inst)) |param_ty_inst| {
if (sema.analyzeAsType(block, src, param_ty_inst)) |param_ty| {
break :param_ty param_ty;
} else |err| break :err err;
} else |err| break :err err;
};
switch (err) {
error.GenericPoison => {
if (sema.inst_map.get(inst)) |_| {
// A generic function is about to evaluate to another generic function.
// Return an error instead.
return error.GenericPoison;
}
// The type is not available until the generic instantiation.
// We resolve the param instruction to a poison value and
// insert an anytype parameter.
try block.params.append(sema.gpa, .{
.ty = Type.generic_poison,
.is_comptime = comptime_syntax,
.name = param_name,
});
sema.inst_map.putAssumeCapacityNoClobber(inst, .generic_poison);
return;
},
else => |e| return e,
}
};
const is_comptime = sema.typeRequiresComptime(param_ty) catch |err| switch (err) {
error.GenericPoison => {
if (sema.inst_map.get(inst)) |_| {
// A generic function is about to evaluate to another generic function.
// Return an error instead.
return error.GenericPoison;
}
// The type is not available until the generic instantiation.
// We resolve the param instruction to a poison value and
// insert an anytype parameter.
try block.params.append(sema.gpa, .{
.ty = Type.generic_poison,
.is_comptime = comptime_syntax,
.name = param_name,
});
sema.inst_map.putAssumeCapacityNoClobber(inst, .generic_poison);
return;
},
else => |e| return e,
} or comptime_syntax;
if (sema.inst_map.get(inst)) |arg| {
if (is_comptime and sema.preallocated_new_func != null) {
// We have a comptime value for this parameter so it should be elided from the
// function type of the function instruction in this block.
const coerced_arg = sema.coerce(block, param_ty, arg, .unneeded) catch |err| switch (err) {
error.NeededSourceLocation => {
// We are instantiating a generic function and a comptime arg
// cannot be coerced to the param type, but since we don't
// have the callee source location return `GenericPoison`
// so that the instantiation is failed and the coercion
// is handled by comptime call logic instead.
assert(sema.is_generic_instantiation);
return error.GenericPoison;
},
else => return err,
};
sema.inst_map.putAssumeCapacity(inst, coerced_arg);
return;
}
// Even though a comptime argument is provided, the generic function wants to treat
// this as a runtime parameter.
assert(sema.inst_map.remove(inst));
}
if (sema.preallocated_new_func != null) {
if (try sema.typeHasOnePossibleValue(param_ty)) |opv| {
// In this case we are instantiating a generic function call with a
// non-comptime, non-anytype parameter whose type turned out to have only
// one possible value. We don't want the parameter to be part of the
// instantiated function type.
const result = try sema.addConstant(param_ty, opv);
sema.inst_map.putAssumeCapacity(inst, result);
return;
}
}
try block.params.append(sema.gpa, .{
.ty = param_ty,
.is_comptime = comptime_syntax,
.name = param_name,
});
if (is_comptime) {
// A comptime parameter is also a generic parameter, so we can map it to
// a constant generic_poison value here.
const result = try sema.addConstant(param_ty, Value.generic_poison);
sema.inst_map.putAssumeCapacityNoClobber(inst, result);
} else {
// Otherwise we need a dummy runtime instruction.
const result_index = @intCast(Air.Inst.Index, sema.air_instructions.len);
try sema.air_instructions.append(sema.gpa, .{
.tag = .alloc,
.data = .{ .ty = param_ty },
});
const result = Air.indexToRef(result_index);
sema.inst_map.putAssumeCapacityNoClobber(inst, result);
}
}
fn zirParamAnytype(
sema: *Sema,
block: *Block,
inst: Zir.Inst.Index,
comptime_syntax: bool,
) CompileError!void {
const inst_data = sema.code.instructions.items(.data)[inst].str_tok;
const param_name = inst_data.get(sema.code);
if (sema.inst_map.get(inst)) |air_ref| {
const param_ty = sema.typeOf(air_ref);
if (comptime_syntax or try sema.typeRequiresComptime(param_ty)) {
// We have a comptime value for this parameter so it should be elided from the
// function type of the function instruction in this block.
return;
}
if (null != try sema.typeHasOnePossibleValue(param_ty)) {
return;
}
// The map is already populated but we do need to add a runtime parameter.
try block.params.append(sema.gpa, .{
.ty = param_ty,
.is_comptime = false,
.name = param_name,
});
return;
}
// We are evaluating a generic function without any comptime args provided.
try block.params.append(sema.gpa, .{
.ty = Type.generic_poison,
.is_comptime = comptime_syntax,
.name = param_name,
});
sema.inst_map.putAssumeCapacity(inst, .generic_poison);
}
fn zirAs(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
const bin_inst = sema.code.instructions.items(.data)[inst].bin;
return sema.analyzeAs(block, sema.src, bin_inst.lhs, bin_inst.rhs, false);
}
fn zirAsNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
const extra = sema.code.extraData(Zir.Inst.As, inst_data.payload_index).data;
sema.src = src;
return sema.analyzeAs(block, src, extra.dest_type, extra.operand, false);
}
fn zirAsShiftOperand(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
const extra = sema.code.extraData(Zir.Inst.As, inst_data.payload_index).data;
return sema.analyzeAs(block, src, extra.dest_type, extra.operand, true);
}
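/// Illustrative example (hypothetical user code): `@as(u8, x)` and
/// type-annotated initializers such as `const y: u8 = x;` both funnel
/// through here; shift operands arrive via `zirAsShiftOperand`, which sets
/// `no_cast_to_comptime_int`.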
fn analyzeAs(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
zir_dest_type: Zir.Inst.Ref,
zir_operand: Zir.Inst.Ref,
no_cast_to_comptime_int: bool,
) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const operand = try sema.resolveInst(zir_operand);
if (zir_dest_type == .var_args_param_type) return operand;
const dest_ty = sema.resolveType(block, src, zir_dest_type) catch |err| switch (err) {
error.GenericPoison => return operand,
else => |e| return e,
};
if (dest_ty.zigTypeTag(mod) == .NoReturn) {
return sema.fail(block, src, "cannot cast to noreturn", .{});
}
const is_ret = if (Zir.refToIndex(zir_dest_type)) |ptr_index|
sema.code.instructions.items(.tag)[ptr_index] == .ret_type
else
false;
return sema.coerceExtra(block, dest_ty, operand, src, .{ .is_ret = is_ret, .no_cast_to_comptime_int = no_cast_to_comptime_int }) catch |err| switch (err) {
error.NotCoercible => unreachable,
else => |e| return e,
};
}
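/// Illustrative example (hypothetical user code): `@ptrToInt(&x)` yields a
/// `usize`; a comptime-known pointer value folds to a constant, otherwise a
/// runtime `ptrtoint` instruction is emitted.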
fn zirPtrToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const ptr_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
const ptr = try sema.resolveInst(inst_data.operand);
const ptr_ty = sema.typeOf(ptr);
if (!ptr_ty.isPtrAtRuntime(mod)) {
return sema.fail(block, ptr_src, "expected pointer, found '{}'", .{ptr_ty.fmt(sema.mod)});
}
if (try sema.resolveMaybeUndefValIntable(ptr)) |ptr_val| {
return sema.addConstant(Type.usize, ptr_val);
}
try sema.requireRuntimeBlock(block, inst_data.src(), ptr_src);
return block.addUnOp(.ptrtoint, ptr);
}
fn zirFieldVal(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
const field_name_src: LazySrcLoc = .{ .node_offset_field_name = inst_data.src_node };
const extra = sema.code.extraData(Zir.Inst.Field, inst_data.payload_index).data;
const field_name = sema.code.nullTerminatedString(extra.field_name_start);
const object = try sema.resolveInst(extra.lhs);
return sema.fieldVal(block, src, object, field_name, field_name_src);
}
fn zirFieldPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index, initializing: bool) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
const field_name_src: LazySrcLoc = .{ .node_offset_field_name = inst_data.src_node };
const extra = sema.code.extraData(Zir.Inst.Field, inst_data.payload_index).data;
const field_name = sema.code.nullTerminatedString(extra.field_name_start);
const object_ptr = try sema.resolveInst(extra.lhs);
return sema.fieldPtr(block, src, object_ptr, field_name, field_name_src, initializing);
}
fn zirFieldValNamed(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
const field_name_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
const extra = sema.code.extraData(Zir.Inst.FieldNamed, inst_data.payload_index).data;
const object = try sema.resolveInst(extra.lhs);
const field_name = try sema.resolveConstString(block, field_name_src, extra.field_name, "field name must be comptime-known");
return sema.fieldVal(block, src, object, field_name, field_name_src);
}
fn zirFieldPtrNamed(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
const field_name_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
const extra = sema.code.extraData(Zir.Inst.FieldNamed, inst_data.payload_index).data;
const object_ptr = try sema.resolveInst(extra.lhs);
const field_name = try sema.resolveConstString(block, field_name_src, extra.field_name, "field name must be comptime-known");
return sema.fieldPtr(block, src, object_ptr, field_name, field_name_src, false);
}
fn zirIntCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const dest_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const dest_ty = try sema.resolveType(block, dest_ty_src, extra.lhs);
const operand = try sema.resolveInst(extra.rhs);
return sema.intCast(block, inst_data.src(), dest_ty, dest_ty_src, operand, operand_src, true);
}
fn intCast(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
dest_ty: Type,
dest_ty_src: LazySrcLoc,
operand: Air.Inst.Ref,
operand_src: LazySrcLoc,
runtime_safety: bool,
) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const operand_ty = sema.typeOf(operand);
const dest_scalar_ty = try sema.checkIntOrVectorAllowComptime(block, dest_ty, dest_ty_src);
const operand_scalar_ty = try sema.checkIntOrVectorAllowComptime(block, operand_ty, operand_src);
if (try sema.isComptimeKnown(operand)) {
return sema.coerce(block, dest_ty, operand, operand_src);
} else if (dest_scalar_ty.zigTypeTag(mod) == .ComptimeInt) {
return sema.fail(block, operand_src, "unable to cast runtime value to 'comptime_int'", .{});
}
try sema.checkVectorizableBinaryOperands(block, operand_src, dest_ty, operand_ty, dest_ty_src, operand_src);
const is_vector = dest_ty.zigTypeTag(mod) == .Vector;
if ((try sema.typeHasOnePossibleValue(dest_ty))) |opv| {
// requirement: an intCast to u0 (or another zero-bit integer type) is valid iff input == 0
if (runtime_safety and block.wantSafety()) {
try sema.requireRuntimeBlock(block, src, operand_src);
const wanted_info = dest_scalar_ty.intInfo(mod);
const wanted_bits = wanted_info.bits;
if (wanted_bits == 0) {
const ok = if (is_vector) ok: {
const zeros = try Value.Tag.repeated.create(sema.arena, try mod.intValue(operand_scalar_ty, 0));
const zero_inst = try sema.addConstant(sema.typeOf(operand), zeros);
const is_in_range = try block.addCmpVector(operand, zero_inst, .eq);
const all_in_range = try block.addInst(.{
.tag = .reduce,
.data = .{ .reduce = .{ .operand = is_in_range, .operation = .And } },
});
break :ok all_in_range;
} else ok: {
const zero_inst = try sema.addConstant(sema.typeOf(operand), try mod.intValue(operand_ty, 0));
const is_in_range = try block.addBinOp(.cmp_lte, operand, zero_inst);
break :ok is_in_range;
};
try sema.addSafetyCheck(block, ok, .cast_truncated_data);
}
}
return sema.addConstant(dest_ty, opv);
}
try sema.requireRuntimeBlock(block, src, operand_src);
if (runtime_safety and block.wantSafety()) {
const actual_info = operand_scalar_ty.intInfo(mod);
const wanted_info = dest_scalar_ty.intInfo(mod);
const actual_bits = actual_info.bits;
const wanted_bits = wanted_info.bits;
const actual_value_bits = actual_bits - @boolToInt(actual_info.signedness == .signed);
const wanted_value_bits = wanted_bits - @boolToInt(wanted_info.signedness == .signed);
// range shrinkage
// requirement: int value fits into target type
if (wanted_value_bits < actual_value_bits) {
const dest_max_val_scalar = try dest_scalar_ty.maxIntScalar(mod, operand_ty);
const dest_max_val = if (is_vector)
try Value.Tag.repeated.create(sema.arena, dest_max_val_scalar)
else
dest_max_val_scalar;
const dest_max = try sema.addConstant(operand_ty, dest_max_val);
const diff = try block.addBinOp(.subwrap, dest_max, operand);
if (actual_info.signedness == .signed) {
// Reinterpret the sign-bit as part of the value. This will make
// negative differences (`operand` > `dest_max`) appear too big.
const unsigned_operand_ty = try mod.intType(.unsigned, actual_bits);
const diff_unsigned = try block.addBitCast(unsigned_operand_ty, diff);
// If the destination type is signed, then we need to double its
// range to account for negative values.
const dest_range_val = if (wanted_info.signedness == .signed) range_val: {
const one = try mod.intValue(unsigned_operand_ty, 1);
const range_minus_one = try dest_max_val.shl(one, unsigned_operand_ty, sema.arena, sema.mod);
break :range_val try sema.intAdd(range_minus_one, one, unsigned_operand_ty);
} else dest_max_val;
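// Worked example (illustrative, not from the original source): casting
// i16 -> i8 gives dest_max = 127 and dest_range = 2 * 127 + 1 = 255; for
// any operand x, diff = 127 -% x reinterpreted as u16 is <= 255 exactly
// when -128 <= x <= 127, so the comparison below catches every
// out-of-range value.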
const dest_range = try sema.addConstant(unsigned_operand_ty, dest_range_val);
const ok = if (is_vector) ok: {
const is_in_range = try block.addCmpVector(diff_unsigned, dest_range, .lte);
const all_in_range = try block.addInst(.{
.tag = if (block.float_mode == .Optimized) .reduce_optimized else .reduce,
.data = .{ .reduce = .{
.operand = is_in_range,
.operation = .And,
} },
});
break :ok all_in_range;
} else ok: {
const is_in_range = try block.addBinOp(.cmp_lte, diff_unsigned, dest_range);
break :ok is_in_range;
};
// TODO negative_to_unsigned?
try sema.addSafetyCheck(block, ok, .cast_truncated_data);
} else {
const ok = if (is_vector) ok: {
const is_in_range = try block.addCmpVector(diff, dest_max, .lte);
const all_in_range = try block.addInst(.{
.tag = if (block.float_mode == .Optimized) .reduce_optimized else .reduce,
.data = .{ .reduce = .{
.operand = is_in_range,
.operation = .And,
} },
});
break :ok all_in_range;
} else ok: {
const is_in_range = try block.addBinOp(.cmp_lte, diff, dest_max);
break :ok is_in_range;
};
try sema.addSafetyCheck(block, ok, .cast_truncated_data);
}
} else if (actual_info.signedness == .signed and wanted_info.signedness == .unsigned) {
// no shrinkage, yes sign loss
// requirement: signed to unsigned >= 0
const ok = if (is_vector) ok: {
const zero_val = try Value.Tag.repeated.create(sema.arena, try mod.intValue(operand_scalar_ty, 0));
const zero_inst = try sema.addConstant(operand_ty, zero_val);
const is_in_range = try block.addCmpVector(operand, zero_inst, .gte);
const all_in_range = try block.addInst(.{
.tag = if (block.float_mode == .Optimized) .reduce_optimized else .reduce,
.data = .{ .reduce = .{
.operand = is_in_range,
.operation = .And,
} },
});
break :ok all_in_range;
} else ok: {
const zero_inst = try sema.addConstant(operand_ty, try mod.intValue(operand_ty, 0));
const is_in_range = try block.addBinOp(.cmp_gte, operand, zero_inst);
break :ok is_in_range;
};
try sema.addSafetyCheck(block, ok, .negative_to_unsigned);
}
}
return block.addTyOp(.intcast, dest_ty, operand);
}
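/// Illustrative example (hypothetical user code):
/// `@bitCast(u32, @as(f32, 1.0))` reinterprets the bits of a 4-byte float as
/// a 4-byte integer; the switches below reject source and destination types
/// that lack a guaranteed in-memory layout.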
fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const dest_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const dest_ty = try sema.resolveType(block, dest_ty_src, extra.lhs);
const operand = try sema.resolveInst(extra.rhs);
const operand_ty = sema.typeOf(operand);
switch (dest_ty.zigTypeTag(mod)) {
.AnyFrame,
.ComptimeFloat,
.ComptimeInt,
.EnumLiteral,
.ErrorSet,
.ErrorUnion,
.Fn,
.Frame,
.NoReturn,
.Null,
.Opaque,
.Optional,
.Type,
.Undefined,
.Void,
=> return sema.fail(block, dest_ty_src, "cannot @bitCast to '{}'", .{dest_ty.fmt(sema.mod)}),
.Enum => {
const msg = msg: {
const msg = try sema.errMsg(block, dest_ty_src, "cannot @bitCast to '{}'", .{dest_ty.fmt(sema.mod)});
errdefer msg.destroy(sema.gpa);
switch (operand_ty.zigTypeTag(mod)) {
.Int, .ComptimeInt => try sema.errNote(block, dest_ty_src, msg, "use @intToEnum to cast from '{}'", .{operand_ty.fmt(sema.mod)}),
else => {},
}
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
},
.Pointer => {
const msg = msg: {
const msg = try sema.errMsg(block, dest_ty_src, "cannot @bitCast to '{}'", .{dest_ty.fmt(sema.mod)});
errdefer msg.destroy(sema.gpa);
switch (operand_ty.zigTypeTag(mod)) {
.Int, .ComptimeInt => try sema.errNote(block, dest_ty_src, msg, "use @intToPtr to cast from '{}'", .{operand_ty.fmt(sema.mod)}),
.Pointer => try sema.errNote(block, dest_ty_src, msg, "use @ptrCast to cast from '{}'", .{operand_ty.fmt(sema.mod)}),
else => {},
}
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
},
.Struct, .Union => if (dest_ty.containerLayout(mod) == .Auto) {
const container = switch (dest_ty.zigTypeTag(mod)) {
.Struct => "struct",
.Union => "union",
else => unreachable,
};
return sema.fail(block, dest_ty_src, "cannot @bitCast to '{}'; {s} does not have a guaranteed in-memory layout", .{
dest_ty.fmt(sema.mod), container,
});
},
.Array,
.Bool,
.Float,
.Int,
.Vector,
=> {},
}
switch (operand_ty.zigTypeTag(mod)) {
.AnyFrame,
.ComptimeFloat,
.ComptimeInt,
.EnumLiteral,
.ErrorSet,
.ErrorUnion,
.Fn,
.Frame,
.NoReturn,
.Null,
.Opaque,
.Optional,
.Type,
.Undefined,
.Void,
=> return sema.fail(block, operand_src, "cannot @bitCast from '{}'", .{operand_ty.fmt(sema.mod)}),
.Enum => {
const msg = msg: {
const msg = try sema.errMsg(block, operand_src, "cannot @bitCast from '{}'", .{operand_ty.fmt(sema.mod)});
errdefer msg.destroy(sema.gpa);
switch (dest_ty.zigTypeTag(mod)) {
.Int, .ComptimeInt => try sema.errNote(block, operand_src, msg, "use @enumToInt to cast to '{}'", .{dest_ty.fmt(sema.mod)}),
else => {},
}
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
},
.Pointer => {
const msg = msg: {
const msg = try sema.errMsg(block, operand_src, "cannot @bitCast from '{}'", .{operand_ty.fmt(sema.mod)});
errdefer msg.destroy(sema.gpa);
switch (dest_ty.zigTypeTag(mod)) {
.Int, .ComptimeInt => try sema.errNote(block, operand_src, msg, "use @ptrToInt to cast to '{}'", .{dest_ty.fmt(sema.mod)}),
.Pointer => try sema.errNote(block, operand_src, msg, "use @ptrCast to cast to '{}'", .{dest_ty.fmt(sema.mod)}),
else => {},
}
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
},
.Struct, .Union => if (operand_ty.containerLayout(mod) == .Auto) {
const container = switch (operand_ty.zigTypeTag(mod)) {
.Struct => "struct",
.Union => "union",
else => unreachable,
};
return sema.fail(block, operand_src, "cannot @bitCast from '{}'; {s} does not have a guaranteed in-memory layout", .{
operand_ty.fmt(sema.mod), container,
});
},
.Array,
.Bool,
.Float,
.Int,
.Vector,
=> {},
}
return sema.bitCast(block, dest_ty, operand, inst_data.src(), operand_src);
}
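/// Illustrative example (hypothetical user code): `@floatCast(f32, x)` with
/// `x: f64` narrows at runtime via `fptrunc`, while a widening cast (equal or
/// more bits) is handled as an ordinary coercion.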
fn zirFloatCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const dest_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const dest_ty = try sema.resolveType(block, dest_ty_src, extra.lhs);
const operand = try sema.resolveInst(extra.rhs);
const target = sema.mod.getTarget();
const dest_is_comptime_float = switch (dest_ty.zigTypeTag(mod)) {
.ComptimeFloat => true,
.Float => false,
else => return sema.fail(
block,
dest_ty_src,
"expected float type, found '{}'",
.{dest_ty.fmt(sema.mod)},
),
};
const operand_ty = sema.typeOf(operand);
switch (operand_ty.zigTypeTag(mod)) {
.ComptimeFloat, .Float, .ComptimeInt => {},
else => return sema.fail(
block,
operand_src,
"expected float type, found '{}'",
.{operand_ty.fmt(sema.mod)},
),
}
if (try sema.resolveMaybeUndefVal(operand)) |operand_val| {
return sema.addConstant(dest_ty, try operand_val.floatCast(dest_ty, mod));
}
if (dest_is_comptime_float) {
return sema.fail(block, operand_src, "unable to cast runtime value to 'comptime_float'", .{});
}
const src_bits = operand_ty.floatBits(target);
const dst_bits = dest_ty.floatBits(target);
if (dst_bits >= src_bits) {
return sema.coerce(block, dest_ty, operand, operand_src);
}
try sema.requireRuntimeBlock(block, inst_data.src(), operand_src);
return block.addTyOp(.fptrunc, dest_ty, operand);
}
fn zirElemVal(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const array = try sema.resolveInst(extra.lhs);
const elem_index = try sema.resolveInst(extra.rhs);
return sema.elemVal(block, src, array, elem_index, src, false);
}
fn zirElemValNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
const elem_index_src: LazySrcLoc = .{ .node_offset_array_access_index = inst_data.src_node };
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const array = try sema.resolveInst(extra.lhs);
const elem_index = try sema.resolveInst(extra.rhs);
return sema.elemVal(block, src, array, elem_index, elem_index_src, true);
}
fn zirElemPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const array_ptr = try sema.resolveInst(extra.lhs);
const elem_index = try sema.resolveInst(extra.rhs);
const indexable_ty = sema.typeOf(array_ptr);
if (indexable_ty.zigTypeTag(mod) != .Pointer) {
const capture_src: LazySrcLoc = .{ .for_capture_from_input = inst_data.src_node };
const msg = msg: {
const msg = try sema.errMsg(block, capture_src, "pointer capture of non-pointer type '{}'", .{
indexable_ty.fmt(sema.mod),
});
errdefer msg.destroy(sema.gpa);
if (indexable_ty.zigTypeTag(mod) == .Array) {
try sema.errNote(block, src, msg, "consider using '&' here", .{});
}
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
return sema.elemPtrOneLayerOnly(block, src, array_ptr, elem_index, src, false, false);
}
fn zirElemPtrNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
const elem_index_src: LazySrcLoc = .{ .node_offset_array_access_index = inst_data.src_node };
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const array_ptr = try sema.resolveInst(extra.lhs);
const elem_index = try sema.resolveInst(extra.rhs);
return sema.elemPtr(block, src, array_ptr, elem_index, elem_index_src, false, true);
}
fn zirElemPtrImm(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
const extra = sema.code.extraData(Zir.Inst.ElemPtrImm, inst_data.payload_index).data;
const array_ptr = try sema.resolveInst(extra.ptr);
const elem_index = try sema.addIntUnsigned(Type.usize, extra.index);
return sema.elemPtr(block, src, array_ptr, elem_index, src, true, true);
}
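// The slice instructions below correspond to the source forms (illustrative
// mapping, hypothetical user code): `a[s..]` (start only), `a[s..e]` (start
// and end), `a[s..e :v]` (with sentinel), and `a[s..][0..n]` (start plus
// length).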
fn zirSliceStart(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
const extra = sema.code.extraData(Zir.Inst.SliceStart, inst_data.payload_index).data;
const array_ptr = try sema.resolveInst(extra.lhs);
const start = try sema.resolveInst(extra.start);
const ptr_src: LazySrcLoc = .{ .node_offset_slice_ptr = inst_data.src_node };
const start_src: LazySrcLoc = .{ .node_offset_slice_start = inst_data.src_node };
const end_src: LazySrcLoc = .{ .node_offset_slice_end = inst_data.src_node };
return sema.analyzeSlice(block, src, array_ptr, start, .none, .none, .unneeded, ptr_src, start_src, end_src, false);
}
fn zirSliceEnd(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
const extra = sema.code.extraData(Zir.Inst.SliceEnd, inst_data.payload_index).data;
const array_ptr = try sema.resolveInst(extra.lhs);
const start = try sema.resolveInst(extra.start);
const end = try sema.resolveInst(extra.end);
const ptr_src: LazySrcLoc = .{ .node_offset_slice_ptr = inst_data.src_node };
const start_src: LazySrcLoc = .{ .node_offset_slice_start = inst_data.src_node };
const end_src: LazySrcLoc = .{ .node_offset_slice_end = inst_data.src_node };
return sema.analyzeSlice(block, src, array_ptr, start, end, .none, .unneeded, ptr_src, start_src, end_src, false);
}
fn zirSliceSentinel(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
const sentinel_src: LazySrcLoc = .{ .node_offset_slice_sentinel = inst_data.src_node };
const extra = sema.code.extraData(Zir.Inst.SliceSentinel, inst_data.payload_index).data;
const array_ptr = try sema.resolveInst(extra.lhs);
const start = try sema.resolveInst(extra.start);
const end = try sema.resolveInst(extra.end);
const sentinel = try sema.resolveInst(extra.sentinel);
const ptr_src: LazySrcLoc = .{ .node_offset_slice_ptr = inst_data.src_node };
const start_src: LazySrcLoc = .{ .node_offset_slice_start = inst_data.src_node };
const end_src: LazySrcLoc = .{ .node_offset_slice_end = inst_data.src_node };
return sema.analyzeSlice(block, src, array_ptr, start, end, sentinel, sentinel_src, ptr_src, start_src, end_src, false);
}
fn zirSliceLength(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
const extra = sema.code.extraData(Zir.Inst.SliceLength, inst_data.payload_index).data;
const array_ptr = try sema.resolveInst(extra.lhs);
const start = try sema.resolveInst(extra.start);
const len = try sema.resolveInst(extra.len);
const sentinel = try sema.resolveInst(extra.sentinel);
const ptr_src: LazySrcLoc = .{ .node_offset_slice_ptr = inst_data.src_node };
const start_src: LazySrcLoc = .{ .node_offset_slice_start = extra.start_src_node_offset };
const end_src: LazySrcLoc = .{ .node_offset_slice_end = inst_data.src_node };
const sentinel_src: LazySrcLoc = if (sentinel == .none)
.unneeded
else
.{ .node_offset_slice_sentinel = inst_data.src_node };
return sema.analyzeSlice(block, src, array_ptr, start, len, sentinel, sentinel_src, ptr_src, start_src, end_src, true);
}
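/// Illustrative example (hypothetical user code): the payload captures in
/// `switch (u) { .a => |payload| ..., .b, .c => |v| ..., else => |other| ... }`
/// are analyzed here, including by-reference (`|*payload|`) and inline-prong
/// captures.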
fn zirSwitchCapture(
sema: *Sema,
block: *Block,
inst: Zir.Inst.Index,
is_multi: bool,
is_ref: bool,
) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
const mod = sema.mod;
const gpa = sema.gpa;
const zir_datas = sema.code.instructions.items(.data);
const capture_info = zir_datas[inst].switch_capture;
const switch_info = zir_datas[capture_info.switch_inst].pl_node;
const switch_extra = sema.code.extraData(Zir.Inst.SwitchBlock, switch_info.payload_index);
const operand_src: LazySrcLoc = .{ .node_offset_switch_operand = switch_info.src_node };
const cond_inst = Zir.refToIndex(switch_extra.data.operand).?;
const cond_info = zir_datas[cond_inst].un_node;
const cond_tag = sema.code.instructions.items(.tag)[cond_inst];
const operand_is_ref = cond_tag == .switch_cond_ref;
const operand_ptr = try sema.resolveInst(cond_info.operand);
const operand_ptr_ty = sema.typeOf(operand_ptr);
const operand_ty = if (operand_is_ref) operand_ptr_ty.childType(mod) else operand_ptr_ty;
if (block.inline_case_capture != .none) {
const item_val = sema.resolveConstValue(block, .unneeded, block.inline_case_capture, undefined) catch unreachable;
if (operand_ty.zigTypeTag(mod) == .Union) {
const field_index = @intCast(u32, operand_ty.unionTagFieldIndex(item_val, sema.mod).?);
const union_obj = mod.typeToUnion(operand_ty).?;
const field_ty = union_obj.fields.values()[field_index].ty;
if (try sema.resolveDefinedValue(block, sema.src, operand_ptr)) |union_val| {
if (is_ref) {
const ptr_field_ty = try Type.ptr(sema.arena, sema.mod, .{
.pointee_type = field_ty,
.mutable = operand_ptr_ty.ptrIsMutable(mod),
.@"volatile" = operand_ptr_ty.isVolatilePtr(mod),
.@"addrspace" = operand_ptr_ty.ptrAddressSpace(mod),
});
return sema.addConstant(
ptr_field_ty,
try Value.Tag.field_ptr.create(sema.arena, .{
.container_ptr = union_val,
.container_ty = operand_ty,
.field_index = field_index,
}),
);
}
const tag_and_val = union_val.castTag(.@"union").?.data;
return sema.addConstant(field_ty, tag_and_val.val);
}
if (is_ref) {
const ptr_field_ty = try Type.ptr(sema.arena, sema.mod, .{
.pointee_type = field_ty,
.mutable = operand_ptr_ty.ptrIsMutable(mod),
.@"volatile" = operand_ptr_ty.isVolatilePtr(mod),
.@"addrspace" = operand_ptr_ty.ptrAddressSpace(mod),
});
return block.addStructFieldPtr(operand_ptr, field_index, ptr_field_ty);
} else {
return block.addStructFieldVal(operand_ptr, field_index, field_ty);
}
} else if (is_ref) {
return sema.addConstantMaybeRef(block, operand_ty, item_val, true);
} else {
return block.inline_case_capture;
}
}
const operand = if (operand_is_ref)
try sema.analyzeLoad(block, operand_src, operand_ptr, operand_src)
else
operand_ptr;
if (capture_info.prong_index == std.math.maxInt(@TypeOf(capture_info.prong_index))) {
// It is the else/`_` prong.
if (is_ref) {
return operand_ptr;
}
switch (operand_ty.zigTypeTag(mod)) {
.ErrorSet => if (block.switch_else_err_ty) |some| {
return sema.bitCast(block, some, operand, operand_src, null);
} else {
try block.addUnreachable(false);
return Air.Inst.Ref.unreachable_value;
},
else => return operand,
}
}
const items = if (is_multi)
switch_extra.data.getMultiProng(sema.code, switch_extra.end, capture_info.prong_index).items
else
&[_]Zir.Inst.Ref{
switch_extra.data.getScalarProng(sema.code, switch_extra.end, capture_info.prong_index).item,
};
switch (operand_ty.zigTypeTag(mod)) {
.Union => {
const union_obj = mod.typeToUnion(operand_ty).?;
const first_item = try sema.resolveInst(items[0]);
// Previous switch validation ensured this will succeed
const first_item_val = sema.resolveConstValue(block, .unneeded, first_item, "") catch unreachable;
const first_field_index = @intCast(u32, operand_ty.unionTagFieldIndex(first_item_val, sema.mod).?);
const first_field = union_obj.fields.values()[first_field_index];
for (items[1..], 0..) |item, i| {
const item_ref = try sema.resolveInst(item);
// Previous switch validation ensured this will succeed
const item_val = sema.resolveConstValue(block, .unneeded, item_ref, "") catch unreachable;
const field_index = operand_ty.unionTagFieldIndex(item_val, sema.mod).?;
const field = union_obj.fields.values()[field_index];
if (!field.ty.eql(first_field.ty, sema.mod)) {
const msg = msg: {
const raw_capture_src = Module.SwitchProngSrc{ .multi_capture = capture_info.prong_index };
const capture_src = raw_capture_src.resolve(mod, sema.mod.declPtr(block.src_decl), switch_info.src_node, .first);
const msg = try sema.errMsg(block, capture_src, "capture group with incompatible types", .{});
errdefer msg.destroy(gpa);
const raw_first_item_src = Module.SwitchProngSrc{ .multi = .{ .prong = capture_info.prong_index, .item = 0 } };
const first_item_src = raw_first_item_src.resolve(mod, sema.mod.declPtr(block.src_decl), switch_info.src_node, .first);
const raw_item_src = Module.SwitchProngSrc{ .multi = .{ .prong = capture_info.prong_index, .item = 1 + @intCast(u32, i) } };
const item_src = raw_item_src.resolve(mod, sema.mod.declPtr(block.src_decl), switch_info.src_node, .first);
try sema.errNote(block, first_item_src, msg, "type '{}' here", .{first_field.ty.fmt(sema.mod)});
try sema.errNote(block, item_src, msg, "type '{}' here", .{field.ty.fmt(sema.mod)});
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
}
if (is_ref) {
const field_ty_ptr = try Type.ptr(sema.arena, sema.mod, .{
.pointee_type = first_field.ty,
.@"addrspace" = .generic,
.mutable = operand_ptr_ty.ptrIsMutable(mod),
});
if (try sema.resolveDefinedValue(block, operand_src, operand_ptr)) |op_ptr_val| {
return sema.addConstant(
field_ty_ptr,
try Value.Tag.field_ptr.create(sema.arena, .{
.container_ptr = op_ptr_val,
.container_ty = operand_ty,
.field_index = first_field_index,
}),
);
}
try sema.requireRuntimeBlock(block, operand_src, null);
return block.addStructFieldPtr(operand_ptr, first_field_index, field_ty_ptr);
}
if (try sema.resolveDefinedValue(block, operand_src, operand)) |operand_val| {
return sema.addConstant(
first_field.ty,
operand_val.castTag(.@"union").?.data.val,
);
}
try sema.requireRuntimeBlock(block, operand_src, null);
return block.addStructFieldVal(operand, first_field_index, first_field.ty);
},
.ErrorSet => {
if (is_multi) {
var names: Module.Fn.InferredErrorSet.NameMap = .{};
try names.ensureUnusedCapacity(sema.arena, items.len);
for (items) |item| {
const item_ref = try sema.resolveInst(item);
// Previous switch validation ensured this will succeed
const item_val = sema.resolveConstValue(block, .unneeded, item_ref, "") catch unreachable;
const name_ip = try mod.intern_pool.getOrPutString(gpa, item_val.getError().?);
names.putAssumeCapacityNoClobber(name_ip, {});
}
const else_error_ty = try mod.errorSetFromUnsortedNames(names.keys());
return sema.bitCast(block, else_error_ty, operand, operand_src, null);
} else {
const item_ref = try sema.resolveInst(items[0]);
// Previous switch validation ensured this will succeed
const item_val = sema.resolveConstValue(block, .unneeded, item_ref, "") catch unreachable;
const item_ty = try mod.singleErrorSetType(item_val.getError().?);
return sema.bitCast(block, item_ty, operand, operand_src, null);
}
},
else => {
// In this case the capture value is just the passed-through value of the
// switch condition.
if (is_ref) {
return operand_ptr;
} else {
return operand;
}
},
}
}
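/// Analyzes the tag capture of an `inline` switch prong, e.g. the `tag` in
/// `inline else => |payload, tag| {}`. Only a union operand has a distinct
/// tag to capture; any other type is a compile error.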
fn zirSwitchCaptureTag(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const zir_datas = sema.code.instructions.items(.data);
const inst_data = zir_datas[inst].un_tok;
const src = inst_data.src();
const switch_tag = sema.code.instructions.items(.tag)[Zir.refToIndex(inst_data.operand).?];
const is_ref = switch_tag == .switch_cond_ref;
const cond_data = zir_datas[Zir.refToIndex(inst_data.operand).?].un_node;
const operand_ptr = try sema.resolveInst(cond_data.operand);
const operand_ptr_ty = sema.typeOf(operand_ptr);
const operand_ty = if (is_ref) operand_ptr_ty.childType(mod) else operand_ptr_ty;
if (operand_ty.zigTypeTag(mod) != .Union) {
const msg = msg: {
const msg = try sema.errMsg(block, src, "cannot capture tag of non-union type '{}'", .{
operand_ty.fmt(sema.mod),
});
errdefer msg.destroy(sema.gpa);
try sema.addDeclaredHereNote(msg, operand_ty);
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
return block.inline_case_capture;
}
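/// Analyzes the ZIR `switch_cond`/`switch_cond_ref` instruction, producing the
/// value that prong items are compared against. A union operand is converted
/// to its tag value; types that cannot be switched on are compile errors.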
fn zirSwitchCond(
sema: *Sema,
block: *Block,
inst: Zir.Inst.Index,
is_ref: bool,
) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const src = inst_data.src();
const operand_src: LazySrcLoc = .{ .node_offset_switch_operand = inst_data.src_node };
const operand_ptr = try sema.resolveInst(inst_data.operand);
const operand = if (is_ref)
try sema.analyzeLoad(block, src, operand_ptr, operand_src)
else
operand_ptr;
const operand_ty = sema.typeOf(operand);
switch (operand_ty.zigTypeTag(mod)) {
.Type,
.Void,
.Bool,
.Int,
.Float,
.ComptimeFloat,
.ComptimeInt,
.EnumLiteral,
.Pointer,
.Fn,
.ErrorSet,
.Enum,
=> {
if (operand_ty.isSlice(mod)) {
return sema.fail(block, src, "switch on type '{}'", .{operand_ty.fmt(sema.mod)});
}
if ((try sema.typeHasOnePossibleValue(operand_ty))) |opv| {
return sema.addConstant(operand_ty, opv);
}
return operand;
},
.Union => {
const union_ty = try sema.resolveTypeFields(operand_ty);
const enum_ty = union_ty.unionTagType(mod) orelse {
const msg = msg: {
const msg = try sema.errMsg(block, src, "switch on union with no attached enum", .{});
errdefer msg.destroy(sema.gpa);
if (union_ty.declSrcLocOrNull(sema.mod)) |union_src| {
try sema.mod.errNoteNonLazy(union_src, msg, "consider 'union(enum)' here", .{});
}
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
};
return sema.unionToTag(block, enum_ty, operand, src);
},
.ErrorUnion,
.NoReturn,
.Array,
.Struct,
.Undefined,
.Null,
.Optional,
.Opaque,
.Vector,
.Frame,
.AnyFrame,
=> return sema.fail(block, src, "switch on type '{}'", .{operand_ty.fmt(sema.mod)}),
}
}
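/// Maps the names of errors already handled by a prong to the source location
/// of that prong, for duplicate detection and `else` prong narrowing.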
const SwitchErrorSet = std.StringHashMap(Module.SwitchProngSrc);
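/// Analyzes a whole `switch` expression (ZIR `switch_block`): validates that
/// prongs are exhaustive and duplicate-free, resolves a comptime-known operand
/// directly to its matching prong body, expands `inline` prongs, and otherwise
/// lowers the switch to an AIR `switch_br` (ranged prongs become a chain of
/// `cond_br` instructions in the else body).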
fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
const mod = sema.mod;
const gpa = sema.gpa;
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
const src_node_offset = inst_data.src_node;
const operand_src: LazySrcLoc = .{ .node_offset_switch_operand = src_node_offset };
const special_prong_src: LazySrcLoc = .{ .node_offset_switch_special_prong = src_node_offset };
const extra = sema.code.extraData(Zir.Inst.SwitchBlock, inst_data.payload_index);
const operand = try sema.resolveInst(extra.data.operand);
// AstGen guarantees that the instruction immediately following
// switch_cond(_ref) is a dbg_stmt
const cond_dbg_node_index = Zir.refToIndex(extra.data.operand).? + 1;
var header_extra_index: usize = extra.end;
const scalar_cases_len = extra.data.bits.scalar_cases_len;
const multi_cases_len = if (extra.data.bits.has_multi_cases) blk: {
const multi_cases_len = sema.code.extra[header_extra_index];
header_extra_index += 1;
break :blk multi_cases_len;
} else 0;
const special_prong = extra.data.bits.specialProng();
const special: struct { body: []const Zir.Inst.Index, end: usize, is_inline: bool } = switch (special_prong) {
.none => .{ .body = &.{}, .end = header_extra_index, .is_inline = false },
.under, .@"else" => blk: {
const body_len = @truncate(u31, sema.code.extra[header_extra_index]);
const extra_body_start = header_extra_index + 1;
break :blk .{
.body = sema.code.extra[extra_body_start..][0..body_len],
.end = extra_body_start + body_len,
.is_inline = sema.code.extra[header_extra_index] >> 31 != 0,
};
},
};
const maybe_union_ty = blk: {
const zir_tags = sema.code.instructions.items(.tag);
const zir_data = sema.code.instructions.items(.data);
const cond_index = Zir.refToIndex(extra.data.operand).?;
const raw_operand = sema.resolveInst(zir_data[cond_index].un_node.operand) catch unreachable;
const target_ty = sema.typeOf(raw_operand);
break :blk if (zir_tags[cond_index] == .switch_cond_ref) target_ty.childType(mod) else target_ty;
};
const union_originally = maybe_union_ty.zigTypeTag(mod) == .Union;
    // Duplicate-checking state; also used later for `inline else` expansion.
var seen_enum_fields: []?Module.SwitchProngSrc = &.{};
var seen_errors = SwitchErrorSet.init(gpa);
var range_set = RangeSet.init(gpa, mod);
var true_count: u8 = 0;
var false_count: u8 = 0;
defer {
range_set.deinit();
gpa.free(seen_enum_fields);
seen_errors.deinit();
}
var empty_enum = false;
const operand_ty = sema.typeOf(operand);
const err_set = operand_ty.zigTypeTag(mod) == .ErrorSet;
var else_error_ty: ?Type = null;
// Validate usage of '_' prongs.
if (special_prong == .under and (!operand_ty.isNonexhaustiveEnum(mod) or union_originally)) {
const msg = msg: {
const msg = try sema.errMsg(
block,
src,
"'_' prong only allowed when switching on non-exhaustive enums",
.{},
);
errdefer msg.destroy(gpa);
try sema.errNote(
block,
special_prong_src,
msg,
"'_' prong here",
.{},
);
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
// Validate for duplicate items, missing else prong, and invalid range.
switch (operand_ty.zigTypeTag(mod)) {
.Union => unreachable, // handled in zirSwitchCond
.Enum => {
seen_enum_fields = try gpa.alloc(?Module.SwitchProngSrc, operand_ty.enumFieldCount(mod));
empty_enum = seen_enum_fields.len == 0 and !operand_ty.isNonexhaustiveEnum(mod);
@memset(seen_enum_fields, null);
// `range_set` is used for non-exhaustive enum values that do not correspond to any tags.
var extra_index: usize = special.end;
{
var scalar_i: u32 = 0;
while (scalar_i < scalar_cases_len) : (scalar_i += 1) {
const item_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]);
extra_index += 1;
const body_len = @truncate(u31, sema.code.extra[extra_index]);
extra_index += 1;
extra_index += body_len;
try sema.validateSwitchItemEnum(
block,
seen_enum_fields,
&range_set,
item_ref,
src_node_offset,
.{ .scalar = scalar_i },
);
}
}
{
var multi_i: u32 = 0;
while (multi_i < multi_cases_len) : (multi_i += 1) {
const items_len = sema.code.extra[extra_index];
extra_index += 1;
const ranges_len = sema.code.extra[extra_index];
extra_index += 1;
const body_len = @truncate(u31, sema.code.extra[extra_index]);
extra_index += 1;
const items = sema.code.refSlice(extra_index, items_len);
extra_index += items_len + body_len;
for (items, 0..) |item_ref, item_i| {
try sema.validateSwitchItemEnum(
block,
seen_enum_fields,
&range_set,
item_ref,
src_node_offset,
.{ .multi = .{ .prong = multi_i, .item = @intCast(u32, item_i) } },
);
}
try sema.validateSwitchNoRange(block, ranges_len, operand_ty, src_node_offset);
}
}
const all_tags_handled = for (seen_enum_fields) |seen_src| {
if (seen_src == null) break false;
} else true;
if (special_prong == .@"else") {
if (all_tags_handled and !operand_ty.isNonexhaustiveEnum(mod)) return sema.fail(
block,
special_prong_src,
"unreachable else prong; all cases already handled",
.{},
);
} else if (!all_tags_handled) {
const msg = msg: {
const msg = try sema.errMsg(
block,
src,
"switch must handle all possibilities",
.{},
);
errdefer msg.destroy(sema.gpa);
for (seen_enum_fields, 0..) |seen_src, i| {
if (seen_src != null) continue;
const field_name = operand_ty.enumFieldName(i, mod);
try sema.addFieldErrNote(
operand_ty,
i,
msg,
"unhandled enumeration value: '{s}'",
.{field_name},
);
}
try mod.errNoteNonLazy(
operand_ty.declSrcLoc(mod),
msg,
"enum '{}' declared here",
.{operand_ty.fmt(mod)},
);
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
} else if (special_prong == .none and operand_ty.isNonexhaustiveEnum(mod) and !union_originally) {
return sema.fail(
block,
src,
"switch on non-exhaustive enum must include 'else' or '_' prong",
.{},
);
}
},
.ErrorSet => {
var extra_index: usize = special.end;
{
var scalar_i: u32 = 0;
while (scalar_i < scalar_cases_len) : (scalar_i += 1) {
const item_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]);
extra_index += 1;
const body_len = @truncate(u31, sema.code.extra[extra_index]);
extra_index += 1;
extra_index += body_len;
try sema.validateSwitchItemError(
block,
&seen_errors,
item_ref,
src_node_offset,
.{ .scalar = scalar_i },
);
}
}
{
var multi_i: u32 = 0;
while (multi_i < multi_cases_len) : (multi_i += 1) {
const items_len = sema.code.extra[extra_index];
extra_index += 1;
const ranges_len = sema.code.extra[extra_index];
extra_index += 1;
const body_len = @truncate(u31, sema.code.extra[extra_index]);
extra_index += 1;
const items = sema.code.refSlice(extra_index, items_len);
extra_index += items_len + body_len;
for (items, 0..) |item_ref, item_i| {
try sema.validateSwitchItemError(
block,
&seen_errors,
item_ref,
src_node_offset,
.{ .multi = .{ .prong = multi_i, .item = @intCast(u32, item_i) } },
);
}
try sema.validateSwitchNoRange(block, ranges_len, operand_ty, src_node_offset);
}
}
try sema.resolveInferredErrorSetTy(block, src, operand_ty);
if (operand_ty.isAnyError(mod)) {
if (special_prong != .@"else") {
return sema.fail(
block,
src,
"else prong required when switching on type 'anyerror'",
.{},
);
}
else_error_ty = Type.anyerror;
} else else_validation: {
var maybe_msg: ?*Module.ErrorMsg = null;
errdefer if (maybe_msg) |msg| msg.destroy(sema.gpa);
for (operand_ty.errorSetNames(mod)) |error_name_ip| {
const error_name = mod.intern_pool.stringToSlice(error_name_ip);
if (!seen_errors.contains(error_name) and special_prong != .@"else") {
const msg = maybe_msg orelse blk: {
maybe_msg = try sema.errMsg(
block,
src,
"switch must handle all possibilities",
.{},
);
break :blk maybe_msg.?;
};
try sema.errNote(
block,
src,
msg,
"unhandled error value: 'error.{s}'",
.{error_name},
);
}
}
if (maybe_msg) |msg| {
maybe_msg = null;
try sema.addDeclaredHereNote(msg, operand_ty);
return sema.failWithOwnedErrorMsg(msg);
}
if (special_prong == .@"else" and seen_errors.count() == operand_ty.errorSetNames(mod).len) {
                    // In order to enable common patterns for generic code, allow
                    // simple else prong bodies such as:
                    //   else => unreachable,
                    //   else => return,
                    //   else => |e| return e,
                    // even if all the possible errors were already handled.
const tags = sema.code.instructions.items(.tag);
for (special.body) |else_inst| switch (tags[else_inst]) {
.dbg_block_begin,
.dbg_block_end,
.dbg_stmt,
.dbg_var_val,
.switch_capture,
.ret_type,
.as_node,
.ret_node,
.@"unreachable",
.@"defer",
.defer_err_code,
.err_union_code,
.ret_err_value_code,
.restore_err_ret_index,
.is_non_err,
.ret_is_non_err,
.condbr,
=> {},
else => break,
} else break :else_validation;
return sema.fail(
block,
special_prong_src,
"unreachable else prong; all cases already handled",
.{},
);
}
const error_names = operand_ty.errorSetNames(mod);
var names: Module.Fn.InferredErrorSet.NameMap = .{};
try names.ensureUnusedCapacity(sema.arena, error_names.len);
for (error_names) |error_name_ip| {
const error_name = mod.intern_pool.stringToSlice(error_name_ip);
if (seen_errors.contains(error_name)) continue;
names.putAssumeCapacityNoClobber(error_name_ip, {});
}
                // Only the keys are extracted here; `errorSetFromUnsortedNames`
                // imposes no ordering requirement on them.
else_error_ty = try mod.errorSetFromUnsortedNames(names.keys());
}
},
.Int, .ComptimeInt => {
var extra_index: usize = special.end;
{
var scalar_i: u32 = 0;
while (scalar_i < scalar_cases_len) : (scalar_i += 1) {
const item_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]);
extra_index += 1;
const body_len = @truncate(u31, sema.code.extra[extra_index]);
extra_index += 1;
extra_index += body_len;
try sema.validateSwitchItem(
block,
&range_set,
item_ref,
operand_ty,
src_node_offset,
.{ .scalar = scalar_i },
);
}
}
{
var multi_i: u32 = 0;
while (multi_i < multi_cases_len) : (multi_i += 1) {
const items_len = sema.code.extra[extra_index];
extra_index += 1;
const ranges_len = sema.code.extra[extra_index];
extra_index += 1;
const body_len = @truncate(u31, sema.code.extra[extra_index]);
extra_index += 1;
const items = sema.code.refSlice(extra_index, items_len);
extra_index += items_len;
for (items, 0..) |item_ref, item_i| {
try sema.validateSwitchItem(
block,
&range_set,
item_ref,
operand_ty,
src_node_offset,
.{ .multi = .{ .prong = multi_i, .item = @intCast(u32, item_i) } },
);
}
var range_i: u32 = 0;
while (range_i < ranges_len) : (range_i += 1) {
const item_first = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]);
extra_index += 1;
const item_last = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]);
extra_index += 1;
try sema.validateSwitchRange(
block,
&range_set,
item_first,
item_last,
operand_ty,
src_node_offset,
.{ .range = .{ .prong = multi_i, .item = range_i } },
);
}
extra_index += body_len;
}
}
check_range: {
if (operand_ty.zigTypeTag(mod) == .Int) {
var arena = std.heap.ArenaAllocator.init(gpa);
defer arena.deinit();
const min_int = try operand_ty.minInt(arena.allocator(), mod);
const max_int = try operand_ty.maxIntScalar(mod, Type.comptime_int);
if (try range_set.spans(min_int, max_int, operand_ty)) {
if (special_prong == .@"else") {
return sema.fail(
block,
special_prong_src,
"unreachable else prong; all cases already handled",
.{},
);
}
break :check_range;
}
}
if (special_prong != .@"else") {
return sema.fail(
block,
src,
"switch must handle all possibilities",
.{},
);
}
}
},
.Bool => {
var extra_index: usize = special.end;
{
var scalar_i: u32 = 0;
while (scalar_i < scalar_cases_len) : (scalar_i += 1) {
const item_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]);
extra_index += 1;
const body_len = @truncate(u31, sema.code.extra[extra_index]);
extra_index += 1;
extra_index += body_len;
try sema.validateSwitchItemBool(
block,
&true_count,
&false_count,
item_ref,
src_node_offset,
.{ .scalar = scalar_i },
);
}
}
{
var multi_i: u32 = 0;
while (multi_i < multi_cases_len) : (multi_i += 1) {
const items_len = sema.code.extra[extra_index];
extra_index += 1;
const ranges_len = sema.code.extra[extra_index];
extra_index += 1;
const body_len = @truncate(u31, sema.code.extra[extra_index]);
extra_index += 1;
const items = sema.code.refSlice(extra_index, items_len);
extra_index += items_len + body_len;
for (items, 0..) |item_ref, item_i| {
try sema.validateSwitchItemBool(
block,
&true_count,
&false_count,
item_ref,
src_node_offset,
.{ .multi = .{ .prong = multi_i, .item = @intCast(u32, item_i) } },
);
}
try sema.validateSwitchNoRange(block, ranges_len, operand_ty, src_node_offset);
}
}
switch (special_prong) {
.@"else" => {
if (true_count + false_count == 2) {
return sema.fail(
block,
special_prong_src,
"unreachable else prong; all cases already handled",
.{},
);
}
},
.under, .none => {
if (true_count + false_count < 2) {
return sema.fail(
block,
src,
"switch must handle all possibilities",
.{},
);
}
},
}
},
.EnumLiteral, .Void, .Fn, .Pointer, .Type => {
if (special_prong != .@"else") {
return sema.fail(
block,
src,
"else prong required when switching on type '{}'",
.{operand_ty.fmt(mod)},
);
}
var seen_values = ValueSrcMap.initContext(gpa, .{
.ty = operand_ty,
.mod = mod,
});
defer seen_values.deinit();
var extra_index: usize = special.end;
{
var scalar_i: u32 = 0;
while (scalar_i < scalar_cases_len) : (scalar_i += 1) {
const item_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]);
extra_index += 1;
const body_len = @truncate(u31, sema.code.extra[extra_index]);
extra_index += 1;
extra_index += body_len;
try sema.validateSwitchItemSparse(
block,
&seen_values,
item_ref,
src_node_offset,
.{ .scalar = scalar_i },
);
}
}
{
var multi_i: u32 = 0;
while (multi_i < multi_cases_len) : (multi_i += 1) {
const items_len = sema.code.extra[extra_index];
extra_index += 1;
const ranges_len = sema.code.extra[extra_index];
extra_index += 1;
const body_len = @truncate(u31, sema.code.extra[extra_index]);
extra_index += 1;
const items = sema.code.refSlice(extra_index, items_len);
extra_index += items_len + body_len;
for (items, 0..) |item_ref, item_i| {
try sema.validateSwitchItemSparse(
block,
&seen_values,
item_ref,
src_node_offset,
.{ .multi = .{ .prong = multi_i, .item = @intCast(u32, item_i) } },
);
}
try sema.validateSwitchNoRange(block, ranges_len, operand_ty, src_node_offset);
}
}
},
.ErrorUnion,
.NoReturn,
.Array,
.Struct,
.Undefined,
.Null,
.Optional,
.Opaque,
.Vector,
.Frame,
.AnyFrame,
.ComptimeFloat,
.Float,
=> return sema.fail(block, operand_src, "invalid switch operand type '{}'", .{
operand_ty.fmt(mod),
}),
}
const block_inst = @intCast(Air.Inst.Index, sema.air_instructions.len);
try sema.air_instructions.append(gpa, .{
.tag = .block,
.data = undefined,
});
var label: Block.Label = .{
.zir_block = inst,
.merges = .{
.src_locs = .{},
.results = .{},
.br_list = .{},
.block_inst = block_inst,
},
};
var child_block: Block = .{
.parent = block,
.sema = sema,
.src_decl = block.src_decl,
.namespace = block.namespace,
.wip_capture_scope = block.wip_capture_scope,
.instructions = .{},
.label = &label,
.inlining = block.inlining,
.is_comptime = block.is_comptime,
.comptime_reason = block.comptime_reason,
.is_typeof = block.is_typeof,
.switch_else_err_ty = else_error_ty,
.c_import_buf = block.c_import_buf,
.runtime_cond = block.runtime_cond,
.runtime_loop = block.runtime_loop,
.runtime_index = block.runtime_index,
.error_return_trace_index = block.error_return_trace_index,
};
const merges = &child_block.label.?.merges;
defer child_block.instructions.deinit(gpa);
defer merges.deinit(gpa);
if (try sema.resolveDefinedValue(&child_block, src, operand)) |operand_val| {
var extra_index: usize = special.end;
{
var scalar_i: usize = 0;
while (scalar_i < scalar_cases_len) : (scalar_i += 1) {
const item_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]);
extra_index += 1;
const body_len = @truncate(u31, sema.code.extra[extra_index]);
const is_inline = sema.code.extra[extra_index] >> 31 != 0;
extra_index += 1;
const body = sema.code.extra[extra_index..][0..body_len];
extra_index += body_len;
const item = try sema.resolveInst(item_ref);
// Validation above ensured these will succeed.
const item_val = sema.resolveConstValue(&child_block, .unneeded, item, "") catch unreachable;
if (operand_val.eql(item_val, operand_ty, mod)) {
if (is_inline) child_block.inline_case_capture = operand;
if (err_set) try sema.maybeErrorUnwrapComptime(&child_block, body, operand);
return sema.resolveBlockBody(block, src, &child_block, body, inst, merges);
}
}
}
{
var multi_i: usize = 0;
while (multi_i < multi_cases_len) : (multi_i += 1) {
const items_len = sema.code.extra[extra_index];
extra_index += 1;
const ranges_len = sema.code.extra[extra_index];
extra_index += 1;
const body_len = @truncate(u31, sema.code.extra[extra_index]);
const is_inline = sema.code.extra[extra_index] >> 31 != 0;
extra_index += 1;
const items = sema.code.refSlice(extra_index, items_len);
extra_index += items_len;
const body = sema.code.extra[extra_index + 2 * ranges_len ..][0..body_len];
for (items) |item_ref| {
const item = try sema.resolveInst(item_ref);
// Validation above ensured these will succeed.
const item_val = sema.resolveConstValue(&child_block, .unneeded, item, "") catch unreachable;
if (operand_val.eql(item_val, operand_ty, mod)) {
if (is_inline) child_block.inline_case_capture = operand;
if (err_set) try sema.maybeErrorUnwrapComptime(&child_block, body, operand);
return sema.resolveBlockBody(block, src, &child_block, body, inst, merges);
}
}
var range_i: usize = 0;
while (range_i < ranges_len) : (range_i += 1) {
const item_first = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]);
extra_index += 1;
const item_last = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]);
extra_index += 1;
// Validation above ensured these will succeed.
const first_tv = sema.resolveInstConst(&child_block, .unneeded, item_first, "") catch unreachable;
const last_tv = sema.resolveInstConst(&child_block, .unneeded, item_last, "") catch unreachable;
if ((try sema.compareAll(operand_val, .gte, first_tv.val, operand_ty)) and
(try sema.compareAll(operand_val, .lte, last_tv.val, operand_ty)))
{
if (is_inline) child_block.inline_case_capture = operand;
if (err_set) try sema.maybeErrorUnwrapComptime(&child_block, body, operand);
return sema.resolveBlockBody(block, src, &child_block, body, inst, merges);
}
}
extra_index += body_len;
}
}
if (err_set) try sema.maybeErrorUnwrapComptime(&child_block, special.body, operand);
if (special.is_inline) child_block.inline_case_capture = operand;
if (empty_enum) {
return Air.Inst.Ref.void_value;
}
return sema.resolveBlockBody(block, src, &child_block, special.body, inst, merges);
}
if (scalar_cases_len + multi_cases_len == 0 and !special.is_inline) {
if (empty_enum) {
return Air.Inst.Ref.void_value;
}
if (special_prong == .none) {
return sema.fail(block, src, "switch must handle all possibilities", .{});
}
if (err_set and try sema.maybeErrorUnwrap(block, special.body, operand)) {
return Air.Inst.Ref.unreachable_value;
}
if (mod.backendSupportsFeature(.is_named_enum_value) and block.wantSafety() and operand_ty.zigTypeTag(mod) == .Enum and
(!operand_ty.isNonexhaustiveEnum(mod) or union_originally))
{
try sema.zirDbgStmt(block, cond_dbg_node_index);
const ok = try block.addUnOp(.is_named_enum_value, operand);
try sema.addSafetyCheck(block, ok, .corrupt_switch);
}
return sema.resolveBlockBody(block, src, &child_block, special.body, inst, merges);
}
if (child_block.is_comptime) {
_ = sema.resolveConstValue(&child_block, operand_src, operand, "condition in comptime switch must be comptime-known") catch |err| {
if (err == error.AnalysisFail and child_block.comptime_reason != null) try child_block.comptime_reason.?.explain(sema, sema.err);
return err;
};
unreachable;
}
const estimated_cases_extra = (scalar_cases_len + multi_cases_len) *
@typeInfo(Air.SwitchBr.Case).Struct.fields.len + 2;
var cases_extra = try std.ArrayListUnmanaged(u32).initCapacity(gpa, estimated_cases_extra);
defer cases_extra.deinit(gpa);
var case_block = child_block.makeSubBlock();
case_block.runtime_loop = null;
case_block.runtime_cond = operand_src;
case_block.runtime_index.increment();
defer case_block.instructions.deinit(gpa);
var extra_index: usize = special.end;
var scalar_i: usize = 0;
while (scalar_i < scalar_cases_len) : (scalar_i += 1) {
const item_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]);
extra_index += 1;
const body_len = @truncate(u31, sema.code.extra[extra_index]);
const is_inline = sema.code.extra[extra_index] >> 31 != 0;
extra_index += 1;
const body = sema.code.extra[extra_index..][0..body_len];
extra_index += body_len;
var wip_captures = try WipCaptureScope.init(gpa, sema.perm_arena, child_block.wip_capture_scope);
defer wip_captures.deinit();
case_block.instructions.shrinkRetainingCapacity(0);
case_block.wip_capture_scope = wip_captures.scope;
case_block.inline_case_capture = .none;
const item = try sema.resolveInst(item_ref);
if (is_inline) case_block.inline_case_capture = item;
        // `item` is already guaranteed to be comptime-known.
const analyze_body = if (union_originally) blk: {
const item_val = sema.resolveConstValue(block, .unneeded, item, "") catch unreachable;
const field_ty = maybe_union_ty.unionFieldType(item_val, mod);
break :blk field_ty.zigTypeTag(mod) != .NoReturn;
} else true;
if (err_set and try sema.maybeErrorUnwrap(&case_block, body, operand)) {
// nothing to do here
} else if (analyze_body) {
try sema.analyzeBodyRuntimeBreak(&case_block, body);
} else {
_ = try case_block.addNoOp(.unreach);
}
try wip_captures.finalize();
try cases_extra.ensureUnusedCapacity(gpa, 3 + case_block.instructions.items.len);
cases_extra.appendAssumeCapacity(1); // items_len
cases_extra.appendAssumeCapacity(@intCast(u32, case_block.instructions.items.len));
cases_extra.appendAssumeCapacity(@enumToInt(item));
cases_extra.appendSliceAssumeCapacity(case_block.instructions.items);
}
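    // Prongs that include ranges cannot become `switch_br` cases; they are
    // lowered as a chain of `cond_br` instructions that ends up in the final
    // else body. This state tracks the chain while it is built.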
var is_first = true;
var prev_cond_br: Air.Inst.Index = undefined;
var first_else_body: []const Air.Inst.Index = &.{};
defer gpa.free(first_else_body);
var prev_then_body: []const Air.Inst.Index = &.{};
defer gpa.free(prev_then_body);
var cases_len = scalar_cases_len;
var multi_i: u32 = 0;
while (multi_i < multi_cases_len) : (multi_i += 1) {
const items_len = sema.code.extra[extra_index];
extra_index += 1;
const ranges_len = sema.code.extra[extra_index];
extra_index += 1;
const body_len = @truncate(u31, sema.code.extra[extra_index]);
const is_inline = sema.code.extra[extra_index] >> 31 != 0;
extra_index += 1;
const items = sema.code.refSlice(extra_index, items_len);
extra_index += items_len;
case_block.instructions.shrinkRetainingCapacity(0);
case_block.wip_capture_scope = child_block.wip_capture_scope;
case_block.inline_case_capture = .none;
// Generate all possible cases as scalar prongs.
if (is_inline) {
const body_start = extra_index + 2 * ranges_len;
const body = sema.code.extra[body_start..][0..body_len];
var emit_bb = false;
var range_i: u32 = 0;
while (range_i < ranges_len) : (range_i += 1) {
const first_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]);
extra_index += 1;
const last_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]);
extra_index += 1;
const item_first_ref = try sema.resolveInst(first_ref);
var item = sema.resolveConstValue(block, .unneeded, item_first_ref, undefined) catch unreachable;
const item_last_ref = try sema.resolveInst(last_ref);
const item_last = sema.resolveConstValue(block, .unneeded, item_last_ref, undefined) catch unreachable;
while (item.compareScalar(.lte, item_last, operand_ty, mod)) : ({
// Previous validation has resolved any possible lazy values.
item = try sema.intAddScalar(item, try mod.intValue(operand_ty, 1), operand_ty);
}) {
cases_len += 1;
const item_ref = try sema.addConstant(operand_ty, item);
case_block.inline_case_capture = item_ref;
case_block.instructions.shrinkRetainingCapacity(0);
case_block.wip_capture_scope = child_block.wip_capture_scope;
if (emit_bb) sema.emitBackwardBranch(block, .unneeded) catch |err| switch (err) {
error.NeededSourceLocation => {
const case_src = Module.SwitchProngSrc{ .range = .{ .prong = multi_i, .item = range_i } };
const decl = mod.declPtr(case_block.src_decl);
try sema.emitBackwardBranch(block, case_src.resolve(mod, decl, src_node_offset, .none));
unreachable;
},
else => return err,
};
emit_bb = true;
try sema.analyzeBodyRuntimeBreak(&case_block, body);
try cases_extra.ensureUnusedCapacity(gpa, 3 + case_block.instructions.items.len);
cases_extra.appendAssumeCapacity(1); // items_len
cases_extra.appendAssumeCapacity(@intCast(u32, case_block.instructions.items.len));
cases_extra.appendAssumeCapacity(@enumToInt(case_block.inline_case_capture));
cases_extra.appendSliceAssumeCapacity(case_block.instructions.items);
}
}
for (items, 0..) |item_ref, item_i| {
cases_len += 1;
const item = try sema.resolveInst(item_ref);
case_block.inline_case_capture = item;
case_block.instructions.shrinkRetainingCapacity(0);
case_block.wip_capture_scope = child_block.wip_capture_scope;
const analyze_body = if (union_originally) blk: {
const item_val = sema.resolveConstValue(block, .unneeded, item, undefined) catch unreachable;
const field_ty = maybe_union_ty.unionFieldType(item_val, mod);
break :blk field_ty.zigTypeTag(mod) != .NoReturn;
} else true;
if (emit_bb) sema.emitBackwardBranch(block, .unneeded) catch |err| switch (err) {
error.NeededSourceLocation => {
const case_src = Module.SwitchProngSrc{ .multi = .{ .prong = multi_i, .item = @intCast(u32, item_i) } };
const decl = mod.declPtr(case_block.src_decl);
try sema.emitBackwardBranch(block, case_src.resolve(mod, decl, src_node_offset, .none));
unreachable;
},
else => return err,
};
emit_bb = true;
if (analyze_body) {
try sema.analyzeBodyRuntimeBreak(&case_block, body);
} else {
_ = try case_block.addNoOp(.unreach);
}
try cases_extra.ensureUnusedCapacity(gpa, 3 + case_block.instructions.items.len);
cases_extra.appendAssumeCapacity(1); // items_len
cases_extra.appendAssumeCapacity(@intCast(u32, case_block.instructions.items.len));
cases_extra.appendAssumeCapacity(@enumToInt(case_block.inline_case_capture));
cases_extra.appendSliceAssumeCapacity(case_block.instructions.items);
}
extra_index += body_len;
continue;
}
var any_ok: Air.Inst.Ref = .none;
// If there are any ranges, we have to put all the items into the
// else prong. Otherwise, we can take advantage of multiple items
// mapping to the same body.
if (ranges_len == 0) {
cases_len += 1;
const analyze_body = if (union_originally)
for (items) |item_ref| {
const item = try sema.resolveInst(item_ref);
const item_val = sema.resolveConstValue(block, .unneeded, item, "") catch unreachable;
const field_ty = maybe_union_ty.unionFieldType(item_val, mod);
if (field_ty.zigTypeTag(mod) != .NoReturn) break true;
} else false
else
true;
const body = sema.code.extra[extra_index..][0..body_len];
extra_index += body_len;
if (err_set and try sema.maybeErrorUnwrap(&case_block, body, operand)) {
// nothing to do here
} else if (analyze_body) {
try sema.analyzeBodyRuntimeBreak(&case_block, body);
} else {
_ = try case_block.addNoOp(.unreach);
}
try cases_extra.ensureUnusedCapacity(gpa, 2 + items.len +
case_block.instructions.items.len);
cases_extra.appendAssumeCapacity(@intCast(u32, items.len));
cases_extra.appendAssumeCapacity(@intCast(u32, case_block.instructions.items.len));
for (items) |item_ref| {
const item = try sema.resolveInst(item_ref);
cases_extra.appendAssumeCapacity(@enumToInt(item));
}
cases_extra.appendSliceAssumeCapacity(case_block.instructions.items);
} else {
for (items) |item_ref| {
const item = try sema.resolveInst(item_ref);
const cmp_ok = try case_block.addBinOp(if (case_block.float_mode == .Optimized) .cmp_eq_optimized else .cmp_eq, operand, item);
if (any_ok != .none) {
any_ok = try case_block.addBinOp(.bool_or, any_ok, cmp_ok);
} else {
any_ok = cmp_ok;
}
}
var range_i: usize = 0;
while (range_i < ranges_len) : (range_i += 1) {
const first_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]);
extra_index += 1;
const last_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]);
extra_index += 1;
const item_first = try sema.resolveInst(first_ref);
const item_last = try sema.resolveInst(last_ref);
// operand >= first and operand <= last
const range_first_ok = try case_block.addBinOp(
if (case_block.float_mode == .Optimized) .cmp_gte_optimized else .cmp_gte,
operand,
item_first,
);
const range_last_ok = try case_block.addBinOp(
if (case_block.float_mode == .Optimized) .cmp_lte_optimized else .cmp_lte,
operand,
item_last,
);
const range_ok = try case_block.addBinOp(
.bool_and,
range_first_ok,
range_last_ok,
);
if (any_ok != .none) {
any_ok = try case_block.addBinOp(.bool_or, any_ok, range_ok);
} else {
any_ok = range_ok;
}
}
const new_cond_br = try case_block.addInstAsIndex(.{ .tag = .cond_br, .data = .{
.pl_op = .{
.operand = any_ok,
.payload = undefined,
},
} });
var cond_body = try case_block.instructions.toOwnedSlice(gpa);
defer gpa.free(cond_body);
var wip_captures = try WipCaptureScope.init(gpa, sema.perm_arena, child_block.wip_capture_scope);
defer wip_captures.deinit();
case_block.instructions.shrinkRetainingCapacity(0);
case_block.wip_capture_scope = wip_captures.scope;
const body = sema.code.extra[extra_index..][0..body_len];
extra_index += body_len;
if (err_set and try sema.maybeErrorUnwrap(&case_block, body, operand)) {
// nothing to do here
} else {
try sema.analyzeBodyRuntimeBreak(&case_block, body);
}
try wip_captures.finalize();
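            // Link this prong into the `cond_br` chain: the previous branch's
            // payload can be filled in now that both its then body (the
            // previous prong) and its else body (this prong's comparisons)
            // are known.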
if (is_first) {
is_first = false;
first_else_body = cond_body;
cond_body = &.{};
} else {
try sema.air_extra.ensureUnusedCapacity(
gpa,
@typeInfo(Air.CondBr).Struct.fields.len + prev_then_body.len + cond_body.len,
);
sema.air_instructions.items(.data)[prev_cond_br].pl_op.payload =
sema.addExtraAssumeCapacity(Air.CondBr{
.then_body_len = @intCast(u32, prev_then_body.len),
.else_body_len = @intCast(u32, cond_body.len),
});
sema.air_extra.appendSliceAssumeCapacity(prev_then_body);
sema.air_extra.appendSliceAssumeCapacity(cond_body);
}
gpa.free(prev_then_body);
prev_then_body = try case_block.instructions.toOwnedSlice(gpa);
prev_cond_br = new_cond_br;
}
}
var final_else_body: []const Air.Inst.Index = &.{};
if (special.body.len != 0 or !is_first or case_block.wantSafety()) {
var emit_bb = false;
if (special.is_inline) switch (operand_ty.zigTypeTag(mod)) {
.Enum => {
if (operand_ty.isNonexhaustiveEnum(mod) and !union_originally) {
return sema.fail(block, special_prong_src, "cannot enumerate values of type '{}' for 'inline else'", .{
operand_ty.fmt(mod),
});
}
for (seen_enum_fields, 0..) |f, i| {
if (f != null) continue;
cases_len += 1;
const item_val = try mod.enumValueFieldIndex(operand_ty, @intCast(u32, i));
const item_ref = try sema.addConstant(operand_ty, item_val);
case_block.inline_case_capture = item_ref;
case_block.instructions.shrinkRetainingCapacity(0);
case_block.wip_capture_scope = child_block.wip_capture_scope;
const analyze_body = if (union_originally) blk: {
const field_ty = maybe_union_ty.unionFieldType(item_val, mod);
break :blk field_ty.zigTypeTag(mod) != .NoReturn;
} else true;
if (emit_bb) try sema.emitBackwardBranch(block, special_prong_src);
emit_bb = true;
if (analyze_body) {
try sema.analyzeBodyRuntimeBreak(&case_block, special.body);
} else {
_ = try case_block.addNoOp(.unreach);
}
try cases_extra.ensureUnusedCapacity(gpa, 3 + case_block.instructions.items.len);
cases_extra.appendAssumeCapacity(1); // items_len
cases_extra.appendAssumeCapacity(@intCast(u32, case_block.instructions.items.len));
cases_extra.appendAssumeCapacity(@enumToInt(case_block.inline_case_capture));
cases_extra.appendSliceAssumeCapacity(case_block.instructions.items);
}
},
.ErrorSet => {
if (operand_ty.isAnyError(mod)) {
return sema.fail(block, special_prong_src, "cannot enumerate values of type '{}' for 'inline else'", .{
operand_ty.fmt(mod),
});
}
for (operand_ty.errorSetNames(mod)) |error_name_ip| {
const error_name = mod.intern_pool.stringToSlice(error_name_ip);
if (seen_errors.contains(error_name)) continue;
cases_len += 1;
const item_val = try Value.Tag.@"error".create(sema.arena, .{ .name = error_name });
const item_ref = try sema.addConstant(operand_ty, item_val);
case_block.inline_case_capture = item_ref;
case_block.instructions.shrinkRetainingCapacity(0);
case_block.wip_capture_scope = child_block.wip_capture_scope;
if (emit_bb) try sema.emitBackwardBranch(block, special_prong_src);
emit_bb = true;
try sema.analyzeBodyRuntimeBreak(&case_block, special.body);
try cases_extra.ensureUnusedCapacity(gpa, 3 + case_block.instructions.items.len);
cases_extra.appendAssumeCapacity(1); // items_len
cases_extra.appendAssumeCapacity(@intCast(u32, case_block.instructions.items.len));
cases_extra.appendAssumeCapacity(@enumToInt(case_block.inline_case_capture));
cases_extra.appendSliceAssumeCapacity(case_block.instructions.items);
}
},
.Int => {
var it = try RangeSetUnhandledIterator.init(sema, operand_ty, range_set);
while (try it.next()) |cur| {
cases_len += 1;
const item_ref = try sema.addConstant(operand_ty, cur);
case_block.inline_case_capture = item_ref;
case_block.instructions.shrinkRetainingCapacity(0);
case_block.wip_capture_scope = child_block.wip_capture_scope;
if (emit_bb) try sema.emitBackwardBranch(block, special_prong_src);
emit_bb = true;
try sema.analyzeBodyRuntimeBreak(&case_block, special.body);
try cases_extra.ensureUnusedCapacity(gpa, 3 + case_block.instructions.items.len);
cases_extra.appendAssumeCapacity(1); // items_len
cases_extra.appendAssumeCapacity(@intCast(u32, case_block.instructions.items.len));
cases_extra.appendAssumeCapacity(@enumToInt(case_block.inline_case_capture));
cases_extra.appendSliceAssumeCapacity(case_block.instructions.items);
}
},
.Bool => {
if (true_count == 0) {
cases_len += 1;
case_block.inline_case_capture = Air.Inst.Ref.bool_true;
case_block.instructions.shrinkRetainingCapacity(0);
case_block.wip_capture_scope = child_block.wip_capture_scope;
if (emit_bb) try sema.emitBackwardBranch(block, special_prong_src);
emit_bb = true;
try sema.analyzeBodyRuntimeBreak(&case_block, special.body);
try cases_extra.ensureUnusedCapacity(gpa, 3 + case_block.instructions.items.len);
cases_extra.appendAssumeCapacity(1); // items_len
cases_extra.appendAssumeCapacity(@intCast(u32, case_block.instructions.items.len));
cases_extra.appendAssumeCapacity(@enumToInt(case_block.inline_case_capture));
cases_extra.appendSliceAssumeCapacity(case_block.instructions.items);
}
if (false_count == 0) {
cases_len += 1;
case_block.inline_case_capture = Air.Inst.Ref.bool_false;
case_block.instructions.shrinkRetainingCapacity(0);
case_block.wip_capture_scope = child_block.wip_capture_scope;
if (emit_bb) try sema.emitBackwardBranch(block, special_prong_src);
emit_bb = true;
try sema.analyzeBodyRuntimeBreak(&case_block, special.body);
try cases_extra.ensureUnusedCapacity(gpa, 3 + case_block.instructions.items.len);
cases_extra.appendAssumeCapacity(1); // items_len
cases_extra.appendAssumeCapacity(@intCast(u32, case_block.instructions.items.len));
cases_extra.appendAssumeCapacity(@enumToInt(case_block.inline_case_capture));
cases_extra.appendSliceAssumeCapacity(case_block.instructions.items);
}
},
else => return sema.fail(block, special_prong_src, "cannot enumerate values of type '{}' for 'inline else'", .{
operand_ty.fmt(mod),
}),
};
var wip_captures = try WipCaptureScope.init(gpa, sema.perm_arena, child_block.wip_capture_scope);
defer wip_captures.deinit();
case_block.instructions.shrinkRetainingCapacity(0);
case_block.wip_capture_scope = wip_captures.scope;
case_block.inline_case_capture = .none;
if (mod.backendSupportsFeature(.is_named_enum_value) and special.body.len != 0 and block.wantSafety() and
operand_ty.zigTypeTag(mod) == .Enum and (!operand_ty.isNonexhaustiveEnum(mod) or union_originally))
{
try sema.zirDbgStmt(&case_block, cond_dbg_node_index);
const ok = try case_block.addUnOp(.is_named_enum_value, operand);
try sema.addSafetyCheck(&case_block, ok, .corrupt_switch);
}
const analyze_body = if (union_originally and !special.is_inline)
for (seen_enum_fields, 0..) |seen_field, index| {
if (seen_field != null) continue;
const union_obj = mod.typeToUnion(maybe_union_ty).?;
const field_ty = union_obj.fields.values()[index].ty;
if (field_ty.zigTypeTag(mod) != .NoReturn) break true;
} else false
else
true;
if (special.body.len != 0 and err_set and
try sema.maybeErrorUnwrap(&case_block, special.body, operand))
{
// nothing to do here
} else if (special.body.len != 0 and analyze_body and !special.is_inline) {
try sema.analyzeBodyRuntimeBreak(&case_block, special.body);
} else {
// We still need a terminator in this block, but we have proven
// that it is unreachable.
if (case_block.wantSafety()) {
try sema.zirDbgStmt(&case_block, cond_dbg_node_index);
try sema.safetyPanic(&case_block, .corrupt_switch);
} else {
_ = try case_block.addNoOp(.unreach);
}
}
try wip_captures.finalize();
if (is_first) {
final_else_body = case_block.instructions.items;
} else {
try sema.air_extra.ensureUnusedCapacity(gpa, prev_then_body.len +
@typeInfo(Air.CondBr).Struct.fields.len + case_block.instructions.items.len);
sema.air_instructions.items(.data)[prev_cond_br].pl_op.payload =
sema.addExtraAssumeCapacity(Air.CondBr{
.then_body_len = @intCast(u32, prev_then_body.len),
.else_body_len = @intCast(u32, case_block.instructions.items.len),
});
sema.air_extra.appendSliceAssumeCapacity(prev_then_body);
sema.air_extra.appendSliceAssumeCapacity(case_block.instructions.items);
final_else_body = first_else_body;
}
}
try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.SwitchBr).Struct.fields.len +
cases_extra.items.len + final_else_body.len);
_ = try child_block.addInst(.{ .tag = .switch_br, .data = .{ .pl_op = .{
.operand = operand,
.payload = sema.addExtraAssumeCapacity(Air.SwitchBr{
.cases_len = @intCast(u32, cases_len),
.else_body_len = @intCast(u32, final_else_body.len),
}),
} } });
sema.air_extra.appendSliceAssumeCapacity(cases_extra.items);
sema.air_extra.appendSliceAssumeCapacity(final_else_body);
return sema.analyzeBlockBody(block, src, &child_block, merges);
}
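/// Iterates over the values of an integer type that are not covered by any
/// range in a `RangeSet`; used to expand `inline else` over integer operands.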
const RangeSetUnhandledIterator = struct {
sema: *Sema,
ty: Type,
cur: Value,
max: Value,
ranges: []const RangeSet.Range,
range_i: usize = 0,
first: bool = true,
fn init(sema: *Sema, ty: Type, range_set: RangeSet) !RangeSetUnhandledIterator {
const mod = sema.mod;
const min = try ty.minInt(sema.arena, mod);
const max = try ty.maxIntScalar(mod, Type.comptime_int);
return RangeSetUnhandledIterator{
.sema = sema,
.ty = ty,
.cur = min,
.max = max,
.ranges = range_set.ranges.items,
};
}
fn next(it: *RangeSetUnhandledIterator) !?Value {
while (it.range_i < it.ranges.len) : (it.range_i += 1) {
if (!it.first) {
it.cur = try it.sema.intAddScalar(it.cur, try it.sema.mod.intValue(it.ty, 1), it.ty);
}
it.first = false;
if (it.cur.compareScalar(.lt, it.ranges[it.range_i].first, it.ty, it.sema.mod)) {
return it.cur;
}
it.cur = it.ranges[it.range_i].last;
}
if (!it.first) {
it.cur = try it.sema.intAddScalar(it.cur, try it.sema.mod.intValue(it.ty, 1), it.ty);
}
it.first = false;
if (it.cur.compareScalar(.lte, it.max, it.ty, it.sema.mod)) {
return it.cur;
}
return null;
}
};
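/// Resolves a switch prong item to a comptime-known value, also resolving any
/// lazy value it contains.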
fn resolveSwitchItemVal(
sema: *Sema,
block: *Block,
item_ref: Zir.Inst.Ref,
switch_node_offset: i32,
switch_prong_src: Module.SwitchProngSrc,
range_expand: Module.SwitchProngSrc.RangeExpand,
) CompileError!TypedValue {
const mod = sema.mod;
const item = try sema.resolveInst(item_ref);
const item_ty = sema.typeOf(item);
// Constructing a LazySrcLoc is costly because we only have the switch AST node.
// Only if we know for sure we need to report a compile error do we resolve the
// full source locations.
if (sema.resolveConstValue(block, .unneeded, item, "")) |val| {
try sema.resolveLazyValue(val);
return TypedValue{ .ty = item_ty, .val = val };
} else |err| switch (err) {
error.NeededSourceLocation => {
const src = switch_prong_src.resolve(mod, sema.mod.declPtr(block.src_decl), switch_node_offset, range_expand);
_ = try sema.resolveConstValue(block, src, item, "switch prong values must be comptime-known");
unreachable;
},
else => |e| return e,
}
}
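/// Validates a `first...last` range prong: the start must not exceed the end,
/// and the range must not overlap any previously recorded item or range.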
fn validateSwitchRange(
sema: *Sema,
block: *Block,
range_set: *RangeSet,
first_ref: Zir.Inst.Ref,
last_ref: Zir.Inst.Ref,
operand_ty: Type,
src_node_offset: i32,
switch_prong_src: Module.SwitchProngSrc,
) CompileError!void {
const mod = sema.mod;
const first_val = (try sema.resolveSwitchItemVal(block, first_ref, src_node_offset, switch_prong_src, .first)).val;
const last_val = (try sema.resolveSwitchItemVal(block, last_ref, src_node_offset, switch_prong_src, .last)).val;
if (first_val.compareScalar(.gt, last_val, operand_ty, mod)) {
const src = switch_prong_src.resolve(mod, mod.declPtr(block.src_decl), src_node_offset, .first);
return sema.fail(block, src, "range start value is greater than the end value", .{});
}
const maybe_prev_src = try range_set.add(first_val, last_val, operand_ty, switch_prong_src);
return sema.validateSwitchDupe(block, maybe_prev_src, switch_prong_src, src_node_offset);
}
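/// Validates a scalar integer prong item, recording it in the range set and
/// reporting a duplicate if it was already covered.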
fn validateSwitchItem(
sema: *Sema,
block: *Block,
range_set: *RangeSet,
item_ref: Zir.Inst.Ref,
operand_ty: Type,
src_node_offset: i32,
switch_prong_src: Module.SwitchProngSrc,
) CompileError!void {
const item_val = (try sema.resolveSwitchItemVal(block, item_ref, src_node_offset, switch_prong_src, .none)).val;
const maybe_prev_src = try range_set.add(item_val, item_val, operand_ty, switch_prong_src);
return sema.validateSwitchDupe(block, maybe_prev_src, switch_prong_src, src_node_offset);
}
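/// Validates an enum prong item. Items naming a known field are recorded in
/// `seen_fields`; values that match no field (possible only for
/// non-exhaustive enums) are recorded in `range_set` instead.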
fn validateSwitchItemEnum(
sema: *Sema,
block: *Block,
seen_fields: []?Module.SwitchProngSrc,
range_set: *RangeSet,
item_ref: Zir.Inst.Ref,
src_node_offset: i32,
switch_prong_src: Module.SwitchProngSrc,
) CompileError!void {
const item_tv = try sema.resolveSwitchItemVal(block, item_ref, src_node_offset, switch_prong_src, .none);
const field_index = item_tv.ty.enumTagFieldIndex(item_tv.val, sema.mod) orelse {
const maybe_prev_src = try range_set.add(item_tv.val, item_tv.val, item_tv.ty, switch_prong_src);
return sema.validateSwitchDupe(block, maybe_prev_src, switch_prong_src, src_node_offset);
};
const maybe_prev_src = seen_fields[field_index];
seen_fields[field_index] = switch_prong_src;
return sema.validateSwitchDupe(block, maybe_prev_src, switch_prong_src, src_node_offset);
}
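/// Validates an error-set prong item, recording its error name and reporting
/// a duplicate if that error was already handled by an earlier prong.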
fn validateSwitchItemError(
sema: *Sema,
block: *Block,
seen_errors: *SwitchErrorSet,
item_ref: Zir.Inst.Ref,
src_node_offset: i32,
switch_prong_src: Module.SwitchProngSrc,
) CompileError!void {
const item_tv = try sema.resolveSwitchItemVal(block, item_ref, src_node_offset, switch_prong_src, .none);
    // TODO: Do I need to typecheck here?
const error_name = item_tv.val.castTag(.@"error").?.data.name;
const maybe_prev_src = if (try seen_errors.fetchPut(error_name, switch_prong_src)) |prev|
prev.value
else
null;
return sema.validateSwitchDupe(block, maybe_prev_src, switch_prong_src, src_node_offset);
}
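/// Reports a "duplicate switch value" error with a note pointing at the
/// previous occurrence. A null `maybe_prev_src` means there was no duplicate.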
fn validateSwitchDupe(
sema: *Sema,
block: *Block,
maybe_prev_src: ?Module.SwitchProngSrc,
switch_prong_src: Module.SwitchProngSrc,
src_node_offset: i32,
) CompileError!void {
const prev_prong_src = maybe_prev_src orelse return;
const mod = sema.mod;
const block_src_decl = sema.mod.declPtr(block.src_decl);
const src = switch_prong_src.resolve(mod, block_src_decl, src_node_offset, .none);
const prev_src = prev_prong_src.resolve(mod, block_src_decl, src_node_offset, .none);
const msg = msg: {
const msg = try sema.errMsg(
block,
src,
"duplicate switch value",
.{},
);
errdefer msg.destroy(sema.gpa);
try sema.errNote(
block,
prev_src,
msg,
"previous value here",
.{},
);
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
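/// Validates a `bool` prong item by counting `true` and `false` items; more
/// than two items in total means at least one value was duplicated.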
fn validateSwitchItemBool(
sema: *Sema,
block: *Block,
true_count: *u8,
false_count: *u8,
item_ref: Zir.Inst.Ref,
src_node_offset: i32,
switch_prong_src: Module.SwitchProngSrc,
) CompileError!void {
const mod = sema.mod;
const item_val = (try sema.resolveSwitchItemVal(block, item_ref, src_node_offset, switch_prong_src, .none)).val;
if (item_val.toBool(mod)) {
true_count.* += 1;
} else {
false_count.* += 1;
}
if (true_count.* + false_count.* > 2) {
const block_src_decl = sema.mod.declPtr(block.src_decl);
const src = switch_prong_src.resolve(mod, block_src_decl, src_node_offset, .none);
return sema.fail(block, src, "duplicate switch value", .{});
}
}
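/// Maps previously seen prong item values to their source locations, for
/// duplicate detection on types that are not covered by a `RangeSet`.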
const ValueSrcMap = std.HashMap(Value, Module.SwitchProngSrc, Value.HashContext, std.hash_map.default_max_load_percentage);
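/// Validates a prong item of a sparse type (e.g. pointers, functions, types)
/// by hashing seen values to detect duplicates.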
fn validateSwitchItemSparse(
sema: *Sema,
block: *Block,
seen_values: *ValueSrcMap,
item_ref: Zir.Inst.Ref,
src_node_offset: i32,
switch_prong_src: Module.SwitchProngSrc,
) CompileError!void {
const item_val = (try sema.resolveSwitchItemVal(block, item_ref, src_node_offset, switch_prong_src, .none)).val;
const kv = (try seen_values.fetchPut(item_val, switch_prong_src)) orelse return;
return sema.validateSwitchDupe(block, kv.value, switch_prong_src, src_node_offset);
}
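/// Emits an error if the prong declares any ranges; only switches on integers
/// support range items.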
fn validateSwitchNoRange(
sema: *Sema,
block: *Block,
ranges_len: u32,
operand_ty: Type,
src_node_offset: i32,
) CompileError!void {
if (ranges_len == 0)
return;
const operand_src: LazySrcLoc = .{ .node_offset_switch_operand = src_node_offset };
const range_src: LazySrcLoc = .{ .node_offset_switch_range = src_node_offset };
const msg = msg: {
const msg = try sema.errMsg(
block,
operand_src,
"ranges not allowed when switching on type '{}'",
.{operand_ty.fmt(sema.mod)},
);
errdefer msg.destroy(sema.gpa);
try sema.errNote(
block,
range_src,
msg,
"range here",
.{},
);
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
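/// Recognizes a prong body consisting only of a panic/`unreachable` (plus
/// debug instructions) and, when the backend supports `panic_unwrap_error`,
/// lowers it directly to the corresponding panic call. Returns true if the
/// body was fully handled here.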
fn maybeErrorUnwrap(sema: *Sema, block: *Block, body: []const Zir.Inst.Index, operand: Air.Inst.Ref) !bool {
const mod = sema.mod;
if (!mod.backendSupportsFeature(.panic_unwrap_error)) return false;
const tags = sema.code.instructions.items(.tag);
for (body) |inst| {
switch (tags[inst]) {
.save_err_ret_index,
.dbg_block_begin,
.dbg_block_end,
.dbg_stmt,
.@"unreachable",
.str,
.as_node,
.panic,
.field_val,
=> {},
else => return false,
}
}
for (body) |inst| {
const air_inst = switch (tags[inst]) {
.dbg_block_begin,
.dbg_block_end,
=> continue,
.dbg_stmt => {
try sema.zirDbgStmt(block, inst);
continue;
},
.save_err_ret_index => {
try sema.zirSaveErrRetIndex(block, inst);
continue;
},
.str => try sema.zirStr(block, inst),
.as_node => try sema.zirAsNode(block, inst),
.field_val => try sema.zirFieldVal(block, inst),
.@"unreachable" => {
if (!mod.comp.formatted_panics) {
try sema.safetyPanic(block, .unwrap_error);
return true;
}
const panic_fn = try sema.getBuiltin("panicUnwrapError");
const err_return_trace = try sema.getErrorReturnTrace(block);
const args: [2]Air.Inst.Ref = .{ err_return_trace, operand };
try sema.callBuiltin(block, panic_fn, .auto, &args);
return true;
},
.panic => {
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const msg_inst = try sema.resolveInst(inst_data.operand);
const panic_fn = try sema.getBuiltin("panic");
const err_return_trace = try sema.getErrorReturnTrace(block);
const args: [3]Air.Inst.Ref = .{ msg_inst, err_return_trace, .null_value };
try sema.callBuiltin(block, panic_fn, .auto, &args);
return true;
},
else => unreachable,
};
if (sema.typeOf(air_inst).isNoReturn(mod))
return true;
sema.inst_map.putAssumeCapacity(inst, air_inst);
}
unreachable;
}
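/// Companion to `maybeErrorUnwrapComptime` for a `cond_br` whose condition is
/// an `is_non_err` check: when the checked operand is comptime-known to hold
/// an error, the branch body is analyzed for a comptime diagnostic.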
fn maybeErrorUnwrapCondbr(sema: *Sema, block: *Block, body: []const Zir.Inst.Index, cond: Zir.Inst.Ref, cond_src: LazySrcLoc) !void {
const mod = sema.mod;
const index = Zir.refToIndex(cond) orelse return;
if (sema.code.instructions.items(.tag)[index] != .is_non_err) return;
const err_inst_data = sema.code.instructions.items(.data)[index].un_node;
const err_operand = try sema.resolveInst(err_inst_data.operand);
const operand_ty = sema.typeOf(err_operand);
if (operand_ty.zigTypeTag(mod) == .ErrorSet) {
try sema.maybeErrorUnwrapComptime(block, body, err_operand);
return;
}
if (try sema.resolveDefinedValue(block, cond_src, err_operand)) |val| {
if (!operand_ty.isError(mod)) return;
if (val.getError() == null) return;
try sema.maybeErrorUnwrapComptime(block, body, err_operand);
}
}
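/// If `body` consists only of an `unreachable` (plus debug instructions) and
/// `operand` is comptime-known to hold an error, reports a
/// "caught unexpected error" compile error at the `unreachable`.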
fn maybeErrorUnwrapComptime(sema: *Sema, block: *Block, body: []const Zir.Inst.Index, operand: Air.Inst.Ref) !void {
const tags = sema.code.instructions.items(.tag);
const inst = for (body) |inst| {
switch (tags[inst]) {
.dbg_block_begin,
.dbg_block_end,
.dbg_stmt,
.save_err_ret_index,
=> {},
.@"unreachable" => break inst,
else => return,
}
} else return;
const inst_data = sema.code.instructions.items(.data)[inst].@"unreachable";
const src = inst_data.src();
if (try sema.resolveDefinedValue(block, src, operand)) |val| {
if (val.getError()) |name| {
return sema.fail(block, src, "caught unexpected error '{s}'", .{name});
}
}
}
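/// Analyzes the `@hasField` builtin: whether the given type has a field with
/// the given comptime-known name, including the `ptr`/`len` fields of slices,
/// the `len` field of arrays, and the index "names" of tuples.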
fn zirHasField(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
const name_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
const unresolved_ty = try sema.resolveType(block, ty_src, extra.lhs);
const field_name = try sema.resolveConstString(block, name_src, extra.rhs, "field name must be comptime-known");
const ty = try sema.resolveTypeFields(unresolved_ty);
const ip = &mod.intern_pool;
const has_field = hf: {
switch (ip.indexToKey(ty.ip_index)) {
.ptr_type => |ptr_type| switch (ptr_type.size) {
.Slice => {
if (mem.eql(u8, field_name, "ptr")) break :hf true;
if (mem.eql(u8, field_name, "len")) break :hf true;
break :hf false;
},
else => {},
},
.anon_struct_type => |anon_struct| {
if (anon_struct.names.len != 0) {
// If the string is not interned, then the field certainly is not present.
const name_interned = ip.getString(field_name).unwrap() orelse break :hf false;
break :hf mem.indexOfScalar(InternPool.NullTerminatedString, anon_struct.names, name_interned) != null;
} else {
const field_index = std.fmt.parseUnsigned(u32, field_name, 10) catch break :hf false;
break :hf field_index < ty.structFieldCount(mod);
}
},
.struct_type => |struct_type| {
const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse break :hf false;
assert(struct_obj.haveFieldTypes());
break :hf struct_obj.fields.contains(field_name);
},
.union_type => |union_type| {
const union_obj = mod.unionPtr(union_type.index);
assert(union_obj.haveFieldTypes());
break :hf union_obj.fields.contains(field_name);
},
.enum_type => |enum_type| {
// If the string is not interned, then the field certainly is not present.
const name_interned = ip.getString(field_name).unwrap() orelse break :hf false;
break :hf enum_type.nameIndex(ip, name_interned) != null;
},
.array_type => break :hf mem.eql(u8, field_name, "len"),
else => {},
}
return sema.fail(block, ty_src, "type '{}' does not support '@hasField'", .{
ty.fmt(sema.mod),
});
};
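// Illustrative examples of the lookup above (a hedged sketch; the concrete
// types are assumptions, not code from this compiler):
//   @hasField(struct { x: i32 }, "x")     // true
//   @hasField(enum { red, green }, "red") // true
//   @hasField([4]u8, "len")               // true: arrays expose `len`
//   @hasField([]const u8, "ptr")          // true: slices expose `ptr` and `len`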
if (has_field) {
return Air.Inst.Ref.bool_true;
} else {
return Air.Inst.Ref.bool_false;
}
}
fn zirHasDecl(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const src = inst_data.src();
const lhs_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
const rhs_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
const container_type = try sema.resolveType(block, lhs_src, extra.lhs);
const decl_name = try sema.resolveConstString(block, rhs_src, extra.rhs, "decl name must be comptime-known");
try sema.checkNamespaceType(block, lhs_src, container_type);
const namespace = container_type.getNamespaceIndex(mod).unwrap() orelse
return Air.Inst.Ref.bool_false;
if (try sema.lookupInNamespace(block, src, namespace, decl_name, true)) |decl_index| {
const decl = mod.declPtr(decl_index);
if (decl.is_pub or decl.getFileScope(mod) == block.getFileScope(mod)) {
return Air.Inst.Ref.bool_true;
}
}
return Air.Inst.Ref.bool_false;
}
fn zirImport(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].str_tok;
const operand_src = inst_data.src();
const operand = inst_data.get(sema.code);
const result = mod.importFile(block.getFileScope(mod), operand) catch |err| switch (err) {
error.ImportOutsidePkgPath => {
return sema.fail(block, operand_src, "import of file outside package path: '{s}'", .{operand});
},
error.PackageNotFound => {
const name = try block.getFileScope(mod).pkg.getName(sema.gpa, mod.*);
defer sema.gpa.free(name);
return sema.fail(block, operand_src, "no package named '{s}' available within package '{s}'", .{ operand, name });
},
else => {
// TODO: these errors are file system errors; make sure an update() will
// retry this and not cache the file system error, which may be transient.
return sema.fail(block, operand_src, "unable to open '{s}': {s}", .{ operand, @errorName(err) });
},
};
try mod.semaFile(result.file);
const file_root_decl_index = result.file.root_decl.unwrap().?;
const file_root_decl = mod.declPtr(file_root_decl_index);
try mod.declareDeclDependency(sema.owner_decl_index, file_root_decl_index);
return sema.addConstant(file_root_decl.ty, file_root_decl.val);
}
fn zirEmbedFile(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
const name = try sema.resolveConstString(block, operand_src, inst_data.operand, "file path name must be comptime-known");
const embed_file = mod.embedFile(block.getFileScope(mod), name) catch |err| switch (err) {
error.ImportOutsidePkgPath => {
return sema.fail(block, operand_src, "embed of file outside package path: '{s}'", .{name});
},
else => {
// TODO: these errors are file system errors; make sure an update() will
// retry this and not cache the file system error, which may be transient.
return sema.fail(block, operand_src, "unable to open '{s}': {s}", .{ name, @errorName(err) });
},
};
var anon_decl = try block.startAnonDecl();
defer anon_decl.deinit();
const bytes_including_null = embed_file.bytes[0 .. embed_file.bytes.len + 1];
// TODO instead of using `Value.Tag.bytes`, create a new value tag for pointing at
// a `*Module.EmbedFile`. The purpose of this would be:
// - If only the length is read and the bytes are not inspected by comptime code,
// there can be an optimization where the codegen backend does a copy_file_range
// into the final binary, and never loads the data into memory.
// - When a Decl is destroyed, it can free the `*Module.EmbedFile`.
embed_file.owner_decl = try anon_decl.finish(
try Type.array(anon_decl.arena(), embed_file.bytes.len, try mod.intValue(Type.u8, 0), Type.u8, mod),
try Value.Tag.bytes.create(anon_decl.arena(), bytes_including_null),
0, // default alignment
);
return sema.analyzeDeclRef(embed_file.owner_decl);
}
fn zirRetErrValueCode(sema: *Sema, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].str_tok;
const err_name = inst_data.get(sema.code);
// Return the error code from the function.
const kv = try mod.getErrorValue(err_name);
const result_inst = try sema.addConstant(
try mod.singleErrorSetType(kv.key),
try Value.Tag.@"error".create(sema.arena, .{ .name = kv.key }),
);
return result_inst;
}
fn zirShl(
sema: *Sema,
block: *Block,
inst: Zir.Inst.Index,
air_tag: Air.Inst.Tag,
) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
sema.src = src;
const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node };
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const lhs = try sema.resolveInst(extra.lhs);
const rhs = try sema.resolveInst(extra.rhs);
const lhs_ty = sema.typeOf(lhs);
const rhs_ty = sema.typeOf(rhs);
try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
const scalar_ty = lhs_ty.scalarType(mod);
const scalar_rhs_ty = rhs_ty.scalarType(mod);
// TODO coerce rhs if air_tag is not shl_sat
const rhs_is_comptime_int = try sema.checkIntType(block, rhs_src, scalar_rhs_ty);
const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(lhs);
const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(rhs);
if (maybe_rhs_val) |rhs_val| {
if (rhs_val.isUndef(mod)) {
return sema.addConstUndef(lhs_ty);
}
// If rhs is 0, return lhs without doing any calculations.
if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
return lhs;
}
if (scalar_ty.zigTypeTag(mod) != .ComptimeInt and air_tag != .shl_sat) {
const bit_value = try mod.intValue(Type.comptime_int, scalar_ty.intInfo(mod).bits);
if (rhs_ty.zigTypeTag(mod) == .Vector) {
var i: usize = 0;
while (i < rhs_ty.vectorLen(mod)) : (i += 1) {
const rhs_elem = try rhs_val.elemValue(sema.mod, i);
if (rhs_elem.compareHetero(.gte, bit_value, mod)) {
return sema.fail(block, rhs_src, "shift amount '{}' at index '{d}' is too large for operand type '{}'", .{
rhs_elem.fmtValue(scalar_ty, sema.mod),
i,
scalar_ty.fmt(sema.mod),
});
}
}
} else if (rhs_val.compareHetero(.gte, bit_value, mod)) {
return sema.fail(block, rhs_src, "shift amount '{}' is too large for operand type '{}'", .{
rhs_val.fmtValue(scalar_ty, sema.mod),
scalar_ty.fmt(sema.mod),
});
}
}
if (rhs_ty.zigTypeTag(mod) == .Vector) {
var i: usize = 0;
while (i < rhs_ty.vectorLen(mod)) : (i += 1) {
const rhs_elem = try rhs_val.elemValue(sema.mod, i);
if (rhs_elem.compareHetero(.lt, try mod.intValue(scalar_rhs_ty, 0), mod)) {
return sema.fail(block, rhs_src, "shift by negative amount '{}' at index '{d}'", .{
rhs_elem.fmtValue(scalar_ty, sema.mod),
i,
});
}
}
} else if (rhs_val.compareHetero(.lt, try mod.intValue(rhs_ty, 0), mod)) {
return sema.fail(block, rhs_src, "shift by negative amount '{}'", .{
rhs_val.fmtValue(scalar_ty, sema.mod),
});
}
}
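// Hedged examples of the comptime checks above, assuming a `u8` lhs:
//   x << 3  // ok
//   x << 8  // error: shift amount '8' is too large for operand type 'u8'
//   x << -1 // error: shift by negative amount '-1'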
const runtime_src = if (maybe_lhs_val) |lhs_val| rs: {
if (lhs_val.isUndef(mod)) return sema.addConstUndef(lhs_ty);
const rhs_val = maybe_rhs_val orelse {
if (scalar_ty.zigTypeTag(mod) == .ComptimeInt) {
return sema.fail(block, src, "LHS of shift must be a fixed-width integer type, or RHS must be comptime-known", .{});
}
break :rs rhs_src;
};
const val = switch (air_tag) {
.shl_exact => val: {
const shifted = try lhs_val.shlWithOverflow(rhs_val, lhs_ty, sema.arena, sema.mod);
if (scalar_ty.zigTypeTag(mod) == .ComptimeInt) {
break :val shifted.wrapped_result;
}
if (shifted.overflow_bit.compareAllWithZero(.eq, sema.mod)) {
break :val shifted.wrapped_result;
}
return sema.fail(block, src, "operation caused overflow", .{});
},
.shl_sat => if (scalar_ty.zigTypeTag(mod) == .ComptimeInt)
try lhs_val.shl(rhs_val, lhs_ty, sema.arena, sema.mod)
else
try lhs_val.shlSat(rhs_val, lhs_ty, sema.arena, sema.mod),
.shl => if (scalar_ty.zigTypeTag(mod) == .ComptimeInt)
try lhs_val.shl(rhs_val, lhs_ty, sema.arena, sema.mod)
else
try lhs_val.shlTrunc(rhs_val, lhs_ty, sema.arena, sema.mod),
else => unreachable,
};
return sema.addConstant(lhs_ty, val);
} else lhs_src;
const new_rhs = if (air_tag == .shl_sat) rhs: {
// Limit the RHS type for saturating shl to an integer type no wider than the LHS.
if (rhs_is_comptime_int or
scalar_rhs_ty.intInfo(mod).bits > scalar_ty.intInfo(mod).bits)
{
const max_int = try sema.addConstant(
lhs_ty,
try lhs_ty.maxInt(sema.arena, mod, lhs_ty),
);
const rhs_limited = try sema.analyzeMinMax(block, rhs_src, .min, &.{ rhs, max_int }, &.{ rhs_src, rhs_src });
break :rhs try sema.intCast(block, src, lhs_ty, rhs_src, rhs_limited, rhs_src, false);
} else {
break :rhs rhs;
}
} else rhs;
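// Hedged example of the limiting above: for `<<|` an oversized shift amount
// saturates the result instead of being rejected, e.g.
//   @as(u8, 1) <<| 200 == 255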
try sema.requireRuntimeBlock(block, src, runtime_src);
if (block.wantSafety()) {
const bit_count = scalar_ty.intInfo(mod).bits;
if (!std.math.isPowerOfTwo(bit_count)) {
const bit_count_val = try mod.intValue(scalar_rhs_ty, bit_count);
const ok = if (rhs_ty.zigTypeTag(mod) == .Vector) ok: {
const bit_count_inst = try sema.addConstant(rhs_ty, try Value.Tag.repeated.create(sema.arena, bit_count_val));
const lt = try block.addCmpVector(rhs, bit_count_inst, .lt);
break :ok try block.addInst(.{
.tag = .reduce,
.data = .{ .reduce = .{
.operand = lt,
.operation = .And,
} },
});
} else ok: {
const bit_count_inst = try sema.addConstant(rhs_ty, bit_count_val);
break :ok try block.addBinOp(.cmp_lt, rhs, bit_count_inst);
};
try sema.addSafetyCheck(block, ok, .shift_rhs_too_big);
}
if (air_tag == .shl_exact) {
const op_ov_tuple_ty = try sema.overflowArithmeticTupleType(lhs_ty);
const op_ov = try block.addInst(.{
.tag = .shl_with_overflow,
.data = .{ .ty_pl = .{
.ty = try sema.addType(op_ov_tuple_ty),
.payload = try sema.addExtra(Air.Bin{
.lhs = lhs,
.rhs = rhs,
}),
} },
});
const ov_bit = try sema.tupleFieldValByIndex(block, src, op_ov, 1, op_ov_tuple_ty);
const any_ov_bit = if (lhs_ty.zigTypeTag(mod) == .Vector)
try block.addInst(.{
.tag = if (block.float_mode == .Optimized) .reduce_optimized else .reduce,
.data = .{ .reduce = .{
.operand = ov_bit,
.operation = .Or,
} },
})
else
ov_bit;
const zero_ov = try sema.addConstant(Type.u1, try mod.intValue(Type.u1, 0));
const no_ov = try block.addBinOp(.cmp_eq, any_ov_bit, zero_ov);
try sema.addSafetyCheck(block, no_ov, .shl_overflow);
return sema.tupleFieldValByIndex(block, src, op_ov, 0, op_ov_tuple_ty);
}
}
return block.addBinOp(air_tag, lhs, new_rhs);
}
fn zirShr(
sema: *Sema,
block: *Block,
inst: Zir.Inst.Index,
air_tag: Air.Inst.Tag,
) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
sema.src = src;
const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node };
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const lhs = try sema.resolveInst(extra.lhs);
const rhs = try sema.resolveInst(extra.rhs);
const lhs_ty = sema.typeOf(lhs);
const rhs_ty = sema.typeOf(rhs);
try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
const scalar_ty = lhs_ty.scalarType(mod);
const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(lhs);
const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(rhs);
const runtime_src = if (maybe_rhs_val) |rhs_val| rs: {
if (rhs_val.isUndef(mod)) {
return sema.addConstUndef(lhs_ty);
}
// If rhs is 0, return lhs without doing any calculations.
if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
return lhs;
}
if (scalar_ty.zigTypeTag(mod) != .ComptimeInt) {
const bit_value = try mod.intValue(Type.comptime_int, scalar_ty.intInfo(mod).bits);
if (rhs_ty.zigTypeTag(mod) == .Vector) {
var i: usize = 0;
while (i < rhs_ty.vectorLen(mod)) : (i += 1) {
const rhs_elem = try rhs_val.elemValue(sema.mod, i);
if (rhs_elem.compareHetero(.gte, bit_value, mod)) {
return sema.fail(block, rhs_src, "shift amount '{}' at index '{d}' is too large for operand type '{}'", .{
rhs_elem.fmtValue(scalar_ty, sema.mod),
i,
scalar_ty.fmt(sema.mod),
});
}
}
} else if (rhs_val.compareHetero(.gte, bit_value, mod)) {
return sema.fail(block, rhs_src, "shift amount '{}' is too large for operand type '{}'", .{
rhs_val.fmtValue(scalar_ty, sema.mod),
scalar_ty.fmt(sema.mod),
});
}
}
if (rhs_ty.zigTypeTag(mod) == .Vector) {
var i: usize = 0;
while (i < rhs_ty.vectorLen(mod)) : (i += 1) {
const rhs_elem = try rhs_val.elemValue(sema.mod, i);
if (rhs_elem.compareHetero(.lt, try mod.intValue(rhs_ty.childType(mod), 0), mod)) {
return sema.fail(block, rhs_src, "shift by negative amount '{}' at index '{d}'", .{
rhs_elem.fmtValue(scalar_ty, sema.mod),
i,
});
}
}
} else if (rhs_val.compareHetero(.lt, try mod.intValue(rhs_ty, 0), mod)) {
return sema.fail(block, rhs_src, "shift by negative amount '{}'", .{
rhs_val.fmtValue(scalar_ty, sema.mod),
});
}
if (maybe_lhs_val) |lhs_val| {
if (lhs_val.isUndef(mod)) {
return sema.addConstUndef(lhs_ty);
}
if (air_tag == .shr_exact) {
// Detect if any ones would be shifted out.
const truncated = try lhs_val.intTruncBitsAsValue(lhs_ty, sema.arena, .unsigned, rhs_val, sema.mod);
if (!(try truncated.compareAllWithZeroAdvanced(.eq, sema))) {
return sema.fail(block, src, "exact shift shifted out 1 bits", .{});
}
}
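// For example (hedged, with comptime-known operands):
//   @shrExact(@as(u8, 0b0100), 2) // ok: 0b0001
//   @shrExact(@as(u8, 0b0101), 2) // error: exact shift shifted out 1 bits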
const val = try lhs_val.shr(rhs_val, lhs_ty, sema.arena, sema.mod);
return sema.addConstant(lhs_ty, val);
} else {
break :rs lhs_src;
}
} else rhs_src;
if (maybe_rhs_val == null and scalar_ty.zigTypeTag(mod) == .ComptimeInt) {
return sema.fail(block, src, "LHS of shift must be a fixed-width integer type, or RHS must be comptime-known", .{});
}
try sema.requireRuntimeBlock(block, src, runtime_src);
const result = try block.addBinOp(air_tag, lhs, rhs);
if (block.wantSafety()) {
const bit_count = scalar_ty.intInfo(mod).bits;
if (!std.math.isPowerOfTwo(bit_count)) {
const bit_count_val = try mod.intValue(scalar_ty, bit_count);
const ok = if (rhs_ty.zigTypeTag(mod) == .Vector) ok: {
const bit_count_inst = try sema.addConstant(rhs_ty, try Value.Tag.repeated.create(sema.arena, bit_count_val));
const lt = try block.addCmpVector(rhs, bit_count_inst, .lt);
break :ok try block.addInst(.{
.tag = .reduce,
.data = .{ .reduce = .{
.operand = lt,
.operation = .And,
} },
});
} else ok: {
const bit_count_inst = try sema.addConstant(rhs_ty, bit_count_val);
break :ok try block.addBinOp(.cmp_lt, rhs, bit_count_inst);
};
try sema.addSafetyCheck(block, ok, .shift_rhs_too_big);
}
if (air_tag == .shr_exact) {
const back = try block.addBinOp(.shl, result, rhs);
const ok = if (rhs_ty.zigTypeTag(mod) == .Vector) ok: {
const eql = try block.addCmpVector(lhs, back, .eq);
break :ok try block.addInst(.{
.tag = if (block.float_mode == .Optimized) .reduce_optimized else .reduce,
.data = .{ .reduce = .{
.operand = eql,
.operation = .And,
} },
});
} else try block.addBinOp(.cmp_eq, lhs, back);
try sema.addSafetyCheck(block, ok, .shr_overflow);
}
}
return result;
}
fn zirBitwise(
sema: *Sema,
block: *Block,
inst: Zir.Inst.Index,
air_tag: Air.Inst.Tag,
) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node };
sema.src = src;
const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node };
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const lhs = try sema.resolveInst(extra.lhs);
const rhs = try sema.resolveInst(extra.rhs);
const lhs_ty = sema.typeOf(lhs);
const rhs_ty = sema.typeOf(rhs);
try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
const instructions = &[_]Air.Inst.Ref{ lhs, rhs };
const resolved_type = try sema.resolvePeerTypes(block, src, instructions, .{ .override = &[_]?LazySrcLoc{ lhs_src, rhs_src } });
const scalar_type = resolved_type.scalarType(mod);
const scalar_tag = scalar_type.zigTypeTag(mod);
const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src);
const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src);
const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt;
if (!is_int) {
return sema.fail(block, src, "invalid operands to binary bitwise expression: '{s}' and '{s}'", .{ @tagName(lhs_ty.zigTypeTag(mod)), @tagName(rhs_ty.zigTypeTag(mod)) });
}
const runtime_src = runtime: {
// TODO: ask the linker what kind of relocations are available, and
// in some cases emit a Value that means "this decl's address AND'd with this operand".
if (try sema.resolveMaybeUndefValIntable(casted_lhs)) |lhs_val| {
if (try sema.resolveMaybeUndefValIntable(casted_rhs)) |rhs_val| {
const result_val = switch (air_tag) {
.bit_and => try lhs_val.bitwiseAnd(rhs_val, resolved_type, sema.arena, sema.mod),
.bit_or => try lhs_val.bitwiseOr(rhs_val, resolved_type, sema.arena, sema.mod),
.xor => try lhs_val.bitwiseXor(rhs_val, resolved_type, sema.arena, sema.mod),
else => unreachable,
};
return sema.addConstant(resolved_type, result_val);
} else {
break :runtime rhs_src;
}
} else {
break :runtime lhs_src;
}
};
try sema.requireRuntimeBlock(block, src, runtime_src);
return block.addBinOp(air_tag, casted_lhs, casted_rhs);
}
fn zirBitNot(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const src = inst_data.src();
const operand_src: LazySrcLoc = .{ .node_offset_un_op = inst_data.src_node };
const operand = try sema.resolveInst(inst_data.operand);
const operand_type = sema.typeOf(operand);
const scalar_type = operand_type.scalarType(mod);
if (scalar_type.zigTypeTag(mod) != .Int) {
return sema.fail(block, src, "unable to perform binary not operation on type '{}'", .{
operand_type.fmt(sema.mod),
});
}
if (try sema.resolveMaybeUndefVal(operand)) |val| {
if (val.isUndef(mod)) {
return sema.addConstUndef(operand_type);
} else if (operand_type.zigTypeTag(mod) == .Vector) {
const vec_len = try sema.usizeCast(block, operand_src, operand_type.vectorLen(mod));
const elems = try sema.arena.alloc(Value, vec_len);
for (elems, 0..) |*elem, i| {
const elem_val = try val.elemValue(sema.mod, i);
elem.* = try elem_val.bitwiseNot(scalar_type, sema.arena, sema.mod);
}
return sema.addConstant(
operand_type,
try Value.Tag.aggregate.create(sema.arena, elems),
);
} else {
const result_val = try val.bitwiseNot(operand_type, sema.arena, sema.mod);
return sema.addConstant(operand_type, result_val);
}
}
try sema.requireRuntimeBlock(block, src, null);
return block.addTyOp(.not, operand_type, operand);
}
fn analyzeTupleCat(
sema: *Sema,
block: *Block,
src_node: i32,
lhs: Air.Inst.Ref,
rhs: Air.Inst.Ref,
) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const lhs_ty = sema.typeOf(lhs);
const rhs_ty = sema.typeOf(rhs);
const src = LazySrcLoc.nodeOffset(src_node);
const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = src_node };
const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = src_node };
const lhs_len = lhs_ty.structFieldCount(mod);
const rhs_len = rhs_ty.structFieldCount(mod);
const dest_fields = lhs_len + rhs_len;
if (dest_fields == 0) {
return sema.addConstant(Type.empty_struct_literal, Value.empty_struct);
}
if (lhs_len == 0) {
return rhs;
}
if (rhs_len == 0) {
return lhs;
}
const final_len = try sema.usizeCast(block, rhs_src, dest_fields);
const types = try sema.arena.alloc(InternPool.Index, final_len);
const values = try sema.arena.alloc(InternPool.Index, final_len);
const opt_runtime_src = rs: {
var runtime_src: ?LazySrcLoc = null;
var i: u32 = 0;
while (i < lhs_len) : (i += 1) {
types[i] = lhs_ty.structFieldType(i, mod).ip_index;
const default_val = lhs_ty.structFieldDefaultValue(i, mod);
values[i] = default_val.ip_index;
const operand_src = lhs_src; // TODO better source location
if (default_val.ip_index == .unreachable_value) {
runtime_src = operand_src;
values[i] = .none;
}
}
i = 0;
while (i < rhs_len) : (i += 1) {
types[i + lhs_len] = rhs_ty.structFieldType(i, mod).ip_index;
const default_val = rhs_ty.structFieldDefaultValue(i, mod);
values[i + lhs_len] = default_val.ip_index;
const operand_src = rhs_src; // TODO better source location
if (default_val.ip_index == .unreachable_value) {
runtime_src = operand_src;
values[i + lhs_len] = .none;
}
}
break :rs runtime_src;
};
const tuple_ty = try mod.intern(.{ .anon_struct_type = .{
.types = types,
.values = values,
.names = &.{},
} });
const runtime_src = opt_runtime_src orelse {
const tuple_val = try mod.intern(.{ .aggregate = .{
.ty = tuple_ty,
.storage = .{ .elems = values },
} });
return sema.addConstant(tuple_ty.toType(), tuple_val.toValue());
};
try sema.requireRuntimeBlock(block, src, runtime_src);
const element_refs = try sema.arena.alloc(Air.Inst.Ref, final_len);
var i: u32 = 0;
while (i < lhs_len) : (i += 1) {
const operand_src = lhs_src; // TODO better source location
element_refs[i] = try sema.tupleFieldValByIndex(block, operand_src, lhs, i, lhs_ty);
}
i = 0;
while (i < rhs_len) : (i += 1) {
const operand_src = rhs_src; // TODO better source location
element_refs[i + lhs_len] =
try sema.tupleFieldValByIndex(block, operand_src, rhs, i, rhs_ty);
}
return block.addAggregateInit(tuple_ty.toType(), element_refs);
}
fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const lhs = try sema.resolveInst(extra.lhs);
const rhs = try sema.resolveInst(extra.rhs);
const lhs_ty = sema.typeOf(lhs);
const rhs_ty = sema.typeOf(rhs);
const src = inst_data.src();
const lhs_is_tuple = lhs_ty.isTuple(mod);
const rhs_is_tuple = rhs_ty.isTuple(mod);
if (lhs_is_tuple and rhs_is_tuple) {
return sema.analyzeTupleCat(block, inst_data.src_node, lhs, rhs);
}
const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node };
const lhs_info = try sema.getArrayCatInfo(block, lhs_src, lhs, rhs_ty) orelse lhs_info: {
if (lhs_is_tuple) break :lhs_info @as(Type.ArrayInfo, undefined);
return sema.fail(block, lhs_src, "expected indexable; found '{}'", .{lhs_ty.fmt(mod)});
};
const rhs_info = try sema.getArrayCatInfo(block, rhs_src, rhs, lhs_ty) orelse {
assert(!rhs_is_tuple);
return sema.fail(block, rhs_src, "expected indexable; found '{}'", .{rhs_ty.fmt(mod)});
};
const resolved_elem_ty = t: {
var trash_block = block.makeSubBlock();
trash_block.is_comptime = false;
defer trash_block.instructions.deinit(sema.gpa);
const instructions = [_]Air.Inst.Ref{
try trash_block.addBitCast(lhs_info.elem_type, .void_value),
try trash_block.addBitCast(rhs_info.elem_type, .void_value),
};
break :t try sema.resolvePeerTypes(block, src, &instructions, .{
.override = &[_]?LazySrcLoc{ lhs_src, rhs_src },
});
};
// When the sentinels mismatch, the result has no sentinel.
// Otherwise, use the sentinel value provided by either operand,
// coercing it to the peer-resolved element type.
const res_sent_val: ?Value = s: {
if (lhs_info.sentinel) |lhs_sent_val| {
const lhs_sent = try sema.addConstant(lhs_info.elem_type, lhs_sent_val);
if (rhs_info.sentinel) |rhs_sent_val| {
const rhs_sent = try sema.addConstant(rhs_info.elem_type, rhs_sent_val);
const lhs_sent_casted = try sema.coerce(block, resolved_elem_ty, lhs_sent, lhs_src);
const rhs_sent_casted = try sema.coerce(block, resolved_elem_ty, rhs_sent, rhs_src);
const lhs_sent_casted_val = try sema.resolveConstValue(block, lhs_src, lhs_sent_casted, "array sentinel value must be comptime-known");
const rhs_sent_casted_val = try sema.resolveConstValue(block, rhs_src, rhs_sent_casted, "array sentinel value must be comptime-known");
if (try sema.valuesEqual(lhs_sent_casted_val, rhs_sent_casted_val, resolved_elem_ty)) {
break :s lhs_sent_casted_val;
} else {
break :s null;
}
} else {
const lhs_sent_casted = try sema.coerce(block, resolved_elem_ty, lhs_sent, lhs_src);
const lhs_sent_casted_val = try sema.resolveConstValue(block, lhs_src, lhs_sent_casted, "array sentinel value must be comptime-known");
break :s lhs_sent_casted_val;
}
} else {
if (rhs_info.sentinel) |rhs_sent_val| {
const rhs_sent = try sema.addConstant(rhs_info.elem_type, rhs_sent_val);
const rhs_sent_casted = try sema.coerce(block, resolved_elem_ty, rhs_sent, rhs_src);
const rhs_sent_casted_val = try sema.resolveConstValue(block, rhs_src, rhs_sent_casted, "array sentinel value must be comptime-known");
break :s rhs_sent_casted_val;
} else {
break :s null;
}
}
};
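// Hedged illustration of the sentinel resolution above:
//   [2:0]u8{ 1, 2 } ++ [2:0]u8{ 3, 4 } // [4:0]u8: sentinels match
//   [2:0]u8{ 1, 2 } ++ [2:1]u8{ 3, 4 } // [4]u8: mismatch drops the sentinel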
const lhs_len = try sema.usizeCast(block, lhs_src, lhs_info.len);
const rhs_len = try sema.usizeCast(block, rhs_src, rhs_info.len);
const result_len = std.math.add(usize, lhs_len, rhs_len) catch |err| switch (err) {
error.Overflow => return sema.fail(
block,
src,
"concatenating arrays of length {d} and {d} produces an array too large for this compiler implementation to handle",
.{ lhs_len, rhs_len },
),
};
const result_ty = try Type.array(sema.arena, result_len, res_sent_val, resolved_elem_ty, mod);
const ptr_addrspace = p: {
if (lhs_ty.zigTypeTag(mod) == .Pointer) break :p lhs_ty.ptrAddressSpace(mod);
if (rhs_ty.zigTypeTag(mod) == .Pointer) break :p rhs_ty.ptrAddressSpace(mod);
break :p null;
};
const runtime_src = if (switch (lhs_ty.zigTypeTag(mod)) {
.Array, .Struct => try sema.resolveMaybeUndefVal(lhs),
.Pointer => try sema.resolveDefinedValue(block, lhs_src, lhs),
else => unreachable,
}) |lhs_val| rs: {
if (switch (rhs_ty.zigTypeTag(mod)) {
.Array, .Struct => try sema.resolveMaybeUndefVal(rhs),
.Pointer => try sema.resolveDefinedValue(block, rhs_src, rhs),
else => unreachable,
}) |rhs_val| {
const lhs_sub_val = if (lhs_ty.isSinglePointer(mod))
(try sema.pointerDeref(block, lhs_src, lhs_val, lhs_ty)).?
else
lhs_val;
const rhs_sub_val = if (rhs_ty.isSinglePointer(mod))
(try sema.pointerDeref(block, rhs_src, rhs_val, rhs_ty)).?
else
rhs_val;
const final_len_including_sent = result_len + @boolToInt(res_sent_val != null);
const element_vals = try sema.arena.alloc(Value, final_len_including_sent);
var elem_i: usize = 0;
while (elem_i < lhs_len) : (elem_i += 1) {
const lhs_elem_i = elem_i;
const elem_ty = if (lhs_is_tuple) lhs_ty.structFieldType(lhs_elem_i, mod) else lhs_info.elem_type;
const elem_default_val = if (lhs_is_tuple) lhs_ty.structFieldDefaultValue(lhs_elem_i, mod) else Value.@"unreachable";
const elem_val = if (elem_default_val.ip_index == .unreachable_value) try lhs_sub_val.elemValue(mod, lhs_elem_i) else elem_default_val;
const elem_val_inst = try sema.addConstant(elem_ty, elem_val);
const coerced_elem_val_inst = try sema.coerce(block, resolved_elem_ty, elem_val_inst, .unneeded);
const coerced_elem_val = try sema.resolveConstMaybeUndefVal(block, .unneeded, coerced_elem_val_inst, "");
element_vals[elem_i] = coerced_elem_val;
}
while (elem_i < result_len) : (elem_i += 1) {
const rhs_elem_i = elem_i - lhs_len;
const elem_ty = if (rhs_is_tuple) rhs_ty.structFieldType(rhs_elem_i, mod) else rhs_info.elem_type;
const elem_default_val = if (rhs_is_tuple) rhs_ty.structFieldDefaultValue(rhs_elem_i, mod) else Value.@"unreachable";
const elem_val = if (elem_default_val.ip_index == .unreachable_value) try rhs_sub_val.elemValue(mod, rhs_elem_i) else elem_default_val;
const elem_val_inst = try sema.addConstant(elem_ty, elem_val);
const coerced_elem_val_inst = try sema.coerce(block, resolved_elem_ty, elem_val_inst, .unneeded);
const coerced_elem_val = try sema.resolveConstMaybeUndefVal(block, .unneeded, coerced_elem_val_inst, "");
element_vals[elem_i] = coerced_elem_val;
}
if (res_sent_val) |sent_val| {
element_vals[result_len] = sent_val;
}
const val = try Value.Tag.aggregate.create(sema.arena, element_vals);
return sema.addConstantMaybeRef(block, result_ty, val, ptr_addrspace != null);
} else break :rs rhs_src;
} else lhs_src;
try sema.requireRuntimeBlock(block, src, runtime_src);
if (ptr_addrspace) |ptr_as| {
const alloc_ty = try Type.ptr(sema.arena, mod, .{
.pointee_type = result_ty,
.@"addrspace" = ptr_as,
});
const alloc = try block.addTy(.alloc, alloc_ty);
const elem_ptr_ty = try Type.ptr(sema.arena, mod, .{
.pointee_type = resolved_elem_ty,
.@"addrspace" = ptr_as,
});
var elem_i: usize = 0;
while (elem_i < lhs_len) : (elem_i += 1) {
const elem_index = try sema.addIntUnsigned(Type.usize, elem_i);
const elem_ptr = try block.addPtrElemPtr(alloc, elem_index, elem_ptr_ty);
const init = try sema.elemVal(block, lhs_src, lhs, elem_index, src, true);
try sema.storePtr2(block, src, elem_ptr, src, init, lhs_src, .store);
}
while (elem_i < result_len) : (elem_i += 1) {
const elem_index = try sema.addIntUnsigned(Type.usize, elem_i);
const rhs_index = try sema.addIntUnsigned(Type.usize, elem_i - lhs_len);
const elem_ptr = try block.addPtrElemPtr(alloc, elem_index, elem_ptr_ty);
const init = try sema.elemVal(block, rhs_src, rhs, rhs_index, src, true);
try sema.storePtr2(block, src, elem_ptr, src, init, rhs_src, .store);
}
if (res_sent_val) |sent_val| {
const elem_index = try sema.addIntUnsigned(Type.usize, result_len);
const elem_ptr = try block.addPtrElemPtr(alloc, elem_index, elem_ptr_ty);
const init = try sema.addConstant(lhs_info.elem_type, sent_val);
try sema.storePtr2(block, src, elem_ptr, src, init, lhs_src, .store);
}
return alloc;
}
const element_refs = try sema.arena.alloc(Air.Inst.Ref, result_len);
{
var elem_i: usize = 0;
while (elem_i < lhs_len) : (elem_i += 1) {
const index = try sema.addIntUnsigned(Type.usize, elem_i);
const init = try sema.elemVal(block, lhs_src, lhs, index, src, true);
element_refs[elem_i] = try sema.coerce(block, resolved_elem_ty, init, lhs_src);
}
while (elem_i < result_len) : (elem_i += 1) {
const index = try sema.addIntUnsigned(Type.usize, elem_i - lhs_len);
const init = try sema.elemVal(block, rhs_src, rhs, index, src, true);
element_refs[elem_i] = try sema.coerce(block, resolved_elem_ty, init, rhs_src);
}
}
return block.addAggregateInit(result_ty, element_refs);
}
fn getArrayCatInfo(sema: *Sema, block: *Block, src: LazySrcLoc, operand: Air.Inst.Ref, peer_ty: Type) !?Type.ArrayInfo {
const mod = sema.mod;
const operand_ty = sema.typeOf(operand);
switch (operand_ty.zigTypeTag(mod)) {
.Array => return operand_ty.arrayInfo(mod),
.Pointer => {
const ptr_info = operand_ty.ptrInfo(mod);
switch (ptr_info.size) {
// TODO: in the Many case here this should only work if the type
// has a sentinel, and this code should compute the length based
// on the sentinel value.
.Slice, .Many => {
const val = try sema.resolveConstValue(block, src, operand, "slice value being concatenated must be comptime-known");
return Type.ArrayInfo{
.elem_type = ptr_info.pointee_type,
.sentinel = ptr_info.sentinel,
.len = val.sliceLen(sema.mod),
};
},
.One => {
if (ptr_info.pointee_type.zigTypeTag(mod) == .Array) {
return ptr_info.pointee_type.arrayInfo(mod);
}
},
.C => {},
}
},
.Struct => {
if (operand_ty.isTuple(mod) and peer_ty.isIndexable(mod)) {
assert(!peer_ty.isTuple(mod));
return .{
.elem_type = peer_ty.elemType2(mod),
.sentinel = null,
.len = operand_ty.arrayLen(mod),
};
}
},
else => {},
}
return null;
}
fn analyzeTupleMul(
sema: *Sema,
block: *Block,
src_node: i32,
operand: Air.Inst.Ref,
factor: usize,
) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const operand_ty = sema.typeOf(operand);
const src = LazySrcLoc.nodeOffset(src_node);
const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = src_node };
const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = src_node };
const tuple_len = operand_ty.structFieldCount(mod);
const final_len = std.math.mul(usize, tuple_len, factor) catch
return sema.fail(block, rhs_src, "operation results in overflow", .{});
if (final_len == 0) {
return sema.addConstant(Type.empty_struct_literal, Value.empty_struct);
}
const types = try sema.arena.alloc(InternPool.Index, final_len);
const values = try sema.arena.alloc(InternPool.Index, final_len);
const opt_runtime_src = rs: {
var runtime_src: ?LazySrcLoc = null;
for (0..tuple_len) |i| {
types[i] = operand_ty.structFieldType(i, mod).ip_index;
values[i] = operand_ty.structFieldDefaultValue(i, mod).ip_index;
const operand_src = lhs_src; // TODO better source location
if (values[i] == .unreachable_value) {
runtime_src = operand_src;
values[i] = .none; // TODO don't treat unreachable_value as special
}
}
for (0..factor) |i| {
mem.copyForwards(InternPool.Index, types[tuple_len * i ..], types[0..tuple_len]);
mem.copyForwards(InternPool.Index, values[tuple_len * i ..], values[0..tuple_len]);
}
break :rs runtime_src;
};
const tuple_ty = try mod.intern(.{ .anon_struct_type = .{
.types = types,
.values = values,
.names = &.{},
} });
const runtime_src = opt_runtime_src orelse {
const tuple_val = try mod.intern(.{ .aggregate = .{
.ty = tuple_ty,
.storage = .{ .elems = values },
} });
return sema.addConstant(tuple_ty.toType(), tuple_val.toValue());
};
try sema.requireRuntimeBlock(block, src, runtime_src);
const element_refs = try sema.arena.alloc(Air.Inst.Ref, final_len);
var i: u32 = 0;
while (i < tuple_len) : (i += 1) {
const operand_src = lhs_src; // TODO better source location
element_refs[i] = try sema.tupleFieldValByIndex(block, operand_src, operand, @intCast(u32, i), operand_ty);
}
i = 1;
while (i < factor) : (i += 1) {
@memcpy(element_refs[tuple_len * i ..][0..tuple_len], element_refs[0..tuple_len]);
}
return block.addAggregateInit(tuple_ty.toType(), element_refs);
}
fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const lhs = try sema.resolveInst(extra.lhs);
const lhs_ty = sema.typeOf(lhs);
const src: LazySrcLoc = inst_data.src();
const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
const operator_src: LazySrcLoc = .{ .node_offset_main_token = inst_data.src_node };
const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node };
if (lhs_ty.isTuple(mod)) {
// In `**`, the rhs must be comptime-known, but the lhs may be runtime-known.
const factor = try sema.resolveInt(block, rhs_src, extra.rhs, Type.usize, "array multiplication factor must be comptime-known");
const factor_casted = try sema.usizeCast(block, rhs_src, factor);
return sema.analyzeTupleMul(block, inst_data.src_node, lhs, factor_casted);
}
// Analyze the lhs first, to catch the case where someone tried to use `**` for exponentiation.
const lhs_info = try sema.getArrayCatInfo(block, lhs_src, lhs, lhs_ty) orelse {
const msg = msg: {
const msg = try sema.errMsg(block, lhs_src, "expected indexable; found '{}'", .{lhs_ty.fmt(mod)});
errdefer msg.destroy(sema.gpa);
switch (lhs_ty.zigTypeTag(mod)) {
.Int, .Float, .ComptimeFloat, .ComptimeInt, .Vector => {
try sema.errNote(block, operator_src, msg, "this operator multiplies arrays; use std.math.pow for exponentiation", .{});
},
else => {},
}
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
};
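// For example (hedged): `2 ** 8` lands here because `2` is not indexable, and
// the note steers the user toward `std.math.pow(u32, 2, 8)` instead.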
// In `**`, the rhs must be comptime-known, but the lhs may be runtime-known.
const factor = try sema.resolveInt(block, rhs_src, extra.rhs, Type.usize, "array multiplication factor must be comptime-known");
const result_len_u64 = std.math.mul(u64, lhs_info.len, factor) catch
return sema.fail(block, rhs_src, "operation results in overflow", .{});
const result_len = try sema.usizeCast(block, src, result_len_u64);
const result_ty = try Type.array(sema.arena, result_len, lhs_info.sentinel, lhs_info.elem_type, mod);
const ptr_addrspace = if (lhs_ty.zigTypeTag(mod) == .Pointer) lhs_ty.ptrAddressSpace(mod) else null;
const lhs_len = try sema.usizeCast(block, lhs_src, lhs_info.len);
if (try sema.resolveDefinedValue(block, lhs_src, lhs)) |lhs_val| {
const final_len_including_sent = result_len + @boolToInt(lhs_info.sentinel != null);
const lhs_sub_val = if (lhs_ty.isSinglePointer(mod))
(try sema.pointerDeref(block, lhs_src, lhs_val, lhs_ty)).?
else
lhs_val;
const val = v: {
// Optimization for the common pattern of a single element repeated N times, such
// as zero-filling a byte array.
if (lhs_len == 1) {
const elem_val = try lhs_sub_val.elemValue(mod, 0);
break :v try mod.intern(.{ .aggregate = .{
.ty = result_ty.ip_index,
.storage = .{ .repeated_elem = elem_val.ip_index },
} });
}
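// E.g. a value like `[1]u8{0} ** 4096` (sizes illustrative) is interned as a
// single repeated element rather than 4096 individual element values.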
const element_vals = try sema.arena.alloc(InternPool.Index, final_len_including_sent);
var elem_i: usize = 0;
while (elem_i < result_len) {
var lhs_i: usize = 0;
while (lhs_i < lhs_len) : (lhs_i += 1) {
const elem_val = try lhs_sub_val.elemValue(mod, lhs_i);
assert(elem_val.ip_index != .none);
element_vals[elem_i] = elem_val.ip_index;
elem_i += 1;
}
}
if (lhs_info.sentinel) |sent_val| {
element_vals[result_len] = sent_val.ip_index;
}
break :v try mod.intern(.{ .aggregate = .{
.ty = result_ty.ip_index,
.storage = .{ .elems = element_vals },
} });
};
return sema.addConstantMaybeRef(block, result_ty, val.toValue(), ptr_addrspace != null);
}
try sema.requireRuntimeBlock(block, src, lhs_src);
if (ptr_addrspace) |ptr_as| {
const alloc_ty = try Type.ptr(sema.arena, mod, .{
.pointee_type = result_ty,
.@"addrspace" = ptr_as,
});
const alloc = try block.addTy(.alloc, alloc_ty);
const elem_ptr_ty = try Type.ptr(sema.arena, mod, .{
.pointee_type = lhs_info.elem_type,
.@"addrspace" = ptr_as,
});
var elem_i: usize = 0;
while (elem_i < result_len) {
var lhs_i: usize = 0;
while (lhs_i < lhs_len) : (lhs_i += 1) {
const elem_index = try sema.addIntUnsigned(Type.usize, elem_i);
elem_i += 1;
const lhs_index = try sema.addIntUnsigned(Type.usize, lhs_i);
const elem_ptr = try block.addPtrElemPtr(alloc, elem_index, elem_ptr_ty);
const init = try sema.elemVal(block, lhs_src, lhs, lhs_index, src, true);
try sema.storePtr2(block, src, elem_ptr, src, init, lhs_src, .store);
}
}
if (lhs_info.sentinel) |sent_val| {
const elem_index = try sema.addIntUnsigned(Type.usize, result_len);
const elem_ptr = try block.addPtrElemPtr(alloc, elem_index, elem_ptr_ty);
const init = try sema.addConstant(lhs_info.elem_type, sent_val);
try sema.storePtr2(block, src, elem_ptr, src, init, lhs_src, .store);
}
return alloc;
}
const element_refs = try sema.arena.alloc(Air.Inst.Ref, result_len);
var elem_i: usize = 0;
while (elem_i < result_len) {
var lhs_i: usize = 0;
while (lhs_i < lhs_len) : (lhs_i += 1) {
const lhs_index = try sema.addIntUnsigned(Type.usize, lhs_i);
const init = try sema.elemVal(block, lhs_src, lhs, lhs_index, src, true);
element_refs[elem_i] = init;
elem_i += 1;
}
}
return block.addAggregateInit(result_ty, element_refs);
}
fn zirNegate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const src = inst_data.src();
const lhs_src = src;
const rhs_src: LazySrcLoc = .{ .node_offset_un_op = inst_data.src_node };
const rhs = try sema.resolveInst(inst_data.operand);
const rhs_ty = sema.typeOf(rhs);
const rhs_scalar_ty = rhs_ty.scalarType(mod);
if (rhs_scalar_ty.isUnsignedInt(mod) or switch (rhs_scalar_ty.zigTypeTag(mod)) {
.Int, .ComptimeInt, .Float, .ComptimeFloat => false,
else => true,
}) {
return sema.fail(block, src, "negation of type '{}'", .{rhs_ty.fmt(sema.mod)});
}
if (rhs_scalar_ty.isAnyFloat()) {
// We handle float negation here to ensure negative zero is represented in the bits.
if (try sema.resolveMaybeUndefVal(rhs)) |rhs_val| {
if (rhs_val.isUndef(mod)) return sema.addConstUndef(rhs_ty);
return sema.addConstant(rhs_ty, try rhs_val.floatNeg(rhs_ty, sema.arena, sema.mod));
}
try sema.requireRuntimeBlock(block, src, null);
return block.addUnOp(if (block.float_mode == .Optimized) .neg_optimized else .neg, rhs);
}
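// E.g. `-@as(f32, 0.0)` must yield negative zero (sign bit set), which a
// `0 - x` lowering would not guarantee under IEEE rounding, hence the
// dedicated float negation above.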
const lhs = if (rhs_ty.zigTypeTag(mod) == .Vector)
try sema.addConstant(rhs_ty, try Value.Tag.repeated.create(sema.arena, try mod.intValue(rhs_scalar_ty, 0)))
else
try sema.addConstant(rhs_ty, try mod.intValue(rhs_ty, 0));
return sema.analyzeArithmetic(block, .sub, lhs, rhs, src, lhs_src, rhs_src, true);
}
fn zirNegateWrap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const src = inst_data.src();
const lhs_src = src;
const rhs_src: LazySrcLoc = .{ .node_offset_un_op = inst_data.src_node };
const rhs = try sema.resolveInst(inst_data.operand);
const rhs_ty = sema.typeOf(rhs);
const rhs_scalar_ty = rhs_ty.scalarType(mod);
switch (rhs_scalar_ty.zigTypeTag(mod)) {
.Int, .ComptimeInt, .Float, .ComptimeFloat => {},
else => return sema.fail(block, src, "negation of type '{}'", .{rhs_ty.fmt(sema.mod)}),
}
const lhs = if (rhs_ty.zigTypeTag(mod) == .Vector)
try sema.addConstant(rhs_ty, try Value.Tag.repeated.create(sema.arena, try mod.intValue(rhs_scalar_ty, 0)))
else
try sema.addConstant(rhs_ty, try mod.intValue(rhs_ty, 0));
return sema.analyzeArithmetic(block, .subwrap, lhs, rhs, src, lhs_src, rhs_src, true);
}
fn zirArithmetic(
sema: *Sema,
block: *Block,
inst: Zir.Inst.Index,
zir_tag: Zir.Inst.Tag,
safety: bool,
) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
sema.src = .{ .node_offset_bin_op = inst_data.src_node };
const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node };
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const lhs = try sema.resolveInst(extra.lhs);
const rhs = try sema.resolveInst(extra.rhs);
return sema.analyzeArithmetic(block, zir_tag, lhs, rhs, sema.src, lhs_src, rhs_src, safety);
}
fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node };
sema.src = src;
const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node };
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const lhs = try sema.resolveInst(extra.lhs);
const rhs = try sema.resolveInst(extra.rhs);
const lhs_ty = sema.typeOf(lhs);
const rhs_ty = sema.typeOf(rhs);
const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(mod);
const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(mod);
try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
try sema.checkInvalidPtrArithmetic(block, src, lhs_ty);
const instructions = &[_]Air.Inst.Ref{ lhs, rhs };
const resolved_type = try sema.resolvePeerTypes(block, src, instructions, .{
.override = &[_]?LazySrcLoc{ lhs_src, rhs_src },
});
const is_vector = resolved_type.zigTypeTag(mod) == .Vector;
const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src);
const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src);
const lhs_scalar_ty = lhs_ty.scalarType(mod);
const rhs_scalar_ty = rhs_ty.scalarType(mod);
const scalar_tag = resolved_type.scalarType(mod).zigTypeTag(mod);
const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt;
try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, .div);
const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(casted_lhs);
const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(casted_rhs);
if ((lhs_ty.zigTypeTag(mod) == .ComptimeFloat and rhs_ty.zigTypeTag(mod) == .ComptimeInt) or
(lhs_ty.zigTypeTag(mod) == .ComptimeInt and rhs_ty.zigTypeTag(mod) == .ComptimeFloat))
{
// If it makes a difference whether we coerce to ints or floats before doing the division, emit an error.
// If lhs % rhs is 0, the two coercions agree, so it doesn't matter.
const lhs_val = maybe_lhs_val orelse unreachable;
const rhs_val = maybe_rhs_val orelse unreachable;
const rem = lhs_val.floatRem(rhs_val, resolved_type, sema.arena, mod) catch unreachable;
if (!rem.compareAllWithZero(.eq, mod)) {
return sema.fail(block, src, "ambiguous coercion of division operands '{s}' and '{s}'; non-zero remainder '{}'", .{
@tagName(lhs_ty.tag()), @tagName(rhs_ty.tag()), rem.fmtValue(resolved_type, sema.mod),
});
}
}
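// Hedged examples of the ambiguity check above:
//   4.0 / 2 // ok: zero remainder, so int and float coercion agree on 2.0
//   3.0 / 2 // error: ambiguous coercion; 1.5 as floats vs 1 as ints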
// TODO: emit compile error when .div is used on integers and there would be an
// ambiguous result between div_floor and div_trunc.
// For integers:
// If the lhs is zero, then zero is returned regardless of rhs.
// If the rhs is zero, compile error for division by zero.
// If the rhs is undefined, compile error because there is a possible
// value (zero) for which the division would be illegal behavior.
// If the lhs is undefined:
// * if lhs type is signed:
// * if rhs is comptime-known and not -1, result is undefined
// * if rhs is -1 or runtime-known, compile error because there is a
// possible value (-min_int / -1) for which division would be
// illegal behavior.
// * if lhs type is unsigned, undef is returned regardless of rhs.
//
// For floats:
// If the rhs is zero:
// * comptime_float: compile error for division by zero.
// * other float type:
// * if the lhs is zero: QNaN
// * otherwise: +Inf or -Inf depending on lhs sign
// If the rhs is undefined:
// * comptime_float: compile error because there is a possible
// value (zero) for which the division would be illegal behavior.
// * other float type: result is undefined
// If the lhs is undefined, result is undefined.
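// A few hedged consequences of the integer rules above:
//   0 / x                  // comptime zero, even for a runtime rhs
//   x / 0                  // compile error: division by zero
//   @as(u8, undefined) / 2 // undef: no rhs makes unsigned division illegal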
switch (scalar_tag) {
.Int, .ComptimeInt, .ComptimeFloat => {
if (maybe_lhs_val) |lhs_val| {
if (!lhs_val.isUndef(mod)) {
if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
const scalar_zero = switch (scalar_tag) {
.ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0),
.ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0),
else => unreachable,
};
const zero_val = if (is_vector) b: {
break :b try Value.Tag.repeated.create(sema.arena, scalar_zero);
} else scalar_zero;
return sema.addConstant(resolved_type, zero_val);
}
}
}
if (maybe_rhs_val) |rhs_val| {
if (rhs_val.isUndef(mod)) {
return sema.failWithUseOfUndef(block, rhs_src);
}
if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) {
return sema.failWithDivideByZero(block, rhs_src);
}
// TODO: if the RHS is one, return the LHS directly
}
},
else => {},
}
const runtime_src = rs: {
if (maybe_lhs_val) |lhs_val| {
if (lhs_val.isUndef(mod)) {
if (lhs_scalar_ty.isSignedInt(mod) and rhs_scalar_ty.isSignedInt(mod)) {
if (maybe_rhs_val) |rhs_val| {
if (try sema.compareAll(rhs_val, .neq, try mod.intValue(resolved_type, -1), resolved_type)) {
return sema.addConstUndef(resolved_type);
}
}
return sema.failWithUseOfUndef(block, lhs_src);
}
return sema.addConstUndef(resolved_type);
}
if (maybe_rhs_val) |rhs_val| {
if (is_int) {
const res = try lhs_val.intDiv(rhs_val, resolved_type, sema.arena, mod);
var vector_index: usize = undefined;
if (!(try sema.intFitsInType(res, resolved_type, &vector_index))) {
return sema.failWithIntegerOverflow(block, src, resolved_type, res, vector_index);
}
return sema.addConstant(resolved_type, res);
} else {
return sema.addConstant(
resolved_type,
try lhs_val.floatDiv(rhs_val, resolved_type, sema.arena, mod),
);
}
} else {
break :rs rhs_src;
}
} else {
break :rs lhs_src;
}
};
try sema.requireRuntimeBlock(block, src, runtime_src);
if (block.wantSafety()) {
try sema.addDivIntOverflowSafety(block, resolved_type, lhs_scalar_ty, maybe_lhs_val, maybe_rhs_val, casted_lhs, casted_rhs, is_int);
try sema.addDivByZeroSafety(block, resolved_type, maybe_rhs_val, casted_rhs, is_int);
}
const air_tag = if (is_int) blk: {
if (lhs_ty.isSignedInt(mod) or rhs_ty.isSignedInt(mod)) {
return sema.fail(block, src, "division with '{s}' and '{s}': signed integers must use @divTrunc, @divFloor, or @divExact", .{ @tagName(lhs_ty.tag()), @tagName(rhs_ty.tag()) });
}
break :blk Air.Inst.Tag.div_trunc;
} else switch (block.float_mode) {
.Optimized => Air.Inst.Tag.div_float_optimized,
.Strict => Air.Inst.Tag.div_float,
};
return block.addBinOp(air_tag, casted_lhs, casted_rhs);
}
fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node };
sema.src = src;
const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node };
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const lhs = try sema.resolveInst(extra.lhs);
const rhs = try sema.resolveInst(extra.rhs);
const lhs_ty = sema.typeOf(lhs);
const rhs_ty = sema.typeOf(rhs);
const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(mod);
const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(mod);
try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
try sema.checkInvalidPtrArithmetic(block, src, lhs_ty);
const instructions = &[_]Air.Inst.Ref{ lhs, rhs };
const resolved_type = try sema.resolvePeerTypes(block, src, instructions, .{
.override = &[_]?LazySrcLoc{ lhs_src, rhs_src },
});
const is_vector = resolved_type.zigTypeTag(mod) == .Vector;
const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src);
const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src);
const lhs_scalar_ty = lhs_ty.scalarType(mod);
const scalar_tag = resolved_type.scalarType(mod).zigTypeTag(mod);
const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt;
try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, .div_exact);
const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(casted_lhs);
const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(casted_rhs);
const runtime_src = rs: {
// For integers:
// If the lhs is zero, then zero is returned regardless of rhs.
// If the rhs is zero, compile error for division by zero.
// If the rhs is undefined, compile error because there is a possible
// value (zero) for which the division would be illegal behavior.
// If the lhs is undefined, compile error because there is a possible
// value for which the division would result in a remainder.
// TODO: emit runtime safety for if there is a remainder
// TODO: emit runtime safety for division by zero
//
// For floats:
// If the rhs is zero, compile error for division by zero.
// If the rhs is undefined, compile error because there is a possible
// value (zero) for which the division would be illegal behavior.
// If the lhs is undefined, compile error because there is a possible
// value for which the division would result in a remainder.
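// For example (hedged):
//   @divExact(10, 5) // ok: 2
//   @divExact(10, 3) // error: exact division produced remainder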
if (maybe_lhs_val) |lhs_val| {
if (lhs_val.isUndef(mod)) {
return sema.failWithUseOfUndef(block, lhs_src);
} else {
if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
const scalar_zero = switch (scalar_tag) {
.ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0),
.ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0),
else => unreachable,
};
const zero_val = if (is_vector) b: {
break :b try Value.Tag.repeated.create(sema.arena, scalar_zero);
} else scalar_zero;
return sema.addConstant(resolved_type, zero_val);
}
}
}
if (maybe_rhs_val) |rhs_val| {
if (rhs_val.isUndef(mod)) {
return sema.failWithUseOfUndef(block, rhs_src);
}
if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) {
return sema.failWithDivideByZero(block, rhs_src);
}
// TODO: if the RHS is one, return the LHS directly
}
if (maybe_lhs_val) |lhs_val| {
if (maybe_rhs_val) |rhs_val| {
if (is_int) {
const modulus_val = try lhs_val.intMod(rhs_val, resolved_type, sema.arena, mod);
if (!(modulus_val.compareAllWithZero(.eq, mod))) {
return sema.fail(block, src, "exact division produced remainder", .{});
}
const res = try lhs_val.intDiv(rhs_val, resolved_type, sema.arena, mod);
var vector_index: usize = undefined;
if (!(try sema.intFitsInType(res, resolved_type, &vector_index))) {
return sema.failWithIntegerOverflow(block, src, resolved_type, res, vector_index);
}
return sema.addConstant(resolved_type, res);
} else {
const modulus_val = try lhs_val.floatMod(rhs_val, resolved_type, sema.arena, mod);
if (!(modulus_val.compareAllWithZero(.eq, mod))) {
return sema.fail(block, src, "exact division produced remainder", .{});
}
return sema.addConstant(
resolved_type,
try lhs_val.floatDiv(rhs_val, resolved_type, sema.arena, mod),
);
}
} else break :rs rhs_src;
} else break :rs lhs_src;
};
try sema.requireRuntimeBlock(block, src, runtime_src);
// Depending on whether safety is enabled, we will have a slightly different strategy
// here. The `div_exact` AIR instruction causes undefined behavior if a remainder
// is produced, so in the safety check case, it cannot be used. Instead we do a
// div_trunc and check for remainder.
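// Sketch of the emitted check (pseudo-AIR, an illustration rather than the
// literal instructions):
//   q  = div_trunc(lhs, rhs)
//   ok = if (int) rem(lhs, rhs) == 0 else floor(q) == q
//   panic with .exact_division_remainder when !ok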
if (block.wantSafety()) {
try sema.addDivIntOverflowSafety(block, resolved_type, lhs_scalar_ty, maybe_lhs_val, maybe_rhs_val, casted_lhs, casted_rhs, is_int);
try sema.addDivByZeroSafety(block, resolved_type, maybe_rhs_val, casted_rhs, is_int);
const result = try block.addBinOp(.div_trunc, casted_lhs, casted_rhs);
const ok = if (!is_int) ok: {
const floored = try block.addUnOp(.floor, result);
if (resolved_type.zigTypeTag(mod) == .Vector) {
const eql = try block.addCmpVector(result, floored, .eq);
break :ok try block.addInst(.{
.tag = switch (block.float_mode) {
.Strict => .reduce,
.Optimized => .reduce_optimized,
},
.data = .{ .reduce = .{
.operand = eql,
.operation = .And,
} },
});
} else {
const is_in_range = try block.addBinOp(switch (block.float_mode) {
.Strict => .cmp_eq,
.Optimized => .cmp_eq_optimized,
}, result, floored);
break :ok is_in_range;
}
} else ok: {
const remainder = try block.addBinOp(.rem, casted_lhs, casted_rhs);
const scalar_zero = switch (scalar_tag) {
.ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0),
.ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0),
else => unreachable,
};
if (resolved_type.zigTypeTag(mod) == .Vector) {
const zero_val = try Value.Tag.repeated.create(sema.arena, scalar_zero);
const zero = try sema.addConstant(resolved_type, zero_val);
const eql = try block.addCmpVector(remainder, zero, .eq);
break :ok try block.addInst(.{
.tag = .reduce,
.data = .{ .reduce = .{
.operand = eql,
.operation = .And,
} },
});
} else {
const zero = try sema.addConstant(resolved_type, scalar_zero);
const is_in_range = try block.addBinOp(.cmp_eq, remainder, zero);
break :ok is_in_range;
}
};
try sema.addSafetyCheck(block, ok, .exact_division_remainder);
return result;
}
return block.addBinOp(airTag(block, is_int, .div_exact, .div_exact_optimized), casted_lhs, casted_rhs);
}
fn zirDivFloor(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node };
sema.src = src;
const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node };
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const lhs = try sema.resolveInst(extra.lhs);
const rhs = try sema.resolveInst(extra.rhs);
const lhs_ty = sema.typeOf(lhs);
const rhs_ty = sema.typeOf(rhs);
const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(mod);
const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(mod);
try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
try sema.checkInvalidPtrArithmetic(block, src, lhs_ty);
const instructions = &[_]Air.Inst.Ref{ lhs, rhs };
const resolved_type = try sema.resolvePeerTypes(block, src, instructions, .{
.override = &[_]?LazySrcLoc{ lhs_src, rhs_src },
});
const is_vector = resolved_type.zigTypeTag(mod) == .Vector;
const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src);
const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src);
const lhs_scalar_ty = lhs_ty.scalarType(mod);
const rhs_scalar_ty = rhs_ty.scalarType(mod);
const scalar_tag = resolved_type.scalarType(mod).zigTypeTag(mod);
const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt;
try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, .div_floor);
const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(casted_lhs);
const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(casted_rhs);
const runtime_src = rs: {
// For integers:
// If the lhs is zero, then zero is returned regardless of rhs.
// If the rhs is zero, compile error for division by zero.
// If the rhs is undefined, compile error because there is a possible
// value (zero) for which the division would be illegal behavior.
// If the lhs is undefined:
// * if lhs type is signed:
// * if rhs is comptime-known and not -1, result is undefined
// * if rhs is -1 or runtime-known, compile error because there is a
// possible value (-min_int / -1) for which division would be
// illegal behavior.
// * if lhs type is unsigned, undef is returned regardless of rhs.
// TODO: emit runtime safety for division by zero
//
// For floats:
// If the rhs is zero, compile error for division by zero.
// If the rhs is undefined, compile error because there is a possible
// value (zero) for which the division would be illegal behavior.
// If the lhs is undefined, result is undefined.
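// For example (hypothetical user code): `@divFloor(-7, 2)` is -4 (the result
// rounds toward negative infinity), and `comptime @divFloor(7, 0)` is a
// compile error for division by zero.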
if (maybe_lhs_val) |lhs_val| {
if (!lhs_val.isUndef(mod)) {
if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
const scalar_zero = switch (scalar_tag) {
.ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0),
.ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0),
else => unreachable,
};
const zero_val = if (is_vector) b: {
break :b try Value.Tag.repeated.create(sema.arena, scalar_zero);
} else scalar_zero;
return sema.addConstant(resolved_type, zero_val);
}
}
}
if (maybe_rhs_val) |rhs_val| {
if (rhs_val.isUndef(mod)) {
return sema.failWithUseOfUndef(block, rhs_src);
}
if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) {
return sema.failWithDivideByZero(block, rhs_src);
}
// TODO: if the RHS is one, return the LHS directly
}
if (maybe_lhs_val) |lhs_val| {
if (lhs_val.isUndef(mod)) {
if (lhs_scalar_ty.isSignedInt(mod) and rhs_scalar_ty.isSignedInt(mod)) {
if (maybe_rhs_val) |rhs_val| {
if (try sema.compareAll(rhs_val, .neq, try mod.intValue(resolved_type, -1), resolved_type)) {
return sema.addConstUndef(resolved_type);
}
}
return sema.failWithUseOfUndef(block, lhs_src);
}
return sema.addConstUndef(resolved_type);
}
if (maybe_rhs_val) |rhs_val| {
if (is_int) {
return sema.addConstant(
resolved_type,
try lhs_val.intDivFloor(rhs_val, resolved_type, sema.arena, mod),
);
} else {
return sema.addConstant(
resolved_type,
try lhs_val.floatDivFloor(rhs_val, resolved_type, sema.arena, mod),
);
}
} else break :rs rhs_src;
} else break :rs lhs_src;
};
try sema.requireRuntimeBlock(block, src, runtime_src);
if (block.wantSafety()) {
try sema.addDivIntOverflowSafety(block, resolved_type, lhs_scalar_ty, maybe_lhs_val, maybe_rhs_val, casted_lhs, casted_rhs, is_int);
try sema.addDivByZeroSafety(block, resolved_type, maybe_rhs_val, casted_rhs, is_int);
}
return block.addBinOp(airTag(block, is_int, .div_floor, .div_floor_optimized), casted_lhs, casted_rhs);
}
fn zirDivTrunc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node };
sema.src = src;
const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node };
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const lhs = try sema.resolveInst(extra.lhs);
const rhs = try sema.resolveInst(extra.rhs);
const lhs_ty = sema.typeOf(lhs);
const rhs_ty = sema.typeOf(rhs);
const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(mod);
const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(mod);
try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
try sema.checkInvalidPtrArithmetic(block, src, lhs_ty);
const instructions = &[_]Air.Inst.Ref{ lhs, rhs };
const resolved_type = try sema.resolvePeerTypes(block, src, instructions, .{
.override = &[_]?LazySrcLoc{ lhs_src, rhs_src },
});
const is_vector = resolved_type.zigTypeTag(mod) == .Vector;
const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src);
const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src);
const lhs_scalar_ty = lhs_ty.scalarType(mod);
const rhs_scalar_ty = rhs_ty.scalarType(mod);
const scalar_tag = resolved_type.scalarType(mod).zigTypeTag(mod);
const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt;
try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, .div_trunc);
const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(casted_lhs);
const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(casted_rhs);
const runtime_src = rs: {
// For integers:
// If the lhs is zero, then zero is returned regardless of rhs.
// If the rhs is zero, compile error for division by zero.
// If the rhs is undefined, compile error because there is a possible
// value (zero) for which the division would be illegal behavior.
// If the lhs is undefined:
// * if lhs type is signed:
// * if rhs is comptime-known and not -1, result is undefined
// * if rhs is -1 or runtime-known, compile error because there is a
// possible value (-min_int / -1) for which division would be
// illegal behavior.
// * if lhs type is unsigned, undef is returned regardless of rhs.
// TODO: emit runtime safety for division by zero
//
// For floats:
// If the rhs is zero, compile error for division by zero.
// If the rhs is undefined, compile error because there is a possible
// value (zero) for which the division would be illegal behavior.
// If the lhs is undefined, result is undefined.
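// For example (hypothetical user code): `@divTrunc(-7, 2)` is -3 (the result
// rounds toward zero), whereas `@divFloor(-7, 2)` would be -4.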
if (maybe_lhs_val) |lhs_val| {
if (!lhs_val.isUndef(mod)) {
if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
const scalar_zero = switch (scalar_tag) {
.ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0),
.ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0),
else => unreachable,
};
const zero_val = if (is_vector) b: {
break :b try Value.Tag.repeated.create(sema.arena, scalar_zero);
} else scalar_zero;
return sema.addConstant(resolved_type, zero_val);
}
}
}
if (maybe_rhs_val) |rhs_val| {
if (rhs_val.isUndef(mod)) {
return sema.failWithUseOfUndef(block, rhs_src);
}
if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) {
return sema.failWithDivideByZero(block, rhs_src);
}
}
if (maybe_lhs_val) |lhs_val| {
if (lhs_val.isUndef(mod)) {
if (lhs_scalar_ty.isSignedInt(mod) and rhs_scalar_ty.isSignedInt(mod)) {
if (maybe_rhs_val) |rhs_val| {
if (try sema.compareAll(rhs_val, .neq, try mod.intValue(resolved_type, -1), resolved_type)) {
return sema.addConstUndef(resolved_type);
}
}
return sema.failWithUseOfUndef(block, lhs_src);
}
return sema.addConstUndef(resolved_type);
}
if (maybe_rhs_val) |rhs_val| {
if (is_int) {
const res = try lhs_val.intDiv(rhs_val, resolved_type, sema.arena, mod);
var vector_index: usize = undefined;
if (!(try sema.intFitsInType(res, resolved_type, &vector_index))) {
return sema.failWithIntegerOverflow(block, src, resolved_type, res, vector_index);
}
return sema.addConstant(resolved_type, res);
} else {
return sema.addConstant(
resolved_type,
try lhs_val.floatDivTrunc(rhs_val, resolved_type, sema.arena, mod),
);
}
} else break :rs rhs_src;
} else break :rs lhs_src;
};
try sema.requireRuntimeBlock(block, src, runtime_src);
if (block.wantSafety()) {
try sema.addDivIntOverflowSafety(block, resolved_type, lhs_scalar_ty, maybe_lhs_val, maybe_rhs_val, casted_lhs, casted_rhs, is_int);
try sema.addDivByZeroSafety(block, resolved_type, maybe_rhs_val, casted_rhs, is_int);
}
return block.addBinOp(airTag(block, is_int, .div_trunc, .div_trunc_optimized), casted_lhs, casted_rhs);
}
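/// Emits a runtime check that a signed integer division cannot overflow. The
/// only overflowing case is `min_int / -1`; e.g. for i8, `-128 / -1` would be
/// 128, which does not fit in i8.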
fn addDivIntOverflowSafety(
sema: *Sema,
block: *Block,
resolved_type: Type,
lhs_scalar_ty: Type,
maybe_lhs_val: ?Value,
maybe_rhs_val: ?Value,
casted_lhs: Air.Inst.Ref,
casted_rhs: Air.Inst.Ref,
is_int: bool,
) CompileError!void {
const mod = sema.mod;
if (!is_int) return;
// If the LHS is unsigned, it cannot cause overflow.
if (!lhs_scalar_ty.isSignedInt(mod)) return;
// If the LHS is widened to a larger integer type, no overflow is possible.
if (lhs_scalar_ty.intInfo(mod).bits < resolved_type.intInfo(mod).bits) {
return;
}
const min_int = try resolved_type.minInt(sema.arena, mod);
const neg_one_scalar = try mod.intValue(lhs_scalar_ty, -1);
const neg_one = if (resolved_type.zigTypeTag(mod) == .Vector)
try Value.Tag.repeated.create(sema.arena, neg_one_scalar)
else
neg_one_scalar;
// If the LHS is comptime-known to be not equal to the min int,
// no overflow is possible.
if (maybe_lhs_val) |lhs_val| {
if (try lhs_val.compareAll(.neq, min_int, resolved_type, mod)) return;
}
// If the RHS is comptime-known to not be equal to -1, no overflow is possible.
if (maybe_rhs_val) |rhs_val| {
if (try rhs_val.compareAll(.neq, neg_one, resolved_type, mod)) return;
}
var ok: Air.Inst.Ref = .none;
if (resolved_type.zigTypeTag(mod) == .Vector) {
if (maybe_lhs_val == null) {
const min_int_ref = try sema.addConstant(resolved_type, min_int);
ok = try block.addCmpVector(casted_lhs, min_int_ref, .neq);
}
if (maybe_rhs_val == null) {
const neg_one_ref = try sema.addConstant(resolved_type, neg_one);
const rhs_ok = try block.addCmpVector(casted_rhs, neg_one_ref, .neq);
if (ok == .none) {
ok = rhs_ok;
} else {
ok = try block.addBinOp(.bool_or, ok, rhs_ok);
}
}
assert(ok != .none);
ok = try block.addInst(.{
.tag = .reduce,
.data = .{ .reduce = .{
.operand = ok,
.operation = .And,
} },
});
} else {
if (maybe_lhs_val == null) {
const min_int_ref = try sema.addConstant(resolved_type, min_int);
ok = try block.addBinOp(.cmp_neq, casted_lhs, min_int_ref);
}
if (maybe_rhs_val == null) {
const neg_one_ref = try sema.addConstant(resolved_type, neg_one);
const rhs_ok = try block.addBinOp(.cmp_neq, casted_rhs, neg_one_ref);
if (ok == .none) {
ok = rhs_ok;
} else {
ok = try block.addBinOp(.bool_or, ok, rhs_ok);
}
}
assert(ok != .none);
}
try sema.addSafetyCheck(block, ok, .integer_overflow);
}
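/// Emits a runtime divide-by-zero check when the divisor is not
/// comptime-known. Skipped for strict-mode floats, where IEEE division by
/// zero is well-defined (e.g. `1.0 / 0.0` is positive infinity).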
fn addDivByZeroSafety(
sema: *Sema,
block: *Block,
resolved_type: Type,
maybe_rhs_val: ?Value,
casted_rhs: Air.Inst.Ref,
is_int: bool,
) CompileError!void {
// Strict IEEE floats have well-defined division by zero.
if (!is_int and block.float_mode == .Strict) return;
// If rhs was comptime-known to be zero a compile error would have been
// emitted above.
if (maybe_rhs_val != null) return;
const mod = sema.mod;
const scalar_zero = if (is_int)
try mod.intValue(resolved_type.scalarType(mod), 0)
else
try mod.floatValue(resolved_type.scalarType(mod), 0);
const ok = if (resolved_type.zigTypeTag(mod) == .Vector) ok: {
const zero_val = try Value.Tag.repeated.create(sema.arena, scalar_zero);
const zero = try sema.addConstant(resolved_type, zero_val);
const ok = try block.addCmpVector(casted_rhs, zero, .neq);
break :ok try block.addInst(.{
.tag = if (is_int) .reduce else .reduce_optimized,
.data = .{ .reduce = .{
.operand = ok,
.operation = .And,
} },
});
} else ok: {
const zero = try sema.addConstant(resolved_type, scalar_zero);
break :ok try block.addBinOp(if (is_int) .cmp_neq else .cmp_neq_optimized, casted_rhs, zero);
};
try sema.addSafetyCheck(block, ok, .divide_by_zero);
}
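/// Selects the AIR tag for a float-mode-sensitive operation: integer
/// operations always use `normal`; float operations use `optimized` only
/// under `.Optimized` float mode.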
fn airTag(block: *Block, is_int: bool, normal: Air.Inst.Tag, optimized: Air.Inst.Tag) Air.Inst.Tag {
if (is_int) return normal;
return switch (block.float_mode) {
.Strict => normal,
.Optimized => optimized,
};
}
fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node };
sema.src = src;
const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node };
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const lhs = try sema.resolveInst(extra.lhs);
const rhs = try sema.resolveInst(extra.rhs);
const lhs_ty = sema.typeOf(lhs);
const rhs_ty = sema.typeOf(rhs);
const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(mod);
const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(mod);
try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
try sema.checkInvalidPtrArithmetic(block, src, lhs_ty);
const instructions = &[_]Air.Inst.Ref{ lhs, rhs };
const resolved_type = try sema.resolvePeerTypes(block, src, instructions, .{
.override = &[_]?LazySrcLoc{ lhs_src, rhs_src },
});
const is_vector = resolved_type.zigTypeTag(mod) == .Vector;
const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src);
const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src);
const lhs_scalar_ty = lhs_ty.scalarType(mod);
const rhs_scalar_ty = rhs_ty.scalarType(mod);
const scalar_tag = resolved_type.scalarType(mod).zigTypeTag(mod);
const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt;
try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, .mod_rem);
const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(casted_lhs);
const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(casted_rhs);
const runtime_src = rs: {
// For integers:
// Either operand being undef is a compile error because there exists
// a possible value (TODO what is it?) that would invoke illegal behavior.
// TODO: can lhs undef be handled better?
//
// For floats:
// If the rhs is zero, compile error for division by zero.
// If the rhs is undefined, compile error because there is a possible
// value (zero) for which the division would be illegal behavior.
// If the lhs is undefined, result is undefined.
//
// For either one: if the result would be different between @mod and @rem,
// then emit a compile error saying you have to pick one.
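// For example (hypothetical user code): for `-5 % 3`, @mod would give 1 while
// @rem would give -2, so `%` on possibly-negative signed operands is rejected
// and the user must write @mod or @rem explicitly.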
if (is_int) {
if (maybe_lhs_val) |lhs_val| {
if (lhs_val.isUndef(mod)) {
return sema.failWithUseOfUndef(block, lhs_src);
}
if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
const scalar_zero = switch (scalar_tag) {
.ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0),
.ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0),
else => unreachable,
};
const zero_val = if (is_vector) b: {
break :b try Value.Tag.repeated.create(sema.arena, scalar_zero);
} else scalar_zero;
return sema.addConstant(resolved_type, zero_val);
}
} else if (lhs_scalar_ty.isSignedInt(mod)) {
return sema.failWithModRemNegative(block, lhs_src, lhs_ty, rhs_ty);
}
if (maybe_rhs_val) |rhs_val| {
if (rhs_val.isUndef(mod)) {
return sema.failWithUseOfUndef(block, rhs_src);
}
if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) {
return sema.failWithDivideByZero(block, rhs_src);
}
if (!(try rhs_val.compareAllWithZeroAdvanced(.gte, sema))) {
return sema.failWithModRemNegative(block, rhs_src, lhs_ty, rhs_ty);
}
if (maybe_lhs_val) |lhs_val| {
const rem_result = try sema.intRem(resolved_type, lhs_val, rhs_val);
// If this answer could possibly be different by doing `intMod`,
// we must emit a compile error. Otherwise, it's OK.
if (!(try lhs_val.compareAllWithZeroAdvanced(.gte, sema)) and
!(try rem_result.compareAllWithZeroAdvanced(.eq, sema)))
{
return sema.failWithModRemNegative(block, lhs_src, lhs_ty, rhs_ty);
}
return sema.addConstant(resolved_type, rem_result);
}
break :rs lhs_src;
} else if (rhs_scalar_ty.isSignedInt(mod)) {
return sema.failWithModRemNegative(block, rhs_src, lhs_ty, rhs_ty);
} else {
break :rs rhs_src;
}
}
// float operands
if (maybe_rhs_val) |rhs_val| {
if (rhs_val.isUndef(mod)) {
return sema.failWithUseOfUndef(block, rhs_src);
}
if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) {
return sema.failWithDivideByZero(block, rhs_src);
}
if (!(try rhs_val.compareAllWithZeroAdvanced(.gte, sema))) {
return sema.failWithModRemNegative(block, rhs_src, lhs_ty, rhs_ty);
}
if (maybe_lhs_val) |lhs_val| {
if (lhs_val.isUndef(mod) or !(try lhs_val.compareAllWithZeroAdvanced(.gte, sema))) {
return sema.failWithModRemNegative(block, lhs_src, lhs_ty, rhs_ty);
}
return sema.addConstant(
resolved_type,
try lhs_val.floatRem(rhs_val, resolved_type, sema.arena, mod),
);
} else {
return sema.failWithModRemNegative(block, lhs_src, lhs_ty, rhs_ty);
}
} else {
return sema.failWithModRemNegative(block, rhs_src, lhs_ty, rhs_ty);
}
};
try sema.requireRuntimeBlock(block, src, runtime_src);
if (block.wantSafety()) {
try sema.addDivByZeroSafety(block, resolved_type, maybe_rhs_val, casted_rhs, is_int);
}
const air_tag = airTag(block, is_int, .rem, .rem_optimized);
return block.addBinOp(air_tag, casted_lhs, casted_rhs);
}
fn intRem(
sema: *Sema,
ty: Type,
lhs: Value,
rhs: Value,
) CompileError!Value {
const mod = sema.mod;
if (ty.zigTypeTag(mod) == .Vector) {
const result_data = try sema.arena.alloc(Value, ty.vectorLen(mod));
const scalar_ty = ty.scalarType(mod);
for (result_data, 0..) |*scalar, i| {
const lhs_elem = try lhs.elemValue(sema.mod, i);
const rhs_elem = try rhs.elemValue(sema.mod, i);
scalar.* = try sema.intRemScalar(lhs_elem, rhs_elem, scalar_ty);
}
return Value.Tag.aggregate.create(sema.arena, result_data);
}
return sema.intRemScalar(lhs, rhs, ty);
}
fn intRemScalar(sema: *Sema, lhs: Value, rhs: Value, scalar_ty: Type) CompileError!Value {
const mod = sema.mod;
// TODO is this a performance issue? maybe we should try the operation without
// resorting to BigInt first.
var lhs_space: Value.BigIntSpace = undefined;
var rhs_space: Value.BigIntSpace = undefined;
const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, mod, sema);
const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, sema);
const limbs_q = try sema.arena.alloc(
math.big.Limb,
lhs_bigint.limbs.len,
);
const limbs_r = try sema.arena.alloc(
math.big.Limb,
// TODO: consider reworking Sema to re-use Values rather than
// always producing new Value objects.
rhs_bigint.limbs.len,
);
const limbs_buffer = try sema.arena.alloc(
math.big.Limb,
math.big.int.calcDivLimbsBufferLen(lhs_bigint.limbs.len, rhs_bigint.limbs.len),
);
var result_q = math.big.int.Mutable{ .limbs = limbs_q, .positive = undefined, .len = undefined };
var result_r = math.big.int.Mutable{ .limbs = limbs_r, .positive = undefined, .len = undefined };
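// divTrunc's remainder takes the sign of the dividend, which matches @rem
// semantics.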
result_q.divTrunc(&result_r, lhs_bigint, rhs_bigint, limbs_buffer);
return mod.intValue_big(scalar_ty, result_r.toConst());
}
fn zirMod(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node };
sema.src = src;
const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node };
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const lhs = try sema.resolveInst(extra.lhs);
const rhs = try sema.resolveInst(extra.rhs);
const lhs_ty = sema.typeOf(lhs);
const rhs_ty = sema.typeOf(rhs);
const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(mod);
const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(mod);
try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
try sema.checkInvalidPtrArithmetic(block, src, lhs_ty);
const instructions = &[_]Air.Inst.Ref{ lhs, rhs };
const resolved_type = try sema.resolvePeerTypes(block, src, instructions, .{
.override = &[_]?LazySrcLoc{ lhs_src, rhs_src },
});
const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src);
const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src);
const scalar_tag = resolved_type.scalarType(mod).zigTypeTag(mod);
const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt;
try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, .mod);
const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(casted_lhs);
const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(casted_rhs);
const runtime_src = rs: {
// For integers:
// Either operand being undef is a compile error because there exists
// a possible value (TODO what is it?) that would invoke illegal behavior.
// TODO: can lhs zero be handled better?
// TODO: can lhs undef be handled better?
//
// For floats:
// If the rhs is zero, compile error for division by zero.
// If the rhs is undefined, compile error because there is a possible
// value (zero) for which the division would be illegal behavior.
// If the lhs is undefined, result is undefined.
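// For example (hypothetical user code): `@mod(-5, 3)` is 1 and `@mod(5, -3)`
// is -1; the result takes the sign of the divisor.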
if (is_int) {
if (maybe_lhs_val) |lhs_val| {
if (lhs_val.isUndef(mod)) {
return sema.failWithUseOfUndef(block, lhs_src);
}
}
if (maybe_rhs_val) |rhs_val| {
if (rhs_val.isUndef(mod)) {
return sema.failWithUseOfUndef(block, rhs_src);
}
if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) {
return sema.failWithDivideByZero(block, rhs_src);
}
if (maybe_lhs_val) |lhs_val| {
return sema.addConstant(
resolved_type,
try lhs_val.intMod(rhs_val, resolved_type, sema.arena, mod),
);
}
break :rs lhs_src;
} else {
break :rs rhs_src;
}
}
// float operands
if (maybe_rhs_val) |rhs_val| {
if (rhs_val.isUndef(mod)) {
return sema.failWithUseOfUndef(block, rhs_src);
}
if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) {
return sema.failWithDivideByZero(block, rhs_src);
}
}
if (maybe_lhs_val) |lhs_val| {
if (lhs_val.isUndef(mod)) {
return sema.addConstUndef(resolved_type);
}
if (maybe_rhs_val) |rhs_val| {
return sema.addConstant(
resolved_type,
try lhs_val.floatMod(rhs_val, resolved_type, sema.arena, mod),
);
} else break :rs rhs_src;
} else break :rs lhs_src;
};
try sema.requireRuntimeBlock(block, src, runtime_src);
if (block.wantSafety()) {
try sema.addDivByZeroSafety(block, resolved_type, maybe_rhs_val, casted_rhs, is_int);
}
const air_tag = airTag(block, is_int, .mod, .mod_optimized);
return block.addBinOp(air_tag, casted_lhs, casted_rhs);
}
fn zirRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node };
sema.src = src;
const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node };
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const lhs = try sema.resolveInst(extra.lhs);
const rhs = try sema.resolveInst(extra.rhs);
const lhs_ty = sema.typeOf(lhs);
const rhs_ty = sema.typeOf(rhs);
const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(mod);
const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(mod);
try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
try sema.checkInvalidPtrArithmetic(block, src, lhs_ty);
const instructions = &[_]Air.Inst.Ref{ lhs, rhs };
const resolved_type = try sema.resolvePeerTypes(block, src, instructions, .{
.override = &[_]?LazySrcLoc{ lhs_src, rhs_src },
});
const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src);
const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src);
const scalar_tag = resolved_type.scalarType(mod).zigTypeTag(mod);
const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt;
try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, .rem);
const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(casted_lhs);
const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(casted_rhs);
const runtime_src = rs: {
// For integers:
// Either operand being undef is a compile error because there exists
// a possible value (TODO what is it?) that would invoke illegal behavior.
// TODO: can lhs zero be handled better?
// TODO: can lhs undef be handled better?
//
// For floats:
// If the rhs is zero, compile error for division by zero.
// If the rhs is undefined, compile error because there is a possible
// value (zero) for which the division would be illegal behavior.
// If the lhs is undefined, result is undefined.
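// For example (hypothetical user code): `@rem(-5, 3)` is -2 and `@rem(5, -3)`
// is 2; the result takes the sign of the dividend.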
if (is_int) {
if (maybe_lhs_val) |lhs_val| {
if (lhs_val.isUndef(mod)) {
return sema.failWithUseOfUndef(block, lhs_src);
}
}
if (maybe_rhs_val) |rhs_val| {
if (rhs_val.isUndef(mod)) {
return sema.failWithUseOfUndef(block, rhs_src);
}
if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) {
return sema.failWithDivideByZero(block, rhs_src);
}
if (maybe_lhs_val) |lhs_val| {
return sema.addConstant(
resolved_type,
try sema.intRem(resolved_type, lhs_val, rhs_val),
);
}
break :rs lhs_src;
} else {
break :rs rhs_src;
}
}
// float operands
if (maybe_rhs_val) |rhs_val| {
if (rhs_val.isUndef(mod)) {
return sema.failWithUseOfUndef(block, rhs_src);
}
if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) {
return sema.failWithDivideByZero(block, rhs_src);
}
}
if (maybe_lhs_val) |lhs_val| {
if (lhs_val.isUndef(mod)) {
return sema.addConstUndef(resolved_type);
}
if (maybe_rhs_val) |rhs_val| {
return sema.addConstant(
resolved_type,
try lhs_val.floatRem(rhs_val, resolved_type, sema.arena, mod),
);
} else break :rs rhs_src;
} else break :rs lhs_src;
};
try sema.requireRuntimeBlock(block, src, runtime_src);
if (block.wantSafety()) {
try sema.addDivByZeroSafety(block, resolved_type, maybe_rhs_val, casted_rhs, is_int);
}
const air_tag = airTag(block, is_int, .rem, .rem_optimized);
return block.addBinOp(air_tag, casted_lhs, casted_rhs);
}
fn zirOverflowArithmetic(
sema: *Sema,
block: *Block,
extended: Zir.Inst.Extended.InstData,
zir_tag: Zir.Inst.Extended,
) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data;
const src = LazySrcLoc.nodeOffset(extra.node);
const lhs_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
const rhs_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = extra.node };
const uncasted_lhs = try sema.resolveInst(extra.lhs);
const uncasted_rhs = try sema.resolveInst(extra.rhs);
const lhs_ty = sema.typeOf(uncasted_lhs);
const rhs_ty = sema.typeOf(uncasted_rhs);
const mod = sema.mod;
try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
const instructions = &[_]Air.Inst.Ref{ uncasted_lhs, uncasted_rhs };
const dest_ty = if (zir_tag == .shl_with_overflow)
lhs_ty
else
try sema.resolvePeerTypes(block, src, instructions, .{
.override = &[_]?LazySrcLoc{ lhs_src, rhs_src },
});
const rhs_dest_ty = if (zir_tag == .shl_with_overflow)
try sema.log2IntType(block, lhs_ty, src)
else
dest_ty;
const lhs = try sema.coerce(block, dest_ty, uncasted_lhs, lhs_src);
const rhs = try sema.coerce(block, rhs_dest_ty, uncasted_rhs, rhs_src);
if (dest_ty.scalarType(mod).zigTypeTag(mod) != .Int) {
return sema.fail(block, src, "expected vector of integers or integer tag type, found '{}'", .{dest_ty.fmt(mod)});
}
const maybe_lhs_val = try sema.resolveMaybeUndefVal(lhs);
const maybe_rhs_val = try sema.resolveMaybeUndefVal(rhs);
const tuple_ty = try sema.overflowArithmeticTupleType(dest_ty);
var result: struct {
inst: Air.Inst.Ref = .none,
wrapped: Value = Value.@"unreachable",
overflow_bit: Value,
} = result: {
const zero = try mod.intValue(dest_ty.scalarType(mod), 0);
switch (zir_tag) {
.add_with_overflow => {
// If either of the arguments is zero, `false` is returned and the other is stored
// to the result, even if it is undefined.
// Otherwise, if either of the arguments is undefined, undefined is returned.
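// For example (hypothetical user code): `@addWithOverflow(@as(u8, 255), 1)`
// evaluates to the tuple `.{ 0, 1 }`: the wrapped result 0 with the
// overflow bit set.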
if (maybe_lhs_val) |lhs_val| {
if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) {
break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, zero), .inst = rhs };
}
}
if (maybe_rhs_val) |rhs_val| {
if (!rhs_val.isUndef(mod) and (try rhs_val.compareAllWithZeroAdvanced(.eq, sema))) {
break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, zero), .inst = lhs };
}
}
if (maybe_lhs_val) |lhs_val| {
if (maybe_rhs_val) |rhs_val| {
if (lhs_val.isUndef(mod) or rhs_val.isUndef(mod)) {
break :result .{ .overflow_bit = Value.undef, .wrapped = Value.undef };
}
const result = try sema.intAddWithOverflow(lhs_val, rhs_val, dest_ty);
break :result .{ .overflow_bit = result.overflow_bit, .wrapped = result.wrapped_result };
}
}
},
.sub_with_overflow => {
// If the rhs is zero, then the result is lhs and no overflow occurred.
// Otherwise, if either operand is undefined, both results are undefined.
if (maybe_rhs_val) |rhs_val| {
if (rhs_val.isUndef(mod)) {
break :result .{ .overflow_bit = Value.undef, .wrapped = Value.undef };
} else if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, zero), .inst = lhs };
} else if (maybe_lhs_val) |lhs_val| {
if (lhs_val.isUndef(mod)) {
break :result .{ .overflow_bit = Value.undef, .wrapped = Value.undef };
}
const result = try sema.intSubWithOverflow(lhs_val, rhs_val, dest_ty);
break :result .{ .overflow_bit = result.overflow_bit, .wrapped = result.wrapped_result };
}
}
},
.mul_with_overflow => {
// If either of the arguments is zero, the result is zero and no overflow occurred.
// If either of the arguments is one, the result is the other operand and no overflow occurred.
// Otherwise, if either of the arguments is undefined, both results are undefined.
const scalar_one = try mod.intValue(dest_ty.scalarType(mod), 1);
if (maybe_lhs_val) |lhs_val| {
if (!lhs_val.isUndef(mod)) {
if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, zero), .inst = lhs };
} else if (try sema.compareAll(lhs_val, .eq, try maybeRepeated(sema, dest_ty, scalar_one), dest_ty)) {
break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, zero), .inst = rhs };
}
}
}
if (maybe_rhs_val) |rhs_val| {
if (!rhs_val.isUndef(mod)) {
if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, zero), .inst = rhs };
} else if (try sema.compareAll(rhs_val, .eq, try maybeRepeated(sema, dest_ty, scalar_one), dest_ty)) {
break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, zero), .inst = lhs };
}
}
}
if (maybe_lhs_val) |lhs_val| {
if (maybe_rhs_val) |rhs_val| {
if (lhs_val.isUndef(mod) or rhs_val.isUndef(mod)) {
break :result .{ .overflow_bit = Value.undef, .wrapped = Value.undef };
}
const result = try lhs_val.intMulWithOverflow(rhs_val, dest_ty, sema.arena, mod);
break :result .{ .overflow_bit = result.overflow_bit, .wrapped = result.wrapped_result };
}
}
},
.shl_with_overflow => {
// If lhs is zero, the result is zero and no overflow occurred.
// If rhs is zero, the result is lhs (even if undefined) and no overflow occurred.
// Otherwise, if either of the arguments is undefined, both results are undefined.
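// For example (hypothetical user code): `@shlWithOverflow(@as(u8, 0b1100_0000), 1)`
// evaluates to `.{ 0b1000_0000, 1 }`.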
if (maybe_lhs_val) |lhs_val| {
if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) {
break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, zero), .inst = lhs };
}
}
if (maybe_rhs_val) |rhs_val| {
if (!rhs_val.isUndef(mod) and (try rhs_val.compareAllWithZeroAdvanced(.eq, sema))) {
break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, zero), .inst = lhs };
}
}
if (maybe_lhs_val) |lhs_val| {
if (maybe_rhs_val) |rhs_val| {
if (lhs_val.isUndef(mod) or rhs_val.isUndef(mod)) {
break :result .{ .overflow_bit = Value.undef, .wrapped = Value.undef };
}
const result = try lhs_val.shlWithOverflow(rhs_val, dest_ty, sema.arena, sema.mod);
break :result .{ .overflow_bit = result.overflow_bit, .wrapped = result.wrapped_result };
}
}
},
else => unreachable,
}
const air_tag: Air.Inst.Tag = switch (zir_tag) {
.add_with_overflow => .add_with_overflow,
.mul_with_overflow => .mul_with_overflow,
.sub_with_overflow => .sub_with_overflow,
.shl_with_overflow => .shl_with_overflow,
else => unreachable,
};
const runtime_src = if (maybe_lhs_val == null) lhs_src else rhs_src;
try sema.requireRuntimeBlock(block, src, runtime_src);
return block.addInst(.{
.tag = air_tag,
.data = .{ .ty_pl = .{
.ty = try block.sema.addType(tuple_ty),
.payload = try block.sema.addExtra(Air.Bin{
.lhs = lhs,
.rhs = rhs,
}),
} },
});
};
if (result.inst != .none) {
if (try sema.resolveMaybeUndefVal(result.inst)) |some| {
result.wrapped = some;
result.inst = .none;
}
}
if (result.inst == .none) {
const values = try sema.arena.alloc(Value, 2);
values[0] = result.wrapped;
values[1] = result.overflow_bit;
const tuple_val = try Value.Tag.aggregate.create(sema.arena, values);
return sema.addConstant(tuple_ty, tuple_val);
}
const element_refs = try sema.arena.alloc(Air.Inst.Ref, 2);
element_refs[0] = result.inst;
element_refs[1] = try sema.addConstant(tuple_ty.structFieldType(1, mod), result.overflow_bit);
return block.addAggregateInit(tuple_ty, element_refs);
}
fn maybeRepeated(sema: *Sema, ty: Type, val: Value) !Value {
const mod = sema.mod;
if (ty.zigTypeTag(mod) != .Vector) return val;
return Value.Tag.repeated.create(sema.arena, val);
}
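/// Builds the result type of the overflow arithmetic builtins: a tuple of the
/// wrapped result and the overflow bit, e.g. `struct { u32, u1 }` for a `u32`
/// operand, with a vector of `u1` as the second field for vector operands.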
fn overflowArithmeticTupleType(sema: *Sema, ty: Type) !Type {
const mod = sema.mod;
const ov_ty = if (ty.zigTypeTag(mod) == .Vector) try mod.vectorType(.{
.len = ty.vectorLen(mod),
.child = .u1_type,
}) else Type.u1;
const types = [2]InternPool.Index{ ty.ip_index, ov_ty.ip_index };
const values = [2]InternPool.Index{ .none, .none };
const tuple_ty = try mod.intern(.{ .anon_struct_type = .{
.types = &types,
.values = &values,
.names = &.{},
} });
return tuple_ty.toType();
}
fn analyzeArithmetic(
sema: *Sema,
block: *Block,
/// TODO performance investigation: make this comptime?
zir_tag: Zir.Inst.Tag,
lhs: Air.Inst.Ref,
rhs: Air.Inst.Ref,
src: LazySrcLoc,
lhs_src: LazySrcLoc,
rhs_src: LazySrcLoc,
want_safety: bool,
) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const lhs_ty = sema.typeOf(lhs);
const rhs_ty = sema.typeOf(rhs);
const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(mod);
const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(mod);
try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
if (lhs_zig_ty_tag == .Pointer) switch (lhs_ty.ptrSize(mod)) {
.One, .Slice => {},
.Many, .C => {
const air_tag: Air.Inst.Tag = switch (zir_tag) {
.add => .ptr_add,
.sub => .ptr_sub,
else => return sema.fail(block, src, "invalid pointer arithmetic operator", .{}),
};
return sema.analyzePtrArithmetic(block, src, lhs, rhs, air_tag, lhs_src, rhs_src);
},
};
const instructions = &[_]Air.Inst.Ref{ lhs, rhs };
const resolved_type = try sema.resolvePeerTypes(block, src, instructions, .{
.override = &[_]?LazySrcLoc{ lhs_src, rhs_src },
});
const is_vector = resolved_type.zigTypeTag(mod) == .Vector;
const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src);
const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src);
const scalar_tag = resolved_type.scalarType(mod).zigTypeTag(mod);
const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt;
try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, zir_tag);
const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(casted_lhs);
const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(casted_rhs);
const rs: struct { src: LazySrcLoc, air_tag: Air.Inst.Tag } = rs: {
switch (zir_tag) {
.add, .add_unsafe => {
// For integers:
// If either of the operands are zero, then the other operand is
// returned, even if it is undefined.
// If either of the operands are undefined, it's a compile error
// because there is a possible value for which the addition would
// overflow (max_int), causing illegal behavior.
// For floats: either operand being undef makes the result undef.
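// For example (hypothetical user code): `@as(u8, 255) + 1` in a comptime
// context is a compile error for integer overflow rather than wrapping.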
if (maybe_lhs_val) |lhs_val| {
if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) {
return casted_rhs;
}
}
if (maybe_rhs_val) |rhs_val| {
if (rhs_val.isUndef(mod)) {
if (is_int) {
return sema.failWithUseOfUndef(block, rhs_src);
} else {
return sema.addConstUndef(resolved_type);
}
}
if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
return casted_lhs;
}
}
const air_tag: Air.Inst.Tag = if (block.float_mode == .Optimized) .add_optimized else .add;
if (maybe_lhs_val) |lhs_val| {
if (lhs_val.isUndef(mod)) {
if (is_int) {
return sema.failWithUseOfUndef(block, lhs_src);
} else {
return sema.addConstUndef(resolved_type);
}
}
if (maybe_rhs_val) |rhs_val| {
if (is_int) {
const sum = try sema.intAdd(lhs_val, rhs_val, resolved_type);
var vector_index: usize = undefined;
if (!(try sema.intFitsInType(sum, resolved_type, &vector_index))) {
return sema.failWithIntegerOverflow(block, src, resolved_type, sum, vector_index);
}
return sema.addConstant(resolved_type, sum);
} else {
return sema.addConstant(
resolved_type,
try Value.floatAdd(lhs_val, rhs_val, resolved_type, sema.arena, mod),
);
}
} else break :rs .{ .src = rhs_src, .air_tag = air_tag };
} else break :rs .{ .src = lhs_src, .air_tag = air_tag };
},
.addwrap => {
// Integers only; floats are checked above.
// If either of the operands are zero, the other operand is returned.
// If either of the operands are undefined, the result is undefined.
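// For example (hypothetical user code): `@as(u8, 255) +% 1` is 0.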
if (maybe_lhs_val) |lhs_val| {
if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) {
return casted_rhs;
}
}
const air_tag: Air.Inst.Tag = if (block.float_mode == .Optimized) .addwrap_optimized else .addwrap;
if (maybe_rhs_val) |rhs_val| {
if (rhs_val.isUndef(mod)) {
return sema.addConstUndef(resolved_type);
}
if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
return casted_lhs;
}
if (maybe_lhs_val) |lhs_val| {
return sema.addConstant(
resolved_type,
try sema.numberAddWrapScalar(lhs_val, rhs_val, resolved_type),
);
} else break :rs .{ .src = lhs_src, .air_tag = air_tag };
} else break :rs .{ .src = rhs_src, .air_tag = air_tag };
},
.add_sat => {
// Integers only; floats are checked above.
// If either of the operands are zero, then the other operand is returned.
// If either of the operands are undefined, the result is undefined.
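// For example (hypothetical user code): `@as(u8, 250) +| 10` saturates to 255.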
if (maybe_lhs_val) |lhs_val| {
if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) {
return casted_rhs;
}
}
if (maybe_rhs_val) |rhs_val| {
if (rhs_val.isUndef(mod)) {
return sema.addConstUndef(resolved_type);
}
if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
return casted_lhs;
}
if (maybe_lhs_val) |lhs_val| {
const val = if (scalar_tag == .ComptimeInt)
try sema.intAdd(lhs_val, rhs_val, resolved_type)
else
try lhs_val.intAddSat(rhs_val, resolved_type, sema.arena, mod);
return sema.addConstant(resolved_type, val);
} else break :rs .{ .src = lhs_src, .air_tag = .add_sat };
} else break :rs .{ .src = rhs_src, .air_tag = .add_sat };
},
.sub => {
// For integers:
// If the rhs is zero, then the lhs is returned, even if it is
// undefined.
// If either of the operands are undefined, it's a compile error
// because there is a possible value for which the subtraction would
// overflow, causing illegal behavior.
// For floats: either operand being undef makes the result undef.
if (maybe_rhs_val) |rhs_val| {
if (rhs_val.isUndef(mod)) {
if (is_int) {
return sema.failWithUseOfUndef(block, rhs_src);
} else {
return sema.addConstUndef(resolved_type);
}
}
if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
return casted_lhs;
}
}
const air_tag: Air.Inst.Tag = if (block.float_mode == .Optimized) .sub_optimized else .sub;
if (maybe_lhs_val) |lhs_val| {
if (lhs_val.isUndef(mod)) {
if (is_int) {
return sema.failWithUseOfUndef(block, lhs_src);
} else {
return sema.addConstUndef(resolved_type);
}
}
if (maybe_rhs_val) |rhs_val| {
if (is_int) {
const diff = try sema.intSub(lhs_val, rhs_val, resolved_type);
var vector_index: usize = undefined;
if (!(try sema.intFitsInType(diff, resolved_type, &vector_index))) {
return sema.failWithIntegerOverflow(block, src, resolved_type, diff, vector_index);
}
return sema.addConstant(resolved_type, diff);
} else {
return sema.addConstant(
resolved_type,
try Value.floatSub(lhs_val, rhs_val, resolved_type, sema.arena, mod),
);
}
} else break :rs .{ .src = rhs_src, .air_tag = air_tag };
} else break :rs .{ .src = lhs_src, .air_tag = air_tag };
},
.subwrap => {
// Integers only; floats are checked above.
// If the RHS is zero, then the lhs is returned, even if it is undefined.
// If either of the operands are undefined, the result is undefined.
if (maybe_rhs_val) |rhs_val| {
if (rhs_val.isUndef(mod)) {
return sema.addConstUndef(resolved_type);
}
if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
return casted_lhs;
}
}
const air_tag: Air.Inst.Tag = if (block.float_mode == .Optimized) .subwrap_optimized else .subwrap;
if (maybe_lhs_val) |lhs_val| {
if (lhs_val.isUndef(mod)) {
return sema.addConstUndef(resolved_type);
}
if (maybe_rhs_val) |rhs_val| {
return sema.addConstant(
resolved_type,
try sema.numberSubWrapScalar(lhs_val, rhs_val, resolved_type),
);
} else break :rs .{ .src = rhs_src, .air_tag = air_tag };
} else break :rs .{ .src = lhs_src, .air_tag = air_tag };
},
.sub_sat => {
// Integers only; floats are checked above.
// If the RHS is zero, result is LHS.
// If either of the operands are undefined, result is undefined.
if (maybe_rhs_val) |rhs_val| {
if (rhs_val.isUndef(mod)) {
return sema.addConstUndef(resolved_type);
}
if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
return casted_lhs;
}
}
if (maybe_lhs_val) |lhs_val| {
if (lhs_val.isUndef(mod)) {
return sema.addConstUndef(resolved_type);
}
if (maybe_rhs_val) |rhs_val| {
const val = if (scalar_tag == .ComptimeInt)
try sema.intSub(lhs_val, rhs_val, resolved_type)
else
try lhs_val.intSubSat(rhs_val, resolved_type, sema.arena, mod);
return sema.addConstant(resolved_type, val);
} else break :rs .{ .src = rhs_src, .air_tag = .sub_sat };
} else break :rs .{ .src = lhs_src, .air_tag = .sub_sat };
},
.mul => {
// For integers:
// If either of the operands are zero, the result is zero.
// If either of the operands are one, the result is the other
// operand, even if it is undefined.
// If either of the operands are undefined, it's a compile error
// because there is a possible value for which the multiplication
// would overflow (max_int), causing illegal behavior.
// For floats: either operand being undef makes the result undef.
// If either of the operands are inf, and the other operand is zero,
// the result is nan.
// If either of the operands are nan, the result is nan.
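// For example (hypothetical user code): `@as(f32, 0.0) * std.math.inf(f32)`
// is NaN, so a comptime-known zero operand cannot fold the result to zero
// for floats.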
const scalar_zero = switch (scalar_tag) {
.ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0),
.ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0),
else => unreachable,
};
if (maybe_lhs_val) |lhs_val| {
if (!lhs_val.isUndef(mod)) {
if (lhs_val.isNan(mod)) {
return sema.addConstant(resolved_type, lhs_val);
}
if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) lz: {
if (maybe_rhs_val) |rhs_val| {
if (rhs_val.isNan(mod)) {
return sema.addConstant(resolved_type, rhs_val);
}
if (rhs_val.isInf(mod)) {
return sema.addConstant(
resolved_type,
try mod.floatValue(resolved_type, std.math.nan_f128),
);
}
} else if (resolved_type.isAnyFloat()) {
break :lz;
}
const zero_val = if (is_vector) b: {
break :b try Value.Tag.repeated.create(sema.arena, scalar_zero);
} else scalar_zero;
return sema.addConstant(resolved_type, zero_val);
}
if (try sema.compareAll(lhs_val, .eq, try mod.intValue(resolved_type, 1), resolved_type)) {
return casted_rhs;
}
}
}
const air_tag: Air.Inst.Tag = if (block.float_mode == .Optimized) .mul_optimized else .mul;
if (maybe_rhs_val) |rhs_val| {
if (rhs_val.isUndef(mod)) {
if (is_int) {
return sema.failWithUseOfUndef(block, rhs_src);
} else {
return sema.addConstUndef(resolved_type);
}
}
if (rhs_val.isNan(mod)) {
return sema.addConstant(resolved_type, rhs_val);
}
if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) rz: {
if (maybe_lhs_val) |lhs_val| {
if (lhs_val.isInf(mod)) {
return sema.addConstant(
resolved_type,
try mod.floatValue(resolved_type, std.math.nan_f128),
);
}
} else if (resolved_type.isAnyFloat()) {
break :rz;
}
const zero_val = if (is_vector) b: {
break :b try Value.Tag.repeated.create(sema.arena, scalar_zero);
} else scalar_zero;
return sema.addConstant(resolved_type, zero_val);
}
if (try sema.compareAll(rhs_val, .eq, try mod.intValue(resolved_type, 1), resolved_type)) {
return casted_lhs;
}
if (maybe_lhs_val) |lhs_val| {
if (lhs_val.isUndef(mod)) {
if (is_int) {
return sema.failWithUseOfUndef(block, lhs_src);
} else {
return sema.addConstUndef(resolved_type);
}
}
if (is_int) {
const product = try lhs_val.intMul(rhs_val, resolved_type, sema.arena, sema.mod);
var vector_index: usize = undefined;
if (!(try sema.intFitsInType(product, resolved_type, &vector_index))) {
return sema.failWithIntegerOverflow(block, src, resolved_type, product, vector_index);
}
return sema.addConstant(resolved_type, product);
} else {
return sema.addConstant(
resolved_type,
try lhs_val.floatMul(rhs_val, resolved_type, sema.arena, sema.mod),
);
}
} else break :rs .{ .src = lhs_src, .air_tag = air_tag };
} else break :rs .{ .src = rhs_src, .air_tag = air_tag };
},
.mulwrap => {
// Integers only; floats are handled above.
// If either of the operands are zero, result is zero.
// If either of the operands are one, result is the other operand.
// If either of the operands are undefined, result is undefined.
const scalar_zero = switch (scalar_tag) {
.ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0),
.ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0),
else => unreachable,
};
if (maybe_lhs_val) |lhs_val| {
if (!lhs_val.isUndef(mod)) {
if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
const zero_val = if (is_vector) b: {
break :b try Value.Tag.repeated.create(sema.arena, scalar_zero);
} else scalar_zero;
return sema.addConstant(resolved_type, zero_val);
}
if (try sema.compareAll(lhs_val, .eq, try mod.intValue(resolved_type, 1), resolved_type)) {
return casted_rhs;
}
}
}
const air_tag: Air.Inst.Tag = if (block.float_mode == .Optimized) .mulwrap_optimized else .mulwrap;
if (maybe_rhs_val) |rhs_val| {
if (rhs_val.isUndef(mod)) {
return sema.addConstUndef(resolved_type);
}
if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
const zero_val = if (is_vector) b: {
break :b try Value.Tag.repeated.create(sema.arena, scalar_zero);
} else scalar_zero;
return sema.addConstant(resolved_type, zero_val);
}
if (try sema.compareAll(rhs_val, .eq, try mod.intValue(resolved_type, 1), resolved_type)) {
return casted_lhs;
}
if (maybe_lhs_val) |lhs_val| {
if (lhs_val.isUndef(mod)) {
return sema.addConstUndef(resolved_type);
}
return sema.addConstant(
resolved_type,
try lhs_val.numberMulWrap(rhs_val, resolved_type, sema.arena, sema.mod),
);
} else break :rs .{ .src = lhs_src, .air_tag = air_tag };
} else break :rs .{ .src = rhs_src, .air_tag = air_tag };
},
.mul_sat => {
// Integers only; floats are checked above.
// If either of the operands are zero, result is zero.
// If either of the operands are one, result is the other operand.
// If either of the operands are undefined, result is undefined.
const scalar_zero = switch (scalar_tag) {
.ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0),
.ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0),
else => unreachable,
};
if (maybe_lhs_val) |lhs_val| {
if (!lhs_val.isUndef(mod)) {
if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
const zero_val = if (is_vector) b: {
break :b try Value.Tag.repeated.create(sema.arena, scalar_zero);
} else scalar_zero;
return sema.addConstant(resolved_type, zero_val);
}
if (try sema.compareAll(lhs_val, .eq, try mod.intValue(resolved_type, 1), resolved_type)) {
return casted_rhs;
}
}
}
if (maybe_rhs_val) |rhs_val| {
if (rhs_val.isUndef(mod)) {
return sema.addConstUndef(resolved_type);
}
if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
const zero_val = if (is_vector) b: {
break :b try Value.Tag.repeated.create(sema.arena, scalar_zero);
} else scalar_zero;
return sema.addConstant(resolved_type, zero_val);
}
if (try sema.compareAll(rhs_val, .eq, try mod.intValue(resolved_type, 1), resolved_type)) {
return casted_lhs;
}
if (maybe_lhs_val) |lhs_val| {
if (lhs_val.isUndef(mod)) {
return sema.addConstUndef(resolved_type);
}
const val = if (scalar_tag == .ComptimeInt)
try lhs_val.intMul(rhs_val, resolved_type, sema.arena, sema.mod)
else
try lhs_val.intMulSat(rhs_val, resolved_type, sema.arena, sema.mod);
return sema.addConstant(resolved_type, val);
} else break :rs .{ .src = lhs_src, .air_tag = .mul_sat };
} else break :rs .{ .src = rhs_src, .air_tag = .mul_sat };
},
else => unreachable,
}
};
try sema.requireRuntimeBlock(block, src, rs.src);
if (block.wantSafety() and want_safety) {
if (scalar_tag == .Int) {
const maybe_op_ov: ?Air.Inst.Tag = switch (rs.air_tag) {
.add => .add_with_overflow,
.sub => .sub_with_overflow,
.mul => .mul_with_overflow,
else => null,
};
if (maybe_op_ov) |op_ov_tag| {
const op_ov_tuple_ty = try sema.overflowArithmeticTupleType(resolved_type);
const op_ov = try block.addInst(.{
.tag = op_ov_tag,
.data = .{ .ty_pl = .{
.ty = try sema.addType(op_ov_tuple_ty),
.payload = try sema.addExtra(Air.Bin{
.lhs = casted_lhs,
.rhs = casted_rhs,
}),
} },
});
const ov_bit = try sema.tupleFieldValByIndex(block, src, op_ov, 1, op_ov_tuple_ty);
const any_ov_bit = if (resolved_type.zigTypeTag(mod) == .Vector)
try block.addInst(.{
.tag = if (block.float_mode == .Optimized) .reduce_optimized else .reduce,
.data = .{ .reduce = .{
.operand = ov_bit,
.operation = .Or,
} },
})
else
ov_bit;
const zero_ov = try sema.addConstant(Type.u1, try mod.intValue(Type.u1, 0));
const no_ov = try block.addBinOp(.cmp_eq, any_ov_bit, zero_ov);
try sema.addSafetyCheck(block, no_ov, .integer_overflow);
return sema.tupleFieldValByIndex(block, src, op_ov, 0, op_ov_tuple_ty);
}
}
}
return block.addBinOp(rs.air_tag, casted_lhs, casted_rhs);
}
fn analyzePtrArithmetic(
sema: *Sema,
block: *Block,
op_src: LazySrcLoc,
ptr: Air.Inst.Ref,
uncasted_offset: Air.Inst.Ref,
air_tag: Air.Inst.Tag,
ptr_src: LazySrcLoc,
offset_src: LazySrcLoc,
) CompileError!Air.Inst.Ref {
// TODO if the operand is comptime-known to be negative, or is a negative int,
// coerce to isize instead of usize.
const offset = try sema.coerce(block, Type.usize, uncasted_offset, offset_src);
const mod = sema.mod;
const opt_ptr_val = try sema.resolveMaybeUndefVal(ptr);
const opt_off_val = try sema.resolveDefinedValue(block, offset_src, offset);
const ptr_ty = sema.typeOf(ptr);
const ptr_info = ptr_ty.ptrInfo(mod);
const elem_ty = if (ptr_info.size == .One and ptr_info.pointee_type.zigTypeTag(mod) == .Array)
ptr_info.pointee_type.childType(mod)
else
ptr_info.pointee_type;
const new_ptr_ty = t: {
// Calculate the new pointer alignment.
// This code is duplicated in `elemPtrType`.
if (ptr_info.@"align" == 0) {
// ABI-aligned pointer. Any pointer arithmetic maintains the same ABI-alignedness.
break :t ptr_ty;
}
// If the addend is not a comptime-known value we can still count on
// it being a multiple of the type size.
const elem_size = elem_ty.abiSize(mod);
const addend = if (opt_off_val) |off_val| a: {
const off_int = try sema.usizeCast(block, offset_src, off_val.toUnsignedInt(mod));
break :a elem_size * off_int;
} else elem_size;
// The resulting pointer is aligned to the greatest common power-of-two
// divisor of the offset (an arbitrary number) and the alignment factor
// (always a power of two, non-zero).
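// For example, with an element size of 4 and a pointer alignment of 8, an
// unknown offset gives addend 4, so 1 << @ctz(4 | 8) yields a new alignment
// of 4.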
const new_align = @as(u32, 1) << @intCast(u5, @ctz(addend | ptr_info.@"align"));
break :t try Type.ptr(sema.arena, sema.mod, .{
.pointee_type = ptr_info.pointee_type,
.sentinel = ptr_info.sentinel,
.@"align" = new_align,
.@"addrspace" = ptr_info.@"addrspace",
.mutable = ptr_info.mutable,
.@"allowzero" = ptr_info.@"allowzero",
.@"volatile" = ptr_info.@"volatile",
.size = ptr_info.size,
});
};
const runtime_src = rs: {
if (opt_ptr_val) |ptr_val| {
if (opt_off_val) |offset_val| {
if (ptr_val.isUndef(mod)) return sema.addConstUndef(new_ptr_ty);
const offset_int = try sema.usizeCast(block, offset_src, offset_val.toUnsignedInt(mod));
if (offset_int == 0) return ptr;
if (try ptr_val.getUnsignedIntAdvanced(mod, sema)) |addr| {
const elem_size = elem_ty.abiSize(mod);
const new_addr = switch (air_tag) {
.ptr_add => addr + elem_size * offset_int,
.ptr_sub => addr - elem_size * offset_int,
else => unreachable,
};
const new_ptr_val = try mod.ptrIntValue(new_ptr_ty, new_addr);
return sema.addConstant(new_ptr_ty, new_ptr_val);
}
if (air_tag == .ptr_sub) {
return sema.fail(block, op_src, "TODO implement Sema comptime pointer subtraction", .{});
}
const new_ptr_val = try ptr_val.elemPtr(ptr_ty, sema.arena, offset_int, sema.mod);
return sema.addConstant(new_ptr_ty, new_ptr_val);
} else break :rs offset_src;
} else break :rs ptr_src;
};
try sema.requireRuntimeBlock(block, op_src, runtime_src);
return block.addInst(.{
.tag = air_tag,
.data = .{ .ty_pl = .{
.ty = try sema.addType(new_ptr_ty),
.payload = try sema.addExtra(Air.Bin{
.lhs = ptr,
.rhs = offset,
}),
} },
});
}
fn zirLoad(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const src = inst_data.src();
const ptr_src = src; // TODO better source location
const ptr = try sema.resolveInst(inst_data.operand);
return sema.analyzeLoad(block, src, ptr, ptr_src);
}
fn zirAsm(
sema: *Sema,
block: *Block,
extended: Zir.Inst.Extended.InstData,
tmpl_is_expr: bool,
) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
const extra = sema.code.extraData(Zir.Inst.Asm, extended.operand);
const src = LazySrcLoc.nodeOffset(extra.data.src_node);
const ret_ty_src: LazySrcLoc = .{ .node_offset_asm_ret_ty = extra.data.src_node };
const outputs_len = @truncate(u5, extended.small);
const inputs_len = @truncate(u5, extended.small >> 5);
const clobbers_len = @truncate(u5, extended.small >> 10);
const is_volatile = @truncate(u1, extended.small >> 15) != 0;
const is_global_assembly = sema.func == null;
const asm_source: []const u8 = if (tmpl_is_expr) blk: {
const tmpl = @intToEnum(Zir.Inst.Ref, extra.data.asm_source);
const s: []const u8 = try sema.resolveConstString(block, src, tmpl, "assembly code must be comptime-known");
break :blk s;
} else sema.code.nullTerminatedString(extra.data.asm_source);
if (is_global_assembly) {
if (outputs_len != 0) {
return sema.fail(block, src, "module-level assembly does not support outputs", .{});
}
if (inputs_len != 0) {
return sema.fail(block, src, "module-level assembly does not support inputs", .{});
}
if (clobbers_len != 0) {
return sema.fail(block, src, "module-level assembly does not support clobbers", .{});
}
if (is_volatile) {
return sema.fail(block, src, "volatile keyword is redundant on module-level assembly", .{});
}
try sema.mod.addGlobalAssembly(sema.owner_decl_index, asm_source);
return Air.Inst.Ref.void_value;
}
if (block.is_comptime) {
try sema.requireRuntimeBlock(block, src, null);
}
var extra_i = extra.end;
var output_type_bits = extra.data.output_type_bits;
var needed_capacity: usize = @typeInfo(Air.Asm).Struct.fields.len + outputs_len + inputs_len;
const ConstraintName = struct { c: []const u8, n: []const u8 };
const out_args = try sema.arena.alloc(Air.Inst.Ref, outputs_len);
const outputs = try sema.arena.alloc(ConstraintName, outputs_len);
var expr_ty = Air.Inst.Ref.void_type;
for (out_args, 0..) |*arg, out_i| {
const output = sema.code.extraData(Zir.Inst.Asm.Output, extra_i);
extra_i = output.end;
const is_type = @truncate(u1, output_type_bits) != 0;
output_type_bits >>= 1;
if (is_type) {
// Indicate the output is the asm instruction return value.
arg.* = .none;
const out_ty = try sema.resolveType(block, ret_ty_src, output.data.operand);
try sema.queueFullTypeResolution(out_ty);
expr_ty = try sema.addType(out_ty);
} else {
arg.* = try sema.resolveInst(output.data.operand);
}
const constraint = sema.code.nullTerminatedString(output.data.constraint);
const name = sema.code.nullTerminatedString(output.data.name);
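// The constraint and name are stored NUL-terminated in the AIR extra array;
// reserve enough u32 words for both strings plus their two NUL bytes, rounded
// up to a whole word.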
needed_capacity += (constraint.len + name.len + (2 + 3)) / 4;
outputs[out_i] = .{ .c = constraint, .n = name };
}
const args = try sema.arena.alloc(Air.Inst.Ref, inputs_len);
const inputs = try sema.arena.alloc(ConstraintName, inputs_len);
const mod = sema.mod;
for (args, 0..) |*arg, arg_i| {
const input = sema.code.extraData(Zir.Inst.Asm.Input, extra_i);
extra_i = input.end;
const uncasted_arg = try sema.resolveInst(input.data.operand);
const uncasted_arg_ty = sema.typeOf(uncasted_arg);
switch (uncasted_arg_ty.zigTypeTag(mod)) {
.ComptimeInt => arg.* = try sema.coerce(block, Type.usize, uncasted_arg, src),
.ComptimeFloat => arg.* = try sema.coerce(block, Type.f64, uncasted_arg, src),
else => {
arg.* = uncasted_arg;
try sema.queueFullTypeResolution(uncasted_arg_ty);
},
}
const constraint = sema.code.nullTerminatedString(input.data.constraint);
const name = sema.code.nullTerminatedString(input.data.name);
needed_capacity += (constraint.len + name.len + (2 + 3)) / 4;
inputs[arg_i] = .{ .c = constraint, .n = name };
}
const clobbers = try sema.arena.alloc([]const u8, clobbers_len);
for (clobbers) |*name| {
name.* = sema.code.nullTerminatedString(sema.code.extra[extra_i]);
extra_i += 1;
needed_capacity += name.*.len / 4 + 1;
}
needed_capacity += (asm_source.len + 3) / 4;
const gpa = sema.gpa;
try sema.air_extra.ensureUnusedCapacity(gpa, needed_capacity);
const asm_air = try block.addInst(.{
.tag = .assembly,
.data = .{ .ty_pl = .{
.ty = expr_ty,
.payload = sema.addExtraAssumeCapacity(Air.Asm{
.source_len = @intCast(u32, asm_source.len),
.outputs_len = outputs_len,
.inputs_len = @intCast(u32, args.len),
.flags = (@as(u32, @boolToInt(is_volatile)) << 31) | @intCast(u32, clobbers.len),
}),
} },
});
sema.appendRefsAssumeCapacity(out_args);
sema.appendRefsAssumeCapacity(args);
for (outputs) |o| {
const buffer = mem.sliceAsBytes(sema.air_extra.unusedCapacitySlice());
@memcpy(buffer[0..o.c.len], o.c);
buffer[o.c.len] = 0;
@memcpy(buffer[o.c.len + 1 ..][0..o.n.len], o.n);
buffer[o.c.len + 1 + o.n.len] = 0;
sema.air_extra.items.len += (o.c.len + o.n.len + (2 + 3)) / 4;
}
for (inputs) |input| {
const buffer = mem.sliceAsBytes(sema.air_extra.unusedCapacitySlice());
@memcpy(buffer[0..input.c.len], input.c);
buffer[input.c.len] = 0;
@memcpy(buffer[input.c.len + 1 ..][0..input.n.len], input.n);
buffer[input.c.len + 1 + input.n.len] = 0;
sema.air_extra.items.len += (input.c.len + input.n.len + (2 + 3)) / 4;
}
for (clobbers) |clobber| {
const buffer = mem.sliceAsBytes(sema.air_extra.unusedCapacitySlice());
@memcpy(buffer[0..clobber.len], clobber);
buffer[clobber.len] = 0;
sema.air_extra.items.len += clobber.len / 4 + 1;
}
{
const buffer = mem.sliceAsBytes(sema.air_extra.unusedCapacitySlice());
@memcpy(buffer[0..asm_source.len], asm_source);
sema.air_extra.items.len += (asm_source.len + 3) / 4;
}
return asm_air;
}
/// Only called for equality operators. See also `zirCmp`.
fn zirCmpEq(
sema: *Sema,
block: *Block,
inst: Zir.Inst.Index,
op: std.math.CompareOperator,
air_tag: Air.Inst.Tag,
) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const src: LazySrcLoc = inst_data.src();
const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node };
const lhs = try sema.resolveInst(extra.lhs);
const rhs = try sema.resolveInst(extra.rhs);
const lhs_ty = sema.typeOf(lhs);
const rhs_ty = sema.typeOf(rhs);
const lhs_ty_tag = lhs_ty.zigTypeTag(mod);
const rhs_ty_tag = rhs_ty.zigTypeTag(mod);
if (lhs_ty_tag == .Null and rhs_ty_tag == .Null) {
// null == null, null != null
if (op == .eq) {
return Air.Inst.Ref.bool_true;
} else {
return Air.Inst.Ref.bool_false;
}
}
// comparing null with optionals
if (lhs_ty_tag == .Null and (rhs_ty_tag == .Optional or rhs_ty.isCPtr(mod))) {
return sema.analyzeIsNull(block, src, rhs, op == .neq);
}
if (rhs_ty_tag == .Null and (lhs_ty_tag == .Optional or lhs_ty.isCPtr(mod))) {
return sema.analyzeIsNull(block, src, lhs, op == .neq);
}
if (lhs_ty_tag == .Null or rhs_ty_tag == .Null) {
const non_null_type = if (lhs_ty_tag == .Null) rhs_ty else lhs_ty;
return sema.fail(block, src, "comparison of '{}' with null", .{non_null_type.fmt(sema.mod)});
}
if (lhs_ty_tag == .Union and (rhs_ty_tag == .EnumLiteral or rhs_ty_tag == .Enum)) {
return sema.analyzeCmpUnionTag(block, src, lhs, lhs_src, rhs, rhs_src, op);
}
if (rhs_ty_tag == .Union and (lhs_ty_tag == .EnumLiteral or lhs_ty_tag == .Enum)) {
return sema.analyzeCmpUnionTag(block, src, rhs, rhs_src, lhs, lhs_src, op);
}
if (lhs_ty_tag == .ErrorSet and rhs_ty_tag == .ErrorSet) {
const runtime_src: LazySrcLoc = src: {
if (try sema.resolveMaybeUndefVal(lhs)) |lval| {
if (try sema.resolveMaybeUndefVal(rhs)) |rval| {
if (lval.isUndef(mod) or rval.isUndef(mod)) {
return sema.addConstUndef(Type.bool);
}
// TODO optimization opportunity: evaluate whether comparing the names with
// mem.eql is faster than calling Module.getErrorValue to get the values and
// comparing those.
const lhs_name = lval.castTag(.@"error").?.data.name;
const rhs_name = rval.castTag(.@"error").?.data.name;
if (mem.eql(u8, lhs_name, rhs_name) == (op == .eq)) {
return Air.Inst.Ref.bool_true;
} else {
return Air.Inst.Ref.bool_false;
}
} else {
break :src rhs_src;
}
} else {
break :src lhs_src;
}
};
try sema.requireRuntimeBlock(block, src, runtime_src);
return block.addBinOp(air_tag, lhs, rhs);
}
if (lhs_ty_tag == .Type and rhs_ty_tag == .Type) {
const lhs_as_type = try sema.analyzeAsType(block, lhs_src, lhs);
const rhs_as_type = try sema.analyzeAsType(block, rhs_src, rhs);
if (lhs_as_type.eql(rhs_as_type, sema.mod) == (op == .eq)) {
return Air.Inst.Ref.bool_true;
} else {
return Air.Inst.Ref.bool_false;
}
}
return sema.analyzeCmp(block, src, lhs, rhs, op, lhs_src, rhs_src, true);
}
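/// Implements comparison of a union against an enum or enum literal by
/// coercing both operands to the union's tag type and deferring to the enum
/// comparison path. Errors if the union type is untagged.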
fn analyzeCmpUnionTag(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
un: Air.Inst.Ref,
un_src: LazySrcLoc,
tag: Air.Inst.Ref,
tag_src: LazySrcLoc,
op: std.math.CompareOperator,
) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const union_ty = try sema.resolveTypeFields(sema.typeOf(un));
const union_tag_ty = union_ty.unionTagType(mod) orelse {
const msg = msg: {
const msg = try sema.errMsg(block, un_src, "comparison of union and enum literal is only valid for tagged union types", .{});
errdefer msg.destroy(sema.gpa);
try sema.mod.errNoteNonLazy(union_ty.declSrcLoc(sema.mod), msg, "union '{}' is not a tagged union", .{union_ty.fmt(sema.mod)});
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
};
// Coerce both the union and the tag to the union's tag type, and then execute the
// enum comparison codepath.
const coerced_tag = try sema.coerce(block, union_tag_ty, tag, tag_src);
const coerced_union = try sema.coerce(block, union_tag_ty, un, un_src);
if (try sema.resolveMaybeUndefVal(coerced_tag)) |enum_val| {
if (enum_val.isUndef(mod)) return sema.addConstUndef(Type.bool);
const field_ty = union_ty.unionFieldType(enum_val, sema.mod);
if (field_ty.zigTypeTag(mod) == .NoReturn) {
return Air.Inst.Ref.bool_false;
}
}
return sema.cmpSelf(block, src, coerced_union, coerced_tag, op, un_src, tag_src);
}
/// Only called for non-equality operators. See also `zirCmpEq`.
fn zirCmp(
sema: *Sema,
block: *Block,
inst: Zir.Inst.Index,
op: std.math.CompareOperator,
) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const src: LazySrcLoc = inst_data.src();
const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node };
const lhs = try sema.resolveInst(extra.lhs);
const rhs = try sema.resolveInst(extra.rhs);
return sema.analyzeCmp(block, src, lhs, rhs, op, lhs_src, rhs_src, false);
}
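/// Shared implementation of `zirCmp` and `zirCmpEq`. Dispatches to the vector
/// and numeric comparison paths, handles equality between error unions and
/// error sets, and otherwise peer-resolves the operand types before calling
/// `cmpSelf`.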
fn analyzeCmp(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
lhs: Air.Inst.Ref,
rhs: Air.Inst.Ref,
op: std.math.CompareOperator,
lhs_src: LazySrcLoc,
rhs_src: LazySrcLoc,
is_equality_cmp: bool,
) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const lhs_ty = sema.typeOf(lhs);
const rhs_ty = sema.typeOf(rhs);
if (lhs_ty.zigTypeTag(mod) != .Optional and rhs_ty.zigTypeTag(mod) != .Optional) {
try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
}
if (lhs_ty.zigTypeTag(mod) == .Vector and rhs_ty.zigTypeTag(mod) == .Vector) {
return sema.cmpVector(block, src, lhs, rhs, op, lhs_src, rhs_src);
}
if (lhs_ty.isNumeric(mod) and rhs_ty.isNumeric(mod)) {
// This operation allows any combination of integer and float types, regardless
// of signedness, comptime-ness, and bit-width, so peer type resolution is
// incorrect for numeric types.
return sema.cmpNumeric(block, src, lhs, rhs, op, lhs_src, rhs_src);
}
if (is_equality_cmp and lhs_ty.zigTypeTag(mod) == .ErrorUnion and rhs_ty.zigTypeTag(mod) == .ErrorSet) {
const casted_lhs = try sema.analyzeErrUnionCode(block, lhs_src, lhs);
return sema.cmpSelf(block, src, casted_lhs, rhs, op, lhs_src, rhs_src);
}
if (is_equality_cmp and lhs_ty.zigTypeTag(mod) == .ErrorSet and rhs_ty.zigTypeTag(mod) == .ErrorUnion) {
const casted_rhs = try sema.analyzeErrUnionCode(block, rhs_src, rhs);
return sema.cmpSelf(block, src, lhs, casted_rhs, op, lhs_src, rhs_src);
}
const instructions = &[_]Air.Inst.Ref{ lhs, rhs };
const resolved_type = try sema.resolvePeerTypes(block, src, instructions, .{ .override = &[_]?LazySrcLoc{ lhs_src, rhs_src } });
if (!resolved_type.isSelfComparable(mod, is_equality_cmp)) {
return sema.fail(block, src, "operator {s} not allowed for type '{}'", .{
compareOperatorName(op), resolved_type.fmt(sema.mod),
});
}
const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src);
const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src);
return sema.cmpSelf(block, src, casted_lhs, casted_rhs, op, lhs_src, rhs_src);
}
fn compareOperatorName(comp: std.math.CompareOperator) []const u8 {
return switch (comp) {
.lt => "<",
.lte => "<=",
.eq => "==",
.gte => ">=",
.gt => ">",
.neq => "!=",
};
}
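/// Compares two operands already coerced to the same type. Folds the result
/// when both sides are comptime-known (including vector comparisons);
/// otherwise emits a runtime comparison, with `bool` operands lowered more
/// efficiently through `runtimeBoolCmp`.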
fn cmpSelf(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
casted_lhs: Air.Inst.Ref,
casted_rhs: Air.Inst.Ref,
op: std.math.CompareOperator,
lhs_src: LazySrcLoc,
rhs_src: LazySrcLoc,
) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const resolved_type = sema.typeOf(casted_lhs);
const runtime_src: LazySrcLoc = src: {
if (try sema.resolveMaybeUndefVal(casted_lhs)) |lhs_val| {
if (lhs_val.isUndef(mod)) return sema.addConstUndef(Type.bool);
if (try sema.resolveMaybeUndefVal(casted_rhs)) |rhs_val| {
if (rhs_val.isUndef(mod)) return sema.addConstUndef(Type.bool);
if (resolved_type.zigTypeTag(mod) == .Vector) {
const result_ty = try mod.vectorType(.{
.len = resolved_type.vectorLen(mod),
.child = .bool_type,
});
const cmp_val = try sema.compareVector(lhs_val, op, rhs_val, resolved_type);
return sema.addConstant(result_ty, cmp_val);
}
if (try sema.compareAll(lhs_val, op, rhs_val, resolved_type)) {
return Air.Inst.Ref.bool_true;
} else {
return Air.Inst.Ref.bool_false;
}
} else {
if (resolved_type.zigTypeTag(mod) == .Bool) {
// We can lower bool eq/neq more efficiently.
return sema.runtimeBoolCmp(block, src, op, casted_rhs, lhs_val.toBool(mod), rhs_src);
}
break :src rhs_src;
}
} else {
// For bools, we still check the other operand, because we can lower
// bool eq/neq more efficiently.
if (resolved_type.zigTypeTag(mod) == .Bool) {
if (try sema.resolveMaybeUndefVal(casted_rhs)) |rhs_val| {
if (rhs_val.isUndef(mod)) return sema.addConstUndef(Type.bool);
return sema.runtimeBoolCmp(block, src, op, casted_lhs, rhs_val.toBool(mod), lhs_src);
}
}
break :src lhs_src;
}
};
try sema.requireRuntimeBlock(block, src, runtime_src);
if (resolved_type.zigTypeTag(mod) == .Vector) {
return block.addCmpVector(casted_lhs, casted_rhs, op);
}
const tag = Air.Inst.Tag.fromCmpOp(op, block.float_mode == .Optimized);
return block.addBinOp(tag, casted_lhs, casted_rhs);
}
/// cmp_eq (x, false) => not(x)
/// cmp_eq (x, true ) => x
/// cmp_neq(x, false) => x
/// cmp_neq(x, true ) => not(x)
fn runtimeBoolCmp(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
op: std.math.CompareOperator,
lhs: Air.Inst.Ref,
rhs: bool,
runtime_src: LazySrcLoc,
) CompileError!Air.Inst.Ref {
if ((op == .neq) == rhs) {
try sema.requireRuntimeBlock(block, src, runtime_src);
return block.addTyOp(.not, Type.bool, lhs);
} else {
return lhs;
}
}
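/// Analyzes `@sizeOf`. Types with no size (`Fn`, `NoReturn`, `Undefined`,
/// `Null`, `Opaque`) are compile errors; `Type`, `EnumLiteral`, comptime
/// numbers, and `Void` evaluate to 0. Otherwise the result is a lazy ABI size
/// of type `comptime_int`.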
fn zirSizeOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
const ty = try sema.resolveType(block, operand_src, inst_data.operand);
switch (ty.zigTypeTag(mod)) {
.Fn,
.NoReturn,
.Undefined,
.Null,
.Opaque,
=> return sema.fail(block, operand_src, "no size available for type '{}'", .{ty.fmt(sema.mod)}),
.Type,
.EnumLiteral,
.ComptimeFloat,
.ComptimeInt,
.Void,
=> return sema.addIntUnsigned(Type.comptime_int, 0),
.Bool,
.Int,
.Float,
.Pointer,
.Array,
.Struct,
.Optional,
.ErrorUnion,
.ErrorSet,
.Enum,
.Union,
.Vector,
.Frame,
.AnyFrame,
=> {},
}
const val = try ty.lazyAbiSize(mod, sema.arena);
if (val.isLazySize()) {
try sema.queueFullTypeResolution(ty);
}
return sema.addConstant(Type.comptime_int, val);
}
fn zirBitSizeOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
const operand_ty = try sema.resolveType(block, operand_src, inst_data.operand);
switch (operand_ty.zigTypeTag(mod)) {
.Fn,
.NoReturn,
.Undefined,
.Null,
.Opaque,
=> return sema.fail(block, operand_src, "no size available for type '{}'", .{operand_ty.fmt(sema.mod)}),
.Type,
.EnumLiteral,
.ComptimeFloat,
.ComptimeInt,
.Void,
=> return sema.addIntUnsigned(Type.comptime_int, 0),
.Bool,
.Int,
.Float,
.Pointer,
.Array,
.Struct,
.Optional,
.ErrorUnion,
.ErrorSet,
.Enum,
.Union,
.Vector,
.Frame,
.AnyFrame,
=> {},
}
const bit_size = try operand_ty.bitSizeAdvanced(mod, sema);
return sema.addIntUnsigned(Type.comptime_int, bit_size);
}
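/// Analyzes `@This`: resolves to the value of the `Decl` that owns the
/// current namespace.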
fn zirThis(
sema: *Sema,
block: *Block,
extended: Zir.Inst.Extended.InstData,
) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const this_decl_index = mod.namespaceDeclIndex(block.namespace);
const src = LazySrcLoc.nodeOffset(@bitCast(i32, extended.operand));
return sema.analyzeDeclVal(block, src, this_decl_index);
}
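/// Records the operand's type and, when comptime-known, its value into the
/// current WIP capture scope, keyed by this ZIR instruction, so that
/// `zirClosureGet` can retrieve it later. Runtime values are stored with a
/// dummy `unreachable` value, preserving only the type.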
fn zirClosureCapture(
sema: *Sema,
block: *Block,
inst: Zir.Inst.Index,
) CompileError!void {
// TODO: Compile error when closed over values are modified
const inst_data = sema.code.instructions.items(.data)[inst].un_tok;
// Closures are not necessarily constant values. For example, the
// code might do something like this:
// fn foo(x: anytype) void { const S = struct {field: @TypeOf(x)}; }
// ...in which case the closure_capture instruction has access to a runtime
// value only. In such a case we preserve the type and use a dummy runtime value.
const operand = try sema.resolveInst(inst_data.operand);
const val = (try sema.resolveMaybeUndefValAllowVariables(operand)) orelse
Value.@"unreachable";
try block.wip_capture_scope.captures.putNoClobber(sema.gpa, inst, .{
.ty = sema.typeOf(operand),
.val = try val.copy(sema.perm_arena),
});
}
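/// Retrieves a value captured by `zirClosureCapture`, walking up the capture
/// scope chain. Referencing a runtime-only capture is an error outside a
/// function or across a function boundary; inside a `@TypeOf` block a dummy
/// instruction with the captured type is emitted instead.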
fn zirClosureGet(
sema: *Sema,
block: *Block,
inst: Zir.Inst.Index,
) CompileError!Air.Inst.Ref {
const mod = sema.mod;
// TODO CLOSURE: Test this with inline functions
const inst_data = sema.code.instructions.items(.data)[inst].inst_node;
var scope: *CaptureScope = mod.declPtr(block.src_decl).src_scope.?;
// Note: The target closure must be in this scope list.
// If it's not here, the ZIR is invalid or the list is broken.
const tv = while (true) {
// Note: We don't need to add a dependency here, because
// decls always depend on their lexical parents.
// Fail this decl if a scope it depended on failed.
if (scope.failed()) {
if (sema.owner_func) |owner_func| {
owner_func.state = .dependency_failure;
} else {
sema.owner_decl.analysis = .dependency_failure;
}
return error.AnalysisFail;
}
if (scope.captures.getPtr(inst_data.inst)) |tv| {
break tv;
}
scope = scope.parent.?;
};
if (tv.val.ip_index == .unreachable_value and !block.is_typeof and sema.func == null) {
const msg = msg: {
const name = name: {
const file = sema.owner_decl.getFileScope(mod);
const tree = file.getTree(mod.gpa) catch |err| {
// In this case we emit a warning + a less precise source location.
log.warn("unable to load {s}: {s}", .{
file.sub_file_path, @errorName(err),
});
break :name null;
};
const node = sema.owner_decl.relativeToNodeIndex(inst_data.src_node);
const token = tree.nodes.items(.main_token)[node];
break :name tree.tokenSlice(token);
};
const msg = if (name) |some|
try sema.errMsg(block, inst_data.src(), "'{s}' not accessible outside function scope", .{some})
else
try sema.errMsg(block, inst_data.src(), "variable not accessible outside function scope", .{});
errdefer msg.destroy(sema.gpa);
// TODO add "declared here" note
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
if (tv.val.ip_index == .unreachable_value and !block.is_typeof and !block.is_comptime and sema.func != null) {
const msg = msg: {
const name = name: {
const file = sema.owner_decl.getFileScope(mod);
const tree = file.getTree(mod.gpa) catch |err| {
// In this case we emit a warning + a less precise source location.
log.warn("unable to load {s}: {s}", .{
file.sub_file_path, @errorName(err),
});
break :name null;
};
const node = sema.owner_decl.relativeToNodeIndex(inst_data.src_node);
const token = tree.nodes.items(.main_token)[node];
break :name tree.tokenSlice(token);
};
const msg = if (name) |some|
try sema.errMsg(block, inst_data.src(), "'{s}' not accessible from inner function", .{some})
else
try sema.errMsg(block, inst_data.src(), "variable not accessible from inner function", .{});
errdefer msg.destroy(sema.gpa);
try sema.errNote(block, LazySrcLoc.nodeOffset(0), msg, "crossed function definition here", .{});
// TODO add "declared here" note
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
if (tv.val.ip_index == .unreachable_value) {
assert(block.is_typeof);
// We need a dummy runtime instruction with the correct type.
return block.addTy(.alloc, tv.ty);
}
return sema.addConstant(tv.ty, tv.val);
}
fn zirRetAddr(
sema: *Sema,
block: *Block,
extended: Zir.Inst.Extended.InstData,
) CompileError!Air.Inst.Ref {
_ = extended;
if (block.is_comptime) {
// TODO: we could give a meaningful lazy value here. #14938
return sema.addIntUnsigned(Type.usize, 0);
} else {
return block.addNoOp(.ret_addr);
}
}
fn zirFrameAddress(
sema: *Sema,
block: *Block,
extended: Zir.Inst.Extended.InstData,
) CompileError!Air.Inst.Ref {
const src = LazySrcLoc.nodeOffset(@bitCast(i32, extended.operand));
try sema.requireRuntimeBlock(block, src, null);
return try block.addNoOp(.frame_addr);
}
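/// Analyzes `@src`: builds a comptime `std.builtin.SourceLocation` aggregate
/// holding the enclosing function's name, the file path, and the 1-based line
/// and column. Fails when used outside a function.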
fn zirBuiltinSrc(
sema: *Sema,
block: *Block,
extended: Zir.Inst.Extended.InstData,
) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
const mod = sema.mod;
const extra = sema.code.extraData(Zir.Inst.Src, extended.operand).data;
const src = LazySrcLoc.nodeOffset(extra.node);
const func = sema.func orelse return sema.fail(block, src, "@src outside function", .{});
const fn_owner_decl = mod.declPtr(func.owner_decl);
const func_name_val = blk: {
var anon_decl = try block.startAnonDecl();
defer anon_decl.deinit();
const name = std.mem.span(fn_owner_decl.name);
const bytes = try anon_decl.arena().dupe(u8, name[0 .. name.len + 1]);
const new_decl = try anon_decl.finish(
try Type.array(anon_decl.arena(), bytes.len - 1, try mod.intValue(Type.u8, 0), Type.u8, mod),
try Value.Tag.bytes.create(anon_decl.arena(), bytes),
0, // default alignment
);
break :blk try Value.Tag.decl_ref.create(sema.arena, new_decl);
};
const file_name_val = blk: {
var anon_decl = try block.startAnonDecl();
defer anon_decl.deinit();
// The compiler must not call realpath anywhere.
const name = try fn_owner_decl.getFileScope(mod).fullPathZ(anon_decl.arena());
const new_decl = try anon_decl.finish(
try Type.array(anon_decl.arena(), name.len, try mod.intValue(Type.u8, 0), Type.u8, mod),
try Value.Tag.bytes.create(anon_decl.arena(), name[0 .. name.len + 1]),
0, // default alignment
);
break :blk try Value.Tag.decl_ref.create(sema.arena, new_decl);
};
const field_values = try sema.arena.alloc(Value, 4);
// file: [:0]const u8,
field_values[0] = file_name_val;
// fn_name: [:0]const u8,
field_values[1] = func_name_val;
// line: u32
field_values[2] = try Value.Tag.runtime_value.create(sema.arena, try mod.intValue(Type.u32, extra.line + 1));
// column: u32,
field_values[3] = try mod.intValue(Type.u32, extra.column + 1);
return sema.addConstant(
try sema.getBuiltinType("SourceLocation"),
try Value.Tag.aggregate.create(sema.arena, field_values),
);
}
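/// Analyzes `@typeInfo`, constructing the corresponding `std.builtin.Type`
/// union value at comptime. The aggregate cases (`Fn`, `ErrorSet`, `Enum`,
/// `Union`, `Struct`) build their field and declaration slices inside
/// anonymous decls.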
fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const src = inst_data.src();
const ty = try sema.resolveType(block, src, inst_data.operand);
const type_info_ty = try sema.getBuiltinType("Type");
const type_info_tag_ty = type_info_ty.unionTagType(mod).?;
switch (ty.zigTypeTag(mod)) {
.Type => return sema.addConstant(
type_info_ty,
try Value.Tag.@"union".create(sema.arena, .{
.tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Type)),
.val = Value.void,
}),
),
.Void => return sema.addConstant(
type_info_ty,
try Value.Tag.@"union".create(sema.arena, .{
.tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Void)),
.val = Value.void,
}),
),
.Bool => return sema.addConstant(
type_info_ty,
try Value.Tag.@"union".create(sema.arena, .{
.tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Bool)),
.val = Value.void,
}),
),
.NoReturn => return sema.addConstant(
type_info_ty,
try Value.Tag.@"union".create(sema.arena, .{
.tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.NoReturn)),
.val = Value.void,
}),
),
.ComptimeFloat => return sema.addConstant(
type_info_ty,
try Value.Tag.@"union".create(sema.arena, .{
.tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.ComptimeFloat)),
.val = Value.void,
}),
),
.ComptimeInt => return sema.addConstant(
type_info_ty,
try Value.Tag.@"union".create(sema.arena, .{
.tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.ComptimeInt)),
.val = Value.void,
}),
),
.Undefined => return sema.addConstant(
type_info_ty,
try Value.Tag.@"union".create(sema.arena, .{
.tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Undefined)),
.val = Value.void,
}),
),
.Null => return sema.addConstant(
type_info_ty,
try Value.Tag.@"union".create(sema.arena, .{
.tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Null)),
.val = Value.void,
}),
),
.EnumLiteral => return sema.addConstant(
type_info_ty,
try Value.Tag.@"union".create(sema.arena, .{
.tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.EnumLiteral)),
.val = Value.void,
}),
),
.Fn => {
// TODO: look into memoizing this result.
const info = mod.typeToFunc(ty).?;
var params_anon_decl = try block.startAnonDecl();
defer params_anon_decl.deinit();
const param_vals = try params_anon_decl.arena().alloc(Value, info.param_types.len);
for (param_vals, info.param_types, 0..) |*param_val, param_ty, i| {
const is_generic = param_ty == .generic_poison_type;
const param_ty_val = try mod.intern_pool.get(mod.gpa, .{ .opt = .{
.ty = try mod.intern_pool.get(mod.gpa, .{ .opt_type = .type_type }),
.val = if (is_generic) .none else param_ty,
} });
const is_noalias = blk: {
const index = std.math.cast(u5, i) orelse break :blk false;
break :blk @truncate(u1, info.noalias_bits >> index) != 0;
};
const param_fields = try params_anon_decl.arena().create([3]Value);
param_fields.* = .{
// is_generic: bool,
Value.makeBool(is_generic),
// is_noalias: bool,
Value.makeBool(is_noalias),
// type: ?type,
param_ty_val.toValue(),
};
param_val.* = try Value.Tag.aggregate.create(params_anon_decl.arena(), param_fields);
}
const args_val = v: {
const fn_info_decl_index = (try sema.namespaceLookup(
block,
src,
type_info_ty.getNamespaceIndex(mod).unwrap().?,
"Fn",
)).?;
try mod.declareDeclDependency(sema.owner_decl_index, fn_info_decl_index);
try sema.ensureDeclAnalyzed(fn_info_decl_index);
const fn_info_decl = mod.declPtr(fn_info_decl_index);
const fn_ty = fn_info_decl.val.toType();
const param_info_decl_index = (try sema.namespaceLookup(
block,
src,
fn_ty.getNamespaceIndex(mod).unwrap().?,
"Param",
)).?;
try mod.declareDeclDependency(sema.owner_decl_index, param_info_decl_index);
try sema.ensureDeclAnalyzed(param_info_decl_index);
const param_info_decl = mod.declPtr(param_info_decl_index);
const param_ty = param_info_decl.val.toType();
const new_decl = try params_anon_decl.finish(
try mod.arrayType(.{
.len = param_vals.len,
.child = param_ty.ip_index,
.sentinel = .none,
}),
try Value.Tag.aggregate.create(
params_anon_decl.arena(),
param_vals,
),
0, // default alignment
);
break :v try Value.Tag.slice.create(sema.arena, .{
.ptr = try Value.Tag.decl_ref.create(sema.arena, new_decl),
.len = try mod.intValue(Type.usize, param_vals.len),
});
};
const ret_ty_opt = try mod.intern_pool.get(mod.gpa, .{ .opt = .{
.ty = try mod.intern_pool.get(mod.gpa, .{ .opt_type = .type_type }),
.val = if (info.return_type == .generic_poison_type) .none else info.return_type,
} });
const callconv_ty = try sema.getBuiltinType("CallingConvention");
const field_values = try sema.arena.create([6]Value);
field_values.* = .{
// calling_convention: CallingConvention,
try mod.enumValueFieldIndex(callconv_ty, @enumToInt(info.cc)),
// alignment: comptime_int,
try mod.intValue(Type.comptime_int, ty.abiAlignment(mod)),
// is_generic: bool,
Value.makeBool(info.is_generic),
// is_var_args: bool,
Value.makeBool(info.is_var_args),
// return_type: ?type,
ret_ty_opt.toValue(),
// args: []const Fn.Param,
args_val,
};
return sema.addConstant(
type_info_ty,
try Value.Tag.@"union".create(sema.arena, .{
.tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Fn)),
.val = try Value.Tag.aggregate.create(sema.arena, field_values),
}),
);
},
.Int => {
const signedness_ty = try sema.getBuiltinType("Signedness");
const info = ty.intInfo(mod);
const field_values = try sema.arena.alloc(Value, 2);
// signedness: Signedness,
field_values[0] = try mod.enumValueFieldIndex(signedness_ty, @enumToInt(info.signedness));
// bits: u16,
field_values[1] = try mod.intValue(Type.u16, info.bits);
return sema.addConstant(
type_info_ty,
try Value.Tag.@"union".create(sema.arena, .{
.tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Int)),
.val = try Value.Tag.aggregate.create(sema.arena, field_values),
}),
);
},
.Float => {
const field_values = try sema.arena.alloc(Value, 1);
// bits: u16,
field_values[0] = try mod.intValue(Type.u16, ty.bitSize(mod));
return sema.addConstant(
type_info_ty,
try Value.Tag.@"union".create(sema.arena, .{
.tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Float)),
.val = try Value.Tag.aggregate.create(sema.arena, field_values),
}),
);
},
.Pointer => {
const info = ty.ptrInfo(mod);
const alignment = if (info.@"align" != 0)
try mod.intValue(Type.comptime_int, info.@"align")
else
try info.pointee_type.lazyAbiAlignment(mod, sema.arena);
const addrspace_ty = try sema.getBuiltinType("AddressSpace");
const ptr_size_ty = try sema.getBuiltinType("PtrSize");
const field_values = try sema.arena.create([8]Value);
field_values.* = .{
// size: Size,
try mod.enumValueFieldIndex(ptr_size_ty, @enumToInt(info.size)),
// is_const: bool,
Value.makeBool(!info.mutable),
// is_volatile: bool,
Value.makeBool(info.@"volatile"),
// alignment: comptime_int,
alignment,
// address_space: AddressSpace
try mod.enumValueFieldIndex(addrspace_ty, @enumToInt(info.@"addrspace")),
// child: type,
try Value.Tag.ty.create(sema.arena, info.pointee_type),
// is_allowzero: bool,
Value.makeBool(info.@"allowzero"),
// sentinel: ?*const anyopaque,
try sema.optRefValue(block, info.pointee_type, info.sentinel),
};
return sema.addConstant(
type_info_ty,
try Value.Tag.@"union".create(sema.arena, .{
.tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Pointer)),
.val = try Value.Tag.aggregate.create(sema.arena, field_values),
}),
);
},
.Array => {
const info = ty.arrayInfo(mod);
const field_values = try sema.arena.alloc(Value, 3);
// len: comptime_int,
field_values[0] = try mod.intValue(Type.comptime_int, info.len);
// child: type,
field_values[1] = try Value.Tag.ty.create(sema.arena, info.elem_type);
// sentinel: ?*const anyopaque,
field_values[2] = try sema.optRefValue(block, info.elem_type, info.sentinel);
return sema.addConstant(
type_info_ty,
try Value.Tag.@"union".create(sema.arena, .{
.tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Array)),
.val = try Value.Tag.aggregate.create(sema.arena, field_values),
}),
);
},
.Vector => {
const info = ty.arrayInfo(mod);
const field_values = try sema.arena.alloc(Value, 2);
// len: comptime_int,
field_values[0] = try mod.intValue(Type.comptime_int, info.len);
// child: type,
field_values[1] = try Value.Tag.ty.create(sema.arena, info.elem_type);
return sema.addConstant(
type_info_ty,
try Value.Tag.@"union".create(sema.arena, .{
.tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Vector)),
.val = try Value.Tag.aggregate.create(sema.arena, field_values),
}),
);
},
.Optional => {
const field_values = try sema.arena.alloc(Value, 1);
// child: type,
field_values[0] = try Value.Tag.ty.create(sema.arena, ty.optionalChild(mod));
return sema.addConstant(
type_info_ty,
try Value.Tag.@"union".create(sema.arena, .{
.tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Optional)),
.val = try Value.Tag.aggregate.create(sema.arena, field_values),
}),
);
},
.ErrorSet => {
var fields_anon_decl = try block.startAnonDecl();
defer fields_anon_decl.deinit();
// Get the Error type
const error_field_ty = t: {
const set_field_ty_decl_index = (try sema.namespaceLookup(
block,
src,
type_info_ty.getNamespaceIndex(mod).unwrap().?,
"Error",
)).?;
try mod.declareDeclDependency(sema.owner_decl_index, set_field_ty_decl_index);
try sema.ensureDeclAnalyzed(set_field_ty_decl_index);
const set_field_ty_decl = mod.declPtr(set_field_ty_decl_index);
break :t set_field_ty_decl.val.toType();
};
try sema.queueFullTypeResolution(error_field_ty);
// If the error set is inferred, it must be resolved at this point.
try sema.resolveInferredErrorSetTy(block, src, ty);
// Build our list of Error values. The optional is null only for anyerror;
// otherwise the value may be a zero-length slice.
const error_field_vals: ?[]Value = if (ty.isAnyError(mod)) null else blk: {
const names = ty.errorSetNames(mod);
const vals = try fields_anon_decl.arena().alloc(Value, names.len);
for (vals, names) |*field_val, name_ip| {
const name = mod.intern_pool.stringToSlice(name_ip);
const name_val = v: {
var anon_decl = try block.startAnonDecl();
defer anon_decl.deinit();
const bytes = try anon_decl.arena().dupeZ(u8, name);
const new_decl = try anon_decl.finish(
try Type.array(anon_decl.arena(), bytes.len, try mod.intValue(Type.u8, 0), Type.u8, mod),
try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. bytes.len + 1]),
0, // default alignment
);
break :v try Value.Tag.decl_ref.create(fields_anon_decl.arena(), new_decl);
};
const error_field_fields = try fields_anon_decl.arena().create([1]Value);
error_field_fields.* = .{
// name: []const u8,
name_val,
};
field_val.* = try Value.Tag.aggregate.create(
fields_anon_decl.arena(),
error_field_fields,
);
}
break :blk vals;
};
// Build our ?[]const Error value
const errors_val = if (error_field_vals) |vals| v: {
const new_decl = try fields_anon_decl.finish(
try mod.arrayType(.{
.len = vals.len,
.child = error_field_ty.ip_index,
.sentinel = .none,
}),
try Value.Tag.aggregate.create(
fields_anon_decl.arena(),
vals,
),
0, // default alignment
);
const new_decl_val = try Value.Tag.decl_ref.create(sema.arena, new_decl);
const slice_val = try Value.Tag.slice.create(sema.arena, .{
.ptr = new_decl_val,
.len = try mod.intValue(Type.usize, vals.len),
});
break :v try Value.Tag.opt_payload.create(sema.arena, slice_val);
} else Value.null;
// Construct Type{ .ErrorSet = errors_val }
return sema.addConstant(
type_info_ty,
try Value.Tag.@"union".create(sema.arena, .{
.tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.ErrorSet)),
.val = errors_val,
}),
);
},
.ErrorUnion => {
const field_values = try sema.arena.alloc(Value, 2);
// error_set: type,
field_values[0] = try Value.Tag.ty.create(sema.arena, ty.errorUnionSet(mod));
// payload: type,
field_values[1] = try Value.Tag.ty.create(sema.arena, ty.errorUnionPayload(mod));
return sema.addConstant(
type_info_ty,
try Value.Tag.@"union".create(sema.arena, .{
.tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.ErrorUnion)),
.val = try Value.Tag.aggregate.create(sema.arena, field_values),
}),
);
},
.Enum => {
// TODO: look into memoizing this result.
const enum_type = mod.intern_pool.indexToKey(ty.ip_index).enum_type;
const is_exhaustive = Value.makeBool(enum_type.tag_mode != .nonexhaustive);
var fields_anon_decl = try block.startAnonDecl();
defer fields_anon_decl.deinit();
const enum_field_ty = t: {
const enum_field_ty_decl_index = (try sema.namespaceLookup(
block,
src,
type_info_ty.getNamespaceIndex(mod).unwrap().?,
"EnumField",
)).?;
try mod.declareDeclDependency(sema.owner_decl_index, enum_field_ty_decl_index);
try sema.ensureDeclAnalyzed(enum_field_ty_decl_index);
const enum_field_ty_decl = mod.declPtr(enum_field_ty_decl_index);
break :t enum_field_ty_decl.val.toType();
};
const enum_field_vals = try fields_anon_decl.arena().alloc(Value, enum_type.names.len);
for (enum_field_vals, 0..) |*field_val, i| {
const name_ip = enum_type.names[i];
const name = mod.intern_pool.stringToSlice(name_ip);
const name_val = v: {
var anon_decl = try block.startAnonDecl();
defer anon_decl.deinit();
const bytes = try anon_decl.arena().dupeZ(u8, name);
const new_decl = try anon_decl.finish(
try Type.array(anon_decl.arena(), bytes.len, Value.zero_u8, Type.u8, mod),
try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. bytes.len + 1]),
0, // default alignment
);
break :v try Value.Tag.decl_ref.create(fields_anon_decl.arena(), new_decl);
};
const enum_field_fields = try fields_anon_decl.arena().create([2]Value);
enum_field_fields.* = .{
// name: []const u8,
name_val,
// value: comptime_int,
try mod.intValue(Type.comptime_int, i),
};
field_val.* = try Value.Tag.aggregate.create(fields_anon_decl.arena(), enum_field_fields);
}
const fields_val = v: {
const new_decl = try fields_anon_decl.finish(
try mod.arrayType(.{
.len = enum_field_vals.len,
.child = enum_field_ty.ip_index,
.sentinel = .none,
}),
try Value.Tag.aggregate.create(
fields_anon_decl.arena(),
enum_field_vals,
),
0, // default alignment
);
break :v try Value.Tag.decl_ref.create(sema.arena, new_decl);
};
const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, enum_type.namespace);
const field_values = try sema.arena.create([4]Value);
field_values.* = .{
// tag_type: type,
enum_type.tag_ty.toValue(),
// fields: []const EnumField,
fields_val,
// decls: []const Declaration,
decls_val,
// is_exhaustive: bool,
is_exhaustive,
};
return sema.addConstant(
type_info_ty,
try Value.Tag.@"union".create(sema.arena, .{
.tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Enum)),
.val = try Value.Tag.aggregate.create(sema.arena, field_values),
}),
);
},
.Union => {
// TODO: look into memoizing this result.
var fields_anon_decl = try block.startAnonDecl();
defer fields_anon_decl.deinit();
const union_field_ty = t: {
const union_field_ty_decl_index = (try sema.namespaceLookup(
block,
src,
type_info_ty.getNamespaceIndex(mod).unwrap().?,
"UnionField",
)).?;
try mod.declareDeclDependency(sema.owner_decl_index, union_field_ty_decl_index);
try sema.ensureDeclAnalyzed(union_field_ty_decl_index);
const union_field_ty_decl = mod.declPtr(union_field_ty_decl_index);
break :t union_field_ty_decl.val.toType();
};
const union_ty = try sema.resolveTypeFields(ty);
try sema.resolveTypeLayout(ty); // Getting alignment requires type layout
const layout = union_ty.containerLayout(mod);
const union_fields = union_ty.unionFields(mod);
const union_field_vals = try fields_anon_decl.arena().alloc(Value, union_fields.count());
for (union_field_vals, 0..) |*field_val, i| {
const field = union_fields.values()[i];
const name = union_fields.keys()[i];
const name_val = v: {
var anon_decl = try block.startAnonDecl();
defer anon_decl.deinit();
const bytes = try anon_decl.arena().dupeZ(u8, name);
const new_decl = try anon_decl.finish(
try Type.array(anon_decl.arena(), bytes.len, try mod.intValue(Type.u8, 0), Type.u8, mod),
try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. bytes.len + 1]),
0, // default alignment
);
break :v try Value.Tag.decl_ref.create(fields_anon_decl.arena(), new_decl);
};
const union_field_fields = try fields_anon_decl.arena().create([3]Value);
const alignment = switch (layout) {
.Auto, .Extern => try sema.unionFieldAlignment(field),
.Packed => 0,
};
union_field_fields.* = .{
// name: []const u8,
name_val,
// type: type,
try Value.Tag.ty.create(fields_anon_decl.arena(), field.ty),
// alignment: comptime_int,
try mod.intValue(Type.comptime_int, alignment),
};
field_val.* = try Value.Tag.aggregate.create(fields_anon_decl.arena(), union_field_fields);
}
const fields_val = v: {
const new_decl = try fields_anon_decl.finish(
try mod.arrayType(.{
.len = union_field_vals.len,
.child = union_field_ty.ip_index,
.sentinel = .none,
}),
try Value.Tag.aggregate.create(
fields_anon_decl.arena(),
try fields_anon_decl.arena().dupe(Value, union_field_vals),
),
0, // default alignment
);
break :v try Value.Tag.slice.create(sema.arena, .{
.ptr = try Value.Tag.decl_ref.create(sema.arena, new_decl),
.len = try mod.intValue(Type.usize, union_field_vals.len),
});
};
const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, union_ty.getNamespaceIndex(mod));
const enum_tag_ty_val = if (union_ty.unionTagType(mod)) |tag_ty| v: {
const ty_val = try Value.Tag.ty.create(sema.arena, tag_ty);
break :v try Value.Tag.opt_payload.create(sema.arena, ty_val);
} else Value.null;
const container_layout_ty = try sema.getBuiltinType("TmpContainerLayoutAlias");
const field_values = try sema.arena.create([4]Value);
field_values.* = .{
// layout: ContainerLayout,
try mod.enumValueFieldIndex(container_layout_ty, @enumToInt(layout)),
// tag_type: ?type,
enum_tag_ty_val,
// fields: []const UnionField,
fields_val,
// decls: []const Declaration,
decls_val,
};
return sema.addConstant(
type_info_ty,
try Value.Tag.@"union".create(sema.arena, .{
.tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Union)),
.val = try Value.Tag.aggregate.create(sema.arena, field_values),
}),
);
},
.Struct => {
// TODO: look into memoizing this result.
var fields_anon_decl = try block.startAnonDecl();
defer fields_anon_decl.deinit();
const struct_field_ty = t: {
const struct_field_ty_decl_index = (try sema.namespaceLookup(
block,
src,
type_info_ty.getNamespaceIndex(mod).unwrap().?,
"StructField",
)).?;
try mod.declareDeclDependency(sema.owner_decl_index, struct_field_ty_decl_index);
try sema.ensureDeclAnalyzed(struct_field_ty_decl_index);
const struct_field_ty_decl = mod.declPtr(struct_field_ty_decl_index);
break :t struct_field_ty_decl.val.toType();
};
const struct_ty = try sema.resolveTypeFields(ty);
try sema.resolveTypeLayout(ty); // Getting alignment requires type layout
const layout = struct_ty.containerLayout(mod);
const struct_field_vals = fv: {
const struct_type = switch (mod.intern_pool.indexToKey(struct_ty.ip_index)) {
.anon_struct_type => |tuple| {
const struct_field_vals = try fields_anon_decl.arena().alloc(Value, tuple.types.len);
for (
tuple.types,
tuple.values,
struct_field_vals,
0..,
) |field_ty, field_val, *struct_field_val, i| {
const name_val = v: {
var anon_decl = try block.startAnonDecl();
defer anon_decl.deinit();
const bytes = if (tuple.names.len != 0)
// https://github.com/ziglang/zig/issues/15709
@as([]const u8, mod.intern_pool.stringToSlice(tuple.names[i]))
else
try std.fmt.allocPrintZ(anon_decl.arena(), "{d}", .{i});
const new_decl = try anon_decl.finish(
try Type.array(anon_decl.arena(), bytes.len, Value.zero_u8, Type.u8, mod),
try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. bytes.len + 1]),
0, // default alignment
);
break :v try Value.Tag.slice.create(fields_anon_decl.arena(), .{
.ptr = try Value.Tag.decl_ref.create(fields_anon_decl.arena(), new_decl),
.len = try mod.intValue(Type.usize, bytes.len),
});
};
const struct_field_fields = try fields_anon_decl.arena().create([5]Value);
const is_comptime = field_val != .none;
const opt_default_val = if (is_comptime) field_val.toValue() else null;
const default_val_ptr = try sema.optRefValue(block, field_ty.toType(), opt_default_val);
struct_field_fields.* = .{
// name: []const u8,
name_val,
// type: type,
field_ty.toValue(),
// default_value: ?*const anyopaque,
try default_val_ptr.copy(fields_anon_decl.arena()),
// is_comptime: bool,
Value.makeBool(is_comptime),
// alignment: comptime_int,
try field_ty.toType().lazyAbiAlignment(mod, fields_anon_decl.arena()),
};
struct_field_val.* = try Value.Tag.aggregate.create(fields_anon_decl.arena(), struct_field_fields);
}
break :fv struct_field_vals;
},
.struct_type => |s| s,
else => unreachable,
};
const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse
break :fv &[0]Value{};
const struct_field_vals = try fields_anon_decl.arena().alloc(Value, struct_obj.fields.count());
for (
struct_field_vals,
struct_obj.fields.keys(),
struct_obj.fields.values(),
) |*field_val, name, field| {
const name_val = v: {
var anon_decl = try block.startAnonDecl();
defer anon_decl.deinit();
const bytes = try anon_decl.arena().dupeZ(u8, name);
const new_decl = try anon_decl.finish(
try Type.array(anon_decl.arena(), bytes.len, try mod.intValue(Type.u8, 0), Type.u8, mod),
try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. bytes.len + 1]),
0, // default alignment
);
break :v try Value.Tag.slice.create(fields_anon_decl.arena(), .{
.ptr = try Value.Tag.decl_ref.create(fields_anon_decl.arena(), new_decl),
.len = try mod.intValue(Type.usize, bytes.len),
});
};
const struct_field_fields = try fields_anon_decl.arena().create([5]Value);
const opt_default_val = if (field.default_val.ip_index == .unreachable_value)
null
else
field.default_val;
const default_val_ptr = try sema.optRefValue(block, field.ty, opt_default_val);
const alignment = field.alignment(mod, layout);
struct_field_fields.* = .{
// name: []const u8,
name_val,
// type: type,
try Value.Tag.ty.create(fields_anon_decl.arena(), field.ty),
// default_value: ?*const anyopaque,
try default_val_ptr.copy(fields_anon_decl.arena()),
// is_comptime: bool,
Value.makeBool(field.is_comptime),
// alignment: comptime_int,
try mod.intValue(Type.comptime_int, alignment),
};
field_val.* = try Value.Tag.aggregate.create(fields_anon_decl.arena(), struct_field_fields);
}
break :fv struct_field_vals;
};
const fields_val = v: {
const new_decl = try fields_anon_decl.finish(
try mod.arrayType(.{
.len = struct_field_vals.len,
.child = struct_field_ty.ip_index,
.sentinel = .none,
}),
try Value.Tag.aggregate.create(
fields_anon_decl.arena(),
try fields_anon_decl.arena().dupe(Value, struct_field_vals),
),
0, // default alignment
);
break :v try Value.Tag.slice.create(sema.arena, .{
.ptr = try Value.Tag.decl_ref.create(sema.arena, new_decl),
.len = try mod.intValue(Type.usize, struct_field_vals.len),
});
};
const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, struct_ty.getNamespaceIndex(mod));
const backing_integer_val = blk: {
if (layout == .Packed) {
const struct_obj = mod.typeToStruct(struct_ty).?;
assert(struct_obj.haveLayout());
assert(struct_obj.backing_int_ty.isInt(mod));
const backing_int_ty_val = try Value.Tag.ty.create(sema.arena, struct_obj.backing_int_ty);
break :blk try Value.Tag.opt_payload.create(sema.arena, backing_int_ty_val);
} else {
break :blk Value.null;
}
};
const container_layout_ty = try sema.getBuiltinType("TmpContainerLayoutAlias");
const field_values = try sema.arena.create([5]Value);
field_values.* = .{
// layout: ContainerLayout,
try mod.enumValueFieldIndex(container_layout_ty, @enumToInt(layout)),
// backing_integer: ?type,
backing_integer_val,
// fields: []const StructField,
fields_val,
// decls: []const Declaration,
decls_val,
// is_tuple: bool,
Value.makeBool(struct_ty.isTuple(mod)),
};
return sema.addConstant(
type_info_ty,
try Value.Tag.@"union".create(sema.arena, .{
.tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Struct)),
.val = try Value.Tag.aggregate.create(sema.arena, field_values),
}),
);
},
.Opaque => {
// TODO: look into memoizing this result.
const opaque_ty = try sema.resolveTypeFields(ty);
const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, opaque_ty.getNamespaceIndex(mod));
const field_values = try sema.arena.create([1]Value);
field_values.* = .{
// decls: []const Declaration,
decls_val,
};
return sema.addConstant(
type_info_ty,
try Value.Tag.@"union".create(sema.arena, .{
.tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Opaque)),
.val = try Value.Tag.aggregate.create(sema.arena, field_values),
}),
);
},
.Frame => return sema.failWithUseOfAsync(block, src),
.AnyFrame => return sema.failWithUseOfAsync(block, src),
}
}
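/// Builds the `decls: []const Declaration` slice for `@typeInfo`, gathering
/// the declarations of `opt_namespace` (including those reachable through
/// `usingnamespace`) into an anonymous decl and returning a slice value
/// pointing at it.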
fn typeInfoDecls(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
type_info_ty: Type,
opt_namespace: Module.Namespace.OptionalIndex,
) CompileError!Value {
const mod = sema.mod;
var decls_anon_decl = try block.startAnonDecl();
defer decls_anon_decl.deinit();
const declaration_ty = t: {
const declaration_ty_decl_index = (try sema.namespaceLookup(
block,
src,
type_info_ty.getNamespaceIndex(mod).unwrap().?,
"Declaration",
)).?;
try mod.declareDeclDependency(sema.owner_decl_index, declaration_ty_decl_index);
try sema.ensureDeclAnalyzed(declaration_ty_decl_index);
const declaration_ty_decl = mod.declPtr(declaration_ty_decl_index);
break :t declaration_ty_decl.val.toType();
};
try sema.queueFullTypeResolution(declaration_ty);
var decl_vals = std.ArrayList(Value).init(sema.gpa);
defer decl_vals.deinit();
var seen_namespaces = std.AutoHashMap(*Namespace, void).init(sema.gpa);
defer seen_namespaces.deinit();
if (opt_namespace.unwrap()) |namespace_index| {
const namespace = mod.namespacePtr(namespace_index);
try sema.typeInfoNamespaceDecls(block, decls_anon_decl.arena(), namespace, &decl_vals, &seen_namespaces);
}
const new_decl = try decls_anon_decl.finish(
try mod.arrayType(.{
.len = decl_vals.items.len,
.child = declaration_ty.ip_index,
.sentinel = .none,
}),
try Value.Tag.aggregate.create(
decls_anon_decl.arena(),
try decls_anon_decl.arena().dupe(Value, decl_vals.items),
),
0, // default alignment
);
return try Value.Tag.slice.create(sema.arena, .{
.ptr = try Value.Tag.decl_ref.create(sema.arena, new_decl),
.len = try mod.intValue(Type.usize, decl_vals.items.len),
});
}
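/// Appends one `Declaration` value per named decl in `namespace`, recursing
/// into `usingnamespace` namespaces and using `seen_namespaces` to avoid
/// visiting a namespace twice.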
fn typeInfoNamespaceDecls(
sema: *Sema,
block: *Block,
decls_anon_decl: Allocator,
namespace: *Namespace,
decl_vals: *std.ArrayList(Value),
seen_namespaces: *std.AutoHashMap(*Namespace, void),
) !void {
const mod = sema.mod;
const gop = try seen_namespaces.getOrPut(namespace);
if (gop.found_existing) return;
const decls = namespace.decls.keys();
for (decls) |decl_index| {
const decl = mod.declPtr(decl_index);
if (decl.kind == .@"usingnamespace") {
if (decl.analysis == .in_progress) continue;
try mod.ensureDeclAnalyzed(decl_index);
const new_ns = decl.val.toType().getNamespace(mod).?;
try sema.typeInfoNamespaceDecls(block, decls_anon_decl, new_ns, decl_vals, seen_namespaces);
continue;
}
if (decl.kind != .named) continue;
const name_val = v: {
var anon_decl = try block.startAnonDecl();
defer anon_decl.deinit();
const bytes = try anon_decl.arena().dupeZ(u8, mem.sliceTo(decl.name, 0));
const new_decl = try anon_decl.finish(
try Type.array(anon_decl.arena(), bytes.len, try mod.intValue(Type.u8, 0), Type.u8, mod),
try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. bytes.len + 1]),
0, // default alignment
);
break :v try Value.Tag.slice.create(decls_anon_decl, .{
.ptr = try Value.Tag.decl_ref.create(decls_anon_decl, new_decl),
.len = try mod.intValue(Type.usize, bytes.len),
});
};
const fields = try decls_anon_decl.create([2]Value);
fields.* = .{
// name: []const u8,
name_val,
// is_pub: bool,
Value.makeBool(decl.is_pub),
};
try decl_vals.append(try Value.Tag.aggregate.create(decls_anon_decl, fields));
}
}
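/// Analyzes the ZIR `typeof` instruction: returns the type of the resolved
/// operand.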
fn zirTypeof(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
_ = block;
const zir_datas = sema.code.instructions.items(.data);
const inst_data = zir_datas[inst].un_node;
const operand = try sema.resolveInst(inst_data.operand);
const operand_ty = sema.typeOf(operand);
return sema.addType(operand_ty);
}
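/// Analyzes `@TypeOf` with a block operand: the body is evaluated in a
/// speculative `is_typeof` child block whose instructions are discarded, and
/// only the resulting type is returned.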
fn zirTypeofBuiltin(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const pl_node = sema.code.instructions.items(.data)[inst].pl_node;
const extra = sema.code.extraData(Zir.Inst.Block, pl_node.payload_index);
const body = sema.code.extra[extra.end..][0..extra.data.body_len];
var child_block: Block = .{
.parent = block,
.sema = sema,
.src_decl = block.src_decl,
.namespace = block.namespace,
.wip_capture_scope = block.wip_capture_scope,
.instructions = .{},
.inlining = block.inlining,
.is_comptime = false,
.is_typeof = true,
.want_safety = false,
.error_return_trace_index = block.error_return_trace_index,
};
defer child_block.instructions.deinit(sema.gpa);
const operand = try sema.resolveBody(&child_block, body, inst);
const operand_ty = sema.typeOf(operand);
if (operand_ty.isGenericPoison()) return error.GenericPoison;
return sema.addType(operand_ty);
}
fn zirTypeofLog2IntType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const src = inst_data.src();
const operand = try sema.resolveInst(inst_data.operand);
const operand_ty = sema.typeOf(operand);
const res_ty = try sema.log2IntType(block, operand_ty, src);
return sema.addType(res_ty);
}
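/// Returns the unsigned integer type wide enough to index every bit of
/// `operand`, i.e. the type of a shift amount. For example, `u64` maps to `u6`
/// and `u1` maps to `u0`; vectors are handled elementwise, and `comptime_int`
/// stays `comptime_int`.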
fn log2IntType(sema: *Sema, block: *Block, operand: Type, src: LazySrcLoc) CompileError!Type {
const mod = sema.mod;
switch (operand.zigTypeTag(mod)) {
.ComptimeInt => return Type.comptime_int,
.Int => {
const bits = operand.bitSize(mod);
const count = if (bits == 0)
0
else blk: {
var count: u16 = 0;
var s = bits - 1;
while (s != 0) : (s >>= 1) {
count += 1;
}
break :blk count;
};
return mod.intType(.unsigned, count);
},
.Vector => {
const elem_ty = operand.elemType2(mod);
const log2_elem_ty = try sema.log2IntType(block, elem_ty, src);
return mod.vectorType(.{
.len = operand.vectorLen(mod),
.child = log2_elem_ty.ip_index,
});
},
else => {},
}
return sema.fail(
block,
src,
"bit shifting operation expected integer type, found '{}'",
.{operand.fmt(sema.mod)},
);
}
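/// Analyzes `@TypeOf` with multiple arguments: evaluates the operands in an
/// `is_typeof` child block, then peer-resolves their types into a single
/// result type.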
fn zirTypeofPeer(
sema: *Sema,
block: *Block,
extended: Zir.Inst.Extended.InstData,
) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
const extra = sema.code.extraData(Zir.Inst.TypeOfPeer, extended.operand);
const src = LazySrcLoc.nodeOffset(extra.data.src_node);
const body = sema.code.extra[extra.data.body_index..][0..extra.data.body_len];
var child_block: Block = .{
.parent = block,
.sema = sema,
.src_decl = block.src_decl,
.namespace = block.namespace,
.wip_capture_scope = block.wip_capture_scope,
.instructions = .{},
.inlining = block.inlining,
.is_comptime = false,
.is_typeof = true,
.runtime_cond = block.runtime_cond,
.runtime_loop = block.runtime_loop,
.runtime_index = block.runtime_index,
};
defer child_block.instructions.deinit(sema.gpa);
// Ignore the result; we only care about the instructions in `args`.
_ = try sema.analyzeBodyBreak(&child_block, body);
const args = sema.code.refSlice(extra.end, extended.small);
const inst_list = try sema.gpa.alloc(Air.Inst.Ref, args.len);
defer sema.gpa.free(inst_list);
for (args, 0..) |arg_ref, i| {
inst_list[i] = try sema.resolveInst(arg_ref);
}
const result_type = try sema.resolvePeerTypes(block, src, inst_list, .{ .typeof_builtin_call_node_offset = extra.data.src_node });
return sema.addType(result_type);
}
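/// Analyzes boolean `!`: the operand is coerced to `bool` and folded when
/// comptime-known; otherwise a runtime `not` instruction is emitted.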
fn zirBoolNot(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const src = inst_data.src();
const operand_src: LazySrcLoc = .{ .node_offset_un_op = inst_data.src_node };
const uncasted_operand = try sema.resolveInst(inst_data.operand);
const operand = try sema.coerce(block, Type.bool, uncasted_operand, operand_src);
if (try sema.resolveMaybeUndefVal(operand)) |val| {
return if (val.isUndef(mod))
sema.addConstUndef(Type.bool)
else if (val.toBool(mod))
Air.Inst.Ref.bool_false
else
Air.Inst.Ref.bool_true;
}
try sema.requireRuntimeBlock(block, src, null);
return block.addTyOp(.not, Type.bool, operand);
}
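/// Analyzes short-circuiting `and`/`or`. With a comptime-known left-hand side,
/// the result is either known immediately or is simply the right-hand side
/// body. Otherwise this lowers to a block around a `cond_br`; e.g. `a or b`
/// becomes, roughly:
///   %0 = block(bool) {
///     cond_br %a, then { break %0, true }, else { break %0, <rhs body> }
///   }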
fn zirBoolBr(
sema: *Sema,
parent_block: *Block,
inst: Zir.Inst.Index,
is_bool_or: bool,
) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
const mod = sema.mod;
const datas = sema.code.instructions.items(.data);
const inst_data = datas[inst].bool_br;
const lhs = try sema.resolveInst(inst_data.lhs);
const lhs_src = sema.src;
const extra = sema.code.extraData(Zir.Inst.Block, inst_data.payload_index);
const body = sema.code.extra[extra.end..][0..extra.data.body_len];
const gpa = sema.gpa;
if (try sema.resolveDefinedValue(parent_block, lhs_src, lhs)) |lhs_val| {
if (is_bool_or and lhs_val.toBool(mod)) {
return Air.Inst.Ref.bool_true;
} else if (!is_bool_or and !lhs_val.toBool(mod)) {
return Air.Inst.Ref.bool_false;
}
// comptime-known left-hand side. No need for a block here; the result
// is simply the rhs expression. Here we rely on there only being 1
// break instruction (`break_inline`).
return sema.resolveBody(parent_block, body, inst);
}
const block_inst = @intCast(Air.Inst.Index, sema.air_instructions.len);
try sema.air_instructions.append(gpa, .{
.tag = .block,
.data = .{ .ty_pl = .{
.ty = .bool_type,
.payload = undefined,
} },
});
var child_block = parent_block.makeSubBlock();
child_block.runtime_loop = null;
child_block.runtime_cond = lhs_src;
child_block.runtime_index.increment();
defer child_block.instructions.deinit(gpa);
var then_block = child_block.makeSubBlock();
defer then_block.instructions.deinit(gpa);
var else_block = child_block.makeSubBlock();
defer else_block.instructions.deinit(gpa);
const lhs_block = if (is_bool_or) &then_block else &else_block;
const rhs_block = if (is_bool_or) &else_block else &then_block;
const lhs_result: Air.Inst.Ref = if (is_bool_or) .bool_true else .bool_false;
_ = try lhs_block.addBr(block_inst, lhs_result);
const rhs_result = try sema.resolveBody(rhs_block, body, inst);
if (!sema.typeOf(rhs_result).isNoReturn(mod)) {
_ = try rhs_block.addBr(block_inst, rhs_result);
}
const result = sema.finishCondBr(parent_block, &child_block, &then_block, &else_block, lhs, block_inst);
if (!sema.typeOf(rhs_result).isNoReturn(mod)) {
if (try sema.resolveDefinedValue(rhs_block, sema.src, rhs_result)) |rhs_val| {
if (is_bool_or and rhs_val.toBool(mod)) {
return Air.Inst.Ref.bool_true;
} else if (!is_bool_or and !rhs_val.toBool(mod)) {
return Air.Inst.Ref.bool_false;
}
}
}
return result;
}
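/// Finishes the runtime lowering started in `zirBoolBr`: writes the `cond_br`
/// payload (then/else bodies) into `air_extra`, patches the enclosing block
/// instruction's payload, and appends the block to the parent.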
fn finishCondBr(
sema: *Sema,
parent_block: *Block,
child_block: *Block,
then_block: *Block,
else_block: *Block,
cond: Air.Inst.Ref,
block_inst: Air.Inst.Index,
) !Air.Inst.Ref {
const gpa = sema.gpa;
try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.CondBr).Struct.fields.len +
then_block.instructions.items.len + else_block.instructions.items.len +
@typeInfo(Air.Block).Struct.fields.len + child_block.instructions.items.len + 1);
const cond_br_payload = sema.addExtraAssumeCapacity(Air.CondBr{
.then_body_len = @intCast(u32, then_block.instructions.items.len),
.else_body_len = @intCast(u32, else_block.instructions.items.len),
});
sema.air_extra.appendSliceAssumeCapacity(then_block.instructions.items);
sema.air_extra.appendSliceAssumeCapacity(else_block.instructions.items);
_ = try child_block.addInst(.{ .tag = .cond_br, .data = .{ .pl_op = .{
.operand = cond,
.payload = cond_br_payload,
} } });
sema.air_instructions.items(.data)[block_inst].ty_pl.payload = sema.addExtraAssumeCapacity(
Air.Block{ .body_len = @intCast(u32, child_block.instructions.items.len) },
);
sema.air_extra.appendSliceAssumeCapacity(child_block.instructions.items);
try parent_block.instructions.append(gpa, block_inst);
return Air.indexToRef(block_inst);
}
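/// Fails with "expected optional type" unless `ty` can be null-checked:
/// an optional, `null`, `undefined`, or a pointer-like optional.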
fn checkNullableType(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) !void {
const mod = sema.mod;
switch (ty.zigTypeTag(mod)) {
.Optional, .Null, .Undefined => return,
.Pointer => if (ty.isPtrLikeOptional(mod)) return,
else => {},
}
return sema.failWithExpectedOptionalType(block, src, ty);
}
fn zirIsNonNull(
sema: *Sema,
block: *Block,
inst: Zir.Inst.Index,
) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const src = inst_data.src();
const operand = try sema.resolveInst(inst_data.operand);
try sema.checkNullableType(block, src, sema.typeOf(operand));
return sema.analyzeIsNull(block, src, operand, true);
}
fn zirIsNonNullPtr(
sema: *Sema,
block: *Block,
inst: Zir.Inst.Index,
) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const src = inst_data.src();
const ptr = try sema.resolveInst(inst_data.operand);
try sema.checkNullableType(block, src, sema.typeOf(ptr).elemType2(mod));
if ((try sema.resolveMaybeUndefVal(ptr)) == null) {
return block.addUnOp(.is_non_null_ptr, ptr);
}
const loaded = try sema.analyzeLoad(block, src, ptr, src);
return sema.analyzeIsNull(block, src, loaded, true);
}
fn checkErrorType(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) !void {
const mod = sema.mod;
switch (ty.zigTypeTag(mod)) {
.ErrorSet, .ErrorUnion, .Undefined => return,
else => return sema.fail(block, src, "expected error union type, found '{}'", .{
ty.fmt(sema.mod),
}),
}
}
fn zirIsNonErr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const src = inst_data.src();
const operand = try sema.resolveInst(inst_data.operand);
try sema.checkErrorType(block, src, sema.typeOf(operand));
return sema.analyzeIsNonErr(block, src, operand);
}
fn zirIsNonErrPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const src = inst_data.src();
const ptr = try sema.resolveInst(inst_data.operand);
try sema.checkErrorType(block, src, sema.typeOf(ptr).elemType2(mod));
const loaded = try sema.analyzeLoad(block, src, ptr, src);
return sema.analyzeIsNonErr(block, src, loaded);
}
fn zirRetIsNonErr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const src = inst_data.src();
const operand = try sema.resolveInst(inst_data.operand);
return sema.analyzeIsNonErr(block, src, operand);
}
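/// Analyzes a ZIR `condbr` instruction. A comptime-known condition analyzes
/// only the taken body; otherwise both bodies are analyzed into a runtime
/// `cond_br`, with the error operand unwrapped first when the condition is
/// an `is_non_err` check so the else body can observe the error.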
fn zirCondbr(
sema: *Sema,
parent_block: *Block,
inst: Zir.Inst.Index,
) CompileError!Zir.Inst.Index {
const tracy = trace(@src());
defer tracy.end();
const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const cond_src: LazySrcLoc = .{ .node_offset_if_cond = inst_data.src_node };
const extra = sema.code.extraData(Zir.Inst.CondBr, inst_data.payload_index);
const then_body = sema.code.extra[extra.end..][0..extra.data.then_body_len];
const else_body = sema.code.extra[extra.end + then_body.len ..][0..extra.data.else_body_len];
const uncasted_cond = try sema.resolveInst(extra.data.condition);
const cond = try sema.coerce(parent_block, Type.bool, uncasted_cond, cond_src);
if (try sema.resolveDefinedValue(parent_block, cond_src, cond)) |cond_val| {
const body = if (cond_val.toBool(mod)) then_body else else_body;
try sema.maybeErrorUnwrapCondbr(parent_block, body, extra.data.condition, cond_src);
// We use `analyzeBodyInner` since we want to propagate any possible
// `error.ComptimeBreak` to the caller.
return sema.analyzeBodyInner(parent_block, body);
}
const gpa = sema.gpa;
// We'll reuse the sub-block to save on memory, yanking out its
// instructions array in between using it for the then body and the else body.
var sub_block = parent_block.makeSubBlock();
sub_block.runtime_loop = null;
sub_block.runtime_cond = cond_src;
sub_block.runtime_index.increment();
defer sub_block.instructions.deinit(gpa);
try sema.analyzeBodyRuntimeBreak(&sub_block, then_body);
const true_instructions = try sub_block.instructions.toOwnedSlice(gpa);
defer gpa.free(true_instructions);
const err_cond = blk: {
const index = Zir.refToIndex(extra.data.condition) orelse break :blk null;
if (sema.code.instructions.items(.tag)[index] != .is_non_err) break :blk null;
const err_inst_data = sema.code.instructions.items(.data)[index].un_node;
const err_operand = try sema.resolveInst(err_inst_data.operand);
const operand_ty = sema.typeOf(err_operand);
assert(operand_ty.zigTypeTag(mod) == .ErrorUnion);
const result_ty = operand_ty.errorUnionSet(mod);
break :blk try sub_block.addTyOp(.unwrap_errunion_err, result_ty, err_operand);
};
if (err_cond != null and try sema.maybeErrorUnwrap(&sub_block, else_body, err_cond.?)) {
// nothing to do
} else {
try sema.analyzeBodyRuntimeBreak(&sub_block, else_body);
}
try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.CondBr).Struct.fields.len +
true_instructions.len + sub_block.instructions.items.len);
_ = try parent_block.addInst(.{
.tag = .cond_br,
.data = .{ .pl_op = .{
.operand = cond,
.payload = sema.addExtraAssumeCapacity(Air.CondBr{
.then_body_len = @intCast(u32, true_instructions.len),
.else_body_len = @intCast(u32, sub_block.instructions.items.len),
}),
} },
});
sema.air_extra.appendSliceAssumeCapacity(true_instructions);
sema.air_extra.appendSliceAssumeCapacity(sub_block.instructions.items);
return always_noreturn;
}
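/// Analyzes the `try` operator on an error union value, e.g. (illustrative)
/// `const v = try mightFail();`. When the error-ness is comptime-known this
/// either extracts the payload or analyzes the error body directly;
/// otherwise an AIR `try` instruction is emitted whose body is noreturn.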
fn zirTry(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
const operand_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
const extra = sema.code.extraData(Zir.Inst.Try, inst_data.payload_index);
const body = sema.code.extra[extra.end..][0..extra.data.body_len];
const err_union = try sema.resolveInst(extra.data.operand);
const err_union_ty = sema.typeOf(err_union);
const mod = sema.mod;
if (err_union_ty.zigTypeTag(mod) != .ErrorUnion) {
return sema.fail(parent_block, operand_src, "expected error union type, found '{}'", .{
err_union_ty.fmt(sema.mod),
});
}
const is_non_err = try sema.analyzeIsNonErrComptimeOnly(parent_block, operand_src, err_union);
if (is_non_err != .none) {
const is_non_err_val = (try sema.resolveDefinedValue(parent_block, operand_src, is_non_err)).?;
if (is_non_err_val.toBool(mod)) {
return sema.analyzeErrUnionPayload(parent_block, src, err_union_ty, err_union, operand_src, false);
}
// We can analyze the body directly in the parent block because we know
// no breaks from the body are possible and the body is noreturn.
return sema.resolveBody(parent_block, body, inst);
}
var sub_block = parent_block.makeSubBlock();
defer sub_block.instructions.deinit(sema.gpa);
// This body is guaranteed to end with noreturn and has no breaks.
_ = try sema.analyzeBodyInner(&sub_block, body);
try sema.air_extra.ensureUnusedCapacity(sema.gpa, @typeInfo(Air.Try).Struct.fields.len +
sub_block.instructions.items.len);
const try_inst = try parent_block.addInst(.{
.tag = .@"try",
.data = .{ .pl_op = .{
.operand = err_union,
.payload = sema.addExtraAssumeCapacity(Air.Try{
.body_len = @intCast(u32, sub_block.instructions.items.len),
}),
} },
});
sema.air_extra.appendSliceAssumeCapacity(sub_block.instructions.items);
return try_inst;
}
fn zirTryPtr(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
const operand_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
const extra = sema.code.extraData(Zir.Inst.Try, inst_data.payload_index);
const body = sema.code.extra[extra.end..][0..extra.data.body_len];
const operand = try sema.resolveInst(extra.data.operand);
const err_union = try sema.analyzeLoad(parent_block, src, operand, operand_src);
const err_union_ty = sema.typeOf(err_union);
const mod = sema.mod;
if (err_union_ty.zigTypeTag(mod) != .ErrorUnion) {
return sema.fail(parent_block, operand_src, "expected error union type, found '{}'", .{
err_union_ty.fmt(sema.mod),
});
}
const is_non_err = try sema.analyzeIsNonErrComptimeOnly(parent_block, operand_src, err_union);
if (is_non_err != .none) {
const is_non_err_val = (try sema.resolveDefinedValue(parent_block, operand_src, is_non_err)).?;
if (is_non_err_val.toBool(mod)) {
return sema.analyzeErrUnionPayloadPtr(parent_block, src, operand, false, false);
}
// We can analyze the body directly in the parent block because we know
// no breaks from the body are possible and the body is noreturn.
return sema.resolveBody(parent_block, body, inst);
}
var sub_block = parent_block.makeSubBlock();
defer sub_block.instructions.deinit(sema.gpa);
// This body is guaranteed to end with noreturn and has no breaks.
_ = try sema.analyzeBodyInner(&sub_block, body);
const operand_ty = sema.typeOf(operand);
const ptr_info = operand_ty.ptrInfo(mod);
const res_ty = try Type.ptr(sema.arena, sema.mod, .{
.pointee_type = err_union_ty.errorUnionPayload(mod),
.@"addrspace" = ptr_info.@"addrspace",
.mutable = ptr_info.mutable,
.@"allowzero" = ptr_info.@"allowzero",
.@"volatile" = ptr_info.@"volatile",
});
const res_ty_ref = try sema.addType(res_ty);
try sema.air_extra.ensureUnusedCapacity(sema.gpa, @typeInfo(Air.TryPtr).Struct.fields.len +
sub_block.instructions.items.len);
const try_inst = try parent_block.addInst(.{
.tag = .try_ptr,
.data = .{ .ty_pl = .{
.ty = res_ty_ref,
.payload = sema.addExtraAssumeCapacity(Air.TryPtr{
.ptr = operand,
.body_len = @intCast(u32, sub_block.instructions.items.len),
}),
} },
});
sema.air_extra.appendSliceAssumeCapacity(sub_block.instructions.items);
return try_inst;
}
// A `break` statement is inside a runtime condition, but is trying to
// break from an inline loop. In such a case we must convert it to
// a runtime break.
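// For example (illustrative):
//
//     inline while (i < n) : (i += 1) {
//         if (runtime_cond) break; // comptime break under a runtime branch
//     }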
fn addRuntimeBreak(sema: *Sema, child_block: *Block, break_data: BreakData) !void {
const gop = sema.inst_map.getOrPutAssumeCapacity(break_data.block_inst);
const labeled_block = if (!gop.found_existing) blk: {
try sema.post_hoc_blocks.ensureUnusedCapacity(sema.gpa, 1);
const new_block_inst = @intCast(Air.Inst.Index, sema.air_instructions.len);
gop.value_ptr.* = Air.indexToRef(new_block_inst);
try sema.air_instructions.append(sema.gpa, .{
.tag = .block,
.data = undefined,
});
const labeled_block = try sema.gpa.create(LabeledBlock);
labeled_block.* = .{
.label = .{
.zir_block = break_data.block_inst,
.merges = .{
.src_locs = .{},
.results = .{},
.br_list = .{},
.block_inst = new_block_inst,
},
},
.block = .{
.parent = child_block,
.sema = sema,
.src_decl = child_block.src_decl,
.namespace = child_block.namespace,
.wip_capture_scope = child_block.wip_capture_scope,
.instructions = .{},
.label = &labeled_block.label,
.inlining = child_block.inlining,
.is_comptime = child_block.is_comptime,
},
};
sema.post_hoc_blocks.putAssumeCapacityNoClobber(new_block_inst, labeled_block);
break :blk labeled_block;
} else blk: {
const new_block_inst = Air.refToIndex(gop.value_ptr.*).?;
const labeled_block = sema.post_hoc_blocks.get(new_block_inst).?;
break :blk labeled_block;
};
const operand = try sema.resolveInst(break_data.operand);
const br_ref = try child_block.addBr(labeled_block.label.merges.block_inst, operand);
try labeled_block.label.merges.results.append(sema.gpa, operand);
try labeled_block.label.merges.br_list.append(sema.gpa, Air.refToIndex(br_ref).?);
labeled_block.block.runtime_index.increment();
if (labeled_block.block.runtime_cond == null and labeled_block.block.runtime_loop == null) {
labeled_block.block.runtime_cond = child_block.runtime_cond orelse child_block.runtime_loop;
labeled_block.block.runtime_loop = child_block.runtime_loop;
}
}
fn zirUnreachable(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Zir.Inst.Index {
const inst_data = sema.code.instructions.items(.data)[inst].@"unreachable";
const src = inst_data.src();
if (block.is_comptime) {
return sema.fail(block, src, "reached unreachable code", .{});
}
// TODO Add compile error for @optimizeFor occurring too late in a scope.
try block.addUnreachable(true);
return always_noreturn;
}
fn zirRetErrValue(
sema: *Sema,
block: *Block,
inst: Zir.Inst.Index,
) CompileError!Zir.Inst.Index {
const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].str_tok;
const err_name = inst_data.get(sema.code);
const src = inst_data.src();
// Return the error code from the function.
const kv = try mod.getErrorValue(err_name);
const result_inst = try sema.addConstant(
try mod.singleErrorSetType(err_name),
try Value.Tag.@"error".create(sema.arena, .{ .name = kv.key }),
);
return sema.analyzeRet(block, result_inst, src);
}
fn zirRetImplicit(
sema: *Sema,
block: *Block,
inst: Zir.Inst.Index,
) CompileError!Zir.Inst.Index {
const tracy = trace(@src());
defer tracy.end();
const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].un_tok;
const operand = try sema.resolveInst(inst_data.operand);
const r_brace_src = inst_data.src();
const ret_ty_src: LazySrcLoc = .{ .node_offset_fn_type_ret_ty = 0 };
const base_tag = sema.fn_ret_ty.baseZigTypeTag(mod);
if (base_tag == .NoReturn) {
const msg = msg: {
const msg = try sema.errMsg(block, ret_ty_src, "function declared '{}' implicitly returns", .{
sema.fn_ret_ty.fmt(mod),
});
errdefer msg.destroy(sema.gpa);
try sema.errNote(block, r_brace_src, msg, "control flow reaches end of body here", .{});
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
} else if (base_tag != .Void) {
const msg = msg: {
const msg = try sema.errMsg(block, ret_ty_src, "function with non-void return type '{}' implicitly returns", .{
sema.fn_ret_ty.fmt(mod),
});
errdefer msg.destroy(sema.gpa);
try sema.errNote(block, r_brace_src, msg, "control flow reaches end of body here", .{});
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
return sema.analyzeRet(block, operand, .unneeded);
}
fn zirRetNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Zir.Inst.Index {
const tracy = trace(@src());
defer tracy.end();
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const operand = try sema.resolveInst(inst_data.operand);
const src = inst_data.src();
return sema.analyzeRet(block, operand, src);
}
fn zirRetLoad(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Zir.Inst.Index {
const tracy = trace(@src());
defer tracy.end();
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const src = inst_data.src();
const ret_ptr = try sema.resolveInst(inst_data.operand);
if (block.is_comptime or block.inlining != null) {
const operand = try sema.analyzeLoad(block, src, ret_ptr, src);
return sema.analyzeRet(block, operand, src);
}
if (sema.wantErrorReturnTracing(sema.fn_ret_ty)) {
const is_non_err = try sema.analyzePtrIsNonErr(block, src, ret_ptr);
return sema.retWithErrTracing(block, is_non_err, .ret_load, ret_ptr);
}
_ = try block.addUnOp(.ret_load, ret_ptr);
return always_noreturn;
}
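/// Emits a return for a function with error return tracing enabled: when the
/// operand is, or may at runtime be, an error, the `returnError` builtin is
/// called (under a `cond_br` when the error-ness is only known at runtime)
/// before the actual return instruction.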
fn retWithErrTracing(
sema: *Sema,
block: *Block,
is_non_err: Air.Inst.Ref,
ret_tag: Air.Inst.Tag,
operand: Air.Inst.Ref,
) CompileError!Zir.Inst.Index {
const mod = sema.mod;
const need_check = switch (is_non_err) {
.bool_true => {
_ = try block.addUnOp(ret_tag, operand);
return always_noreturn;
},
.bool_false => false,
else => true,
};
const gpa = sema.gpa;
const unresolved_stack_trace_ty = try sema.getBuiltinType("StackTrace");
const stack_trace_ty = try sema.resolveTypeFields(unresolved_stack_trace_ty);
const ptr_stack_trace_ty = try mod.singleMutPtrType(stack_trace_ty);
const err_return_trace = try block.addTy(.err_return_trace, ptr_stack_trace_ty);
const return_err_fn = try sema.getBuiltin("returnError");
const args: [1]Air.Inst.Ref = .{err_return_trace};
if (!need_check) {
try sema.callBuiltin(block, return_err_fn, .never_inline, &args);
_ = try block.addUnOp(ret_tag, operand);
return always_noreturn;
}
var then_block = block.makeSubBlock();
defer then_block.instructions.deinit(gpa);
_ = try then_block.addUnOp(ret_tag, operand);
var else_block = block.makeSubBlock();
defer else_block.instructions.deinit(gpa);
try sema.callBuiltin(&else_block, return_err_fn, .never_inline, &args);
_ = try else_block.addUnOp(ret_tag, operand);
try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.CondBr).Struct.fields.len +
then_block.instructions.items.len + else_block.instructions.items.len +
@typeInfo(Air.Block).Struct.fields.len + 1);
const cond_br_payload = sema.addExtraAssumeCapacity(Air.CondBr{
.then_body_len = @intCast(u32, then_block.instructions.items.len),
.else_body_len = @intCast(u32, else_block.instructions.items.len),
});
sema.air_extra.appendSliceAssumeCapacity(then_block.instructions.items);
sema.air_extra.appendSliceAssumeCapacity(else_block.instructions.items);
_ = try block.addInst(.{ .tag = .cond_br, .data = .{ .pl_op = .{
.operand = is_non_err,
.payload = cond_br_payload,
} } });
return always_noreturn;
}
fn wantErrorReturnTracing(sema: *Sema, fn_ret_ty: Type) bool {
const mod = sema.mod;
if (!mod.backendSupportsFeature(.error_return_trace)) return false;
return fn_ret_ty.isError(mod) and
mod.comp.bin_file.options.error_return_tracing;
}
fn zirSaveErrRetIndex(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].save_err_ret_index;
if (!mod.backendSupportsFeature(.error_return_trace)) return;
if (!mod.comp.bin_file.options.error_return_tracing) return;
// This is only relevant at runtime.
if (block.is_comptime or block.is_typeof) return;
const save_index = inst_data.operand == .none or b: {
const operand = try sema.resolveInst(inst_data.operand);
const operand_ty = sema.typeOf(operand);
break :b operand_ty.isError(mod);
};
if (save_index)
block.error_return_trace_index = try sema.analyzeSaveErrRetIndex(block);
}
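/// Restores a previously saved error return trace index when a block exits
/// without propagating an error. When the ZIR targets a labeled block, this
/// walks up the block stack to find it; otherwise it restores the index
/// saved at function entry. No-op when nothing changed or tracing is
/// unavailable.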
fn zirRestoreErrRetIndex(sema: *Sema, start_block: *Block, inst: Zir.Inst.Index) CompileError!void {
const inst_data = sema.code.instructions.items(.data)[inst].restore_err_ret_index;
const src = sema.src; // TODO
// This is only relevant at runtime.
if (start_block.is_comptime or start_block.is_typeof) return;
if (!sema.mod.backendSupportsFeature(.error_return_trace)) return;
if (!sema.owner_func.?.calls_or_awaits_errorable_fn) return;
if (!sema.mod.comp.bin_file.options.error_return_tracing) return;
const tracy = trace(@src());
defer tracy.end();
const saved_index = if (Zir.refToIndexAllowNone(inst_data.block)) |zir_block| b: {
var block = start_block;
while (true) {
if (block.label) |label| {
if (label.zir_block == zir_block) {
const target_trace_index = if (block.parent) |parent_block| tgt: {
break :tgt parent_block.error_return_trace_index;
} else sema.error_return_trace_index_on_fn_entry;
if (start_block.error_return_trace_index != target_trace_index)
break :b target_trace_index;
return; // No need to restore
}
}
block = block.parent.?;
}
} else b: {
if (start_block.error_return_trace_index != sema.error_return_trace_index_on_fn_entry)
break :b sema.error_return_trace_index_on_fn_entry;
return; // No need to restore
};
assert(saved_index != .none); // The .error_return_trace_index field was dropped somewhere
const operand = try sema.resolveInstAllowNone(inst_data.operand);
return sema.popErrorReturnTrace(start_block, src, operand, saved_index);
}
fn addToInferredErrorSet(sema: *Sema, uncasted_operand: Air.Inst.Ref) !void {
const mod = sema.mod;
const gpa = sema.gpa;
const ip = &mod.intern_pool;
assert(sema.fn_ret_ty.zigTypeTag(mod) == .ErrorUnion);
if (mod.typeToInferredErrorSet(sema.fn_ret_ty.errorUnionSet(mod))) |ies| {
const op_ty = sema.typeOf(uncasted_operand);
switch (op_ty.zigTypeTag(mod)) {
.ErrorSet => try ies.addErrorSet(op_ty, ip, gpa),
.ErrorUnion => try ies.addErrorSet(op_ty.errorUnionSet(mod), ip, gpa),
else => {},
}
}
}
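/// Shared implementation of the `ret`-family ZIR instructions: coerces the
/// operand to the return type, handles comptime and inlined returns (a
/// return inside an inlined call becomes a `br` to the call's merge block),
/// and emits error-return-trace bookkeeping when enabled.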
fn analyzeRet(
sema: *Sema,
block: *Block,
uncasted_operand: Air.Inst.Ref,
src: LazySrcLoc,
) CompileError!Zir.Inst.Index {
// Special case for returning an error to an inferred error set; we need to
// add the error tag to the inferred error set of the in-scope function, so
// that the coercion below works correctly.
const mod = sema.mod;
if (sema.fn_ret_ty.zigTypeTag(mod) == .ErrorUnion) {
try sema.addToInferredErrorSet(uncasted_operand);
}
const operand = sema.coerceExtra(block, sema.fn_ret_ty, uncasted_operand, src, .{ .is_ret = true }) catch |err| switch (err) {
error.NotCoercible => unreachable,
else => |e| return e,
};
if (block.inlining) |inlining| {
if (block.is_comptime) {
_ = try sema.resolveConstMaybeUndefVal(block, src, operand, "value being returned at comptime must be comptime-known");
inlining.comptime_result = operand;
return error.ComptimeReturn;
}
// We are inlining a function call; rewrite the `ret` as a `break`.
try inlining.merges.results.append(sema.gpa, operand);
_ = try block.addBr(inlining.merges.block_inst, operand);
return always_noreturn;
} else if (block.is_comptime) {
return sema.fail(block, src, "function called at runtime cannot return value at comptime", .{});
}
try sema.resolveTypeLayout(sema.fn_ret_ty);
if (sema.wantErrorReturnTracing(sema.fn_ret_ty)) {
// Avoid adding a frame to the error return trace in case the value is
// comptime-known not to be an error.
const is_non_err = try sema.analyzeIsNonErr(block, src, operand);
return sema.retWithErrTracing(block, is_non_err, .ret, operand);
}
_ = try block.addUnOp(.ret, operand);
return always_noreturn;
}
fn floatOpAllowed(tag: Zir.Inst.Tag) bool {
// Extend this switch as additional operators are implemented.
return switch (tag) {
.add, .sub, .mul, .div, .div_exact, .div_trunc, .div_floor, .mod, .rem, .mod_rem => true,
else => false,
};
}
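/// Builds a pointer type from a ZIR `ptr_type` instruction, decoding the
/// optional sentinel, alignment, address space, and bit-range operands from
/// the trailing extra data and validating them against the element type.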
fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].ptr_type;
const extra = sema.code.extraData(Zir.Inst.PtrType, inst_data.payload_index);
const elem_ty_src: LazySrcLoc = .{ .node_offset_ptr_elem = extra.data.src_node };
const sentinel_src: LazySrcLoc = .{ .node_offset_ptr_sentinel = extra.data.src_node };
const align_src: LazySrcLoc = .{ .node_offset_ptr_align = extra.data.src_node };
const addrspace_src: LazySrcLoc = .{ .node_offset_ptr_addrspace = extra.data.src_node };
const bitoffset_src: LazySrcLoc = .{ .node_offset_ptr_bitoffset = extra.data.src_node };
const hostsize_src: LazySrcLoc = .{ .node_offset_ptr_hostsize = extra.data.src_node };
const elem_ty = blk: {
const air_inst = try sema.resolveInst(extra.data.elem_type);
const ty = sema.analyzeAsType(block, elem_ty_src, air_inst) catch |err| {
if (err == error.AnalysisFail and sema.err != null and sema.typeOf(air_inst).isSinglePointer(mod)) {
try sema.errNote(block, elem_ty_src, sema.err.?, "use '.*' to dereference pointer", .{});
}
return err;
};
if (ty.isGenericPoison()) return error.GenericPoison;
break :blk ty;
};
if (elem_ty.zigTypeTag(mod) == .NoReturn)
return sema.fail(block, elem_ty_src, "pointer to noreturn not allowed", .{});
const target = sema.mod.getTarget();
var extra_i = extra.end;
const sentinel = if (inst_data.flags.has_sentinel) blk: {
const ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_i]);
extra_i += 1;
const coerced = try sema.coerce(block, elem_ty, try sema.resolveInst(ref), sentinel_src);
const val = try sema.resolveConstValue(block, sentinel_src, coerced, "pointer sentinel value must be comptime-known");
break :blk val.toIntern();
} else .none;
const abi_align: InternPool.Alignment = if (inst_data.flags.has_align) blk: {
const ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_i]);
extra_i += 1;
const coerced = try sema.coerce(block, Type.u32, try sema.resolveInst(ref), align_src);
const val = try sema.resolveConstValue(block, align_src, coerced, "pointer alignment must be comptime-known");
// Check if this happens to be the lazy alignment of our element type, in
// which case we can leave it as `.none` without resolving it.
if (val.castTag(.lazy_align)) |payload| {
if (payload.data.eql(elem_ty, sema.mod)) {
break :blk .none;
}
}
const abi_align = @intCast(u32, (try val.getUnsignedIntAdvanced(mod, sema)).?);
try sema.validateAlign(block, align_src, abi_align);
break :blk InternPool.Alignment.fromByteUnits(abi_align);
} else .none;
const address_space: std.builtin.AddressSpace = if (inst_data.flags.has_addrspace) blk: {
const ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_i]);
extra_i += 1;
break :blk try sema.analyzeAddressSpace(block, addrspace_src, ref, .pointer);
} else if (elem_ty.zigTypeTag(mod) == .Fn and target.cpu.arch == .avr) .flash else .generic;
const bit_offset = if (inst_data.flags.has_bit_range) blk: {
const ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_i]);
extra_i += 1;
const bit_offset = try sema.resolveInt(block, bitoffset_src, ref, Type.u16, "pointer bit-offset must be comptime-known");
break :blk @intCast(u16, bit_offset);
} else 0;
const host_size: u16 = if (inst_data.flags.has_bit_range) blk: {
const ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_i]);
extra_i += 1;
const host_size = try sema.resolveInt(block, hostsize_src, ref, Type.u16, "pointer host size must be comptime-known");
break :blk @intCast(u16, host_size);
} else 0;
if (host_size != 0 and bit_offset >= host_size * 8) {
return sema.fail(block, bitoffset_src, "bit offset starts after end of host integer", .{});
}
if (elem_ty.zigTypeTag(mod) == .Fn) {
if (inst_data.size != .One) {
return sema.fail(block, elem_ty_src, "function pointers must be single pointers", .{});
}
const fn_align = mod.typeToFunc(elem_ty).?.alignment;
if (inst_data.flags.has_align and abi_align != .none and fn_align != .none and
abi_align != fn_align)
{
return sema.fail(block, align_src, "function pointer alignment disagrees with function alignment", .{});
}
} else if (inst_data.size == .Many and elem_ty.zigTypeTag(mod) == .Opaque) {
return sema.fail(block, elem_ty_src, "unknown-length pointer to opaque not allowed", .{});
} else if (inst_data.size == .C) {
if (!try sema.validateExternType(elem_ty, .other)) {
const msg = msg: {
const msg = try sema.errMsg(block, elem_ty_src, "C pointers cannot point to non-C-ABI-compatible type '{}'", .{elem_ty.fmt(sema.mod)});
errdefer msg.destroy(sema.gpa);
const src_decl = sema.mod.declPtr(block.src_decl);
try sema.explainWhyTypeIsNotExtern(msg, elem_ty_src.toSrcLoc(src_decl, mod), elem_ty, .other);
try sema.addDeclaredHereNote(msg, elem_ty);
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
if (elem_ty.zigTypeTag(mod) == .Opaque) {
return sema.fail(block, elem_ty_src, "C pointers cannot point to opaque types", .{});
}
}
const ty = try mod.ptrType(.{
.elem_type = elem_ty.toIntern(),
.sentinel = sentinel,
.alignment = abi_align,
.address_space = address_space,
.bit_offset = bit_offset,
.host_size = host_size,
.is_const = !inst_data.flags.is_mutable,
.is_allowzero = inst_data.flags.is_allowzero,
.is_volatile = inst_data.flags.is_volatile,
.size = inst_data.size,
});
return sema.addType(ty);
}
fn zirStructInitEmpty(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const src = inst_data.src();
const obj_ty = try sema.resolveType(block, src, inst_data.operand);
const mod = sema.mod;
switch (obj_ty.zigTypeTag(mod)) {
.Struct => return sema.structInitEmpty(block, obj_ty, src, src),
.Array, .Vector => return sema.arrayInitEmpty(block, src, obj_ty),
.Void => return sema.addConstant(obj_ty, Value.void),
.Union => return sema.fail(block, src, "union initializer must initialize one field", .{}),
else => return sema.failWithArrayInitNotSupported(block, src, obj_ty),
}
}
fn structInitEmpty(
sema: *Sema,
block: *Block,
obj_ty: Type,
dest_src: LazySrcLoc,
init_src: LazySrcLoc,
) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const gpa = sema.gpa;
// This logic must be synchronized with that in `zirStructInit`.
const struct_ty = try sema.resolveTypeFields(obj_ty);
// The init values to use for the struct instance.
const field_inits = try gpa.alloc(Air.Inst.Ref, struct_ty.structFieldCount(mod));
defer gpa.free(field_inits);
@memset(field_inits, .none);
return sema.finishStructInit(block, init_src, dest_src, field_inits, struct_ty, false);
}
fn arrayInitEmpty(sema: *Sema, block: *Block, src: LazySrcLoc, obj_ty: Type) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const arr_len = obj_ty.arrayLen(mod);
if (arr_len != 0) {
if (obj_ty.zigTypeTag(mod) == .Array) {
return sema.fail(block, src, "expected {d} array elements; found 0", .{arr_len});
} else {
return sema.fail(block, src, "expected {d} vector elements; found 0", .{arr_len});
}
}
if (obj_ty.sentinel(mod)) |sentinel| {
const val = try Value.Tag.empty_array_sentinel.create(sema.arena, sentinel);
return sema.addConstant(obj_ty, val);
} else {
return sema.addConstant(obj_ty, Value.initTag(.empty_array));
}
}
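/// Analyzes `@unionInit(U, field_name, init)`, e.g. (illustrative)
/// `@unionInit(U, "a", 1)`: the field name must be comptime-known and the
/// init value is coerced to the field's type.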
fn zirUnionInit(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
const field_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
const init_src: LazySrcLoc = .{ .node_offset_builtin_call_arg2 = inst_data.src_node };
const extra = sema.code.extraData(Zir.Inst.UnionInit, inst_data.payload_index).data;
const union_ty = try sema.resolveType(block, ty_src, extra.union_type);
const field_name = try sema.resolveConstString(block, field_src, extra.field_name, "name of field being initialized must be comptime-known");
const init = try sema.resolveInst(extra.init);
return sema.unionInit(block, init, init_src, union_ty, ty_src, field_name, field_src);
}
fn unionInit(
sema: *Sema,
block: *Block,
uncasted_init: Air.Inst.Ref,
init_src: LazySrcLoc,
union_ty: Type,
union_ty_src: LazySrcLoc,
field_name: []const u8,
field_src: LazySrcLoc,
) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const field_index = try sema.unionFieldIndex(block, union_ty, field_name, field_src);
const field = union_ty.unionFields(mod).values()[field_index];
const init = try sema.coerce(block, field.ty, uncasted_init, init_src);
if (try sema.resolveMaybeUndefVal(init)) |init_val| {
const tag_ty = union_ty.unionTagTypeHypothetical(mod);
const enum_field_index = @intCast(u32, tag_ty.enumFieldIndex(field_name, mod).?);
const tag_val = try mod.enumValueFieldIndex(tag_ty, enum_field_index);
return sema.addConstant(union_ty, try Value.Tag.@"union".create(sema.arena, .{
.tag = tag_val,
.val = init_val,
}));
}
try sema.requireRuntimeBlock(block, init_src, null);
_ = union_ty_src;
try sema.queueFullTypeResolution(union_ty);
return block.addUnionInit(union_ty, field_index, init);
}
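/// Analyzes a ZIR `struct_init` instruction: resolves the container type
/// from the first item's `field_type`, then dispatches to struct, union, or
/// anonymous-struct handling. The struct path checks for duplicate and
/// comptime fields before delegating to `finishStructInit`.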
fn zirStructInit(
sema: *Sema,
block: *Block,
inst: Zir.Inst.Index,
is_ref: bool,
) CompileError!Air.Inst.Ref {
const gpa = sema.gpa;
const zir_datas = sema.code.instructions.items(.data);
const inst_data = zir_datas[inst].pl_node;
const extra = sema.code.extraData(Zir.Inst.StructInit, inst_data.payload_index);
const src = inst_data.src();
const mod = sema.mod;
const first_item = sema.code.extraData(Zir.Inst.StructInit.Item, extra.end).data;
const first_field_type_data = zir_datas[first_item.field_type].pl_node;
const first_field_type_extra = sema.code.extraData(Zir.Inst.FieldType, first_field_type_data.payload_index).data;
const resolved_ty = try sema.resolveType(block, src, first_field_type_extra.container_type);
try sema.resolveTypeLayout(resolved_ty);
if (resolved_ty.zigTypeTag(mod) == .Struct) {
// This logic must be synchronized with that in `zirStructInitEmpty`.
// Maps each field index to the `field_type` ZIR instruction that already
// initialized it, to make sure all fields are accounted for and none are
// duplicated.
const found_fields = try gpa.alloc(Zir.Inst.Index, resolved_ty.structFieldCount(mod));
defer gpa.free(found_fields);
// The init values to use for the struct instance.
const field_inits = try gpa.alloc(Air.Inst.Ref, resolved_ty.structFieldCount(mod));
defer gpa.free(field_inits);
@memset(field_inits, .none);
var field_i: u32 = 0;
var extra_index = extra.end;
const is_packed = resolved_ty.containerLayout(mod) == .Packed;
while (field_i < extra.data.fields_len) : (field_i += 1) {
const item = sema.code.extraData(Zir.Inst.StructInit.Item, extra_index);
extra_index = item.end;
const field_type_data = zir_datas[item.data.field_type].pl_node;
const field_src: LazySrcLoc = .{ .node_offset_initializer = field_type_data.src_node };
const field_type_extra = sema.code.extraData(Zir.Inst.FieldType, field_type_data.payload_index).data;
const field_name = sema.code.nullTerminatedString(field_type_extra.name_start);
const field_index = if (resolved_ty.isTuple(mod))
try sema.tupleFieldIndex(block, resolved_ty, field_name, field_src)
else
try sema.structFieldIndex(block, resolved_ty, field_name, field_src);
if (field_inits[field_index] != .none) {
const other_field_type = found_fields[field_index];
const other_field_type_data = zir_datas[other_field_type].pl_node;
const other_field_src: LazySrcLoc = .{ .node_offset_initializer = other_field_type_data.src_node };
const msg = msg: {
const msg = try sema.errMsg(block, field_src, "duplicate field", .{});
errdefer msg.destroy(gpa);
try sema.errNote(block, other_field_src, msg, "other field here", .{});
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
found_fields[field_index] = item.data.field_type;
field_inits[field_index] = try sema.resolveInst(item.data.init);
if (!is_packed) if (try resolved_ty.structFieldValueComptime(mod, field_index)) |default_value| {
const init_val = (try sema.resolveMaybeUndefVal(field_inits[field_index])) orelse {
return sema.failWithNeededComptime(block, field_src, "value stored in comptime field must be comptime-known");
};
if (!init_val.eql(default_value, resolved_ty.structFieldType(field_index, mod), sema.mod)) {
return sema.failWithInvalidComptimeFieldStore(block, field_src, resolved_ty, field_index);
}
};
}
return sema.finishStructInit(block, src, src, field_inits, resolved_ty, is_ref);
} else if (resolved_ty.zigTypeTag(mod) == .Union) {
if (extra.data.fields_len != 1) {
return sema.fail(block, src, "union initialization expects exactly one field", .{});
}
const item = sema.code.extraData(Zir.Inst.StructInit.Item, extra.end);
const field_type_data = zir_datas[item.data.field_type].pl_node;
const field_src: LazySrcLoc = .{ .node_offset_initializer = field_type_data.src_node };
const field_type_extra = sema.code.extraData(Zir.Inst.FieldType, field_type_data.payload_index).data;
const field_name = sema.code.nullTerminatedString(field_type_extra.name_start);
const field_index = try sema.unionFieldIndex(block, resolved_ty, field_name, field_src);
const tag_ty = resolved_ty.unionTagTypeHypothetical(mod);
const enum_field_index = @intCast(u32, tag_ty.enumFieldIndex(field_name, mod).?);
const tag_val = try mod.enumValueFieldIndex(tag_ty, enum_field_index);
const init_inst = try sema.resolveInst(item.data.init);
if (try sema.resolveMaybeUndefVal(init_inst)) |val| {
return sema.addConstantMaybeRef(
block,
resolved_ty,
try Value.Tag.@"union".create(sema.arena, .{ .tag = tag_val, .val = val }),
is_ref,
);
}
if (is_ref) {
const target = sema.mod.getTarget();
const alloc_ty = try Type.ptr(sema.arena, sema.mod, .{
.pointee_type = resolved_ty,
.@"addrspace" = target_util.defaultAddressSpace(target, .local),
});
const alloc = try block.addTy(.alloc, alloc_ty);
const field_ptr = try sema.unionFieldPtr(block, field_src, alloc, field_name, field_src, resolved_ty, true);
try sema.storePtr(block, src, field_ptr, init_inst);
const new_tag = try sema.addConstant(resolved_ty.unionTagTypeHypothetical(mod), tag_val);
_ = try block.addBinOp(.set_union_tag, alloc, new_tag);
return sema.makePtrConst(block, alloc);
}
try sema.requireRuntimeBlock(block, src, null);
try sema.queueFullTypeResolution(resolved_ty);
return block.addUnionInit(resolved_ty, field_index, init_inst);
} else if (resolved_ty.isAnonStruct(mod)) {
return sema.fail(block, src, "TODO anon struct init validation", .{});
}
unreachable;
}
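/// Finishes a struct initialization once the per-field init refs are known:
/// fills in default values, accumulates all missing-field errors into one
/// message, folds a fully comptime-known init to a constant, and otherwise
/// emits field-by-field stores (when `is_ref`) or an `aggregate_init`.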
fn finishStructInit(
sema: *Sema,
block: *Block,
init_src: LazySrcLoc,
dest_src: LazySrcLoc,
field_inits: []Air.Inst.Ref,
struct_ty: Type,
is_ref: bool,
) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const gpa = sema.gpa;
var root_msg: ?*Module.ErrorMsg = null;
errdefer if (root_msg) |msg| msg.destroy(sema.gpa);
switch (mod.intern_pool.indexToKey(struct_ty.ip_index)) {
.anon_struct_type => |anon_struct| {
for (anon_struct.types, anon_struct.values, 0..) |field_ty, default_val, i| {
if (field_inits[i] != .none) continue;
if (default_val == .none) {
if (anon_struct.names.len == 0) {
const template = "missing tuple field with index {d}";
if (root_msg) |msg| {
try sema.errNote(block, init_src, msg, template, .{i});
} else {
root_msg = try sema.errMsg(block, init_src, template, .{i});
}
} else {
const field_name = mod.intern_pool.stringToSlice(anon_struct.names[i]);
const template = "missing struct field: {s}";
const args = .{field_name};
if (root_msg) |msg| {
try sema.errNote(block, init_src, msg, template, args);
} else {
root_msg = try sema.errMsg(block, init_src, template, args);
}
}
} else {
field_inits[i] = try sema.addConstant(field_ty.toType(), default_val.toValue());
}
}
},
.struct_type => |struct_type| {
const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
for (struct_obj.fields.values(), 0..) |field, i| {
if (field_inits[i] != .none) continue;
if (field.default_val.ip_index == .unreachable_value) {
const field_name = struct_obj.fields.keys()[i];
const template = "missing struct field: {s}";
const args = .{field_name};
if (root_msg) |msg| {
try sema.errNote(block, init_src, msg, template, args);
} else {
root_msg = try sema.errMsg(block, init_src, template, args);
}
} else {
field_inits[i] = try sema.addConstant(field.ty, field.default_val);
}
}
},
else => unreachable,
}
if (root_msg) |msg| {
if (mod.typeToStruct(struct_ty)) |struct_obj| {
const fqn = try struct_obj.getFullyQualifiedName(sema.mod);
defer gpa.free(fqn);
try sema.mod.errNoteNonLazy(
struct_obj.srcLoc(sema.mod),
msg,
"struct '{s}' declared here",
.{fqn},
);
}
root_msg = null;
return sema.failWithOwnedErrorMsg(msg);
}
// Find which field forces the expression to be runtime, if any.
const opt_runtime_index = for (field_inits, 0..) |field_init, i| {
if (!(try sema.isComptimeKnown(field_init))) {
break i;
}
} else null;
const runtime_index = opt_runtime_index orelse {
const elems = try sema.arena.alloc(InternPool.Index, field_inits.len);
for (elems, field_inits, 0..) |*elem, field_init, field_i| {
elem.* = try (sema.resolveMaybeUndefVal(field_init) catch unreachable).?
.intern(struct_ty.structFieldType(field_i, mod), mod);
}
const struct_val = try mod.intern(.{ .aggregate = .{
.ty = struct_ty.ip_index,
.storage = .{ .elems = elems },
} });
return sema.addConstantMaybeRef(block, struct_ty, struct_val.toValue(), is_ref);
};
if (is_ref) {
try sema.resolveStructLayout(struct_ty);
const target = sema.mod.getTarget();
const alloc_ty = try Type.ptr(sema.arena, sema.mod, .{
.pointee_type = struct_ty,
.@"addrspace" = target_util.defaultAddressSpace(target, .local),
});
const alloc = try block.addTy(.alloc, alloc_ty);
for (field_inits, 0..) |field_init, i_usize| {
const i = @intCast(u32, i_usize);
const field_src = dest_src;
const field_ptr = try sema.structFieldPtrByIndex(block, dest_src, alloc, i, field_src, struct_ty, true);
try sema.storePtr(block, dest_src, field_ptr, field_init);
}
return sema.makePtrConst(block, alloc);
}
sema.requireRuntimeBlock(block, .unneeded, null) catch |err| switch (err) {
error.NeededSourceLocation => {
const decl = mod.declPtr(block.src_decl);
const field_src = mod.initSrc(dest_src.node_offset.x, decl, runtime_index);
try sema.requireRuntimeBlock(block, dest_src, field_src);
unreachable;
},
else => |e| return e,
};
try sema.queueFullTypeResolution(struct_ty);
return block.addAggregateInit(struct_ty, field_inits);
}
fn zirStructInitAnon(
sema: *Sema,
block: *Block,
inst: Zir.Inst.Index,
is_ref: bool,
) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const gpa = sema.gpa;
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
const extra = sema.code.extraData(Zir.Inst.StructInitAnon, inst_data.payload_index);
const types = try sema.arena.alloc(InternPool.Index, extra.data.fields_len);
const values = try sema.arena.alloc(InternPool.Index, types.len);
var fields = std.AutoArrayHashMap(InternPool.NullTerminatedString, u32).init(sema.arena);
try fields.ensureUnusedCapacity(types.len);
// Find which field forces the expression to be runtime, if any.
const opt_runtime_index = rs: {
var runtime_index: ?usize = null;
var extra_index = extra.end;
for (types, 0..) |*field_ty, i_usize| {
const i = @intCast(u32, i_usize);
const item = sema.code.extraData(Zir.Inst.StructInitAnon.Item, extra_index);
extra_index = item.end;
const name = sema.code.nullTerminatedString(item.data.field_name);
const name_ip = try mod.intern_pool.getOrPutString(gpa, name);
const gop = fields.getOrPutAssumeCapacity(name_ip);
if (gop.found_existing) {
const msg = msg: {
const decl = sema.mod.declPtr(block.src_decl);
const field_src = mod.initSrc(src.node_offset.x, decl, i);
const msg = try sema.errMsg(block, field_src, "duplicate field", .{});
errdefer msg.destroy(gpa);
const prev_source = mod.initSrc(src.node_offset.x, decl, gop.value_ptr.*);
try sema.errNote(block, prev_source, msg, "other field here", .{});
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
gop.value_ptr.* = i;
const init = try sema.resolveInst(item.data.init);
field_ty.* = sema.typeOf(init).ip_index;
if (field_ty.toType().zigTypeTag(mod) == .Opaque) {
const msg = msg: {
const decl = sema.mod.declPtr(block.src_decl);
const field_src = mod.initSrc(src.node_offset.x, decl, i);
const msg = try sema.errMsg(block, field_src, "opaque types have unknown size and therefore cannot be directly embedded in structs", .{});
errdefer msg.destroy(sema.gpa);
try sema.addDeclaredHereNote(msg, field_ty.toType());
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
if (try sema.resolveMaybeUndefVal(init)) |init_val| {
values[i] = try init_val.intern(field_ty.toType(), mod);
} else {
values[i] = .none;
runtime_index = i;
}
}
break :rs runtime_index;
};
const tuple_ty = try mod.intern(.{ .anon_struct_type = .{
.names = fields.keys(),
.types = types,
.values = values,
} });
const runtime_index = opt_runtime_index orelse {
const tuple_val = try mod.intern(.{ .aggregate = .{
.ty = tuple_ty,
.storage = .{ .elems = values },
} });
return sema.addConstantMaybeRef(block, tuple_ty.toType(), tuple_val.toValue(), is_ref);
};
sema.requireRuntimeBlock(block, .unneeded, null) catch |err| switch (err) {
error.NeededSourceLocation => {
const decl = sema.mod.declPtr(block.src_decl);
const field_src = mod.initSrc(src.node_offset.x, decl, runtime_index);
try sema.requireRuntimeBlock(block, src, field_src);
unreachable;
},
else => |e| return e,
};
if (is_ref) {
const target = sema.mod.getTarget();
const alloc_ty = try Type.ptr(sema.arena, sema.mod, .{
.pointee_type = tuple_ty.toType(),
.@"addrspace" = target_util.defaultAddressSpace(target, .local),
});
const alloc = try block.addTy(.alloc, alloc_ty);
var extra_index = extra.end;
for (types, 0..) |field_ty, i_usize| {
const i = @intCast(u32, i_usize);
const item = sema.code.extraData(Zir.Inst.StructInitAnon.Item, extra_index);
extra_index = item.end;
const field_ptr_ty = try Type.ptr(sema.arena, sema.mod, .{
.mutable = true,
.@"addrspace" = target_util.defaultAddressSpace(target, .local),
.pointee_type = field_ty.toType(),
});
if (values[i] == .none) {
const init = try sema.resolveInst(item.data.init);
const field_ptr = try block.addStructFieldPtr(alloc, i, field_ptr_ty);
_ = try block.addBinOp(.store, field_ptr, init);
}
}
return sema.makePtrConst(block, alloc);
}
const element_refs = try sema.arena.alloc(Air.Inst.Ref, types.len);
var extra_index = extra.end;
for (types, 0..) |_, i| {
const item = sema.code.extraData(Zir.Inst.StructInitAnon.Item, extra_index);
extra_index = item.end;
element_refs[i] = try sema.resolveInst(item.data.init);
}
return block.addAggregateInit(tuple_ty.toType(), element_refs);
}
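/// Analyzes an array/tuple/vector initializer with elements, e.g.
/// (illustrative) `[_]u8{ 1, 2, 3 }`: coerces each element, appends the
/// sentinel when the type has one, and folds to a constant when every
/// element is comptime-known.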
fn zirArrayInit(
sema: *Sema,
block: *Block,
inst: Zir.Inst.Index,
is_ref: bool,
) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const gpa = sema.gpa;
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
const extra = sema.code.extraData(Zir.Inst.MultiOp, inst_data.payload_index);
const args = sema.code.refSlice(extra.end, extra.data.operands_len);
assert(args.len >= 2); // array_ty + at least one element
const array_ty = try sema.resolveType(block, src, args[0]);
const sentinel_val = array_ty.sentinel(mod);
const resolved_args = try gpa.alloc(Air.Inst.Ref, args.len - 1 + @boolToInt(sentinel_val != null));
defer gpa.free(resolved_args);
for (args[1..], 0..) |arg, i| {
const resolved_arg = try sema.resolveInst(arg);
const elem_ty = if (array_ty.zigTypeTag(mod) == .Struct)
array_ty.structFieldType(i, mod)
else
array_ty.elemType2(mod);
resolved_args[i] = sema.coerce(block, elem_ty, resolved_arg, .unneeded) catch |err| switch (err) {
error.NeededSourceLocation => {
const decl = sema.mod.declPtr(block.src_decl);
const elem_src = mod.initSrc(src.node_offset.x, decl, i);
_ = try sema.coerce(block, elem_ty, resolved_arg, elem_src);
unreachable;
},
else => return err,
};
}
if (sentinel_val) |some| {
resolved_args[resolved_args.len - 1] = try sema.addConstant(array_ty.elemType2(mod), some);
}
const opt_runtime_index: ?u32 = for (resolved_args, 0..) |arg, i| {
const comptime_known = try sema.isComptimeKnown(arg);
if (!comptime_known) break @intCast(u32, i);
} else null;
const runtime_index = opt_runtime_index orelse {
const elem_vals = try sema.arena.alloc(Value, resolved_args.len);
for (resolved_args, 0..) |arg, i| {
// All args were verified comptime-known above.
elem_vals[i] = (sema.resolveMaybeUndefVal(arg) catch unreachable).?;
}
const array_val = try Value.Tag.aggregate.create(sema.arena, elem_vals);
return sema.addConstantMaybeRef(block, array_ty, array_val, is_ref);
};
sema.requireRuntimeBlock(block, .unneeded, null) catch |err| switch (err) {
error.NeededSourceLocation => {
const decl = sema.mod.declPtr(block.src_decl);
const elem_src = mod.initSrc(src.node_offset.x, decl, runtime_index);
try sema.requireRuntimeBlock(block, src, elem_src);
unreachable;
},
else => return err,
};
try sema.queueFullTypeResolution(array_ty);
if (is_ref) {
const target = sema.mod.getTarget();
const alloc_ty = try Type.ptr(sema.arena, sema.mod, .{
.pointee_type = array_ty,
.@"addrspace" = target_util.defaultAddressSpace(target, .local),
});
const alloc = try block.addTy(.alloc, alloc_ty);
if (array_ty.isTuple(mod)) {
for (resolved_args, 0..) |arg, i| {
const elem_ptr_ty = try Type.ptr(sema.arena, sema.mod, .{
.mutable = true,
.@"addrspace" = target_util.defaultAddressSpace(target, .local),
.pointee_type = array_ty.structFieldType(i, mod),
});
const elem_ptr_ty_ref = try sema.addType(elem_ptr_ty);
const index = try sema.addIntUnsigned(Type.usize, i);
const elem_ptr = try block.addPtrElemPtrTypeRef(alloc, index, elem_ptr_ty_ref);
_ = try block.addBinOp(.store, elem_ptr, arg);
}
return sema.makePtrConst(block, alloc);
}
const elem_ptr_ty = try Type.ptr(sema.arena, sema.mod, .{
.mutable = true,
.@"addrspace" = target_util.defaultAddressSpace(target, .local),
.pointee_type = array_ty.elemType2(mod),
});
const elem_ptr_ty_ref = try sema.addType(elem_ptr_ty);
for (resolved_args, 0..) |arg, i| {
const index = try sema.addIntUnsigned(Type.usize, i);
const elem_ptr = try block.addPtrElemPtrTypeRef(alloc, index, elem_ptr_ty_ref);
_ = try block.addBinOp(.store, elem_ptr, arg);
}
return sema.makePtrConst(block, alloc);
}
return block.addAggregateInit(array_ty, resolved_args);
}
fn zirArrayInitAnon(
sema: *Sema,
block: *Block,
inst: Zir.Inst.Index,
is_ref: bool,
) CompileError!Air.Inst.Ref {
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
const extra = sema.code.extraData(Zir.Inst.MultiOp, inst_data.payload_index);
const operands = sema.code.refSlice(extra.end, extra.data.operands_len);
const mod = sema.mod;
const types = try sema.arena.alloc(InternPool.Index, operands.len);
const values = try sema.arena.alloc(InternPool.Index, operands.len);
const opt_runtime_src = rs: {
var runtime_src: ?LazySrcLoc = null;
for (operands, 0..) |operand, i| {
const operand_src = src; // TODO better source location
const elem = try sema.resolveInst(operand);
types[i] = sema.typeOf(elem).ip_index;
if (types[i].toType().zigTypeTag(mod) == .Opaque) {
const msg = msg: {
const msg = try sema.errMsg(block, operand_src, "opaque types have unknown size and therefore cannot be directly embedded in structs", .{});
errdefer msg.destroy(sema.gpa);
try sema.addDeclaredHereNote(msg, types[i].toType());
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
if (try sema.resolveMaybeUndefVal(elem)) |val| {
values[i] = val.ip_index;
} else {
values[i] = .none;
runtime_src = operand_src;
}
}
break :rs runtime_src;
};
const tuple_ty = try mod.intern(.{ .anon_struct_type = .{
.types = types,
.values = values,
.names = &.{},
} });
const runtime_src = opt_runtime_src orelse {
const tuple_val = try mod.intern(.{ .aggregate = .{
.ty = tuple_ty,
.storage = .{ .elems = values },
} });
return sema.addConstantMaybeRef(block, tuple_ty.toType(), tuple_val.toValue(), is_ref);
};
try sema.requireRuntimeBlock(block, src, runtime_src);
if (is_ref) {
const target = sema.mod.getTarget();
const alloc_ty = try Type.ptr(sema.arena, sema.mod, .{
.pointee_type = tuple_ty.toType(),
.@"addrspace" = target_util.defaultAddressSpace(target, .local),
});
const alloc = try block.addTy(.alloc, alloc_ty);
for (operands, 0..) |operand, i_usize| {
const i = @intCast(u32, i_usize);
const field_ptr_ty = try Type.ptr(sema.arena, sema.mod, .{
.mutable = true,
.@"addrspace" = target_util.defaultAddressSpace(target, .local),
.pointee_type = types[i].toType(),
});
if (values[i] == .none) {
const field_ptr = try block.addStructFieldPtr(alloc, i, field_ptr_ty);
_ = try block.addBinOp(.store, field_ptr, try sema.resolveInst(operand));
}
}
return sema.makePtrConst(block, alloc);
}
const element_refs = try sema.arena.alloc(Air.Inst.Ref, operands.len);
for (operands, 0..) |operand, i| {
element_refs[i] = try sema.resolveInst(operand);
}
return block.addAggregateInit(tuple_ty.toType(), element_refs);
}
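/// Returns the value as a constant, or, when `is_ref` is set, a reference to
/// an anonymous decl holding a copy of it.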
fn addConstantMaybeRef(
sema: *Sema,
block: *Block,
ty: Type,
val: Value,
is_ref: bool,
) !Air.Inst.Ref {
if (!is_ref) return sema.addConstant(ty, val);
var anon_decl = try block.startAnonDecl();
defer anon_decl.deinit();
const decl = try anon_decl.finish(
ty,
try val.copy(anon_decl.arena()),
0, // default alignment
);
return sema.analyzeDeclRef(decl);
}
fn zirFieldTypeRef(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const extra = sema.code.extraData(Zir.Inst.FieldTypeRef, inst_data.payload_index).data;
const ty_src = inst_data.src();
const field_src = inst_data.src();
const aggregate_ty = try sema.resolveType(block, ty_src, extra.container_type);
const field_name = try sema.resolveConstString(block, field_src, extra.field_name, "field name must be comptime-known");
return sema.fieldType(block, aggregate_ty, field_name, field_src, ty_src);
}
fn zirFieldType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const extra = sema.code.extraData(Zir.Inst.FieldType, inst_data.payload_index).data;
const ty_src = inst_data.src();
const field_name_src: LazySrcLoc = .{ .node_offset_field_name = inst_data.src_node };
const aggregate_ty = sema.resolveType(block, ty_src, extra.container_type) catch |err| switch (err) {
// Since this is a ZIR instruction that returns a type, encountering
// generic poison should not result in a failed compilation, but the
// generic poison type. This prevents unnecessary failures when
// constructing types at compile-time.
error.GenericPoison => return Air.Inst.Ref.generic_poison_type,
else => |e| return e,
};
const field_name = sema.code.nullTerminatedString(extra.name_start);
return sema.fieldType(block, aggregate_ty, field_name, field_name_src, ty_src);
}
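/// Resolves the type of field `field_name` within `aggregate_ty`, looking
/// through optionals and error unions so that init syntax works on wrapped
/// struct/union types.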
fn fieldType(
sema: *Sema,
block: *Block,
aggregate_ty: Type,
field_name: []const u8,
field_src: LazySrcLoc,
ty_src: LazySrcLoc,
) CompileError!Air.Inst.Ref {
const mod = sema.mod;
var cur_ty = aggregate_ty;
while (true) {
const resolved_ty = try sema.resolveTypeFields(cur_ty);
cur_ty = resolved_ty;
switch (cur_ty.zigTypeTag(mod)) {
.Struct => switch (mod.intern_pool.indexToKey(cur_ty.ip_index)) {
.anon_struct_type => |anon_struct| {
const field_index = try sema.anonStructFieldIndex(block, cur_ty, field_name, field_src);
return sema.addType(anon_struct.types[field_index].toType());
},
.struct_type => |struct_type| {
const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
const field = struct_obj.fields.get(field_name) orelse
return sema.failWithBadStructFieldAccess(block, struct_obj, field_src, field_name);
return sema.addType(field.ty);
},
else => unreachable,
},
.Union => {
const union_obj = mod.typeToUnion(cur_ty).?;
const field = union_obj.fields.get(field_name) orelse
return sema.failWithBadUnionFieldAccess(block, union_obj, field_src, field_name);
return sema.addType(field.ty);
},
.Optional => {
// Struct/array init through an optional requires the child type to not be
// a pointer. If the child of the optional is a pointer, the next loop
// iteration will report the error.
cur_ty = mod.intern_pool.indexToKey(cur_ty.ip_index).opt_type.toType();
continue;
},
.ErrorUnion => {
cur_ty = cur_ty.errorUnionPayload(mod);
continue;
},
else => {},
}
return sema.fail(block, ty_src, "expected struct or union; found '{}'", .{
resolved_ty.fmt(sema.mod),
});
}
}
fn zirErrorReturnTrace(sema: *Sema, block: *Block) CompileError!Air.Inst.Ref {
return sema.getErrorReturnTrace(block);
}
fn getErrorReturnTrace(sema: *Sema, block: *Block) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const unresolved_stack_trace_ty = try sema.getBuiltinType("StackTrace");
const stack_trace_ty = try sema.resolveTypeFields(unresolved_stack_trace_ty);
const ptr_stack_trace_ty = try mod.singleMutPtrType(stack_trace_ty);
const opt_ptr_stack_trace_ty = try Type.optional(sema.arena, ptr_stack_trace_ty, mod);
if (sema.owner_func != null and
sema.owner_func.?.calls_or_awaits_errorable_fn and
mod.comp.bin_file.options.error_return_tracing and
mod.backendSupportsFeature(.error_return_trace))
{
return block.addTy(.err_return_trace, opt_ptr_stack_trace_ty);
}
return sema.addConstant(opt_ptr_stack_trace_ty, Value.null);
}
fn zirFrame(
sema: *Sema,
block: *Block,
extended: Zir.Inst.Extended.InstData,
) CompileError!Air.Inst.Ref {
const src = LazySrcLoc.nodeOffset(@bitCast(i32, extended.operand));
return sema.failWithUseOfAsync(block, src);
}
fn zirAlignOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
const ty = try sema.resolveType(block, operand_src, inst_data.operand);
if (ty.isNoReturn(mod)) {
return sema.fail(block, operand_src, "no align available for type '{}'", .{ty.fmt(sema.mod)});
}
const val = try ty.lazyAbiAlignment(mod, sema.arena);
if (val.isLazyAlign()) {
try sema.queueFullTypeResolution(ty);
}
return sema.addConstant(Type.comptime_int, val);
}
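// A minimal sketch (hypothetical user code); the result is a comptime_int
// whose value is target-dependent, which is why lazy alignment values queue
// full type resolution above:
//
//   comptime {
//       _ = @alignOf(u32); // e.g. 4 on most targets
//   }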
fn zirBoolToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const operand = try sema.resolveInst(inst_data.operand);
if (try sema.resolveMaybeUndefVal(operand)) |val| {
if (val.isUndef(mod)) return sema.addConstUndef(Type.u1);
if (val.toBool(mod)) return sema.addConstant(Type.u1, try mod.intValue(Type.u1, 1));
return sema.addConstant(Type.u1, try mod.intValue(Type.u1, 0));
}
return block.addUnOp(.bool_to_int, operand);
}
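// A minimal sketch (hypothetical user code); a comptime-known operand folds
// to a constant u1, otherwise a `bool_to_int` AIR instruction is emitted:
//
//   const one: u1 = @boolToInt(true); // 1
//   const zero: u1 = @boolToInt(false); // 0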
fn zirErrorName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const operand = try sema.resolveInst(inst_data.operand);
const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
if (try sema.resolveDefinedValue(block, operand_src, operand)) |val| {
const bytes = val.castTag(.@"error").?.data.name;
return sema.addStrLit(block, bytes);
}
// Similar to zirTagName, we have a special AIR instruction for the error name in case an
// optimization pass might be able to resolve the result at compile time.
return block.addUnOp(.error_name, operand);
}
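// A minimal sketch (hypothetical user code); a comptime-known error folds to
// a string literal, while runtime values lower to the `error_name`
// instruction:
//
//   const std = @import("std");
//   comptime {
//       std.debug.assert(std.mem.eql(u8, @errorName(error.FileNotFound), "FileNotFound"));
//   }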
fn zirUnaryMath(
sema: *Sema,
block: *Block,
inst: Zir.Inst.Index,
air_tag: Air.Inst.Tag,
comptime eval: fn (Value, Type, Allocator, *Module) Allocator.Error!Value,
) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const operand = try sema.resolveInst(inst_data.operand);
const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
const operand_ty = sema.typeOf(operand);
switch (operand_ty.zigTypeTag(mod)) {
.ComptimeFloat, .Float => {},
.Vector => {
const scalar_ty = operand_ty.scalarType(mod);
switch (scalar_ty.zigTypeTag(mod)) {
.ComptimeFloat, .Float => {},
else => return sema.fail(block, operand_src, "expected vector of floats or float type, found '{}'", .{scalar_ty.fmt(sema.mod)}),
}
},
else => return sema.fail(block, operand_src, "expected vector of floats or float type, found '{}'", .{operand_ty.fmt(sema.mod)}),
}
switch (operand_ty.zigTypeTag(mod)) {
.Vector => {
const scalar_ty = operand_ty.scalarType(mod);
const vec_len = operand_ty.vectorLen(mod);
const result_ty = try mod.vectorType(.{
.len = vec_len,
.child = scalar_ty.ip_index,
});
if (try sema.resolveMaybeUndefVal(operand)) |val| {
if (val.isUndef(mod))
return sema.addConstUndef(result_ty);
const elems = try sema.arena.alloc(Value, vec_len);
for (elems, 0..) |*elem, i| {
const elem_val = try val.elemValue(sema.mod, i);
elem.* = try eval(elem_val, scalar_ty, sema.arena, sema.mod);
}
return sema.addConstant(
result_ty,
try Value.Tag.aggregate.create(sema.arena, elems),
);
}
try sema.requireRuntimeBlock(block, operand_src, null);
return block.addUnOp(air_tag, operand);
},
.ComptimeFloat, .Float => {
if (try sema.resolveMaybeUndefVal(operand)) |operand_val| {
if (operand_val.isUndef(mod))
return sema.addConstUndef(operand_ty);
const result_val = try eval(operand_val, operand_ty, sema.arena, sema.mod);
return sema.addConstant(operand_ty, result_val);
}
try sema.requireRuntimeBlock(block, operand_src, null);
return block.addUnOp(air_tag, operand);
},
else => unreachable,
}
}
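// A minimal sketch (hypothetical user code) of the float-only unary math
// builtins routed through here; vector operands are evaluated element-wise:
//
//   const v: @Vector(4, f32) = .{ 1.0, 4.0, 9.0, 16.0 };
//   const r = @sqrt(v); // .{ 1.0, 2.0, 3.0, 4.0 }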
fn zirTagName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
const src = inst_data.src();
const operand = try sema.resolveInst(inst_data.operand);
const operand_ty = sema.typeOf(operand);
const mod = sema.mod;
try sema.resolveTypeLayout(operand_ty);
const enum_ty = switch (operand_ty.zigTypeTag(mod)) {
.EnumLiteral => {
const val = try sema.resolveConstValue(block, .unneeded, operand, "");
const bytes = val.castTag(.enum_literal).?.data;
return sema.addStrLit(block, bytes);
},
.Enum => operand_ty,
.Union => operand_ty.unionTagType(mod) orelse {
const msg = msg: {
const msg = try sema.errMsg(block, src, "union '{}' is untagged", .{
operand_ty.fmt(sema.mod),
});
errdefer msg.destroy(sema.gpa);
try sema.addDeclaredHereNote(msg, operand_ty);
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
},
else => return sema.fail(block, operand_src, "expected enum or union; found '{}'", .{
operand_ty.fmt(mod),
}),
};
if (enum_ty.enumFieldCount(mod) == 0) {
// TODO I don't think this is the correct way to handle this but
// it prevents a crash.
return sema.fail(block, operand_src, "cannot get @tagName of empty enum '{}'", .{
enum_ty.fmt(mod),
});
}
const enum_decl_index = enum_ty.getOwnerDecl(mod);
const casted_operand = try sema.coerce(block, enum_ty, operand, operand_src);
if (try sema.resolveDefinedValue(block, operand_src, casted_operand)) |val| {
const field_index = enum_ty.enumTagFieldIndex(val, mod) orelse {
const enum_decl = mod.declPtr(enum_decl_index);
const msg = msg: {
const msg = try sema.errMsg(block, src, "no field with value '{}' in enum '{s}'", .{
val.fmtValue(enum_ty, sema.mod), enum_decl.name,
});
errdefer msg.destroy(sema.gpa);
try mod.errNoteNonLazy(enum_decl.srcLoc(mod), msg, "declared here", .{});
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
};
const field_name = enum_ty.enumFieldName(field_index, mod);
return sema.addStrLit(block, field_name);
}
try sema.requireRuntimeBlock(block, src, operand_src);
if (block.wantSafety() and sema.mod.backendSupportsFeature(.is_named_enum_value)) {
const ok = try block.addUnOp(.is_named_enum_value, casted_operand);
try sema.addSafetyCheck(block, ok, .invalid_enum_value);
}
// In case the value is runtime-known, we have an AIR instruction for this instead
// of trying to lower it in Sema because an optimization pass may result in the operand
// being comptime-known, which would let us elide the `tag_name` AIR instruction.
return block.addUnOp(.tag_name, casted_operand);
}
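// A minimal sketch (hypothetical user code); a comptime-known tag folds to a
// string, while runtime values use the `tag_name` instruction plus the
// `is_named_enum_value` safety check where supported:
//
//   const std = @import("std");
//   const E = enum { a, b };
//   comptime {
//       std.debug.assert(std.mem.eql(u8, @tagName(E.a), "a"));
//   }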
fn zirReify(
sema: *Sema,
block: *Block,
extended: Zir.Inst.Extended.InstData,
inst: Zir.Inst.Index,
) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const gpa = sema.gpa;
const name_strategy = @intToEnum(Zir.Inst.NameStrategy, extended.small);
const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
const src = LazySrcLoc.nodeOffset(extra.node);
const type_info_ty = try sema.getBuiltinType("Type");
const uncasted_operand = try sema.resolveInst(extra.operand);
const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
const type_info = try sema.coerce(block, type_info_ty, uncasted_operand, operand_src);
const val = try sema.resolveConstValue(block, operand_src, type_info, "operand to @Type must be comptime-known");
const union_val = val.cast(Value.Payload.Union).?.data;
const target = mod.getTarget();
const tag_index = type_info_ty.unionTagFieldIndex(union_val.tag, mod).?;
if (try union_val.val.anyUndef(mod)) return sema.failWithUseOfUndef(block, src);
const ip = &mod.intern_pool;
switch (@intToEnum(std.builtin.TypeId, tag_index)) {
.Type => return Air.Inst.Ref.type_type,
.Void => return Air.Inst.Ref.void_type,
.Bool => return Air.Inst.Ref.bool_type,
.NoReturn => return Air.Inst.Ref.noreturn_type,
.ComptimeFloat => return Air.Inst.Ref.comptime_float_type,
.ComptimeInt => return Air.Inst.Ref.comptime_int_type,
.Undefined => return Air.Inst.Ref.undefined_type,
.Null => return Air.Inst.Ref.null_type,
.AnyFrame => return sema.failWithUseOfAsync(block, src),
.EnumLiteral => return Air.Inst.Ref.enum_literal_type,
.Int => {
const fields = ip.typeOf(union_val.val.ip_index).toType().structFields(mod);
const signedness_index = fields.getIndex("signedness").?;
const bits_index = fields.getIndex("bits").?;
const signedness_val = try union_val.val.fieldValue(fields.values()[signedness_index].ty, mod, signedness_index);
const bits_val = try union_val.val.fieldValue(fields.values()[bits_index].ty, mod, bits_index);
const signedness = mod.toEnum(std.builtin.Signedness, signedness_val);
const bits = @intCast(u16, bits_val.toUnsignedInt(mod));
const ty = try mod.intType(signedness, bits);
return sema.addType(ty);
},
.Vector => {
const fields = ip.typeOf(union_val.val.ip_index).toType().structFields(mod);
const len_index = fields.getIndex("len").?;
const child_index = fields.getIndex("child").?;
const len_val = try union_val.val.fieldValue(fields.values()[len_index].ty, mod, len_index);
const child_val = try union_val.val.fieldValue(fields.values()[child_index].ty, mod, child_index);
const len = @intCast(u32, len_val.toUnsignedInt(mod));
const child_ty = child_val.toType();
try sema.checkVectorElemType(block, src, child_ty);
const ty = try mod.vectorType(.{
.len = len,
.child = child_ty.ip_index,
});
return sema.addType(ty);
},
.Float => {
const fields = ip.typeOf(union_val.val.ip_index).toType().structFields(mod);
const bits_index = fields.getIndex("bits").?;
const bits_val = try union_val.val.fieldValue(fields.values()[bits_index].ty, mod, bits_index);
const bits = @intCast(u16, bits_val.toUnsignedInt(mod));
const ty = switch (bits) {
16 => Type.f16,
32 => Type.f32,
64 => Type.f64,
80 => Type.f80,
128 => Type.f128,
else => return sema.fail(block, src, "{}-bit float unsupported", .{bits}),
};
return sema.addType(ty);
},
.Pointer => {
const fields = ip.typeOf(union_val.val.ip_index).toType().structFields(mod);
const size_index = fields.getIndex("size").?;
const is_const_index = fields.getIndex("is_const").?;
const is_volatile_index = fields.getIndex("is_volatile").?;
const alignment_index = fields.getIndex("alignment").?;
const address_space_index = fields.getIndex("address_space").?;
const child_index = fields.getIndex("child").?;
const is_allowzero_index = fields.getIndex("is_allowzero").?;
const sentinel_index = fields.getIndex("sentinel").?;
const size_val = try union_val.val.fieldValue(fields.values()[size_index].ty, mod, size_index);
const is_const_val = try union_val.val.fieldValue(fields.values()[is_const_index].ty, mod, is_const_index);
const is_volatile_val = try union_val.val.fieldValue(fields.values()[is_volatile_index].ty, mod, is_volatile_index);
const alignment_val = try union_val.val.fieldValue(fields.values()[alignment_index].ty, mod, alignment_index);
const address_space_val = try union_val.val.fieldValue(fields.values()[address_space_index].ty, mod, address_space_index);
const child_val = try union_val.val.fieldValue(fields.values()[child_index].ty, mod, child_index);
const is_allowzero_val = try union_val.val.fieldValue(fields.values()[is_allowzero_index].ty, mod, is_allowzero_index);
const sentinel_val = try union_val.val.fieldValue(fields.values()[sentinel_index].ty, mod, sentinel_index);
if (!try sema.intFitsInType(alignment_val, Type.u32, null)) {
return sema.fail(block, src, "alignment must fit in 'u32'", .{});
}
const abi_align = InternPool.Alignment.fromByteUnits(
(try alignment_val.getUnsignedIntAdvanced(mod, sema)).?,
);
const unresolved_elem_ty = child_val.toType();
const elem_ty = if (abi_align == .none)
unresolved_elem_ty
else t: {
const elem_ty = try sema.resolveTypeFields(unresolved_elem_ty);
try sema.resolveTypeLayout(elem_ty);
break :t elem_ty;
};
const ptr_size = mod.toEnum(std.builtin.Type.Pointer.Size, size_val);
const actual_sentinel: InternPool.Index = s: {
if (!sentinel_val.isNull(mod)) {
if (ptr_size == .One or ptr_size == .C) {
return sema.fail(block, src, "sentinels are only allowed on slices and unknown-length pointers", .{});
}
const sentinel_ptr_val = sentinel_val.castTag(.opt_payload).?.data;
const ptr_ty = try Type.ptr(sema.arena, mod, .{
.@"addrspace" = .generic,
.pointee_type = elem_ty,
});
const sent_val = (try sema.pointerDeref(block, src, sentinel_ptr_val, ptr_ty)).?;
break :s sent_val.toIntern();
}
break :s .none;
};
if (elem_ty.zigTypeTag(mod) == .NoReturn) {
return sema.fail(block, src, "pointer to noreturn not allowed", .{});
} else if (elem_ty.zigTypeTag(mod) == .Fn) {
if (ptr_size != .One) {
return sema.fail(block, src, "function pointers must be single pointers", .{});
}
const fn_align = mod.typeToFunc(elem_ty).?.alignment;
if (abi_align != .none and fn_align != .none and
abi_align != fn_align)
{
return sema.fail(block, src, "function pointer alignment disagrees with function alignment", .{});
}
} else if (ptr_size == .Many and elem_ty.zigTypeTag(mod) == .Opaque) {
return sema.fail(block, src, "unknown-length pointer to opaque not allowed", .{});
} else if (ptr_size == .C) {
if (!try sema.validateExternType(elem_ty, .other)) {
const msg = msg: {
const msg = try sema.errMsg(block, src, "C pointers cannot point to non-C-ABI-compatible type '{}'", .{elem_ty.fmt(mod)});
errdefer msg.destroy(gpa);
const src_decl = mod.declPtr(block.src_decl);
try sema.explainWhyTypeIsNotExtern(msg, src.toSrcLoc(src_decl, mod), elem_ty, .other);
try sema.addDeclaredHereNote(msg, elem_ty);
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
if (elem_ty.zigTypeTag(mod) == .Opaque) {
return sema.fail(block, src, "C pointers cannot point to opaque types", .{});
}
}
const ty = try mod.ptrType(.{
.size = ptr_size,
.is_const = is_const_val.toBool(mod),
.is_volatile = is_volatile_val.toBool(mod),
.alignment = abi_align,
.address_space = mod.toEnum(std.builtin.AddressSpace, address_space_val),
.elem_type = elem_ty.toIntern(),
.is_allowzero = is_allowzero_val.toBool(mod),
.sentinel = actual_sentinel,
});
return sema.addType(ty);
},
.Array => {
const fields = ip.typeOf(union_val.val.ip_index).toType().structFields(mod);
const len_index = fields.getIndex("len").?;
const child_index = fields.getIndex("child").?;
const sentinel_index = fields.getIndex("sentinel").?;
const len_val = try union_val.val.fieldValue(fields.values()[len_index].ty, mod, len_index);
const child_val = try union_val.val.fieldValue(fields.values()[child_index].ty, mod, child_index);
const sentinel_val = try union_val.val.fieldValue(fields.values()[sentinel_index].ty, mod, sentinel_index);
const len = len_val.toUnsignedInt(mod);
const child_ty = child_val.toType();
const sentinel = if (sentinel_val.castTag(.opt_payload)) |p| blk: {
const ptr_ty = try Type.ptr(sema.arena, mod, .{
.@"addrspace" = .generic,
.pointee_type = child_ty,
});
break :blk (try sema.pointerDeref(block, src, p.data, ptr_ty)).?;
} else null;
const ty = try Type.array(sema.arena, len, sentinel, child_ty, mod);
return sema.addType(ty);
},
.Optional => {
const fields = ip.typeOf(union_val.val.ip_index).toType().structFields(mod);
const child_index = fields.getIndex("child").?;
const child_val = try union_val.val.fieldValue(fields.values()[child_index].ty, mod, child_index);
const child_ty = child_val.toType();
const ty = try Type.optional(sema.arena, child_ty, mod);
return sema.addType(ty);
},
.ErrorUnion => {
const fields = ip.typeOf(union_val.val.ip_index).toType().structFields(mod);
const error_set_index = fields.getIndex("error_set").?;
const payload_index = fields.getIndex("payload").?;
const error_set_val = try union_val.val.fieldValue(fields.values()[error_set_index].ty, mod, error_set_index);
const payload_val = try union_val.val.fieldValue(fields.values()[payload_index].ty, mod, payload_index);
const error_set_ty = error_set_val.toType();
const payload_ty = payload_val.toType();
if (error_set_ty.zigTypeTag(mod) != .ErrorSet) {
return sema.fail(block, src, "Type.ErrorUnion.error_set must be an error set type", .{});
}
const ty = try mod.errorUnionType(error_set_ty, payload_ty);
return sema.addType(ty);
},
.ErrorSet => {
const payload_val = union_val.val.optionalValue(mod) orelse
return sema.addType(Type.anyerror);
const slice_val = payload_val.castTag(.slice).?.data;
const len = try sema.usizeCast(block, src, slice_val.len.toUnsignedInt(mod));
var names: Module.Fn.InferredErrorSet.NameMap = .{};
try names.ensureUnusedCapacity(sema.arena, len);
for (0..len) |i| {
const elem_val = try slice_val.ptr.elemValue(mod, i);
const struct_val = elem_val.castTag(.aggregate).?.data;
// TODO use reflection instead of magic numbers here
// name: []const u8
const name_val = struct_val[0];
const name_str = try name_val.toAllocatedBytes(Type.const_slice_u8, sema.arena, mod);
const name_ip = try mod.intern_pool.getOrPutString(gpa, name_str);
const gop = names.getOrPutAssumeCapacity(name_ip);
if (gop.found_existing) {
return sema.fail(block, src, "duplicate error '{s}'", .{name_str});
}
}
const ty = try mod.errorSetFromUnsortedNames(names.keys());
return sema.addType(ty);
},
.Struct => {
const fields = ip.typeOf(union_val.val.ip_index).toType().structFields(mod);
const layout_index = fields.getIndex("layout").?;
const backing_integer_index = fields.getIndex("backing_integer").?;
const fields_index = fields.getIndex("fields").?;
const decls_index = fields.getIndex("decls").?;
const is_tuple_index = fields.getIndex("is_tuple").?;
const layout_val = try union_val.val.fieldValue(fields.values()[layout_index].ty, mod, layout_index);
const backing_integer_val = try union_val.val.fieldValue(fields.values()[backing_integer_index].ty, mod, backing_integer_index);
const fields_val = try union_val.val.fieldValue(fields.values()[fields_index].ty, mod, fields_index);
const decls_val = try union_val.val.fieldValue(fields.values()[decls_index].ty, mod, decls_index);
const is_tuple_val = try union_val.val.fieldValue(fields.values()[is_tuple_index].ty, mod, is_tuple_index);
const layout = mod.toEnum(std.builtin.Type.ContainerLayout, layout_val);
// Decls
if (decls_val.sliceLen(mod) > 0) {
return sema.fail(block, src, "reified structs must have no decls", .{});
}
if (layout != .Packed and !backing_integer_val.isNull(mod)) {
return sema.fail(block, src, "non-packed struct does not support backing integer type", .{});
}
return try sema.reifyStruct(block, inst, src, layout, backing_integer_val, fields_val, name_strategy, is_tuple_val.toBool(mod));
},
.Enum => {
const fields = ip.typeOf(union_val.val.ip_index).toType().structFields(mod);
const tag_type_index = fields.getIndex("tag_type").?;
const fields_index = fields.getIndex("fields").?;
const decls_index = fields.getIndex("decls").?;
const is_exhaustive_index = fields.getIndex("is_exhaustive").?;
const tag_type_val = try union_val.val.fieldValue(fields.values()[tag_type_index].ty, mod, tag_type_index);
const fields_val = try union_val.val.fieldValue(fields.values()[fields_index].ty, mod, fields_index);
const decls_val = try union_val.val.fieldValue(fields.values()[decls_index].ty, mod, decls_index);
const is_exhaustive_val = try union_val.val.fieldValue(fields.values()[is_exhaustive_index].ty, mod, is_exhaustive_index);
// Decls
if (decls_val.sliceLen(mod) > 0) {
return sema.fail(block, src, "reified enums must have no decls", .{});
}
const int_tag_ty = tag_type_val.toType();
if (int_tag_ty.zigTypeTag(mod) != .Int) {
return sema.fail(block, src, "Type.Enum.tag_type must be an integer type", .{});
}
// Because these objects reference each other, `undefined`
// placeholders are used here and then overwritten once the enum
// type gains an InternPool index.
const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{
.ty = Type.type,
.val = undefined,
}, name_strategy, "enum", inst);
const new_decl = mod.declPtr(new_decl_index);
new_decl.owns_tv = true;
errdefer mod.abortAnonDecl(new_decl_index);
// Define our empty enum decl
const fields_len = @intCast(u32, try sema.usizeCast(block, src, fields_val.sliceLen(mod)));
const incomplete_enum = try mod.intern_pool.getIncompleteEnum(gpa, .{
.decl = new_decl_index,
.namespace = .none,
.fields_len = fields_len,
.has_values = true,
.tag_mode = if (!is_exhaustive_val.toBool(mod))
.nonexhaustive
else
.explicit,
.tag_ty = int_tag_ty.ip_index,
});
errdefer mod.intern_pool.remove(incomplete_enum.index);
new_decl.val = incomplete_enum.index.toValue();
for (0..fields_len) |field_i| {
const elem_val = try fields_val.elemValue(mod, field_i);
const field_struct_val: []const Value = elem_val.castTag(.aggregate).?.data;
// TODO use reflection instead of magic numbers here
// name: []const u8
const name_val = field_struct_val[0];
// value: comptime_int
const value_val = field_struct_val[1];
const field_name = try name_val.toAllocatedBytes(
Type.const_slice_u8,
sema.arena,
mod,
);
const field_name_ip = try mod.intern_pool.getOrPutString(gpa, field_name);
if (!try sema.intFitsInType(value_val, int_tag_ty, null)) {
// TODO: better source location
return sema.fail(block, src, "field '{s}' with enumeration value '{}' is too large for backing int type '{}'", .{
field_name,
value_val.fmtValue(Type.comptime_int, mod),
int_tag_ty.fmt(mod),
});
}
if (try incomplete_enum.addFieldName(&mod.intern_pool, gpa, field_name_ip)) |other_index| {
const msg = msg: {
const msg = try sema.errMsg(block, src, "duplicate enum field '{s}'", .{field_name});
errdefer msg.destroy(gpa);
_ = other_index; // TODO: this note is incorrect
try sema.errNote(block, src, msg, "other field here", .{});
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
if (try incomplete_enum.addFieldValue(&mod.intern_pool, gpa, value_val.ip_index)) |other| {
const msg = msg: {
const msg = try sema.errMsg(block, src, "enum tag value {} already taken", .{value_val.fmtValue(Type.comptime_int, mod)});
errdefer msg.destroy(gpa);
_ = other; // TODO: this note is incorrect
try sema.errNote(block, src, msg, "other enum tag value here", .{});
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
}
return sema.analyzeDeclVal(block, src, new_decl_index);
},
.Opaque => {
const fields = ip.typeOf(union_val.val.ip_index).toType().structFields(mod);
const decls_index = fields.getIndex("decls").?;
const decls_val = try union_val.val.fieldValue(fields.values()[decls_index].ty, mod, decls_index);
// Decls
if (decls_val.sliceLen(mod) > 0) {
return sema.fail(block, src, "reified opaque must have no decls", .{});
}
var new_decl_arena = std.heap.ArenaAllocator.init(gpa);
errdefer new_decl_arena.deinit();
// Because these three objects reference each other, `undefined`
// placeholders are used in two places and then overwritten once
// the opaque type gains an InternPool index.
const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{
.ty = Type.type,
.val = undefined,
}, name_strategy, "opaque", inst);
const new_decl = mod.declPtr(new_decl_index);
new_decl.owns_tv = true;
errdefer mod.abortAnonDecl(new_decl_index);
const new_namespace_index = try mod.createNamespace(.{
.parent = block.namespace.toOptional(),
.ty = undefined,
.file_scope = block.getFileScope(mod),
});
const new_namespace = mod.namespacePtr(new_namespace_index);
errdefer mod.destroyNamespace(new_namespace_index);
const opaque_ty = try mod.intern(.{ .opaque_type = .{
.decl = new_decl_index,
.namespace = new_namespace_index,
} });
errdefer mod.intern_pool.remove(opaque_ty);
new_decl.val = opaque_ty.toValue();
new_namespace.ty = opaque_ty.toType();
try new_decl.finalizeNewArena(&new_decl_arena);
return sema.analyzeDeclVal(block, src, new_decl_index);
},
.Union => {
const fields = ip.typeOf(union_val.val.ip_index).toType().structFields(mod);
const layout_index = fields.getIndex("layout").?;
const tag_type_index = fields.getIndex("tag_type").?;
const fields_index = fields.getIndex("fields").?;
const decls_index = fields.getIndex("decls").?;
const layout_val = try union_val.val.fieldValue(fields.values()[layout_index].ty, mod, layout_index);
const tag_type_val = try union_val.val.fieldValue(fields.values()[tag_type_index].ty, mod, tag_type_index);
const fields_val = try union_val.val.fieldValue(fields.values()[fields_index].ty, mod, fields_index);
const decls_val = try union_val.val.fieldValue(fields.values()[decls_index].ty, mod, decls_index);
// Decls
if (decls_val.sliceLen(mod) > 0) {
return sema.fail(block, src, "reified unions must have no decls", .{});
}
const layout = mod.toEnum(std.builtin.Type.ContainerLayout, layout_val);
var new_decl_arena = std.heap.ArenaAllocator.init(gpa);
errdefer new_decl_arena.deinit();
const new_decl_arena_allocator = new_decl_arena.allocator();
// Because these three objects reference each other, `undefined`
// placeholders are used here and then overwritten once the union
// type gains an InternPool index.
const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{
.ty = Type.type,
.val = undefined,
}, name_strategy, "union", inst);
const new_decl = mod.declPtr(new_decl_index);
new_decl.owns_tv = true;
errdefer mod.abortAnonDecl(new_decl_index);
const new_namespace_index = try mod.createNamespace(.{
.parent = block.namespace.toOptional(),
.ty = undefined,
.file_scope = block.getFileScope(mod),
});
const new_namespace = mod.namespacePtr(new_namespace_index);
errdefer mod.destroyNamespace(new_namespace_index);
const union_index = try mod.createUnion(.{
.owner_decl = new_decl_index,
.tag_ty = Type.null,
.fields = .{},
.zir_index = inst,
.layout = layout,
.status = .have_field_types,
.namespace = new_namespace_index,
});
const union_obj = mod.unionPtr(union_index);
errdefer mod.destroyUnion(union_index);
const union_ty = try mod.intern_pool.get(gpa, .{ .union_type = .{
.index = union_index,
.runtime_tag = if (!tag_type_val.isNull(mod))
.tagged
else if (layout != .Auto)
.none
else switch (mod.optimizeMode()) {
.Debug, .ReleaseSafe => .safety,
.ReleaseFast, .ReleaseSmall => .none,
},
} });
errdefer mod.intern_pool.remove(union_ty);
new_decl.val = union_ty.toValue();
new_namespace.ty = union_ty.toType();
// Tag type
const fields_len = try sema.usizeCast(block, src, fields_val.sliceLen(mod));
var explicit_tags_seen: []bool = &.{};
var explicit_enum_info: ?InternPool.Key.EnumType = null;
var enum_field_names: []InternPool.NullTerminatedString = &.{};
if (tag_type_val.optionalValue(mod)) |payload_val| {
union_obj.tag_ty = payload_val.toType();
const enum_type = switch (mod.intern_pool.indexToKey(union_obj.tag_ty.ip_index)) {
.enum_type => |x| x,
else => return sema.fail(block, src, "Type.Union.tag_type must be an enum type", .{}),
};
explicit_enum_info = enum_type;
explicit_tags_seen = try sema.arena.alloc(bool, enum_type.names.len);
@memset(explicit_tags_seen, false);
} else {
enum_field_names = try sema.arena.alloc(InternPool.NullTerminatedString, fields_len);
}
// Fields
try union_obj.fields.ensureTotalCapacity(new_decl_arena_allocator, fields_len);
for (0..fields_len) |i| {
const elem_val = try fields_val.elemValue(mod, i);
const field_struct_val = elem_val.castTag(.aggregate).?.data;
// TODO use reflection instead of magic numbers here
// name: []const u8
const name_val = field_struct_val[0];
// type: type,
const type_val = field_struct_val[1];
// alignment: comptime_int,
const alignment_val = field_struct_val[2];
const field_name = try name_val.toAllocatedBytes(
Type.const_slice_u8,
new_decl_arena_allocator,
mod,
);
const field_name_ip = try mod.intern_pool.getOrPutString(gpa, field_name);
if (enum_field_names.len != 0) {
enum_field_names[i] = field_name_ip;
}
if (explicit_enum_info) |tag_info| {
const enum_index = tag_info.nameIndex(&mod.intern_pool, field_name_ip) orelse {
const msg = msg: {
const msg = try sema.errMsg(block, src, "no field named '{s}' in enum '{}'", .{ field_name, union_obj.tag_ty.fmt(mod) });
errdefer msg.destroy(gpa);
try sema.addDeclaredHereNote(msg, union_obj.tag_ty);
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
};
// No duplicate check is needed here because it already happened when
// the enum type was created.
assert(!explicit_tags_seen[enum_index]);
explicit_tags_seen[enum_index] = true;
}
const gop = union_obj.fields.getOrPutAssumeCapacity(field_name);
if (gop.found_existing) {
// TODO: better source location
return sema.fail(block, src, "duplicate union field {s}", .{field_name});
}
const field_ty = type_val.toType();
gop.value_ptr.* = .{
.ty = field_ty,
.abi_align = @intCast(u32, (try alignment_val.getUnsignedIntAdvanced(mod, sema)).?),
};
if (field_ty.zigTypeTag(mod) == .Opaque) {
const msg = msg: {
const msg = try sema.errMsg(block, src, "opaque types have unknown size and therefore cannot be directly embedded in unions", .{});
errdefer msg.destroy(gpa);
try sema.addDeclaredHereNote(msg, field_ty);
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
if (union_obj.layout == .Extern and !try sema.validateExternType(field_ty, .union_field)) {
const msg = msg: {
const msg = try sema.errMsg(block, src, "extern unions cannot contain fields of type '{}'", .{field_ty.fmt(mod)});
errdefer msg.destroy(gpa);
const src_decl = mod.declPtr(block.src_decl);
try sema.explainWhyTypeIsNotExtern(msg, src.toSrcLoc(src_decl, mod), field_ty, .union_field);
try sema.addDeclaredHereNote(msg, field_ty);
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
} else if (union_obj.layout == .Packed and !(validatePackedType(field_ty, mod))) {
const msg = msg: {
const msg = try sema.errMsg(block, src, "packed unions cannot contain fields of type '{}'", .{field_ty.fmt(mod)});
errdefer msg.destroy(gpa);
const src_decl = mod.declPtr(block.src_decl);
try sema.explainWhyTypeIsNotPacked(msg, src.toSrcLoc(src_decl, mod), field_ty);
try sema.addDeclaredHereNote(msg, field_ty);
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
}
if (explicit_enum_info) |tag_info| {
if (tag_info.names.len > fields_len) {
const msg = msg: {
const msg = try sema.errMsg(block, src, "enum field(s) missing in union", .{});
errdefer msg.destroy(gpa);
const enum_ty = union_obj.tag_ty;
for (tag_info.names, 0..) |field_name, field_index| {
if (explicit_tags_seen[field_index]) continue;
try sema.addFieldErrNote(enum_ty, field_index, msg, "field '{s}' missing, declared here", .{
mod.intern_pool.stringToSlice(field_name),
});
}
try sema.addDeclaredHereNote(msg, union_obj.tag_ty);
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
} else {
union_obj.tag_ty = try sema.generateUnionTagTypeSimple(block, enum_field_names, null);
}
try new_decl.finalizeNewArena(&new_decl_arena);
return sema.analyzeDeclVal(block, src, new_decl_index);
},
.Fn => {
const fields = ip.typeOf(union_val.val.ip_index).toType().structFields(mod);
const calling_convention_index = fields.getIndex("calling_convention").?;
const alignment_index = fields.getIndex("alignment").?;
const is_generic_index = fields.getIndex("is_generic").?;
const is_var_args_index = fields.getIndex("is_var_args").?;
const return_type_index = fields.getIndex("return_type").?;
const params_index = fields.getIndex("params").?;
const calling_convention_val = try union_val.val.fieldValue(fields.values()[calling_convention_index].ty, mod, calling_convention_index);
const alignment_val = try union_val.val.fieldValue(fields.values()[alignment_index].ty, mod, alignment_index);
const is_generic_val = try union_val.val.fieldValue(fields.values()[is_generic_index].ty, mod, is_generic_index);
const is_var_args_val = try union_val.val.fieldValue(fields.values()[is_var_args_index].ty, mod, is_var_args_index);
const return_type_val = try union_val.val.fieldValue(fields.values()[return_type_index].ty, mod, return_type_index);
const params_val = try union_val.val.fieldValue(fields.values()[params_index].ty, mod, params_index);
const is_generic = is_generic_val.toBool(mod);
if (is_generic) {
return sema.fail(block, src, "Type.Fn.is_generic must be false for @Type", .{});
}
const is_var_args = is_var_args_val.toBool(mod);
const cc = mod.toEnum(std.builtin.CallingConvention, calling_convention_val);
if (is_var_args and cc != .C) {
return sema.fail(block, src, "varargs functions must have C calling convention", .{});
}
const alignment = alignment: {
if (!try sema.intFitsInType(alignment_val, Type.u32, null)) {
return sema.fail(block, src, "alignment must fit in 'u32'", .{});
}
const alignment = @intCast(u29, alignment_val.toUnsignedInt(mod));
if (alignment == target_util.defaultFunctionAlignment(target)) {
break :alignment .none;
} else {
break :alignment InternPool.Alignment.fromByteUnits(alignment);
}
};
const return_type = return_type_val.optionalValue(mod) orelse
return sema.fail(block, src, "Type.Fn.return_type must be non-null for @Type", .{});
const args_slice_val = params_val.castTag(.slice).?.data;
const args_len = try sema.usizeCast(block, src, args_slice_val.len.toUnsignedInt(mod));
const param_types = try sema.arena.alloc(InternPool.Index, args_len);
var noalias_bits: u32 = 0;
for (param_types, 0..) |*param_type, i| {
const arg = try args_slice_val.ptr.elemValue(mod, i);
const arg_val = arg.castTag(.aggregate).?.data;
// TODO use reflection instead of magic numbers here
// is_generic: bool,
const arg_is_generic = arg_val[0].toBool(mod);
// is_noalias: bool,
const arg_is_noalias = arg_val[1].toBool(mod);
// type: ?type,
const param_type_opt_val = arg_val[2];
if (arg_is_generic) {
return sema.fail(block, src, "Type.Fn.Param.is_generic must be false for @Type", .{});
}
const param_type_val = param_type_opt_val.optionalValue(mod) orelse
return sema.fail(block, src, "Type.Fn.Param.arg_type must be non-null for @Type", .{});
param_type.* = param_type_val.ip_index;
if (arg_is_noalias) {
if (!param_type.toType().isPtrAtRuntime(mod)) {
return sema.fail(block, src, "non-pointer parameter declared noalias", .{});
}
noalias_bits |= @as(u32, 1) << (std.math.cast(u5, i) orelse
return sema.fail(block, src, "this compiler implementation only supports 'noalias' on the first 32 parameters", .{}));
}
}
const ty = try mod.funcType(.{
.param_types = param_types,
.comptime_bits = 0,
.noalias_bits = noalias_bits,
.return_type = return_type.toIntern(),
.alignment = alignment,
.cc = cc,
.is_var_args = is_var_args,
.is_generic = false,
.is_noinline = false,
.align_is_generic = false,
.cc_is_generic = false,
.section_is_generic = false,
.addrspace_is_generic = false,
});
return sema.addType(ty);
},
.Frame => return sema.failWithUseOfAsync(block, src),
}
}
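// A minimal sketch (hypothetical user code) of reification through @Type,
// exercising the Int case handled above:
//
//   const U24 = @Type(.{ .Int = .{ .signedness = .unsigned, .bits = 24 } });
//   comptime {
//       if (U24 != u24) unreachable;
//   }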
fn reifyStruct(
sema: *Sema,
block: *Block,
inst: Zir.Inst.Index,
src: LazySrcLoc,
layout: std.builtin.Type.ContainerLayout,
backing_int_val: Value,
fields_val: Value,
name_strategy: Zir.Inst.NameStrategy,
is_tuple: bool,
) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const gpa = sema.gpa;
var new_decl_arena = std.heap.ArenaAllocator.init(gpa);
errdefer new_decl_arena.deinit();
const new_decl_arena_allocator = new_decl_arena.allocator();
// Because these three objects reference each other, `undefined`
// placeholders are used here and then overwritten once the struct
// type gains an InternPool index.
const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{
.ty = Type.type,
.val = undefined,
}, name_strategy, "struct", inst);
const new_decl = mod.declPtr(new_decl_index);
new_decl.owns_tv = true;
errdefer mod.abortAnonDecl(new_decl_index);
const new_namespace_index = try mod.createNamespace(.{
.parent = block.namespace.toOptional(),
.ty = undefined,
.file_scope = block.getFileScope(mod),
});
const new_namespace = mod.namespacePtr(new_namespace_index);
errdefer mod.destroyNamespace(new_namespace_index);
const struct_index = try mod.createStruct(.{
.owner_decl = new_decl_index,
.fields = .{},
.zir_index = inst,
.layout = layout,
.status = .have_field_types,
.known_non_opv = false,
.is_tuple = is_tuple,
.namespace = new_namespace_index,
});
const struct_obj = mod.structPtr(struct_index);
errdefer mod.destroyStruct(struct_index);
const struct_ty = try mod.intern_pool.get(gpa, .{ .struct_type = .{
.index = struct_index.toOptional(),
.namespace = new_namespace_index.toOptional(),
} });
errdefer mod.intern_pool.remove(struct_ty);
new_decl.val = struct_ty.toValue();
new_namespace.ty = struct_ty.toType();
// Fields
const fields_len = try sema.usizeCast(block, src, fields_val.sliceLen(mod));
try struct_obj.fields.ensureTotalCapacity(new_decl_arena_allocator, fields_len);
var i: usize = 0;
while (i < fields_len) : (i += 1) {
const elem_val = try fields_val.elemValue(sema.mod, i);
const field_struct_val = elem_val.castTag(.aggregate).?.data;
// TODO use reflection instead of magic numbers here
// name: []const u8
const name_val = field_struct_val[0];
// type: type,
const type_val = field_struct_val[1];
// default_value: ?*const anyopaque,
const default_value_val = field_struct_val[2];
// is_comptime: bool,
const is_comptime_val = field_struct_val[3];
// alignment: comptime_int,
const alignment_val = field_struct_val[4];
if (!try sema.intFitsInType(alignment_val, Type.u32, null)) {
return sema.fail(block, src, "alignment must fit in 'u32'", .{});
}
const abi_align = @intCast(u29, (try alignment_val.getUnsignedIntAdvanced(mod, sema)).?);
if (layout == .Packed) {
if (abi_align != 0) return sema.fail(block, src, "alignment in a packed struct field must be set to 0", .{});
if (is_comptime_val.toBool(mod)) return sema.fail(block, src, "packed struct fields cannot be marked comptime", .{});
}
if (layout == .Extern and is_comptime_val.toBool(mod)) {
return sema.fail(block, src, "extern struct fields cannot be marked comptime", .{});
}
const field_name = try name_val.toAllocatedBytes(
Type.const_slice_u8,
new_decl_arena_allocator,
mod,
);
if (is_tuple) {
const field_index = std.fmt.parseUnsigned(u32, field_name, 10) catch {
return sema.fail(
block,
src,
"tuple cannot have non-numeric field '{s}'",
.{field_name},
);
};
if (field_index >= fields_len) {
return sema.fail(
block,
src,
"tuple field {} exceeds tuple field count",
.{field_index},
);
}
}
const gop = struct_obj.fields.getOrPutAssumeCapacity(field_name);
if (gop.found_existing) {
// TODO: better source location
return sema.fail(block, src, "duplicate struct field {s}", .{field_name});
}
const default_val = if (default_value_val.optionalValue(mod)) |opt_val| blk: {
const payload_val = if (opt_val.pointerDecl()) |opt_decl|
mod.declPtr(opt_decl).val
else
opt_val;
break :blk try payload_val.copy(new_decl_arena_allocator);
} else Value.@"unreachable";
if (is_comptime_val.toBool(mod) and default_val.ip_index == .unreachable_value) {
return sema.fail(block, src, "comptime field without default initialization value", .{});
}
const field_ty = type_val.toType();
gop.value_ptr.* = .{
.ty = field_ty,
.abi_align = abi_align,
.default_val = default_val,
.is_comptime = is_comptime_val.toBool(mod),
.offset = undefined,
};
if (field_ty.zigTypeTag(mod) == .Opaque) {
const msg = msg: {
const msg = try sema.errMsg(block, src, "opaque types have unknown size and therefore cannot be directly embedded in structs", .{});
errdefer msg.destroy(gpa);
try sema.addDeclaredHereNote(msg, field_ty);
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
if (field_ty.zigTypeTag(mod) == .NoReturn) {
const msg = msg: {
const msg = try sema.errMsg(block, src, "struct fields cannot be 'noreturn'", .{});
errdefer msg.destroy(gpa);
try sema.addDeclaredHereNote(msg, field_ty);
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
if (struct_obj.layout == .Extern and !try sema.validateExternType(field_ty, .struct_field)) {
const msg = msg: {
const msg = try sema.errMsg(block, src, "extern structs cannot contain fields of type '{}'", .{field_ty.fmt(sema.mod)});
errdefer msg.destroy(gpa);
const src_decl = sema.mod.declPtr(block.src_decl);
try sema.explainWhyTypeIsNotExtern(msg, src.toSrcLoc(src_decl, mod), field_ty, .struct_field);
try sema.addDeclaredHereNote(msg, field_ty);
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
} else if (struct_obj.layout == .Packed and !(validatePackedType(field_ty, mod))) {
const msg = msg: {
const msg = try sema.errMsg(block, src, "packed structs cannot contain fields of type '{}'", .{field_ty.fmt(sema.mod)});
errdefer msg.destroy(gpa);
const src_decl = sema.mod.declPtr(block.src_decl);
try sema.explainWhyTypeIsNotPacked(msg, src.toSrcLoc(src_decl, mod), field_ty);
try sema.addDeclaredHereNote(msg, field_ty);
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
}
if (layout == .Packed) {
struct_obj.status = .layout_wip;
for (struct_obj.fields.values(), 0..) |field, index| {
sema.resolveTypeLayout(field.ty) catch |err| switch (err) {
error.AnalysisFail => {
const msg = sema.err orelse return err;
try sema.addFieldErrNote(struct_ty.toType(), index, msg, "while checking this field", .{});
return err;
},
else => return err,
};
}
var fields_bit_sum: u64 = 0;
for (struct_obj.fields.values()) |field| {
fields_bit_sum += field.ty.bitSize(mod);
}
if (backing_int_val.optionalValue(mod)) |payload| {
const backing_int_ty = payload.toType();
try sema.checkBackingIntType(block, src, backing_int_ty, fields_bit_sum);
struct_obj.backing_int_ty = backing_int_ty;
} else {
struct_obj.backing_int_ty = try mod.intType(.unsigned, @intCast(u16, fields_bit_sum));
}
struct_obj.status = .have_layout;
}
try new_decl.finalizeNewArena(&new_decl_arena);
return sema.analyzeDeclVal(block, src, new_decl_index);
}
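// A minimal sketch (hypothetical user code) of the struct reification path,
// assuming the std.builtin.Type.StructField layout this code reads:
//
//   const S = @Type(.{ .Struct = .{
//       .layout = .Auto,
//       .backing_integer = null,
//       .fields = &.{.{
//           .name = "x",
//           .type = u32,
//           .default_value = null,
//           .is_comptime = false,
//           .alignment = @alignOf(u32),
//       }},
//       .decls = &.{},
//       .is_tuple = false,
//   } });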
fn zirAddrSpaceCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data;
const src = LazySrcLoc.nodeOffset(extra.node);
const addrspace_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
const ptr_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = extra.node };
const dest_addrspace = try sema.analyzeAddressSpace(block, addrspace_src, extra.lhs, .pointer);
const ptr = try sema.resolveInst(extra.rhs);
const ptr_ty = sema.typeOf(ptr);
try sema.checkPtrOperand(block, ptr_src, ptr_ty);
var ptr_info = ptr_ty.ptrInfo(mod);
const src_addrspace = ptr_info.@"addrspace";
if (!target_util.addrSpaceCastIsValid(sema.mod.getTarget(), src_addrspace, dest_addrspace)) {
const msg = msg: {
const msg = try sema.errMsg(block, src, "invalid address space cast", .{});
errdefer msg.destroy(sema.gpa);
try sema.errNote(block, src, msg, "address space '{s}' is not compatible with address space '{s}'", .{ @tagName(src_addrspace), @tagName(dest_addrspace) });
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
ptr_info.@"addrspace" = dest_addrspace;
const dest_ptr_ty = try Type.ptr(sema.arena, sema.mod, ptr_info);
const dest_ty = if (ptr_ty.zigTypeTag(mod) == .Optional)
try Type.optional(sema.arena, dest_ptr_ty, mod)
else
dest_ptr_ty;
try sema.requireRuntimeBlock(block, src, ptr_src);
// TODO: Address space cast safety?
return block.addInst(.{
.tag = .addrspace_cast,
.data = .{ .ty_op = .{
.ty = try sema.addType(dest_ty),
.operand = ptr,
} },
});
}
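// A minimal sketch (hypothetical user code), assuming the two-argument form
// of the builtin this instruction implements; only valid on targets where
// the address space cast is legal:
//
//   fn toGeneric(p: *addrspace(.shared) u32) *u32 {
//       return @addrSpaceCast(.generic, p);
//   }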
fn resolveVaListRef(sema: *Sema, block: *Block, src: LazySrcLoc, zir_ref: Zir.Inst.Ref) CompileError!Air.Inst.Ref {
const va_list_ty = try sema.getBuiltinType("VaList");
const va_list_ptr = try Type.ptr(sema.arena, sema.mod, .{
.pointee_type = va_list_ty,
.mutable = true,
.@"addrspace" = .generic,
});
const inst = try sema.resolveInst(zir_ref);
return sema.coerce(block, va_list_ptr, inst, src);
}
fn zirCVaArg(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data;
const src = LazySrcLoc.nodeOffset(extra.node);
const va_list_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
const ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = extra.node };
const va_list_ref = try sema.resolveVaListRef(block, va_list_src, extra.lhs);
const arg_ty = try sema.resolveType(block, ty_src, extra.rhs);
if (!try sema.validateExternType(arg_ty, .param_ty)) {
const msg = msg: {
const msg = try sema.errMsg(block, ty_src, "cannot get '{}' from variadic argument", .{arg_ty.fmt(sema.mod)});
errdefer msg.destroy(sema.gpa);
const src_decl = sema.mod.declPtr(block.src_decl);
try sema.explainWhyTypeIsNotExtern(msg, ty_src.toSrcLoc(src_decl, mod), arg_ty, .param_ty);
try sema.addDeclaredHereNote(msg, arg_ty);
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
try sema.requireRuntimeBlock(block, src, null);
return block.addTyOp(.c_va_arg, arg_ty, va_list_ref);
}
fn zirCVaCopy(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref {
const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
const src = LazySrcLoc.nodeOffset(extra.node);
const va_list_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
const va_list_ref = try sema.resolveVaListRef(block, va_list_src, extra.operand);
const va_list_ty = try sema.getBuiltinType("VaList");
try sema.requireRuntimeBlock(block, src, null);
return block.addTyOp(.c_va_copy, va_list_ty, va_list_ref);
}
fn zirCVaEnd(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref {
const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
const src = LazySrcLoc.nodeOffset(extra.node);
const va_list_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
const va_list_ref = try sema.resolveVaListRef(block, va_list_src, extra.operand);
try sema.requireRuntimeBlock(block, src, null);
return block.addUnOp(.c_va_end, va_list_ref);
}
fn zirCVaStart(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref {
const src = LazySrcLoc.nodeOffset(@bitCast(i32, extended.operand));
const va_list_ty = try sema.getBuiltinType("VaList");
try sema.requireRuntimeBlock(block, src, null);
return block.addInst(.{
.tag = .c_va_start,
.data = .{ .ty = va_list_ty },
});
}
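// A minimal sketch (hypothetical user code) tying the C varargs builtins
// together; @cVaStart is only valid inside a variadic function with the C
// calling convention:
//
//   fn sum(count: c_int, ...) callconv(.C) c_int {
//       var ap = @cVaStart();
//       defer @cVaEnd(&ap);
//       var total: c_int = 0;
//       var i: c_int = 0;
//       while (i < count) : (i += 1) total += @cVaArg(&ap, c_int);
//       return total;
//   }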
fn zirTypeName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
const ty = try sema.resolveType(block, ty_src, inst_data.operand);
var anon_decl = try block.startAnonDecl();
defer anon_decl.deinit();
const bytes = try ty.nameAllocArena(anon_decl.arena(), mod);
const new_decl = try anon_decl.finish(
try Type.array(anon_decl.arena(), bytes.len, try mod.intValue(Type.u8, 0), Type.u8, mod),
try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. bytes.len + 1]),
0, // default alignment
);
return sema.analyzeDeclRef(new_decl);
}
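// A minimal sketch (hypothetical user code); the name is materialized as a
// null-terminated byte array in an anonymous decl, as above:
//
//   const std = @import("std");
//   comptime {
//       std.debug.assert(std.mem.eql(u8, @typeName(?u32), "?u32"));
//   }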
fn zirFrameType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const src = inst_data.src();
return sema.failWithUseOfAsync(block, src);
}
fn zirFrameSize(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const src = inst_data.src();
return sema.failWithUseOfAsync(block, src);
}
fn zirFloatToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
const dest_ty = try sema.resolveType(block, ty_src, extra.lhs);
const operand = try sema.resolveInst(extra.rhs);
const operand_ty = sema.typeOf(operand);
_ = try sema.checkIntType(block, ty_src, dest_ty);
try sema.checkFloatType(block, operand_src, operand_ty);
if (try sema.resolveMaybeUndefVal(operand)) |val| {
const result_val = try sema.floatToInt(block, operand_src, val, operand_ty, dest_ty);
return sema.addConstant(dest_ty, result_val);
} else if (dest_ty.zigTypeTag(mod) == .ComptimeInt) {
return sema.failWithNeededComptime(block, operand_src, "value being casted to 'comptime_int' must be comptime-known");
}
try sema.requireRuntimeBlock(block, inst_data.src(), operand_src);
if (dest_ty.intInfo(mod).bits == 0) {
if (block.wantSafety()) {
const ok = try block.addBinOp(if (block.float_mode == .Optimized) .cmp_eq_optimized else .cmp_eq, operand, try sema.addConstant(operand_ty, try mod.intValue(operand_ty, 0)));
try sema.addSafetyCheck(block, ok, .integer_part_out_of_bounds);
}
return sema.addConstant(dest_ty, try mod.intValue(dest_ty, 0));
}
const result = try block.addTyOp(if (block.float_mode == .Optimized) .float_to_int_optimized else .float_to_int, dest_ty, operand);
if (block.wantSafety()) {
const back = try block.addTyOp(.int_to_float, operand_ty, result);
const diff = try block.addBinOp(.sub, operand, back);
const ok_pos = try block.addBinOp(if (block.float_mode == .Optimized) .cmp_lt_optimized else .cmp_lt, diff, try sema.addConstant(operand_ty, try mod.intValue(operand_ty, 1)));
const ok_neg = try block.addBinOp(if (block.float_mode == .Optimized) .cmp_gt_optimized else .cmp_gt, diff, try sema.addConstant(operand_ty, try mod.intValue(operand_ty, -1)));
const ok = try block.addBinOp(.bool_and, ok_pos, ok_neg);
try sema.addSafetyCheck(block, ok, .integer_part_out_of_bounds);
}
return result;
}
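// A minimal sketch (hypothetical user code); at runtime the fractional part
// is discarded, and the safety checks above catch an out-of-range integer
// part:
//
//   fn trunc(f: f64) i32 {
//       return @floatToInt(i32, f); // e.g. 3.7 -> 3
//   }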
fn zirIntToFloat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
const dest_ty = try sema.resolveType(block, ty_src, extra.lhs);
const operand = try sema.resolveInst(extra.rhs);
const operand_ty = sema.typeOf(operand);
try sema.checkFloatType(block, ty_src, dest_ty);
_ = try sema.checkIntType(block, operand_src, operand_ty);
if (try sema.resolveMaybeUndefVal(operand)) |val| {
const result_val = try val.intToFloatAdvanced(sema.arena, operand_ty, dest_ty, sema.mod, sema);
return sema.addConstant(dest_ty, result_val);
} else if (dest_ty.zigTypeTag(mod) == .ComptimeFloat) {
return sema.failWithNeededComptime(block, operand_src, "value being casted to 'comptime_float' must be comptime-known");
}
try sema.requireRuntimeBlock(block, inst_data.src(), operand_src);
return block.addTyOp(.int_to_float, dest_ty, operand);
}
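// A minimal sketch (hypothetical user code); a comptime-known integer folds
// directly, otherwise an `int_to_float` instruction is emitted:
//
//   const f = @intToFloat(f64, 7); // 7.0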
fn zirIntToPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
const operand_res = try sema.resolveInst(extra.rhs);
const operand_coerced = try sema.coerce(block, Type.usize, operand_res, operand_src);
const type_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
const ptr_ty = try sema.resolveType(block, src, extra.lhs);
try sema.checkPtrType(block, type_src, ptr_ty);
const elem_ty = ptr_ty.elemType2(mod);
const ptr_align = try ptr_ty.ptrAlignmentAdvanced(mod, sema);
if (ptr_ty.isSlice(mod)) {
const msg = msg: {
const msg = try sema.errMsg(block, type_src, "integer cannot be converted to slice type '{}'", .{ptr_ty.fmt(sema.mod)});
errdefer msg.destroy(sema.gpa);
try sema.errNote(block, type_src, msg, "slice length cannot be inferred from address", .{});
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
if (try sema.resolveDefinedValue(block, operand_src, operand_coerced)) |val| {
const addr = val.toUnsignedInt(mod);
if (!ptr_ty.isAllowzeroPtr(mod) and addr == 0)
return sema.fail(block, operand_src, "pointer type '{}' does not allow address zero", .{ptr_ty.fmt(sema.mod)});
if (addr != 0 and ptr_align != 0 and addr % ptr_align != 0)
return sema.fail(block, operand_src, "pointer type '{}' requires aligned address", .{ptr_ty.fmt(sema.mod)});
return sema.addConstant(ptr_ty, try mod.ptrIntValue(ptr_ty, addr));
}
try sema.requireRuntimeBlock(block, src, operand_src);
if (block.wantSafety() and (try sema.typeHasRuntimeBits(elem_ty) or elem_ty.zigTypeTag(mod) == .Fn)) {
if (!ptr_ty.isAllowzeroPtr(mod)) {
const is_non_zero = try block.addBinOp(.cmp_neq, operand_coerced, .zero_usize);
try sema.addSafetyCheck(block, is_non_zero, .cast_to_null);
}
if (ptr_align > 1) {
const align_minus_1 = try sema.addConstant(
Type.usize,
try mod.intValue(Type.usize, ptr_align - 1),
);
const remainder = try block.addBinOp(.bit_and, operand_coerced, align_minus_1);
const is_aligned = try block.addBinOp(.cmp_eq, remainder, .zero_usize);
try sema.addSafetyCheck(block, is_aligned, .incorrect_alignment);
}
}
return block.addBitCast(ptr_ty, operand_coerced);
}
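// A minimal sketch (hypothetical user code); address zero and misaligned
// addresses are rejected at comptime when known, or safety-checked at
// runtime:
//
//   const reg = @intToPtr(*volatile u32, 0x1000);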
fn zirErrSetCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const ip = &mod.intern_pool;
const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data;
const src = LazySrcLoc.nodeOffset(extra.node);
const dest_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = extra.node };
const dest_ty = try sema.resolveType(block, dest_ty_src, extra.lhs);
const operand = try sema.resolveInst(extra.rhs);
const operand_ty = sema.typeOf(operand);
try sema.checkErrorSetType(block, dest_ty_src, dest_ty);
try sema.checkErrorSetType(block, operand_src, operand_ty);
// The operand must be a defined value because an undefined value could be an invalid error code.
const maybe_operand_val = try sema.resolveDefinedValue(block, operand_src, operand);
if (disjoint: {
// Try avoiding resolving inferred error sets if we can
if (!dest_ty.isAnyError(mod) and dest_ty.errorSetNames(mod).len == 0) break :disjoint true;
if (!operand_ty.isAnyError(mod) and operand_ty.errorSetNames(mod).len == 0) break :disjoint true;
if (dest_ty.isAnyError(mod)) break :disjoint false;
if (operand_ty.isAnyError(mod)) break :disjoint false;
for (dest_ty.errorSetNames(mod)) |dest_err_name| {
if (Type.errorSetHasFieldIp(ip, operand_ty.toIntern(), dest_err_name))
break :disjoint false;
}
if (!ip.isInferredErrorSetType(dest_ty.ip_index) and
!ip.isInferredErrorSetType(operand_ty.ip_index))
{
break :disjoint true;
}
try sema.resolveInferredErrorSetTy(block, dest_ty_src, dest_ty);
try sema.resolveInferredErrorSetTy(block, operand_src, operand_ty);
for (dest_ty.errorSetNames(mod)) |dest_err_name| {
if (Type.errorSetHasFieldIp(ip, operand_ty.toIntern(), dest_err_name))
break :disjoint false;
}
break :disjoint true;
}) {
const msg = msg: {
const msg = try sema.errMsg(
block,
src,
"error sets '{}' and '{}' have no common errors",
.{ operand_ty.fmt(sema.mod), dest_ty.fmt(sema.mod) },
);
errdefer msg.destroy(sema.gpa);
try sema.addDeclaredHereNote(msg, operand_ty);
try sema.addDeclaredHereNote(msg, dest_ty);
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
if (maybe_operand_val) |val| {
if (!dest_ty.isAnyError(mod)) {
const error_name = val.castTag(.@"error").?.data.name;
if (!dest_ty.errorSetHasField(error_name, mod)) {
const msg = msg: {
const msg = try sema.errMsg(
block,
src,
"'error.{s}' not a member of error set '{}'",
.{ error_name, dest_ty.fmt(sema.mod) },
);
errdefer msg.destroy(sema.gpa);
try sema.addDeclaredHereNote(msg, dest_ty);
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
}
return sema.addConstant(dest_ty, val);
}
try sema.requireRuntimeBlock(block, src, operand_src);
if (block.wantSafety() and !dest_ty.isAnyError(mod) and sema.mod.backendSupportsFeature(.error_set_has_value)) {
const err_int_inst = try block.addBitCast(Type.err_int, operand);
const ok = try block.addTyOp(.error_set_has_value, dest_ty, err_int_inst);
try sema.addSafetyCheck(block, ok, .invalid_error_code);
}
return block.addBitCast(dest_ty, operand);
}
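// A minimal sketch (hypothetical user code); casting to a smaller error set
// is checked at comptime when possible, otherwise guarded by the
// `error_set_has_value` safety check where supported:
//
//   const Full = error{ A, B };
//   const Sub = error{A};
//   fn narrow(e: Full) Sub {
//       return @errSetCast(Sub, e);
//   }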
fn zirPtrCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
const dest_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const dest_ty = try sema.resolveType(block, dest_ty_src, extra.lhs);
const operand = try sema.resolveInst(extra.rhs);
const operand_ty = sema.typeOf(operand);
try sema.checkPtrType(block, dest_ty_src, dest_ty);
try sema.checkPtrOperand(block, operand_src, operand_ty);
const operand_info = operand_ty.ptrInfo(mod);
const dest_info = dest_ty.ptrInfo(mod);
if (!operand_info.mutable and dest_info.mutable) {
const msg = msg: {
const msg = try sema.errMsg(block, src, "cast discards const qualifier", .{});
errdefer msg.destroy(sema.gpa);
try sema.errNote(block, src, msg, "consider using '@constCast'", .{});
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
if (operand_info.@"volatile" and !dest_info.@"volatile") {
const msg = msg: {
const msg = try sema.errMsg(block, src, "cast discards volatile qualifier", .{});
errdefer msg.destroy(sema.gpa);
try sema.errNote(block, src, msg, "consider using '@volatileCast'", .{});
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
if (operand_info.@"addrspace" != dest_info.@"addrspace") {
const msg = msg: {
const msg = try sema.errMsg(block, src, "cast changes pointer address space", .{});
errdefer msg.destroy(sema.gpa);
try sema.errNote(block, src, msg, "consider using '@addrSpaceCast'", .{});
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
const dest_is_slice = dest_ty.isSlice(mod);
const operand_is_slice = operand_ty.isSlice(mod);
if (dest_is_slice and !operand_is_slice) {
return sema.fail(block, dest_ty_src, "illegal pointer cast to slice", .{});
}
const ptr = if (operand_is_slice and !dest_is_slice)
try sema.analyzeSlicePtr(block, operand_src, operand, operand_ty)
else
operand;
const dest_elem_ty = dest_ty.elemType2(mod);
try sema.resolveTypeLayout(dest_elem_ty);
const dest_align = dest_ty.ptrAlignment(mod);
const operand_elem_ty = operand_ty.elemType2(mod);
try sema.resolveTypeLayout(operand_elem_ty);
const operand_align = operand_ty.ptrAlignment(mod);
// If the destination is less aligned than the source, preserve the source alignment
const aligned_dest_ty = if (operand_align <= dest_align) dest_ty else blk: {
// Unwrap the pointer (or pointer-like optional) type, set alignment, and re-wrap into result
if (dest_ty.zigTypeTag(mod) == .Optional) {
var dest_ptr_info = dest_ty.optionalChild(mod).ptrInfo(mod);
dest_ptr_info.@"align" = operand_align;
break :blk try Type.optional(sema.arena, try Type.ptr(sema.arena, sema.mod, dest_ptr_info), mod);
} else {
var dest_ptr_info = dest_ty.ptrInfo(mod);
dest_ptr_info.@"align" = operand_align;
break :blk try Type.ptr(sema.arena, sema.mod, dest_ptr_info);
}
};
if (dest_is_slice) {
const operand_elem_size = operand_elem_ty.abiSize(mod);
const dest_elem_size = dest_elem_ty.abiSize(mod);
if (operand_elem_size != dest_elem_size) {
return sema.fail(block, dest_ty_src, "TODO: implement @ptrCast between slices changing the length", .{});
}
}
if (dest_align > operand_align) {
const msg = msg: {
const msg = try sema.errMsg(block, src, "cast increases pointer alignment", .{});
errdefer msg.destroy(sema.gpa);
try sema.errNote(block, operand_src, msg, "'{}' has alignment '{d}'", .{
operand_ty.fmt(sema.mod), operand_align,
});
try sema.errNote(block, dest_ty_src, msg, "'{}' has alignment '{d}'", .{
dest_ty.fmt(sema.mod), dest_align,
});
try sema.errNote(block, src, msg, "consider using '@alignCast'", .{});
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
if (try sema.resolveMaybeUndefVal(ptr)) |operand_val| {
if (!dest_ty.ptrAllowsZero(mod) and operand_val.isUndef(mod)) {
return sema.failWithUseOfUndef(block, operand_src);
}
if (!dest_ty.ptrAllowsZero(mod) and operand_val.isNull(mod)) {
return sema.fail(block, operand_src, "null pointer casted to type '{}'", .{dest_ty.fmt(sema.mod)});
}
if (dest_ty.zigTypeTag(mod) == .Optional and sema.typeOf(ptr).zigTypeTag(mod) != .Optional) {
return sema.addConstant(dest_ty, try Value.Tag.opt_payload.create(sema.arena, operand_val));
}
return sema.addConstant(aligned_dest_ty, operand_val);
}
try sema.requireRuntimeBlock(block, src, null);
if (block.wantSafety() and operand_ty.ptrAllowsZero(mod) and !dest_ty.ptrAllowsZero(mod) and
(try sema.typeHasRuntimeBits(dest_ty.elemType2(mod)) or dest_ty.elemType2(mod).zigTypeTag(mod) == .Fn))
{
const ptr_int = try block.addUnOp(.ptrtoint, ptr);
const is_non_zero = try block.addBinOp(.cmp_neq, ptr_int, .zero_usize);
const ok = if (operand_is_slice) ok: {
const len = try sema.analyzeSliceLen(block, operand_src, operand);
const len_zero = try block.addBinOp(.cmp_eq, len, .zero_usize);
break :ok try block.addBinOp(.bit_or, len_zero, is_non_zero);
} else is_non_zero;
try sema.addSafetyCheck(block, ok, .cast_to_null);
}
return block.addBitCast(aligned_dest_ty, ptr);
}
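/// Analyzes `@constCast`, e.g. `@constCast(ptr)`: rebuilds the operand's
/// pointer type with `mutable = true` and bitcasts to it. The pointee is
/// unchanged, so a comptime-known operand folds directly to a constant.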
fn zirConstCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
const src = LazySrcLoc.nodeOffset(extra.node);
const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
const operand = try sema.resolveInst(extra.operand);
const operand_ty = sema.typeOf(operand);
try sema.checkPtrOperand(block, operand_src, operand_ty);
var ptr_info = operand_ty.ptrInfo(mod);
ptr_info.mutable = true;
const dest_ty = try Type.ptr(sema.arena, sema.mod, ptr_info);
if (try sema.resolveMaybeUndefVal(operand)) |operand_val| {
return sema.addConstant(dest_ty, operand_val);
}
try sema.requireRuntimeBlock(block, src, null);
return block.addBitCast(dest_ty, operand);
}
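/// Analyzes `@volatileCast`, e.g. `@volatileCast(ptr)`: rebuilds the operand's
/// pointer type with the `volatile` qualifier cleared and bitcasts to it.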
fn zirVolatileCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
const src = LazySrcLoc.nodeOffset(extra.node);
const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
const operand = try sema.resolveInst(extra.operand);
const operand_ty = sema.typeOf(operand);
try sema.checkPtrOperand(block, operand_src, operand_ty);
var ptr_info = operand_ty.ptrInfo(mod);
ptr_info.@"volatile" = false;
const dest_ty = try Type.ptr(sema.arena, sema.mod, ptr_info);
if (try sema.resolveMaybeUndefVal(operand)) |operand_val| {
return sema.addConstant(dest_ty, operand_val);
}
try sema.requireRuntimeBlock(block, src, null);
return block.addBitCast(dest_ty, operand);
}
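/// Analyzes `@truncate` (two-argument form, e.g. `@truncate(u8, x)`).
/// A `comptime_int` destination is handled by ordinary coercion; otherwise the
/// signedness must match and the destination may not be wider than the operand.
/// Vectors truncate elementwise; comptime-known operands fold via `intTrunc`.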
fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
const dest_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const dest_scalar_ty = try sema.resolveType(block, dest_ty_src, extra.lhs);
const operand = try sema.resolveInst(extra.rhs);
const dest_is_comptime_int = try sema.checkIntType(block, dest_ty_src, dest_scalar_ty);
const operand_ty = sema.typeOf(operand);
const operand_scalar_ty = try sema.checkIntOrVectorAllowComptime(block, operand_ty, operand_src);
const is_vector = operand_ty.zigTypeTag(mod) == .Vector;
const dest_ty = if (is_vector)
try mod.vectorType(.{
.len = operand_ty.vectorLen(mod),
.child = dest_scalar_ty.ip_index,
})
else
dest_scalar_ty;
if (dest_is_comptime_int) {
return sema.coerce(block, dest_ty, operand, operand_src);
}
const dest_info = dest_scalar_ty.intInfo(mod);
if (try sema.typeHasOnePossibleValue(dest_ty)) |val| {
return sema.addConstant(dest_ty, val);
}
if (operand_scalar_ty.zigTypeTag(mod) != .ComptimeInt) {
const operand_info = operand_ty.intInfo(mod);
if (try sema.typeHasOnePossibleValue(operand_ty)) |val| {
return sema.addConstant(operand_ty, val);
}
if (operand_info.signedness != dest_info.signedness) {
return sema.fail(block, operand_src, "expected {s} integer type, found '{}'", .{
@tagName(dest_info.signedness), operand_ty.fmt(sema.mod),
});
}
if (operand_info.bits < dest_info.bits) {
const msg = msg: {
const msg = try sema.errMsg(
block,
src,
"destination type '{}' has more bits than source type '{}'",
.{ dest_ty.fmt(sema.mod), operand_ty.fmt(sema.mod) },
);
errdefer msg.destroy(sema.gpa);
try sema.errNote(block, dest_ty_src, msg, "destination type has {d} bits", .{
dest_info.bits,
});
try sema.errNote(block, operand_src, msg, "operand type has {d} bits", .{
operand_info.bits,
});
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
}
if (try sema.resolveMaybeUndefValIntable(operand)) |val| {
if (val.isUndef(mod)) return sema.addConstUndef(dest_ty);
if (!is_vector) {
return sema.addConstant(
dest_ty,
try val.intTrunc(operand_ty, sema.arena, dest_info.signedness, dest_info.bits, sema.mod),
);
}
const elems = try sema.arena.alloc(Value, operand_ty.vectorLen(mod));
for (elems, 0..) |*elem, i| {
const elem_val = try val.elemValue(sema.mod, i);
elem.* = try elem_val.intTrunc(operand_scalar_ty, sema.arena, dest_info.signedness, dest_info.bits, sema.mod);
}
return sema.addConstant(
dest_ty,
try Value.Tag.aggregate.create(sema.arena, elems),
);
}
try sema.requireRuntimeBlock(block, src, operand_src);
return block.addTyOp(.trunc, dest_ty, operand);
}
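/// Analyzes `@alignCast` (two-argument form, e.g. `@alignCast(4, ptr)`).
/// Builds the pointer type with the requested alignment, verifies the address
/// at comptime when it is known, and otherwise emits a runtime safety check
/// that `addr & (align - 1) == 0` (zero-length slices are exempt).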
fn zirAlignCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const align_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
const ptr_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
const dest_align = try sema.resolveAlign(block, align_src, extra.lhs);
const ptr = try sema.resolveInst(extra.rhs);
const ptr_ty = sema.typeOf(ptr);
try sema.checkPtrOperand(block, ptr_src, ptr_ty);
var ptr_info = ptr_ty.ptrInfo(mod);
ptr_info.@"align" = dest_align;
var dest_ty = try Type.ptr(sema.arena, sema.mod, ptr_info);
if (ptr_ty.zigTypeTag(mod) == .Optional) {
dest_ty = try mod.optionalType(dest_ty.toIntern());
}
if (try sema.resolveDefinedValue(block, ptr_src, ptr)) |val| {
if (try val.getUnsignedIntAdvanced(mod, null)) |addr| {
if (addr % dest_align != 0) {
return sema.fail(block, ptr_src, "pointer address 0x{X} is not aligned to {d} bytes", .{ addr, dest_align });
}
}
return sema.addConstant(dest_ty, val);
}
try sema.requireRuntimeBlock(block, inst_data.src(), ptr_src);
if (block.wantSafety() and dest_align > 1 and
try sema.typeHasRuntimeBits(ptr_info.pointee_type))
{
const align_minus_1 = try sema.addConstant(
Type.usize,
try mod.intValue(Type.usize, dest_align - 1),
);
const actual_ptr = if (ptr_ty.isSlice(mod))
try sema.analyzeSlicePtr(block, ptr_src, ptr, ptr_ty)
else
ptr;
const ptr_int = try block.addUnOp(.ptrtoint, actual_ptr);
const remainder = try block.addBinOp(.bit_and, ptr_int, align_minus_1);
const is_aligned = try block.addBinOp(.cmp_eq, remainder, .zero_usize);
const ok = if (ptr_ty.isSlice(mod)) ok: {
const len = try sema.analyzeSliceLen(block, ptr_src, ptr);
const len_zero = try block.addBinOp(.cmp_eq, len, .zero_usize);
break :ok try block.addBinOp(.bit_or, len_zero, is_aligned);
} else is_aligned;
try sema.addSafetyCheck(block, ok, .incorrect_alignment);
}
return sema.bitCast(block, dest_ty, ptr, ptr_src, null);
}
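/// Shared implementation of the bit-counting builtins (`@clz`, `@ctz`,
/// `@popCount`); each caller supplies its own `air_tag` and comptime fold
/// function. The result type is the smallest unsigned integer that can hold
/// the operand's bit width, vectorized for vector operands.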
fn zirBitCount(
sema: *Sema,
block: *Block,
inst: Zir.Inst.Index,
air_tag: Air.Inst.Tag,
comptime comptimeOp: fn (val: Value, ty: Type, mod: *Module) u64,
) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const src = inst_data.src();
const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
const operand = try sema.resolveInst(inst_data.operand);
const operand_ty = sema.typeOf(operand);
_ = try sema.checkIntOrVector(block, operand, operand_src);
const bits = operand_ty.intInfo(mod).bits;
if (try sema.typeHasOnePossibleValue(operand_ty)) |val| {
return sema.addConstant(operand_ty, val);
}
const result_scalar_ty = try mod.smallestUnsignedInt(bits);
switch (operand_ty.zigTypeTag(mod)) {
.Vector => {
const vec_len = operand_ty.vectorLen(mod);
const result_ty = try mod.vectorType(.{
.len = vec_len,
.child = result_scalar_ty.ip_index,
});
if (try sema.resolveMaybeUndefVal(operand)) |val| {
if (val.isUndef(mod)) return sema.addConstUndef(result_ty);
const elems = try sema.arena.alloc(Value, vec_len);
const scalar_ty = operand_ty.scalarType(mod);
for (elems, 0..) |*elem, i| {
const elem_val = try val.elemValue(sema.mod, i);
const count = comptimeOp(elem_val, scalar_ty, mod);
elem.* = try mod.intValue(scalar_ty, count);
}
return sema.addConstant(
result_ty,
try Value.Tag.aggregate.create(sema.arena, elems),
);
} else {
try sema.requireRuntimeBlock(block, src, operand_src);
return block.addTyOp(air_tag, result_ty, operand);
}
},
.Int => {
if (try sema.resolveMaybeUndefVal(operand)) |val| {
if (val.isUndef(mod)) return sema.addConstUndef(result_scalar_ty);
try sema.resolveLazyValue(val);
return sema.addIntUnsigned(result_scalar_ty, comptimeOp(val, operand_ty, mod));
} else {
try sema.requireRuntimeBlock(block, src, operand_src);
return block.addTyOp(air_tag, result_scalar_ty, operand);
}
},
else => unreachable,
}
}
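/// Analyzes `@byteSwap`. The scalar bit width must be a multiple of 8;
/// comptime-known operands fold (elementwise for vectors), otherwise the
/// operation lowers to the `byte_swap` AIR instruction.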
fn zirByteSwap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const src = inst_data.src();
const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
const operand = try sema.resolveInst(inst_data.operand);
const operand_ty = sema.typeOf(operand);
const scalar_ty = try sema.checkIntOrVector(block, operand, operand_src);
const bits = scalar_ty.intInfo(mod).bits;
if (bits % 8 != 0) {
return sema.fail(
block,
operand_src,
"@byteSwap requires the number of bits to be evenly divisible by 8, but {} has {} bits",
.{ scalar_ty.fmt(sema.mod), bits },
);
}
if (try sema.typeHasOnePossibleValue(operand_ty)) |val| {
return sema.addConstant(operand_ty, val);
}
switch (operand_ty.zigTypeTag(mod)) {
.Int => {
const runtime_src = if (try sema.resolveMaybeUndefVal(operand)) |val| {
if (val.isUndef(mod)) return sema.addConstUndef(operand_ty);
const result_val = try val.byteSwap(operand_ty, mod, sema.arena);
return sema.addConstant(operand_ty, result_val);
} else operand_src;
try sema.requireRuntimeBlock(block, src, runtime_src);
return block.addTyOp(.byte_swap, operand_ty, operand);
},
.Vector => {
const runtime_src = if (try sema.resolveMaybeUndefVal(operand)) |val| {
if (val.isUndef(mod))
return sema.addConstUndef(operand_ty);
const vec_len = operand_ty.vectorLen(mod);
const elems = try sema.arena.alloc(Value, vec_len);
for (elems, 0..) |*elem, i| {
const elem_val = try val.elemValue(sema.mod, i);
elem.* = try elem_val.byteSwap(operand_ty, mod, sema.arena);
}
return sema.addConstant(
operand_ty,
try Value.Tag.aggregate.create(sema.arena, elems),
);
} else operand_src;
try sema.requireRuntimeBlock(block, src, runtime_src);
return block.addTyOp(.byte_swap, operand_ty, operand);
},
else => unreachable,
}
}
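/// Analyzes `@bitReverse`. Comptime-known operands fold (elementwise for
/// vectors), otherwise the operation lowers to the `bit_reverse` AIR
/// instruction.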
fn zirBitReverse(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const src = inst_data.src();
const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
const operand = try sema.resolveInst(inst_data.operand);
const operand_ty = sema.typeOf(operand);
const scalar_ty = try sema.checkIntOrVector(block, operand, operand_src);
if (try sema.typeHasOnePossibleValue(operand_ty)) |val| {
return sema.addConstant(operand_ty, val);
}
const mod = sema.mod;
switch (operand_ty.zigTypeTag(mod)) {
.Int => {
const runtime_src = if (try sema.resolveMaybeUndefVal(operand)) |val| {
if (val.isUndef(mod)) return sema.addConstUndef(operand_ty);
const result_val = try val.bitReverse(operand_ty, mod, sema.arena);
return sema.addConstant(operand_ty, result_val);
} else operand_src;
try sema.requireRuntimeBlock(block, src, runtime_src);
return block.addTyOp(.bit_reverse, operand_ty, operand);
},
.Vector => {
const runtime_src = if (try sema.resolveMaybeUndefVal(operand)) |val| {
if (val.isUndef(mod))
return sema.addConstUndef(operand_ty);
const vec_len = operand_ty.vectorLen(mod);
const elems = try sema.arena.alloc(Value, vec_len);
for (elems, 0..) |*elem, i| {
const elem_val = try val.elemValue(sema.mod, i);
elem.* = try elem_val.bitReverse(scalar_ty, mod, sema.arena);
}
return sema.addConstant(
operand_ty,
try Value.Tag.aggregate.create(sema.arena, elems),
);
} else operand_src;
try sema.requireRuntimeBlock(block, src, runtime_src);
return block.addTyOp(.bit_reverse, operand_ty, operand);
},
else => unreachable,
}
}
fn zirBitOffsetOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const offset = try sema.bitOffsetOf(block, inst);
return sema.addIntUnsigned(Type.comptime_int, offset);
}
fn zirOffsetOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const offset = try sema.bitOffsetOf(block, inst);
// TODO reminder to make this a compile error for packed structs
return sema.addIntUnsigned(Type.comptime_int, offset / 8);
}
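/// Shared implementation of `@bitOffsetOf` and `@offsetOf` (the latter divides
/// the result by 8). For packed structs the offset is the sum of the bit sizes
/// of all preceding fields; for other layouts it is the field's byte offset
/// times 8. Comptime fields and the tuple `len` field have no offset.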
fn bitOffsetOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!u64 {
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node };
sema.src = src;
const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node };
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const ty = try sema.resolveType(block, lhs_src, extra.lhs);
const field_name = try sema.resolveConstString(block, rhs_src, extra.rhs, "name of field must be comptime-known");
const mod = sema.mod;
try sema.resolveTypeLayout(ty);
switch (ty.zigTypeTag(mod)) {
.Struct => {},
else => {
const msg = msg: {
const msg = try sema.errMsg(block, lhs_src, "expected struct type, found '{}'", .{ty.fmt(sema.mod)});
errdefer msg.destroy(sema.gpa);
try sema.addDeclaredHereNote(msg, ty);
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
},
}
const field_index = if (ty.isTuple(mod)) blk: {
if (mem.eql(u8, field_name, "len")) {
return sema.fail(block, src, "no offset available for 'len' field of tuple", .{});
}
break :blk try sema.tupleFieldIndex(block, ty, field_name, rhs_src);
} else try sema.structFieldIndex(block, ty, field_name, rhs_src);
if (ty.structFieldIsComptime(field_index, mod)) {
return sema.fail(block, src, "no offset available for comptime field", .{});
}
switch (ty.containerLayout(mod)) {
.Packed => {
var bit_sum: u64 = 0;
const fields = ty.structFields(mod);
for (fields.values(), 0..) |field, i| {
if (i == field_index) {
return bit_sum;
}
bit_sum += field.ty.bitSize(mod);
} else unreachable;
},
else => return ty.structFieldOffset(field_index, mod) * 8,
}
}
fn checkNamespaceType(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError!void {
const mod = sema.mod;
switch (ty.zigTypeTag(mod)) {
.Struct, .Enum, .Union, .Opaque => return,
else => return sema.fail(block, src, "expected struct, enum, union, or opaque; found '{}'", .{ty.fmt(sema.mod)}),
}
}
/// Returns `true` if the type was a comptime_int.
fn checkIntType(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError!bool {
const mod = sema.mod;
switch (try ty.zigTypeTagOrPoison(mod)) {
.ComptimeInt => return true,
.Int => return false,
else => return sema.fail(block, src, "expected integer type, found '{}'", .{ty.fmt(sema.mod)}),
}
}
fn checkInvalidPtrArithmetic(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
ty: Type,
) CompileError!void {
const mod = sema.mod;
switch (try ty.zigTypeTagOrPoison(mod)) {
.Pointer => switch (ty.ptrSize(mod)) {
.One, .Slice => return,
.Many, .C => return sema.fail(
block,
src,
"invalid pointer arithmetic operator",
.{},
),
},
else => return,
}
}
fn checkArithmeticOp(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
scalar_tag: std.builtin.TypeId,
lhs_zig_ty_tag: std.builtin.TypeId,
rhs_zig_ty_tag: std.builtin.TypeId,
zir_tag: Zir.Inst.Tag,
) CompileError!void {
const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt;
const is_float = scalar_tag == .Float or scalar_tag == .ComptimeFloat;
if (!is_int and !(is_float and floatOpAllowed(zir_tag))) {
return sema.fail(block, src, "invalid operands to binary expression: '{s}' and '{s}'", .{
@tagName(lhs_zig_ty_tag), @tagName(rhs_zig_ty_tag),
});
}
}
fn checkPtrOperand(
sema: *Sema,
block: *Block,
ty_src: LazySrcLoc,
ty: Type,
) CompileError!void {
const mod = sema.mod;
switch (ty.zigTypeTag(mod)) {
.Pointer => return,
.Fn => {
const msg = msg: {
const msg = try sema.errMsg(
block,
ty_src,
"expected pointer, found '{}'",
.{ty.fmt(sema.mod)},
);
errdefer msg.destroy(sema.gpa);
try sema.errNote(block, ty_src, msg, "use '&' to obtain a function pointer", .{});
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
},
.Optional => if (ty.isPtrLikeOptional(mod)) return,
else => {},
}
return sema.fail(block, ty_src, "expected pointer type, found '{}'", .{ty.fmt(sema.mod)});
}
fn checkPtrType(
sema: *Sema,
block: *Block,
ty_src: LazySrcLoc,
ty: Type,
) CompileError!void {
const mod = sema.mod;
switch (ty.zigTypeTag(mod)) {
.Pointer => return,
.Fn => {
const msg = msg: {
const msg = try sema.errMsg(
block,
ty_src,
"expected pointer type, found '{}'",
.{ty.fmt(sema.mod)},
);
errdefer msg.destroy(sema.gpa);
try sema.errNote(block, ty_src, msg, "use '*const ' to make a function pointer type", .{});
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
},
.Optional => if (ty.isPtrLikeOptional(mod)) return,
else => {},
}
return sema.fail(block, ty_src, "expected pointer type, found '{}'", .{ty.fmt(sema.mod)});
}
fn checkVectorElemType(
sema: *Sema,
block: *Block,
ty_src: LazySrcLoc,
ty: Type,
) CompileError!void {
const mod = sema.mod;
switch (ty.zigTypeTag(mod)) {
.Int, .Float, .Bool => return,
else => if (ty.isPtrAtRuntime(mod)) return,
}
return sema.fail(block, ty_src, "expected integer, float, bool, or pointer for the vector element type; found '{}'", .{ty.fmt(sema.mod)});
}
fn checkFloatType(
sema: *Sema,
block: *Block,
ty_src: LazySrcLoc,
ty: Type,
) CompileError!void {
const mod = sema.mod;
switch (ty.zigTypeTag(mod)) {
.ComptimeInt, .ComptimeFloat, .Float => {},
else => return sema.fail(block, ty_src, "expected float type, found '{}'", .{ty.fmt(sema.mod)}),
}
}
fn checkNumericType(
sema: *Sema,
block: *Block,
ty_src: LazySrcLoc,
ty: Type,
) CompileError!void {
const mod = sema.mod;
switch (ty.zigTypeTag(mod)) {
.ComptimeFloat, .Float, .ComptimeInt, .Int => {},
.Vector => switch (ty.childType(mod).zigTypeTag(mod)) {
.ComptimeFloat, .Float, .ComptimeInt, .Int => {},
else => |t| return sema.fail(block, ty_src, "expected number, found '{}'", .{t}),
},
else => return sema.fail(block, ty_src, "expected number, found '{}'", .{ty.fmt(sema.mod)}),
}
}
/// Returns the casted pointer.
fn checkAtomicPtrOperand(
sema: *Sema,
block: *Block,
elem_ty: Type,
elem_ty_src: LazySrcLoc,
ptr: Air.Inst.Ref,
ptr_src: LazySrcLoc,
ptr_const: bool,
) CompileError!Air.Inst.Ref {
const mod = sema.mod;
var diag: Module.AtomicPtrAlignmentDiagnostics = .{};
const alignment = mod.atomicPtrAlignment(elem_ty, &diag) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.FloatTooBig => return sema.fail(
block,
elem_ty_src,
"expected {d}-bit float type or smaller; found {d}-bit float type",
.{ diag.max_bits, diag.bits },
),
error.IntTooBig => return sema.fail(
block,
elem_ty_src,
"expected {d}-bit integer type or smaller; found {d}-bit integer type",
.{ diag.max_bits, diag.bits },
),
error.BadType => return sema.fail(
block,
elem_ty_src,
"expected bool, integer, float, enum, or pointer type; found '{}'",
.{elem_ty.fmt(sema.mod)},
),
};
var wanted_ptr_data: Type.Payload.Pointer.Data = .{
.pointee_type = elem_ty,
.@"align" = alignment,
.@"addrspace" = .generic,
.mutable = !ptr_const,
};
const ptr_ty = sema.typeOf(ptr);
const ptr_data = switch (try ptr_ty.zigTypeTagOrPoison(mod)) {
.Pointer => ptr_ty.ptrInfo(mod),
else => {
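// Not a pointer type: this coercion is expected to fail, producing
// the error message we want.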
const wanted_ptr_ty = try Type.ptr(sema.arena, sema.mod, wanted_ptr_data);
_ = try sema.coerce(block, wanted_ptr_ty, ptr, ptr_src);
unreachable;
},
};
wanted_ptr_data.@"addrspace" = ptr_data.@"addrspace";
wanted_ptr_data.@"allowzero" = ptr_data.@"allowzero";
wanted_ptr_data.@"volatile" = ptr_data.@"volatile";
const wanted_ptr_ty = try Type.ptr(sema.arena, sema.mod, wanted_ptr_data);
const casted_ptr = try sema.coerce(block, wanted_ptr_ty, ptr, ptr_src);
return casted_ptr;
}
fn checkPtrIsNotComptimeMutable(
sema: *Sema,
block: *Block,
ptr_val: Value,
ptr_src: LazySrcLoc,
operand_src: LazySrcLoc,
) CompileError!void {
_ = operand_src;
if (ptr_val.isComptimeMutablePtr()) {
return sema.fail(block, ptr_src, "cannot store runtime value in compile time variable", .{});
}
}
fn checkComptimeVarStore(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
decl_ref_mut: Value.Payload.DeclRefMut.Data,
) CompileError!void {
if (@enumToInt(decl_ref_mut.runtime_index) < @enumToInt(block.runtime_index)) {
if (block.runtime_cond) |cond_src| {
const msg = msg: {
const msg = try sema.errMsg(block, src, "store to comptime variable depends on runtime condition", .{});
errdefer msg.destroy(sema.gpa);
try sema.errNote(block, cond_src, msg, "runtime condition here", .{});
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
if (block.runtime_loop) |loop_src| {
const msg = msg: {
const msg = try sema.errMsg(block, src, "cannot store to comptime variable in non-inline loop", .{});
errdefer msg.destroy(sema.gpa);
try sema.errNote(block, loop_src, msg, "non-inline loop here", .{});
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
unreachable;
}
}
fn checkIntOrVector(
sema: *Sema,
block: *Block,
operand: Air.Inst.Ref,
operand_src: LazySrcLoc,
) CompileError!Type {
const mod = sema.mod;
const operand_ty = sema.typeOf(operand);
switch (try operand_ty.zigTypeTagOrPoison(mod)) {
.Int => return operand_ty,
.Vector => {
const elem_ty = operand_ty.childType(mod);
switch (try elem_ty.zigTypeTagOrPoison(mod)) {
.Int => return elem_ty,
else => return sema.fail(block, operand_src, "expected vector of integers; found vector of '{}'", .{
elem_ty.fmt(sema.mod),
}),
}
},
else => return sema.fail(block, operand_src, "expected integer or vector, found '{}'", .{
operand_ty.fmt(sema.mod),
}),
}
}
fn checkIntOrVectorAllowComptime(
sema: *Sema,
block: *Block,
operand_ty: Type,
operand_src: LazySrcLoc,
) CompileError!Type {
const mod = sema.mod;
switch (try operand_ty.zigTypeTagOrPoison(mod)) {
.Int, .ComptimeInt => return operand_ty,
.Vector => {
const elem_ty = operand_ty.childType(mod);
switch (try elem_ty.zigTypeTagOrPoison(mod)) {
.Int, .ComptimeInt => return elem_ty,
else => return sema.fail(block, operand_src, "expected vector of integers; found vector of '{}'", .{
elem_ty.fmt(sema.mod),
}),
}
},
else => return sema.fail(block, operand_src, "expected integer or vector, found '{}'", .{
operand_ty.fmt(sema.mod),
}),
}
}
fn checkErrorSetType(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError!void {
const mod = sema.mod;
switch (ty.zigTypeTag(mod)) {
.ErrorSet => return,
else => return sema.fail(block, src, "expected error set type, found '{}'", .{ty.fmt(sema.mod)}),
}
}
const SimdBinOp = struct {
len: ?usize,
/// Coerced to `result_ty`.
lhs: Air.Inst.Ref,
/// Coerced to `result_ty`.
rhs: Air.Inst.Ref,
lhs_val: ?Value,
rhs_val: ?Value,
/// Only different than `scalar_ty` when it is a vector operation.
result_ty: Type,
scalar_ty: Type,
};
fn checkSimdBinOp(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
uncasted_lhs: Air.Inst.Ref,
uncasted_rhs: Air.Inst.Ref,
lhs_src: LazySrcLoc,
rhs_src: LazySrcLoc,
) CompileError!SimdBinOp {
const mod = sema.mod;
const lhs_ty = sema.typeOf(uncasted_lhs);
const rhs_ty = sema.typeOf(uncasted_rhs);
try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
const vec_len: ?usize = if (lhs_ty.zigTypeTag(mod) == .Vector) lhs_ty.vectorLen(mod) else null;
const result_ty = try sema.resolvePeerTypes(block, src, &.{ uncasted_lhs, uncasted_rhs }, .{
.override = &[_]?LazySrcLoc{ lhs_src, rhs_src },
});
const lhs = try sema.coerce(block, result_ty, uncasted_lhs, lhs_src);
const rhs = try sema.coerce(block, result_ty, uncasted_rhs, rhs_src);
return SimdBinOp{
.len = vec_len,
.lhs = lhs,
.rhs = rhs,
.lhs_val = try sema.resolveMaybeUndefVal(lhs),
.rhs_val = try sema.resolveMaybeUndefVal(rhs),
.result_ty = result_ty,
.scalar_ty = result_ty.scalarType(mod),
};
}
fn checkVectorizableBinaryOperands(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
lhs_ty: Type,
rhs_ty: Type,
lhs_src: LazySrcLoc,
rhs_src: LazySrcLoc,
) CompileError!void {
const mod = sema.mod;
const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(mod);
const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(mod);
if (lhs_zig_ty_tag != .Vector and rhs_zig_ty_tag != .Vector) return;
const lhs_is_vector = switch (lhs_zig_ty_tag) {
.Vector, .Array => true,
else => false,
};
const rhs_is_vector = switch (rhs_zig_ty_tag) {
.Vector, .Array => true,
else => false,
};
if (lhs_is_vector and rhs_is_vector) {
const lhs_len = lhs_ty.arrayLen(mod);
const rhs_len = rhs_ty.arrayLen(mod);
if (lhs_len != rhs_len) {
const msg = msg: {
const msg = try sema.errMsg(block, src, "vector length mismatch", .{});
errdefer msg.destroy(sema.gpa);
try sema.errNote(block, lhs_src, msg, "length {d} here", .{lhs_len});
try sema.errNote(block, rhs_src, msg, "length {d} here", .{rhs_len});
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
} else {
const msg = msg: {
const msg = try sema.errMsg(block, src, "mixed scalar and vector operands: '{}' and '{}'", .{
lhs_ty.fmt(sema.mod), rhs_ty.fmt(sema.mod),
});
errdefer msg.destroy(sema.gpa);
if (lhs_is_vector) {
try sema.errNote(block, lhs_src, msg, "vector here", .{});
try sema.errNote(block, rhs_src, msg, "scalar here", .{});
} else {
try sema.errNote(block, lhs_src, msg, "scalar here", .{});
try sema.errNote(block, rhs_src, msg, "vector here", .{});
}
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
}
fn maybeOptionsSrc(sema: *Sema, block: *Block, base_src: LazySrcLoc, wanted: []const u8) LazySrcLoc {
if (base_src == .unneeded) return .unneeded;
const mod = sema.mod;
return mod.optionsSrc(sema.mod.declPtr(block.src_decl), base_src, wanted);
}
fn resolveExportOptions(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
zir_ref: Zir.Inst.Ref,
) CompileError!std.builtin.ExportOptions {
const mod = sema.mod;
const export_options_ty = try sema.getBuiltinType("ExportOptions");
const air_ref = try sema.resolveInst(zir_ref);
const options = try sema.coerce(block, export_options_ty, air_ref, src);
const name_src = sema.maybeOptionsSrc(block, src, "name");
const linkage_src = sema.maybeOptionsSrc(block, src, "linkage");
const section_src = sema.maybeOptionsSrc(block, src, "section");
const visibility_src = sema.maybeOptionsSrc(block, src, "visibility");
const name_operand = try sema.fieldVal(block, src, options, "name", name_src);
const name_val = try sema.resolveConstValue(block, name_src, name_operand, "name of exported value must be comptime-known");
const name_ty = Type.const_slice_u8;
const name = try name_val.toAllocatedBytes(name_ty, sema.arena, mod);
const linkage_operand = try sema.fieldVal(block, src, options, "linkage", linkage_src);
const linkage_val = try sema.resolveConstValue(block, linkage_src, linkage_operand, "linkage of exported value must be comptime-known");
const linkage = mod.toEnum(std.builtin.GlobalLinkage, linkage_val);
const section_operand = try sema.fieldVal(block, src, options, "section", section_src);
const section_opt_val = try sema.resolveConstValue(block, section_src, section_operand, "linksection of exported value must be comptime-known");
const section_ty = Type.const_slice_u8;
const section = if (section_opt_val.optionalValue(mod)) |section_val|
try section_val.toAllocatedBytes(section_ty, sema.arena, mod)
else
null;
const visibility_operand = try sema.fieldVal(block, src, options, "visibility", visibility_src);
const visibility_val = try sema.resolveConstValue(block, visibility_src, visibility_operand, "visibility of exported value must be comptime-known");
const visibility = mod.toEnum(std.builtin.SymbolVisibility, visibility_val);
if (name.len < 1) {
return sema.fail(block, name_src, "exported symbol name cannot be empty", .{});
}
if (visibility != .default and linkage == .Internal) {
return sema.fail(block, visibility_src, "symbol '{s}' exported with internal linkage has non-default visibility {s}", .{
name, @tagName(visibility),
});
}
return std.builtin.ExportOptions{
.name = name,
.linkage = linkage,
.section = section,
.visibility = visibility,
};
}
fn resolveBuiltinEnum(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
zir_ref: Zir.Inst.Ref,
comptime name: []const u8,
reason: []const u8,
) CompileError!@field(std.builtin, name) {
const mod = sema.mod;
const ty = try sema.getBuiltinType(name);
const air_ref = try sema.resolveInst(zir_ref);
const coerced = try sema.coerce(block, ty, air_ref, src);
const val = try sema.resolveConstValue(block, src, coerced, reason);
return mod.toEnum(@field(std.builtin, name), val);
}
fn resolveAtomicOrder(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
zir_ref: Zir.Inst.Ref,
reason: []const u8,
) CompileError!std.builtin.AtomicOrder {
return sema.resolveBuiltinEnum(block, src, zir_ref, "AtomicOrder", reason);
}
fn resolveAtomicRmwOp(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
zir_ref: Zir.Inst.Ref,
) CompileError!std.builtin.AtomicRmwOp {
return sema.resolveBuiltinEnum(block, src, zir_ref, "AtomicRmwOp", "@atomicRmW operation must be comptime-known");
}
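/// Analyzes `@cmpxchgWeak` (`extended.small == 0`) and `@cmpxchgStrong`
/// (`extended.small == 1`). Float element types are rejected. The result is
/// an optional of the element type: `null` on success, the previously stored
/// value on failure. Success and failure orderings are validated and packed
/// into `flags` as `success | (failure << 3)`.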
fn zirCmpxchg(
sema: *Sema,
block: *Block,
extended: Zir.Inst.Extended.InstData,
) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const extra = sema.code.extraData(Zir.Inst.Cmpxchg, extended.operand).data;
const air_tag: Air.Inst.Tag = switch (extended.small) {
0 => .cmpxchg_weak,
1 => .cmpxchg_strong,
else => unreachable,
};
const src = LazySrcLoc.nodeOffset(extra.node);
// zig fmt: off
const elem_ty_src : LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
const ptr_src : LazySrcLoc = .{ .node_offset_builtin_call_arg1 = extra.node };
const expected_src : LazySrcLoc = .{ .node_offset_builtin_call_arg2 = extra.node };
const new_value_src : LazySrcLoc = .{ .node_offset_builtin_call_arg3 = extra.node };
const success_order_src: LazySrcLoc = .{ .node_offset_builtin_call_arg4 = extra.node };
const failure_order_src: LazySrcLoc = .{ .node_offset_builtin_call_arg5 = extra.node };
// zig fmt: on
const expected_value = try sema.resolveInst(extra.expected_value);
const elem_ty = sema.typeOf(expected_value);
if (elem_ty.zigTypeTag(mod) == .Float) {
return sema.fail(
block,
elem_ty_src,
"expected bool, integer, enum, or pointer type; found '{}'",
.{elem_ty.fmt(sema.mod)},
);
}
const uncasted_ptr = try sema.resolveInst(extra.ptr);
const ptr = try sema.checkAtomicPtrOperand(block, elem_ty, elem_ty_src, uncasted_ptr, ptr_src, false);
const new_value = try sema.coerce(block, elem_ty, try sema.resolveInst(extra.new_value), new_value_src);
const success_order = try sema.resolveAtomicOrder(block, success_order_src, extra.success_order, "atomic order of cmpxchg success must be comptime-known");
const failure_order = try sema.resolveAtomicOrder(block, failure_order_src, extra.failure_order, "atomic order of cmpxchg failure must be comptime-known");
if (@enumToInt(success_order) < @enumToInt(std.builtin.AtomicOrder.Monotonic)) {
return sema.fail(block, success_order_src, "success atomic ordering must be Monotonic or stricter", .{});
}
if (@enumToInt(failure_order) < @enumToInt(std.builtin.AtomicOrder.Monotonic)) {
return sema.fail(block, failure_order_src, "failure atomic ordering must be Monotonic or stricter", .{});
}
if (@enumToInt(failure_order) > @enumToInt(success_order)) {
return sema.fail(block, failure_order_src, "failure atomic ordering must be no stricter than success", .{});
}
if (failure_order == .Release or failure_order == .AcqRel) {
return sema.fail(block, failure_order_src, "failure atomic ordering must not be Release or AcqRel", .{});
}
const result_ty = try Type.optional(sema.arena, elem_ty, mod);
// Special case zero-bit types: the stored value always equals the expected
// value, so the exchange always succeeds and returns null.
if ((try sema.typeHasOnePossibleValue(elem_ty)) != null) {
return sema.addConstant(result_ty, Value.null);
}
const runtime_src = if (try sema.resolveDefinedValue(block, ptr_src, ptr)) |ptr_val| rs: {
if (try sema.resolveMaybeUndefVal(expected_value)) |expected_val| {
if (try sema.resolveMaybeUndefVal(new_value)) |new_val| {
if (expected_val.isUndef(mod) or new_val.isUndef(mod)) {
// TODO: this should probably cause the memory stored at the pointer
// to become undef as well
return sema.addConstUndef(result_ty);
}
const ptr_ty = sema.typeOf(ptr);
const stored_val = (try sema.pointerDeref(block, ptr_src, ptr_val, ptr_ty)) orelse break :rs ptr_src;
const result_val = if (stored_val.eql(expected_val, elem_ty, sema.mod)) blk: {
try sema.storePtr(block, src, ptr, new_value);
break :blk Value.null;
} else try Value.Tag.opt_payload.create(sema.arena, stored_val);
return sema.addConstant(result_ty, result_val);
} else break :rs new_value_src;
} else break :rs expected_src;
} else ptr_src;
const flags: u32 = @as(u32, @enumToInt(success_order)) |
(@as(u32, @enumToInt(failure_order)) << 3);
try sema.requireRuntimeBlock(block, src, runtime_src);
return block.addInst(.{
.tag = air_tag,
.data = .{ .ty_pl = .{
.ty = try sema.addType(result_ty),
.payload = try sema.addExtra(Air.Cmpxchg{
.ptr = ptr,
.expected_value = expected_value,
.new_value = new_value,
.flags = flags,
}),
} },
});
}
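/// Analyzes `@splat` (two-argument form, e.g. `@splat(4, scalar)`): the
/// comptime-known length and the scalar's type determine the result vector
/// type. A comptime-known scalar folds to a repeated-value aggregate.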
fn zirSplat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const len_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
const scalar_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node };
const len = @intCast(u32, try sema.resolveInt(block, len_src, extra.lhs, Type.u32, "vector splat destination length must be comptime-known"));
const scalar = try sema.resolveInst(extra.rhs);
const scalar_ty = sema.typeOf(scalar);
try sema.checkVectorElemType(block, scalar_src, scalar_ty);
const vector_ty = try mod.vectorType(.{
.len = len,
.child = scalar_ty.ip_index,
});
if (try sema.resolveMaybeUndefVal(scalar)) |scalar_val| {
if (scalar_val.isUndef(mod)) return sema.addConstUndef(vector_ty);
return sema.addConstant(
vector_ty,
try Value.Tag.repeated.create(sema.arena, scalar_val),
);
}
try sema.requireRuntimeBlock(block, inst_data.src(), scalar_src);
return block.addTyOp(.splat, vector_ty, scalar);
}
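/// Analyzes `@reduce`, e.g. `@reduce(.Add, vec)`. Boolean/integer operands
/// permit `.And`/`.Or`/`.Xor`; integer/float operands permit
/// `.Min`/`.Max`/`.Add`/`.Mul`. Zero-length vectors are rejected. Comptime
/// folding accumulates left to right, with wrapping `.Add` and `.Mul`.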
fn zirReduce(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const op_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
const operation = try sema.resolveBuiltinEnum(block, op_src, extra.lhs, "ReduceOp", "@reduce operation must be comptime-known");
const operand = try sema.resolveInst(extra.rhs);
const operand_ty = sema.typeOf(operand);
const mod = sema.mod;
if (operand_ty.zigTypeTag(mod) != .Vector) {
return sema.fail(block, operand_src, "expected vector, found '{}'", .{operand_ty.fmt(mod)});
}
const scalar_ty = operand_ty.childType(mod);
// Type-check depending on operation.
switch (operation) {
.And, .Or, .Xor => switch (scalar_ty.zigTypeTag(mod)) {
.Int, .Bool => {},
else => return sema.fail(block, operand_src, "@reduce operation '{s}' requires integer or boolean operand; found '{}'", .{
@tagName(operation), operand_ty.fmt(mod),
}),
},
.Min, .Max, .Add, .Mul => switch (scalar_ty.zigTypeTag(mod)) {
.Int, .Float => {},
else => return sema.fail(block, operand_src, "@reduce operation '{s}' requires integer or float operand; found '{}'", .{
@tagName(operation), operand_ty.fmt(mod),
}),
},
}
const vec_len = operand_ty.vectorLen(mod);
if (vec_len == 0) {
// TODO re-evaluate if we should introduce a "neutral value" for some operations,
// e.g. zero for add and one for mul.
return sema.fail(block, operand_src, "@reduce operation requires a vector with nonzero length", .{});
}
if (try sema.resolveMaybeUndefVal(operand)) |operand_val| {
if (operand_val.isUndef(mod)) return sema.addConstUndef(scalar_ty);
var accum: Value = try operand_val.elemValue(mod, 0);
var i: u32 = 1;
while (i < vec_len) : (i += 1) {
const elem_val = try operand_val.elemValue(mod, i);
switch (operation) {
.And => accum = try accum.bitwiseAnd(elem_val, scalar_ty, sema.arena, mod),
.Or => accum = try accum.bitwiseOr(elem_val, scalar_ty, sema.arena, mod),
.Xor => accum = try accum.bitwiseXor(elem_val, scalar_ty, sema.arena, mod),
.Min => accum = accum.numberMin(elem_val, mod),
.Max => accum = accum.numberMax(elem_val, mod),
.Add => accum = try sema.numberAddWrapScalar(accum, elem_val, scalar_ty),
.Mul => accum = try accum.numberMulWrap(elem_val, scalar_ty, sema.arena, mod),
}
}
return sema.addConstant(scalar_ty, accum);
}
try sema.requireRuntimeBlock(block, inst_data.src(), operand_src);
return block.addInst(.{
.tag = if (block.float_mode == .Optimized) .reduce_optimized else .reduce,
.data = .{ .reduce = .{
.operand = operand,
.operation = operation,
} },
});
}
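/// Analyzes `@shuffle(E, a, b, mask)`: coerces the mask to a vector of `i32`,
/// requires it to be comptime-known, and delegates to `analyzeShuffle`.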
fn zirShuffle(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const extra = sema.code.extraData(Zir.Inst.Shuffle, inst_data.payload_index).data;
const elem_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
const mask_src: LazySrcLoc = .{ .node_offset_builtin_call_arg3 = inst_data.src_node };
const elem_ty = try sema.resolveType(block, elem_ty_src, extra.elem_type);
try sema.checkVectorElemType(block, elem_ty_src, elem_ty);
const a = try sema.resolveInst(extra.a);
const b = try sema.resolveInst(extra.b);
var mask = try sema.resolveInst(extra.mask);
var mask_ty = sema.typeOf(mask);
const mask_len = switch (mask_ty.zigTypeTag(mod)) {
.Array, .Vector => mask_ty.arrayLen(mod),
else => return sema.fail(block, mask_src, "expected vector or array, found '{}'", .{mask_ty.fmt(sema.mod)}),
};
mask_ty = try mod.vectorType(.{
.len = @intCast(u32, mask_len),
.child = .i32_type,
});
mask = try sema.coerce(block, mask_ty, mask, mask_src);
const mask_val = try sema.resolveConstMaybeUndefVal(block, mask_src, mask, "shuffle mask must be comptime-known");
return sema.analyzeShuffle(block, inst_data.src_node, elem_ty, a, b, mask_val, @intCast(u32, mask_len));
}
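/// Core of `@shuffle`. A non-negative mask element `i` selects `a[i]`; a
/// negative element selects `b[~i]` (so `-1` selects `b[0]`). Undefined mask
/// elements yield undefined result elements. Out-of-bounds selections are
/// compile errors, and operands of different lengths are padded with undef
/// via a single recursive shuffle.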
fn analyzeShuffle(
sema: *Sema,
block: *Block,
src_node: i32,
elem_ty: Type,
a_arg: Air.Inst.Ref,
b_arg: Air.Inst.Ref,
mask: Value,
mask_len: u32,
) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const a_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = src_node };
const b_src: LazySrcLoc = .{ .node_offset_builtin_call_arg2 = src_node };
const mask_src: LazySrcLoc = .{ .node_offset_builtin_call_arg3 = src_node };
var a = a_arg;
var b = b_arg;
const res_ty = try mod.vectorType(.{
.len = mask_len,
.child = elem_ty.ip_index,
});
const maybe_a_len = switch (sema.typeOf(a).zigTypeTag(mod)) {
.Array, .Vector => sema.typeOf(a).arrayLen(mod),
.Undefined => null,
else => return sema.fail(block, a_src, "expected vector or array with element type '{}', found '{}'", .{
elem_ty.fmt(sema.mod),
sema.typeOf(a).fmt(sema.mod),
}),
};
const maybe_b_len = switch (sema.typeOf(b).zigTypeTag(mod)) {
.Array, .Vector => sema.typeOf(b).arrayLen(mod),
.Undefined => null,
else => return sema.fail(block, b_src, "expected vector or array with element type '{}', found '{}'", .{
elem_ty.fmt(sema.mod),
sema.typeOf(b).fmt(sema.mod),
}),
};
if (maybe_a_len == null and maybe_b_len == null) {
return sema.addConstUndef(res_ty);
}
const a_len = @intCast(u32, maybe_a_len orelse maybe_b_len.?);
const b_len = @intCast(u32, maybe_b_len orelse a_len);
const a_ty = try mod.vectorType(.{
.len = a_len,
.child = elem_ty.ip_index,
});
const b_ty = try mod.vectorType(.{
.len = b_len,
.child = elem_ty.ip_index,
});
if (maybe_a_len == null) a = try sema.addConstUndef(a_ty) else a = try sema.coerce(block, a_ty, a, a_src);
if (maybe_b_len == null) b = try sema.addConstUndef(b_ty) else b = try sema.coerce(block, b_ty, b, b_src);
const operand_info = [2]std.meta.Tuple(&.{ u64, LazySrcLoc, Type }){
.{ a_len, a_src, a_ty },
.{ b_len, b_src, b_ty },
};
var i: usize = 0;
while (i < mask_len) : (i += 1) {
const elem = try mask.elemValue(sema.mod, i);
if (elem.isUndef(mod)) continue;
const int = elem.toSignedInt(mod);
var unsigned: u32 = undefined;
var chosen: u32 = undefined;
if (int >= 0) {
unsigned = @intCast(u32, int);
chosen = 0;
} else {
unsigned = @intCast(u32, ~int);
chosen = 1;
}
if (unsigned >= operand_info[chosen][0]) {
const msg = msg: {
const msg = try sema.errMsg(block, mask_src, "mask index '{d}' has out-of-bounds selection", .{i});
errdefer msg.destroy(sema.gpa);
try sema.errNote(block, operand_info[chosen][1], msg, "selected index '{d}' out of bounds of '{}'", .{
unsigned,
operand_info[chosen][2].fmt(sema.mod),
});
if (chosen == 0) {
try sema.errNote(block, b_src, msg, "selections from the second vector are specified with negative numbers", .{});
}
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
}
if (try sema.resolveMaybeUndefVal(a)) |a_val| {
if (try sema.resolveMaybeUndefVal(b)) |b_val| {
const values = try sema.arena.alloc(Value, mask_len);
i = 0;
while (i < mask_len) : (i += 1) {
const mask_elem_val = try mask.elemValue(sema.mod, i);
if (mask_elem_val.isUndef(mod)) {
values[i] = Value.undef;
continue;
}
const int = mask_elem_val.toSignedInt(mod);
const unsigned = if (int >= 0) @intCast(u32, int) else @intCast(u32, ~int);
if (int >= 0) {
values[i] = try a_val.elemValue(sema.mod, unsigned);
} else {
values[i] = try b_val.elemValue(sema.mod, unsigned);
}
}
const res_val = try Value.Tag.aggregate.create(sema.arena, values);
return sema.addConstant(res_ty, res_val);
}
}
// All static analysis passed, and not comptime.
// For runtime codegen, vectors a and b must be the same length. Here we
// recursively @shuffle the smaller vector to append undefined elements
// to it up to the length of the longer vector. The recursion terminates after
// a single call, because the recursive call is made with operands of equal length.
if (a_len != b_len) {
const min_len = std.math.min(a_len, b_len);
const max_src = if (a_len > b_len) a_src else b_src;
const max_len = try sema.usizeCast(block, max_src, std.math.max(a_len, b_len));
const expand_mask_values = try sema.arena.alloc(Value, max_len);
i = 0;
while (i < min_len) : (i += 1) {
expand_mask_values[i] = try mod.intValue(Type.comptime_int, i);
}
while (i < max_len) : (i += 1) {
expand_mask_values[i] = try mod.intValue(Type.comptime_int, -1);
}
const expand_mask = try Value.Tag.aggregate.create(sema.arena, expand_mask_values);
if (a_len < b_len) {
const undef = try sema.addConstUndef(a_ty);
a = try sema.analyzeShuffle(block, src_node, elem_ty, a, undef, expand_mask, @intCast(u32, max_len));
} else {
const undef = try sema.addConstUndef(b_ty);
b = try sema.analyzeShuffle(block, src_node, elem_ty, b, undef, expand_mask, @intCast(u32, max_len));
}
}
const mask_index = @intCast(u32, sema.air_values.items.len);
try sema.air_values.append(sema.gpa, mask);
return block.addInst(.{
.tag = .shuffle,
.data = .{ .ty_pl = .{
.ty = try sema.addType(res_ty),
.payload = try block.sema.addExtra(Air.Shuffle{
.a = a,
.b = b,
.mask = mask_index,
.mask_len = mask_len,
}),
} },
});
}
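/// Analyzes `@select(E, pred, a, b)`: `pred` is coerced to a vector of `bool`,
/// and each result element is taken from `a` where the predicate is `true` and
/// from `b` where it is `false`. Fully comptime-known operands fold
/// elementwise.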
fn zirSelect(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const extra = sema.code.extraData(Zir.Inst.Select, extended.operand).data;
const src = LazySrcLoc.nodeOffset(extra.node);
const elem_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
const pred_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = extra.node };
const a_src: LazySrcLoc = .{ .node_offset_builtin_call_arg2 = extra.node };
const b_src: LazySrcLoc = .{ .node_offset_builtin_call_arg3 = extra.node };
const elem_ty = try sema.resolveType(block, elem_ty_src, extra.elem_type);
try sema.checkVectorElemType(block, elem_ty_src, elem_ty);
const pred_uncoerced = try sema.resolveInst(extra.pred);
const pred_ty = sema.typeOf(pred_uncoerced);
const vec_len_u64 = switch (try pred_ty.zigTypeTagOrPoison(mod)) {
.Vector, .Array => pred_ty.arrayLen(mod),
else => return sema.fail(block, pred_src, "expected vector or array, found '{}'", .{pred_ty.fmt(sema.mod)}),
};
const vec_len = @intCast(u32, try sema.usizeCast(block, pred_src, vec_len_u64));
const bool_vec_ty = try mod.vectorType(.{
.len = vec_len,
.child = .bool_type,
});
const pred = try sema.coerce(block, bool_vec_ty, pred_uncoerced, pred_src);
const vec_ty = try mod.vectorType(.{
.len = vec_len,
.child = elem_ty.ip_index,
});
const a = try sema.coerce(block, vec_ty, try sema.resolveInst(extra.a), a_src);
const b = try sema.coerce(block, vec_ty, try sema.resolveInst(extra.b), b_src);
const maybe_pred = try sema.resolveMaybeUndefVal(pred);
const maybe_a = try sema.resolveMaybeUndefVal(a);
const maybe_b = try sema.resolveMaybeUndefVal(b);
const runtime_src = if (maybe_pred) |pred_val| rs: {
if (pred_val.isUndef(mod)) return sema.addConstUndef(vec_ty);
if (maybe_a) |a_val| {
if (a_val.isUndef(mod)) return sema.addConstUndef(vec_ty);
if (maybe_b) |b_val| {
if (b_val.isUndef(mod)) return sema.addConstUndef(vec_ty);
const elems = try sema.gpa.alloc(Value, vec_len);
for (elems, 0..) |*elem, i| {
const pred_elem_val = try pred_val.elemValue(sema.mod, i);
const should_choose_a = pred_elem_val.toBool(mod);
if (should_choose_a) {
elem.* = try a_val.elemValue(sema.mod, i);
} else {
elem.* = try b_val.elemValue(sema.mod, i);
}
}
return sema.addConstant(
vec_ty,
try Value.Tag.aggregate.create(sema.arena, elems),
);
} else {
break :rs b_src;
}
} else {
if (maybe_b) |b_val| {
if (b_val.isUndef(mod)) return sema.addConstUndef(vec_ty);
}
break :rs a_src;
}
} else rs: {
if (maybe_a) |a_val| {
if (a_val.isUndef(mod)) return sema.addConstUndef(vec_ty);
}
if (maybe_b) |b_val| {
if (b_val.isUndef(mod)) return sema.addConstUndef(vec_ty);
}
break :rs pred_src;
};
try sema.requireRuntimeBlock(block, src, runtime_src);
return block.addInst(.{
.tag = .select,
.data = .{ .pl_op = .{
.operand = pred,
.payload = try block.sema.addExtra(Air.Bin{
.lhs = a,
.rhs = b,
}),
} },
});
}
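/// Analyzes `@atomicLoad`. The ordering must not be `Release` or `AcqRel`,
/// and a `const` pointer is accepted. If the pointer can be dereferenced at
/// comptime, the load folds to a constant.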
fn zirAtomicLoad(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const extra = sema.code.extraData(Zir.Inst.AtomicLoad, inst_data.payload_index).data;
// zig fmt: off
const elem_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
const ptr_src : LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
const order_src : LazySrcLoc = .{ .node_offset_builtin_call_arg2 = inst_data.src_node };
// zig fmt: on
const elem_ty = try sema.resolveType(block, elem_ty_src, extra.elem_type);
const uncasted_ptr = try sema.resolveInst(extra.ptr);
const ptr = try sema.checkAtomicPtrOperand(block, elem_ty, elem_ty_src, uncasted_ptr, ptr_src, true);
const order = try sema.resolveAtomicOrder(block, order_src, extra.ordering, "atomic order of @atomicLoad must be comptime-known");
switch (order) {
.Release, .AcqRel => {
return sema.fail(
block,
order_src,
"@atomicLoad atomic ordering must not be Release or AcqRel",
.{},
);
},
else => {},
}
if (try sema.typeHasOnePossibleValue(elem_ty)) |val| {
return sema.addConstant(elem_ty, val);
}
if (try sema.resolveDefinedValue(block, ptr_src, ptr)) |ptr_val| {
if (try sema.pointerDeref(block, ptr_src, ptr_val, sema.typeOf(ptr))) |elem_val| {
return sema.addConstant(elem_ty, elem_val);
}
}
try sema.requireRuntimeBlock(block, inst_data.src(), ptr_src);
return block.addInst(.{
.tag = .atomic_load,
.data = .{ .atomic_load = .{
.ptr = ptr,
.order = order,
} },
});
}
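/// Analyzes `@atomicRmw`. Enum and bool operands permit only `.Xchg`; float
/// operands permit `.Xchg`/`.Add`/`.Sub`/`.Max`/`.Min`; the `Unordered`
/// ordering is rejected. On a comptime-mutable pointer the operation is
/// applied at comptime and the previous value is returned, mirroring the
/// runtime semantics. `flags` packs `order | (op << 3)`.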
fn zirAtomicRmw(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const extra = sema.code.extraData(Zir.Inst.AtomicRmw, inst_data.payload_index).data;
const src = inst_data.src();
// zig fmt: off
const elem_ty_src : LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
const ptr_src : LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
const op_src : LazySrcLoc = .{ .node_offset_builtin_call_arg2 = inst_data.src_node };
const operand_src : LazySrcLoc = .{ .node_offset_builtin_call_arg3 = inst_data.src_node };
const order_src : LazySrcLoc = .{ .node_offset_builtin_call_arg4 = inst_data.src_node };
// zig fmt: on
const operand = try sema.resolveInst(extra.operand);
const elem_ty = sema.typeOf(operand);
const uncasted_ptr = try sema.resolveInst(extra.ptr);
const ptr = try sema.checkAtomicPtrOperand(block, elem_ty, elem_ty_src, uncasted_ptr, ptr_src, false);
const op = try sema.resolveAtomicRmwOp(block, op_src, extra.operation);
switch (elem_ty.zigTypeTag(mod)) {
.Enum => if (op != .Xchg) {
return sema.fail(block, op_src, "@atomicRmw with enum only allowed with .Xchg", .{});
},
.Bool => if (op != .Xchg) {
return sema.fail(block, op_src, "@atomicRmw with bool only allowed with .Xchg", .{});
},
.Float => switch (op) {
.Xchg, .Add, .Sub, .Max, .Min => {},
else => return sema.fail(block, op_src, "@atomicRmw with float only allowed with .Xchg, .Add, .Sub, .Max, and .Min", .{}),
},
else => {},
}
const order = try sema.resolveAtomicOrder(block, order_src, extra.ordering, "atomic order of @atomicRmW must be comptime-known");
if (order == .Unordered) {
return sema.fail(block, order_src, "@atomicRmw atomic ordering must not be Unordered", .{});
}
// Special case zero-bit types: the result is always the type's single possible value.
if (try sema.typeHasOnePossibleValue(elem_ty)) |val| {
return sema.addConstant(elem_ty, val);
}
const runtime_src = if (try sema.resolveDefinedValue(block, ptr_src, ptr)) |ptr_val| rs: {
const maybe_operand_val = try sema.resolveMaybeUndefVal(operand);
const operand_val = maybe_operand_val orelse {
try sema.checkPtrIsNotComptimeMutable(block, ptr_val, ptr_src, operand_src);
break :rs operand_src;
};
if (ptr_val.isComptimeMutablePtr()) {
const ptr_ty = sema.typeOf(ptr);
const stored_val = (try sema.pointerDeref(block, ptr_src, ptr_val, ptr_ty)) orelse break :rs ptr_src;
const new_val = switch (op) {
// zig fmt: off
.Xchg => operand_val,
.Add => try sema.numberAddWrapScalar(stored_val, operand_val, elem_ty),
.Sub => try sema.numberSubWrapScalar(stored_val, operand_val, elem_ty),
.And => try stored_val.bitwiseAnd (operand_val, elem_ty, sema.arena, mod),
.Nand => try stored_val.bitwiseNand (operand_val, elem_ty, sema.arena, mod),
.Or => try stored_val.bitwiseOr (operand_val, elem_ty, sema.arena, mod),
.Xor => try stored_val.bitwiseXor (operand_val, elem_ty, sema.arena, mod),
.Max => stored_val.numberMax (operand_val, mod),
.Min => stored_val.numberMin (operand_val, mod),
// zig fmt: on
};
try sema.storePtrVal(block, src, ptr_val, new_val, elem_ty);
return sema.addConstant(elem_ty, stored_val);
} else break :rs ptr_src;
} else ptr_src;
const flags: u32 = @as(u32, @enumToInt(order)) | (@as(u32, @enumToInt(op)) << 3);
try sema.requireRuntimeBlock(block, src, runtime_src);
return block.addInst(.{
.tag = .atomic_rmw,
.data = .{ .pl_op = .{
.operand = ptr,
.payload = try sema.addExtra(Air.AtomicRmw{
.operand = operand,
.flags = flags,
}),
} },
});
}
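/// Analyzes `@atomicStore`. The ordering must not be `Acquire` or `AcqRel`;
/// each remaining ordering maps to a dedicated AIR store tag.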
fn zirAtomicStore(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const extra = sema.code.extraData(Zir.Inst.AtomicStore, inst_data.payload_index).data;
const src = inst_data.src();
// zig fmt: off
const elem_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
const ptr_src : LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
const operand_src : LazySrcLoc = .{ .node_offset_builtin_call_arg2 = inst_data.src_node };
const order_src : LazySrcLoc = .{ .node_offset_builtin_call_arg3 = inst_data.src_node };
// zig fmt: on
const operand = try sema.resolveInst(extra.operand);
const elem_ty = sema.typeOf(operand);
const uncasted_ptr = try sema.resolveInst(extra.ptr);
const ptr = try sema.checkAtomicPtrOperand(block, elem_ty, elem_ty_src, uncasted_ptr, ptr_src, false);
const order = try sema.resolveAtomicOrder(block, order_src, extra.ordering, "atomic order of @atomicStore must be comptime-known");
const air_tag: Air.Inst.Tag = switch (order) {
.Acquire, .AcqRel => {
return sema.fail(
block,
order_src,
"@atomicStore atomic ordering must not be Acquire or AcqRel",
.{},
);
},
.Unordered => .atomic_store_unordered,
.Monotonic => .atomic_store_monotonic,
.Release => .atomic_store_release,
.SeqCst => .atomic_store_seq_cst,
};
return sema.storePtr2(block, src, ptr, ptr_src, operand, operand_src, air_tag);
}
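/// Analyzes `@mulAdd(T, m1, m2, a)`, a fused multiply-add. The result type is
/// taken from the addend operand and must be a float, `comptime_float`, or
/// vector thereof; the mulends are coerced to it. Fully comptime-known
/// operands fold via `Value.mulAdd`.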
fn zirMulAdd(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const extra = sema.code.extraData(Zir.Inst.MulAdd, inst_data.payload_index).data;
const src = inst_data.src();
const mulend1_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
const mulend2_src: LazySrcLoc = .{ .node_offset_builtin_call_arg2 = inst_data.src_node };
const addend_src: LazySrcLoc = .{ .node_offset_builtin_call_arg3 = inst_data.src_node };
const addend = try sema.resolveInst(extra.addend);
const ty = sema.typeOf(addend);
const mulend1 = try sema.coerce(block, ty, try sema.resolveInst(extra.mulend1), mulend1_src);
const mulend2 = try sema.coerce(block, ty, try sema.resolveInst(extra.mulend2), mulend2_src);
const maybe_mulend1 = try sema.resolveMaybeUndefVal(mulend1);
const maybe_mulend2 = try sema.resolveMaybeUndefVal(mulend2);
const maybe_addend = try sema.resolveMaybeUndefVal(addend);
const mod = sema.mod;
switch (ty.zigTypeTag(mod)) {
.ComptimeFloat, .Float, .Vector => {},
else => return sema.fail(block, src, "expected vector of floats or float type, found '{}'", .{ty.fmt(sema.mod)}),
}
const runtime_src = if (maybe_mulend1) |mulend1_val| rs: {
if (maybe_mulend2) |mulend2_val| {
if (mulend2_val.isUndef(mod)) return sema.addConstUndef(ty);
if (maybe_addend) |addend_val| {
if (addend_val.isUndef(mod)) return sema.addConstUndef(ty);
const result_val = try Value.mulAdd(ty, mulend1_val, mulend2_val, addend_val, sema.arena, sema.mod);
return sema.addConstant(ty, result_val);
} else {
break :rs addend_src;
}
} else {
if (maybe_addend) |addend_val| {
if (addend_val.isUndef(mod)) return sema.addConstUndef(ty);
}
break :rs mulend2_src;
}
} else rs: {
if (maybe_mulend2) |mulend2_val| {
if (mulend2_val.isUndef(mod)) return sema.addConstUndef(ty);
}
if (maybe_addend) |addend_val| {
if (addend_val.isUndef(mod)) return sema.addConstUndef(ty);
}
break :rs mulend1_src;
};
try sema.requireRuntimeBlock(block, src, runtime_src);
return block.addInst(.{
.tag = .mul_add,
.data = .{ .pl_op = .{
.operand = addend,
.payload = try sema.addExtra(Air.Bin{
.lhs = mulend1,
.rhs = mulend2,
}),
} },
});
}
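// Handles `@call`. Illustrative form: `@call(.auto, f, .{ a, b });` where
// the modifier must be comptime-known and the args operand must be a tuple.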
fn zirBuiltinCall(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const modifier_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
const func_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
const args_src: LazySrcLoc = .{ .node_offset_builtin_call_arg2 = inst_data.src_node };
const call_src = inst_data.src();
const extra = sema.code.extraData(Zir.Inst.BuiltinCall, inst_data.payload_index).data;
var func = try sema.resolveInst(extra.callee);
const modifier_ty = try sema.getBuiltinType("CallModifier");
const air_ref = try sema.resolveInst(extra.modifier);
const modifier_ref = try sema.coerce(block, modifier_ty, air_ref, modifier_src);
const modifier_val = try sema.resolveConstValue(block, modifier_src, modifier_ref, "call modifier must be comptime-known");
var modifier = mod.toEnum(std.builtin.CallModifier, modifier_val);
switch (modifier) {
// These can be upgraded to comptime or nosuspend calls.
.auto, .never_tail, .no_async => {
if (block.is_comptime) {
if (modifier == .never_tail) {
return sema.fail(block, modifier_src, "unable to perform 'never_tail' call at compile-time", .{});
}
modifier = .compile_time;
} else if (extra.flags.is_nosuspend) {
modifier = .no_async;
}
},
// These can be upgraded to comptime. nosuspend bit can be safely ignored.
.always_inline, .compile_time => {
_ = (try sema.resolveDefinedValue(block, func_src, func)) orelse {
return sema.fail(block, func_src, "modifier '{s}' requires a comptime-known function", .{@tagName(modifier)});
};
if (block.is_comptime) {
modifier = .compile_time;
}
},
.always_tail => {
if (block.is_comptime) {
modifier = .compile_time;
}
},
.async_kw => {
if (extra.flags.is_nosuspend) {
return sema.fail(block, modifier_src, "modifier 'async_kw' cannot be used inside nosuspend block", .{});
}
if (block.is_comptime) {
return sema.fail(block, modifier_src, "modifier 'async_kw' cannot be used in combination with comptime function call", .{});
}
},
.never_inline => {
if (block.is_comptime) {
return sema.fail(block, modifier_src, "unable to perform 'never_inline' call at compile-time", .{});
}
},
}
const args = try sema.resolveInst(extra.args);
const args_ty = sema.typeOf(args);
if (!args_ty.isTuple(mod) and args_ty.ip_index != .empty_struct_type) {
return sema.fail(block, args_src, "expected a tuple, found '{}'", .{args_ty.fmt(sema.mod)});
}
var resolved_args: []Air.Inst.Ref = try sema.arena.alloc(Air.Inst.Ref, args_ty.structFieldCount(mod));
for (resolved_args, 0..) |*resolved, i| {
resolved.* = try sema.tupleFieldValByIndex(block, args_src, args, @intCast(u32, i), args_ty);
}
const callee_ty = sema.typeOf(func);
const func_ty = try sema.checkCallArgumentCount(block, func, func_src, callee_ty, resolved_args.len, false);
const ensure_result_used = extra.flags.ensure_result_used;
return sema.analyzeCall(block, func, func_ty, func_src, call_src, modifier, ensure_result_used, resolved_args, null, null);
}
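// Handles `@fieldParentPtr`. Illustrative form, using the builtin's
// signature at the time of this code:
//   const S = struct { a: u32, b: u32 };
//   var s: S = .{ .a = 1, .b = 2 };
//   const parent = @fieldParentPtr(S, "b", &s.b); // parent == &s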
fn zirFieldParentPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const extra = sema.code.extraData(Zir.Inst.FieldParentPtr, inst_data.payload_index).data;
const src = inst_data.src();
const ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
const name_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
const ptr_src: LazySrcLoc = .{ .node_offset_builtin_call_arg2 = inst_data.src_node };
const parent_ty = try sema.resolveType(block, ty_src, extra.parent_type);
const field_name = try sema.resolveConstString(block, name_src, extra.field_name, "field name must be comptime-known");
const field_ptr = try sema.resolveInst(extra.field_ptr);
const field_ptr_ty = sema.typeOf(field_ptr);
const mod = sema.mod;
if (parent_ty.zigTypeTag(mod) != .Struct and parent_ty.zigTypeTag(mod) != .Union) {
return sema.fail(block, ty_src, "expected struct or union type, found '{}'", .{parent_ty.fmt(sema.mod)});
}
try sema.resolveTypeLayout(parent_ty);
const field_index = switch (parent_ty.zigTypeTag(mod)) {
.Struct => blk: {
if (parent_ty.isTuple(mod)) {
if (mem.eql(u8, field_name, "len")) {
return sema.fail(block, src, "cannot get @fieldParentPtr of 'len' field of tuple", .{});
}
break :blk try sema.tupleFieldIndex(block, parent_ty, field_name, name_src);
} else {
break :blk try sema.structFieldIndex(block, parent_ty, field_name, name_src);
}
},
.Union => try sema.unionFieldIndex(block, parent_ty, field_name, name_src),
else => unreachable,
};
if (parent_ty.zigTypeTag(mod) == .Struct and parent_ty.structFieldIsComptime(field_index, mod)) {
return sema.fail(block, src, "cannot get @fieldParentPtr of a comptime field", .{});
}
try sema.checkPtrOperand(block, ptr_src, field_ptr_ty);
const field_ptr_ty_info = field_ptr_ty.ptrInfo(mod);
var ptr_ty_data: Type.Payload.Pointer.Data = .{
.pointee_type = parent_ty.structFieldType(field_index, mod),
.mutable = field_ptr_ty_info.mutable,
.@"addrspace" = field_ptr_ty_info.@"addrspace",
};
if (parent_ty.containerLayout(mod) == .Packed) {
return sema.fail(block, src, "TODO handle packed structs/unions with @fieldParentPtr", .{});
} else {
ptr_ty_data.@"align" = blk: {
if (mod.typeToStruct(parent_ty)) |struct_obj| {
break :blk struct_obj.fields.values()[field_index].abi_align;
} else if (mod.typeToUnion(parent_ty)) |union_obj| {
break :blk union_obj.fields.values()[field_index].abi_align;
} else {
break :blk 0;
}
};
}
const actual_field_ptr_ty = try Type.ptr(sema.arena, sema.mod, ptr_ty_data);
const casted_field_ptr = try sema.coerce(block, actual_field_ptr_ty, field_ptr, ptr_src);
ptr_ty_data.pointee_type = parent_ty;
const result_ptr = try Type.ptr(sema.arena, sema.mod, ptr_ty_data);
if (try sema.resolveDefinedValue(block, src, casted_field_ptr)) |field_ptr_val| {
const payload = field_ptr_val.castTag(.field_ptr) orelse {
return sema.fail(block, ptr_src, "pointer value not based on parent struct", .{});
};
if (payload.data.field_index != field_index) {
const msg = msg: {
const msg = try sema.errMsg(
block,
src,
"field '{s}' has index '{d}' but pointer value is index '{d}' of struct '{}'",
.{
field_name,
field_index,
payload.data.field_index,
parent_ty.fmt(sema.mod),
},
);
errdefer msg.destroy(sema.gpa);
try sema.addDeclaredHereNote(msg, parent_ty);
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
return sema.addConstant(result_ptr, payload.data.container_ptr);
}
try sema.requireRuntimeBlock(block, src, ptr_src);
try sema.queueFullTypeResolution(result_ptr);
return block.addInst(.{
.tag = .field_parent_ptr,
.data = .{ .ty_pl = .{
.ty = try sema.addType(result_ptr),
.payload = try block.sema.addExtra(Air.FieldParentPtr{
.field_ptr = casted_field_ptr,
.field_index = @intCast(u32, field_index),
}),
} },
});
}
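// Handles the two-argument forms of `@min` and `@max`, e.g.
// `const m = @min(a, b);` where both operands must be numeric.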
fn zirMinMax(
sema: *Sema,
block: *Block,
inst: Zir.Inst.Index,
comptime air_tag: Air.Inst.Tag,
) CompileError!Air.Inst.Ref {
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const src = inst_data.src();
const lhs_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
const rhs_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
const lhs = try sema.resolveInst(extra.lhs);
const rhs = try sema.resolveInst(extra.rhs);
try sema.checkNumericType(block, lhs_src, sema.typeOf(lhs));
try sema.checkNumericType(block, rhs_src, sema.typeOf(rhs));
return sema.analyzeMinMax(block, src, air_tag, &.{ lhs, rhs }, &.{ lhs_src, rhs_src });
}
fn zirMinMaxMulti(
sema: *Sema,
block: *Block,
extended: Zir.Inst.Extended.InstData,
comptime air_tag: Air.Inst.Tag,
) CompileError!Air.Inst.Ref {
const extra = sema.code.extraData(Zir.Inst.NodeMultiOp, extended.operand);
const src_node = extra.data.src_node;
const src = LazySrcLoc.nodeOffset(src_node);
const operands = sema.code.refSlice(extra.end, extended.small);
const air_refs = try sema.arena.alloc(Air.Inst.Ref, operands.len);
const operand_srcs = try sema.arena.alloc(LazySrcLoc, operands.len);
for (operands, air_refs, operand_srcs, 0..) |zir_ref, *air_ref, *op_src, i| {
op_src.* = switch (i) {
0 => .{ .node_offset_builtin_call_arg0 = src_node },
1 => .{ .node_offset_builtin_call_arg1 = src_node },
2 => .{ .node_offset_builtin_call_arg2 = src_node },
3 => .{ .node_offset_builtin_call_arg3 = src_node },
4 => .{ .node_offset_builtin_call_arg4 = src_node },
5 => .{ .node_offset_builtin_call_arg5 = src_node },
else => src, // TODO: better source location
};
air_ref.* = try sema.resolveInst(zir_ref);
try sema.checkNumericType(block, op_src.*, sema.typeOf(air_ref.*));
}
return sema.analyzeMinMax(block, src, air_tag, air_refs, operand_srcs);
}
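// Shared implementation of `@min`/`@max`. Comptime-known operands are folded
// eagerly, and any comptime-known bound is used to refine the result type.
// For example (a sketch, assuming `x` is a runtime u32), `@min(x, 100)` can
// be given the type u7, because the result can never exceed 100.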
fn analyzeMinMax(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
comptime air_tag: Air.Inst.Tag,
operands: []const Air.Inst.Ref,
operand_srcs: []const LazySrcLoc,
) CompileError!Air.Inst.Ref {
assert(operands.len == operand_srcs.len);
assert(operands.len > 0);
const mod = sema.mod;
if (operands.len == 1) return operands[0];
const opFunc = switch (air_tag) {
.min => Value.numberMin,
.max => Value.numberMax,
else => @compileError("unreachable"),
};
// First, find all comptime-known arguments, and get their min/max
var runtime_known = try std.DynamicBitSet.initFull(sema.arena, operands.len);
var cur_minmax: ?Air.Inst.Ref = null;
var cur_minmax_src: LazySrcLoc = undefined; // defined if cur_minmax not null
for (operands, operand_srcs, 0..) |operand, operand_src, operand_idx| {
// Resolve the value now to avoid redundant calls to `checkSimdBinOp` - we'll have to call
// it in the runtime path anyway since the result type may have been refined
const uncasted_operand_val = (try sema.resolveMaybeUndefVal(operand)) orelse continue;
if (cur_minmax) |cur| {
const simd_op = try sema.checkSimdBinOp(block, src, cur, operand, cur_minmax_src, operand_src);
const cur_val = simd_op.lhs_val.?; // cur_minmax is comptime-known
const operand_val = simd_op.rhs_val.?; // we checked the operand was resolvable above
runtime_known.unset(operand_idx);
if (cur_val.isUndef(mod)) continue; // result is also undef
if (operand_val.isUndef(mod)) {
cur_minmax = try sema.addConstUndef(simd_op.result_ty);
continue;
}
try sema.resolveLazyValue(cur_val);
try sema.resolveLazyValue(operand_val);
const vec_len = simd_op.len orelse {
const result_val = opFunc(cur_val, operand_val, mod);
cur_minmax = try sema.addConstant(simd_op.result_ty, result_val);
continue;
};
const elems = try sema.arena.alloc(Value, vec_len);
for (elems, 0..) |*elem, i| {
const lhs_elem_val = try cur_val.elemValue(mod, i);
const rhs_elem_val = try operand_val.elemValue(mod, i);
elem.* = opFunc(lhs_elem_val, rhs_elem_val, mod);
}
cur_minmax = try sema.addConstant(
simd_op.result_ty,
try Value.Tag.aggregate.create(sema.arena, elems),
);
} else {
runtime_known.unset(operand_idx);
cur_minmax = try sema.addConstant(sema.typeOf(operand), uncasted_operand_val);
cur_minmax_src = operand_src;
}
}
const opt_runtime_idx = runtime_known.findFirstSet();
const comptime_refined_ty: ?Type = if (cur_minmax) |ct_minmax_ref| refined: {
// Refine the comptime-known result type based on the operation
const val = (try sema.resolveMaybeUndefVal(ct_minmax_ref)).?;
const orig_ty = sema.typeOf(ct_minmax_ref);
if (opt_runtime_idx == null and orig_ty.eql(Type.comptime_int, mod)) {
// If all arguments were `comptime_int`, and there are no runtime args, we'll preserve that type
break :refined orig_ty;
}
const refined_ty = if (orig_ty.zigTypeTag(mod) == .Vector) blk: {
const elem_ty = orig_ty.childType(mod);
const len = orig_ty.vectorLen(mod);
if (len == 0) break :blk orig_ty;
if (elem_ty.isAnyFloat()) break :blk orig_ty; // can't refine floats
var cur_min: Value = try val.elemValue(mod, 0);
var cur_max: Value = cur_min;
for (1..len) |idx| {
const elem_val = try val.elemValue(mod, idx);
if (elem_val.isUndef(mod)) break :blk orig_ty; // can't refine undef
if (Value.order(elem_val, cur_min, mod).compare(.lt)) cur_min = elem_val;
if (Value.order(elem_val, cur_max, mod).compare(.gt)) cur_max = elem_val;
}
const refined_elem_ty = try mod.intFittingRange(cur_min, cur_max);
break :blk try mod.vectorType(.{
.len = len,
.child = refined_elem_ty.ip_index,
});
} else blk: {
if (orig_ty.isAnyFloat()) break :blk orig_ty; // can't refine floats
if (val.isUndef(mod)) break :blk orig_ty; // can't refine undef
break :blk try mod.intFittingRange(val, val);
};
// Apply the refined type to the current value - this isn't strictly necessary in the
// runtime case since we'll refine again afterwards, but keeping things as small as possible
// will allow us to emit more optimal AIR (if all the runtime operands have smaller types
// than the non-refined comptime type).
if (!refined_ty.eql(orig_ty, mod)) {
if (std.debug.runtime_safety) {
assert(try sema.intFitsInType(val, refined_ty, null));
}
cur_minmax = try sema.addConstant(refined_ty, val);
}
break :refined refined_ty;
} else null;
const runtime_idx = opt_runtime_idx orelse return cur_minmax.?;
const runtime_src = operand_srcs[runtime_idx];
try sema.requireRuntimeBlock(block, src, runtime_src);
// Now, iterate over runtime operands, emitting a min/max instruction for each. We'll refine the
// type again at the end, based on the comptime-known bound.
// If the comptime-known part is undef we can avoid emitting actual instructions later
const known_undef = if (cur_minmax) |operand| blk: {
const val = (try sema.resolveMaybeUndefVal(operand)).?;
break :blk val.isUndef(mod);
} else false;
if (cur_minmax == null) {
// No comptime operands - use the first operand as the starting value
assert(runtime_idx == 0);
cur_minmax = operands[0];
cur_minmax_src = runtime_src;
runtime_known.unset(0); // don't look at this operand in the loop below
}
var it = runtime_known.iterator(.{});
while (it.next()) |idx| {
const lhs = cur_minmax.?;
const lhs_src = cur_minmax_src;
const rhs = operands[idx];
const rhs_src = operand_srcs[idx];
const simd_op = try sema.checkSimdBinOp(block, src, lhs, rhs, lhs_src, rhs_src);
if (known_undef) {
cur_minmax = try sema.addConstant(simd_op.result_ty, Value.undef);
} else {
cur_minmax = try block.addBinOp(air_tag, simd_op.lhs, simd_op.rhs);
}
}
if (comptime_refined_ty) |comptime_ty| refine: {
// Finally, refine the type based on the comptime-known bound.
if (known_undef) break :refine; // can't refine undef
const unrefined_ty = sema.typeOf(cur_minmax.?);
const is_vector = unrefined_ty.zigTypeTag(mod) == .Vector;
const comptime_elem_ty = if (is_vector) comptime_ty.childType(mod) else comptime_ty;
const unrefined_elem_ty = if (is_vector) unrefined_ty.childType(mod) else unrefined_ty;
if (unrefined_elem_ty.isAnyFloat()) break :refine; // we can't refine floats
// Compute the final bounds based on the runtime type and the comptime-known bound type
const min_val = switch (air_tag) {
.min => try unrefined_elem_ty.minInt(sema.arena, mod),
.max => try comptime_elem_ty.minInt(sema.arena, mod), // @max(ct, rt) >= ct
else => unreachable,
};
const max_val = switch (air_tag) {
.min => try comptime_elem_ty.maxInt(sema.arena, mod, Type.comptime_int), // @min(ct, rt) <= ct
.max => try unrefined_elem_ty.maxInt(sema.arena, mod, Type.comptime_int),
else => unreachable,
};
// Find the smallest type which can contain these bounds
const final_elem_ty = try mod.intFittingRange(min_val, max_val);
const final_ty = if (is_vector)
try mod.vectorType(.{
.len = unrefined_ty.vectorLen(mod),
.child = final_elem_ty.ip_index,
})
else
final_elem_ty;
if (!final_ty.eql(unrefined_ty, mod)) {
// We've reduced the type - cast the result down
return block.addTyOp(.intcast, final_ty, cur_minmax.?);
}
}
return cur_minmax.?;
}
fn upgradeToArrayPtr(sema: *Sema, block: *Block, ptr: Air.Inst.Ref, len: u64) !Air.Inst.Ref {
const mod = sema.mod;
const info = sema.typeOf(ptr).ptrInfo(mod);
if (info.size == .One) {
// Already an array pointer.
return ptr;
}
const new_ty = try Type.ptr(sema.arena, mod, .{
.pointee_type = try Type.array(sema.arena, len, info.sentinel, info.pointee_type, mod),
.sentinel = null,
.@"align" = info.@"align",
.@"addrspace" = info.@"addrspace",
.mutable = info.mutable,
.@"allowzero" = info.@"allowzero",
.@"volatile" = info.@"volatile",
.size = .One,
});
if (info.size == .Slice) {
return block.addTyOp(.slice_ptr, new_ty, ptr);
}
return block.addBitCast(new_ty, ptr);
}
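// Handles `@memcpy`. Illustrative form: `@memcpy(dest[0..n], src[0..n]);`
// where the operands must not alias and their lengths must match. Both
// properties are verified at comptime when possible and guarded by safety
// checks otherwise.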
fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const src = inst_data.src();
const dest_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
const src_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
const dest_ptr = try sema.resolveInst(extra.lhs);
const src_ptr = try sema.resolveInst(extra.rhs);
const dest_ty = sema.typeOf(dest_ptr);
const src_ty = sema.typeOf(src_ptr);
const dest_len = try indexablePtrLenOrNone(sema, block, dest_src, dest_ptr);
const src_len = try indexablePtrLenOrNone(sema, block, src_src, src_ptr);
const target = sema.mod.getTarget();
const mod = sema.mod;
if (dest_ty.isConstPtr(mod)) {
return sema.fail(block, dest_src, "cannot memcpy to constant pointer", .{});
}
if (dest_len == .none and src_len == .none) {
const msg = msg: {
const msg = try sema.errMsg(block, src, "unknown @memcpy length", .{});
errdefer msg.destroy(sema.gpa);
try sema.errNote(block, dest_src, msg, "destination type '{}' provides no length", .{
dest_ty.fmt(sema.mod),
});
try sema.errNote(block, src_src, msg, "source type '{}' provides no length", .{
src_ty.fmt(sema.mod),
});
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
var len_val: ?Value = null;
if (dest_len != .none and src_len != .none) check: {
// If we can check at compile-time, no need for runtime safety.
if (try sema.resolveDefinedValue(block, dest_src, dest_len)) |dest_len_val| {
len_val = dest_len_val;
if (try sema.resolveDefinedValue(block, src_src, src_len)) |src_len_val| {
if (!(try sema.valuesEqual(dest_len_val, src_len_val, Type.usize))) {
const msg = msg: {
const msg = try sema.errMsg(block, src, "non-matching @memcpy lengths", .{});
errdefer msg.destroy(sema.gpa);
try sema.errNote(block, dest_src, msg, "length {} here", .{
dest_len_val.fmtValue(Type.usize, sema.mod),
});
try sema.errNote(block, src_src, msg, "length {} here", .{
src_len_val.fmtValue(Type.usize, sema.mod),
});
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
break :check;
}
} else if (try sema.resolveDefinedValue(block, src_src, src_len)) |src_len_val| {
len_val = src_len_val;
}
if (block.wantSafety()) {
const ok = try block.addBinOp(.cmp_eq, dest_len, src_len);
try sema.addSafetyCheck(block, ok, .memcpy_len_mismatch);
}
} else if (dest_len != .none) {
if (try sema.resolveDefinedValue(block, dest_src, dest_len)) |dest_len_val| {
len_val = dest_len_val;
}
} else if (src_len != .none) {
if (try sema.resolveDefinedValue(block, src_src, src_len)) |src_len_val| {
len_val = src_len_val;
}
}
const runtime_src = if (try sema.resolveDefinedValue(block, dest_src, dest_ptr)) |dest_ptr_val| rs: {
if (!dest_ptr_val.isComptimeMutablePtr()) break :rs dest_src;
if (try sema.resolveDefinedValue(block, src_src, src_ptr)) |_| {
const len_u64 = (try len_val.?.getUnsignedIntAdvanced(mod, sema)).?;
const len = try sema.usizeCast(block, dest_src, len_u64);
for (0..len) |i| {
const elem_index = try sema.addIntUnsigned(Type.usize, i);
const dest_elem_ptr = try sema.elemPtrOneLayerOnly(
block,
src,
dest_ptr,
elem_index,
src,
true, // init
false, // oob_safety
);
const src_elem_ptr = try sema.elemPtrOneLayerOnly(
block,
src,
src_ptr,
elem_index,
src,
false, // init
false, // oob_safety
);
const uncoerced_elem = try sema.analyzeLoad(block, src, src_elem_ptr, src_src);
try sema.storePtr2(
block,
src,
dest_elem_ptr,
dest_src,
uncoerced_elem,
src_src,
.store,
);
}
return;
} else break :rs src_src;
} else dest_src;
// If in-memory coercion is not allowed, explode this memcpy call into a
// for loop that copies element-wise.
// Likewise if this is an iterable rather than a pointer, do the same
// lowering. The AIR instruction requires pointers with element types of
// equal ABI size.
if (dest_ty.zigTypeTag(mod) != .Pointer or src_ty.zigTypeTag(mod) != .Pointer) {
return sema.fail(block, src, "TODO: lower @memcpy to a for loop because the source or destination iterable is a tuple", .{});
}
const dest_elem_ty = dest_ty.elemType2(mod);
const src_elem_ty = src_ty.elemType2(mod);
if (.ok != try sema.coerceInMemoryAllowed(block, dest_elem_ty, src_elem_ty, true, target, dest_src, src_src)) {
return sema.fail(block, src, "TODO: lower @memcpy to a for loop because the element types have different ABI sizes", .{});
}
// If the length is comptime-known, then upgrade src and destination types
// into pointer-to-array. At this point we know they are both pointers
// already.
var new_dest_ptr = dest_ptr;
var new_src_ptr = src_ptr;
if (len_val) |val| {
const len = val.toUnsignedInt(mod);
if (len == 0) {
// This AIR instruction guarantees length > 0 if it is comptime-known.
return;
}
new_dest_ptr = try upgradeToArrayPtr(sema, block, dest_ptr, len);
new_src_ptr = try upgradeToArrayPtr(sema, block, src_ptr, len);
}
if (dest_len != .none) {
// Change the src from slice to a many pointer, to avoid multiple ptr
// slice extractions in AIR instructions.
const new_src_ptr_ty = sema.typeOf(new_src_ptr);
if (new_src_ptr_ty.isSlice(mod)) {
new_src_ptr = try sema.analyzeSlicePtr(block, src_src, new_src_ptr, new_src_ptr_ty);
}
} else if (dest_len == .none and len_val == null) {
// Change the dest to a slice, since its type must have the length.
const dest_ptr_ptr = try sema.analyzeRef(block, dest_src, new_dest_ptr);
new_dest_ptr = try sema.analyzeSlice(block, dest_src, dest_ptr_ptr, .zero, src_len, .none, .unneeded, dest_src, dest_src, dest_src, false);
const new_src_ptr_ty = sema.typeOf(new_src_ptr);
if (new_src_ptr_ty.isSlice(mod)) {
new_src_ptr = try sema.analyzeSlicePtr(block, src_src, new_src_ptr, new_src_ptr_ty);
}
}
try sema.requireRuntimeBlock(block, src, runtime_src);
// Aliasing safety check.
if (block.wantSafety()) {
const len = if (len_val) |v|
try sema.addConstant(Type.usize, v)
else if (dest_len != .none)
dest_len
else
src_len;
// Extract raw pointer from dest slice. The AIR instructions could support them, but
// it would cause redundant machine code instructions.
const new_dest_ptr_ty = sema.typeOf(new_dest_ptr);
const raw_dest_ptr = if (new_dest_ptr_ty.isSlice(mod))
try sema.analyzeSlicePtr(block, dest_src, new_dest_ptr, new_dest_ptr_ty)
else
new_dest_ptr;
// ok1: dest >= src + len
// ok2: src >= dest + len
const src_plus_len = try sema.analyzePtrArithmetic(block, src, new_src_ptr, len, .ptr_add, src_src, src);
const dest_plus_len = try sema.analyzePtrArithmetic(block, src, raw_dest_ptr, len, .ptr_add, dest_src, src);
const ok1 = try block.addBinOp(.cmp_gte, raw_dest_ptr, src_plus_len);
const ok2 = try block.addBinOp(.cmp_gte, new_src_ptr, dest_plus_len);
const ok = try block.addBinOp(.bit_or, ok1, ok2);
try sema.addSafetyCheck(block, ok, .memcpy_alias);
}
_ = try block.addInst(.{
.tag = .memcpy,
.data = .{ .bin_op = .{
.lhs = new_dest_ptr,
.rhs = new_src_ptr,
} },
});
}
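// Handles `@memset`. Illustrative form: `@memset(buf[0..n], 0);` where the
// destination must carry a length, e.g. a slice or pointer-to-array.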
fn zirMemset(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const src = inst_data.src();
const dest_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
const value_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
const dest_ptr = try sema.resolveInst(extra.lhs);
const uncoerced_elem = try sema.resolveInst(extra.rhs);
const dest_ptr_ty = sema.typeOf(dest_ptr);
try checkMemOperand(sema, block, dest_src, dest_ptr_ty);
if (dest_ptr_ty.isConstPtr(mod)) {
return sema.fail(block, dest_src, "cannot memset constant pointer", .{});
}
const dest_elem_ty = dest_ptr_ty.elemType2(mod);
const runtime_src = if (try sema.resolveDefinedValue(block, dest_src, dest_ptr)) |ptr_val| rs: {
const len_air_ref = try sema.fieldVal(block, src, dest_ptr, "len", dest_src);
const len_val = (try sema.resolveDefinedValue(block, dest_src, len_air_ref)) orelse
break :rs dest_src;
const len_u64 = (try len_val.getUnsignedIntAdvanced(mod, sema)).?;
const len = try sema.usizeCast(block, dest_src, len_u64);
if (len == 0) {
// This AIR instruction guarantees length > 0 if it is comptime-known.
return;
}
if (!ptr_val.isComptimeMutablePtr()) break :rs dest_src;
if (try sema.resolveMaybeUndefVal(uncoerced_elem)) |_| {
for (0..len) |i| {
const elem_index = try sema.addIntUnsigned(Type.usize, i);
const elem_ptr = try sema.elemPtrOneLayerOnly(
block,
src,
dest_ptr,
elem_index,
src,
true, // init
false, // oob_safety
);
try sema.storePtr2(
block,
src,
elem_ptr,
dest_src,
uncoerced_elem,
value_src,
.store,
);
}
return;
} else break :rs value_src;
} else dest_src;
const elem = try sema.coerce(block, dest_elem_ty, uncoerced_elem, value_src);
try sema.requireRuntimeBlock(block, src, runtime_src);
_ = try block.addInst(.{
.tag = if (block.wantSafety()) .memset_safe else .memset,
.data = .{ .bin_op = .{
.lhs = dest_ptr,
.rhs = elem,
} },
});
}
fn zirBuiltinAsyncCall(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref {
const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
const src = LazySrcLoc.nodeOffset(extra.node);
return sema.failWithUseOfAsync(block, src);
}
fn zirResume(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const src = inst_data.src();
return sema.failWithUseOfAsync(block, src);
}
fn zirAwait(
sema: *Sema,
block: *Block,
inst: Zir.Inst.Index,
) CompileError!Air.Inst.Ref {
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const src = inst_data.src();
return sema.failWithUseOfAsync(block, src);
}
fn zirAwaitNosuspend(
sema: *Sema,
block: *Block,
extended: Zir.Inst.Extended.InstData,
) CompileError!Air.Inst.Ref {
const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
const src = LazySrcLoc.nodeOffset(extra.node);
return sema.failWithUseOfAsync(block, src);
}
fn zirVarExtended(
sema: *Sema,
block: *Block,
extended: Zir.Inst.Extended.InstData,
) CompileError!Air.Inst.Ref {
const extra = sema.code.extraData(Zir.Inst.ExtendedVar, extended.operand);
const ty_src: LazySrcLoc = .{ .node_offset_var_decl_ty = 0 };
const init_src: LazySrcLoc = .{ .node_offset_var_decl_init = 0 };
const small = @bitCast(Zir.Inst.ExtendedVar.Small, extended.small);
var extra_index: usize = extra.end;
const lib_name: ?[]const u8 = if (small.has_lib_name) blk: {
const lib_name = sema.code.nullTerminatedString(sema.code.extra[extra_index]);
extra_index += 1;
break :blk lib_name;
} else null;
// ZIR supports encoding this information but it is not used; the information
// is encoded via the Decl entry.
assert(!small.has_align);
const uncasted_init: Air.Inst.Ref = if (small.has_init) blk: {
const init_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]);
extra_index += 1;
break :blk try sema.resolveInst(init_ref);
} else .none;
const have_ty = extra.data.var_type != .none;
const var_ty = if (have_ty)
try sema.resolveType(block, ty_src, extra.data.var_type)
else
sema.typeOf(uncasted_init);
const init_val = if (uncasted_init != .none) blk: {
const init = if (have_ty)
try sema.coerce(block, var_ty, uncasted_init, init_src)
else
uncasted_init;
break :blk (try sema.resolveMaybeUndefVal(init)) orelse
return sema.failWithNeededComptime(block, init_src, "container level variable initializers must be comptime-known");
} else Value.@"unreachable";
try sema.validateVarType(block, ty_src, var_ty, small.is_extern);
const new_var = try sema.gpa.create(Module.Var);
errdefer sema.gpa.destroy(new_var);
log.debug("created variable {*} owner_decl: {*} ({s})", .{
new_var, sema.owner_decl, sema.owner_decl.name,
});
new_var.* = .{
.owner_decl = sema.owner_decl_index,
.init = init_val,
.is_extern = small.is_extern,
.is_mutable = true,
.is_threadlocal = small.is_threadlocal,
.is_weak_linkage = false,
.lib_name = null,
};
if (lib_name) |lname| {
new_var.lib_name = try sema.handleExternLibName(block, ty_src, lname);
}
const result = try sema.addConstant(
var_ty,
try Value.Tag.variable.create(sema.arena, new_var),
);
return result;
}
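// Handles function declarations whose prototypes carry extra attributes
// resolved from bodies or references (alignment, address space, link
// section, calling convention, or a return type expression), e.g.
// `fn f() align(16) callconv(.C) void { ... }`.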
fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const extra = sema.code.extraData(Zir.Inst.FuncFancy, inst_data.payload_index);
const target = mod.getTarget();
const align_src: LazySrcLoc = .{ .node_offset_fn_type_align = inst_data.src_node };
const addrspace_src: LazySrcLoc = .{ .node_offset_fn_type_addrspace = inst_data.src_node };
const section_src: LazySrcLoc = .{ .node_offset_fn_type_section = inst_data.src_node };
const cc_src: LazySrcLoc = .{ .node_offset_fn_type_cc = inst_data.src_node };
const ret_src: LazySrcLoc = .{ .node_offset_fn_type_ret_ty = inst_data.src_node };
const has_body = extra.data.body_len != 0;
var extra_index: usize = extra.end;
const lib_name: ?[]const u8 = if (extra.data.bits.has_lib_name) blk: {
const lib_name = sema.code.nullTerminatedString(sema.code.extra[extra_index]);
extra_index += 1;
break :blk lib_name;
} else null;
if (has_body and
(extra.data.bits.has_align_body or extra.data.bits.has_align_ref) and
!target_util.supportsFunctionAlignment(target))
{
return sema.fail(block, align_src, "target does not support function alignment", .{});
}
const @"align": ?u32 = if (extra.data.bits.has_align_body) blk: {
const body_len = sema.code.extra[extra_index];
extra_index += 1;
const body = sema.code.extra[extra_index..][0..body_len];
extra_index += body.len;
const val = try sema.resolveGenericBody(block, align_src, body, inst, Type.u29, "alignment must be comptime-known");
if (val.isGenericPoison()) {
break :blk null;
}
const alignment = @intCast(u32, val.toUnsignedInt(mod));
try sema.validateAlign(block, align_src, alignment);
if (alignment == target_util.defaultFunctionAlignment(target)) {
break :blk 0;
} else {
break :blk alignment;
}
} else if (extra.data.bits.has_align_ref) blk: {
const align_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]);
extra_index += 1;
const align_tv = sema.resolveInstConst(block, align_src, align_ref, "alignment must be comptime-known") catch |err| switch (err) {
error.GenericPoison => {
break :blk null;
},
else => |e| return e,
};
const alignment = @intCast(u32, align_tv.val.toUnsignedInt(mod));
try sema.validateAlign(block, align_src, alignment);
if (alignment == target_util.defaultFunctionAlignment(target)) {
break :blk 0;
} else {
break :blk alignment;
}
} else 0;
const @"addrspace": ?std.builtin.AddressSpace = if (extra.data.bits.has_addrspace_body) blk: {
const body_len = sema.code.extra[extra_index];
extra_index += 1;
const body = sema.code.extra[extra_index..][0..body_len];
extra_index += body.len;
const addrspace_ty = try sema.getBuiltinType("AddressSpace");
const val = try sema.resolveGenericBody(block, addrspace_src, body, inst, addrspace_ty, "addrespace must be comptime-known");
if (val.isGenericPoison()) {
break :blk null;
}
break :blk mod.toEnum(std.builtin.AddressSpace, val);
} else if (extra.data.bits.has_addrspace_ref) blk: {
const addrspace_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]);
extra_index += 1;
const addrspace_tv = sema.resolveInstConst(block, addrspace_src, addrspace_ref, "addrespace must be comptime-known") catch |err| switch (err) {
error.GenericPoison => {
break :blk null;
},
else => |e| return e,
};
break :blk mod.toEnum(std.builtin.AddressSpace, addrspace_tv.val);
} else target_util.defaultAddressSpace(target, .function);
const @"linksection": FuncLinkSection = if (extra.data.bits.has_section_body) blk: {
const body_len = sema.code.extra[extra_index];
extra_index += 1;
const body = sema.code.extra[extra_index..][0..body_len];
extra_index += body.len;
const ty = Type.const_slice_u8;
const val = try sema.resolveGenericBody(block, section_src, body, inst, ty, "linksection must be comptime-known");
if (val.isGenericPoison()) {
break :blk FuncLinkSection{ .generic = {} };
}
break :blk FuncLinkSection{ .explicit = try val.toAllocatedBytes(ty, sema.arena, sema.mod) };
} else if (extra.data.bits.has_section_ref) blk: {
const section_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]);
extra_index += 1;
const section_name = sema.resolveConstString(block, section_src, section_ref, "linksection must be comptime-known") catch |err| switch (err) {
error.GenericPoison => {
break :blk FuncLinkSection{ .generic = {} };
},
else => |e| return e,
};
break :blk FuncLinkSection{ .explicit = section_name };
} else FuncLinkSection{ .default = {} };
const cc: ?std.builtin.CallingConvention = if (extra.data.bits.has_cc_body) blk: {
const body_len = sema.code.extra[extra_index];
extra_index += 1;
const body = sema.code.extra[extra_index..][0..body_len];
extra_index += body.len;
const cc_ty = try sema.getBuiltinType("CallingConvention");
const val = try sema.resolveGenericBody(block, cc_src, body, inst, cc_ty, "calling convention must be comptime-known");
if (val.isGenericPoison()) {
break :blk null;
}
break :blk mod.toEnum(std.builtin.CallingConvention, val);
} else if (extra.data.bits.has_cc_ref) blk: {
const cc_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]);
extra_index += 1;
const cc_tv = sema.resolveInstConst(block, cc_src, cc_ref, "calling convention must be comptime-known") catch |err| switch (err) {
error.GenericPoison => {
break :blk null;
},
else => |e| return e,
};
break :blk mod.toEnum(std.builtin.CallingConvention, cc_tv.val);
} else if (sema.owner_decl.is_exported and has_body)
.C
else
.Unspecified;
const ret_ty: Type = if (extra.data.bits.has_ret_ty_body) blk: {
const body_len = sema.code.extra[extra_index];
extra_index += 1;
const body = sema.code.extra[extra_index..][0..body_len];
extra_index += body.len;
const val = try sema.resolveGenericBody(block, ret_src, body, inst, Type.type, "return type must be comptime-known");
const ty = val.toType();
break :blk ty;
} else if (extra.data.bits.has_ret_ty_ref) blk: {
const ret_ty_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]);
extra_index += 1;
const ret_ty_tv = sema.resolveInstConst(block, ret_src, ret_ty_ref, "return type must be comptime-known") catch |err| switch (err) {
error.GenericPoison => {
break :blk Type.generic_poison;
},
else => |e| return e,
};
const ty = ret_ty_tv.val.toType();
break :blk ty;
} else Type.void;
const noalias_bits: u32 = if (extra.data.bits.has_any_noalias) blk: {
const x = sema.code.extra[extra_index];
extra_index += 1;
break :blk x;
} else 0;
var src_locs: Zir.Inst.Func.SrcLocs = undefined;
if (has_body) {
extra_index += extra.data.body_len;
src_locs = sema.code.extraData(Zir.Inst.Func.SrcLocs, extra_index).data;
}
const is_var_args = extra.data.bits.is_var_args;
const is_inferred_error = extra.data.bits.is_inferred_error;
const is_extern = extra.data.bits.is_extern;
const is_noinline = extra.data.bits.is_noinline;
return sema.funcCommon(
block,
inst_data.src_node,
inst,
@"align",
@"addrspace",
@"linksection",
cc,
ret_ty,
is_var_args,
is_inferred_error,
is_extern,
has_body,
src_locs,
lib_name,
noalias_bits,
is_noinline,
);
}
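// Handles `@cUndef` inside a `@cImport` block. Illustrative form, with
// "foo.h" as a hypothetical header:
//   const c = @cImport({
//       @cUndef("DEBUG");
//       @cInclude("foo.h");
//   });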
fn zirCUndef(
sema: *Sema,
block: *Block,
extended: Zir.Inst.Extended.InstData,
) CompileError!Air.Inst.Ref {
const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
const src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
const name = try sema.resolveConstString(block, src, extra.operand, "name of macro being undefined must be comptime-known");
try block.c_import_buf.?.writer().print("#undef {s}\n", .{name});
return Air.Inst.Ref.void_value;
}
fn zirCInclude(
sema: *Sema,
block: *Block,
extended: Zir.Inst.Extended.InstData,
) CompileError!Air.Inst.Ref {
const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
const src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
const name = try sema.resolveConstString(block, src, extra.operand, "path being included must be comptime-known");
try block.c_import_buf.?.writer().print("#include <{s}>\n", .{name});
return Air.Inst.Ref.void_value;
}
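// Handles `@cDefine`. Illustrative forms, with FOO as a hypothetical macro:
// `@cDefine("NDEBUG", {})` emits `#define NDEBUG`, while
// `@cDefine("FOO", "1")` emits `#define FOO 1`.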
fn zirCDefine(
sema: *Sema,
block: *Block,
extended: Zir.Inst.Extended.InstData,
) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data;
const name_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
const val_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = extra.node };
const name = try sema.resolveConstString(block, name_src, extra.lhs, "name of macro being defined must be comptime-known");
const rhs = try sema.resolveInst(extra.rhs);
if (sema.typeOf(rhs).zigTypeTag(mod) != .Void) {
const value = try sema.resolveConstString(block, val_src, extra.rhs, "value of macro being defined must be comptime-known");
try block.c_import_buf.?.writer().print("#define {s} {s}\n", .{ name, value });
} else {
try block.c_import_buf.?.writer().print("#define {s}\n", .{name});
}
return Air.Inst.Ref.void_value;
}
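// Handles `@wasmMemorySize`. Illustrative form:
// `const pages = @wasmMemorySize(0);` returns the current size of memory 0
// in units of WebAssembly pages; the index must be comptime-known.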
fn zirWasmMemorySize(
sema: *Sema,
block: *Block,
extended: Zir.Inst.Extended.InstData,
) CompileError!Air.Inst.Ref {
const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
const index_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
const builtin_src = LazySrcLoc.nodeOffset(extra.node);
const target = sema.mod.getTarget();
if (!target.isWasm()) {
return sema.fail(block, builtin_src, "builtin @wasmMemorySize is available when targeting WebAssembly; targeted CPU architecture is {s}", .{@tagName(target.cpu.arch)});
}
const index = @intCast(u32, try sema.resolveInt(block, index_src, extra.operand, Type.u32, "wasm memory size index must be comptime-known"));
try sema.requireRuntimeBlock(block, builtin_src, null);
return block.addInst(.{
.tag = .wasm_memory_size,
.data = .{ .pl_op = .{
.operand = .none,
.payload = index,
} },
});
}
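// Handles `@wasmMemoryGrow`. Illustrative form:
// `const prev = @wasmMemoryGrow(0, 1);` grows memory 0 by one page and
// returns the previous page count, or -1 if growing failed.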
fn zirWasmMemoryGrow(
sema: *Sema,
block: *Block,
extended: Zir.Inst.Extended.InstData,
) CompileError!Air.Inst.Ref {
const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data;
const builtin_src = LazySrcLoc.nodeOffset(extra.node);
const index_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
const delta_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = extra.node };
const target = sema.mod.getTarget();
if (!target.isWasm()) {
return sema.fail(block, builtin_src, "builtin @wasmMemoryGrow is available when targeting WebAssembly; targeted CPU architecture is {s}", .{@tagName(target.cpu.arch)});
}
const index = @intCast(u32, try sema.resolveInt(block, index_src, extra.lhs, Type.u32, "wasm memory index must be comptime-known"));
const delta = try sema.coerce(block, Type.u32, try sema.resolveInst(extra.rhs), delta_src);
try sema.requireRuntimeBlock(block, builtin_src, null);
return block.addInst(.{
.tag = .wasm_memory_grow,
.data = .{ .pl_op = .{
.operand = delta,
.payload = index,
} },
});
}
fn resolvePrefetchOptions(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
zir_ref: Zir.Inst.Ref,
) CompileError!std.builtin.PrefetchOptions {
const mod = sema.mod;
const options_ty = try sema.getBuiltinType("PrefetchOptions");
const options = try sema.coerce(block, options_ty, try sema.resolveInst(zir_ref), src);
const rw_src = sema.maybeOptionsSrc(block, src, "rw");
const locality_src = sema.maybeOptionsSrc(block, src, "locality");
const cache_src = sema.maybeOptionsSrc(block, src, "cache");
const rw = try sema.fieldVal(block, src, options, "rw", rw_src);
const rw_val = try sema.resolveConstValue(block, rw_src, rw, "prefetch read/write must be comptime-known");
const locality = try sema.fieldVal(block, src, options, "locality", locality_src);
const locality_val = try sema.resolveConstValue(block, locality_src, locality, "prefetch locality must be comptime-known");
const cache = try sema.fieldVal(block, src, options, "cache", cache_src);
const cache_val = try sema.resolveConstValue(block, cache_src, cache, "prefetch cache must be comptime-known");
return std.builtin.PrefetchOptions{
.rw = mod.toEnum(std.builtin.PrefetchOptions.Rw, rw_val),
.locality = @intCast(u2, locality_val.toUnsignedInt(mod)),
.cache = mod.toEnum(std.builtin.PrefetchOptions.Cache, cache_val),
};
}
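// Handles `@prefetch`. Illustrative form (a sketch):
// `@prefetch(&x, .{ .rw = .read, .locality = 3, .cache = .data });`
// The options are resolved by `resolvePrefetchOptions` above, and the
// instruction is omitted entirely inside comptime blocks.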
fn zirPrefetch(
sema: *Sema,
block: *Block,
extended: Zir.Inst.Extended.InstData,
) CompileError!Air.Inst.Ref {
const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data;
const ptr_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
const opts_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = extra.node };
const ptr = try sema.resolveInst(extra.lhs);
try sema.checkPtrOperand(block, ptr_src, sema.typeOf(ptr));
const options = sema.resolvePrefetchOptions(block, .unneeded, extra.rhs) catch |err| switch (err) {
error.NeededSourceLocation => {
_ = try sema.resolvePrefetchOptions(block, opts_src, extra.rhs);
unreachable;
},
else => |e| return e,
};
if (!block.is_comptime) {
_ = try block.addInst(.{
.tag = .prefetch,
.data = .{ .prefetch = .{
.ptr = ptr,
.rw = options.rw,
.locality = options.locality,
.cache = options.cache,
} },
});
}
return Air.Inst.Ref.void_value;
}
fn resolveExternOptions(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
zir_ref: Zir.Inst.Ref,
) CompileError!std.builtin.ExternOptions {
const options_inst = try sema.resolveInst(zir_ref);
const extern_options_ty = try sema.getBuiltinType("ExternOptions");
const options = try sema.coerce(block, extern_options_ty, options_inst, src);
const mod = sema.mod;
const name_src = sema.maybeOptionsSrc(block, src, "name");
const library_src = sema.maybeOptionsSrc(block, src, "library");
const linkage_src = sema.maybeOptionsSrc(block, src, "linkage");
const thread_local_src = sema.maybeOptionsSrc(block, src, "thread_local");
const name_ref = try sema.fieldVal(block, src, options, "name", name_src);
const name_val = try sema.resolveConstValue(block, name_src, name_ref, "name of the extern symbol must be comptime-known");
const name = try name_val.toAllocatedBytes(Type.const_slice_u8, sema.arena, mod);
const library_name_inst = try sema.fieldVal(block, src, options, "library_name", library_src);
const library_name_val = try sema.resolveConstValue(block, library_src, library_name_inst, "library in which the extern symbol resides must be comptime-known");
const linkage_ref = try sema.fieldVal(block, src, options, "linkage", linkage_src);
const linkage_val = try sema.resolveConstValue(block, linkage_src, linkage_ref, "linkage of the extern symbol must be comptime-known");
const linkage = mod.toEnum(std.builtin.GlobalLinkage, linkage_val);
const is_thread_local = try sema.fieldVal(block, src, options, "is_thread_local", thread_local_src);
const is_thread_local_val = try sema.resolveConstValue(block, thread_local_src, is_thread_local, "thread locality of the extern symbol must be comptime-known");
const library_name = if (!library_name_val.isNull(mod)) blk: {
const payload = library_name_val.castTag(.opt_payload).?.data;
const library_name = try payload.toAllocatedBytes(Type.const_slice_u8, sema.arena, mod);
if (library_name.len == 0) {
return sema.fail(block, library_src, "library name cannot be empty", .{});
}
break :blk try sema.handleExternLibName(block, library_src, library_name);
} else null;
if (name.len == 0) {
return sema.fail(block, name_src, "extern symbol name cannot be empty", .{});
}
if (linkage != .Weak and linkage != .Strong) {
return sema.fail(block, linkage_src, "extern symbol must use strong or weak linkage", .{});
}
return std.builtin.ExternOptions{
.name = name,
.library_name = library_name,
.linkage = linkage,
.is_thread_local = is_thread_local_val.toBool(mod),
};
}
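// Handles `@extern`. Illustrative form, with libc's malloc as an example
// symbol:
//   const malloc = @extern(*const fn (usize) callconv(.C) ?*anyopaque, .{
//       .name = "malloc",
//   });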
fn zirBuiltinExtern(
sema: *Sema,
block: *Block,
extended: Zir.Inst.Extended.InstData,
) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data;
const ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
const options_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = extra.node };
var ty = try sema.resolveType(block, ty_src, extra.lhs);
if (!ty.isPtrAtRuntime(mod)) {
return sema.fail(block, ty_src, "expected (optional) pointer", .{});
}
if (!try sema.validateExternType(ty.childType(mod), .other)) {
const msg = msg: {
const msg = try sema.errMsg(block, ty_src, "extern symbol cannot have type '{}'", .{ty.fmt(mod)});
errdefer msg.destroy(sema.gpa);
const src_decl = sema.mod.declPtr(block.src_decl);
try sema.explainWhyTypeIsNotExtern(msg, ty_src.toSrcLoc(src_decl, mod), ty, .other);
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
const options = sema.resolveExternOptions(block, .unneeded, extra.rhs) catch |err| switch (err) {
error.NeededSourceLocation => {
_ = try sema.resolveExternOptions(block, options_src, extra.rhs);
unreachable;
},
else => |e| return e,
};
if (options.linkage == .Weak and !ty.ptrAllowsZero(mod)) {
ty = try Type.optional(sema.arena, ty, mod);
}
// TODO check duplicate extern
const new_decl_index = try mod.allocateNewDecl(sema.owner_decl.src_namespace, sema.owner_decl.src_node, null);
errdefer mod.destroyDecl(new_decl_index);
const new_decl = mod.declPtr(new_decl_index);
new_decl.name = try sema.gpa.dupeZ(u8, options.name);
{
var new_decl_arena = std.heap.ArenaAllocator.init(sema.gpa);
errdefer new_decl_arena.deinit();
const new_decl_arena_allocator = new_decl_arena.allocator();
const new_var = try new_decl_arena_allocator.create(Module.Var);
new_var.* = .{
.owner_decl = sema.owner_decl_index,
.init = Value.@"unreachable",
.is_extern = true,
.is_mutable = false,
.is_threadlocal = options.is_thread_local,
.is_weak_linkage = options.linkage == .Weak,
.lib_name = null,
};
new_decl.src_line = sema.owner_decl.src_line;
// We only access this decl through the decl_ref with the correct type created
// below, so this type doesn't matter
new_decl.ty = Type.anyopaque;
new_decl.val = try Value.Tag.variable.create(new_decl_arena_allocator, new_var);
new_decl.@"align" = 0;
new_decl.@"linksection" = null;
new_decl.has_tv = true;
new_decl.analysis = .complete;
new_decl.generation = mod.generation;
try new_decl.finalizeNewArena(&new_decl_arena);
}
try mod.declareDeclDependency(sema.owner_decl_index, new_decl_index);
try sema.ensureDeclAnalyzed(new_decl_index);
const ref = try Value.Tag.decl_ref.create(sema.arena, new_decl_index);
return sema.addConstant(ty, ref);
}
fn zirWorkItem(
sema: *Sema,
block: *Block,
extended: Zir.Inst.Extended.InstData,
zir_tag: Zir.Inst.Extended,
) CompileError!Air.Inst.Ref {
const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
const dimension_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
const builtin_src = LazySrcLoc.nodeOffset(extra.node);
const target = sema.mod.getTarget();
switch (target.cpu.arch) {
// TODO: Allow for other GPU targets.
.amdgcn => {},
else => {
return sema.fail(block, builtin_src, "builtin only available on GPU targets; targeted architecture is {s}", .{@tagName(target.cpu.arch)});
},
}
const dimension = @intCast(u32, try sema.resolveInt(block, dimension_src, extra.operand, Type.u32, "dimension must be comptime-known"));
try sema.requireRuntimeBlock(block, builtin_src, null);
return block.addInst(.{
.tag = switch (zir_tag) {
.work_item_id => .work_item_id,
.work_group_size => .work_group_size,
.work_group_id => .work_group_id,
else => unreachable,
},
.data = .{ .pl_op = .{
.operand = .none,
.payload = dimension,
} },
});
}
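// Handles `@inComptime`, which lowers to `bool_true`/`bool_false` depending
// on whether the enclosing block is being comptime-evaluated, e.g.
// `if (@inComptime()) ... else ...`.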
fn zirInComptime(
sema: *Sema,
block: *Block,
) CompileError!Air.Inst.Ref {
_ = sema;
if (block.is_comptime) {
return Air.Inst.Ref.bool_true;
} else {
return Air.Inst.Ref.bool_false;
}
}
fn requireRuntimeBlock(sema: *Sema, block: *Block, src: LazySrcLoc, runtime_src: ?LazySrcLoc) !void {
if (block.is_comptime) {
const msg = msg: {
const msg = try sema.errMsg(block, src, "unable to evaluate comptime expression", .{});
errdefer msg.destroy(sema.gpa);
if (runtime_src) |some| {
try sema.errNote(block, some, msg, "operation is runtime due to this operand", .{});
}
if (block.comptime_reason) |some| {
try some.explain(sema, msg);
}
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
}
/// Emit a compile error if type cannot be used for a runtime variable.
fn validateVarType(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
var_ty: Type,
is_extern: bool,
) CompileError!void {
const mod = sema.mod;
if (is_extern and !try sema.validateExternType(var_ty, .other)) {
const msg = msg: {
const msg = try sema.errMsg(block, src, "extern variable cannot have type '{}'", .{var_ty.fmt(mod)});
errdefer msg.destroy(sema.gpa);
const src_decl = mod.declPtr(block.src_decl);
try sema.explainWhyTypeIsNotExtern(msg, src.toSrcLoc(src_decl, mod), var_ty, .other);
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
if (try sema.validateRunTimeType(var_ty, is_extern)) return;
const msg = msg: {
const msg = try sema.errMsg(block, src, "variable of type '{}' must be const or comptime", .{var_ty.fmt(mod)});
errdefer msg.destroy(sema.gpa);
const src_decl = mod.declPtr(block.src_decl);
try sema.explainWhyTypeIsComptime(msg, src.toSrcLoc(src_decl, mod), var_ty);
if (var_ty.zigTypeTag(mod) == .ComptimeInt or var_ty.zigTypeTag(mod) == .ComptimeFloat) {
try sema.errNote(block, src, msg, "to modify this variable at runtime, it must be given an explicit fixed-size number type", .{});
}
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
fn validateRunTimeType(
sema: *Sema,
var_ty: Type,
is_extern: bool,
) CompileError!bool {
const mod = sema.mod;
var ty = var_ty;
while (true) switch (ty.zigTypeTag(mod)) {
.Bool,
.Int,
.Float,
.ErrorSet,
.Frame,
.AnyFrame,
.Void,
=> return true,
.Enum => return !(try sema.typeRequiresComptime(ty)),
.ComptimeFloat,
.ComptimeInt,
.EnumLiteral,
.NoReturn,
.Type,
.Undefined,
.Null,
.Fn,
=> return false,
.Pointer => {
const elem_ty = ty.childType(mod);
switch (elem_ty.zigTypeTag(mod)) {
.Opaque => return true,
.Fn => return elem_ty.isFnOrHasRuntimeBits(mod),
else => ty = elem_ty,
}
},
.Opaque => return is_extern,
.Optional => {
const child_ty = ty.optionalChild(mod);
return sema.validateRunTimeType(child_ty, is_extern);
},
.Array, .Vector => ty = ty.childType(mod),
.ErrorUnion => ty = ty.errorUnionPayload(mod),
.Struct, .Union => {
const resolved_ty = try sema.resolveTypeFields(ty);
const needs_comptime = try sema.typeRequiresComptime(resolved_ty);
return !needs_comptime;
},
};
}
const TypeSet = std.HashMapUnmanaged(Type, void, Type.HashContext64, std.hash_map.default_max_load_percentage);
fn explainWhyTypeIsComptime(
sema: *Sema,
msg: *Module.ErrorMsg,
src_loc: Module.SrcLoc,
ty: Type,
) CompileError!void {
var type_set = TypeSet{};
defer type_set.deinit(sema.gpa);
try sema.resolveTypeFully(ty);
return sema.explainWhyTypeIsComptimeInner(msg, src_loc, ty, &type_set);
}
fn explainWhyTypeIsComptimeInner(
sema: *Sema,
msg: *Module.ErrorMsg,
src_loc: Module.SrcLoc,
ty: Type,
type_set: *TypeSet,
) CompileError!void {
const mod = sema.mod;
switch (ty.zigTypeTag(mod)) {
.Bool,
.Int,
.Float,
.ErrorSet,
.Enum,
.Frame,
.AnyFrame,
.Void,
=> return,
.Fn => {
try mod.errNoteNonLazy(src_loc, msg, "use '*const {}' for a function pointer type", .{
ty.fmt(sema.mod),
});
},
.Type => {
try mod.errNoteNonLazy(src_loc, msg, "types are not available at runtime", .{});
},
.ComptimeFloat,
.ComptimeInt,
.EnumLiteral,
.NoReturn,
.Undefined,
.Null,
=> return,
.Opaque => {
try mod.errNoteNonLazy(src_loc, msg, "opaque type '{}' has undefined size", .{ty.fmt(sema.mod)});
},
.Array, .Vector => {
try sema.explainWhyTypeIsComptimeInner(msg, src_loc, ty.childType(mod), type_set);
},
.Pointer => {
const elem_ty = ty.elemType2(mod);
if (elem_ty.zigTypeTag(mod) == .Fn) {
const fn_info = mod.typeToFunc(elem_ty).?;
if (fn_info.is_generic) {
try mod.errNoteNonLazy(src_loc, msg, "function is generic", .{});
}
switch (fn_info.cc) {
.Inline => try mod.errNoteNonLazy(src_loc, msg, "function has inline calling convention", .{}),
else => {},
}
if (fn_info.return_type.toType().comptimeOnly(mod)) {
try mod.errNoteNonLazy(src_loc, msg, "function has a comptime-only return type", .{});
}
return;
}
try sema.explainWhyTypeIsComptimeInner(msg, src_loc, ty.childType(mod), type_set);
},
.Optional => {
try sema.explainWhyTypeIsComptimeInner(msg, src_loc, ty.optionalChild(mod), type_set);
},
.ErrorUnion => {
try sema.explainWhyTypeIsComptimeInner(msg, src_loc, ty.errorUnionPayload(mod), type_set);
},
.Struct => {
if ((try type_set.getOrPutContext(sema.gpa, ty, .{ .mod = mod })).found_existing) return;
if (mod.typeToStruct(ty)) |struct_obj| {
for (struct_obj.fields.values(), 0..) |field, i| {
const field_src_loc = mod.fieldSrcLoc(struct_obj.owner_decl, .{
.index = i,
.range = .type,
});
if (try sema.typeRequiresComptime(field.ty)) {
try mod.errNoteNonLazy(field_src_loc, msg, "struct requires comptime because of this field", .{});
try sema.explainWhyTypeIsComptimeInner(msg, field_src_loc, field.ty, type_set);
}
}
}
// TODO tuples
},
.Union => {
if ((try type_set.getOrPutContext(sema.gpa, ty, .{ .mod = mod })).found_existing) return;
if (mod.typeToUnion(ty)) |union_obj| {
for (union_obj.fields.values(), 0..) |field, i| {
const field_src_loc = mod.fieldSrcLoc(union_obj.owner_decl, .{
.index = i,
.range = .type,
});
if (try sema.typeRequiresComptime(field.ty)) {
try mod.errNoteNonLazy(field_src_loc, msg, "union requires comptime because of this field", .{});
try sema.explainWhyTypeIsComptimeInner(msg, field_src_loc, field.ty, type_set);
}
}
}
},
}
}
const ExternPosition = enum {
ret_ty,
param_ty,
union_field,
struct_field,
element,
other,
};
/// Returns true if `ty` is allowed in extern types.
/// Does *NOT* require `ty` to be resolved in any way.
/// Calls `resolveTypeLayout` for packed containers.
fn validateExternType(
sema: *Sema,
ty: Type,
position: ExternPosition,
) !bool {
const mod = sema.mod;
switch (ty.zigTypeTag(mod)) {
.Type,
.ComptimeFloat,
.ComptimeInt,
.EnumLiteral,
.Undefined,
.Null,
.ErrorUnion,
.ErrorSet,
.Frame,
=> return false,
.Void => return position == .union_field or position == .ret_ty,
.NoReturn => return position == .ret_ty,
.Opaque,
.Bool,
.Float,
.AnyFrame,
=> return true,
.Pointer => return !(ty.isSlice(mod) or try sema.typeRequiresComptime(ty)),
.Int => switch (ty.intInfo(mod).bits) {
8, 16, 32, 64, 128 => return true,
else => return false,
},
.Fn => {
if (position != .other) return false;
const target = sema.mod.getTarget();
// For now we want to allow PTX kernels to use Zig objects, even if we end up exposing the ABI.
// The goal is to experiment with more integrated CPU/GPU code.
if (ty.fnCallingConvention(mod) == .Kernel and (target.cpu.arch == .nvptx or target.cpu.arch == .nvptx64)) {
return true;
}
return !target_util.fnCallConvAllowsZigTypes(target, ty.fnCallingConvention(mod));
},
.Enum => {
return sema.validateExternType(try ty.intTagType(mod), position);
},
.Struct, .Union => switch (ty.containerLayout(mod)) {
.Extern => return true,
.Packed => {
const bit_size = try ty.bitSizeAdvanced(mod, sema);
switch (bit_size) {
8, 16, 32, 64, 128 => return true,
else => return false,
}
},
.Auto => return false,
},
.Array => {
if (position == .ret_ty or position == .param_ty) return false;
return sema.validateExternType(ty.elemType2(mod), .element);
},
.Vector => return sema.validateExternType(ty.elemType2(mod), .element),
.Optional => return ty.isPtrLikeOptional(mod),
}
}
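/// Adds error notes explaining why `ty` was rejected by `validateExternType`
/// for the given `position`.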
fn explainWhyTypeIsNotExtern(
sema: *Sema,
msg: *Module.ErrorMsg,
src_loc: Module.SrcLoc,
ty: Type,
position: ExternPosition,
) CompileError!void {
const mod = sema.mod;
switch (ty.zigTypeTag(mod)) {
.Opaque,
.Bool,
.Float,
.AnyFrame,
=> return,
.Type,
.ComptimeFloat,
.ComptimeInt,
.EnumLiteral,
.Undefined,
.Null,
.ErrorUnion,
.ErrorSet,
.Frame,
=> return,
.Pointer => {
if (ty.isSlice(mod)) {
try mod.errNoteNonLazy(src_loc, msg, "slices have no guaranteed in-memory representation", .{});
} else {
const pointee_ty = ty.childType(mod);
try mod.errNoteNonLazy(src_loc, msg, "pointer to comptime-only type '{}'", .{pointee_ty.fmt(sema.mod)});
try sema.explainWhyTypeIsComptime(msg, src_loc, pointee_ty);
}
},
.Void => try mod.errNoteNonLazy(src_loc, msg, "'void' is a zero-bit type; for C 'void' use 'anyopaque'", .{}),
.NoReturn => try mod.errNoteNonLazy(src_loc, msg, "'noreturn' is only allowed as a return type", .{}),
.Int => if (!std.math.isPowerOfTwo(ty.intInfo(mod).bits)) {
try mod.errNoteNonLazy(src_loc, msg, "only integers with power of two bits are extern compatible", .{});
} else {
try mod.errNoteNonLazy(src_loc, msg, "only integers with 8, 16, 32, 64 and 128 bits are extern compatible", .{});
},
.Fn => {
if (position != .other) {
try mod.errNoteNonLazy(src_loc, msg, "type has no guaranteed in-memory representation", .{});
try mod.errNoteNonLazy(src_loc, msg, "use '*const ' to make a function pointer type", .{});
return;
}
switch (ty.fnCallingConvention(mod)) {
.Unspecified => try mod.errNoteNonLazy(src_loc, msg, "extern function must specify calling convention", .{}),
.Async => try mod.errNoteNonLazy(src_loc, msg, "async function cannot be extern", .{}),
.Inline => try mod.errNoteNonLazy(src_loc, msg, "inline function cannot be extern", .{}),
else => return,
}
},
.Enum => {
const tag_ty = try ty.intTagType(mod);
try mod.errNoteNonLazy(src_loc, msg, "enum tag type '{}' is not extern compatible", .{tag_ty.fmt(sema.mod)});
try sema.explainWhyTypeIsNotExtern(msg, src_loc, tag_ty, position);
},
.Struct => try mod.errNoteNonLazy(src_loc, msg, "only extern structs and ABI-sized packed structs are extern compatible", .{}),
.Union => try mod.errNoteNonLazy(src_loc, msg, "only extern unions and ABI-sized packed unions are extern compatible", .{}),
.Array => {
if (position == .ret_ty) {
return mod.errNoteNonLazy(src_loc, msg, "arrays are not allowed as a return type", .{});
} else if (position == .param_ty) {
return mod.errNoteNonLazy(src_loc, msg, "arrays are not allowed as a parameter type", .{});
}
try sema.explainWhyTypeIsNotExtern(msg, src_loc, ty.elemType2(mod), .element);
},
.Vector => try sema.explainWhyTypeIsNotExtern(msg, src_loc, ty.elemType2(mod), .element),
.Optional => try mod.errNoteNonLazy(src_loc, msg, "only pointer like optionals are extern compatible", .{}),
}
}
/// Returns true if `ty` is allowed in packed types.
/// Does *NOT* require `ty` to be resolved in any way.
fn validatePackedType(ty: Type, mod: *Module) bool {
switch (ty.zigTypeTag(mod)) {
.Type,
.ComptimeFloat,
.ComptimeInt,
.EnumLiteral,
.Undefined,
.Null,
.ErrorUnion,
.ErrorSet,
.Frame,
.NoReturn,
.Opaque,
.AnyFrame,
.Fn,
.Array,
=> return false,
.Optional => return ty.isPtrLikeOptional(mod),
.Void,
.Bool,
.Float,
.Int,
.Vector,
.Enum,
=> return true,
.Pointer => return !ty.isSlice(mod),
.Struct, .Union => return ty.containerLayout(mod) == .Packed,
}
}
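/// Adds error notes explaining why `ty` was rejected by `validatePackedType`.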
fn explainWhyTypeIsNotPacked(
sema: *Sema,
msg: *Module.ErrorMsg,
src_loc: Module.SrcLoc,
ty: Type,
) CompileError!void {
const mod = sema.mod;
switch (ty.zigTypeTag(mod)) {
.Void,
.Bool,
.Float,
.Int,
.Vector,
.Enum,
=> return,
.Type,
.ComptimeFloat,
.ComptimeInt,
.EnumLiteral,
.Undefined,
.Null,
.Frame,
.NoReturn,
.Opaque,
.ErrorUnion,
.ErrorSet,
.AnyFrame,
.Optional,
.Array,
=> try mod.errNoteNonLazy(src_loc, msg, "type has no guaranteed in-memory representation", .{}),
.Pointer => try mod.errNoteNonLazy(src_loc, msg, "slices have no guaranteed in-memory representation", .{}),
.Fn => {
try mod.errNoteNonLazy(src_loc, msg, "type has no guaranteed in-memory representation", .{});
try mod.errNoteNonLazy(src_loc, msg, "use '*const ' to make a function pointer type", .{});
},
.Struct => try mod.errNoteNonLazy(src_loc, msg, "only structs with packed layout are allowed in packed types", .{}),
.Union => try mod.errNoteNonLazy(src_loc, msg, "only unions with packed layout are allowed in packed types", .{}),
}
}
pub const PanicId = enum {
unreach,
unwrap_null,
cast_to_null,
incorrect_alignment,
invalid_error_code,
cast_truncated_data,
negative_to_unsigned,
integer_overflow,
shl_overflow,
shr_overflow,
divide_by_zero,
exact_division_remainder,
inactive_union_field,
integer_part_out_of_bounds,
corrupt_switch,
shift_rhs_too_big,
invalid_enum_value,
sentinel_mismatch,
unwrap_error,
index_out_of_bounds,
start_index_greater_than_end,
for_len_mismatch,
memcpy_len_mismatch,
memcpy_alias,
noreturn_returned,
};
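/// Emits a runtime safety check: if `ok` is false at runtime, control
/// branches into a generated failure block that panics with the message
/// for `panic_id`.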
fn addSafetyCheck(
sema: *Sema,
parent_block: *Block,
ok: Air.Inst.Ref,
panic_id: PanicId,
) !void {
const gpa = sema.gpa;
assert(!parent_block.is_comptime);
var fail_block: Block = .{
.parent = parent_block,
.sema = sema,
.src_decl = parent_block.src_decl,
.namespace = parent_block.namespace,
.wip_capture_scope = parent_block.wip_capture_scope,
.instructions = .{},
.inlining = parent_block.inlining,
.is_comptime = false,
};
defer fail_block.instructions.deinit(gpa);
try sema.safetyPanic(&fail_block, panic_id);
try sema.addSafetyCheckExtra(parent_block, ok, &fail_block);
}
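/// Splices `fail_block` into `parent_block` as the else branch of a
/// `cond_br` on `ok`; the then branch breaks out of the wrapping `block`
/// with a void value.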
fn addSafetyCheckExtra(
sema: *Sema,
parent_block: *Block,
ok: Air.Inst.Ref,
fail_block: *Block,
) !void {
const gpa = sema.gpa;
try parent_block.instructions.ensureUnusedCapacity(gpa, 1);
try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Block).Struct.fields.len +
1 + // The main block only needs space for the cond_br.
@typeInfo(Air.CondBr).Struct.fields.len +
1 + // The ok branch of the cond_br only needs space for the br.
fail_block.instructions.items.len);
try sema.air_instructions.ensureUnusedCapacity(gpa, 3);
const block_inst = @intCast(Air.Inst.Index, sema.air_instructions.len);
const cond_br_inst = block_inst + 1;
const br_inst = cond_br_inst + 1;
sema.air_instructions.appendAssumeCapacity(.{
.tag = .block,
.data = .{ .ty_pl = .{
.ty = .void_type,
.payload = sema.addExtraAssumeCapacity(Air.Block{
.body_len = 1,
}),
} },
});
sema.air_extra.appendAssumeCapacity(cond_br_inst);
sema.air_instructions.appendAssumeCapacity(.{
.tag = .cond_br,
.data = .{ .pl_op = .{
.operand = ok,
.payload = sema.addExtraAssumeCapacity(Air.CondBr{
.then_body_len = 1,
.else_body_len = @intCast(u32, fail_block.instructions.items.len),
}),
} },
});
sema.air_extra.appendAssumeCapacity(br_inst);
sema.air_extra.appendSliceAssumeCapacity(fail_block.instructions.items);
sema.air_instructions.appendAssumeCapacity(.{
.tag = .br,
.data = .{ .br = .{
.block_inst = block_inst,
.operand = .void_value,
} },
});
parent_block.instructions.appendAssumeCapacity(block_inst);
}
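/// Emits a call to the `panic` builtin with `msg_inst`, a null stack trace,
/// and a null return address, or a trap when the backend does not support
/// panic functions.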
fn panicWithMsg(
sema: *Sema,
block: *Block,
msg_inst: Air.Inst.Ref,
) !void {
const mod = sema.mod;
const arena = sema.arena;
if (!mod.backendSupportsFeature(.panic_fn)) {
_ = try block.addNoOp(.trap);
return;
}
const panic_fn = try sema.getBuiltin("panic");
const unresolved_stack_trace_ty = try sema.getBuiltinType("StackTrace");
const stack_trace_ty = try sema.resolveTypeFields(unresolved_stack_trace_ty);
const target = mod.getTarget();
const ptr_stack_trace_ty = try Type.ptr(arena, mod, .{
.pointee_type = stack_trace_ty,
.@"addrspace" = target_util.defaultAddressSpace(target, .global_constant), // TODO might need a place that is more dynamic
});
const null_stack_trace = try sema.addConstant(
try Type.optional(arena, ptr_stack_trace_ty, mod),
Value.null,
);
const args: [3]Air.Inst.Ref = .{ msg_inst, null_stack_trace, .null_value };
try sema.callBuiltin(block, panic_fn, .auto, &args);
}
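/// Emits a safety check that `operand` is not an error. On failure the
/// `panicUnwrapError` builtin is called with the error return trace and the
/// unwrapped error, or a trap is emitted when the backend does not support
/// this feature.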
fn panicUnwrapError(
sema: *Sema,
parent_block: *Block,
operand: Air.Inst.Ref,
unwrap_err_tag: Air.Inst.Tag,
is_non_err_tag: Air.Inst.Tag,
) !void {
assert(!parent_block.is_comptime);
const ok = try parent_block.addUnOp(is_non_err_tag, operand);
if (!sema.mod.comp.formatted_panics) {
return sema.addSafetyCheck(parent_block, ok, .unwrap_error);
}
const gpa = sema.gpa;
var fail_block: Block = .{
.parent = parent_block,
.sema = sema,
.src_decl = parent_block.src_decl,
.namespace = parent_block.namespace,
.wip_capture_scope = parent_block.wip_capture_scope,
.instructions = .{},
.inlining = parent_block.inlining,
.is_comptime = false,
};
defer fail_block.instructions.deinit(gpa);
{
if (!sema.mod.backendSupportsFeature(.panic_unwrap_error)) {
_ = try fail_block.addNoOp(.trap);
} else {
const panic_fn = try sema.getBuiltin("panicUnwrapError");
const err = try fail_block.addTyOp(unwrap_err_tag, Type.anyerror, operand);
const err_return_trace = try sema.getErrorReturnTrace(&fail_block);
const args: [2]Air.Inst.Ref = .{ err_return_trace, err };
try sema.callBuiltin(&fail_block, panic_fn, .auto, &args);
}
}
try sema.addSafetyCheckExtra(parent_block, ok, &fail_block);
}
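/// Emits a bounds check that `cmp_op` holds between `index` and `len`.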
fn panicIndexOutOfBounds(
sema: *Sema,
parent_block: *Block,
index: Air.Inst.Ref,
len: Air.Inst.Ref,
cmp_op: Air.Inst.Tag,
) !void {
assert(!parent_block.is_comptime);
const ok = try parent_block.addBinOp(cmp_op, index, len);
if (!sema.mod.comp.formatted_panics) {
return sema.addSafetyCheck(parent_block, ok, .index_out_of_bounds);
}
try sema.safetyCheckFormatted(parent_block, ok, "panicOutOfBounds", &.{ index, len });
}
fn panicStartGreaterThanEnd(
sema: *Sema,
parent_block: *Block,
start: Air.Inst.Ref,
end: Air.Inst.Ref,
) !void {
assert(!parent_block.is_comptime);
const ok = try parent_block.addBinOp(.cmp_lte, start, end);
if (!sema.mod.comp.formatted_panics) {
return sema.addSafetyCheck(parent_block, ok, .start_index_greater_than_end);
}
try sema.safetyCheckFormatted(parent_block, ok, "panicStartGreaterThanEnd", &.{ start, end });
}
fn panicInactiveUnionField(
sema: *Sema,
parent_block: *Block,
active_tag: Air.Inst.Ref,
wanted_tag: Air.Inst.Ref,
) !void {
assert(!parent_block.is_comptime);
const ok = try parent_block.addBinOp(.cmp_eq, active_tag, wanted_tag);
if (!sema.mod.comp.formatted_panics) {
return sema.addSafetyCheck(parent_block, ok, .inactive_union_field);
}
try sema.safetyCheckFormatted(parent_block, ok, "panicInactiveUnionField", &.{ active_tag, wanted_tag });
}
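/// Loads the element at `sentinel_index` behind `ptr` and emits a safety
/// check that it equals the expected sentinel. Returns early when
/// `maybe_sentinel` is null.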
fn panicSentinelMismatch(
sema: *Sema,
parent_block: *Block,
maybe_sentinel: ?Value,
sentinel_ty: Type,
ptr: Air.Inst.Ref,
sentinel_index: Air.Inst.Ref,
) !void {
assert(!parent_block.is_comptime);
const mod = sema.mod;
const expected_sentinel_val = maybe_sentinel orelse return;
const expected_sentinel = try sema.addConstant(sentinel_ty, expected_sentinel_val);
const ptr_ty = sema.typeOf(ptr);
const actual_sentinel = if (ptr_ty.isSlice(mod))
try parent_block.addBinOp(.slice_elem_val, ptr, sentinel_index)
else blk: {
const elem_ptr_ty = try sema.elemPtrType(ptr_ty, null);
const sentinel_ptr = try parent_block.addPtrElemPtr(ptr, sentinel_index, elem_ptr_ty);
break :blk try parent_block.addTyOp(.load, sentinel_ty, sentinel_ptr);
};
const ok = if (sentinel_ty.zigTypeTag(mod) == .Vector) ok: {
const eql =
try parent_block.addCmpVector(expected_sentinel, actual_sentinel, .eq);
break :ok try parent_block.addInst(.{
.tag = .reduce,
.data = .{ .reduce = .{
.operand = eql,
.operation = .And,
} },
});
} else if (sentinel_ty.isSelfComparable(mod, true))
try parent_block.addBinOp(.cmp_eq, expected_sentinel, actual_sentinel)
else {
const panic_fn = try sema.getBuiltin("checkNonScalarSentinel");
const args: [2]Air.Inst.Ref = .{ expected_sentinel, actual_sentinel };
try sema.callBuiltin(parent_block, panic_fn, .auto, &args);
return;
};
if (!sema.mod.comp.formatted_panics) {
return sema.addSafetyCheck(parent_block, ok, .sentinel_mismatch);
}
try sema.safetyCheckFormatted(parent_block, ok, "panicSentinelMismatch", &.{ expected_sentinel, actual_sentinel });
}
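/// Like `addSafetyCheck` except the failure block calls the formatted panic
/// handler `func` with the runtime `args` instead of a fixed message.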
fn safetyCheckFormatted(
sema: *Sema,
parent_block: *Block,
ok: Air.Inst.Ref,
func: []const u8,
args: []const Air.Inst.Ref,
) CompileError!void {
assert(sema.mod.comp.formatted_panics);
const gpa = sema.gpa;
var fail_block: Block = .{
.parent = parent_block,
.sema = sema,
.src_decl = parent_block.src_decl,
.namespace = parent_block.namespace,
.wip_capture_scope = parent_block.wip_capture_scope,
.instructions = .{},
.inlining = parent_block.inlining,
.is_comptime = false,
};
defer fail_block.instructions.deinit(gpa);
if (!sema.mod.backendSupportsFeature(.safety_check_formatted)) {
_ = try fail_block.addNoOp(.trap);
} else {
const panic_fn = try sema.getBuiltin(func);
try sema.callBuiltin(&fail_block, panic_fn, .auto, args);
}
try sema.addSafetyCheckExtra(parent_block, ok, &fail_block);
}
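/// Emits an unconditional panic using the message for `panic_id` from the
/// builtin `panic_messages` namespace.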
fn safetyPanic(
sema: *Sema,
block: *Block,
panic_id: PanicId,
) CompileError!void {
const mod = sema.mod;
const panic_messages_ty = try sema.getBuiltinType("panic_messages");
const msg_decl_index = (try sema.namespaceLookup(
block,
sema.src,
panic_messages_ty.getNamespaceIndex(mod).unwrap().?,
@tagName(panic_id),
)).?;
const msg_inst = try sema.analyzeDeclVal(block, sema.src, msg_decl_index);
try sema.panicWithMsg(block, msg_inst);
}
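/// Bumps the comptime branch counter and errors once it exceeds the branch
/// quota, pointing the user at `@setEvalBranchQuota`.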
fn emitBackwardBranch(sema: *Sema, block: *Block, src: LazySrcLoc) !void {
sema.branch_count += 1;
if (sema.branch_count > sema.branch_quota) {
const msg = try sema.errMsg(
block,
src,
"evaluation exceeded {d} backwards branches",
.{sema.branch_quota},
);
try sema.errNote(
block,
src,
msg,
"use @setEvalBranchQuota() to raise the branch limit from {d}",
.{sema.branch_quota},
);
return sema.failWithOwnedErrorMsg(msg);
}
}
fn fieldVal(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
object: Air.Inst.Ref,
field_name: []const u8,
field_name_src: LazySrcLoc,
) CompileError!Air.Inst.Ref {
// When editing this function, note that there is corresponding logic to be edited
// in `fieldPtr`. This function takes a value and returns a value.
const mod = sema.mod;
const gpa = sema.gpa;
const arena = sema.arena;
const ip = &mod.intern_pool;
const object_src = src; // TODO better source location
const object_ty = sema.typeOf(object);
// Zig allows dereferencing a single pointer during field lookup. Note that
// we don't actually need to generate the dereference for some field lookups,
// like the length of arrays and other comptime operations.
const is_pointer_to = object_ty.isSinglePointer(mod);
const inner_ty = if (is_pointer_to)
object_ty.childType(mod)
else
object_ty;
switch (inner_ty.zigTypeTag(mod)) {
.Array => {
if (mem.eql(u8, field_name, "len")) {
return sema.addConstant(
Type.usize,
try mod.intValue(Type.usize, inner_ty.arrayLen(mod)),
);
} else if (mem.eql(u8, field_name, "ptr") and is_pointer_to) {
const ptr_info = object_ty.ptrInfo(mod);
const result_ty = try Type.ptr(sema.arena, mod, .{
.pointee_type = ptr_info.pointee_type.childType(mod),
.sentinel = ptr_info.sentinel,
.@"align" = ptr_info.@"align",
.@"addrspace" = ptr_info.@"addrspace",
.bit_offset = ptr_info.bit_offset,
.host_size = ptr_info.host_size,
.vector_index = ptr_info.vector_index,
.@"allowzero" = ptr_info.@"allowzero",
.mutable = ptr_info.mutable,
.@"volatile" = ptr_info.@"volatile",
.size = .Many,
});
return sema.coerce(block, result_ty, object, src);
} else {
return sema.fail(
block,
field_name_src,
"no member named '{s}' in '{}'",
.{ field_name, object_ty.fmt(mod) },
);
}
},
.Pointer => {
const ptr_info = inner_ty.ptrInfo(mod);
if (ptr_info.size == .Slice) {
if (mem.eql(u8, field_name, "ptr")) {
const slice = if (is_pointer_to)
try sema.analyzeLoad(block, src, object, object_src)
else
object;
return sema.analyzeSlicePtr(block, object_src, slice, inner_ty);
} else if (mem.eql(u8, field_name, "len")) {
const slice = if (is_pointer_to)
try sema.analyzeLoad(block, src, object, object_src)
else
object;
return sema.analyzeSliceLen(block, src, slice);
} else {
return sema.fail(
block,
field_name_src,
"no member named '{s}' in '{}'",
.{ field_name, object_ty.fmt(mod) },
);
}
}
},
.Type => {
const dereffed_type = if (is_pointer_to)
try sema.analyzeLoad(block, src, object, object_src)
else
object;
const val = (try sema.resolveDefinedValue(block, object_src, dereffed_type)).?;
const child_type = val.toType();
switch (try child_type.zigTypeTagOrPoison(mod)) {
.ErrorSet => {
const name = try ip.getOrPutString(gpa, field_name);
switch (ip.indexToKey(child_type.ip_index)) {
.error_set_type => |error_set_type| blk: {
if (error_set_type.nameIndex(ip, name) != null) break :blk;
const msg = msg: {
const msg = try sema.errMsg(block, src, "no error named '{s}' in '{}'", .{
field_name, child_type.fmt(mod),
});
errdefer msg.destroy(sema.gpa);
try sema.addDeclaredHereNote(msg, child_type);
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
},
.inferred_error_set_type => {
return sema.fail(block, src, "TODO handle inferred error sets here", .{});
},
.simple_type => |t| assert(t == .anyerror),
else => unreachable,
}
return sema.addConstant(
if (!child_type.isAnyError(mod))
child_type
else
try mod.singleErrorSetTypeNts(name),
try Value.Tag.@"error".create(arena, .{ .name = ip.stringToSlice(name) }),
);
},
.Union => {
if (child_type.getNamespaceIndex(mod).unwrap()) |namespace| {
if (try sema.namespaceLookupVal(block, src, namespace, field_name)) |inst| {
return inst;
}
}
const union_ty = try sema.resolveTypeFields(child_type);
if (union_ty.unionTagType(mod)) |enum_ty| {
if (enum_ty.enumFieldIndex(field_name, mod)) |field_index_usize| {
const field_index = @intCast(u32, field_index_usize);
return sema.addConstant(
enum_ty,
try mod.enumValueFieldIndex(enum_ty, field_index),
);
}
}
return sema.failWithBadMemberAccess(block, union_ty, field_name_src, field_name);
},
.Enum => {
if (child_type.getNamespaceIndex(mod).unwrap()) |namespace| {
if (try sema.namespaceLookupVal(block, src, namespace, field_name)) |inst| {
return inst;
}
}
const field_index_usize = child_type.enumFieldIndex(field_name, mod) orelse
return sema.failWithBadMemberAccess(block, child_type, field_name_src, field_name);
const field_index = @intCast(u32, field_index_usize);
const enum_val = try mod.enumValueFieldIndex(child_type, field_index);
return sema.addConstant(child_type, enum_val);
},
.Struct, .Opaque => {
if (child_type.getNamespaceIndex(mod).unwrap()) |namespace| {
if (try sema.namespaceLookupVal(block, src, namespace, field_name)) |inst| {
return inst;
}
}
return sema.failWithBadMemberAccess(block, child_type, src, field_name);
},
else => {
const msg = msg: {
const msg = try sema.errMsg(block, src, "type '{}' has no members", .{child_type.fmt(mod)});
errdefer msg.destroy(sema.gpa);
if (child_type.isSlice(mod)) try sema.errNote(block, src, msg, "slice values have 'len' and 'ptr' members", .{});
if (child_type.zigTypeTag(mod) == .Array) try sema.errNote(block, src, msg, "array values have 'len' member", .{});
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
},
}
},
.Struct => if (is_pointer_to) {
// Avoid loading the entire struct by fetching a pointer and loading that
const field_ptr = try sema.structFieldPtr(block, src, object, field_name, field_name_src, inner_ty, false);
return sema.analyzeLoad(block, src, field_ptr, object_src);
} else {
return sema.structFieldVal(block, src, object, field_name, field_name_src, inner_ty);
},
.Union => if (is_pointer_to) {
// Avoid loading the entire union by fetching a pointer and loading that
const field_ptr = try sema.unionFieldPtr(block, src, object, field_name, field_name_src, inner_ty, false);
return sema.analyzeLoad(block, src, field_ptr, object_src);
} else {
return sema.unionFieldVal(block, src, object, field_name, field_name_src, inner_ty);
},
else => {},
}
return sema.failWithInvalidFieldAccess(block, src, object_ty, field_name);
}
fn fieldPtr(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
object_ptr: Air.Inst.Ref,
field_name: []const u8,
field_name_src: LazySrcLoc,
initializing: bool,
) CompileError!Air.Inst.Ref {
// When editing this function, note that there is corresponding logic to be edited
// in `fieldVal`. This function takes a pointer and returns a pointer.
const mod = sema.mod;
const gpa = sema.gpa;
const ip = &mod.intern_pool;
const object_ptr_src = src; // TODO better source location
const object_ptr_ty = sema.typeOf(object_ptr);
const object_ty = switch (object_ptr_ty.zigTypeTag(mod)) {
.Pointer => object_ptr_ty.childType(mod),
else => return sema.fail(block, object_ptr_src, "expected pointer, found '{}'", .{object_ptr_ty.fmt(mod)}),
};
// Zig allows dereferencing a single pointer during field lookup. Note that
// we don't actually need to generate the dereference for some field lookups,
// like the length of arrays and other comptime operations.
const is_pointer_to = object_ty.isSinglePointer(mod);
const inner_ty = if (is_pointer_to)
object_ty.childType(mod)
else
object_ty;
switch (inner_ty.zigTypeTag(mod)) {
.Array => {
if (mem.eql(u8, field_name, "len")) {
var anon_decl = try block.startAnonDecl();
defer anon_decl.deinit();
return sema.analyzeDeclRef(try anon_decl.finish(
Type.usize,
try mod.intValue(Type.usize, inner_ty.arrayLen(mod)),
0, // default alignment
));
} else {
return sema.fail(
block,
field_name_src,
"no member named '{s}' in '{}'",
.{ field_name, object_ty.fmt(mod) },
);
}
},
.Pointer => if (inner_ty.isSlice(mod)) {
const inner_ptr = if (is_pointer_to)
try sema.analyzeLoad(block, src, object_ptr, object_ptr_src)
else
object_ptr;
const attr_ptr_ty = if (is_pointer_to) object_ty else object_ptr_ty;
if (mem.eql(u8, field_name, "ptr")) {
const slice_ptr_ty = inner_ty.slicePtrFieldType(mod);
const result_ty = try Type.ptr(sema.arena, mod, .{
.pointee_type = slice_ptr_ty,
.mutable = attr_ptr_ty.ptrIsMutable(mod),
.@"volatile" = attr_ptr_ty.isVolatilePtr(mod),
.@"addrspace" = attr_ptr_ty.ptrAddressSpace(mod),
});
if (try sema.resolveDefinedValue(block, object_ptr_src, inner_ptr)) |val| {
return sema.addConstant(
result_ty,
try Value.Tag.field_ptr.create(sema.arena, .{
.container_ptr = val,
.container_ty = inner_ty,
.field_index = Value.Payload.Slice.ptr_index,
}),
);
}
try sema.requireRuntimeBlock(block, src, null);
return block.addTyOp(.ptr_slice_ptr_ptr, result_ty, inner_ptr);
} else if (mem.eql(u8, field_name, "len")) {
const result_ty = try Type.ptr(sema.arena, mod, .{
.pointee_type = Type.usize,
.mutable = attr_ptr_ty.ptrIsMutable(mod),
.@"volatile" = attr_ptr_ty.isVolatilePtr(mod),
.@"addrspace" = attr_ptr_ty.ptrAddressSpace(mod),
});
if (try sema.resolveDefinedValue(block, object_ptr_src, inner_ptr)) |val| {
return sema.addConstant(
result_ty,
try Value.Tag.field_ptr.create(sema.arena, .{
.container_ptr = val,
.container_ty = inner_ty,
.field_index = Value.Payload.Slice.len_index,
}),
);
}
try sema.requireRuntimeBlock(block, src, null);
return block.addTyOp(.ptr_slice_len_ptr, result_ty, inner_ptr);
} else {
return sema.fail(
block,
field_name_src,
"no member named '{s}' in '{}'",
.{ field_name, object_ty.fmt(mod) },
);
}
},
.Type => {
_ = try sema.resolveConstValue(block, .unneeded, object_ptr, "");
const result = try sema.analyzeLoad(block, src, object_ptr, object_ptr_src);
const inner = if (is_pointer_to)
try sema.analyzeLoad(block, src, result, object_ptr_src)
else
result;
const val = (sema.resolveDefinedValue(block, src, inner) catch unreachable).?;
const child_type = val.toType();
switch (child_type.zigTypeTag(mod)) {
.ErrorSet => {
const name = try ip.getOrPutString(gpa, field_name);
switch (ip.indexToKey(child_type.ip_index)) {
.error_set_type => |error_set_type| blk: {
if (error_set_type.nameIndex(ip, name) != null) {
break :blk;
}
return sema.fail(block, src, "no error named '{s}' in '{}'", .{
field_name, child_type.fmt(mod),
});
},
.inferred_error_set_type => {
return sema.fail(block, src, "TODO handle inferred error sets here", .{});
},
.simple_type => |t| assert(t == .anyerror),
else => unreachable,
}
var anon_decl = try block.startAnonDecl();
defer anon_decl.deinit();
return sema.analyzeDeclRef(try anon_decl.finish(
if (!child_type.isAnyError(mod))
child_type
else
try mod.singleErrorSetTypeNts(name),
try Value.Tag.@"error".create(anon_decl.arena(), .{
.name = ip.stringToSlice(name),
}),
0, // default alignment
));
},
.Union => {
if (child_type.getNamespaceIndex(mod).unwrap()) |namespace| {
if (try sema.namespaceLookupRef(block, src, namespace, field_name)) |inst| {
return inst;
}
}
const union_ty = try sema.resolveTypeFields(child_type);
if (union_ty.unionTagType(mod)) |enum_ty| {
if (enum_ty.enumFieldIndex(field_name, mod)) |field_index| {
const field_index_u32 = @intCast(u32, field_index);
var anon_decl = try block.startAnonDecl();
defer anon_decl.deinit();
return sema.analyzeDeclRef(try anon_decl.finish(
enum_ty,
try mod.enumValueFieldIndex(enum_ty, field_index_u32),
0, // default alignment
));
}
}
return sema.failWithBadMemberAccess(block, child_type, field_name_src, field_name);
},
.Enum => {
if (child_type.getNamespaceIndex(mod).unwrap()) |namespace| {
if (try sema.namespaceLookupRef(block, src, namespace, field_name)) |inst| {
return inst;
}
}
const field_index = child_type.enumFieldIndex(field_name, mod) orelse {
return sema.failWithBadMemberAccess(block, child_type, field_name_src, field_name);
};
const field_index_u32 = @intCast(u32, field_index);
var anon_decl = try block.startAnonDecl();
defer anon_decl.deinit();
return sema.analyzeDeclRef(try anon_decl.finish(
child_type,
try mod.enumValueFieldIndex(child_type, field_index_u32),
0, // default alignment
));
},
.Struct, .Opaque => {
if (child_type.getNamespaceIndex(mod).unwrap()) |namespace| {
if (try sema.namespaceLookupRef(block, src, namespace, field_name)) |inst| {
return inst;
}
}
return sema.failWithBadMemberAccess(block, child_type, field_name_src, field_name);
},
else => return sema.fail(block, src, "type '{}' has no members", .{child_type.fmt(mod)}),
}
},
.Struct => {
const inner_ptr = if (is_pointer_to)
try sema.analyzeLoad(block, src, object_ptr, object_ptr_src)
else
object_ptr;
return sema.structFieldPtr(block, src, inner_ptr, field_name, field_name_src, inner_ty, initializing);
},
.Union => {
const inner_ptr = if (is_pointer_to)
try sema.analyzeLoad(block, src, object_ptr, object_ptr_src)
else
object_ptr;
return sema.unionFieldPtr(block, src, inner_ptr, field_name, field_name_src, inner_ty, initializing);
},
else => {},
}
return sema.failWithInvalidFieldAccess(block, src, object_ty, field_name);
}
const ResolvedFieldCallee = union(enum) {
/// The LHS of the call was an actual field with this value.
direct: Air.Inst.Ref,
/// This is a method call, with the function and first argument given.
method: struct {
func_inst: Air.Inst.Ref,
arg0_inst: Air.Inst.Ref,
},
};
fn fieldCallBind(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
raw_ptr: Air.Inst.Ref,
field_name: []const u8,
field_name_src: LazySrcLoc,
) CompileError!ResolvedFieldCallee {
// When editing this function, note that there is corresponding logic to be edited
// in `fieldVal`. This function takes a pointer and returns the field value, or a
// method binding when the name resolves to a member function.
const mod = sema.mod;
const raw_ptr_src = src; // TODO better source location
const raw_ptr_ty = sema.typeOf(raw_ptr);
const inner_ty = if (raw_ptr_ty.zigTypeTag(mod) == .Pointer and (raw_ptr_ty.ptrSize(mod) == .One or raw_ptr_ty.ptrSize(mod) == .C))
raw_ptr_ty.childType(mod)
else
return sema.fail(block, raw_ptr_src, "expected single pointer, found '{}'", .{raw_ptr_ty.fmt(mod)});
// Optionally dereference a second pointer to get the concrete type.
const is_double_ptr = inner_ty.zigTypeTag(mod) == .Pointer and inner_ty.ptrSize(mod) == .One;
const concrete_ty = if (is_double_ptr) inner_ty.childType(mod) else inner_ty;
const ptr_ty = if (is_double_ptr) inner_ty else raw_ptr_ty;
const object_ptr = if (is_double_ptr)
try sema.analyzeLoad(block, src, raw_ptr, src)
else
raw_ptr;
find_field: {
switch (concrete_ty.zigTypeTag(mod)) {
.Struct => {
const struct_ty = try sema.resolveTypeFields(concrete_ty);
if (mod.typeToStruct(struct_ty)) |struct_obj| {
const field_index_usize = struct_obj.fields.getIndex(field_name) orelse
break :find_field;
const field_index = @intCast(u32, field_index_usize);
const field = struct_obj.fields.values()[field_index];
return sema.finishFieldCallBind(block, src, ptr_ty, field.ty, field_index, object_ptr);
} else if (struct_ty.isTuple(mod)) {
if (mem.eql(u8, field_name, "len")) {
return .{ .direct = try sema.addIntUnsigned(Type.usize, struct_ty.structFieldCount(mod)) };
}
if (std.fmt.parseUnsigned(u32, field_name, 10)) |field_index| {
if (field_index >= struct_ty.structFieldCount(mod)) break :find_field;
return sema.finishFieldCallBind(block, src, ptr_ty, struct_ty.structFieldType(field_index, mod), field_index, object_ptr);
} else |_| {}
} else {
const max = struct_ty.structFieldCount(mod);
var i: u32 = 0;
while (i < max) : (i += 1) {
if (mem.eql(u8, struct_ty.structFieldName(i, mod), field_name)) {
return sema.finishFieldCallBind(block, src, ptr_ty, struct_ty.structFieldType(i, mod), i, object_ptr);
}
}
}
},
.Union => {
const union_ty = try sema.resolveTypeFields(concrete_ty);
const fields = union_ty.unionFields(mod);
const field_index_usize = fields.getIndex(field_name) orelse break :find_field;
const field_index = @intCast(u32, field_index_usize);
const field = fields.values()[field_index];
return sema.finishFieldCallBind(block, src, ptr_ty, field.ty, field_index, object_ptr);
},
.Type => {
const namespace = try sema.analyzeLoad(block, src, object_ptr, src);
return .{ .direct = try sema.fieldVal(block, src, namespace, field_name, field_name_src) };
},
else => {},
}
}
// If we get here, we need to look for a decl in the type's namespace instead.
const found_decl = switch (concrete_ty.zigTypeTag(mod)) {
.Struct, .Opaque, .Union, .Enum => found_decl: {
if (concrete_ty.getNamespaceIndex(mod).unwrap()) |namespace| {
if (try sema.namespaceLookup(block, src, namespace, field_name)) |decl_idx| {
try sema.addReferencedBy(block, src, decl_idx);
const decl_val = try sema.analyzeDeclVal(block, src, decl_idx);
const decl_type = sema.typeOf(decl_val);
if (mod.typeToFunc(decl_type)) |func_type| f: {
if (func_type.param_types.len == 0) break :f;
const first_param_type = func_type.param_types[0].toType();
// zig fmt: off
if (first_param_type.isGenericPoison() or (
first_param_type.zigTypeTag(mod) == .Pointer and
(first_param_type.ptrSize(mod) == .One or
first_param_type.ptrSize(mod) == .C) and
first_param_type.childType(mod).eql(concrete_ty, mod)))
{
// zig fmt: on
// Note that if the param type is generic poison, we know that it must
// specifically be `anytype` since it's the first parameter, meaning we
// can safely assume it can be a pointer.
// TODO: bound fn calls on rvalues should probably
// generate a by-value argument somehow.
return .{ .method = .{
.func_inst = decl_val,
.arg0_inst = object_ptr,
} };
} else if (first_param_type.eql(concrete_ty, mod)) {
const deref = try sema.analyzeLoad(block, src, object_ptr, src);
return .{ .method = .{
.func_inst = decl_val,
.arg0_inst = deref,
} };
} else if (first_param_type.zigTypeTag(mod) == .Optional) {
const child = first_param_type.optionalChild(mod);
if (child.eql(concrete_ty, mod)) {
const deref = try sema.analyzeLoad(block, src, object_ptr, src);
return .{ .method = .{
.func_inst = decl_val,
.arg0_inst = deref,
} };
} else if (child.zigTypeTag(mod) == .Pointer and
child.ptrSize(mod) == .One and
child.childType(mod).eql(concrete_ty, mod))
{
return .{ .method = .{
.func_inst = decl_val,
.arg0_inst = object_ptr,
} };
}
} else if (first_param_type.zigTypeTag(mod) == .ErrorUnion and
first_param_type.errorUnionPayload(mod).eql(concrete_ty, mod))
{
const deref = try sema.analyzeLoad(block, src, object_ptr, src);
return .{ .method = .{
.func_inst = decl_val,
.arg0_inst = deref,
} };
}
}
break :found_decl decl_idx;
}
}
break :found_decl null;
},
else => null,
};
const msg = msg: {
const msg = try sema.errMsg(block, src, "no field or member function named '{s}' in '{}'", .{ field_name, concrete_ty.fmt(mod) });
errdefer msg.destroy(sema.gpa);
try sema.addDeclaredHereNote(msg, concrete_ty);
if (found_decl) |decl_idx| {
const decl = mod.declPtr(decl_idx);
try mod.errNoteNonLazy(decl.srcLoc(mod), msg, "'{s}' is not a member function", .{field_name});
}
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
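/// Produces the `.direct` result for a field call: returns the comptime
/// value when the field is comptime-known, otherwise derives a field
/// pointer and loads through it.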
fn finishFieldCallBind(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
ptr_ty: Type,
field_ty: Type,
field_index: u32,
object_ptr: Air.Inst.Ref,
) CompileError!ResolvedFieldCallee {
const mod = sema.mod;
const arena = sema.arena;
const ptr_field_ty = try Type.ptr(arena, mod, .{
.pointee_type = field_ty,
.mutable = ptr_ty.ptrIsMutable(mod),
.@"addrspace" = ptr_ty.ptrAddressSpace(mod),
});
const container_ty = ptr_ty.childType(mod);
if (container_ty.zigTypeTag(mod) == .Struct) {
if (try container_ty.structFieldValueComptime(mod, field_index)) |default_val| {
return .{ .direct = try sema.addConstant(field_ty, default_val) };
}
}
if (try sema.resolveDefinedValue(block, src, object_ptr)) |struct_ptr_val| {
const pointer = try sema.addConstant(
ptr_field_ty,
try Value.Tag.field_ptr.create(arena, .{
.container_ptr = struct_ptr_val,
.container_ty = container_ty,
.field_index = field_index,
}),
);
return .{ .direct = try sema.analyzeLoad(block, src, pointer, src) };
}
try sema.requireRuntimeBlock(block, src, null);
const ptr_inst = try block.addStructFieldPtr(object_ptr, field_index, ptr_field_ty);
return .{ .direct = try sema.analyzeLoad(block, src, ptr_inst, src) };
}
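/// Looks up `decl_name` in `namespace`, returning its `Decl.Index` or null.
/// Fails with a "not marked 'pub'" error when the decl is private to another
/// file.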
fn namespaceLookup(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
namespace: Namespace.Index,
decl_name: []const u8,
) CompileError!?Decl.Index {
const mod = sema.mod;
const gpa = sema.gpa;
if (try sema.lookupInNamespace(block, src, namespace, decl_name, true)) |decl_index| {
const decl = mod.declPtr(decl_index);
if (!decl.is_pub and decl.getFileScope(mod) != block.getFileScope(mod)) {
const msg = msg: {
const msg = try sema.errMsg(block, src, "'{s}' is not marked 'pub'", .{
decl_name,
});
errdefer msg.destroy(gpa);
try mod.errNoteNonLazy(decl.srcLoc(mod), msg, "declared here", .{});
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
return decl_index;
}
return null;
}
fn namespaceLookupRef(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
namespace: Namespace.Index,
decl_name: []const u8,
) CompileError!?Air.Inst.Ref {
const decl = (try sema.namespaceLookup(block, src, namespace, decl_name)) orelse return null;
try sema.addReferencedBy(block, src, decl);
return try sema.analyzeDeclRef(decl);
}
fn namespaceLookupVal(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
namespace: Namespace.Index,
decl_name: []const u8,
) CompileError!?Air.Inst.Ref {
const decl = (try sema.namespaceLookup(block, src, namespace, decl_name)) orelse return null;
return try sema.analyzeDeclVal(block, src, decl);
}
fn structFieldPtr(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
struct_ptr: Air.Inst.Ref,
field_name: []const u8,
field_name_src: LazySrcLoc,
unresolved_struct_ty: Type,
initializing: bool,
) CompileError!Air.Inst.Ref {
const mod = sema.mod;
assert(unresolved_struct_ty.zigTypeTag(mod) == .Struct);
const struct_ty = try sema.resolveTypeFields(unresolved_struct_ty);
try sema.resolveStructLayout(struct_ty);
if (struct_ty.isTuple(mod)) {
if (mem.eql(u8, field_name, "len")) {
const len_inst = try sema.addIntUnsigned(Type.usize, struct_ty.structFieldCount(mod));
return sema.analyzeRef(block, src, len_inst);
}
const field_index = try sema.tupleFieldIndex(block, struct_ty, field_name, field_name_src);
return sema.tupleFieldPtr(block, src, struct_ptr, field_name_src, field_index, initializing);
} else if (struct_ty.isAnonStruct(mod)) {
const field_index = try sema.anonStructFieldIndex(block, struct_ty, field_name, field_name_src);
return sema.tupleFieldPtr(block, src, struct_ptr, field_name_src, field_index, initializing);
}
const struct_obj = mod.typeToStruct(struct_ty).?;
const field_index_big = struct_obj.fields.getIndex(field_name) orelse
return sema.failWithBadStructFieldAccess(block, struct_obj, field_name_src, field_name);
const field_index = @intCast(u32, field_index_big);
return sema.structFieldPtrByIndex(block, src, struct_ptr, field_index, field_name_src, struct_ty, initializing);
}
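/// Emits a pointer to field `field_index` of `struct_ty`, computing the bit
/// offset and host size for packed structs, and taking comptime-known field
/// values into account.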
fn structFieldPtrByIndex(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
struct_ptr: Air.Inst.Ref,
field_index: u32,
field_src: LazySrcLoc,
struct_ty: Type,
initializing: bool,
) CompileError!Air.Inst.Ref {
const mod = sema.mod;
if (struct_ty.isAnonStruct(mod)) {
return sema.tupleFieldPtr(block, src, struct_ptr, field_src, field_index, initializing);
}
const struct_obj = mod.typeToStruct(struct_ty).?;
const field = struct_obj.fields.values()[field_index];
const struct_ptr_ty = sema.typeOf(struct_ptr);
const struct_ptr_ty_info = struct_ptr_ty.ptrInfo(mod);
var ptr_ty_data: Type.Payload.Pointer.Data = .{
.pointee_type = field.ty,
.mutable = struct_ptr_ty_info.mutable,
.@"volatile" = struct_ptr_ty_info.@"volatile",
.@"addrspace" = struct_ptr_ty_info.@"addrspace",
};
const target = mod.getTarget();
if (struct_obj.layout == .Packed) {
comptime assert(Type.packed_struct_layout_version == 2);
var running_bits: u16 = 0;
for (struct_obj.fields.values(), 0..) |f, i| {
if (!(try sema.typeHasRuntimeBits(f.ty))) continue;
if (i == field_index) {
ptr_ty_data.bit_offset = running_bits;
}
running_bits += @intCast(u16, f.ty.bitSize(mod));
}
ptr_ty_data.host_size = (running_bits + 7) / 8;
// If this is a packed struct embedded in another one, we need to offset
// the bits against each other.
if (struct_ptr_ty_info.host_size != 0) {
ptr_ty_data.host_size = struct_ptr_ty_info.host_size;
ptr_ty_data.bit_offset += struct_ptr_ty_info.bit_offset;
}
const parent_align = if (struct_ptr_ty_info.@"align" != 0)
struct_ptr_ty_info.@"align"
else
struct_ptr_ty_info.pointee_type.abiAlignment(mod);
ptr_ty_data.@"align" = parent_align;
// If the field happens to be byte-aligned, simplify the pointer type.
// The pointee type bit size must match its ABI byte size so that loads and stores
// do not interfere with the surrounding packed bits.
// We do not attempt this with big-endian targets yet because of nested
// structs and floats. I need to double-check the desired behavior for big-endian
// targets before adding the necessary complications to this code. This will not
// cause miscompilations; it only means the field pointer uses bit masking when it
// might not be strictly necessary.
if (parent_align != 0 and ptr_ty_data.bit_offset % 8 == 0 and
target.cpu.arch.endian() == .Little)
{
const elem_size_bytes = ptr_ty_data.pointee_type.abiSize(mod);
const elem_size_bits = ptr_ty_data.pointee_type.bitSize(mod);
if (elem_size_bytes * 8 == elem_size_bits) {
const byte_offset = ptr_ty_data.bit_offset / 8;
const new_align = @as(u32, 1) << @intCast(u5, @ctz(byte_offset | parent_align));
ptr_ty_data.bit_offset = 0;
ptr_ty_data.host_size = 0;
ptr_ty_data.@"align" = new_align;
}
}
} else {
ptr_ty_data.@"align" = field.abi_align;
}
const ptr_field_ty = try Type.ptr(sema.arena, mod, ptr_ty_data);
if (field.is_comptime) {
const val = try Value.Tag.comptime_field_ptr.create(sema.arena, .{
.field_ty = field.ty,
.field_val = try field.default_val.copy(sema.arena),
});
return sema.addConstant(ptr_field_ty, val);
}
if (try sema.resolveDefinedValue(block, src, struct_ptr)) |struct_ptr_val| {
return sema.addConstant(
ptr_field_ty,
try Value.Tag.field_ptr.create(sema.arena, .{
.container_ptr = struct_ptr_val,
.container_ty = struct_ptr_ty.childType(mod),
.field_index = field_index,
}),
);
}
try sema.requireRuntimeBlock(block, src, null);
return block.addStructFieldPtr(struct_ptr, field_index, ptr_field_ty);
}
fn structFieldVal(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
struct_byval: Air.Inst.Ref,
field_name: []const u8,
field_name_src: LazySrcLoc,
unresolved_struct_ty: Type,
) CompileError!Air.Inst.Ref {
const mod = sema.mod;
assert(unresolved_struct_ty.zigTypeTag(mod) == .Struct);
const struct_ty = try sema.resolveTypeFields(unresolved_struct_ty);
switch (mod.intern_pool.indexToKey(struct_ty.ip_index)) {
.struct_type => |struct_type| {
const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
if (struct_obj.is_tuple) return sema.tupleFieldVal(block, src, struct_byval, field_name, field_name_src, struct_ty);
const field_index_usize = struct_obj.fields.getIndex(field_name) orelse
return sema.failWithBadStructFieldAccess(block, struct_obj, field_name_src, field_name);
const field_index = @intCast(u32, field_index_usize);
const field = struct_obj.fields.values()[field_index];
if (field.is_comptime) {
return sema.addConstant(field.ty, field.default_val);
}
if (try sema.resolveMaybeUndefVal(struct_byval)) |struct_val| {
if (struct_val.isUndef(mod)) return sema.addConstUndef(field.ty);
if ((try sema.typeHasOnePossibleValue(field.ty))) |opv| {
return sema.addConstant(field.ty, opv);
}
return sema.addConstant(field.ty, try struct_val.fieldValue(field.ty, mod, field_index));
}
try sema.requireRuntimeBlock(block, src, null);
return block.addStructFieldVal(struct_byval, field_index, field.ty);
},
.anon_struct_type => |anon_struct| {
if (anon_struct.names.len == 0) {
return sema.tupleFieldVal(block, src, struct_byval, field_name, field_name_src, struct_ty);
} else {
const field_index = try sema.anonStructFieldIndex(block, struct_ty, field_name, field_name_src);
return sema.tupleFieldValByIndex(block, src, struct_byval, field_index, struct_ty);
}
},
else => unreachable,
}
}
fn tupleFieldVal(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
tuple_byval: Air.Inst.Ref,
field_name: []const u8,
field_name_src: LazySrcLoc,
tuple_ty: Type,
) CompileError!Air.Inst.Ref {
const mod = sema.mod;
if (mem.eql(u8, field_name, "len")) {
return sema.addIntUnsigned(Type.usize, tuple_ty.structFieldCount(mod));
}
const field_index = try sema.tupleFieldIndex(block, tuple_ty, field_name, field_name_src);
return sema.tupleFieldValByIndex(block, src, tuple_byval, field_index, tuple_ty);
}
/// Asserts that `field_name` is not "len".
fn tupleFieldIndex(
sema: *Sema,
block: *Block,
tuple_ty: Type,
field_name: []const u8,
field_name_src: LazySrcLoc,
) CompileError!u32 {
const mod = sema.mod;
assert(!std.mem.eql(u8, field_name, "len"));
if (std.fmt.parseUnsigned(u32, field_name, 10)) |field_index| {
if (field_index < tuple_ty.structFieldCount(mod)) return field_index;
return sema.fail(block, field_name_src, "index '{s}' out of bounds of tuple '{}'", .{
field_name, tuple_ty.fmt(mod),
});
} else |_| {}
return sema.fail(block, field_name_src, "no field named '{s}' in tuple '{}'", .{
field_name, tuple_ty.fmt(mod),
});
}
fn tupleFieldValByIndex(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
tuple_byval: Air.Inst.Ref,
field_index: u32,
tuple_ty: Type,
) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const field_ty = tuple_ty.structFieldType(field_index, mod);
if (try tuple_ty.structFieldValueComptime(mod, field_index)) |default_value| {
return sema.addConstant(field_ty, default_value);
}
if (try sema.resolveMaybeUndefVal(tuple_byval)) |tuple_val| {
if (tuple_val.isUndef(mod)) return sema.addConstUndef(field_ty);
if ((try sema.typeHasOnePossibleValue(field_ty))) |opv| {
return sema.addConstant(field_ty, opv);
}
const field_values = tuple_val.castTag(.aggregate).?.data;
return sema.addConstant(field_ty, field_values[field_index]);
}
try sema.requireRuntimeBlock(block, src, null);
return block.addStructFieldVal(tuple_byval, field_index, field_ty);
}
fn unionFieldPtr(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
union_ptr: Air.Inst.Ref,
field_name: []const u8,
field_name_src: LazySrcLoc,
unresolved_union_ty: Type,
initializing: bool,
) CompileError!Air.Inst.Ref {
const arena = sema.arena;
const mod = sema.mod;
assert(unresolved_union_ty.zigTypeTag(mod) == .Union);
const union_ptr_ty = sema.typeOf(union_ptr);
const union_ty = try sema.resolveTypeFields(unresolved_union_ty);
const union_obj = mod.typeToUnion(union_ty).?;
const field_index = try sema.unionFieldIndex(block, union_ty, field_name, field_name_src);
const field = union_obj.fields.values()[field_index];
const ptr_field_ty = try Type.ptr(arena, mod, .{
.pointee_type = field.ty,
.mutable = union_ptr_ty.ptrIsMutable(mod),
.@"volatile" = union_ptr_ty.isVolatilePtr(mod),
.@"addrspace" = union_ptr_ty.ptrAddressSpace(mod),
});
const enum_field_index = @intCast(u32, union_obj.tag_ty.enumFieldIndex(field_name, mod).?);
if (initializing and field.ty.zigTypeTag(mod) == .NoReturn) {
const msg = msg: {
const msg = try sema.errMsg(block, src, "cannot initialize 'noreturn' field of union", .{});
errdefer msg.destroy(sema.gpa);
try sema.addFieldErrNote(union_ty, field_index, msg, "field '{s}' declared here", .{field_name});
try sema.addDeclaredHereNote(msg, union_ty);
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
if (try sema.resolveDefinedValue(block, src, union_ptr)) |union_ptr_val| ct: {
switch (union_obj.layout) {
.Auto => if (!initializing) {
const union_val = (try sema.pointerDeref(block, src, union_ptr_val, union_ptr_ty)) orelse
break :ct;
if (union_val.isUndef(mod)) {
return sema.failWithUseOfUndef(block, src);
}
const tag_and_val = union_val.castTag(.@"union").?.data;
const field_tag = try mod.enumValueFieldIndex(union_obj.tag_ty, enum_field_index);
const tag_matches = tag_and_val.tag.eql(field_tag, union_obj.tag_ty, mod);
if (!tag_matches) {
const msg = msg: {
const active_index = union_obj.tag_ty.enumTagFieldIndex(tag_and_val.tag, mod).?;
const active_field_name = union_obj.tag_ty.enumFieldName(active_index, mod);
const msg = try sema.errMsg(block, src, "access of union field '{s}' while field '{s}' is active", .{ field_name, active_field_name });
errdefer msg.destroy(sema.gpa);
try sema.addDeclaredHereNote(msg, union_ty);
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
},
.Packed, .Extern => {},
}
return sema.addConstant(
ptr_field_ty,
try Value.Tag.field_ptr.create(arena, .{
.container_ptr = union_ptr_val,
.container_ty = union_ty,
.field_index = field_index,
}),
);
}
try sema.requireRuntimeBlock(block, src, null);
if (!initializing and union_obj.layout == .Auto and block.wantSafety() and
union_ty.unionTagTypeSafety(mod) != null and union_obj.fields.count() > 1)
{
const wanted_tag_val = try mod.enumValueFieldIndex(union_obj.tag_ty, enum_field_index);
const wanted_tag = try sema.addConstant(union_obj.tag_ty, wanted_tag_val);
// TODO would it be better if get_union_tag supported pointers to unions?
const union_val = try block.addTyOp(.load, union_ty, union_ptr);
const active_tag = try block.addTyOp(.get_union_tag, union_obj.tag_ty, union_val);
try sema.panicInactiveUnionField(block, active_tag, wanted_tag);
}
if (field.ty.zigTypeTag(mod) == .NoReturn) {
_ = try block.addNoOp(.unreach);
return Air.Inst.Ref.unreachable_value;
}
return block.addStructFieldPtr(union_ptr, field_index, ptr_field_ty);
}
fn unionFieldVal(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
union_byval: Air.Inst.Ref,
field_name: []const u8,
field_name_src: LazySrcLoc,
unresolved_union_ty: Type,
) CompileError!Air.Inst.Ref {
const mod = sema.mod;
assert(unresolved_union_ty.zigTypeTag(mod) == .Union);
const union_ty = try sema.resolveTypeFields(unresolved_union_ty);
const union_obj = mod.typeToUnion(union_ty).?;
const field_index = try sema.unionFieldIndex(block, union_ty, field_name, field_name_src);
const field = union_obj.fields.values()[field_index];
const enum_field_index = @intCast(u32, union_obj.tag_ty.enumFieldIndex(field_name, mod).?);
if (try sema.resolveMaybeUndefVal(union_byval)) |union_val| {
if (union_val.isUndef(mod)) return sema.addConstUndef(field.ty);
const tag_and_val = union_val.castTag(.@"union").?.data;
const field_tag = try mod.enumValueFieldIndex(union_obj.tag_ty, enum_field_index);
const tag_matches = tag_and_val.tag.eql(field_tag, union_obj.tag_ty, mod);
switch (union_obj.layout) {
.Auto => {
if (tag_matches) {
return sema.addConstant(field.ty, tag_and_val.val);
} else {
const msg = msg: {
const active_index = union_obj.tag_ty.enumTagFieldIndex(tag_and_val.tag, mod).?;
const active_field_name = union_obj.tag_ty.enumFieldName(active_index, mod);
const msg = try sema.errMsg(block, src, "access of union field '{s}' while field '{s}' is active", .{ field_name, active_field_name });
errdefer msg.destroy(sema.gpa);
try sema.addDeclaredHereNote(msg, union_ty);
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
},
.Packed, .Extern => {
if (tag_matches) {
return sema.addConstant(field.ty, tag_and_val.val);
} else {
const old_ty = union_ty.unionFieldType(tag_and_val.tag, mod);
if (try sema.bitCastVal(block, src, tag_and_val.val, old_ty, field.ty, 0)) |new_val| {
return sema.addConstant(field.ty, new_val);
}
}
},
}
}
try sema.requireRuntimeBlock(block, src, null);
if (union_obj.layout == .Auto and block.wantSafety() and
union_ty.unionTagTypeSafety(mod) != null and union_obj.fields.count() > 1)
{
const wanted_tag_val = try mod.enumValueFieldIndex(union_obj.tag_ty, enum_field_index);
const wanted_tag = try sema.addConstant(union_obj.tag_ty, wanted_tag_val);
const active_tag = try block.addTyOp(.get_union_tag, union_obj.tag_ty, union_byval);
try sema.panicInactiveUnionField(block, active_tag, wanted_tag);
}
if (field.ty.zigTypeTag(mod) == .NoReturn) {
_ = try block.addNoOp(.unreach);
return Air.Inst.Ref.unreachable_value;
}
return block.addStructFieldVal(union_byval, field_index, field.ty);
}
fn elemPtr(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
indexable_ptr: Air.Inst.Ref,
elem_index: Air.Inst.Ref,
elem_index_src: LazySrcLoc,
init: bool,
oob_safety: bool,
) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const indexable_ptr_src = src; // TODO better source location
const indexable_ptr_ty = sema.typeOf(indexable_ptr);
const indexable_ty = switch (indexable_ptr_ty.zigTypeTag(mod)) {
.Pointer => indexable_ptr_ty.childType(mod),
else => return sema.fail(block, indexable_ptr_src, "expected pointer, found '{}'", .{indexable_ptr_ty.fmt(mod)}),
};
try checkIndexable(sema, block, src, indexable_ty);
switch (indexable_ty.zigTypeTag(mod)) {
.Array, .Vector => return sema.elemPtrArray(block, src, indexable_ptr_src, indexable_ptr, elem_index_src, elem_index, init, oob_safety),
.Struct => {
// Tuple field access.
const index_val = try sema.resolveConstValue(block, elem_index_src, elem_index, "tuple field access index must be comptime-known");
const index = @intCast(u32, index_val.toUnsignedInt(mod));
return sema.tupleFieldPtr(block, src, indexable_ptr, elem_index_src, index, init);
},
else => {
const indexable = try sema.analyzeLoad(block, indexable_ptr_src, indexable_ptr, indexable_ptr_src);
return elemPtrOneLayerOnly(sema, block, src, indexable, elem_index, elem_index_src, init, oob_safety);
},
}
}
/// Asserts that the type of `indexable` is a pointer.
fn elemPtrOneLayerOnly(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
indexable: Air.Inst.Ref,
elem_index: Air.Inst.Ref,
elem_index_src: LazySrcLoc,
init: bool,
oob_safety: bool,
) CompileError!Air.Inst.Ref {
const indexable_src = src; // TODO better source location
const indexable_ty = sema.typeOf(indexable);
const mod = sema.mod;
try checkIndexable(sema, block, src, indexable_ty);
switch (indexable_ty.ptrSize(mod)) {
.Slice => return sema.elemPtrSlice(block, src, indexable_src, indexable, elem_index_src, elem_index, oob_safety),
.Many, .C => {
const maybe_ptr_val = try sema.resolveDefinedValue(block, indexable_src, indexable);
const maybe_index_val = try sema.resolveDefinedValue(block, elem_index_src, elem_index);
const runtime_src = rs: {
const ptr_val = maybe_ptr_val orelse break :rs indexable_src;
const index_val = maybe_index_val orelse break :rs elem_index_src;
const index = @intCast(usize, index_val.toUnsignedInt(mod));
const elem_ptr = try ptr_val.elemPtr(indexable_ty, sema.arena, index, mod);
const result_ty = try sema.elemPtrType(indexable_ty, index);
return sema.addConstant(result_ty, elem_ptr);
};
const result_ty = try sema.elemPtrType(indexable_ty, null);
try sema.requireRuntimeBlock(block, src, runtime_src);
return block.addPtrElemPtr(indexable, elem_index, result_ty);
},
.One => {
assert(indexable_ty.childType(mod).zigTypeTag(mod) == .Array); // Guaranteed by checkIndexable
return sema.elemPtrArray(block, src, indexable_src, indexable, elem_index_src, elem_index, init, oob_safety);
},
}
}
fn elemVal(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
indexable: Air.Inst.Ref,
elem_index_uncasted: Air.Inst.Ref,
elem_index_src: LazySrcLoc,
oob_safety: bool,
) CompileError!Air.Inst.Ref {
const indexable_src = src; // TODO better source location
const indexable_ty = sema.typeOf(indexable);
const mod = sema.mod;
try checkIndexable(sema, block, src, indexable_ty);
// TODO in case of a vector of pointers, we need to detect whether the element
// index is a scalar or vector instead of unconditionally casting to usize.
const elem_index = try sema.coerce(block, Type.usize, elem_index_uncasted, elem_index_src);
switch (indexable_ty.zigTypeTag(mod)) {
.Pointer => switch (indexable_ty.ptrSize(mod)) {
.Slice => return sema.elemValSlice(block, src, indexable_src, indexable, elem_index_src, elem_index, oob_safety),
.Many, .C => {
const maybe_indexable_val = try sema.resolveDefinedValue(block, indexable_src, indexable);
const maybe_index_val = try sema.resolveDefinedValue(block, elem_index_src, elem_index);
const runtime_src = rs: {
const indexable_val = maybe_indexable_val orelse break :rs indexable_src;
const index_val = maybe_index_val orelse break :rs elem_index_src;
const index = @intCast(usize, index_val.toUnsignedInt(mod));
const elem_ptr_val = try indexable_val.elemPtr(indexable_ty, sema.arena, index, mod);
if (try sema.pointerDeref(block, indexable_src, elem_ptr_val, indexable_ty)) |elem_val| {
return sema.addConstant(indexable_ty.elemType2(mod), elem_val);
}
break :rs indexable_src;
};
try sema.requireRuntimeBlock(block, src, runtime_src);
return block.addBinOp(.ptr_elem_val, indexable, elem_index);
},
.One => {
assert(indexable_ty.childType(mod).zigTypeTag(mod) == .Array); // Guaranteed by checkIndexable
const elem_ptr = try sema.elemPtr(block, indexable_src, indexable, elem_index, elem_index_src, false, oob_safety);
return sema.analyzeLoad(block, indexable_src, elem_ptr, elem_index_src);
},
},
.Array => return sema.elemValArray(block, src, indexable_src, indexable, elem_index_src, elem_index, oob_safety),
.Vector => {
// TODO: If the index is a vector, the result should be a vector.
return sema.elemValArray(block, src, indexable_src, indexable, elem_index_src, elem_index, oob_safety);
},
.Struct => {
// Tuple field access.
const index_val = try sema.resolveConstValue(block, elem_index_src, elem_index, "tuple field access index must be comptime-known");
const index = @intCast(u32, index_val.toUnsignedInt(mod));
return sema.tupleField(block, indexable_src, indexable, elem_index_src, index);
},
else => unreachable,
}
}
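/// Errors when a runtime-known index would produce a value of a
/// comptime-only element type.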
fn validateRuntimeElemAccess(
sema: *Sema,
block: *Block,
elem_index_src: LazySrcLoc,
elem_ty: Type,
parent_ty: Type,
parent_src: LazySrcLoc,
) CompileError!void {
const mod = sema.mod;
const valid_rt = try sema.validateRunTimeType(elem_ty, false);
if (!valid_rt) {
const msg = msg: {
const msg = try sema.errMsg(
block,
elem_index_src,
"values of type '{}' must be comptime-known, but index value is runtime-known",
.{parent_ty.fmt(mod)},
);
errdefer msg.destroy(sema.gpa);
const src_decl = mod.declPtr(block.src_decl);
try sema.explainWhyTypeIsComptime(msg, parent_src.toSrcLoc(src_decl, mod), parent_ty);
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
}
fn tupleFieldPtr(
sema: *Sema,
block: *Block,
tuple_ptr_src: LazySrcLoc,
tuple_ptr: Air.Inst.Ref,
field_index_src: LazySrcLoc,
field_index: u32,
init: bool,
) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const tuple_ptr_ty = sema.typeOf(tuple_ptr);
const tuple_ty = tuple_ptr_ty.childType(mod);
_ = try sema.resolveTypeFields(tuple_ty);
const field_count = tuple_ty.structFieldCount(mod);
if (field_count == 0) {
return sema.fail(block, tuple_ptr_src, "indexing into empty tuple is not allowed", .{});
}
if (field_index >= field_count) {
return sema.fail(block, field_index_src, "index {d} outside tuple of length {d}", .{
field_index, field_count,
});
}
const field_ty = tuple_ty.structFieldType(field_index, mod);
const ptr_field_ty = try Type.ptr(sema.arena, mod, .{
.pointee_type = field_ty,
.mutable = tuple_ptr_ty.ptrIsMutable(mod),
.@"volatile" = tuple_ptr_ty.isVolatilePtr(mod),
.@"addrspace" = tuple_ptr_ty.ptrAddressSpace(mod),
});
if (try tuple_ty.structFieldValueComptime(mod, field_index)) |default_val| {
const val = try Value.Tag.comptime_field_ptr.create(sema.arena, .{
.field_ty = field_ty,
.field_val = default_val,
});
return sema.addConstant(ptr_field_ty, val);
}
if (try sema.resolveMaybeUndefVal(tuple_ptr)) |tuple_ptr_val| {
return sema.addConstant(
ptr_field_ty,
try Value.Tag.field_ptr.create(sema.arena, .{
.container_ptr = tuple_ptr_val,
.container_ty = tuple_ty,
.field_index = field_index,
}),
);
}
if (!init) {
try sema.validateRuntimeElemAccess(block, field_index_src, field_ty, tuple_ty, tuple_ptr_src);
}
try sema.requireRuntimeBlock(block, tuple_ptr_src, null);
return block.addStructFieldPtr(tuple_ptr, field_index, ptr_field_ty);
}
fn tupleField(
sema: *Sema,
block: *Block,
tuple_src: LazySrcLoc,
tuple: Air.Inst.Ref,
field_index_src: LazySrcLoc,
field_index: u32,
) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const tuple_ty = try sema.resolveTypeFields(sema.typeOf(tuple));
const field_count = tuple_ty.structFieldCount(mod);
if (field_count == 0) {
return sema.fail(block, tuple_src, "indexing into empty tuple is not allowed", .{});
}
if (field_index >= field_count) {
return sema.fail(block, field_index_src, "index {d} outside tuple of length {d}", .{
field_index, field_count,
});
}
const field_ty = tuple_ty.structFieldType(field_index, mod);
if (try tuple_ty.structFieldValueComptime(mod, field_index)) |default_value| {
return sema.addConstant(field_ty, default_value); // comptime field
}
if (try sema.resolveMaybeUndefVal(tuple)) |tuple_val| {
if (tuple_val.isUndef(mod)) return sema.addConstUndef(field_ty);
return sema.addConstant(field_ty, try tuple_val.fieldValue(tuple_ty, mod, field_index));
}
try sema.validateRuntimeElemAccess(block, field_index_src, field_ty, tuple_ty, tuple_src);
try sema.requireRuntimeBlock(block, tuple_src, null);
return block.addStructFieldVal(tuple, field_index, field_ty);
}
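/// Loads an array element by index. For a sentinel-terminated array, index
/// `len` is in bounds and yields the sentinel value.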
fn elemValArray(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
array_src: LazySrcLoc,
array: Air.Inst.Ref,
elem_index_src: LazySrcLoc,
elem_index: Air.Inst.Ref,
oob_safety: bool,
) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const array_ty = sema.typeOf(array);
const array_sent = array_ty.sentinel(mod);
const array_len = array_ty.arrayLen(mod);
const array_len_s = array_len + @boolToInt(array_sent != null);
const elem_ty = array_ty.childType(mod);
if (array_len_s == 0) {
return sema.fail(block, array_src, "indexing into empty array is not allowed", .{});
}
const maybe_undef_array_val = try sema.resolveMaybeUndefVal(array);
// The index must be defined since an undefined index could be out of bounds
const maybe_index_val = try sema.resolveDefinedValue(block, elem_index_src, elem_index);
if (maybe_index_val) |index_val| {
const index = @intCast(usize, index_val.toUnsignedInt(mod));
if (array_sent) |s| {
if (index == array_len) {
return sema.addConstant(elem_ty, s);
}
}
if (index >= array_len_s) {
const sentinel_label: []const u8 = if (array_sent != null) " +1 (sentinel)" else "";
return sema.fail(block, elem_index_src, "index {d} outside array of length {d}{s}", .{ index, array_len, sentinel_label });
}
}
if (maybe_undef_array_val) |array_val| {
if (array_val.isUndef(mod)) {
return sema.addConstUndef(elem_ty);
}
if (maybe_index_val) |index_val| {
const index = @intCast(usize, index_val.toUnsignedInt(mod));
const elem_val = try array_val.elemValue(mod, index);
return sema.addConstant(elem_ty, elem_val);
}
}
try sema.validateRuntimeElemAccess(block, elem_index_src, elem_ty, array_ty, array_src);
const runtime_src = if (maybe_undef_array_val != null) elem_index_src else array_src;
try sema.requireRuntimeBlock(block, src, runtime_src);
if (oob_safety and block.wantSafety()) {
// A runtime check is only needed if the bounds could not be verified at comptime
if (maybe_index_val == null) {
const len_inst = try sema.addIntUnsigned(Type.usize, array_len);
const cmp_op: Air.Inst.Tag = if (array_sent != null) .cmp_lte else .cmp_lt;
try sema.panicIndexOutOfBounds(block, elem_index, len_inst, cmp_op);
}
}
return block.addBinOp(.array_elem_val, array, elem_index);
}
fn elemPtrArray(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
array_ptr_src: LazySrcLoc,
array_ptr: Air.Inst.Ref,
elem_index_src: LazySrcLoc,
elem_index: Air.Inst.Ref,
init: bool,
oob_safety: bool,
) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const array_ptr_ty = sema.typeOf(array_ptr);
const array_ty = array_ptr_ty.childType(mod);
const array_sent = array_ty.sentinel(mod) != null;
const array_len = array_ty.arrayLen(mod);
const array_len_s = array_len + @boolToInt(array_sent);
if (array_len_s == 0) {
return sema.fail(block, array_ptr_src, "indexing into empty array is not allowed", .{});
}
const maybe_undef_array_ptr_val = try sema.resolveMaybeUndefVal(array_ptr);
// The index must not be undefined since it can be out of bounds.
const offset: ?usize = if (try sema.resolveDefinedValue(block, elem_index_src, elem_index)) |index_val| o: {
const index = try sema.usizeCast(block, elem_index_src, index_val.toUnsignedInt(mod));
if (index >= array_len_s) {
const sentinel_label: []const u8 = if (array_sent) " +1 (sentinel)" else "";
return sema.fail(block, elem_index_src, "index {d} outside array of length {d}{s}", .{ index, array_len, sentinel_label });
}
break :o index;
} else null;
const elem_ptr_ty = try sema.elemPtrType(array_ptr_ty, offset);
if (maybe_undef_array_ptr_val) |array_ptr_val| {
if (array_ptr_val.isUndef(mod)) {
return sema.addConstUndef(elem_ptr_ty);
}
if (offset) |index| {
const elem_ptr = try array_ptr_val.elemPtr(array_ptr_ty, sema.arena, index, mod);
return sema.addConstant(elem_ptr_ty, elem_ptr);
}
}
if (!init) {
try sema.validateRuntimeElemAccess(block, elem_index_src, array_ty.elemType2(mod), array_ty, array_ptr_src);
}
const runtime_src = if (maybe_undef_array_ptr_val != null) elem_index_src else array_ptr_src;
try sema.requireRuntimeBlock(block, src, runtime_src);
// A runtime check is only needed if the bounds could not be verified at comptime.
if (oob_safety and block.wantSafety() and offset == null) {
const len_inst = try sema.addIntUnsigned(Type.usize, array_len);
const cmp_op: Air.Inst.Tag = if (array_sent) .cmp_lte else .cmp_lt;
try sema.panicIndexOutOfBounds(block, elem_index, len_inst, cmp_op);
}
return block.addPtrElemPtr(array_ptr, elem_index, elem_ptr_ty);
}
fn elemValSlice(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
slice_src: LazySrcLoc,
slice: Air.Inst.Ref,
elem_index_src: LazySrcLoc,
elem_index: Air.Inst.Ref,
oob_safety: bool,
) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const slice_ty = sema.typeOf(slice);
const slice_sent = slice_ty.sentinel(mod) != null;
const elem_ty = slice_ty.elemType2(mod);
var runtime_src = slice_src;
// The slice must be defined since an undefined slice could be dereferenced as null
const maybe_slice_val = try sema.resolveDefinedValue(block, slice_src, slice);
// The index must be defined since an undefined index could be out of bounds
const maybe_index_val = try sema.resolveDefinedValue(block, elem_index_src, elem_index);
if (maybe_slice_val) |slice_val| {
runtime_src = elem_index_src;
const slice_len = slice_val.sliceLen(mod);
const slice_len_s = slice_len + @boolToInt(slice_sent);
if (slice_len_s == 0) {
return sema.fail(block, slice_src, "indexing into empty slice is not allowed", .{});
}
if (maybe_index_val) |index_val| {
const index = @intCast(usize, index_val.toUnsignedInt(mod));
if (index >= slice_len_s) {
const sentinel_label: []const u8 = if (slice_sent) " +1 (sentinel)" else "";
return sema.fail(block, elem_index_src, "index {d} outside slice of length {d}{s}", .{ index, slice_len, sentinel_label });
}
const elem_ptr_val = try slice_val.elemPtr(slice_ty, sema.arena, index, mod);
if (try sema.pointerDeref(block, slice_src, elem_ptr_val, slice_ty)) |elem_val| {
return sema.addConstant(elem_ty, elem_val);
}
runtime_src = slice_src;
}
}
try sema.validateRuntimeElemAccess(block, elem_index_src, elem_ty, slice_ty, slice_src);
try sema.requireRuntimeBlock(block, src, runtime_src);
if (oob_safety and block.wantSafety()) {
const len_inst = if (maybe_slice_val) |slice_val|
try sema.addIntUnsigned(Type.usize, slice_val.sliceLen(mod))
else
try block.addTyOp(.slice_len, Type.usize, slice);
const cmp_op: Air.Inst.Tag = if (slice_sent) .cmp_lte else .cmp_lt;
try sema.panicIndexOutOfBounds(block, elem_index, len_inst, cmp_op);
}
try sema.queueFullTypeResolution(sema.typeOf(slice));
return block.addBinOp(.slice_elem_val, slice, elem_index);
}
fn elemPtrSlice(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
slice_src: LazySrcLoc,
slice: Air.Inst.Ref,
elem_index_src: LazySrcLoc,
elem_index: Air.Inst.Ref,
oob_safety: bool,
) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const slice_ty = sema.typeOf(slice);
const slice_sent = slice_ty.sentinel(mod) != null;
const maybe_undef_slice_val = try sema.resolveMaybeUndefVal(slice);
// The index must not be undefined since it can be out of bounds.
const offset: ?usize = if (try sema.resolveDefinedValue(block, elem_index_src, elem_index)) |index_val| o: {
const index = try sema.usizeCast(block, elem_index_src, index_val.toUnsignedInt(mod));
break :o index;
} else null;
const elem_ptr_ty = try sema.elemPtrType(slice_ty, offset);
if (maybe_undef_slice_val) |slice_val| {
if (slice_val.isUndef(mod)) {
return sema.addConstUndef(elem_ptr_ty);
}
const slice_len = slice_val.sliceLen(mod);
const slice_len_s = slice_len + @boolToInt(slice_sent);
if (slice_len_s == 0) {
return sema.fail(block, slice_src, "indexing into empty slice is not allowed", .{});
}
if (offset) |index| {
if (index >= slice_len_s) {
const sentinel_label: []const u8 = if (slice_sent) " +1 (sentinel)" else "";
return sema.fail(block, elem_index_src, "index {d} outside slice of length {d}{s}", .{ index, slice_len, sentinel_label });
}
const elem_ptr_val = try slice_val.elemPtr(slice_ty, sema.arena, index, mod);
return sema.addConstant(elem_ptr_ty, elem_ptr_val);
}
}
try sema.validateRuntimeElemAccess(block, elem_index_src, elem_ptr_ty, slice_ty, slice_src);
const runtime_src = if (maybe_undef_slice_val != null) elem_index_src else slice_src;
try sema.requireRuntimeBlock(block, src, runtime_src);
if (oob_safety and block.wantSafety()) {
const len_inst = len: {
if (maybe_undef_slice_val) |slice_val|
if (!slice_val.isUndef(mod))
break :len try sema.addIntUnsigned(Type.usize, slice_val.sliceLen(mod));
break :len try block.addTyOp(.slice_len, Type.usize, slice);
};
const cmp_op: Air.Inst.Tag = if (slice_sent) .cmp_lte else .cmp_lt;
try sema.panicIndexOutOfBounds(block, elem_index, len_inst, cmp_op);
}
return block.addSliceElemPtr(slice, elem_index, elem_ptr_ty);
}
fn coerce(
sema: *Sema,
block: *Block,
dest_ty_unresolved: Type,
inst: Air.Inst.Ref,
inst_src: LazySrcLoc,
) CompileError!Air.Inst.Ref {
return sema.coerceExtra(block, dest_ty_unresolved, inst, inst_src, .{}) catch |err| switch (err) {
error.NotCoercible => unreachable,
else => |e| return e,
};
}
const CoersionError = CompileError || error{
/// When coerce is called recursively, this error should be returned instead of using `fail`
/// to ensure correct types in compile errors.
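/// For example, when wrapping `T` into `?T`, a failed payload coercion
/// surfaces as `NotCoercible` so the caller can fall back to other coercion
/// strategies and report against the original destination type.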
NotCoercible,
};
const CoerceOpts = struct {
/// Whether `coerceExtra` should emit error messages.
report_err: bool = true,
/// Ignored if `report_err == false`.
is_ret: bool = false,
/// Whether coercion to `comptime_int` should emit an error message.
no_cast_to_comptime_int: bool = false,
param_src: struct {
func_inst: Air.Inst.Ref = .none,
param_i: u32 = undefined,
fn get(info: @This(), sema: *Sema) !?Module.SrcLoc {
if (info.func_inst == .none) return null;
const mod = sema.mod;
const fn_decl = (try sema.funcDeclSrc(info.func_inst)) orelse return null;
const param_src = Module.paramSrc(0, mod, fn_decl, info.param_i);
if (param_src == .node_offset_param) {
return Module.SrcLoc{
.file_scope = fn_decl.getFileScope(mod),
.parent_decl_node = fn_decl.src_node,
.lazy = LazySrcLoc.nodeOffset(param_src.node_offset_param),
};
}
return param_src.toSrcLoc(fn_decl, mod);
}
} = .{},
};
fn coerceExtra(
sema: *Sema,
block: *Block,
dest_ty_unresolved: Type,
inst: Air.Inst.Ref,
inst_src: LazySrcLoc,
opts: CoerceOpts,
) CoersionError!Air.Inst.Ref {
if (dest_ty_unresolved.isGenericPoison()) return inst;
const mod = sema.mod;
const dest_ty_src = inst_src; // TODO better source location
const dest_ty = try sema.resolveTypeFields(dest_ty_unresolved);
const inst_ty = try sema.resolveTypeFields(sema.typeOf(inst));
const target = mod.getTarget();
// If the types are the same, we can return the operand.
if (dest_ty.eql(inst_ty, mod))
return inst;
const maybe_inst_val = try sema.resolveMaybeUndefVal(inst);
var in_memory_result = try sema.coerceInMemoryAllowed(block, dest_ty, inst_ty, false, target, dest_ty_src, inst_src);
if (in_memory_result == .ok) {
if (maybe_inst_val) |val| {
if (val.ip_index == .none or val.ip_index == .null_value) {
// Keep the comptime Value representation; take the new type.
return sema.addConstant(dest_ty, val);
} else {
const new_val = try mod.intern_pool.getCoerced(sema.gpa, val.ip_index, dest_ty.ip_index);
return sema.addConstant(dest_ty, new_val.toValue());
}
}
try sema.requireRuntimeBlock(block, inst_src, null);
return block.addBitCast(dest_ty, inst);
}
const is_undef = inst_ty.zigTypeTag(mod) == .Undefined;
switch (dest_ty.zigTypeTag(mod)) {
.Optional => optional: {
// Coercing `undefined` also sets the optional bit to undefined.
if (is_undef) {
return sema.addConstUndef(dest_ty);
}
// null to ?T
if (inst_ty.zigTypeTag(mod) == .Null) {
return sema.addConstant(dest_ty, Value.null);
}
// cast from ?*T and ?[*]T to ?*anyopaque
// but don't do it if the source type is a double pointer
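// e.g. `?*u32` coerces to `?*anyopaque`, but `?**u32` does not
// (rejected as a double pointer below).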
if (dest_ty.isPtrLikeOptional(mod) and
dest_ty.elemType2(mod).ip_index == .anyopaque_type and
inst_ty.isPtrAtRuntime(mod))
anyopaque_check: {
if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :optional;
const elem_ty = inst_ty.elemType2(mod);
if (elem_ty.zigTypeTag(mod) == .Pointer or elem_ty.isPtrLikeOptional(mod)) {
in_memory_result = .{ .double_ptr_to_anyopaque = .{
.actual = inst_ty,
.wanted = dest_ty,
} };
break :optional;
}
// Let the logic below handle wrapping the optional now that it has
// been verified to coerce correctly.
if (!inst_ty.isPtrLikeOptional(mod)) break :anyopaque_check;
return sema.coerceCompatiblePtrs(block, dest_ty, inst, inst_src);
}
// T to ?T
const child_type = dest_ty.optionalChild(mod);
const intermediate = sema.coerceExtra(block, child_type, inst, inst_src, .{ .report_err = false }) catch |err| switch (err) {
error.NotCoercible => {
if (in_memory_result == .no_match) {
// Try to give more useful notes
in_memory_result = try sema.coerceInMemoryAllowed(block, child_type, inst_ty, false, target, dest_ty_src, inst_src);
}
break :optional;
},
else => |e| return e,
};
return try sema.wrapOptional(block, dest_ty, intermediate, inst_src);
},
.Pointer => pointer: {
const dest_info = dest_ty.ptrInfo(mod);
// Function body to function pointer.
if (inst_ty.zigTypeTag(mod) == .Fn) {
const fn_val = try sema.resolveConstValue(block, .unneeded, inst, "");
const fn_decl = fn_val.pointerDecl().?;
const inst_as_ptr = try sema.analyzeDeclRef(fn_decl);
return sema.coerce(block, dest_ty, inst_as_ptr, inst_src);
}
// *T to *[1]T
single_item: {
if (dest_info.size != .One) break :single_item;
if (!inst_ty.isSinglePointer(mod)) break :single_item;
if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :pointer;
const ptr_elem_ty = inst_ty.childType(mod);
const array_ty = dest_info.pointee_type;
if (array_ty.zigTypeTag(mod) != .Array) break :single_item;
const array_elem_ty = array_ty.childType(mod);
if (array_ty.arrayLen(mod) != 1) break :single_item;
const dest_is_mut = dest_info.mutable;
switch (try sema.coerceInMemoryAllowed(block, array_elem_ty, ptr_elem_ty, dest_is_mut, target, dest_ty_src, inst_src)) {
.ok => {},
else => break :single_item,
}
return sema.coerceCompatiblePtrs(block, dest_ty, inst, inst_src);
}
// Coercions where the source is a single pointer to an array.
src_array_ptr: {
if (!inst_ty.isSinglePointer(mod)) break :src_array_ptr;
if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :pointer;
const array_ty = inst_ty.childType(mod);
if (array_ty.zigTypeTag(mod) != .Array) break :src_array_ptr;
const array_elem_type = array_ty.childType(mod);
const dest_is_mut = dest_info.mutable;
const dst_elem_type = dest_info.pointee_type;
const elem_res = try sema.coerceInMemoryAllowed(block, dst_elem_type, array_elem_type, dest_is_mut, target, dest_ty_src, inst_src);
switch (elem_res) {
.ok => {},
else => {
in_memory_result = .{ .ptr_child = .{
.child = try elem_res.dupe(sema.arena),
.actual = array_elem_type,
.wanted = dst_elem_type,
} };
break :src_array_ptr;
},
}
if (dest_info.sentinel) |dest_sent| {
if (array_ty.sentinel(mod)) |inst_sent| {
if (!dest_sent.eql(inst_sent, dst_elem_type, sema.mod)) {
in_memory_result = .{ .ptr_sentinel = .{
.actual = inst_sent,
.wanted = dest_sent,
.ty = dst_elem_type,
} };
break :src_array_ptr;
}
} else {
in_memory_result = .{ .ptr_sentinel = .{
.actual = Value.@"unreachable",
.wanted = dest_sent,
.ty = dst_elem_type,
} };
break :src_array_ptr;
}
}
switch (dest_info.size) {
.Slice => {
// *[N]T to []T
return sema.coerceArrayPtrToSlice(block, dest_ty, inst, inst_src);
},
.C => {
// *[N]T to [*c]T
return sema.coerceCompatiblePtrs(block, dest_ty, inst, inst_src);
},
.Many => {
// *[N]T to [*]T
return sema.coerceCompatiblePtrs(block, dest_ty, inst, inst_src);
},
.One => {},
}
}
// coercion from C pointer
if (inst_ty.isCPtr(mod)) src_c_ptr: {
if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :src_c_ptr;
// In this case we must add a safety check because the C pointer
// could be null.
const src_elem_ty = inst_ty.childType(mod);
const dest_is_mut = dest_info.mutable;
const dst_elem_type = dest_info.pointee_type;
switch (try sema.coerceInMemoryAllowed(block, dst_elem_type, src_elem_ty, dest_is_mut, target, dest_ty_src, inst_src)) {
.ok => {},
else => break :src_c_ptr,
}
return sema.coerceCompatiblePtrs(block, dest_ty, inst, inst_src);
}
// cast from *T and [*]T to *anyopaque
// but don't do it if the source type is a double pointer
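// e.g. `*u32` and `[*]u16` coerce to `*anyopaque`; `**u32` (double
// pointer) and `[]u8` (slice) are rejected below.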
if (dest_info.pointee_type.ip_index == .anyopaque_type and inst_ty.zigTypeTag(mod) == .Pointer) to_anyopaque: {
if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :pointer;
const elem_ty = inst_ty.elemType2(mod);
if (elem_ty.zigTypeTag(mod) == .Pointer or elem_ty.isPtrLikeOptional(mod)) {
in_memory_result = .{ .double_ptr_to_anyopaque = .{
.actual = inst_ty,
.wanted = dest_ty,
} };
break :pointer;
}
if (dest_ty.isSlice(mod)) break :to_anyopaque;
if (inst_ty.isSlice(mod)) {
in_memory_result = .{ .slice_to_anyopaque = .{
.actual = inst_ty,
.wanted = dest_ty,
} };
break :pointer;
}
return sema.coerceCompatiblePtrs(block, dest_ty, inst, inst_src);
}
switch (dest_info.size) {
// coercion to C pointer
.C => switch (inst_ty.zigTypeTag(mod)) {
.Null => {
return sema.addConstant(dest_ty, Value.null);
},
.ComptimeInt => {
const addr = sema.coerceExtra(block, Type.usize, inst, inst_src, .{ .report_err = false }) catch |err| switch (err) {
error.NotCoercible => break :pointer,
else => |e| return e,
};
return try sema.coerceCompatiblePtrs(block, dest_ty, addr, inst_src);
},
.Int => {
const ptr_size_ty = switch (inst_ty.intInfo(mod).signedness) {
.signed => Type.isize,
.unsigned => Type.usize,
};
const addr = sema.coerceExtra(block, ptr_size_ty, inst, inst_src, .{ .report_err = false }) catch |err| switch (err) {
error.NotCoercible => {
// Try to give more useful notes
in_memory_result = try sema.coerceInMemoryAllowed(block, ptr_size_ty, inst_ty, false, target, dest_ty_src, inst_src);
break :pointer;
},
else => |e| return e,
};
return try sema.coerceCompatiblePtrs(block, dest_ty, addr, inst_src);
},
.Pointer => p: {
if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :p;
const inst_info = inst_ty.ptrInfo(mod);
switch (try sema.coerceInMemoryAllowed(
block,
dest_info.pointee_type,
inst_info.pointee_type,
dest_info.mutable,
target,
dest_ty_src,
inst_src,
)) {
.ok => {},
else => break :p,
}
if (inst_info.size == .Slice) {
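// A slice coerces to `[*c]T` only when it carries a zero sentinel,
// e.g. `[:0]u8` to `[*c]u8`; the raw pointer is extracted below.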
assert(dest_info.sentinel == null);
if (inst_info.sentinel == null or
!inst_info.sentinel.?.eql(try mod.intValue(dest_info.pointee_type, 0), dest_info.pointee_type, sema.mod))
break :p;
const slice_ptr = try sema.analyzeSlicePtr(block, inst_src, inst, inst_ty);
return sema.coerceCompatiblePtrs(block, dest_ty, slice_ptr, inst_src);
}
return sema.coerceCompatiblePtrs(block, dest_ty, inst, inst_src);
},
else => {},
},
.One => switch (dest_info.pointee_type.zigTypeTag(mod)) {
.Union => {
// pointer to anonymous struct to pointer to union
if (inst_ty.isSinglePointer(mod) and
inst_ty.childType(mod).isAnonStruct(mod) and
sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result))
{
return sema.coerceAnonStructToUnionPtrs(block, dest_ty, dest_ty_src, inst, inst_src);
}
},
.Struct => {
// pointer to anonymous struct to pointer to struct
if (inst_ty.isSinglePointer(mod) and
inst_ty.childType(mod).isAnonStruct(mod) and
sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result))
{
return sema.coerceAnonStructToStructPtrs(block, dest_ty, dest_ty_src, inst, inst_src) catch |err| switch (err) {
error.NotCoercible => break :pointer,
else => |e| return e,
};
}
},
.Array => {
// pointer to tuple to pointer to array
if (inst_ty.isSinglePointer(mod) and
inst_ty.childType(mod).isTuple(mod) and
sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result))
{
return sema.coerceTupleToArrayPtrs(block, dest_ty, dest_ty_src, inst, inst_src);
}
},
else => {},
},
.Slice => to_slice: {
if (inst_ty.zigTypeTag(mod) == .Array) {
return sema.fail(
block,
inst_src,
"array literal requires address-of operator (&) to coerce to slice type '{}'",
.{dest_ty.fmt(sema.mod)},
);
}
if (!inst_ty.isSinglePointer(mod)) break :to_slice;
const inst_child_ty = inst_ty.childType(mod);
if (!inst_child_ty.isTuple(mod)) break :to_slice;
// empty tuple to zero-length slice
// note that this allows coercing to a mutable slice.
if (inst_child_ty.structFieldCount(mod) == 0) {
// An optional slice is represented with a null pointer, so a
// zero-length slice cannot use it; a dummy non-null pointer value
// with the required alignment is used instead.
const slice_val = try Value.Tag.slice.create(sema.arena, .{
.ptr = if (dest_info.@"align" != 0)
try mod.intValue(Type.usize, dest_info.@"align")
else
try dest_info.pointee_type.lazyAbiAlignment(mod, sema.arena),
.len = try mod.intValue(Type.usize, 0),
});
return sema.addConstant(dest_ty, slice_val);
}
// pointer to tuple to slice
if (dest_info.mutable) {
const err_msg = err_msg: {
const err_msg = try sema.errMsg(block, inst_src, "cannot cast pointer to tuple to '{}'", .{dest_ty.fmt(sema.mod)});
errdefer err_msg.deinit(sema.gpa);
try sema.errNote(block, dest_ty_src, err_msg, "pointers to tuples can only coerce to constant pointers", .{});
break :err_msg err_msg;
};
return sema.failWithOwnedErrorMsg(err_msg);
}
return sema.coerceTupleToSlicePtrs(block, dest_ty, dest_ty_src, inst, inst_src);
},
.Many => p: {
if (!inst_ty.isSlice(mod)) break :p;
if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :p;
const inst_info = inst_ty.ptrInfo(mod);
switch (try sema.coerceInMemoryAllowed(
block,
dest_info.pointee_type,
inst_info.pointee_type,
dest_info.mutable,
target,
dest_ty_src,
inst_src,
)) {
.ok => {},
else => break :p,
}
if (dest_info.sentinel == null or inst_info.sentinel == null or
!dest_info.sentinel.?.eql(inst_info.sentinel.?, dest_info.pointee_type, sema.mod))
break :p;
const slice_ptr = try sema.analyzeSlicePtr(block, inst_src, inst, inst_ty);
return sema.coerceCompatiblePtrs(block, dest_ty, slice_ptr, inst_src);
},
}
},
.Int, .ComptimeInt => switch (inst_ty.zigTypeTag(mod)) {
.Float, .ComptimeFloat => float: {
if (is_undef) {
return sema.addConstUndef(dest_ty);
}
const val = (try sema.resolveMaybeUndefVal(inst)) orelse {
if (dest_ty.zigTypeTag(mod) == .ComptimeInt) {
if (!opts.report_err) return error.NotCoercible;
return sema.failWithNeededComptime(block, inst_src, "value being casted to 'comptime_int' must be comptime-known");
}
break :float;
};
if (val.floatHasFraction(mod)) {
return sema.fail(
block,
inst_src,
"fractional component prevents float value '{}' from coercion to type '{}'",
.{ val.fmtValue(inst_ty, sema.mod), dest_ty.fmt(sema.mod) },
);
}
const result_val = try sema.floatToInt(block, inst_src, val, inst_ty, dest_ty);
return try sema.addConstant(dest_ty, result_val);
},
.Int, .ComptimeInt => {
if (is_undef) {
return sema.addConstUndef(dest_ty);
}
if (try sema.resolveMaybeUndefVal(inst)) |val| {
// comptime-known integer to other number
if (!(try sema.intFitsInType(val, dest_ty, null))) {
if (!opts.report_err) return error.NotCoercible;
return sema.fail(block, inst_src, "type '{}' cannot represent integer value '{}'", .{ dest_ty.fmt(sema.mod), val.fmtValue(inst_ty, sema.mod) });
}
const new_val = try mod.intern_pool.getCoerced(sema.gpa, val.ip_index, dest_ty.ip_index);
return try sema.addConstant(dest_ty, new_val.toValue());
}
if (dest_ty.zigTypeTag(mod) == .ComptimeInt) {
if (!opts.report_err) return error.NotCoercible;
if (opts.no_cast_to_comptime_int) return inst;
return sema.failWithNeededComptime(block, inst_src, "value being casted to 'comptime_int' must be comptime-known");
}
// integer widening
const dst_info = dest_ty.intInfo(mod);
const src_info = inst_ty.intInfo(mod);
if ((src_info.signedness == dst_info.signedness and dst_info.bits >= src_info.bits) or
// small enough unsigned ints can get cast to large enough signed ints
(dst_info.signedness == .signed and dst_info.bits > src_info.bits))
{
try sema.requireRuntimeBlock(block, inst_src, null);
return block.addTyOp(.intcast, dest_ty, inst);
}
},
.Undefined => {
return sema.addConstUndef(dest_ty);
},
else => {},
},
.Float, .ComptimeFloat => switch (inst_ty.zigTypeTag(mod)) {
.ComptimeFloat => {
const val = try sema.resolveConstValue(block, .unneeded, inst, "");
const result_val = try val.floatCast(dest_ty, mod);
return try sema.addConstant(dest_ty, result_val);
},
.Float => {
if (is_undef) {
return sema.addConstUndef(dest_ty);
}
if (try sema.resolveMaybeUndefVal(inst)) |val| {
const result_val = try val.floatCast(dest_ty, mod);
if (!val.eql(result_val, inst_ty, sema.mod)) {
return sema.fail(
block,
inst_src,
"type '{}' cannot represent float value '{}'",
.{ dest_ty.fmt(sema.mod), val.fmtValue(inst_ty, sema.mod) },
);
}
return try sema.addConstant(dest_ty, result_val);
} else if (dest_ty.zigTypeTag(mod) == .ComptimeFloat) {
if (!opts.report_err) return error.NotCoercible;
return sema.failWithNeededComptime(block, inst_src, "value being casted to 'comptime_float' must be comptime-known");
}
// float widening
const src_bits = inst_ty.floatBits(target);
const dst_bits = dest_ty.floatBits(target);
if (dst_bits >= src_bits) {
try sema.requireRuntimeBlock(block, inst_src, null);
return block.addTyOp(.fpext, dest_ty, inst);
}
},
.Int, .ComptimeInt => int: {
if (is_undef) {
return sema.addConstUndef(dest_ty);
}
const val = (try sema.resolveMaybeUndefVal(inst)) orelse {
if (dest_ty.zigTypeTag(mod) == .ComptimeFloat) {
if (!opts.report_err) return error.NotCoercible;
return sema.failWithNeededComptime(block, inst_src, "value being casted to 'comptime_float' must be comptime-known");
}
break :int;
};
const result_val = try val.intToFloatAdvanced(sema.arena, inst_ty, dest_ty, sema.mod, sema);
// TODO implement this compile error
//const int_again_val = try result_val.floatToInt(sema.arena, inst_ty);
//if (!int_again_val.eql(val, inst_ty, mod)) {
// return sema.fail(
// block,
// inst_src,
// "type '{}' cannot represent integer value '{}'",
// .{ dest_ty.fmt(sema.mod), val },
// );
//}
return try sema.addConstant(dest_ty, result_val);
},
.Undefined => {
return sema.addConstUndef(dest_ty);
},
else => {},
},
.Enum => switch (inst_ty.zigTypeTag(mod)) {
.EnumLiteral => {
// enum literal to enum
const val = try sema.resolveConstValue(block, .unneeded, inst, "");
const bytes = val.castTag(.enum_literal).?.data;
const field_index = dest_ty.enumFieldIndex(bytes, mod) orelse {
const msg = msg: {
const msg = try sema.errMsg(
block,
inst_src,
"no field named '{s}' in enum '{}'",
.{ bytes, dest_ty.fmt(sema.mod) },
);
errdefer msg.destroy(sema.gpa);
try sema.addDeclaredHereNote(msg, dest_ty);
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
};
return sema.addConstant(
dest_ty,
try mod.enumValueFieldIndex(dest_ty, @intCast(u32, field_index)),
);
},
.Union => blk: {
// union to its own tag type
const union_tag_ty = inst_ty.unionTagType(mod) orelse break :blk;
if (union_tag_ty.eql(dest_ty, sema.mod)) {
return sema.unionToTag(block, dest_ty, inst, inst_src);
}
},
.Undefined => {
return sema.addConstUndef(dest_ty);
},
else => {},
},
.ErrorUnion => switch (inst_ty.zigTypeTag(mod)) {
.ErrorUnion => eu: {
if (maybe_inst_val) |inst_val| {
switch (inst_val.ip_index) {
.undef => return sema.addConstUndef(dest_ty),
.none => switch (inst_val.tag()) {
.eu_payload => {
const payload = try sema.addConstant(
inst_ty.errorUnionPayload(mod),
inst_val.castTag(.eu_payload).?.data,
);
return sema.wrapErrorUnionPayload(block, dest_ty, payload, inst_src) catch |err| switch (err) {
error.NotCoercible => break :eu,
else => |e| return e,
};
},
else => {},
},
else => {},
}
const error_set = try sema.addConstant(
inst_ty.errorUnionSet(mod),
inst_val,
);
return sema.wrapErrorUnionSet(block, dest_ty, error_set, inst_src);
}
},
.ErrorSet => {
// E to E!T
return sema.wrapErrorUnionSet(block, dest_ty, inst, inst_src);
},
.Undefined => {
return sema.addConstUndef(dest_ty);
},
else => eu: {
// T to E!T
return sema.wrapErrorUnionPayload(block, dest_ty, inst, inst_src) catch |err| switch (err) {
error.NotCoercible => break :eu,
else => |e| return e,
};
},
},
.Union => switch (inst_ty.zigTypeTag(mod)) {
.Enum, .EnumLiteral => return sema.coerceEnumToUnion(block, dest_ty, dest_ty_src, inst, inst_src),
.Struct => {
if (inst_ty.isAnonStruct(mod)) {
return sema.coerceAnonStructToUnion(block, dest_ty, dest_ty_src, inst, inst_src);
}
},
.Undefined => {
return sema.addConstUndef(dest_ty);
},
else => {},
},
.Array => switch (inst_ty.zigTypeTag(mod)) {
.Vector => return sema.coerceArrayLike(block, dest_ty, dest_ty_src, inst, inst_src),
.Struct => {
if (inst == .empty_struct) {
return sema.arrayInitEmpty(block, inst_src, dest_ty);
}
if (inst_ty.isTuple(mod)) {
return sema.coerceTupleToArray(block, dest_ty, dest_ty_src, inst, inst_src);
}
},
.Undefined => {
return sema.addConstUndef(dest_ty);
},
else => {},
},
.Vector => switch (inst_ty.zigTypeTag(mod)) {
.Array, .Vector => return sema.coerceArrayLike(block, dest_ty, dest_ty_src, inst, inst_src),
.Struct => {
if (inst_ty.isTuple(mod)) {
return sema.coerceTupleToArray(block, dest_ty, dest_ty_src, inst, inst_src);
}
},
.Undefined => {
return sema.addConstUndef(dest_ty);
},
else => {},
},
.Struct => blk: {
if (inst == .empty_struct) {
return sema.structInitEmpty(block, dest_ty, dest_ty_src, inst_src);
}
if (inst_ty.isTupleOrAnonStruct(mod)) {
return sema.coerceTupleToStruct(block, dest_ty, inst, inst_src) catch |err| switch (err) {
error.NotCoercible => break :blk,
else => |e| return e,
};
}
},
else => {},
}
// undefined to anything. We do this after the big switch above so that
// special logic has a chance to run first, such as `*[N]T` to `[]T` which
// should initialize the length field of the slice.
if (is_undef) {
return sema.addConstUndef(dest_ty);
}
if (!opts.report_err) return error.NotCoercible;
if (opts.is_ret and dest_ty.zigTypeTag(mod) == .NoReturn) {
const msg = msg: {
const msg = try sema.errMsg(block, inst_src, "function declared 'noreturn' returns", .{});
errdefer msg.destroy(sema.gpa);
const ret_ty_src: LazySrcLoc = .{ .node_offset_fn_type_ret_ty = 0 };
const src_decl = sema.mod.declPtr(sema.func.?.owner_decl);
try sema.mod.errNoteNonLazy(ret_ty_src.toSrcLoc(src_decl, mod), msg, "'noreturn' declared here", .{});
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
const msg = msg: {
const msg = try sema.errMsg(block, inst_src, "expected type '{}', found '{}'", .{ dest_ty.fmt(sema.mod), inst_ty.fmt(sema.mod) });
errdefer msg.destroy(sema.gpa);
// E!T to T
if (inst_ty.zigTypeTag(mod) == .ErrorUnion and
(try sema.coerceInMemoryAllowed(block, inst_ty.errorUnionPayload(mod), dest_ty, false, target, dest_ty_src, inst_src)) == .ok)
{
try sema.errNote(block, inst_src, msg, "cannot convert error union to payload type", .{});
try sema.errNote(block, inst_src, msg, "consider using 'try', 'catch', or 'if'", .{});
}
// ?T to T
if (inst_ty.zigTypeTag(mod) == .Optional and
(try sema.coerceInMemoryAllowed(block, inst_ty.optionalChild(mod), dest_ty, false, target, dest_ty_src, inst_src)) == .ok)
{
try sema.errNote(block, inst_src, msg, "cannot convert optional to payload type", .{});
try sema.errNote(block, inst_src, msg, "consider using '.?', 'orelse', or 'if'", .{});
}
try in_memory_result.report(sema, block, inst_src, msg);
// Add notes about function return type
if (opts.is_ret and sema.mod.test_functions.get(sema.func.?.owner_decl) == null) {
const ret_ty_src: LazySrcLoc = .{ .node_offset_fn_type_ret_ty = 0 };
const src_decl = sema.mod.declPtr(sema.func.?.owner_decl);
if (inst_ty.isError(mod) and !dest_ty.isError(mod)) {
try sema.mod.errNoteNonLazy(ret_ty_src.toSrcLoc(src_decl, mod), msg, "function cannot return an error", .{});
} else {
try sema.mod.errNoteNonLazy(ret_ty_src.toSrcLoc(src_decl, mod), msg, "function return type declared here", .{});
}
}
if (try opts.param_src.get(sema)) |param_src| {
try sema.mod.errNoteNonLazy(param_src, msg, "parameter type declared here", .{});
}
// TODO maybe add "cannot store an error in type '{}'" note
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
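/// Result of an in-memory coercion check. On failure, the active variant
/// (and its `child` chain, where present) records why the coercion is not
/// possible so that `report` can emit a note for each level of nesting.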
const InMemoryCoercionResult = union(enum) {
ok,
no_match: Pair,
int_not_coercible: Int,
error_union_payload: PairAndChild,
array_len: IntPair,
array_sentinel: Sentinel,
array_elem: PairAndChild,
vector_len: IntPair,
vector_elem: PairAndChild,
optional_shape: Pair,
optional_child: PairAndChild,
from_anyerror,
missing_error: []const InternPool.NullTerminatedString,
/// true if wanted is var args
fn_var_args: bool,
/// true if wanted is generic
fn_generic: bool,
fn_param_count: IntPair,
fn_param_noalias: IntPair,
fn_param_comptime: ComptimeParam,
fn_param: Param,
fn_cc: CC,
fn_return_type: PairAndChild,
ptr_child: PairAndChild,
ptr_addrspace: AddressSpace,
ptr_sentinel: Sentinel,
ptr_size: Size,
ptr_qualifiers: Qualifiers,
ptr_allowzero: Pair,
ptr_bit_range: BitRange,
ptr_alignment: IntPair,
double_ptr_to_anyopaque: Pair,
slice_to_anyopaque: Pair,
const Pair = struct {
actual: Type,
wanted: Type,
};
const PairAndChild = struct {
child: *InMemoryCoercionResult,
actual: Type,
wanted: Type,
};
const Param = struct {
child: *InMemoryCoercionResult,
actual: Type,
wanted: Type,
index: u64,
};
const ComptimeParam = struct {
index: u64,
wanted: bool,
};
const Sentinel = struct {
// unreachable_value indicates no sentinel
actual: Value,
wanted: Value,
ty: Type,
};
const Int = struct {
actual_signedness: std.builtin.Signedness,
wanted_signedness: std.builtin.Signedness,
actual_bits: u16,
wanted_bits: u16,
};
const IntPair = struct {
actual: u64,
wanted: u64,
};
const Size = struct {
actual: std.builtin.Type.Pointer.Size,
wanted: std.builtin.Type.Pointer.Size,
};
const Qualifiers = struct {
actual_const: bool,
wanted_const: bool,
actual_volatile: bool,
wanted_volatile: bool,
};
const AddressSpace = struct {
actual: std.builtin.AddressSpace,
wanted: std.builtin.AddressSpace,
};
const CC = struct {
actual: std.builtin.CallingConvention,
wanted: std.builtin.CallingConvention,
};
const BitRange = struct {
actual_host: u16,
wanted_host: u16,
actual_offset: u16,
wanted_offset: u16,
};
fn dupe(child: *const InMemoryCoercionResult, arena: Allocator) !*InMemoryCoercionResult {
const res = try arena.create(InMemoryCoercionResult);
res.* = child.*;
return res;
}
fn report(res: *const InMemoryCoercionResult, sema: *Sema, block: *Block, src: LazySrcLoc, msg: *Module.ErrorMsg) !void {
const mod = sema.mod;
var cur = res;
while (true) switch (cur.*) {
.ok => unreachable,
.no_match => |types| {
try sema.addDeclaredHereNote(msg, types.wanted);
try sema.addDeclaredHereNote(msg, types.actual);
break;
},
.int_not_coercible => |int| {
try sema.errNote(block, src, msg, "{s} {d}-bit int cannot represent all possible {s} {d}-bit values", .{
@tagName(int.wanted_signedness), int.wanted_bits, @tagName(int.actual_signedness), int.actual_bits,
});
break;
},
.error_union_payload => |pair| {
try sema.errNote(block, src, msg, "error union payload '{}' cannot cast into error union payload '{}'", .{
pair.actual.fmt(sema.mod), pair.wanted.fmt(sema.mod),
});
cur = pair.child;
},
.array_len => |lens| {
try sema.errNote(block, src, msg, "array of length {d} cannot cast into an array of length {d}", .{
lens.actual, lens.wanted,
});
break;
},
.array_sentinel => |sentinel| {
if (sentinel.actual.ip_index != .unreachable_value) {
try sema.errNote(block, src, msg, "array sentinel '{}' cannot cast into array sentinel '{}'", .{
sentinel.actual.fmtValue(sentinel.ty, sema.mod), sentinel.wanted.fmtValue(sentinel.ty, sema.mod),
});
} else {
try sema.errNote(block, src, msg, "destination array requires '{}' sentinel", .{
sentinel.wanted.fmtValue(sentinel.ty, sema.mod),
});
}
break;
},
.array_elem => |pair| {
try sema.errNote(block, src, msg, "array element type '{}' cannot cast into array element type '{}'", .{
pair.actual.fmt(sema.mod), pair.wanted.fmt(sema.mod),
});
cur = pair.child;
},
.vector_len => |lens| {
try sema.errNote(block, src, msg, "vector of length {d} cannot cast into a vector of length {d}", .{
lens.actual, lens.wanted,
});
break;
},
.vector_elem => |pair| {
try sema.errNote(block, src, msg, "vector element type '{}' cannot cast into vector element type '{}'", .{
pair.actual.fmt(sema.mod), pair.wanted.fmt(sema.mod),
});
cur = pair.child;
},
.optional_shape => |pair| {
try sema.errNote(block, src, msg, "optional type child '{}' cannot cast into optional type child '{}'", .{
pair.actual.optionalChild(mod).fmt(sema.mod), pair.wanted.optionalChild(mod).fmt(sema.mod),
});
break;
},
.optional_child => |pair| {
try sema.errNote(block, src, msg, "optional type child '{}' cannot cast into optional type child '{}'", .{
pair.actual.fmt(sema.mod), pair.wanted.fmt(sema.mod),
});
cur = pair.child;
},
.from_anyerror => {
try sema.errNote(block, src, msg, "global error set cannot cast into a smaller set", .{});
break;
},
.missing_error => |missing_errors| {
for (missing_errors) |err_index| {
const err = mod.intern_pool.stringToSlice(err_index);
try sema.errNote(block, src, msg, "'error.{s}' not a member of destination error set", .{err});
}
break;
},
.fn_var_args => |wanted_var_args| {
if (wanted_var_args) {
try sema.errNote(block, src, msg, "non-variadic function cannot cast into a variadic function", .{});
} else {
try sema.errNote(block, src, msg, "variadic function cannot cast into a non-variadic function", .{});
}
break;
},
.fn_generic => |wanted_generic| {
if (wanted_generic) {
try sema.errNote(block, src, msg, "non-generic function cannot cast into a generic function", .{});
} else {
try sema.errNote(block, src, msg, "generic function cannot cast into a non-generic function", .{});
}
break;
},
.fn_param_count => |lens| {
try sema.errNote(block, src, msg, "function with {d} parameters cannot cast into a function with {d} parameters", .{
lens.actual, lens.wanted,
});
break;
},
.fn_param_noalias => |param| {
var index: u6 = 0;
var actual_noalias = false;
while (true) : (index += 1) {
const actual = @truncate(u1, param.actual >> index);
const wanted = @truncate(u1, param.wanted >> index);
if (actual != wanted) {
actual_noalias = actual == 1;
break;
}
}
if (!actual_noalias) {
try sema.errNote(block, src, msg, "regular parameter {d} cannot cast into a noalias parameter", .{index});
} else {
try sema.errNote(block, src, msg, "noalias parameter {d} cannot cast into a regular parameter", .{index});
}
break;
},
.fn_param_comptime => |param| {
if (param.wanted) {
try sema.errNote(block, src, msg, "non-comptime parameter {d} cannot cast into a comptime parameter", .{param.index});
} else {
try sema.errNote(block, src, msg, "comptime parameter {d} cannot cast into a non-comptime parameter", .{param.index});
}
break;
},
.fn_param => |param| {
try sema.errNote(block, src, msg, "parameter {d} '{}' cannot cast into '{}'", .{
param.index, param.actual.fmt(sema.mod), param.wanted.fmt(sema.mod),
});
cur = param.child;
},
.fn_cc => |cc| {
try sema.errNote(block, src, msg, "calling convention '{s}' cannot cast into calling convention '{s}'", .{ @tagName(cc.actual), @tagName(cc.wanted) });
break;
},
.fn_return_type => |pair| {
try sema.errNote(block, src, msg, "return type '{}' cannot cast into return type '{}'", .{
pair.actual.fmt(sema.mod), pair.wanted.fmt(sema.mod),
});
cur = pair.child;
},
.ptr_child => |pair| {
try sema.errNote(block, src, msg, "pointer type child '{}' cannot cast into pointer type child '{}'", .{
pair.actual.fmt(sema.mod), pair.wanted.fmt(sema.mod),
});
cur = pair.child;
},
.ptr_addrspace => |@"addrspace"| {
try sema.errNote(block, src, msg, "address space '{s}' cannot cast into address space '{s}'", .{ @tagName(@"addrspace".actual), @tagName(@"addrspace".wanted) });
break;
},
.ptr_sentinel => |sentinel| {
if (sentinel.actual.ip_index != .unreachable_value) {
try sema.errNote(block, src, msg, "pointer sentinel '{}' cannot cast into pointer sentinel '{}'", .{
sentinel.actual.fmtValue(sentinel.ty, sema.mod), sentinel.wanted.fmtValue(sentinel.ty, sema.mod),
});
} else {
try sema.errNote(block, src, msg, "destination pointer requires '{}' sentinel", .{
sentinel.wanted.fmtValue(sentinel.ty, sema.mod),
});
}
break;
},
.ptr_size => |size| {
try sema.errNote(block, src, msg, "a {s} pointer cannot cast into a {s} pointer", .{ pointerSizeString(size.actual), pointerSizeString(size.wanted) });
break;
},
.ptr_qualifiers => |qualifiers| {
const ok_const = !qualifiers.actual_const or qualifiers.wanted_const;
const ok_volatile = !qualifiers.actual_volatile or qualifiers.wanted_volatile;
if (!ok_const) {
try sema.errNote(block, src, msg, "cast discards const qualifier", .{});
} else if (!ok_volatile) {
try sema.errNote(block, src, msg, "cast discards volatile qualifier", .{});
}
break;
},
.ptr_allowzero => |pair| {
const wanted_allow_zero = pair.wanted.ptrAllowsZero(mod);
const actual_allow_zero = pair.actual.ptrAllowsZero(mod);
if (actual_allow_zero and !wanted_allow_zero) {
try sema.errNote(block, src, msg, "'{}' could have null values which are illegal in type '{}'", .{
pair.actual.fmt(sema.mod), pair.wanted.fmt(sema.mod),
});
} else {
try sema.errNote(block, src, msg, "mutable '{}' allows illegal null values stored to type '{}'", .{
pair.actual.fmt(sema.mod), pair.wanted.fmt(sema.mod),
});
}
break;
},
.ptr_bit_range => |bit_range| {
if (bit_range.actual_host != bit_range.wanted_host) {
try sema.errNote(block, src, msg, "pointer host size '{}' cannot cast into pointer host size '{}'", .{
bit_range.actual_host, bit_range.wanted_host,
});
}
if (bit_range.actual_offset != bit_range.wanted_offset) {
try sema.errNote(block, src, msg, "pointer bit offset '{}' cannot cast into pointer bit offset '{}'", .{
bit_range.actual_offset, bit_range.wanted_offset,
});
}
break;
},
.ptr_alignment => |pair| {
try sema.errNote(block, src, msg, "pointer alignment '{}' cannot cast into pointer alignment '{}'", .{
pair.actual, pair.wanted,
});
break;
},
.double_ptr_to_anyopaque => |pair| {
try sema.errNote(block, src, msg, "cannot implicitly cast double pointer '{}' to anyopaque pointer '{}'", .{
pair.actual.fmt(sema.mod), pair.wanted.fmt(sema.mod),
});
break;
},
.slice_to_anyopaque => |pair| {
try sema.errNote(block, src, msg, "cannot implicitly cast slice '{}' to anyopaque pointer '{}'", .{
pair.actual.fmt(sema.mod), pair.wanted.fmt(sema.mod),
});
try sema.errNote(block, src, msg, "consider using '.ptr'", .{});
break;
},
};
}
};
fn pointerSizeString(size: std.builtin.Type.Pointer.Size) []const u8 {
return switch (size) {
.One => "single",
.Many => "many",
.C => "C",
.Slice => unreachable,
};
}
/// If pointers have the same representation in runtime memory, a bitcast AIR instruction
/// may be used for the coercion.
/// * `const` attribute can be gained
/// * `volatile` attribute can be gained
/// * `allowzero` attribute can be gained (whether from explicit attribute, C pointer, or optional pointer) but only if !dest_is_mut
/// * alignment can be decreased
/// * bit offset attributes must match exactly
/// * `*`/`[*]` must match exactly, but `[*c]` matches either one
/// * sentinel-terminated pointers can coerce into `[*]`
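/// For example (illustrative, not exhaustive):
/// * `*u32` to `*const u32`: allowed (const gained)
/// * `*align(8) u8` to `*align(4) u8`: allowed (alignment decreased)
/// * `[*:0]const u8` to `[*]const u8`: allowed (sentinel dropped)
/// * `*const u32` to `*u32`: not allowed (const discarded)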
fn coerceInMemoryAllowed(
sema: *Sema,
block: *Block,
dest_ty: Type,
src_ty: Type,
dest_is_mut: bool,
target: std.Target,
dest_src: LazySrcLoc,
src_src: LazySrcLoc,
) CompileError!InMemoryCoercionResult {
const mod = sema.mod;
if (dest_ty.eql(src_ty, mod))
return .ok;
// Differently-named integers with the same number of bits.
if (dest_ty.zigTypeTag(mod) == .Int and src_ty.zigTypeTag(mod) == .Int) {
const dest_info = dest_ty.intInfo(mod);
const src_info = src_ty.intInfo(mod);
if (dest_info.signedness == src_info.signedness and
dest_info.bits == src_info.bits)
{
return .ok;
}
if ((src_info.signedness == dest_info.signedness and dest_info.bits < src_info.bits) or
// small enough unsigned ints can get cast to large enough signed ints
(dest_info.signedness == .signed and (src_info.signedness == .unsigned or dest_info.bits <= src_info.bits)) or
(dest_info.signedness == .unsigned and src_info.signedness == .signed))
{
return InMemoryCoercionResult{ .int_not_coercible = .{
.actual_signedness = src_info.signedness,
.wanted_signedness = dest_info.signedness,
.actual_bits = src_info.bits,
.wanted_bits = dest_info.bits,
} };
}
}
// Differently-named floats with the same number of bits.
if (dest_ty.zigTypeTag(mod) == .Float and src_ty.zigTypeTag(mod) == .Float) {
const dest_bits = dest_ty.floatBits(target);
const src_bits = src_ty.floatBits(target);
if (dest_bits == src_bits) {
return .ok;
}
}
// Pointers / Pointer-like Optionals
const maybe_dest_ptr_ty = try sema.typePtrOrOptionalPtrTy(dest_ty);
const maybe_src_ptr_ty = try sema.typePtrOrOptionalPtrTy(src_ty);
if (maybe_dest_ptr_ty) |dest_ptr_ty| {
if (maybe_src_ptr_ty) |src_ptr_ty| {
return try sema.coerceInMemoryAllowedPtrs(block, dest_ty, src_ty, dest_ptr_ty, src_ptr_ty, dest_is_mut, target, dest_src, src_src);
}
}
// Slices
if (dest_ty.isSlice(mod) and src_ty.isSlice(mod)) {
return try sema.coerceInMemoryAllowedPtrs(block, dest_ty, src_ty, dest_ty, src_ty, dest_is_mut, target, dest_src, src_src);
}
const dest_tag = dest_ty.zigTypeTag(mod);
const src_tag = src_ty.zigTypeTag(mod);
// Functions
if (dest_tag == .Fn and src_tag == .Fn) {
return try sema.coerceInMemoryAllowedFns(block, dest_ty, src_ty, target, dest_src, src_src);
}
// Error Unions
if (dest_tag == .ErrorUnion and src_tag == .ErrorUnion) {
const dest_payload = dest_ty.errorUnionPayload(mod);
const src_payload = src_ty.errorUnionPayload(mod);
const child = try sema.coerceInMemoryAllowed(block, dest_payload, src_payload, dest_is_mut, target, dest_src, src_src);
if (child != .ok) {
return InMemoryCoercionResult{ .error_union_payload = .{
.child = try child.dupe(sema.arena),
.actual = src_payload,
.wanted = dest_payload,
} };
}
return try sema.coerceInMemoryAllowed(block, dest_ty.errorUnionSet(mod), src_ty.errorUnionSet(mod), dest_is_mut, target, dest_src, src_src);
}
// Error Sets
if (dest_tag == .ErrorSet and src_tag == .ErrorSet) {
return try sema.coerceInMemoryAllowedErrorSets(block, dest_ty, src_ty, dest_src, src_src);
}
// Arrays
if (dest_tag == .Array and src_tag == .Array) {
const dest_info = dest_ty.arrayInfo(mod);
const src_info = src_ty.arrayInfo(mod);
if (dest_info.len != src_info.len) {
return InMemoryCoercionResult{ .array_len = .{
.actual = src_info.len,
.wanted = dest_info.len,
} };
}
const child = try sema.coerceInMemoryAllowed(block, dest_info.elem_type, src_info.elem_type, dest_is_mut, target, dest_src, src_src);
if (child != .ok) {
return InMemoryCoercionResult{ .array_elem = .{
.child = try child.dupe(sema.arena),
.actual = src_info.elem_type,
.wanted = dest_info.elem_type,
} };
}
const ok_sent = dest_info.sentinel == null or
(src_info.sentinel != null and
dest_info.sentinel.?.eql(src_info.sentinel.?, dest_info.elem_type, mod));
if (!ok_sent) {
return InMemoryCoercionResult{ .array_sentinel = .{
.actual = src_info.sentinel orelse Value.@"unreachable",
.wanted = dest_info.sentinel orelse Value.@"unreachable",
.ty = dest_info.elem_type,
} };
}
return .ok;
}
// Vectors
if (dest_tag == .Vector and src_tag == .Vector) {
const dest_len = dest_ty.vectorLen(mod);
const src_len = src_ty.vectorLen(mod);
if (dest_len != src_len) {
return InMemoryCoercionResult{ .vector_len = .{
.actual = src_len,
.wanted = dest_len,
} };
}
const dest_elem_ty = dest_ty.scalarType(mod);
const src_elem_ty = src_ty.scalarType(mod);
const child = try sema.coerceInMemoryAllowed(block, dest_elem_ty, src_elem_ty, dest_is_mut, target, dest_src, src_src);
if (child != .ok) {
return InMemoryCoercionResult{ .vector_elem = .{
.child = try child.dupe(sema.arena),
.actual = src_elem_ty,
.wanted = dest_elem_ty,
} };
}
return .ok;
}
// Optionals
if (dest_tag == .Optional and src_tag == .Optional) {
if ((maybe_dest_ptr_ty != null) != (maybe_src_ptr_ty != null)) {
return InMemoryCoercionResult{ .optional_shape = .{
.actual = src_ty,
.wanted = dest_ty,
} };
}
const dest_child_type = dest_ty.optionalChild(mod);
const src_child_type = src_ty.optionalChild(mod);
const child = try sema.coerceInMemoryAllowed(block, dest_child_type, src_child_type, dest_is_mut, target, dest_src, src_src);
if (child != .ok) {
return InMemoryCoercionResult{ .optional_child = .{
.child = try child.dupe(sema.arena),
.actual = src_child_type,
.wanted = dest_child_type,
} };
}
return .ok;
}
return InMemoryCoercionResult{ .no_match = .{
.actual = dest_ty,
.wanted = src_ty,
} };
}
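/// Checks error set coercion: allowed when `dest_ty` is `anyerror` or every
/// error in `src_ty` is a member of `dest_ty`. Inferred error sets are
/// resolved only when cheaper checks cannot prove the coercion, to avoid
/// "unable to resolve error set" dependency loops.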
fn coerceInMemoryAllowedErrorSets(
sema: *Sema,
block: *Block,
dest_ty: Type,
src_ty: Type,
dest_src: LazySrcLoc,
src_src: LazySrcLoc,
) !InMemoryCoercionResult {
const mod = sema.mod;
const gpa = sema.gpa;
const ip = &mod.intern_pool;
// Coercion to `anyerror`. Note that this check can return false negatives
// in case the error sets did not get resolved.
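// e.g. an unresolved inferred error set may later resolve to `anyerror`;
// that case is re-checked after resolution below.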
if (dest_ty.isAnyError(mod)) {
return .ok;
}
if (mod.typeToInferredErrorSetIndex(dest_ty).unwrap()) |dst_ies_index| {
const dst_ies = mod.inferredErrorSetPtr(dst_ies_index);
// We will make an effort to return `ok` without resolving either error set, to
// avoid unnecessary "unable to resolve error set" dependency loop errors.
switch (src_ty.ip_index) {
.anyerror_type => {},
else => switch (ip.indexToKey(src_ty.ip_index)) {
.inferred_error_set_type => |src_index| {
// If both are inferred error sets of functions, and
// the dest includes the source function, the coercion is OK.
// This check is important because it works without forcing a full resolution
// of inferred error sets.
if (dst_ies.inferred_error_sets.contains(src_index)) {
return .ok;
}
},
.error_set_type => |error_set_type| {
for (error_set_type.names) |name| {
if (!dst_ies.errors.contains(name)) break;
} else return .ok;
},
else => unreachable,
},
}
if (dst_ies.func == sema.owner_func) {
// We are trying to coerce an error set to the current function's
// inferred error set.
try dst_ies.addErrorSet(src_ty, ip, gpa);
return .ok;
}
try sema.resolveInferredErrorSet(block, dest_src, dst_ies_index);
// isAnyError might have changed from a false negative to a true positive after resolution.
if (dest_ty.isAnyError(mod)) {
return .ok;
}
}
var missing_error_buf = std.ArrayList(InternPool.NullTerminatedString).init(gpa);
defer missing_error_buf.deinit();
switch (src_ty.ip_index) {
.anyerror_type => switch (ip.indexToKey(dest_ty.ip_index)) {
.inferred_error_set_type => unreachable, // Caught by dest_ty.isAnyError(mod) above.
.simple_type => unreachable, // filtered out above
.error_set_type => return .from_anyerror,
else => unreachable,
},
else => switch (ip.indexToKey(src_ty.ip_index)) {
.inferred_error_set_type => |src_index| {
const src_data = mod.inferredErrorSetPtr(src_index);
try sema.resolveInferredErrorSet(block, src_src, src_index);
// The src `anyerror` status might have changed after resolution.
if (src_ty.isAnyError(mod)) {
// dest_ty.isAnyError(mod) == true is already checked for at this point.
return .from_anyerror;
}
for (src_data.errors.keys()) |key| {
if (!Type.errorSetHasFieldIp(ip, dest_ty.toIntern(), key)) {
try missing_error_buf.append(key);
}
}
if (missing_error_buf.items.len != 0) {
return InMemoryCoercionResult{
.missing_error = try sema.arena.dupe(InternPool.NullTerminatedString, missing_error_buf.items),
};
}
return .ok;
},
.error_set_type => |error_set_type| {
for (error_set_type.names) |name| {
if (!Type.errorSetHasFieldIp(ip, dest_ty.toIntern(), name)) {
try missing_error_buf.append(name);
}
}
if (missing_error_buf.items.len != 0) {
return InMemoryCoercionResult{
.missing_error = try sema.arena.dupe(InternPool.NullTerminatedString, missing_error_buf.items),
};
}
return .ok;
},
else => unreachable,
},
}
unreachable;
}
fn coerceInMemoryAllowedFns(
sema: *Sema,
block: *Block,
dest_ty: Type,
src_ty: Type,
target: std.Target,
dest_src: LazySrcLoc,
src_src: LazySrcLoc,
) !InMemoryCoercionResult {
const mod = sema.mod;
const dest_info = mod.typeToFunc(dest_ty).?;
const src_info = mod.typeToFunc(src_ty).?;
if (dest_info.is_var_args != src_info.is_var_args) {
return InMemoryCoercionResult{ .fn_var_args = dest_info.is_var_args };
}
if (dest_info.is_generic != src_info.is_generic) {
return InMemoryCoercionResult{ .fn_generic = dest_info.is_generic };
}
if (dest_info.cc != src_info.cc) {
return InMemoryCoercionResult{ .fn_cc = .{
.actual = src_info.cc,
.wanted = dest_info.cc,
} };
}
if (src_info.return_type != .noreturn_type) {
const rt = try sema.coerceInMemoryAllowed(block, dest_info.return_type.toType(), src_info.return_type.toType(), false, target, dest_src, src_src);
if (rt != .ok) {
return InMemoryCoercionResult{ .fn_return_type = .{
.child = try rt.dupe(sema.arena),
.actual = src_info.return_type.toType(),
.wanted = dest_info.return_type.toType(),
} };
}
}
if (dest_info.param_types.len != src_info.param_types.len) {
return InMemoryCoercionResult{ .fn_param_count = .{
.actual = src_info.param_types.len,
.wanted = dest_info.param_types.len,
} };
}
if (dest_info.noalias_bits != src_info.noalias_bits) {
return InMemoryCoercionResult{ .fn_param_noalias = .{
.actual = src_info.noalias_bits,
.wanted = dest_info.noalias_bits,
} };
}
for (dest_info.param_types, 0..) |dest_param_ty, i| {
const src_param_ty = src_info.param_types[i].toType();
const i_small = @intCast(u5, i);
if (dest_info.paramIsComptime(i_small) != src_info.paramIsComptime(i_small)) {
return InMemoryCoercionResult{ .fn_param_comptime = .{
.index = i,
.wanted = dest_info.paramIsComptime(i_small),
} };
}
// Note: Cast direction is reversed here because parameter types are
// contravariant: the wanted parameter type must coerce to the actual one,
// e.g. a `fn (*const u8) void` may be used where a `fn (*u8) void` is wanted.
const param = try sema.coerceInMemoryAllowed(block, src_param_ty, dest_param_ty.toType(), false, target, dest_src, src_src);
if (param != .ok) {
return InMemoryCoercionResult{ .fn_param = .{
.child = try param.dupe(sema.arena),
.actual = src_param_ty,
.wanted = dest_param_ty.toType(),
.index = i,
} };
}
}
return .ok;
}
fn coerceInMemoryAllowedPtrs(
sema: *Sema,
block: *Block,
dest_ty: Type,
src_ty: Type,
dest_ptr_ty: Type,
src_ptr_ty: Type,
dest_is_mut: bool,
target: std.Target,
dest_src: LazySrcLoc,
src_src: LazySrcLoc,
) !InMemoryCoercionResult {
const mod = sema.mod;
const dest_info = dest_ptr_ty.ptrInfo(mod);
const src_info = src_ptr_ty.ptrInfo(mod);
const ok_ptr_size = src_info.size == dest_info.size or
src_info.size == .C or dest_info.size == .C;
if (!ok_ptr_size) {
return InMemoryCoercionResult{ .ptr_size = .{
.actual = src_info.size,
.wanted = dest_info.size,
} };
}
const ok_cv_qualifiers =
(src_info.mutable or !dest_info.mutable) and
(!src_info.@"volatile" or dest_info.@"volatile");
if (!ok_cv_qualifiers) {
return InMemoryCoercionResult{ .ptr_qualifiers = .{
.actual_const = !src_info.mutable,
.wanted_const = !dest_info.mutable,
.actual_volatile = src_info.@"volatile",
.wanted_volatile = dest_info.@"volatile",
} };
}
if (dest_info.@"addrspace" != src_info.@"addrspace") {
return InMemoryCoercionResult{ .ptr_addrspace = .{
.actual = src_info.@"addrspace",
.wanted = dest_info.@"addrspace",
} };
}
const child = try sema.coerceInMemoryAllowed(block, dest_info.pointee_type, src_info.pointee_type, dest_info.mutable, target, dest_src, src_src);
if (child != .ok) {
return InMemoryCoercionResult{ .ptr_child = .{
.child = try child.dupe(sema.arena),
.actual = src_info.pointee_type,
.wanted = dest_info.pointee_type,
} };
}
const dest_allow_zero = dest_ty.ptrAllowsZero(mod);
const src_allow_zero = src_ty.ptrAllowsZero(mod);
const ok_allows_zero = (dest_allow_zero and
(src_allow_zero or !dest_is_mut)) or
(!dest_allow_zero and !src_allow_zero);
if (!ok_allows_zero) {
return InMemoryCoercionResult{ .ptr_allowzero = .{
.actual = src_ty,
.wanted = dest_ty,
} };
}
if (src_info.host_size != dest_info.host_size or
src_info.bit_offset != dest_info.bit_offset)
{
return InMemoryCoercionResult{ .ptr_bit_range = .{
.actual_host = src_info.host_size,
.wanted_host = dest_info.host_size,
.actual_offset = src_info.bit_offset,
.wanted_offset = dest_info.bit_offset,
} };
}
const ok_sent = dest_info.sentinel == null or src_info.size == .C or
(src_info.sentinel != null and
dest_info.sentinel.?.eql(src_info.sentinel.?, dest_info.pointee_type, sema.mod));
if (!ok_sent) {
return InMemoryCoercionResult{ .ptr_sentinel = .{
.actual = src_info.sentinel orelse Value.@"unreachable",
.wanted = dest_info.sentinel orelse Value.@"unreachable",
.ty = dest_info.pointee_type,
} };
}
// If both pointers have alignment 0, it means they both want ABI alignment.
// In this case, if they share the same child type, no need to resolve
// pointee type alignment. Otherwise both pointee types must have their alignment
// resolved and we compare the alignment numerically.
alignment: {
if (src_info.@"align" == 0 and dest_info.@"align" == 0 and
dest_info.pointee_type.eql(src_info.pointee_type, sema.mod))
{
break :alignment;
}
const src_align = if (src_info.@"align" != 0)
src_info.@"align"
else
src_info.pointee_type.abiAlignment(mod);
const dest_align = if (dest_info.@"align" != 0)
dest_info.@"align"
else
dest_info.pointee_type.abiAlignment(mod);
if (dest_align > src_align) {
return InMemoryCoercionResult{ .ptr_alignment = .{
.actual = src_align,
.wanted = dest_align,
} };
}
break :alignment;
}
return .ok;
}
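// What the variadic-argument rules below mean at the call site (illustrative;
// assumes a C target where `double` is 64 bits):
//
//     extern fn printf(fmt: [*:0]const u8, ...) c_int;
//     // _ = printf("%d\n", 42); // error: literal must be given a fixed-size type
//     _ = printf("%d\n", @as(c_int, 42)); // ok
//     _ = printf("%f\n", @as(f32, 1.5)); // promoted to f64 before the call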
fn coerceVarArgParam(
sema: *Sema,
block: *Block,
inst: Air.Inst.Ref,
inst_src: LazySrcLoc,
) !Air.Inst.Ref {
if (block.is_typeof) return inst;
const mod = sema.mod;
const uncasted_ty = sema.typeOf(inst);
const coerced = switch (uncasted_ty.zigTypeTag(mod)) {
// TODO consider casting to c_int/f64 if they fit
.ComptimeInt, .ComptimeFloat => return sema.fail(
block,
inst_src,
"integer and float literals passed to variadic function must be casted to a fixed-size number type",
.{},
),
.Fn => blk: {
const fn_val = try sema.resolveConstValue(block, .unneeded, inst, "");
const fn_decl = fn_val.pointerDecl().?;
break :blk try sema.analyzeDeclRef(fn_decl);
},
.Array => return sema.fail(block, inst_src, "arrays must be passed by reference to variadic function", .{}),
.Float => float: {
const target = sema.mod.getTarget();
const double_bits = target.c_type_bit_size(.double);
const inst_bits = uncasted_ty.floatBits(sema.mod.getTarget());
if (inst_bits >= double_bits) break :float inst;
switch (double_bits) {
32 => break :float try sema.coerce(block, Type.f32, inst, inst_src),
64 => break :float try sema.coerce(block, Type.f64, inst, inst_src),
else => unreachable,
}
},
else => inst,
};
const coerced_ty = sema.typeOf(coerced);
if (!try sema.validateExternType(coerced_ty, .param_ty)) {
const msg = msg: {
const msg = try sema.errMsg(block, inst_src, "cannot pass '{}' to variadic function", .{coerced_ty.fmt(sema.mod)});
errdefer msg.destroy(sema.gpa);
const src_decl = sema.mod.declPtr(block.src_decl);
try sema.explainWhyTypeIsNotExtern(msg, inst_src.toSrcLoc(src_decl, mod), coerced_ty, .param_ty);
try sema.addDeclaredHereNote(msg, coerced_ty);
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
return coerced;
}
// TODO migrate callsites to use storePtr2 instead.
fn storePtr(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
ptr: Air.Inst.Ref,
uncasted_operand: Air.Inst.Ref,
) CompileError!void {
const air_tag: Air.Inst.Tag = if (block.wantSafety()) .store_safe else .store;
return sema.storePtr2(block, src, ptr, src, uncasted_operand, src, air_tag);
}
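// Sketch of the tuple-operand special case in storePtr2 below (illustrative;
// `f` and `g` are hypothetical runtime-known expressions): the store is
// decomposed into per-element stores instead of materializing a temporary.
//
//     var arr: [2]u32 = undefined;
//     arr = .{ f(), g() }; // lowered as two element stores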
fn storePtr2(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
ptr: Air.Inst.Ref,
ptr_src: LazySrcLoc,
uncasted_operand: Air.Inst.Ref,
operand_src: LazySrcLoc,
air_tag: Air.Inst.Tag,
) CompileError!void {
const mod = sema.mod;
const ptr_ty = sema.typeOf(ptr);
if (ptr_ty.isConstPtr(mod))
return sema.fail(block, ptr_src, "cannot assign to constant", .{});
const elem_ty = ptr_ty.childType(mod);
// To generate better code for tuples, we detect a tuple operand here, and
// analyze field loads and stores directly. This avoids an extra allocation + memcpy
// which would occur if we used `coerce`.
// However, we avoid this mechanism if the destination element type is a tuple,
// because the regular store will be better for this case.
// If the destination type is a struct we don't want this mechanism to trigger, because
// this code does not handle tuple-to-struct coercion which requires dealing with missing
// fields.
const operand_ty = sema.typeOf(uncasted_operand);
if (operand_ty.isTuple(mod) and elem_ty.zigTypeTag(mod) == .Array) {
const field_count = operand_ty.structFieldCount(mod);
var i: u32 = 0;
while (i < field_count) : (i += 1) {
const elem_src = operand_src; // TODO better source location
const elem = try sema.tupleField(block, operand_src, uncasted_operand, elem_src, i);
const elem_index = try sema.addIntUnsigned(Type.usize, i);
const elem_ptr = try sema.elemPtr(block, ptr_src, ptr, elem_index, elem_src, false, true);
try sema.storePtr2(block, src, elem_ptr, elem_src, elem, elem_src, .store);
}
return;
}
// TODO do the same thing for anon structs as for tuples above.
// However, beware of the need to handle missing/extra fields.
const is_ret = air_tag == .ret_ptr;
// Detect if we are storing an array operand to a bitcasted vector pointer.
// If so, we instead reach through the bitcasted pointer to the vector pointer,
// bitcast the array operand to a vector, and then lower this as a store of
// a vector value to a vector pointer. This generally results in better code,
// as well as working around an LLVM bug:
// https://github.com/ziglang/zig/issues/11154
if (sema.obtainBitCastedVectorPtr(ptr)) |vector_ptr| {
const vector_ty = sema.typeOf(vector_ptr).childType(mod);
const vector = sema.coerceExtra(block, vector_ty, uncasted_operand, operand_src, .{ .is_ret = is_ret }) catch |err| switch (err) {
error.NotCoercible => unreachable,
else => |e| return e,
};
try sema.storePtr2(block, src, vector_ptr, ptr_src, vector, operand_src, .store);
return;
}
const operand = sema.coerceExtra(block, elem_ty, uncasted_operand, operand_src, .{ .is_ret = is_ret }) catch |err| switch (err) {
error.NotCoercible => unreachable,
else => |e| return e,
};
const maybe_operand_val = try sema.resolveMaybeUndefVal(operand);
const runtime_src = if (try sema.resolveDefinedValue(block, ptr_src, ptr)) |ptr_val| rs: {
const operand_val = maybe_operand_val orelse {
try sema.checkPtrIsNotComptimeMutable(block, ptr_val, ptr_src, operand_src);
break :rs operand_src;
};
if (ptr_val.isComptimeMutablePtr()) {
try sema.storePtrVal(block, src, ptr_val, operand_val, elem_ty);
return;
} else break :rs ptr_src;
} else ptr_src;
// We do this after the possible comptime store above, for the case of field_ptr
// stores to unions, because we want the comptime tag to be set even if the
// field type is void.
if ((try sema.typeHasOnePossibleValue(elem_ty)) != null)
return;
if (air_tag == .bitcast) {
// `air_tag == .bitcast` is used as a special case for `zirCoerceResultPtr`
// to avoid calling `requireRuntimeBlock` for the dummy block.
_ = try block.addBinOp(.store, ptr, operand);
return;
}
try sema.requireRuntimeBlock(block, src, runtime_src);
try sema.queueFullTypeResolution(elem_ty);
if (ptr_ty.ptrInfo(mod).vector_index == .runtime) {
const ptr_inst = Air.refToIndex(ptr).?;
const air_tags = sema.air_instructions.items(.tag);
if (air_tags[ptr_inst] == .ptr_elem_ptr) {
const ty_pl = sema.air_instructions.items(.data)[ptr_inst].ty_pl;
const bin_op = sema.getTmpAir().extraData(Air.Bin, ty_pl.payload).data;
_ = try block.addInst(.{
.tag = .vector_store_elem,
.data = .{ .vector_store_elem = .{
.vector_ptr = bin_op.lhs,
.payload = try block.sema.addExtra(Air.Bin{
.lhs = bin_op.rhs,
.rhs = operand,
}),
} },
});
return;
}
return sema.fail(block, ptr_src, "unable to determine vector element index of type '{}'", .{
ptr_ty.fmt(sema.mod),
});
}
if (is_ret) {
_ = try block.addBinOp(.store, ptr, operand);
} else {
_ = try block.addBinOp(air_tag, ptr, operand);
}
}
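// User code that benefits from the vector-store rewrite above (illustrative;
// assumes the pointer cast lowers to a `bitcast` AIR instruction):
//
//     var v: @Vector(4, u8) = undefined;
//     const p = @ptrCast(*[4]u8, &v);
//     p.* = .{ 1, 2, 3, 4 }; // stored as one vector store, see issue #11154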
/// Traverse an arbitrary number of bitcasted pointers and return the underlying
/// vector pointer, but only if the final element type matches the vector element
/// type and the lengths match.
fn obtainBitCastedVectorPtr(sema: *Sema, ptr: Air.Inst.Ref) ?Air.Inst.Ref {
const mod = sema.mod;
const array_ty = sema.typeOf(ptr).childType(mod);
if (array_ty.zigTypeTag(mod) != .Array) return null;
var ptr_inst = Air.refToIndex(ptr) orelse return null;
const air_datas = sema.air_instructions.items(.data);
const air_tags = sema.air_instructions.items(.tag);
const prev_ptr = while (air_tags[ptr_inst] == .bitcast) {
const prev_ptr = air_datas[ptr_inst].ty_op.operand;
const prev_ptr_ty = sema.typeOf(prev_ptr);
if (prev_ptr_ty.zigTypeTag(mod) != .Pointer) return null;
// TODO: I noticed that the behavior tests do not pass if these two
// checks are missing. I don't understand why the presence of inferred
// allocations is relevant to this function, or why it would have
// different behavior depending on whether the types were inferred.
// Something seems wrong here.
if (prev_ptr_ty.ip_index == .none) {
if (prev_ptr_ty.tag() == .inferred_alloc_mut) return null;
if (prev_ptr_ty.tag() == .inferred_alloc_const) return null;
}
const prev_ptr_child_ty = prev_ptr_ty.childType(mod);
if (prev_ptr_child_ty.zigTypeTag(mod) == .Vector) break prev_ptr;
ptr_inst = Air.refToIndex(prev_ptr) orelse return null;
} else return null;
// We have a pointer-to-array and a pointer-to-vector. If the elements and
// lengths match, return the result.
const vector_ty = sema.typeOf(prev_ptr).childType(mod);
if (array_ty.childType(mod).eql(vector_ty.childType(mod), sema.mod) and
array_ty.arrayLen(mod) == vector_ty.vectorLen(mod))
{
return prev_ptr;
} else {
return null;
}
}
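// The kind of code that reaches storePtrVal below (illustrative): the store
// target is a comptime-mutable value, so no AIR store is emitted.
//
//     comptime {
//         var x: u32 = 1;
//         const p = &x;
//         p.* = 2; // mutates the comptime Value in place
//     }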
/// Call when you have Value objects rather than Air instructions, and you want to
/// assert the store must be done at comptime.
fn storePtrVal(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
ptr_val: Value,
operand_val: Value,
operand_ty: Type,
) !void {
const mod = sema.mod;
var mut_kit = try sema.beginComptimePtrMutation(block, src, ptr_val, operand_ty);
try sema.checkComptimeVarStore(block, src, mut_kit.decl_ref_mut);
switch (mut_kit.pointee) {
.direct => |val_ptr| {
if (mut_kit.decl_ref_mut.runtime_index == .comptime_field_ptr) {
if (!operand_val.eql(val_ptr.*, operand_ty, sema.mod)) {
// TODO use failWithInvalidComptimeFieldStore
return sema.fail(block, src, "value stored in comptime field does not match the default value of the field", .{});
}
return;
}
const arena = mut_kit.beginArena(sema.mod);
defer mut_kit.finishArena(sema.mod);
val_ptr.* = try operand_val.copy(arena);
},
.reinterpret => |reinterpret| {
const abi_size = try sema.usizeCast(block, src, mut_kit.ty.abiSize(mod));
const buffer = try sema.gpa.alloc(u8, abi_size);
defer sema.gpa.free(buffer);
reinterpret.val_ptr.*.writeToMemory(mut_kit.ty, sema.mod, buffer) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.ReinterpretDeclRef => unreachable,
error.IllDefinedMemoryLayout => unreachable, // Sema was supposed to emit a compile error already
error.Unimplemented => return sema.fail(block, src, "TODO: implement writeToMemory for type '{}'", .{mut_kit.ty.fmt(sema.mod)}),
};
operand_val.writeToMemory(operand_ty, sema.mod, buffer[reinterpret.byte_offset..]) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.ReinterpretDeclRef => unreachable,
error.IllDefinedMemoryLayout => unreachable, // Sema was supposed to emit a compile error already
error.Unimplemented => return sema.fail(block, src, "TODO: implement writeToMemory for type '{}'", .{mut_kit.ty.fmt(sema.mod)}),
};
const arena = mut_kit.beginArena(sema.mod);
defer mut_kit.finishArena(sema.mod);
reinterpret.val_ptr.* = try Value.readFromMemory(mut_kit.ty, sema.mod, buffer, arena);
},
.bad_decl_ty, .bad_ptr_ty => {
// TODO show the decl declaration site in a note and explain whether the decl
// or the pointer is the problematic type
return sema.fail(block, src, "comptime mutation of a reinterpreted pointer requires type '{}' to have a well-defined memory layout", .{mut_kit.ty.fmt(sema.mod)});
},
}
}
const ComptimePtrMutationKit = struct {
decl_ref_mut: Value.Payload.DeclRefMut.Data,
pointee: union(enum) {
/// The pointer type matches the actual comptime Value so a direct
/// modification is possible.
direct: *Value,
/// The largest parent Value containing pointee and having a well-defined memory layout.
/// This is used for bitcasting, if direct dereferencing failed.
reinterpret: struct {
val_ptr: *Value,
byte_offset: usize,
},
/// The root decl could not be used as parent: `ty` is the type that caused
/// that by not having a well-defined layout.
/// This one means the Decl that owns the value being modified does not
/// have a well-defined memory layout.
bad_decl_ty,
/// The root decl could not be used as parent: `ty` is the type that caused
/// that by not having a well-defined layout.
/// This one means the pointer type that is being stored through does not
/// have a well-defined memory layout.
bad_ptr_ty,
},
ty: Type,
decl_arena: std.heap.ArenaAllocator = undefined,
fn beginArena(self: *ComptimePtrMutationKit, mod: *Module) Allocator {
const decl = mod.declPtr(self.decl_ref_mut.decl_index);
return decl.value_arena.?.acquire(mod.gpa, &self.decl_arena);
}
fn finishArena(self: *ComptimePtrMutationKit, mod: *Module) void {
const decl = mod.declPtr(self.decl_ref_mut.decl_index);
decl.value_arena.?.release(&self.decl_arena);
self.decl_arena = undefined;
}
};
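// Example of the representation changes performed below (illustrative):
// writing one element of a comptime array that is still `undef` first expands
// the array into per-element Values.
//
//     comptime {
//         var arr: [4]u8 = undefined; // held as a single `undef` Value
//         arr[1] = 7; // expands to 4 element Values, then writes index 1
//     }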
fn beginComptimePtrMutation(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
ptr_val: Value,
ptr_elem_ty: Type,
) CompileError!ComptimePtrMutationKit {
const mod = sema.mod;
switch (ptr_val.tag()) {
.decl_ref_mut => {
const decl_ref_mut = ptr_val.castTag(.decl_ref_mut).?.data;
const decl = sema.mod.declPtr(decl_ref_mut.decl_index);
return sema.beginComptimePtrMutationInner(block, src, decl.ty, &decl.val, ptr_elem_ty, decl_ref_mut);
},
.comptime_field_ptr => {
const payload = ptr_val.castTag(.comptime_field_ptr).?.data;
const duped = try sema.arena.create(Value);
duped.* = payload.field_val;
return sema.beginComptimePtrMutationInner(block, src, payload.field_ty, duped, ptr_elem_ty, .{
.decl_index = @intToEnum(Module.Decl.Index, 0),
.runtime_index = .comptime_field_ptr,
});
},
.elem_ptr => {
const elem_ptr = ptr_val.castTag(.elem_ptr).?.data;
var parent = try sema.beginComptimePtrMutation(block, src, elem_ptr.array_ptr, elem_ptr.elem_ty);
switch (parent.pointee) {
.direct => |val_ptr| switch (parent.ty.zigTypeTag(mod)) {
.Array, .Vector => {
const check_len = parent.ty.arrayLenIncludingSentinel(mod);
if (elem_ptr.index >= check_len) {
// TODO have the parent include the decl so we can say "declared here"
return sema.fail(block, src, "comptime store of index {d} out of bounds of array length {d}", .{
elem_ptr.index, check_len,
});
}
const elem_ty = parent.ty.childType(mod);
// We might have a pointer to multiple elements of the array (e.g. a pointer
// to a sub-array). In this case, we just have to reinterpret the relevant
// bytes of the whole array rather than any single element.
const elem_abi_size_u64 = try sema.typeAbiSize(elem_ptr.elem_ty);
if (elem_abi_size_u64 < try sema.typeAbiSize(ptr_elem_ty)) {
const elem_abi_size = try sema.usizeCast(block, src, elem_abi_size_u64);
return .{
.decl_ref_mut = parent.decl_ref_mut,
.pointee = .{ .reinterpret = .{
.val_ptr = val_ptr,
.byte_offset = elem_abi_size * elem_ptr.index,
} },
.ty = parent.ty,
};
}
switch (val_ptr.ip_index) {
.undef => {
// An array has been initialized to undefined at comptime and now we are
// setting an element for the first time. We must change the representation
// of the array from `undef` to `aggregate`.
const arena = parent.beginArena(sema.mod);
defer parent.finishArena(sema.mod);
const array_len_including_sentinel =
try sema.usizeCast(block, src, parent.ty.arrayLenIncludingSentinel(mod));
const elems = try arena.alloc(Value, array_len_including_sentinel);
@memset(elems, Value.undef);
val_ptr.* = try Value.Tag.aggregate.create(arena, elems);
return beginComptimePtrMutationInner(
sema,
block,
src,
elem_ty,
&elems[elem_ptr.index],
ptr_elem_ty,
parent.decl_ref_mut,
);
},
.none => switch (val_ptr.tag()) {
.bytes => {
// An array is memory-optimized to store a slice of bytes, but we are about
// to modify an individual element and the representation has to change.
// If we wanted to avoid this, there would need to be special detection
// elsewhere to identify when writing a value to an array element that is stored
// using the `bytes` tag, and handle it without making a call to this function.
const arena = parent.beginArena(sema.mod);
defer parent.finishArena(sema.mod);
const bytes = val_ptr.castTag(.bytes).?.data;
const dest_len = parent.ty.arrayLenIncludingSentinel(mod);
// bytes.len may be one greater than dest_len because of the case when
// assigning `[N:S]T` to `[N]T`. This is allowed; the sentinel is omitted.
assert(bytes.len >= dest_len);
const elems = try arena.alloc(Value, @intCast(usize, dest_len));
for (elems, 0..) |*elem, i| {
elem.* = try mod.intValue(elem_ty, bytes[i]);
}
val_ptr.* = try Value.Tag.aggregate.create(arena, elems);
return beginComptimePtrMutationInner(
sema,
block,
src,
elem_ty,
&elems[elem_ptr.index],
ptr_elem_ty,
parent.decl_ref_mut,
);
},
.str_lit => {
// An array is memory-optimized to store a slice of bytes, but we are about
// to modify an individual element and the representation has to change.
// If we wanted to avoid this, there would need to be special detection
// elsewhere to identify when writing a value to an array element that is stored
// using the `str_lit` tag, and handle it without making a call to this function.
const arena = parent.beginArena(sema.mod);
defer parent.finishArena(sema.mod);
const str_lit = val_ptr.castTag(.str_lit).?.data;
const dest_len = parent.ty.arrayLenIncludingSentinel(mod);
const bytes = sema.mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len];
const elems = try arena.alloc(Value, @intCast(usize, dest_len));
for (bytes, 0..) |byte, i| {
elems[i] = try mod.intValue(elem_ty, byte);
}
if (parent.ty.sentinel(mod)) |sent_val| {
assert(elems.len == bytes.len + 1);
elems[bytes.len] = sent_val;
}
val_ptr.* = try Value.Tag.aggregate.create(arena, elems);
return beginComptimePtrMutationInner(
sema,
block,
src,
elem_ty,
&elems[elem_ptr.index],
ptr_elem_ty,
parent.decl_ref_mut,
);
},
.repeated => {
// An array is memory-optimized to store only a single element value, and
// that value is understood to be the same for the entire length of the array.
// However, now we want to modify an individual element and so the
// representation has to change. If we wanted to avoid this, there would
// need to be special detection elsewhere to identify when writing a value to an
// array element that is stored using the `repeated` tag, and handle it
// without making a call to this function.
const arena = parent.beginArena(sema.mod);
defer parent.finishArena(sema.mod);
const repeated_val = try val_ptr.castTag(.repeated).?.data.copy(arena);
const array_len_including_sentinel =
try sema.usizeCast(block, src, parent.ty.arrayLenIncludingSentinel(mod));
const elems = try arena.alloc(Value, array_len_including_sentinel);
if (elems.len > 0) elems[0] = repeated_val;
for (elems[1..]) |*elem| {
elem.* = try repeated_val.copy(arena);
}
val_ptr.* = try Value.Tag.aggregate.create(arena, elems);
return beginComptimePtrMutationInner(
sema,
block,
src,
elem_ty,
&elems[elem_ptr.index],
ptr_elem_ty,
parent.decl_ref_mut,
);
},
.aggregate => return beginComptimePtrMutationInner(
sema,
block,
src,
elem_ty,
&val_ptr.castTag(.aggregate).?.data[elem_ptr.index],
ptr_elem_ty,
parent.decl_ref_mut,
),
.the_only_possible_value => {
const duped = try sema.arena.create(Value);
duped.* = Value.initTag(.the_only_possible_value);
return beginComptimePtrMutationInner(
sema,
block,
src,
elem_ty,
duped,
ptr_elem_ty,
parent.decl_ref_mut,
);
},
else => unreachable,
},
else => unreachable,
}
},
else => {
if (elem_ptr.index != 0) {
// TODO include a "declared here" note for the decl
return sema.fail(block, src, "out of bounds comptime store of index {d}", .{
elem_ptr.index,
});
}
return beginComptimePtrMutationInner(
sema,
block,
src,
parent.ty,
val_ptr,
ptr_elem_ty,
parent.decl_ref_mut,
);
},
},
.reinterpret => |reinterpret| {
if (!elem_ptr.elem_ty.hasWellDefinedLayout(mod)) {
// Even though the parent value type has a well-defined memory layout, our
// pointer type does not.
return ComptimePtrMutationKit{
.decl_ref_mut = parent.decl_ref_mut,
.pointee = .bad_ptr_ty,
.ty = elem_ptr.elem_ty,
};
}
const elem_abi_size_u64 = try sema.typeAbiSize(elem_ptr.elem_ty);
const elem_abi_size = try sema.usizeCast(block, src, elem_abi_size_u64);
return ComptimePtrMutationKit{
.decl_ref_mut = parent.decl_ref_mut,
.pointee = .{ .reinterpret = .{
.val_ptr = reinterpret.val_ptr,
.byte_offset = reinterpret.byte_offset + elem_abi_size * elem_ptr.index,
} },
.ty = parent.ty,
};
},
.bad_decl_ty, .bad_ptr_ty => return parent,
}
},
.field_ptr => {
const field_ptr = ptr_val.castTag(.field_ptr).?.data;
const field_index = @intCast(u32, field_ptr.field_index);
var parent = try sema.beginComptimePtrMutation(block, src, field_ptr.container_ptr, field_ptr.container_ty);
switch (parent.pointee) {
.direct => |val_ptr| switch (val_ptr.ip_index) {
.undef => {
// A struct or union has been initialized to undefined at comptime and now we
// are setting a field for the first time. We must change the representation
// of the struct/union from `undef` to `aggregate`/`union`.
const arena = parent.beginArena(sema.mod);
defer parent.finishArena(sema.mod);
switch (parent.ty.zigTypeTag(mod)) {
.Struct => {
const fields = try arena.alloc(Value, parent.ty.structFieldCount(mod));
@memset(fields, Value.undef);
val_ptr.* = try Value.Tag.aggregate.create(arena, fields);
return beginComptimePtrMutationInner(
sema,
block,
src,
parent.ty.structFieldType(field_index, mod),
&fields[field_index],
ptr_elem_ty,
parent.decl_ref_mut,
);
},
.Union => {
const payload = try arena.create(Value.Payload.Union);
const tag_ty = parent.ty.unionTagTypeHypothetical(mod);
payload.* = .{ .data = .{
.tag = try mod.enumValueFieldIndex(tag_ty, field_index),
.val = Value.undef,
} };
val_ptr.* = Value.initPayload(&payload.base);
return beginComptimePtrMutationInner(
sema,
block,
src,
parent.ty.structFieldType(field_index, mod),
&payload.data.val,
ptr_elem_ty,
parent.decl_ref_mut,
);
},
.Pointer => {
assert(parent.ty.isSlice(mod));
val_ptr.* = try Value.Tag.slice.create(arena, .{
.ptr = Value.undef,
.len = Value.undef,
});
switch (field_index) {
Value.Payload.Slice.ptr_index => return beginComptimePtrMutationInner(
sema,
block,
src,
parent.ty.slicePtrFieldType(mod),
&val_ptr.castTag(.slice).?.data.ptr,
ptr_elem_ty,
parent.decl_ref_mut,
),
Value.Payload.Slice.len_index => return beginComptimePtrMutationInner(
sema,
block,
src,
Type.usize,
&val_ptr.castTag(.slice).?.data.len,
ptr_elem_ty,
parent.decl_ref_mut,
),
else => unreachable,
}
},
else => unreachable,
}
},
.empty_struct => {
const duped = try sema.arena.create(Value);
duped.* = Value.initTag(.the_only_possible_value);
return beginComptimePtrMutationInner(
sema,
block,
src,
parent.ty.structFieldType(field_index, mod),
duped,
ptr_elem_ty,
parent.decl_ref_mut,
);
},
.none => switch (val_ptr.tag()) {
.aggregate => return beginComptimePtrMutationInner(
sema,
block,
src,
parent.ty.structFieldType(field_index, mod),
&val_ptr.castTag(.aggregate).?.data[field_index],
ptr_elem_ty,
parent.decl_ref_mut,
),
.repeated => {
const arena = parent.beginArena(sema.mod);
defer parent.finishArena(sema.mod);
const elems = try arena.alloc(Value, parent.ty.structFieldCount(mod));
@memset(elems, val_ptr.castTag(.repeated).?.data);
val_ptr.* = try Value.Tag.aggregate.create(arena, elems);
return beginComptimePtrMutationInner(
sema,
block,
src,
parent.ty.structFieldType(field_index, mod),
&elems[field_index],
ptr_elem_ty,
parent.decl_ref_mut,
);
},
.@"union" => {
// We need to set the active field of the union.
const union_tag_ty = field_ptr.container_ty.unionTagTypeHypothetical(mod);
const payload = &val_ptr.castTag(.@"union").?.data;
payload.tag = try mod.enumValueFieldIndex(union_tag_ty, field_index);
return beginComptimePtrMutationInner(
sema,
block,
src,
parent.ty.structFieldType(field_index, mod),
&payload.val,
ptr_elem_ty,
parent.decl_ref_mut,
);
},
.slice => switch (field_index) {
Value.Payload.Slice.ptr_index => return beginComptimePtrMutationInner(
sema,
block,
src,
parent.ty.slicePtrFieldType(mod),
&val_ptr.castTag(.slice).?.data.ptr,
ptr_elem_ty,
parent.decl_ref_mut,
),
Value.Payload.Slice.len_index => return beginComptimePtrMutationInner(
sema,
block,
src,
Type.usize,
&val_ptr.castTag(.slice).?.data.len,
ptr_elem_ty,
parent.decl_ref_mut,
),
else => unreachable,
},
else => unreachable,
},
else => unreachable,
},
.reinterpret => |reinterpret| {
const field_offset_u64 = field_ptr.container_ty.structFieldOffset(field_index, mod);
const field_offset = try sema.usizeCast(block, src, field_offset_u64);
return ComptimePtrMutationKit{
.decl_ref_mut = parent.decl_ref_mut,
.pointee = .{ .reinterpret = .{
.val_ptr = reinterpret.val_ptr,
.byte_offset = reinterpret.byte_offset + field_offset,
} },
.ty = parent.ty,
};
},
.bad_decl_ty, .bad_ptr_ty => return parent,
}
},
.eu_payload_ptr => {
const eu_ptr = ptr_val.castTag(.eu_payload_ptr).?.data;
var parent = try sema.beginComptimePtrMutation(block, src, eu_ptr.container_ptr, eu_ptr.container_ty);
switch (parent.pointee) {
.direct => |val_ptr| {
const payload_ty = parent.ty.errorUnionPayload(mod);
if (val_ptr.ip_index == .none and val_ptr.tag() == .eu_payload) {
return ComptimePtrMutationKit{
.decl_ref_mut = parent.decl_ref_mut,
.pointee = .{ .direct = &val_ptr.castTag(.eu_payload).?.data },
.ty = payload_ty,
};
} else {
// An error union has been initialized to undefined at comptime and now we
// are setting the payload for the first time. We must change the
// representation of the error union from `undef` to `eu_payload`.
const arena = parent.beginArena(sema.mod);
defer parent.finishArena(sema.mod);
const payload = try arena.create(Value.Payload.SubValue);
payload.* = .{
.base = .{ .tag = .eu_payload },
.data = Value.undef,
};
val_ptr.* = Value.initPayload(&payload.base);
return ComptimePtrMutationKit{
.decl_ref_mut = parent.decl_ref_mut,
.pointee = .{ .direct = &payload.data },
.ty = payload_ty,
};
}
},
.bad_decl_ty, .bad_ptr_ty => return parent,
// Even though the parent value type has a well-defined memory layout, our
// pointer type does not.
.reinterpret => return ComptimePtrMutationKit{
.decl_ref_mut = parent.decl_ref_mut,
.pointee = .bad_ptr_ty,
.ty = eu_ptr.container_ty,
},
}
},
.opt_payload_ptr => {
const opt_ptr = if (ptr_val.castTag(.opt_payload_ptr)) |some| some.data else {
return sema.beginComptimePtrMutation(block, src, ptr_val, ptr_elem_ty.optionalChild(mod));
};
var parent = try sema.beginComptimePtrMutation(block, src, opt_ptr.container_ptr, opt_ptr.container_ty);
switch (parent.pointee) {
.direct => |val_ptr| {
const payload_ty = parent.ty.optionalChild(mod);
switch (val_ptr.ip_index) {
.undef, .null_value => {
// An optional has been initialized to undefined or null at comptime and now
// we are setting the payload for the first time. We must change the
// representation of the optional from `undef`/`null` to `opt_payload`.
const arena = parent.beginArena(sema.mod);
defer parent.finishArena(sema.mod);
const payload = try arena.create(Value.Payload.SubValue);
payload.* = .{
.base = .{ .tag = .opt_payload },
.data = Value.undef,
};
val_ptr.* = Value.initPayload(&payload.base);
return ComptimePtrMutationKit{
.decl_ref_mut = parent.decl_ref_mut,
.pointee = .{ .direct = &payload.data },
.ty = payload_ty,
};
},
.none => switch (val_ptr.tag()) {
.opt_payload => return ComptimePtrMutationKit{
.decl_ref_mut = parent.decl_ref_mut,
.pointee = .{ .direct = &val_ptr.castTag(.opt_payload).?.data },
.ty = payload_ty,
},
else => return ComptimePtrMutationKit{
.decl_ref_mut = parent.decl_ref_mut,
.pointee = .{ .direct = val_ptr },
.ty = payload_ty,
},
},
else => return ComptimePtrMutationKit{
.decl_ref_mut = parent.decl_ref_mut,
.pointee = .{ .direct = val_ptr },
.ty = payload_ty,
},
}
},
.bad_decl_ty, .bad_ptr_ty => return parent,
// Even though the parent value type has a well-defined memory layout, our
// pointer type does not.
.reinterpret => return ComptimePtrMutationKit{
.decl_ref_mut = parent.decl_ref_mut,
.pointee = .bad_ptr_ty,
.ty = opt_ptr.container_ty,
},
}
},
.decl_ref => unreachable, // isComptimeMutablePtr() has been checked already
else => unreachable,
}
}
fn beginComptimePtrMutationInner(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
decl_ty: Type,
decl_val: *Value,
ptr_elem_ty: Type,
decl_ref_mut: Value.Payload.DeclRefMut.Data,
) CompileError!ComptimePtrMutationKit {
const mod = sema.mod;
const target = mod.getTarget();
const coerce_ok = (try sema.coerceInMemoryAllowed(block, ptr_elem_ty, decl_ty, true, target, src, src)) == .ok;
const decl = mod.declPtr(decl_ref_mut.decl_index);
var decl_arena: std.heap.ArenaAllocator = undefined;
const allocator = decl.value_arena.?.acquire(mod.gpa, &decl_arena);
defer decl.value_arena.?.release(&decl_arena);
decl_val.* = try decl_val.unintern(allocator, mod);
if (coerce_ok) {
return ComptimePtrMutationKit{
.decl_ref_mut = decl_ref_mut,
.pointee = .{ .direct = decl_val },
.ty = decl_ty,
};
}
// Handle the case that the decl is an array and we're actually trying to point to an element.
if (decl_ty.isArrayOrVector(mod)) {
const decl_elem_ty = decl_ty.childType(mod);
if ((try sema.coerceInMemoryAllowed(block, ptr_elem_ty, decl_elem_ty, true, target, src, src)) == .ok) {
return ComptimePtrMutationKit{
.decl_ref_mut = decl_ref_mut,
.pointee = .{ .direct = decl_val },
.ty = decl_ty,
};
}
}
if (!decl_ty.hasWellDefinedLayout(mod)) {
return ComptimePtrMutationKit{
.decl_ref_mut = decl_ref_mut,
.pointee = .{ .bad_decl_ty = {} },
.ty = decl_ty,
};
}
if (!ptr_elem_ty.hasWellDefinedLayout(mod)) {
return ComptimePtrMutationKit{
.decl_ref_mut = decl_ref_mut,
.pointee = .{ .bad_ptr_ty = {} },
.ty = ptr_elem_ty,
};
}
return ComptimePtrMutationKit{
.decl_ref_mut = decl_ref_mut,
.pointee = .{ .reinterpret = .{
.val_ptr = decl_val,
.byte_offset = 0,
} },
.ty = decl_ty,
};
}
const TypedValueAndOffset = struct {
tv: TypedValue,
byte_offset: usize,
};
const ComptimePtrLoadKit = struct {
/// The Value and Type corresponding to the pointee of the provided pointer.
/// If a direct dereference is not possible, this is null.
pointee: ?TypedValue,
/// The largest parent Value containing `pointee` and having a well-defined memory layout.
/// This is used for bitcasting, if direct dereferencing failed (i.e. `pointee` is null).
parent: ?TypedValueAndOffset,
/// Whether the `pointee` could be mutated by further semantic analysis,
/// in which case a copy must be performed.
is_mutable: bool,
/// If the root decl could not be used as `parent`, this is the type that
/// caused that by not having a well-defined layout
ty_without_well_defined_layout: ?Type,
};
const ComptimePtrLoadError = CompileError || error{
RuntimeLoad,
};
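// The kind of code that funnels into beginComptimePtrLoad below (illustrative):
//
//     const S = struct { a: u32, b: u32 };
//     const s: S = .{ .a = 1, .b = 2 };
//     comptime {
//         const p = &s.b; // a field_ptr over a decl_ref
//         _ = p.*; // dereferenced entirely at comptime
//     }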
/// If `maybe_array_ty` is provided, it will be used to directly dereference an
/// .elem_ptr of type T to a value of [N]T, if necessary.
fn beginComptimePtrLoad(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
ptr_val: Value,
maybe_array_ty: ?Type,
) ComptimePtrLoadError!ComptimePtrLoadKit {
const mod = sema.mod;
const target = mod.getTarget();
var deref: ComptimePtrLoadKit = switch (ptr_val.ip_index) {
.null_value => {
return sema.fail(block, src, "attempt to use null value", .{});
},
.none => switch (ptr_val.tag()) {
.decl_ref,
.decl_ref_mut,
=> blk: {
const decl_index = switch (ptr_val.tag()) {
.decl_ref => ptr_val.castTag(.decl_ref).?.data,
.decl_ref_mut => ptr_val.castTag(.decl_ref_mut).?.data.decl_index,
else => unreachable,
};
const is_mutable = ptr_val.tag() == .decl_ref_mut;
const decl = mod.declPtr(decl_index);
const decl_tv = try decl.typedValue();
if (decl_tv.val.tagIsVariable()) return error.RuntimeLoad;
const layout_defined = decl.ty.hasWellDefinedLayout(mod);
break :blk ComptimePtrLoadKit{
.parent = if (layout_defined) .{ .tv = decl_tv, .byte_offset = 0 } else null,
.pointee = decl_tv,
.is_mutable = is_mutable,
.ty_without_well_defined_layout = if (!layout_defined) decl.ty else null,
};
},
.elem_ptr => blk: {
const elem_ptr = ptr_val.castTag(.elem_ptr).?.data;
const elem_ty = elem_ptr.elem_ty;
var deref = try sema.beginComptimePtrLoad(block, src, elem_ptr.array_ptr, null);
// This code assumes that elem_ptrs have been "flattened" in order for direct dereference
// to succeed, meaning that elem_ptrs of the same elem_ty are coalesced. Here we check that
// our parent is not an elem_ptr with the same elem_ty, since that would be "unflattened".
if (elem_ptr.array_ptr.castTag(.elem_ptr)) |parent_elem_ptr| {
assert(!(parent_elem_ptr.data.elem_ty.eql(elem_ty, mod)));
}
if (elem_ptr.index != 0) {
if (elem_ty.hasWellDefinedLayout(mod)) {
if (deref.parent) |*parent| {
// Update the byte offset (in-place)
const elem_size = try sema.typeAbiSize(elem_ty);
const offset = parent.byte_offset + elem_size * elem_ptr.index;
parent.byte_offset = try sema.usizeCast(block, src, offset);
}
} else {
deref.parent = null;
deref.ty_without_well_defined_layout = elem_ty;
}
}
// If we're loading an elem_ptr that was derived from a different type
// than the true type of the underlying decl, we cannot deref directly
const ty_matches = if (deref.pointee != null and deref.pointee.?.ty.isArrayOrVector(mod)) x: {
const deref_elem_ty = deref.pointee.?.ty.childType(mod);
break :x (try sema.coerceInMemoryAllowed(block, deref_elem_ty, elem_ty, false, target, src, src)) == .ok or
(try sema.coerceInMemoryAllowed(block, elem_ty, deref_elem_ty, false, target, src, src)) == .ok;
} else false;
if (!ty_matches) {
deref.pointee = null;
break :blk deref;
}
var array_tv = deref.pointee.?;
const check_len = array_tv.ty.arrayLenIncludingSentinel(mod);
if (maybe_array_ty) |load_ty| {
// It's possible that we're loading a [N]T, in which case we'd like to slice
// the pointee array directly from our parent array.
if (load_ty.isArrayOrVector(mod) and load_ty.childType(mod).eql(elem_ty, mod)) {
const N = try sema.usizeCast(block, src, load_ty.arrayLenIncludingSentinel(mod));
deref.pointee = if (elem_ptr.index + N <= check_len) TypedValue{
.ty = try Type.array(sema.arena, N, null, elem_ty, mod),
.val = try array_tv.val.sliceArray(mod, sema.arena, elem_ptr.index, elem_ptr.index + N),
} else null;
break :blk deref;
}
}
if (elem_ptr.index >= check_len) {
deref.pointee = null;
break :blk deref;
}
if (elem_ptr.index == check_len - 1) {
if (array_tv.ty.sentinel(mod)) |sent| {
deref.pointee = TypedValue{
.ty = elem_ty,
.val = sent,
};
break :blk deref;
}
}
deref.pointee = TypedValue{
.ty = elem_ty,
.val = try array_tv.val.elemValue(mod, elem_ptr.index),
};
break :blk deref;
},
.slice => blk: {
const slice = ptr_val.castTag(.slice).?.data;
break :blk try sema.beginComptimePtrLoad(block, src, slice.ptr, null);
},
.field_ptr => blk: {
const field_ptr = ptr_val.castTag(.field_ptr).?.data;
const field_index = @intCast(u32, field_ptr.field_index);
var deref = try sema.beginComptimePtrLoad(block, src, field_ptr.container_ptr, field_ptr.container_ty);
if (field_ptr.container_ty.hasWellDefinedLayout(mod)) {
const struct_obj = mod.typeToStruct(field_ptr.container_ty);
if (struct_obj != null and struct_obj.?.layout == .Packed) {
// packed structs are not byte addressable
deref.parent = null;
} else if (deref.parent) |*parent| {
// Update the byte offset (in-place)
try sema.resolveTypeLayout(field_ptr.container_ty);
const field_offset = field_ptr.container_ty.structFieldOffset(field_index, mod);
parent.byte_offset = try sema.usizeCast(block, src, parent.byte_offset + field_offset);
}
} else {
deref.parent = null;
deref.ty_without_well_defined_layout = field_ptr.container_ty;
}
const tv = deref.pointee orelse {
deref.pointee = null;
break :blk deref;
};
const coerce_in_mem_ok =
(try sema.coerceInMemoryAllowed(block, field_ptr.container_ty, tv.ty, false, target, src, src)) == .ok or
(try sema.coerceInMemoryAllowed(block, tv.ty, field_ptr.container_ty, false, target, src, src)) == .ok;
if (!coerce_in_mem_ok) {
deref.pointee = null;
break :blk deref;
}
if (field_ptr.container_ty.isSlice(mod)) {
const slice_val = tv.val.castTag(.slice).?.data;
deref.pointee = switch (field_index) {
Value.Payload.Slice.ptr_index => TypedValue{
.ty = field_ptr.container_ty.slicePtrFieldType(mod),
.val = slice_val.ptr,
},
Value.Payload.Slice.len_index => TypedValue{
.ty = Type.usize,
.val = slice_val.len,
},
else => unreachable,
};
} else {
const field_ty = field_ptr.container_ty.structFieldType(field_index, mod);
deref.pointee = TypedValue{
.ty = field_ty,
.val = try tv.val.fieldValue(tv.ty, mod, field_index),
};
}
break :blk deref;
},
.comptime_field_ptr => blk: {
const comptime_field_ptr = ptr_val.castTag(.comptime_field_ptr).?.data;
break :blk ComptimePtrLoadKit{
.parent = null,
.pointee = .{ .ty = comptime_field_ptr.field_ty, .val = comptime_field_ptr.field_val },
.is_mutable = false,
.ty_without_well_defined_layout = comptime_field_ptr.field_ty,
};
},
.opt_payload_ptr,
.eu_payload_ptr,
=> blk: {
const payload_ptr = ptr_val.cast(Value.Payload.PayloadPtr).?.data;
const payload_ty = switch (ptr_val.tag()) {
.eu_payload_ptr => payload_ptr.container_ty.errorUnionPayload(mod),
.opt_payload_ptr => payload_ptr.container_ty.optionalChild(mod),
else => unreachable,
};
var deref = try sema.beginComptimePtrLoad(block, src, payload_ptr.container_ptr, payload_ptr.container_ty);
// eu_payload_ptr and opt_payload_ptr never have a well-defined layout
if (deref.parent != null) {
deref.parent = null;
deref.ty_without_well_defined_layout = payload_ptr.container_ty;
}
if (deref.pointee) |*tv| {
const coerce_in_mem_ok =
(try sema.coerceInMemoryAllowed(block, payload_ptr.container_ty, tv.ty, false, target, src, src)) == .ok or
(try sema.coerceInMemoryAllowed(block, tv.ty, payload_ptr.container_ty, false, target, src, src)) == .ok;
if (coerce_in_mem_ok) {
const payload_val = switch (ptr_val.tag()) {
.eu_payload_ptr => if (tv.val.castTag(.eu_payload)) |some| some.data else {
return sema.fail(block, src, "attempt to unwrap error: {s}", .{tv.val.castTag(.@"error").?.data.name});
},
.opt_payload_ptr => if (tv.val.castTag(.opt_payload)) |some| some.data else opt: {
if (tv.val.isNull(mod)) return sema.fail(block, src, "attempt to use null value", .{});
break :opt tv.val;
},
else => unreachable,
};
tv.* = TypedValue{ .ty = payload_ty, .val = payload_val };
break :blk deref;
}
}
deref.pointee = null;
break :blk deref;
},
.opt_payload => blk: {
const opt_payload = ptr_val.castTag(.opt_payload).?.data;
break :blk try sema.beginComptimePtrLoad(block, src, opt_payload, null);
},
.variable,
.extern_fn,
.function,
=> return error.RuntimeLoad,
else => unreachable,
},
else => switch (mod.intern_pool.indexToKey(ptr_val.ip_index)) {
.int => return error.RuntimeLoad,
.ptr => |ptr| switch (ptr.addr) {
.@"var", .int => return error.RuntimeLoad,
.decl, .mut_decl => blk: {
const decl_index = switch (ptr.addr) {
.decl => |decl| decl,
.mut_decl => |mut_decl| mut_decl.decl,
else => unreachable,
};
const decl = mod.declPtr(decl_index);
const decl_tv = try decl.typedValue();
if (decl_tv.val.tagIsVariable()) return error.RuntimeLoad;
const layout_defined = decl.ty.hasWellDefinedLayout(mod);
break :blk ComptimePtrLoadKit{
.parent = if (layout_defined) .{ .tv = decl_tv, .byte_offset = 0 } else null,
.pointee = decl_tv,
.is_mutable = false,
.ty_without_well_defined_layout = if (!layout_defined) decl.ty else null,
};
},
},
else => unreachable,
},
};
if (deref.pointee) |tv| {
if (deref.parent == null and tv.ty.hasWellDefinedLayout(mod)) {
deref.parent = .{ .tv = tv, .byte_offset = 0 };
}
}
return deref;
}
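// @bitCast behavior implemented below (illustrative; uses this file's
// two-argument builtin syntax): bit sizes must match exactly, and a
// comptime-known operand round-trips through a byte buffer in bitCastVal.
//
//     const x: u32 = 0x3f800000;
//     const f = @bitCast(f32, x); // comptime-known result: 1.0
//     // const e = @bitCast(u16, x); // error: 16 bits vs 32 bits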
fn bitCast(
sema: *Sema,
block: *Block,
dest_ty_unresolved: Type,
inst: Air.Inst.Ref,
inst_src: LazySrcLoc,
operand_src: ?LazySrcLoc,
) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const dest_ty = try sema.resolveTypeFields(dest_ty_unresolved);
try sema.resolveTypeLayout(dest_ty);
const old_ty = try sema.resolveTypeFields(sema.typeOf(inst));
try sema.resolveTypeLayout(old_ty);
const dest_bits = dest_ty.bitSize(mod);
const old_bits = old_ty.bitSize(mod);
if (old_bits != dest_bits) {
return sema.fail(block, inst_src, "@bitCast size mismatch: destination type '{}' has {d} bits but source type '{}' has {d} bits", .{
dest_ty.fmt(mod),
dest_bits,
old_ty.fmt(mod),
old_bits,
});
}
if (try sema.resolveMaybeUndefVal(inst)) |val| {
if (try sema.bitCastVal(block, inst_src, val, old_ty, dest_ty, 0)) |result_val| {
return sema.addConstant(dest_ty, result_val);
}
}
try sema.requireRuntimeBlock(block, inst_src, operand_src);
return block.addBitCast(dest_ty, inst);
}
fn bitCastVal(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
val: Value,
old_ty: Type,
new_ty: Type,
buffer_offset: usize,
) !?Value {
const mod = sema.mod;
if (old_ty.eql(new_ty, mod)) return val;
// For types with well-defined memory layouts, we serialize them into a byte
// buffer, then deserialize to the new type.
const abi_size = try sema.usizeCast(block, src, old_ty.abiSize(mod));
const buffer = try sema.gpa.alloc(u8, abi_size);
defer sema.gpa.free(buffer);
val.writeToMemory(old_ty, mod, buffer) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.ReinterpretDeclRef => return null,
error.IllDefinedMemoryLayout => unreachable, // Sema was supposed to emit a compile error already
error.Unimplemented => return sema.fail(block, src, "TODO: implement writeToMemory for type '{}'", .{old_ty.fmt(mod)}),
};
return try Value.readFromMemory(new_ty, mod, buffer[buffer_offset..], sema.arena);
}
fn coerceArrayPtrToSlice(
sema: *Sema,
block: *Block,
dest_ty: Type,
inst: Air.Inst.Ref,
inst_src: LazySrcLoc,
) CompileError!Air.Inst.Ref {
const mod = sema.mod;
if (try sema.resolveMaybeUndefVal(inst)) |val| {
const ptr_array_ty = sema.typeOf(inst);
const array_ty = ptr_array_ty.childType(mod);
const slice_val = try Value.Tag.slice.create(sema.arena, .{
.ptr = val,
.len = try mod.intValue(Type.usize, array_ty.arrayLen(mod)),
});
return sema.addConstant(dest_ty, slice_val);
}
try sema.requireRuntimeBlock(block, inst_src, null);
return block.addTyOp(.array_to_slice, dest_ty, inst);
}
fn checkPtrAttributes(sema: *Sema, dest_ty: Type, inst_ty: Type, in_memory_result: *InMemoryCoercionResult) bool {
const mod = sema.mod;
const dest_info = dest_ty.ptrInfo(mod);
const inst_info = inst_ty.ptrInfo(mod);
const len0 = (inst_info.pointee_type.zigTypeTag(mod) == .Array and (inst_info.pointee_type.arrayLenIncludingSentinel(mod) == 0 or
(inst_info.pointee_type.arrayLen(mod) == 0 and dest_info.sentinel == null and dest_info.size != .C and dest_info.size != .Many))) or
(inst_info.pointee_type.isTuple(mod) and inst_info.pointee_type.structFieldCount(mod) == 0);
const ok_cv_qualifiers =
((inst_info.mutable or !dest_info.mutable) or len0) and
(!inst_info.@"volatile" or dest_info.@"volatile");
if (!ok_cv_qualifiers) {
in_memory_result.* = .{ .ptr_qualifiers = .{
.actual_const = !inst_info.mutable,
.wanted_const = !dest_info.mutable,
.actual_volatile = inst_info.@"volatile",
.wanted_volatile = dest_info.@"volatile",
} };
return false;
}
if (dest_info.@"addrspace" != inst_info.@"addrspace") {
in_memory_result.* = .{ .ptr_addrspace = .{
.actual = inst_info.@"addrspace",
.wanted = dest_info.@"addrspace",
} };
return false;
}
if (inst_info.@"align" == 0 and dest_info.@"align" == 0) return true;
if (len0) return true;
const inst_align = if (inst_info.@"align" != 0)
inst_info.@"align"
else
inst_info.pointee_type.abiAlignment(mod);
const dest_align = if (dest_info.@"align" != 0)
dest_info.@"align"
else
dest_info.pointee_type.abiAlignment(mod);
if (dest_align > inst_align) {
in_memory_result.* = .{ .ptr_alignment = .{
.actual = inst_align,
.wanted = dest_align,
} };
return false;
}
return true;
}
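// The safety check added by coerceCompatiblePtrs below, sketched at the user
// level (illustrative): a maybe-zero pointer coerced to a non-allowzero
// pointer is verified to be non-null at runtime.
//
//     fn f(p: [*c]u8) *u8 {
//         return p; // allowed; checked for null when safety is on
//     }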
fn coerceCompatiblePtrs(
sema: *Sema,
block: *Block,
dest_ty: Type,
inst: Air.Inst.Ref,
inst_src: LazySrcLoc,
) !Air.Inst.Ref {
const mod = sema.mod;
const inst_ty = sema.typeOf(inst);
if (try sema.resolveMaybeUndefVal(inst)) |val| {
if (!val.isUndef(mod) and val.isNull(mod) and !dest_ty.isAllowzeroPtr(mod)) {
return sema.fail(block, inst_src, "null pointer cast to type '{}'", .{dest_ty.fmt(sema.mod)});
}
// The comptime Value representation is compatible with both types.
return sema.addConstant(dest_ty, val);
}
try sema.requireRuntimeBlock(block, inst_src, null);
const inst_allows_zero = inst_ty.zigTypeTag(mod) != .Pointer or inst_ty.ptrAllowsZero(mod);
if (block.wantSafety() and inst_allows_zero and !dest_ty.ptrAllowsZero(mod) and
(try sema.typeHasRuntimeBits(dest_ty.elemType2(mod)) or dest_ty.elemType2(mod).zigTypeTag(mod) == .Fn))
{
const actual_ptr = if (inst_ty.isSlice(mod))
try sema.analyzeSlicePtr(block, inst_src, inst, inst_ty)
else
inst;
const ptr_int = try block.addUnOp(.ptrtoint, actual_ptr);
const is_non_zero = try block.addBinOp(.cmp_neq, ptr_int, .zero_usize);
const ok = if (inst_ty.isSlice(mod)) ok: {
const len = try sema.analyzeSliceLen(block, inst_src, inst);
const len_zero = try block.addBinOp(.cmp_eq, len, .zero_usize);
break :ok try block.addBinOp(.bit_or, len_zero, is_non_zero);
} else is_non_zero;
try sema.addSafetyCheck(block, ok, .cast_to_null);
}
return sema.bitCast(block, dest_ty, inst, inst_src, null);
}
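// Enum-to-union coercion handled below, at the user level (illustrative):
// permitted when the chosen field carries no runtime payload, so the tag
// alone determines the union value.
//
//     const Tag = enum { a, b };
//     const U = union(Tag) { a: void, b: void };
//     const t: Tag = .b;
//     const u: U = t; // ok: every field is zero-bit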
fn coerceEnumToUnion(
sema: *Sema,
block: *Block,
union_ty: Type,
union_ty_src: LazySrcLoc,
inst: Air.Inst.Ref,
inst_src: LazySrcLoc,
) !Air.Inst.Ref {
const mod = sema.mod;
const inst_ty = sema.typeOf(inst);
const tag_ty = union_ty.unionTagType(mod) orelse {
const msg = msg: {
const msg = try sema.errMsg(block, inst_src, "expected type '{}', found '{}'", .{
union_ty.fmt(sema.mod), inst_ty.fmt(sema.mod),
});
errdefer msg.destroy(sema.gpa);
try sema.errNote(block, union_ty_src, msg, "cannot coerce enum to untagged union", .{});
try sema.addDeclaredHereNote(msg, union_ty);
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
};
const enum_tag = try sema.coerce(block, tag_ty, inst, inst_src);
if (try sema.resolveDefinedValue(block, inst_src, enum_tag)) |val| {
const field_index = union_ty.unionTagFieldIndex(val, sema.mod) orelse {
const msg = msg: {
const msg = try sema.errMsg(block, inst_src, "union '{}' has no tag with value '{}'", .{
union_ty.fmt(sema.mod), val.fmtValue(tag_ty, sema.mod),
});
errdefer msg.destroy(sema.gpa);
try sema.addDeclaredHereNote(msg, union_ty);
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
};
const union_obj = mod.typeToUnion(union_ty).?;
const field = union_obj.fields.values()[field_index];
const field_ty = try sema.resolveTypeFields(field.ty);
if (field_ty.zigTypeTag(mod) == .NoReturn) {
const msg = msg: {
const msg = try sema.errMsg(block, inst_src, "cannot initialize 'noreturn' field of union", .{});
errdefer msg.destroy(sema.gpa);
const field_name = union_obj.fields.keys()[field_index];
try sema.addFieldErrNote(union_ty, field_index, msg, "field '{s}' declared here", .{field_name});
try sema.addDeclaredHereNote(msg, union_ty);
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
const opv = (try sema.typeHasOnePossibleValue(field_ty)) orelse {
const msg = msg: {
const field_name = union_obj.fields.keys()[field_index];
const msg = try sema.errMsg(block, inst_src, "coercion from enum '{}' to union '{}' must initialize '{}' field '{s}'", .{
inst_ty.fmt(sema.mod), union_ty.fmt(sema.mod), field_ty.fmt(sema.mod), field_name,
});
errdefer msg.destroy(sema.gpa);
try sema.addFieldErrNote(union_ty, field_index, msg, "field '{s}' declared here", .{field_name});
try sema.addDeclaredHereNote(msg, union_ty);
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
};
return sema.addConstant(union_ty, try mod.unionValue(union_ty, val, opv));
}
try sema.requireRuntimeBlock(block, inst_src, null);
if (tag_ty.isNonexhaustiveEnum(mod)) {
const msg = msg: {
const msg = try sema.errMsg(block, inst_src, "runtime coercion to union '{}' from non-exhaustive enum", .{
union_ty.fmt(sema.mod),
});
errdefer msg.destroy(sema.gpa);
try sema.addDeclaredHereNote(msg, tag_ty);
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
const union_obj = mod.typeToUnion(union_ty).?;
{
var msg: ?*Module.ErrorMsg = null;
errdefer if (msg) |some| some.destroy(sema.gpa);
for (union_obj.fields.values(), 0..) |field, i| {
if (field.ty.zigTypeTag(mod) == .NoReturn) {
const err_msg = msg orelse try sema.errMsg(
block,
inst_src,
"runtime coercion from enum '{}' to union '{}' which has a 'noreturn' field",
.{ tag_ty.fmt(sema.mod), union_ty.fmt(sema.mod) },
);
msg = err_msg;
try sema.addFieldErrNote(union_ty, i, err_msg, "'noreturn' field here", .{});
}
}
if (msg) |some| {
msg = null;
try sema.addDeclaredHereNote(some, union_ty);
return sema.failWithOwnedErrorMsg(some);
}
}
// If all of the union's fields are zero-bit, the union value is just the enum value.
if (union_ty.unionHasAllZeroBitFieldTypes(mod)) {
return block.addBitCast(union_ty, enum_tag);
}
const msg = msg: {
const msg = try sema.errMsg(
block,
inst_src,
"runtime coercion from enum '{}' to union '{}' which has non-void fields",
.{ tag_ty.fmt(sema.mod), union_ty.fmt(sema.mod) },
);
errdefer msg.destroy(sema.gpa);
var it = union_obj.fields.iterator();
var field_index: usize = 0;
while (it.next()) |field| : (field_index += 1) {
const field_name = field.key_ptr.*;
const field_ty = field.value_ptr.ty;
if (!(try sema.typeHasRuntimeBits(field_ty))) continue;
try sema.addFieldErrNote(union_ty, field_index, msg, "field '{s}' has type '{}'", .{ field_name, field_ty.fmt(sema.mod) });
}
try sema.addDeclaredHereNote(msg, union_ty);
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
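// Anon-struct-to-union coercion handled below (illustrative): exactly one
// field may be initialized, and it becomes the active union field.
//
//     const U = union(enum) { a: u32, b: f32 };
//     const u: U = .{ .a = 1 }; // ok
//     // const bad: U = .{ .a = 1, .b = 2.0 }; // error: one field only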
fn coerceAnonStructToUnion(
sema: *Sema,
block: *Block,
union_ty: Type,
union_ty_src: LazySrcLoc,
inst: Air.Inst.Ref,
inst_src: LazySrcLoc,
) !Air.Inst.Ref {
const mod = sema.mod;
const inst_ty = sema.typeOf(inst);
const field_count = inst_ty.structFieldCount(mod);
if (field_count != 1) {
const msg = msg: {
const msg = if (field_count > 1) try sema.errMsg(
block,
inst_src,
"cannot initialize multiple union fields at once; unions can only have one active field",
.{},
) else try sema.errMsg(
block,
inst_src,
"union initializer must initialize one field",
.{},
);
errdefer msg.destroy(sema.gpa);
// TODO add notes for where the anon struct was created to point out
// the extra fields.
try sema.addDeclaredHereNote(msg, union_ty);
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
const anon_struct = mod.intern_pool.indexToKey(inst_ty.ip_index).anon_struct_type;
const field_name = mod.intern_pool.stringToSlice(anon_struct.names[0]);
const init = try sema.structFieldVal(block, inst_src, inst, field_name, inst_src, inst_ty);
return sema.unionInit(block, init, inst_src, union_ty, union_ty_src, field_name, inst_src);
}
fn coerceAnonStructToUnionPtrs(
sema: *Sema,
block: *Block,
ptr_union_ty: Type,
union_ty_src: LazySrcLoc,
ptr_anon_struct: Air.Inst.Ref,
anon_struct_src: LazySrcLoc,
) !Air.Inst.Ref {
const mod = sema.mod;
const union_ty = ptr_union_ty.childType(mod);
const anon_struct = try sema.analyzeLoad(block, anon_struct_src, ptr_anon_struct, anon_struct_src);
const union_inst = try sema.coerceAnonStructToUnion(block, union_ty, union_ty_src, anon_struct, anon_struct_src);
return sema.analyzeRef(block, union_ty_src, union_inst);
}
fn coerceAnonStructToStructPtrs(
sema: *Sema,
block: *Block,
ptr_struct_ty: Type,
struct_ty_src: LazySrcLoc,
ptr_anon_struct: Air.Inst.Ref,
anon_struct_src: LazySrcLoc,
) !Air.Inst.Ref {
const mod = sema.mod;
const struct_ty = ptr_struct_ty.childType(mod);
const anon_struct = try sema.analyzeLoad(block, anon_struct_src, ptr_anon_struct, anon_struct_src);
const struct_inst = try sema.coerceTupleToStruct(block, struct_ty, anon_struct, anon_struct_src);
return sema.analyzeRef(block, struct_ty_src, struct_inst);
}
/// If the lengths match, coerces element-wise.
fn coerceArrayLike(
sema: *Sema,
block: *Block,
dest_ty: Type,
dest_ty_src: LazySrcLoc,
inst: Air.Inst.Ref,
inst_src: LazySrcLoc,
) !Air.Inst.Ref {
const mod = sema.mod;
const inst_ty = sema.typeOf(inst);
const inst_len = inst_ty.arrayLen(mod);
const dest_len = try sema.usizeCast(block, dest_ty_src, dest_ty.arrayLen(mod));
const target = mod.getTarget();
if (dest_len != inst_len) {
const msg = msg: {
const msg = try sema.errMsg(block, inst_src, "expected type '{}', found '{}'", .{
dest_ty.fmt(mod), inst_ty.fmt(mod),
});
errdefer msg.destroy(sema.gpa);
try sema.errNote(block, dest_ty_src, msg, "destination has length {d}", .{dest_len});
try sema.errNote(block, inst_src, msg, "source has length {d}", .{inst_len});
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
const dest_elem_ty = dest_ty.childType(mod);
const inst_elem_ty = inst_ty.childType(mod);
const in_memory_result = try sema.coerceInMemoryAllowed(block, dest_elem_ty, inst_elem_ty, false, target, dest_ty_src, inst_src);
if (in_memory_result == .ok) {
if (try sema.resolveMaybeUndefVal(inst)) |inst_val| {
// These types share the same comptime value representation.
return sema.addConstant(dest_ty, inst_val);
}
try sema.requireRuntimeBlock(block, inst_src, null);
return block.addBitCast(dest_ty, inst);
}
const element_vals = try sema.arena.alloc(Value, dest_len);
const element_refs = try sema.arena.alloc(Air.Inst.Ref, dest_len);
var runtime_src: ?LazySrcLoc = null;
for (element_vals, 0..) |*elem, i| {
const index_ref = try sema.addConstant(
Type.usize,
try mod.intValue(Type.usize, i),
);
const src = inst_src; // TODO better source location
const elem_src = inst_src; // TODO better source location
const elem_ref = try sema.elemValArray(block, src, inst_src, inst, elem_src, index_ref, true);
const coerced = try sema.coerce(block, dest_elem_ty, elem_ref, elem_src);
element_refs[i] = coerced;
if (runtime_src == null) {
if (try sema.resolveMaybeUndefVal(coerced)) |elem_val| {
elem.* = elem_val;
} else {
runtime_src = elem_src;
}
}
}
if (runtime_src) |rs| {
try sema.requireRuntimeBlock(block, inst_src, rs);
return block.addAggregateInit(dest_ty, element_refs);
}
return sema.addConstant(
dest_ty,
try Value.Tag.aggregate.create(sema.arena, element_vals),
);
}
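// Tuple-to-array coercion handled below (illustrative): lengths must match,
// each element is coerced individually, and a destination sentinel is
// appended from the type.
//
//     const a: [2:0]u8 = .{ 1, 2 }; // third stored element is the sentinel 0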
/// If the lengths match, coerces element-wise.
fn coerceTupleToArray(
sema: *Sema,
block: *Block,
dest_ty: Type,
dest_ty_src: LazySrcLoc,
inst: Air.Inst.Ref,
inst_src: LazySrcLoc,
) !Air.Inst.Ref {
const mod = sema.mod;
const inst_ty = sema.typeOf(inst);
const inst_len = inst_ty.arrayLen(mod);
const dest_len = dest_ty.arrayLen(mod);
if (dest_len != inst_len) {
const msg = msg: {
const msg = try sema.errMsg(block, inst_src, "expected type '{}', found '{}'", .{
dest_ty.fmt(sema.mod), inst_ty.fmt(sema.mod),
});
errdefer msg.destroy(sema.gpa);
try sema.errNote(block, dest_ty_src, msg, "destination has length {d}", .{dest_len});
try sema.errNote(block, inst_src, msg, "source has length {d}", .{inst_len});
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
const dest_elems = try sema.usizeCast(block, dest_ty_src, dest_ty.arrayLenIncludingSentinel(mod));
const element_vals = try sema.arena.alloc(Value, dest_elems);
const element_refs = try sema.arena.alloc(Air.Inst.Ref, dest_elems);
const dest_elem_ty = dest_ty.childType(mod);
var runtime_src: ?LazySrcLoc = null;
for (element_vals, 0..) |*elem, i_usize| {
const i = @intCast(u32, i_usize);
if (i_usize == inst_len) {
elem.* = dest_ty.sentinel(mod).?;
element_refs[i] = try sema.addConstant(dest_elem_ty, elem.*);
break;
}
const elem_src = inst_src; // TODO better source location
const elem_ref = try sema.tupleField(block, inst_src, inst, elem_src, i);
const coerced = try sema.coerce(block, dest_elem_ty, elem_ref, elem_src);
element_refs[i] = coerced;
if (runtime_src == null) {
if (try sema.resolveMaybeUndefVal(coerced)) |elem_val| {
elem.* = elem_val;
} else {
runtime_src = elem_src;
}
}
}
if (runtime_src) |rs| {
try sema.requireRuntimeBlock(block, inst_src, rs);
return block.addAggregateInit(dest_ty, element_refs);
}
return sema.addConstant(
dest_ty,
try Value.Tag.aggregate.create(sema.arena, element_vals),
);
}
/// If the lengths match, coerces element-wise.
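/// For example (an illustrative sketch): a pointer to a tuple coercing to a slice, as in
///     const slice: []const u8 = &.{ 1, 2, 3 };
/// where `&.{ 1, 2, 3 }` is the address of a tuple literal.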
fn coerceTupleToSlicePtrs(
sema: *Sema,
block: *Block,
slice_ty: Type,
slice_ty_src: LazySrcLoc,
ptr_tuple: Air.Inst.Ref,
tuple_src: LazySrcLoc,
) !Air.Inst.Ref {
const mod = sema.mod;
const tuple_ty = sema.typeOf(ptr_tuple).childType(mod);
const tuple = try sema.analyzeLoad(block, tuple_src, ptr_tuple, tuple_src);
const slice_info = slice_ty.ptrInfo(mod);
const array_ty = try Type.array(sema.arena, tuple_ty.structFieldCount(mod), slice_info.sentinel, slice_info.pointee_type, sema.mod);
const array_inst = try sema.coerceTupleToArray(block, array_ty, slice_ty_src, tuple, tuple_src);
if (slice_info.@"align" != 0) {
return sema.fail(block, slice_ty_src, "TODO: override the alignment of the array decl we create here", .{});
}
const ptr_array = try sema.analyzeRef(block, slice_ty_src, array_inst);
return sema.coerceArrayPtrToSlice(block, slice_ty, ptr_array, slice_ty_src);
}
/// If the lengths match, coerces element-wise.
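/// For example (an illustrative sketch): a pointer to a tuple coercing to an array pointer:
///     const ptr: *const [3]u8 = &.{ 1, 2, 3 };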
fn coerceTupleToArrayPtrs(
sema: *Sema,
block: *Block,
ptr_array_ty: Type,
array_ty_src: LazySrcLoc,
ptr_tuple: Air.Inst.Ref,
tuple_src: LazySrcLoc,
) !Air.Inst.Ref {
const mod = sema.mod;
const tuple = try sema.analyzeLoad(block, tuple_src, ptr_tuple, tuple_src);
const ptr_info = ptr_array_ty.ptrInfo(mod);
const array_ty = ptr_info.pointee_type;
const array_inst = try sema.coerceTupleToArray(block, array_ty, array_ty_src, tuple, tuple_src);
if (ptr_info.@"align" != 0) {
return sema.fail(block, array_ty_src, "TODO: override the alignment of the array decl we create here", .{});
}
const ptr_array = try sema.analyzeRef(block, array_ty_src, array_inst);
return ptr_array;
}
/// Handles both tuples and anon struct literals. Coerces field-wise. Reports
/// errors for both extra fields and missing fields.
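/// For example (an illustrative sketch, with hypothetical names):
///     const S = struct { x: u32, y: u32 = 0 };
///     const s: S = .{ .x = 1 }; // `y` is populated from its default value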
fn coerceTupleToStruct(
sema: *Sema,
block: *Block,
dest_ty: Type,
inst: Air.Inst.Ref,
inst_src: LazySrcLoc,
) !Air.Inst.Ref {
const mod = sema.mod;
const struct_ty = try sema.resolveTypeFields(dest_ty);
if (struct_ty.isTupleOrAnonStruct(mod)) {
return sema.coerceTupleToTuple(block, struct_ty, inst, inst_src);
}
const fields = struct_ty.structFields(mod);
const field_vals = try sema.arena.alloc(InternPool.Index, fields.count());
const field_refs = try sema.arena.alloc(Air.Inst.Ref, field_vals.len);
@memset(field_refs, .none);
const inst_ty = sema.typeOf(inst);
const anon_struct = mod.intern_pool.indexToKey(inst_ty.ip_index).anon_struct_type;
var runtime_src: ?LazySrcLoc = null;
for (0..anon_struct.types.len) |field_index_usize| {
const field_i = @intCast(u32, field_index_usize);
const field_src = inst_src; // TODO better source location
const field_name = if (anon_struct.names.len != 0)
// https://github.com/ziglang/zig/issues/15709
@as([]const u8, mod.intern_pool.stringToSlice(anon_struct.names[field_i]))
else
try std.fmt.allocPrint(sema.arena, "{d}", .{field_i});
const field_index = try sema.structFieldIndex(block, struct_ty, field_name, field_src);
const field = fields.values()[field_index];
const elem_ref = try sema.tupleField(block, inst_src, inst, field_src, field_i);
const coerced = try sema.coerce(block, field.ty, elem_ref, field_src);
field_refs[field_index] = coerced;
if (field.is_comptime) {
const init_val = (try sema.resolveMaybeUndefVal(coerced)) orelse {
return sema.failWithNeededComptime(block, field_src, "value stored in comptime field must be comptime-known");
};
if (!init_val.eql(field.default_val, field.ty, sema.mod)) {
return sema.failWithInvalidComptimeFieldStore(block, field_src, inst_ty, field_i);
}
}
if (runtime_src == null) {
if (try sema.resolveMaybeUndefVal(coerced)) |field_val| {
assert(field_val.ip_index != .none);
field_vals[field_index] = field_val.ip_index;
} else {
runtime_src = field_src;
}
}
}
// Populate default field values and report errors for missing fields.
var root_msg: ?*Module.ErrorMsg = null;
errdefer if (root_msg) |msg| msg.destroy(sema.gpa);
for (field_refs, 0..) |*field_ref, i| {
if (field_ref.* != .none) continue;
const field_name = fields.keys()[i];
const field = fields.values()[i];
const field_src = inst_src; // TODO better source location
if (field.default_val.ip_index == .unreachable_value) {
const template = "missing struct field: {s}";
const args = .{field_name};
if (root_msg) |msg| {
try sema.errNote(block, field_src, msg, template, args);
} else {
root_msg = try sema.errMsg(block, field_src, template, args);
}
continue;
}
if (runtime_src == null) {
assert(field.default_val.ip_index != .none);
field_vals[i] = field.default_val.ip_index;
} else {
field_ref.* = try sema.addConstant(field.ty, field.default_val);
}
}
if (root_msg) |msg| {
try sema.addDeclaredHereNote(msg, struct_ty);
root_msg = null;
return sema.failWithOwnedErrorMsg(msg);
}
if (runtime_src) |rs| {
try sema.requireRuntimeBlock(block, inst_src, rs);
return block.addAggregateInit(struct_ty, field_refs);
}
assert(struct_ty.ip_index != .none);
const struct_val = try mod.intern(.{ .aggregate = .{
.ty = struct_ty.ip_index,
.storage = .{ .elems = field_vals },
} });
errdefer mod.intern_pool.remove(struct_val);
return sema.addConstant(struct_ty, struct_val.toValue());
}
fn coerceTupleToTuple(
sema: *Sema,
block: *Block,
tuple_ty: Type,
inst: Air.Inst.Ref,
inst_src: LazySrcLoc,
) !Air.Inst.Ref {
const mod = sema.mod;
const dest_tuple = mod.intern_pool.indexToKey(tuple_ty.ip_index).anon_struct_type;
const field_vals = try sema.arena.alloc(InternPool.Index, dest_tuple.types.len);
const field_refs = try sema.arena.alloc(Air.Inst.Ref, field_vals.len);
@memset(field_refs, .none);
const inst_ty = sema.typeOf(inst);
const src_tuple = mod.intern_pool.indexToKey(inst_ty.ip_index).anon_struct_type;
if (src_tuple.types.len > dest_tuple.types.len) return error.NotCoercible;
var runtime_src: ?LazySrcLoc = null;
for (dest_tuple.types, dest_tuple.values, 0..) |field_ty, default_val, field_index_usize| {
const field_i = @intCast(u32, field_index_usize);
const field_src = inst_src; // TODO better source location
const field_name = if (src_tuple.names.len != 0)
// https://github.com/ziglang/zig/issues/15709
@as([]const u8, mod.intern_pool.stringToSlice(src_tuple.names[field_i]))
else
try std.fmt.allocPrint(sema.arena, "{d}", .{field_i});
if (mem.eql(u8, field_name, "len")) {
return sema.fail(block, field_src, "cannot assign to 'len' field of tuple", .{});
}
const field_index = try sema.tupleFieldIndex(block, tuple_ty, field_name, field_src);
const elem_ref = try sema.tupleField(block, inst_src, inst, field_src, field_i);
const coerced = try sema.coerce(block, field_ty.toType(), elem_ref, field_src);
field_refs[field_index] = coerced;
if (default_val != .none) {
const init_val = (try sema.resolveMaybeUndefVal(coerced)) orelse {
return sema.failWithNeededComptime(block, field_src, "value stored in comptime field must be comptime-known");
};
if (!init_val.eql(default_val.toValue(), field_ty.toType(), sema.mod)) {
return sema.failWithInvalidComptimeFieldStore(block, field_src, inst_ty, field_i);
}
}
if (runtime_src == null) {
if (try sema.resolveMaybeUndefVal(coerced)) |field_val| {
field_vals[field_index] = field_val.ip_index;
} else {
runtime_src = field_src;
}
}
}
// Populate default field values and report errors for missing fields.
var root_msg: ?*Module.ErrorMsg = null;
errdefer if (root_msg) |msg| msg.destroy(sema.gpa);
for (
dest_tuple.types,
dest_tuple.values,
field_refs,
0..,
) |field_ty, default_val, *field_ref, i| {
if (field_ref.* != .none) continue;
const field_src = inst_src; // TODO better source location
if (default_val == .none) {
if (tuple_ty.isTuple(mod)) {
const template = "missing tuple field: {d}";
if (root_msg) |msg| {
try sema.errNote(block, field_src, msg, template, .{i});
} else {
root_msg = try sema.errMsg(block, field_src, template, .{i});
}
continue;
}
const template = "missing struct field: {s}";
const args = .{tuple_ty.structFieldName(i, mod)};
if (root_msg) |msg| {
try sema.errNote(block, field_src, msg, template, args);
} else {
root_msg = try sema.errMsg(block, field_src, template, args);
}
continue;
}
if (runtime_src == null) {
field_vals[i] = default_val;
} else {
field_ref.* = try sema.addConstant(field_ty.toType(), default_val.toValue());
}
}
if (root_msg) |msg| {
try sema.addDeclaredHereNote(msg, tuple_ty);
root_msg = null;
return sema.failWithOwnedErrorMsg(msg);
}
if (runtime_src) |rs| {
try sema.requireRuntimeBlock(block, inst_src, rs);
return block.addAggregateInit(tuple_ty, field_refs);
}
return sema.addConstant(
tuple_ty,
(try mod.intern(.{ .aggregate = .{
.ty = tuple_ty.ip_index,
.storage = .{ .elems = field_vals },
} })).toValue(),
);
}
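/// Loads the value of the Decl at `decl_index` as an AIR ref. Constant results
/// are cached in `decl_val_table`, except inside a `@TypeOf` block.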
fn analyzeDeclVal(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
decl_index: Decl.Index,
) CompileError!Air.Inst.Ref {
try sema.addReferencedBy(block, src, decl_index);
if (sema.decl_val_table.get(decl_index)) |result| {
return result;
}
const decl_ref = try sema.analyzeDeclRefInner(decl_index, false);
const result = try sema.analyzeLoad(block, src, decl_ref, src);
if (Air.refToIndex(result)) |index| {
if (sema.air_instructions.items(.tag)[index] == .constant and !block.is_typeof) {
try sema.decl_val_table.put(sema.gpa, decl_index, result);
}
}
return result;
}
fn addReferencedBy(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
decl_index: Decl.Index,
) !void {
if (sema.mod.comp.reference_trace == @as(u32, 0)) return;
try sema.mod.reference_table.put(sema.gpa, decl_index, .{
.referencer = block.src_decl,
.src = src,
});
}
fn ensureDeclAnalyzed(sema: *Sema, decl_index: Decl.Index) CompileError!void {
const mod = sema.mod;
const decl = mod.declPtr(decl_index);
if (decl.analysis == .in_progress) {
const msg = try Module.ErrorMsg.create(sema.gpa, decl.srcLoc(mod), "dependency loop detected", .{});
return sema.failWithOwnedErrorMsg(msg);
}
mod.ensureDeclAnalyzed(decl_index) catch |err| {
if (sema.owner_func) |owner_func| {
owner_func.state = .dependency_failure;
} else {
sema.owner_decl.analysis = .dependency_failure;
}
return err;
};
}
fn ensureFuncBodyAnalyzed(sema: *Sema, func: *Module.Fn) CompileError!void {
sema.mod.ensureFuncBodyAnalyzed(func) catch |err| {
if (sema.owner_func) |owner_func| {
owner_func.state = .dependency_failure;
} else {
sema.owner_decl.analysis = .dependency_failure;
}
return err;
};
}
fn refValue(sema: *Sema, block: *Block, ty: Type, val: Value) !Value {
var anon_decl = try block.startAnonDecl();
defer anon_decl.deinit();
const decl = try anon_decl.finish(
ty,
try val.copy(anon_decl.arena()),
0, // default alignment
);
try sema.maybeQueueFuncBodyAnalysis(decl);
try sema.mod.declareDeclDependency(sema.owner_decl_index, decl);
return try Value.Tag.decl_ref.create(sema.arena, decl);
}
fn optRefValue(sema: *Sema, block: *Block, ty: Type, opt_val: ?Value) !Value {
const val = opt_val orelse return Value.null;
const ptr_val = try sema.refValue(block, ty, val);
const result = try Value.Tag.opt_payload.create(sema.arena, ptr_val);
return result;
}
fn analyzeDeclRef(sema: *Sema, decl_index: Decl.Index) CompileError!Air.Inst.Ref {
return sema.analyzeDeclRefInner(decl_index, true);
}
/// Analyze a reference to the decl at the given index. Ensures the underlying decl is analyzed, but
/// only triggers analysis for function bodies if `analyze_fn_body` is true. If it's possible for a
/// decl_ref to end up in runtime code, the function body must be analyzed: `analyzeDeclRef` wraps
/// this function with `analyze_fn_body` set to true.
fn analyzeDeclRefInner(sema: *Sema, decl_index: Decl.Index, analyze_fn_body: bool) CompileError!Air.Inst.Ref {
const mod = sema.mod;
try mod.declareDeclDependency(sema.owner_decl_index, decl_index);
try sema.ensureDeclAnalyzed(decl_index);
const decl = mod.declPtr(decl_index);
const decl_tv = try decl.typedValue();
const ptr_ty = try mod.ptrType(.{
.elem_type = decl_tv.ty.ip_index,
.alignment = InternPool.Alignment.fromByteUnits(decl.@"align"),
.is_const = if (decl_tv.val.castTag(.variable)) |payload|
!payload.data.is_mutable
else
false,
.address_space = decl.@"addrspace",
});
if (analyze_fn_body) {
try sema.maybeQueueFuncBodyAnalysis(decl_index);
}
return sema.addConstant(ptr_ty, (try mod.intern(.{ .ptr = .{
.ty = ptr_ty.ip_index,
.addr = .{ .decl = decl_index },
} })).toValue());
}
fn maybeQueueFuncBodyAnalysis(sema: *Sema, decl_index: Decl.Index) !void {
const mod = sema.mod;
const decl = mod.declPtr(decl_index);
const tv = try decl.typedValue();
if (tv.ty.zigTypeTag(mod) != .Fn) return;
if (!try sema.fnHasRuntimeBits(tv.ty)) return;
const func = tv.val.castTag(.function) orelse return; // undef or extern_fn
try mod.ensureFuncBodyAnalysisQueued(func.data);
}
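/// Analyzes taking a reference to `operand`, as in (illustrative) `&expr`.
/// A comptime-known operand becomes a ref to an anonymous Decl; a runtime
/// operand gets a stack allocation, a store, and a cast to a const pointer.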
fn analyzeRef(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
operand: Air.Inst.Ref,
) CompileError!Air.Inst.Ref {
const operand_ty = sema.typeOf(operand);
if (try sema.resolveMaybeUndefVal(operand)) |val| {
switch (val.ip_index) {
.none => switch (val.tag()) {
.extern_fn, .function => {
const decl_index = val.pointerDecl().?;
return sema.analyzeDeclRef(decl_index);
},
else => {},
},
else => {},
}
var anon_decl = try block.startAnonDecl();
defer anon_decl.deinit();
return sema.analyzeDeclRef(try anon_decl.finish(
operand_ty,
try val.copy(anon_decl.arena()),
0, // default alignment
));
}
try sema.requireRuntimeBlock(block, src, null);
const address_space = target_util.defaultAddressSpace(sema.mod.getTarget(), .local);
const ptr_type = try Type.ptr(sema.arena, sema.mod, .{
.pointee_type = operand_ty,
.mutable = false,
.@"addrspace" = address_space,
});
const mut_ptr_type = try Type.ptr(sema.arena, sema.mod, .{
.pointee_type = operand_ty,
.@"addrspace" = address_space,
});
const alloc = try block.addTy(.alloc, mut_ptr_type);
try sema.storePtr(block, src, alloc, operand);
// TODO: Replace with sema.coerce when that supports adding pointer constness.
return sema.bitCast(block, ptr_type, alloc, src, null);
}
fn analyzeLoad(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
ptr: Air.Inst.Ref,
ptr_src: LazySrcLoc,
) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const ptr_ty = sema.typeOf(ptr);
const elem_ty = switch (ptr_ty.zigTypeTag(mod)) {
.Pointer => ptr_ty.childType(mod),
else => return sema.fail(block, ptr_src, "expected pointer, found '{}'", .{ptr_ty.fmt(sema.mod)}),
};
if (try sema.typeHasOnePossibleValue(elem_ty)) |opv| {
return sema.addConstant(elem_ty, opv);
}
if (try sema.resolveDefinedValue(block, ptr_src, ptr)) |ptr_val| {
if (try sema.pointerDeref(block, src, ptr_val, ptr_ty)) |elem_val| {
return sema.addConstant(elem_ty, elem_val);
}
}
if (ptr_ty.ptrInfo(mod).vector_index == .runtime) {
const ptr_inst = Air.refToIndex(ptr).?;
const air_tags = sema.air_instructions.items(.tag);
if (air_tags[ptr_inst] == .ptr_elem_ptr) {
const ty_pl = sema.air_instructions.items(.data)[ptr_inst].ty_pl;
const bin_op = sema.getTmpAir().extraData(Air.Bin, ty_pl.payload).data;
return block.addBinOp(.ptr_elem_val, bin_op.lhs, bin_op.rhs);
}
return sema.fail(block, ptr_src, "unable to determine vector element index of type '{}'", .{
ptr_ty.fmt(sema.mod),
});
}
return block.addTyOp(.load, elem_ty, ptr);
}
fn analyzeSlicePtr(
sema: *Sema,
block: *Block,
slice_src: LazySrcLoc,
slice: Air.Inst.Ref,
slice_ty: Type,
) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const result_ty = slice_ty.slicePtrFieldType(mod);
if (try sema.resolveMaybeUndefVal(slice)) |val| {
if (val.isUndef(mod)) return sema.addConstUndef(result_ty);
return sema.addConstant(result_ty, val.slicePtr(mod));
}
try sema.requireRuntimeBlock(block, slice_src, null);
return block.addTyOp(.slice_ptr, result_ty, slice);
}
fn analyzeSliceLen(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
slice_inst: Air.Inst.Ref,
) CompileError!Air.Inst.Ref {
const mod = sema.mod;
if (try sema.resolveMaybeUndefVal(slice_inst)) |slice_val| {
if (slice_val.isUndef(mod)) {
return sema.addConstUndef(Type.usize);
}
return sema.addIntUnsigned(Type.usize, slice_val.sliceLen(sema.mod));
}
try sema.requireRuntimeBlock(block, src, null);
return block.addTyOp(.slice_len, Type.usize, slice_inst);
}
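/// Lowers a null check, e.g. (illustrative) `opt == null`; when `invert_logic`
/// is true, the check is inverted, as in `opt != null`.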
fn analyzeIsNull(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
operand: Air.Inst.Ref,
invert_logic: bool,
) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const result_ty = Type.bool;
if (try sema.resolveMaybeUndefVal(operand)) |opt_val| {
if (opt_val.isUndef(mod)) {
return sema.addConstUndef(result_ty);
}
const is_null = opt_val.isNull(mod);
const bool_value = if (invert_logic) !is_null else is_null;
if (bool_value) {
return Air.Inst.Ref.bool_true;
} else {
return Air.Inst.Ref.bool_false;
}
}
const inverted_non_null_res = if (invert_logic) Air.Inst.Ref.bool_true else Air.Inst.Ref.bool_false;
const operand_ty = sema.typeOf(operand);
if (operand_ty.zigTypeTag(mod) == .Optional and operand_ty.optionalChild(mod).zigTypeTag(mod) == .NoReturn) {
return inverted_non_null_res;
}
if (operand_ty.zigTypeTag(mod) != .Optional and !operand_ty.isPtrLikeOptional(mod)) {
return inverted_non_null_res;
}
try sema.requireRuntimeBlock(block, src, null);
const air_tag: Air.Inst.Tag = if (invert_logic) .is_non_null else .is_null;
return block.addUnOp(air_tag, operand);
}
fn analyzePtrIsNonErrComptimeOnly(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
operand: Air.Inst.Ref,
) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const ptr_ty = sema.typeOf(operand);
assert(ptr_ty.zigTypeTag(mod) == .Pointer);
const child_ty = ptr_ty.childType(mod);
const child_tag = child_ty.zigTypeTag(mod);
if (child_tag != .ErrorSet and child_tag != .ErrorUnion) return Air.Inst.Ref.bool_true;
if (child_tag == .ErrorSet) return Air.Inst.Ref.bool_false;
assert(child_tag == .ErrorUnion);
_ = block;
_ = src;
return Air.Inst.Ref.none;
}
fn analyzeIsNonErrComptimeOnly(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
operand: Air.Inst.Ref,
) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const operand_ty = sema.typeOf(operand);
const ot = operand_ty.zigTypeTag(mod);
if (ot != .ErrorSet and ot != .ErrorUnion) return Air.Inst.Ref.bool_true;
if (ot == .ErrorSet) return Air.Inst.Ref.bool_false;
assert(ot == .ErrorUnion);
const payload_ty = operand_ty.errorUnionPayload(mod);
if (payload_ty.zigTypeTag(mod) == .NoReturn) {
return Air.Inst.Ref.bool_false;
}
if (Air.refToIndex(operand)) |operand_inst| {
switch (sema.air_instructions.items(.tag)[operand_inst]) {
.wrap_errunion_payload => return Air.Inst.Ref.bool_true,
.wrap_errunion_err => return Air.Inst.Ref.bool_false,
else => {},
}
} else if (operand == .undef) {
return sema.addConstUndef(Type.bool);
} else {
// None of the ref tags can be errors.
return Air.Inst.Ref.bool_true;
}
const maybe_operand_val = try sema.resolveMaybeUndefVal(operand);
// As an exception, if the error union's error set is known to be empty,
// we allow the comparison but always make it comptime-known.
const set_ty = operand_ty.errorUnionSet(mod);
switch (set_ty.ip_index) {
.anyerror_type => {},
else => switch (mod.intern_pool.indexToKey(set_ty.ip_index)) {
.error_set_type => |error_set_type| {
if (error_set_type.names.len == 0) return Air.Inst.Ref.bool_true;
},
.inferred_error_set_type => |ies_index| blk: {
// If the error set is empty, we must return a comptime true or false.
// However, we want to avoid unnecessarily resolving an inferred error set
// in case it is already non-empty.
const ies = mod.inferredErrorSetPtr(ies_index);
if (ies.is_anyerror) break :blk;
if (ies.errors.count() != 0) break :blk;
if (maybe_operand_val == null) {
// Try to avoid resolving inferred error set if possible.
if (ies.errors.count() != 0) break :blk;
if (ies.is_anyerror) break :blk;
for (ies.inferred_error_sets.keys()) |other_ies_index| {
if (ies_index == other_ies_index) continue;
try sema.resolveInferredErrorSet(block, src, other_ies_index);
const other_ies = mod.inferredErrorSetPtr(other_ies_index);
if (other_ies.is_anyerror) {
ies.is_anyerror = true;
ies.is_resolved = true;
break :blk;
}
if (other_ies.errors.count() != 0) break :blk;
}
if (ies.func == sema.owner_func) {
// We're checking the inferred error set of the current function, and none of
// its child inferred error sets contained any errors, meaning that any value
// of this type so far can't contain errors either.
return Air.Inst.Ref.bool_true;
}
try sema.resolveInferredErrorSet(block, src, ies_index);
if (ies.is_anyerror) break :blk;
if (ies.errors.count() == 0) return Air.Inst.Ref.bool_true;
}
},
else => unreachable,
},
}
if (maybe_operand_val) |err_union| {
if (err_union.isUndef(mod)) {
return sema.addConstUndef(Type.bool);
}
if (err_union.getError() == null) {
return Air.Inst.Ref.bool_true;
} else {
return Air.Inst.Ref.bool_false;
}
}
return Air.Inst.Ref.none;
}
fn analyzeIsNonErr(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
operand: Air.Inst.Ref,
) CompileError!Air.Inst.Ref {
const result = try sema.analyzeIsNonErrComptimeOnly(block, src, operand);
if (result == .none) {
try sema.requireRuntimeBlock(block, src, null);
return block.addUnOp(.is_non_err, operand);
} else {
return result;
}
}
fn analyzePtrIsNonErr(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
operand: Air.Inst.Ref,
) CompileError!Air.Inst.Ref {
const result = try sema.analyzePtrIsNonErrComptimeOnly(block, src, operand);
if (result == .none) {
try sema.requireRuntimeBlock(block, src, null);
return block.addUnOp(.is_non_err_ptr, operand);
} else {
return result;
}
}
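/// Analyzes a slice expression such as (illustrative) `ptr[start..end]` or
/// `ptr[start..end :s]`. When `by_length` is true, `uncasted_end_opt` holds a
/// length rather than an end index, and the end is computed as `start + len`.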
fn analyzeSlice(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
ptr_ptr: Air.Inst.Ref,
uncasted_start: Air.Inst.Ref,
uncasted_end_opt: Air.Inst.Ref,
sentinel_opt: Air.Inst.Ref,
sentinel_src: LazySrcLoc,
ptr_src: LazySrcLoc,
start_src: LazySrcLoc,
end_src: LazySrcLoc,
by_length: bool,
) CompileError!Air.Inst.Ref {
const mod = sema.mod;
// Slice expressions can operate on a variable whose type is an array. This requires
// the slice operand to be a pointer. When the operand is not an array, it will be a
// double pointer (a pointer to the pointer or slice being sliced).
const ptr_ptr_ty = sema.typeOf(ptr_ptr);
const ptr_ptr_child_ty = switch (ptr_ptr_ty.zigTypeTag(mod)) {
.Pointer => ptr_ptr_ty.childType(mod),
else => return sema.fail(block, ptr_src, "expected pointer, found '{}'", .{ptr_ptr_ty.fmt(sema.mod)}),
};
var array_ty = ptr_ptr_child_ty;
var slice_ty = ptr_ptr_ty;
var ptr_or_slice = ptr_ptr;
var elem_ty: Type = undefined;
var ptr_sentinel: ?Value = null;
switch (ptr_ptr_child_ty.zigTypeTag(mod)) {
.Array => {
ptr_sentinel = ptr_ptr_child_ty.sentinel(mod);
elem_ty = ptr_ptr_child_ty.childType(mod);
},
.Pointer => switch (ptr_ptr_child_ty.ptrSize(mod)) {
.One => {
const double_child_ty = ptr_ptr_child_ty.childType(mod);
if (double_child_ty.zigTypeTag(mod) == .Array) {
ptr_sentinel = double_child_ty.sentinel(mod);
ptr_or_slice = try sema.analyzeLoad(block, src, ptr_ptr, ptr_src);
slice_ty = ptr_ptr_child_ty;
array_ty = double_child_ty;
elem_ty = double_child_ty.childType(mod);
} else {
return sema.fail(block, src, "slice of single-item pointer", .{});
}
},
.Many, .C => {
ptr_sentinel = ptr_ptr_child_ty.sentinel(mod);
ptr_or_slice = try sema.analyzeLoad(block, src, ptr_ptr, ptr_src);
slice_ty = ptr_ptr_child_ty;
array_ty = ptr_ptr_child_ty;
elem_ty = ptr_ptr_child_ty.childType(mod);
if (ptr_ptr_child_ty.ptrSize(mod) == .C) {
if (try sema.resolveDefinedValue(block, ptr_src, ptr_or_slice)) |ptr_val| {
if (ptr_val.isNull(mod)) {
return sema.fail(block, src, "slice of null pointer", .{});
}
}
}
},
.Slice => {
ptr_sentinel = ptr_ptr_child_ty.sentinel(mod);
ptr_or_slice = try sema.analyzeLoad(block, src, ptr_ptr, ptr_src);
slice_ty = ptr_ptr_child_ty;
array_ty = ptr_ptr_child_ty;
elem_ty = ptr_ptr_child_ty.childType(mod);
},
},
else => return sema.fail(block, src, "slice of non-array type '{}'", .{ptr_ptr_child_ty.fmt(mod)}),
}
const ptr = if (slice_ty.isSlice(mod))
try sema.analyzeSlicePtr(block, ptr_src, ptr_or_slice, slice_ty)
else
ptr_or_slice;
const start = try sema.coerce(block, Type.usize, uncasted_start, start_src);
const new_ptr = try sema.analyzePtrArithmetic(block, src, ptr, start, .ptr_add, ptr_src, start_src);
// true if and only if the end index of the slice, implicitly or explicitly, equals
// the length of the underlying object being sliced. We might know the length of the
// underlying object because it is an array (whose length is part of its type), or
// because it is a comptime-known slice value.
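// For example (illustrative): given `var a: [5]u8 = undefined;`, both `a[1..]`
// and `a[1..5]` slice to the end, so the result type keeps a's sentinel, if any.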
var end_is_len = uncasted_end_opt == .none;
const end = e: {
if (array_ty.zigTypeTag(mod) == .Array) {
const len_val = try mod.intValue(Type.usize, array_ty.arrayLen(mod));
if (!end_is_len) {
const end = if (by_length) end: {
const len = try sema.coerce(block, Type.usize, uncasted_end_opt, end_src);
const uncasted_end = try sema.analyzeArithmetic(block, .add, start, len, src, start_src, end_src, false);
break :end try sema.coerce(block, Type.usize, uncasted_end, end_src);
} else try sema.coerce(block, Type.usize, uncasted_end_opt, end_src);
if (try sema.resolveMaybeUndefVal(end)) |end_val| {
const len_s_val = try mod.intValue(
Type.usize,
array_ty.arrayLenIncludingSentinel(mod),
);
if (!(try sema.compareAll(end_val, .lte, len_s_val, Type.usize))) {
const sentinel_label: []const u8 = if (array_ty.sentinel(mod) != null)
" +1 (sentinel)"
else
"";
return sema.fail(
block,
end_src,
"end index {} out of bounds for array of length {}{s}",
.{
end_val.fmtValue(Type.usize, mod),
len_val.fmtValue(Type.usize, mod),
sentinel_label,
},
);
}
// end_is_len is only true if we are NOT using the sentinel-inclusive
// length; for the sentinel-inclusive length, we don't want the result
// type to contain the sentinel.
if (end_val.eql(len_val, Type.usize, mod)) {
end_is_len = true;
}
}
break :e end;
}
break :e try sema.addConstant(Type.usize, len_val);
} else if (slice_ty.isSlice(mod)) {
if (!end_is_len) {
const end = if (by_length) end: {
const len = try sema.coerce(block, Type.usize, uncasted_end_opt, end_src);
const uncasted_end = try sema.analyzeArithmetic(block, .add, start, len, src, start_src, end_src, false);
break :end try sema.coerce(block, Type.usize, uncasted_end, end_src);
} else try sema.coerce(block, Type.usize, uncasted_end_opt, end_src);
if (try sema.resolveDefinedValue(block, end_src, end)) |end_val| {
if (try sema.resolveMaybeUndefVal(ptr_or_slice)) |slice_val| {
if (slice_val.isUndef(mod)) {
return sema.fail(block, src, "slice of undefined", .{});
}
const has_sentinel = slice_ty.sentinel(mod) != null;
const slice_len = slice_val.sliceLen(mod);
const len_plus_sent = slice_len + @boolToInt(has_sentinel);
const slice_len_val_with_sentinel = try mod.intValue(Type.usize, len_plus_sent);
if (!(try sema.compareAll(end_val, .lte, slice_len_val_with_sentinel, Type.usize))) {
const sentinel_label: []const u8 = if (has_sentinel)
" +1 (sentinel)"
else
"";
return sema.fail(
block,
end_src,
"end index {} out of bounds for slice of length {d}{s}",
.{
end_val.fmtValue(Type.usize, mod),
slice_val.sliceLen(mod),
sentinel_label,
},
);
}
// If the slice has a sentinel, end_is_len is only true
// if the end index equals the length WITHOUT the
// sentinel, so that we don't add a sentinel to the type.
const slice_len_val = try mod.intValue(Type.usize, slice_len);
if (end_val.eql(slice_len_val, Type.usize, mod)) {
end_is_len = true;
}
}
}
break :e end;
}
break :e try sema.analyzeSliceLen(block, src, ptr_or_slice);
}
if (!end_is_len) {
if (by_length) {
const len = try sema.coerce(block, Type.usize, uncasted_end_opt, end_src);
const uncasted_end = try sema.analyzeArithmetic(block, .add, start, len, src, start_src, end_src, false);
break :e try sema.coerce(block, Type.usize, uncasted_end, end_src);
} else break :e try sema.coerce(block, Type.usize, uncasted_end_opt, end_src);
}
return sema.fail(block, src, "slice of pointer must include end value", .{});
};
const sentinel = s: {
if (sentinel_opt != .none) {
const casted = try sema.coerce(block, elem_ty, sentinel_opt, sentinel_src);
break :s try sema.resolveConstValue(block, sentinel_src, casted, "slice sentinel must be comptime-known");
}
// If we are slicing to the end of something that is sentinel-terminated
// then the resulting slice type is also sentinel-terminated.
if (end_is_len) {
if (ptr_sentinel) |sent| {
break :s sent;
}
}
break :s null;
};
const slice_sentinel = if (sentinel_opt != .none) sentinel else null;
// requirement: start <= end
var need_start_gt_end_check = true;
if (try sema.resolveDefinedValue(block, end_src, end)) |end_val| {
if (try sema.resolveDefinedValue(block, start_src, start)) |start_val| {
need_start_gt_end_check = false;
if (!by_length and !(try sema.compareAll(start_val, .lte, end_val, Type.usize))) {
return sema.fail(
block,
start_src,
"start index {} is larger than end index {}",
.{
start_val.fmtValue(Type.usize, mod),
end_val.fmtValue(Type.usize, mod),
},
);
}
if (try sema.resolveMaybeUndefVal(new_ptr)) |ptr_val| sentinel_check: {
const expected_sentinel = sentinel orelse break :sentinel_check;
const start_int = start_val.getUnsignedInt(mod).?;
const end_int = end_val.getUnsignedInt(mod).?;
const sentinel_index = try sema.usizeCast(block, end_src, end_int - start_int);
const elem_ptr = try ptr_val.elemPtr(sema.typeOf(new_ptr), sema.arena, sentinel_index, sema.mod);
const res = try sema.pointerDerefExtra(block, src, elem_ptr, elem_ty, false);
const actual_sentinel = switch (res) {
.runtime_load => break :sentinel_check,
.val => |v| v,
.needed_well_defined => |ty| return sema.fail(
block,
src,
"comptime dereference requires '{}' to have a well-defined layout, but it does not.",
.{ty.fmt(sema.mod)},
),
.out_of_bounds => |ty| return sema.fail(
block,
end_src,
"slice end index {d} exceeds bounds of containing decl of type '{}'",
.{ end_int, ty.fmt(sema.mod) },
),
};
if (!actual_sentinel.eql(expected_sentinel, elem_ty, sema.mod)) {
const msg = msg: {
const msg = try sema.errMsg(block, src, "value in memory does not match slice sentinel", .{});
errdefer msg.destroy(sema.gpa);
try sema.errNote(block, src, msg, "expected '{}', found '{}'", .{
expected_sentinel.fmtValue(elem_ty, sema.mod),
actual_sentinel.fmtValue(elem_ty, sema.mod),
});
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
}
}
}
if (!by_length and block.wantSafety() and !block.is_comptime and need_start_gt_end_check) {
// requirement: start <= end
try sema.panicStartGreaterThanEnd(block, start, end);
}
const new_len = if (by_length)
try sema.coerce(block, Type.usize, uncasted_end_opt, end_src)
else
try sema.analyzeArithmetic(block, .sub, end, start, src, end_src, start_src, false);
const opt_new_len_val = try sema.resolveDefinedValue(block, src, new_len);
const new_ptr_ty_info = sema.typeOf(new_ptr).ptrInfo(mod);
const new_allowzero = new_ptr_ty_info.@"allowzero" and sema.typeOf(ptr).ptrSize(mod) != .C;
if (opt_new_len_val) |new_len_val| {
const new_len_int = new_len_val.toUnsignedInt(mod);
const return_ty = try Type.ptr(sema.arena, mod, .{
.pointee_type = try Type.array(sema.arena, new_len_int, sentinel, elem_ty, mod),
.sentinel = null,
.@"align" = new_ptr_ty_info.@"align",
.@"addrspace" = new_ptr_ty_info.@"addrspace",
.mutable = new_ptr_ty_info.mutable,
.@"allowzero" = new_allowzero,
.@"volatile" = new_ptr_ty_info.@"volatile",
.size = .One,
});
const opt_new_ptr_val = try sema.resolveMaybeUndefVal(new_ptr);
const new_ptr_val = opt_new_ptr_val orelse {
const result = try block.addBitCast(return_ty, new_ptr);
if (block.wantSafety()) {
// requirement: slicing C ptr is non-null
if (ptr_ptr_child_ty.isCPtr(mod)) {
const is_non_null = try sema.analyzeIsNull(block, ptr_src, ptr, true);
try sema.addSafetyCheck(block, is_non_null, .unwrap_null);
}
if (slice_ty.isSlice(mod)) {
const slice_len_inst = try block.addTyOp(.slice_len, Type.usize, ptr_or_slice);
const actual_len = if (slice_ty.sentinel(mod) == null)
slice_len_inst
else
try sema.analyzeArithmetic(block, .add, slice_len_inst, .one, src, end_src, end_src, true);
const actual_end = if (slice_sentinel != null)
try sema.analyzeArithmetic(block, .add, end, .one, src, end_src, end_src, true)
else
end;
try sema.panicIndexOutOfBounds(block, actual_end, actual_len, .cmp_lte);
}
// requirement: result[new_len] == slice_sentinel
try sema.panicSentinelMismatch(block, slice_sentinel, elem_ty, result, new_len);
}
return result;
};
if (!new_ptr_val.isUndef(mod)) {
return sema.addConstant(return_ty, new_ptr_val);
}
// Special case: @as([]i32, undefined)[x..x]
if (new_len_int == 0) {
return sema.addConstUndef(return_ty);
}
return sema.fail(block, src, "non-zero length slice of undefined pointer", .{});
}
const return_ty = try Type.ptr(sema.arena, mod, .{
.pointee_type = elem_ty,
.sentinel = sentinel,
.@"align" = new_ptr_ty_info.@"align",
.@"addrspace" = new_ptr_ty_info.@"addrspace",
.mutable = new_ptr_ty_info.mutable,
.@"allowzero" = new_allowzero,
.@"volatile" = new_ptr_ty_info.@"volatile",
.size = .Slice,
});
const runtime_src = if ((try sema.resolveMaybeUndefVal(ptr_or_slice)) == null)
ptr_src
else if ((try sema.resolveMaybeUndefVal(start)) == null)
start_src
else
end_src;
try sema.requireRuntimeBlock(block, src, runtime_src);
if (block.wantSafety()) {
// requirement: slicing C ptr is non-null
if (ptr_ptr_child_ty.isCPtr(mod)) {
const is_non_null = try sema.analyzeIsNull(block, ptr_src, ptr, true);
try sema.addSafetyCheck(block, is_non_null, .unwrap_null);
}
// requirement: end <= len
const opt_len_inst = if (array_ty.zigTypeTag(mod) == .Array)
try sema.addIntUnsigned(Type.usize, array_ty.arrayLenIncludingSentinel(mod))
else if (slice_ty.isSlice(mod)) blk: {
if (try sema.resolveDefinedValue(block, src, ptr_or_slice)) |slice_val| {
// we don't need to add one for sentinels because the
// underlying value data includes the sentinel
break :blk try sema.addIntUnsigned(Type.usize, slice_val.sliceLen(mod));
}
const slice_len_inst = try block.addTyOp(.slice_len, Type.usize, ptr_or_slice);
if (slice_ty.sentinel(mod) == null) break :blk slice_len_inst;
// we have to add one because slice lengths don't include the sentinel
break :blk try sema.analyzeArithmetic(block, .add, slice_len_inst, .one, src, end_src, end_src, true);
} else null;
if (opt_len_inst) |len_inst| {
const actual_end = if (slice_sentinel != null)
try sema.analyzeArithmetic(block, .add, end, .one, src, end_src, end_src, true)
else
end;
try sema.panicIndexOutOfBounds(block, actual_end, len_inst, .cmp_lte);
}
// requirement: start <= end
try sema.panicIndexOutOfBounds(block, start, end, .cmp_lte);
}
const result = try block.addInst(.{
.tag = .slice,
.data = .{ .ty_pl = .{
.ty = try sema.addType(return_ty),
.payload = try sema.addExtra(Air.Bin{
.lhs = new_ptr,
.rhs = new_len,
}),
} },
});
if (block.wantSafety()) {
// requirement: result[new_len] == slice_sentinel
try sema.panicSentinelMismatch(block, slice_sentinel, elem_ty, result, new_len);
}
return result;
}
/// Asserts that lhs and rhs types are both numeric.
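/// For example (an illustrative sketch): a runtime `u8` compared against a
/// runtime `i8` is compared by casting both operands to `i9`, which can
/// represent every value of both types.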
fn cmpNumeric(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
uncasted_lhs: Air.Inst.Ref,
uncasted_rhs: Air.Inst.Ref,
op: std.math.CompareOperator,
lhs_src: LazySrcLoc,
rhs_src: LazySrcLoc,
) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const lhs_ty = sema.typeOf(uncasted_lhs);
const rhs_ty = sema.typeOf(uncasted_rhs);
assert(lhs_ty.isNumeric(mod));
assert(rhs_ty.isNumeric(mod));
const lhs_ty_tag = lhs_ty.zigTypeTag(mod);
const rhs_ty_tag = rhs_ty.zigTypeTag(mod);
const target = sema.mod.getTarget();
// One exception to heterogeneous comparison: comptime_float needs to
// coerce to fixed-width float.
const lhs = if (lhs_ty_tag == .ComptimeFloat and rhs_ty_tag == .Float)
try sema.coerce(block, rhs_ty, uncasted_lhs, lhs_src)
else
uncasted_lhs;
const rhs = if (lhs_ty_tag == .Float and rhs_ty_tag == .ComptimeFloat)
try sema.coerce(block, lhs_ty, uncasted_rhs, rhs_src)
else
uncasted_rhs;
const runtime_src: LazySrcLoc = src: {
if (try sema.resolveMaybeUndefVal(lhs)) |lhs_val| {
if (try sema.resolveMaybeUndefVal(rhs)) |rhs_val| {
// Compare ints: const vs. undefined (or vice versa)
if (!lhs_val.isUndef(mod) and (lhs_ty.isInt(mod) or lhs_ty_tag == .ComptimeInt) and rhs_ty.isInt(mod) and rhs_val.isUndef(mod)) {
try sema.resolveLazyValue(lhs_val);
if (try sema.compareIntsOnlyPossibleResult(lhs_val, op, rhs_ty)) |res| {
return if (res) Air.Inst.Ref.bool_true else Air.Inst.Ref.bool_false;
}
} else if (!rhs_val.isUndef(mod) and (rhs_ty.isInt(mod) or rhs_ty_tag == .ComptimeInt) and lhs_ty.isInt(mod) and lhs_val.isUndef(mod)) {
try sema.resolveLazyValue(rhs_val);
if (try sema.compareIntsOnlyPossibleResult(rhs_val, op.reverse(), lhs_ty)) |res| {
return if (res) Air.Inst.Ref.bool_true else Air.Inst.Ref.bool_false;
}
}
if (lhs_val.isUndef(mod) or rhs_val.isUndef(mod)) {
return sema.addConstUndef(Type.bool);
}
if (lhs_val.isNan(mod) or rhs_val.isNan(mod)) {
if (op == std.math.CompareOperator.neq) {
return Air.Inst.Ref.bool_true;
} else {
return Air.Inst.Ref.bool_false;
}
}
if (try Value.compareHeteroAdvanced(lhs_val, op, rhs_val, mod, sema)) {
return Air.Inst.Ref.bool_true;
} else {
return Air.Inst.Ref.bool_false;
}
} else {
if (!lhs_val.isUndef(mod) and (lhs_ty.isInt(mod) or lhs_ty_tag == .ComptimeInt) and rhs_ty.isInt(mod)) {
// Compare ints: const vs. var
try sema.resolveLazyValue(lhs_val);
if (try sema.compareIntsOnlyPossibleResult(lhs_val, op, rhs_ty)) |res| {
return if (res) Air.Inst.Ref.bool_true else Air.Inst.Ref.bool_false;
}
}
break :src rhs_src;
}
} else {
if (try sema.resolveMaybeUndefVal(rhs)) |rhs_val| {
if (!rhs_val.isUndef(mod) and (rhs_ty.isInt(mod) or rhs_ty_tag == .ComptimeInt) and lhs_ty.isInt(mod)) {
// Compare ints: var vs. const
try sema.resolveLazyValue(rhs_val);
if (try sema.compareIntsOnlyPossibleResult(rhs_val, op.reverse(), lhs_ty)) |res| {
return if (res) Air.Inst.Ref.bool_true else Air.Inst.Ref.bool_false;
}
}
}
break :src lhs_src;
}
};
// TODO handle comparisons against lazy zero values
// Some values can be compared against zero without being runtime-known or without forcing
// a full resolution of their value, for example `@sizeOf(@Frame(function))` is known to
// always be nonzero, and we benefit from not forcing the full evaluation and stack frame layout
// of this function if we don't need to.
try sema.requireRuntimeBlock(block, src, runtime_src);
// For floats, emit a float comparison instruction.
const lhs_is_float = switch (lhs_ty_tag) {
.Float, .ComptimeFloat => true,
else => false,
};
const rhs_is_float = switch (rhs_ty_tag) {
.Float, .ComptimeFloat => true,
else => false,
};
if (lhs_is_float and rhs_is_float) {
// Smaller fixed-width floats coerce to larger fixed-width floats.
// comptime_float coerces to fixed-width float.
const dest_ty = x: {
if (lhs_ty_tag == .ComptimeFloat) {
break :x rhs_ty;
} else if (rhs_ty_tag == .ComptimeFloat) {
break :x lhs_ty;
}
if (lhs_ty.floatBits(target) >= rhs_ty.floatBits(target)) {
break :x lhs_ty;
} else {
break :x rhs_ty;
}
};
const casted_lhs = try sema.coerce(block, dest_ty, lhs, lhs_src);
const casted_rhs = try sema.coerce(block, dest_ty, rhs, rhs_src);
return block.addBinOp(Air.Inst.Tag.fromCmpOp(op, block.float_mode == .Optimized), casted_lhs, casted_rhs);
}
// For mixed unsigned integer sizes, implicit cast both operands to the larger integer.
// For mixed signed and unsigned integers, implicit cast both operands to a signed
// integer with + 1 bit.
// For mixed floats and integers, extract the integer part from the float, cast that to
// a signed integer with mantissa bits + 1, and if there was any non-integral part of the float,
// add/subtract 1.
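// For example (illustrative): a comptime-known 2.5 contributes its integer part,
// adjusted by 1 for the fractional part, when computing how many bits the common
// integer comparison type needs.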
const lhs_is_signed = if (try sema.resolveDefinedValue(block, lhs_src, lhs)) |lhs_val|
!(try lhs_val.compareAllWithZeroAdvanced(.gte, sema))
else
(lhs_ty.isRuntimeFloat() or lhs_ty.isSignedInt(mod));
const rhs_is_signed = if (try sema.resolveDefinedValue(block, rhs_src, rhs)) |rhs_val|
!(try rhs_val.compareAllWithZeroAdvanced(.gte, sema))
else
(rhs_ty.isRuntimeFloat() or rhs_ty.isSignedInt(mod));
const dest_int_is_signed = lhs_is_signed or rhs_is_signed;
var dest_float_type: ?Type = null;
var lhs_bits: usize = undefined;
if (try sema.resolveMaybeUndefVal(lhs)) |lhs_val| {
try sema.resolveLazyValue(lhs_val);
if (lhs_val.isUndef(mod))
return sema.addConstUndef(Type.bool);
if (lhs_val.isNan(mod)) switch (op) {
.neq => return Air.Inst.Ref.bool_true,
else => return Air.Inst.Ref.bool_false,
};
if (lhs_val.isInf(mod)) switch (op) {
.neq => return Air.Inst.Ref.bool_true,
.eq => return Air.Inst.Ref.bool_false,
.gt, .gte => return if (lhs_val.isNegativeInf(mod)) Air.Inst.Ref.bool_false else Air.Inst.Ref.bool_true,
.lt, .lte => return if (lhs_val.isNegativeInf(mod)) Air.Inst.Ref.bool_true else Air.Inst.Ref.bool_false,
};
if (!rhs_is_signed) {
switch (lhs_val.orderAgainstZero(mod)) {
.gt => {},
.eq => switch (op) { // LHS = 0, RHS is unsigned
.lte => return Air.Inst.Ref.bool_true,
.gt => return Air.Inst.Ref.bool_false,
else => {},
},
.lt => switch (op) { // LHS < 0, RHS is unsigned
.neq, .lt, .lte => return Air.Inst.Ref.bool_true,
.eq, .gt, .gte => return Air.Inst.Ref.bool_false,
},
}
}
if (lhs_is_float) {
if (lhs_val.floatHasFraction(mod)) {
switch (op) {
.eq => return Air.Inst.Ref.bool_false,
.neq => return Air.Inst.Ref.bool_true,
else => {},
}
}
var bigint = try float128IntPartToBigInt(sema.gpa, lhs_val.toFloat(f128, mod));
defer bigint.deinit();
if (lhs_val.floatHasFraction(mod)) {
if (lhs_is_signed) {
try bigint.addScalar(&bigint, -1);
} else {
try bigint.addScalar(&bigint, 1);
}
}
lhs_bits = bigint.toConst().bitCountTwosComp();
} else {
lhs_bits = lhs_val.intBitCountTwosComp(mod);
}
lhs_bits += @boolToInt(!lhs_is_signed and dest_int_is_signed);
} else if (lhs_is_float) {
dest_float_type = lhs_ty;
} else {
const int_info = lhs_ty.intInfo(mod);
lhs_bits = int_info.bits + @boolToInt(int_info.signedness == .unsigned and dest_int_is_signed);
}
var rhs_bits: usize = undefined;
if (try sema.resolveMaybeUndefVal(rhs)) |rhs_val| {
try sema.resolveLazyValue(rhs_val);
if (rhs_val.isUndef(mod))
return sema.addConstUndef(Type.bool);
if (rhs_val.isNan(mod)) switch (op) {
.neq => return Air.Inst.Ref.bool_true,
else => return Air.Inst.Ref.bool_false,
};
if (rhs_val.isInf(mod)) switch (op) {
.neq => return Air.Inst.Ref.bool_true,
.eq => return Air.Inst.Ref.bool_false,
.gt, .gte => return if (rhs_val.isNegativeInf(mod)) Air.Inst.Ref.bool_true else Air.Inst.Ref.bool_false,
.lt, .lte => return if (rhs_val.isNegativeInf(mod)) Air.Inst.Ref.bool_false else Air.Inst.Ref.bool_true,
};
if (!lhs_is_signed) {
switch (rhs_val.orderAgainstZero(mod)) {
.gt => {},
.eq => switch (op) { // RHS = 0, LHS is unsigned
.gte => return Air.Inst.Ref.bool_true,
.lt => return Air.Inst.Ref.bool_false,
else => {},
},
.lt => switch (op) { // RHS < 0, LHS is unsigned
.neq, .gt, .gte => return Air.Inst.Ref.bool_true,
.eq, .lt, .lte => return Air.Inst.Ref.bool_false,
},
}
}
if (rhs_is_float) {
if (rhs_val.floatHasFraction(mod)) {
switch (op) {
.eq => return Air.Inst.Ref.bool_false,
.neq => return Air.Inst.Ref.bool_true,
else => {},
}
}
var bigint = try float128IntPartToBigInt(sema.gpa, rhs_val.toFloat(f128, mod));
defer bigint.deinit();
if (rhs_val.floatHasFraction(mod)) {
if (rhs_is_signed) {
try bigint.addScalar(&bigint, -1);
} else {
try bigint.addScalar(&bigint, 1);
}
}
rhs_bits = bigint.toConst().bitCountTwosComp();
} else {
rhs_bits = rhs_val.intBitCountTwosComp(mod);
}
rhs_bits += @boolToInt(!rhs_is_signed and dest_int_is_signed);
} else if (rhs_is_float) {
dest_float_type = rhs_ty;
} else {
const int_info = rhs_ty.intInfo(mod);
rhs_bits = int_info.bits + @boolToInt(int_info.signedness == .unsigned and dest_int_is_signed);
}
const dest_ty = if (dest_float_type) |ft| ft else blk: {
const max_bits = std.math.max(lhs_bits, rhs_bits);
const casted_bits = std.math.cast(u16, max_bits) orelse return sema.fail(block, src, "{d} exceeds maximum integer bit count", .{max_bits});
const signedness: std.builtin.Signedness = if (dest_int_is_signed) .signed else .unsigned;
break :blk try mod.intType(signedness, casted_bits);
};
const casted_lhs = try sema.coerce(block, dest_ty, lhs, lhs_src);
const casted_rhs = try sema.coerce(block, dest_ty, rhs, rhs_src);
return block.addBinOp(Air.Inst.Tag.fromCmpOp(op, block.float_mode == .Optimized), casted_lhs, casted_rhs);
}
/// Asserts that LHS value is an int or comptime int and not undefined, and
/// that RHS type is an int. Given a const LHS and an unknown RHS, attempt to
/// determine whether `op` has a guaranteed result.
/// If it cannot be determined, returns null.
/// Otherwise returns a bool for the guaranteed comparison operation.
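/// For example (illustrative): with a comptime-known LHS of 300 and an RHS of
/// type `u8`, `lhs > rhs` is guaranteed true, since 300 needs more than 8 bits.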
fn compareIntsOnlyPossibleResult(
sema: *Sema,
lhs_val: Value,
op: std.math.CompareOperator,
rhs_ty: Type,
) Allocator.Error!?bool {
const mod = sema.mod;
const rhs_info = rhs_ty.intInfo(mod);
const vs_zero = lhs_val.orderAgainstZeroAdvanced(mod, sema) catch unreachable;
const is_zero = vs_zero == .eq;
const is_negative = vs_zero == .lt;
const is_positive = vs_zero == .gt;
// Anything vs. a zero-bit type has a guaranteed outcome.
if (rhs_info.bits == 0) return switch (op) {
.eq, .lte, .gte => is_zero,
.neq, .lt, .gt => !is_zero,
};
// Special case for i1, which can only be 0 or -1.
// A positive LHS has a guaranteed outcome; a zero LHS is decided for some operators.
if (rhs_info.bits == 1 and rhs_info.signedness == .signed) {
if (is_positive) return switch (op) {
.gt, .gte, .neq => true,
.lt, .lte, .eq => false,
};
if (is_zero) return switch (op) {
.gte => true,
.lt => false,
.gt, .lte, .eq, .neq => null,
};
}
// Negative vs. unsigned has guaranteed outcome.
if (rhs_info.signedness == .unsigned and is_negative) return switch (op) {
.eq, .gt, .gte => false,
.neq, .lt, .lte => true,
};
const sign_adj = @boolToInt(!is_negative and rhs_info.signedness == .signed);
const req_bits = lhs_val.intBitCountTwosComp(mod) + sign_adj;
// No sized type can have more than 65535 bits.
// The RHS type operand is either a runtime value or a sized (but undefined) constant.
if (req_bits > 65535) return switch (op) {
.lt, .lte => is_negative,
.gt, .gte => is_positive,
.eq => false,
.neq => true,
};
const fits = req_bits <= rhs_info.bits;
// Oversized int has guaranteed outcome.
switch (op) {
.eq => return if (!fits) false else null,
.neq => return if (!fits) true else null,
.lt, .lte => if (!fits) return is_negative,
.gt, .gte => if (!fits) return !is_negative,
}
// For any other comparison, we need to know if the LHS value is
// equal to the maximum or minimum possible value of the RHS type.
const edge: struct { min: bool, max: bool } = edge: {
if (is_zero and rhs_info.signedness == .unsigned) break :edge .{
.min = true,
.max = false,
};
if (req_bits != rhs_info.bits) break :edge .{
.min = false,
.max = false,
};
const ty = try mod.intType(
if (is_negative) .signed else .unsigned,
@intCast(u16, req_bits),
);
const pop_count = lhs_val.popCount(ty, mod);
if (is_negative) {
break :edge .{
.min = pop_count == 1,
.max = false,
};
} else {
break :edge .{
.min = false,
.max = pop_count == req_bits - sign_adj,
};
}
};
assert(fits);
return switch (op) {
.lt => if (edge.max) false else null,
.lte => if (edge.min) true else null,
.gt => if (edge.min) false else null,
.gte => if (edge.max) true else null,
.eq, .neq => unreachable,
};
}
/// Asserts that lhs and rhs types are both vectors.
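/// For example (illustrative): comparing two `@Vector(4, u8)` values with `==`
/// produces a `@Vector(4, bool)` result.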
fn cmpVector(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
lhs: Air.Inst.Ref,
rhs: Air.Inst.Ref,
op: std.math.CompareOperator,
lhs_src: LazySrcLoc,
rhs_src: LazySrcLoc,
) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const lhs_ty = sema.typeOf(lhs);
const rhs_ty = sema.typeOf(rhs);
assert(lhs_ty.zigTypeTag(mod) == .Vector);
assert(rhs_ty.zigTypeTag(mod) == .Vector);
try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
const resolved_ty = try sema.resolvePeerTypes(block, src, &.{ lhs, rhs }, .{ .override = &.{ lhs_src, rhs_src } });
const casted_lhs = try sema.coerce(block, resolved_ty, lhs, lhs_src);
const casted_rhs = try sema.coerce(block, resolved_ty, rhs, rhs_src);
const result_ty = try mod.vectorType(.{
.len = lhs_ty.vectorLen(mod),
.child = .bool_type,
});
const runtime_src: LazySrcLoc = src: {
if (try sema.resolveMaybeUndefVal(casted_lhs)) |lhs_val| {
if (try sema.resolveMaybeUndefVal(casted_rhs)) |rhs_val| {
if (lhs_val.isUndef(mod) or rhs_val.isUndef(mod)) {
return sema.addConstUndef(result_ty);
}
const cmp_val = try sema.compareVector(lhs_val, op, rhs_val, resolved_ty);
return sema.addConstant(result_ty, cmp_val);
} else {
break :src rhs_src;
}
} else {
break :src lhs_src;
}
};
try sema.requireRuntimeBlock(block, src, runtime_src);
return block.addCmpVector(casted_lhs, casted_rhs, op);
}
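/// Wraps a value in an optional, e.g. (illustrative) the coercion in
/// `const x: ?u32 = some_u32;` where `some_u32` is a hypothetical `u32` value.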
fn wrapOptional(
sema: *Sema,
block: *Block,
dest_ty: Type,
inst: Air.Inst.Ref,
inst_src: LazySrcLoc,
) !Air.Inst.Ref {
if (try sema.resolveMaybeUndefVal(inst)) |val| {
return sema.addConstant(dest_ty, try Value.Tag.opt_payload.create(sema.arena, val));
}
try sema.requireRuntimeBlock(block, inst_src, null);
return block.addTyOp(.wrap_optional, dest_ty, inst);
}
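/// Wraps a payload value into an error union, e.g. (illustrative) the coercion
/// in `const r: anyerror!u32 = some_u32;` (hypothetical names).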
fn wrapErrorUnionPayload(
sema: *Sema,
block: *Block,
dest_ty: Type,
inst: Air.Inst.Ref,
inst_src: LazySrcLoc,
) !Air.Inst.Ref {
const mod = sema.mod;
const dest_payload_ty = dest_ty.errorUnionPayload(mod);
const coerced = try sema.coerceExtra(block, dest_payload_ty, inst, inst_src, .{ .report_err = false });
if (try sema.resolveMaybeUndefVal(coerced)) |val| {
return sema.addConstant(dest_ty, try Value.Tag.eu_payload.create(sema.arena, val));
}
try sema.requireRuntimeBlock(block, inst_src, null);
try sema.queueFullTypeResolution(dest_payload_ty);
return block.addTyOp(.wrap_errunion_payload, dest_ty, coerced);
}
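/// Wraps an error value into an error union, e.g. (illustrative) the coercion
/// in `const r: MyError!u32 = error.Oops;` (hypothetical names). Verifies that
/// the error is a member of the destination error set.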
fn wrapErrorUnionSet(
sema: *Sema,
block: *Block,
dest_ty: Type,
inst: Air.Inst.Ref,
inst_src: LazySrcLoc,
) !Air.Inst.Ref {
const mod = sema.mod;
const ip = &mod.intern_pool;
const inst_ty = sema.typeOf(inst);
const dest_err_set_ty = dest_ty.errorUnionSet(mod);
if (try sema.resolveMaybeUndefVal(inst)) |val| {
switch (dest_err_set_ty.ip_index) {
.anyerror_type => {},
else => switch (ip.indexToKey(dest_err_set_ty.ip_index)) {
.error_set_type => |error_set_type| ok: {
const expected_name = val.castTag(.@"error").?.data.name;
if (ip.getString(expected_name).unwrap()) |expected_name_interned| {
if (error_set_type.nameIndex(ip, expected_name_interned) != null)
break :ok;
}
return sema.failWithErrorSetCodeMissing(block, inst_src, dest_err_set_ty, inst_ty);
},
.inferred_error_set_type => |ies_index| ok: {
const ies = mod.inferredErrorSetPtr(ies_index);
const expected_name = val.castTag(.@"error").?.data.name;
// We carefully do this in an order that avoids unnecessarily
// resolving the destination error set type.
if (ies.is_anyerror) break :ok;
if (ip.getString(expected_name).unwrap()) |expected_name_interned| {
if (ies.errors.contains(expected_name_interned)) break :ok;
}
if (.ok == try sema.coerceInMemoryAllowedErrorSets(block, dest_err_set_ty, inst_ty, inst_src, inst_src)) {
break :ok;
}
return sema.failWithErrorSetCodeMissing(block, inst_src, dest_err_set_ty, inst_ty);
},
else => unreachable,
},
}
return sema.addConstant(dest_ty, val);
}
try sema.requireRuntimeBlock(block, inst_src, null);
const coerced = try sema.coerce(block, dest_err_set_ty, inst, inst_src);
return block.addTyOp(.wrap_errunion_err, dest_ty, coerced);
}
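/// Extracts the enum tag from a union value, e.g. (illustrative) the implicit
/// union-to-enum coercion in `const t: Tag = some_union;` (hypothetical names).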
fn unionToTag(
sema: *Sema,
block: *Block,
enum_ty: Type,
un: Air.Inst.Ref,
un_src: LazySrcLoc,
) !Air.Inst.Ref {
const mod = sema.mod;
if ((try sema.typeHasOnePossibleValue(enum_ty))) |opv| {
return sema.addConstant(enum_ty, opv);
}
if (try sema.resolveMaybeUndefVal(un)) |un_val| {
return sema.addConstant(enum_ty, un_val.unionTag(mod));
}
try sema.requireRuntimeBlock(block, un_src, null);
return block.addTyOp(.get_union_tag, enum_ty, un);
}
fn resolvePeerTypes(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
instructions: []const Air.Inst.Ref,
candidate_srcs: Module.PeerTypeCandidateSrc,
) !Type {
const mod = sema.mod;
switch (instructions.len) {
0 => return Type.noreturn,
1 => return sema.typeOf(instructions[0]),
else => {},
}
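// For example (illustrative): peer-resolving `u8` and `u16` chooses `u16`, and
// peer-resolving `*[2]u8` with `*[3]u8` falls back to a slice like `[]u8`
// (the `convert_to_slice` case below).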
const target = mod.getTarget();
var chosen = instructions[0];
// If this is non-null, its meaning depends on the zigTypeTag(mod) of the chosen type:
// * ErrorSet: this error set overrides the chosen one
// * ErrorUnion: this error set overrides only the chosen error union's error set
// * other: at the end, we construct an error union from this error set and the chosen type
var err_set_ty: ?Type = null;
var any_are_null = false;
var seen_const = false;
var convert_to_slice = false;
var chosen_i: usize = 0;
for (instructions[1..], 0..) |candidate, candidate_i| {
const candidate_ty = sema.typeOf(candidate);
const chosen_ty = sema.typeOf(chosen);
const candidate_ty_tag = try candidate_ty.zigTypeTagOrPoison(mod);
const chosen_ty_tag = try chosen_ty.zigTypeTagOrPoison(mod);
// If the candidate can coerce into our chosen type, we're done.
// If the chosen type can coerce into the candidate, use that.
if ((try sema.coerceInMemoryAllowed(block, chosen_ty, candidate_ty, false, target, src, src)) == .ok) {
continue;
}
if ((try sema.coerceInMemoryAllowed(block, candidate_ty, chosen_ty, false, target, src, src)) == .ok) {
chosen = candidate;
chosen_i = candidate_i + 1;
continue;
}
switch (candidate_ty_tag) {
.NoReturn, .Undefined => continue,
.Null => {
any_are_null = true;
continue;
},
.Int => switch (chosen_ty_tag) {
.ComptimeInt => {
chosen = candidate;
chosen_i = candidate_i + 1;
continue;
},
.Int => {
const chosen_info = chosen_ty.intInfo(mod);
const candidate_info = candidate_ty.intInfo(mod);
if (chosen_info.bits < candidate_info.bits) {
chosen = candidate;
chosen_i = candidate_i + 1;
}
continue;
},
.Pointer => if (chosen_ty.ptrSize(mod) == .C) continue,
else => {},
},
.ComptimeInt => switch (chosen_ty_tag) {
.Int, .Float, .ComptimeFloat => continue,
.Pointer => if (chosen_ty.ptrSize(mod) == .C) continue,
else => {},
},
.Float => switch (chosen_ty_tag) {
.Float => {
if (chosen_ty.floatBits(target) < candidate_ty.floatBits(target)) {
chosen = candidate;
chosen_i = candidate_i + 1;
}
continue;
},
.ComptimeFloat, .ComptimeInt => {
chosen = candidate;
chosen_i = candidate_i + 1;
continue;
},
else => {},
},
.ComptimeFloat => switch (chosen_ty_tag) {
.Float => continue,
.ComptimeInt => {
chosen = candidate;
chosen_i = candidate_i + 1;
continue;
},
else => {},
},
.Enum => switch (chosen_ty_tag) {
.EnumLiteral => {
chosen = candidate;
chosen_i = candidate_i + 1;
continue;
},
.Union => continue,
else => {},
},
.EnumLiteral => switch (chosen_ty_tag) {
.Enum, .Union => continue,
else => {},
},
.Union => switch (chosen_ty_tag) {
.Enum, .EnumLiteral => {
chosen = candidate;
chosen_i = candidate_i + 1;
continue;
},
else => {},
},
.ErrorSet => switch (chosen_ty_tag) {
.ErrorSet => {
// If chosen is superset of candidate, keep it.
// If candidate is superset of chosen, switch it.
// If neither is a superset, merge errors.
const chosen_set_ty = err_set_ty orelse chosen_ty;
if (.ok == try sema.coerceInMemoryAllowedErrorSets(block, chosen_set_ty, candidate_ty, src, src)) {
continue;
}
if (.ok == try sema.coerceInMemoryAllowedErrorSets(block, candidate_ty, chosen_set_ty, src, src)) {
err_set_ty = null;
chosen = candidate;
chosen_i = candidate_i + 1;
continue;
}
err_set_ty = try sema.errorSetMerge(chosen_set_ty, candidate_ty);
continue;
},
.ErrorUnion => {
const chosen_set_ty = err_set_ty orelse chosen_ty.errorUnionSet(mod);
if (.ok == try sema.coerceInMemoryAllowedErrorSets(block, chosen_set_ty, candidate_ty, src, src)) {
continue;
}
if (.ok == try sema.coerceInMemoryAllowedErrorSets(block, candidate_ty, chosen_set_ty, src, src)) {
err_set_ty = candidate_ty;
continue;
}
err_set_ty = try sema.errorSetMerge(chosen_set_ty, candidate_ty);
continue;
},
else => {
if (err_set_ty) |chosen_set_ty| {
if (.ok == try sema.coerceInMemoryAllowedErrorSets(block, chosen_set_ty, candidate_ty, src, src)) {
continue;
}
if (.ok == try sema.coerceInMemoryAllowedErrorSets(block, candidate_ty, chosen_set_ty, src, src)) {
err_set_ty = candidate_ty;
continue;
}
err_set_ty = try sema.errorSetMerge(chosen_set_ty, candidate_ty);
continue;
} else {
err_set_ty = candidate_ty;
continue;
}
},
},
.ErrorUnion => switch (chosen_ty_tag) {
.ErrorSet => {
const chosen_set_ty = err_set_ty orelse chosen_ty;
const candidate_set_ty = candidate_ty.errorUnionSet(mod);
if (.ok == try sema.coerceInMemoryAllowedErrorSets(block, chosen_set_ty, candidate_set_ty, src, src)) {
err_set_ty = chosen_set_ty;
} else if (.ok == try sema.coerceInMemoryAllowedErrorSets(block, candidate_set_ty, chosen_set_ty, src, src)) {
err_set_ty = null;
} else {
err_set_ty = try sema.errorSetMerge(chosen_set_ty, candidate_set_ty);
}
chosen = candidate;
chosen_i = candidate_i + 1;
continue;
},
.ErrorUnion => {
const chosen_payload_ty = chosen_ty.errorUnionPayload(mod);
const candidate_payload_ty = candidate_ty.errorUnionPayload(mod);
const coerce_chosen = (try sema.coerceInMemoryAllowed(block, chosen_payload_ty, candidate_payload_ty, false, target, src, src)) == .ok;
const coerce_candidate = (try sema.coerceInMemoryAllowed(block, candidate_payload_ty, chosen_payload_ty, false, target, src, src)) == .ok;
if (coerce_chosen or coerce_candidate) {
// If we can coerce to the candidate, we switch to that
// type. This is the same logic as the bare (non-union)
// coercion check we do at the top of this func.
if (coerce_candidate) {
chosen = candidate;
chosen_i = candidate_i + 1;
}
const chosen_set_ty = err_set_ty orelse chosen_ty.errorUnionSet(mod);
const candidate_set_ty = candidate_ty.errorUnionSet(mod);
if (.ok == try sema.coerceInMemoryAllowedErrorSets(block, chosen_set_ty, candidate_set_ty, src, src)) {
err_set_ty = chosen_set_ty;
} else if (.ok == try sema.coerceInMemoryAllowedErrorSets(block, candidate_set_ty, chosen_set_ty, src, src)) {
err_set_ty = candidate_set_ty;
} else {
err_set_ty = try sema.errorSetMerge(chosen_set_ty, candidate_set_ty);
}
continue;
}
},
else => {
if (err_set_ty) |chosen_set_ty| {
const candidate_set_ty = candidate_ty.errorUnionSet(mod);
if (.ok == try sema.coerceInMemoryAllowedErrorSets(block, chosen_set_ty, candidate_set_ty, src, src)) {
err_set_ty = chosen_set_ty;
} else if (.ok == try sema.coerceInMemoryAllowedErrorSets(block, candidate_set_ty, chosen_set_ty, src, src)) {
err_set_ty = null;
} else {
err_set_ty = try sema.errorSetMerge(chosen_set_ty, candidate_set_ty);
}
}
seen_const = seen_const or chosen_ty.isConstPtr(mod);
chosen = candidate;
chosen_i = candidate_i + 1;
continue;
},
},
.Pointer => {
const cand_info = candidate_ty.ptrInfo(mod);
switch (chosen_ty_tag) {
.Pointer => {
const chosen_info = chosen_ty.ptrInfo(mod);
seen_const = seen_const or !chosen_info.mutable or !cand_info.mutable;
// *[N]T to [*]T
// *[N]T to []T
if ((cand_info.size == .Many or cand_info.size == .Slice) and
chosen_info.size == .One and
chosen_info.pointee_type.zigTypeTag(mod) == .Array)
{
// In case we see e.g.: `*[1]T`, `*[2]T`, `[*]T`
convert_to_slice = false;
chosen = candidate;
chosen_i = candidate_i + 1;
continue;
}
if (cand_info.size == .One and
cand_info.pointee_type.zigTypeTag(mod) == .Array and
(chosen_info.size == .Many or chosen_info.size == .Slice))
{
// In case we see e.g.: `*[1]T`, `*[2]T`, `[*]T`
convert_to_slice = false;
continue;
}
// *[N]T and *[M]T
// Verify both are single-pointers to arrays.
// Keep the one whose element type can be coerced into.
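// Illustrative example (hypothetical peers): for `*[2]u8` and `*[3]u8`,
// the element types coerce both ways, so `convert_to_slice` is set and
// the final peer type later becomes `[]u8`.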
if (chosen_info.size == .One and
cand_info.size == .One and
chosen_info.pointee_type.zigTypeTag(mod) == .Array and
cand_info.pointee_type.zigTypeTag(mod) == .Array)
{
const chosen_elem_ty = chosen_info.pointee_type.childType(mod);
const cand_elem_ty = cand_info.pointee_type.childType(mod);
const chosen_ok = .ok == try sema.coerceInMemoryAllowed(block, chosen_elem_ty, cand_elem_ty, chosen_info.mutable, target, src, src);
if (chosen_ok) {
convert_to_slice = true;
continue;
}
const cand_ok = .ok == try sema.coerceInMemoryAllowed(block, cand_elem_ty, chosen_elem_ty, cand_info.mutable, target, src, src);
if (cand_ok) {
convert_to_slice = true;
chosen = candidate;
chosen_i = candidate_i + 1;
continue;
}
// They're both bad. Report error.
// In the future we probably want to use the
// coerceInMemoryAllowed error reporting mechanism;
// however, for now we just fall through for the
// "incompatible types" error below.
}
// [*c]T and any other pointer size
// Whichever element type can coerce to the other one is
// the one we will keep. If they're both OK then we keep the
// C pointer since it matches both single and many pointers.
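// Illustrative example (hypothetical peers): for `[*c]u8` and `*u8`, the
// element types coerce both ways, so the `[*c]u8` peer is kept.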
if (cand_info.size == .C or chosen_info.size == .C) {
const cand_ok = .ok == try sema.coerceInMemoryAllowed(block, cand_info.pointee_type, chosen_info.pointee_type, cand_info.mutable, target, src, src);
const chosen_ok = .ok == try sema.coerceInMemoryAllowed(block, chosen_info.pointee_type, cand_info.pointee_type, chosen_info.mutable, target, src, src);
if (cand_ok) {
if (chosen_ok) {
if (chosen_info.size == .C) {
continue;
} else {
chosen = candidate;
chosen_i = candidate_i + 1;
continue;
}
} else {
chosen = candidate;
chosen_i = candidate_i + 1;
continue;
}
} else {
if (chosen_ok) {
continue;
} else {
// They're both bad. Report error.
// In the future we probably want to use the
// coerceInMemoryAllowed error reporting mechanism;
// however, for now we just fall through for the
// "incompatible types" error below.
}
}
}
},
.Int, .ComptimeInt => {
if (cand_info.size == .C) {
chosen = candidate;
chosen_i = candidate_i + 1;
continue;
}
},
.Optional => {
const chosen_ptr_ty = chosen_ty.optionalChild(mod);
if (chosen_ptr_ty.zigTypeTag(mod) == .Pointer) {
const chosen_info = chosen_ptr_ty.ptrInfo(mod);
seen_const = seen_const or !chosen_info.mutable or !cand_info.mutable;
// *[N]T to ?[*]T
// *[N]T to ?[]T
if (cand_info.size == .One and
cand_info.pointee_type.zigTypeTag(mod) == .Array and
(chosen_info.size == .Many or chosen_info.size == .Slice))
{
continue;
}
}
},
.ErrorUnion => {
const chosen_ptr_ty = chosen_ty.errorUnionPayload(mod);
if (chosen_ptr_ty.zigTypeTag(mod) == .Pointer) {
const chosen_info = chosen_ptr_ty.ptrInfo(mod);
seen_const = seen_const or !chosen_info.mutable or !cand_info.mutable;
// *[N]T to E![*]T
// *[N]T to E![]T
if (cand_info.size == .One and
cand_info.pointee_type.zigTypeTag(mod) == .Array and
(chosen_info.size == .Many or chosen_info.size == .Slice))
{
continue;
}
}
},
.Fn => {
if (!cand_info.mutable and cand_info.pointee_type.zigTypeTag(mod) == .Fn and .ok == try sema.coerceInMemoryAllowedFns(block, chosen_ty, cand_info.pointee_type, target, src, src)) {
chosen = candidate;
chosen_i = candidate_i + 1;
continue;
}
},
else => {},
}
},
.Optional => {
const opt_child_ty = candidate_ty.optionalChild(mod);
if ((try sema.coerceInMemoryAllowed(block, chosen_ty, opt_child_ty, false, target, src, src)) == .ok) {
seen_const = seen_const or opt_child_ty.isConstPtr(mod);
any_are_null = true;
continue;
}
seen_const = seen_const or chosen_ty.isConstPtr(mod);
any_are_null = false;
chosen = candidate;
chosen_i = candidate_i + 1;
continue;
},
.Vector => switch (chosen_ty_tag) {
.Vector => {
const chosen_len = chosen_ty.vectorLen(mod);
const candidate_len = candidate_ty.vectorLen(mod);
if (chosen_len != candidate_len)
continue;
const chosen_child_ty = chosen_ty.childType(mod);
const candidate_child_ty = candidate_ty.childType(mod);
if (chosen_child_ty.zigTypeTag(mod) == .Int and candidate_child_ty.zigTypeTag(mod) == .Int) {
const chosen_info = chosen_child_ty.intInfo(mod);
const candidate_info = candidate_child_ty.intInfo(mod);
if (chosen_info.bits < candidate_info.bits) {
chosen = candidate;
chosen_i = candidate_i + 1;
}
continue;
}
if (chosen_child_ty.zigTypeTag(mod) == .Float and candidate_child_ty.zigTypeTag(mod) == .Float) {
if (chosen_ty.floatBits(target) < candidate_ty.floatBits(target)) {
chosen = candidate;
chosen_i = candidate_i + 1;
}
continue;
}
},
.Array => {
chosen = candidate;
chosen_i = candidate_i + 1;
continue;
},
else => {},
},
.Array => switch (chosen_ty_tag) {
.Vector => continue,
else => {},
},
.Fn => if (chosen_ty.isSinglePointer(mod) and chosen_ty.isConstPtr(mod) and chosen_ty.childType(mod).zigTypeTag(mod) == .Fn) {
if (.ok == try sema.coerceInMemoryAllowedFns(block, chosen_ty.childType(mod), candidate_ty, target, src, src)) {
continue;
}
},
else => {},
}
switch (chosen_ty_tag) {
.NoReturn, .Undefined => {
chosen = candidate;
chosen_i = candidate_i + 1;
continue;
},
.Null => {
any_are_null = true;
chosen = candidate;
chosen_i = candidate_i + 1;
continue;
},
.Optional => {
const opt_child_ty = chosen_ty.optionalChild(mod);
if ((try sema.coerceInMemoryAllowed(block, opt_child_ty, candidate_ty, false, target, src, src)) == .ok) {
continue;
}
if ((try sema.coerceInMemoryAllowed(block, candidate_ty, opt_child_ty, false, target, src, src)) == .ok) {
any_are_null = true;
chosen = candidate;
chosen_i = candidate_i + 1;
continue;
}
},
.ErrorUnion => {
const payload_ty = chosen_ty.errorUnionPayload(mod);
if ((try sema.coerceInMemoryAllowed(block, payload_ty, candidate_ty, false, target, src, src)) == .ok) {
continue;
}
},
.ErrorSet => {
chosen = candidate;
chosen_i = candidate_i + 1;
if (err_set_ty) |chosen_set_ty| {
if (.ok == try sema.coerceInMemoryAllowedErrorSets(block, chosen_set_ty, chosen_ty, src, src)) {
continue;
}
if (.ok == try sema.coerceInMemoryAllowedErrorSets(block, chosen_ty, chosen_set_ty, src, src)) {
err_set_ty = chosen_ty;
continue;
}
err_set_ty = try sema.errorSetMerge(chosen_set_ty, chosen_ty);
continue;
} else {
err_set_ty = chosen_ty;
continue;
}
},
else => {},
}
// At this point, we hit a compile error. We need to recover
// the source locations.
const chosen_src = candidate_srcs.resolve(
mod,
mod.declPtr(block.src_decl),
chosen_i,
);
const candidate_src = candidate_srcs.resolve(
mod,
mod.declPtr(block.src_decl),
candidate_i + 1,
);
const msg = msg: {
const msg = try sema.errMsg(block, src, "incompatible types: '{}' and '{}'", .{
chosen_ty.fmt(mod),
candidate_ty.fmt(mod),
});
errdefer msg.destroy(sema.gpa);
if (chosen_src) |src_loc|
try sema.errNote(block, src_loc, msg, "type '{}' here", .{chosen_ty.fmt(mod)});
if (candidate_src) |src_loc|
try sema.errNote(block, src_loc, msg, "type '{}' here", .{candidate_ty.fmt(mod)});
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
const chosen_ty = sema.typeOf(chosen);
if (convert_to_slice) {
// turn *[N]T => []T
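// e.g. (illustrative) peers `*[2]u8` and `*[3]u8` resolve here to `[]u8`,
// or to `[]const u8` if any peer pointer was const.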
const chosen_child_ty = chosen_ty.childType(mod);
var info = chosen_ty.ptrInfo(mod);
info.sentinel = chosen_child_ty.sentinel(mod);
info.size = .Slice;
info.mutable = !(seen_const or chosen_child_ty.isConstPtr(mod));
info.pointee_type = chosen_child_ty.elemType2(mod);
const new_ptr_ty = try Type.ptr(sema.arena, mod, info);
const opt_ptr_ty = if (any_are_null)
try Type.optional(sema.arena, new_ptr_ty, mod)
else
new_ptr_ty;
const set_ty = err_set_ty orelse return opt_ptr_ty;
return try mod.errorUnionType(set_ty, opt_ptr_ty);
}
if (seen_const) {
// turn []T => []const T
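// e.g. (illustrative) peers `[]u8` and `*[2]const u8` keep `[]u8` as the
// chosen type, but since a const peer was seen, the result is `[]const u8`.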
switch (chosen_ty.zigTypeTag(mod)) {
.ErrorUnion => {
const ptr_ty = chosen_ty.errorUnionPayload(mod);
var info = ptr_ty.ptrInfo(mod);
info.mutable = false;
const new_ptr_ty = try Type.ptr(sema.arena, mod, info);
const opt_ptr_ty = if (any_are_null)
try Type.optional(sema.arena, new_ptr_ty, mod)
else
new_ptr_ty;
const set_ty = err_set_ty orelse chosen_ty.errorUnionSet(mod);
return try mod.errorUnionType(set_ty, opt_ptr_ty);
},
.Pointer => {
var info = chosen_ty.ptrInfo(mod);
info.mutable = false;
const new_ptr_ty = try Type.ptr(sema.arena, mod, info);
const opt_ptr_ty = if (any_are_null)
try Type.optional(sema.arena, new_ptr_ty, mod)
else
new_ptr_ty;
const set_ty = err_set_ty orelse return opt_ptr_ty;
return try mod.errorUnionType(set_ty, opt_ptr_ty);
},
else => return chosen_ty,
}
}
if (any_are_null) {
const opt_ty = switch (chosen_ty.zigTypeTag(mod)) {
.Null, .Optional => chosen_ty,
else => try Type.optional(sema.arena, chosen_ty, mod),
};
const set_ty = err_set_ty orelse return opt_ty;
return try mod.errorUnionType(set_ty, opt_ty);
}
if (err_set_ty) |ty| switch (chosen_ty.zigTypeTag(mod)) {
.ErrorSet => return ty,
.ErrorUnion => {
const payload_ty = chosen_ty.errorUnionPayload(mod);
return try mod.errorUnionType(ty, payload_ty);
},
else => return try mod.errorUnionType(ty, chosen_ty),
};
return chosen_ty;
}
pub fn resolveFnTypes(sema: *Sema, fn_info: InternPool.Key.FuncType) CompileError!void {
const mod = sema.mod;
try sema.resolveTypeFully(fn_info.return_type.toType());
if (mod.comp.bin_file.options.error_return_tracing and fn_info.return_type.toType().isError(mod)) {
// Ensure the type exists so that backends can assume that.
_ = try sema.getBuiltinType("StackTrace");
}
for (fn_info.param_types) |param_ty| {
try sema.resolveTypeFully(param_ty.toType());
}
}
/// Make it so that calling hash() and eql() on `val` will not assert due
/// to a type not having its layout resolved.
fn resolveLazyValue(sema: *Sema, val: Value) CompileError!void {
switch (val.ip_index) {
.none => switch (val.tag()) {
.lazy_align => {
const ty = val.castTag(.lazy_align).?.data;
return sema.resolveTypeLayout(ty);
},
.lazy_size => {
const ty = val.castTag(.lazy_size).?.data;
return sema.resolveTypeLayout(ty);
},
.comptime_field_ptr => {
const field_ptr = val.castTag(.comptime_field_ptr).?.data;
return sema.resolveLazyValue(field_ptr.field_val);
},
.eu_payload,
.opt_payload,
=> {
const sub_val = val.cast(Value.Payload.SubValue).?.data;
return sema.resolveLazyValue(sub_val);
},
.@"union" => {
const union_val = val.castTag(.@"union").?.data;
return sema.resolveLazyValue(union_val.val);
},
.aggregate => {
const aggregate = val.castTag(.aggregate).?.data;
for (aggregate) |elem_val| {
try sema.resolveLazyValue(elem_val);
}
},
.slice => {
const slice = val.castTag(.slice).?.data;
try sema.resolveLazyValue(slice.ptr);
return sema.resolveLazyValue(slice.len);
},
else => return,
},
else => return,
}
}
pub fn resolveTypeLayout(sema: *Sema, ty: Type) CompileError!void {
const mod = sema.mod;
switch (ty.zigTypeTag(mod)) {
.Struct => return sema.resolveStructLayout(ty),
.Union => return sema.resolveUnionLayout(ty),
.Array => {
if (ty.arrayLenIncludingSentinel(mod) == 0) return;
const elem_ty = ty.childType(mod);
return sema.resolveTypeLayout(elem_ty);
},
.Optional => {
const payload_ty = ty.optionalChild(mod);
// When querying the ABI alignment of this optional, we will ask for
// hasRuntimeBits() of the payload type, so "requires comptime" must
// already be known before this function returns.
_ = try sema.typeRequiresComptime(payload_ty);
return sema.resolveTypeLayout(payload_ty);
},
.ErrorUnion => {
const payload_ty = ty.errorUnionPayload(mod);
return sema.resolveTypeLayout(payload_ty);
},
.Fn => {
const info = mod.typeToFunc(ty).?;
if (info.is_generic) {
// Resolving of generic function types is deferred to when
// the function is instantiated.
return;
}
for (info.param_types) |param_ty| {
try sema.resolveTypeLayout(param_ty.toType());
}
try sema.resolveTypeLayout(info.return_type.toType());
},
else => {},
}
}
fn resolveStructLayout(sema: *Sema, ty: Type) CompileError!void {
const mod = sema.mod;
const resolved_ty = try sema.resolveTypeFields(ty);
if (mod.typeToStruct(resolved_ty)) |struct_obj| {
switch (struct_obj.status) {
.none, .have_field_types => {},
.field_types_wip, .layout_wip => {
const msg = try Module.ErrorMsg.create(
sema.gpa,
struct_obj.srcLoc(mod),
"struct '{}' depends on itself",
.{ty.fmt(mod)},
);
return sema.failWithOwnedErrorMsg(msg);
},
.have_layout, .fully_resolved_wip, .fully_resolved => return,
}
const prev_status = struct_obj.status;
errdefer if (struct_obj.status == .layout_wip) {
struct_obj.status = prev_status;
};
struct_obj.status = .layout_wip;
for (struct_obj.fields.values(), 0..) |field, i| {
sema.resolveTypeLayout(field.ty) catch |err| switch (err) {
error.AnalysisFail => {
const msg = sema.err orelse return err;
try sema.addFieldErrNote(ty, i, msg, "while checking this field", .{});
return err;
},
else => return err,
};
}
if (struct_obj.layout == .Packed) {
try semaBackingIntType(mod, struct_obj);
}
struct_obj.status = .have_layout;
_ = try sema.resolveTypeRequiresComptime(resolved_ty);
if (struct_obj.assumed_runtime_bits and !(try sema.typeHasRuntimeBits(resolved_ty))) {
const msg = try Module.ErrorMsg.create(
sema.gpa,
struct_obj.srcLoc(mod),
"struct layout depends on it having runtime bits",
.{},
);
return sema.failWithOwnedErrorMsg(msg);
}
if (struct_obj.layout == .Auto and mod.backendSupportsFeature(.field_reordering)) {
const optimized_order = if (struct_obj.owner_decl == sema.owner_decl_index)
try sema.perm_arena.alloc(u32, struct_obj.fields.count())
else blk: {
const decl = mod.declPtr(struct_obj.owner_decl);
var decl_arena: std.heap.ArenaAllocator = undefined;
const decl_arena_allocator = decl.value_arena.?.acquire(mod.gpa, &decl_arena);
defer decl.value_arena.?.release(&decl_arena);
break :blk try decl_arena_allocator.alloc(u32, struct_obj.fields.count());
};
for (struct_obj.fields.values(), 0..) |field, i| {
optimized_order[i] = if (try sema.typeHasRuntimeBits(field.ty))
@intCast(u32, i)
else
Module.Struct.omitted_field;
}
const AlignSortContext = struct {
struct_obj: *Module.Struct,
sema: *Sema,
fn lessThan(ctx: @This(), a: u32, b: u32) bool {
const m = ctx.sema.mod;
if (a == Module.Struct.omitted_field) return false;
if (b == Module.Struct.omitted_field) return true;
return ctx.struct_obj.fields.values()[a].ty.abiAlignment(m) >
ctx.struct_obj.fields.values()[b].ty.abiAlignment(m);
}
};
mem.sort(u32, optimized_order, AlignSortContext{
.struct_obj = struct_obj,
.sema = sema,
}, AlignSortContext.lessThan);
struct_obj.optimized_order = optimized_order.ptr;
}
}
// otherwise it's a tuple; no need to resolve anything
}
fn semaBackingIntType(mod: *Module, struct_obj: *Module.Struct) CompileError!void {
const gpa = mod.gpa;
var fields_bit_sum: u64 = 0;
for (struct_obj.fields.values()) |field| {
fields_bit_sum += field.ty.bitSize(mod);
}
const decl_index = struct_obj.owner_decl;
const decl = mod.declPtr(decl_index);
var decl_arena: std.heap.ArenaAllocator = undefined;
const decl_arena_allocator = decl.value_arena.?.acquire(gpa, &decl_arena);
defer decl.value_arena.?.release(&decl_arena);
const zir = mod.namespacePtr(struct_obj.namespace).file_scope.zir;
const extended = zir.instructions.items(.data)[struct_obj.zir_index].extended;
assert(extended.opcode == .struct_decl);
const small = @bitCast(Zir.Inst.StructDecl.Small, extended.small);
if (small.has_backing_int) {
var extra_index: usize = extended.operand;
extra_index += @boolToInt(small.has_src_node);
extra_index += @boolToInt(small.has_fields_len);
extra_index += @boolToInt(small.has_decls_len);
const backing_int_body_len = zir.extra[extra_index];
extra_index += 1;
var analysis_arena = std.heap.ArenaAllocator.init(gpa);
defer analysis_arena.deinit();
var sema: Sema = .{
.mod = mod,
.gpa = gpa,
.arena = analysis_arena.allocator(),
.perm_arena = decl_arena_allocator,
.code = zir,
.owner_decl = decl,
.owner_decl_index = decl_index,
.func = null,
.fn_ret_ty = Type.void,
.owner_func = null,
};
defer sema.deinit();
var wip_captures = try WipCaptureScope.init(gpa, decl_arena_allocator, decl.src_scope);
defer wip_captures.deinit();
var block: Block = .{
.parent = null,
.sema = &sema,
.src_decl = decl_index,
.namespace = struct_obj.namespace,
.wip_capture_scope = wip_captures.scope,
.instructions = .{},
.inlining = null,
.is_comptime = true,
};
defer {
assert(block.instructions.items.len == 0);
block.params.deinit(gpa);
}
const backing_int_src: LazySrcLoc = .{ .node_offset_container_tag = 0 };
const backing_int_ty = blk: {
if (backing_int_body_len == 0) {
const backing_int_ref = @intToEnum(Zir.Inst.Ref, zir.extra[extra_index]);
break :blk try sema.resolveType(&block, backing_int_src, backing_int_ref);
} else {
const body = zir.extra[extra_index..][0..backing_int_body_len];
const ty_ref = try sema.resolveBody(&block, body, struct_obj.zir_index);
break :blk try sema.analyzeAsType(&block, backing_int_src, ty_ref);
}
};
try sema.checkBackingIntType(&block, backing_int_src, backing_int_ty, fields_bit_sum);
struct_obj.backing_int_ty = backing_int_ty;
try wip_captures.finalize();
} else {
if (fields_bit_sum > std.math.maxInt(u16)) {
var sema: Sema = .{
.mod = mod,
.gpa = gpa,
.arena = undefined,
.perm_arena = decl_arena_allocator,
.code = zir,
.owner_decl = decl,
.owner_decl_index = decl_index,
.func = null,
.fn_ret_ty = Type.void,
.owner_func = null,
};
defer sema.deinit();
var block: Block = .{
.parent = null,
.sema = &sema,
.src_decl = decl_index,
.namespace = struct_obj.namespace,
.wip_capture_scope = undefined,
.instructions = .{},
.inlining = null,
.is_comptime = true,
};
return sema.fail(&block, LazySrcLoc.nodeOffset(0), "size of packed struct '{d}' exceeds maximum bit width of 65535", .{fields_bit_sum});
}
struct_obj.backing_int_ty = try mod.intType(.unsigned, @intCast(u16, fields_bit_sum));
}
}
fn checkBackingIntType(sema: *Sema, block: *Block, src: LazySrcLoc, backing_int_ty: Type, fields_bit_sum: u64) CompileError!void {
const mod = sema.mod;
if (!backing_int_ty.isInt(mod)) {
return sema.fail(block, src, "expected backing integer type, found '{}'", .{backing_int_ty.fmt(sema.mod)});
}
if (backing_int_ty.bitSize(mod) != fields_bit_sum) {
return sema.fail(
block,
src,
"backing integer type '{}' has bit size {} but the struct fields have a total bit size of {}",
.{ backing_int_ty.fmt(sema.mod), backing_int_ty.bitSize(mod), fields_bit_sum },
);
}
}
fn checkIndexable(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) !void {
const mod = sema.mod;
if (!ty.isIndexable(mod)) {
const msg = msg: {
const msg = try sema.errMsg(block, src, "type '{}' does not support indexing", .{ty.fmt(sema.mod)});
errdefer msg.destroy(sema.gpa);
try sema.errNote(block, src, msg, "operand must be an array, slice, tuple, or vector", .{});
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
}
fn checkMemOperand(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) !void {
const mod = sema.mod;
if (ty.zigTypeTag(mod) == .Pointer) {
switch (ty.ptrSize(mod)) {
.Slice, .Many, .C => return,
.One => {
const elem_ty = ty.childType(mod);
if (elem_ty.zigTypeTag(mod) == .Array) return;
// TODO https://github.com/ziglang/zig/issues/15479
// if (elem_ty.isTuple()) return;
},
}
}
const msg = msg: {
const msg = try sema.errMsg(block, src, "type '{}' is not an indexable pointer", .{ty.fmt(sema.mod)});
errdefer msg.destroy(sema.gpa);
try sema.errNote(block, src, msg, "operand must be a slice, a many pointer or a pointer to an array", .{});
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
fn resolveUnionLayout(sema: *Sema, ty: Type) CompileError!void {
const mod = sema.mod;
const resolved_ty = try sema.resolveTypeFields(ty);
const union_obj = mod.typeToUnion(resolved_ty).?;
switch (union_obj.status) {
.none, .have_field_types => {},
.field_types_wip, .layout_wip => {
const msg = try Module.ErrorMsg.create(
sema.gpa,
union_obj.srcLoc(sema.mod),
"union '{}' depends on itself",
.{ty.fmt(sema.mod)},
);
return sema.failWithOwnedErrorMsg(msg);
},
.have_layout, .fully_resolved_wip, .fully_resolved => return,
}
const prev_status = union_obj.status;
errdefer if (union_obj.status == .layout_wip) {
union_obj.status = prev_status;
};
union_obj.status = .layout_wip;
for (union_obj.fields.values(), 0..) |field, i| {
sema.resolveTypeLayout(field.ty) catch |err| switch (err) {
error.AnalysisFail => {
const msg = sema.err orelse return err;
try sema.addFieldErrNote(ty, i, msg, "while checking this field", .{});
return err;
},
else => return err,
};
}
union_obj.status = .have_layout;
_ = try sema.resolveTypeRequiresComptime(resolved_ty);
if (union_obj.assumed_runtime_bits and !(try sema.typeHasRuntimeBits(resolved_ty))) {
const msg = try Module.ErrorMsg.create(
sema.gpa,
union_obj.srcLoc(sema.mod),
"union layout depends on it having runtime bits",
.{},
);
return sema.failWithOwnedErrorMsg(msg);
}
}
// When querying the ABI alignment of this struct, we will ask for
// hasRuntimeBits() of each field, so "requires comptime" must already
// be known before this function returns.
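// For example (illustrative): a struct containing a `type` or `comptime_int`
// field is comptime-only, while one containing only `u32` fields is not.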
pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool {
const mod = sema.mod;
return switch (ty.ip_index) {
.empty_struct_type => false,
else => switch (mod.intern_pool.indexToKey(ty.ip_index)) {
.int_type => false,
.ptr_type => |ptr_type| {
const child_ty = ptr_type.elem_type.toType();
if (child_ty.zigTypeTag(mod) == .Fn) {
return mod.typeToFunc(child_ty).?.is_generic;
} else {
return sema.resolveTypeRequiresComptime(child_ty);
}
},
.anyframe_type => |child| {
if (child == .none) return false;
return sema.resolveTypeRequiresComptime(child.toType());
},
.array_type => |array_type| return sema.resolveTypeRequiresComptime(array_type.child.toType()),
.vector_type => |vector_type| return sema.resolveTypeRequiresComptime(vector_type.child.toType()),
.opt_type => |child| return sema.resolveTypeRequiresComptime(child.toType()),
.error_union_type => |error_union_type| return sema.resolveTypeRequiresComptime(error_union_type.payload_type.toType()),
.error_set_type, .inferred_error_set_type => false,
.func_type => true,
.simple_type => |t| switch (t) {
.f16,
.f32,
.f64,
.f80,
.f128,
.usize,
.isize,
.c_char,
.c_short,
.c_ushort,
.c_int,
.c_uint,
.c_long,
.c_ulong,
.c_longlong,
.c_ulonglong,
.c_longdouble,
.anyopaque,
.bool,
.void,
.anyerror,
.noreturn,
.generic_poison,
.var_args_param,
.atomic_order,
.atomic_rmw_op,
.calling_convention,
.address_space,
.float_mode,
.reduce_op,
.call_modifier,
.prefetch_options,
.export_options,
.extern_options,
=> false,
.type,
.comptime_int,
.comptime_float,
.null,
.undefined,
.enum_literal,
.type_info,
=> true,
},
.struct_type => |struct_type| {
const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return false;
switch (struct_obj.requires_comptime) {
.no, .wip => return false,
.yes => return true,
.unknown => {
var requires_comptime = false;
struct_obj.requires_comptime = .wip;
for (struct_obj.fields.values()) |field| {
if (try sema.resolveTypeRequiresComptime(field.ty)) requires_comptime = true;
}
if (requires_comptime) {
struct_obj.requires_comptime = .yes;
} else {
struct_obj.requires_comptime = .no;
}
return requires_comptime;
},
}
},
.anon_struct_type => |tuple| {
for (tuple.types, tuple.values) |field_ty, field_val| {
const have_comptime_val = field_val != .none;
if (!have_comptime_val and try sema.resolveTypeRequiresComptime(field_ty.toType())) {
return true;
}
}
return false;
},
.union_type => |union_type| {
const union_obj = mod.unionPtr(union_type.index);
switch (union_obj.requires_comptime) {
.no, .wip => return false,
.yes => return true,
.unknown => {
var requires_comptime = false;
union_obj.requires_comptime = .wip;
for (union_obj.fields.values()) |field| {
if (try sema.resolveTypeRequiresComptime(field.ty)) requires_comptime = true;
}
if (requires_comptime) {
union_obj.requires_comptime = .yes;
} else {
union_obj.requires_comptime = .no;
}
return requires_comptime;
},
}
},
.opaque_type => false,
.enum_type => |enum_type| try sema.resolveTypeRequiresComptime(enum_type.tag_ty.toType()),
// values, not types
.undef => unreachable,
.un => unreachable,
.simple_value => unreachable,
.extern_func => unreachable,
.int => unreachable,
.float => unreachable,
.ptr => unreachable,
.opt => unreachable,
.enum_tag => unreachable,
.aggregate => unreachable,
},
};
}
/// Returns `error.AnalysisFail` if any of the types (recursively) failed to
/// be resolved.
pub fn resolveTypeFully(sema: *Sema, ty: Type) CompileError!void {
const mod = sema.mod;
switch (ty.zigTypeTag(mod)) {
.Pointer => {
const child_ty = try sema.resolveTypeFields(ty.childType(mod));
return sema.resolveTypeFully(child_ty);
},
.Struct => switch (ty.ip_index) {
.none => {}, // TODO make this unreachable when all types are migrated to InternPool
else => switch (mod.intern_pool.indexToKey(ty.ip_index)) {
.struct_type => return sema.resolveStructFully(ty),
.anon_struct_type => |tuple| {
for (tuple.types) |field_ty| {
try sema.resolveTypeFully(field_ty.toType());
}
},
else => {},
},
},
.Union => return sema.resolveUnionFully(ty),
.Array => return sema.resolveTypeFully(ty.childType(mod)),
.Optional => {
return sema.resolveTypeFully(ty.optionalChild(mod));
},
.ErrorUnion => return sema.resolveTypeFully(ty.errorUnionPayload(mod)),
.Fn => {
const info = mod.typeToFunc(ty).?;
if (info.is_generic) {
// Resolving of generic function types is deferred to when
// the function is instantiated.
return;
}
for (info.param_types) |param_ty| {
try sema.resolveTypeFully(param_ty.toType());
}
try sema.resolveTypeFully(info.return_type.toType());
},
else => {},
}
}
fn resolveStructFully(sema: *Sema, ty: Type) CompileError!void {
try sema.resolveStructLayout(ty);
const mod = sema.mod;
const resolved_ty = try sema.resolveTypeFields(ty);
const struct_obj = mod.typeToStruct(resolved_ty).?;
switch (struct_obj.status) {
.none, .have_field_types, .field_types_wip, .layout_wip, .have_layout => {},
.fully_resolved_wip, .fully_resolved => return,
}
{
// After we have resolved the struct layout, we have to go over the fields again to
// make sure pointer fields get their child types resolved as well.
// See also similar code for unions.
const prev_status = struct_obj.status;
errdefer struct_obj.status = prev_status;
struct_obj.status = .fully_resolved_wip;
for (struct_obj.fields.values()) |field| {
try sema.resolveTypeFully(field.ty);
}
struct_obj.status = .fully_resolved;
}
// And let's not forget comptime-only status.
_ = try sema.typeRequiresComptime(ty);
}
fn resolveUnionFully(sema: *Sema, ty: Type) CompileError!void {
try sema.resolveUnionLayout(ty);
const mod = sema.mod;
const resolved_ty = try sema.resolveTypeFields(ty);
const union_obj = mod.typeToUnion(resolved_ty).?;
switch (union_obj.status) {
.none, .have_field_types, .field_types_wip, .layout_wip, .have_layout => {},
.fully_resolved_wip, .fully_resolved => return,
}
{
// After we have resolved the union layout, we have to go over the fields again to
// make sure pointer fields get their child types resolved as well.
// See also similar code for structs.
const prev_status = union_obj.status;
errdefer union_obj.status = prev_status;
union_obj.status = .fully_resolved_wip;
for (union_obj.fields.values()) |field| {
try sema.resolveTypeFully(field.ty);
}
union_obj.status = .fully_resolved;
}
// And let's not forget comptime-only status.
_ = try sema.typeRequiresComptime(ty);
}
pub fn resolveTypeFields(sema: *Sema, ty: Type) CompileError!Type {
const mod = sema.mod;
switch (ty.ip_index) {
// TODO: After the InternPool transition is complete, change this to `unreachable`.
.none => return ty,
.u1_type,
.u8_type,
.i8_type,
.u16_type,
.i16_type,
.u29_type,
.u32_type,
.i32_type,
.u64_type,
.i64_type,
.u80_type,
.u128_type,
.i128_type,
.usize_type,
.isize_type,
.c_char_type,
.c_short_type,
.c_ushort_type,
.c_int_type,
.c_uint_type,
.c_long_type,
.c_ulong_type,
.c_longlong_type,
.c_ulonglong_type,
.c_longdouble_type,
.f16_type,
.f32_type,
.f64_type,
.f80_type,
.f128_type,
.anyopaque_type,
.bool_type,
.void_type,
.type_type,
.anyerror_type,
.comptime_int_type,
.comptime_float_type,
.noreturn_type,
.anyframe_type,
.null_type,
.undefined_type,
.enum_literal_type,
.manyptr_u8_type,
.manyptr_const_u8_type,
.manyptr_const_u8_sentinel_0_type,
.single_const_pointer_to_comptime_int_type,
.const_slice_u8_type,
.const_slice_u8_sentinel_0_type,
.anyerror_void_error_union_type,
.generic_poison_type,
.var_args_param_type,
.empty_struct_type,
=> return ty,
.undef => unreachable,
.zero => unreachable,
.zero_usize => unreachable,
.zero_u8 => unreachable,
.one => unreachable,
.one_usize => unreachable,
.one_u8 => unreachable,
.four_u8 => unreachable,
.negative_one => unreachable,
.calling_convention_c => unreachable,
.calling_convention_inline => unreachable,
.void_value => unreachable,
.unreachable_value => unreachable,
.null_value => unreachable,
.bool_true => unreachable,
.bool_false => unreachable,
.empty_struct => unreachable,
.generic_poison => unreachable,
.type_info_type => return sema.getBuiltinType("Type"),
.extern_options_type => return sema.getBuiltinType("ExternOptions"),
.export_options_type => return sema.getBuiltinType("ExportOptions"),
.atomic_order_type => return sema.getBuiltinType("AtomicOrder"),
.atomic_rmw_op_type => return sema.getBuiltinType("AtomicRmwOp"),
.calling_convention_type => return sema.getBuiltinType("CallingConvention"),
.address_space_type => return sema.getBuiltinType("AddressSpace"),
.float_mode_type => return sema.getBuiltinType("FloatMode"),
.reduce_op_type => return sema.getBuiltinType("ReduceOp"),
.call_modifier_type => return sema.getBuiltinType("CallModifier"),
.prefetch_options_type => return sema.getBuiltinType("PrefetchOptions"),
_ => switch (mod.intern_pool.indexToKey(ty.ip_index)) {
.struct_type => |struct_type| {
const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return ty;
try sema.resolveTypeFieldsStruct(ty, struct_obj);
return ty;
},
.union_type => |union_type| {
const union_obj = mod.unionPtr(union_type.index);
try sema.resolveTypeFieldsUnion(ty, union_obj);
return ty;
},
else => return ty,
},
}
}
fn resolveTypeFieldsStruct(
sema: *Sema,
ty: Type,
struct_obj: *Module.Struct,
) CompileError!void {
switch (sema.mod.declPtr(struct_obj.owner_decl).analysis) {
.file_failure,
.dependency_failure,
.sema_failure,
.sema_failure_retryable,
=> {
sema.owner_decl.analysis = .dependency_failure;
sema.owner_decl.generation = sema.mod.generation;
return error.AnalysisFail;
},
else => {},
}
switch (struct_obj.status) {
.none => {},
.field_types_wip => {
const msg = try Module.ErrorMsg.create(
sema.gpa,
struct_obj.srcLoc(sema.mod),
"struct '{}' depends on itself",
.{ty.fmt(sema.mod)},
);
return sema.failWithOwnedErrorMsg(msg);
},
.have_field_types,
.have_layout,
.layout_wip,
.fully_resolved_wip,
.fully_resolved,
=> return,
}
struct_obj.status = .field_types_wip;
errdefer struct_obj.status = .none;
try semaStructFields(sema.mod, struct_obj);
}
fn resolveTypeFieldsUnion(sema: *Sema, ty: Type, union_obj: *Module.Union) CompileError!void {
switch (sema.mod.declPtr(union_obj.owner_decl).analysis) {
.file_failure,
.dependency_failure,
.sema_failure,
.sema_failure_retryable,
=> {
sema.owner_decl.analysis = .dependency_failure;
sema.owner_decl.generation = sema.mod.generation;
return error.AnalysisFail;
},
else => {},
}
switch (union_obj.status) {
.none => {},
.field_types_wip => {
const msg = try Module.ErrorMsg.create(
sema.gpa,
union_obj.srcLoc(sema.mod),
"union '{}' depends on itself",
.{ty.fmt(sema.mod)},
);
return sema.failWithOwnedErrorMsg(msg);
},
.have_field_types,
.have_layout,
.layout_wip,
.fully_resolved_wip,
.fully_resolved,
=> return,
}
union_obj.status = .field_types_wip;
errdefer union_obj.status = .none;
try semaUnionFields(sema.mod, union_obj);
union_obj.status = .have_field_types;
}
fn resolveInferredErrorSet(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
ies_index: Module.Fn.InferredErrorSet.Index,
) CompileError!void {
const mod = sema.mod;
const ies = mod.inferredErrorSetPtr(ies_index);
if (ies.is_resolved) return;
if (ies.func.state == .in_progress) {
return sema.fail(block, src, "unable to resolve inferred error set", .{});
}
// In order to ensure that all dependencies are properly added to the set, we
// need to ensure the function body of the inferred error set is analyzed.
// However, in the case of comptime/inline function calls with inferred error sets,
// each call gets a new InferredErrorSet object, which points to the same
// `*Module.Fn`. Not only is the function not relevant to the inferred error set
// in this case, it may be a generic function which would cause an assertion failure
// if we called `ensureFuncBodyAnalyzed` on it here.
const ies_func_owner_decl = mod.declPtr(ies.func.owner_decl);
const ies_func_info = mod.typeToFunc(ies_func_owner_decl.ty).?;
// If the ies was declared by an inline function with a generic return type, the
// return_type will be generic_poison, because an inline function does not create
// a new declaration, and the ies has already been filled in by analyzeCall,
// so here we can simply skip this case.
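// Illustrative example (hypothetical): `inline fn f(comptime T: type) !T`
// has a generic return type with an inferred error set, so its return_type
// remains generic_poison here while each inline call fills in the ies.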
if (ies_func_info.return_type == .generic_poison_type) {
assert(ies_func_info.cc == .Inline);
} else if (mod.typeToInferredErrorSet(ies_func_info.return_type.toType().errorUnionSet(mod)).? == ies) {
if (ies_func_info.is_generic) {
const msg = msg: {
const msg = try sema.errMsg(block, src, "unable to resolve inferred error set of generic function", .{});
errdefer msg.destroy(sema.gpa);
try sema.mod.errNoteNonLazy(ies_func_owner_decl.srcLoc(mod), msg, "generic function declared here", .{});
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
// In this case we are dealing with the actual InferredErrorSet object that
// corresponds to the function, not one created to track an inline/comptime call.
try sema.ensureFuncBodyAnalyzed(ies.func);
}
ies.is_resolved = true;
for (ies.inferred_error_sets.keys()) |other_ies_index| {
if (ies_index == other_ies_index) continue;
try sema.resolveInferredErrorSet(block, src, other_ies_index);
const other_ies = mod.inferredErrorSetPtr(other_ies_index);
for (other_ies.errors.keys()) |key| {
try ies.errors.put(sema.gpa, key, {});
}
if (other_ies.is_anyerror)
ies.is_anyerror = true;
}
}
fn resolveInferredErrorSetTy(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
ty: Type,
) CompileError!void {
const mod = sema.mod;
if (mod.typeToInferredErrorSetIndex(ty).unwrap()) |ies_index| {
try sema.resolveInferredErrorSet(block, src, ies_index);
}
}
fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void {
const gpa = mod.gpa;
const decl_index = struct_obj.owner_decl;
const zir = mod.namespacePtr(struct_obj.namespace).file_scope.zir;
const extended = zir.instructions.items(.data)[struct_obj.zir_index].extended;
assert(extended.opcode == .struct_decl);
const small = @bitCast(Zir.Inst.StructDecl.Small, extended.small);
var extra_index: usize = extended.operand;
const src = LazySrcLoc.nodeOffset(0);
extra_index += @boolToInt(small.has_src_node);
const fields_len = if (small.has_fields_len) blk: {
const fields_len = zir.extra[extra_index];
extra_index += 1;
break :blk fields_len;
} else 0;
const decls_len = if (small.has_decls_len) decls_len: {
const decls_len = zir.extra[extra_index];
extra_index += 1;
break :decls_len decls_len;
} else 0;
// The backing integer cannot be handled until `resolveStructLayout()`.
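// e.g. (illustrative) the explicit `u16` in `packed struct(u16) { a: u8, b: u8 }`.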
if (small.has_backing_int) {
const backing_int_body_len = zir.extra[extra_index];
extra_index += 1; // backing_int_body_len
if (backing_int_body_len == 0) {
extra_index += 1; // backing_int_ref
} else {
extra_index += backing_int_body_len; // backing_int_body_inst
}
}
// Skip over decls.
var decls_it = zir.declIteratorInner(extra_index, decls_len);
while (decls_it.next()) |_| {}
extra_index = decls_it.extra_index;
if (fields_len == 0) {
if (struct_obj.layout == .Packed) {
try semaBackingIntType(mod, struct_obj);
}
struct_obj.status = .have_layout;
return;
}
const decl = mod.declPtr(decl_index);
var decl_arena: std.heap.ArenaAllocator = undefined;
const decl_arena_allocator = decl.value_arena.?.acquire(gpa, &decl_arena);
defer decl.value_arena.?.release(&decl_arena);
var analysis_arena = std.heap.ArenaAllocator.init(gpa);
defer analysis_arena.deinit();
var sema: Sema = .{
.mod = mod,
.gpa = gpa,
.arena = analysis_arena.allocator(),
.perm_arena = decl_arena_allocator,
.code = zir,
.owner_decl = decl,
.owner_decl_index = decl_index,
.func = null,
.fn_ret_ty = Type.void,
.owner_func = null,
};
defer sema.deinit();
var wip_captures = try WipCaptureScope.init(gpa, decl_arena_allocator, decl.src_scope);
defer wip_captures.deinit();
var block_scope: Block = .{
.parent = null,
.sema = &sema,
.src_decl = decl_index,
.namespace = struct_obj.namespace,
.wip_capture_scope = wip_captures.scope,
.instructions = .{},
.inlining = null,
.is_comptime = true,
};
defer {
assert(block_scope.instructions.items.len == 0);
block_scope.params.deinit(gpa);
}
struct_obj.fields = .{};
try struct_obj.fields.ensureTotalCapacity(decl_arena_allocator, fields_len);
const Field = struct {
type_body_len: u32 = 0,
align_body_len: u32 = 0,
init_body_len: u32 = 0,
type_ref: Zir.Inst.Ref = .none,
};
const fields = try sema.arena.alloc(Field, fields_len);
var any_inits = false;
{
const bits_per_field = 4;
const fields_per_u32 = 32 / bits_per_field;
const bit_bags_count = std.math.divCeil(usize, fields_len, fields_per_u32) catch unreachable;
const flags_index = extra_index;
var bit_bag_index: usize = flags_index;
extra_index += bit_bags_count;
var cur_bit_bag: u32 = undefined;
var field_i: u32 = 0;
while (field_i < fields_len) : (field_i += 1) {
if (field_i % fields_per_u32 == 0) {
cur_bit_bag = zir.extra[bit_bag_index];
bit_bag_index += 1;
}
const has_align = @truncate(u1, cur_bit_bag) != 0;
cur_bit_bag >>= 1;
const has_init = @truncate(u1, cur_bit_bag) != 0;
cur_bit_bag >>= 1;
const is_comptime = @truncate(u1, cur_bit_bag) != 0;
cur_bit_bag >>= 1;
const has_type_body = @truncate(u1, cur_bit_bag) != 0;
cur_bit_bag >>= 1;
var field_name_zir: ?[:0]const u8 = null;
if (!small.is_tuple) {
field_name_zir = zir.nullTerminatedString(zir.extra[extra_index]);
extra_index += 1;
}
extra_index += 1; // doc_comment
fields[field_i] = .{};
if (has_type_body) {
fields[field_i].type_body_len = zir.extra[extra_index];
} else {
fields[field_i].type_ref = @intToEnum(Zir.Inst.Ref, zir.extra[extra_index]);
}
extra_index += 1;
// This string needs to outlive the ZIR code.
const field_name = if (field_name_zir) |some|
try decl_arena_allocator.dupe(u8, some)
else
try std.fmt.allocPrint(decl_arena_allocator, "{d}", .{field_i});
const gop = struct_obj.fields.getOrPutAssumeCapacity(field_name);
if (gop.found_existing) {
const msg = msg: {
const field_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{ .index = field_i }).lazy;
const msg = try sema.errMsg(&block_scope, field_src, "duplicate struct field: '{s}'", .{field_name});
errdefer msg.destroy(gpa);
const prev_field_index = struct_obj.fields.getIndex(field_name).?;
const prev_field_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{ .index = prev_field_index });
try mod.errNoteNonLazy(prev_field_src, msg, "other field here", .{});
try sema.errNote(&block_scope, src, msg, "struct declared here", .{});
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
gop.value_ptr.* = .{
.ty = Type.noreturn,
.abi_align = 0,
.default_val = Value.@"unreachable",
.is_comptime = is_comptime,
.offset = undefined,
};
if (has_align) {
fields[field_i].align_body_len = zir.extra[extra_index];
extra_index += 1;
}
if (has_init) {
fields[field_i].init_body_len = zir.extra[extra_index];
extra_index += 1;
any_inits = true;
}
}
}
// Next we do only types and alignments, saving the inits for a second pass,
// so that init values may depend on type layout.
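// Illustrative example (hypothetical): a default such as
// `byte_size: usize = @sizeOf(@This())` depends on the struct's own layout,
// which is only available after the first (types and alignments) pass.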
const bodies_index = extra_index;
for (fields, 0..) |zir_field, field_i| {
const field_ty: Type = ty: {
if (zir_field.type_ref != .none) {
break :ty sema.resolveType(&block_scope, .unneeded, zir_field.type_ref) catch |err| switch (err) {
error.NeededSourceLocation => {
const ty_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{
.index = field_i,
.range = .type,
}).lazy;
_ = try sema.resolveType(&block_scope, ty_src, zir_field.type_ref);
unreachable;
},
else => |e| return e,
};
}
assert(zir_field.type_body_len != 0);
const body = zir.extra[extra_index..][0..zir_field.type_body_len];
extra_index += body.len;
const ty_ref = try sema.resolveBody(&block_scope, body, struct_obj.zir_index);
break :ty sema.analyzeAsType(&block_scope, .unneeded, ty_ref) catch |err| switch (err) {
error.NeededSourceLocation => {
const ty_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{
.index = field_i,
.range = .type,
}).lazy;
_ = try sema.analyzeAsType(&block_scope, ty_src, ty_ref);
unreachable;
},
else => |e| return e,
};
};
if (field_ty.isGenericPoison()) {
return error.GenericPoison;
}
const field = &struct_obj.fields.values()[field_i];
field.ty = field_ty;
if (field_ty.zigTypeTag(mod) == .Opaque) {
const msg = msg: {
const ty_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{
.index = field_i,
.range = .type,
}).lazy;
const msg = try sema.errMsg(&block_scope, ty_src, "opaque types have unknown size and therefore cannot be directly embedded in structs", .{});
errdefer msg.destroy(sema.gpa);
try sema.addDeclaredHereNote(msg, field_ty);
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
if (field_ty.zigTypeTag(mod) == .NoReturn) {
const msg = msg: {
const ty_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{
.index = field_i,
.range = .type,
}).lazy;
const msg = try sema.errMsg(&block_scope, ty_src, "struct fields cannot be 'noreturn'", .{});
errdefer msg.destroy(sema.gpa);
try sema.addDeclaredHereNote(msg, field_ty);
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
if (struct_obj.layout == .Extern and !try sema.validateExternType(field.ty, .struct_field)) {
const msg = msg: {
const ty_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{
.index = field_i,
.range = .type,
});
const msg = try sema.errMsg(&block_scope, ty_src.lazy, "extern structs cannot contain fields of type '{}'", .{field.ty.fmt(mod)});
errdefer msg.destroy(sema.gpa);
try sema.explainWhyTypeIsNotExtern(msg, ty_src, field.ty, .struct_field);
try sema.addDeclaredHereNote(msg, field.ty);
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
} else if (struct_obj.layout == .Packed and !(validatePackedType(field.ty, mod))) {
const msg = msg: {
const ty_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{
.index = field_i,
.range = .type,
});
const msg = try sema.errMsg(&block_scope, ty_src.lazy, "packed structs cannot contain fields of type '{}'", .{field.ty.fmt(mod)});
errdefer msg.destroy(sema.gpa);
try sema.explainWhyTypeIsNotPacked(msg, ty_src, field.ty);
try sema.addDeclaredHereNote(msg, field.ty);
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
if (zir_field.align_body_len > 0) {
const body = zir.extra[extra_index..][0..zir_field.align_body_len];
extra_index += body.len;
const align_ref = try sema.resolveBody(&block_scope, body, struct_obj.zir_index);
field.abi_align = sema.analyzeAsAlign(&block_scope, .unneeded, align_ref) catch |err| switch (err) {
error.NeededSourceLocation => {
const align_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{
.index = field_i,
.range = .alignment,
}).lazy;
_ = try sema.analyzeAsAlign(&block_scope, align_src, align_ref);
unreachable;
},
else => |e| return e,
};
}
extra_index += zir_field.init_body_len;
}
struct_obj.status = .have_field_types;
if (any_inits) {
extra_index = bodies_index;
for (fields, 0..) |zir_field, field_i| {
extra_index += zir_field.type_body_len;
extra_index += zir_field.align_body_len;
if (zir_field.init_body_len > 0) {
const body = zir.extra[extra_index..][0..zir_field.init_body_len];
extra_index += body.len;
const init = try sema.resolveBody(&block_scope, body, struct_obj.zir_index);
const field = &struct_obj.fields.values()[field_i];
const coerced = sema.coerce(&block_scope, field.ty, init, .unneeded) catch |err| switch (err) {
error.NeededSourceLocation => {
const init_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{
.index = field_i,
.range = .value,
}).lazy;
_ = try sema.coerce(&block_scope, field.ty, init, init_src);
unreachable;
},
else => |e| return e,
};
const default_val = (try sema.resolveMaybeUndefVal(coerced)) orelse {
const init_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{
.index = field_i,
.range = .value,
}).lazy;
return sema.failWithNeededComptime(&block_scope, init_src, "struct field default value must be comptime-known");
};
field.default_val = try default_val.copy(decl_arena_allocator);
}
}
}
try wip_captures.finalize();
struct_obj.have_field_inits = true;
}
fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void {
const tracy = trace(@src());
defer tracy.end();
const gpa = mod.gpa;
const decl_index = union_obj.owner_decl;
const zir = mod.namespacePtr(union_obj.namespace).file_scope.zir;
const extended = zir.instructions.items(.data)[union_obj.zir_index].extended;
assert(extended.opcode == .union_decl);
const small = @bitCast(Zir.Inst.UnionDecl.Small, extended.small);
var extra_index: usize = extended.operand;
const src = LazySrcLoc.nodeOffset(0);
extra_index += @boolToInt(small.has_src_node);
const tag_type_ref: Zir.Inst.Ref = if (small.has_tag_type) blk: {
const ty_ref = @intToEnum(Zir.Inst.Ref, zir.extra[extra_index]);
extra_index += 1;
break :blk ty_ref;
} else .none;
const body_len = if (small.has_body_len) blk: {
const body_len = zir.extra[extra_index];
extra_index += 1;
break :blk body_len;
} else 0;
const fields_len = if (small.has_fields_len) blk: {
const fields_len = zir.extra[extra_index];
extra_index += 1;
break :blk fields_len;
} else 0;
const decls_len = if (small.has_decls_len) decls_len: {
const decls_len = zir.extra[extra_index];
extra_index += 1;
break :decls_len decls_len;
} else 0;
// Skip over decls.
var decls_it = zir.declIteratorInner(extra_index, decls_len);
while (decls_it.next()) |_| {}
extra_index = decls_it.extra_index;
const body = zir.extra[extra_index..][0..body_len];
extra_index += body.len;
const decl = mod.declPtr(decl_index);
var decl_arena: std.heap.ArenaAllocator = undefined;
const decl_arena_allocator = decl.value_arena.?.acquire(gpa, &decl_arena);
defer decl.value_arena.?.release(&decl_arena);
var analysis_arena = std.heap.ArenaAllocator.init(gpa);
defer analysis_arena.deinit();
var sema: Sema = .{
.mod = mod,
.gpa = gpa,
.arena = analysis_arena.allocator(),
.perm_arena = decl_arena_allocator,
.code = zir,
.owner_decl = decl,
.owner_decl_index = decl_index,
.func = null,
.fn_ret_ty = Type.void,
.owner_func = null,
};
defer sema.deinit();
var wip_captures = try WipCaptureScope.init(gpa, decl_arena_allocator, decl.src_scope);
defer wip_captures.deinit();
var block_scope: Block = .{
.parent = null,
.sema = &sema,
.src_decl = decl_index,
.namespace = union_obj.namespace,
.wip_capture_scope = wip_captures.scope,
.instructions = .{},
.inlining = null,
.is_comptime = true,
};
defer {
assert(block_scope.instructions.items.len == 0);
block_scope.params.deinit(gpa);
}
if (body.len != 0) {
try sema.analyzeBody(&block_scope, body);
}
try wip_captures.finalize();
try union_obj.fields.ensureTotalCapacity(decl_arena_allocator, fields_len);
var int_tag_ty: Type = undefined;
var enum_field_names: []InternPool.NullTerminatedString = &.{};
var enum_field_vals: []InternPool.Index = &.{};
var enum_field_vals_map: std.ArrayHashMapUnmanaged(Value, void, Value.ArrayHashContext, false) = .{};
var explicit_tags_seen: []bool = &.{};
var explicit_enum_info: ?InternPool.Key.EnumType = null;
if (tag_type_ref != .none) {
const tag_ty_src: LazySrcLoc = .{ .node_offset_container_tag = src.node_offset.x };
const provided_ty = try sema.resolveType(&block_scope, tag_ty_src, tag_type_ref);
if (small.auto_enum_tag) {
// The provided type is an integer type and we must construct the enum tag type here.
int_tag_ty = provided_ty;
if (int_tag_ty.zigTypeTag(mod) != .Int and int_tag_ty.zigTypeTag(mod) != .ComptimeInt) {
return sema.fail(&block_scope, tag_ty_src, "expected integer tag type, found '{}'", .{int_tag_ty.fmt(mod)});
}
if (fields_len > 0) {
const field_count_val = try mod.intValue(int_tag_ty, fields_len - 1);
if (!(try sema.intFitsInType(field_count_val, int_tag_ty, null))) {
const msg = msg: {
const msg = try sema.errMsg(&block_scope, tag_ty_src, "specified integer tag type cannot represent every field", .{});
errdefer msg.destroy(sema.gpa);
try sema.errNote(&block_scope, tag_ty_src, msg, "type '{}' cannot fit values in range 0...{d}", .{
int_tag_ty.fmt(mod),
fields_len - 1,
});
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
}
enum_field_names = try sema.arena.alloc(InternPool.NullTerminatedString, fields_len);
enum_field_vals = try sema.arena.alloc(InternPool.Index, fields_len);
} else {
// The provided type is the enum tag type.
union_obj.tag_ty = provided_ty;
const enum_type = switch (mod.intern_pool.indexToKey(union_obj.tag_ty.ip_index)) {
.enum_type => |x| x,
else => return sema.fail(&block_scope, tag_ty_src, "expected enum tag type, found '{}'", .{union_obj.tag_ty.fmt(mod)}),
};
// The fields of the union must match the enum exactly.
// A flag per field is used to check for missing and extraneous fields.
explicit_enum_info = enum_type;
explicit_tags_seen = try sema.arena.alloc(bool, enum_type.names.len);
@memset(explicit_tags_seen, false);
}
} else {
// If auto_enum_tag is false, this is an untagged union. However, for semantic analysis
// purposes, we still auto-generate an enum tag type the same way. The fact that the
// union is untagged is represented by the Type tag (union vs union_tagged).
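// e.g. (illustrative) `union { a: u32, b: f32 }` has no tag in the source,
// but a tag enum is still generated below for analysis purposes.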
enum_field_names = try sema.arena.alloc(InternPool.NullTerminatedString, fields_len);
}
if (fields_len == 0) {
return;
}
const bits_per_field = 4;
const fields_per_u32 = 32 / bits_per_field;
const bit_bags_count = std.math.divCeil(usize, fields_len, fields_per_u32) catch unreachable;
var bit_bag_index: usize = extra_index;
extra_index += bit_bags_count;
var cur_bit_bag: u32 = undefined;
var field_i: u32 = 0;
var last_tag_val: ?Value = null;
while (field_i < fields_len) : (field_i += 1) {
if (field_i % fields_per_u32 == 0) {
cur_bit_bag = zir.extra[bit_bag_index];
bit_bag_index += 1;
}
const has_type = @truncate(u1, cur_bit_bag) != 0;
cur_bit_bag >>= 1;
const has_align = @truncate(u1, cur_bit_bag) != 0;
cur_bit_bag >>= 1;
const has_tag = @truncate(u1, cur_bit_bag) != 0;
cur_bit_bag >>= 1;
const unused = @truncate(u1, cur_bit_bag) != 0;
cur_bit_bag >>= 1;
_ = unused;
const field_name_zir = zir.nullTerminatedString(zir.extra[extra_index]);
extra_index += 1;
// doc_comment
extra_index += 1;
const field_type_ref: Zir.Inst.Ref = if (has_type) blk: {
const field_type_ref = @intToEnum(Zir.Inst.Ref, zir.extra[extra_index]);
extra_index += 1;
break :blk field_type_ref;
} else .none;
const align_ref: Zir.Inst.Ref = if (has_align) blk: {
const align_ref = @intToEnum(Zir.Inst.Ref, zir.extra[extra_index]);
extra_index += 1;
break :blk align_ref;
} else .none;
const tag_ref: Air.Inst.Ref = if (has_tag) blk: {
const tag_ref = @intToEnum(Zir.Inst.Ref, zir.extra[extra_index]);
extra_index += 1;
break :blk try sema.resolveInst(tag_ref);
} else .none;
if (enum_field_vals.len != 0) {
const copied_val = if (tag_ref != .none) blk: {
const val = sema.semaUnionFieldVal(&block_scope, .unneeded, int_tag_ty, tag_ref) catch |err| switch (err) {
error.NeededSourceLocation => {
const val_src = mod.fieldSrcLoc(union_obj.owner_decl, .{
.index = field_i,
.range = .value,
}).lazy;
_ = try sema.semaUnionFieldVal(&block_scope, val_src, int_tag_ty, tag_ref);
unreachable;
},
else => |e| return e,
};
last_tag_val = val;
break :blk val;
} else blk: {
const val = if (last_tag_val) |val|
try sema.intAdd(val, Value.one_comptime_int, int_tag_ty)
else
try mod.intValue(int_tag_ty, 0);
last_tag_val = val;
break :blk val;
};
enum_field_vals[field_i] = copied_val.ip_index;
const gop = enum_field_vals_map.getOrPutAssumeCapacityContext(copied_val, .{
.ty = int_tag_ty,
.mod = mod,
});
if (gop.found_existing) {
const field_src = mod.fieldSrcLoc(union_obj.owner_decl, .{ .index = field_i }).lazy;
const other_field_src = mod.fieldSrcLoc(union_obj.owner_decl, .{ .index = gop.index }).lazy;
const msg = msg: {
const msg = try sema.errMsg(&block_scope, field_src, "enum tag value {} already taken", .{copied_val.fmtValue(int_tag_ty, mod)});
errdefer msg.destroy(gpa);
try sema.errNote(&block_scope, other_field_src, msg, "other occurrence here", .{});
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
}
// This string needs to outlive the ZIR code.
const field_name = try decl_arena_allocator.dupe(u8, field_name_zir);
const field_name_ip = try mod.intern_pool.getOrPutString(gpa, field_name);
if (enum_field_names.len != 0) {
enum_field_names[field_i] = field_name_ip;
}
const field_ty: Type = if (!has_type)
Type.void
else if (field_type_ref == .none)
Type.noreturn
else
sema.resolveType(&block_scope, .unneeded, field_type_ref) catch |err| switch (err) {
error.NeededSourceLocation => {
const ty_src = mod.fieldSrcLoc(union_obj.owner_decl, .{
.index = field_i,
.range = .type,
}).lazy;
_ = try sema.resolveType(&block_scope, ty_src, field_type_ref);
unreachable;
},
else => |e| return e,
};
if (field_ty.isGenericPoison()) {
return error.GenericPoison;
}
const gop = union_obj.fields.getOrPutAssumeCapacity(field_name);
if (gop.found_existing) {
const msg = msg: {
const field_src = mod.fieldSrcLoc(union_obj.owner_decl, .{ .index = field_i }).lazy;
const msg = try sema.errMsg(&block_scope, field_src, "duplicate union field: '{s}'", .{field_name});
errdefer msg.destroy(gpa);
const prev_field_index = union_obj.fields.getIndex(field_name).?;
const prev_field_src = mod.fieldSrcLoc(union_obj.owner_decl, .{ .index = prev_field_index }).lazy;
try mod.errNoteNonLazy(prev_field_src.toSrcLoc(decl, mod), msg, "other field here", .{});
try sema.errNote(&block_scope, src, msg, "union declared here", .{});
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
if (explicit_enum_info) |tag_info| {
const enum_index = tag_info.nameIndex(&mod.intern_pool, field_name_ip) orelse {
const msg = msg: {
const ty_src = mod.fieldSrcLoc(union_obj.owner_decl, .{
.index = field_i,
.range = .type,
}).lazy;
const msg = try sema.errMsg(&block_scope, ty_src, "no field named '{s}' in enum '{}'", .{
field_name, union_obj.tag_ty.fmt(mod),
});
errdefer msg.destroy(sema.gpa);
try sema.addDeclaredHereNote(msg, union_obj.tag_ty);
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
};
// No check for duplicates is needed because that check already happened
// when the enum type was created in the first place.
assert(!explicit_tags_seen[enum_index]);
explicit_tags_seen[enum_index] = true;
}
if (field_ty.zigTypeTag(mod) == .Opaque) {
const msg = msg: {
const ty_src = mod.fieldSrcLoc(union_obj.owner_decl, .{
.index = field_i,
.range = .type,
}).lazy;
const msg = try sema.errMsg(&block_scope, ty_src, "opaque types have unknown size and therefore cannot be directly embedded in unions", .{});
errdefer msg.destroy(sema.gpa);
try sema.addDeclaredHereNote(msg, field_ty);
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
if (union_obj.layout == .Extern and !try sema.validateExternType(field_ty, .union_field)) {
const msg = msg: {
const ty_src = mod.fieldSrcLoc(union_obj.owner_decl, .{
.index = field_i,
.range = .type,
});
const msg = try sema.errMsg(&block_scope, ty_src.lazy, "extern unions cannot contain fields of type '{}'", .{field_ty.fmt(mod)});
errdefer msg.destroy(sema.gpa);
try sema.explainWhyTypeIsNotExtern(msg, ty_src, field_ty, .union_field);
try sema.addDeclaredHereNote(msg, field_ty);
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
} else if (union_obj.layout == .Packed and !(validatePackedType(field_ty, mod))) {
const msg = msg: {
const ty_src = mod.fieldSrcLoc(union_obj.owner_decl, .{
.index = field_i,
.range = .type,
});
const msg = try sema.errMsg(&block_scope, ty_src.lazy, "packed unions cannot contain fields of type '{}'", .{field_ty.fmt(mod)});
errdefer msg.destroy(sema.gpa);
try sema.explainWhyTypeIsNotPacked(msg, ty_src, field_ty);
try sema.addDeclaredHereNote(msg, field_ty);
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
gop.value_ptr.* = .{
.ty = field_ty,
.abi_align = 0,
};
if (align_ref != .none) {
gop.value_ptr.abi_align = sema.resolveAlign(&block_scope, .unneeded, align_ref) catch |err| switch (err) {
error.NeededSourceLocation => {
const align_src = mod.fieldSrcLoc(union_obj.owner_decl, .{
.index = field_i,
.range = .alignment,
}).lazy;
_ = try sema.resolveAlign(&block_scope, align_src, align_ref);
unreachable;
},
else => |e| return e,
};
} else {
gop.value_ptr.abi_align = 0;
}
}
if (explicit_enum_info) |tag_info| {
if (tag_info.names.len > fields_len) {
const msg = msg: {
const msg = try sema.errMsg(&block_scope, src, "enum field(s) missing in union", .{});
errdefer msg.destroy(sema.gpa);
const enum_ty = union_obj.tag_ty;
for (tag_info.names, 0..) |field_name, field_index| {
if (explicit_tags_seen[field_index]) continue;
try sema.addFieldErrNote(enum_ty, field_index, msg, "field '{s}' missing, declared here", .{
mod.intern_pool.stringToSlice(field_name),
});
}
try sema.addDeclaredHereNote(msg, union_obj.tag_ty);
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
} else if (enum_field_vals.len != 0) {
union_obj.tag_ty = try sema.generateUnionTagTypeNumbered(&block_scope, enum_field_names, enum_field_vals, union_obj);
} else {
union_obj.tag_ty = try sema.generateUnionTagTypeSimple(&block_scope, enum_field_names, union_obj);
}
}
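/// Coerces a union tag value expression to the integer tag type and resolves
/// it as a comptime-known value.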
fn semaUnionFieldVal(sema: *Sema, block: *Block, src: LazySrcLoc, int_tag_ty: Type, tag_ref: Air.Inst.Ref) CompileError!Value {
const coerced = try sema.coerce(block, int_tag_ty, tag_ref, src);
return sema.resolveConstValue(block, src, coerced, "enum tag value must be comptime-known");
}
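/// Creates an anonymous decl holding a new explicitly-valued enum type to
/// serve as the inferred tag type of `union_obj`.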
fn generateUnionTagTypeNumbered(
sema: *Sema,
block: *Block,
enum_field_names: []const InternPool.NullTerminatedString,
enum_field_vals: []const InternPool.Index,
union_obj: *Module.Union,
) !Type {
const mod = sema.mod;
const src_decl = mod.declPtr(block.src_decl);
const new_decl_index = try mod.allocateNewDecl(block.namespace, src_decl.src_node, block.wip_capture_scope);
errdefer mod.destroyDecl(new_decl_index);
const name = name: {
const fqn = try union_obj.getFullyQualifiedName(mod);
defer sema.gpa.free(fqn);
break :name try std.fmt.allocPrintZ(mod.gpa, "@typeInfo({s}).Union.tag_type.?", .{fqn});
};
try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, block.namespace, .{
.ty = Type.type,
.val = undefined,
}, name);
const new_decl = mod.declPtr(new_decl_index);
new_decl.name_fully_qualified = true;
new_decl.owns_tv = true;
const enum_ty = try mod.intern(.{ .enum_type = .{
.decl = new_decl_index,
.namespace = .none,
.tag_ty = if (enum_field_vals.len == 0)
.noreturn_type
else
mod.intern_pool.typeOf(enum_field_vals[0]),
.names = enum_field_names,
.values = enum_field_vals,
.tag_mode = .explicit,
} });
new_decl.val = enum_ty.toValue();
return enum_ty.toType();
}
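/// Like `generateUnionTagTypeNumbered`, but creates an auto-numbered enum
/// with no explicit tag values. A null `maybe_union_obj` falls back to a
/// plain anonymous decl without the fully qualified name.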
fn generateUnionTagTypeSimple(
sema: *Sema,
block: *Block,
enum_field_names: []const InternPool.NullTerminatedString,
maybe_union_obj: ?*Module.Union,
) !Type {
const mod = sema.mod;
const new_decl_index = new_decl_index: {
const union_obj = maybe_union_obj orelse {
break :new_decl_index try mod.createAnonymousDecl(block, .{
.ty = Type.type,
.val = undefined,
});
};
const src_decl = mod.declPtr(block.src_decl);
const new_decl_index = try mod.allocateNewDecl(block.namespace, src_decl.src_node, block.wip_capture_scope);
errdefer mod.destroyDecl(new_decl_index);
const name = name: {
const fqn = try union_obj.getFullyQualifiedName(mod);
defer sema.gpa.free(fqn);
break :name try std.fmt.allocPrintZ(mod.gpa, "@typeInfo({s}).Union.tag_type.?", .{fqn});
};
try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, block.namespace, .{
.ty = Type.type,
.val = undefined,
}, name);
mod.declPtr(new_decl_index).name_fully_qualified = true;
break :new_decl_index new_decl_index;
};
const enum_ty = try mod.intern(.{ .enum_type = .{
.decl = new_decl_index,
.namespace = .none,
.tag_ty = if (enum_field_names.len == 0)
.noreturn_type
else
(try mod.smallestUnsignedInt(enum_field_names.len - 1)).ip_index,
.names = enum_field_names,
.values = &.{},
.tag_mode = .auto,
} });
const new_decl = mod.declPtr(new_decl_index);
new_decl.owns_tv = true;
new_decl.val = enum_ty.toValue();
return enum_ty.toType();
}
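/// Looks up `name` in the `std.builtin` namespace and returns it as an
/// analyzed declaration value. Panics if the standard library is corrupt.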
fn getBuiltin(sema: *Sema, name: []const u8) CompileError!Air.Inst.Ref {
var wip_captures = try WipCaptureScope.init(sema.gpa, sema.perm_arena, sema.owner_decl.src_scope);
defer wip_captures.deinit();
var block: Block = .{
.parent = null,
.sema = sema,
.src_decl = sema.owner_decl_index,
.namespace = sema.owner_decl.src_namespace,
.wip_capture_scope = wip_captures.scope,
.instructions = .{},
.inlining = null,
.is_comptime = true,
};
defer {
block.instructions.deinit(sema.gpa);
block.params.deinit(sema.gpa);
}
const src = LazySrcLoc.nodeOffset(0);
const mod = sema.mod;
const std_pkg = mod.main_pkg.table.get("std").?;
const std_file = (mod.importPkg(std_pkg) catch unreachable).file;
const opt_builtin_inst = (try sema.namespaceLookupRef(
&block,
src,
mod.declPtr(std_file.root_decl.unwrap().?).src_namespace,
"builtin",
)) orelse @panic("lib/std.zig is corrupt and missing 'builtin'");
const builtin_inst = try sema.analyzeLoad(&block, src, opt_builtin_inst, src);
const builtin_ty = sema.analyzeAsType(&block, src, builtin_inst) catch |err| switch (err) {
error.AnalysisFail => std.debug.panic("std.builtin is corrupt", .{}),
else => |e| return e,
};
const opt_ty_decl = (try sema.namespaceLookup(
&block,
src,
builtin_ty.getNamespaceIndex(mod).unwrap().?,
name,
)) orelse std.debug.panic("lib/std/builtin.zig is corrupt and missing '{s}'", .{name});
return sema.analyzeDeclVal(&block, src, opt_ty_decl);
}
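/// Like `getBuiltin`, but additionally resolves the declaration as a fully
/// resolved `Type`.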
fn getBuiltinType(sema: *Sema, name: []const u8) CompileError!Type {
const ty_inst = try sema.getBuiltin(name);
var wip_captures = try WipCaptureScope.init(sema.gpa, sema.perm_arena, sema.owner_decl.src_scope);
defer wip_captures.deinit();
var block: Block = .{
.parent = null,
.sema = sema,
.src_decl = sema.owner_decl_index,
.namespace = sema.owner_decl.src_namespace,
.wip_capture_scope = wip_captures.scope,
.instructions = .{},
.inlining = null,
.is_comptime = true,
};
defer {
block.instructions.deinit(sema.gpa);
block.params.deinit(sema.gpa);
}
const src = LazySrcLoc.nodeOffset(0);
const result_ty = sema.analyzeAsType(&block, src, ty_inst) catch |err| switch (err) {
error.AnalysisFail => std.debug.panic("std.builtin.{s} is corrupt", .{name}),
else => |e| return e,
};
try sema.resolveTypeFully(result_ty); // Should not fail
return result_ty;
}
/// There is another implementation of this in `Type.onePossibleValue`. This one
/// in `Sema` is for calling during semantic analysis, and performs field resolution
/// to get the answer. The one in `Type` is for calling during codegen and asserts
/// that the types are already resolved.
/// TODO assert the return value matches `ty.onePossibleValue`
pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
const mod = sema.mod;
switch (ty.ip_index) {
.empty_struct_type => return Value.empty_struct,
.none => switch (ty.tag()) {
.inferred_alloc_const => unreachable,
.inferred_alloc_mut => unreachable,
},
else => return switch (mod.intern_pool.indexToKey(ty.ip_index)) {
.int_type => |int_type| {
if (int_type.bits == 0) {
return try mod.intValue(ty, 0);
} else {
return null;
}
},
.ptr_type,
.error_union_type,
.func_type,
.anyframe_type,
.error_set_type,
.inferred_error_set_type,
=> null,
.array_type => |array_type| {
if (array_type.len == 0)
return Value.initTag(.empty_array);
if ((try sema.typeHasOnePossibleValue(array_type.child.toType())) != null) {
return Value.initTag(.the_only_possible_value);
}
return null;
},
.vector_type => |vector_type| {
if (vector_type.len == 0) return Value.initTag(.empty_array);
if (try sema.typeHasOnePossibleValue(vector_type.child.toType())) |v| return v;
return null;
},
.opt_type => |child| {
if (child == .noreturn_type) {
return try mod.nullValue(ty);
} else {
return null;
}
},
.simple_type => |t| switch (t) {
.f16,
.f32,
.f64,
.f80,
.f128,
.usize,
.isize,
.c_char,
.c_short,
.c_ushort,
.c_int,
.c_uint,
.c_long,
.c_ulong,
.c_longlong,
.c_ulonglong,
.c_longdouble,
.anyopaque,
.bool,
.type,
.anyerror,
.comptime_int,
.comptime_float,
.enum_literal,
.atomic_order,
.atomic_rmw_op,
.calling_convention,
.address_space,
.float_mode,
.reduce_op,
.call_modifier,
.prefetch_options,
.export_options,
.extern_options,
.type_info,
=> null,
.void => Value.void,
.noreturn => Value.@"unreachable",
.null => Value.null,
.undefined => Value.undef,
.generic_poison => return error.GenericPoison,
.var_args_param => unreachable,
},
.struct_type => |struct_type| {
const resolved_ty = try sema.resolveTypeFields(ty);
if (mod.structPtrUnwrap(struct_type.index)) |s| {
for (s.fields.values(), 0..) |field, i| {
if (field.is_comptime) continue;
if (field.ty.eql(resolved_ty, sema.mod)) {
const msg = try Module.ErrorMsg.create(
sema.gpa,
s.srcLoc(sema.mod),
"struct '{}' depends on itself",
.{ty.fmt(sema.mod)},
);
try sema.addFieldErrNote(resolved_ty, i, msg, "while checking this field", .{});
return sema.failWithOwnedErrorMsg(msg);
}
if ((try sema.typeHasOnePossibleValue(field.ty)) == null) {
return null;
}
}
}
// In this case the struct has no runtime fields and
// therefore has exactly one possible value.
// TODO: this is incorrect for structs with comptime fields, I think
// we should use a temporary allocator to construct an aggregate that
// is populated with the comptime values and then intern that value here.
// This TODO is repeated in the redundant implementation of
// one-possible-value in type.zig.
const empty = try mod.intern(.{ .aggregate = .{
.ty = ty.ip_index,
.storage = .{ .elems = &.{} },
} });
return empty.toValue();
},
.anon_struct_type => |tuple| {
for (tuple.values) |val| {
if (val == .none) return null;
}
// In this case the struct has all comptime-known fields and
// therefore has one possible value.
return (try mod.intern(.{ .aggregate = .{
.ty = ty.ip_index,
.storage = .{ .elems = tuple.values },
} })).toValue();
},
.union_type => |union_type| {
const resolved_ty = try sema.resolveTypeFields(ty);
const union_obj = mod.unionPtr(union_type.index);
const tag_val = (try sema.typeHasOnePossibleValue(union_obj.tag_ty)) orelse
return null;
const fields = union_obj.fields.values();
if (fields.len == 0) return Value.@"unreachable";
const only_field = fields[0];
if (only_field.ty.eql(resolved_ty, sema.mod)) {
const msg = try Module.ErrorMsg.create(
sema.gpa,
union_obj.srcLoc(sema.mod),
"union '{}' depends on itself",
.{ty.fmt(sema.mod)},
);
try sema.addFieldErrNote(resolved_ty, 0, msg, "while checking this field", .{});
return sema.failWithOwnedErrorMsg(msg);
}
const val_val = (try sema.typeHasOnePossibleValue(only_field.ty)) orelse
return null;
const only = try mod.intern(.{ .un = .{
.ty = resolved_ty.ip_index,
.tag = tag_val.ip_index,
.val = val_val.ip_index,
} });
return only.toValue();
},
.opaque_type => null,
.enum_type => |enum_type| switch (enum_type.tag_mode) {
.nonexhaustive => {
if (enum_type.tag_ty == .comptime_int_type) return null;
if (try sema.typeHasOnePossibleValue(enum_type.tag_ty.toType())) |int_opv| {
const only = try mod.intern(.{ .enum_tag = .{
.ty = ty.ip_index,
.int = int_opv.ip_index,
} });
return only.toValue();
}
return null;
},
.auto, .explicit => switch (enum_type.names.len) {
0 => return Value.@"unreachable",
1 => {
if (enum_type.values.len == 0) {
const only = try mod.intern(.{ .enum_tag = .{
.ty = ty.ip_index,
.int = try mod.intern(.{ .int = .{
.ty = enum_type.tag_ty,
.storage = .{ .u64 = 0 },
} }),
} });
return only.toValue();
} else {
return enum_type.values[0].toValue();
}
},
else => return null,
},
},
// values, not types
.undef => unreachable,
.un => unreachable,
.simple_value => unreachable,
.extern_func => unreachable,
.int => unreachable,
.float => unreachable,
.ptr => unreachable,
.opt => unreachable,
.enum_tag => unreachable,
.aggregate => unreachable,
},
}
}
/// Returns the type of the AIR instruction.
fn typeOf(sema: *Sema, inst: Air.Inst.Ref) Type {
return sema.getTmpAir().typeOf(inst, sema.mod.intern_pool);
}
pub fn getTmpAir(sema: Sema) Air {
return .{
.instructions = sema.air_instructions.slice(),
.extra = sema.air_extra.items,
.values = sema.air_values.items,
};
}
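/// Returns an `Air.Inst.Ref` for `ty`. Interned types below
/// `Air.ref_start_index` are encoded directly in the ref; other interned
/// types get an `interned` instruction, and legacy types a `const_ty` one.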
pub fn addType(sema: *Sema, ty: Type) !Air.Inst.Ref {
if (ty.ip_index != .none) {
if (@enumToInt(ty.ip_index) < Air.ref_start_index)
return @intToEnum(Air.Inst.Ref, @enumToInt(ty.ip_index));
try sema.air_instructions.append(sema.gpa, .{
.tag = .interned,
.data = .{ .interned = ty.ip_index },
});
return Air.indexToRef(@intCast(u32, sema.air_instructions.len - 1));
} else {
try sema.air_instructions.append(sema.gpa, .{
.tag = .const_ty,
.data = .{ .ty = ty },
});
return Air.indexToRef(@intCast(u32, sema.air_instructions.len - 1));
}
}
fn addIntUnsigned(sema: *Sema, ty: Type, int: u64) CompileError!Air.Inst.Ref {
const mod = sema.mod;
return sema.addConstant(ty, try mod.intValue(ty, int));
}
fn addConstUndef(sema: *Sema, ty: Type) CompileError!Air.Inst.Ref {
return sema.addConstant(ty, Value.undef);
}
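/// Returns an `Air.Inst.Ref` for the constant `val`. Interned values use an
/// `interned` instruction (with `ty` checked against the value's type when
/// runtime safety is on); legacy values use a `constant` instruction with
/// the value stored in `air_values`.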
pub fn addConstant(sema: *Sema, ty: Type, val: Value) SemaError!Air.Inst.Ref {
const gpa = sema.gpa;
if (val.ip_index != .none and val.ip_index != .null_value) {
if (@enumToInt(val.ip_index) < Air.ref_start_index)
return @intToEnum(Air.Inst.Ref, @enumToInt(val.ip_index));
try sema.air_instructions.append(gpa, .{
.tag = .interned,
.data = .{ .interned = val.ip_index },
});
const result = Air.indexToRef(@intCast(u32, sema.air_instructions.len - 1));
// This assertion can be removed when the `ty` parameter is removed from
// this function thanks to the InternPool transition being complete.
if (std.debug.runtime_safety) {
const val_ty = sema.typeOf(result);
if (!Type.eql(val_ty, ty, sema.mod)) {
std.debug.panic("addConstant type mismatch: '{}' vs '{}'\n", .{
ty.fmt(sema.mod), val_ty.fmt(sema.mod),
});
}
}
return result;
}
const ty_inst = try sema.addType(ty);
try sema.air_values.append(gpa, val);
try sema.air_instructions.append(gpa, .{
.tag = .constant,
.data = .{ .ty_pl = .{
.ty = ty_inst,
.payload = @intCast(u32, sema.air_values.items.len - 1),
} },
});
return Air.indexToRef(@intCast(u32, sema.air_instructions.len - 1));
}
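/// Appends each field of `extra` to `air_extra` and returns the index of
/// the first appended element.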
pub fn addExtra(sema: *Sema, extra: anytype) Allocator.Error!u32 {
const fields = std.meta.fields(@TypeOf(extra));
try sema.air_extra.ensureUnusedCapacity(sema.gpa, fields.len);
return sema.addExtraAssumeCapacity(extra);
}
pub fn addExtraAssumeCapacity(sema: *Sema, extra: anytype) u32 {
const fields = std.meta.fields(@TypeOf(extra));
const result = @intCast(u32, sema.air_extra.items.len);
inline for (fields) |field| {
sema.air_extra.appendAssumeCapacity(switch (field.type) {
u32 => @field(extra, field.name),
Air.Inst.Ref => @enumToInt(@field(extra, field.name)),
i32 => @bitCast(u32, @field(extra, field.name)),
else => @compileError("bad field type"),
});
}
return result;
}
fn appendRefsAssumeCapacity(sema: *Sema, refs: []const Air.Inst.Ref) void {
const coerced = @ptrCast([]const u32, refs);
sema.air_extra.appendSliceAssumeCapacity(coerced);
}
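/// If `inst_index` is a `br` instruction, returns the block it breaks to;
/// otherwise returns null.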
fn getBreakBlock(sema: *Sema, inst_index: Air.Inst.Index) ?Air.Inst.Index {
const air_datas = sema.air_instructions.items(.data);
const air_tags = sema.air_instructions.items(.tag);
switch (air_tags[inst_index]) {
.br => return air_datas[inst_index].br.block_inst,
else => return null,
}
}
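/// Returns true if the value of `inst` is comptime-known, including undef.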
fn isComptimeKnown(
sema: *Sema,
inst: Air.Inst.Ref,
) !bool {
return (try sema.resolveMaybeUndefVal(inst)) != null;
}
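/// Backs a comptime-mutable allocation with a new anonymous decl initialized
/// to undef, and returns a `decl_ref_mut` pointer to it.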
fn analyzeComptimeAlloc(
sema: *Sema,
block: *Block,
var_type: Type,
alignment: u32,
) CompileError!Air.Inst.Ref {
// Needed to make an anon decl with type `var_type` (the `finish()` call below).
_ = try sema.typeHasOnePossibleValue(var_type);
const ptr_type = try Type.ptr(sema.arena, sema.mod, .{
.pointee_type = var_type,
.@"addrspace" = target_util.defaultAddressSpace(sema.mod.getTarget(), .global_constant),
.@"align" = alignment,
});
var anon_decl = try block.startAnonDecl();
defer anon_decl.deinit();
const decl_index = try anon_decl.finish(
var_type,
// There will be stores before the first load, but they may be to sub-elements or
// sub-fields. So we need to initialize with undef to allow the mechanism to expand
// into fields/elements and have those overridden with stored values.
Value.undef,
alignment,
);
const decl = sema.mod.declPtr(decl_index);
decl.@"align" = alignment;
try sema.mod.declareDeclDependency(sema.owner_decl_index, decl_index);
return sema.addConstant(ptr_type, try Value.Tag.decl_ref_mut.create(sema.arena, .{
.runtime_index = block.runtime_index,
.decl_index = decl_index,
}));
}
/// The places where a user can specify an address space attribute
pub const AddressSpaceContext = enum {
/// A function is specified to be placed in a certain address space.
function,
/// A (global) variable is specified to be placed in a certain address space.
/// In contrast to .constant, these values (and thus the address space they will be
/// placed in) are required to be mutable.
variable,
/// A (global) constant value is specified to be placed in a certain address space.
/// In contrast to .variable, values placed in this address space are not required to be mutable.
constant,
/// A pointer is ascribed to point into a certain address space.
pointer,
};
pub fn analyzeAddressSpace(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
zir_ref: Zir.Inst.Ref,
ctx: AddressSpaceContext,
) !std.builtin.AddressSpace {
const mod = sema.mod;
const addrspace_tv = try sema.resolveInstConst(block, src, zir_ref, "address space must be comptime-known");
const address_space = mod.toEnum(std.builtin.AddressSpace, addrspace_tv.val);
const target = sema.mod.getTarget();
const arch = target.cpu.arch;
const is_nv = arch == .nvptx or arch == .nvptx64;
const is_amd = arch == .amdgcn;
const is_spirv = arch == .spirv32 or arch == .spirv64;
const is_gpu = is_nv or is_amd or is_spirv;
const supported = switch (address_space) {
// TODO: on spir-v only when os is opencl.
.generic => true,
.gs, .fs, .ss => (arch == .x86 or arch == .x86_64) and ctx == .pointer,
// TODO: check that .shared and .local are left uninitialized
.param => is_nv,
.global, .shared, .local => is_gpu,
.constant => is_gpu and (ctx == .constant),
// TODO this should also check how many flash banks the cpu has
.flash, .flash1, .flash2, .flash3, .flash4, .flash5 => arch == .avr,
};
if (!supported) {
// TODO error messages could be made more elaborate here
const entity = switch (ctx) {
.function => "functions",
.variable => "mutable values",
.constant => "constant values",
.pointer => "pointers",
};
return sema.fail(
block,
src,
"{s} with address space '{s}' are not supported on {s}",
.{ entity, @tagName(address_space), arch.genericName() },
);
}
return address_space;
}
/// Asserts the value is a pointer and dereferences it.
/// Returns `null` if the pointer contents cannot be loaded at comptime.
fn pointerDeref(sema: *Sema, block: *Block, src: LazySrcLoc, ptr_val: Value, ptr_ty: Type) CompileError!?Value {
const mod = sema.mod;
const load_ty = ptr_ty.childType(mod);
const res = try sema.pointerDerefExtra(block, src, ptr_val, load_ty, true);
switch (res) {
.runtime_load => return null,
.val => |v| return v,
.needed_well_defined => |ty| return sema.fail(
block,
src,
"comptime dereference requires '{}' to have a well-defined layout, but it does not.",
.{ty.fmt(sema.mod)},
),
.out_of_bounds => |ty| return sema.fail(
block,
src,
"dereference of '{}' exceeds bounds of containing decl of type '{}'",
.{ ptr_ty.fmt(sema.mod), ty.fmt(sema.mod) },
),
}
}
const DerefResult = union(enum) {
runtime_load,
val: Value,
needed_well_defined: Type,
out_of_bounds: Type,
};
fn pointerDerefExtra(sema: *Sema, block: *Block, src: LazySrcLoc, ptr_val: Value, load_ty: Type, want_mutable: bool) CompileError!DerefResult {
const mod = sema.mod;
const target = mod.getTarget();
const deref = sema.beginComptimePtrLoad(block, src, ptr_val, load_ty) catch |err| switch (err) {
error.RuntimeLoad => return DerefResult{ .runtime_load = {} },
else => |e| return e,
};
if (deref.pointee) |tv| {
const coerce_in_mem_ok =
(try sema.coerceInMemoryAllowed(block, load_ty, tv.ty, false, target, src, src)) == .ok or
(try sema.coerceInMemoryAllowed(block, tv.ty, load_ty, false, target, src, src)) == .ok;
if (coerce_in_mem_ok) {
// We have a Value that lines up in virtual memory exactly with what we want to load,
// and it is in-memory coercible to load_ty. It may be returned without modifications.
if (deref.is_mutable and want_mutable) {
// The decl whose value we are obtaining here may be overwritten with
// a different value upon further semantic analysis, which would
// invalidate this memory. So we must copy here.
return DerefResult{ .val = try tv.val.copy(sema.arena) };
}
return DerefResult{ .val = tv.val };
}
}
// The type is not in-memory coercible or the direct dereference failed, so it must
// be bitcast according to the pointer type we are performing the load through.
if (!load_ty.hasWellDefinedLayout(mod)) {
return DerefResult{ .needed_well_defined = load_ty };
}
const load_sz = try sema.typeAbiSize(load_ty);
// Try the smaller bit-cast first, since that's more efficient than using the larger `parent`
if (deref.pointee) |tv| if (load_sz <= try sema.typeAbiSize(tv.ty))
return DerefResult{ .val = (try sema.bitCastVal(block, src, tv.val, tv.ty, load_ty, 0)) orelse return .runtime_load };
// If that fails, try to bit-cast from the largest parent value with a well-defined layout
if (deref.parent) |parent| if (load_sz + parent.byte_offset <= try sema.typeAbiSize(parent.tv.ty))
return DerefResult{ .val = (try sema.bitCastVal(block, src, parent.tv.val, parent.tv.ty, load_ty, parent.byte_offset)) orelse return .runtime_load };
if (deref.ty_without_well_defined_layout) |bad_ty| {
// We got no parent for bit-casting, or the parent we got was too small. Either way, the problem
// is that some type we encountered when de-referencing does not have a well-defined layout.
return DerefResult{ .needed_well_defined = bad_ty };
} else {
// If all encountered types had well-defined layouts, the parent is the root decl and it just
// wasn't big enough for the load.
return DerefResult{ .out_of_bounds = deref.parent.?.tv.ty };
}
}
/// Used to convert a u64 value to a usize value, emitting a compile error if the number
/// is too big to fit.
fn usizeCast(sema: *Sema, block: *Block, src: LazySrcLoc, int: u64) CompileError!usize {
if (@bitSizeOf(u64) <= @bitSizeOf(usize)) return int;
return std.math.cast(usize, int) orelse return sema.fail(block, src, "expression produces integer value '{d}' which is too big for this compiler implementation to handle", .{int});
}
/// For pointer-like optionals, it returns the pointer type. For pointers,
/// the type is returned unmodified.
/// This can return `error.AnalysisFail` because it sometimes requires resolving whether
/// a type has zero bits, which can cause a "foo depends on itself" compile error.
/// This logic must be kept in sync with `Type.isPtrLikeOptional`.
fn typePtrOrOptionalPtrTy(sema: *Sema, ty: Type) !?Type {
const mod = sema.mod;
if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) {
.ptr_type => |ptr_type| switch (ptr_type.size) {
.Slice => return null,
.C => return ptr_type.elem_type.toType(),
.One, .Many => return ty,
},
.opt_type => |opt_child| switch (mod.intern_pool.indexToKey(opt_child)) {
.ptr_type => |ptr_type| switch (ptr_type.size) {
.Slice, .C => return null,
.Many, .One => {
if (ptr_type.is_allowzero) return null;
// optionals of zero sized types behave like bools, not pointers
const payload_ty = opt_child.toType();
if ((try sema.typeHasOnePossibleValue(payload_ty)) != null) {
return null;
}
return payload_ty;
},
},
else => return null,
},
else => return null,
};
switch (ty.tag()) {
.inferred_alloc_const => unreachable,
.inferred_alloc_mut => unreachable,
}
}
/// `generic_poison` will return false.
/// This function returns false negatives when structs and unions are having their
/// field types resolved.
/// TODO assert the return value matches `ty.comptimeOnly`
/// TODO merge these implementations together with the "advanced"/opt_sema pattern seen
/// elsewhere in value.zig
pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool {
const mod = sema.mod;
return switch (ty.ip_index) {
.empty_struct_type => false,
else => switch (mod.intern_pool.indexToKey(ty.ip_index)) {
.int_type => return false,
.ptr_type => |ptr_type| {
const child_ty = ptr_type.elem_type.toType();
if (child_ty.zigTypeTag(mod) == .Fn) {
return mod.typeToFunc(child_ty).?.is_generic;
} else {
return sema.typeRequiresComptime(child_ty);
}
},
.anyframe_type => |child| {
if (child == .none) return false;
return sema.typeRequiresComptime(child.toType());
},
.array_type => |array_type| return sema.typeRequiresComptime(array_type.child.toType()),
.vector_type => |vector_type| return sema.typeRequiresComptime(vector_type.child.toType()),
.opt_type => |child| return sema.typeRequiresComptime(child.toType()),
.error_union_type => |error_union_type| {
return sema.typeRequiresComptime(error_union_type.payload_type.toType());
},
.error_set_type, .inferred_error_set_type => false,
.func_type => true,
.simple_type => |t| return switch (t) {
.f16,
.f32,
.f64,
.f80,
.f128,
.usize,
.isize,
.c_char,
.c_short,
.c_ushort,
.c_int,
.c_uint,
.c_long,
.c_ulong,
.c_longlong,
.c_ulonglong,
.c_longdouble,
.anyopaque,
.bool,
.void,
.anyerror,
.noreturn,
.generic_poison,
.atomic_order,
.atomic_rmw_op,
.calling_convention,
.address_space,
.float_mode,
.reduce_op,
.call_modifier,
.prefetch_options,
.export_options,
.extern_options,
=> false,
.type,
.comptime_int,
.comptime_float,
.null,
.undefined,
.enum_literal,
.type_info,
=> true,
.var_args_param => unreachable,
},
.struct_type => |struct_type| {
const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return false;
switch (struct_obj.requires_comptime) {
.no, .wip => return false,
.yes => return true,
.unknown => {
if (struct_obj.status == .field_types_wip)
return false;
try sema.resolveTypeFieldsStruct(ty, struct_obj);
struct_obj.requires_comptime = .wip;
for (struct_obj.fields.values()) |field| {
if (field.is_comptime) continue;
if (try sema.typeRequiresComptime(field.ty)) {
struct_obj.requires_comptime = .yes;
return true;
}
}
struct_obj.requires_comptime = .no;
return false;
},
}
},
.anon_struct_type => |tuple| {
for (tuple.types, tuple.values) |field_ty, val| {
const have_comptime_val = val != .none;
if (!have_comptime_val and try sema.typeRequiresComptime(field_ty.toType())) {
return true;
}
}
return false;
},
.union_type => |union_type| {
const union_obj = mod.unionPtr(union_type.index);
switch (union_obj.requires_comptime) {
.no, .wip => return false,
.yes => return true,
.unknown => {
if (union_obj.status == .field_types_wip)
return false;
try sema.resolveTypeFieldsUnion(ty, union_obj);
union_obj.requires_comptime = .wip;
for (union_obj.fields.values()) |field| {
if (try sema.typeRequiresComptime(field.ty)) {
union_obj.requires_comptime = .yes;
return true;
}
}
union_obj.requires_comptime = .no;
return false;
},
}
},
.opaque_type => false,
.enum_type => |enum_type| try sema.typeRequiresComptime(enum_type.tag_ty.toType()),
// values, not types
.undef => unreachable,
.un => unreachable,
.simple_value => unreachable,
.extern_func => unreachable,
.int => unreachable,
.float => unreachable,
.ptr => unreachable,
.opt => unreachable,
.enum_tag => unreachable,
.aggregate => unreachable,
},
};
}
pub fn typeHasRuntimeBits(sema: *Sema, ty: Type) CompileError!bool {
const mod = sema.mod;
return ty.hasRuntimeBitsAdvanced(mod, false, .{ .sema = sema }) catch |err| switch (err) {
error.NeedLazy => unreachable,
else => |e| return e,
};
}
fn typeAbiSize(sema: *Sema, ty: Type) !u64 {
try sema.resolveTypeLayout(ty);
return ty.abiSize(sema.mod);
}
fn typeAbiAlignment(sema: *Sema, ty: Type) CompileError!u32 {
return (try ty.abiAlignmentAdvanced(sema.mod, .{ .sema = sema })).scalar;
}
/// Not valid to call for packed unions.
/// Keep implementation in sync with `Module.Union.Field.normalAlignment`.
fn unionFieldAlignment(sema: *Sema, field: Module.Union.Field) !u32 {
const mod = sema.mod;
if (field.ty.zigTypeTag(mod) == .NoReturn) {
return @as(u32, 0);
} else if (field.abi_align == 0) {
return sema.typeAbiAlignment(field.ty);
} else {
return field.abi_align;
}
}
/// Synchronize logic with `Type.isFnOrHasRuntimeBits`.
pub fn fnHasRuntimeBits(sema: *Sema, ty: Type) CompileError!bool {
const mod = sema.mod;
const fn_info = mod.typeToFunc(ty).?;
if (fn_info.is_generic) return false;
if (fn_info.is_var_args) return true;
switch (fn_info.cc) {
// If there was a comptime calling convention, it should also return false here.
.Inline => return false,
else => {},
}
if (try sema.typeRequiresComptime(fn_info.return_type.toType())) {
return false;
}
return true;
}
fn unionFieldIndex(
sema: *Sema,
block: *Block,
unresolved_union_ty: Type,
field_name: []const u8,
field_src: LazySrcLoc,
) !u32 {
const mod = sema.mod;
const union_ty = try sema.resolveTypeFields(unresolved_union_ty);
const union_obj = mod.typeToUnion(union_ty).?;
const field_index_usize = union_obj.fields.getIndex(field_name) orelse
return sema.failWithBadUnionFieldAccess(block, union_obj, field_src, field_name);
return @intCast(u32, field_index_usize);
}
fn structFieldIndex(
sema: *Sema,
block: *Block,
unresolved_struct_ty: Type,
field_name: []const u8,
field_src: LazySrcLoc,
) !u32 {
const mod = sema.mod;
const struct_ty = try sema.resolveTypeFields(unresolved_struct_ty);
if (struct_ty.isAnonStruct(mod)) {
return sema.anonStructFieldIndex(block, struct_ty, field_name, field_src);
} else {
const struct_obj = mod.typeToStruct(struct_ty).?;
const field_index_usize = struct_obj.fields.getIndex(field_name) orelse
return sema.failWithBadStructFieldAccess(block, struct_obj, field_src, field_name);
return @intCast(u32, field_index_usize);
}
}
fn anonStructFieldIndex(
sema: *Sema,
block: *Block,
struct_ty: Type,
field_name: []const u8,
field_src: LazySrcLoc,
) !u32 {
const mod = sema.mod;
const anon_struct = mod.intern_pool.indexToKey(struct_ty.ip_index).anon_struct_type;
for (anon_struct.names, 0..) |name, i| {
if (mem.eql(u8, mod.intern_pool.stringToSlice(name), field_name)) {
return @intCast(u32, i);
}
}
return sema.fail(block, field_src, "no field named '{s}' in anonymous struct '{}'", .{
field_name, struct_ty.fmt(sema.mod),
});
}
fn queueFullTypeResolution(sema: *Sema, ty: Type) !void {
const inst_ref = try sema.addType(ty);
try sema.types_to_resolve.append(sema.gpa, inst_ref);
}
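/// Comptime integer addition at arbitrary precision. Vectors are handled
/// elementwise.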
fn intAdd(sema: *Sema, lhs: Value, rhs: Value, ty: Type) !Value {
const mod = sema.mod;
if (ty.zigTypeTag(mod) == .Vector) {
const result_data = try sema.arena.alloc(Value, ty.vectorLen(mod));
const scalar_ty = ty.scalarType(mod);
for (result_data, 0..) |*scalar, i| {
const lhs_elem = try lhs.elemValue(mod, i);
const rhs_elem = try rhs.elemValue(mod, i);
scalar.* = try sema.intAddScalar(lhs_elem, rhs_elem, scalar_ty);
}
return Value.Tag.aggregate.create(sema.arena, result_data);
}
return sema.intAddScalar(lhs, rhs, ty);
}
fn intAddScalar(sema: *Sema, lhs: Value, rhs: Value, scalar_ty: Type) !Value {
const mod = sema.mod;
// TODO is this a performance issue? maybe we should try the operation without
// resorting to BigInt first.
var lhs_space: Value.BigIntSpace = undefined;
var rhs_space: Value.BigIntSpace = undefined;
const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, mod, sema);
const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, sema);
const limbs = try sema.arena.alloc(
std.math.big.Limb,
std.math.max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1,
);
var result_bigint = std.math.big.int.Mutable{ .limbs = limbs, .positive = undefined, .len = undefined };
result_bigint.add(lhs_bigint, rhs_bigint);
return mod.intValue_big(scalar_ty, result_bigint.toConst());
}
/// Supports both floats and ints; handles undefined.
fn numberAddWrapScalar(
sema: *Sema,
lhs: Value,
rhs: Value,
ty: Type,
) !Value {
const mod = sema.mod;
if (lhs.isUndef(mod) or rhs.isUndef(mod)) return Value.undef;
if (ty.zigTypeTag(mod) == .ComptimeInt) {
return sema.intAdd(lhs, rhs, ty);
}
if (ty.isAnyFloat()) {
return Value.floatAdd(lhs, rhs, ty, sema.arena, mod);
}
const overflow_result = try sema.intAddWithOverflow(lhs, rhs, ty);
return overflow_result.wrapped_result;
}
fn intSub(sema: *Sema, lhs: Value, rhs: Value, ty: Type) !Value {
const mod = sema.mod;
if (ty.zigTypeTag(mod) == .Vector) {
const result_data = try sema.arena.alloc(Value, ty.vectorLen(mod));
const scalar_ty = ty.scalarType(mod);
for (result_data, 0..) |*scalar, i| {
const lhs_elem = try lhs.elemValue(sema.mod, i);
const rhs_elem = try rhs.elemValue(sema.mod, i);
scalar.* = try sema.intSubScalar(lhs_elem, rhs_elem, scalar_ty);
}
return Value.Tag.aggregate.create(sema.arena, result_data);
}
return sema.intSubScalar(lhs, rhs, ty);
}
fn intSubScalar(sema: *Sema, lhs: Value, rhs: Value, scalar_ty: Type) !Value {
const mod = sema.mod;
// TODO is this a performance issue? maybe we should try the operation without
// resorting to BigInt first.
var lhs_space: Value.BigIntSpace = undefined;
var rhs_space: Value.BigIntSpace = undefined;
const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, mod, sema);
const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, sema);
const limbs = try sema.arena.alloc(
std.math.big.Limb,
std.math.max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1,
);
var result_bigint = std.math.big.int.Mutable{ .limbs = limbs, .positive = undefined, .len = undefined };
result_bigint.sub(lhs_bigint, rhs_bigint);
return mod.intValue_big(scalar_ty, result_bigint.toConst());
}
/// Supports both floats and ints; handles undefined.
fn numberSubWrapScalar(
sema: *Sema,
lhs: Value,
rhs: Value,
ty: Type,
) !Value {
const mod = sema.mod;
if (lhs.isUndef(mod) or rhs.isUndef(mod)) return Value.undef;
if (ty.zigTypeTag(mod) == .ComptimeInt) {
return sema.intSub(lhs, rhs, ty);
}
if (ty.isAnyFloat()) {
return Value.floatSub(lhs, rhs, ty, sema.arena, mod);
}
const overflow_result = try sema.intSubWithOverflow(lhs, rhs, ty);
return overflow_result.wrapped_result;
}
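/// Twos-complement wrapping subtraction, returning the wrapped result along
/// with an overflow bit. Vectors are handled elementwise.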
fn intSubWithOverflow(
sema: *Sema,
lhs: Value,
rhs: Value,
ty: Type,
) !Value.OverflowArithmeticResult {
const mod = sema.mod;
if (ty.zigTypeTag(mod) == .Vector) {
const overflowed_data = try sema.arena.alloc(Value, ty.vectorLen(mod));
const result_data = try sema.arena.alloc(Value, ty.vectorLen(mod));
for (result_data, 0..) |*scalar, i| {
const lhs_elem = try lhs.elemValue(sema.mod, i);
const rhs_elem = try rhs.elemValue(sema.mod, i);
const of_math_result = try sema.intSubWithOverflowScalar(lhs_elem, rhs_elem, ty.scalarType(mod));
overflowed_data[i] = of_math_result.overflow_bit;
scalar.* = of_math_result.wrapped_result;
}
return Value.OverflowArithmeticResult{
.overflow_bit = try Value.Tag.aggregate.create(sema.arena, overflowed_data),
.wrapped_result = try Value.Tag.aggregate.create(sema.arena, result_data),
};
}
return sema.intSubWithOverflowScalar(lhs, rhs, ty);
}
fn intSubWithOverflowScalar(
sema: *Sema,
lhs: Value,
rhs: Value,
ty: Type,
) !Value.OverflowArithmeticResult {
const mod = sema.mod;
const info = ty.intInfo(mod);
var lhs_space: Value.BigIntSpace = undefined;
var rhs_space: Value.BigIntSpace = undefined;
const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, mod, sema);
const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, sema);
const limbs = try sema.arena.alloc(
std.math.big.Limb,
std.math.big.int.calcTwosCompLimbCount(info.bits),
);
var result_bigint = std.math.big.int.Mutable{ .limbs = limbs, .positive = undefined, .len = undefined };
const overflowed = result_bigint.subWrap(lhs_bigint, rhs_bigint, info.signedness, info.bits);
const wrapped_result = try mod.intValue_big(ty, result_bigint.toConst());
return Value.OverflowArithmeticResult{
.overflow_bit = Value.boolToInt(overflowed),
.wrapped_result = wrapped_result,
};
}
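/// Converts a comptime-known float value to `int_ty`, emitting a compile
/// error for NaN, Inf, or values that do not fit. Vectors are handled
/// elementwise.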
fn floatToInt(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
val: Value,
float_ty: Type,
int_ty: Type,
) CompileError!Value {
const mod = sema.mod;
if (float_ty.zigTypeTag(mod) == .Vector) {
const elem_ty = float_ty.childType(mod);
const result_data = try sema.arena.alloc(Value, float_ty.vectorLen(mod));
for (result_data, 0..) |*scalar, i| {
const elem_val = try val.elemValue(sema.mod, i);
scalar.* = try sema.floatToIntScalar(block, src, elem_val, elem_ty, int_ty.scalarType(mod));
}
return Value.Tag.aggregate.create(sema.arena, result_data);
}
return sema.floatToIntScalar(block, src, val, float_ty, int_ty);
}
// `float` is expected to be finite (not NaN or Inf).
fn float128IntPartToBigInt(
arena: Allocator,
float: f128,
) !std.math.big.int.Managed {
const is_negative = std.math.signbit(float);
const floored = @floor(@fabs(float));
var rational = try std.math.big.Rational.init(arena);
defer rational.q.deinit();
rational.setFloat(f128, floored) catch |err| switch (err) {
error.NonFiniteFloat => unreachable,
error.OutOfMemory => return error.OutOfMemory,
};
// The float is reduced in rational.setFloat, so we assert that the denominator is equal to one.
const big_one = std.math.big.int.Const{ .limbs = &.{1}, .positive = true };
assert(rational.q.toConst().eqAbs(big_one));
if (is_negative) {
rational.negate();
}
return rational.p;
}
fn floatToIntScalar(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
val: Value,
float_ty: Type,
int_ty: Type,
) CompileError!Value {
const mod = sema.mod;
const float = val.toFloat(f128, mod);
if (std.math.isNan(float)) {
return sema.fail(block, src, "float value NaN cannot be stored in integer type '{}'", .{
int_ty.fmt(sema.mod),
});
}
if (std.math.isInf(float)) {
return sema.fail(block, src, "float value Inf cannot be stored in integer type '{}'", .{
int_ty.fmt(sema.mod),
});
}
var big_int = try float128IntPartToBigInt(sema.arena, float);
defer big_int.deinit();
const result = try mod.intValue_big(int_ty, big_int.toConst());
if (!(try sema.intFitsInType(result, int_ty, null))) {
return sema.fail(block, src, "float value '{}' cannot be stored in integer type '{}'", .{
val.fmtValue(float_ty, sema.mod), int_ty.fmt(sema.mod),
});
}
return result;
}
/// Asserts the value is an integer, and the destination type is ComptimeInt or Int.
/// Vectors are also accepted. Vector results are reduced with AND.
///
/// If provided, `vector_index` reports the first element that failed the range check.
fn intFitsInType(
sema: *Sema,
val: Value,
ty: Type,
vector_index: ?*usize,
) CompileError!bool {
if (ty.ip_index == .comptime_int_type) return true;
const mod = sema.mod;
switch (val.ip_index) {
.undef,
.zero,
.zero_usize,
.zero_u8,
=> return true,
.none => switch (val.tag()) {
.lazy_align => {
const info = ty.intInfo(mod);
const max_needed_bits = @as(u16, 16) + @boolToInt(info.signedness == .signed);
// If it is u16 or bigger we know the alignment fits without resolving it.
if (info.bits >= max_needed_bits) return true;
const x = try sema.typeAbiAlignment(val.castTag(.lazy_align).?.data);
if (x == 0) return true;
const actual_needed_bits = std.math.log2(x) + 1 + @boolToInt(info.signedness == .signed);
return info.bits >= actual_needed_bits;
},
.lazy_size => {
const info = ty.intInfo(mod);
const max_needed_bits = @as(u16, 64) + @boolToInt(info.signedness == .signed);
// If it is u64 or bigger we know the size fits without resolving it.
if (info.bits >= max_needed_bits) return true;
const x = try sema.typeAbiSize(val.castTag(.lazy_size).?.data);
if (x == 0) return true;
const actual_needed_bits = std.math.log2(x) + 1 + @boolToInt(info.signedness == .signed);
return info.bits >= actual_needed_bits;
},
.the_only_possible_value => {
assert(ty.intInfo(mod).bits == 0);
return true;
},
.decl_ref_mut,
.extern_fn,
.decl_ref,
.function,
.variable,
=> {
const info = ty.intInfo(mod);
const target = mod.getTarget();
const ptr_bits = target.ptrBitWidth();
return switch (info.signedness) {
.signed => info.bits > ptr_bits,
.unsigned => info.bits >= ptr_bits,
};
},
.aggregate => {
assert(ty.zigTypeTag(mod) == .Vector);
for (val.castTag(.aggregate).?.data, 0..) |elem, i| {
if (!(try sema.intFitsInType(elem, ty.scalarType(mod), null))) {
if (vector_index) |some| some.* = i;
return false;
}
}
return true;
},
else => unreachable,
},
else => switch (mod.intern_pool.indexToKey(val.ip_index)) {
.int => |int| {
const info = ty.intInfo(mod);
var buffer: InternPool.Key.Int.Storage.BigIntSpace = undefined;
const big_int = int.storage.toBigInt(&buffer);
return big_int.fitsInTwosComp(info.signedness, info.bits);
},
else => unreachable,
},
}
}
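/// Returns true if `0 <= int_val < end`.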
fn intInRange(sema: *Sema, tag_ty: Type, int_val: Value, end: usize) !bool {
const mod = sema.mod;
if (!(try int_val.compareAllWithZeroAdvanced(.gte, sema))) return false;
const end_val = try mod.intValue(tag_ty, end);
if (!(try sema.compareAll(int_val, .lt, end_val, tag_ty))) return false;
return true;
}
/// Asserts the type is an enum.
fn enumHasInt(sema: *Sema, ty: Type, int: Value) CompileError!bool {
const mod = sema.mod;
const enum_type = mod.intern_pool.indexToKey(ty.ip_index).enum_type;
assert(enum_type.tag_mode != .nonexhaustive);
// The `tagValueIndex` function call below relies on the type being the integer tag type.
// `getCoerced` assumes the value will fit the new type.
if (!(try sema.intFitsInType(int, enum_type.tag_ty.toType(), null))) return false;
const int_coerced = try mod.intern_pool.getCoerced(sema.gpa, int.ip_index, enum_type.tag_ty);
return enum_type.tagValueIndex(&mod.intern_pool, int_coerced) != null;
}
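/// Twos-complement wrapping addition, returning the wrapped result along
/// with an overflow bit. Vectors are handled elementwise.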
fn intAddWithOverflow(
sema: *Sema,
lhs: Value,
rhs: Value,
ty: Type,
) !Value.OverflowArithmeticResult {
const mod = sema.mod;
if (ty.zigTypeTag(mod) == .Vector) {
const overflowed_data = try sema.arena.alloc(Value, ty.vectorLen(mod));
const result_data = try sema.arena.alloc(Value, ty.vectorLen(mod));
for (result_data, 0..) |*scalar, i| {
const lhs_elem = try lhs.elemValue(sema.mod, i);
const rhs_elem = try rhs.elemValue(sema.mod, i);
const of_math_result = try sema.intAddWithOverflowScalar(lhs_elem, rhs_elem, ty.scalarType(mod));
overflowed_data[i] = of_math_result.overflow_bit;
scalar.* = of_math_result.wrapped_result;
}
return Value.OverflowArithmeticResult{
.overflow_bit = try Value.Tag.aggregate.create(sema.arena, overflowed_data),
.wrapped_result = try Value.Tag.aggregate.create(sema.arena, result_data),
};
}
return sema.intAddWithOverflowScalar(lhs, rhs, ty);
}
fn intAddWithOverflowScalar(
sema: *Sema,
lhs: Value,
rhs: Value,
ty: Type,
) !Value.OverflowArithmeticResult {
const mod = sema.mod;
const info = ty.intInfo(mod);
var lhs_space: Value.BigIntSpace = undefined;
var rhs_space: Value.BigIntSpace = undefined;
const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, mod, sema);
const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, sema);
const limbs = try sema.arena.alloc(
std.math.big.Limb,
std.math.big.int.calcTwosCompLimbCount(info.bits),
);
var result_bigint = std.math.big.int.Mutable{ .limbs = limbs, .positive = undefined, .len = undefined };
const overflowed = result_bigint.addWrap(lhs_bigint, rhs_bigint, info.signedness, info.bits);
const result = try mod.intValue_big(ty, result_bigint.toConst());
return Value.OverflowArithmeticResult{
.overflow_bit = Value.boolToInt(overflowed),
.wrapped_result = result,
};
}
/// Asserts the values are comparable. Both operands have type `ty`.
/// For vectors, returns true if the comparison is true for ALL elements.
///
/// Note that `!compareAll(.eq, ...) != compareAll(.neq, ...)`
fn compareAll(
sema: *Sema,
lhs: Value,
op: std.math.CompareOperator,
rhs: Value,
ty: Type,
) CompileError!bool {
const mod = sema.mod;
if (ty.zigTypeTag(mod) == .Vector) {
var i: usize = 0;
while (i < ty.vectorLen(mod)) : (i += 1) {
const lhs_elem = try lhs.elemValue(sema.mod, i);
const rhs_elem = try rhs.elemValue(sema.mod, i);
if (!(try sema.compareScalar(lhs_elem, op, rhs_elem, ty.scalarType(mod)))) {
return false;
}
}
return true;
}
return sema.compareScalar(lhs, op, rhs, ty);
}
/// Asserts the values are comparable. Both operands have type `ty`.
fn compareScalar(
sema: *Sema,
lhs: Value,
op: std.math.CompareOperator,
rhs: Value,
ty: Type,
) CompileError!bool {
switch (op) {
.eq => return sema.valuesEqual(lhs, rhs, ty),
.neq => return !(try sema.valuesEqual(lhs, rhs, ty)),
else => return Value.compareHeteroAdvanced(lhs, op, rhs, sema.mod, sema),
}
}
fn valuesEqual(
sema: *Sema,
lhs: Value,
rhs: Value,
ty: Type,
) CompileError!bool {
return Value.eqlAdvanced(lhs, ty, rhs, ty, sema.mod, sema);
}
/// Asserts the values are comparable vectors of type `ty`.
fn compareVector(
sema: *Sema,
lhs: Value,
op: std.math.CompareOperator,
rhs: Value,
ty: Type,
) !Value {
const mod = sema.mod;
assert(ty.zigTypeTag(mod) == .Vector);
const result_data = try sema.arena.alloc(Value, ty.vectorLen(mod));
for (result_data, 0..) |*scalar, i| {
const lhs_elem = try lhs.elemValue(sema.mod, i);
const rhs_elem = try rhs.elemValue(sema.mod, i);
const res_bool = try sema.compareScalar(lhs_elem, op, rhs_elem, ty.scalarType(mod));
scalar.* = Value.makeBool(res_bool);
}
return Value.Tag.aggregate.create(sema.arena, result_data);
}
/// Returns the type of a pointer to an element.
/// Asserts that the type is a pointer, and that the element type is indexable.
/// For *[N]T, returns *T.
/// For [*]T, returns *T.
/// For []T, returns *T.
/// Handles const-ness and address spaces in particular.
/// This code is duplicated in `analyzePtrArithmetic`.
fn elemPtrType(sema: *Sema, ptr_ty: Type, offset: ?usize) !Type {
const mod = sema.mod;
const ptr_info = ptr_ty.ptrInfo(mod);
const elem_ty = ptr_ty.elemType2(mod);
const allow_zero = ptr_info.@"allowzero" and (offset orelse 0) == 0;
const parent_ty = ptr_ty.childType(mod);
const VI = Type.Payload.Pointer.Data.VectorIndex;
const vector_info: struct {
host_size: u16 = 0,
alignment: u32 = 0,
vector_index: VI = .none,
} = if (parent_ty.isVector(mod) and ptr_info.size == .One) blk: {
const elem_bits = elem_ty.bitSize(mod);
if (elem_bits == 0) break :blk .{};
const is_packed = elem_bits < 8 or !std.math.isPowerOfTwo(elem_bits);
if (!is_packed) break :blk .{};
break :blk .{
.host_size = @intCast(u16, parent_ty.arrayLen(mod)),
.alignment = @intCast(u16, parent_ty.abiAlignment(mod)),
.vector_index = if (offset) |some| @intToEnum(VI, some) else .runtime,
};
} else .{};
const alignment: u32 = a: {
// Calculate the new pointer alignment.
if (ptr_info.@"align" == 0) {
if (vector_info.alignment != 0) break :a vector_info.alignment;
// ABI-aligned pointer. Any pointer arithmetic maintains the same ABI-alignedness.
break :a 0;
}
// If the addend is not a comptime-known value we can still count on
// it being a multiple of the type size.
const elem_size = try sema.typeAbiSize(elem_ty);
const addend = if (offset) |off| elem_size * off else elem_size;
// The resulting pointer is aligned to the greatest power of two that
// divides both the offset (an arbitrary number) and the alignment
// factor (always a non-zero power of two).
const new_align = @as(u32, 1) << @intCast(u5, @ctz(addend | ptr_info.@"align"));
break :a new_align;
};
return try Type.ptr(sema.arena, sema.mod, .{
.pointee_type = elem_ty,
.mutable = ptr_info.mutable,
.@"addrspace" = ptr_info.@"addrspace",
.@"allowzero" = allow_zero,
.@"volatile" = ptr_info.@"volatile",
.@"align" = alignment,
.host_size = vector_info.host_size,
.vector_index = vector_info.vector_index,
});
}
/// Merge lhs with rhs.
/// Asserts that lhs and rhs are both error sets and are resolved.
fn errorSetMerge(sema: *Sema, lhs: Type, rhs: Type) !Type {
const mod = sema.mod;
const arena = sema.arena;
const lhs_names = lhs.errorSetNames(mod);
const rhs_names = rhs.errorSetNames(mod);
var names: Module.Fn.InferredErrorSet.NameMap = .{};
try names.ensureUnusedCapacity(arena, lhs_names.len);
for (lhs_names) |name| {
names.putAssumeCapacityNoClobber(name, {});
}
for (rhs_names) |name| {
try names.put(arena, name, {});
}
return mod.errorSetFromUnsortedNames(names.keys());
}