Stage 2: Support inst.func() syntax (#9827)

* Merge call zir instructions to make space for field_call
* Fix bug with comptime-known anytype args
* Delete the param_type zir instruction
* Move some passing tests to stage 2
* Implement a.b() function calls
* Add field_call_bind support for call and field builtins
Martin Wickham, 2021-09-28 12:00:35 -05:00, committed by GitHub
parent 60b6e74468
commit 1cc5d4e758
12 changed files with 629 additions and 219 deletions
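For orientation, the feature itself: `a.b()` resolves `b` as a decl of `a`'s type when `a` has no field named `b`, and passes `a` as the first argument. A minimal sketch of the syntax this commit makes work in stage 2 (illustrative names, not from the diff):

const std = @import("std");

const Counter = struct {
    count: u32 = 0,

    fn inc(self: *Counter) void {
        self.count += 1;
    }
};

test "inst.func() call syntax" {
    var c = Counter{};
    Counter.inc(&c); // explicit form, already supported
    c.inc(); // method-call sugar, implemented by this commit
    try std.testing.expect(c.count == 2);
}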

src/AstGen.zig

@ -56,6 +56,7 @@ fn addExtraAssumeCapacity(astgen: *AstGen, extra: anytype) u32 {
u32 => @field(extra, field.name),
Zir.Inst.Ref => @enumToInt(@field(extra, field.name)),
i32 => @bitCast(u32, @field(extra, field.name)),
Zir.Inst.Call.Flags => @bitCast(u32, @field(extra, field.name)),
else => @compileError("bad field type"),
});
}
@ -1934,11 +1935,14 @@ fn unusedResultExpr(gz: *GenZir, scope: *Scope, statement: Ast.Node.Index) Inner
// in the above while loop.
const zir_tags = gz.astgen.instructions.items(.tag);
switch (zir_tags[inst]) {
// For some instructions, swap in a slightly different ZIR tag
// For some instructions, modify the zir data
// so we can avoid a separate ensure_result_used instruction.
.call_chkused => unreachable,
.call => {
zir_tags[inst] = .call_chkused;
const extra_index = gz.astgen.instructions.items(.data)[inst].pl_node.payload_index;
const slot = &gz.astgen.extra.items[extra_index];
var flags = @bitCast(Zir.Inst.Call.Flags, slot.*);
flags.ensure_result_used = true;
slot.* = @bitCast(u32, flags);
break :b true;
},
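This hunk replaces the old tag swap (`.call` to `.call_chkused`) with an in-place bit flip: because `Flags` is the first field of the `Call` payload (see the Zir.zig hunk below), `payload_index` points directly at the flags word. A hedged sketch of the round trip this relies on, with a `Flags` mirroring the definition added later in this commit and the era's two-argument `@bitCast`:

const std = @import("std");

const Flags = packed struct {
    packed_modifier: u3,
    ensure_result_used: bool = false,
    args_len: u28,
};

test "toggle a flag stored in a raw u32 slot" {
    var slot: u32 = @bitCast(u32, Flags{ .packed_modifier = 0, .args_len = 2 });
    var flags = @bitCast(Flags, slot); // read the word back as flags
    flags.ensure_result_used = true; // what unusedResultExpr sets
    slot = @bitCast(u32, flags); // write it back in place
    try std.testing.expect(@bitCast(Flags, slot).ensure_result_used);
}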
@ -1976,9 +1980,6 @@ fn unusedResultExpr(gz: *GenZir, scope: *Scope, statement: Ast.Node.Index) Inner
.bool_br_and,
.bool_br_or,
.bool_not,
.call_compile_time,
.call_nosuspend,
.call_async,
.cmp_lt,
.cmp_lte,
.cmp_eq,
@ -1996,8 +1997,10 @@ fn unusedResultExpr(gz: *GenZir, scope: *Scope, statement: Ast.Node.Index) Inner
.elem_val_node,
.field_ptr,
.field_val,
.field_call_bind,
.field_ptr_named,
.field_val_named,
.field_call_bind_named,
.func,
.func_inferred,
.int,
@ -2012,7 +2015,6 @@ fn unusedResultExpr(gz: *GenZir, scope: *Scope, statement: Ast.Node.Index) Inner
.mod_rem,
.mul,
.mulwrap,
.param_type,
.ref,
.shl,
.shr,
@ -4968,6 +4970,21 @@ fn fieldAccess(
scope: *Scope,
rl: ResultLoc,
node: Ast.Node.Index,
) InnerError!Zir.Inst.Ref {
if (rl == .ref) {
return addFieldAccess(.field_ptr, gz, scope, .ref, node);
} else {
const access = try addFieldAccess(.field_val, gz, scope, .none_or_ref, node);
return rvalue(gz, rl, access, node);
}
}
fn addFieldAccess(
tag: Zir.Inst.Tag,
gz: *GenZir,
scope: *Scope,
lhs_rl: ResultLoc,
node: Ast.Node.Index,
) InnerError!Zir.Inst.Ref {
const astgen = gz.astgen;
const tree = astgen.tree;
@ -4978,16 +4995,11 @@ fn fieldAccess(
const dot_token = main_tokens[node];
const field_ident = dot_token + 1;
const str_index = try astgen.identAsString(field_ident);
switch (rl) {
.ref => return gz.addPlNode(.field_ptr, node, Zir.Inst.Field{
.lhs = try expr(gz, scope, .ref, object_node),
.field_name_start = str_index,
}),
else => return rvalue(gz, rl, try gz.addPlNode(.field_val, node, Zir.Inst.Field{
.lhs = try expr(gz, scope, .none_or_ref, object_node),
.field_name_start = str_index,
}), node),
}
return gz.addPlNode(tag, node, Zir.Inst.Field{
.lhs = try expr(gz, scope, lhs_rl, object_node),
.field_name_start = str_index,
});
}
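Both result-location cases now share `addFieldAccess`; only the ZIR tag and the lhs result location differ. In source terms (hypothetical snippet, with comments naming the instruction AstGen picks):

const S = struct { b: u32 };

test "field access lowering" {
    var a = S{ .b = 1 };
    const p = &a.b; // rl == .ref: lowers to field_ptr
    const v = a.b; // any other rl: field_val, then rvalue()
    _ = p;
    _ = v;
}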
fn arrayAccess(
@ -7169,16 +7181,15 @@ fn builtinCall(
return rvalue(gz, rl, result, node);
},
.field => {
const field_name = try comptimeExpr(gz, scope, .{ .ty = .const_slice_u8_type }, params[1]);
if (rl == .ref) {
return gz.addPlNode(.field_ptr_named, node, Zir.Inst.FieldNamed{
.lhs = try expr(gz, scope, .ref, params[0]),
.field_name = field_name,
.field_name = try comptimeExpr(gz, scope, .{ .ty = .const_slice_u8_type }, params[1]),
});
}
const result = try gz.addPlNode(.field_val_named, node, Zir.Inst.FieldNamed{
.lhs = try expr(gz, scope, .none, params[0]),
.field_name = field_name,
.field_name = try comptimeExpr(gz, scope, .{ .ty = .const_slice_u8_type }, params[1]),
});
return rvalue(gz, rl, result, node);
},
@ -7554,7 +7565,7 @@ fn builtinCall(
},
.call => {
const options = try comptimeExpr(gz, scope, .{ .ty = .call_options_type }, params[0]);
const callee = try expr(gz, scope, .none, params[1]);
const callee = try calleeExpr(gz, scope, params[1]);
const args = try expr(gz, scope, .none, params[2]);
const result = try gz.addPlNode(.builtin_call, node, Zir.Inst.BuiltinCall{
.options = options,
@ -7897,20 +7908,16 @@ fn callExpr(
call: Ast.full.Call,
) InnerError!Zir.Inst.Ref {
const astgen = gz.astgen;
const lhs = try expr(gz, scope, .none, call.ast.fn_expr);
const callee = try calleeExpr(gz, scope, call.ast.fn_expr);
const args = try astgen.gpa.alloc(Zir.Inst.Ref, call.ast.params.len);
defer astgen.gpa.free(args);
for (call.ast.params) |param_node, i| {
const param_type = try gz.add(.{
.tag = .param_type,
.data = .{ .param_type = .{
.callee = lhs,
.param_index = @intCast(u32, i),
} },
});
args[i] = try expr(gz, scope, .{ .coerced_ty = param_type }, param_node);
// Parameters are always temporary values; they have no
// meaningful result location. Sema will coerce them.
args[i] = try expr(gz, scope, .none, param_node);
}
const modifier: std.builtin.CallOptions.Modifier = blk: {
@ -7925,20 +7932,72 @@ fn callExpr(
}
break :blk .auto;
};
const result: Zir.Inst.Ref = res: {
const tag: Zir.Inst.Tag = switch (modifier) {
.auto => .call,
.async_kw => .call_async,
.never_tail => unreachable,
.never_inline => unreachable,
.no_async => .call_nosuspend,
.always_tail => unreachable,
.always_inline => unreachable,
.compile_time => .call_compile_time,
};
break :res try gz.addCall(tag, lhs, args, node);
};
return rvalue(gz, rl, result, node); // TODO function call with result location
const call_inst = try gz.addCall(modifier, callee, args, node);
return rvalue(gz, rl, call_inst, node); // TODO function call with result location
}
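With the per-modifier tags deleted, every call form lowers to the single `.call` tag and the modifier travels in `Call.Flags`. For reference, a few of the source forms involved (behavior unchanged; only the ZIR encoding differs):

fn one() u32 {
    return 1;
}

test "call modifiers share one ZIR tag" {
    _ = one(); // modifier .auto
    _ = comptime one(); // modifier .compile_time
    _ = nosuspend one(); // modifier .no_async
}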
/// calleeExpr generates the function part of a call expression (f in f(x)), or the
/// callee argument to the @call() builtin. If the lhs is a field access or the
/// @field() builtin, we need to generate a special field_call_bind instruction
instead of the normal field_val or field_ptr. If this is an inst.func() call,
/// this instruction will capture the value of the first argument before evaluating
/// the other arguments. We need to use .ref here to guarantee we will be able to
/// promote an lvalue to an address if the first parameter requires it. This
/// unfortunately also means we need to take a reference to any types on the lhs.
fn calleeExpr(
gz: *GenZir,
scope: *Scope,
node: Ast.Node.Index,
) InnerError!Zir.Inst.Ref {
const astgen = gz.astgen;
const tree = astgen.tree;
const tag = tree.nodes.items(.tag)[node];
switch (tag) {
.field_access => return addFieldAccess(.field_call_bind, gz, scope, .ref, node),
.builtin_call_two,
.builtin_call_two_comma,
.builtin_call,
.builtin_call_comma,
=> {
const node_datas = tree.nodes.items(.data);
const main_tokens = tree.nodes.items(.main_token);
const builtin_token = main_tokens[node];
const builtin_name = tree.tokenSlice(builtin_token);
var inline_params: [2]Ast.Node.Index = undefined;
var params: []Ast.Node.Index = switch (tag) {
.builtin_call,
.builtin_call_comma,
=> tree.extra_data[node_datas[node].lhs..node_datas[node].rhs],
.builtin_call_two,
.builtin_call_two_comma,
=> blk: {
inline_params = .{ node_datas[node].lhs, node_datas[node].rhs };
const len: usize = if (inline_params[0] == 0) @as(usize, 0) else if (inline_params[1] == 0) @as(usize, 1) else @as(usize, 2);
break :blk inline_params[0..len];
},
else => unreachable,
};
// If anything is wrong, fall back to builtinCall.
// It will emit any necessary compile errors and notes.
if (std.mem.eql(u8, builtin_name, "@field") and params.len == 2) {
const lhs = try expr(gz, scope, .ref, params[0]);
const field_name = try comptimeExpr(gz, scope, .{ .ty = .const_slice_u8_type }, params[1]);
return gz.addPlNode(.field_call_bind_named, node, Zir.Inst.FieldNamed{
.lhs = lhs,
.field_name = field_name,
});
}
return builtinCall(gz, scope, .none, node, params);
},
else => return expr(gz, scope, .none, node),
}
}
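The `@field` special case means a comptime-known field name can also become a bound callee. A usage sketch in the spirit of the behavior tests added at the end of this commit:

const std = @import("std");

const S = struct {
    x: u32,
    fn get(self: S) u32 {
        return self.x;
    }
};

test "@field as a callee" {
    var v = S{ .x = 3 };
    // calleeExpr emits field_call_bind_named here, not field_val_named:
    try std.testing.expect(@field(v, "get")() == 3);
}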
pub const simple_types = std.ComptimeStringMap(Zir.Inst.Ref, .{
@ -9607,7 +9666,7 @@ const GenZir = struct {
fn addCall(
gz: *GenZir,
tag: Zir.Inst.Tag,
modifier: std.builtin.CallOptions.Modifier,
callee: Zir.Inst.Ref,
args: []const Zir.Inst.Ref,
/// Absolute node index. This function does the conversion to offset from Decl.
@ -9616,20 +9675,24 @@ const GenZir = struct {
assert(callee != .none);
assert(src_node != 0);
const gpa = gz.astgen.gpa;
const Call = Zir.Inst.Call;
try gz.instructions.ensureUnusedCapacity(gpa, 1);
try gz.astgen.instructions.ensureUnusedCapacity(gpa, 1);
try gz.astgen.extra.ensureUnusedCapacity(gpa, @typeInfo(Zir.Inst.Call).Struct.fields.len +
try gz.astgen.extra.ensureUnusedCapacity(gpa, @typeInfo(Call).Struct.fields.len +
args.len);
const payload_index = gz.astgen.addExtraAssumeCapacity(Zir.Inst.Call{
const payload_index = gz.astgen.addExtraAssumeCapacity(Call{
.callee = callee,
.args_len = @intCast(u32, args.len),
.flags = .{
.packed_modifier = @intCast(Call.Flags.PackedModifier, @enumToInt(modifier)),
.args_len = @intCast(Call.Flags.PackedArgsLen, args.len),
},
});
gz.astgen.appendRefsAssumeCapacity(args);
const new_index = @intCast(Zir.Inst.Index, gz.astgen.instructions.len);
gz.astgen.instructions.appendAssumeCapacity(.{
.tag = tag,
.tag = .call,
.data = .{ .pl_node = .{
.src_node = gz.nodeIndexToRelative(src_node),
.payload_index = payload_index,

src/Sema.zig

@ -185,11 +185,7 @@ pub fn analyzeBody(
.bool_br_and => try sema.zirBoolBr(block, inst, false),
.bool_br_or => try sema.zirBoolBr(block, inst, true),
.c_import => try sema.zirCImport(block, inst),
.call => try sema.zirCall(block, inst, .auto, false),
.call_chkused => try sema.zirCall(block, inst, .auto, true),
.call_compile_time => try sema.zirCall(block, inst, .compile_time, false),
.call_nosuspend => try sema.zirCall(block, inst, .no_async, false),
.call_async => try sema.zirCall(block, inst, .async_kw, false),
.call => try sema.zirCall(block, inst),
.closure_get => try sema.zirClosureGet(block, inst),
.cmp_lt => try sema.zirCmp(block, inst, .lt),
.cmp_lte => try sema.zirCmp(block, inst, .lte),
@ -223,6 +219,8 @@ pub fn analyzeBody(
.field_ptr_named => try sema.zirFieldPtrNamed(block, inst),
.field_val => try sema.zirFieldVal(block, inst),
.field_val_named => try sema.zirFieldValNamed(block, inst),
.field_call_bind => try sema.zirFieldCallBind(block, inst),
.field_call_bind_named => try sema.zirFieldCallBindNamed(block, inst),
.func => try sema.zirFunc(block, inst, false),
.func_inferred => try sema.zirFunc(block, inst, true),
.import => try sema.zirImport(block, inst),
@ -244,7 +242,6 @@ pub fn analyzeBody(
.optional_payload_unsafe => try sema.zirOptionalPayload(block, inst, false),
.optional_payload_unsafe_ptr => try sema.zirOptionalPayloadPtr(block, inst, false),
.optional_type => try sema.zirOptionalType(block, inst),
.param_type => try sema.zirParamType(block, inst),
.ptr_type => try sema.zirPtrType(block, inst),
.ptr_type_simple => try sema.zirPtrTypeSimple(block, inst),
.ref => try sema.zirRef(block, inst),
@ -2031,45 +2028,6 @@ fn zirStoreNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileE
return sema.storePtr(block, src, ptr, value);
}
fn zirParamType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
const src = sema.src;
const fn_inst_src = sema.src;
const inst_data = sema.code.instructions.items(.data)[inst].param_type;
const fn_inst = sema.resolveInst(inst_data.callee);
const fn_inst_ty = sema.typeOf(fn_inst);
const param_index = inst_data.param_index;
const fn_ty: Type = switch (fn_inst_ty.zigTypeTag()) {
.Fn => fn_inst_ty,
.BoundFn => {
return sema.mod.fail(&block.base, fn_inst_src, "TODO implement zirParamType for method call syntax", .{});
},
else => {
return sema.mod.fail(&block.base, fn_inst_src, "expected function, found '{}'", .{fn_inst_ty});
},
};
const param_count = fn_ty.fnParamLen();
if (param_index >= param_count) {
if (fn_ty.fnIsVarArgs()) {
return sema.addType(Type.initTag(.var_args_param));
}
return sema.mod.fail(&block.base, src, "arg index {d} out of bounds; '{}' has {d} argument(s)", .{
param_index,
fn_ty,
param_count,
});
}
// TODO support generic functions
const param_type = fn_ty.fnParamType(param_index);
return sema.addType(param_type);
}
fn zirStr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
@ -2786,8 +2744,6 @@ fn zirCall(
sema: *Sema,
block: *Scope.Block,
inst: Zir.Inst.Index,
modifier: std.builtin.CallOptions.Modifier,
ensure_result_used: bool,
) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
@ -2796,14 +2752,31 @@ fn zirCall(
const func_src: LazySrcLoc = .{ .node_offset_call_func = inst_data.src_node };
const call_src = inst_data.src();
const extra = sema.code.extraData(Zir.Inst.Call, inst_data.payload_index);
const args = sema.code.refSlice(extra.end, extra.data.args_len);
const args = sema.code.refSlice(extra.end, extra.data.flags.args_len);
const func = sema.resolveInst(extra.data.callee);
// TODO handle function calls of generic functions
const resolved_args = try sema.arena.alloc(Air.Inst.Ref, args.len);
for (args) |zir_arg, i| {
// the args are already casted to the result of a param type instruction.
resolved_args[i] = sema.resolveInst(zir_arg);
const modifier = @intToEnum(std.builtin.CallOptions.Modifier, extra.data.flags.packed_modifier);
const ensure_result_used = extra.data.flags.ensure_result_used;
var func = sema.resolveInst(extra.data.callee);
var resolved_args: []Air.Inst.Ref = undefined;
const func_type = sema.typeOf(func);
// Desugar bound functions here
if (func_type.tag() == .bound_fn) {
const bound_func = try sema.resolveValue(block, func_src, func);
const bound_data = &bound_func.cast(Value.Payload.BoundFn).?.data;
func = bound_data.func_inst;
resolved_args = try sema.arena.alloc(Air.Inst.Ref, args.len + 1);
resolved_args[0] = bound_data.arg0_inst;
for (args) |zir_arg, i| {
resolved_args[i + 1] = sema.resolveInst(zir_arg);
}
} else {
resolved_args = try sema.arena.alloc(Air.Inst.Ref, args.len);
for (args) |zir_arg, i| {
resolved_args[i] = sema.resolveInst(zir_arg);
}
}
return sema.analyzeCall(block, func, func_src, call_src, modifier, ensure_result_used, resolved_args);
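The `bound_fn` branch above performs the actual desugaring: the receiver captured by `field_call_bind` is prepended as argument zero. In source terms the two calls below are equivalent (a sketch, not compiler code):

const std = @import("std");

const S = struct {
    x: u32,
    fn get(self: S) u32 {
        return self.x;
    }
};

test "bound call equals explicit call" {
    var v = S{ .x = 7 };
    try std.testing.expect(v.get() == S.get(v));
}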
@ -3334,14 +3307,16 @@ fn analyzeCall(
}
const arg_src = call_src; // TODO: better source location
const arg = uncasted_args[arg_i];
if (try sema.resolveMaybeUndefVal(block, arg_src, arg)) |arg_val| {
const child_arg = try child_sema.addConstant(sema.typeOf(arg), arg_val);
child_sema.inst_map.putAssumeCapacityNoClobber(inst, child_arg);
} else if (is_comptime) {
return sema.failWithNeededComptime(block, arg_src);
if (is_comptime) {
if (try sema.resolveMaybeUndefVal(block, arg_src, arg)) |arg_val| {
const child_arg = try child_sema.addConstant(sema.typeOf(arg), arg_val);
child_sema.inst_map.putAssumeCapacityNoClobber(inst, child_arg);
} else {
return sema.failWithNeededComptime(block, arg_src);
}
} else if (is_anytype) {
// We insert into the map an instruction which is runtime-known
// but has the type of the comptime argument.
// but has the type of the argument.
const child_arg = try child_block.addArg(sema.typeOf(arg), 0);
child_sema.inst_map.putAssumeCapacityNoClobber(inst, child_arg);
}
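This reordering is the fix for "comptime-known anytype args": an anytype argument that merely happens to be comptime-known must still become a runtime argument unless the parameter is `comptime`. A hypothetical repro under my reading of the diff:

const std = @import("std");

fn identity(x: anytype) @TypeOf(x) {
    return x;
}

test "comptime-known anytype arg stays a runtime argument" {
    const known: u32 = 5; // comptime-known, but `x` is not a comptime param
    try std.testing.expect(identity(known) == 5);
}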
@ -4558,6 +4533,19 @@ fn zirFieldPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileEr
return sema.fieldPtr(block, src, object_ptr, field_name, field_name_src);
}
fn zirFieldCallBind(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
const field_name_src: LazySrcLoc = .{ .node_offset_field_name = inst_data.src_node };
const extra = sema.code.extraData(Zir.Inst.Field, inst_data.payload_index).data;
const field_name = sema.code.nullTerminatedString(extra.field_name_start);
const object_ptr = sema.resolveInst(extra.lhs);
return sema.fieldCallBind(block, src, object_ptr, field_name, field_name_src);
}
fn zirFieldValNamed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
@ -4584,6 +4572,19 @@ fn zirFieldPtrNamed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Comp
return sema.fieldPtr(block, src, object_ptr, field_name, field_name_src);
}
fn zirFieldCallBindNamed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
const field_name_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
const extra = sema.code.extraData(Zir.Inst.FieldNamed, inst_data.payload_index).data;
const object_ptr = sema.resolveInst(extra.lhs);
const field_name = try sema.resolveConstString(block, field_name_src, extra.field_name);
return sema.fieldCallBind(block, src, object_ptr, field_name, field_name_src);
}
fn zirIntCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
@ -9484,6 +9485,148 @@ fn fieldPtr(
return mod.fail(&block.base, src, "type '{}' does not support field access", .{object_ty});
}
fn fieldCallBind(
sema: *Sema,
block: *Scope.Block,
src: LazySrcLoc,
raw_ptr: Air.Inst.Ref,
field_name: []const u8,
field_name_src: LazySrcLoc,
) CompileError!Air.Inst.Ref {
// When editing this function, note that there is corresponding logic to be edited
// in `fieldVal`. This function takes a pointer and returns a pointer.
const mod = sema.mod;
const raw_ptr_src = src; // TODO better source location
const raw_ptr_ty = sema.typeOf(raw_ptr);
const inner_ty = if (raw_ptr_ty.zigTypeTag() == .Pointer and raw_ptr_ty.ptrSize() == .One)
raw_ptr_ty.childType()
else
return mod.fail(&block.base, raw_ptr_src, "expected single pointer, found '{}'", .{raw_ptr_ty});
// Optionally dereference a second pointer to get the concrete type.
const is_double_ptr = inner_ty.zigTypeTag() == .Pointer and inner_ty.ptrSize() == .One;
const concrete_ty = if (is_double_ptr) inner_ty.childType() else inner_ty;
const ptr_ty = if (is_double_ptr) inner_ty else raw_ptr_ty;
const object_ptr = if (is_double_ptr)
try sema.analyzeLoad(block, src, raw_ptr, src)
else
raw_ptr;
const arena = sema.arena;
find_field: {
switch (concrete_ty.zigTypeTag()) {
.Struct => {
const struct_ty = try sema.resolveTypeFields(block, src, concrete_ty);
const struct_obj = struct_ty.castTag(.@"struct").?.data;
const field_index = struct_obj.fields.getIndex(field_name) orelse
break :find_field;
const field = struct_obj.fields.values()[field_index];
const ptr_field_ty = try Type.ptr(arena, .{
.pointee_type = field.ty,
.mutable = ptr_ty.ptrIsMutable(),
.@"addrspace" = ptr_ty.ptrAddressSpace(),
});
if (try sema.resolveDefinedValue(block, src, object_ptr)) |struct_ptr_val| {
const pointer = try sema.addConstant(
ptr_field_ty,
try Value.Tag.field_ptr.create(arena, .{
.container_ptr = struct_ptr_val,
.field_index = field_index,
}),
);
return sema.analyzeLoad(block, src, pointer, src);
}
try sema.requireRuntimeBlock(block, src);
const ptr_inst = ptr_inst: {
const tag: Air.Inst.Tag = switch (field_index) {
0 => .struct_field_ptr_index_0,
1 => .struct_field_ptr_index_1,
2 => .struct_field_ptr_index_2,
3 => .struct_field_ptr_index_3,
else => {
break :ptr_inst try block.addInst(.{
.tag = .struct_field_ptr,
.data = .{ .ty_pl = .{
.ty = try sema.addType(ptr_field_ty),
.payload = try sema.addExtra(Air.StructField{
.struct_operand = object_ptr,
.field_index = @intCast(u32, field_index),
}),
} },
});
},
};
break :ptr_inst try block.addInst(.{
.tag = tag,
.data = .{ .ty_op = .{
.ty = try sema.addType(ptr_field_ty),
.operand = object_ptr,
} },
});
};
return sema.analyzeLoad(block, src, ptr_inst, src);
},
.Union => return sema.mod.fail(&block.base, src, "TODO implement field calls on unions", .{}),
.Type => {
const namespace = try sema.analyzeLoad(block, src, object_ptr, src);
return sema.fieldVal(block, src, namespace, field_name, field_name_src);
},
else => {},
}
}
// If we get here, we need to look for a decl in the struct type instead.
switch (concrete_ty.zigTypeTag()) {
.Struct, .Opaque, .Union, .Enum => {
if (concrete_ty.getNamespace()) |namespace| {
if (try sema.namespaceLookupRef(block, src, namespace, field_name)) |inst| {
const decl_val = try sema.analyzeLoad(block, src, inst, src);
const decl_type = sema.typeOf(decl_val);
if (decl_type.zigTypeTag() == .Fn and
decl_type.fnParamLen() >= 1)
{
const first_param_type = decl_type.fnParamType(0);
const first_param_tag = first_param_type.tag();
// zig fmt: off
if (first_param_tag == .var_args_param or
first_param_tag == .generic_poison or (
first_param_type.zigTypeTag() == .Pointer and
first_param_type.ptrSize() == .One and
first_param_type.childType().eql(concrete_ty)))
{
// zig fmt: on
// TODO: bound fn calls on rvalues should probably
// generate a by-value argument somehow.
const ty = Type.Tag.bound_fn.init();
const value = try Value.Tag.bound_fn.create(arena, .{
.func_inst = decl_val,
.arg0_inst = object_ptr,
});
return sema.addConstant(ty, value);
} else if (first_param_type.eql(concrete_ty)) {
var deref = try sema.analyzeLoad(block, src, object_ptr, src);
const ty = Type.Tag.bound_fn.init();
const value = try Value.Tag.bound_fn.create(arena, .{
.func_inst = decl_val,
.arg0_inst = deref,
});
return sema.addConstant(ty, value);
}
}
}
}
},
else => {},
}
return mod.fail(&block.base, src, "type '{}' has no field or member function named '{s}'", .{ concrete_ty, field_name });
}
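A bound function is only produced when the decl's first parameter can accept the receiver: a var-args or generic parameter, a single-item pointer to the type (receiver passed by address), or the type itself (receiver loaded and passed by value). Illustrated with hypothetical methods:

const std = @import("std");

const S = struct {
    x: u32,
    // first param is *S: binds, receiver passed by address
    fn byPtr(self: *S) u32 {
        return self.x;
    }
    // first param is S: binds, receiver is loaded first
    fn byVal(self: S) u32 {
        return self.x;
    }
    // generic first param: binds
    fn byAny(self: anytype) u32 {
        return self.x;
    }
};

test "which first parameters bind" {
    var v = S{ .x = 9 };
    try std.testing.expect(v.byPtr() == 9);
    try std.testing.expect(v.byVal() == 9);
    try std.testing.expect(v.byAny() == 9);
}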
fn namespaceLookup(
sema: *Sema,
block: *Scope.Block,
@ -9850,14 +9993,14 @@ fn coerce(
if (dest_type.eql(inst_ty))
return inst;
const in_memory_result = coerceInMemoryAllowed(dest_type, inst_ty, false);
const mod = sema.mod;
const arena = sema.arena;
const in_memory_result = coerceInMemoryAllowed(dest_type, inst_ty, false, mod.getTarget());
if (in_memory_result == .ok) {
return sema.bitcast(block, dest_type, inst, inst_src);
}
const mod = sema.mod;
const arena = sema.arena;
// undefined to anything
if (try sema.resolveMaybeUndefVal(block, inst_src, inst)) |val| {
if (val.isUndef() or inst_ty.zigTypeTag() == .Undefined) {
@ -9898,7 +10041,7 @@ fn coerce(
if (inst_ty.ptrAddressSpace() != dest_type.ptrAddressSpace()) break :src_array_ptr;
const dst_elem_type = dest_type.elemType();
switch (coerceInMemoryAllowed(dst_elem_type, array_elem_type, dest_is_mut)) {
switch (coerceInMemoryAllowed(dst_elem_type, array_elem_type, dest_is_mut, mod.getTarget())) {
.ok => {},
.no_match => break :src_array_ptr,
}
@ -10024,7 +10167,7 @@ const InMemoryCoercionResult = enum {
/// * sentinel-terminated pointers can coerce into `[*]`
/// TODO improve this function to report recursive compile errors like it does in stage1.
/// look at the function types_match_const_cast_only
fn coerceInMemoryAllowed(dest_type: Type, src_type: Type, dest_is_mut: bool) InMemoryCoercionResult {
fn coerceInMemoryAllowed(dest_type: Type, src_type: Type, dest_is_mut: bool, target: std.Target) InMemoryCoercionResult {
if (dest_type.eql(src_type))
return .ok;
@ -10034,7 +10177,7 @@ fn coerceInMemoryAllowed(dest_type: Type, src_type: Type, dest_is_mut: bool) InM
const dest_info = dest_type.ptrInfo().data;
const src_info = src_type.ptrInfo().data;
const child = coerceInMemoryAllowed(dest_info.pointee_type, src_info.pointee_type, dest_info.mutable);
const child = coerceInMemoryAllowed(dest_info.pointee_type, src_info.pointee_type, dest_info.mutable, target);
if (child == .no_match) {
return child;
}
@ -10081,11 +10224,19 @@ fn coerceInMemoryAllowed(dest_type: Type, src_type: Type, dest_is_mut: bool) InM
return .no_match;
}
assert(src_info.@"align" != 0);
assert(dest_info.@"align" != 0);
// If both pointers have alignment 0, it means they both want ABI alignment.
// In this case, if they share the same child type, no need to resolve
// pointee type alignment. Otherwise both pointee types must have their alignment
// resolved and we compare the alignment numerically.
if (src_info.@"align" != 0 or dest_info.@"align" != 0 or
!dest_info.pointee_type.eql(src_info.pointee_type))
{
const src_align = src_type.ptrAlignment(target);
const dest_align = dest_type.ptrAlignment(target);
if (dest_info.@"align" > src_info.@"align") {
return .no_match;
if (dest_align > src_align) {
return .no_match;
}
}
return .ok;
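The new rule skips pointee alignment resolution when both sides want ABI alignment of the same pointee type; otherwise both alignments are computed numerically and a coercion that would raise the required alignment is rejected. In source terms (a sketch; the exact ABI alignment is target-dependent):

test "coercion may drop pointer alignment, never raise it" {
    var x: u32 = 123;
    const p: *u32 = &x; // ABI alignment of u32
    const under: *align(1) u32 = p; // ok: 1 <= ABI alignment
    _ = under;
    // const back: *u32 = under; // error: would raise required alignment
}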
@ -11606,6 +11757,7 @@ fn typeHasOnePossibleValue(
.single_const_pointer,
.single_mut_pointer,
.pointer,
.bound_fn,
=> return null,
.@"struct" => {

src/Zir.zig

@ -70,6 +70,7 @@ pub fn extraData(code: Zir, comptime T: type, index: usize) struct { data: T, en
u32 => code.extra[i],
Inst.Ref => @intToEnum(Inst.Ref, code.extra[i]),
i32 => @bitCast(i32, code.extra[i]),
Inst.Call.Flags => @bitCast(Inst.Call.Flags, code.extra[i]),
else => @compileError("bad field type"),
};
i += 1;
@ -222,17 +223,9 @@ pub const Inst = struct {
break_inline,
/// Uses the `node` union field.
breakpoint,
/// Function call with modifier `.auto`.
/// Function call.
/// Uses `pl_node`. AST node is the function call. Payload is `Call`.
call,
/// Same as `call` but it also does `ensure_result_used` on the return value.
call_chkused,
/// Same as `call` but with modifier `.compile_time`.
call_compile_time,
/// Same as `call` but with modifier `.no_suspend`.
call_nosuspend,
/// Same as `call` but with modifier `.async_kw`.
call_async,
/// `<`
/// Uses the `pl_node` union field. Payload is `Bin`.
cmp_lt,
@ -327,6 +320,15 @@ pub const Inst = struct {
/// This instruction also accepts a pointer.
/// Uses `pl_node` field. The AST node is the a.b syntax. Payload is Field.
field_val,
/// Given a pointer to a struct or object that contains virtual fields, returns the
/// named field. If there is no named field, searches in the type for a decl that
/// matches the field name. The decl is resolved and we ensure that it's a function
/// which can accept the object as the first parameter, with one pointer fixup. If
/// all of that works, this instruction produces a special "bound function" value
/// which contains both the function and the saved first parameter value.
/// Bound functions may only be used as the function parameter to a `call` or
/// `builtin_call` instruction. Any other use is invalid zir and may crash the compiler.
field_call_bind,
/// Given a pointer to a struct or object that contains virtual fields, returns a pointer
/// to the named field. The field name is a comptime instruction. Used by @field.
/// Uses `pl_node` field. The AST node is the builtin call. Payload is FieldNamed.
@ -335,6 +337,15 @@ pub const Inst = struct {
/// The field name is a comptime instruction. Used by @field.
/// Uses `pl_node` field. The AST node is the builtin call. Payload is FieldNamed.
field_val_named,
/// Given a pointer to a struct or object that contains virtual fields, returns the
/// named field. If there is no named field, searches in the type for a decl that
/// matches the field name. The decl is resolved and we ensure that it's a function
/// which can accept the object as the first parameter, with one pointer fixup. If
/// all of that works, this instruction produces a special "bound function" value
/// which contains both the function and the saved first parameter value.
/// Bound functions may only be used as the function parameter to a `call` or
/// `builtin_call` instruction. Any other use is invalid zir and may crash the compiler.
field_call_bind_named,
/// Returns a function type, or a function instance, depending on whether
/// the body_len is 0. Calling convention is auto.
/// Uses the `pl_node` union field. `payload_index` points to a `Func`.
@ -395,14 +406,6 @@ pub const Inst = struct {
/// Twos complement wrapping integer multiplication.
/// Uses the `pl_node` union field. Payload is `Bin`.
mulwrap,
/// Given a reference to a function and a parameter index, returns the
/// type of the parameter. The only usage of this instruction is for the
/// result location of parameters of function calls. In the case of a function's
/// parameter type being `anytype`, it is the type coercion's job to detect this
/// scenario and skip the coercion, so that semantic analysis of this instruction
/// is not in a position where it must create an invalid type.
/// Uses the `param_type` union field.
param_type,
/// Turns an R-Value into a const L-Value. In other words, it takes a value,
/// stores it in a memory location, and returns a const pointer to it. If the value
/// is `comptime`, the memory location is global static constant data. Otherwise,
@ -988,10 +991,6 @@ pub const Inst = struct {
.breakpoint,
.fence,
.call,
.call_chkused,
.call_compile_time,
.call_nosuspend,
.call_async,
.cmp_lt,
.cmp_lte,
.cmp_eq,
@ -1017,8 +1016,10 @@ pub const Inst = struct {
.export_value,
.field_ptr,
.field_val,
.field_call_bind,
.field_ptr_named,
.field_val_named,
.field_call_bind_named,
.func,
.func_inferred,
.has_decl,
@ -1034,7 +1035,6 @@ pub const Inst = struct {
.mod_rem,
.mul,
.mulwrap,
.param_type,
.ref,
.shl,
.shr,
@ -1247,10 +1247,6 @@ pub const Inst = struct {
.break_inline = .@"break",
.breakpoint = .node,
.call = .pl_node,
.call_chkused = .pl_node,
.call_compile_time = .pl_node,
.call_nosuspend = .pl_node,
.call_async = .pl_node,
.cmp_lt = .pl_node,
.cmp_lte = .pl_node,
.cmp_eq = .pl_node,
@ -1282,6 +1278,8 @@ pub const Inst = struct {
.field_val = .pl_node,
.field_ptr_named = .pl_node,
.field_val_named = .pl_node,
.field_call_bind = .pl_node,
.field_call_bind_named = .pl_node,
.func = .pl_node,
.func_inferred = .pl_node,
.import = .str_tok,
@ -1301,7 +1299,6 @@ pub const Inst = struct {
.mod_rem = .pl_node,
.mul = .pl_node,
.mulwrap = .pl_node,
.param_type = .param_type,
.ref = .un_tok,
.ret_node = .un_node,
.ret_load = .un_node,
@ -2170,10 +2167,6 @@ pub const Inst = struct {
/// Points to a `Block`.
payload_index: u32,
},
param_type: struct {
callee: Ref,
param_index: u32,
},
@"unreachable": struct {
/// Offset from Decl AST node index.
/// `Tag` determines which kind of AST node this points to.
@ -2244,7 +2237,6 @@ pub const Inst = struct {
ptr_type,
int_type,
bool_br,
param_type,
@"unreachable",
@"break",
switch_capture,
@ -2372,8 +2364,27 @@ pub const Inst = struct {
/// Stored inside extra, with trailing arguments according to `args_len`.
/// Each argument is a `Ref`.
pub const Call = struct {
// Note: Flags *must* come first so that unusedResultExpr
// can find it when it goes to modify them.
flags: Flags,
callee: Ref,
args_len: u32,
pub const Flags = packed struct {
/// std.builtin.CallOptions.Modifier in packed form
pub const PackedModifier = u3;
pub const PackedArgsLen = u28;
packed_modifier: PackedModifier,
ensure_result_used: bool = false,
args_len: PackedArgsLen,
comptime {
if (@sizeOf(Flags) != 4 or @bitSizeOf(Flags) != 32)
@compileError("Layout of Call.Flags needs to be updated!");
if (@bitSizeOf(std.builtin.CallOptions.Modifier) != @bitSizeOf(PackedModifier))
@compileError("Call.Flags.PackedModifier needs to be updated!");
}
};
};
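The packed layout budgets the 32-bit word as 3 bits of modifier, 1 bit for ensure_result_used, and 28 bits of argument count. A sketch of the encode/decode pair AstGen and Sema use, assuming `std.builtin.CallOptions.Modifier` as it existed at this commit:

const std = @import("std");

const Flags = packed struct {
    packed_modifier: u3,
    ensure_result_used: bool = false,
    args_len: u28,
};

test "modifier survives the 3-bit packing" {
    const Modifier = std.builtin.CallOptions.Modifier;
    const flags = Flags{
        .packed_modifier = @intCast(u3, @enumToInt(Modifier.compile_time)),
        .args_len = 0,
    };
    // zirCall's decoding step:
    const modifier = @intToEnum(Modifier, flags.packed_modifier);
    try std.testing.expect(modifier == .compile_time);
}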
pub const BuiltinCall = struct {

src/print_zir.zig

@ -179,7 +179,6 @@ const Writer = struct {
=> try self.writeBoolBr(stream, inst),
.array_type_sentinel => try self.writeArrayTypeSentinel(stream, inst),
.param_type => try self.writeParamType(stream, inst),
.ptr_type_simple => try self.writePtrTypeSimple(stream, inst),
.ptr_type => try self.writePtrType(stream, inst),
.int => try self.writeInt(stream, inst),
@ -195,8 +194,6 @@ const Writer = struct {
.elem_ptr_node,
.elem_val_node,
.field_ptr_named,
.field_val_named,
.slice_start,
.slice_end,
.slice_sentinel,
@ -288,12 +285,7 @@ const Writer = struct {
.@"export" => try self.writePlNodeExport(stream, inst),
.export_value => try self.writePlNodeExportValue(stream, inst),
.call,
.call_chkused,
.call_compile_time,
.call_nosuspend,
.call_async,
=> try self.writePlNodeCall(stream, inst),
.call => try self.writePlNodeCall(stream, inst),
.block,
.block_inline,
@ -328,8 +320,14 @@ const Writer = struct {
.field_ptr,
.field_val,
.field_call_bind,
=> try self.writePlNodeField(stream, inst),
.field_ptr_named,
.field_val_named,
.field_call_bind_named,
=> try self.writePlNodeFieldNamed(stream, inst),
.as_node => try self.writeAs(stream, inst),
.breakpoint,
@ -481,16 +479,6 @@ const Writer = struct {
try stream.writeAll("TODO)");
}
fn writeParamType(
self: *Writer,
stream: anytype,
inst: Zir.Inst.Index,
) (@TypeOf(stream).Error || error{OutOfMemory})!void {
const inst_data = self.code.instructions.items(.data)[inst].param_type;
try self.writeInstRef(stream, inst_data.callee);
try stream.print(", {d})", .{inst_data.param_index});
}
fn writePtrTypeSimple(
self: *Writer,
stream: anytype,
@ -881,8 +869,12 @@ const Writer = struct {
fn writePlNodeCall(self: *Writer, stream: anytype, inst: Zir.Inst.Index) !void {
const inst_data = self.code.instructions.items(.data)[inst].pl_node;
const extra = self.code.extraData(Zir.Inst.Call, inst_data.payload_index);
const args = self.code.refSlice(extra.end, extra.data.args_len);
const args = self.code.refSlice(extra.end, extra.data.flags.args_len);
if (extra.data.flags.ensure_result_used) {
try stream.writeAll("nodiscard ");
}
try stream.print(".{s}, ", .{@tagName(@intToEnum(std.builtin.CallOptions.Modifier, extra.data.flags.packed_modifier))});
try self.writeInstRef(stream, extra.data.callee);
try stream.writeAll(", [");
for (args) |arg, i| {
@ -1637,6 +1629,16 @@ const Writer = struct {
try self.writeSrc(stream, inst_data.src());
}
fn writePlNodeFieldNamed(self: *Writer, stream: anytype, inst: Zir.Inst.Index) !void {
const inst_data = self.code.instructions.items(.data)[inst].pl_node;
const extra = self.code.extraData(Zir.Inst.FieldNamed, inst_data.payload_index).data;
try self.writeInstRef(stream, extra.lhs);
try stream.writeAll(", ");
try self.writeInstRef(stream, extra.field_name);
try stream.writeAll(") ");
try self.writeSrc(stream, inst_data.src());
}
fn writeAs(self: *Writer, stream: anytype, inst: Zir.Inst.Index) !void {
const inst_data = self.code.instructions.items(.data)[inst].pl_node;
const extra = self.code.extraData(Zir.Inst.As, inst_data.payload_index).data;

src/type.zig

@ -138,6 +138,7 @@ pub const Type = extern union {
.type_info,
=> return .Union,
.bound_fn => unreachable,
.var_args_param => unreachable, // can be any type
}
}
@ -771,6 +772,7 @@ pub const Type = extern union {
.type_info,
.@"anyframe",
.generic_poison,
.bound_fn,
=> unreachable,
.array_u8,
@ -936,6 +938,7 @@ pub const Type = extern union {
.comptime_float,
.noreturn,
.var_args_param,
.bound_fn,
=> return writer.writeAll(@tagName(t)),
.enum_literal => return writer.writeAll("@Type(.EnumLiteral)"),
@ -1248,6 +1251,7 @@ pub const Type = extern union {
.var_args_param => unreachable,
.inferred_alloc_mut => unreachable,
.inferred_alloc_const => unreachable,
.bound_fn => unreachable,
.array_u8,
.array_u8_sentinel_0,
@ -1479,6 +1483,7 @@ pub const Type = extern union {
.empty_struct_literal,
.@"opaque",
.type_info,
.bound_fn,
=> false,
.inferred_alloc_const => unreachable,
@ -1489,7 +1494,9 @@ pub const Type = extern union {
}
pub fn isNoReturn(self: Type) bool {
const definitely_correct_result = self.zigTypeTag() == .NoReturn;
const definitely_correct_result =
self.tag_if_small_enough != .bound_fn and
self.zigTypeTag() == .NoReturn;
const fast_result = self.tag_if_small_enough == Tag.noreturn;
assert(fast_result == definitely_correct_result);
return fast_result;
@ -1736,6 +1743,7 @@ pub const Type = extern union {
.@"opaque",
.var_args_param,
.type_info,
.bound_fn,
=> unreachable,
.generic_poison => unreachable,
@ -1768,6 +1776,7 @@ pub const Type = extern union {
.var_args_param => unreachable,
.generic_poison => unreachable,
.type_info => unreachable,
.bound_fn => unreachable,
.@"struct" => {
const s = self.castTag(.@"struct").?.data;
@ -1951,6 +1960,7 @@ pub const Type = extern union {
.@"opaque" => unreachable,
.var_args_param => unreachable,
.generic_poison => unreachable,
.bound_fn => unreachable,
.@"struct" => {
@panic("TODO bitSize struct");
@ -2353,6 +2363,51 @@ pub const Type = extern union {
}
}
/// Returns whether the type can be used for a runtime variable
pub fn isValidVarType(self: Type, is_extern: bool) bool {
var ty = self;
while (true) switch (ty.zigTypeTag()) {
.Bool,
.Int,
.Float,
.ErrorSet,
.Enum,
.Frame,
.AnyFrame,
=> return true,
.Opaque => return is_extern,
.BoundFn,
.ComptimeFloat,
.ComptimeInt,
.EnumLiteral,
.NoReturn,
.Type,
.Void,
.Undefined,
.Null,
=> return false,
.Optional => {
var buf: Payload.ElemType = undefined;
return ty.optionalChild(&buf).isValidVarType(is_extern);
},
.Pointer, .Array, .Vector => ty = ty.elemType(),
.ErrorUnion => ty = ty.errorUnionPayload(),
.Fn => @panic("TODO fn isValidVarType"),
.Struct => {
// TODO this is not always correct; introduce lazy value mechanism
// and here we need to force a resolve of "type requires comptime".
return true;
},
.Union => @panic("TODO union isValidVarType"),
};
}
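`isValidVarType` gates runtime `var` declarations: runtime-representable types pass, comptime-only types are rejected, and containers recurse into their element types. Source-level intuition (hypothetical snippet; the rejected line is left commented out):

test "runtime vars need runtime types" {
    var n: u32 = 0; // Int: valid
    var opt: ?bool = null; // Optional recurses into Bool: valid
    _ = n;
    _ = opt;
    // var t: type = u32; // Type: rejected, comptime-only
}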
/// For *[N]T, returns [N]T.
/// For *T, returns T.
/// For [*]T, returns T.
pub fn childType(ty: Type) Type {
return switch (ty.tag()) {
.vector => ty.castTag(.vector).?.data.elem_type,
@ -2934,6 +2989,7 @@ pub const Type = extern union {
.single_const_pointer,
.single_mut_pointer,
.pointer,
.bound_fn,
=> return null,
.@"struct" => {
@ -3480,6 +3536,7 @@ pub const Type = extern union {
inferred_alloc_mut,
/// Same as `inferred_alloc_mut` but the local is `var` not `const`.
inferred_alloc_const, // See last_no_payload_tag below.
bound_fn,
// After this, the tag requires a payload.
array_u8,
@ -3518,7 +3575,7 @@ pub const Type = extern union {
enum_full,
enum_nonexhaustive,
pub const last_no_payload_tag = Tag.inferred_alloc_const;
pub const last_no_payload_tag = Tag.bound_fn;
pub const no_payload_count = @enumToInt(last_no_payload_tag) + 1;
pub fn Type(comptime t: Tag) type {
@ -3585,6 +3642,7 @@ pub const Type = extern union {
.extern_options,
.type_info,
.@"anyframe",
.bound_fn,
=> @compileError("Type Tag " ++ @tagName(t) ++ " has no payload"),
.array_u8,

src/value.zig

@ -159,6 +159,10 @@ pub const Value = extern union {
/// Used to coordinate alloc_inferred, store_to_inferred_ptr, and resolve_inferred_alloc
/// instructions for comptime code.
inferred_alloc_comptime,
/// Used sometimes as the result of field_call_bind. This value is always temporary,
/// and refers directly to the air. It will never be referenced by the air itself.
/// TODO: This is probably a bad encoding, maybe put temp data in the sema instead.
bound_fn,
pub const last_no_payload_tag = Tag.empty_array;
pub const no_payload_count = @enumToInt(last_no_payload_tag) + 1;
@ -279,6 +283,7 @@ pub const Value = extern union {
.inferred_alloc => Payload.InferredAlloc,
.@"struct" => Payload.Struct,
.@"union" => Payload.Union,
.bound_fn => Payload.BoundFn,
};
}
@ -422,6 +427,7 @@ pub const Value = extern union {
.extern_options_type,
.type_info_type,
.generic_poison,
.bound_fn,
=> unreachable,
.ty => {
@ -716,6 +722,10 @@ pub const Value = extern union {
try out_stream.writeAll("(opt_payload_ptr)");
val = val.castTag(.opt_payload_ptr).?.data;
},
.bound_fn => {
const bound_func = val.castTag(.bound_fn).?.data;
return out_stream.print("(bound_fn %{}(%{})", .{ bound_func.func_inst, bound_func.arg0_inst });
},
};
}
@ -2199,6 +2209,16 @@ pub const Value = extern union {
val: Value,
},
};
pub const BoundFn = struct {
pub const base_tag = Tag.bound_fn;
base: Payload = Payload{ .tag = base_tag },
data: struct {
func_inst: Air.Inst.Ref,
arg0_inst: Air.Inst.Ref,
},
};
};
/// Big enough to fit any non-BigInt value

test/behavior.zig

@ -10,6 +10,7 @@ test {
_ = @import("behavior/eval.zig");
_ = @import("behavior/generics.zig");
_ = @import("behavior/if.zig");
_ = @import("behavior/member_func.zig");
_ = @import("behavior/pointers.zig");
_ = @import("behavior/sizeof_and_typeof.zig");
_ = @import("behavior/struct.zig");

test/behavior/eval.zig

@ -155,3 +155,31 @@ fn MakeType(comptime T: type) type {
field: T,
};
}
test "try to trick eval with runtime if" {
try expect(testTryToTrickEvalWithRuntimeIf(true) == 10);
}
fn testTryToTrickEvalWithRuntimeIf(b: bool) usize {
comptime var i: usize = 0;
inline while (i < 10) : (i += 1) {
const result = if (b) false else true;
_ = result;
}
comptime {
return i;
}
}
test "@setEvalBranchQuota" {
comptime {
// 1001 for the loop and then 1 more for the expect fn call
@setEvalBranchQuota(1002);
var i = 0;
var sum = 0;
while (i < 1001) : (i += 1) {
sum += i;
}
try expect(sum == 500500);
}
}

test/stage1/behavior/eval.zig

@ -109,21 +109,6 @@ test "const slice" {
}
}
test "try to trick eval with runtime if" {
try expect(testTryToTrickEvalWithRuntimeIf(true) == 10);
}
fn testTryToTrickEvalWithRuntimeIf(b: bool) usize {
comptime var i: usize = 0;
inline while (i < 10) : (i += 1) {
const result = if (b) false else true;
_ = result;
}
comptime {
return i;
}
}
test "inlined loop has array literal with elided runtime scope on first iteration but not second iteration" {
var runtime = [1]i32{3};
comptime var i: usize = 0;
@ -276,19 +261,6 @@ fn assertEqualPtrs(ptr1: *const u8, ptr2: *const u8) !void {
try expect(ptr1 == ptr2);
}
test "@setEvalBranchQuota" {
comptime {
// 1001 for the loop and then 1 more for the expect fn call
@setEvalBranchQuota(1002);
var i = 0;
var sum = 0;
while (i < 1001) : (i += 1) {
sum += i;
}
try expect(sum == 500500);
}
}
test "float literal at compile time not lossy" {
try expect(16777216.0 + 1.0 == 16777217.0);
try expect(9007199254740992.0 + 1.0 == 9007199254740993.0);

test/behavior/generics.zig

@ -118,3 +118,19 @@ pub fn SmallList(comptime T: type, comptime STATIC_SIZE: usize) type {
prealloc_items: [STATIC_SIZE]T,
};
}
test "const decls in struct" {
try expect(GenericDataThing(3).count_plus_one == 4);
}
fn GenericDataThing(comptime count: isize) type {
return struct {
const count_plus_one = count + 1;
};
}
test "use generic param in generic param" {
try expect(aGenericFn(i32, 3, 4) == 7);
}
fn aGenericFn(comptime T: type, comptime a: T, b: T) T {
return a + b;
}

test/stage1/behavior/generics.zig

@ -26,22 +26,6 @@ fn GenNode(comptime T: type) type {
};
}
test "const decls in struct" {
try expect(GenericDataThing(3).count_plus_one == 4);
}
fn GenericDataThing(comptime count: isize) type {
return struct {
const count_plus_one = count + 1;
};
}
test "use generic param in generic param" {
try expect(aGenericFn(i32, 3, 4) == 7);
}
fn aGenericFn(comptime T: type, comptime a: T, b: T) T {
return a + b;
}
test "generic fn with implicit cast" {
try expect(getFirstByte(u8, &[_]u8{13}) == 13);
try expect(getFirstByte(u16, &[_]u16{

test/behavior/member_func.zig

@ -0,0 +1,103 @@
const expect = @import("std").testing.expect;
const HasFuncs = struct {
state: u32,
func_field: fn (u32) u32,
fn inc(self: *HasFuncs) void {
self.state += 1;
}
fn get(self: HasFuncs) u32 {
return self.state;
}
fn getPtr(self: *const HasFuncs) *const u32 {
return &self.state;
}
fn one(_: u32) u32 {
return 1;
}
fn two(_: u32) u32 {
return 2;
}
};
test "standard field calls" {
try expect(HasFuncs.one(0) == 1);
try expect(HasFuncs.two(0) == 2);
var v: HasFuncs = undefined;
v.state = 0;
v.func_field = HasFuncs.one;
const pv = &v;
const pcv: *const HasFuncs = pv;
try expect(v.get() == 0);
v.inc();
try expect(v.state == 1);
try expect(v.get() == 1);
pv.inc();
try expect(v.state == 2);
try expect(pv.get() == 2);
try expect(v.getPtr().* == 2);
try expect(pcv.get() == 2);
try expect(pcv.getPtr().* == 2);
v.func_field = HasFuncs.one;
try expect(v.func_field(0) == 1);
try expect(pv.func_field(0) == 1);
try expect(pcv.func_field(0) == 1);
try expect(pcv.func_field(blk: {
pv.func_field = HasFuncs.two;
break :blk 0;
}) == 1);
v.func_field = HasFuncs.two;
try expect(v.func_field(0) == 2);
try expect(pv.func_field(0) == 2);
try expect(pcv.func_field(0) == 2);
}
test "@field field calls" {
try expect(@field(HasFuncs, "one")(0) == 1);
try expect(@field(HasFuncs, "two")(0) == 2);
var v: HasFuncs = undefined;
v.state = 0;
v.func_field = HasFuncs.one;
const pv = &v;
const pcv: *const HasFuncs = pv;
try expect(@field(v, "get")() == 0);
@field(v, "inc")();
try expect(v.state == 1);
try expect(@field(v, "get")() == 1);
@field(pv, "inc")();
try expect(v.state == 2);
try expect(@field(pv, "get")() == 2);
try expect(@field(v, "getPtr")().* == 2);
try expect(@field(pcv, "get")() == 2);
try expect(@field(pcv, "getPtr")().* == 2);
v.func_field = HasFuncs.one;
try expect(@field(v, "func_field")(0) == 1);
try expect(@field(pv, "func_field")(0) == 1);
try expect(@field(pcv, "func_field")(0) == 1);
try expect(@field(pcv, "func_field")(blk: {
pv.func_field = HasFuncs.two;
break :blk 0;
}) == 1);
v.func_field = HasFuncs.two;
try expect(@field(v, "func_field")(0) == 2);
try expect(@field(pv, "func_field")(0) == 2);
try expect(@field(pcv, "func_field")(0) == 2);
}