Merge pull request #14403 from Vexu/fixes

Misc fixes
commit a51c76541d
Merged by Andrew Kelley on 2023-01-22 00:36:50 -05:00 (committed via GitHub).
13 changed files with 366 additions and 223 deletions

src/Sema.zig

@@ -3948,6 +3948,7 @@ fn validateArrayInitTy(
return;
},
.Struct => if (ty.isTuple()) {
_ = try sema.resolveTypeFields(ty);
const array_len = ty.arrayLen();
if (extra.init_count > array_len) {
return sema.fail(block, src, "expected at most {d} tuple fields; found {d}", .{
@@ -4642,11 +4643,11 @@ fn failWithBadMemberAccess(
.Enum => "enum",
else => unreachable,
};
if (sema.mod.declIsRoot(agg_ty.getOwnerDecl())) {
if (agg_ty.getOwnerDeclOrNull()) |some| if (sema.mod.declIsRoot(some)) {
return sema.fail(block, field_src, "root struct of file '{}' has no member named '{s}'", .{
agg_ty.fmt(sema.mod), field_name,
});
}
};
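// Anonymous aggregates such as `@TypeOf(.{})` have no owner decl, so the
// "root struct of file" error path is now guarded; the new compile-error
// test at the end of this PR covers exactly that case.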
const msg = msg: {
const msg = try sema.errMsg(block, field_src, "{s} '{}' has no member named '{s}'", .{
kw_name, agg_ty.fmt(sema.mod), field_name,
@@ -7514,7 +7515,7 @@ fn resolveGenericInstantiationType(
}
fn resolveTupleLazyValues(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError!void {
if (!ty.isSimpleTuple()) return;
if (!ty.isSimpleTupleOrAnonStruct()) return;
const tuple = ty.tupleFields();
for (tuple.values) |field_val, i| {
try sema.resolveTupleLazyValues(block, src, tuple.types[i]);
@@ -11771,8 +11772,8 @@ fn zirShl(
// TODO coerce rhs if air_tag is not shl_sat
const rhs_is_comptime_int = try sema.checkIntType(block, rhs_src, scalar_rhs_ty);
const maybe_lhs_val = try sema.resolveMaybeUndefVal(lhs);
const maybe_rhs_val = try sema.resolveMaybeUndefVal(rhs);
const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(lhs);
const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(rhs);
if (maybe_rhs_val) |rhs_val| {
if (rhs_val.isUndef()) {
@@ -11842,7 +11843,7 @@ fn zirShl(
if (scalar_ty.zigTypeTag() == .ComptimeInt) {
break :val shifted.wrapped_result;
}
if (shifted.overflow_bit.compareAllWithZero(.eq)) {
if (shifted.overflow_bit.compareAllWithZero(.eq, sema.mod)) {
break :val shifted.wrapped_result;
}
return sema.fail(block, src, "operation caused overflow", .{});
@@ -11959,8 +11960,8 @@ fn zirShr(
const target = sema.mod.getTarget();
const scalar_ty = lhs_ty.scalarType();
const maybe_lhs_val = try sema.resolveMaybeUndefVal(lhs);
const maybe_rhs_val = try sema.resolveMaybeUndefVal(rhs);
const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(lhs);
const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(rhs);
const runtime_src = if (maybe_rhs_val) |rhs_val| rs: {
if (rhs_val.isUndef()) {
@@ -12799,7 +12800,7 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison();
const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison();
try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
try sema.checkInvalidPtrArithmetic(block, src, lhs_ty, .div);
try sema.checkInvalidPtrArithmetic(block, src, lhs_ty);
const instructions = &[_]Air.Inst.Ref{ lhs, rhs };
const resolved_type = try sema.resolvePeerTypes(block, src, instructions, .{
@@ -12831,7 +12832,7 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
const lhs_val = maybe_lhs_val orelse unreachable;
const rhs_val = maybe_rhs_val orelse unreachable;
const rem = lhs_val.floatRem(rhs_val, resolved_type, sema.arena, mod) catch unreachable;
if (!rem.compareAllWithZero(.eq)) {
if (!rem.compareAllWithZero(.eq, mod)) {
return sema.fail(block, src, "ambiguous coercion of division operands '{s}' and '{s}'; non-zero remainder '{}'", .{
@tagName(lhs_ty.tag()), @tagName(rhs_ty.tag()), rem.fmtValue(resolved_type, sema.mod),
});
@@ -12959,7 +12960,7 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison();
const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison();
try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
try sema.checkInvalidPtrArithmetic(block, src, lhs_ty, .div_exact);
try sema.checkInvalidPtrArithmetic(block, src, lhs_ty);
const instructions = &[_]Air.Inst.Ref{ lhs, rhs };
const resolved_type = try sema.resolvePeerTypes(block, src, instructions, .{
@@ -13024,7 +13025,7 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
if (maybe_rhs_val) |rhs_val| {
if (is_int) {
const modulus_val = try lhs_val.intMod(rhs_val, resolved_type, sema.arena, mod);
if (!(modulus_val.compareAllWithZero(.eq))) {
if (!(modulus_val.compareAllWithZero(.eq, mod))) {
return sema.fail(block, src, "exact division produced remainder", .{});
}
const res = try lhs_val.intDiv(rhs_val, resolved_type, sema.arena, mod);
@@ -13035,7 +13036,7 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
return sema.addConstant(resolved_type, res);
} else {
const modulus_val = try lhs_val.floatMod(rhs_val, resolved_type, sema.arena, mod);
if (!(modulus_val.compareAllWithZero(.eq))) {
if (!(modulus_val.compareAllWithZero(.eq, mod))) {
return sema.fail(block, src, "exact division produced remainder", .{});
}
return sema.addConstant(
@@ -13122,7 +13123,7 @@ fn zirDivFloor(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison();
const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison();
try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
try sema.checkInvalidPtrArithmetic(block, src, lhs_ty, .div_floor);
try sema.checkInvalidPtrArithmetic(block, src, lhs_ty);
const instructions = &[_]Air.Inst.Ref{ lhs, rhs };
const resolved_type = try sema.resolvePeerTypes(block, src, instructions, .{
@@ -13238,7 +13239,7 @@ fn zirDivTrunc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison();
const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison();
try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
try sema.checkInvalidPtrArithmetic(block, src, lhs_ty, .div_trunc);
try sema.checkInvalidPtrArithmetic(block, src, lhs_ty);
const instructions = &[_]Air.Inst.Ref{ lhs, rhs };
const resolved_type = try sema.resolvePeerTypes(block, src, instructions, .{
@@ -13481,7 +13482,7 @@ fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison();
const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison();
try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
try sema.checkInvalidPtrArithmetic(block, src, lhs_ty, .mod_rem);
try sema.checkInvalidPtrArithmetic(block, src, lhs_ty);
const instructions = &[_]Air.Inst.Ref{ lhs, rhs };
const resolved_type = try sema.resolvePeerTypes(block, src, instructions, .{
@@ -13664,7 +13665,7 @@ fn zirMod(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison();
const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison();
try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
try sema.checkInvalidPtrArithmetic(block, src, lhs_ty, .mod);
try sema.checkInvalidPtrArithmetic(block, src, lhs_ty);
const instructions = &[_]Air.Inst.Ref{ lhs, rhs };
const resolved_type = try sema.resolvePeerTypes(block, src, instructions, .{
@@ -13766,7 +13767,7 @@ fn zirRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison();
const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison();
try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
try sema.checkInvalidPtrArithmetic(block, src, lhs_ty, .rem);
try sema.checkInvalidPtrArithmetic(block, src, lhs_ty);
const instructions = &[_]Air.Inst.Ref{ lhs, rhs };
const resolved_type = try sema.resolvePeerTypes(block, src, instructions, .{
@@ -14106,12 +14107,7 @@ fn analyzeArithmetic(
const air_tag: Air.Inst.Tag = switch (zir_tag) {
.add => .ptr_add,
.sub => .ptr_sub,
else => return sema.fail(
block,
src,
"invalid pointer arithmetic operand: '{s}''",
.{@tagName(zir_tag)},
),
else => return sema.fail(block, src, "invalid pointer arithmetic operator", .{}),
};
return sema.analyzePtrArithmetic(block, src, lhs, rhs, air_tag, lhs_src, rhs_src);
},
@@ -19697,7 +19693,7 @@ fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
}
}
if (try sema.resolveMaybeUndefVal(operand)) |val| {
if (try sema.resolveMaybeUndefValIntable(operand)) |val| {
if (val.isUndef()) return sema.addConstUndef(dest_ty);
if (!is_vector) {
return sema.addConstant(
@@ -19901,7 +19897,7 @@ fn zirBitReverse(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
const operand = try sema.resolveInst(inst_data.operand);
const operand_ty = sema.typeOf(operand);
_ = try sema.checkIntOrVectorAllowComptime(block, operand_ty, operand_src);
const scalar_ty = try sema.checkIntOrVector(block, operand, operand_src);
if (try sema.typeHasOnePossibleValue(operand_ty)) |val| {
return sema.addConstant(operand_ty, val);
@@ -19909,7 +19905,7 @@ fn zirBitReverse(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
const target = sema.mod.getTarget();
switch (operand_ty.zigTypeTag()) {
.Int, .ComptimeInt => {
.Int => {
const runtime_src = if (try sema.resolveMaybeUndefVal(operand)) |val| {
if (val.isUndef()) return sema.addConstUndef(operand_ty);
const result_val = try val.bitReverse(operand_ty, target, sema.arena);
@@ -19929,7 +19925,7 @@ fn zirBitReverse(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
const elems = try sema.arena.alloc(Value, vec_len);
for (elems) |*elem, i| {
const elem_val = val.elemValueBuffer(sema.mod, i, &elem_buf);
elem.* = try elem_val.bitReverse(operand_ty, target, sema.arena);
elem.* = try elem_val.bitReverse(scalar_ty, target, sema.arena);
}
return sema.addConstant(
operand_ty,
@@ -20028,7 +20024,6 @@ fn checkInvalidPtrArithmetic(
block: *Block,
src: LazySrcLoc,
ty: Type,
zir_tag: Zir.Inst.Tag,
) CompileError!void {
switch (try ty.zigTypeTagOrPoison()) {
.Pointer => switch (ty.ptrSize()) {
@@ -20036,8 +20031,8 @@ fn checkInvalidPtrArithmetic(
.Many, .C => return sema.fail(
block,
src,
"invalid pointer arithmetic operand: '{s}''",
.{@tagName(zir_tag)},
"invalid pointer arithmetic operator",
.{},
),
},
else => return,

src/codegen/llvm.zig

@@ -10396,12 +10396,7 @@ fn firstParamSRet(fn_info: Type.Payload.Function.Data, target: std.Target) bool
.mips, .mipsel => return false,
.x86_64 => switch (target.os.tag) {
.windows => return x86_64_abi.classifyWindows(fn_info.return_type, target) == .memory,
else => {
const class = x86_64_abi.classifySystemV(fn_info.return_type, target, .ret);
if (class[0] == .memory) return true;
if (class[0] == .x87 and class[2] != .none) return true;
return false;
},
else => return firstParamSRetSystemV(fn_info.return_type, target),
},
.wasm32 => return wasm_c_abi.classifyType(fn_info.return_type, target)[0] == .indirect,
.aarch64, .aarch64_be => return aarch64_c_abi.classifyType(fn_info.return_type, target) == .memory,
@@ -10413,11 +10408,20 @@ fn firstParamSRet(fn_info: Type.Payload.Function.Data, target: std.Target) bool
.riscv32, .riscv64 => return riscv_c_abi.classifyType(fn_info.return_type, target) == .memory,
else => return false, // TODO investigate C ABI for other architectures
},
.SysV => return firstParamSRetSystemV(fn_info.return_type, target),
.Win64 => return x86_64_abi.classifyWindows(fn_info.return_type, target) == .memory,
.Stdcall => return !isScalar(fn_info.return_type),
else => return false,
}
}
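/// Returns true when the SysV x86_64 classification forces the return value
/// through an sret pointer. Shared between `callconv(.C)` on non-Windows
/// x86_64 targets and the explicit `callconv(.SysV)` case.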
fn firstParamSRetSystemV(ty: Type, target: std.Target) bool {
const class = x86_64_abi.classifySystemV(ty, target, .ret);
if (class[0] == .memory) return true;
if (class[0] == .x87 and class[2] != .none) return true;
return false;
}
/// In order to support the C calling convention, some return types need to be lowered
/// completely differently in the function prototype to honor the C ABI, and then
/// be effectively bitcasted to the actual return type.
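// Illustration (hypothetical `Point` type, not part of this diff): given
//     const Point = extern struct { x: i32, y: i32 };
//     extern fn make_point() Point;
// the SysV x86_64 classifier puts this 8-byte struct in the .integer class,
// so the lowered prototype returns i64, which is then bitcast back to Point.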
@@ -10442,77 +10446,14 @@ fn lowerFnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*llvm.Type {
}
},
.C => {
const is_scalar = isScalar(fn_info.return_type);
switch (target.cpu.arch) {
.mips, .mipsel => return dg.lowerType(fn_info.return_type),
.x86_64 => switch (target.os.tag) {
.windows => switch (x86_64_abi.classifyWindows(fn_info.return_type, target)) {
.integer => {
if (is_scalar) {
return dg.lowerType(fn_info.return_type);
} else {
const abi_size = fn_info.return_type.abiSize(target);
return dg.context.intType(@intCast(c_uint, abi_size * 8));
}
},
.win_i128 => return dg.context.intType(64).vectorType(2),
.memory => return dg.context.voidType(),
.sse => return dg.lowerType(fn_info.return_type),
else => unreachable,
},
else => {
if (is_scalar) {
return dg.lowerType(fn_info.return_type);
}
const classes = x86_64_abi.classifySystemV(fn_info.return_type, target, .ret);
if (classes[0] == .memory) {
return dg.context.voidType();
}
var llvm_types_buffer: [8]*llvm.Type = undefined;
var llvm_types_index: u32 = 0;
for (classes) |class| {
switch (class) {
.integer => {
llvm_types_buffer[llvm_types_index] = dg.context.intType(64);
llvm_types_index += 1;
},
.sse, .sseup => {
llvm_types_buffer[llvm_types_index] = dg.context.doubleType();
llvm_types_index += 1;
},
.float => {
llvm_types_buffer[llvm_types_index] = dg.context.floatType();
llvm_types_index += 1;
},
.float_combine => {
llvm_types_buffer[llvm_types_index] = dg.context.floatType().vectorType(2);
llvm_types_index += 1;
},
.x87 => {
if (llvm_types_index != 0 or classes[2] != .none) {
return dg.context.voidType();
}
llvm_types_buffer[llvm_types_index] = dg.context.x86FP80Type();
llvm_types_index += 1;
},
.x87up => continue,
.complex_x87 => {
@panic("TODO");
},
.memory => unreachable, // handled above
.win_i128 => unreachable, // windows only
.none => break,
}
}
if (classes[0] == .integer and classes[1] == .none) {
const abi_size = fn_info.return_type.abiSize(target);
return dg.context.intType(@intCast(c_uint, abi_size * 8));
}
return dg.context.structType(&llvm_types_buffer, llvm_types_index, .False);
},
.windows => return lowerWin64FnRetTy(dg, fn_info),
else => return lowerSystemVFnRetTy(dg, fn_info),
},
.wasm32 => {
if (is_scalar) {
if (isScalar(fn_info.return_type)) {
return dg.lowerType(fn_info.return_type);
}
const classes = wasm_c_abi.classifyType(fn_info.return_type, target);
@@ -10569,6 +10510,8 @@ fn lowerFnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*llvm.Type {
else => return dg.lowerType(fn_info.return_type),
}
},
.Win64 => return lowerWin64FnRetTy(dg, fn_info),
.SysV => return lowerSystemVFnRetTy(dg, fn_info),
.Stdcall => {
if (isScalar(fn_info.return_type)) {
return dg.lowerType(fn_info.return_type);
@@ -10580,6 +10523,76 @@ fn lowerFnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*llvm.Type {
}
}
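/// Win64 x86_64 return-type lowering, shared by `callconv(.C)` on Windows
/// targets and the explicit `callconv(.Win64)` case.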
fn lowerWin64FnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*llvm.Type {
const target = dg.module.getTarget();
switch (x86_64_abi.classifyWindows(fn_info.return_type, target)) {
.integer => {
if (isScalar(fn_info.return_type)) {
return dg.lowerType(fn_info.return_type);
} else {
const abi_size = fn_info.return_type.abiSize(target);
return dg.context.intType(@intCast(c_uint, abi_size * 8));
}
},
.win_i128 => return dg.context.intType(64).vectorType(2),
.memory => return dg.context.voidType(),
.sse => return dg.lowerType(fn_info.return_type),
else => unreachable,
}
}
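/// SysV x86_64 return-type lowering, shared by `callconv(.C)` on non-Windows
/// x86_64 targets and the explicit `callconv(.SysV)` case.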
fn lowerSystemVFnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*llvm.Type {
if (isScalar(fn_info.return_type)) {
return dg.lowerType(fn_info.return_type);
}
const target = dg.module.getTarget();
const classes = x86_64_abi.classifySystemV(fn_info.return_type, target, .ret);
if (classes[0] == .memory) {
return dg.context.voidType();
}
var llvm_types_buffer: [8]*llvm.Type = undefined;
var llvm_types_index: u32 = 0;
for (classes) |class| {
switch (class) {
.integer => {
llvm_types_buffer[llvm_types_index] = dg.context.intType(64);
llvm_types_index += 1;
},
.sse, .sseup => {
llvm_types_buffer[llvm_types_index] = dg.context.doubleType();
llvm_types_index += 1;
},
.float => {
llvm_types_buffer[llvm_types_index] = dg.context.floatType();
llvm_types_index += 1;
},
.float_combine => {
llvm_types_buffer[llvm_types_index] = dg.context.floatType().vectorType(2);
llvm_types_index += 1;
},
.x87 => {
if (llvm_types_index != 0 or classes[2] != .none) {
return dg.context.voidType();
}
llvm_types_buffer[llvm_types_index] = dg.context.x86FP80Type();
llvm_types_index += 1;
},
.x87up => continue,
.complex_x87 => {
@panic("TODO");
},
.memory => unreachable, // handled above
.win_i128 => unreachable, // windows only
.none => break,
}
}
if (classes[0] == .integer and classes[1] == .none) {
const abi_size = fn_info.return_type.abiSize(target);
return dg.context.intType(@intCast(c_uint, abi_size * 8));
}
return dg.context.structType(&llvm_types_buffer, llvm_types_index, .False);
}
const ParamTypeIterator = struct {
dg: *DeclGen,
fn_info: Type.Payload.Function.Data,
@@ -10629,7 +10642,6 @@ const ParamTypeIterator = struct {
it.zig_index += 1;
return .no_bits;
}
const dg = it.dg;
switch (it.fn_info.cc) {
.Unspecified, .Inline => {
it.zig_index += 1;
@@ -10648,7 +10660,6 @@ const ParamTypeIterator = struct {
@panic("TODO implement async function lowering in the LLVM backend");
},
.C => {
const is_scalar = isScalar(ty);
switch (it.target.cpu.arch) {
.mips, .mipsel => {
it.zig_index += 1;
@@ -10656,99 +10667,13 @@ const ParamTypeIterator = struct {
return .byval;
},
.x86_64 => switch (it.target.os.tag) {
.windows => switch (x86_64_abi.classifyWindows(ty, it.target)) {
.integer => {
if (is_scalar) {
it.zig_index += 1;
it.llvm_index += 1;
return .byval;
} else {
it.zig_index += 1;
it.llvm_index += 1;
return .abi_sized_int;
}
},
.win_i128 => {
it.zig_index += 1;
it.llvm_index += 1;
return .byref;
},
.memory => {
it.zig_index += 1;
it.llvm_index += 1;
return .byref_mut;
},
.sse => {
it.zig_index += 1;
it.llvm_index += 1;
return .byval;
},
else => unreachable,
},
else => {
const classes = x86_64_abi.classifySystemV(ty, it.target, .arg);
if (classes[0] == .memory) {
it.zig_index += 1;
it.llvm_index += 1;
it.byval_attr = true;
return .byref;
}
if (is_scalar) {
it.zig_index += 1;
it.llvm_index += 1;
return .byval;
}
var llvm_types_buffer: [8]*llvm.Type = undefined;
var llvm_types_index: u32 = 0;
for (classes) |class| {
switch (class) {
.integer => {
llvm_types_buffer[llvm_types_index] = dg.context.intType(64);
llvm_types_index += 1;
},
.sse, .sseup => {
llvm_types_buffer[llvm_types_index] = dg.context.doubleType();
llvm_types_index += 1;
},
.float => {
llvm_types_buffer[llvm_types_index] = dg.context.floatType();
llvm_types_index += 1;
},
.float_combine => {
llvm_types_buffer[llvm_types_index] = dg.context.floatType().vectorType(2);
llvm_types_index += 1;
},
.x87 => {
it.zig_index += 1;
it.llvm_index += 1;
it.byval_attr = true;
return .byref;
},
.x87up => unreachable,
.complex_x87 => {
@panic("TODO");
},
.memory => unreachable, // handled above
.win_i128 => unreachable, // windows only
.none => break,
}
}
if (classes[0] == .integer and classes[1] == .none) {
it.zig_index += 1;
it.llvm_index += 1;
return .abi_sized_int;
}
it.llvm_types_buffer = llvm_types_buffer;
it.llvm_types_len = llvm_types_index;
it.llvm_index += llvm_types_index;
it.zig_index += 1;
return .multiple_llvm_types;
},
.windows => return it.nextWin64(ty),
else => return it.nextSystemV(ty),
},
.wasm32 => {
it.zig_index += 1;
it.llvm_index += 1;
if (is_scalar) {
if (isScalar(ty)) {
return .byval;
}
const classes = wasm_c_abi.classifyType(ty, it.target);
@@ -10766,7 +10691,7 @@ const ParamTypeIterator = struct {
.byval => return .byval,
.integer => {
it.llvm_types_len = 1;
it.llvm_types_buffer[0] = dg.context.intType(64);
it.llvm_types_buffer[0] = it.dg.context.intType(64);
return .multiple_llvm_types;
},
.double_integer => return Lowering{ .i64_array = 2 },
@@ -10806,6 +10731,8 @@ const ParamTypeIterator = struct {
},
}
},
.Win64 => return it.nextWin64(ty),
.SysV => return it.nextSystemV(ty),
.Stdcall => {
it.zig_index += 1;
it.llvm_index += 1;
@@ -10824,6 +10751,98 @@ const ParamTypeIterator = struct {
},
}
}
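/// Win64 x86_64 parameter lowering, shared by `callconv(.C)` on Windows
/// targets and the explicit `callconv(.Win64)` case.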
fn nextWin64(it: *ParamTypeIterator, ty: Type) ?Lowering {
switch (x86_64_abi.classifyWindows(ty, it.target)) {
.integer => {
if (isScalar(ty)) {
it.zig_index += 1;
it.llvm_index += 1;
return .byval;
} else {
it.zig_index += 1;
it.llvm_index += 1;
return .abi_sized_int;
}
},
.win_i128 => {
it.zig_index += 1;
it.llvm_index += 1;
return .byref;
},
.memory => {
it.zig_index += 1;
it.llvm_index += 1;
return .byref_mut;
},
.sse => {
it.zig_index += 1;
it.llvm_index += 1;
return .byval;
},
else => unreachable,
}
}
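/// SysV x86_64 parameter lowering, shared by `callconv(.C)` on non-Windows
/// x86_64 targets and the explicit `callconv(.SysV)` case.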
fn nextSystemV(it: *ParamTypeIterator, ty: Type) ?Lowering {
const classes = x86_64_abi.classifySystemV(ty, it.target, .arg);
if (classes[0] == .memory) {
it.zig_index += 1;
it.llvm_index += 1;
it.byval_attr = true;
return .byref;
}
if (isScalar(ty)) {
it.zig_index += 1;
it.llvm_index += 1;
return .byval;
}
var llvm_types_buffer: [8]*llvm.Type = undefined;
var llvm_types_index: u32 = 0;
for (classes) |class| {
switch (class) {
.integer => {
llvm_types_buffer[llvm_types_index] = it.dg.context.intType(64);
llvm_types_index += 1;
},
.sse, .sseup => {
llvm_types_buffer[llvm_types_index] = it.dg.context.doubleType();
llvm_types_index += 1;
},
.float => {
llvm_types_buffer[llvm_types_index] = it.dg.context.floatType();
llvm_types_index += 1;
},
.float_combine => {
llvm_types_buffer[llvm_types_index] = it.dg.context.floatType().vectorType(2);
llvm_types_index += 1;
},
.x87 => {
it.zig_index += 1;
it.llvm_index += 1;
it.byval_attr = true;
return .byref;
},
.x87up => unreachable,
.complex_x87 => {
@panic("TODO");
},
.memory => unreachable, // handled above
.win_i128 => unreachable, // windows only
.none => break,
}
}
if (classes[0] == .integer and classes[1] == .none) {
it.zig_index += 1;
it.llvm_index += 1;
return .abi_sized_int;
}
it.llvm_types_buffer = llvm_types_buffer;
it.llvm_types_len = llvm_types_index;
it.llvm_index += llvm_types_index;
it.zig_index += 1;
return .multiple_llvm_types;
}
};
fn iterateParamTypes(dg: *DeclGen, fn_info: Type.Payload.Function.Data) ParamTypeIterator {

src/type.zig

@@ -3789,6 +3789,39 @@ pub const Type = extern union {
}
}
/// Returns true if the type's layout is already resolved and it is safe
/// to use `abiSize`, `abiAlignment` and `bitSize` on it.
pub fn layoutIsResolved(ty: Type) bool {
switch (ty.zigTypeTag()) {
.Struct => {
if (ty.castTag(.@"struct")) |struct_ty| {
return struct_ty.data.haveLayout();
}
return true;
},
.Union => {
if (ty.cast(Payload.Union)) |union_ty| {
return union_ty.data.haveLayout();
}
return true;
},
.Array => {
if (ty.arrayLenIncludingSentinel() == 0) return true;
return ty.childType().layoutIsResolved();
},
.Optional => {
var buf: Type.Payload.ElemType = undefined;
const payload_ty = ty.optionalChild(&buf);
return payload_ty.layoutIsResolved();
},
.ErrorUnion => {
const payload_ty = ty.errorUnionPayload();
return payload_ty.layoutIsResolved();
},
else => return true,
}
}
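// A minimal usage sketch (assuming a `target` in scope), mirroring the
// pointer canonicalization further below:
//     if (ty.layoutIsResolved()) {
//         _ = ty.abiAlignment(target); // safe only once layout is known
//     }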
pub fn isSinglePointer(self: Type) bool {
return switch (self.tag()) {
.single_const_pointer,
@@ -5500,7 +5533,7 @@
}
const S = struct {
fn fieldWithRange(int_ty: Type, int_val: Value, end: usize, m: *Module) ?usize {
if (int_val.compareAllWithZero(.lt)) return null;
if (int_val.compareAllWithZero(.lt, m)) return null;
var end_payload: Value.Payload.U64 = .{
.base = .{ .tag = .int_u64 },
.data = end,
@@ -6498,12 +6531,7 @@
// pointee type needs to be resolved more, that needs to be done before calling
// this ptr() function.
if (d.@"align" != 0) canonicalize: {
if (d.pointee_type.castTag(.@"struct")) |struct_ty| {
if (!struct_ty.data.haveLayout()) break :canonicalize;
}
if (d.pointee_type.cast(Payload.Union)) |union_ty| {
if (!union_ty.data.haveLayout()) break :canonicalize;
}
if (!d.pointee_type.layoutIsResolved()) break :canonicalize;
if (d.@"align" == d.pointee_type.abiAlignment(target)) {
d.@"align" = 0;
}
@@ -6528,12 +6556,12 @@
if (!d.mutable and d.pointee_type.eql(Type.u8, mod)) {
switch (d.size) {
.Slice => {
if (sent.compareAllWithZero(.eq)) {
if (sent.compareAllWithZero(.eq, mod)) {
return Type.initTag(.const_slice_u8_sentinel_0);
}
},
.Many => {
if (sent.compareAllWithZero(.eq)) {
if (sent.compareAllWithZero(.eq, mod)) {
return Type.initTag(.manyptr_const_u8_sentinel_0);
}
},

src/value.zig

@@ -2076,13 +2076,22 @@ pub const Value = extern union {
/// For vectors, returns true if comparison is true for ALL elements.
///
/// Note that `!compareAllWithZero(.eq, ...)` is not necessarily the same as
/// `compareAllWithZero(.neq, ...)`.
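/// e.g. for a vector of `.{ 0, 1 }`, `compareAllWithZero(.eq)` is false (the
/// 1 is nonzero) and `compareAllWithZero(.neq)` is also false (the 0 is
/// zero), so negating one does not yield the other.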
pub fn compareAllWithZero(lhs: Value, op: std.math.CompareOperator) bool {
return compareAllWithZeroAdvanced(lhs, op, null) catch unreachable;
pub fn compareAllWithZero(lhs: Value, op: std.math.CompareOperator, mod: *Module) bool {
return compareAllWithZeroAdvancedExtra(lhs, op, mod, null) catch unreachable;
}
pub fn compareAllWithZeroAdvanced(
lhs: Value,
op: std.math.CompareOperator,
sema: *Sema,
) Module.CompileError!bool {
return compareAllWithZeroAdvancedExtra(lhs, op, sema.mod, sema);
}
pub fn compareAllWithZeroAdvancedExtra(
lhs: Value,
op: std.math.CompareOperator,
mod: *Module,
opt_sema: ?*Sema,
) Module.CompileError!bool {
if (lhs.isInf()) {
@@ -2095,10 +2104,25 @@
}
switch (lhs.tag()) {
.repeated => return lhs.castTag(.repeated).?.data.compareAllWithZeroAdvanced(op, opt_sema),
.repeated => return lhs.castTag(.repeated).?.data.compareAllWithZeroAdvancedExtra(op, mod, opt_sema),
.aggregate => {
for (lhs.castTag(.aggregate).?.data) |elem_val| {
if (!(try elem_val.compareAllWithZeroAdvanced(op, opt_sema))) return false;
if (!(try elem_val.compareAllWithZeroAdvancedExtra(op, mod, opt_sema))) return false;
}
return true;
},
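// String-literal-backed values read their bytes from
// `mod.string_literal_bytes`, which is why `mod` is now threaded through.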
.str_lit => {
const str_lit = lhs.castTag(.str_lit).?.data;
const bytes = mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len];
for (bytes) |byte| {
if (!std.math.compare(byte, op, 0)) return false;
}
return true;
},
.bytes => {
const bytes = lhs.castTag(.bytes).?.data;
for (bytes) |byte| {
if (!std.math.compare(byte, op, 0)) return false;
}
return true;
},
@@ -3103,7 +3127,7 @@
.int_i64,
.int_big_positive,
.int_big_negative,
=> compareAllWithZero(self, .eq),
=> self.orderAgainstZero().compare(.eq),
.undef => unreachable,
.unreachable_value => unreachable,

test/behavior.zig

@@ -106,7 +106,6 @@ test {
_ = @import("behavior/bugs/12430.zig");
_ = @import("behavior/bugs/12450.zig");
_ = @import("behavior/bugs/12486.zig");
_ = @import("behavior/bugs/12488.zig");
_ = @import("behavior/bugs/12498.zig");
_ = @import("behavior/bugs/12551.zig");
_ = @import("behavior/bugs/12571.zig");

test/behavior/bugs/12488.zig (deleted)

@@ -1,13 +0,0 @@
const expect = @import("std").testing.expect;
const A = struct {
a: u32,
};
fn foo(comptime a: anytype) !void {
try expect(a[0][0] == @sizeOf(A));
}
test {
try foo(.{[_]usize{@sizeOf(A)}});
}

test/behavior/ (exact file name not shown)

@@ -517,3 +517,26 @@ test "peer type resolution of inferred error set with non-void payload" {
};
try expect(try S.openDataFile(.read) == 1);
}
test "lazy values passed to anytype parameter" {
const A = struct {
a: u32,
fn foo(comptime a: anytype) !void {
try expect(a[0][0] == @sizeOf(@This()));
}
};
try A.foo(.{[_]usize{@sizeOf(A)}});
const B = struct {
fn foo(comptime a: anytype) !void {
try expect(a.x == 0);
}
};
try B.foo(.{ .x = @sizeOf(B) });
const C = struct {};
try expect(@truncate(u32, @sizeOf(C)) == 0);
const D = struct {};
try expect(@sizeOf(D) << 1 == 0);
}

test/behavior/align.zig

@@ -532,3 +532,18 @@ test "pointer alignment and element type include call expression" {
};
try expect(@alignOf(S.P) > 0);
}
test "pointer to array has explicit alignment" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
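// Likely exercises the new `Type.layoutIsResolved`: canonicalizing the
// pointer's explicit alignment needs the array element type's layout.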
const S = struct {
const Base = extern struct { a: u8 };
const Base2 = extern struct { a: u8 };
fn func(ptr: *[4]Base) *align(1) [4]Base2 {
return @alignCast(1, @ptrCast(*[4]Base2, ptr));
}
};
var bases = [_]S.Base{.{ .a = 2 }} ** 4;
const casted = S.func(&bases);
try expect(casted[0].a == 2);
}

test/behavior/struct.zig

@@ -1573,3 +1573,8 @@ test "struct fields get automatically reordered" {
};
try expect(@sizeOf(S1) == @sizeOf(S2));
}
test "directly initiating tuple like struct" {
const a = struct { u8 }{8};
try expect(a[0] == 8);
}

test/behavior/vector.zig

@@ -1286,3 +1286,14 @@ test "store to vector in slice" {
s[i] = s[0];
try expectEqual(v[1], v[0]);
}
test "addition of vectors represented as strings" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
const V = @Vector(3, u8);
const foo: V = "foo".*;
const bar: V = @typeName(u32).*;
try expectEqual(V{ 219, 162, 161 }, foo + bar);
}

test/c_abi/cfuncs.c

@@ -1015,3 +1015,15 @@ void __attribute__((stdcall)) stdcall_big_union(union BigUnion x) {
assert_or_panic(x.a.c == 3);
assert_or_panic(x.a.d == 4);
}
#ifdef __x86_64__
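/* Companion functions for the new callconv(.Win64) / callconv(.SysV)
   tests in test/c_abi/main.zig. */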
struct ByRef __attribute__((ms_abi)) c_explict_win64(struct ByRef in) {
in.val = 42;
return in;
}
struct ByRef __attribute__((sysv_abi)) c_explict_sys_v(struct ByRef in) {
in.val = 42;
return in;
}
#endif

test/c_abi/main.zig

@@ -1190,3 +1190,19 @@ test "Stdcall ABI big union" {
};
stdcall_big_union(x);
}
extern fn c_explict_win64(ByRef) callconv(.Win64) ByRef;
test "explicit SysV calling convention" {
if (builtin.cpu.arch != .x86_64) return error.SkipZigTest;
const res = c_explict_win64(.{ .val = 1, .arr = undefined });
try expect(res.val == 42);
}
extern fn c_explict_sys_v(ByRef) callconv(.SysV) ByRef;
test "explicit Win64 calling convention" {
if (builtin.cpu.arch != .x86_64) return error.SkipZigTest;
const res = c_explict_sys_v(.{ .val = 1, .arr = undefined });
try expect(res.val == 42);
}

test/cases/compile_errors/ (new test case)

@@ -0,0 +1,9 @@
comptime {
_ = @TypeOf(.{}).is_optional;
}
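// Covers the `getOwnerDeclOrNull` guard added to `failWithBadMemberAccess`:
// `@TypeOf(.{})` is an anonymous struct with no owner decl.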
// error
// backend=stage2
// target=native
//
// :2:21: error: struct '@TypeOf(.{})' has no member named 'is_optional'