Legalize: implement scalarization of binary operations

This commit is contained in:
Jacob Young 2025-05-30 00:22:45 -04:00
parent c1e9ef9eaa
commit b483defc5a
39 changed files with 1388 additions and 839 deletions

View File

@ -11194,6 +11194,7 @@ fn rvalueInner(
const as_void = @as(u64, @intFromEnum(Zir.Inst.Ref.void_type)) << 32;
const as_comptime_int = @as(u64, @intFromEnum(Zir.Inst.Ref.comptime_int_type)) << 32;
const as_usize = @as(u64, @intFromEnum(Zir.Inst.Ref.usize_type)) << 32;
const as_u1 = @as(u64, @intFromEnum(Zir.Inst.Ref.u1_type)) << 32;
const as_u8 = @as(u64, @intFromEnum(Zir.Inst.Ref.u8_type)) << 32;
switch ((@as(u64, @intFromEnum(ty_inst)) << 32) | @as(u64, @intFromEnum(result))) {
as_ty | @intFromEnum(Zir.Inst.Ref.u1_type),
@ -11237,10 +11238,11 @@ fn rvalueInner(
as_ty | @intFromEnum(Zir.Inst.Ref.null_type),
as_ty | @intFromEnum(Zir.Inst.Ref.undefined_type),
as_ty | @intFromEnum(Zir.Inst.Ref.enum_literal_type),
as_ty | @intFromEnum(Zir.Inst.Ref.ptr_usize_type),
as_ty | @intFromEnum(Zir.Inst.Ref.ptr_const_comptime_int_type),
as_ty | @intFromEnum(Zir.Inst.Ref.manyptr_u8_type),
as_ty | @intFromEnum(Zir.Inst.Ref.manyptr_const_u8_type),
as_ty | @intFromEnum(Zir.Inst.Ref.manyptr_const_u8_sentinel_0_type),
as_ty | @intFromEnum(Zir.Inst.Ref.single_const_pointer_to_comptime_int_type),
as_ty | @intFromEnum(Zir.Inst.Ref.slice_const_u8_type),
as_ty | @intFromEnum(Zir.Inst.Ref.slice_const_u8_sentinel_0_type),
as_ty | @intFromEnum(Zir.Inst.Ref.anyerror_void_error_union_type),
@ -11249,27 +11251,45 @@ fn rvalueInner(
as_comptime_int | @intFromEnum(Zir.Inst.Ref.zero),
as_comptime_int | @intFromEnum(Zir.Inst.Ref.one),
as_comptime_int | @intFromEnum(Zir.Inst.Ref.negative_one),
as_usize | @intFromEnum(Zir.Inst.Ref.undef_usize),
as_usize | @intFromEnum(Zir.Inst.Ref.zero_usize),
as_usize | @intFromEnum(Zir.Inst.Ref.one_usize),
as_u1 | @intFromEnum(Zir.Inst.Ref.undef_u1),
as_u1 | @intFromEnum(Zir.Inst.Ref.zero_u1),
as_u1 | @intFromEnum(Zir.Inst.Ref.one_u1),
as_u8 | @intFromEnum(Zir.Inst.Ref.zero_u8),
as_u8 | @intFromEnum(Zir.Inst.Ref.one_u8),
as_u8 | @intFromEnum(Zir.Inst.Ref.four_u8),
as_bool | @intFromEnum(Zir.Inst.Ref.undef_bool),
as_bool | @intFromEnum(Zir.Inst.Ref.bool_true),
as_bool | @intFromEnum(Zir.Inst.Ref.bool_false),
as_void | @intFromEnum(Zir.Inst.Ref.void_value),
=> return result, // type of result is already correct
as_bool | @intFromEnum(Zir.Inst.Ref.undef) => return .undef_bool,
as_usize | @intFromEnum(Zir.Inst.Ref.undef) => return .undef_usize,
as_usize | @intFromEnum(Zir.Inst.Ref.undef_u1) => return .undef_usize,
as_u1 | @intFromEnum(Zir.Inst.Ref.undef) => return .undef_u1,
as_usize | @intFromEnum(Zir.Inst.Ref.zero) => return .zero_usize,
as_u1 | @intFromEnum(Zir.Inst.Ref.zero) => return .zero_u1,
as_u8 | @intFromEnum(Zir.Inst.Ref.zero) => return .zero_u8,
as_usize | @intFromEnum(Zir.Inst.Ref.one) => return .one_usize,
as_u1 | @intFromEnum(Zir.Inst.Ref.one) => return .one_u1,
as_u8 | @intFromEnum(Zir.Inst.Ref.one) => return .one_u8,
as_comptime_int | @intFromEnum(Zir.Inst.Ref.zero_usize) => return .zero,
as_u1 | @intFromEnum(Zir.Inst.Ref.zero_usize) => return .zero_u1,
as_u8 | @intFromEnum(Zir.Inst.Ref.zero_usize) => return .zero_u8,
as_comptime_int | @intFromEnum(Zir.Inst.Ref.one_usize) => return .one,
as_u1 | @intFromEnum(Zir.Inst.Ref.one_usize) => return .one_u1,
as_u8 | @intFromEnum(Zir.Inst.Ref.one_usize) => return .one_u8,
as_comptime_int | @intFromEnum(Zir.Inst.Ref.zero_u1) => return .zero,
as_comptime_int | @intFromEnum(Zir.Inst.Ref.zero_u8) => return .zero,
as_usize | @intFromEnum(Zir.Inst.Ref.zero_u1) => return .zero_usize,
as_usize | @intFromEnum(Zir.Inst.Ref.zero_u8) => return .zero_usize,
as_comptime_int | @intFromEnum(Zir.Inst.Ref.one_u1) => return .one,
as_comptime_int | @intFromEnum(Zir.Inst.Ref.one_u8) => return .one,
as_usize | @intFromEnum(Zir.Inst.Ref.one_u1) => return .one_usize,
as_usize | @intFromEnum(Zir.Inst.Ref.one_u8) => return .one_usize,
// Need an explicit type coercion instruction.

View File

@ -2142,7 +2142,7 @@ pub const Inst = struct {
ref_start_index = static_len,
_,
pub const static_len = 118;
pub const static_len = 124;
pub fn toRef(i: Index) Inst.Ref {
return @enumFromInt(@intFromEnum(Index.ref_start_index) + @intFromEnum(i));
@ -2220,10 +2220,11 @@ pub const Inst = struct {
null_type,
undefined_type,
enum_literal_type,
ptr_usize_type,
ptr_const_comptime_int_type,
manyptr_u8_type,
manyptr_const_u8_type,
manyptr_const_u8_sentinel_0_type,
single_const_pointer_to_comptime_int_type,
slice_const_u8_type,
slice_const_u8_sentinel_0_type,
vector_8_i8_type,
@ -2279,11 +2280,16 @@ pub const Inst = struct {
generic_poison_type,
empty_tuple_type,
undef,
undef_bool,
undef_usize,
undef_u1,
zero,
zero_usize,
zero_u1,
zero_u8,
one,
one_usize,
one_u1,
one_u8,
four_u8,
negative_one,

View File

@ -1011,10 +1011,11 @@ pub const Inst = struct {
null_type = @intFromEnum(InternPool.Index.null_type),
undefined_type = @intFromEnum(InternPool.Index.undefined_type),
enum_literal_type = @intFromEnum(InternPool.Index.enum_literal_type),
ptr_usize_type = @intFromEnum(InternPool.Index.ptr_usize_type),
ptr_const_comptime_int_type = @intFromEnum(InternPool.Index.ptr_const_comptime_int_type),
manyptr_u8_type = @intFromEnum(InternPool.Index.manyptr_u8_type),
manyptr_const_u8_type = @intFromEnum(InternPool.Index.manyptr_const_u8_type),
manyptr_const_u8_sentinel_0_type = @intFromEnum(InternPool.Index.manyptr_const_u8_sentinel_0_type),
single_const_pointer_to_comptime_int_type = @intFromEnum(InternPool.Index.single_const_pointer_to_comptime_int_type),
slice_const_u8_type = @intFromEnum(InternPool.Index.slice_const_u8_type),
slice_const_u8_sentinel_0_type = @intFromEnum(InternPool.Index.slice_const_u8_sentinel_0_type),
vector_8_i8_type = @intFromEnum(InternPool.Index.vector_8_i8_type),
@ -1070,11 +1071,16 @@ pub const Inst = struct {
generic_poison_type = @intFromEnum(InternPool.Index.generic_poison_type),
empty_tuple_type = @intFromEnum(InternPool.Index.empty_tuple_type),
undef = @intFromEnum(InternPool.Index.undef),
undef_bool = @intFromEnum(InternPool.Index.undef_bool),
undef_usize = @intFromEnum(InternPool.Index.undef_usize),
undef_u1 = @intFromEnum(InternPool.Index.undef_u1),
zero = @intFromEnum(InternPool.Index.zero),
zero_usize = @intFromEnum(InternPool.Index.zero_usize),
zero_u1 = @intFromEnum(InternPool.Index.zero_u1),
zero_u8 = @intFromEnum(InternPool.Index.zero_u8),
one = @intFromEnum(InternPool.Index.one),
one_usize = @intFromEnum(InternPool.Index.one_usize),
one_u1 = @intFromEnum(InternPool.Index.one_u1),
one_u8 = @intFromEnum(InternPool.Index.one_u8),
four_u8 = @intFromEnum(InternPool.Index.four_u8),
negative_one = @intFromEnum(InternPool.Index.negative_one),
@ -1121,7 +1127,7 @@ pub const Inst = struct {
}
pub fn toType(ref: Ref) Type {
return Type.fromInterned(ref.toInterned().?);
return .fromInterned(ref.toInterned().?);
}
};
@ -1393,7 +1399,7 @@ pub fn getMainBody(air: Air) []const Air.Inst.Index {
pub fn typeOf(air: *const Air, inst: Air.Inst.Ref, ip: *const InternPool) Type {
if (inst.toInterned()) |ip_index| {
return Type.fromInterned(ip.typeOf(ip_index));
return .fromInterned(ip.typeOf(ip_index));
} else {
return air.typeOfIndex(inst.toIndex().?, ip);
}
@ -1483,7 +1489,7 @@ pub fn typeOfIndex(air: *const Air, inst: Air.Inst.Index, ip: *const InternPool)
.is_non_err_ptr,
.is_named_enum_value,
.error_set_has_value,
=> return Type.bool,
=> return .bool,
.alloc,
.ret_ptr,
@ -1574,7 +1580,7 @@ pub fn typeOfIndex(air: *const Air, inst: Air.Inst.Index, ip: *const InternPool)
.ret_load,
.unreach,
.trap,
=> return Type.noreturn,
=> return .noreturn,
.breakpoint,
.dbg_stmt,
@ -1597,22 +1603,22 @@ pub fn typeOfIndex(air: *const Air, inst: Air.Inst.Index, ip: *const InternPool)
.set_err_return_trace,
.vector_store_elem,
.c_va_end,
=> return Type.void,
=> return .void,
.slice_len,
.ret_addr,
.frame_addr,
.save_err_return_trace_index,
=> return Type.usize,
=> return .usize,
.wasm_memory_grow => return Type.isize,
.wasm_memory_size => return Type.usize,
.wasm_memory_grow => return .isize,
.wasm_memory_size => return .usize,
.tag_name, .error_name => return Type.slice_const_u8_sentinel_0,
.tag_name, .error_name => return .slice_const_u8_sentinel_0,
.call, .call_always_tail, .call_never_tail, .call_never_inline => {
const callee_ty = air.typeOf(datas[@intFromEnum(inst)].pl_op.operand, ip);
return Type.fromInterned(ip.funcTypeReturnType(callee_ty.toIntern()));
return .fromInterned(ip.funcTypeReturnType(callee_ty.toIntern()));
},
.slice_elem_val, .ptr_elem_val, .array_elem_val => {
@ -1630,7 +1636,7 @@ pub fn typeOfIndex(air: *const Air, inst: Air.Inst.Index, ip: *const InternPool)
.reduce, .reduce_optimized => {
const operand_ty = air.typeOf(datas[@intFromEnum(inst)].reduce.operand, ip);
return Type.fromInterned(ip.indexToKey(operand_ty.ip_index).vector_type.child);
return .fromInterned(ip.indexToKey(operand_ty.ip_index).vector_type.child);
},
.mul_add => return air.typeOf(datas[@intFromEnum(inst)].pl_op.operand, ip),
@ -1641,7 +1647,7 @@ pub fn typeOfIndex(air: *const Air, inst: Air.Inst.Index, ip: *const InternPool)
.@"try", .try_cold => {
const err_union_ty = air.typeOf(datas[@intFromEnum(inst)].pl_op.operand, ip);
return Type.fromInterned(ip.indexToKey(err_union_ty.ip_index).error_union_type.payload_type);
return .fromInterned(ip.indexToKey(err_union_ty.ip_index).error_union_type.payload_type);
},
.tlv_dllimport_ptr => return .fromInterned(datas[@intFromEnum(inst)].ty_nav.ty),
@ -1649,7 +1655,7 @@ pub fn typeOfIndex(air: *const Air, inst: Air.Inst.Index, ip: *const InternPool)
.work_item_id,
.work_group_size,
.work_group_id,
=> return Type.u32,
=> return .u32,
.inferred_alloc => unreachable,
.inferred_alloc_comptime => unreachable,
@ -1696,7 +1702,7 @@ pub fn internedToRef(ip_index: InternPool.Index) Inst.Ref {
/// Returns `null` if runtime-known.
pub fn value(air: Air, inst: Inst.Ref, pt: Zcu.PerThread) !?Value {
if (inst.toInterned()) |ip_index| {
return Value.fromInterned(ip_index);
return .fromInterned(ip_index);
}
const index = inst.toIndex().?;
return air.typeOfIndex(index, &pt.zcu.intern_pool).onePossibleValue(pt);

View File

@ -4,6 +4,43 @@ air_extra: std.ArrayListUnmanaged(u32),
features: *const Features,
pub const Feature = enum {
scalarize_add,
scalarize_add_safe,
scalarize_add_optimized,
scalarize_add_wrap,
scalarize_add_sat,
scalarize_sub,
scalarize_sub_safe,
scalarize_sub_optimized,
scalarize_sub_wrap,
scalarize_sub_sat,
scalarize_mul,
scalarize_mul_safe,
scalarize_mul_optimized,
scalarize_mul_wrap,
scalarize_mul_sat,
scalarize_div_float,
scalarize_div_float_optimized,
scalarize_div_trunc,
scalarize_div_trunc_optimized,
scalarize_div_floor,
scalarize_div_floor_optimized,
scalarize_div_exact,
scalarize_div_exact_optimized,
scalarize_rem,
scalarize_rem_optimized,
scalarize_mod,
scalarize_mod_optimized,
scalarize_max,
scalarize_min,
scalarize_bit_and,
scalarize_bit_or,
scalarize_shr,
scalarize_shr_exact,
scalarize_shl,
scalarize_shl_exact,
scalarize_shl_sat,
scalarize_xor,
scalarize_not,
scalarize_clz,
scalarize_ctz,
@ -26,41 +63,110 @@ pub const Feature = enum {
scalarize_trunc_float,
scalarize_neg,
scalarize_neg_optimized,
scalarize_cmp_vector,
scalarize_cmp_vector_optimized,
scalarize_fptrunc,
scalarize_fpext,
scalarize_intcast,
scalarize_intcast_safe,
scalarize_trunc,
scalarize_int_from_float,
scalarize_int_from_float_optimized,
scalarize_float_from_int,
scalarize_mul_add,
/// Legalize (shift lhs, (splat rhs)) -> (shift lhs, rhs)
remove_shift_vector_rhs_splat,
/// Legalize reduce of a one element vector to a bitcast
reduce_one_elem_to_bitcast,
fn scalarize(tag: Air.Inst.Tag) Feature {
return switch (tag) {
else => unreachable,
.add => .scalarize_add,
.add_safe => .scalarize_add_safe,
.add_optimized => .scalarize_add_optimized,
.add_wrap => .scalarize_add_wrap,
.add_sat => .scalarize_add_sat,
.sub => .scalarize_sub,
.sub_safe => .scalarize_sub_safe,
.sub_optimized => .scalarize_sub_optimized,
.sub_wrap => .scalarize_sub_wrap,
.sub_sat => .scalarize_sub_sat,
.mul => .scalarize_mul,
.mul_safe => .scalarize_mul_safe,
.mul_optimized => .scalarize_mul_optimized,
.mul_wrap => .scalarize_mul_wrap,
.mul_sat => .scalarize_mul_sat,
.div_float => .scalarize_div_float,
.div_float_optimized => .scalarize_div_float_optimized,
.div_trunc => .scalarize_div_trunc,
.div_trunc_optimized => .scalarize_div_trunc_optimized,
.div_floor => .scalarize_div_floor,
.div_floor_optimized => .scalarize_div_floor_optimized,
.div_exact => .scalarize_div_exact,
.div_exact_optimized => .scalarize_div_exact_optimized,
.rem => .scalarize_rem,
.rem_optimized => .scalarize_rem_optimized,
.mod => .scalarize_mod,
.mod_optimized => .scalarize_mod_optimized,
.max => .scalarize_max,
.min => .scalarize_min,
.bit_and => .scalarize_bit_and,
.bit_or => .scalarize_bit_or,
.shr => .scalarize_shr,
.shr_exact => .scalarize_shr_exact,
.shl => .scalarize_shl,
.shl_exact => .scalarize_shl_exact,
.shl_sat => .scalarize_shl_sat,
.xor => .scalarize_xor,
.not => .scalarize_not,
.clz => .scalarize_clz,
.ctz => .scalarize_ctz,
.popcount => .scalarize_popcount,
.byte_swap => .scalarize_byte_swap,
.bit_reverse => .scalarize_bit_reverse,
.sqrt => .scalarize_sqrt,
.sin => .scalarize_sin,
.cos => .scalarize_cos,
.tan => .scalarize_tan,
.exp => .scalarize_exp,
.exp2 => .scalarize_exp2,
.log => .scalarize_log,
.log2 => .scalarize_log2,
.log10 => .scalarize_log10,
.abs => .scalarize_abs,
.floor => .scalarize_floor,
.ceil => .scalarize_ceil,
.round => .scalarize_round,
.trunc_float => .scalarize_trunc_float,
.neg => .scalarize_neg,
.neg_optimized => .scalarize_neg_optimized,
.cmp_vector => .scalarize_cmp_vector,
.cmp_vector_optimized => .scalarize_cmp_vector_optimized,
.fptrunc => .scalarize_fptrunc,
.fpext => .scalarize_fpext,
.intcast => .scalarize_intcast,
.intcast_safe => .scalarize_intcast_safe,
.trunc => .scalarize_trunc,
.int_from_float => .scalarize_int_from_float,
.int_from_float_optimized => .scalarize_int_from_float_optimized,
.float_from_int => .scalarize_float_from_int,
.mul_add => .scalarize_mul_add,
};
}
};
pub const Features = std.enums.EnumSet(Feature);
pub const Error = std.mem.Allocator.Error;
pub fn legalize(air: *Air, backend: std.builtin.CompilerBackend, pt: Zcu.PerThread) Error!void {
pub fn legalize(air: *Air, pt: Zcu.PerThread, features: *const Features) Error!void {
var l: Legalize = .{
.pt = pt,
.air_instructions = air.instructions.toMultiArrayList(),
.air_extra = air.extra,
.features = &features: switch (backend) {
.other, .stage1 => unreachable,
inline .stage2_llvm,
.stage2_c,
.stage2_wasm,
.stage2_arm,
.stage2_x86_64,
.stage2_aarch64,
.stage2_x86,
.stage2_riscv64,
.stage2_sparc64,
.stage2_spirv64,
.stage2_powerpc,
=> |ct_backend| {
const Backend = codegen.importBackend(ct_backend) orelse break :features .initEmpty();
break :features if (@hasDecl(Backend, "legalize_features")) Backend.legalize_features else .initEmpty();
},
_ => unreachable,
},
.features = features,
};
if (l.features.bits.eql(.initEmpty())) return;
defer air.* = l.getTmpAir();
@ -90,11 +196,93 @@ fn extraData(l: *const Legalize, comptime T: type, index: usize) @TypeOf(Air.ext
fn legalizeBody(l: *Legalize, body_start: usize, body_len: usize) Error!void {
const zcu = l.pt.zcu;
const ip = &zcu.intern_pool;
for (body_start..body_start + body_len) |inst_extra_index| {
const inst: Air.Inst.Index = @enumFromInt(l.air_extra.items[inst_extra_index]);
for (0..body_len) |body_index| {
const inst: Air.Inst.Index = @enumFromInt(l.air_extra.items[body_start + body_index]);
inst: switch (l.air_instructions.items(.tag)[@intFromEnum(inst)]) {
else => {},
.arg,
=> {},
inline .add,
.add_safe,
.add_optimized,
.add_wrap,
.add_sat,
.sub,
.sub_safe,
.sub_optimized,
.sub_wrap,
.sub_sat,
.mul,
.mul_safe,
.mul_optimized,
.mul_wrap,
.mul_sat,
.div_float,
.div_float_optimized,
.div_trunc,
.div_trunc_optimized,
.div_floor,
.div_floor_optimized,
.div_exact,
.div_exact_optimized,
.rem,
.rem_optimized,
.mod,
.mod_optimized,
.max,
.min,
.bit_and,
.bit_or,
.xor,
=> |air_tag| if (l.features.contains(comptime .scalarize(air_tag))) {
const bin_op = l.air_instructions.items(.data)[@intFromEnum(inst)].bin_op;
if (l.typeOf(bin_op.lhs).isVector(zcu)) continue :inst try l.scalarize(inst, .bin_op);
},
.ptr_add,
.ptr_sub,
.add_with_overflow,
.sub_with_overflow,
.mul_with_overflow,
.shl_with_overflow,
.alloc,
=> {},
.inferred_alloc,
.inferred_alloc_comptime,
=> unreachable,
.ret_ptr,
.assembly,
=> {},
inline .shr,
.shr_exact,
.shl,
.shl_exact,
.shl_sat,
=> |air_tag| done: {
const bin_op = l.air_instructions.items(.data)[@intFromEnum(inst)].bin_op;
if (!l.typeOf(bin_op.rhs).isVector(zcu)) break :done;
if (l.features.contains(comptime .scalarize(air_tag))) {
continue :inst try l.scalarize(inst, .bin_op);
} else if (l.features.contains(.remove_shift_vector_rhs_splat)) {
if (bin_op.rhs.toInterned()) |rhs_ip_index| switch (ip.indexToKey(rhs_ip_index)) {
else => {},
.aggregate => |aggregate| switch (aggregate.storage) {
else => {},
.repeated_elem => |splat| continue :inst l.replaceInst(inst, air_tag, .{ .bin_op = .{
.lhs = bin_op.lhs,
.rhs = Air.internedToRef(splat),
} }),
},
} else {
const rhs_inst = bin_op.rhs.toIndex().?;
switch (l.air_instructions.items(.tag)[@intFromEnum(rhs_inst)]) {
else => {},
.splat => continue :inst l.replaceInst(inst, air_tag, .{ .bin_op = .{
.lhs = bin_op.lhs,
.rhs = l.air_instructions.items(.data)[@intFromEnum(rhs_inst)].ty_op.operand,
} }),
}
}
}
},
inline .not,
.clz,
.ctz,
@ -102,11 +290,38 @@ fn legalizeBody(l: *Legalize, body_start: usize, body_len: usize) Error!void {
.byte_swap,
.bit_reverse,
.abs,
=> |air_tag| if (l.features.contains(@field(Feature, "scalarize_" ++ @tagName(air_tag)))) done: {
.fptrunc,
.fpext,
.intcast,
.intcast_safe,
.trunc,
.int_from_float,
.int_from_float_optimized,
.float_from_int,
=> |air_tag| if (l.features.contains(comptime .scalarize(air_tag))) {
const ty_op = l.air_instructions.items(.data)[@intFromEnum(inst)].ty_op;
if (!ty_op.ty.toType().isVector(zcu)) break :done;
continue :inst try l.scalarizeUnary(inst, .ty_op, ty_op.operand);
if (ty_op.ty.toType().isVector(zcu)) continue :inst try l.scalarize(inst, .ty_op);
},
.bitcast,
=> {},
.block,
.loop,
=> {
const ty_pl = l.air_instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = l.extraData(Air.Block, ty_pl.payload);
try l.legalizeBody(extra.end, extra.data.body_len);
},
.repeat,
.br,
.trap,
.breakpoint,
.ret_addr,
.frame_addr,
.call,
.call_always_tail,
.call_never_tail,
.call_never_inline,
=> {},
inline .sqrt,
.sin,
.cos,
@ -122,42 +337,128 @@ fn legalizeBody(l: *Legalize, body_start: usize, body_len: usize) Error!void {
.trunc_float,
.neg,
.neg_optimized,
=> |air_tag| if (l.features.contains(@field(Feature, "scalarize_" ++ @tagName(air_tag)))) done: {
=> |air_tag| if (l.features.contains(comptime .scalarize(air_tag))) {
const un_op = l.air_instructions.items(.data)[@intFromEnum(inst)].un_op;
if (!l.typeOf(un_op).isVector(zcu)) break :done;
continue :inst try l.scalarizeUnary(inst, .un_op, un_op);
if (l.typeOf(un_op).isVector(zcu)) continue :inst try l.scalarize(inst, .un_op);
},
.shl,
.shl_exact,
.shl_sat,
.shr,
.shr_exact,
=> |air_tag| if (l.features.contains(.remove_shift_vector_rhs_splat)) done: {
const bin_op = l.air_instructions.items(.data)[@intFromEnum(inst)].bin_op;
const ty = l.typeOf(bin_op.rhs);
if (!ty.isVector(zcu)) break :done;
if (bin_op.rhs.toInterned()) |rhs_ip_index| switch (ip.indexToKey(rhs_ip_index)) {
else => {},
.aggregate => |aggregate| switch (aggregate.storage) {
else => {},
.repeated_elem => |splat| continue :inst l.replaceInst(inst, air_tag, .{ .bin_op = .{
.lhs = bin_op.lhs,
.rhs = Air.internedToRef(splat),
} }),
},
} else {
const rhs_inst = bin_op.rhs.toIndex().?;
switch (l.air_instructions.items(.tag)[@intFromEnum(rhs_inst)]) {
else => {},
.splat => continue :inst l.replaceInst(inst, air_tag, .{ .bin_op = .{
.lhs = bin_op.lhs,
.rhs = l.air_instructions.items(.data)[@intFromEnum(rhs_inst)].ty_op.operand,
} }),
}
.cmp_lt,
.cmp_lt_optimized,
.cmp_lte,
.cmp_lte_optimized,
.cmp_eq,
.cmp_eq_optimized,
.cmp_gte,
.cmp_gte_optimized,
.cmp_gt,
.cmp_gt_optimized,
.cmp_neq,
.cmp_neq_optimized,
=> {},
inline .cmp_vector,
.cmp_vector_optimized,
=> |air_tag| if (l.features.contains(comptime .scalarize(air_tag))) {
const ty_pl = l.air_instructions.items(.data)[@intFromEnum(inst)].ty_pl;
if (ty_pl.ty.toType().isVector(zcu)) continue :inst try l.scalarize(inst, .ty_pl_vector_cmp);
},
.cond_br,
=> {
const pl_op = l.air_instructions.items(.data)[@intFromEnum(inst)].pl_op;
const extra = l.extraData(Air.CondBr, pl_op.payload);
try l.legalizeBody(extra.end, extra.data.then_body_len);
try l.legalizeBody(extra.end + extra.data.then_body_len, extra.data.else_body_len);
},
.switch_br,
.loop_switch_br,
=> {
const pl_op = l.air_instructions.items(.data)[@intFromEnum(inst)].pl_op;
const extra = l.extraData(Air.SwitchBr, pl_op.payload);
const hint_bag_count = std.math.divCeil(usize, extra.data.cases_len + 1, 10) catch unreachable;
var extra_index = extra.end + hint_bag_count;
for (0..extra.data.cases_len) |_| {
const case_extra = l.extraData(Air.SwitchBr.Case, extra_index);
const case_body_start = case_extra.end + case_extra.data.items_len + case_extra.data.ranges_len * 2;
try l.legalizeBody(case_body_start, case_extra.data.body_len);
extra_index = case_body_start + case_extra.data.body_len;
}
try l.legalizeBody(extra_index, extra.data.else_body_len);
},
.switch_dispatch,
=> {},
.@"try",
.try_cold,
=> {
const pl_op = l.air_instructions.items(.data)[@intFromEnum(inst)].pl_op;
const extra = l.extraData(Air.Try, pl_op.payload);
try l.legalizeBody(extra.end, extra.data.body_len);
},
.try_ptr,
.try_ptr_cold,
=> {
const ty_pl = l.air_instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = l.extraData(Air.TryPtr, ty_pl.payload);
try l.legalizeBody(extra.end, extra.data.body_len);
},
.dbg_stmt,
.dbg_empty_stmt,
=> {},
.dbg_inline_block,
=> {
const ty_pl = l.air_instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = l.extraData(Air.DbgInlineBlock, ty_pl.payload);
try l.legalizeBody(extra.end, extra.data.body_len);
},
.dbg_var_ptr,
.dbg_var_val,
.dbg_arg_inline,
.is_null,
.is_non_null,
.is_null_ptr,
.is_non_null_ptr,
.is_err,
.is_non_err,
.is_err_ptr,
.is_non_err_ptr,
.bool_and,
.bool_or,
.load,
.ret,
.ret_safe,
.ret_load,
.store,
.store_safe,
.unreach,
=> {},
.optional_payload,
.optional_payload_ptr,
.optional_payload_ptr_set,
.wrap_optional,
.unwrap_errunion_payload,
.unwrap_errunion_err,
.unwrap_errunion_payload_ptr,
.unwrap_errunion_err_ptr,
.errunion_payload_ptr_set,
.wrap_errunion_payload,
.wrap_errunion_err,
.struct_field_ptr,
.struct_field_ptr_index_0,
.struct_field_ptr_index_1,
.struct_field_ptr_index_2,
.struct_field_ptr_index_3,
.struct_field_val,
.set_union_tag,
.get_union_tag,
.slice,
.slice_len,
.slice_ptr,
.ptr_slice_len_ptr,
.ptr_slice_ptr_ptr,
.array_elem_val,
.slice_elem_val,
.slice_elem_ptr,
.ptr_elem_val,
.ptr_elem_ptr,
.array_to_slice,
=> {},
.reduce,
.reduce_optimized,
=> if (l.features.contains(.reduce_one_elem_to_bitcast)) done: {
@ -172,194 +473,317 @@ fn legalizeBody(l: *Legalize, body_start: usize, body_len: usize) Error!void {
else => break :done,
}
},
.@"try", .try_cold => {
.splat,
.shuffle,
.select,
.memset,
.memset_safe,
.memcpy,
.memmove,
.cmpxchg_weak,
.cmpxchg_strong,
.atomic_load,
.atomic_store_unordered,
.atomic_store_monotonic,
.atomic_store_release,
.atomic_store_seq_cst,
.atomic_rmw,
.is_named_enum_value,
.tag_name,
.error_name,
.error_set_has_value,
.aggregate_init,
.union_init,
.prefetch,
=> {},
inline .mul_add,
=> |air_tag| if (l.features.contains(comptime .scalarize(air_tag))) {
const pl_op = l.air_instructions.items(.data)[@intFromEnum(inst)].pl_op;
const extra = l.extraData(Air.Try, pl_op.payload);
try l.legalizeBody(extra.end, extra.data.body_len);
},
.try_ptr, .try_ptr_cold => {
const ty_pl = l.air_instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = l.extraData(Air.TryPtr, ty_pl.payload);
try l.legalizeBody(extra.end, extra.data.body_len);
},
.block, .loop => {
const ty_pl = l.air_instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = l.extraData(Air.Block, ty_pl.payload);
try l.legalizeBody(extra.end, extra.data.body_len);
},
.dbg_inline_block => {
const ty_pl = l.air_instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = l.extraData(Air.DbgInlineBlock, ty_pl.payload);
try l.legalizeBody(extra.end, extra.data.body_len);
},
.cond_br => {
const pl_op = l.air_instructions.items(.data)[@intFromEnum(inst)].pl_op;
const extra = l.extraData(Air.CondBr, pl_op.payload);
try l.legalizeBody(extra.end, extra.data.then_body_len);
try l.legalizeBody(extra.end + extra.data.then_body_len, extra.data.else_body_len);
},
.switch_br, .loop_switch_br => {
const pl_op = l.air_instructions.items(.data)[@intFromEnum(inst)].pl_op;
const extra = l.extraData(Air.SwitchBr, pl_op.payload);
const hint_bag_count = std.math.divCeil(usize, extra.data.cases_len + 1, 10) catch unreachable;
var extra_index = extra.end + hint_bag_count;
for (0..extra.data.cases_len) |_| {
const case_extra = l.extraData(Air.SwitchBr.Case, extra_index);
const case_body_start = case_extra.end + case_extra.data.items_len + case_extra.data.ranges_len * 2;
try l.legalizeBody(case_body_start, case_extra.data.body_len);
extra_index = case_body_start + case_extra.data.body_len;
}
try l.legalizeBody(extra_index, extra.data.else_body_len);
if (l.typeOf(pl_op.operand).isVector(zcu)) continue :inst try l.scalarize(inst, .pl_op_bin);
},
.field_parent_ptr,
.wasm_memory_size,
.wasm_memory_grow,
.cmp_lt_errors_len,
.err_return_trace,
.set_err_return_trace,
.addrspace_cast,
.save_err_return_trace_index,
.vector_store_elem,
.tlv_dllimport_ptr,
.c_va_arg,
.c_va_copy,
.c_va_end,
.c_va_start,
.work_item_id,
.work_group_size,
.work_group_id,
=> {},
}
}
}
const UnaryDataTag = enum { un_op, ty_op };
inline fn scalarizeUnary(l: *Legalize, inst: Air.Inst.Index, data_tag: UnaryDataTag, un_op: Air.Inst.Ref) Error!Air.Inst.Tag {
return l.replaceInst(inst, .block, try l.scalarizeUnaryBlockPayload(inst, data_tag, un_op));
const ScalarizeDataTag = enum { un_op, ty_op, bin_op, ty_pl_vector_cmp, pl_op_bin };
inline fn scalarize(l: *Legalize, orig_inst: Air.Inst.Index, comptime data_tag: ScalarizeDataTag) Error!Air.Inst.Tag {
return l.replaceInst(orig_inst, .block, try l.scalarizeBlockPayload(orig_inst, data_tag));
}
fn scalarizeUnaryBlockPayload(
l: *Legalize,
inst: Air.Inst.Index,
data_tag: UnaryDataTag,
un_op: Air.Inst.Ref,
) Error!Air.Inst.Data {
fn scalarizeBlockPayload(l: *Legalize, orig_inst: Air.Inst.Index, comptime data_tag: ScalarizeDataTag) Error!Air.Inst.Data {
const pt = l.pt;
const zcu = pt.zcu;
const gpa = zcu.gpa;
const res_ty = l.typeOfIndex(inst);
try l.air_instructions.ensureUnusedCapacity(gpa, 15);
const res_alloc_inst = l.addInstAssumeCapacity(.{
.tag = .alloc,
.data = .{ .ty = try pt.singleMutPtrType(res_ty) },
});
const index_alloc_inst = l.addInstAssumeCapacity(.{
.tag = .alloc,
.data = .{ .ty = try pt.singleMutPtrType(.usize) },
});
const index_init_inst = l.addInstAssumeCapacity(.{
.tag = .store,
.data = .{ .bin_op = .{
.lhs = index_alloc_inst.toRef(),
.rhs = try pt.intRef(.usize, 0),
} },
});
const cur_index_inst = l.addInstAssumeCapacity(.{
.tag = .load,
.data = .{ .ty_op = .{
.ty = .usize_type,
.operand = index_alloc_inst.toRef(),
} },
});
const get_elem_inst = l.addInstAssumeCapacity(.{
.tag = .array_elem_val,
.data = .{ .bin_op = .{
.lhs = un_op,
.rhs = cur_index_inst.toRef(),
} },
});
const op_elem_inst = l.addInstAssumeCapacity(.{
.tag = l.air_instructions.items(.tag)[@intFromEnum(inst)],
.data = switch (data_tag) {
.un_op => .{ .un_op = get_elem_inst.toRef() },
.ty_op => .{ .ty_op = .{
.ty = Air.internedToRef(res_ty.scalarType(zcu).toIntern()),
.operand = get_elem_inst.toRef(),
const orig = l.air_instructions.get(@intFromEnum(orig_inst));
const res_ty = l.typeOfIndex(orig_inst);
const arity = switch (data_tag) {
.un_op, .ty_op => 1,
.bin_op, .ty_pl_vector_cmp => 2,
.pl_op_bin => 3,
};
const expected_instructions_len = l.air_instructions.len + (6 + arity + 8);
try l.air_instructions.ensureTotalCapacity(gpa, expected_instructions_len);
var res_block: Block(4) = .empty;
{
const res_alloc_inst = res_block.add(l.addInstAssumeCapacity(.{
.tag = .alloc,
.data = .{ .ty = try pt.singleMutPtrType(res_ty) },
}));
const index_alloc_inst = res_block.add(l.addInstAssumeCapacity(.{
.tag = .alloc,
.data = .{ .ty = .ptr_usize },
}));
_ = res_block.add(l.addInstAssumeCapacity(.{
.tag = .store,
.data = .{ .bin_op = .{
.lhs = index_alloc_inst.toRef(),
.rhs = .zero_usize,
} },
},
});
const set_elem_inst = l.addInstAssumeCapacity(.{
.tag = .vector_store_elem,
.data = .{ .vector_store_elem = .{
.vector_ptr = res_alloc_inst.toRef(),
.payload = try l.addExtra(Air.Bin, .{
.lhs = cur_index_inst.toRef(),
.rhs = op_elem_inst.toRef(),
}),
} },
});
const not_done_inst = l.addInstAssumeCapacity(.{
.tag = .cmp_lt,
.data = .{ .bin_op = .{
.lhs = cur_index_inst.toRef(),
.rhs = try pt.intRef(.usize, res_ty.vectorLen(zcu)),
} },
});
const next_index_inst = l.addInstAssumeCapacity(.{
.tag = .add,
.data = .{ .bin_op = .{
.lhs = cur_index_inst.toRef(),
.rhs = try pt.intRef(.usize, 1),
} },
});
const set_index_inst = l.addInstAssumeCapacity(.{
.tag = .store,
.data = .{ .bin_op = .{
.lhs = index_alloc_inst.toRef(),
.rhs = next_index_inst.toRef(),
} },
});
const loop_inst: Air.Inst.Index = @enumFromInt(l.air_instructions.len + 4);
const repeat_inst = l.addInstAssumeCapacity(.{
.tag = .repeat,
.data = .{ .repeat = .{ .loop_inst = loop_inst } },
});
const final_res_inst = l.addInstAssumeCapacity(.{
.tag = .load,
.data = .{ .ty_op = .{
.ty = Air.internedToRef(res_ty.toIntern()),
.operand = res_alloc_inst.toRef(),
} },
});
const br_res_inst = l.addInstAssumeCapacity(.{
.tag = .br,
.data = .{ .br = .{
.block_inst = inst,
.operand = final_res_inst.toRef(),
} },
});
const done_br_inst = l.addInstAssumeCapacity(.{
.tag = .cond_br,
.data = .{ .pl_op = .{
.operand = not_done_inst.toRef(),
.payload = try l.addCondBrBodies(&.{
next_index_inst,
set_index_inst,
repeat_inst,
}, &.{
final_res_inst,
br_res_inst,
}),
} },
});
assert(loop_inst == l.addInstAssumeCapacity(.{
.tag = .loop,
.data = .{ .ty_pl = .{
.ty = .noreturn_type,
.payload = try l.addBlockBody(&.{
cur_index_inst,
get_elem_inst,
op_elem_inst,
set_elem_inst,
not_done_inst,
done_br_inst,
}),
} },
}));
}));
const loop_inst: Air.Inst.Index = @enumFromInt(l.air_instructions.len + (3 + arity + 7));
var loop_block: Block(3 + arity + 2) = .empty;
{
const cur_index_inst = loop_block.add(l.addInstAssumeCapacity(.{
.tag = .load,
.data = .{ .ty_op = .{
.ty = .usize_type,
.operand = index_alloc_inst.toRef(),
} },
}));
_ = loop_block.add(l.addInstAssumeCapacity(.{
.tag = .vector_store_elem,
.data = .{ .vector_store_elem = .{
.vector_ptr = res_alloc_inst.toRef(),
.payload = try l.addExtra(Air.Bin, .{
.lhs = cur_index_inst.toRef(),
.rhs = loop_block.add(l.addInstAssumeCapacity(res_elem: switch (data_tag) {
.un_op => .{
.tag = orig.tag,
.data = .{ .un_op = loop_block.add(l.addInstAssumeCapacity(.{
.tag = .array_elem_val,
.data = .{ .bin_op = .{
.lhs = orig.data.un_op,
.rhs = cur_index_inst.toRef(),
} },
})).toRef() },
},
.ty_op => .{
.tag = orig.tag,
.data = .{ .ty_op = .{
.ty = Air.internedToRef(orig.data.ty_op.ty.toType().scalarType(zcu).toIntern()),
.operand = loop_block.add(l.addInstAssumeCapacity(.{
.tag = .array_elem_val,
.data = .{ .bin_op = .{
.lhs = orig.data.ty_op.operand,
.rhs = cur_index_inst.toRef(),
} },
})).toRef(),
} },
},
.bin_op => .{
.tag = orig.tag,
.data = .{ .bin_op = .{
.lhs = loop_block.add(l.addInstAssumeCapacity(.{
.tag = .array_elem_val,
.data = .{ .bin_op = .{
.lhs = orig.data.bin_op.lhs,
.rhs = cur_index_inst.toRef(),
} },
})).toRef(),
.rhs = loop_block.add(l.addInstAssumeCapacity(.{
.tag = .array_elem_val,
.data = .{ .bin_op = .{
.lhs = orig.data.bin_op.rhs,
.rhs = cur_index_inst.toRef(),
} },
})).toRef(),
} },
},
.ty_pl_vector_cmp => {
const extra = l.extraData(Air.VectorCmp, orig.data.ty_pl.payload).data;
break :res_elem .{
.tag = switch (orig.tag) {
else => unreachable,
.cmp_vector => switch (extra.compareOperator()) {
.lt => .cmp_lt,
.lte => .cmp_lte,
.eq => .cmp_eq,
.gte => .cmp_gte,
.gt => .cmp_gt,
.neq => .cmp_neq,
},
.cmp_vector_optimized => switch (extra.compareOperator()) {
.lt => .cmp_lt_optimized,
.lte => .cmp_lte_optimized,
.eq => .cmp_eq_optimized,
.gte => .cmp_gte_optimized,
.gt => .cmp_gt_optimized,
.neq => .cmp_neq_optimized,
},
},
.data = .{ .bin_op = .{
.lhs = loop_block.add(l.addInstAssumeCapacity(.{
.tag = .array_elem_val,
.data = .{ .bin_op = .{
.lhs = extra.lhs,
.rhs = cur_index_inst.toRef(),
} },
})).toRef(),
.rhs = loop_block.add(l.addInstAssumeCapacity(.{
.tag = .array_elem_val,
.data = .{ .bin_op = .{
.lhs = extra.rhs,
.rhs = cur_index_inst.toRef(),
} },
})).toRef(),
} },
};
},
.pl_op_bin => {
const extra = l.extraData(Air.Bin, orig.data.pl_op.payload).data;
break :res_elem .{
.tag = orig.tag,
.data = .{ .pl_op = .{
.payload = try l.addExtra(Air.Bin, .{
.lhs = loop_block.add(l.addInstAssumeCapacity(.{
.tag = .array_elem_val,
.data = .{ .bin_op = .{
.lhs = extra.lhs,
.rhs = cur_index_inst.toRef(),
} },
})).toRef(),
.rhs = loop_block.add(l.addInstAssumeCapacity(.{
.tag = .array_elem_val,
.data = .{ .bin_op = .{
.lhs = extra.rhs,
.rhs = cur_index_inst.toRef(),
} },
})).toRef(),
}),
.operand = loop_block.add(l.addInstAssumeCapacity(.{
.tag = .array_elem_val,
.data = .{ .bin_op = .{
.lhs = orig.data.pl_op.operand,
.rhs = cur_index_inst.toRef(),
} },
})).toRef(),
} },
};
},
})).toRef(),
}),
} },
}));
const not_done_inst = loop_block.add(l.addInstAssumeCapacity(.{
.tag = .cmp_lt,
.data = .{ .bin_op = .{
.lhs = cur_index_inst.toRef(),
.rhs = try pt.intRef(.usize, res_ty.vectorLen(zcu) - 1),
} },
}));
var not_done_block: Block(3) = .empty;
{
_ = not_done_block.add(l.addInstAssumeCapacity(.{
.tag = .store,
.data = .{ .bin_op = .{
.lhs = index_alloc_inst.toRef(),
.rhs = not_done_block.add(l.addInstAssumeCapacity(.{
.tag = .add,
.data = .{ .bin_op = .{
.lhs = cur_index_inst.toRef(),
.rhs = .one_usize,
} },
})).toRef(),
} },
}));
_ = not_done_block.add(l.addInstAssumeCapacity(.{
.tag = .repeat,
.data = .{ .repeat = .{ .loop_inst = loop_inst } },
}));
}
var done_block: Block(2) = .empty;
{
_ = done_block.add(l.addInstAssumeCapacity(.{
.tag = .br,
.data = .{ .br = .{
.block_inst = orig_inst,
.operand = done_block.add(l.addInstAssumeCapacity(.{
.tag = .load,
.data = .{ .ty_op = .{
.ty = Air.internedToRef(res_ty.toIntern()),
.operand = res_alloc_inst.toRef(),
} },
})).toRef(),
} },
}));
}
_ = loop_block.add(l.addInstAssumeCapacity(.{
.tag = .cond_br,
.data = .{ .pl_op = .{
.operand = not_done_inst.toRef(),
.payload = try l.addCondBrBodies(not_done_block.body(), done_block.body()),
} },
}));
}
assert(loop_inst == res_block.add(l.addInstAssumeCapacity(.{
.tag = .loop,
.data = .{ .ty_pl = .{
.ty = .noreturn_type,
.payload = try l.addBlockBody(loop_block.body()),
} },
})));
}
assert(l.air_instructions.len == expected_instructions_len);
return .{ .ty_pl = .{
.ty = Air.internedToRef(res_ty.toIntern()),
.payload = try l.addBlockBody(&.{
res_alloc_inst,
index_alloc_inst,
index_init_inst,
loop_inst,
}),
.payload = try l.addBlockBody(res_block.body()),
} };
}
/// A fixed-capacity accumulator of AIR instruction indices used while
/// emitting a block body. `capacity` is the exact number of instructions
/// the block must end up holding; `body` asserts the buffer was filled.
fn Block(comptime capacity: usize) type {
    return struct {
        /// Backing storage; only the first `len` entries are initialized.
        instructions: [capacity]Air.Inst.Index,
        /// Number of instructions recorded so far.
        len: usize,

        /// A freshly-initialized block with no instructions recorded.
        const empty: @This() = .{
            .instructions = undefined,
            .len = 0,
        };

        /// Records `inst` as the next instruction of the block and passes it
        /// through unchanged, so calls can be nested inside expressions.
        fn add(self: *@This(), inst: Air.Inst.Index) Air.Inst.Index {
            // The defer bumps `len` only after the store below, so `inst`
            // lands in the slot that `len` referred to on entry.
            defer self.len += 1;
            self.instructions[self.len] = inst;
            return inst;
        }

        /// Returns the finished body. Asserts every reserved slot was used,
        /// catching any mismatch between `capacity` and the emitted count.
        fn body(self: *const @This()) []const Air.Inst.Index {
            assert(self.len == self.instructions.len);
            return &self.instructions;
        }
    };
}
/// Appends `inst` to `l.air_instructions` (capacity must already be
/// reserved) and returns the index of the newly appended instruction.
fn addInstAssumeCapacity(l: *Legalize, inst: Air.Inst) Air.Inst.Index {
    // The defer runs after the return value is computed, so the returned
    // index is the pre-append length — i.e. the slot `inst` is placed into.
    defer l.air_instructions.appendAssumeCapacity(inst);
    return @enumFromInt(l.air_instructions.len);
@ -414,7 +838,6 @@ inline fn replaceInst(l: *Legalize, inst: Air.Inst.Index, tag: Air.Inst.Tag, dat
const Air = @import("../Air.zig");
const assert = std.debug.assert;
const codegen = @import("../codegen.zig");
const Legalize = @This();
const std = @import("std");
const Type = @import("../Type.zig");

View File

@ -4579,10 +4579,11 @@ pub const Index = enum(u32) {
undefined_type,
enum_literal_type,
ptr_usize_type,
ptr_const_comptime_int_type,
manyptr_u8_type,
manyptr_const_u8_type,
manyptr_const_u8_sentinel_0_type,
single_const_pointer_to_comptime_int_type,
slice_const_u8_type,
slice_const_u8_sentinel_0_type,
@ -4649,19 +4650,29 @@ pub const Index = enum(u32) {
/// `undefined` (untyped)
undef,
/// `@as(bool, undefined)`
undef_bool,
/// `@as(usize, undefined)`
undef_usize,
/// `@as(u1, undefined)`
undef_u1,
/// `0` (comptime_int)
zero,
/// `0` (usize)
/// `@as(usize, 0)`
zero_usize,
/// `0` (u8)
/// `@as(u1, 0)`
zero_u1,
/// `@as(u8, 0)`
zero_u8,
/// `1` (comptime_int)
one,
/// `1` (usize)
/// `@as(usize, 1)`
one_usize,
/// `1` (u8)
/// `@as(u1, 1)`
one_u1,
/// `@as(u8, 1)`
one_u8,
/// `4` (u8)
/// `@as(u8, 4)`
four_u8,
/// `-1` (comptime_int)
negative_one,
@ -5074,6 +5085,20 @@ pub const static_keys: [static_len]Key = .{
.{ .simple_type = .undefined },
.{ .simple_type = .enum_literal },
// *usize
.{ .ptr_type = .{
.child = .usize_type,
.flags = .{},
} },
// *const comptime_int
.{ .ptr_type = .{
.child = .comptime_int_type,
.flags = .{
.is_const = true,
},
} },
// [*]u8
.{ .ptr_type = .{
.child = .u8_type,
@ -5101,15 +5126,6 @@ pub const static_keys: [static_len]Key = .{
},
} },
// *const comptime_int
.{ .ptr_type = .{
.child = .comptime_int_type,
.flags = .{
.size = .one,
.is_const = true,
},
} },
// []const u8
.{ .ptr_type = .{
.child = .u8_type,
@ -5245,6 +5261,9 @@ pub const static_keys: [static_len]Key = .{
} },
.{ .simple_value = .undefined },
.{ .undef = .bool_type },
.{ .undef = .usize_type },
.{ .undef = .u1_type },
.{ .int = .{
.ty = .comptime_int_type,
@ -5256,6 +5275,11 @@ pub const static_keys: [static_len]Key = .{
.storage = .{ .u64 = 0 },
} },
.{ .int = .{
.ty = .u1_type,
.storage = .{ .u64 = 0 },
} },
.{ .int = .{
.ty = .u8_type,
.storage = .{ .u64 = 0 },
@ -5271,17 +5295,21 @@ pub const static_keys: [static_len]Key = .{
.storage = .{ .u64 = 1 },
} },
// one_u8
.{ .int = .{
.ty = .u1_type,
.storage = .{ .u64 = 1 },
} },
.{ .int = .{
.ty = .u8_type,
.storage = .{ .u64 = 1 },
} },
// four_u8
.{ .int = .{
.ty = .u8_type,
.storage = .{ .u64 = 4 },
} },
// negative_one
.{ .int = .{
.ty = .comptime_int_type,
.storage = .{ .i64 = -1 },
@ -10482,7 +10510,7 @@ pub fn getCoerced(
.base_addr = .int,
.byte_offset = 0,
} }),
.len = try ip.get(gpa, tid, .{ .undef = .usize_type }),
.len = .undef_usize,
} }),
};
},
@ -10601,7 +10629,7 @@ pub fn getCoerced(
.base_addr = .int,
.byte_offset = 0,
} }),
.len = try ip.get(gpa, tid, .{ .undef = .usize_type }),
.len = .undef_usize,
} }),
},
else => |payload| try ip.getCoerced(gpa, tid, payload, new_ty),
@ -11847,10 +11875,11 @@ pub fn typeOf(ip: *const InternPool, index: Index) Index {
.null_type,
.undefined_type,
.enum_literal_type,
.ptr_usize_type,
.ptr_const_comptime_int_type,
.manyptr_u8_type,
.manyptr_const_u8_type,
.manyptr_const_u8_sentinel_0_type,
.single_const_pointer_to_comptime_int_type,
.slice_const_u8_type,
.slice_const_u8_sentinel_0_type,
.vector_8_i8_type,
@ -11909,12 +11938,13 @@ pub fn typeOf(ip: *const InternPool, index: Index) Index {
.undef => .undefined_type,
.zero, .one, .negative_one => .comptime_int_type,
.zero_usize, .one_usize => .usize_type,
.undef_usize, .zero_usize, .one_usize => .usize_type,
.undef_u1, .zero_u1, .one_u1 => .u1_type,
.zero_u8, .one_u8, .four_u8 => .u8_type,
.void_value => .void_type,
.unreachable_value => .noreturn_type,
.null_value => .null_type,
.bool_true, .bool_false => .bool_type,
.undef_bool, .bool_true, .bool_false => .bool_type,
.empty_tuple => .empty_tuple_type,
// This optimization on tags is needed so that indexToKey can call
@ -12186,10 +12216,11 @@ pub fn zigTypeTag(ip: *const InternPool, index: Index) std.builtin.TypeId {
.undefined_type => .undefined,
.enum_literal_type => .enum_literal,
.ptr_usize_type,
.ptr_const_comptime_int_type,
.manyptr_u8_type,
.manyptr_const_u8_type,
.manyptr_const_u8_sentinel_0_type,
.single_const_pointer_to_comptime_int_type,
.slice_const_u8_type,
.slice_const_u8_sentinel_0_type,
=> .pointer,
@ -12251,11 +12282,16 @@ pub fn zigTypeTag(ip: *const InternPool, index: Index) std.builtin.TypeId {
// values, not types
.undef => unreachable,
.undef_bool => unreachable,
.undef_usize => unreachable,
.undef_u1 => unreachable,
.zero => unreachable,
.zero_usize => unreachable,
.zero_u1 => unreachable,
.zero_u8 => unreachable,
.one => unreachable,
.one_usize => unreachable,
.one_u1 => unreachable,
.one_u8 => unreachable,
.four_u8 => unreachable,
.negative_one => unreachable,

File diff suppressed because it is too large Load Diff

View File

@ -168,7 +168,7 @@ fn addWithOverflowScalar(
else => unreachable,
}
if (lhs.isUndef(zcu) or rhs.isUndef(zcu)) return .{
.overflow_bit = try pt.undefValue(.u1),
.overflow_bit = .undef_u1,
.wrapped_result = try pt.undefValue(ty),
};
return intAddWithOverflow(sema, lhs, rhs, ty);
@ -229,7 +229,7 @@ fn subWithOverflowScalar(
else => unreachable,
}
if (lhs.isUndef(zcu) or rhs.isUndef(zcu)) return .{
.overflow_bit = try pt.undefValue(.u1),
.overflow_bit = .undef_u1,
.wrapped_result = try pt.undefValue(ty),
};
return intSubWithOverflow(sema, lhs, rhs, ty);
@ -290,7 +290,7 @@ fn mulWithOverflowScalar(
else => unreachable,
}
if (lhs.isUndef(zcu) or rhs.isUndef(zcu)) return .{
.overflow_bit = try pt.undefValue(.u1),
.overflow_bit = .undef_u1,
.wrapped_result = try pt.undefValue(ty),
};
return intMulWithOverflow(sema, lhs, rhs, ty);
@ -1043,7 +1043,7 @@ fn comptimeIntAdd(sema: *Sema, lhs: Value, rhs: Value) !Value {
fn intAddWithOverflow(sema: *Sema, lhs: Value, rhs: Value, ty: Type) !Value.OverflowArithmeticResult {
switch (ty.toIntern()) {
.comptime_int_type => return .{
.overflow_bit = try sema.pt.intValue(.u1, 0),
.overflow_bit = .zero_u1,
.wrapped_result = try comptimeIntAdd(sema, lhs, rhs),
},
else => return intAddWithOverflowInner(sema, lhs, rhs, ty),
@ -1125,7 +1125,7 @@ fn comptimeIntSub(sema: *Sema, lhs: Value, rhs: Value) !Value {
fn intSubWithOverflow(sema: *Sema, lhs: Value, rhs: Value, ty: Type) !Value.OverflowArithmeticResult {
switch (ty.toIntern()) {
.comptime_int_type => return .{
.overflow_bit = try sema.pt.intValue(.u1, 0),
.overflow_bit = .zero_u1,
.wrapped_result = try comptimeIntSub(sema, lhs, rhs),
},
else => return intSubWithOverflowInner(sema, lhs, rhs, ty),
@ -1211,7 +1211,7 @@ fn comptimeIntMul(sema: *Sema, lhs: Value, rhs: Value) !Value {
fn intMulWithOverflow(sema: *Sema, lhs: Value, rhs: Value, ty: Type) !Value.OverflowArithmeticResult {
switch (ty.toIntern()) {
.comptime_int_type => return .{
.overflow_bit = try sema.pt.intValue(.u1, 0),
.overflow_bit = .zero_u1,
.wrapped_result = try comptimeIntMul(sema, lhs, rhs),
},
else => return intMulWithOverflowInner(sema, lhs, rhs, ty),

View File

@ -2641,10 +2641,7 @@ pub fn onePossibleValue(starting_type: Type, pt: Zcu.PerThread) !?Value {
if (enum_type.values.len == 0) {
const only = try pt.intern(.{ .enum_tag = .{
.ty = ty.toIntern(),
.int = try pt.intern(.{ .int = .{
.ty = enum_type.tag_ty,
.storage = .{ .u64 = 0 },
} }),
.int = (try pt.intValue(.fromInterned(enum_type.tag_ty), 0)).toIntern(),
} });
return Value.fromInterned(only);
} else {
@ -3676,10 +3673,11 @@ pub fn resolveFields(ty: Type, pt: Zcu.PerThread) SemaError!void {
.null_type,
.undefined_type,
.enum_literal_type,
.ptr_usize_type,
.ptr_const_comptime_int_type,
.manyptr_u8_type,
.manyptr_const_u8_type,
.manyptr_const_u8_sentinel_0_type,
.single_const_pointer_to_comptime_int_type,
.slice_const_u8_type,
.slice_const_u8_sentinel_0_type,
.optional_noreturn_type,
@ -3691,9 +3689,11 @@ pub fn resolveFields(ty: Type, pt: Zcu.PerThread) SemaError!void {
.undef => unreachable,
.zero => unreachable,
.zero_usize => unreachable,
.zero_u1 => unreachable,
.zero_u8 => unreachable,
.one => unreachable,
.one_usize => unreachable,
.one_u1 => unreachable,
.one_u8 => unreachable,
.four_u8 => unreachable,
.negative_one => unreachable,
@ -4100,10 +4100,11 @@ pub const @"c_longlong": Type = .{ .ip_index = .c_longlong_type };
pub const @"c_ulonglong": Type = .{ .ip_index = .c_ulonglong_type };
pub const @"c_longdouble": Type = .{ .ip_index = .c_longdouble_type };
pub const ptr_usize: Type = .{ .ip_index = .ptr_usize_type };
pub const ptr_const_comptime_int: Type = .{ .ip_index = .ptr_const_comptime_int_type };
pub const manyptr_u8: Type = .{ .ip_index = .manyptr_u8_type };
pub const manyptr_const_u8: Type = .{ .ip_index = .manyptr_const_u8_type };
pub const manyptr_const_u8_sentinel_0: Type = .{ .ip_index = .manyptr_const_u8_sentinel_0_type };
pub const single_const_pointer_to_comptime_int: Type = .{ .ip_index = .single_const_pointer_to_comptime_int_type };
pub const slice_const_u8: Type = .{ .ip_index = .slice_const_u8_type };
pub const slice_const_u8_sentinel_0: Type = .{ .ip_index = .slice_const_u8_sentinel_0_type };

View File

@ -2895,19 +2895,25 @@ pub fn intValueBounds(val: Value, pt: Zcu.PerThread) !?[2]Value {
pub const BigIntSpace = InternPool.Key.Int.Storage.BigIntSpace;
pub const zero_usize: Value = .{ .ip_index = .zero_usize };
pub const zero_u8: Value = .{ .ip_index = .zero_u8 };
pub const zero_comptime_int: Value = .{ .ip_index = .zero };
pub const one_comptime_int: Value = .{ .ip_index = .one };
pub const negative_one_comptime_int: Value = .{ .ip_index = .negative_one };
pub const undef: Value = .{ .ip_index = .undef };
pub const undef_bool: Value = .{ .ip_index = .undef_bool };
pub const undef_usize: Value = .{ .ip_index = .undef_usize };
pub const undef_u1: Value = .{ .ip_index = .undef_u1 };
pub const zero_comptime_int: Value = .{ .ip_index = .zero };
pub const zero_usize: Value = .{ .ip_index = .zero_usize };
pub const zero_u1: Value = .{ .ip_index = .zero_u1 };
pub const zero_u8: Value = .{ .ip_index = .zero_u8 };
pub const one_comptime_int: Value = .{ .ip_index = .one };
pub const one_usize: Value = .{ .ip_index = .one_usize };
pub const one_u1: Value = .{ .ip_index = .one_u1 };
pub const one_u8: Value = .{ .ip_index = .one_u8 };
pub const four_u8: Value = .{ .ip_index = .four_u8 };
pub const negative_one_comptime_int: Value = .{ .ip_index = .negative_one };
pub const @"void": Value = .{ .ip_index = .void_value };
pub const @"null": Value = .{ .ip_index = .null_value };
pub const @"false": Value = .{ .ip_index = .bool_false };
pub const @"true": Value = .{ .ip_index = .bool_true };
pub const @"unreachable": Value = .{ .ip_index = .unreachable_value };
pub const generic_poison_type: Value = .{ .ip_index = .generic_poison_type };
pub const @"null": Value = .{ .ip_index = .null_value };
pub const @"true": Value = .{ .ip_index = .bool_true };
pub const @"false": Value = .{ .ip_index = .bool_false };
pub const empty_tuple: Value = .{ .ip_index = .empty_tuple };
pub fn makeBool(x: bool) Value {

View File

@ -1741,8 +1741,7 @@ pub fn linkerUpdateFunc(pt: Zcu.PerThread, func_index: InternPool.Index, air: *A
return;
}
const backend = target_util.zigBackend(zcu.root_mod.resolved_target.result, zcu.comp.config.use_llvm);
try air.legalize(backend, pt);
try air.legalize(pt, @import("../codegen.zig").legalizeFeatures(pt, nav_index));
var liveness = try Air.Liveness.analyze(gpa, air.*, ip);
defer liveness.deinit(gpa);
@ -3022,7 +3021,7 @@ fn analyzeFnBodyInner(pt: Zcu.PerThread, func_index: InternPool.Index) Zcu.SemaE
// is unused so it just has to be a no-op.
sema.air_instructions.set(@intFromEnum(ptr_inst), .{
.tag = .alloc,
.data = .{ .ty = Type.single_const_pointer_to_comptime_int },
.data = .{ .ty = .ptr_const_comptime_int },
});
}

View File

@ -32,15 +32,65 @@ const FrameIndex = bits.FrameIndex;
const InnerError = codegen.CodeGenError || error{OutOfRegisters};
pub const legalize_features: Air.Legalize.Features = .init(.{
.scalarize_ctz = true,
.scalarize_popcount = true,
.scalarize_byte_swap = true,
.scalarize_bit_reverse = true,
pub inline fn legalizeFeatures(target: *const std.Target) *const Air.Legalize.Features {
@setEvalBranchQuota(1_200);
return switch (target.ofmt == .coff) {
inline false, true => |use_old| comptime &.init(.{
.scalarize_add = use_old,
.scalarize_add_sat = use_old,
.scalarize_sub = use_old,
.scalarize_sub_sat = use_old,
.scalarize_mul = use_old,
.scalarize_mul_wrap = use_old,
.scalarize_mul_sat = true,
.scalarize_div_float = use_old,
.scalarize_div_float_optimized = use_old,
.scalarize_div_trunc = use_old,
.scalarize_div_trunc_optimized = use_old,
.scalarize_div_floor = use_old,
.scalarize_div_floor_optimized = use_old,
.scalarize_div_exact = use_old,
.scalarize_div_exact_optimized = use_old,
.scalarize_max = use_old,
.scalarize_min = use_old,
.scalarize_shr = true,
.scalarize_shr_exact = true,
.scalarize_shl = true,
.scalarize_shl_exact = true,
.scalarize_shl_sat = true,
.scalarize_not = use_old,
.scalarize_clz = use_old,
.scalarize_ctz = true,
.scalarize_popcount = true,
.scalarize_byte_swap = true,
.scalarize_bit_reverse = true,
.scalarize_sin = use_old,
.scalarize_cos = use_old,
.scalarize_tan = use_old,
.scalarize_exp = use_old,
.scalarize_exp2 = use_old,
.scalarize_log = use_old,
.scalarize_log2 = use_old,
.scalarize_log10 = use_old,
.scalarize_abs = use_old,
.scalarize_floor = use_old,
.scalarize_ceil = use_old,
.scalarize_trunc_float = use_old,
.scalarize_cmp_vector = true,
.scalarize_cmp_vector_optimized = true,
.scalarize_fptrunc = use_old,
.scalarize_fpext = use_old,
.scalarize_intcast = use_old,
.scalarize_int_from_float = use_old,
.scalarize_int_from_float_optimized = use_old,
.scalarize_float_from_int = use_old,
.scalarize_mul_add = use_old,
.remove_shift_vector_rhs_splat = false,
.reduce_one_elem_to_bitcast = true,
});
.remove_shift_vector_rhs_splat = false,
.reduce_one_elem_to_bitcast = true,
}),
};
}
/// Set this to `false` to uncover Sema OPV bugs.
/// https://github.com/ziglang/zig/issues/22419
@ -5719,7 +5769,7 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
},
.extra_temps = .{
.{ .type = .i64, .kind = .{ .rc = .general_purpose } },
.{ .type = .i64, .kind = .{ .mut_rc = .{ .ref = .src1, .rc = .general_purpose } } },
.{ .type = .i64, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
@ -92473,7 +92523,7 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
.{ ._, ._, .lea, .tmp1p, .mem(.dst0), ._, ._ },
.{ ._, ._, .mov, .tmp2d, .sia(-2, .dst0, .add_size_div_8), ._, ._ },
.{ ._, .@"rep _sq", .mov, ._, ._, ._, ._ },
.{ ._, ._, .mov, .tmp0q, .memad(.src0q, .add_size, -16), ._, ._ },
.{ ._, ._, .mov, .tmp0q, .memad(.src0q, .add_dst0_size, -16), ._, ._ },
.{ ._, ._, .mov, .memad(.dst0q, .add_size, -16), .tmp0q, ._, ._ },
.{ ._, ._r, .sa, .tmp0q, .ui(63), ._, ._ },
.{ ._, ._, .mov, .memad(.dst0q, .add_size, -8), .tmp0q, ._, ._ },
@ -92505,7 +92555,7 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
.{ ._, ._, .lea, .tmp1p, .mem(.dst0), ._, ._ },
.{ ._, ._, .mov, .tmp2d, .sia(-2, .dst0, .add_size_div_8), ._, ._ },
.{ ._, .@"rep _sq", .mov, ._, ._, ._, ._ },
.{ ._, ._, .mov, .tmp0q, .memad(.src0q, .add_size, -16), ._, ._ },
.{ ._, ._, .mov, .tmp0q, .memad(.src0q, .add_dst0_size, -16), ._, ._ },
.{ ._, ._l, .sa, .tmp0q, .uia(64, .dst0, .sub_bit_size_rem_64), ._, ._ },
.{ ._, ._r, .sa, .tmp0q, .uia(64, .dst0, .sub_bit_size_rem_64), ._, ._ },
.{ ._, ._, .mov, .memad(.dst0q, .add_size, -16), .tmp0q, ._, ._ },
@ -92539,7 +92589,7 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
.{ ._, ._, .lea, .tmp1p, .mem(.dst0), ._, ._ },
.{ ._, ._, .mov, .tmp2d, .sia(-1, .dst0, .add_size_div_8), ._, ._ },
.{ ._, .@"rep _sq", .mov, ._, ._, ._, ._ },
.{ ._, ._, .mov, .tmp0q, .memad(.src0q, .add_size, -8), ._, ._ },
.{ ._, ._, .mov, .tmp0q, .memad(.src0q, .add_dst0_size, -8), ._, ._ },
.{ ._, ._l, .sa, .tmp0q, .uia(64, .dst0, .sub_bit_size_rem_64), ._, ._ },
.{ ._, ._r, .sa, .tmp0q, .uia(64, .dst0, .sub_bit_size_rem_64), ._, ._ },
.{ ._, ._, .mov, .memad(.dst0q, .add_size, -8), .tmp0q, ._, ._ },
@ -92600,7 +92650,7 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
.{ ._, ._, .mov, .tmp2d, .sia(-2, .dst0, .add_size_div_8), ._, ._ },
.{ ._, .@"rep _sq", .mov, ._, ._, ._, ._ },
.{ ._, ._, .mov, .tmp2d, .sa(.dst0, .add_bit_size_rem_64), ._, ._ },
.{ ._, ._, .bzhi, .tmp2q, .memad(.src0q, .add_size, -16), .tmp2q, ._ },
.{ ._, ._, .bzhi, .tmp2q, .memad(.src0q, .add_dst0_size, -16), .tmp2q, ._ },
.{ ._, ._, .mov, .memad(.dst0q, .add_size, -16), .tmp2q, ._, ._ },
.{ ._, ._, .mov, .memad(.dst0q, .add_size, -8), .si(0), ._, ._ },
} },
@ -92632,7 +92682,7 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
.{ ._, ._, .mov, .tmp2d, .sia(-1, .dst0, .add_size_div_8), ._, ._ },
.{ ._, .@"rep _sq", .mov, ._, ._, ._, ._ },
.{ ._, ._, .mov, .tmp2d, .sa(.dst0, .add_bit_size_rem_64), ._, ._ },
.{ ._, ._, .bzhi, .tmp2q, .memad(.src0q, .add_size, -8), .tmp2q, ._ },
.{ ._, ._, .bzhi, .tmp2q, .memad(.src0q, .add_dst0_size, -8), .tmp2q, ._ },
.{ ._, ._, .mov, .memad(.dst0q, .add_size, -8), .tmp2q, ._, ._ },
} },
}, .{
@ -92663,7 +92713,7 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
.{ ._, ._, .mov, .tmp2d, .sia(-2, .dst0, .add_size_div_8), ._, ._ },
.{ ._, .@"rep _sq", .mov, ._, ._, ._, ._ },
.{ ._, ._, .mov, .tmp0q, .ua(.dst0, .add_umax), ._, ._ },
.{ ._, ._, .@"and", .tmp0q, .memad(.src0q, .add_size, -16), ._, ._ },
.{ ._, ._, .@"and", .tmp0q, .memad(.src0q, .add_dst0_size, -16), ._, ._ },
.{ ._, ._, .mov, .memad(.dst0q, .add_size, -16), .tmp0q, ._, ._ },
.{ ._, ._, .mov, .memad(.dst0q, .add_size, -8), .si(0), ._, ._ },
} },
@ -92695,7 +92745,7 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
.{ ._, ._, .mov, .tmp2d, .sia(-1, .dst0, .add_size_div_8), ._, ._ },
.{ ._, .@"rep _sq", .mov, ._, ._, ._, ._ },
.{ ._, ._, .mov, .tmp0q, .ua(.dst0, .add_umax), ._, ._ },
.{ ._, ._, .@"and", .tmp0q, .memad(.src0q, .add_size, -8), ._, ._ },
.{ ._, ._, .@"and", .tmp0q, .memad(.src0q, .add_dst0_size, -8), ._, ._ },
.{ ._, ._, .mov, .memad(.dst0q, .add_size, -8), .tmp0q, ._, ._ },
} },
}, .{
@ -165537,9 +165587,7 @@ fn airShlShrBinOp(self: *CodeGen, inst: Air.Inst.Index) !void {
.ty = mask_ty.toIntern(),
.storage = .{ .elems = &([1]InternPool.Index{
(try rhs_ty.childType(zcu).maxIntScalar(pt, .u8)).toIntern(),
} ++ [1]InternPool.Index{
(try pt.intValue(.u8, 0)).toIntern(),
} ** 15) },
} ++ [1]InternPool.Index{.zero_u8} ** 15) },
} })));
const mask_addr_reg = try self.copyToTmpRegister(.usize, mask_mcv.address());
const mask_addr_lock = self.register_manager.lockRegAssumeUnused(mask_addr_reg);
@ -178776,10 +178824,10 @@ fn airSelect(self: *CodeGen, inst: Air.Inst.Index) !void {
const mask_ty = try pt.vectorType(.{ .len = vec_len, .child = mask_elem_ty.toIntern() });
var mask_elems_buf: [32]InternPool.Index = undefined;
const mask_elems = mask_elems_buf[0..vec_len];
for (mask_elems, 0..) |*elem, bit| elem.* = try pt.intern(.{ .int = .{
.ty = mask_elem_ty.toIntern(),
.storage = .{ .u64 = @as(u64, 1) << @intCast(bit) },
} });
for (mask_elems, 0..) |*elem, bit| elem.* = (try pt.intValue(
mask_elem_ty,
@as(u8, 1) << @truncate(bit),
)).toIntern();
const mask_mcv = try self.genTypedValue(.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = mask_ty.toIntern(),
.storage = .{ .elems = mask_elems },
@ -179647,16 +179695,13 @@ fn airShuffle(self: *CodeGen, inst: Air.Inst.Index) !void {
var lhs_mask_elems: [16]InternPool.Index = undefined;
for (lhs_mask_elems[0..max_abi_size], 0..) |*lhs_mask_elem, byte_index| {
const elem_index = byte_index / elem_abi_size;
lhs_mask_elem.* = try pt.intern(.{ .int = .{
.ty = .u8_type,
.storage = .{ .u64 = if (elem_index >= mask_elems.len) 0b1_00_00000 else elem: {
const mask_elem = mask_elems[elem_index] orelse break :elem 0b1_00_00000;
if (mask_elem < 0) break :elem 0b1_00_00000;
const mask_elem_index: u31 = @intCast(mask_elem);
const byte_off: u32 = @intCast(byte_index % elem_abi_size);
break :elem @intCast(mask_elem_index * elem_abi_size + byte_off);
} },
} });
lhs_mask_elem.* = (try pt.intValue(.u8, if (elem_index >= mask_elems.len) 0b1_00_00000 else elem: {
const mask_elem = mask_elems[elem_index] orelse break :elem 0b1_00_00000;
if (mask_elem < 0) break :elem 0b1_00_00000;
const mask_elem_index: u31 = @intCast(mask_elem);
const byte_off: u32 = @intCast(byte_index % elem_abi_size);
break :elem mask_elem_index * elem_abi_size + byte_off;
})).toIntern();
}
const lhs_mask_ty = try pt.vectorType(.{ .len = max_abi_size, .child = .u8_type });
const lhs_mask_mcv = try self.genTypedValue(.fromInterned(try pt.intern(.{ .aggregate = .{
@ -179681,16 +179726,13 @@ fn airShuffle(self: *CodeGen, inst: Air.Inst.Index) !void {
var rhs_mask_elems: [16]InternPool.Index = undefined;
for (rhs_mask_elems[0..max_abi_size], 0..) |*rhs_mask_elem, byte_index| {
const elem_index = byte_index / elem_abi_size;
rhs_mask_elem.* = try pt.intern(.{ .int = .{
.ty = .u8_type,
.storage = .{ .u64 = if (elem_index >= mask_elems.len) 0b1_00_00000 else elem: {
const mask_elem = mask_elems[elem_index] orelse break :elem 0b1_00_00000;
if (mask_elem >= 0) break :elem 0b1_00_00000;
const mask_elem_index: u31 = @intCast(~mask_elem);
const byte_off: u32 = @intCast(byte_index % elem_abi_size);
break :elem @intCast(mask_elem_index * elem_abi_size + byte_off);
} },
} });
rhs_mask_elem.* = (try pt.intValue(.u8, if (elem_index >= mask_elems.len) 0b1_00_00000 else elem: {
const mask_elem = mask_elems[elem_index] orelse break :elem 0b1_00_00000;
if (mask_elem >= 0) break :elem 0b1_00_00000;
const mask_elem_index: u31 = @intCast(~mask_elem);
const byte_off: u32 = @intCast(byte_index % elem_abi_size);
break :elem mask_elem_index * elem_abi_size + byte_off;
})).toIntern();
}
const rhs_mask_ty = try pt.vectorType(.{ .len = max_abi_size, .child = .u8_type });
const rhs_mask_mcv = try self.genTypedValue(.fromInterned(try pt.intern(.{ .aggregate = .{
@ -188160,6 +188202,7 @@ const Select = struct {
ptr_bit_size,
size,
src0_size,
dst0_size,
delta_size,
delta_elem_size,
unaligned_size,
@ -188203,6 +188246,7 @@ const Select = struct {
const sub_src0_size: Adjust = .{ .sign = .neg, .lhs = .src0_size, .op = .mul, .rhs = .@"1" };
const add_src0_size: Adjust = .{ .sign = .pos, .lhs = .src0_size, .op = .mul, .rhs = .@"1" };
const add_8_src0_size: Adjust = .{ .sign = .pos, .lhs = .src0_size, .op = .mul, .rhs = .@"8" };
const add_dst0_size: Adjust = .{ .sign = .pos, .lhs = .dst0_size, .op = .mul, .rhs = .@"1" };
const add_delta_size_div_8: Adjust = .{ .sign = .pos, .lhs = .delta_size, .op = .div, .rhs = .@"8" };
const add_delta_elem_size: Adjust = .{ .sign = .pos, .lhs = .delta_elem_size, .op = .mul, .rhs = .@"1" };
const add_delta_elem_size_div_8: Adjust = .{ .sign = .pos, .lhs = .delta_elem_size, .op = .div, .rhs = .@"8" };
@ -188998,6 +189042,7 @@ const Select = struct {
.ptr_bit_size => s.cg.target.ptrBitWidth(),
.size => @intCast(op.flags.base.ref.typeOf(s).abiSize(s.cg.pt.zcu)),
.src0_size => @intCast(Select.Operand.Ref.src0.typeOf(s).abiSize(s.cg.pt.zcu)),
.dst0_size => @intCast(Select.Operand.Ref.dst0.typeOf(s).abiSize(s.cg.pt.zcu)),
.delta_size => @intCast(@as(SignedImm, @intCast(op.flags.base.ref.typeOf(s).abiSize(s.cg.pt.zcu))) -
@as(SignedImm, @intCast(op.flags.index.ref.typeOf(s).abiSize(s.cg.pt.zcu)))),
.delta_elem_size => @intCast(@as(SignedImm, @intCast(op.flags.base.ref.typeOf(s).elemType2(s.cg.pt.zcu).abiSize(s.cg.pt.zcu))) -

View File

@ -32,8 +32,9 @@ fn devFeatureForBackend(comptime backend: std.builtin.CompilerBackend) dev.Featu
return @field(dev.Feature, @tagName(backend)["stage2_".len..] ++ "_backend");
}
pub fn importBackend(comptime backend: std.builtin.CompilerBackend) ?type {
fn importBackend(comptime backend: std.builtin.CompilerBackend) type {
return switch (backend) {
.other, .stage1 => unreachable,
.stage2_aarch64 => @import("arch/aarch64/CodeGen.zig"),
.stage2_arm => @import("arch/arm/CodeGen.zig"),
.stage2_c => @import("codegen/c.zig"),
@ -42,11 +43,35 @@ pub fn importBackend(comptime backend: std.builtin.CompilerBackend) ?type {
.stage2_riscv64 => @import("arch/riscv64/CodeGen.zig"),
.stage2_sparc64 => @import("arch/sparc64/CodeGen.zig"),
.stage2_spirv64 => @import("codegen/spirv.zig"),
.stage2_x86_64 => @import("arch/x86_64/CodeGen.zig"),
else => null,
.stage2_wasm => @import("arch/wasm/CodeGen.zig"),
.stage2_x86, .stage2_x86_64 => @import("arch/x86_64/CodeGen.zig"),
_ => unreachable,
};
}
/// Selects the set of AIR legalization features for the backend that will
/// compile the function owned by `nav_index`.
///
/// The switch uses `inline` prongs so `backend` is comptime-known in each
/// prong, letting `importBackend` resolve to a concrete type at compile
/// time. Backends that do not declare `legalizeFeatures` get an empty set.
pub fn legalizeFeatures(pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) *const Air.Legalize.Features {
    const zcu = pt.zcu;
    // Resolve the target through the module owning this declaration: the
    // target (and thus the backend) is a per-module property.
    const target = &zcu.navFileScope(nav_index).mod.?.resolved_target.result;
    switch (target_util.zigBackend(target.*, zcu.comp.config.use_llvm)) {
        else => unreachable,
        inline .stage2_llvm,
        .stage2_c,
        .stage2_wasm,
        .stage2_arm,
        .stage2_x86_64,
        .stage2_aarch64,
        .stage2_x86,
        .stage2_riscv64,
        .stage2_sparc64,
        .stage2_spirv64,
        .stage2_powerpc,
        => |backend| {
            const Backend = importBackend(backend);
            // Fall back to an empty feature set when the backend does not
            // export `legalizeFeatures`, making legalization a no-op for it.
            return if (@hasDecl(Backend, "legalizeFeatures")) Backend.legalizeFeatures(target) else &.initEmpty();
        },
    }
}
pub fn generateFunction(
lf: *link.File,
pt: Zcu.PerThread,
@ -60,7 +85,7 @@ pub fn generateFunction(
const zcu = pt.zcu;
const func = zcu.funcInfo(func_index);
const target = zcu.navFileScope(func.owner_nav).mod.?.resolved_target.result;
switch (target_util.zigBackend(target, false)) {
switch (target_util.zigBackend(target, zcu.comp.config.use_llvm)) {
else => unreachable,
inline .stage2_aarch64,
.stage2_arm,
@ -70,7 +95,7 @@ pub fn generateFunction(
.stage2_x86_64,
=> |backend| {
dev.check(devFeatureForBackend(backend));
return importBackend(backend).?.generate(lf, pt, src_loc, func_index, air, liveness, code, debug_output);
return importBackend(backend).generate(lf, pt, src_loc, func_index, air, liveness, code, debug_output);
},
}
}
@ -88,14 +113,14 @@ pub fn generateLazyFunction(
zcu.fileByIndex(inst_index.resolveFile(&zcu.intern_pool)).mod.?.resolved_target.result
else
zcu.getTarget();
switch (target_util.zigBackend(target, false)) {
switch (target_util.zigBackend(target, zcu.comp.config.use_llvm)) {
else => unreachable,
inline .stage2_powerpc,
.stage2_riscv64,
.stage2_x86_64,
=> |backend| {
dev.check(devFeatureForBackend(backend));
return importBackend(backend).?.generateLazy(lf, pt, src_loc, lazy_sym, code, debug_output);
return importBackend(backend).generateLazy(lf, pt, src_loc, lazy_sym, code, debug_output);
},
}
}

View File

@ -1591,7 +1591,7 @@ pub const DeclGen = struct {
try writer.writeAll("((");
try dg.renderCType(writer, ctype);
return writer.print("){x})", .{
try dg.fmtIntLiteral(try pt.undefValue(.usize), .Other),
try dg.fmtIntLiteral(.undef_usize, .Other),
});
},
.slice => {
@ -1605,7 +1605,7 @@ pub const DeclGen = struct {
const ptr_ty = ty.slicePtrFieldType(zcu);
try dg.renderType(writer, ptr_ty);
return writer.print("){x}, {0x}}}", .{
try dg.fmtIntLiteral(try dg.pt.undefValue(.usize), .Other),
try dg.fmtIntLiteral(.undef_usize, .Other),
});
},
},
@ -6376,7 +6376,7 @@ fn airArrayToSlice(f: *Function, inst: Air.Inst.Index) !CValue {
if (operand_child_ctype.info(ctype_pool) == .array) {
try writer.writeByte('&');
try f.writeCValueDeref(writer, operand);
try writer.print("[{}]", .{try f.fmtIntLiteral(try pt.intValue(.usize, 0))});
try writer.print("[{}]", .{try f.fmtIntLiteral(.zero_usize)});
} else try f.writeCValue(writer, operand, .Other);
}
try a.end(f, writer);
@ -6907,7 +6907,7 @@ fn airMemset(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
try writer.writeAll("for (");
try f.writeCValue(writer, index, .Other);
try writer.writeAll(" = ");
try f.object.dg.renderValue(writer, try pt.intValue(.usize, 0), .Other);
try f.object.dg.renderValue(writer, .zero_usize, .Other);
try writer.writeAll("; ");
try f.writeCValue(writer, index, .Other);
try writer.writeAll(" != ");
@ -8311,11 +8311,11 @@ const Vectorize = struct {
try writer.writeAll("for (");
try f.writeCValue(writer, local, .Other);
try writer.print(" = {d}; ", .{try f.fmtIntLiteral(try pt.intValue(.usize, 0))});
try writer.print(" = {d}; ", .{try f.fmtIntLiteral(.zero_usize)});
try f.writeCValue(writer, local, .Other);
try writer.print(" < {d}; ", .{try f.fmtIntLiteral(try pt.intValue(.usize, ty.vectorLen(zcu)))});
try f.writeCValue(writer, local, .Other);
try writer.print(" += {d}) {{\n", .{try f.fmtIntLiteral(try pt.intValue(.usize, 1))});
try writer.print(" += {d}) {{\n", .{try f.fmtIntLiteral(.one_usize)});
f.object.indent_writer.pushIndent();
break :index .{ .index = local };

View File

@ -1408,6 +1408,15 @@ pub const Pool = struct {
.bits = pt.zcu.errorSetBits(),
}, mod, kind),
.ptr_usize_type,
=> return pool.getPointer(allocator, .{
.elem_ctype = .usize,
}),
.ptr_const_comptime_int_type,
=> return pool.getPointer(allocator, .{
.elem_ctype = .void,
.@"const" = true,
}),
.manyptr_u8_type,
=> return pool.getPointer(allocator, .{
.elem_ctype = .u8,
@ -1418,11 +1427,6 @@ pub const Pool = struct {
.elem_ctype = .u8,
.@"const" = true,
}),
.single_const_pointer_to_comptime_int_type,
=> return pool.getPointer(allocator, .{
.elem_ctype = .void,
.@"const" = true,
}),
.slice_const_u8_type,
.slice_const_u8_sentinel_0_type,
=> {
@ -2157,11 +2161,16 @@ pub const Pool = struct {
},
.undef,
.undef_bool,
.undef_usize,
.undef_u1,
.zero,
.zero_usize,
.zero_u1,
.zero_u8,
.one,
.one_usize,
.one_u1,
.one_u8,
.four_u8,
.negative_one,
@ -2172,7 +2181,7 @@ pub const Pool = struct {
.bool_false,
.empty_tuple,
.none,
=> unreachable,
=> unreachable, // values, not types
_ => |ip_index| switch (ip.indexToKey(ip_index)) {
.int_type => |int_info| return pool.fromIntInfo(allocator, int_info, mod, kind),

View File

@ -3081,10 +3081,11 @@ pub const Object = struct {
.undefined_type,
.enum_literal_type,
=> unreachable,
.ptr_usize_type,
.ptr_const_comptime_int_type,
.manyptr_u8_type,
.manyptr_const_u8_type,
.manyptr_const_u8_sentinel_0_type,
.single_const_pointer_to_comptime_int_type,
=> .ptr,
.slice_const_u8_type,
.slice_const_u8_sentinel_0_type,
@ -3098,11 +3099,16 @@ pub const Object = struct {
=> unreachable,
// values, not types
.undef,
.undef_bool,
.undef_usize,
.undef_u1,
.zero,
.zero_usize,
.zero_u1,
.zero_u8,
.one,
.one_usize,
.one_u1,
.one_u8,
.four_u8,
.negative_one,

View File

@ -260,7 +260,7 @@ pub const MutableValue = union(enum) {
const ptr = try arena.create(MutableValue);
const len = try arena.create(MutableValue);
ptr.* = .{ .interned = try pt.intern(.{ .undef = ip.slicePtrType(ty_ip) }) };
len.* = .{ .interned = try pt.intern(.{ .undef = .usize_type }) };
len.* = .{ .interned = .undef_usize };
mv.* = .{ .slice = .{
.ty = ty_ip,
.ptr = ptr,
@ -464,7 +464,7 @@ pub const MutableValue = union(enum) {
return switch (field_idx) {
Value.slice_ptr_index => .{ .interned = Value.fromInterned(ip_index).slicePtr(pt.zcu).toIntern() },
Value.slice_len_index => .{ .interned = switch (pt.zcu.intern_pool.indexToKey(ip_index)) {
.undef => try pt.intern(.{ .undef = .usize_type }),
.undef => .undef_usize,
.slice => |s| s.len,
else => unreachable,
} },

View File

@ -96,7 +96,6 @@ test "@abs big int <= 128 bits" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
try comptime testAbsSignedBigInt();
try testAbsSignedBigInt();
@ -211,7 +210,6 @@ fn testAbsFloats(comptime T: type) !void {
test "@abs int vectors" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO

View File

@ -837,7 +837,6 @@ test "extern variable with non-pointer opaque type" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO
@export(&var_to_export, .{ .name = "opaque_extern_var" });

View File

@ -384,7 +384,6 @@ test "comptime bitcast with fields following f80" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
const FloatT = extern struct { f: f80, x: u128 align(16) };
const x: FloatT = .{ .f = 0.5, .x = 123 };

View File

@ -12,7 +12,6 @@ test "@bitReverse" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try comptime testBitReverse();
@ -128,7 +127,6 @@ test "bitReverse vectors u8" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
try comptime vector8();
try vector8();
@ -149,7 +147,6 @@ test "bitReverse vectors u16" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
try comptime vector16();
try vector16();
@ -170,7 +167,6 @@ test "bitReverse vectors u24" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
try comptime vector24();
try vector24();

View File

@ -39,7 +39,6 @@ test "@byteSwap integers" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
const ByteSwapIntTest = struct {
fn run() !void {
@ -100,7 +99,6 @@ test "@byteSwap vectors u8" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
try comptime vector8();
try vector8();
@ -121,7 +119,6 @@ test "@byteSwap vectors u16" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
try comptime vector16();
try vector16();
@ -142,7 +139,6 @@ test "@byteSwap vectors u24" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
try comptime vector24();
try vector24();

View File

@ -617,7 +617,6 @@ test "@intCast on vector" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch == .hexagon) return error.SkipZigTest;
@ -2520,7 +2519,6 @@ test "@ptrFromInt on vector" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
const S = struct {
@ -2592,7 +2590,6 @@ test "@intFromFloat on vector" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@ -2693,7 +2690,6 @@ test "@intCast vector of signed integer" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch == .hexagon) return error.SkipZigTest;

View File

@ -5,7 +5,6 @@ const expect = std.testing.expect;
test "anyopaque extern symbol" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
const a = @extern(*anyopaque, .{ .name = "a_mystery_symbol" });

View File

@ -135,7 +135,6 @@ test "cmp f32" {
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.cpu.arch.isArm() and builtin.target.abi.float() == .soft) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/21234
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
try testCmp(f32);
try comptime testCmp(f32);
@ -144,7 +143,6 @@ test "cmp f32" {
test "cmp f64" {
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.cpu.arch.isArm() and builtin.target.abi.float() == .soft) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/21234
try testCmp(f64);
@ -400,7 +398,6 @@ test "@sqrt f32/f64" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
try testSqrt(f32);

View File

@ -429,7 +429,6 @@ test "implicit cast function to function ptr" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
const S1 = struct {
export fn someFunctionThatReturnsAValue() c_int {

View File

@ -85,7 +85,6 @@ test "@clz big ints" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testClzBigInts();
@ -103,7 +102,6 @@ fn testOneClz(comptime T: type, x: T) u32 {
test "@clz vectors" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -173,7 +171,6 @@ fn testOneCtz(comptime T: type, x: T) u32 {
}
test "@ctz 128-bit integers" {
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -198,7 +195,6 @@ test "@ctz vectors" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
try testCtzVectors();
try comptime testCtzVectors();
@ -1694,9 +1690,6 @@ test "vector comparison" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and
!comptime std.Target.x86.featureSetHas(builtin.cpu.features, .avx2)) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
var a: @Vector(6, i32) = [_]i32{ 1, 3, -1, 5, 7, 9 };
@ -1785,7 +1778,6 @@ test "mod lazy values" {
test "@clz works on both vector and scalar inputs" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -1807,7 +1799,6 @@ test "runtime comparison to NaN is comptime-known" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.cpu.arch.isArm() and builtin.target.abi.float() == .soft) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/21234
@ -1838,7 +1829,6 @@ test "runtime int comparison to inf is comptime-known" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.cpu.arch.isArm() and builtin.target.abi.float() == .soft) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/21234

View File

@ -34,7 +34,6 @@ test "@max on vectors" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@ -90,7 +89,6 @@ test "@min for vectors" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@ -260,7 +258,6 @@ test "@min/@max notices bounds from vector types" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
var x: @Vector(2, u16) = .{ 30, 67 };
var y: @Vector(2, u32) = .{ 20, 500 };
@ -303,7 +300,6 @@ test "@min/@max notices bounds from vector types when element of comptime-known
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
var x: @Vector(2, u32) = .{ 1_000_000, 12345 };
_ = &x;
@ -375,7 +371,6 @@ test "@min/@max with runtime vectors of signed and unsigned integers of same siz
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
const S = struct {
fn min(a: @Vector(2, i32), b: @Vector(2, u32)) @Vector(2, i32) {

View File

@ -82,7 +82,6 @@ test "@popCount vectors" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
try comptime testPopCountVectors();
try testPopCountVectors();

View File

@ -70,7 +70,7 @@ fn selectArrays() !void {
test "@select compare result" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch == .hexagon) return error.SkipZigTest;
const S = struct {

View File

@ -282,6 +282,7 @@ test "cast union to tag type of union" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
try testCastUnionToTag();
try comptime testCastUnionToTag();

View File

@ -31,7 +31,6 @@ test "vector wrap operators" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@ -652,7 +651,6 @@ test "vector division operators" {
test "vector bitwise not operator" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -934,7 +932,6 @@ test "saturating add" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@ -969,7 +966,6 @@ test "saturating subtraction" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@ -989,7 +985,6 @@ test "saturating subtraction" {
test "saturating multiplication" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -1018,12 +1013,12 @@ test "saturating multiplication" {
test "saturating shift-left" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@ -1469,7 +1464,6 @@ test "compare vectors with different element types" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

View File

@ -1,5 +1,7 @@
const AddOneBit = math.AddOneBit;
const AsSignedness = math.AsSignedness;
const cast = math.cast;
const ChangeScalar = math.ChangeScalar;
const checkExpected = math.checkExpected;
const Compare = math.Compare;
const DoubleBits = math.DoubleBits;
@ -13,6 +15,7 @@ const math = @import("math.zig");
const nan = math.nan;
const Scalar = math.Scalar;
const sign = math.sign;
const splat = math.splat;
const Sse = math.Sse;
const tmin = math.tmin;
@ -5141,6 +5144,7 @@ inline fn mulSat(comptime Type: type, lhs: Type, rhs: Type) Type {
test mulSat {
const test_mul_sat = binary(mulSat, .{});
try test_mul_sat.testInts();
try test_mul_sat.testIntVectors();
}
inline fn multiply(comptime Type: type, lhs: Type, rhs: Type) @TypeOf(lhs * rhs) {
@ -5265,9 +5269,9 @@ test mulWithOverflow {
}
inline fn shlWithOverflow(comptime Type: type, lhs: Type, rhs: Type) struct { Type, u1 } {
const bit_cast_rhs: @Type(.{ .int = .{ .signedness = .unsigned, .bits = @bitSizeOf(Type) } }) = @bitCast(rhs);
const bit_cast_rhs: AsSignedness(Type, .unsigned) = @bitCast(rhs);
const truncate_rhs: Log2Int(Type) = @truncate(bit_cast_rhs);
return @shlWithOverflow(lhs, if (comptime cast(Log2Int(Type), @bitSizeOf(Type))) |bits| truncate_rhs % bits else truncate_rhs);
return @shlWithOverflow(lhs, if (comptime cast(Log2Int(Scalar(Type)), @bitSizeOf(Scalar(Type)))) |bits| truncate_rhs % splat(Log2Int(Type), bits) else truncate_rhs);
}
test shlWithOverflow {
const test_shl_with_overflow = binary(shlWithOverflow, .{});
@ -5280,7 +5284,9 @@ inline fn equal(comptime Type: type, lhs: Type, rhs: Type) @TypeOf(lhs == rhs) {
test equal {
const test_equal = binary(equal, .{});
try test_equal.testInts();
try test_equal.testIntVectors();
try test_equal.testFloats();
try test_equal.testFloatVectors();
}
inline fn notEqual(comptime Type: type, lhs: Type, rhs: Type) @TypeOf(lhs != rhs) {
@ -5289,7 +5295,9 @@ inline fn notEqual(comptime Type: type, lhs: Type, rhs: Type) @TypeOf(lhs != rhs
test notEqual {
const test_not_equal = binary(notEqual, .{});
try test_not_equal.testInts();
try test_not_equal.testIntVectors();
try test_not_equal.testFloats();
try test_not_equal.testFloatVectors();
}
inline fn lessThan(comptime Type: type, lhs: Type, rhs: Type) @TypeOf(lhs < rhs) {
@ -5298,7 +5306,9 @@ inline fn lessThan(comptime Type: type, lhs: Type, rhs: Type) @TypeOf(lhs < rhs)
test lessThan {
const test_less_than = binary(lessThan, .{});
try test_less_than.testInts();
try test_less_than.testIntVectors();
try test_less_than.testFloats();
try test_less_than.testFloatVectors();
}
inline fn lessThanOrEqual(comptime Type: type, lhs: Type, rhs: Type) @TypeOf(lhs <= rhs) {
@ -5307,7 +5317,9 @@ inline fn lessThanOrEqual(comptime Type: type, lhs: Type, rhs: Type) @TypeOf(lhs
test lessThanOrEqual {
const test_less_than_or_equal = binary(lessThanOrEqual, .{});
try test_less_than_or_equal.testInts();
try test_less_than_or_equal.testIntVectors();
try test_less_than_or_equal.testFloats();
try test_less_than_or_equal.testFloatVectors();
}
inline fn greaterThan(comptime Type: type, lhs: Type, rhs: Type) @TypeOf(lhs > rhs) {
@ -5316,7 +5328,9 @@ inline fn greaterThan(comptime Type: type, lhs: Type, rhs: Type) @TypeOf(lhs > r
test greaterThan {
const test_greater_than = binary(greaterThan, .{});
try test_greater_than.testInts();
try test_greater_than.testIntVectors();
try test_greater_than.testFloats();
try test_greater_than.testFloatVectors();
}
inline fn greaterThanOrEqual(comptime Type: type, lhs: Type, rhs: Type) @TypeOf(lhs >= rhs) {
@ -5325,7 +5339,9 @@ inline fn greaterThanOrEqual(comptime Type: type, lhs: Type, rhs: Type) @TypeOf(
test greaterThanOrEqual {
const test_greater_than_or_equal = binary(greaterThanOrEqual, .{});
try test_greater_than_or_equal.testInts();
try test_greater_than_or_equal.testIntVectors();
try test_greater_than_or_equal.testFloats();
try test_greater_than_or_equal.testFloatVectors();
}
inline fn bitAnd(comptime Type: type, lhs: Type, rhs: Type) @TypeOf(lhs & rhs) {
@ -5347,54 +5363,57 @@ test bitOr {
}
inline fn shr(comptime Type: type, lhs: Type, rhs: Type) Type {
const bit_cast_rhs: @Type(.{ .int = .{ .signedness = .unsigned, .bits = @bitSizeOf(Type) } }) = @bitCast(rhs);
const bit_cast_rhs: AsSignedness(Type, .unsigned) = @bitCast(rhs);
const truncate_rhs: Log2Int(Type) = @truncate(bit_cast_rhs);
return lhs >> if (comptime cast(Log2Int(Type), @bitSizeOf(Type))) |bits| truncate_rhs % bits else truncate_rhs;
return lhs >> if (comptime cast(Log2Int(Scalar(Type)), @bitSizeOf(Scalar(Type)))) |bits| truncate_rhs % splat(Log2Int(Type), bits) else truncate_rhs;
}
test shr {
const test_shr = binary(shr, .{});
try test_shr.testInts();
try test_shr.testIntVectors();
}
inline fn shrExact(comptime Type: type, lhs: Type, rhs: Type) Type {
const bit_cast_rhs: @Type(.{ .int = .{ .signedness = .unsigned, .bits = @bitSizeOf(Type) } }) = @bitCast(rhs);
const bit_cast_rhs: AsSignedness(Type, .unsigned) = @bitCast(rhs);
const truncate_rhs: Log2Int(Type) = @truncate(bit_cast_rhs);
const final_rhs = if (comptime cast(Log2Int(Type), @bitSizeOf(Type))) |bits| truncate_rhs % bits else truncate_rhs;
const final_rhs = if (comptime cast(Log2Int(Scalar(Type)), @bitSizeOf(Scalar(Type)))) |bits| truncate_rhs % splat(Log2Int(Type), bits) else truncate_rhs;
return @shrExact(lhs >> final_rhs << final_rhs, final_rhs);
}
test shrExact {
const test_shr_exact = binary(shrExact, .{});
try test_shr_exact.testInts();
try test_shr_exact.testIntVectors();
}
inline fn shl(comptime Type: type, lhs: Type, rhs: Type) Type {
const bit_cast_rhs: @Type(.{ .int = .{ .signedness = .unsigned, .bits = @bitSizeOf(Type) } }) = @bitCast(rhs);
const bit_cast_rhs: AsSignedness(Type, .unsigned) = @bitCast(rhs);
const truncate_rhs: Log2Int(Type) = @truncate(bit_cast_rhs);
return lhs << if (comptime cast(Log2Int(Type), @bitSizeOf(Type))) |bits| truncate_rhs % bits else truncate_rhs;
return lhs << if (comptime cast(Log2Int(Scalar(Type)), @bitSizeOf(Scalar(Type)))) |bits| truncate_rhs % splat(Log2Int(Type), bits) else truncate_rhs;
}
test shl {
const test_shl = binary(shl, .{});
try test_shl.testInts();
try test_shl.testIntVectors();
}
inline fn shlExactUnsafe(comptime Type: type, lhs: Type, rhs: Type) Type {
@setRuntimeSafety(false);
const bit_cast_rhs: @Type(.{ .int = .{ .signedness = .unsigned, .bits = @bitSizeOf(Type) } }) = @bitCast(rhs);
const bit_cast_rhs: AsSignedness(Type, .unsigned) = @bitCast(rhs);
const truncate_rhs: Log2Int(Type) = @truncate(bit_cast_rhs);
const final_rhs = if (comptime cast(Log2Int(Type), @bitSizeOf(Type))) |bits| truncate_rhs % bits else truncate_rhs;
const final_rhs = if (comptime cast(Log2Int(Scalar(Type)), @bitSizeOf(Scalar(Type)))) |bits| truncate_rhs % splat(Log2Int(Type), bits) else truncate_rhs;
return @shlExact(lhs << final_rhs >> final_rhs, final_rhs);
}
test shlExactUnsafe {
const test_shl_exact_unsafe = binary(shlExactUnsafe, .{});
try test_shl_exact_unsafe.testInts();
try test_shl_exact_unsafe.testIntVectors();
}
inline fn shlSat(comptime Type: type, lhs: Type, rhs: Type) Type {
// workaround https://github.com/ziglang/zig/issues/23034
if (@inComptime()) {
// workaround https://github.com/ziglang/zig/issues/23139
//return lhs <<| @min(@abs(rhs), imax(u64));
return lhs <<| @min(@abs(rhs), @as(u64, imax(u64)));
return lhs <<| @min(@abs(rhs), splat(ChangeScalar(Type, u64), imax(u64)));
}
// workaround https://github.com/ziglang/zig/issues/23033
@setRuntimeSafety(false);
@ -5403,6 +5422,7 @@ inline fn shlSat(comptime Type: type, lhs: Type, rhs: Type) Type {
test shlSat {
const test_shl_sat = binary(shlSat, .{});
try test_shl_sat.testInts();
try test_shl_sat.testIntVectors();
}
inline fn bitXor(comptime Type: type, lhs: Type, rhs: Type) @TypeOf(lhs ^ rhs) {

View File

@ -8,8 +8,6 @@ pub const fmin = math.floatMin;
pub const imax = math.maxInt;
pub const imin = math.minInt;
pub const inf = math.inf;
pub const Log2Int = math.Log2Int;
pub const Log2IntCeil = math.Log2IntCeil;
pub const nan = math.nan;
pub const next = math.nextAfter;
pub const tmin = math.floatTrueMin;
@ -30,38 +28,44 @@ pub fn Scalar(comptime Type: type) type {
.vector => |info| info.child,
};
}
pub fn ChangeScalar(comptime Type: type, comptime NewScalar: type) type {
return switch (@typeInfo(Type)) {
else => NewScalar,
.vector => |vector| @Vector(vector.len, NewScalar),
};
}
pub fn AsSignedness(comptime Type: type, comptime signedness: std.builtin.Signedness) type {
return ChangeScalar(Type, @Type(.{ .int = .{
.signedness = signedness,
.bits = @typeInfo(Scalar(Type)).int.bits,
} }));
}
pub fn AddOneBit(comptime Type: type) type {
const ResultScalar = switch (@typeInfo(Scalar(Type))) {
return ChangeScalar(Type, switch (@typeInfo(Scalar(Type))) {
.int => |int| @Type(.{ .int = .{ .signedness = int.signedness, .bits = 1 + int.bits } }),
.float => Scalar(Type),
else => @compileError(@typeName(Type)),
};
return switch (@typeInfo(Type)) {
else => ResultScalar,
.vector => |vector| @Vector(vector.len, ResultScalar),
};
});
}
pub fn DoubleBits(comptime Type: type) type {
const ResultScalar = switch (@typeInfo(Scalar(Type))) {
return ChangeScalar(Type, switch (@typeInfo(Scalar(Type))) {
.int => |int| @Type(.{ .int = .{ .signedness = int.signedness, .bits = int.bits * 2 } }),
.float => Scalar(Type),
else => @compileError(@typeName(Type)),
};
return switch (@typeInfo(Type)) {
else => ResultScalar,
.vector => |vector| @Vector(vector.len, ResultScalar),
};
});
}
pub fn RoundBitsUp(comptime Type: type, comptime multiple: u16) type {
const ResultScalar = switch (@typeInfo(Scalar(Type))) {
return ChangeScalar(Type, switch (@typeInfo(Scalar(Type))) {
.int => |int| @Type(.{ .int = .{ .signedness = int.signedness, .bits = std.mem.alignForward(u16, int.bits, multiple) } }),
.float => Scalar(Type),
else => @compileError(@typeName(Type)),
};
return switch (@typeInfo(Type)) {
else => ResultScalar,
.vector => |vector| @Vector(vector.len, ResultScalar),
};
});
}
pub fn Log2Int(comptime Type: type) type {
return ChangeScalar(Type, math.Log2Int(Scalar(Type)));
}
pub fn Log2IntCeil(comptime Type: type) type {
return ChangeScalar(Type, math.Log2IntCeil(Scalar(Type)));
}
// inline to avoid a runtime `@splat`
pub inline fn splat(comptime Type: type, scalar: Scalar(Type)) Type {
@ -78,18 +82,12 @@ inline fn select(cond: anytype, lhs: anytype, rhs: @TypeOf(lhs)) @TypeOf(lhs) {
else => @compileError(@typeName(@TypeOf(cond))),
};
}
pub fn sign(rhs: anytype) switch (@typeInfo(@TypeOf(rhs))) {
else => bool,
.vector => |vector| @Vector(vector.len, bool),
} {
pub fn sign(rhs: anytype) ChangeScalar(@TypeOf(rhs), bool) {
const ScalarInt = @Type(.{ .int = .{
.signedness = .unsigned,
.bits = @bitSizeOf(Scalar(@TypeOf(rhs))),
} });
const VectorInt = switch (@typeInfo(@TypeOf(rhs))) {
else => ScalarInt,
.vector => |vector| @Vector(vector.len, ScalarInt),
};
const VectorInt = ChangeScalar(@TypeOf(rhs), ScalarInt);
return @as(VectorInt, @bitCast(rhs)) & splat(VectorInt, @as(ScalarInt, 1) << @bitSizeOf(ScalarInt) - 1) != splat(VectorInt, 0);
}
fn boolAnd(lhs: anytype, rhs: @TypeOf(lhs)) @TypeOf(lhs) {

View File

@ -117,9 +117,9 @@ export fn testMutablePointer() void {
// tmp.zig:37:38: note: imported here
// neg_inf.zon:1:1: error: expected type '?u8'
// tmp.zig:57:28: note: imported here
// neg_inf.zon:1:1: error: expected type 'tmp.testNonExhaustiveEnum__enum_518'
// neg_inf.zon:1:1: error: expected type 'tmp.testNonExhaustiveEnum__enum_522'
// tmp.zig:62:39: note: imported here
// neg_inf.zon:1:1: error: expected type 'tmp.testUntaggedUnion__union_520'
// neg_inf.zon:1:1: error: expected type 'tmp.testUntaggedUnion__union_524'
// tmp.zig:67:44: note: imported here
// neg_inf.zon:1:1: error: expected type 'tmp.testTaggedUnionVoid__union_523'
// neg_inf.zon:1:1: error: expected type 'tmp.testTaggedUnionVoid__union_527'
// tmp.zig:72:50: note: imported here

View File

@ -15,6 +15,6 @@ pub export fn entry() void {
// error
//
// :7:25: error: unable to resolve comptime value
// :7:25: note: initializer of comptime-only struct 'tmp.S.foo__anon_492.C' must be comptime-known
// :7:25: note: initializer of comptime-only struct 'tmp.S.foo__anon_496.C' must be comptime-known
// :4:16: note: struct requires comptime because of this field
// :4:16: note: types are not available at runtime

View File

@ -16,5 +16,5 @@ pub export fn entry2() void {
//
// :3:6: error: no field or member function named 'copy' in '[]const u8'
// :9:8: error: no field or member function named 'bar' in '@TypeOf(.{})'
// :12:18: error: no field or member function named 'bar' in 'tmp.entry2__struct_496'
// :12:18: error: no field or member function named 'bar' in 'tmp.entry2__struct_500'
// :12:6: note: struct declared here

View File

@ -6,6 +6,6 @@ export fn foo() void {
// error
//
// :4:16: error: expected type 'tmp.T', found 'tmp.foo__struct_485'
// :4:16: error: expected type 'tmp.T', found 'tmp.foo__struct_489'
// :3:16: note: struct declared here
// :1:11: note: struct declared here

View File

@ -44,9 +44,9 @@ comptime {
//
// :5:23: error: expected error union type, found 'comptime_int'
// :10:23: error: expected error union type, found '@TypeOf(.{})'
// :15:23: error: expected error union type, found 'tmp.test2__struct_522'
// :15:23: error: expected error union type, found 'tmp.test2__struct_526'
// :15:23: note: struct declared here
// :20:27: error: expected error union type, found 'tmp.test3__struct_524'
// :20:27: error: expected error union type, found 'tmp.test3__struct_528'
// :20:27: note: struct declared here
// :25:23: error: expected error union type, found 'struct { comptime *const [5:0]u8 = "hello" }'
// :31:13: error: expected error union type, found 'u32'

View File

@ -601,7 +601,8 @@ type_tag_handlers = {
'fn_void_no_args': lambda payload: 'fn() void',
'fn_naked_noreturn_no_args': lambda payload: 'fn() callconv(.naked) noreturn',
'fn_ccc_void_no_args': lambda payload: 'fn() callconv(.c) void',
'single_const_pointer_to_comptime_int': lambda payload: '*const comptime_int',
'ptr_usize': lambda payload: '*usize',
'ptr_const_comptime_int': lambda payload: '*const comptime_int',
'manyptr_u8': lambda payload: '[*]u8',
'manyptr_const_u8': lambda payload: '[*]const u8',
'manyptr_const_u8_sentinel_0': lambda payload: '[*:0]const u8',