Liveness: avoid emitting unused instructions or marking their operands as used

Backends want to avoid emitting unused instructions which have no side
effects: to that end, they all have `Liveness.isUnused` checks for many
instructions. However, performing this check in the backends prevents a
lot of potential optimizations. For instance, when a nested field is
loaded and the result discarded, the first field access is still
emitted, since its result is used by the next access (which is itself
unreferenced).
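
For example, in a snippet like the following (the types here are purely
illustrative), both field accesses are dead, but only the second was
previously detected as unused:

    const Outer = struct { inner: struct { x: u32 } };

    fn f(s: *const Outer) void {
        // `s.inner.x` lowers to a field access for `inner` followed by
        // one for `x`. The `x` access is unreferenced, but the `inner`
        // access was still emitted, since its result is "used" by the
        // (dead) `x` access.
        _ = s.inner.x;
    }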

To elide more instructions, Liveness can track this data instead. For
unused instructions which do not have to be lowered (i.e. which have no
side effects and are not something special like `arg`), Liveness can
ignore their operand usages, pushing the unused information further up
and potentially marking many more instructions as unreferenced.
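
Concretely, a backend's body-generation loop can then skip such
instructions up front. A simplified sketch of the check (the LLVM
backend's actual version appears in the diff below):

    for (body) |inst| {
        // Result unused and no mandatory lowering: elide the
        // instruction entirely.
        if (liveness.isUnused(inst) and !air.mustLower(inst)) continue;
        // ... otherwise, lower `inst` as usual ...
    }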

In doing this, I also uncovered a bug in the LLVM backend relating to
discarding the result of `@cVaArg`, which this change fixes. A behaviour
test has been added to cover it.
mlugg 2023-04-14 21:38:32 +01:00
parent 4486f27126
commit 407dc6eee4
5 changed files with 772 additions and 695 deletions

View File

@ -1375,3 +1375,217 @@ pub fn nullTerminatedString(air: Air, index: usize) [:0]const u8 {
}
return bytes[0..end :0];
}
/// Returns whether the given instruction must always be lowered, for instance because it can cause
/// side effects. If an instruction does not need to be lowered, and Liveness determines its result
/// is unused, backends should avoid lowering it.
pub fn mustLower(air: Air, inst: Air.Inst.Index) bool {
const data = air.instructions.items(.data)[inst];
return switch (air.instructions.items(.tag)[inst]) {
.arg,
.block,
.loop,
.br,
.trap,
.breakpoint,
.call,
.call_always_tail,
.call_never_tail,
.call_never_inline,
.cond_br,
.switch_br,
.@"try",
.try_ptr,
.dbg_stmt,
.dbg_block_begin,
.dbg_block_end,
.dbg_inline_begin,
.dbg_inline_end,
.dbg_var_ptr,
.dbg_var_val,
.ret,
.ret_load,
.store,
.unreach,
.optional_payload_ptr_set,
.errunion_payload_ptr_set,
.set_union_tag,
.memset,
.memcpy,
.cmpxchg_weak,
.cmpxchg_strong,
.fence,
.atomic_store_unordered,
.atomic_store_monotonic,
.atomic_store_release,
.atomic_store_seq_cst,
.atomic_rmw,
.prefetch,
.wasm_memory_grow,
.set_err_return_trace,
.vector_store_elem,
.c_va_arg,
.c_va_copy,
.c_va_end,
.c_va_start,
=> true,
.add,
.add_optimized,
.addwrap,
.addwrap_optimized,
.add_sat,
.sub,
.sub_optimized,
.subwrap,
.subwrap_optimized,
.sub_sat,
.mul,
.mul_optimized,
.mulwrap,
.mulwrap_optimized,
.mul_sat,
.div_float,
.div_float_optimized,
.div_trunc,
.div_trunc_optimized,
.div_floor,
.div_floor_optimized,
.div_exact,
.div_exact_optimized,
.rem,
.rem_optimized,
.mod,
.mod_optimized,
.ptr_add,
.ptr_sub,
.max,
.min,
.add_with_overflow,
.sub_with_overflow,
.mul_with_overflow,
.shl_with_overflow,
.alloc,
.ret_ptr,
.bit_and,
.bit_or,
.shr,
.shr_exact,
.shl,
.shl_exact,
.shl_sat,
.xor,
.not,
.bitcast,
.ret_addr,
.frame_addr,
.clz,
.ctz,
.popcount,
.byte_swap,
.bit_reverse,
.sqrt,
.sin,
.cos,
.tan,
.exp,
.exp2,
.log,
.log2,
.log10,
.fabs,
.floor,
.ceil,
.round,
.trunc_float,
.neg,
.neg_optimized,
.cmp_lt,
.cmp_lt_optimized,
.cmp_lte,
.cmp_lte_optimized,
.cmp_eq,
.cmp_eq_optimized,
.cmp_gte,
.cmp_gte_optimized,
.cmp_gt,
.cmp_gt_optimized,
.cmp_neq,
.cmp_neq_optimized,
.cmp_vector,
.cmp_vector_optimized,
.constant,
.const_ty,
.is_null,
.is_non_null,
.is_null_ptr,
.is_non_null_ptr,
.is_err,
.is_non_err,
.is_err_ptr,
.is_non_err_ptr,
.bool_and,
.bool_or,
.ptrtoint,
.bool_to_int,
.fptrunc,
.fpext,
.intcast,
.trunc,
.optional_payload,
.optional_payload_ptr,
.wrap_optional,
.unwrap_errunion_payload,
.unwrap_errunion_err,
.unwrap_errunion_payload_ptr,
.unwrap_errunion_err_ptr,
.wrap_errunion_payload,
.wrap_errunion_err,
.struct_field_ptr,
.struct_field_ptr_index_0,
.struct_field_ptr_index_1,
.struct_field_ptr_index_2,
.struct_field_ptr_index_3,
.struct_field_val,
.get_union_tag,
.slice,
.slice_len,
.slice_ptr,
.ptr_slice_len_ptr,
.ptr_slice_ptr_ptr,
.array_elem_val,
.slice_elem_ptr,
.ptr_elem_ptr,
.array_to_slice,
.float_to_int,
.float_to_int_optimized,
.int_to_float,
.reduce,
.reduce_optimized,
.splat,
.shuffle,
.select,
.is_named_enum_value,
.tag_name,
.error_name,
.error_set_has_value,
.aggregate_init,
.union_init,
.mul_add,
.field_parent_ptr,
.wasm_memory_size,
.cmp_lt_errors_len,
.err_return_trace,
.addrspace_cast,
.save_err_return_trace_index,
.work_item_id,
.work_group_size,
.work_group_id,
=> false,
.assembly => @truncate(u1, air.extraData(Air.Asm, data.ty_pl.payload).data.flags >> 31) != 0,
.load => air.typeOf(data.ty_op.operand).isVolatilePtr(),
.slice_elem_val, .ptr_elem_val => air.typeOf(data.bin_op.lhs).isVolatilePtr(),
.atomic_load => air.typeOf(data.atomic_load.ptr).isVolatilePtr(),
};
}

View File

@ -1333,40 +1333,47 @@ fn analyzeOperands(
.main_analysis => {
const usize_index = (inst * bpi) / @bitSizeOf(usize);
var tomb_bits: Bpi = 0;
// This logic must synchronize with `will_die_immediately` in `AnalyzeBigOperands.init`.
var immediate_death = false;
if (data.branch_deaths.remove(inst)) {
log.debug("[{}] %{}: resolved branch death to birth (immediate death)", .{ pass, inst });
tomb_bits |= @as(Bpi, 1) << (bpi - 1);
immediate_death = true;
assert(!data.live_set.contains(inst));
} else if (data.live_set.remove(inst)) {
log.debug("[{}] %{}: removed from live set", .{ pass, inst });
} else {
log.debug("[{}] %{}: immediate death", .{ pass, inst });
tomb_bits |= @as(Bpi, 1) << (bpi - 1);
immediate_death = true;
}
// Note that it's important we iterate over the operands backwards, so that if a dying
// operand is used multiple times we mark its last use as its death.
var i = operands.len;
while (i > 0) {
i -= 1;
const op_ref = operands[i];
const operand = Air.refToIndex(op_ref) orelse continue;
var tomb_bits: Bpi = @as(Bpi, @boolToInt(immediate_death)) << (bpi - 1);
// Don't compute any liveness for constants
switch (inst_tags[operand]) {
.constant, .const_ty => continue,
else => {},
}
// If our result is unused and the instruction doesn't need to be lowered, backends will
// skip the lowering of this instruction, so we don't want to record uses of operands.
// That way, we can mark as many instructions as possible unused.
if (!immediate_death or a.air.mustLower(inst)) {
// Note that it's important we iterate over the operands backwards, so that if a dying
// operand is used multiple times we mark its last use as its death.
var i = operands.len;
while (i > 0) {
i -= 1;
const op_ref = operands[i];
const operand = Air.refToIndex(op_ref) orelse continue;
const mask = @as(Bpi, 1) << @intCast(OperandInt, i);
// Don't compute any liveness for constants
switch (inst_tags[operand]) {
.constant, .const_ty => continue,
else => {},
}
if ((try data.live_set.fetchPut(gpa, operand, {})) == null) {
log.debug("[{}] %{}: added %{} to live set (operand dies here)", .{ pass, inst, operand });
tomb_bits |= mask;
if (data.branch_deaths.remove(operand)) {
log.debug("[{}] %{}: resolved branch death of %{} to this usage", .{ pass, inst, operand });
const mask = @as(Bpi, 1) << @intCast(OperandInt, i);
if ((try data.live_set.fetchPut(gpa, operand, {})) == null) {
log.debug("[{}] %{}: added %{} to live set (operand dies here)", .{ pass, inst, operand });
tomb_bits |= mask;
if (data.branch_deaths.remove(operand)) {
log.debug("[{}] %{}: resolved branch death of %{} to this usage", .{ pass, inst, operand });
}
}
}
}
@ -1975,6 +1982,9 @@ fn AnalyzeBigOperands(comptime pass: LivenessPass) type {
small: [bpi - 1]Air.Inst.Ref = .{.none} ** (bpi - 1),
extra_tombs: []u32,
// Only used in `LivenessPass.main_analysis`
will_die_immediately: bool,
const Self = @This();
fn init(
@ -1994,12 +2004,18 @@ fn AnalyzeBigOperands(comptime pass: LivenessPass) type {
std.mem.set(u32, extra_tombs, 0);
const will_die_immediately: bool = switch (pass) {
.loop_analysis => false, // track everything, since we don't have full liveness information yet
.main_analysis => data.branch_deaths.contains(inst) and !data.live_set.contains(inst),
};
return .{
.a = a,
.data = data,
.inst = inst,
.operands_remaining = @intCast(u32, total_operands),
.extra_tombs = extra_tombs,
.will_die_immediately = will_die_immediately,
};
}
@ -2022,6 +2038,11 @@ fn AnalyzeBigOperands(comptime pass: LivenessPass) type {
else => {},
}
// If our result is unused and the instruction doesn't need to be lowered, backends will
// skip the lowering of this instruction, so we don't want to record uses of operands.
// That way, we can mark as many instructions as possible unused.
if (big.will_die_immediately and !big.a.air.mustLower(big.inst)) return;
const extra_byte = (big.operands_remaining - (bpi - 1)) / 31;
const extra_bit = @intCast(u5, big.operands_remaining - (bpi - 1) - extra_byte * 31);

File diff suppressed because it is too large

View File

@ -4523,6 +4523,10 @@ pub const FuncGen = struct {
fn genBody(self: *FuncGen, body: []const Air.Inst.Index) Error!void {
const air_tags = self.air.instructions.items(.tag);
for (body, 0..) |inst, i| {
if (self.liveness.isUnused(inst) and !self.air.mustLower(inst)) {
continue;
}
const opt_value: ?*llvm.Value = switch (air_tags[inst]) {
// zig fmt: off
.add => try self.airAdd(inst, false),
@ -5166,8 +5170,6 @@ pub const FuncGen = struct {
}
fn airCVaArg(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const list = try self.resolveInst(ty_op.operand);
const arg_ty = self.air.getRefType(ty_op.ty);
@ -5177,8 +5179,6 @@ pub const FuncGen = struct {
}
fn airCVaCopy(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const src_list = try self.resolveInst(ty_op.operand);
const va_list_ty = self.air.getRefType(ty_op.ty);
@ -5226,8 +5226,6 @@ pub const FuncGen = struct {
}
fn airCVaStart(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const va_list_ty = self.air.typeOfIndex(inst);
const llvm_va_list_ty = try self.dg.lowerType(va_list_ty);
@ -5254,7 +5252,6 @@ pub const FuncGen = struct {
}
fn airCmp(self: *FuncGen, inst: Air.Inst.Index, op: math.CompareOperator, want_fast_math: bool) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
self.builder.setFastMath(want_fast_math);
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
@ -5266,7 +5263,6 @@ pub const FuncGen = struct {
}
fn airCmpVector(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
self.builder.setFastMath(want_fast_math);
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
@ -5281,8 +5277,6 @@ pub const FuncGen = struct {
}
fn airCmpLtErrorsLen(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const un_op = self.air.instructions.items(.data)[inst].un_op;
const operand = try self.resolveInst(un_op);
const llvm_fn = try self.getCmpLtErrorsLenFunction();
@ -5650,9 +5644,6 @@ pub const FuncGen = struct {
}
fn airArrayToSlice(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst))
return null;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand_ty = self.air.typeOf(ty_op.operand);
const array_ty = operand_ty.childType();
@ -5674,9 +5665,6 @@ pub const FuncGen = struct {
}
fn airIntToFloat(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst))
return null;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand = try self.resolveInst(ty_op.operand);
@ -5733,9 +5721,6 @@ pub const FuncGen = struct {
}
fn airFloatToInt(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value {
if (self.liveness.isUnused(inst))
return null;
self.builder.setFastMath(want_fast_math);
const target = self.dg.module.getTarget();
@ -5792,16 +5777,12 @@ pub const FuncGen = struct {
}
fn airSliceField(self: *FuncGen, inst: Air.Inst.Index, index: c_uint) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand = try self.resolveInst(ty_op.operand);
return self.builder.buildExtractValue(operand, index, "");
}
fn airPtrSliceFieldPtr(self: *FuncGen, inst: Air.Inst.Index, index: c_uint) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const slice_ptr = try self.resolveInst(ty_op.operand);
const slice_ptr_ty = self.air.typeOf(ty_op.operand);
@ -5814,8 +5795,6 @@ pub const FuncGen = struct {
const inst = body_tail[0];
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const slice_ty = self.air.typeOf(bin_op.lhs);
if (!slice_ty.isVolatilePtr() and self.liveness.isUnused(inst)) return null;
const slice = try self.resolveInst(bin_op.lhs);
const index = try self.resolveInst(bin_op.rhs);
const elem_ty = slice_ty.childType();
@ -5835,7 +5814,6 @@ pub const FuncGen = struct {
}
fn airSliceElemPtr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
const slice_ty = self.air.typeOf(bin_op.lhs);
@ -5850,7 +5828,6 @@ pub const FuncGen = struct {
fn airArrayElemVal(self: *FuncGen, body_tail: []const Air.Inst.Index) !?*llvm.Value {
const inst = body_tail[0];
if (self.liveness.isUnused(inst)) return null;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const array_ty = self.air.typeOf(bin_op.lhs);
@ -5881,8 +5858,6 @@ pub const FuncGen = struct {
const inst = body_tail[0];
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const ptr_ty = self.air.typeOf(bin_op.lhs);
if (!ptr_ty.isVolatilePtr() and self.liveness.isUnused(inst)) return null;
const elem_ty = ptr_ty.childType();
const llvm_elem_ty = try self.dg.lowerPtrElemTy(elem_ty);
const base_ptr = try self.resolveInst(bin_op.lhs);
@ -5908,8 +5883,6 @@ pub const FuncGen = struct {
}
fn airPtrElemPtr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
const ptr_ty = self.air.typeOf(bin_op.lhs);
@ -5934,9 +5907,6 @@ pub const FuncGen = struct {
}
fn airStructFieldPtr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst))
return null;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const struct_field = self.air.extraData(Air.StructField, ty_pl.payload).data;
const struct_ptr = try self.resolveInst(struct_field.struct_operand);
@ -5949,8 +5919,6 @@ pub const FuncGen = struct {
inst: Air.Inst.Index,
field_index: u32,
) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const struct_ptr = try self.resolveInst(ty_op.operand);
const struct_ptr_ty = self.air.typeOf(ty_op.operand);
@ -5959,8 +5927,6 @@ pub const FuncGen = struct {
fn airStructFieldVal(self: *FuncGen, body_tail: []const Air.Inst.Index) !?*llvm.Value {
const inst = body_tail[0];
if (self.liveness.isUnused(inst)) return null;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const struct_field = self.air.extraData(Air.StructField, ty_pl.payload).data;
const struct_ty = self.air.typeOf(struct_field.struct_operand);
@ -6060,8 +6026,6 @@ pub const FuncGen = struct {
}
fn airFieldParentPtr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const extra = self.air.extraData(Air.FieldParentPtr, ty_pl.payload).data;
@ -6083,9 +6047,6 @@ pub const FuncGen = struct {
}
fn airNot(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst))
return null;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand = try self.resolveInst(ty_op.operand);
@ -6263,8 +6224,6 @@ pub const FuncGen = struct {
const clobbers_len = @truncate(u31, extra.data.flags);
var extra_i: usize = extra.end;
if (!is_volatile and self.liveness.isUnused(inst)) return null;
const outputs = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra_i..][0..extra.data.outputs_len]);
extra_i += outputs.len;
const inputs = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra_i..][0..extra.data.inputs_len]);
@ -6610,8 +6569,6 @@ pub const FuncGen = struct {
operand_is_ptr: bool,
pred: llvm.IntPredicate,
) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const un_op = self.air.instructions.items(.data)[inst].un_op;
const operand = try self.resolveInst(un_op);
const operand_ty = self.air.typeOf(un_op);
@ -6659,8 +6616,6 @@ pub const FuncGen = struct {
op: llvm.IntPredicate,
operand_is_ptr: bool,
) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const un_op = self.air.instructions.items(.data)[inst].un_op;
const operand = try self.resolveInst(un_op);
const operand_ty = self.air.typeOf(un_op);
@ -6701,8 +6656,6 @@ pub const FuncGen = struct {
}
fn airOptionalPayloadPtr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand = try self.resolveInst(ty_op.operand);
const optional_ty = self.air.typeOf(ty_op.operand).childType();
@ -6756,8 +6709,6 @@ pub const FuncGen = struct {
fn airOptionalPayload(self: *FuncGen, body_tail: []const Air.Inst.Index) !?*llvm.Value {
const inst = body_tail[0];
if (self.liveness.isUnused(inst)) return null;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand = try self.resolveInst(ty_op.operand);
const optional_ty = self.air.typeOf(ty_op.operand);
@ -6780,8 +6731,6 @@ pub const FuncGen = struct {
operand_is_ptr: bool,
) !?*llvm.Value {
const inst = body_tail[0];
if (self.liveness.isUnused(inst)) return null;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand = try self.resolveInst(ty_op.operand);
const operand_ty = self.air.typeOf(ty_op.operand);
@ -6817,9 +6766,6 @@ pub const FuncGen = struct {
inst: Air.Inst.Index,
operand_is_ptr: bool,
) !?*llvm.Value {
if (self.liveness.isUnused(inst))
return null;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand = try self.resolveInst(ty_op.operand);
const operand_ty = self.air.typeOf(ty_op.operand);
@ -6893,8 +6839,6 @@ pub const FuncGen = struct {
}
fn airSaveErrReturnTraceIndex(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const target = self.dg.module.getTarget();
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
@ -6911,8 +6855,6 @@ pub const FuncGen = struct {
}
fn airWrapOptional(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const payload_ty = self.air.typeOf(ty_op.operand);
const non_null_bit = self.context.intType(8).constInt(1, .False);
@ -6943,8 +6885,6 @@ pub const FuncGen = struct {
}
fn airWrapErrUnionPayload(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const err_un_ty = self.air.typeOfIndex(inst);
const operand = try self.resolveInst(ty_op.operand);
@ -6978,8 +6918,6 @@ pub const FuncGen = struct {
}
fn airWrapErrUnionErr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const err_un_ty = self.air.typeOfIndex(inst);
const payload_ty = err_un_ty.errorUnionPayload();
@ -7015,8 +6953,6 @@ pub const FuncGen = struct {
}
fn airWasmMemorySize(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const pl_op = self.air.instructions.items(.data)[inst].pl_op;
const index = pl_op.payload;
const llvm_u32 = self.context.intType(32);
@ -7061,8 +6997,6 @@ pub const FuncGen = struct {
}
fn airMin(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
@ -7074,8 +7008,6 @@ pub const FuncGen = struct {
}
fn airMax(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
@ -7087,8 +7019,6 @@ pub const FuncGen = struct {
}
fn airSlice(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
const ptr = try self.resolveInst(bin_op.lhs);
@ -7103,7 +7033,6 @@ pub const FuncGen = struct {
}
fn airAdd(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
self.builder.setFastMath(want_fast_math);
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
@ -7118,7 +7047,6 @@ pub const FuncGen = struct {
}
fn airAddWrap(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
self.builder.setFastMath(want_fast_math);
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
@ -7129,8 +7057,6 @@ pub const FuncGen = struct {
}
fn airAddSat(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
@ -7144,7 +7070,6 @@ pub const FuncGen = struct {
}
fn airSub(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
self.builder.setFastMath(want_fast_math);
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
@ -7159,7 +7084,6 @@ pub const FuncGen = struct {
}
fn airSubWrap(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
self.builder.setFastMath(want_fast_math);
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
@ -7170,8 +7094,6 @@ pub const FuncGen = struct {
}
fn airSubSat(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
@ -7184,7 +7106,6 @@ pub const FuncGen = struct {
}
fn airMul(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
self.builder.setFastMath(want_fast_math);
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
@ -7199,7 +7120,6 @@ pub const FuncGen = struct {
}
fn airMulWrap(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
self.builder.setFastMath(want_fast_math);
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
@ -7210,8 +7130,6 @@ pub const FuncGen = struct {
}
fn airMulSat(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
@ -7224,7 +7142,6 @@ pub const FuncGen = struct {
}
fn airDivFloat(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
self.builder.setFastMath(want_fast_math);
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
@ -7236,7 +7153,6 @@ pub const FuncGen = struct {
}
fn airDivTrunc(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
self.builder.setFastMath(want_fast_math);
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
@ -7254,7 +7170,6 @@ pub const FuncGen = struct {
}
fn airDivFloor(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
self.builder.setFastMath(want_fast_math);
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
@ -7287,7 +7202,6 @@ pub const FuncGen = struct {
}
fn airDivExact(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
self.builder.setFastMath(want_fast_math);
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
@ -7302,7 +7216,6 @@ pub const FuncGen = struct {
}
fn airRem(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
self.builder.setFastMath(want_fast_math);
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
@ -7317,7 +7230,6 @@ pub const FuncGen = struct {
}
fn airMod(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
self.builder.setFastMath(want_fast_math);
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
@ -7347,8 +7259,6 @@ pub const FuncGen = struct {
}
fn airPtrAdd(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
const base_ptr = try self.resolveInst(bin_op.lhs);
@ -7368,8 +7278,6 @@ pub const FuncGen = struct {
}
fn airPtrSub(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
const base_ptr = try self.resolveInst(bin_op.lhs);
@ -7395,9 +7303,6 @@ pub const FuncGen = struct {
signed_intrinsic: []const u8,
unsigned_intrinsic: []const u8,
) !?*llvm.Value {
if (self.liveness.isUnused(inst))
return null;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
@ -7686,8 +7591,6 @@ pub const FuncGen = struct {
}
fn airMulAdd(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const pl_op = self.air.instructions.items(.data)[inst].pl_op;
const extra = self.air.extraData(Air.Bin, pl_op.payload).data;
@ -7700,9 +7603,6 @@ pub const FuncGen = struct {
}
fn airShlWithOverflow(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst))
return null;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
@ -7759,8 +7659,6 @@ pub const FuncGen = struct {
}
fn airAnd(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst))
return null;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
@ -7768,8 +7666,6 @@ pub const FuncGen = struct {
}
fn airOr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst))
return null;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
@ -7777,8 +7673,6 @@ pub const FuncGen = struct {
}
fn airXor(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst))
return null;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
@ -7786,8 +7680,6 @@ pub const FuncGen = struct {
}
fn airShlExact(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
@ -7809,8 +7701,6 @@ pub const FuncGen = struct {
}
fn airShl(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
@ -7831,8 +7721,6 @@ pub const FuncGen = struct {
}
fn airShlSat(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
@ -7876,8 +7764,6 @@ pub const FuncGen = struct {
}
fn airShr(self: *FuncGen, inst: Air.Inst.Index, is_exact: bool) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
@ -7912,9 +7798,6 @@ pub const FuncGen = struct {
}
fn airIntCast(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst))
return null;
const target = self.dg.module.getTarget();
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const dest_ty = self.air.typeOfIndex(inst);
@ -7937,8 +7820,6 @@ pub const FuncGen = struct {
}
fn airTrunc(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand = try self.resolveInst(ty_op.operand);
const dest_llvm_ty = try self.dg.lowerType(self.air.typeOfIndex(inst));
@ -7946,9 +7827,6 @@ pub const FuncGen = struct {
}
fn airFptrunc(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst))
return null;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand = try self.resolveInst(ty_op.operand);
const operand_ty = self.air.typeOf(ty_op.operand);
@ -7978,9 +7856,6 @@ pub const FuncGen = struct {
}
fn airFpext(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst))
return null;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand = try self.resolveInst(ty_op.operand);
const operand_ty = self.air.typeOf(ty_op.operand);
@ -8010,9 +7885,6 @@ pub const FuncGen = struct {
}
fn airPtrToInt(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst))
return null;
const un_op = self.air.instructions.items(.data)[inst].un_op;
const operand = try self.resolveInst(un_op);
const dest_llvm_ty = try self.dg.lowerType(self.air.typeOfIndex(inst));
@ -8020,8 +7892,6 @@ pub const FuncGen = struct {
}
fn airBitCast(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand_ty = self.air.typeOf(ty_op.operand);
const inst_ty = self.air.typeOfIndex(inst);
@ -8137,9 +8007,6 @@ pub const FuncGen = struct {
}
fn airBoolToInt(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst))
return null;
const un_op = self.air.instructions.items(.data)[inst].un_op;
const operand = try self.resolveInst(un_op);
return operand;
@ -8189,7 +8056,6 @@ pub const FuncGen = struct {
}
fn airAlloc(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const ptr_ty = self.air.typeOfIndex(inst);
const pointee_type = ptr_ty.childType();
if (!pointee_type.isFnOrHasRuntimeBitsIgnoreComptime()) return self.dg.lowerPtrToVoid(ptr_ty);
@ -8201,7 +8067,6 @@ pub const FuncGen = struct {
}
fn airRetPtr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const ptr_ty = self.air.typeOfIndex(inst);
const ret_ty = ptr_ty.childType();
if (!ret_ty.isFnOrHasRuntimeBitsIgnoreComptime()) return self.dg.lowerPtrToVoid(ptr_ty);
@ -8289,8 +8154,6 @@ pub const FuncGen = struct {
const ptr = try fg.resolveInst(ty_op.operand);
elide: {
if (ptr_info.@"volatile") break :elide;
if (fg.liveness.isUnused(inst)) return null;
if (!isByRef(ptr_info.pointee_type)) break :elide;
if (!canElideLoad(fg, body_tail)) break :elide;
return ptr;
@ -8314,8 +8177,7 @@ pub const FuncGen = struct {
}
fn airRetAddr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
_ = inst;
const llvm_usize = try self.dg.lowerType(Type.usize);
const target = self.dg.module.getTarget();
if (!target_util.supportsReturnAddress(target)) {
@ -8331,8 +8193,7 @@ pub const FuncGen = struct {
}
fn airFrameAddress(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
_ = inst;
const llvm_i32 = self.context.intType(32);
const llvm_fn_name = "llvm.frameaddress.p0";
const llvm_fn = self.dg.object.llvm_module.getNamedFunction(llvm_fn_name) orelse blk: {
@ -8462,8 +8323,6 @@ pub const FuncGen = struct {
const ptr = try self.resolveInst(atomic_load.ptr);
const ptr_ty = self.air.typeOf(atomic_load.ptr);
const ptr_info = ptr_ty.ptrInfo().data;
if (!ptr_info.@"volatile" and self.liveness.isUnused(inst))
return null;
const elem_ty = ptr_info.pointee_type;
if (!elem_ty.hasRuntimeBitsIgnoreComptime())
return null;
@ -8577,8 +8436,6 @@ pub const FuncGen = struct {
}
fn airGetUnionTag(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const un_ty = self.air.typeOf(ty_op.operand);
const target = self.dg.module.getTarget();
@ -8603,8 +8460,6 @@ pub const FuncGen = struct {
}
fn airUnaryOp(self: *FuncGen, inst: Air.Inst.Index, comptime op: FloatOp) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const un_op = self.air.instructions.items(.data)[inst].un_op;
const operand = try self.resolveInst(un_op);
const operand_ty = self.air.typeOf(un_op);
@ -8613,7 +8468,6 @@ pub const FuncGen = struct {
}
fn airNeg(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
self.builder.setFastMath(want_fast_math);
const un_op = self.air.instructions.items(.data)[inst].un_op;
@ -8624,8 +8478,6 @@ pub const FuncGen = struct {
}
fn airClzCtz(self: *FuncGen, inst: Air.Inst.Index, llvm_fn_name: []const u8) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand_ty = self.air.typeOf(ty_op.operand);
const operand = try self.resolveInst(ty_op.operand);
@ -8652,8 +8504,6 @@ pub const FuncGen = struct {
}
fn airBitOp(self: *FuncGen, inst: Air.Inst.Index, llvm_fn_name: []const u8) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand_ty = self.air.typeOf(ty_op.operand);
const operand = try self.resolveInst(ty_op.operand);
@ -8679,8 +8529,6 @@ pub const FuncGen = struct {
}
fn airByteSwap(self: *FuncGen, inst: Air.Inst.Index, llvm_fn_name: []const u8) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const target = self.dg.module.getTarget();
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand_ty = self.air.typeOf(ty_op.operand);
@ -8734,8 +8582,6 @@ pub const FuncGen = struct {
}
fn airErrorSetHasValue(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand = try self.resolveInst(ty_op.operand);
const error_set_ty = self.air.getRefType(ty_op.ty);
@ -8781,8 +8627,6 @@ pub const FuncGen = struct {
}
fn airIsNamedEnumValue(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const un_op = self.air.instructions.items(.data)[inst].un_op;
const operand = try self.resolveInst(un_op);
const enum_ty = self.air.typeOf(un_op);
@ -8862,8 +8706,6 @@ pub const FuncGen = struct {
}
fn airTagName(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const un_op = self.air.instructions.items(.data)[inst].un_op;
const operand = try self.resolveInst(un_op);
const enum_ty = self.air.typeOf(un_op);
@ -8995,8 +8837,6 @@ pub const FuncGen = struct {
}
fn airErrorName(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const un_op = self.air.instructions.items(.data)[inst].un_op;
const operand = try self.resolveInst(un_op);
const slice_ty = self.air.typeOfIndex(inst);
@ -9011,8 +8851,6 @@ pub const FuncGen = struct {
}
fn airSplat(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const scalar = try self.resolveInst(ty_op.operand);
const vector_ty = self.air.typeOfIndex(inst);
@ -9021,8 +8859,6 @@ pub const FuncGen = struct {
}
fn airSelect(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const pl_op = self.air.instructions.items(.data)[inst].pl_op;
const extra = self.air.extraData(Air.Bin, pl_op.payload).data;
const pred = try self.resolveInst(pl_op.operand);
@ -9033,8 +8869,6 @@ pub const FuncGen = struct {
}
fn airShuffle(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const extra = self.air.extraData(Air.Shuffle, ty_pl.payload).data;
const a = try self.resolveInst(extra.a);
@ -9134,7 +8968,6 @@ pub const FuncGen = struct {
}
fn airReduce(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
self.builder.setFastMath(want_fast_math);
const target = self.dg.module.getTarget();
@ -9221,8 +9054,6 @@ pub const FuncGen = struct {
}
fn airAggregateInit(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const result_ty = self.air.typeOfIndex(inst);
const len = @intCast(usize, result_ty.arrayLen());
@ -9360,8 +9191,6 @@ pub const FuncGen = struct {
}
fn airUnionInit(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const extra = self.air.extraData(Air.UnionInit, ty_pl.payload).data;
const union_ty = self.air.typeOfIndex(inst);
@ -9566,8 +9395,6 @@ pub const FuncGen = struct {
}
fn airAddrSpaceCast(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const inst_ty = self.air.typeOfIndex(inst);
const operand = try self.resolveInst(ty_op.operand);
@ -9592,8 +9419,6 @@ pub const FuncGen = struct {
}
fn airWorkItemId(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const target = self.dg.module.getTarget();
assert(target.cpu.arch == .amdgcn); // TODO: port this function to other GPU architectures
@ -9603,8 +9428,6 @@ pub const FuncGen = struct {
}
fn airWorkGroupSize(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const target = self.dg.module.getTarget();
assert(target.cpu.arch == .amdgcn); // TODO: port this function to other GPU architectures
@ -9634,8 +9457,6 @@ pub const FuncGen = struct {
}
fn airWorkGroupId(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const target = self.dg.module.getTarget();
assert(target.cpu.arch == .amdgcn); // TODO: port this function to other GPU architectures
@ -9756,8 +9577,6 @@ pub const FuncGen = struct {
struct_ptr_ty: Type,
field_index: u32,
) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const target = self.dg.object.target;
const struct_ty = struct_ptr_ty.childType();
switch (struct_ty.zigTypeTag()) {

View File

@ -215,3 +215,19 @@ test "copy VaList" {
try std.testing.expectEqual(@as(c_int, 3), S.add(1, @as(c_int, 1)));
try std.testing.expectEqual(@as(c_int, 9), S.add(2, @as(c_int, 1), @as(c_int, 2)));
}
test "unused VaList arg" {
const S = struct {
fn thirdArg(dummy: c_int, ...) callconv(.C) c_int {
_ = dummy;
var ap = @cVaStart();
defer @cVaEnd(&ap);
_ = @cVaArg(&ap, c_int);
return @cVaArg(&ap, c_int);
}
};
const x = S.thirdArg(0, @as(c_int, 1), @as(c_int, 2));
try std.testing.expectEqual(@as(c_int, 2), x);
}