Liveness: modify encoding to support over 32 operands

Prior to this, Liveness encoded `asm`, `call`, and `aggregate_init` with
a single 32-bit integer, allowing up to 35 operands (the integer's 32
bits plus the 3 provided by the regular tomb_bits). However, the Zig
language allows function calls with more than 35 arguments, inline
assembly with more than 35 inputs, and anonymous tuples with more than
35 elements.

The new encoding stores an index into the extra array instead of the
bits directly, followed by as many extra elements as needed to encode
all the operands. The MSB of each element is a flag marking the last
element, leaving 31 usable bits per element.
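As a worked illustration of that arithmetic (a minimal sketch; the helper and test below are hypothetical, not part of the commit): operand index n, once past the 3 regular tomb bits, lands in extra element (n - 3) / 31 at bit (n - 3) % 31.

const std = @import("std");

/// Hypothetical helper, assuming bpi = 4 (3 regular tomb bits per
/// instruction) and 31 usable bits per extra element, since the MSB is
/// reserved as the terminator flag.
fn bigTombLocation(operand_index: u32) struct { element: u32, bit: u5 } {
    const small_tombs: u32 = 3; // Liveness.bpi - 1
    std.debug.assert(operand_index >= small_tombs);
    const big_bit_index = operand_index - small_tombs;
    return .{
        .element = big_bit_index / 31,
        .bit = @intCast(u5, big_bit_index % 31),
    };
}

test "operand 40 lands in extra element 1, bit 6" {
    const loc = bigTombLocation(40);
    try std.testing.expectEqual(@as(u32, 1), loc.element);
    try std.testing.expectEqual(@as(u5, 6), loc.bit);
}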

Prior to this, print_air did not print tombstones correctly for these
instructions; now it does.

In addition to updating the BigTomb iteration logic in the machine code
backends, this commit extracts the common logic into the Liveness namespace.
Andrew Kelley 2022-04-12 07:05:58 -07:00
parent 17631cb2d3
commit b0edd8752a
8 changed files with 240 additions and 90 deletions

View File

@@ -178,11 +178,50 @@ pub fn deinit(l: *Liveness, gpa: Allocator) void {
     l.* = undefined;
 }
 
+pub fn iterateBigTomb(l: Liveness, inst: Air.Inst.Index) BigTomb {
+    return .{
+        .tomb_bits = l.getTombBits(inst),
+        .extra_start = l.special.get(inst) orelse 0,
+        .extra_offset = 0,
+        .extra = l.extra,
+        .bit_index = 0,
+    };
+}
+
 /// How many tomb bits per AIR instruction.
 pub const bpi = 4;
 pub const Bpi = std.meta.Int(.unsigned, bpi);
 pub const OperandInt = std.math.Log2Int(Bpi);
 
+/// Useful for decoders of Liveness information.
+pub const BigTomb = struct {
+    tomb_bits: Liveness.Bpi,
+    bit_index: u32,
+    extra_start: u32,
+    extra_offset: u32,
+    extra: []const u32,
+
+    /// Returns whether the next operand dies.
+    pub fn feed(bt: *BigTomb) bool {
+        const this_bit_index = bt.bit_index;
+        bt.bit_index += 1;
+
+        const small_tombs = Liveness.bpi - 1;
+        if (this_bit_index < small_tombs) {
+            const dies = @truncate(u1, bt.tomb_bits >> @intCast(Liveness.OperandInt, this_bit_index)) != 0;
+            return dies;
+        }
+
+        const big_bit_index = this_bit_index - small_tombs;
+        while (big_bit_index - bt.extra_offset * 31 >= 31) {
+            bt.extra_offset += 1;
+        }
+        const dies = @truncate(u1, bt.extra[bt.extra_start + bt.extra_offset] >>
+            @intCast(u5, big_bit_index - bt.extra_offset * 31)) != 0;
+        return dies;
+    }
+};
+
 /// In-progress data; on successful analysis converted into `Liveness`.
 const Analysis = struct {
     gpa: Allocator,
@@ -428,6 +467,7 @@ fn analyzeInst(
                 .inst = inst,
                 .main_tomb = main_tomb,
             };
+            defer extra_tombs.deinit();
             try extra_tombs.feed(callee);
             for (args) |arg| {
                 try extra_tombs.feed(arg);
@@ -468,6 +508,7 @@ fn analyzeInst(
                 .inst = inst,
                 .main_tomb = main_tomb,
             };
+            defer extra_tombs.deinit();
             for (elements) |elem| {
                 try extra_tombs.feed(elem);
             }
@@ -555,6 +596,7 @@ fn analyzeInst(
                 .inst = inst,
                 .main_tomb = main_tomb,
             };
+            defer extra_tombs.deinit();
             for (outputs) |output| {
                 if (output != .none) {
                     try extra_tombs.feed(output);
@@ -790,10 +832,10 @@ const ExtraTombs = struct {
     bit_index: usize = 0,
     tomb_bits: Bpi = 0,
     big_tomb_bits: u32 = 0,
+    big_tomb_bits_extra: std.ArrayListUnmanaged(u32) = .{},
 
     fn feed(et: *ExtraTombs, op_ref: Air.Inst.Ref) !void {
         const this_bit_index = et.bit_index;
-        assert(this_bit_index < 32); // TODO mechanism for when there are greater than 32 operands
         et.bit_index += 1;
         const gpa = et.analysis.gpa;
         const op_index = Air.refToIndex(op_ref) orelse return;
@@ -801,18 +843,37 @@ const ExtraTombs = struct {
         if (prev == null) {
             // Death.
             if (et.new_set) |ns| try ns.putNoClobber(gpa, op_index, {});
-            if (this_bit_index < bpi - 1) {
+            const available_tomb_bits = bpi - 1;
+            if (this_bit_index < available_tomb_bits) {
                 et.tomb_bits |= @as(Bpi, 1) << @intCast(OperandInt, this_bit_index);
             } else {
-                const big_bit_index = this_bit_index - (bpi - 1);
-                et.big_tomb_bits |= @as(u32, 1) << @intCast(u5, big_bit_index);
+                const big_bit_index = this_bit_index - available_tomb_bits;
+                while (big_bit_index >= (et.big_tomb_bits_extra.items.len + 1) * 31) {
+                    // We need another element in the extra array.
+                    try et.big_tomb_bits_extra.append(gpa, et.big_tomb_bits);
+                    et.big_tomb_bits = 0;
+                } else {
+                    const final_bit_index = big_bit_index - et.big_tomb_bits_extra.items.len * 31;
+                    et.big_tomb_bits |= @as(u32, 1) << @intCast(u5, final_bit_index);
+                }
             }
         }
     }
 
     fn finish(et: *ExtraTombs) !void {
         et.tomb_bits |= @as(Bpi, @boolToInt(et.main_tomb)) << (bpi - 1);
+        // Signal the terminal big_tomb_bits element.
+        et.big_tomb_bits |= @as(u32, 1) << 31;
         et.analysis.storeTombBits(et.inst, et.tomb_bits);
-        try et.analysis.special.put(et.analysis.gpa, et.inst, et.big_tomb_bits);
+        const extra_index = @intCast(u32, et.analysis.extra.items.len);
+        try et.analysis.extra.ensureUnusedCapacity(et.analysis.gpa, et.big_tomb_bits_extra.items.len + 1);
+        try et.analysis.special.put(et.analysis.gpa, et.inst, extra_index);
+        et.analysis.extra.appendSliceAssumeCapacity(et.big_tomb_bits_extra.items);
+        et.analysis.extra.appendAssumeCapacity(et.big_tomb_bits);
+    }
+
+    fn deinit(et: *ExtraTombs) void {
+        et.big_tomb_bits_extra.deinit(et.analysis.gpa);
     }
 };
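To show how the extracted decoder is consumed, here is a hedged usage sketch (not part of the commit; the import paths and the printing are illustrative assumptions). A decoder calls feed() once per operand, in the same order the analysis fed them: callee first, then the arguments.

const std = @import("std");
const Air = @import("Air.zig");
const Liveness = @import("Liveness.zig");

/// Sketch: walk a call instruction's operand tombstones using the
/// shared Liveness.BigTomb iterator instead of per-backend bit logic.
fn reportCallDeaths(
    liveness: Liveness,
    inst: Air.Inst.Index,
    callee: Air.Inst.Ref,
    args: []const Air.Inst.Ref,
) void {
    var bt = liveness.iterateBigTomb(inst);

    if (bt.feed()) {
        if (Air.refToIndex(callee)) |op_index| {
            std.debug.print("callee %{d} dies\n", .{op_index});
        }
    }
    for (args) |arg| {
        const dies = bt.feed();
        if (!dies) continue;
        const op_index = Air.refToIndex(arg) orelse continue;
        std.debug.print("arg %{d} dies\n", .{op_index});
    }
}

A backend's own BigTomb wrapper (as in the diffs below) adds only the refToIndex translation and the call to processDeath.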

View File

@@ -202,26 +202,12 @@ const BlockData = struct {
 const BigTomb = struct {
     function: *Self,
     inst: Air.Inst.Index,
-    tomb_bits: Liveness.Bpi,
-    big_tomb_bits: u32,
-    bit_index: usize,
+    lbt: Liveness.BigTomb,
 
     fn feed(bt: *BigTomb, op_ref: Air.Inst.Ref) void {
-        const this_bit_index = bt.bit_index;
-        bt.bit_index += 1;
-
-        const op_int = @enumToInt(op_ref);
-        if (op_int < Air.Inst.Ref.typed_value_map.len) return;
-        const op_index = @intCast(Air.Inst.Index, op_int - Air.Inst.Ref.typed_value_map.len);
-
-        if (this_bit_index < Liveness.bpi - 1) {
-            const dies = @truncate(u1, bt.tomb_bits >> @intCast(Liveness.OperandInt, this_bit_index)) != 0;
-            if (!dies) return;
-        } else {
-            const big_bit_index = @intCast(u5, this_bit_index - (Liveness.bpi - 1));
-            const dies = @truncate(u1, bt.big_tomb_bits >> big_bit_index) != 0;
-            if (!dies) return;
-        }
-
+        const dies = bt.lbt.feed();
+        const op_index = Air.refToIndex(op_ref) orelse return;
+        if (!dies) return;
         bt.function.processDeath(op_index);
     }
@@ -3291,9 +3277,7 @@ fn iterateBigTomb(self: *Self, inst: Air.Inst.Index, operand_count: usize) !BigTomb {
     return BigTomb{
         .function = self,
         .inst = inst,
-        .tomb_bits = self.liveness.getTombBits(inst),
-        .big_tomb_bits = self.liveness.special.get(inst) orelse 0,
-        .bit_index = 0,
+        .lbt = self.liveness.iterateBigTomb(inst),
     };
 }

View File

@@ -224,26 +224,12 @@ const BlockData = struct {
 const BigTomb = struct {
     function: *Self,
     inst: Air.Inst.Index,
-    tomb_bits: Liveness.Bpi,
-    big_tomb_bits: u32,
-    bit_index: usize,
+    lbt: Liveness.BigTomb,
 
     fn feed(bt: *BigTomb, op_ref: Air.Inst.Ref) void {
-        const this_bit_index = bt.bit_index;
-        bt.bit_index += 1;
-
-        const op_int = @enumToInt(op_ref);
-        if (op_int < Air.Inst.Ref.typed_value_map.len) return;
-        const op_index = @intCast(Air.Inst.Index, op_int - Air.Inst.Ref.typed_value_map.len);
-
-        if (this_bit_index < Liveness.bpi - 1) {
-            const dies = @truncate(u1, bt.tomb_bits >> @intCast(Liveness.OperandInt, this_bit_index)) != 0;
-            if (!dies) return;
-        } else {
-            const big_bit_index = @intCast(u5, this_bit_index - (Liveness.bpi - 1));
-            const dies = @truncate(u1, bt.big_tomb_bits >> big_bit_index) != 0;
-            if (!dies) return;
-        }
-
+        const dies = bt.lbt.feed();
+        const op_index = Air.refToIndex(op_ref) orelse return;
+        if (!dies) return;
         bt.function.processDeath(op_index);
     }
@@ -4076,9 +4062,7 @@ fn iterateBigTomb(self: *Self, inst: Air.Inst.Index, operand_count: usize) !BigTomb {
     return BigTomb{
         .function = self,
         .inst = inst,
-        .tomb_bits = self.liveness.getTombBits(inst),
-        .big_tomb_bits = self.liveness.special.get(inst) orelse 0,
-        .bit_index = 0,
+        .lbt = self.liveness.iterateBigTomb(inst),
     };
 }

View File

@@ -194,26 +194,12 @@ const Reloc = union(enum) {
 const BigTomb = struct {
     function: *Self,
     inst: Air.Inst.Index,
-    tomb_bits: Liveness.Bpi,
-    big_tomb_bits: u32,
-    bit_index: usize,
+    lbt: Liveness.BigTomb,
 
     fn feed(bt: *BigTomb, op_ref: Air.Inst.Ref) void {
-        const this_bit_index = bt.bit_index;
-        bt.bit_index += 1;
-
-        const op_int = @enumToInt(op_ref);
-        if (op_int < Air.Inst.Ref.typed_value_map.len) return;
-        const op_index = @intCast(Air.Inst.Index, op_int - Air.Inst.Ref.typed_value_map.len);
-
-        if (this_bit_index < Liveness.bpi - 1) {
-            const dies = @truncate(u1, bt.tomb_bits >> @intCast(Liveness.OperandInt, this_bit_index)) != 0;
-            if (!dies) return;
-        } else {
-            const big_bit_index = @intCast(u5, this_bit_index - (Liveness.bpi - 1));
-            const dies = @truncate(u1, bt.big_tomb_bits >> big_bit_index) != 0;
-            if (!dies) return;
-        }
-
+        const dies = bt.lbt.feed();
+        const op_index = Air.refToIndex(op_ref) orelse return;
+        if (!dies) return;
         bt.function.processDeath(op_index);
     }
@@ -2198,9 +2184,7 @@ fn iterateBigTomb(self: *Self, inst: Air.Inst.Index, operand_count: usize) !BigTomb {
     return BigTomb{
         .function = self,
         .inst = inst,
-        .tomb_bits = self.liveness.getTombBits(inst),
-        .big_tomb_bits = self.liveness.special.get(inst) orelse 0,
-        .bit_index = 0,
+        .lbt = self.liveness.iterateBigTomb(inst),
     };
 }

View File

@@ -272,24 +272,12 @@ const BlockData = struct {
 const BigTomb = struct {
     function: *Self,
     inst: Air.Inst.Index,
-    tomb_bits: Liveness.Bpi,
-    big_tomb_bits: u32,
-    bit_index: usize,
+    lbt: Liveness.BigTomb,
 
     fn feed(bt: *BigTomb, op_ref: Air.Inst.Ref) void {
-        const this_bit_index = bt.bit_index;
-        bt.bit_index += 1;
+        const dies = bt.lbt.feed();
         const op_index = Air.refToIndex(op_ref) orelse return;
-        if (this_bit_index < Liveness.bpi - 1) {
-            const dies = @truncate(u1, bt.tomb_bits >> @intCast(Liveness.OperandInt, this_bit_index)) != 0;
-            if (!dies) return;
-        } else {
-            const big_bit_index = @intCast(u5, this_bit_index - (Liveness.bpi - 1));
-            const dies = @truncate(u1, bt.big_tomb_bits >> big_bit_index) != 0;
-            if (!dies) return;
-        }
+        if (!dies) return;
         bt.function.processDeath(op_index);
     }
@@ -4845,9 +4833,7 @@ fn iterateBigTomb(self: *Self, inst: Air.Inst.Index, operand_count: usize) !BigTomb {
     return BigTomb{
         .function = self,
         .inst = inst,
-        .tomb_bits = self.liveness.getTombBits(inst),
-        .big_tomb_bits = self.liveness.special.get(inst) orelse 0,
-        .bit_index = 0,
+        .lbt = self.liveness.iterateBigTomb(inst),
    };
 }

View File

@@ -1348,7 +1348,7 @@ pub const DeclGen = struct {
             return w.writeAll(name);
         },
         .ErrorSet => {
-            comptime std.debug.assert(Type.initTag(.anyerror).abiSize(builtin.target) == 2);
+            comptime assert(Type.initTag(.anyerror).abiSize(builtin.target) == 2);
             return w.writeAll("uint16_t");
         },
         .ErrorUnion => {

View File

@@ -724,11 +724,21 @@ const Writer = struct {
         op_index: usize,
         operand: Air.Inst.Ref,
     ) @TypeOf(s).Error!void {
-        const dies = if (op_index < Liveness.bpi - 1)
+        const small_tomb_bits = Liveness.bpi - 1;
+        const dies = if (op_index < small_tomb_bits)
             w.liveness.operandDies(inst, @intCast(Liveness.OperandInt, op_index))
         else blk: {
-            // TODO
-            break :blk false;
+            var extra_index = w.liveness.special.get(inst).?;
+            var tomb_op_index: usize = small_tomb_bits;
+            while (true) {
+                const bits = w.liveness.extra[extra_index];
+                if (op_index < tomb_op_index + 31) {
+                    break :blk @truncate(u1, bits >> @intCast(u5, op_index - tomb_op_index)) != 0;
+                }
+                if ((bits >> 31) != 0) break :blk false;
+                extra_index += 1;
+                tomb_op_index += 31;
+            } else unreachable;
         };
         return w.writeInstRef(s, operand, dies);
     }

View File

@@ -118,3 +118,144 @@ test "result location of function call argument through runtime condition and st
         .e = if (!runtime) .a else .b,
     });
 }
+
+test "function call with 40 arguments" {
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+
+    const S = struct {
+        fn doTheTest(thirty_nine: i32) !void {
+            const result = add(
+                0,
+                1,
+                2,
+                3,
+                4,
+                5,
+                6,
+                7,
+                8,
+                9,
+                10,
+                11,
+                12,
+                13,
+                14,
+                15,
+                16,
+                17,
+                18,
+                19,
+                20,
+                21,
+                22,
+                23,
+                24,
+                25,
+                26,
+                27,
+                28,
+                29,
+                30,
+                31,
+                32,
+                33,
+                34,
+                35,
+                36,
+                37,
+                38,
+                thirty_nine,
+                40,
+            );
+            try expect(result == 820);
+            try expect(thirty_nine == 39);
+        }
+
+        fn add(
+            a0: i32,
+            a1: i32,
+            a2: i32,
+            a3: i32,
+            a4: i32,
+            a5: i32,
+            a6: i32,
+            a7: i32,
+            a8: i32,
+            a9: i32,
+            a10: i32,
+            a11: i32,
+            a12: i32,
+            a13: i32,
+            a14: i32,
+            a15: i32,
+            a16: i32,
+            a17: i32,
+            a18: i32,
+            a19: i32,
+            a20: i32,
+            a21: i32,
+            a22: i32,
+            a23: i32,
+            a24: i32,
+            a25: i32,
+            a26: i32,
+            a27: i32,
+            a28: i32,
+            a29: i32,
+            a30: i32,
+            a31: i32,
+            a32: i32,
+            a33: i32,
+            a34: i32,
+            a35: i32,
+            a36: i32,
+            a37: i32,
+            a38: i32,
+            a39: i32,
+            a40: i32,
+        ) i32 {
+            return a0 +
+                a1 +
+                a2 +
+                a3 +
+                a4 +
+                a5 +
+                a6 +
+                a7 +
+                a8 +
+                a9 +
+                a10 +
+                a11 +
+                a12 +
+                a13 +
+                a14 +
+                a15 +
+                a16 +
+                a17 +
+                a18 +
+                a19 +
+                a20 +
+                a21 +
+                a22 +
+                a23 +
+                a24 +
+                a25 +
+                a26 +
+                a27 +
+                a28 +
+                a29 +
+                a30 +
+                a31 +
+                a32 +
+                a33 +
+                a34 +
+                a35 +
+                a36 +
+                a37 +
+                a38 +
+                a39 +
+                a40;
+        }
+    };
+    try S.doTheTest(39);
+}