mirror of https://github.com/ziglang/zig.git, synced 2026-02-14 13:30:45 +00:00
stage2: update Liveness, SPIR-V for new AIR memory layout
also do the inline assembly instruction
This commit is contained in:
parent 9918a5fbe3
commit ef7080aed1
44 BRANCH_TODO
@@ -1,24 +1,6 @@

* be sure to test debug info of parameters

/// Each bit represents the index of an `Inst` parameter in the `args` field.
/// If a bit is set, it marks the end of the lifetime of the corresponding
/// instruction parameter. For example, 0b101 means that the first and
/// third `Inst` parameters' lifetimes end after this instruction, and will
/// not have any more following references.
/// The most significant bit being set means that the instruction itself is
/// never referenced, in other words its lifetime ends as soon as it finishes.
/// If bit 15 (0b1xxx_xxxx_xxxx_xxxx) is set, it means this instruction itself is unreferenced.
/// If bit 14 (0bx1xx_xxxx_xxxx_xxxx) is set, it means this is a special case and the
/// lifetimes of operands are encoded elsewhere.
deaths: DeathsInt = undefined,

pub const DeathsInt = u16;
pub const DeathsBitIndex = std.math.Log2Int(DeathsInt);
pub const unreferenced_bit_index = @typeInfo(DeathsInt).Int.bits - 1;
pub const deaths_bits = unreferenced_bit_index - 1;

pub fn isUnused(self: Inst) bool {
return (self.deaths & (1 << unreferenced_bit_index)) != 0;
}
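A sketch of how these death bits are meant to be read (not part of the diff; `operandDies` is a hypothetical helper built only from the constants above):

fn operandDies(deaths: DeathsInt, operand_index: DeathsBitIndex) bool {
    // Bits 0..deaths_bits-1 each mark the death of the corresponding operand;
    // the top bit (unreferenced_bit_index) marks the instruction itself as unused.
    return (deaths & (@as(DeathsInt, 1) << operand_index)) != 0;
}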
@@ -115,32 +97,6 @@

pub const Assembly = struct {
pub const base_tag = Tag.assembly;

base: Inst,
asm_source: []const u8,
is_volatile: bool,
output_constraint: ?[]const u8,
inputs: []const []const u8,
clobbers: []const []const u8,
args: []const *Inst,

pub fn operandCount(self: *const Assembly) usize {
return self.args.len;
}
pub fn getOperand(self: *const Assembly, index: usize) ?*Inst {
if (index < self.args.len)
return self.args[index];
return null;
}
};

pub const StructFieldPtr = struct {
struct_ptr: *Inst,
field_index: usize,
};

/// For debugging purposes, prints a function representation to stderr.
pub fn dumpFn(old_module: Module, module_fn: *Module.Fn) void {
60 src/Air.zig
@@ -1,5 +1,7 @@
//! Analyzed Intermediate Representation.
//! Sema inputs ZIR and outputs AIR.
//! This data is produced by Sema and consumed by codegen.
//! Unlike ZIR where there is one instance for an entire source file, each function
//! gets its own `Air` instance.

const std = @import("std");
const Value = @import("value.zig").Value;
@@ -27,38 +29,48 @@ pub const Inst = struct {
data: Data,

pub const Tag = enum(u8) {
/// The first N instructions in Air must be one arg instruction per function parameter.
/// Uses the `ty` field.
arg,
/// Float or integer addition. For integers, wrapping is undefined behavior.
/// Result type is the same as both operands.
/// Both operands are guaranteed to be the same type, and the result type
/// is the same as both operands.
/// Uses the `bin_op` field.
add,
/// Integer addition. Wrapping is defined to be twos complement wrapping.
/// Result type is the same as both operands.
/// Both operands are guaranteed to be the same type, and the result type
/// is the same as both operands.
/// Uses the `bin_op` field.
addwrap,
/// Float or integer subtraction. For integers, wrapping is undefined behavior.
/// Result type is the same as both operands.
/// Both operands are guaranteed to be the same type, and the result type
/// is the same as both operands.
/// Uses the `bin_op` field.
sub,
/// Integer subtraction. Wrapping is defined to be twos complement wrapping.
/// Result type is the same as both operands.
/// Both operands are guaranteed to be the same type, and the result type
/// is the same as both operands.
/// Uses the `bin_op` field.
subwrap,
/// Float or integer multiplication. For integers, wrapping is undefined behavior.
/// Result type is the same as both operands.
/// Both operands are guaranteed to be the same type, and the result type
/// is the same as both operands.
/// Uses the `bin_op` field.
mul,
/// Integer multiplication. Wrapping is defined to be twos complement wrapping.
/// Result type is the same as both operands.
/// Both operands are guaranteed to be the same type, and the result type
/// is the same as both operands.
/// Uses the `bin_op` field.
mulwrap,
/// Integer or float division. For integers, wrapping is undefined behavior.
/// Result type is the same as both operands.
/// Both operands are guaranteed to be the same type, and the result type
/// is the same as both operands.
/// Uses the `bin_op` field.
div,
/// Allocates stack local memory.
/// Uses the `ty` field.
alloc,
/// TODO
/// Inline assembly. Uses the `ty_pl` field. Payload is `Asm`.
assembly,
/// Bitwise AND. `&`.
/// Result type is the same as both operands.
@@ -80,7 +92,7 @@ pub const Inst = struct {
/// Uses the `ty_pl` field with payload `Block`.
block,
/// Return from a block with a result.
/// Result type is always noreturn.
/// Result type is always noreturn; no instructions in a block follow this one.
/// Uses the `br` field.
br,
/// Lowers to a hardware trap instruction, or the next best thing.
@@ -109,11 +121,11 @@ pub const Inst = struct {
/// Uses the `bin_op` field.
cmp_neq,
/// Conditional branch.
/// Result type is always noreturn.
/// Result type is always noreturn; no instructions in a block follow this one.
/// Uses the `pl_op` field. Operand is the condition. Payload is `CondBr`.
cond_br,
/// Switch branch.
/// Result type is always noreturn.
/// Result type is always noreturn; no instructions in a block follow this one.
/// Uses the `pl_op` field. Operand is the condition. Payload is `SwitchBr`.
switch_br,
/// A comptime-known value. Uses the `ty_pl` field, payload is index of
@@ -166,7 +178,7 @@ pub const Inst = struct {
load,
/// A labeled block of code that loops forever. At the end of the body it is implied
/// to repeat; no explicit "repeat" instruction terminates loop bodies.
/// Result type is always noreturn.
/// Result type is always noreturn; no instructions in a block follow this one.
/// Uses the `ty_pl` field. Payload is `Block`.
loop,
/// Converts a pointer to its address. Result type is always `usize`.
@@ -178,7 +190,7 @@ pub const Inst = struct {
/// Uses the `ty_op` field.
ref,
/// Return a value from a function.
/// Result type is always noreturn.
/// Result type is always noreturn; no instructions in a block follow this one.
/// Uses the `un_op` field.
ret,
/// Returns a pointer to a global variable.
@@ -189,7 +201,7 @@ pub const Inst = struct {
/// Uses the `bin_op` field.
store,
/// Indicates the program counter will never get to this instruction.
/// Result type is always noreturn.
/// Result type is always noreturn; no instructions in a block follow this one.
unreach,
/// Convert from one float type to another.
/// Uses the `ty_op` field.
@@ -343,6 +355,16 @@ pub const StructField = struct {
field_index: u32,
};

/// Trailing:
/// 0. `Ref` for every outputs_len
/// 1. `Ref` for every inputs_len
pub const Asm = struct {
/// Index to the corresponding ZIR instruction.
/// `asm_source`, `outputs_len`, `inputs_len`, `clobbers_len`, `is_volatile`, and
/// clobbers are found via here.
zir_index: u32,
};
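A sketch of how the documented trailing data could be consumed, assuming the `extraData` helper shown further below; `asm_payload_index`, `outputs_len`, and `inputs_len` are hypothetical inputs here (per the doc comment, the lengths live in the referenced ZIR instruction, not in `Asm` itself):

const asm_extra = air.extraData(Air.Asm, asm_payload_index);
// Trailing `Ref`s follow the payload in air.extra: first the outputs, then the inputs.
const outputs = air.extra[asm_extra.end..][0..outputs_len];
const inputs = air.extra[asm_extra.end + outputs_len ..][0..inputs_len];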

pub fn getMainBody(air: Air) []const Air.Inst.Index {
const body_index = air.extra[@enumToInt(ExtraIndex.main_block)];
const body_len = air.extra[body_index];
@@ -369,3 +391,11 @@ pub fn extraData(air: Air, comptime T: type, index: usize) struct { data: T, end
.end = i,
};
}

pub fn deinit(air: *Air, gpa: *std.mem.Allocator) void {
air.instructions.deinit(gpa);
gpa.free(air.extra);
gpa.free(air.values);
gpa.free(air.variables);
air.* = undefined;
}
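As a usage sketch, a backend walks a function the way the SPIR-V changes later in this commit do: fetch the main block, then dispatch on each instruction index (`self.genInst` stands in for any backend's per-instruction handler):

const main_body = air.getMainBody();
for (main_body) |inst| {
    try self.genInst(inst);
}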
@@ -13,7 +13,7 @@ const target_util = @import("target.zig");
const Package = @import("Package.zig");
const link = @import("link.zig");
const trace = @import("tracy.zig").trace;
const liveness = @import("liveness.zig");
const Liveness = @import("Liveness.zig");
const build_options = @import("build_options");
const LibCInstallation = @import("libc_installation.zig").LibCInstallation;
const glibc = @import("glibc.zig");
@@ -1922,6 +1922,7 @@ pub fn getCompileLogOutput(self: *Compilation) []const u8 {
}

pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemory }!void {
const gpa = self.gpa;
// If the terminal is dumb, we don't want to show the user all the
// output.
var progress: std.Progress = .{ .dont_print_on_dumb = true };
@@ -2005,7 +2006,8 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor
assert(decl.has_tv);
if (decl.val.castTag(.function)) |payload| {
const func = payload.data;
switch (func.state) {
var air = switch (func.state) {
.queued => module.analyzeFnBody(decl, func) catch |err| switch (err) {
error.AnalysisFail => {
assert(func.state != .in_progress);
@@ -2016,18 +2018,39 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor
.in_progress => unreachable,
.inline_only => unreachable, // don't queue work for this
.sema_failure, .dependency_failure => continue,
.success => {},
}
// Here we tack on additional allocations to the Decl's arena. The allocations
// are lifetime annotations in the ZIR.
var decl_arena = decl.value_arena.?.promote(module.gpa);
defer decl.value_arena.?.* = decl_arena.state;
.success => unreachable, // don't queue it twice
};
defer air.deinit(gpa);

log.debug("analyze liveness of {s}", .{decl.name});
try liveness.analyze(module.gpa, &decl_arena.allocator, func.body);
var liveness = try Liveness.analyze(gpa, air);
defer liveness.deinit(gpa);

if (std.builtin.mode == .Debug and self.verbose_air) {
func.dump(module.*);
}

assert(decl.ty.hasCodeGenBits());

self.bin_file.updateFunc(module, func, air, liveness) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.AnalysisFail => {
decl.analysis = .codegen_failure;
continue;
},
else => {
try module.failed_decls.ensureUnusedCapacity(gpa, 1);
module.failed_decls.putAssumeCapacityNoClobber(decl, try Module.ErrorMsg.create(
gpa,
decl.srcLoc(),
"unable to codegen: {s}",
.{@errorName(err)},
));
decl.analysis = .codegen_failure_retryable;
continue;
},
};
continue;
}
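Condensed, the per-function flow this hunk establishes (a sketch with error handling elided; all names appear in the diff above):

var air = try module.analyzeFnBody(decl, func); // Sema now returns a standalone Air
defer air.deinit(gpa);
var liveness = try Liveness.analyze(gpa, air); // liveness is computed from Air, not stored in it
defer liveness.deinit(gpa);
try self.bin_file.updateFunc(module, func, air, liveness); // codegen consumes both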

assert(decl.ty.hasCodeGenBits());
@@ -2039,9 +2062,9 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor
continue;
},
else => {
try module.failed_decls.ensureCapacity(module.gpa, module.failed_decls.count() + 1);
try module.failed_decls.ensureCapacity(gpa, module.failed_decls.count() + 1);
module.failed_decls.putAssumeCapacityNoClobber(decl, try Module.ErrorMsg.create(
module.gpa,
gpa,
decl.srcLoc(),
"unable to codegen: {s}",
.{@errorName(err)},
@@ -2070,7 +2093,7 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor
@panic("sadly stage2 is omitted from this build to save memory on the CI server");
const module = self.bin_file.options.module.?;
const emit_h = module.emit_h.?;
_ = try emit_h.decl_table.getOrPut(module.gpa, decl);
_ = try emit_h.decl_table.getOrPut(gpa, decl);
const decl_emit_h = decl.getEmitH(module);
const fwd_decl = &decl_emit_h.fwd_decl;
fwd_decl.shrinkRetainingCapacity(0);
@@ -2079,7 +2102,7 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor
.module = module,
.error_msg = null,
.decl = decl,
.fwd_decl = fwd_decl.toManaged(module.gpa),
.fwd_decl = fwd_decl.toManaged(gpa),
// we don't want to emit optionals and error unions to headers since they have no ABI
.typedefs = undefined,
};
@@ -2087,14 +2110,14 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor

c_codegen.genHeader(&dg) catch |err| switch (err) {
error.AnalysisFail => {
try emit_h.failed_decls.put(module.gpa, decl, dg.error_msg.?);
try emit_h.failed_decls.put(gpa, decl, dg.error_msg.?);
continue;
},
else => |e| return e,
};

fwd_decl.* = dg.fwd_decl.moveToUnmanaged();
fwd_decl.shrinkAndFree(module.gpa, fwd_decl.items.len);
fwd_decl.shrinkAndFree(gpa, fwd_decl.items.len);
},
},
.analyze_decl => |decl| {
@@ -2111,9 +2134,9 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor
@panic("sadly stage2 is omitted from this build to save memory on the CI server");
const module = self.bin_file.options.module.?;
self.bin_file.updateDeclLineNumber(module, decl) catch |err| {
try module.failed_decls.ensureCapacity(module.gpa, module.failed_decls.count() + 1);
try module.failed_decls.ensureCapacity(gpa, module.failed_decls.count() + 1);
module.failed_decls.putAssumeCapacityNoClobber(decl, try Module.ErrorMsg.create(
module.gpa,
gpa,
decl.srcLoc(),
"unable to update line number: {s}",
.{@errorName(err)},
@@ -150,6 +150,7 @@ fn analyzeInst(
const gpa = a.gpa;
const table = &a.table;
const inst_tags = a.air.instructions.items(.tag);
const inst_datas = a.air.instructions.items(.data);

// No tombstone for this instruction means it is never referenced,
// and its birth marks its own death. Very metal 🤘
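A sketch of the check this comment describes (assuming `table` is a hash map keyed by instruction index, as set up above; the surrounding bookkeeping is elided):

// An instruction with no tombstone in the table was never referenced by a
// later instruction, so it is dead on arrival: mark it unused immediately.
const unreferenced = !table.contains(inst);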
@@ -739,8 +739,6 @@ pub const Union = struct {
pub const Fn = struct {
/// The Decl that corresponds to the function itself.
owner_decl: *Decl,
/// undefined unless analysis state is `success`.
body: ir.Body,
/// The ZIR instruction that is a function instruction. Use this to find
/// the body. We store this rather than the body directly so that when ZIR
/// is regenerated on update(), we can map this to the new corresponding
@@ -3585,17 +3583,19 @@ fn deleteDeclExports(mod: *Module, decl: *Decl) void {
mod.gpa.free(kv.value);
}

pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn) !void {
pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn) !Air {
const tracy = trace(@src());
defer tracy.end();

const gpa = mod.gpa;

// Use the Decl's arena for function memory.
var arena = decl.value_arena.?.promote(mod.gpa);
var arena = decl.value_arena.?.promote(gpa);
defer decl.value_arena.?.* = arena.state;

const fn_ty = decl.ty;
const param_inst_list = try mod.gpa.alloc(*ir.Inst, fn_ty.fnParamLen());
defer mod.gpa.free(param_inst_list);
const param_inst_list = try gpa.alloc(*ir.Inst, fn_ty.fnParamLen());
defer gpa.free(param_inst_list);

for (param_inst_list) |*param_inst, param_index| {
const param_type = fn_ty.fnParamType(param_index);
@@ -3615,7 +3615,7 @@ pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn) !void {

var sema: Sema = .{
.mod = mod,
.gpa = mod.gpa,
.gpa = gpa,
.arena = &arena.allocator,
.code = zir,
.owner_decl = decl,
@@ -3626,6 +3626,11 @@ pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn) !void {
};
defer sema.deinit();

// First few indexes of extra are reserved and set at the end.
const reserved_count = @typeInfo(Air.ExtraIndex).Enum.fields.len;
try sema.air_extra.ensureTotalCapacity(gpa, reserved_count);
sema.air_extra.items.len += reserved_count;

var inner_block: Scope.Block = .{
.parent = null,
.sema = &sema,
@@ -3634,20 +3639,29 @@ pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn) !void {
.inlining = null,
.is_comptime = false,
};
defer inner_block.instructions.deinit(mod.gpa);
defer inner_block.instructions.deinit(gpa);

// AIR currently requires the arg parameters to be the first N instructions
try inner_block.instructions.appendSlice(mod.gpa, param_inst_list);
try inner_block.instructions.appendSlice(gpa, param_inst_list);

func.state = .in_progress;
log.debug("set {s} to in_progress", .{decl.name});

try sema.analyzeFnBody(&inner_block, func.zir_body_inst);

const instructions = try arena.allocator.dupe(*ir.Inst, inner_block.instructions.items);
// Copy the block into place and mark that as the main block.
sema.air_extra.items[@enumToInt(Air.ExtraIndex.main_block)] = sema.air_extra.items.len;
try sema.air_extra.appendSlice(inner_block.instructions.items);

func.state = .success;
func.body = .{ .instructions = instructions };
log.debug("set {s} to success", .{decl.name});

return Air{
.instructions = sema.air_instructions.toOwnedSlice(),
.extra = sema.air_extra.toOwnedSlice(),
.values = sema.air_values.toOwnedSlice(),
.variables = sema.air_variables.toOwnedSlice(),
};
}

fn markOutdatedDecl(mod: *Module, decl: *Decl) !void {
563 src/Sema.zig
File diff suppressed because it is too large.
@@ -18,14 +18,14 @@ pub const Word = u32;
pub const ResultId = u32;

pub const TypeMap = std.HashMap(Type, u32, Type.HashContext64, std.hash_map.default_max_load_percentage);
pub const InstMap = std.AutoHashMap(*Inst, ResultId);
pub const InstMap = std.AutoHashMap(Air.Inst.Index, ResultId);

const IncomingBlock = struct {
src_label_id: ResultId,
break_value_id: ResultId,
};

pub const BlockMap = std.AutoHashMap(*Inst.Block, struct {
pub const BlockMap = std.AutoHashMap(Air.Inst.Index, struct {
label_id: ResultId,
incoming_blocks: *std.ArrayListUnmanaged(IncomingBlock),
});
@@ -279,16 +279,17 @@ pub const DeclGen = struct {
return self.spv.module.getTarget();
}

fn fail(self: *DeclGen, src: LazySrcLoc, comptime format: []const u8, args: anytype) Error {
fn fail(self: *DeclGen, comptime format: []const u8, args: anytype) Error {
@setCold(true);
const src: LazySrcLoc = .{ .node_offset = 0 };
const src_loc = src.toSrcLocWithDecl(self.decl);
self.error_msg = try Module.ErrorMsg.create(self.spv.module.gpa, src_loc, format, args);
return error.AnalysisFail;
}

fn resolve(self: *DeclGen, inst: *Inst) !ResultId {
fn resolve(self: *DeclGen, inst: Air.Inst.Index) !ResultId {
if (inst.value()) |val| {
return self.genConstant(inst.src, inst.ty, val);
return self.genConstant(inst.ty, val);
}

return self.inst_results.get(inst).?; // Instruction does not dominate all uses!
@@ -313,7 +314,7 @@ pub const DeclGen = struct {
const target = self.getTarget();

// The backend will never be asked to compile a 0-bit integer, so we won't have to handle those in this function.
std.debug.assert(bits != 0);
assert(bits != 0);

// 8, 16 and 64-bit integers require the Int8, Int16 and Int64 capabilities respectively.
// 32-bit integers are always supported (see spec, 2.16.1, Data rules).
@@ -387,19 +388,19 @@ pub const DeclGen = struct {
.composite_integer };
},
// As of yet, there is no vector support in the self-hosted compiler.
.Vector => self.fail(.{ .node_offset = 0 }, "TODO: SPIR-V backend: implement arithmeticTypeInfo for Vector", .{}),
.Vector => self.fail("TODO: SPIR-V backend: implement arithmeticTypeInfo for Vector", .{}),
// TODO: For which types is this the case?
else => self.fail(.{ .node_offset = 0 }, "TODO: SPIR-V backend: implement arithmeticTypeInfo for {}", .{ty}),
else => self.fail("TODO: SPIR-V backend: implement arithmeticTypeInfo for {}", .{ty}),
};
}

/// Generate a constant representing `val`.
/// TODO: Deduplication?
fn genConstant(self: *DeclGen, src: LazySrcLoc, ty: Type, val: Value) Error!ResultId {
fn genConstant(self: *DeclGen, ty: Type, val: Value) Error!ResultId {
const target = self.getTarget();
const code = &self.spv.binary.types_globals_constants;
const result_id = self.spv.allocResultId();
const result_type_id = try self.genType(src, ty);
const result_type_id = try self.genType(ty);

if (val.isUndef()) {
try writeInstruction(code, .OpUndef, &[_]Word{ result_type_id, result_id });
@@ -411,13 +412,13 @@ pub const DeclGen = struct {
const int_info = ty.intInfo(target);
const backing_bits = self.backingIntBits(int_info.bits) orelse {
// Integers too big for any native type are represented as "composite integers": An array of largestSupportedIntBits.
return self.fail(src, "TODO: SPIR-V backend: implement composite int constants for {}", .{ty});
return self.fail("TODO: SPIR-V backend: implement composite int constants for {}", .{ty});
};

// We can just use toSignedInt/toUnsignedInt here as it returns u64 - a type large enough to hold any
// SPIR-V native type (up to i/u64 with Int64). If SPIR-V ever supports native ints of a larger size, this
// might need to be updated.
std.debug.assert(self.largestSupportedIntBits() <= std.meta.bitCount(u64));
assert(self.largestSupportedIntBits() <= std.meta.bitCount(u64));
var int_bits = if (ty.isSignedInt()) @bitCast(u64, val.toSignedInt()) else val.toUnsignedInt();

// Mask the low bits which make up the actual integer. This is to make sure that negative values
@@ -469,13 +470,13 @@ pub const DeclGen = struct {
}
},
.Void => unreachable,
else => return self.fail(src, "TODO: SPIR-V backend: constant generation of type {}", .{ty}),
else => return self.fail("TODO: SPIR-V backend: constant generation of type {}", .{ty}),
}

return result_id;
}

fn genType(self: *DeclGen, src: LazySrcLoc, ty: Type) Error!ResultId {
fn genType(self: *DeclGen, ty: Type) Error!ResultId {
// We can't use getOrPut here so we can recursively generate types.
if (self.spv.types.get(ty)) |already_generated| {
return already_generated;
@@ -492,7 +493,7 @@ pub const DeclGen = struct {
const int_info = ty.intInfo(target);
const backing_bits = self.backingIntBits(int_info.bits) orelse {
// Integers too big for any native type are represented as "composite integers": An array of largestSupportedIntBits.
return self.fail(src, "TODO: SPIR-V backend: implement composite int {}", .{ty});
return self.fail("TODO: SPIR-V backend: implement composite int {}", .{ty});
};

// TODO: If backing_bits != int_info.bits, a duplicate type might be generated here.
@@ -518,7 +519,7 @@ pub const DeclGen = struct {
};

if (!supported) {
return self.fail(src, "Floating point width of {} bits is not supported for the current SPIR-V feature set", .{bits});
return self.fail("Floating point width of {} bits is not supported for the current SPIR-V feature set", .{bits});
}

try writeInstruction(code, .OpTypeFloat, &[_]Word{ result_id, bits });
@@ -526,19 +527,19 @@ pub const DeclGen = struct {
.Fn => {
// We only support zig-calling-convention functions, no varargs.
if (ty.fnCallingConvention() != .Unspecified)
return self.fail(src, "Unsupported calling convention for SPIR-V", .{});
return self.fail("Unsupported calling convention for SPIR-V", .{});
if (ty.fnIsVarArgs())
return self.fail(src, "VarArgs unsupported for SPIR-V", .{});
return self.fail("VarArgs unsupported for SPIR-V", .{});

// In order to avoid a temporary here, first generate all the required types and then simply look them up
// when generating the function type.
const params = ty.fnParamLen();
var i: usize = 0;
while (i < params) : (i += 1) {
_ = try self.genType(src, ty.fnParamType(i));
_ = try self.genType(ty.fnParamType(i));
}

const return_type_id = try self.genType(src, ty.fnReturnType());
const return_type_id = try self.genType(ty.fnReturnType());

// result id + result type id + parameter type ids.
try writeOpcode(code, .OpTypeFunction, 2 + @intCast(u16, ty.fnParamLen()));
@@ -551,7 +552,7 @@ pub const DeclGen = struct {
}
},
// When recursively generating a type, we cannot infer the pointer's storage class. See genPointerType.
.Pointer => return self.fail(src, "Cannot create pointer with unkown storage class", .{}),
.Pointer => return self.fail("Cannot create pointer with unkown storage class", .{}),
.Vector => {
// Although not 100% the same, Zig vectors map quite neatly to SPIR-V vectors (including many integer and float operations
// which work on them), so simply use those.
@@ -561,7 +562,7 @@ pub const DeclGen = struct {
// is adequate at all for this.

// TODO: Vectors are not yet supported by the self-hosted compiler itself it seems.
return self.fail(src, "TODO: SPIR-V backend: implement type Vector", .{});
return self.fail("TODO: SPIR-V backend: implement type Vector", .{});
},
.Null,
.Undefined,
@@ -573,7 +574,7 @@ pub const DeclGen = struct {

.BoundFn => unreachable, // this type will be deleted from the language.

else => |tag| return self.fail(src, "TODO: SPIR-V backend: implement type {}s", .{tag}),
else => |tag| return self.fail("TODO: SPIR-V backend: implement type {}s", .{tag}),
}

try self.spv.types.putNoClobber(ty, result_id);
@@ -582,8 +583,8 @@ pub const DeclGen = struct {

/// SPIR-V requires pointers to have a storage class (address space), and so we have a special function for that.
/// TODO: The result of this needs to be cached.
fn genPointerType(self: *DeclGen, src: LazySrcLoc, ty: Type, storage_class: spec.StorageClass) !ResultId {
std.debug.assert(ty.zigTypeTag() == .Pointer);
fn genPointerType(self: *DeclGen, ty: Type, storage_class: spec.StorageClass) !ResultId {
assert(ty.zigTypeTag() == .Pointer);

const code = &self.spv.binary.types_globals_constants;
const result_id = self.spv.allocResultId();
@@ -591,7 +592,7 @@ pub const DeclGen = struct {
// TODO: There are many constraints which are ignored for now: We may only create pointers to certain types, and to other types
// if more capabilities are enabled. For example, we may only create pointers to f16 if Float16Buffer is enabled.
// These also relate to the pointer's address space.
const child_id = try self.genType(src, ty.elemType());
const child_id = try self.genType(ty.elemType());

try writeInstruction(code, .OpTypePointer, &[_]Word{ result_id, @enumToInt(storage_class), child_id });
@@ -602,9 +603,9 @@ pub const DeclGen = struct {
const decl = self.decl;
const result_id = decl.fn_link.spirv.id;

if (decl.val.castTag(.function)) |func_payload| {
std.debug.assert(decl.ty.zigTypeTag() == .Fn);
const prototype_id = try self.genType(.{ .node_offset = 0 }, decl.ty);
if (decl.val.castTag(.function)) |_| {
assert(decl.ty.zigTypeTag() == .Fn);
const prototype_id = try self.genType(decl.ty);
try writeInstruction(&self.spv.binary.fn_decls, .OpFunction, &[_]Word{
self.spv.types.get(decl.ty.fnReturnType()).?, // This type should be generated along with the prototype.
result_id,
@@ -631,189 +632,167 @@ pub const DeclGen = struct {
try writeInstruction(&self.spv.binary.fn_decls, .OpLabel, &[_]Word{root_block_id});
self.current_block_label_id = root_block_id;

try self.genBody(func_payload.data.body);
const main_body = self.air.getMainBody();
try self.genBody(main_body);

// Append the actual code into the fn_decls section.
try self.spv.binary.fn_decls.appendSlice(self.code.items);
try writeInstruction(&self.spv.binary.fn_decls, .OpFunctionEnd, &[_]Word{});
} else {
return self.fail(.{ .node_offset = 0 }, "TODO: SPIR-V backend: generate decl type {}", .{decl.ty.zigTypeTag()});
return self.fail("TODO: SPIR-V backend: generate decl type {}", .{decl.ty.zigTypeTag()});
}
}

fn genBody(self: *DeclGen, body: ir.Body) Error!void {
for (body.instructions) |inst| {
fn genBody(self: *DeclGen, body: []const Air.Inst.Index) Error!void {
for (body) |inst| {
try self.genInst(inst);
}
}

fn genInst(self: *DeclGen, inst: *Inst) !void {
const result_id = switch (inst.tag) {
.add, .addwrap => try self.genBinOp(inst.castTag(.add).?),
.sub, .subwrap => try self.genBinOp(inst.castTag(.sub).?),
.mul, .mulwrap => try self.genBinOp(inst.castTag(.mul).?),
.div => try self.genBinOp(inst.castTag(.div).?),
.bit_and => try self.genBinOp(inst.castTag(.bit_and).?),
.bit_or => try self.genBinOp(inst.castTag(.bit_or).?),
.xor => try self.genBinOp(inst.castTag(.xor).?),
.cmp_eq => try self.genCmp(inst.castTag(.cmp_eq).?),
.cmp_neq => try self.genCmp(inst.castTag(.cmp_neq).?),
.cmp_gt => try self.genCmp(inst.castTag(.cmp_gt).?),
.cmp_gte => try self.genCmp(inst.castTag(.cmp_gte).?),
.cmp_lt => try self.genCmp(inst.castTag(.cmp_lt).?),
.cmp_lte => try self.genCmp(inst.castTag(.cmp_lte).?),
.bool_and => try self.genBinOp(inst.castTag(.bool_and).?),
.bool_or => try self.genBinOp(inst.castTag(.bool_or).?),
.not => try self.genUnOp(inst.castTag(.not).?),
.alloc => try self.genAlloc(inst.castTag(.alloc).?),
.arg => self.genArg(),
.block => (try self.genBlock(inst.castTag(.block).?)) orelse return,
.br => return try self.genBr(inst.castTag(.br).?),
.br_void => return try self.genBrVoid(inst.castTag(.br_void).?),
// TODO: Breakpoints won't be supported in SPIR-V, but the compiler seems to insert them
// throughout the IR.
fn genInst(self: *DeclGen, inst: Air.Inst.Index) !void {
const air_tags = self.air.instructions.items(.tag);
const result_id = switch (air_tags[inst]) {
// zig fmt: off
.add, .addwrap => try self.genArithOp(inst, .{.OpFAdd, .OpIAdd, .OpIAdd}),
.sub, .subwrap => try self.genArithOp(inst, .{.OpFSub, .OpISub, .OpISub}),
.mul, .mulwrap => try self.genArithOp(inst, .{.OpFMul, .OpIMul, .OpIMul}),
.div => try self.genArithOp(inst, .{.OpFDiv, .OpSDiv, .OpUDiv}),

.bit_and => try self.genBinOpSimple(inst, .OpBitwiseAnd),
.bit_or => try self.genBinOpSimple(inst, .OpBitwiseOr),
.xor => try self.genBinOpSimple(inst, .OpBitwiseXor),
.bool_and => try self.genBinOpSimple(inst, .OpLogicalAnd),
.bool_or => try self.genBinOpSimple(inst, .OpLogicalOr),

.not => try self.genNot(inst),

.cmp_eq => try self.genCmp(inst, .{.OpFOrdEqual, .OpLogicalEqual, .OpIEqual}),
.cmp_neq => try self.genCmp(inst, .{.OpFOrdNotEqual, .OpLogicalNotEqual, .OpINotEqual}),
.cmp_gt => try self.genCmp(inst, .{.OpFOrdGreaterThan, .OpSGreaterThan, .OpUGreaterThan}),
.cmp_gte => try self.genCmp(inst, .{.OpFOrdGreaterThanEqual, .OpSGreaterThanEqual, .OpUGreaterThanEqual}),
.cmp_lt => try self.genCmp(inst, .{.OpFOrdLessThan, .OpSLessThan, .OpULessThan}),
.cmp_lte => try self.genCmp(inst, .{.OpFOrdLessThanEqual, .OpSLessThanEqual, .OpULessThanEqual}),

.arg => self.genArg(),
.alloc => try self.genAlloc(inst),
.block => (try self.genBlock(inst)) orelse return,
.load => try self.genLoad(inst),

.br => return self.genBr(inst),
.breakpoint => return,
.condbr => return try self.genCondBr(inst.castTag(.condbr).?),
.constant => unreachable,
.dbg_stmt => return try self.genDbgStmt(inst.castTag(.dbg_stmt).?),
.load => try self.genLoad(inst.castTag(.load).?),
.loop => return try self.genLoop(inst.castTag(.loop).?),
.ret => return try self.genRet(inst.castTag(.ret).?),
.retvoid => return try self.genRetVoid(),
.store => return try self.genStore(inst.castTag(.store).?),
.unreach => return try self.genUnreach(),
else => return self.fail(inst.src, "TODO: SPIR-V backend: implement inst {s}", .{@tagName(inst.tag)}),
.condbr => return self.genCondBr(inst),
.constant => unreachable,
.dbg_stmt => return self.genDbgStmt(inst),
.loop => return self.genLoop(inst),
.ret => return self.genRet(inst),
.store => return self.genStore(inst),
.unreach => return self.genUnreach(),
// zig fmt: on
};

try self.inst_results.putNoClobber(inst, result_id);
}

fn genBinOp(self: *DeclGen, inst: *Inst.BinOp) !ResultId {
// TODO: Will lhs and rhs have the same type?
const lhs_id = try self.resolve(inst.lhs);
const rhs_id = try self.resolve(inst.rhs);
fn genBinOpSimple(self: *DeclGen, inst: Air.Inst.Index, opcode: Opcode) !ResultId {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs_id = try self.resolve(bin_op.lhs);
const rhs_id = try self.resolve(bin_op.rhs);
const result_id = self.spv.allocResultId();
try writeInstruction(&self.code, opcode, &[_]Word{
result_type_id, result_id, lhs_id, rhs_id,
});
return result_id;
}

fn genArithOp(self: *DeclGen, inst: Air.Inst.Index, ops: [3]Opcode) !ResultId {
// LHS and RHS are guaranteed to have the same type, and AIR guarantees
// the result to be the same as the LHS and RHS, which matches SPIR-V.
const ty = self.air.getType(inst);
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs_id = try self.resolve(bin_op.lhs);
const rhs_id = try self.resolve(bin_op.rhs);

const result_id = self.spv.allocResultId();
const result_type_id = try self.genType(inst.base.src, inst.base.ty);
const result_type_id = try self.genType(ty);

// TODO: Is the result the same as the argument types?
// This is supposed to be the case for SPIR-V.
std.debug.assert(inst.rhs.ty.eql(inst.lhs.ty));
std.debug.assert(inst.base.ty.tag() == .bool or inst.base.ty.eql(inst.lhs.ty));
assert(self.air.getType(bin_op.lhs).eql(ty));
assert(self.air.getType(bin_op.rhs).eql(ty));

// Binary operations are generally applicable to both scalar and vector operations in SPIR-V, but int and float
// versions of operations require different opcodes.
// For operations which produce bools, the information of inst.base.ty is not useful, so just pick either operand
// instead.
const info = try self.arithmeticTypeInfo(inst.lhs.ty);
// Binary operations are generally applicable to both scalar and vector operations
// in SPIR-V, but int and float versions of operations require different opcodes.
const info = try self.arithmeticTypeInfo(ty);

if (info.class == .composite_integer) {
return self.fail(inst.base.src, "TODO: SPIR-V backend: binary operations for composite integers", .{});
} else if (info.class == .strange_integer) {
return self.fail(inst.base.src, "TODO: SPIR-V backend: binary operations for strange integers", .{});
}

const is_float = info.class == .float;
const is_signed = info.signedness == .signed;
// **Note**: All these operations must be valid for vectors as well!
const opcode = switch (inst.base.tag) {
// The regular integer operations are all defined for wrapping. Since they're only relevant for integers,
// we can just switch on both cases here.
.add, .addwrap => if (is_float) Opcode.OpFAdd else Opcode.OpIAdd,
.sub, .subwrap => if (is_float) Opcode.OpFSub else Opcode.OpISub,
.mul, .mulwrap => if (is_float) Opcode.OpFMul else Opcode.OpIMul,
// TODO: Trap if divisor is 0?
// TODO: Figure out if OpSDiv for unsigned/OpUDiv for signed does anything useful.
// => Those are probably for divTrunc and divFloor, though the compiler does not yet generate those.
// => TODO: Figure out how those work on the SPIR-V side.
// => TODO: Test these.
.div => if (is_float) Opcode.OpFDiv else if (is_signed) Opcode.OpSDiv else Opcode.OpUDiv,
// Only integer versions for these.
.bit_and => Opcode.OpBitwiseAnd,
.bit_or => Opcode.OpBitwiseOr,
.xor => Opcode.OpBitwiseXor,
// Bool -> bool operations.
.bool_and => Opcode.OpLogicalAnd,
.bool_or => Opcode.OpLogicalOr,
const opcode_index: usize = switch (info.class) {
.composite_integer => {
return self.fail("TODO: SPIR-V backend: binary operations for composite integers", .{});
},
.strange_integer => {
return self.fail("TODO: SPIR-V backend: binary operations for strange integers", .{});
},
.integer => switch (info.signedness) {
.signed => 1,
.unsigned => 2,
},
.float => 0,
else => unreachable,
};

const opcode = ops[opcode_index];
try writeInstruction(&self.code, opcode, &[_]Word{ result_type_id, result_id, lhs_id, rhs_id });

// TODO: Trap on overflow? Probably going to be annoying.
// TODO: Look into SPV_KHR_no_integer_wrap_decoration which provides NoSignedWrap/NoUnsignedWrap.

if (info.class != .strange_integer)
return result_id;

return self.fail(inst.base.src, "TODO: SPIR-V backend: strange integer operation mask", .{});
return result_id;
}
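For reference, the `[3]Opcode` convention used by `genArithOp` and `genCmp`: index 0 selects the float opcode, index 1 the signed-integer (or, for comparisons, bool) opcode, and index 2 the unsigned-integer opcode, matching the `opcode_index` switch above. The dispatch in `genInst` passes the triple directly, e.g.:

// division picks OpFDiv / OpSDiv / OpUDiv depending on the operand class:
.div => try self.genArithOp(inst, .{ .OpFDiv, .OpSDiv, .OpUDiv }),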

fn genCmp(self: *DeclGen, inst: *Inst.BinOp) !ResultId {
const lhs_id = try self.resolve(inst.lhs);
const rhs_id = try self.resolve(inst.rhs);

fn genCmp(self: *DeclGen, inst: Air.Inst.Index, ops: [3]Opcode) !ResultId {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs_id = try self.resolve(bin_op.lhs);
const rhs_id = try self.resolve(bin_op.rhs);
const result_id = self.spv.allocResultId();
const result_type_id = try self.genType(inst.base.src, inst.base.ty);
const result_type_id = try self.genType(Type.initTag(.bool));
const op_ty = self.air.getType(bin_op.lhs);
assert(op_ty.eql(self.air.getType(bin_op.rhs)));

// All of these operations should be 2 equal types -> bool
std.debug.assert(inst.rhs.ty.eql(inst.lhs.ty));
std.debug.assert(inst.base.ty.tag() == .bool);
// Comparisons are generally applicable to both scalar and vector operations in SPIR-V,
// but int and float versions of operations require different opcodes.
const info = try self.arithmeticTypeInfo(op_ty);

// Comparisons are generally applicable to both scalar and vector operations in SPIR-V, but int and float
// versions of operations require different opcodes.
// Since inst.base.ty is always bool and so not very useful, and because both arguments must be the same, just get the info
// from either of the operands.
const info = try self.arithmeticTypeInfo(inst.lhs.ty);

if (info.class == .composite_integer) {
return self.fail(inst.base.src, "TODO: SPIR-V backend: binary operations for composite integers", .{});
} else if (info.class == .strange_integer) {
return self.fail(inst.base.src, "TODO: SPIR-V backend: comparison for strange integers", .{});
}

const is_bool = info.class == .bool;
const is_float = info.class == .float;
const is_signed = info.signedness == .signed;

// **Note**: All these operations must be valid for vectors as well!
// For floating points, we generally want ordered operations (which return false if either operand is nan).
const opcode = switch (inst.base.tag) {
.cmp_eq => if (is_float) Opcode.OpFOrdEqual else if (is_bool) Opcode.OpLogicalEqual else Opcode.OpIEqual,
.cmp_neq => if (is_float) Opcode.OpFOrdNotEqual else if (is_bool) Opcode.OpLogicalNotEqual else Opcode.OpINotEqual,
// TODO: Verify that these OpFOrd type operations produce the right value.
// TODO: Is there a more fundamental difference between OpU and OpS operations here than just the type?
.cmp_gt => if (is_float) Opcode.OpFOrdGreaterThan else if (is_signed) Opcode.OpSGreaterThan else Opcode.OpUGreaterThan,
.cmp_gte => if (is_float) Opcode.OpFOrdGreaterThanEqual else if (is_signed) Opcode.OpSGreaterThanEqual else Opcode.OpUGreaterThanEqual,
.cmp_lt => if (is_float) Opcode.OpFOrdLessThan else if (is_signed) Opcode.OpSLessThan else Opcode.OpULessThan,
.cmp_lte => if (is_float) Opcode.OpFOrdLessThanEqual else if (is_signed) Opcode.OpSLessThanEqual else Opcode.OpULessThanEqual,
const opcode_index: usize = switch (info.class) {
.composite_integer => {
return self.fail("TODO: SPIR-V backend: binary operations for composite integers", .{});
},
.strange_integer => {
return self.fail("TODO: SPIR-V backend: comparison for strange integers", .{});
},
.float => 0,
.bool => 1,
.integer => switch (info.signedness) {
.signed => 1,
.unsigned => 2,
},
else => unreachable,
};
const opcode = ops[opcode_index];

try writeInstruction(&self.code, opcode, &[_]Word{ result_type_id, result_id, lhs_id, rhs_id });
return result_id;
}
fn genUnOp(self: *DeclGen, inst: *Inst.UnOp) !ResultId {
const operand_id = try self.resolve(inst.operand);

fn genNot(self: *DeclGen, inst: Air.Inst.Index) !ResultId {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand_id = try self.resolve(ty_op.operand);
const result_id = self.spv.allocResultId();
const result_type_id = try self.genType(inst.base.src, inst.base.ty);

const opcode = switch (inst.base.tag) {
// Bool -> bool
.not => Opcode.OpLogicalNot,
else => unreachable,
};

const result_type_id = try self.genType(Type.initTag(.bool));
const opcode: Opcode = .OpLogicalNot;
try writeInstruction(&self.code, opcode, &[_]Word{ result_type_id, result_id, operand_id });

return result_id;
}

fn genAlloc(self: *DeclGen, inst: *Inst.NoOp) !ResultId {
fn genAlloc(self: *DeclGen, inst: Air.Inst.Index) !ResultId {
const ty = self.air.getType(inst);
const storage_class = spec.StorageClass.Function;
const result_type_id = try self.genPointerType(inst.base.src, inst.base.ty, storage_class);
const result_type_id = try self.genPointerType(ty, storage_class);
const result_id = self.spv.allocResultId();

// Rather than generating into code here, we're just going to generate directly into the fn_decls section so that
@@ -828,7 +807,7 @@ pub const DeclGen = struct {
return self.args.items[self.next_arg_index];
}

fn genBlock(self: *DeclGen, inst: *Inst.Block) !?ResultId {
fn genBlock(self: *DeclGen, inst: Air.Inst.Index) !?ResultId {
// In IR, a block doesn't really define an entry point like a block, but more like a scope that breaks can jump out of and
// "return" a value from. This cannot be directly modelled in SPIR-V, so in a block instruction, we're going to split up
// the current block by first generating the code of the block, then a label, and then generate the rest of the current
@@ -848,11 +827,16 @@ pub const DeclGen = struct {
incoming_blocks.deinit(self.spv.gpa);
}

try self.genBody(inst.body);
const ty = self.air.getType(inst);
const inst_datas = self.air.instructions.items(.data);
const extra = self.air.extraData(Air.Block, inst_datas[inst].ty_pl.payload);
const body = self.air.extra[extra.end..][0..extra.data.body_len];

try self.genBody(body);
try self.beginSPIRVBlock(label_id);

// If this block didn't produce a value, simply return here.
if (!inst.base.ty.hasCodeGenBits())
if (!ty.hasCodeGenBits())
return null;

// Combine the result from the blocks using the Phi instruction.
@@ -862,7 +846,7 @@ pub const DeclGen = struct {
// TODO: OpPhi is limited in the types that it may produce, such as pointers. Figure out which other types
// are not allowed to be created from a phi node, and throw an error for those. For now, genType already throws
// an error for pointers.
const result_type_id = try self.genType(inst.base.src, inst.base.ty);
const result_type_id = try self.genType(ty);
_ = result_type_id;

try writeOpcode(&self.code, .OpPhi, 2 + @intCast(u16, incoming_blocks.items.len * 2)); // result type + result + variable/parent...
@@ -874,30 +858,26 @@ pub const DeclGen = struct {
return result_id;
}
fn genBr(self: *DeclGen, inst: *Inst.Br) !void {
// TODO: This instruction needs to be the last in a block. Is that guaranteed?
const target = self.blocks.get(inst.block).?;
fn genBr(self: *DeclGen, inst: Air.Inst.Index) !void {
const br = self.air.instructions.items(.data)[inst].br;
const block = self.blocks.get(br.block_inst).?;
const operand_ty = self.air.getType(br.operand);

// TODO: For some reason, br is emitted with void parameters.
if (inst.operand.ty.hasCodeGenBits()) {
const operand_id = try self.resolve(inst.operand);
if (operand_ty.hasCodeGenBits()) {
const operand_id = try self.resolve(br.operand);
// current_block_label_id should not be undefined here, lest there is a br or br_void in the function's body.
try target.incoming_blocks.append(self.spv.gpa, .{ .src_label_id = self.current_block_label_id, .break_value_id = operand_id });
try block.incoming_blocks.append(self.spv.gpa, .{ .src_label_id = self.current_block_label_id, .break_value_id = operand_id });
}

try writeInstruction(&self.code, .OpBranch, &[_]Word{target.label_id});
}

fn genBrVoid(self: *DeclGen, inst: *Inst.BrVoid) !void {
// TODO: This instruction needs to be the last in a block. Is that guaranteed?
const target = self.blocks.get(inst.block).?;
// Don't need to add this to the incoming block list, as there is no value to insert in the phi node anyway.
try writeInstruction(&self.code, .OpBranch, &[_]Word{target.label_id});
try writeInstruction(&self.code, .OpBranch, &[_]Word{block.label_id});
}

fn genCondBr(self: *DeclGen, inst: *Inst.CondBr) !void {
// TODO: This instruction needs to be the last in a block. Is that guaranteed?
const condition_id = try self.resolve(inst.condition);
const pl_op = self.air.instructions.items(.data)[inst].pl_op;
const cond_br = self.air.extraData(Air.CondBr, pl_op.payload);
const then_body = self.air.extra[cond_br.end..][0..cond_br.data.then_body_len];
const else_body = self.air.extra[cond_br.end + then_body.len ..][0..cond_br.data.else_body_len];
const condition_id = try self.resolve(pl_op.operand);

// These will always generate a new SPIR-V block, since they are ir.Body and not ir.Block.
const then_label_id = self.spv.allocResultId();
@@ -913,23 +893,26 @@ pub const DeclGen = struct {
});

try self.beginSPIRVBlock(then_label_id);
try self.genBody(inst.then_body);
try self.genBody(then_body);
try self.beginSPIRVBlock(else_label_id);
try self.genBody(inst.else_body);
try self.genBody(else_body);
}
fn genDbgStmt(self: *DeclGen, inst: *Inst.DbgStmt) !void {
fn genDbgStmt(self: *DeclGen, inst: Air.Inst.Index) !void {
const dbg_stmt = self.air.instructions.items(.data)[inst].dbg_stmt;
const src_fname_id = try self.spv.resolveSourceFileName(self.decl);
try writeInstruction(&self.code, .OpLine, &[_]Word{ src_fname_id, inst.line, inst.column });
try writeInstruction(&self.code, .OpLine, &[_]Word{ src_fname_id, dbg_stmt.line, dbg_stmt.column });
}

fn genLoad(self: *DeclGen, inst: *Inst.UnOp) !ResultId {
const operand_id = try self.resolve(inst.operand);
fn genLoad(self: *DeclGen, inst: Air.Inst.Index) !ResultId {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand_id = try self.resolve(ty_op.operand);
const ty = self.air.getType(inst);

const result_type_id = try self.genType(inst.base.src, inst.base.ty);
const result_type_id = try self.genType(ty);
const result_id = self.spv.allocResultId();

const operands = if (inst.base.ty.isVolatilePtr())
const operands = if (ty.isVolatilePtr())
&[_]Word{ result_type_id, result_id, operand_id, @bitCast(u32, spec.MemoryAccess{ .Volatile = true }) }
else
&[_]Word{ result_type_id, result_id, operand_id };
@@ -939,8 +922,9 @@ pub const DeclGen = struct {
return result_id;
}

fn genLoop(self: *DeclGen, inst: *Inst.Loop) !void {
// TODO: This instruction needs to be the last in a block. Is that guaranteed?
fn genLoop(self: *DeclGen, inst: Air.Inst.Index) !void {
const loop = self.air.extraData(Air.Block, inst_datas[inst].ty_pl.payload);
const body = self.air.extra[loop.end..][0..loop.data.body_len];
const loop_label_id = self.spv.allocResultId();

// Jump to the loop entry point
@@ -949,27 +933,29 @@ pub const DeclGen = struct {
// TODO: Look into OpLoopMerge.

try self.beginSPIRVBlock(loop_label_id);
try self.genBody(inst.body);
try self.genBody(body);

try writeInstruction(&self.code, .OpBranch, &[_]Word{loop_label_id});
}

fn genRet(self: *DeclGen, inst: *Inst.UnOp) !void {
const operand_id = try self.resolve(inst.operand);
// TODO: This instruction needs to be the last in a block. Is that guaranteed?
try writeInstruction(&self.code, .OpReturnValue, &[_]Word{operand_id});
fn genRet(self: *DeclGen, inst: Air.Inst.Index) !void {
const operand = inst_datas[inst].un_op;
const operand_ty = self.air.getType(operand);
if (operand_ty.hasCodeGenBits()) {
const operand_id = try self.resolve(operand);
try writeInstruction(&self.code, .OpReturnValue, &[_]Word{operand_id});
} else {
try writeInstruction(&self.code, .OpReturn, &[_]Word{});
}
}

fn genRetVoid(self: *DeclGen) !void {
// TODO: This instruction needs to be the last in a block. Is that guaranteed?
try writeInstruction(&self.code, .OpReturn, &[_]Word{});
}
fn genStore(self: *DeclGen, inst: Air.Inst.Index) !void {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const dst_ptr_id = try self.resolve(bin_op.lhs);
const src_val_id = try self.resolve(bin_op.rhs);
const lhs_ty = self.air.getType(bin_op.lhs);

fn genStore(self: *DeclGen, inst: *Inst.BinOp) !void {
const dst_ptr_id = try self.resolve(inst.lhs);
const src_val_id = try self.resolve(inst.rhs);

const operands = if (inst.lhs.ty.isVolatilePtr())
const operands = if (lhs_ty.isVolatilePtr())
&[_]Word{ dst_ptr_id, src_val_id, @bitCast(u32, spec.MemoryAccess{ .Volatile = true }) }
else
&[_]Word{ dst_ptr_id, src_val_id };
@@ -978,7 +964,6 @@ pub const DeclGen = struct {
}

fn genUnreach(self: *DeclGen) !void {
// TODO: This instruction needs to be the last in a block. Is that guaranteed?
try writeInstruction(&self.code, .OpUnreachable, &[_]Word{});
}
};