Merge pull request #18097 from Snektron/spirv-structured-codegen

spirv: structured codegen

commit 608b5d06ea
@@ -1002,6 +1002,8 @@ pub const InitOptions = struct {
    /// (Windows) PDB output path
    pdb_out_path: ?[]const u8 = null,
    error_limit: ?Module.ErrorInt = null,
    /// (SPIR-V) whether to generate a structured control flow graph or not
    want_structured_cfg: ?bool = null,
};

fn addModuleTableToCacheHash(
@@ -1447,6 +1449,8 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation {
    };
    const formatted_panics = options.formatted_panics orelse (options.optimize_mode == .Debug);

    const error_limit = options.error_limit orelse (std.math.maxInt(u16) - 1);

    // We put everything into the cache hash that *cannot be modified
    // during an incremental update*. For example, one cannot change the
    // target between updates, but one can change source files, so the
@@ -1545,6 +1549,9 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation {
    hash.add(options.skip_linker_dependencies);
    hash.add(options.parent_compilation_link_libc);
    hash.add(formatted_panics);
    hash.add(options.emit_h != null);
    hash.add(error_limit);
    hash.addOptional(options.want_structured_cfg);

    // In the case of incremental cache mode, this `zig_cache_artifact_directory`
    // is computed based on a hash of non-linker inputs, and it is where all
@@ -1699,7 +1706,7 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation {
        .local_zir_cache = local_zir_cache,
        .emit_h = emit_h,
        .tmp_hack_arena = std.heap.ArenaAllocator.init(gpa),
        .error_limit = options.error_limit orelse (std.math.maxInt(u16) - 1),
        .error_limit = error_limit,
    };
    try module.init();

@@ -1958,6 +1965,7 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation {
        .force_undefined_symbols = options.force_undefined_symbols,
        .pdb_source_path = options.pdb_source_path,
        .pdb_out_path = options.pdb_out_path,
        .want_structured_cfg = options.want_structured_cfg,
    });
    errdefer bin_file.destroy();
    comp.* = .{
@@ -2732,6 +2740,7 @@ fn addNonIncrementalStuffToCacheManifest(comp: *Compilation, man: *Cache.Manifes
    man.hash.add(comp.bin_file.options.valgrind);
    man.hash.add(comp.bin_file.options.single_threaded);
    man.hash.add(comp.bin_file.options.use_llvm);
    man.hash.add(comp.bin_file.options.use_lib_llvm);
    man.hash.add(comp.bin_file.options.dll_export_fns);
    man.hash.add(comp.bin_file.options.is_test);
    man.hash.add(comp.test_evented_io);
@@ -2739,8 +2748,10 @@ fn addNonIncrementalStuffToCacheManifest(comp: *Compilation, man: *Cache.Manifes
    man.hash.addOptionalBytes(comp.test_name_prefix);
    man.hash.add(comp.bin_file.options.skip_linker_dependencies);
    man.hash.add(comp.bin_file.options.parent_compilation_link_libc);
    man.hash.add(comp.formatted_panics);
    man.hash.add(mod.emit_h != null);
    man.hash.add(mod.error_limit);
    man.hash.addOptional(comp.bin_file.options.want_structured_cfg);
}

    try man.addOptionalFile(comp.bin_file.options.linker_script);
@@ -6823,6 +6834,7 @@ fn buildOutputFromZig(
        .clang_passthrough_mode = comp.clang_passthrough_mode,
        .skip_linker_dependencies = true,
        .parent_compilation_link_libc = comp.bin_file.options.link_libc,
        .want_structured_cfg = comp.bin_file.options.want_structured_cfg,
    });
    defer sub_compilation.destroy();

@@ -6903,6 +6915,7 @@ pub fn build_crt_file(
        .clang_passthrough_mode = comp.clang_passthrough_mode,
        .skip_linker_dependencies = true,
        .parent_compilation_link_libc = comp.bin_file.options.link_libc,
        .want_structured_cfg = comp.bin_file.options.want_structured_cfg,
    });
    defer sub_compilation.destroy();

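Taken together, the hunks above thread the new tri-state `want_structured_cfg` option from `InitOptions` through the cache hash and into sub-compilations. A standalone sketch (not part of the commit; the function name is illustrative) of how the option resolves, mirroring the `switch` in the SPIR-V backend further below:

const std = @import("std");

// null means "backend decides": OpenCL kernels default to unstructured,
// every other (shader) target forces structured control flow.
fn resolveStructuredCfg(want: ?bool, os_tag: std.Target.Os.Tag) bool {
    return switch (os_tag) {
        .opencl => want orelse false,
        else => true,
    };
}

test "null defers to the target default" {
    try std.testing.expect(!resolveStructuredCfg(null, .opencl));
    try std.testing.expect(resolveStructuredCfg(null, .vulkan));
    try std.testing.expect(resolveStructuredCfg(true, .opencl));
}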
@@ -40,17 +40,109 @@ const SpvTypeInfo = struct {

const TypeMap = std.AutoHashMapUnmanaged(InternPool.Index, SpvTypeInfo);

const IncomingBlock = struct {
    src_label_id: IdRef,
    break_value_id: IdRef,
};
const ControlFlow = union(enum) {
    const Structured = struct {
        /// This type indicates the way that a block is terminated. The
        /// state of a particular block is used to track how a jump from
        /// inside the block must reach the outside.
        const Block = union(enum) {
            const Incoming = struct {
                src_label: IdRef,
                /// Instruction that returns a u32 value of the
                /// `Air.Inst.Index` that control flow should jump to.
                next_block: IdRef,
            };

            const Block = struct {
                label_id: ?IdRef,
                incoming_blocks: std.ArrayListUnmanaged(IncomingBlock),
            };
            const SelectionMerge = struct {
                /// Incoming block from the `then` label.
                /// Note that the incoming block from the `else` label is
                /// given by the next element in the stack.
                incoming: Incoming,
                /// The label id of the cond_br's merge block.
                /// For the top-most element in the stack, this
                /// value is undefined.
                merge_block: IdRef,
            };

            const BlockMap = std.AutoHashMapUnmanaged(Air.Inst.Index, *Block);
            /// For a `selection` type block, we cannot use early exits, and we
            /// must generate a 'merge ladder' of OpSelectionMerge instructions. To that end,
            /// we keep a stack of the merges that still must be closed at the end of
            /// a block.
            ///
            /// This entire structure basically just resembles a tree like
            ///       a   x
            ///        \ /
            ///     b   o   merge
            ///      \ /
            ///   c   o   merge
            ///    \ /
            ///     o   merge
            ///    /
            ///   o   jump to next block
            selection: struct {
                /// In order to know which merges we still need to do, we need to keep
                /// a stack of those.
                merge_stack: std.ArrayListUnmanaged(SelectionMerge) = .{},
            },
            /// For a `loop` type block, we can early-exit the block by
            /// jumping to the loop exit node, and we don't need to generate
            /// an entire stack of merges.
            loop: struct {
                /// The next block to jump to can be determined from any number
                /// of conditions that jump to the loop exit.
                merges: std.ArrayListUnmanaged(Incoming) = .{},
                /// The label id of the loop's merge block.
                merge_block: IdRef,
            },

            fn deinit(self: *Structured.Block, a: Allocator) void {
                switch (self.*) {
                    .selection => |*merge| merge.merge_stack.deinit(a),
                    .loop => |*merge| merge.merges.deinit(a),
                }
                self.* = undefined;
            }
        };
        /// The stack of (structured) blocks that we are currently in. This determines
        /// how exits from the current block must be handled.
        block_stack: std.ArrayListUnmanaged(*Structured.Block) = .{},
        /// Maps `block` inst indices to the variable that the block's result
        /// value must be written to.
        block_results: std.AutoHashMapUnmanaged(Air.Inst.Index, IdRef) = .{},
    };

    const Unstructured = struct {
        const Incoming = struct {
            src_label: IdRef,
            break_value_id: IdRef,
        };

        const Block = struct {
            label: ?IdRef = null,
            incoming_blocks: std.ArrayListUnmanaged(Incoming) = .{},
        };

        /// We need to keep track of result ids for block labels, as well as the 'incoming'
        /// blocks for a block.
        blocks: std.AutoHashMapUnmanaged(Air.Inst.Index, *Block) = .{},
    };

    structured: Structured,
    unstructured: Unstructured,

    pub fn deinit(self: *ControlFlow, a: Allocator) void {
        switch (self.*) {
            .structured => |*cf| {
                cf.block_stack.deinit(a);
                cf.block_results.deinit(a);
            },
            .unstructured => |*cf| {
                cf.blocks.deinit(a);
            },
        }
        self.* = undefined;
    }
};

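As a concrete illustration of the merge ladder (an editor's sketch, not code from the commit): ordinary Zig input of the following shape exits a labeled block from several nesting depths, and under structured control flow each early exit becomes one rung of selection merges like the tree drawn above.

export fn classify(x: u32) u32 {
    const r = blk: {
        if (x == 0) break :blk @as(u32, 1); // early exit 'a' in the tree above
        if (x < 10) break :blk @as(u32, 2); // early exit 'b'
        break :blk @as(u32, 3); // the final jump to the next block
    };
    return r;
}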
/// This structure holds information that is relevant to the entire compilation,
/// in contrast to `DeclGen`, which only holds relevant information about a
@@ -99,6 +191,14 @@ pub const Object = struct {
        air: Air,
        liveness: Liveness,
    ) !void {
        const target = mod.getTarget();
        // We always want structured control flow in shaders. This option is only relevant
        // for OpenCL kernels.
        const want_structured_cfg = switch (target.os.tag) {
            .opencl => mod.comp.bin_file.options.want_structured_cfg orelse false,
            else => true,
        };

        var decl_gen = DeclGen{
            .gpa = self.gpa,
            .object = self,
@@ -108,7 +208,11 @@ pub const Object = struct {
            .air = air,
            .liveness = liveness,
            .type_map = &self.type_map,
            .current_block_label_id = undefined,
            .control_flow = switch (want_structured_cfg) {
                true => .{ .structured = .{} },
                false => .{ .unstructured = .{} },
            },
            .current_block_label = undefined,
        };
        defer decl_gen.deinit();

@@ -213,12 +317,11 @@ const DeclGen = struct {
    /// is already in this map, it's recursive.
    wip_pointers: std.AutoHashMapUnmanaged(struct { InternPool.Index, StorageClass }, CacheRef) = .{},

    /// We need to keep track of result ids for block labels, as well as the 'incoming'
    /// blocks for a block.
    blocks: BlockMap = .{},
    /// This field keeps track of the current state wrt structured or unstructured control flow.
    control_flow: ControlFlow,

    /// The label of the SPIR-V block we are currently generating.
    current_block_label_id: IdRef,
    current_block_label: IdRef,

    /// The code (prologue and body) for the function we are currently generating code for.
    func: SpvModule.Fn = .{},
@@ -300,7 +403,7 @@ const DeclGen = struct {
        self.args.deinit(self.gpa);
        self.inst_results.deinit(self.gpa);
        self.wip_pointers.deinit(self.gpa);
        self.blocks.deinit(self.gpa);
        self.control_flow.deinit(self.gpa);
        self.func.deinit(self.gpa);
        self.base_line_stack.deinit(self.gpa);
    }
@@ -384,10 +487,11 @@ const DeclGen = struct {
        // TODO: This should probably be made a little more robust.
        const func = self.func;
        defer self.func = func;
        const block_label_id = self.current_block_label_id;
        defer self.current_block_label_id = block_label_id;
        const block_label = self.current_block_label;
        defer self.current_block_label = block_label;

        self.func = .{};
        defer self.func.deinit(self.gpa);

        // TODO: Merge this with genDecl?
        const begin = self.spv.beginGlobal();
@@ -409,7 +513,7 @@ const DeclGen = struct {
        try self.func.prologue.emit(self.spv.gpa, .OpLabel, .{
            .id_result = root_block_id,
        });
        self.current_block_label_id = root_block_id;
        self.current_block_label = root_block_id;

        const val_id = try self.constant(ty, val.toValue(), .indirect);
        try self.func.body.emit(self.spv.gpa, .OpStore, .{
@@ -432,9 +536,9 @@ const DeclGen = struct {
    /// block we are currently generating.
    /// Note that there is no such thing as nested blocks like in ZIR or AIR, so we don't need to
    /// keep track of the previous block.
    fn beginSpvBlock(self: *DeclGen, label_id: IdResult) !void {
        try self.func.body.emit(self.spv.gpa, .OpLabel, .{ .id_result = label_id });
        self.current_block_label_id = label_id;
    fn beginSpvBlock(self: *DeclGen, label: IdResult) !void {
        try self.func.body.emit(self.spv.gpa, .OpLabel, .{ .id_result = label });
        self.current_block_label = label;
    }

    /// SPIR-V requires enabling specific integer sizes through capabilities, and so if they are not enabled, we need
@@ -1783,13 +1887,22 @@ const DeclGen = struct {
        try self.func.prologue.emit(self.spv.gpa, .OpLabel, .{
            .id_result = root_block_id,
        });
        self.current_block_label_id = root_block_id;
        self.current_block_label = root_block_id;

        const main_body = self.air.getMainBody();
        try self.genBody(main_body);

        // Append the actual code into the functions section.
        switch (self.control_flow) {
            .structured => {
                _ = try self.genStructuredBody(.selection, main_body);
                // We always expect paths that reach here to end, but we still need the block
                // to act as a dummy merge block.
                try self.func.body.emit(self.spv.gpa, .OpUnreachable, {});
            },
            .unstructured => {
                try self.genBody(main_body);
            },
        }
        try self.func.body.emit(self.spv.gpa, .OpFunctionEnd, {});
        // Append the actual code into the functions section.
        try self.spv.addFunction(spv_decl_index, self.func);

        const fqn = ip.stringToSlice(try decl.getFullyQualifiedName(self.module));
@@ -1847,7 +1960,7 @@ const DeclGen = struct {
        try self.func.prologue.emit(self.spv.gpa, .OpLabel, .{
            .id_result = root_block_id,
        });
        self.current_block_label_id = root_block_id;
        self.current_block_label = root_block_id;

        const val_id = try self.constant(decl.ty, init_val, .indirect);
        try self.func.body.emit(self.spv.gpa, .OpStore, .{
@@ -3646,6 +3759,154 @@ const DeclGen = struct {
        return self.args.items[self.next_arg_index];
    }

    /// Given a slice of incoming block connections, returns the block-id of the next
    /// block to jump to. This function emits instructions, so it should be called
    /// inside the merge block of the block.
    /// This function should only be called with structured control flow generation.
    fn structuredNextBlock(self: *DeclGen, incoming: []const ControlFlow.Structured.Block.Incoming) !IdRef {
        assert(self.control_flow == .structured);

        const result_id = self.spv.allocId();
        const block_id_ty_ref = try self.intType(.unsigned, 32);
        try self.func.body.emitRaw(self.spv.gpa, .OpPhi, @intCast(2 + incoming.len * 2)); // result type + result + variable/parent...
        self.func.body.writeOperand(spec.IdResultType, self.typeId(block_id_ty_ref));
        self.func.body.writeOperand(spec.IdRef, result_id);

        for (incoming) |incoming_block| {
            self.func.body.writeOperand(spec.PairIdRefIdRef, .{ incoming_block.next_block, incoming_block.src_label });
        }

        return result_id;
    }

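The `emitRaw` call above reserves 2 + 2·n operand words for the `OpPhi`: one for the result type, one for the result id, and one (value, parent label) pair per incoming edge. A standalone sketch of that arithmetic (illustrative, not part of the commit):

const std = @import("std");

fn opPhiOperandWords(incoming_edges: usize) usize {
    // result type + result id + one (value, parent-label) pair per edge
    return 2 + incoming_edges * 2;
}

test "OpPhi operand count" {
    try std.testing.expectEqual(@as(usize, 6), opPhiOperandWords(2));
}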
    /// Jumps to the block with the target block-id. This function must only be called when
    /// terminating a body; there should be no instructions after it.
    /// This function should only be called with structured control flow generation.
    fn structuredBreak(self: *DeclGen, target_block: IdRef) !void {
        assert(self.control_flow == .structured);

        const sblock = self.control_flow.structured.block_stack.getLast();
        const merge_block = switch (sblock.*) {
            .selection => |*merge| blk: {
                const merge_label = self.spv.allocId();
                try merge.merge_stack.append(self.gpa, .{
                    .incoming = .{
                        .src_label = self.current_block_label,
                        .next_block = target_block,
                    },
                    .merge_block = merge_label,
                });
                break :blk merge_label;
            },
            // Loop blocks do not end in a break: not through a direct break,
            // and also not through another instruction like cond_br or unreachable (these
            // situations are replaced by `cond_br` in sema, or there is a `block` instruction
            // placed around them).
            .loop => unreachable,
        };

        try self.func.body.emitBranch(self.spv.gpa, merge_block);
    }

    /// Generate a body in a way that exits the body using only structured constructs.
    /// Returns the block-id of the next block to jump to. After this function, a jump
    /// should still be emitted to the block that should follow this structured body.
    /// This function should only be called with structured control flow generation.
    fn genStructuredBody(
        self: *DeclGen,
        /// This parameter defines the method that this structured body is exited with.
        block_merge_type: union(enum) {
            /// Using selection; early exits from this body are surrounded with
            /// if() statements.
            selection,
            /// Using loops; loops can be early exited by jumping to the merge block at
            /// any time.
            loop: struct {
                merge_label: IdRef,
                continue_label: IdRef,
            },
        },
        body: []const Air.Inst.Index,
    ) !IdRef {
        assert(self.control_flow == .structured);

        var sblock: ControlFlow.Structured.Block = switch (block_merge_type) {
            .loop => |merge| .{ .loop = .{
                .merge_block = merge.merge_label,
            } },
            .selection => .{ .selection = .{} },
        };
        defer sblock.deinit(self.gpa);

        {
            try self.control_flow.structured.block_stack.append(self.gpa, &sblock);
            defer _ = self.control_flow.structured.block_stack.pop();

            try self.genBody(body);
        }

        switch (sblock) {
            .selection => |merge| {
                // Now generate the merge block for all merges that
                // still need to be performed.
                const merge_stack = merge.merge_stack.items;

                // If there are no merges on the stack, this block didn't generate any jumps (all paths
                // ended with a return or an unreachable). In that case, we don't need to do
                // any merging.
                if (merge_stack.len == 0) {
                    // We still need to return a value of a next block to jump to.
                    // For example, if we have code like
                    //   if (x) {
                    //       if (y) return else return;
                    //   } else {}
                    // then we still need the outer block to have an OpSelectionMerge and consequently
                    // a phi node. In that case we can just return bogus, since we know that its
                    // path will never be taken.

                    // Make sure that we are still in a block when exiting the function.
                    // TODO: Can we get rid of that?
                    try self.beginSpvBlock(self.spv.allocId());
                    const block_id_ty_ref = try self.intType(.unsigned, 32);
                    return try self.spv.constUndef(block_id_ty_ref);
                }

                // The top-most merge actually only has a single source: the
                // final jump of the block, or the merge block of a sub-block, cond_br,
                // or loop. Therefore we just need to generate a block with a jump to the
                // next merge block.
                try self.beginSpvBlock(merge_stack[merge_stack.len - 1].merge_block);

                // Now generate a merge ladder for the remaining merges in the stack.
                var incoming = ControlFlow.Structured.Block.Incoming{
                    .src_label = self.current_block_label,
                    .next_block = merge_stack[merge_stack.len - 1].incoming.next_block,
                };
                var i = merge_stack.len - 1;
                while (i > 0) {
                    i -= 1;
                    const step = merge_stack[i];
                    try self.func.body.emitBranch(self.spv.gpa, step.merge_block);
                    try self.beginSpvBlock(step.merge_block);
                    const next_block = try self.structuredNextBlock(&.{ incoming, step.incoming });
                    incoming = .{
                        .src_label = step.merge_block,
                        .next_block = next_block,
                    };
                }

                return incoming.next_block;
            },
            .loop => |merge| {
                // Close the loop by jumping to the continue label.
                try self.func.body.emitBranch(self.spv.gpa, block_merge_type.loop.continue_label);
                // For blocks we must simply merge all the incoming blocks to get the next block.
                try self.beginSpvBlock(merge.merge_block);
                return try self.structuredNextBlock(merge.merges.items);
            },
        }
    }

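Note the calling convention this establishes: `genStructuredBody` returns the phi'd next-block id but deliberately leaves the current SPIR-V block unterminated, so every caller in this commit follows it with either `structuredBreak` or an explicit branch. The pattern, quoted from the call sites below:

// Fragment from the surrounding code, repeated at each call site:
const next_block = try self.genStructuredBody(.selection, body);
try self.structuredBreak(next_block);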
    fn airBlock(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
        // In AIR, a block doesn't really define an entry point like a basic block would;
        // it is more like a scope that breaks can jump out of and "return" a value from.
@@ -3661,62 +3922,170 @@ const DeclGen = struct {
        const body = self.air.extra[extra.end..][0..extra.data.body_len];
        const have_block_result = ty.isFnOrHasRuntimeBitsIgnoreComptime(mod);

        // 4 chosen as arbitrary initial capacity.
        var block = Block{
            // Label id is lazily allocated if needed.
            .label_id = null,
            .incoming_blocks = try std.ArrayListUnmanaged(IncomingBlock).initCapacity(self.gpa, 4),
        const cf = switch (self.control_flow) {
            .structured => |*cf| cf,
            .unstructured => |*cf| {
                var block = ControlFlow.Unstructured.Block{};
                defer block.incoming_blocks.deinit(self.gpa);

                // 4 chosen as arbitrary initial capacity.
                try block.incoming_blocks.ensureUnusedCapacity(self.gpa, 4);

                try cf.blocks.putNoClobber(self.gpa, inst, &block);
                defer assert(cf.blocks.remove(inst));

                try self.genBody(body);

                // Only begin a new block if there were actually any breaks towards it.
                if (block.label) |label| {
                    try self.beginSpvBlock(label);
                }

                if (!have_block_result)
                    return null;

                assert(block.label != null);
                const result_id = self.spv.allocId();
                const result_type_id = try self.resolveTypeId(ty);

                try self.func.body.emitRaw(
                    self.spv.gpa,
                    .OpPhi,
                    // result type + result + variable/parent...
                    2 + @as(u16, @intCast(block.incoming_blocks.items.len * 2)),
                );
                self.func.body.writeOperand(spec.IdResultType, result_type_id);
                self.func.body.writeOperand(spec.IdRef, result_id);

                for (block.incoming_blocks.items) |incoming| {
                    self.func.body.writeOperand(
                        spec.PairIdRefIdRef,
                        .{ incoming.break_value_id, incoming.src_label },
                    );
                }

                return result_id;
            },
        };
        defer block.incoming_blocks.deinit(self.gpa);

        try self.blocks.putNoClobber(self.gpa, inst, &block);
        defer assert(self.blocks.remove(inst));
        const maybe_block_result_var_id = if (have_block_result) blk: {
            const block_result_var_id = try self.alloc(ty, .{ .storage_class = .Function });
            try cf.block_results.putNoClobber(self.gpa, inst, block_result_var_id);
            break :blk block_result_var_id;
        } else null;
        defer if (have_block_result) assert(cf.block_results.remove(inst));

        try self.genBody(body);
        const next_block = try self.genStructuredBody(.selection, body);

        // Only begin a new block if there were actually any breaks towards it.
        if (block.label_id) |label_id| {
            try self.beginSpvBlock(label_id);
        // When encountering a block instruction, we are always at least in the function's scope,
        // so there always has to be another entry.
        assert(cf.block_stack.items.len > 0);

        // Check if the target of the branch was this current block.
        const block_id_ty_ref = try self.intType(.unsigned, 32);
        const this_block = try self.constInt(block_id_ty_ref, inst);
        const jump_to_this_block_id = self.spv.allocId();
        const bool_ty_ref = try self.resolveType(Type.bool, .direct);
        try self.func.body.emit(self.spv.gpa, .OpIEqual, .{
            .id_result_type = self.typeId(bool_ty_ref),
            .id_result = jump_to_this_block_id,
            .operand_1 = next_block,
            .operand_2 = this_block,
        });

        const sblock = cf.block_stack.getLast();

        if (ty.isNoReturn(mod)) {
            // If this block is noreturn, this instruction is the last of a block,
            // and we must simply jump to the block's merge unconditionally.
            try self.structuredBreak(next_block);
        } else {
            switch (sblock.*) {
                .selection => |*merge| {
                    // To jump out of a selection block, push a new entry onto its merge stack and
                    // generate a conditional branch to there and to the instructions following this block.
                    const merge_label = self.spv.allocId();
                    const then_label = self.spv.allocId();
                    try self.func.body.emit(self.spv.gpa, .OpSelectionMerge, .{
                        .merge_block = merge_label,
                        .selection_control = .{},
                    });
                    try self.func.body.emit(self.spv.gpa, .OpBranchConditional, .{
                        .condition = jump_to_this_block_id,
                        .true_label = then_label,
                        .false_label = merge_label,
                    });
                    try merge.merge_stack.append(self.gpa, .{
                        .incoming = .{
                            .src_label = self.current_block_label,
                            .next_block = next_block,
                        },
                        .merge_block = merge_label,
                    });

                    try self.beginSpvBlock(then_label);
                },
                .loop => |*merge| {
                    // To jump out of a loop block, generate a conditional that exits the block
                    // to the loop merge if the target ID is not the one of this block.
                    const continue_label = self.spv.allocId();
                    try self.func.body.emit(self.spv.gpa, .OpBranchConditional, .{
                        .condition = jump_to_this_block_id,
                        .true_label = continue_label,
                        .false_label = merge.merge_block,
                    });
                    try merge.merges.append(self.gpa, .{
                        .src_label = self.current_block_label,
                        .next_block = next_block,
                    });
                    try self.beginSpvBlock(continue_label);
                },
            }
        }

        if (!have_block_result)
            return null;

        assert(block.label_id != null);
        const result_id = self.spv.allocId();
        const result_type_id = try self.resolveTypeId(ty);

        try self.func.body.emitRaw(self.spv.gpa, .OpPhi, 2 + @as(u16, @intCast(block.incoming_blocks.items.len * 2))); // result type + result + variable/parent...
        self.func.body.writeOperand(spec.IdResultType, result_type_id);
        self.func.body.writeOperand(spec.IdRef, result_id);

        for (block.incoming_blocks.items) |incoming| {
            self.func.body.writeOperand(spec.PairIdRefIdRef, .{ incoming.break_value_id, incoming.src_label_id });
        if (maybe_block_result_var_id) |block_result_var_id| {
            return try self.load(ty, block_result_var_id, .{});
        }

        return result_id;
        return null;
    }

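A way to read the structured `airBlock` lowering above (an editor's model, not compiler code): every `block` uses its AIR instruction index as a u32 id, and after the body's merges resolve, the phi'd id is compared against this block's own id to decide whether control stops unwinding here.

const std = @import("std");

// Stand-ins for the phi'd target id and this block's id (an AIR inst index).
fn stopsHere(next_block: u32, this_block: u32) bool {
    // Mirrors the OpIEqual that feeds the OpBranchConditional above.
    return next_block == this_block;
}

test "unwinding stops at the targeted block" {
    try std.testing.expect(stopsHere(7, 7));
    try std.testing.expect(!stopsHere(3, 7)); // keep jumping outward
}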
    fn airBr(self: *DeclGen, inst: Air.Inst.Index) !void {
        const mod = self.module;
        const br = self.air.instructions.items(.data)[inst].br;
        const operand_ty = self.typeOf(br.operand);
        const block = self.blocks.get(br.block_inst).?;

        const mod = self.module;
        if (operand_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) {
            const operand_id = try self.resolve(br.operand);
            // current_block_label_id should not be undefined here, unless there is a br or br_void in the function's body.
            try block.incoming_blocks.append(self.gpa, .{
                .src_label_id = self.current_block_label_id,
                .break_value_id = operand_id,
            });
        switch (self.control_flow) {
            .structured => |*cf| {
                if (operand_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) {
                    const operand_id = try self.resolve(br.operand);
                    const block_result_var_id = cf.block_results.get(br.block_inst).?;
                    try self.store(operand_ty, block_result_var_id, operand_id, .{});
                }

                const block_id_ty_ref = try self.intType(.unsigned, 32);
                const next_block = try self.constInt(block_id_ty_ref, br.block_inst);
                try self.structuredBreak(next_block);
            },
            .unstructured => |cf| {
                const block = cf.blocks.get(br.block_inst).?;
                if (operand_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) {
                    const operand_id = try self.resolve(br.operand);
                    // current_block_label should not be undefined here, unless there
                    // is a br or br_void in the function's body.
                    try block.incoming_blocks.append(self.gpa, .{
                        .src_label = self.current_block_label,
                        .break_value_id = operand_id,
                    });
                }

                if (block.label == null) {
                    block.label = self.spv.allocId();
                }

                try self.func.body.emitBranch(self.spv.gpa, block.label.?);
            },
        }

        if (block.label_id == null) {
            block.label_id = self.spv.allocId();
        }

        try self.func.body.emit(self.spv.gpa, .OpBranch, .{ .target_label = block.label_id.? });
    }

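The structured `airBr` above replaces the phi of break values with a store: the operand goes into the `block_results` variable allocated in `airBlock`, and only the u32 block id travels through the control-flow merges. A standalone model (names are illustrative, not from the commit):

const std = @import("std");

var block_result: u32 = undefined; // models the Function-storage OpVariable
var next_block: u32 = undefined; // models the value fed to structuredBreak

fn brToBlock(block_id: u32, operand: u32) void {
    block_result = operand; // store(operand_ty, block_result_var_id, operand_id)
    next_block = block_id; // constInt(block_id_ty_ref, br.block_inst)
}

test "break value survives the merge via the variable" {
    brToBlock(7, 42);
    if (next_block == 7) {
        try std.testing.expectEqual(@as(u32, 42), block_result);
    } else return error.TestUnexpectedResult;
}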
    fn airCondBr(self: *DeclGen, inst: Air.Inst.Index) !void {
@@ -3726,23 +4095,104 @@ const DeclGen = struct {
        const else_body = self.air.extra[cond_br.end + then_body.len ..][0..cond_br.data.else_body_len];
        const condition_id = try self.resolve(pl_op.operand);

        // These will always generate a new SPIR-V block, since they are ir.Body and not ir.Block.
        const then_label_id = self.spv.allocId();
        const else_label_id = self.spv.allocId();
        const then_label = self.spv.allocId();
        const else_label = self.spv.allocId();

        // TODO: We can generate OpSelectionMerge here if we know the target block that both of these will resolve to,
        // but I don't know if those will always resolve to the same block.
        switch (self.control_flow) {
            .structured => {
                const merge_label = self.spv.allocId();

        try self.func.body.emit(self.spv.gpa, .OpBranchConditional, .{
            .condition = condition_id,
            .true_label = then_label_id,
            .false_label = else_label_id,
        });
                try self.func.body.emit(self.spv.gpa, .OpSelectionMerge, .{
                    .merge_block = merge_label,
                    .selection_control = .{},
                });
                try self.func.body.emit(self.spv.gpa, .OpBranchConditional, .{
                    .condition = condition_id,
                    .true_label = then_label,
                    .false_label = else_label,
                });

        try self.beginSpvBlock(then_label_id);
        try self.genBody(then_body);
        try self.beginSpvBlock(else_label_id);
        try self.genBody(else_body);
                try self.beginSpvBlock(then_label);
                const then_next = try self.genStructuredBody(.selection, then_body);
                const then_incoming = ControlFlow.Structured.Block.Incoming{
                    .src_label = self.current_block_label,
                    .next_block = then_next,
                };
                try self.func.body.emitBranch(self.spv.gpa, merge_label);

                try self.beginSpvBlock(else_label);
                const else_next = try self.genStructuredBody(.selection, else_body);
                const else_incoming = ControlFlow.Structured.Block.Incoming{
                    .src_label = self.current_block_label,
                    .next_block = else_next,
                };
                try self.func.body.emitBranch(self.spv.gpa, merge_label);

                try self.beginSpvBlock(merge_label);
                const next_block = try self.structuredNextBlock(&.{ then_incoming, else_incoming });

                try self.structuredBreak(next_block);
            },
            .unstructured => {
                try self.func.body.emit(self.spv.gpa, .OpBranchConditional, .{
                    .condition = condition_id,
                    .true_label = then_label,
                    .false_label = else_label,
                });

                try self.beginSpvBlock(then_label);
                try self.genBody(then_body);
                try self.beginSpvBlock(else_label);
                try self.genBody(else_body);
            },
        }
    }

    fn airLoop(self: *DeclGen, inst: Air.Inst.Index) !void {
        const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
        const loop = self.air.extraData(Air.Block, ty_pl.payload);
        const body = self.air.extra[loop.end..][0..loop.data.body_len];

        const body_label = self.spv.allocId();

        switch (self.control_flow) {
            .structured => {
                const header_label = self.spv.allocId();
                const merge_label = self.spv.allocId();
                const continue_label = self.spv.allocId();

                // The back-edge must point to the loop header, so generate a separate block for the
                // loop header so that we don't accidentally include some instructions from there
                // in the loop.
                try self.func.body.emitBranch(self.spv.gpa, header_label);
                try self.beginSpvBlock(header_label);

                // Emit the loop header and jump to the loop body.
                try self.func.body.emit(self.spv.gpa, .OpLoopMerge, .{
                    .merge_block = merge_label,
                    .continue_target = continue_label,
                    .loop_control = .{},
                });
                try self.func.body.emitBranch(self.spv.gpa, body_label);

                try self.beginSpvBlock(body_label);

                const next_block = try self.genStructuredBody(.{ .loop = .{
                    .merge_label = merge_label,
                    .continue_label = continue_label,
                } }, body);
                try self.structuredBreak(next_block);

                try self.beginSpvBlock(continue_label);
                try self.func.body.emitBranch(self.spv.gpa, header_label);
            },
            .unstructured => {
                try self.func.body.emitBranch(self.spv.gpa, body_label);
                try self.beginSpvBlock(body_label);
                try self.genBody(body);
                try self.func.body.emitBranch(self.spv.gpa, body_label);
            },
        }
    }
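For reference, the structured loop skeleton above (header, `OpLoopMerge`, body, continue block, back-edge) is the shape a plain Zig loop such as the following lowers to; the `break` goes out through the loop's merge block rather than a direct jump (an editor's example, not from the commit):

export fn firstAtLeast(step: u32, limit: u32) u32 {
    var x: u32 = 0;
    while (true) {
        if (x >= limit) break; // exits via the loop merge block
        x += step; // falls through to the continue block and the back-edge
    }
    return x;
}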

    fn airLoad(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
@@ -3766,22 +4216,6 @@ const DeclGen = struct {
        try self.store(elem_ty, ptr, value, .{ .is_volatile = ptr_ty.isVolatilePtr(self.module) });
    }

    fn airLoop(self: *DeclGen, inst: Air.Inst.Index) !void {
        const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
        const loop = self.air.extraData(Air.Block, ty_pl.payload);
        const body = self.air.extra[loop.end..][0..loop.data.body_len];
        const loop_label_id = self.spv.allocId();

        // Jump to the loop entry point.
        try self.func.body.emit(self.spv.gpa, .OpBranch, .{ .target_label = loop_label_id });

        // TODO: Look into OpLoopMerge.
        try self.beginSpvBlock(loop_label_id);
        try self.genBody(body);

        try self.func.body.emit(self.spv.gpa, .OpBranch, .{ .target_label = loop_label_id });
    }

    fn airRet(self: *DeclGen, inst: Air.Inst.Index) !void {
        const operand = self.air.instructions.items(.data)[inst].un_op;
        const ret_ty = self.typeOf(operand);
@@ -3870,7 +4304,20 @@ const DeclGen = struct {
        const err_block = self.spv.allocId();
        const ok_block = self.spv.allocId();

        // TODO: Merge block
        switch (self.control_flow) {
            .structured => {
                // According to AIR documentation, this block is guaranteed
                // not to break and to end in a return instruction. Thus,
                // for structured control flow, we can just naively use
                // the ok block as the merge block here.
                try self.func.body.emit(self.spv.gpa, .OpSelectionMerge, .{
                    .merge_block = ok_block,
                    .selection_control = .{},
                });
            },
            .unstructured => {},
        }

        try self.func.body.emit(self.spv.gpa, .OpBranchConditional, .{
            .condition = is_err_id,
            .true_label = err_block,
@@ -3881,7 +4328,6 @@ const DeclGen = struct {
        try self.genBody(body);

        try self.beginSpvBlock(ok_block);
        // Now just extract the payload, if required.
        }
        if (self.liveness.isUnused(inst)) {
            return null;
@@ -3890,6 +4336,7 @@ const DeclGen = struct {
            return null;
        }

        // Now just extract the payload, if required.
        return try self.extractField(payload_ty, err_union_id, eu_layout.payloadFieldIndex());
    }

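The `.structured` arm above leans on an AIR guarantee: the error branch of a `try` never falls through, so `ok_block` can double as the selection merge block. The kind of input in question (an editor's example, not from the commit):

fn mayFail(x: u32) error{Zero}!u32 {
    if (x == 0) return error.Zero;
    return x - 1;
}

export fn useTry(x: u32) u32 {
    // The error branch returns, so it never reaches the merge/ok block.
    return mayFail(x) catch 0;
}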
@@ -4155,9 +4602,8 @@ const DeclGen = struct {
        // Zig switches are grouped by condition, so we need to loop through all of them.
        const num_conditions = blk: {
            var extra_index: usize = switch_br.end;
            var case_i: u32 = 0;
            var num_conditions: u32 = 0;
            while (case_i < num_cases) : (case_i += 1) {
            for (0..num_cases) |_| {
                const case = self.air.extraData(Air.SwitchBr.Case, extra_index);
                const case_body = self.air.extra[case.end + case.data.items_len ..][0..case.data.body_len];
                extra_index = case.end + case.data.items_len + case_body.len;
@@ -4171,6 +4617,18 @@ const DeclGen = struct {
        // We always need the default case - if zig has none, we will generate unreachable there.
        const default = self.spv.allocId();

        const merge_label = switch (self.control_flow) {
            .structured => self.spv.allocId(),
            .unstructured => null,
        };

        if (self.control_flow == .structured) {
            try self.func.body.emit(self.spv.gpa, .OpSelectionMerge, .{
                .merge_block = merge_label.?,
                .selection_control = .{},
            });
        }

        // Emit the instruction before generating the blocks.
        try self.func.body.emitRaw(self.spv.gpa, .OpSwitch, 2 + (cond_words + 1) * num_conditions);
        self.func.body.writeOperand(IdRef, cond_indirect);
@@ -4179,20 +4637,17 @@ const DeclGen = struct {
        // Emit each of the cases.
        {
            var extra_index: usize = switch_br.end;
            var case_i: u32 = 0;
            while (case_i < num_cases) : (case_i += 1) {
            for (0..num_cases) |case_i| {
                // SPIR-V needs a literal here, whose width depends on the case condition.
                const case = self.air.extraData(Air.SwitchBr.Case, extra_index);
                const items = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[case.end..][0..case.data.items_len]));
                const case_body = self.air.extra[case.end + items.len ..][0..case.data.body_len];
                extra_index = case.end + case.data.items_len + case_body.len;

                const label = IdRef{ .id = first_case_label.id + case_i };
                const label = IdRef{ .id = @intCast(first_case_label.id + case_i) };

                for (items) |item| {
                    const value = (try self.air.value(item, mod)) orelse {
                        return self.todo("switch on runtime value???", .{});
                    };
                    const value = (try self.air.value(item, mod)) orelse unreachable;
                    const int_val = switch (cond_ty.zigTypeTag(mod)) {
                        .Bool, .Int => if (cond_ty.isSignedInt(mod)) @as(u64, @bitCast(value.toSignedInt(mod))) else value.toUnsignedInt(mod),
                        .Enum => blk: {
@@ -4213,28 +4668,65 @@ const DeclGen = struct {
            }
        }

        var incoming_structured_blocks = std.ArrayListUnmanaged(ControlFlow.Structured.Block.Incoming){};
        defer incoming_structured_blocks.deinit(self.gpa);

        if (self.control_flow == .structured) {
            try incoming_structured_blocks.ensureUnusedCapacity(self.gpa, num_cases + 1);
        }

        // Now, finally, we can start emitting each of the cases.
        var extra_index: usize = switch_br.end;
        var case_i: u32 = 0;
        while (case_i < num_cases) : (case_i += 1) {
        for (0..num_cases) |case_i| {
            const case = self.air.extraData(Air.SwitchBr.Case, extra_index);
            const items = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[case.end..][0..case.data.items_len]));
            const case_body = self.air.extra[case.end + items.len ..][0..case.data.body_len];
            extra_index = case.end + case.data.items_len + case_body.len;

            const label = IdResult{ .id = first_case_label.id + case_i };
            const label = IdResult{ .id = @intCast(first_case_label.id + case_i) };

            try self.beginSpvBlock(label);
            try self.genBody(case_body);

            switch (self.control_flow) {
                .structured => {
                    const next_block = try self.genStructuredBody(.selection, case_body);
                    incoming_structured_blocks.appendAssumeCapacity(.{
                        .src_label = self.current_block_label,
                        .next_block = next_block,
                    });
                    try self.func.body.emitBranch(self.spv.gpa, merge_label.?);
                },
                .unstructured => {
                    try self.genBody(case_body);
                },
            }
        }

        const else_body = self.air.extra[extra_index..][0..switch_br.data.else_body_len];
        try self.beginSpvBlock(default);
        if (else_body.len != 0) {
            try self.genBody(else_body);
            switch (self.control_flow) {
                .structured => {
                    const next_block = try self.genStructuredBody(.selection, else_body);
                    incoming_structured_blocks.appendAssumeCapacity(.{
                        .src_label = self.current_block_label,
                        .next_block = next_block,
                    });
                    try self.func.body.emitBranch(self.spv.gpa, merge_label.?);
                },
                .unstructured => {
                    try self.genBody(else_body);
                },
            }
        } else {
            try self.func.body.emit(self.spv.gpa, .OpUnreachable, {});
        }

        if (self.control_flow == .structured) {
            try self.beginSpvBlock(merge_label.?);
            const next_block = try self.structuredNextBlock(incoming_structured_blocks.items);
            try self.structuredBreak(next_block);
        }
    }

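As with `OpPhi`, the `emitRaw(.OpSwitch, ...)` call above sizes the instruction up front: the selector and default label take two operand words, then each condition contributes a `cond_words`-wide literal plus one label. A standalone sketch of the arithmetic (illustrative, not part of the commit):

const std = @import("std");

fn opSwitchOperandWords(cond_words: u32, num_conditions: u32) u32 {
    // selector + default label, then (literal + label) per condition
    return 2 + (cond_words + 1) * num_conditions;
}

test "OpSwitch operand count for three one-word cases" {
    try std.testing.expectEqual(@as(u32, 8), opSwitchOperandWords(1, 3));
}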
    fn airUnreach(self: *DeclGen) !void {

@@ -184,6 +184,7 @@ pub fn deinit(self: *Module) void {
    self.sections.debug_strings.deinit(self.gpa);
    self.sections.debug_names.deinit(self.gpa);
    self.sections.annotations.deinit(self.gpa);
    self.sections.types_globals_constants.deinit(self.gpa);
    self.sections.functions.deinit(self.gpa);

    self.source_file_names.deinit(self.gpa);

@@ -65,6 +65,16 @@ pub fn emit(
    section.writeOperands(opcode.Operands(), operands);
}

pub fn emitBranch(
    section: *Section,
    allocator: Allocator,
    target_label: spec.IdRef,
) !void {
    try section.emit(allocator, .OpBranch, .{
        .target_label = target_label,
    });
}

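`emitBranch` is shorthand for the two-word `OpBranch` emission that previously appeared inline at every call site (`.{ .target_label = ... }`). A self-contained sketch of what those two words are; the opcode number is quoted from the SPIR-V spec from memory, so verify it before relying on it:

const std = @import("std");

fn opBranchWords(target_label_id: u32) [2]u32 {
    const op_branch: u16 = 249; // OpBranch opcode per the SPIR-V spec (assumption)
    const word_count: u16 = 2; // opcode word + target label id
    return .{ (@as(u32, word_count) << 16) | op_branch, target_label_id };
}

test "OpBranch is two words" {
    const words = opBranchWords(42);
    try std.testing.expectEqual(@as(u32, (2 << 16) | 249), words[0]);
    try std.testing.expectEqual(@as(u32, 42), words[1]);
}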
pub fn emitSpecConstantOp(
    section: *Section,
    allocator: Allocator,
@@ -198,10 +208,6 @@ fn writeExtendedMask(section: *Section, comptime Operand: type, operand: Operand
        }
    }

    if (mask == 0) {
        return;
    }

    section.writeWord(mask);

    inline for (@typeInfo(Operand).Struct.fields) |field| {
@@ -304,9 +310,6 @@ fn extendedMaskSize(comptime Operand: type, operand: Operand) usize {
            else => unreachable,
        }
    }
    if (!any_set) {
        return 0;
    }
    return total + 1; // Add one for the mask itself.
}

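The two deletions above remove the early return for an all-zero mask, presumably so that optional bitmask operands such as the `.loop_control = .{}` and `.selection_control = .{}` used earlier in this commit always serialize as one (possibly zero) mask word. A sketch of the resulting size rule (editor's inference, not from the commit):

fn extendedMaskSizeAfter(payload_words: usize) usize {
    return payload_words + 1; // the mask word itself is now always present
}

test "empty mask still occupies one word" {
    try @import("std").testing.expectEqual(@as(usize, 1), extendedMaskSizeAfter(0));
}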
@@ -268,6 +268,9 @@ pub const Options = struct {
    /// (Windows) .def file to specify when linking
    module_definition_file: ?[]const u8 = null,

    /// (SPIR-V) whether to generate a structured control flow graph or not
    want_structured_cfg: ?bool = null,

    pub fn effectiveOutputMode(options: Options) std.builtin.OutputMode {
        return if (options.use_lld) .Obj else options.output_mode;
    }

@@ -493,6 +493,8 @@ const usage_build_generic =
    \\    msvc                  Use msvc include paths (must be present on the system)
    \\    gnu                   Use mingw include paths (distributed with Zig)
    \\    none                  Do not use any autodetected include paths
    \\  -fstructured-cfg        (SPIR-V) force SPIR-V kernels to use structured control flow
    \\  -fno-structured-cfg     (SPIR-V) force SPIR-V kernels to not use structured control flow
    \\
    \\Link Options:
    \\  -l[lib], --library [lib]       Link against system library (only if actually used)
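Usage of the new flags might look as follows; the flag names come from this change, but the exact target triple is an assumption and depends on the Zig version:

// Hypothetical invocation (kernel.zig is an illustrative file name):
//
//   zig build-obj kernel.zig -target spirv64-opencl -fstructured-cfg
//
// kernel.zig:
export fn add(a: u32, b: u32) u32 {
    return a + b;
}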
@@ -913,7 +915,7 @@ fn buildOutputType(
    var pdb_out_path: ?[]const u8 = null;
    var dwarf_format: ?std.dwarf.Format = null;
    var error_limit: ?Module.ErrorInt = null;
    var want_structured_cfg: ?bool = null;
    // e.g. -m3dnow or -mno-outline-atomics. They correspond to std.Target llvm cpu feature names.
    // This array is populated by zig cc frontend and then has to be converted to zig-style
    // CPU features.
@@ -1070,6 +1072,10 @@ fn buildOutputType(
                        if (mem.eql(u8, next_arg, "--")) break;
                        try extra_rcflags.append(next_arg);
                    }
                } else if (mem.startsWith(u8, arg, "-fstructured-cfg")) {
                    want_structured_cfg = true;
                } else if (mem.startsWith(u8, arg, "-fno-structured-cfg")) {
                    want_structured_cfg = false;
                } else if (mem.eql(u8, arg, "--color")) {
                    const next_arg = args_iter.next() orelse {
                        fatal("expected [auto|on|off] after --color", .{});
@@ -3595,6 +3601,7 @@ fn buildOutputType(
        .error_tracing = error_tracing,
        .pdb_out_path = pdb_out_path,
        .error_limit = error_limit,
        .want_structured_cfg = want_structured_cfg,
    }) catch |err| switch (err) {
        error.LibCUnavailable => {
            const target = target_info.target;

@@ -27,6 +27,7 @@ test "large alignment of local constant" {
test "slicing array of length 1 can not assume runtime index is always zero" {
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;

    var runtime_index: usize = 1;
    _ = &runtime_index;

@@ -572,6 +572,8 @@ test "comptime cast fn to ptr" {
}

test "equality compare fn ptrs" {
    if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // Uses function pointers

    var a = &emptyFn;
    _ = &a;
    try expect(a == a);

@@ -853,6 +853,7 @@ test "slice with dereferenced value" {

test "empty slice ptr is non null" {
    if (builtin.zig_backend == .stage2_aarch64 and builtin.os.tag == .macos) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // Test assumes `undefined` is non-zero

    {
        const empty_slice: []u8 = &[_]u8{};

@@ -1513,6 +1513,7 @@ test "discarded struct initialization works as expected" {

test "function pointer in struct returns the struct" {
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;

    const A = struct {
        const A = @This();

@@ -1929,6 +1929,8 @@ test "inner struct initializer uses union layout" {
}

test "inner struct initializer uses packed union layout" {
    if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;

    const namespace = struct {
        const U = packed union {
            a: packed struct {
@@ -1953,6 +1955,8 @@ test "inner struct initializer uses packed union layout" {
}

test "extern union initialized via reintepreted struct field initializer" {
    if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;

    const bytes = [_]u8{ 0xaa, 0xbb, 0xcc, 0xdd };

    const U = extern union {
@@ -1970,6 +1974,8 @@ test "extern union initialized via reintepreted struct field initializer" {
}

test "packed union initialized via reintepreted struct field initializer" {
    if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;

    const bytes = [_]u8{ 0xaa, 0xbb, 0xcc, 0xdd };

    const U = packed union {
@@ -1988,6 +1994,8 @@ test "packed union initialized via reintepreted struct field initializer" {
}

test "store of comptime reinterpreted memory to extern union" {
    if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;

    const bytes = [_]u8{ 0xaa, 0xbb, 0xcc, 0xdd };

    const U = extern union {
@@ -2008,6 +2016,8 @@ test "store of comptime reinterpreted memory to extern union" {
}

test "store of comptime reinterpreted memory to packed union" {
    if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;

    const bytes = [_]u8{ 0xaa, 0xbb, 0xcc, 0xdd };

    const U = packed union {
@@ -2063,6 +2073,8 @@ test "pass register-sized field as non-register-sized union" {
}

test "circular dependency through pointer field of a union" {
    if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;

    const S = struct {
        const UnionInner = extern struct {
            outer: UnionOuter = std.mem.zeroes(UnionOuter),