Mirror of https://github.com/ziglang/zig.git, synced 2026-02-21 08:45:52 +00:00
compiler: introduce new CallingConvention
This commit begins implementing accepted proposal #21209 by making `std.builtin.CallingConvention` a tagged union.

The stage1 dance here is a little convoluted. This commit introduces the new type as `NewCallingConvention`, keeping the old `CallingConvention` around. The compiler uses `std.builtin.NewCallingConvention` exclusively, but when fetching the type from `std` while running the compiler (e.g. with `getBuiltinType`), the name `CallingConvention` is used. This allows a prior build of Zig to be used to build this commit. The next commit will update `zig1.wasm`, after which the compiler and standard library can be updated to completely replace `CallingConvention` with `NewCallingConvention`.

The second half of #21209 is to remove `@setAlignStack`, which will be implemented in another commit after updating `zig1.wasm`.
This commit is contained in:
parent
8573836892
commit
51706af908
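To make the shape of the change concrete, here is a minimal sketch of the syntax difference (illustrative declarations only; the option field shown is one this commit adds, and the target-specific tags compile only on matching targets):

```zig
// Old: a calling convention is a bare enum tag.
fn before() callconv(.C) void {}

// New: a calling convention is a union tag that can carry per-convention
// options. `.c` remains available as an alias for the target's default
// C calling convention.
fn after() callconv(.c) void {}
fn sysv() callconv(.{ .x86_64_sysv = .{ .incoming_stack_alignment = 16 } }) void {}
```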
@@ -550,12 +550,24 @@ pub const Payload = struct {
         is_var_args: bool,
         name: ?[]const u8,
         linksection_string: ?[]const u8,
-        explicit_callconv: ?std.builtin.CallingConvention,
+        explicit_callconv: ?CallingConvention,
         params: []Param,
         return_type: Node,
         body: ?Node,
         alignment: ?c_uint,
     },
+
+    pub const CallingConvention = enum {
+        c,
+        x86_64_sysv,
+        x86_stdcall,
+        x86_fastcall,
+        x86_thiscall,
+        x86_vectorcall,
+        aarch64_vfabi,
+        arm_aapcs,
+        arm_aapcs_vfp,
+    };
 };

 pub const Param = struct {

@@ -2812,14 +2824,50 @@ fn renderFunc(c: *Context, node: Node) !NodeIndex {
     const callconv_expr = if (payload.explicit_callconv) |some| blk: {
         _ = try c.addToken(.keyword_callconv, "callconv");
         _ = try c.addToken(.l_paren, "(");
-        _ = try c.addToken(.period, ".");
-        const res = try c.addNode(.{
-            .tag = .enum_literal,
-            .main_token = try c.addTokenFmt(.identifier, "{s}", .{@tagName(some)}),
-            .data = undefined,
-        });
+        const cc_node = switch (some) {
+            .c => cc_node: {
+                _ = try c.addToken(.period, ".");
+                break :cc_node try c.addNode(.{
+                    .tag = .enum_literal,
+                    .main_token = try c.addToken(.identifier, "c"),
+                    .data = undefined,
+                });
+            },
+            .x86_64_sysv,
+            .x86_stdcall,
+            .x86_fastcall,
+            .x86_thiscall,
+            .x86_vectorcall,
+            .aarch64_vfabi,
+            .arm_aapcs,
+            .arm_aapcs_vfp,
+            => cc_node: {
+                // .{ .foo = .{} }
+                _ = try c.addToken(.period, ".");
+                const outer_lbrace = try c.addToken(.l_brace, "{");
+                _ = try c.addToken(.period, ".");
+                _ = try c.addToken(.identifier, @tagName(some));
+                _ = try c.addToken(.equal, "=");
+                _ = try c.addToken(.period, ".");
+                const inner_lbrace = try c.addToken(.l_brace, "{");
+                _ = try c.addToken(.r_brace, "}");
+                _ = try c.addToken(.r_brace, "}");
+                break :cc_node try c.addNode(.{
+                    .tag = .struct_init_dot_two,
+                    .main_token = outer_lbrace,
+                    .data = .{
+                        .lhs = try c.addNode(.{
+                            .tag = .struct_init_dot_two,
+                            .main_token = inner_lbrace,
+                            .data = .{ .lhs = 0, .rhs = 0 },
+                        }),
+                        .rhs = 0,
+                    },
+                });
+            },
+        };
         _ = try c.addToken(.r_paren, ")");
-        break :blk res;
+        break :blk cc_node;
     } else 0;

     const return_type_expr = try renderNode(c, payload.return_type);
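For readers of the hunk above: for the non-`c` conventions the rendered output is now a one-field struct initializer rather than an enum literal. A sketch of the before/after translate-c output (illustrative prototype, not taken from the diff):

```zig
// Previously rendered by translate-c:
pub extern fn f() callconv(.x86_stdcall) void;
// Rendered after this change (tag plus empty options struct):
pub extern fn f() callconv(.{ .x86_stdcall = .{} }) void;
```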
@@ -1609,6 +1609,165 @@ pub const Cpu = struct {
                 else => ".X",
             };
         }
+
+        /// Returns the array of `Arch` to which a specific `std.builtin.CallingConvention` applies.
+        /// Asserts that `cc` is not `.auto`, `.@"async"`, `.naked`, or `.@"inline"`.
+        pub fn fromCallconv(cc: std.builtin.NewCallingConvention) []const Arch {
+            return switch (cc) {
+                .auto,
+                .@"async",
+                .naked,
+                .@"inline",
+                => unreachable,
+
+                .x86_64_sysv,
+                .x86_64_win,
+                .x86_64_regcall_v3_sysv,
+                .x86_64_regcall_v4_win,
+                .x86_64_vectorcall,
+                .x86_64_interrupt,
+                => &.{.x86_64},
+
+                .x86_sysv,
+                .x86_win,
+                .x86_stdcall,
+                .x86_fastcall,
+                .x86_thiscall,
+                .x86_thiscall_mingw,
+                .x86_regcall_v3,
+                .x86_regcall_v4_win,
+                .x86_vectorcall,
+                .x86_interrupt,
+                => &.{.x86},
+
+                .aarch64_aapcs,
+                .aarch64_aapcs_darwin,
+                .aarch64_aapcs_win,
+                .aarch64_vfabi,
+                .aarch64_vfabi_sve,
+                => &.{ .aarch64, .aarch64_be },
+
+                .arm_apcs,
+                .arm_aapcs,
+                .arm_aapcs_vfp,
+                .arm_aapcs16_vfp,
+                .arm_interrupt,
+                => &.{ .arm, .armeb, .thumb, .thumbeb },
+
+                .mips64_n64,
+                .mips64_n32,
+                .mips64_interrupt,
+                => &.{ .mips64, .mips64el },
+
+                .mips_o32,
+                .mips_interrupt,
+                => &.{ .mips, .mipsel },
+
+                .riscv64_lp64,
+                .riscv64_lp64_v,
+                .riscv64_interrupt,
+                => &.{.riscv64},
+
+                .riscv32_ilp32,
+                .riscv32_ilp32_v,
+                .riscv32_interrupt,
+                => &.{.riscv32},
+
+                .sparc64_sysv,
+                => &.{.sparc64},
+
+                .sparc_sysv,
+                => &.{.sparc},
+
+                .powerpc64_elf,
+                .powerpc64_elf_altivec,
+                .powerpc64_elf_v2,
+                => &.{ .powerpc64, .powerpc64le },
+
+                .powerpc_sysv,
+                .powerpc_sysv_altivec,
+                .powerpc_aix,
+                .powerpc_aix_altivec,
+                => &.{ .powerpc, .powerpcle },
+
+                .wasm_watc,
+                => &.{ .wasm64, .wasm32 },
+
+                .arc_sysv,
+                => &.{.arc},
+
+                .avr_gnu,
+                .avr_builtin,
+                .avr_signal,
+                .avr_interrupt,
+                => &.{.avr},
+
+                .bpf_std,
+                => &.{ .bpfel, .bpfeb },
+
+                .csky_sysv,
+                .csky_interrupt,
+                => &.{.csky},
+
+                .hexagon_sysv,
+                .hexagon_sysv_hvx,
+                => &.{.hexagon},
+
+                .lanai_sysv,
+                => &.{.lanai},
+
+                .loongarch64_lp64,
+                => &.{.loongarch64},
+
+                .loongarch32_ilp32,
+                => &.{.loongarch32},
+
+                .m68k_sysv,
+                .m68k_gnu,
+                .m68k_rtd,
+                .m68k_interrupt,
+                => &.{.m68k},
+
+                .msp430_eabi,
+                => &.{.msp430},
+
+                .propeller1_sysv,
+                => &.{.propeller1},
+
+                .propeller2_sysv,
+                => &.{.propeller2},
+
+                .s390x_sysv,
+                .s390x_sysv_vx,
+                => &.{.s390x},
+
+                .ve_sysv,
+                => &.{.ve},
+
+                .xcore_xs1,
+                .xcore_xs2,
+                => &.{.xcore},
+
+                .xtensa_call0,
+                .xtensa_windowed,
+                => &.{.xtensa},
+
+                .amdgcn_device,
+                .amdgcn_kernel,
+                .amdgcn_cs,
+                => &.{.amdgcn},
+
+                .nvptx_device,
+                .nvptx_kernel,
+                => &.{ .nvptx, .nvptx64 },
+
+                .spirv_device,
+                .spirv_kernel,
+                .spirv_fragment,
+                .spirv_vertex,
+                => &.{ .spirv, .spirv32, .spirv64 },
+            };
+        }
     };

     pub const Model = struct {

@@ -2873,6 +3032,70 @@ pub fn cTypePreferredAlignment(target: Target, c_type: CType) u16 {
     );
 }

+pub fn defaultCCallingConvention(target: Target) ?std.builtin.NewCallingConvention {
+    return switch (target.cpu.arch) {
+        .x86_64 => switch (target.os.tag) {
+            .windows, .uefi => .{ .x86_64_win = .{} },
+            else => .{ .x86_64_sysv = .{} },
+        },
+        .x86 => switch (target.os.tag) {
+            .windows, .uefi => .{ .x86_win = .{} },
+            else => .{ .x86_sysv = .{} },
+        },
+        .aarch64, .aarch64_be => if (target.os.tag.isDarwin()) cc: {
+            break :cc .{ .aarch64_aapcs_darwin = .{} };
+        } else switch (target.os.tag) {
+            .windows => .{ .aarch64_aapcs_win = .{} },
+            else => .{ .aarch64_aapcs = .{} },
+        },
+        .arm, .armeb, .thumb, .thumbeb => .{ .arm_aapcs = .{} },
+        .mips64, .mips64el => switch (target.abi) {
+            .gnuabin32 => .{ .mips64_n32 = .{} },
+            else => .{ .mips64_n64 = .{} },
+        },
+        .mips, .mipsel => .{ .mips_o32 = .{} },
+        .riscv64 => .{ .riscv64_lp64 = .{} },
+        .riscv32 => .{ .riscv32_ilp32 = .{} },
+        .sparc64 => .{ .sparc64_sysv = .{} },
+        .sparc => .{ .sparc_sysv = .{} },
+        .powerpc64 => if (target.isMusl())
+            .{ .powerpc64_elf_v2 = .{} }
+        else
+            .{ .powerpc64_elf = .{} },
+        .powerpc64le => .{ .powerpc64_elf_v2 = .{} },
+        .powerpc, .powerpcle => switch (target.os.tag) {
+            .aix => .{ .powerpc_aix = .{} },
+            else => .{ .powerpc_sysv = .{} },
+        },
+        .wasm32 => .{ .wasm_watc = .{} },
+        .wasm64 => .{ .wasm_watc = .{} },
+        .arc => .{ .arc_sysv = .{} },
+        .avr => .avr_gnu,
+        .bpfel, .bpfeb => .{ .bpf_std = .{} },
+        .csky => .{ .csky_sysv = .{} },
+        .hexagon => .{ .hexagon_sysv = .{} },
+        .kalimba => null,
+        .lanai => .{ .lanai_sysv = .{} },
+        .loongarch64 => .{ .loongarch64_lp64 = .{} },
+        .loongarch32 => .{ .loongarch32_ilp32 = .{} },
+        .m68k => if (target.abi.isGnu() or target.abi.isMusl())
+            .{ .m68k_gnu = .{} }
+        else
+            .{ .m68k_sysv = .{} },
+        .msp430 => .{ .msp430_eabi = .{} },
+        .propeller1 => .{ .propeller1_sysv = .{} },
+        .propeller2 => .{ .propeller2_sysv = .{} },
+        .s390x => .{ .s390x_sysv = .{} },
+        .spu_2 => null,
+        .ve => .{ .ve_sysv = .{} },
+        .xcore => .{ .xcore_xs1 = .{} },
+        .xtensa => .{ .xtensa_call0 = .{} },
+        .amdgcn => .{ .amdgcn_device = .{} },
+        .nvptx, .nvptx64 => .nvptx_device,
+        .spirv, .spirv32, .spirv64 => .spirv_device,
+    };
+}
+
 pub fn osArchName(target: std.Target) [:0]const u8 {
     return target.os.tag.archName(target.cpu.arch);
 }
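A hedged sketch of how these two new `std.Target` helpers relate, assuming this commit's signatures land as-is (`NewCallingConvention` is the transitional name):

```zig
const std = @import("std");
const builtin = @import("builtin");

test "default C calling convention applies to the current arch" {
    // Targets with a C ABI report a default C calling convention...
    const cc = builtin.target.defaultCCallingConvention() orelse return;
    // ...and `fromCallconv` must list the current architecture for it.
    for (std.Target.Cpu.Arch.fromCallconv(cc)) |arch| {
        if (arch == builtin.target.cpu.arch) return;
    }
    return error.DefaultCallconvDoesNotApply;
}
```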
@@ -210,6 +210,336 @@ pub const CallingConvention = enum(u8) {
     Vertex,
 };

+/// The calling convention of a function defines how arguments and return values are passed, as well
+/// as any other requirements which callers and callees must respect, such as register preservation
+/// and stack alignment.
+///
+/// This data structure is used by the Zig language code generation and
+/// therefore must be kept in sync with the compiler implementation.
+///
+/// TODO: this will be renamed `CallingConvention` after an initial zig1.wasm update.
+pub const NewCallingConvention = union(enum(u8)) {
+    pub const Tag = @typeInfo(NewCallingConvention).@"union".tag_type.?;
+
+    /// This is an alias for the default C calling convention for this target.
+    /// Functions marked as `extern` or `export` are given this calling convention by default.
+    pub const c = builtin.target.defaultCCallingConvention().?;
+
+    pub const winapi: NewCallingConvention = switch (builtin.target.cpu.arch) {
+        .x86_64 => .{ .x86_64_win = .{} },
+        .x86 => .{ .x86_stdcall = .{} },
+        .aarch64, .aarch64_be => .{ .aarch64_aapcs_win = .{} },
+        .arm, .armeb, .thumb, .thumbeb => .{ .arm_aapcs_vfp = .{} },
+        else => unreachable,
+    };
+
+    pub const kernel: NewCallingConvention = switch (builtin.target.cpu.arch) {
+        .amdgcn => .amdgcn_kernel,
+        .nvptx, .nvptx64 => .nvptx_kernel,
+        .spirv, .spirv32, .spirv64 => .spirv_kernel,
+        else => unreachable,
+    };
+
+    /// Deprecated; use `.auto`.
+    pub const Unspecified: NewCallingConvention = .auto;
+    /// Deprecated; use `.c`.
+    pub const C: NewCallingConvention = .c;
+    /// Deprecated; use `.naked`.
+    pub const Naked: NewCallingConvention = .naked;
+    /// Deprecated; use `.@"async"`.
+    pub const Async: NewCallingConvention = .@"async";
+    /// Deprecated; use `.@"inline"`.
+    pub const Inline: NewCallingConvention = .@"inline";
+    /// Deprecated; use `.x86_64_interrupt`, `.x86_interrupt`, or `.avr_interrupt`.
+    pub const Interrupt: NewCallingConvention = switch (builtin.target.cpu.arch) {
+        .x86_64 => .{ .x86_64_interrupt = .{} },
+        .x86 => .{ .x86_interrupt = .{} },
+        .avr => .avr_interrupt,
+        else => unreachable,
+    };
+    /// Deprecated; use `.avr_signal`.
+    pub const Signal: NewCallingConvention = .avr_signal;
+    /// Deprecated; use `.x86_stdcall`.
+    pub const Stdcall: NewCallingConvention = .{ .x86_stdcall = .{} };
+    /// Deprecated; use `.x86_fastcall`.
+    pub const Fastcall: NewCallingConvention = .{ .x86_fastcall = .{} };
+    /// Deprecated; use `.x86_64_vectorcall`, `.x86_vectorcall`, or `.aarch64_vfabi`.
+    pub const Vectorcall: NewCallingConvention = switch (builtin.target.cpu.arch) {
+        .x86_64 => .{ .x86_64_vectorcall = .{} },
+        .x86 => .{ .x86_vectorcall = .{} },
+        .aarch64, .aarch64_be => .{ .aarch64_vfabi = .{} },
+        else => unreachable,
+    };
+    /// Deprecated; use `.x86_thiscall`.
+    pub const Thiscall: NewCallingConvention = .{ .x86_thiscall = .{} };
+    /// Deprecated; use `.arm_apcs`.
+    pub const APCS: NewCallingConvention = .{ .arm_apcs = .{} };
+    /// Deprecated; use `.arm_aapcs`.
+    pub const AAPCS: NewCallingConvention = .{ .arm_aapcs = .{} };
+    /// Deprecated; use `.arm_aapcs_vfp`.
+    pub const AAPCSVFP: NewCallingConvention = .{ .arm_aapcs_vfp = .{} };
+    /// Deprecated; use `.x86_64_sysv`.
+    pub const SysV: NewCallingConvention = .{ .x86_64_sysv = .{} };
+    /// Deprecated; use `.x86_64_win`.
+    pub const Win64: NewCallingConvention = .{ .x86_64_win = .{} };
+    /// Deprecated; use `.kernel`.
+    pub const Kernel: NewCallingConvention = .kernel;
+    /// Deprecated; use `.spirv_fragment`.
+    pub const Fragment: NewCallingConvention = .spirv_fragment;
+    /// Deprecated; use `.spirv_vertex`.
+    pub const Vertex: NewCallingConvention = .spirv_vertex;
+
+    /// The default Zig calling convention when neither `export` nor `inline` is specified.
+    /// This calling convention makes no guarantees about stack alignment, registers, etc.
+    /// It can only be used within this Zig compilation unit.
+    auto,
+
+    /// The calling convention of a function that can be called with `async` syntax. An `async` call
+    /// of a runtime-known function must target a function with this calling convention.
+    /// Comptime-known functions with other calling conventions may be coerced to this one.
+    @"async",
+
+    /// Functions with this calling convention have no prologue or epilogue, making the function
+    /// uncallable in regular Zig code. This can be useful when integrating with assembly.
+    naked,
+
+    /// This calling convention is exactly equivalent to using the `inline` keyword on a function
+    /// definition. This function will be semantically inlined by the Zig compiler at call sites.
+    /// Pointers to inline functions are comptime-only.
+    @"inline",
+
+    // Calling conventions for the x86_64 architecture.
+    x86_64_sysv: CommonOptions,
+    x86_64_win: CommonOptions,
+    x86_64_regcall_v3_sysv: CommonOptions,
+    x86_64_regcall_v4_win: CommonOptions,
+    x86_64_vectorcall: CommonOptions,
+    x86_64_interrupt: CommonOptions,
+
+    // Calling conventions for the x86 architecture.
+    x86_sysv: X86RegparmOptions,
+    x86_win: X86RegparmOptions,
+    x86_stdcall: X86RegparmOptions,
+    x86_fastcall: CommonOptions,
+    x86_thiscall: CommonOptions,
+    x86_thiscall_mingw: CommonOptions,
+    x86_regcall_v3: CommonOptions,
+    x86_regcall_v4_win: CommonOptions,
+    x86_vectorcall: CommonOptions,
+    x86_interrupt: CommonOptions,
+
+    // Calling conventions for the aarch64 architecture.
+    aarch64_aapcs: CommonOptions,
+    aarch64_aapcs_darwin: CommonOptions,
+    aarch64_aapcs_win: CommonOptions,
+    aarch64_vfabi: CommonOptions,
+    aarch64_vfabi_sve: CommonOptions,
+
+    // Calling conventions for the arm architecture.
+    /// ARM Procedure Call Standard (obsolete)
+    arm_apcs: CommonOptions,
+    /// ARM Architecture Procedure Call Standard
+    arm_aapcs: CommonOptions,
+    /// ARM Architecture Procedure Call Standard Vector Floating-Point
+    arm_aapcs_vfp: CommonOptions,
+    arm_aapcs16_vfp: CommonOptions,
+    arm_interrupt: ArmInterruptOptions,
+
+    // Calling conventions for the mips64 architecture.
+    mips64_n64: CommonOptions,
+    mips64_n32: CommonOptions,
+    mips64_interrupt: MipsInterruptOptions,
+
+    // Calling conventions for the mips architecture.
+    mips_o32: CommonOptions,
+    mips_interrupt: MipsInterruptOptions,
+
+    // Calling conventions for the riscv64 architecture.
+    riscv64_lp64: CommonOptions,
+    riscv64_lp64_v: CommonOptions,
+    riscv64_interrupt: RiscvInterruptOptions,
+
+    // Calling conventions for the riscv32 architecture.
+    riscv32_ilp32: CommonOptions,
+    riscv32_ilp32_v: CommonOptions,
+    riscv32_interrupt: RiscvInterruptOptions,
+
+    // Calling conventions for the sparc64 architecture.
+    sparc64_sysv: CommonOptions,
+
+    // Calling conventions for the sparc architecture.
+    sparc_sysv: CommonOptions,
+
+    // Calling conventions for the powerpc64 architecture.
+    powerpc64_elf: CommonOptions,
+    powerpc64_elf_altivec: CommonOptions,
+    powerpc64_elf_v2: CommonOptions,
+
+    // Calling conventions for the powerpc architecture.
+    powerpc_sysv: CommonOptions,
+    powerpc_sysv_altivec: CommonOptions,
+    powerpc_aix: CommonOptions,
+    powerpc_aix_altivec: CommonOptions,
+
+    /// The standard wasm32/wasm64 calling convention, as specified in the WebAssembly Tool Conventions.
+    wasm_watc: CommonOptions,
+
+    /// The standard ARC calling convention.
+    arc_sysv: CommonOptions,
+
+    // Calling conventions for the AVR architecture.
+    avr_gnu,
+    avr_builtin,
+    avr_signal,
+    avr_interrupt,
+
+    /// The standard bpf calling convention.
+    bpf_std: CommonOptions,
+
+    // Calling conventions for the csky architecture.
+    csky_sysv: CommonOptions,
+    csky_interrupt: CommonOptions,
+
+    // Calling conventions for the hexagon architecture.
+    hexagon_sysv: CommonOptions,
+    hexagon_sysv_hvx: CommonOptions,
+
+    /// The standard Lanai calling convention.
+    lanai_sysv: CommonOptions,
+
+    /// The standard loongarch64 calling convention.
+    loongarch64_lp64: CommonOptions,
+
+    /// The standard loongarch32 calling convention.
+    loongarch32_ilp32: CommonOptions,
+
+    // Calling conventions for the m68k architecture.
+    m68k_sysv: CommonOptions,
+    m68k_gnu: CommonOptions,
+    m68k_rtd: CommonOptions,
+    m68k_interrupt: CommonOptions,
+
+    /// The standard MSP430 calling convention.
+    msp430_eabi: CommonOptions,
+
+    /// The standard propeller1 calling convention.
+    propeller1_sysv: CommonOptions,
+
+    /// The standard propeller2 calling convention.
+    propeller2_sysv: CommonOptions,
+
+    // Calling conventions for the S390X architecture.
+    s390x_sysv: CommonOptions,
+    s390x_sysv_vx: CommonOptions,
+
+    /// The standard VE calling convention.
+    ve_sysv: CommonOptions,
+
+    // Calling conventions for the xCORE architecture.
+    xcore_xs1: CommonOptions,
+    xcore_xs2: CommonOptions,
+
+    // Calling conventions for the Xtensa architecture.
+    xtensa_call0: CommonOptions,
+    xtensa_windowed: CommonOptions,
+
+    // Calling conventions for the AMDGCN architecture.
+    amdgcn_device: CommonOptions,
+    amdgcn_kernel,
+    amdgcn_cs: CommonOptions,
+
+    // Calling conventions for the NVPTX architecture.
+    nvptx_device,
+    nvptx_kernel,
+
+    // Calling conventions for SPIR-V kernels and shaders.
+    spirv_device,
+    spirv_kernel,
+    spirv_fragment,
+    spirv_vertex,
+
+    /// Options shared across most calling conventions.
+    pub const CommonOptions = struct {
+        /// The boundary the stack is aligned to when the function is called.
+        /// `null` means the default for this calling convention.
+        incoming_stack_alignment: ?u64 = null,
+    };
+
+    /// Options for x86 calling conventions which support the regparm attribute to pass some
+    /// arguments in registers.
+    pub const X86RegparmOptions = struct {
+        /// The boundary the stack is aligned to when the function is called.
+        /// `null` means the default for this calling convention.
+        incoming_stack_alignment: ?u64 = null,
+        /// The number of arguments to pass in registers before passing the remaining arguments
+        /// according to the calling convention.
+        /// Equivalent to `__attribute__((regparm(x)))` in Clang and GCC.
+        register_params: u2 = 0,
+    };
+
+    /// Options for the `arm_interrupt` calling convention.
+    pub const ArmInterruptOptions = struct {
+        /// The boundary the stack is aligned to when the function is called.
+        /// `null` means the default for this calling convention.
+        incoming_stack_alignment: ?u64 = null,
+        /// The kind of interrupt being received.
+        type: InterruptType = .generic,
+
+        pub const InterruptType = enum(u3) {
+            generic,
+            irq,
+            fiq,
+            swi,
+            abort,
+            undef,
+        };
+    };
+
+    /// Options for the `mips_interrupt` and `mips64_interrupt` calling conventions.
+    pub const MipsInterruptOptions = struct {
+        /// The boundary the stack is aligned to when the function is called.
+        /// `null` means the default for this calling convention.
+        incoming_stack_alignment: ?u64 = null,
+        /// The interrupt mode.
+        mode: InterruptMode = .eic,
+
+        pub const InterruptMode = enum(u4) {
+            eic,
+            sw0,
+            sw1,
+            hw0,
+            hw1,
+            hw2,
+            hw3,
+            hw4,
+            hw5,
+        };
+    };
+
+    /// Options for the `riscv32_interrupt` and `riscv64_interrupt` calling conventions.
+    pub const RiscvInterruptOptions = struct {
+        /// The boundary the stack is aligned to when the function is called.
+        /// `null` means the default for this calling convention.
+        incoming_stack_alignment: ?u64 = null,
+        /// The privilege level.
+        level: PrivilegeLevel = .machine,
+
+        pub const PrivilegeLevel = enum(u2) {
+            user,
+            supervisor,
+            machine,
+        };
+    };
+
+    /// Returns the array of `std.Target.Cpu.Arch` to which this `CallingConvention` applies.
+    /// Asserts that `cc` is not `.auto`, `.@"async"`, `.naked`, or `.@"inline"`.
+    pub const archs = std.Target.Cpu.Arch.fromCallconv;
+
+    pub fn eql(a: NewCallingConvention, b: NewCallingConvention) bool {
+        return std.meta.eql(a, b);
+    }
+};
+
 /// This data structure is used by the Zig language code generation and
 /// therefore must be kept in sync with the compiler implementation.
 pub const AddressSpace = enum(u5) {
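A quick illustration of the aliases above (a sketch, assuming an x86_64-linux host; `NewCallingConvention` is the transitional name used while bootstrapping):

```zig
const std = @import("std");
const builtin = @import("builtin");

comptime {
    const cc = std.builtin.NewCallingConvention;
    if (builtin.target.cpu.arch == .x86_64 and builtin.target.os.tag == .linux) {
        // `.c` resolves to the target's default C convention...
        std.debug.assert(cc.c.eql(.{ .x86_64_sysv = .{} }));
        // ...and the deprecated `SysV` alias points at the same tag.
        std.debug.assert(cc.SysV.eql(.{ .x86_64_sysv = .{} }));
    }
}
```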
@@ -1988,7 +1988,7 @@ pub const Key = union(enum) {
         /// Tells whether a parameter is noalias. See `paramIsNoalias` helper
         /// method for accessing this.
         noalias_bits: u32,
-        cc: std.builtin.CallingConvention,
+        cc: std.builtin.NewCallingConvention,
         is_var_args: bool,
         is_generic: bool,
         is_noinline: bool,

@@ -2011,10 +2011,10 @@ pub const Key = union(enum) {
                 a.return_type == b.return_type and
                 a.comptime_bits == b.comptime_bits and
                 a.noalias_bits == b.noalias_bits and
-                a.cc == b.cc and
                 a.is_var_args == b.is_var_args and
                 a.is_generic == b.is_generic and
-                a.is_noinline == b.is_noinline;
+                a.is_noinline == b.is_noinline and
+                std.meta.eql(a.cc, b.cc);
         }

         pub fn hash(self: FuncType, hasher: *Hash, ip: *const InternPool) void {

@@ -5444,7 +5444,7 @@ pub const Tag = enum(u8) {
         flags: Flags,

         pub const Flags = packed struct(u32) {
-            cc: std.builtin.CallingConvention,
+            cc: PackedCallingConvention,
             is_var_args: bool,
             is_generic: bool,
             has_comptime_bits: bool,

@@ -5453,7 +5453,7 @@ pub const Tag = enum(u8) {
             cc_is_generic: bool,
             section_is_generic: bool,
             addrspace_is_generic: bool,
-            _: u16 = 0,
+            _: u6 = 0,
         };
     };

@@ -6912,7 +6912,7 @@ fn extraFuncType(tid: Zcu.PerThread.Id, extra: Local.Extra, extra_index: u32) Ke
         .return_type = type_function.data.return_type,
         .comptime_bits = comptime_bits,
         .noalias_bits = noalias_bits,
-        .cc = type_function.data.flags.cc,
+        .cc = type_function.data.flags.cc.unpack(),
         .is_var_args = type_function.data.flags.is_var_args,
         .is_noinline = type_function.data.flags.is_noinline,
         .cc_is_generic = type_function.data.flags.cc_is_generic,

@@ -8526,7 +8526,7 @@ pub const GetFuncTypeKey = struct {
     comptime_bits: u32 = 0,
     noalias_bits: u32 = 0,
     /// `null` means generic.
-    cc: ?std.builtin.CallingConvention = .Unspecified,
+    cc: ?std.builtin.NewCallingConvention = .auto,
     is_var_args: bool = false,
     is_generic: bool = false,
     is_noinline: bool = false,

@@ -8564,7 +8564,7 @@ pub fn getFuncType(
         .params_len = params_len,
         .return_type = key.return_type,
         .flags = .{
-            .cc = key.cc orelse .Unspecified,
+            .cc = .pack(key.cc orelse .auto),
             .is_var_args = key.is_var_args,
             .has_comptime_bits = key.comptime_bits != 0,
             .has_noalias_bits = key.noalias_bits != 0,

@@ -8668,7 +8668,7 @@ pub const GetFuncDeclKey = struct {
     rbrace_line: u32,
     lbrace_column: u32,
     rbrace_column: u32,
-    cc: ?std.builtin.CallingConvention,
+    cc: ?std.builtin.NewCallingConvention,
     is_noinline: bool,
 };

@@ -8733,7 +8733,7 @@ pub const GetFuncDeclIesKey = struct {
     comptime_bits: u32,
     bare_return_type: Index,
     /// null means generic.
-    cc: ?std.builtin.CallingConvention,
+    cc: ?std.builtin.NewCallingConvention,
     /// null means generic.
     alignment: ?Alignment,
     section_is_generic: bool,

@@ -8818,7 +8818,7 @@ pub fn getFuncDeclIes(
         .params_len = params_len,
         .return_type = error_union_type,
         .flags = .{
-            .cc = key.cc orelse .Unspecified,
+            .cc = .pack(key.cc orelse .auto),
             .is_var_args = key.is_var_args,
             .has_comptime_bits = key.comptime_bits != 0,
             .has_noalias_bits = key.noalias_bits != 0,

@@ -8948,7 +8948,7 @@ pub const GetFuncInstanceKey = struct {
     comptime_args: []const Index,
     noalias_bits: u32,
     bare_return_type: Index,
-    cc: std.builtin.CallingConvention,
+    cc: std.builtin.NewCallingConvention,
     alignment: Alignment,
     section: OptionalNullTerminatedString,
     is_noinline: bool,

@@ -9110,7 +9110,7 @@ pub fn getFuncInstanceIes(
         .params_len = params_len,
         .return_type = error_union_type,
         .flags = .{
-            .cc = .pack(arg.cc),
             .is_var_args = false,
             .has_comptime_bits = false,
             .has_noalias_bits = arg.noalias_bits != 0,

@@ -12224,3 +12224,82 @@ pub fn getErrorValue(
 pub fn getErrorValueIfExists(ip: *const InternPool, name: NullTerminatedString) ?Zcu.ErrorInt {
     return @intFromEnum(ip.global_error_set.getErrorValueIfExists(name) orelse return null);
 }
+
+const PackedCallingConvention = packed struct(u18) {
+    tag: std.builtin.NewCallingConvention.Tag,
+    /// May be ignored depending on `tag`.
+    incoming_stack_alignment: Alignment,
+    /// Interpretation depends on `tag`.
+    extra: u4,
+
+    fn pack(cc: std.builtin.NewCallingConvention) PackedCallingConvention {
+        return switch (cc) {
+            inline else => |pl, tag| switch (@TypeOf(pl)) {
+                void => .{
+                    .tag = tag,
+                    .incoming_stack_alignment = .none, // unused
+                    .extra = 0, // unused
+                },
+                std.builtin.NewCallingConvention.CommonOptions => .{
+                    .tag = tag,
+                    .incoming_stack_alignment = .fromByteUnits(pl.incoming_stack_alignment orelse 0),
+                    .extra = 0, // unused
+                },
+                std.builtin.NewCallingConvention.X86RegparmOptions => .{
+                    .tag = tag,
+                    .incoming_stack_alignment = .fromByteUnits(pl.incoming_stack_alignment orelse 0),
+                    .extra = pl.register_params,
+                },
+                std.builtin.NewCallingConvention.ArmInterruptOptions => .{
+                    .tag = tag,
+                    .incoming_stack_alignment = .fromByteUnits(pl.incoming_stack_alignment orelse 0),
+                    .extra = @intFromEnum(pl.type),
+                },
+                std.builtin.NewCallingConvention.MipsInterruptOptions => .{
+                    .tag = tag,
+                    .incoming_stack_alignment = .fromByteUnits(pl.incoming_stack_alignment orelse 0),
+                    .extra = @intFromEnum(pl.mode),
+                },
+                std.builtin.NewCallingConvention.RiscvInterruptOptions => .{
+                    .tag = tag,
+                    .incoming_stack_alignment = .fromByteUnits(pl.incoming_stack_alignment orelse 0),
+                    .extra = @intFromEnum(pl.level),
+                },
+                else => comptime unreachable,
+            },
+        };
+    }
+
+    fn unpack(cc: PackedCallingConvention) std.builtin.NewCallingConvention {
+        @setEvalBranchQuota(400_000);
+        return switch (cc.tag) {
+            inline else => |tag| @unionInit(
+                std.builtin.NewCallingConvention,
+                @tagName(tag),
+                switch (std.meta.FieldType(std.builtin.NewCallingConvention, tag)) {
+                    void => {},
+                    std.builtin.NewCallingConvention.CommonOptions => .{
+                        .incoming_stack_alignment = cc.incoming_stack_alignment.toByteUnits(),
+                    },
+                    std.builtin.NewCallingConvention.X86RegparmOptions => .{
+                        .incoming_stack_alignment = cc.incoming_stack_alignment.toByteUnits(),
+                        .register_params = @intCast(cc.extra),
+                    },
+                    std.builtin.NewCallingConvention.ArmInterruptOptions => .{
+                        .incoming_stack_alignment = cc.incoming_stack_alignment.toByteUnits(),
+                        .type = @enumFromInt(cc.extra),
+                    },
+                    std.builtin.NewCallingConvention.MipsInterruptOptions => .{
+                        .incoming_stack_alignment = cc.incoming_stack_alignment.toByteUnits(),
+                        .mode = @enumFromInt(cc.extra),
+                    },
+                    std.builtin.NewCallingConvention.RiscvInterruptOptions => .{
+                        .incoming_stack_alignment = cc.incoming_stack_alignment.toByteUnits(),
+                        .level = @enumFromInt(cc.extra),
+                    },
+                    else => comptime unreachable,
+                },
+            ),
+        };
+    }
+};
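The 18-bit packing above works because every payload fits in an `Alignment` plus four extra bits. A self-contained miniature of the same technique (a hypothetical `Mini` type, not from the diff) shows the intended round-trip invariant:

```zig
const std = @import("std");

// Store the union's tag plus a few payload bits in a fixed-width packed struct.
const Mini = union(enum(u2)) {
    plain,
    with_level: struct { level: u4 = 0 },

    const Packed = packed struct(u6) {
        tag: @typeInfo(Mini).@"union".tag_type.?,
        extra: u4,

        fn pack(cc: Mini) Packed {
            return switch (cc) {
                .plain => .{ .tag = .plain, .extra = 0 },
                .with_level => |pl| .{ .tag = .with_level, .extra = pl.level },
            };
        }

        fn unpack(p: Packed) Mini {
            return switch (p.tag) {
                .plain => .plain,
                .with_level => .{ .with_level = .{ .level = p.extra } },
            };
        }
    };
};

test "pack/unpack round-trips" {
    const cc: Mini = .{ .with_level = .{ .level = 3 } };
    try std.testing.expect(std.meta.eql(cc, Mini.Packed.pack(cc).unpack()));
}
```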
src/Sema.zig
@ -26,7 +26,7 @@ owner: AnalUnit,
|
||||
/// in the case of an inline or comptime function call.
|
||||
/// This could be `none`, a `func_decl`, or a `func_instance`.
|
||||
func_index: InternPool.Index,
|
||||
/// Whether the type of func_index has a calling convention of `.Naked`.
|
||||
/// Whether the type of func_index has a calling convention of `.naked`.
|
||||
func_is_naked: bool,
|
||||
/// Used to restore the error return trace when returning a non-error from a function.
|
||||
error_return_trace_index_on_fn_entry: Air.Inst.Ref = .none,
|
||||
@ -1355,7 +1355,7 @@ fn analyzeBodyInner(
|
||||
},
|
||||
.value_placeholder => unreachable, // never appears in a body
|
||||
.field_parent_ptr => try sema.zirFieldParentPtr(block, extended),
|
||||
.builtin_value => try sema.zirBuiltinValue(extended),
|
||||
.builtin_value => try sema.zirBuiltinValue(block, extended),
|
||||
.inplace_arith_result_ty => try sema.zirInplaceArithResultTy(extended),
|
||||
};
|
||||
},
|
||||
@ -2698,6 +2698,20 @@ fn analyzeAsInt(
|
||||
return try val.toUnsignedIntSema(sema.pt);
|
||||
}
|
||||
|
||||
fn analyzeValueAsCallconv(
|
||||
sema: *Sema,
|
||||
block: *Block,
|
||||
src: LazySrcLoc,
|
||||
unresolved_val: Value,
|
||||
) !std.builtin.NewCallingConvention {
|
||||
const resolved_val = try sema.resolveLazyValue(unresolved_val);
|
||||
return resolved_val.interpret(std.builtin.NewCallingConvention, sema.pt) catch |err| switch (err) {
|
||||
error.OutOfMemory => |e| return e,
|
||||
error.UndefinedValue => return sema.failWithUseOfUndef(block, src),
|
||||
error.TypeMismatch => @panic("std.builtin is corrupt"),
|
||||
};
|
||||
}
|
||||
|
||||
/// Given a ZIR extra index which points to a list of `Zir.Inst.Capture`,
|
||||
/// resolves this into a list of `InternPool.CaptureValue` allocated by `arena`.
|
||||
fn getCaptures(sema: *Sema, block: *Block, type_src: LazySrcLoc, extra_index: usize, captures_len: u32) ![]InternPool.CaptureValue {
|
||||
@ -6516,8 +6530,8 @@ fn zirSetAlignStack(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Inst
|
||||
}
|
||||
|
||||
switch (Value.fromInterned(func).typeOf(zcu).fnCallingConvention(zcu)) {
|
||||
.Naked => return sema.fail(block, src, "@setAlignStack in naked function", .{}),
|
||||
.Inline => return sema.fail(block, src, "@setAlignStack in inline function", .{}),
|
||||
.naked => return sema.fail(block, src, "@setAlignStack in naked function", .{}),
|
||||
.@"inline" => return sema.fail(block, src, "@setAlignStack in inline function", .{}),
|
||||
else => {},
|
||||
}
|
||||
|
||||
@ -7554,7 +7568,7 @@ fn analyzeCall(
|
||||
if (try sema.resolveValue(func)) |func_val|
|
||||
if (func_val.isUndef(zcu))
|
||||
return sema.failWithUseOfUndef(block, call_src);
|
||||
if (cc == .Naked) {
|
||||
if (cc == .naked) {
|
||||
const maybe_func_inst = try sema.funcDeclSrcInst(func);
|
||||
const msg = msg: {
|
||||
const msg = try sema.errMsg(
|
||||
@ -7587,7 +7601,7 @@ fn analyzeCall(
|
||||
.async_kw => return sema.failWithUseOfAsync(block, call_src),
|
||||
};
|
||||
|
||||
if (modifier == .never_inline and func_ty_info.cc == .Inline) {
|
||||
if (modifier == .never_inline and func_ty_info.cc == .@"inline") {
|
||||
return sema.fail(block, call_src, "'never_inline' call of inline function", .{});
|
||||
}
|
||||
if (modifier == .always_inline and func_ty_info.is_noinline) {
|
||||
@ -7598,7 +7612,7 @@ fn analyzeCall(
|
||||
|
||||
const is_generic_call = func_ty_info.is_generic;
|
||||
var is_comptime_call = block.is_comptime or modifier == .compile_time;
|
||||
var is_inline_call = is_comptime_call or modifier == .always_inline or func_ty_info.cc == .Inline;
|
||||
var is_inline_call = is_comptime_call or modifier == .always_inline or func_ty_info.cc == .@"inline";
|
||||
var comptime_reason: ?*const Block.ComptimeReason = null;
|
||||
if (!is_inline_call and !is_comptime_call) {
|
||||
if (try Type.fromInterned(func_ty_info.return_type).comptimeOnlySema(pt)) {
|
||||
@ -8455,7 +8469,7 @@ fn instantiateGenericCall(
|
||||
}
|
||||
// Similarly, if the call evaluated to a generic type we need to instead
|
||||
// call it inline.
|
||||
if (func_ty_info.is_generic or func_ty_info.cc == .Inline) {
|
||||
if (func_ty_info.is_generic or func_ty_info.cc == .@"inline") {
|
||||
return error.GenericPoison;
|
||||
}
|
||||
|
||||
@ -9505,8 +9519,8 @@ fn zirFunc(
|
||||
|
||||
// If this instruction has a body, then it's a function declaration, and we decide
|
||||
// the callconv based on whether it is exported. Otherwise, the callconv defaults
|
||||
// to `.Unspecified`.
|
||||
const cc: std.builtin.CallingConvention = if (has_body) cc: {
|
||||
// to `.auto`.
|
||||
const cc: std.builtin.NewCallingConvention = if (has_body) cc: {
|
||||
const func_decl_cau = if (sema.generic_owner != .none) cau: {
|
||||
const generic_owner_fn = zcu.funcInfo(sema.generic_owner);
|
||||
// The generic owner definitely has a `Cau` for the corresponding function declaration.
|
||||
@ -9518,8 +9532,26 @@ fn zirFunc(
|
||||
const zir_decl = sema.code.getDeclaration(decl_inst)[0];
|
||||
break :exported zir_decl.flags.is_export;
|
||||
};
|
||||
break :cc if (fn_is_exported) .C else .Unspecified;
|
||||
} else .Unspecified;
|
||||
if (fn_is_exported) {
|
||||
break :cc target.defaultCCallingConvention() orelse {
|
||||
// This target has no default C calling convention. We sometimes trigger a similar
|
||||
// error by trying to evaluate `std.builtin.CallingConvention.c`, so for consistency,
|
||||
// let's eval that now and just get the transitive error. (It's guaranteed to error
|
||||
// because it does the exact `defaultCCallingConvention` call we just did.)
|
||||
const cc_type = try sema.getBuiltinType("CallingConvention");
|
||||
_ = try sema.namespaceLookupVal(
|
||||
block,
|
||||
LazySrcLoc.unneeded,
|
||||
cc_type.getNamespaceIndex(zcu),
|
||||
try ip.getOrPutString(sema.gpa, pt.tid, "c", .no_embedded_nulls),
|
||||
);
|
||||
// The above should have errored.
|
||||
@panic("std.builtin is corrupt");
|
||||
};
|
||||
} else {
|
||||
break :cc .auto;
|
||||
}
|
||||
} else .auto;
|
||||
|
||||
return sema.funcCommon(
|
||||
block,
|
||||
@ -9654,15 +9686,64 @@ fn handleExternLibName(
|
||||
/// These are calling conventions that are confirmed to work with variadic functions.
|
||||
/// Any calling conventions not included here are either not yet verified to work with variadic
|
||||
/// functions or there are no more other calling conventions that support variadic functions.
|
||||
const calling_conventions_supporting_var_args = [_]std.builtin.CallingConvention{
|
||||
.C,
|
||||
const calling_conventions_supporting_var_args = [_]std.builtin.NewCallingConvention.Tag{
|
||||
.x86_64_sysv,
|
||||
.x86_64_win,
|
||||
.x86_sysv,
|
||||
.x86_win,
|
||||
.aarch64_aapcs,
|
||||
.aarch64_aapcs_darwin,
|
||||
.aarch64_aapcs_win,
|
||||
.aarch64_vfabi,
|
||||
.aarch64_vfabi_sve,
|
||||
.arm_apcs,
|
||||
.arm_aapcs,
|
||||
.arm_aapcs_vfp,
|
||||
.arm_aapcs16_vfp,
|
||||
.mips64_n64,
|
||||
.mips64_n32,
|
||||
.mips_o32,
|
||||
.riscv64_lp64,
|
||||
.riscv64_lp64_v,
|
||||
.riscv32_ilp32,
|
||||
.riscv32_ilp32_v,
|
||||
.sparc64_sysv,
|
||||
.sparc_sysv,
|
||||
.powerpc64_elf,
|
||||
.powerpc64_elf_altivec,
|
||||
.powerpc64_elf_v2,
|
||||
.powerpc_sysv,
|
||||
.powerpc_sysv_altivec,
|
||||
.powerpc_aix,
|
||||
.powerpc_aix_altivec,
|
||||
.wasm_watc,
|
||||
.arc_sysv,
|
||||
.avr_gnu,
|
||||
.bpf_std,
|
||||
.csky_sysv,
|
||||
.hexagon_sysv,
|
||||
.hexagon_sysv_hvx,
|
||||
.lanai_sysv,
|
||||
.loongarch64_lp64,
|
||||
.loongarch32_ilp32,
|
||||
.m68k_sysv,
|
||||
.m68k_gnu,
|
||||
.m68k_rtd,
|
||||
.msp430_eabi,
|
||||
.s390x_sysv,
|
||||
.s390x_sysv_vx,
|
||||
.ve_sysv,
|
||||
.xcore_xs1,
|
||||
.xcore_xs2,
|
||||
.xtensa_call0,
|
||||
.xtensa_windowed,
|
||||
};
|
||||
fn callConvSupportsVarArgs(cc: std.builtin.CallingConvention) bool {
|
||||
fn callConvSupportsVarArgs(cc: std.builtin.NewCallingConvention.Tag) bool {
|
||||
return for (calling_conventions_supporting_var_args) |supported_cc| {
|
||||
if (cc == supported_cc) return true;
|
||||
} else false;
|
||||
}
|
||||
fn checkCallConvSupportsVarArgs(sema: *Sema, block: *Block, src: LazySrcLoc, cc: std.builtin.CallingConvention) CompileError!void {
|
||||
fn checkCallConvSupportsVarArgs(sema: *Sema, block: *Block, src: LazySrcLoc, cc: std.builtin.NewCallingConvention.Tag) CompileError!void {
|
||||
const CallingConventionsSupportingVarArgsList = struct {
|
||||
pub fn format(_: @This(), comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void {
|
||||
_ = fmt;
|
||||
@ -9703,7 +9784,7 @@ fn funcCommon(
|
||||
address_space: ?std.builtin.AddressSpace,
|
||||
section: Section,
|
||||
/// null means generic poison
|
||||
cc: ?std.builtin.CallingConvention,
|
||||
cc: ?std.builtin.NewCallingConvention,
|
||||
/// this might be Type.generic_poison
|
||||
bare_return_type: Type,
|
||||
var_args: bool,
|
||||
@ -9743,7 +9824,7 @@ fn funcCommon(
|
||||
// default values which are only meaningful for the generic function, *not*
|
||||
// the instantiation, which can depend on comptime parameters.
|
||||
// Related proposal: https://github.com/ziglang/zig/issues/11834
|
||||
const cc_resolved = cc orelse .Unspecified;
|
||||
const cc_resolved = cc orelse .auto;
|
||||
var comptime_bits: u32 = 0;
|
||||
for (block.params.items(.ty), block.params.items(.is_comptime), 0..) |param_ty_ip, param_is_comptime, i| {
|
||||
const param_ty = Type.fromInterned(param_ty_ip);
|
||||
@ -9761,10 +9842,10 @@ fn funcCommon(
|
||||
}
|
||||
const this_generic = param_ty.isGenericPoison();
|
||||
is_generic = is_generic or this_generic;
|
||||
if (param_is_comptime and !target_util.fnCallConvAllowsZigTypes(target, cc_resolved)) {
|
||||
if (param_is_comptime and !target_util.fnCallConvAllowsZigTypes(cc_resolved)) {
|
||||
return sema.fail(block, param_src, "comptime parameters not allowed in function with calling convention '{s}'", .{@tagName(cc_resolved)});
|
||||
}
|
||||
if (this_generic and !sema.no_partial_func_ty and !target_util.fnCallConvAllowsZigTypes(target, cc_resolved)) {
|
||||
if (this_generic and !sema.no_partial_func_ty and !target_util.fnCallConvAllowsZigTypes(cc_resolved)) {
|
||||
return sema.fail(block, param_src, "generic parameters not allowed in function with calling convention '{s}'", .{@tagName(cc_resolved)});
|
||||
}
|
||||
if (!param_ty.isValidParamType(zcu)) {
|
||||
@ -9773,7 +9854,7 @@ fn funcCommon(
|
||||
opaque_str, param_ty.fmt(pt),
|
||||
});
|
||||
}
|
||||
if (!this_generic and !target_util.fnCallConvAllowsZigTypes(target, cc_resolved) and !try sema.validateExternType(param_ty, .param_ty)) {
|
||||
if (!this_generic and !target_util.fnCallConvAllowsZigTypes(cc_resolved) and !try sema.validateExternType(param_ty, .param_ty)) {
|
||||
const msg = msg: {
|
||||
const msg = try sema.errMsg(param_src, "parameter of type '{}' not allowed in function with calling convention '{s}'", .{
|
||||
param_ty.fmt(pt), @tagName(cc_resolved),
|
||||
@ -9807,15 +9888,24 @@ fn funcCommon(
|
||||
return sema.fail(block, param_src, "non-pointer parameter declared noalias", .{});
|
||||
}
|
||||
switch (cc_resolved) {
|
||||
.Interrupt => if (target.cpu.arch.isX86()) {
|
||||
.x86_64_interrupt, .x86_interrupt => {
|
||||
const err_code_size = target.ptrBitWidth();
|
||||
switch (i) {
|
||||
0 => if (param_ty.zigTypeTag(zcu) != .pointer) return sema.fail(block, param_src, "first parameter of function with 'Interrupt' calling convention must be a pointer type", .{}),
|
||||
1 => if (param_ty.bitSize(zcu) != err_code_size) return sema.fail(block, param_src, "second parameter of function with 'Interrupt' calling convention must be a {d}-bit integer", .{err_code_size}),
|
||||
else => return sema.fail(block, param_src, "'Interrupt' calling convention supports up to 2 parameters, found {d}", .{i + 1}),
|
||||
}
|
||||
} else return sema.fail(block, param_src, "parameters are not allowed with 'Interrupt' calling convention", .{}),
|
||||
.Signal => return sema.fail(block, param_src, "parameters are not allowed with 'Signal' calling convention", .{}),
|
||||
},
|
||||
.arm_interrupt,
|
||||
.mips64_interrupt,
|
||||
.mips_interrupt,
|
||||
.riscv64_interrupt,
|
||||
.riscv32_interrupt,
|
||||
.avr_interrupt,
|
||||
.csky_interrupt,
|
||||
.m68k_interrupt,
|
||||
=> return sema.fail(block, param_src, "parameters are not allowed with 'Interrupt' calling convention", .{}),
|
||||
.avr_signal => return sema.fail(block, param_src, "parameters are not allowed with 'Signal' calling convention", .{}),
|
||||
else => {},
|
||||
}
|
||||
}
|
||||
@ -10051,7 +10141,7 @@ fn finishFunc(
|
||||
ret_poison: bool,
|
||||
bare_return_type: Type,
|
||||
ret_ty_src: LazySrcLoc,
|
||||
cc_resolved: std.builtin.CallingConvention,
|
||||
cc_resolved: std.builtin.NewCallingConvention,
|
||||
is_source_decl: bool,
|
||||
ret_ty_requires_comptime: bool,
|
||||
func_inst: Zir.Inst.Index,
|
||||
@ -10064,7 +10154,6 @@ fn finishFunc(
|
||||
const zcu = pt.zcu;
|
||||
const ip = &zcu.intern_pool;
|
||||
const gpa = sema.gpa;
|
||||
const target = zcu.getTarget();
|
||||
|
||||
const return_type: Type = if (opt_func_index == .none or ret_poison)
|
||||
bare_return_type
|
||||
@ -10077,7 +10166,7 @@ fn finishFunc(
|
||||
opaque_str, return_type.fmt(pt),
|
||||
});
|
||||
}
|
||||
if (!ret_poison and !target_util.fnCallConvAllowsZigTypes(target, cc_resolved) and
|
||||
if (!ret_poison and !target_util.fnCallConvAllowsZigTypes(cc_resolved) and
|
||||
!try sema.validateExternType(return_type, .ret_ty))
|
||||
{
|
||||
const msg = msg: {
|
||||
@ -10134,56 +10223,50 @@ fn finishFunc(
|
||||
}
|
||||
|
||||
switch (cc_resolved) {
|
||||
.Interrupt, .Signal => if (return_type.zigTypeTag(zcu) != .void and return_type.zigTypeTag(zcu) != .noreturn) {
|
||||
.x86_64_interrupt,
|
||||
.x86_interrupt,
|
||||
.arm_interrupt,
|
||||
.mips64_interrupt,
|
||||
.mips_interrupt,
|
||||
.riscv64_interrupt,
|
||||
.riscv32_interrupt,
|
||||
.avr_interrupt,
|
||||
.csky_interrupt,
|
||||
.m68k_interrupt,
|
||||
.avr_signal,
|
||||
=> if (return_type.zigTypeTag(zcu) != .void and return_type.zigTypeTag(zcu) != .noreturn) {
|
||||
return sema.fail(block, ret_ty_src, "function with calling convention '{s}' must return 'void' or 'noreturn'", .{@tagName(cc_resolved)});
|
||||
},
|
||||
.Inline => if (is_noinline) {
|
||||
return sema.fail(block, cc_src, "'noinline' function cannot have callconv 'Inline'", .{});
|
||||
.@"inline" => if (is_noinline) {
|
||||
return sema.fail(block, cc_src, "'noinline' function cannot have callconv 'inline'", .{});
|
||||
},
|
||||
else => {},
|
||||
}
|
||||
|
||||
const arch = target.cpu.arch;
|
||||
if (@as(?[]const u8, switch (cc_resolved) {
|
||||
.Unspecified, .C, .Naked, .Async, .Inline => null,
|
||||
.Interrupt => switch (arch) {
|
||||
.x86, .x86_64, .avr, .msp430 => null,
|
||||
else => "x86, x86_64, AVR, and MSP430",
|
||||
switch (zcu.callconvSupported(cc_resolved)) {
|
||||
.ok => {},
|
||||
.bad_arch => |allowed_archs| {
|
||||
const ArchListFormatter = struct {
|
||||
archs: []const std.Target.Cpu.Arch,
|
||||
pub fn format(formatter: @This(), comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void {
|
||||
_ = fmt;
|
||||
_ = options;
|
||||
for (formatter.archs, 0..) |arch, i| {
|
||||
if (i != 0)
|
||||
try writer.writeAll(", ");
|
||||
try writer.print("'.{s}'", .{@tagName(arch)});
|
||||
}
|
||||
}
|
||||
};
|
||||
return sema.fail(block, cc_src, "callconv '{s}' only available on architectures {}", .{
|
||||
@tagName(cc_resolved),
|
||||
ArchListFormatter{ .archs = allowed_archs },
|
||||
});
|
||||
},
|
||||
.Signal => switch (arch) {
|
||||
.avr => null,
|
||||
else => "AVR",
|
||||
},
|
||||
.Stdcall, .Fastcall, .Thiscall => switch (arch) {
|
||||
.x86 => null,
|
||||
else => "x86",
|
||||
},
|
||||
.Vectorcall => switch (arch) {
|
||||
.x86, .aarch64, .aarch64_be => null,
|
||||
else => "x86 and AArch64",
|
||||
},
|
||||
.APCS, .AAPCS, .AAPCSVFP => switch (arch) {
|
||||
.arm, .armeb, .aarch64, .aarch64_be, .thumb, .thumbeb => null,
|
||||
else => "ARM",
|
||||
},
|
||||
.SysV, .Win64 => switch (arch) {
|
||||
.x86_64 => null,
|
||||
else => "x86_64",
|
||||
},
|
||||
.Kernel => switch (arch) {
|
||||
.nvptx, .nvptx64, .amdgcn, .spirv, .spirv32, .spirv64 => null,
|
||||
else => "nvptx, amdgcn and SPIR-V",
|
||||
},
|
||||
.Fragment, .Vertex => switch (arch) {
|
||||
.spirv, .spirv32, .spirv64 => null,
|
||||
else => "SPIR-V",
|
||||
},
|
||||
})) |allowed_platform| {
|
||||
return sema.fail(block, cc_src, "callconv '{s}' is only available on {s}, not {s}", .{
|
||||
.bad_backend => |bad_backend| return sema.fail(block, cc_src, "callconv '{s}' not supported by compiler backend '{s}'", .{
|
||||
@tagName(cc_resolved),
|
||||
allowed_platform,
|
||||
@tagName(arch),
|
||||
});
|
||||
@tagName(bad_backend),
|
||||
}),
|
||||
}
|
||||
|
||||
if (is_generic and sema.no_partial_func_ty) return error.GenericPoison;
|
||||
@ -18342,10 +18425,14 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
|
||||
} });
|
||||
|
||||
const callconv_ty = try sema.getBuiltinType("CallingConvention");
|
||||
const callconv_val = Value.uninterpret(func_ty_info.cc, callconv_ty, pt) catch |err| switch (err) {
|
||||
error.TypeMismatch => @panic("std.builtin is corrupt"),
|
||||
error.OutOfMemory => |e| return e,
|
||||
};
|
||||
|
||||
const field_values = .{
|
||||
const field_values: [5]InternPool.Index = .{
|
||||
// calling_convention: CallingConvention,
|
||||
(try pt.enumValueFieldIndex(callconv_ty, @intFromEnum(func_ty_info.cc))).toIntern(),
|
||||
callconv_val.toIntern(),
|
||||
// is_generic: bool,
|
||||
Value.makeBool(func_ty_info.is_generic).toIntern(),
|
||||
// is_var_args: bool,
|
||||
@ -22171,7 +22258,7 @@ fn zirReify(
|
||||
}
|
||||
|
||||
const is_var_args = is_var_args_val.toBool();
|
||||
const cc = zcu.toEnum(std.builtin.CallingConvention, calling_convention_val);
|
||||
const cc = try sema.analyzeValueAsCallconv(block, src, calling_convention_val);
|
||||
if (is_var_args) {
|
||||
try sema.checkCallConvSupportsVarArgs(block, src, cc);
|
||||
}
|
||||
@ -26657,7 +26744,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
|
||||
break :blk .{ .explicit = section_name };
|
||||
} else .default;
|
||||
|
||||
const cc: ?std.builtin.CallingConvention = if (extra.data.bits.has_cc_body) blk: {
|
||||
const cc: ?std.builtin.NewCallingConvention = if (extra.data.bits.has_cc_body) blk: {
|
||||
const body_len = sema.code.extra[extra_index];
|
||||
extra_index += 1;
|
||||
const body = sema.code.bodySlice(extra_index, body_len);
|
||||
@ -26670,7 +26757,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
|
||||
if (val.isGenericPoison()) {
|
||||
break :blk null;
|
||||
}
|
||||
break :blk zcu.toEnum(std.builtin.CallingConvention, val);
|
||||
break :blk try sema.analyzeValueAsCallconv(block, cc_src, val);
|
||||
} else if (extra.data.bits.has_cc_ref) blk: {
|
||||
const cc_ref: Zir.Inst.Ref = @enumFromInt(sema.code.extra[extra_index]);
|
||||
extra_index += 1;
|
||||
@ -26689,7 +26776,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
|
||||
error.GenericPoison => break :blk null,
|
||||
else => |e| return e,
|
||||
};
|
||||
break :blk zcu.toEnum(std.builtin.CallingConvention, cc_val);
|
||||
break :blk try sema.analyzeValueAsCallconv(block, cc_src, cc_val);
|
||||
} else cc: {
|
||||
if (has_body) {
|
||||
const decl_inst = if (sema.generic_owner != .none) decl_inst: {
|
||||
@ -26705,7 +26792,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
|
||||
break :cc .C;
|
||||
}
|
||||
}
|
||||
break :cc .Unspecified;
|
||||
break :cc .auto;
|
||||
};
|
||||
|
||||
const ret_ty: Type = if (extra.data.bits.has_ret_ty_body) blk: {
|
||||
@ -27132,9 +27219,15 @@ fn zirInComptime(
|
||||
return if (block.is_comptime) .bool_true else .bool_false;
|
||||
}
|
||||
|
||||
fn zirBuiltinValue(sema: *Sema, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref {
|
||||
fn zirBuiltinValue(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref {
|
||||
const pt = sema.pt;
|
||||
const zcu = pt.zcu;
|
||||
const gpa = zcu.gpa;
|
||||
const ip = &zcu.intern_pool;
|
||||
|
||||
const src = block.nodeOffset(@bitCast(extended.operand));
|
||||
const value: Zir.Inst.BuiltinValue = @enumFromInt(extended.small);
|
||||
|
||||
const type_name = switch (value) {
|
||||
.atomic_order => "AtomicOrder",
|
||||
.atomic_rmw_op => "AtomicRmwOp",
|
||||
@ -27152,21 +27245,25 @@ fn zirBuiltinValue(sema: *Sema, extended: Zir.Inst.Extended.InstData) CompileErr
|
||||
// Values are handled here.
|
||||
.calling_convention_c => {
|
||||
const callconv_ty = try sema.getBuiltinType("CallingConvention");
|
||||
comptime assert(@intFromEnum(std.builtin.CallingConvention.C) == 1);
|
||||
const val = try pt.intern(.{ .enum_tag = .{
|
||||
.ty = callconv_ty.toIntern(),
|
||||
.int = .one_u8,
|
||||
} });
|
||||
return Air.internedToRef(val);
|
||||
return try sema.namespaceLookupVal(
|
||||
block,
|
||||
src,
|
||||
callconv_ty.getNamespaceIndex(zcu),
|
||||
try ip.getOrPutString(gpa, pt.tid, "c", .no_embedded_nulls),
|
||||
) orelse @panic("std.builtin is corrupt");
|
||||
},
|
||||
.calling_convention_inline => {
|
||||
comptime assert(@typeInfo(std.builtin.NewCallingConvention.Tag).@"enum".tag_type == u8);
|
||||
const callconv_ty = try sema.getBuiltinType("CallingConvention");
|
||||
comptime assert(@intFromEnum(std.builtin.CallingConvention.Inline) == 4);
|
||||
const val = try pt.intern(.{ .enum_tag = .{
|
||||
.ty = callconv_ty.toIntern(),
|
||||
.int = .four_u8,
|
||||
} });
|
||||
return Air.internedToRef(val);
|
||||
const callconv_tag_ty = callconv_ty.unionTagType(zcu) orelse @panic("std.builtin is corrupt");
|
||||
const inline_tag_val = try pt.enumValue(
|
||||
callconv_tag_ty,
|
||||
(try pt.intValue(
|
||||
Type.u8,
|
||||
@intFromEnum(std.builtin.NewCallingConvention.@"inline"),
|
||||
)).toIntern(),
|
);
return sema.coerce(block, callconv_ty, Air.internedToRef(inline_tag_val.toIntern()), src);
},
};
const ty = try sema.getBuiltinType(type_name);
@ -27353,7 +27450,7 @@ fn explainWhyTypeIsComptimeInner(
try sema.errNote(src_loc, msg, "function is generic", .{});
}
switch (fn_info.cc) {
.Inline => try sema.errNote(src_loc, msg, "function has inline calling convention", .{}),
.@"inline" => try sema.errNote(src_loc, msg, "function has inline calling convention", .{}),
else => {},
}
if (Type.fromInterned(fn_info.return_type).comptimeOnly(zcu)) {
@ -27461,13 +27558,12 @@ fn validateExternType(
},
.@"fn" => {
if (position != .other) return false;
const target = zcu.getTarget();
// For now we want to allow PTX kernels to use Zig objects, even if we end up exposing the ABI.
// The goal is to experiment with more integrated CPU/GPU code.
if (ty.fnCallingConvention(zcu) == .Kernel and (target.cpu.arch == .nvptx or target.cpu.arch == .nvptx64)) {
if (ty.fnCallingConvention(zcu) == .nvptx_kernel) {
return true;
}
return !target_util.fnCallConvAllowsZigTypes(target, ty.fnCallingConvention(zcu));
return !target_util.fnCallConvAllowsZigTypes(ty.fnCallingConvention(zcu));
},
.@"enum" => {
return sema.validateExternType(ty.intTagType(zcu), position);
@ -27547,9 +27643,9 @@ fn explainWhyTypeIsNotExtern(
return;
}
switch (ty.fnCallingConvention(zcu)) {
.Unspecified => try sema.errNote(src_loc, msg, "extern function must specify calling convention", .{}),
.Async => try sema.errNote(src_loc, msg, "async function cannot be extern", .{}),
.Inline => try sema.errNote(src_loc, msg, "inline function cannot be extern", .{}),
.auto => try sema.errNote(src_loc, msg, "extern function must specify calling convention", .{}),
.@"async" => try sema.errNote(src_loc, msg, "async function cannot be extern", .{}),
.@"inline" => try sema.errNote(src_loc, msg, "inline function cannot be extern", .{}),
else => return,
}
},
@ -30525,8 +30621,8 @@ const InMemoryCoercionResult = union(enum) {
};

const CC = struct {
actual: std.builtin.CallingConvention,
wanted: std.builtin.CallingConvention,
actual: std.builtin.NewCallingConvention,
wanted: std.builtin.NewCallingConvention,
};

const BitRange = struct {
@ -31176,8 +31272,8 @@ fn coerceInMemoryAllowedFns(
return InMemoryCoercionResult{ .fn_generic = dest_info.is_generic };
}

if (dest_info.cc != src_info.cc) {
return InMemoryCoercionResult{ .fn_cc = .{
if (!callconvCoerceAllowed(target, src_info.cc, dest_info.cc)) {
return .{ .fn_cc = .{
.actual = src_info.cc,
.wanted = dest_info.cc,
} };
@ -31250,6 +31346,44 @@ fn coerceInMemoryAllowedFns(
return .ok;
}

fn callconvCoerceAllowed(
target: std.Target,
src_cc: std.builtin.NewCallingConvention,
dest_cc: std.builtin.NewCallingConvention,
) bool {
const Tag = std.builtin.NewCallingConvention.Tag;
if (@as(Tag, src_cc) != @as(Tag, dest_cc)) return false;

switch (src_cc) {
inline else => |src_data, tag| {
const dest_data = @field(dest_cc, @tagName(tag));
if (@TypeOf(src_data) != void) {
const default_stack_align = target.stackAlignment();
const src_stack_align = src_data.incoming_stack_alignment orelse default_stack_align;
const dest_stack_align = dest_data.incoming_stack_alignment orelse default_stack_align;
if (dest_stack_align < src_stack_align) return false;
}
switch (@TypeOf(src_data)) {
void, std.builtin.NewCallingConvention.CommonOptions => {},
std.builtin.NewCallingConvention.X86RegparmOptions => {
if (src_data.register_params != dest_data.register_params) return false;
},
std.builtin.NewCallingConvention.ArmInterruptOptions => {
if (src_data.type != dest_data.type) return false;
},
std.builtin.NewCallingConvention.MipsInterruptOptions => {
if (src_data.mode != dest_data.mode) return false;
},
std.builtin.NewCallingConvention.RiscvInterruptOptions => {
if (src_data.level != dest_data.level) return false;
},
else => comptime unreachable,
}
},
}
return true;
}
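
For context, a minimal user-level sketch of the rule this implements, assuming the post-#21209 tagged-union `CallingConvention` in `std.builtin` (the `Tag` declaration and the `incoming_stack_alignment` option follow the accepted proposal; not part of this diff):

const std = @import("std");

test "calling conventions coerce only within one tag" {
    // Same tag: the payload options must also be compatible, mirroring
    // the checks in `callconvCoerceAllowed` above.
    const a: std.builtin.CallingConvention = .{ .x86_64_sysv = .{ .incoming_stack_alignment = 8 } };
    const b: std.builtin.CallingConvention = .{ .x86_64_sysv = .{} };
    const Tag = std.builtin.CallingConvention.Tag;
    try std.testing.expect(@as(Tag, a) == @as(Tag, b));
    // Different tags are never coercible, regardless of options.
    const c: std.builtin.CallingConvention = .{ .x86_64_win = .{} };
    try std.testing.expect(@as(Tag, a) != @as(Tag, c));
}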

fn coerceInMemoryAllowedPtrs(
sema: *Sema,
block: *Block,
@ -36306,7 +36440,7 @@ fn resolveInferredErrorSet(
// because an inline function does not create a new declaration, and the ies has been filled by analyzeCall,
// so here we can simply skip this case.
if (ies_func_info.return_type == .generic_poison_type) {
assert(ies_func_info.cc == .Inline);
assert(ies_func_info.cc == .@"inline");
} else if (ip.errorUnionSet(ies_func_info.return_type) == ies_index) {
if (ies_func_info.is_generic) {
return sema.failWithOwnedErrorMsg(block, msg: {

16
src/Type.zig
@ -390,10 +390,14 @@ pub fn print(ty: Type, writer: anytype, pt: Zcu.PerThread) @TypeOf(writer).Error
try writer.writeAll("...");
}
try writer.writeAll(") ");
if (fn_info.cc != .Unspecified) {
try writer.writeAll("callconv(.");
try writer.writeAll(@tagName(fn_info.cc));
try writer.writeAll(") ");
if (fn_info.cc != .auto) print_cc: {
if (zcu.getTarget().defaultCCallingConvention()) |ccc| {
if (fn_info.cc.eql(ccc)) {
try writer.writeAll("callconv(.c) ");
break :print_cc;
}
}
try writer.print("callconv({any}) ", .{fn_info.cc});
}
if (fn_info.return_type == .generic_poison_type) {
try writer.writeAll("anytype");
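
The effect of this hunk, sketched for an assumed x86_64-linux target (where `defaultCCallingConvention()` is `x86_64_sysv`; the exact `{any}` rendering is assumed, not part of this diff):

// fn () void                  -> no callconv printed (cc is .auto)
// fn () callconv(.c) void     -> "callconv(.c) ", since .c resolves to x86_64_sysv here
// fn () callconv(.naked) void -> printed through the generic "{any}" formatting
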
@ -791,7 +795,7 @@ pub fn fnHasRuntimeBitsInner(
const fn_info = zcu.typeToFunc(ty).?;
if (fn_info.is_generic) return false;
if (fn_info.is_var_args) return true;
if (fn_info.cc == .Inline) return false;
if (fn_info.cc == .@"inline") return false;
return !try Type.fromInterned(fn_info.return_type).comptimeOnlyInner(strat, zcu, tid);
}

@ -2489,7 +2493,7 @@ pub fn fnReturnType(ty: Type, zcu: *const Zcu) Type {
}

/// Asserts the type is a function.
pub fn fnCallingConvention(ty: Type, zcu: *const Zcu) std.builtin.CallingConvention {
pub fn fnCallingConvention(ty: Type, zcu: *const Zcu) std.builtin.NewCallingConvention {
return zcu.intern_pool.indexToKey(ty.toIntern()).func_type.cc;
}

160
src/Value.zig
@ -4490,3 +4490,163 @@ pub fn resolveLazy(
else => return val,
}
}

/// Given a `Value` representing a comptime-known value of type `T`, unwrap it into an actual `T` known to the compiler.
/// This is useful for accessing `std.builtin` structures received from comptime logic.
/// `val` must be fully resolved.
pub fn interpret(val: Value, comptime T: type, pt: Zcu.PerThread) error{ OutOfMemory, UndefinedValue, TypeMismatch }!T {
@setEvalBranchQuota(400_000);

const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const ty = val.typeOf(zcu);
if (ty.zigTypeTag(zcu) != @typeInfo(T)) return error.TypeMismatch;
if (val.isUndef(zcu)) return error.UndefinedValue;

return switch (@typeInfo(T)) {
.type,
.noreturn,
.comptime_float,
.comptime_int,
.undefined,
.null,
.@"fn",
.@"opaque",
.enum_literal,
=> comptime unreachable, // comptime-only or otherwise impossible

.pointer,
.array,
.error_union,
.error_set,
.frame,
.@"anyframe",
.vector,
=> comptime unreachable, // unsupported

.void => {},

.bool => switch (val.toIntern()) {
.bool_false => false,
.bool_true => true,
else => unreachable,
},

.int => switch (ip.indexToKey(val.toIntern()).int.storage) {
.lazy_align, .lazy_size => unreachable, // `val` is fully resolved
inline .u64, .i64 => |x| std.math.cast(T, x) orelse return error.TypeMismatch,
.big_int => |big| big.to(T) catch return error.TypeMismatch,
},

.float => val.toFloat(T, zcu),

.optional => |opt| if (val.optionalValue(zcu)) |unwrapped|
try unwrapped.interpret(opt.child, pt)
else
null,

.@"enum" => zcu.toEnum(T, val),

.@"union" => |@"union"| {
const union_obj = zcu.typeToUnion(ty) orelse return error.TypeMismatch;
if (union_obj.field_types.len != @"union".fields.len) return error.TypeMismatch;
const tag_val = val.unionTag(zcu) orelse return error.TypeMismatch;
const tag = try tag_val.interpret(@"union".tag_type.?, pt);
switch (tag) {
inline else => |tag_comptime| {
const Payload = std.meta.FieldType(T, tag_comptime);
const payload = try val.unionValue(zcu).interpret(Payload, pt);
return @unionInit(T, @tagName(tag_comptime), payload);
},
}
},

.@"struct" => |@"struct"| {
if (ty.structFieldCount(zcu) != @"struct".fields.len) return error.TypeMismatch;
var result: T = undefined;
inline for (@"struct".fields, 0..) |field, field_idx| {
const field_val = try val.fieldValue(pt, field_idx);
@field(result, field.name) = try field_val.interpret(field.type, pt);
}
return result;
},
};
}
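
A hypothetical call site (the surrounding names are illustrative, not part of this diff): Sema can unwrap a comptime-known `callconv` operand straight into the compiler's own representation:

// `callconv_val` is assumed to be a fully resolved Value whose type is
// the std.builtin calling convention union.
const cc = try callconv_val.interpret(std.builtin.NewCallingConvention, pt);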

/// Given any `val` and a `Type` corresponding to `@TypeOf(val)`, construct a `Value` representing it which can be used
/// within the compilation. This is useful for passing `std.builtin` structures in the compiler back to the compilation.
/// This is the inverse of `interpret`.
pub fn uninterpret(val: anytype, ty: Type, pt: Zcu.PerThread) error{ OutOfMemory, TypeMismatch }!Value {
@setEvalBranchQuota(400_000);

const T = @TypeOf(val);

const zcu = pt.zcu;
if (ty.zigTypeTag(zcu) != @typeInfo(T)) return error.TypeMismatch;

return switch (@typeInfo(T)) {
.type,
.noreturn,
.comptime_float,
.comptime_int,
.undefined,
.null,
.@"fn",
.@"opaque",
.enum_literal,
=> comptime unreachable, // comptime-only or otherwise impossible

.pointer,
.array,
.error_union,
.error_set,
.frame,
.@"anyframe",
.vector,
=> comptime unreachable, // unsupported

.void => .void,

.bool => if (val) .true else .false,

.int => try pt.intValue(ty, val),

.float => try pt.floatValue(ty, val),

.optional => if (val) |some|
.fromInterned(try pt.intern(.{ .opt = .{
.ty = ty.toIntern(),
.val = (try uninterpret(some, ty.optionalChild(zcu), pt)).toIntern(),
} }))
else
try pt.nullValue(ty),

.@"enum" => try pt.enumValue(ty, (try uninterpret(@intFromEnum(val), ty.intTagType(zcu), pt)).toIntern()),

.@"union" => |@"union"| {
const tag: @"union".tag_type.? = val;
const tag_val = try uninterpret(tag, ty.unionTagType(zcu).?, pt);
const field_ty = ty.unionFieldType(tag_val, zcu) orelse return error.TypeMismatch;
return switch (val) {
inline else => |payload| try pt.unionValue(
ty,
tag_val,
try uninterpret(payload, field_ty, pt),
),
};
},

.@"struct" => |@"struct"| {
if (ty.structFieldCount(zcu) != @"struct".fields.len) return error.TypeMismatch;
var field_vals: [@"struct".fields.len]InternPool.Index = undefined;
inline for (&field_vals, @"struct".fields, 0..) |*field_val, field, field_idx| {
const field_ty = ty.fieldType(field_idx, zcu);
field_val.* = (try uninterpret(@field(val, field.name), field_ty, pt)).toIntern();
}
return .fromInterned(try pt.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = &field_vals },
} }));
},
};
}
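
The intended invariant, stated as a sketch (assuming a fully resolved `val: Value` of type `ty` that `interpret` accepts as `T`):

// const x = try val.interpret(T, pt);
// const val2 = try uninterpret(x, ty, pt);
// `val2` then represents the same value as `val` (up to interning).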

90
src/Zcu.zig
@ -3539,3 +3539,93 @@ pub fn maybeUnresolveIes(zcu: *Zcu, func_index: InternPool.Index) !void {
zcu.intern_pool.funcSetIesResolved(func_index, .none);
}
}

pub fn callconvSupported(zcu: *Zcu, cc: std.builtin.NewCallingConvention) union(enum) {
ok,
bad_arch: []const std.Target.Cpu.Arch, // value is allowed archs for cc
bad_backend: std.builtin.CompilerBackend, // value is current backend
} {
const target = zcu.getTarget();
const backend = target_util.zigBackend(target, zcu.comp.config.use_llvm);
switch (cc) {
.auto, .@"inline" => return .ok,
.@"async" => return .{ .bad_backend = backend }, // nothing supports async currently
.naked => {}, // depends only on backend
else => for (cc.archs()) |allowed_arch| {
if (allowed_arch == target.cpu.arch) break;
} else return .{ .bad_arch = cc.archs() },
}
const backend_ok = switch (backend) {
.stage1 => unreachable,
.other => unreachable,
_ => unreachable,

.stage2_llvm => @import("codegen/llvm.zig").toLlvmCallConv(cc, target) != null,
.stage2_c => ok: {
if (target.defaultCCallingConvention()) |default_c| {
if (cc.eql(default_c)) {
break :ok true;
}
}
break :ok switch (cc) {
.x86_64_vectorcall,
.x86_fastcall,
.x86_thiscall,
.x86_vectorcall,
=> |opts| opts.incoming_stack_alignment == null,

.x86_stdcall,
=> |opts| opts.incoming_stack_alignment == null and opts.register_params == 0,

.naked => true,

else => false,
};
},
.stage2_wasm => switch (cc) {
.wasm_watc => |opts| opts.incoming_stack_alignment == null,
else => false,
},
.stage2_arm => switch (cc) {
.arm_aapcs => |opts| opts.incoming_stack_alignment == null,
.naked => true,
else => false,
},
.stage2_x86_64 => switch (cc) {
.x86_64_sysv, .x86_64_win, .naked => true,
else => false,
},
.stage2_aarch64 => switch (cc) {
.aarch64_aapcs => |opts| opts.incoming_stack_alignment == null,
.naked => true,
else => false,
},
.stage2_x86 => switch (cc) {
.x86_sysv,
.x86_win,
=> |opts| opts.incoming_stack_alignment == null and opts.register_params == 0,
.naked => true,
else => false,
},
.stage2_riscv64 => switch (cc) {
.riscv64_lp64 => |opts| opts.incoming_stack_alignment == null,
.naked => true,
else => false,
},
.stage2_sparc64 => switch (cc) {
.sparc64_sysv => |opts| opts.incoming_stack_alignment == null,
.naked => true,
else => false,
},
.stage2_spirv64 => switch (cc) {
.spirv_device,
.spirv_kernel,
.spirv_fragment,
.spirv_vertex,
=> true,
else => false,
},
};
if (!backend_ok) return .{ .bad_backend = backend };
return .ok;
}
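
A hedged sketch of how a caller might consume the result (the error wording is assumed, not from this diff):

switch (zcu.callconvSupported(cc)) {
    .ok => {},
    .bad_arch => |archs| {
        // e.g. report "calling convention only available on ..." using `archs`
    },
    .bad_backend => |backend| {
        // e.g. report "calling convention not supported by backend ..." using `backend`
    },
}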

@ -2090,7 +2090,7 @@ fn analyzeFnBody(pt: Zcu.PerThread, func_index: InternPool.Index) Zcu.SemaError!
.code = zir,
.owner = anal_unit,
.func_index = func_index,
.func_is_naked = fn_ty_info.cc == .Naked,
.func_is_naked = fn_ty_info.cc == .naked,
.fn_ret_ty = Type.fromInterned(fn_ty_info.return_type),
.fn_ret_ty_ies = null,
.branch_quota = @max(func.branchQuotaUnordered(ip), Sema.default_branch_quota),

@ -468,7 +468,7 @@ fn gen(self: *Self) !void {
const pt = self.pt;
const zcu = pt.zcu;
const cc = self.fn_type.fnCallingConvention(zcu);
if (cc != .Naked) {
if (cc != .naked) {
// stp fp, lr, [sp, #-16]!
_ = try self.addInst(.{
.tag = .stp,
@ -6229,14 +6229,14 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
const ret_ty = fn_ty.fnReturnType(zcu);

switch (cc) {
.Naked => {
.naked => {
assert(result.args.len == 0);
result.return_value = .{ .unreach = {} };
result.stack_byte_count = 0;
result.stack_align = 1;
return result;
},
.C => {
.aarch64_aapcs, .aarch64_aapcs_darwin, .aarch64_aapcs_win => {
// ARM64 Procedure Call Standard
var ncrn: usize = 0; // Next Core Register Number
var nsaa: u32 = 0; // Next stacked argument address
@ -6266,7 +6266,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {

// We round up NCRN only for non-Apple platforms which allow the 16-byte aligned
// values to spread across odd-numbered registers.
if (Type.fromInterned(ty).abiAlignment(zcu) == .@"16" and !self.target.isDarwin()) {
if (Type.fromInterned(ty).abiAlignment(zcu) == .@"16" and cc != .aarch64_aapcs_darwin) {
// Round up NCRN to the next even number
ncrn += ncrn % 2;
}
@ -6298,7 +6298,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
result.stack_byte_count = nsaa;
result.stack_align = 16;
},
.Unspecified => {
.auto => {
if (ret_ty.zigTypeTag(zcu) == .noreturn) {
result.return_value = .{ .unreach = {} };
} else if (!ret_ty.hasRuntimeBitsIgnoreComptime(zcu) and !ret_ty.isError(zcu)) {

@ -475,7 +475,7 @@ fn gen(self: *Self) !void {
const pt = self.pt;
const zcu = pt.zcu;
const cc = self.fn_type.fnCallingConvention(zcu);
if (cc != .Naked) {
if (cc != .naked) {
// push {fp, lr}
const push_reloc = try self.addNop();

@ -6196,14 +6196,14 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
const ret_ty = fn_ty.fnReturnType(zcu);

switch (cc) {
.Naked => {
.naked => {
assert(result.args.len == 0);
result.return_value = .{ .unreach = {} };
result.stack_byte_count = 0;
result.stack_align = 1;
return result;
},
.C => {
.arm_aapcs => {
// ARM Procedure Call Standard, Chapter 6.5
var ncrn: usize = 0; // Next Core Register Number
var nsaa: u32 = 0; // Next stacked argument address
@ -6254,7 +6254,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
result.stack_byte_count = nsaa;
result.stack_align = 8;
},
.Unspecified => {
.auto => {
if (ret_ty.zigTypeTag(zcu) == .noreturn) {
result.return_value = .{ .unreach = {} };
} else if (!ret_ty.hasRuntimeBitsIgnoreComptime(zcu) and !ret_ty.isError(zcu)) {

@ -977,7 +977,7 @@ pub fn generateLazy(
.pt = pt,
.allocator = gpa,
.mir = mir,
.cc = .Unspecified,
.cc = .auto,
.src_loc = src_loc,
.output_mode = comp.config.output_mode,
.link_mode = comp.config.link_mode,
@ -1036,7 +1036,7 @@ fn formatWipMir(
.instructions = data.func.mir_instructions.slice(),
.frame_locs = data.func.frame_locs.slice(),
},
.cc = .Unspecified,
.cc = .auto,
.src_loc = data.func.src_loc,
.output_mode = comp.config.output_mode,
.link_mode = comp.config.link_mode,
@ -1238,7 +1238,7 @@ fn gen(func: *Func) !void {
}
}

if (fn_info.cc != .Naked) {
if (fn_info.cc != .naked) {
_ = try func.addPseudo(.pseudo_dbg_prologue_end);

const backpatch_stack_alloc = try func.addPseudo(.pseudo_dead);
@ -4894,7 +4894,7 @@ fn genCall(
.lib => |lib| try pt.funcType(.{
.param_types = lib.param_types,
.return_type = lib.return_type,
.cc = .C,
.cc = func.target.defaultCCallingConvention().?,
}),
};

@ -8289,12 +8289,12 @@ fn resolveCallingConventionValues(
const ret_ty = Type.fromInterned(fn_info.return_type);

switch (cc) {
.Naked => {
.naked => {
assert(result.args.len == 0);
result.return_value = InstTracking.init(.unreach);
result.stack_align = .@"8";
},
.C, .Unspecified => {
.riscv64_lp64, .auto => {
if (result.args.len > 8) {
return func.fail("RISC-V calling convention does not support more than 8 arguments", .{});
}
@ -8359,7 +8359,7 @@ fn resolveCallingConventionValues(

for (param_types, result.args) |ty, *arg| {
if (!ty.hasRuntimeBitsIgnoreComptime(zcu)) {
assert(cc == .Unspecified);
assert(cc == .auto);
arg.* = .none;
continue;
}

@ -6,7 +6,7 @@ link_mode: std.builtin.LinkMode,
pic: bool,
allocator: Allocator,
mir: Mir,
cc: std.builtin.CallingConvention,
cc: std.builtin.NewCallingConvention,
err_msg: ?*ErrorMsg = null,
src_loc: Zcu.LazySrcLoc,
result_insts_len: u8 = undefined,

@ -366,7 +366,7 @@ fn gen(self: *Self) !void {
const pt = self.pt;
const zcu = pt.zcu;
const cc = self.fn_type.fnCallingConvention(zcu);
if (cc != .Naked) {
if (cc != .naked) {
// TODO Finish function prologue and epilogue for sparc64.

// save %sp, stack_reserved_area, %sp
@ -4441,14 +4441,14 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type, role: RegisterView)
const ret_ty = fn_ty.fnReturnType(zcu);

switch (cc) {
.Naked => {
.naked => {
assert(result.args.len == 0);
result.return_value = .{ .unreach = {} };
result.stack_byte_count = 0;
result.stack_align = .@"1";
return result;
},
.Unspecified, .C => {
.auto, .sparc64_sysv => {
// SPARC Compliance Definition 2.4.1, Chapter 3
// Low-Level System Information (64-bit psABI) - Function Calling Sequence

@ -1145,7 +1145,7 @@ fn ensureAllocLocal(func: *CodeGen, ty: Type) InnerError!WValue {
/// Memory is owned by the caller.
fn genFunctype(
gpa: Allocator,
cc: std.builtin.CallingConvention,
cc: std.builtin.NewCallingConvention,
params: []const InternPool.Index,
return_type: Type,
pt: Zcu.PerThread,
@ -1160,7 +1160,7 @@ fn genFunctype(
if (firstParamSRet(cc, return_type, pt, target)) {
try temp_params.append(.i32); // memory address is always a 32-bit handle
} else if (return_type.hasRuntimeBitsIgnoreComptime(zcu)) {
if (cc == .C) {
if (cc == .wasm_watc) {
const res_classes = abi.classifyType(return_type, zcu);
assert(res_classes[0] == .direct and res_classes[1] == .none);
const scalar_type = abi.scalarType(return_type, zcu);
@ -1178,7 +1178,7 @@ fn genFunctype(
if (!param_type.hasRuntimeBitsIgnoreComptime(zcu)) continue;

switch (cc) {
.C => {
.wasm_watc => {
const param_classes = abi.classifyType(param_type, zcu);
if (param_classes[1] == .none) {
if (param_classes[0] == .direct) {
@ -1367,7 +1367,7 @@ fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWV
.args = &.{},
.return_value = .none,
};
if (cc == .Naked) return result;
if (cc == .naked) return result;

var args = std.ArrayList(WValue).init(func.gpa);
defer args.deinit();
@ -1382,7 +1382,7 @@ fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWV
}

switch (cc) {
.Unspecified => {
.auto => {
for (fn_info.param_types.get(ip)) |ty| {
if (!Type.fromInterned(ty).hasRuntimeBitsIgnoreComptime(zcu)) {
continue;
@ -1392,7 +1392,7 @@ fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWV
func.local_index += 1;
}
},
.C => {
.wasm_watc => {
for (fn_info.param_types.get(ip)) |ty| {
const ty_classes = abi.classifyType(Type.fromInterned(ty), zcu);
for (ty_classes) |class| {
@ -1408,10 +1408,11 @@ fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWV
return result;
}

fn firstParamSRet(cc: std.builtin.CallingConvention, return_type: Type, pt: Zcu.PerThread, target: std.Target) bool {
fn firstParamSRet(cc: std.builtin.NewCallingConvention, return_type: Type, pt: Zcu.PerThread, target: std.Target) bool {
switch (cc) {
.Unspecified, .Inline => return isByRef(return_type, pt, target),
.C => {
.@"inline" => unreachable,
.auto => return isByRef(return_type, pt, target),
.wasm_watc => {
const ty_classes = abi.classifyType(return_type, pt.zcu);
if (ty_classes[0] == .indirect) return true;
if (ty_classes[0] == .direct and ty_classes[1] == .direct) return true;
@ -1423,8 +1424,8 @@ fn firstParamSRet(cc: std.builtin.CallingConvention, return_type: Type, pt: Zcu.

/// Lowers a Zig type and its value based on a given calling convention to ensure
/// it matches the ABI.
fn lowerArg(func: *CodeGen, cc: std.builtin.CallingConvention, ty: Type, value: WValue) !void {
if (cc != .C) {
fn lowerArg(func: *CodeGen, cc: std.builtin.NewCallingConvention, ty: Type, value: WValue) !void {
if (cc != .wasm_watc) {
return func.lowerToStack(value);
}

@ -2108,7 +2109,7 @@ fn airRet(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
// to the stack instead
if (func.return_value != .none) {
try func.store(func.return_value, operand, ret_ty, 0);
} else if (fn_info.cc == .C and ret_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
} else if (fn_info.cc == .wasm_watc and ret_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
switch (ret_ty.zigTypeTag(zcu)) {
// Aggregate types can be lowered as a singular value
.@"struct", .@"union" => {
@ -2286,7 +2287,7 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif
} else if (first_param_sret) {
break :result_value sret;
// TODO: Make this less fragile and optimize
} else if (zcu.typeToFunc(fn_ty).?.cc == .C and ret_ty.zigTypeTag(zcu) == .@"struct" or ret_ty.zigTypeTag(zcu) == .@"union") {
} else if (zcu.typeToFunc(fn_ty).?.cc == .wasm_watc and ret_ty.zigTypeTag(zcu) == .@"struct" or ret_ty.zigTypeTag(zcu) == .@"union") {
const result_local = try func.allocLocal(ret_ty);
try func.addLabel(.local_set, result_local.local.value);
const scalar_type = abi.scalarType(ret_ty, zcu);
@ -2565,7 +2566,7 @@ fn airArg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const arg = func.args[arg_index];
const cc = zcu.typeToFunc(zcu.navValue(func.owner_nav).typeOf(zcu)).?.cc;
const arg_ty = func.typeOfIndex(inst);
if (cc == .C) {
if (cc == .wasm_watc) {
const arg_classes = abi.classifyType(arg_ty, zcu);
for (arg_classes) |class| {
if (class != .none) {
@ -7175,12 +7176,12 @@ fn callIntrinsic(
// Always pass over C-ABI
const pt = func.pt;
const zcu = pt.zcu;
var func_type = try genFunctype(func.gpa, .C, param_types, return_type, pt, func.target.*);
var func_type = try genFunctype(func.gpa, .{ .wasm_watc = .{} }, param_types, return_type, pt, func.target.*);
defer func_type.deinit(func.gpa);
const func_type_index = try func.bin_file.zigObjectPtr().?.putOrGetFuncType(func.gpa, func_type);
try func.bin_file.addOrUpdateImport(name, symbol_index, null, func_type_index);

const want_sret_param = firstParamSRet(.C, return_type, pt, func.target.*);
const want_sret_param = firstParamSRet(.{ .wasm_watc = .{} }, return_type, pt, func.target.*);
// if we want return as first param, we allocate a pointer to stack,
// and emit it as our first argument
const sret = if (want_sret_param) blk: {
@ -7193,7 +7194,7 @@ fn callIntrinsic(
for (args, 0..) |arg, arg_i| {
assert(!(want_sret_param and arg == .stack));
assert(Type.fromInterned(param_types[arg_i]).hasRuntimeBitsIgnoreComptime(zcu));
try func.lowerArg(.C, Type.fromInterned(param_types[arg_i]), arg);
try func.lowerArg(.{ .wasm_watc = .{} }, Type.fromInterned(param_types[arg_i]), arg);
}

// Actually call our intrinsic

@ -918,13 +918,13 @@ pub fn generate(
);
function.va_info = switch (cc) {
else => undefined,
.SysV => .{ .sysv = .{
.x86_64_sysv => .{ .sysv = .{
.gp_count = call_info.gp_count,
.fp_count = call_info.fp_count,
.overflow_arg_area = .{ .index = .args_frame, .off = call_info.stack_byte_count },
.reg_save_area = undefined,
} },
.Win64 => .{ .win64 = .{} },
.x86_64_win => .{ .win64 = .{} },
};

function.gen() catch |err| switch (err) {
@ -1053,7 +1053,7 @@ pub fn generateLazy(
.bin_file = bin_file,
.allocator = gpa,
.mir = mir,
.cc = abi.resolveCallingConvention(.Unspecified, function.target.*),
.cc = abi.resolveCallingConvention(.auto, function.target.*),
.src_loc = src_loc,
.output_mode = comp.config.output_mode,
.link_mode = comp.config.link_mode,
@ -1159,7 +1159,7 @@ fn formatWipMir(
.extra = data.self.mir_extra.items,
.frame_locs = (std.MultiArrayList(Mir.FrameLoc){}).slice(),
},
.cc = .Unspecified,
.cc = .auto,
.src_loc = data.self.src_loc,
.output_mode = comp.config.output_mode,
.link_mode = comp.config.link_mode,
@ -2023,7 +2023,7 @@ fn gen(self: *Self) InnerError!void {
const zcu = pt.zcu;
const fn_info = zcu.typeToFunc(self.fn_type).?;
const cc = abi.resolveCallingConvention(fn_info.cc, self.target.*);
if (cc != .Naked) {
if (cc != .naked) {
try self.asmRegister(.{ ._, .push }, .rbp);
try self.asmPseudoImmediate(.pseudo_cfi_adjust_cfa_offset_i_s, Immediate.s(8));
try self.asmPseudoRegisterImmediate(.pseudo_cfi_rel_offset_ri_s, .rbp, Immediate.s(0));
@ -2056,7 +2056,7 @@ fn gen(self: *Self) InnerError!void {
}

if (fn_info.is_var_args) switch (cc) {
.SysV => {
.x86_64_sysv => {
const info = &self.va_info.sysv;
const reg_save_area_fi = try self.allocFrameIndex(FrameAlloc.init(.{
.size = abi.SysV.c_abi_int_param_regs.len * 8 +
@ -2089,7 +2089,7 @@ fn gen(self: *Self) InnerError!void {

self.performReloc(skip_sse_reloc);
},
.Win64 => return self.fail("TODO implement gen var arg function for Win64", .{}),
.x86_64_win => return self.fail("TODO implement gen var arg function for Win64", .{}),
else => unreachable,
};

@ -2541,7 +2541,7 @@ fn genLazy(self: *Self, lazy_sym: link.File.LazySymbol) InnerError!void {
const enum_ty = Type.fromInterned(lazy_sym.ty);
wip_mir_log.debug("{}.@tagName:", .{enum_ty.fmt(pt)});

const resolved_cc = abi.resolveCallingConvention(.Unspecified, self.target.*);
const resolved_cc = abi.resolveCallingConvention(.auto, self.target.*);
const param_regs = abi.getCAbiIntParamRegs(resolved_cc);
const param_locks = self.register_manager.lockRegsAssumeUnused(2, param_regs[0..2].*);
defer for (param_locks) |lock| self.register_manager.unlockReg(lock);
@ -2694,7 +2694,7 @@ fn setFrameLoc(
offset.* += self.frame_allocs.items(.abi_size)[frame_i];
}

fn computeFrameLayout(self: *Self, cc: std.builtin.CallingConvention) !FrameLayout {
fn computeFrameLayout(self: *Self, cc: std.builtin.NewCallingConvention) !FrameLayout {
const frame_allocs_len = self.frame_allocs.len;
try self.frame_locs.resize(self.gpa, frame_allocs_len);
const stack_frame_order = try self.gpa.alloc(FrameIndex, frame_allocs_len - FrameIndex.named_count);
@ -3006,11 +3006,10 @@ pub fn spillEflagsIfOccupied(self: *Self) !void {
}
}

pub fn spillCallerPreservedRegs(self: *Self, cc: std.builtin.CallingConvention) !void {
pub fn spillCallerPreservedRegs(self: *Self, cc: std.builtin.NewCallingConvention) !void {
switch (cc) {
inline .SysV, .Win64 => |known_cc| try self.spillRegisters(
comptime abi.getCallerPreservedRegs(known_cc),
),
.x86_64_sysv => try self.spillRegisters(abi.getCallerPreservedRegs(.{ .x86_64_sysv = .{} })),
.x86_64_win => try self.spillRegisters(abi.getCallerPreservedRegs(.{ .x86_64_win = .{} })),
else => unreachable,
}
}
@ -12384,7 +12383,7 @@ fn genCall(self: *Self, info: union(enum) {
.lib => |lib| try pt.funcType(.{
.param_types = lib.param_types,
.return_type = lib.return_type,
.cc = .C,
.cc = self.target.defaultCCallingConvention().?,
}),
};
const fn_info = zcu.typeToFunc(fn_ty).?;
@ -12543,7 +12542,7 @@ fn genCall(self: *Self, info: union(enum) {
src_arg,
.{},
),
.C, .SysV, .Win64 => {
.x86_64_sysv, .x86_64_win => {
const promoted_ty = self.promoteInt(arg_ty);
const promoted_abi_size: u32 = @intCast(promoted_ty.abiSize(zcu));
const dst_alias = registerAlias(dst_reg, promoted_abi_size);
@ -16822,7 +16821,7 @@ fn airTagName(self: *Self, inst: Air.Inst.Index) !void {
const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const inst_ty = self.typeOfIndex(inst);
const enum_ty = self.typeOf(un_op);
const resolved_cc = abi.resolveCallingConvention(.Unspecified, self.target.*);
const resolved_cc = abi.resolveCallingConvention(.auto, self.target.*);

// We need a properly aligned and sized call frame to be able to call this function.
{
@ -18915,7 +18914,7 @@ fn airVaStart(self: *Self, inst: Air.Inst.Index) !void {
self.fn_type.fnCallingConvention(zcu),
self.target.*,
)) {
.SysV => result: {
.x86_64_sysv => result: {
const info = self.va_info.sysv;
const dst_fi = try self.allocFrameIndex(FrameAlloc.initSpill(va_list_ty, zcu));
var field_off: u31 = 0;
@ -18957,7 +18956,7 @@ fn airVaStart(self: *Self, inst: Air.Inst.Index) !void {
field_off += @intCast(ptr_anyopaque_ty.abiSize(zcu));
break :result .{ .load_frame = .{ .index = dst_fi } };
},
.Win64 => return self.fail("TODO implement c_va_start for Win64", .{}),
.x86_64_win => return self.fail("TODO implement c_va_start for Win64", .{}),
else => unreachable,
};
return self.finishAir(inst, result, .{ .none, .none, .none });
@ -18976,7 +18975,7 @@ fn airVaArg(self: *Self, inst: Air.Inst.Index) !void {
self.fn_type.fnCallingConvention(zcu),
self.target.*,
)) {
.SysV => result: {
.x86_64_sysv => result: {
try self.spillEflagsIfOccupied();

const tmp_regs =
@ -19155,7 +19154,7 @@ fn airVaArg(self: *Self, inst: Air.Inst.Index) !void {
);
break :result promote_mcv;
},
.Win64 => return self.fail("TODO implement c_va_arg for Win64", .{}),
.x86_64_win => return self.fail("TODO implement c_va_arg for Win64", .{}),
else => unreachable,
};
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
@ -19324,12 +19323,12 @@ fn resolveCallingConventionValues(

const resolved_cc = abi.resolveCallingConvention(cc, self.target.*);
switch (cc) {
.Naked => {
.naked => {
assert(result.args.len == 0);
result.return_value = InstTracking.init(.unreach);
result.stack_align = .@"8";
},
.C, .SysV, .Win64 => {
.x86_64_sysv, .x86_64_win => {
var ret_int_reg_i: u32 = 0;
var ret_sse_reg_i: u32 = 0;
var param_int_reg_i: u32 = 0;
@ -19337,8 +19336,8 @@ fn resolveCallingConventionValues(
result.stack_align = .@"16";

switch (resolved_cc) {
.SysV => {},
.Win64 => {
.x86_64_sysv => {},
.x86_64_win => {
// Align the stack to 16 bytes before allocating shadow stack space (if any).
result.stack_byte_count += @intCast(4 * Type.usize.abiSize(zcu));
},
@ -19356,8 +19355,8 @@ fn resolveCallingConventionValues(
var ret_tracking_i: usize = 0;

const classes = switch (resolved_cc) {
.SysV => mem.sliceTo(&abi.classifySystemV(ret_ty, zcu, self.target.*, .ret), .none),
.Win64 => &.{abi.classifyWindows(ret_ty, zcu)},
.x86_64_sysv => mem.sliceTo(&abi.classifySystemV(ret_ty, zcu, self.target.*, .ret), .none),
.x86_64_win => &.{abi.classifyWindows(ret_ty, zcu)},
else => unreachable,
};
for (classes) |class| switch (class) {
@ -19419,8 +19418,8 @@ fn resolveCallingConventionValues(
for (param_types, result.args) |ty, *arg| {
assert(ty.hasRuntimeBitsIgnoreComptime(zcu));
switch (resolved_cc) {
.SysV => {},
.Win64 => {
.x86_64_sysv => {},
.x86_64_win => {
param_int_reg_i = @max(param_int_reg_i, param_sse_reg_i);
param_sse_reg_i = param_int_reg_i;
},
@ -19431,8 +19430,8 @@ fn resolveCallingConventionValues(
var arg_mcv_i: usize = 0;

const classes = switch (resolved_cc) {
.SysV => mem.sliceTo(&abi.classifySystemV(ty, zcu, self.target.*, .arg), .none),
.Win64 => &.{abi.classifyWindows(ty, zcu)},
.x86_64_sysv => mem.sliceTo(&abi.classifySystemV(ty, zcu, self.target.*, .arg), .none),
.x86_64_win => &.{abi.classifyWindows(ty, zcu)},
else => unreachable,
};
for (classes) |class| switch (class) {
@ -19464,11 +19463,11 @@ fn resolveCallingConventionValues(
},
.sseup => assert(arg_mcv[arg_mcv_i - 1].register.class() == .sse),
.x87, .x87up, .complex_x87, .memory, .win_i128 => switch (resolved_cc) {
.SysV => switch (class) {
.x86_64_sysv => switch (class) {
.x87, .x87up, .complex_x87, .memory => break,
else => unreachable,
},
.Win64 => if (ty.abiSize(zcu) > 8) {
.x86_64_win => if (ty.abiSize(zcu) > 8) {
const param_int_reg =
abi.getCAbiIntParamRegs(resolved_cc)[param_int_reg_i].to64();
param_int_reg_i += 1;
@ -19530,7 +19529,7 @@ fn resolveCallingConventionValues(
assert(param_sse_reg_i <= 16);
result.fp_count = param_sse_reg_i;
},
.Unspecified => {
.auto => {
result.stack_align = .@"16";

// Return values

@ -6,7 +6,7 @@ link_mode: std.builtin.LinkMode,
pic: bool,
allocator: std.mem.Allocator,
mir: Mir,
cc: std.builtin.CallingConvention,
cc: std.builtin.NewCallingConvention,
err_msg: ?*Zcu.ErrorMsg = null,
src_loc: Zcu.LazySrcLoc,
result_insts_len: u8 = undefined,

@ -436,62 +436,62 @@ pub const Win64 = struct {
};

pub fn resolveCallingConvention(
cc: std.builtin.CallingConvention,
cc: std.builtin.NewCallingConvention,
target: std.Target,
) std.builtin.CallingConvention {
) std.builtin.NewCallingConvention {
return switch (cc) {
.Unspecified, .C => switch (target.os.tag) {
else => .SysV,
.windows => .Win64,
.auto => switch (target.os.tag) {
else => .{ .x86_64_sysv = .{} },
.windows => .{ .x86_64_win = .{} },
},
else => cc,
};
}
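
Illustrative resolution of `.auto` under the new scheme (target values assumed):

// resolveCallingConvention(.auto, x86_64-linux)   => .{ .x86_64_sysv = .{} }
// resolveCallingConvention(.auto, x86_64-windows) => .{ .x86_64_win = .{} }
// Any explicit convention (e.g. .{ .x86_64_win = .{} }) passes through unchanged.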

pub fn getCalleePreservedRegs(cc: std.builtin.CallingConvention) []const Register {
pub fn getCalleePreservedRegs(cc: std.builtin.NewCallingConvention) []const Register {
return switch (cc) {
.SysV => &SysV.callee_preserved_regs,
.Win64 => &Win64.callee_preserved_regs,
.x86_64_sysv => &SysV.callee_preserved_regs,
.x86_64_win => &Win64.callee_preserved_regs,
else => unreachable,
};
}

pub fn getCallerPreservedRegs(cc: std.builtin.CallingConvention) []const Register {
pub fn getCallerPreservedRegs(cc: std.builtin.NewCallingConvention) []const Register {
return switch (cc) {
.SysV => &SysV.caller_preserved_regs,
.Win64 => &Win64.caller_preserved_regs,
.x86_64_sysv => &SysV.caller_preserved_regs,
.x86_64_win => &Win64.caller_preserved_regs,
else => unreachable,
};
}

pub fn getCAbiIntParamRegs(cc: std.builtin.CallingConvention) []const Register {
pub fn getCAbiIntParamRegs(cc: std.builtin.NewCallingConvention) []const Register {
return switch (cc) {
.SysV => &SysV.c_abi_int_param_regs,
.Win64 => &Win64.c_abi_int_param_regs,
.x86_64_sysv => &SysV.c_abi_int_param_regs,
.x86_64_win => &Win64.c_abi_int_param_regs,
else => unreachable,
};
}

pub fn getCAbiSseParamRegs(cc: std.builtin.CallingConvention) []const Register {
pub fn getCAbiSseParamRegs(cc: std.builtin.NewCallingConvention) []const Register {
return switch (cc) {
.SysV => &SysV.c_abi_sse_param_regs,
.Win64 => &Win64.c_abi_sse_param_regs,
.x86_64_sysv => &SysV.c_abi_sse_param_regs,
.x86_64_win => &Win64.c_abi_sse_param_regs,
else => unreachable,
};
}

pub fn getCAbiIntReturnRegs(cc: std.builtin.CallingConvention) []const Register {
pub fn getCAbiIntReturnRegs(cc: std.builtin.NewCallingConvention) []const Register {
return switch (cc) {
.SysV => &SysV.c_abi_int_return_regs,
.Win64 => &Win64.c_abi_int_return_regs,
.x86_64_sysv => &SysV.c_abi_int_return_regs,
.x86_64_win => &Win64.c_abi_int_return_regs,
else => unreachable,
};
}

pub fn getCAbiSseReturnRegs(cc: std.builtin.CallingConvention) []const Register {
pub fn getCAbiSseReturnRegs(cc: std.builtin.NewCallingConvention) []const Register {
return switch (cc) {
.SysV => &SysV.c_abi_sse_return_regs,
.Win64 => &Win64.c_abi_sse_return_regs,
.x86_64_sysv => &SysV.c_abi_sse_return_regs,
.x86_64_win => &Win64.c_abi_sse_return_regs,
else => unreachable,
};
}

@ -1783,7 +1783,7 @@ pub const DeclGen = struct {
const fn_ctype = try dg.ctypeFromType(fn_ty, kind);

const fn_info = zcu.typeToFunc(fn_ty).?;
if (fn_info.cc == .Naked) {
if (fn_info.cc == .naked) {
switch (kind) {
.forward => try w.writeAll("zig_naked_decl "),
.complete => try w.writeAll("zig_naked "),
@ -1796,7 +1796,7 @@ pub const DeclGen = struct {

var trailing = try renderTypePrefix(dg.pass, &dg.ctype_pool, zcu, w, fn_ctype, .suffix, .{});

if (toCallingConvention(fn_info.cc)) |call_conv| {
if (toCallingConvention(fn_info.cc, zcu)) |call_conv| {
try w.print("{}zig_callconv({s})", .{ trailing, call_conv });
trailing = .maybe_space;
}
@ -7604,12 +7604,17 @@ fn writeMemoryOrder(w: anytype, order: std.builtin.AtomicOrder) !void {
return w.writeAll(toMemoryOrder(order));
}

fn toCallingConvention(call_conv: std.builtin.CallingConvention) ?[]const u8 {
return switch (call_conv) {
.Stdcall => "stdcall",
.Fastcall => "fastcall",
.Vectorcall => "vectorcall",
else => null,
fn toCallingConvention(cc: std.builtin.NewCallingConvention, zcu: *Zcu) ?[]const u8 {
return switch (cc) {
.auto, .naked => null,
.x86_stdcall => "stdcall",
.x86_fastcall => "fastcall",
.x86_vectorcall, .x86_64_vectorcall => "vectorcall",
else => {
// `Zcu.callconvSupported` means this must be the C callconv.
assert(cc.eql(zcu.getTarget().defaultCCallingConvention().?));
return null;
},
};
}
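
A sketch of the resulting C declarations (the expansion of the `zig_callconv` macro is assumed to come from the C backend's headers, not this diff):

// fn f() callconv(.{ .x86_stdcall = .{} }) void -> "zig_callconv(stdcall)" on the prototype
// fn f() callconv(.c) void                      -> no zig_callconv(...) emitted (returns null)
// fn f() callconv(.naked) void                  -> "zig_naked"/"zig_naked_decl", handled earlier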

@ -1159,7 +1159,7 @@ pub const Object = struct {
}

{
var module_flags = try std.ArrayList(Builder.Metadata).initCapacity(o.gpa, 6);
var module_flags = try std.ArrayList(Builder.Metadata).initCapacity(o.gpa, 7);
defer module_flags.deinit();

const behavior_error = try o.builder.metadataConstant(try o.builder.intConst(.i32, 1));
@ -1233,6 +1233,18 @@ pub const Object = struct {
}
}

const target = comp.root_mod.resolved_target.result;
if (target.os.tag == .windows and (target.cpu.arch == .x86_64 or target.cpu.arch == .x86)) {
// Add the "RegCallv4" flag so that any functions using `x86_regcallcc` use regcall
// v4, which is essentially a requirement on Windows. See corresponding logic in
// `toLlvmCallConvTag`.
module_flags.appendAssumeCapacity(try o.builder.metadataModuleFlag(
behavior_max,
try o.builder.metadataString("RegCallv4"),
try o.builder.metadataConstant(.@"1"),
));
}

try o.builder.metadataNamed(try o.builder.metadataString("llvm.module.flags"), module_flags.items);
}

@ -1467,14 +1479,6 @@ pub const Object = struct {
_ = try attributes.removeFnAttr(.@"noinline");
}

const stack_alignment = func.analysisUnordered(ip).stack_alignment;
if (stack_alignment != .none) {
try attributes.addFnAttr(.{ .alignstack = stack_alignment.toLlvm() }, &o.builder);
try attributes.addFnAttr(.@"noinline", &o.builder);
} else {
_ = try attributes.removeFnAttr(.alignstack);
}

if (func_analysis.branch_hint == .cold) {
try attributes.addFnAttr(.cold, &o.builder);
} else {
@ -1486,7 +1490,7 @@ pub const Object = struct {
} else {
_ = try attributes.removeFnAttr(.sanitize_thread);
}
const is_naked = fn_info.cc == .Naked;
const is_naked = fn_info.cc == .naked;
if (owner_mod.fuzz and !func_analysis.disable_instrumentation and !is_naked) {
try attributes.addFnAttr(.optforfuzzing, &o.builder);
_ = try attributes.removeFnAttr(.skipprofile);
@ -1784,7 +1788,7 @@ pub const Object = struct {
.liveness = liveness,
.ng = &ng,
.wip = wip,
.is_naked = fn_info.cc == .Naked,
.is_naked = fn_info.cc == .naked,
.fuzz = fuzz,
.ret_ptr = ret_ptr,
.args = args.items,
@ -3038,14 +3042,33 @@ pub const Object = struct {
llvm_arg_i += 1;
}

switch (fn_info.cc) {
.Unspecified, .Inline => function_index.setCallConv(.fastcc, &o.builder),
.Naked => try attributes.addFnAttr(.naked, &o.builder),
.Async => {
function_index.setCallConv(.fastcc, &o.builder);
@panic("TODO: LLVM backend lower async function");
},
else => function_index.setCallConv(toLlvmCallConv(fn_info.cc, target), &o.builder),
if (fn_info.cc == .@"async") {
@panic("TODO: LLVM backend lower async function");
}

{
const cc_info = toLlvmCallConv(fn_info.cc, target).?;

function_index.setCallConv(cc_info.llvm_cc, &o.builder);

if (cc_info.align_stack) {
try attributes.addFnAttr(.{ .alignstack = .fromByteUnits(target.stackAlignment()) }, &o.builder);
} else {
_ = try attributes.removeFnAttr(.alignstack);
}

if (cc_info.naked) {
try attributes.addFnAttr(.naked, &o.builder);
} else {
_ = try attributes.removeFnAttr(.naked);
}

for (0..cc_info.inreg_param_count) |param_idx| {
try attributes.addParamAttr(param_idx, .inreg, &o.builder);
}
for (cc_info.inreg_param_count..std.math.maxInt(u2)) |param_idx| {
_ = try attributes.removeParamAttr(param_idx, .inreg);
}
}

if (resolved.alignment != .none)
@ -3061,7 +3084,7 @@ pub const Object = struct {
// suppress generation of the prologue and epilogue, and the prologue is where the
// frame pointer normally gets set up. At time of writing, this is the case for at
// least x86 and RISC-V.
owner_mod.omit_frame_pointer or fn_info.cc == .Naked,
owner_mod.omit_frame_pointer or fn_info.cc == .naked,
);

if (fn_info.return_type == .noreturn_type) try attributes.addFnAttr(.noreturn, &o.builder);
@ -4618,9 +4641,16 @@ pub const Object = struct {
if (!param_ty.isPtrLikeOptional(zcu) and !ptr_info.flags.is_allowzero) {
try attributes.addParamAttr(llvm_arg_i, .nonnull, &o.builder);
}
if (fn_info.cc == .Interrupt) {
const child_type = try lowerType(o, Type.fromInterned(ptr_info.child));
try attributes.addParamAttr(llvm_arg_i, .{ .byval = child_type }, &o.builder);
switch (fn_info.cc) {
else => {},
.x86_64_interrupt,
.x86_interrupt,
.avr_interrupt,
.m68k_interrupt,
=> {
const child_type = try lowerType(o, Type.fromInterned(ptr_info.child));
try attributes.addParamAttr(llvm_arg_i, .{ .byval = child_type }, &o.builder);
},
}
if (ptr_info.flags.is_const) {
try attributes.addParamAttr(llvm_arg_i, .readonly, &o.builder);
@ -5677,7 +5707,7 @@ pub const FuncGen = struct {
.always_tail => .musttail,
.async_kw, .no_async, .always_inline, .compile_time => unreachable,
},
toLlvmCallConv(fn_info.cc, target),
toLlvmCallConvTag(fn_info.cc, target).?,
try attributes.finish(&o.builder),
try o.lowerType(zig_fn_ty),
llvm_fn,
@ -5756,7 +5786,7 @@ pub const FuncGen = struct {
_ = try fg.wip.callIntrinsicAssumeCold();
_ = try fg.wip.call(
.normal,
toLlvmCallConv(fn_info.cc, target),
toLlvmCallConvTag(fn_info.cc, target).?,
.none,
panic_global.typeOf(&o.builder),
panic_global.toValue(&o.builder),
@ -11554,36 +11584,146 @@ fn toLlvmAtomicRmwBinOp(
};
}

fn toLlvmCallConv(cc: std.builtin.CallingConvention, target: std.Target) Builder.CallConv {
return switch (cc) {
.Unspecified, .Inline, .Async => .fastcc,
.C, .Naked => .ccc,
.Stdcall => .x86_stdcallcc,
.Fastcall => .x86_fastcallcc,
.Vectorcall => return switch (target.cpu.arch) {
.x86, .x86_64 => .x86_vectorcallcc,
.aarch64, .aarch64_be => .aarch64_vector_pcs,
const CallingConventionInfo = struct {
/// The LLVM calling convention to use.
llvm_cc: Builder.CallConv,
/// Whether to use an `alignstack` attribute to forcibly re-align the stack pointer in the function's prologue.
align_stack: bool,
/// Whether the function needs a `naked` attribute.
naked: bool,
/// How many leading parameters to apply the `inreg` attribute to.
inreg_param_count: u2 = 0,
};

pub fn toLlvmCallConv(cc: std.builtin.NewCallingConvention, target: std.Target) ?CallingConventionInfo {
const llvm_cc = toLlvmCallConvTag(cc, target) orelse return null;
const incoming_stack_alignment: ?u64, const register_params: u2 = switch (cc) {
inline else => |pl| switch (@TypeOf(pl)) {
void => .{ null, 0 },
std.builtin.NewCallingConvention.CommonOptions => .{ pl.incoming_stack_alignment, 0 },
std.builtin.NewCallingConvention.X86RegparmOptions => .{ pl.incoming_stack_alignment, pl.register_params },
else => unreachable,
},
.Thiscall => .x86_thiscallcc,
.APCS => .arm_apcscc,
.AAPCS => .arm_aapcscc,
.AAPCSVFP => .arm_aapcs_vfpcc,
.Interrupt => return switch (target.cpu.arch) {
.x86, .x86_64 => .x86_intrcc,
.avr => .avr_intrcc,
.msp430 => .msp430_intrcc,
else => unreachable,
},
.Signal => .avr_signalcc,
.SysV => .x86_64_sysvcc,
.Win64 => .win64cc,
.Kernel => return switch (target.cpu.arch) {
.nvptx, .nvptx64 => .ptx_kernel,
.amdgcn => .amdgpu_kernel,
else => unreachable,
},
.Vertex, .Fragment => unreachable,
};
return .{
.llvm_cc = llvm_cc,
.align_stack = if (incoming_stack_alignment) |a| need_align: {
const normal_stack_align = target.stackAlignment();
break :need_align a < normal_stack_align;
} else false,
.naked = cc == .naked,
.inreg_param_count = register_params,
};
}
fn toLlvmCallConvTag(cc_tag: std.builtin.NewCallingConvention.Tag, target: std.Target) ?Builder.CallConv {
if (target.defaultCCallingConvention()) |default_c| {
if (cc_tag == default_c) {
return .ccc;
}
}
return switch (cc_tag) {
.@"inline" => unreachable,
.auto, .@"async" => .fastcc,
.naked => .ccc,
.x86_64_sysv => .x86_64_sysvcc,
.x86_64_win => .win64cc,
.x86_64_regcall_v3_sysv => if (target.cpu.arch == .x86_64 and target.os.tag != .windows)
.x86_regcallcc
else
null,
.x86_64_regcall_v4_win => if (target.cpu.arch == .x86_64 and target.os.tag == .windows)
.x86_regcallcc // we use the "RegCallv4" module flag to make this correct
else
null,
.x86_64_vectorcall => .x86_vectorcallcc,
.x86_64_interrupt => .x86_intrcc,
.x86_stdcall => .x86_stdcallcc,
.x86_fastcall => .x86_fastcallcc,
.x86_thiscall => .x86_thiscallcc,
.x86_regcall_v3 => if (target.cpu.arch == .x86 and target.os.tag != .windows)
.x86_regcallcc
else
null,
.x86_regcall_v4_win => if (target.cpu.arch == .x86 and target.os.tag == .windows)
.x86_regcallcc // we use the "RegCallv4" module flag to make this correct
else
null,
.x86_vectorcall => .x86_vectorcallcc,
.x86_interrupt => .x86_intrcc,
.aarch64_vfabi => .aarch64_vector_pcs,
.aarch64_vfabi_sve => .aarch64_sve_vector_pcs,
.arm_apcs => .arm_apcscc,
.arm_aapcs => .arm_aapcscc,
.arm_aapcs_vfp => .arm_aapcs_vfpcc,
.riscv64_lp64_v => .riscv_vectorcallcc,
.riscv32_ilp32_v => .riscv_vectorcallcc,
.avr_builtin => .avr_builtincc,
.avr_signal => .avr_signalcc,
.avr_interrupt => .avr_intrcc,
.m68k_rtd => .m68k_rtdcc,
.m68k_interrupt => .m68k_intrcc,
.amdgcn_kernel => .amdgpu_kernel,
.amdgcn_cs => .amdgpu_cs,
.nvptx_device => .ptx_device,
.nvptx_kernel => .ptx_kernel,

// All the calling conventions which LLVM does not have a general representation for.
// Note that these are often still supported through the `defaultCCallingConvention` path above via `ccc`.
.x86_sysv,
.x86_win,
.x86_thiscall_mingw,
.aarch64_aapcs,
.aarch64_aapcs_darwin,
.aarch64_aapcs_win,
.arm_aapcs16_vfp,
.arm_interrupt,
.mips64_n64,
.mips64_n32,
.mips64_interrupt,
.mips_o32,
.mips_interrupt,
.riscv64_lp64,
.riscv64_interrupt,
.riscv32_ilp32,
.riscv32_interrupt,
.sparc64_sysv,
.sparc_sysv,
.powerpc64_elf,
.powerpc64_elf_altivec,
.powerpc64_elf_v2,
.powerpc_sysv,
.powerpc_sysv_altivec,
.powerpc_aix,
.powerpc_aix_altivec,
.wasm_watc,
.arc_sysv,
.avr_gnu,
.bpf_std,
.csky_sysv,
.csky_interrupt,
.hexagon_sysv,
.hexagon_sysv_hvx,
.lanai_sysv,
.loongarch64_lp64,
.loongarch32_ilp32,
.m68k_sysv,
.m68k_gnu,
.msp430_eabi,
.propeller1_sysv,
.propeller2_sysv,
.s390x_sysv,
.s390x_sysv_vx,
.ve_sysv,
.xcore_xs1,
.xcore_xs2,
.xtensa_call0,
.xtensa_windowed,
.amdgcn_device,
.spirv_device,
.spirv_kernel,
.spirv_fragment,
.spirv_vertex,
=> null,
};
}
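
Expected mappings under this scheme, for a few assumed targets:

// toLlvmCallConvTag(.x86_64_sysv, x86_64-linux)  => .ccc  (it is the default C cc there)
// toLlvmCallConvTag(.x86_64_sysv, aarch64-linux) => .x86_64_sysvcc
// toLlvmCallConvTag(.wasm_watc,   x86_64-linux)  => null  (no general LLVM representation)
// toLlvmCallConvTag(.wasm_watc,   wasm32-wasi)   => .ccc  (default C cc on wasm)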
|
||||
|
||||
@ -11711,31 +11851,27 @@ fn firstParamSRet(fn_info: InternPool.Key.FuncType, zcu: *Zcu, target: std.Targe
|
||||
if (!return_type.hasRuntimeBitsIgnoreComptime(zcu)) return false;
|
||||
|
||||
return switch (fn_info.cc) {
|
||||
.Unspecified, .Inline => returnTypeByRef(zcu, target, return_type),
|
||||
.C => switch (target.cpu.arch) {
|
||||
.mips, .mipsel => switch (mips_c_abi.classifyType(return_type, zcu, .ret)) {
|
||||
.memory, .i32_array => true,
|
||||
.byval => false,
|
||||
},
|
||||
.x86 => isByRef(return_type, zcu),
|
||||
.x86_64 => switch (target.os.tag) {
|
||||
.windows => x86_64_abi.classifyWindows(return_type, zcu) == .memory,
|
||||
else => firstParamSRetSystemV(return_type, zcu, target),
|
||||
},
|
||||
.wasm32 => wasm_c_abi.classifyType(return_type, zcu)[0] == .indirect,
|
||||
.aarch64, .aarch64_be => aarch64_c_abi.classifyType(return_type, zcu) == .memory,
|
||||
.arm, .armeb => switch (arm_c_abi.classifyType(return_type, zcu, .ret)) {
|
||||
.memory, .i64_array => true,
|
||||
.i32_array => |size| size != 1,
|
||||
.byval => false,
|
||||
},
|
||||
.riscv32, .riscv64 => riscv_c_abi.classifyType(return_type, zcu) == .memory,
|
||||
else => false, // TODO investigate C ABI for other architectures
|
||||
.auto => returnTypeByRef(zcu, target, return_type),
|
||||
.x86_64_sysv => firstParamSRetSystemV(return_type, zcu, target),
|
||||
.x86_64_win => x86_64_abi.classifyWindows(return_type, zcu) == .memory,
|
||||
.x86_sysv, .x86_win => isByRef(return_type, zcu),
|
||||
.x86_stdcall => !isScalar(zcu, return_type),
|
||||
.wasm_watc => wasm_c_abi.classifyType(return_type, zcu)[0] == .indirect,
|
||||
.aarch64_aapcs,
|
||||
.aarch64_aapcs_darwin,
|
||||
.aarch64_aapcs_win,
|
||||
=> aarch64_c_abi.classifyType(return_type, zcu) == .memory,
|
||||
.arm_aapcs => switch (arm_c_abi.classifyType(return_type, zcu, .ret)) {
|
||||
.memory, .i64_array => true,
|
||||
.i32_array => |size| size != 1,
|
||||
.byval => false,
|
||||
},
|
||||
.SysV => firstParamSRetSystemV(return_type, zcu, target),
|
||||
.Win64 => x86_64_abi.classifyWindows(return_type, zcu) == .memory,
|
||||
.Stdcall => !isScalar(zcu, return_type),
|
||||
else => false,
|
||||
.riscv64_lp64, .riscv32_ilp32 => riscv_c_abi.classifyType(return_type, zcu) == .memory,
|
||||
.mips64_n64, .mips64_n32, .mips_o32 => switch (mips_c_abi.classifyType(return_type, zcu, .ret)) {
|
||||
.memory, .i32_array => true,
|
||||
.byval => false,
|
||||
},
|
||||
else => false, // TODO: investigate other targets/callconvs
|
||||
};
|
||||
}

@ -11761,82 +11897,64 @@ fn lowerFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) Allocator.Error!Bu
}
const target = zcu.getTarget();
switch (fn_info.cc) {
.Unspecified,
.Inline,
=> return if (returnTypeByRef(zcu, target, return_type)) .void else o.lowerType(return_type),
.@"inline" => unreachable,
.auto => return if (returnTypeByRef(zcu, target, return_type)) .void else o.lowerType(return_type),

.C => {
switch (target.cpu.arch) {
.mips, .mipsel => {
switch (mips_c_abi.classifyType(return_type, zcu, .ret)) {
.memory, .i32_array => return .void,
.byval => return o.lowerType(return_type),
}
},
.x86 => return if (isByRef(return_type, zcu)) .void else o.lowerType(return_type),
.x86_64 => switch (target.os.tag) {
.windows => return lowerWin64FnRetTy(o, fn_info),
else => return lowerSystemVFnRetTy(o, fn_info),
},
.wasm32 => {
if (isScalar(zcu, return_type)) {
return o.lowerType(return_type);
}
const classes = wasm_c_abi.classifyType(return_type, zcu);
if (classes[0] == .indirect or classes[0] == .none) {
return .void;
}

assert(classes[0] == .direct and classes[1] == .none);
const scalar_type = wasm_c_abi.scalarType(return_type, zcu);
return o.builder.intType(@intCast(scalar_type.abiSize(zcu) * 8));
},
.aarch64, .aarch64_be => {
switch (aarch64_c_abi.classifyType(return_type, zcu)) {
.memory => return .void,
.float_array => return o.lowerType(return_type),
.byval => return o.lowerType(return_type),
.integer => return o.builder.intType(@intCast(return_type.bitSize(zcu))),
.double_integer => return o.builder.arrayType(2, .i64),
}
},
.arm, .armeb => {
switch (arm_c_abi.classifyType(return_type, zcu, .ret)) {
.memory, .i64_array => return .void,
.i32_array => |len| return if (len == 1) .i32 else .void,
.byval => return o.lowerType(return_type),
}
},
.riscv32, .riscv64 => {
switch (riscv_c_abi.classifyType(return_type, zcu)) {
.memory => return .void,
.integer => {
return o.builder.intType(@intCast(return_type.bitSize(zcu)));
},
.double_integer => {
return o.builder.structType(.normal, &.{ .i64, .i64 });
},
.byval => return o.lowerType(return_type),
.fields => {
var types_len: usize = 0;
var types: [8]Builder.Type = undefined;
for (0..return_type.structFieldCount(zcu)) |field_index| {
const field_ty = return_type.fieldType(field_index, zcu);
if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
types[types_len] = try o.lowerType(field_ty);
types_len += 1;
}
return o.builder.structType(.normal, types[0..types_len]);
},
}
},
// TODO investigate C ABI for other architectures
else => return o.lowerType(return_type),
}
.x86_64_sysv => return lowerSystemVFnRetTy(o, fn_info),
.x86_64_win => return lowerWin64FnRetTy(o, fn_info),
.x86_stdcall => return if (isScalar(zcu, return_type)) o.lowerType(return_type) else .void,
.x86_sysv, .x86_win => return if (isByRef(return_type, zcu)) .void else o.lowerType(return_type),
.aarch64_aapcs, .aarch64_aapcs_darwin, .aarch64_aapcs_win => switch (aarch64_c_abi.classifyType(return_type, zcu)) {
.memory => return .void,
.float_array => return o.lowerType(return_type),
.byval => return o.lowerType(return_type),
.integer => return o.builder.intType(@intCast(return_type.bitSize(zcu))),
.double_integer => return o.builder.arrayType(2, .i64),
},
.Win64 => return lowerWin64FnRetTy(o, fn_info),
.SysV => return lowerSystemVFnRetTy(o, fn_info),
.Stdcall => return if (isScalar(zcu, return_type)) o.lowerType(return_type) else .void,
.arm_aapcs => switch (arm_c_abi.classifyType(return_type, zcu, .ret)) {
.memory, .i64_array => return .void,
.i32_array => |len| return if (len == 1) .i32 else .void,
.byval => return o.lowerType(return_type),
},
.mips64_n64, .mips64_n32, .mips_o32 => switch (mips_c_abi.classifyType(return_type, zcu, .ret)) {
.memory, .i32_array => return .void,
.byval => return o.lowerType(return_type),
},
.riscv64_lp64, .riscv32_ilp32 => switch (riscv_c_abi.classifyType(return_type, zcu)) {
.memory => return .void,
.integer => {
return o.builder.intType(@intCast(return_type.bitSize(zcu)));
},
.double_integer => {
return o.builder.structType(.normal, &.{ .i64, .i64 });
},
.byval => return o.lowerType(return_type),
.fields => {
var types_len: usize = 0;
var types: [8]Builder.Type = undefined;
for (0..return_type.structFieldCount(zcu)) |field_index| {
const field_ty = return_type.fieldType(field_index, zcu);
if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
types[types_len] = try o.lowerType(field_ty);
types_len += 1;
}
return o.builder.structType(.normal, types[0..types_len]);
},
},
.wasm_watc => {
if (isScalar(zcu, return_type)) {
return o.lowerType(return_type);
}
const classes = wasm_c_abi.classifyType(return_type, zcu);
if (classes[0] == .indirect or classes[0] == .none) {
return .void;
}

assert(classes[0] == .direct and classes[1] == .none);
const scalar_type = wasm_c_abi.scalarType(return_type, zcu);
return o.builder.intType(@intCast(scalar_type.abiSize(zcu) * 8));
},
// TODO investigate other callconvs
else => return o.lowerType(return_type),
}
}
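
The renamed tags `.@"inline"` and `.@"async"` use quoted identifiers because `inline` and `async` are Zig keywords. A self-contained sketch of the syntax, with a hypothetical enum and predicate:

const std = @import("std");

const Cc = enum { auto, naked, @"inline", @"async" };

// Hypothetical predicate: keyword-named tags must be spelled @"..." in
// both the declaration and every switch prong.
fn isMachineCall(cc: Cc) bool {
    return switch (cc) {
        .auto, .naked => true,
        .@"inline", .@"async" => false,
    };
}

test isMachineCall {
    try std.testing.expect(!isMachineCall(.@"inline"));
}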
@ -11989,7 +12107,8 @@ const ParamTypeIterator = struct {
return .no_bits;
}
switch (it.fn_info.cc) {
.Unspecified, .Inline => {
.@"inline" => unreachable,
.auto => {
it.zig_index += 1;
it.llvm_index += 1;
if (ty.isSlice(zcu) or
@ -12010,97 +12129,12 @@ const ParamTypeIterator = struct {
return .byval;
}
},
.Async => {
.@"async" => {
@panic("TODO implement async function lowering in the LLVM backend");
},
.C => switch (target.cpu.arch) {
.mips, .mipsel => {
it.zig_index += 1;
it.llvm_index += 1;
switch (mips_c_abi.classifyType(ty, zcu, .arg)) {
.memory => {
it.byval_attr = true;
return .byref;
},
.byval => return .byval,
.i32_array => |size| return Lowering{ .i32_array = size },
}
},
.x86_64 => switch (target.os.tag) {
.windows => return it.nextWin64(ty),
else => return it.nextSystemV(ty),
},
.wasm32 => {
it.zig_index += 1;
it.llvm_index += 1;
if (isScalar(zcu, ty)) {
return .byval;
}
const classes = wasm_c_abi.classifyType(ty, zcu);
if (classes[0] == .indirect) {
return .byref;
}
return .abi_sized_int;
},
.aarch64, .aarch64_be => {
it.zig_index += 1;
it.llvm_index += 1;
switch (aarch64_c_abi.classifyType(ty, zcu)) {
.memory => return .byref_mut,
.float_array => |len| return Lowering{ .float_array = len },
.byval => return .byval,
.integer => {
it.types_len = 1;
it.types_buffer[0] = .i64;
return .multiple_llvm_types;
},
.double_integer => return Lowering{ .i64_array = 2 },
}
},
.arm, .armeb => {
it.zig_index += 1;
it.llvm_index += 1;
switch (arm_c_abi.classifyType(ty, zcu, .arg)) {
.memory => {
it.byval_attr = true;
return .byref;
},
.byval => return .byval,
.i32_array => |size| return Lowering{ .i32_array = size },
.i64_array => |size| return Lowering{ .i64_array = size },
}
},
.riscv32, .riscv64 => {
it.zig_index += 1;
it.llvm_index += 1;
switch (riscv_c_abi.classifyType(ty, zcu)) {
.memory => return .byref_mut,
.byval => return .byval,
.integer => return .abi_sized_int,
.double_integer => return Lowering{ .i64_array = 2 },
.fields => {
it.types_len = 0;
for (0..ty.structFieldCount(zcu)) |field_index| {
const field_ty = ty.fieldType(field_index, zcu);
if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
it.types_buffer[it.types_len] = try it.object.lowerType(field_ty);
it.types_len += 1;
}
it.llvm_index += it.types_len - 1;
return .multiple_llvm_types;
},
}
},
// TODO investigate C ABI for other architectures
else => {
it.zig_index += 1;
it.llvm_index += 1;
return .byval;
},
},
.Win64 => return it.nextWin64(ty),
.SysV => return it.nextSystemV(ty),
.Stdcall => {
.x86_64_sysv => return it.nextSystemV(ty),
.x86_64_win => return it.nextWin64(ty),
.x86_stdcall => {
it.zig_index += 1;
it.llvm_index += 1;

@ -12111,6 +12145,80 @@ const ParamTypeIterator = struct {
return .byref;
}
},
.aarch64_aapcs, .aarch64_aapcs_darwin, .aarch64_aapcs_win => {
it.zig_index += 1;
it.llvm_index += 1;
switch (aarch64_c_abi.classifyType(ty, zcu)) {
.memory => return .byref_mut,
.float_array => |len| return Lowering{ .float_array = len },
.byval => return .byval,
.integer => {
it.types_len = 1;
it.types_buffer[0] = .i64;
return .multiple_llvm_types;
},
.double_integer => return Lowering{ .i64_array = 2 },
}
},
.arm_aapcs => {
it.zig_index += 1;
it.llvm_index += 1;
switch (arm_c_abi.classifyType(ty, zcu, .arg)) {
.memory => {
it.byval_attr = true;
return .byref;
},
.byval => return .byval,
.i32_array => |size| return Lowering{ .i32_array = size },
.i64_array => |size| return Lowering{ .i64_array = size },
}
},
.mips64_n64, .mips64_n32, .mips_o32 => {
it.zig_index += 1;
it.llvm_index += 1;
switch (mips_c_abi.classifyType(ty, zcu, .arg)) {
.memory => {
it.byval_attr = true;
return .byref;
},
.byval => return .byval,
.i32_array => |size| return Lowering{ .i32_array = size },
}
},
.riscv64_lp64, .riscv32_ilp32 => {
it.zig_index += 1;
it.llvm_index += 1;
switch (riscv_c_abi.classifyType(ty, zcu)) {
.memory => return .byref_mut,
.byval => return .byval,
.integer => return .abi_sized_int,
.double_integer => return Lowering{ .i64_array = 2 },
.fields => {
it.types_len = 0;
for (0..ty.structFieldCount(zcu)) |field_index| {
const field_ty = ty.fieldType(field_index, zcu);
if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
it.types_buffer[it.types_len] = try it.object.lowerType(field_ty);
it.types_len += 1;
}
it.llvm_index += it.types_len - 1;
return .multiple_llvm_types;
},
}
},
.wasm_watc => {
it.zig_index += 1;
it.llvm_index += 1;
if (isScalar(zcu, ty)) {
return .byval;
}
const classes = wasm_c_abi.classifyType(ty, zcu);
if (classes[0] == .indirect) {
return .byref;
}
return .abi_sized_int;
},
// TODO investigate other callconvs
else => {
it.zig_index += 1;
it.llvm_index += 1;
@ -12263,13 +12371,13 @@ fn iterateParamTypes(object: *Object, fn_info: InternPool.Key.FuncType) ParamTyp
}

fn ccAbiPromoteInt(
cc: std.builtin.CallingConvention,
cc: std.builtin.NewCallingConvention,
zcu: *Zcu,
ty: Type,
) ?std.builtin.Signedness {
const target = zcu.getTarget();
switch (cc) {
.Unspecified, .Inline, .Async => return null,
.auto, .@"inline", .@"async" => return null,
else => {},
}
const int_info = switch (ty.zigTypeTag(zcu)) {
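
`ccAbiPromoteInt` captures the C-ABI rule that small integer arguments are widened at call boundaries, while `auto`, `inline`, and `async` keep Zig's exact-width semantics and promote nothing. A sketch of that rule, with an illustrative enum and a 32-bit threshold that is an assumption, not the function's actual per-target logic:

const std = @import("std");

const Cc = enum { auto, @"inline", @"async", x86_64_sysv };

// Returns the signedness to promote with, or null if no promotion applies.
fn promote(cc: Cc, bits: u16, signedness: std.builtin.Signedness) ?std.builtin.Signedness {
    switch (cc) {
        .auto, .@"inline", .@"async" => return null, // Zig semantics: no widening
        else => {},
    }
    return if (bits < 32) signedness else null; // e.g. u8/i16 widen to 32 bits
}

test promote {
    try std.testing.expectEqual(@as(?std.builtin.Signedness, .unsigned), promote(.x86_64_sysv, 8, .unsigned));
    try std.testing.expectEqual(@as(?std.builtin.Signedness, null), promote(.auto, 8, .unsigned));
}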

@ -2052,6 +2052,7 @@ pub const CallConv = enum(u10) {
x86_intrcc,
avr_intrcc,
avr_signalcc,
avr_builtincc,

amdgpu_vs = 87,
amdgpu_gs,
@ -2060,6 +2061,7 @@ pub const CallConv = enum(u10) {
amdgpu_kernel,
x86_regcallcc,
amdgpu_hs,
msp430_builtincc,

amdgpu_ls = 95,
amdgpu_es,
@ -2068,9 +2070,15 @@ pub const CallConv = enum(u10) {

amdgpu_gfx = 100,

m68k_intrcc,

aarch64_sme_preservemost_from_x0 = 102,
aarch64_sme_preservemost_from_x2,

m68k_rtdcc = 106,

riscv_vectorcallcc = 110,

_,

pub const default = CallConv.ccc;
@ -2115,6 +2123,7 @@ pub const CallConv = enum(u10) {
.x86_intrcc,
.avr_intrcc,
.avr_signalcc,
.avr_builtincc,
.amdgpu_vs,
.amdgpu_gs,
.amdgpu_ps,
@ -2122,13 +2131,17 @@ pub const CallConv = enum(u10) {
.amdgpu_kernel,
.x86_regcallcc,
.amdgpu_hs,
.msp430_builtincc,
.amdgpu_ls,
.amdgpu_es,
.aarch64_vector_pcs,
.aarch64_sve_vector_pcs,
.amdgpu_gfx,
.m68k_intrcc,
.aarch64_sme_preservemost_from_x0,
.aarch64_sme_preservemost_from_x2,
.m68k_rtdcc,
.riscv_vectorcallcc,
=> try writer.print(" {s}", .{@tagName(self)}),
_ => try writer.print(" cc{d}", .{@intFromEnum(self)}),
}
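
The sparse value assignments plus the trailing `_` make `CallConv` a non-exhaustive enum: known LLVM conventions print by name, and any other numeric value still round-trips and prints as `ccNN`. A standalone sketch of that pattern, with two example tags only; `name` here is a hypothetical helper, not the Builder API:

const std = @import("std");

const CallConv = enum(u10) {
    amdgpu_gfx = 100,
    m68k_intrcc,
    _, // non-exhaustive: unnamed values are still representable

    fn name(self: CallConv, buf: []u8) ![]const u8 {
        return switch (self) {
            .amdgpu_gfx => "amdgpu_gfx",
            .m68k_intrcc => "m68k_intrcc",
            _ => std.fmt.bufPrint(buf, "cc{d}", .{@intFromEnum(self)}),
        };
    }
};

test "unnamed values survive" {
    var buf: [8]u8 = undefined;
    try std.testing.expectEqualStrings("cc104", try CallConv.name(@enumFromInt(104), &buf));
}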

@ -1640,8 +1640,8 @@ const NavGen = struct {

comptime assert(zig_call_abi_ver == 3);
switch (fn_info.cc) {
.Unspecified, .Kernel, .Fragment, .Vertex, .C => {},
else => unreachable, // TODO
.auto, .spirv_kernel, .spirv_fragment, .spirv_vertex => {},
else => @panic("TODO"),
}

// TODO: Put this somewhere in Sema.zig
@ -2970,7 +2970,7 @@ const NavGen = struct {
.id_result_type = return_ty_id,
.id_result = result_id,
.function_control = switch (fn_info.cc) {
.Inline => .{ .Inline = true },
.@"inline" => .{ .Inline = true },
else => .{},
},
.function_type = prototype_ty_id,

@ -217,7 +217,7 @@ pub fn updateFunc(
.mod = zcu.navFileScope(func.owner_nav).mod,
.error_msg = null,
.pass = .{ .nav = func.owner_nav },
.is_naked_fn = zcu.navValue(func.owner_nav).typeOf(zcu).fnCallingConvention(zcu) == .Naked,
.is_naked_fn = zcu.navValue(func.owner_nav).typeOf(zcu).fnCallingConvention(zcu) == .naked,
.fwd_decl = fwd_decl.toManaged(gpa),
.ctype_pool = ctype_pool.*,
.scratch = .{},

@ -1484,14 +1484,16 @@ pub fn updateExports(
const exported_nav = ip.getNav(exported_nav_index);
const exported_ty = exported_nav.typeOf(ip);
if (!ip.isFunctionType(exported_ty)) continue;
const winapi_cc: std.builtin.CallingConvention = switch (target.cpu.arch) {
.x86 => .Stdcall,
else => .C,
const c_cc = target.defaultCCallingConvention().?;
const winapi_cc: std.builtin.NewCallingConvention = switch (target.cpu.arch) {
.x86 => .{ .x86_stdcall = .{} },
else => c_cc,
};
const exported_cc = Type.fromInterned(exported_ty).fnCallingConvention(zcu);
if (exported_cc == .C and exp.opts.name.eqlSlice("main", ip) and comp.config.link_libc) {
const CcTag = std.builtin.NewCallingConvention.Tag;
if (@as(CcTag, exported_cc) == @as(CcTag, c_cc) and exp.opts.name.eqlSlice("main", ip) and comp.config.link_libc) {
zcu.stage1_flags.have_c_main = true;
} else if (exported_cc == winapi_cc and target.os.tag == .windows) {
} else if (@as(CcTag, exported_cc) == @as(CcTag, winapi_cc) and target.os.tag == .windows) {
if (exp.opts.name.eqlSlice("WinMain", ip)) {
zcu.stage1_flags.have_winmain = true;
} else if (exp.opts.name.eqlSlice("wWinMain", ip)) {
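
The `@as(CcTag, ...)` comparisons above exist because the calling convention is now a tagged union: `==` on the unions themselves would also have to consider payload fields, so the code coerces both sides to the tag enum and compares only which variant is active. A sketch of that idiom; the `Cc` union and its `incoming_stack_alignment` payload are illustrative:

const std = @import("std");

const Cc = union(enum) {
    c: struct { incoming_stack_alignment: ?u64 = null },
    x86_stdcall: struct { incoming_stack_alignment: ?u64 = null },
};

test "compare by tag, not payload" {
    const Tag = std.meta.Tag(Cc);
    const a: Cc = .{ .c = .{} };
    const b: Cc = .{ .c = .{ .incoming_stack_alignment = 16 } };
    // Coercing a tagged union to its tag enum drops the payload, so two
    // values of the same variant compare equal regardless of payload.
    try std.testing.expect(@as(Tag, a) == @as(Tag, b));
}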

@ -3398,21 +3398,68 @@ fn updateType(
const is_nullary = func_type.param_types.len == 0 and !func_type.is_var_args;
try wip_nav.abbrevCode(if (is_nullary) .nullary_func_type else .func_type);
try wip_nav.strp(name);
try diw.writeByte(@intFromEnum(@as(DW.CC, switch (func_type.cc) {
.Unspecified, .C => .normal,
.Naked, .Async, .Inline => .nocall,
.Interrupt, .Signal => .nocall,
.Stdcall => .BORLAND_stdcall,
.Fastcall => .BORLAND_fastcall,
.Vectorcall => .LLVM_vectorcall,
.Thiscall => .BORLAND_thiscall,
.APCS => .nocall,
.AAPCS => .LLVM_AAPCS,
.AAPCSVFP => .LLVM_AAPCS_VFP,
.SysV => .LLVM_X86_64SysV,
.Win64 => .LLVM_Win64,
.Kernel, .Fragment, .Vertex => .nocall,
})));
const cc: DW.CC = cc: {
if (zcu.getTarget().defaultCCallingConvention()) |cc| {
if (@as(std.builtin.NewCallingConvention.Tag, cc) == func_type.cc) {
break :cc .normal;
}
}
break :cc switch (func_type.cc) {
.@"inline" => unreachable,
.@"async", .auto, .naked => .normal,
.x86_64_sysv => .LLVM_X86_64SysV,
.x86_64_win => .LLVM_Win64,
.x86_64_regcall_v3_sysv => .LLVM_X86RegCall,
.x86_64_regcall_v4_win => .LLVM_X86RegCall,
.x86_64_vectorcall => .LLVM_vectorcall,
.x86_sysv => .nocall,
.x86_win => .nocall,
.x86_stdcall => .BORLAND_stdcall,
.x86_fastcall => .BORLAND_msfastcall,
.x86_thiscall => .BORLAND_thiscall,
.x86_thiscall_mingw => .BORLAND_thiscall,
.x86_regcall_v3 => .LLVM_X86RegCall,
.x86_regcall_v4_win => .LLVM_X86RegCall,
.x86_vectorcall => .LLVM_vectorcall,

.aarch64_aapcs => .LLVM_AAPCS,
.aarch64_aapcs_darwin => .LLVM_AAPCS,
.aarch64_aapcs_win => .LLVM_AAPCS,
.aarch64_vfabi => .LLVM_AAPCS,
.aarch64_vfabi_sve => .LLVM_AAPCS,

.arm_apcs => .nocall,
.arm_aapcs => .LLVM_AAPCS,
.arm_aapcs_vfp => .LLVM_AAPCS_VFP,
.arm_aapcs16_vfp => .nocall,

.riscv64_lp64_v,
.riscv32_ilp32_v,
=> .LLVM_RISCVVectorCall,

.m68k_rtd => .LLVM_M68kRTD,

.amdgcn_kernel,
.nvptx_kernel,
.spirv_kernel,
=> .LLVM_OpenCLKernel,

.x86_64_interrupt,
.x86_interrupt,
.arm_interrupt,
.mips64_interrupt,
.mips_interrupt,
.riscv64_interrupt,
.riscv32_interrupt,
.avr_interrupt,
.csky_interrupt,
.m68k_interrupt,
=> .normal,

else => .nocall,
};
};
try diw.writeByte(@intFromEnum(cc));
try wip_nav.refType(Type.fromInterned(func_type.return_type));
for (0..func_type.param_types.len) |param_index| {
try wip_nav.abbrevCode(.func_type_param);
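
The `cc: { ... break :cc ...; }` construct above is a labeled block used as an expression: the early `break :cc .normal` handles "this is the target's default C convention" once, instead of repeating that check in every switch arm. A tiny standalone example of the idiom; names and values are illustrative:

const std = @import("std");

fn pick(default_matches: bool, tag: enum { stdcall, other }) u8 {
    const result: u8 = blk: {
        if (default_matches) break :blk 0; // early result, like DW.CC.normal
        break :blk switch (tag) {
            .stdcall => 1,
            .other => 2,
        };
    };
    return result;
}

test pick {
    try std.testing.expectEqual(@as(u8, 0), pick(true, .stdcall));
    try std.testing.expectEqual(@as(u8, 1), pick(false, .stdcall));
}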

@ -165,10 +165,9 @@ pub fn updateExports(
const target = zcu.getTarget();
const spv_decl_index = try self.object.resolveNav(zcu, nav_index);
const execution_model = switch (Type.fromInterned(nav_ty).fnCallingConvention(zcu)) {
.Vertex => spec.ExecutionModel.Vertex,
.Fragment => spec.ExecutionModel.Fragment,
.Kernel => spec.ExecutionModel.Kernel,
.C => return, // TODO: What to do here?
.spirv_vertex => spec.ExecutionModel.Vertex,
.spirv_fragment => spec.ExecutionModel.Fragment,
.spirv_kernel => spec.ExecutionModel.Kernel,
else => unreachable,
};
const is_vulkan = target.os.tag == .vulkan;

@ -544,13 +544,13 @@ pub fn compilerRtIntAbbrev(bits: u16) []const u8 {
};
}

pub fn fnCallConvAllowsZigTypes(target: std.Target, cc: std.builtin.CallingConvention) bool {
pub fn fnCallConvAllowsZigTypes(cc: std.builtin.NewCallingConvention) bool {
return switch (cc) {
.Unspecified, .Async, .Inline => true,
.auto, .@"async", .@"inline" => true,
// For now we want to authorize PTX kernel to use zig objects, even if
// we end up exposing the ABI. The goal is to experiment with more
// integrated CPU/GPU code.
.Kernel => target.cpu.arch == .nvptx or target.cpu.arch == .nvptx64,
.nvptx_kernel => true,
else => false,
};
}
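
Dropping the `target` parameter works because the arch is now encoded in the tag: the old code had to check that a generic `.Kernel` was being compiled for nvptx, while `.nvptx_kernel` carries that fact by itself. A sketch with a hypothetical enum mirroring the new signature:

const std = @import("std");

const Cc = enum { auto, @"async", @"inline", nvptx_kernel, x86_64_sysv };

fn allowsZigTypes(cc: Cc) bool {
    return switch (cc) {
        .auto, .@"async", .@"inline" => true,
        // The tag already implies the nvptx target, so no std.Target is needed.
        .nvptx_kernel => true,
        else => false,
    };
}

test allowsZigTypes {
    try std.testing.expect(!allowsZigTypes(.x86_64_sysv));
}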

@ -4,7 +4,6 @@ const assert = std.debug.assert;
const mem = std.mem;
const math = std.math;
const meta = std.meta;
const CallingConvention = std.builtin.CallingConvention;
const clang = @import("clang.zig");
const aro = @import("aro");
const CToken = aro.Tokenizer.Token;
@ -5001,17 +5000,18 @@ fn transCC(
c: *Context,
fn_ty: *const clang.FunctionType,
source_loc: clang.SourceLocation,
) !CallingConvention {
) !ast.Payload.Func.CallingConvention {
const clang_cc = fn_ty.getCallConv();
switch (clang_cc) {
.C => return CallingConvention.C,
.X86StdCall => return CallingConvention.Stdcall,
.X86FastCall => return CallingConvention.Fastcall,
.X86VectorCall, .AArch64VectorCall => return CallingConvention.Vectorcall,
.X86ThisCall => return CallingConvention.Thiscall,
.AAPCS => return CallingConvention.AAPCS,
.AAPCS_VFP => return CallingConvention.AAPCSVFP,
.X86_64SysV => return CallingConvention.SysV,
return switch (clang_cc) {
.C => .c,
.X86_64SysV => .x86_64_sysv,
.X86StdCall => .x86_stdcall,
.X86FastCall => .x86_fastcall,
.X86ThisCall => .x86_thiscall,
.X86VectorCall => .x86_vectorcall,
.AArch64VectorCall => .aarch64_vfabi,
.AAPCS => .arm_aapcs,
.AAPCS_VFP => .arm_aapcs_vfp,
else => return fail(
c,
error.UnsupportedType,
@ -5019,7 +5019,7 @@ fn transCC(
"unsupported calling convention: {s}",
.{@tagName(clang_cc)},
),
}
};
}

fn transFnProto(
@ -5056,7 +5056,7 @@ fn finishTransFnProto(
source_loc: clang.SourceLocation,
fn_decl_context: ?FnDeclContext,
is_var_args: bool,
cc: CallingConvention,
cc: ast.Payload.Func.CallingConvention,
is_pub: bool,
) !*ast.Payload.Func {
const is_export = if (fn_decl_context) |ctx| ctx.is_export else false;
@ -5104,7 +5104,7 @@ fn finishTransFnProto(

const alignment = if (fn_decl) |decl| ClangAlignment.forFunc(c, decl).zigAlignment() else null;

const explicit_callconv = if ((is_inline or is_export or is_extern) and cc == .C) null else cc;
const explicit_callconv = if ((is_inline or is_export or is_extern) and cc == .c) null else cc;

const return_type_node = blk: {
if (fn_ty.getNoReturnAttr()) {