Merge pull request #21697 from mlugg/callconv
Replace `std.builtin.CallingConvention` with a tagged union, eliminating `@setAlignStack`
Commit: 6bf52b0505
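In practice the change reads as follows: the C calling convention is now the lowercase `.c` alias, per-architecture conventions are payload-carrying tags of a tagged union, and the stack realignment previously requested with `@setAlignStack` is expressed through the calling convention itself. A minimal before/after sketch (assuming a compiler with this commit; `withStackAlign` is defined in the `std.builtin` hunk below):

```zig
// Before:
//     export fn entry() void { @setAlignStack(16); /* ... */ }
// After: the incoming stack alignment is part of the function's type.
export fn entry() callconv(.withStackAlign(.c, 16)) void {}

// Architecture-specific conventions are union tags and may carry options:
fn sysvOnly() callconv(.{ .x86_64_sysv = .{ .incoming_stack_alignment = 8 } }) void {}
```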
@@ -3,4 +3,5 @@ export fn entry(foo: Foo) void {
    _ = foo;
}

// obj=parameter of type 'enum_export_error.Foo' not allowed in function with calling convention 'C'
// obj=parameter of type 'enum_export_error.Foo' not allowed in function with calling convention 'x86_64_sysv'
// target=x86_64-linux
@@ -550,12 +550,26 @@ pub const Payload = struct {
        is_var_args: bool,
        name: ?[]const u8,
        linksection_string: ?[]const u8,
        explicit_callconv: ?std.builtin.CallingConvention,
        explicit_callconv: ?CallingConvention,
        params: []Param,
        return_type: Node,
        body: ?Node,
        alignment: ?c_uint,
    },

    pub const CallingConvention = enum {
        c,
        x86_64_sysv,
        x86_64_win,
        x86_stdcall,
        x86_fastcall,
        x86_thiscall,
        x86_vectorcall,
        aarch64_vfabi,
        arm_aapcs,
        arm_aapcs_vfp,
        m68k_rtd,
    };
};

pub const Param = struct {
@@ -2812,14 +2826,52 @@ fn renderFunc(c: *Context, node: Node) !NodeIndex {
    const callconv_expr = if (payload.explicit_callconv) |some| blk: {
        _ = try c.addToken(.keyword_callconv, "callconv");
        _ = try c.addToken(.l_paren, "(");
        _ = try c.addToken(.period, ".");
        const res = try c.addNode(.{
            .tag = .enum_literal,
            .main_token = try c.addTokenFmt(.identifier, "{s}", .{@tagName(some)}),
            .data = undefined,
        });
        const cc_node = switch (some) {
            .c => cc_node: {
                _ = try c.addToken(.period, ".");
                break :cc_node try c.addNode(.{
                    .tag = .enum_literal,
                    .main_token = try c.addToken(.identifier, "c"),
                    .data = undefined,
                });
            },
            .x86_64_sysv,
            .x86_64_win,
            .x86_stdcall,
            .x86_fastcall,
            .x86_thiscall,
            .x86_vectorcall,
            .aarch64_vfabi,
            .arm_aapcs,
            .arm_aapcs_vfp,
            .m68k_rtd,
            => cc_node: {
                // .{ .foo = .{} }
                _ = try c.addToken(.period, ".");
                const outer_lbrace = try c.addToken(.l_brace, "{");
                _ = try c.addToken(.period, ".");
                _ = try c.addToken(.identifier, @tagName(some));
                _ = try c.addToken(.equal, "=");
                _ = try c.addToken(.period, ".");
                const inner_lbrace = try c.addToken(.l_brace, "{");
                _ = try c.addToken(.r_brace, "}");
                _ = try c.addToken(.r_brace, "}");
                break :cc_node try c.addNode(.{
                    .tag = .struct_init_dot_two,
                    .main_token = outer_lbrace,
                    .data = .{
                        .lhs = try c.addNode(.{
                            .tag = .struct_init_dot_two,
                            .main_token = inner_lbrace,
                            .data = .{ .lhs = 0, .rhs = 0 },
                        }),
                        .rhs = 0,
                    },
                });
            },
        };
        _ = try c.addToken(.r_paren, ")");
        break :blk res;
        break :blk cc_node;
    } else 0;

    const return_type_expr = try renderNode(c, payload.return_type);

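To see what this renderer emits, here is a hedged sketch of the translated Zig for two hypothetical C declarations (`foo` and `bar` are illustrative names, not from the diff):

```zig
// `.c` is rendered as a bare enum literal:
pub extern fn foo() callconv(.c) c_int;
// Arch-specific tags are rendered as `.{ .tag = .{} }` struct initializers:
pub extern fn bar() callconv(.{ .x86_stdcall = .{} }) c_int;
```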
@@ -10,7 +10,6 @@ const is_test = builtin.is_test;
const common = @import("common.zig");
const udivmod = @import("udivmod.zig").udivmod;
const __divti3 = @import("divti3.zig").__divti3;
const arm = @import("arm.zig");

pub const panic = common.panic;

@@ -102,25 +101,6 @@ test "test_divmoddi4" {
    }
}

fn test_one_aeabi_ldivmod(a: i64, b: i64, expected_q: i64, expected_r: i64) !void {
    const LdivmodRes = extern struct {
        q: i64, // r1:r0
        r: i64, // r3:r2
    };
    const actualIdivmod = @as(*const fn (a: i64, b: i64) callconv(.AAPCS) LdivmodRes, @ptrCast(&arm.__aeabi_ldivmod));
    const arm_res = actualIdivmod(a, b);
    try testing.expectEqual(expected_q, arm_res.q);
    try testing.expectEqual(expected_r, arm_res.r);
}

test "arm.__aeabi_ldivmod" {
    if (!builtin.cpu.arch.isARM()) return error.SkipZigTest;

    for (cases__divmodsi4) |case| {
        try test_one_aeabi_ldivmod(case[0], case[1], case[2], case[3]);
    }
}

pub fn __udivmoddi4(a: u64, b: u64, maybe_rem: ?*u64) callconv(.C) u64 {
    return udivmod(u64, a, b, maybe_rem);
}
@@ -261,25 +241,6 @@ test "test_divmodsi4" {
    }
}

fn test_one_aeabi_idivmod(a: i32, b: i32, expected_q: i32, expected_r: i32) !void {
    const IdivmodRes = extern struct {
        q: i32, // r0
        r: i32, // r1
    };
    const actualIdivmod = @as(*const fn (a: i32, b: i32) callconv(.AAPCS) IdivmodRes, @ptrCast(&arm.__aeabi_idivmod));
    const arm_res = actualIdivmod(a, b);
    try testing.expectEqual(expected_q, arm_res.q);
    try testing.expectEqual(expected_r, arm_res.r);
}

test "arm.__aeabi_idivmod" {
    if (!builtin.cpu.arch.isARM()) return error.SkipZigTest;

    for (cases__divmodsi4) |case| {
        try test_one_aeabi_idivmod(case[0], case[1], case[2], case[3]);
    }
}

pub fn __udivmodsi4(a: u32, b: u32, rem: *u32) callconv(.C) u32 {
    const d = __udivsi3(a, b);
    rem.* = @bitCast(@as(i32, @bitCast(a)) -% (@as(i32, @bitCast(d)) * @as(i32, @bitCast(b))));

@@ -3,7 +3,6 @@
const testing = @import("std").testing;
const builtin = @import("builtin");
const __udivmoddi4 = @import("int.zig").__udivmoddi4;
const __aeabi_uldivmod = @import("arm.zig").__aeabi_uldivmod;

fn test__udivmoddi4(a: u64, b: u64, expected_q: u64, expected_r: u64) !void {
    var r: u64 = undefined;
@@ -18,26 +17,6 @@ test "udivmoddi4" {
    }
}

const ARMRes = extern struct {
    q: u64, // r1:r0
    r: u64, // r3:r2
};

fn test__aeabi_uldivmod(a: u64, b: u64, expected_q: u64, expected_r: u64) !void {
    const actualUldivmod = @as(*const fn (a: u64, b: u64) callconv(.AAPCS) ARMRes, @ptrCast(&__aeabi_uldivmod));
    const arm_res = actualUldivmod(a, b);
    try testing.expectEqual(expected_q, arm_res.q);
    try testing.expectEqual(expected_r, arm_res.r);
}

test "arm.__aeabi_uldivmod" {
    if (!builtin.cpu.arch.isARM()) return error.SkipZigTest;

    for (cases) |case| {
        try test__aeabi_uldivmod(case[0], case[1], case[2], case[3]);
    }
}

const cases = [_][4]u64{
    [_]u64{0x0000000000000000, 0x0000000000000001, 0x0000000000000000, 0x0000000000000000},
    [_]u64{0x0000000000000000, 0x0000000000000002, 0x0000000000000000, 0x0000000000000000},

@@ -2,27 +2,18 @@
// zig fmt: off
const testing = @import("std").testing;
const builtin = @import("builtin");
const __aeabi_uidivmod = @import("arm.zig").__aeabi_uidivmod;
const __udivmodsi4 = @import("int.zig").__udivmodsi4;

const ARMRes = extern struct {
    q: u32, // r0
    r: u32, // r1
};

fn test__aeabi_uidivmod(a: u32, b: u32, expected_q: u32, expected_r: u32) !void {
    const actualUidivmod = @as(*const fn (a: u32, b: u32) callconv(.AAPCS) ARMRes, @ptrCast(&__aeabi_uidivmod));
    const arm_res = actualUidivmod(a, b);
    try testing.expectEqual(expected_q, arm_res.q);
    try testing.expectEqual(expected_r, arm_res.r);
fn test__udivmodsi4(a: u32, b: u32, expected_q: u32, expected_r: u32) !void {
    var r: u32 = undefined;
    const q = __udivmodsi4(a, b, &r);
    try testing.expectEqual(expected_q, q);
    try testing.expectEqual(expected_r, r);
}

test "arm.__aeabi_uidivmod" {
    if (!builtin.cpu.arch.isARM()) return error.SkipZigTest;

    var i: i32 = 0;
test "udivmodsi4" {
    for (cases) |case| {
        try test__aeabi_uidivmod(case[0], case[1], case[2], case[3]);
        i+=1;
        try test__udivmodsi4(case[0], case[1], case[2], case[3]);
    }
}

@@ -1609,6 +1609,165 @@ pub const Cpu = struct {
            else => ".X",
        };
    }

    /// Returns the array of `Arch` to which a specific `std.builtin.CallingConvention` applies.
    /// Asserts that `cc` is not `.auto`, `.@"async"`, `.naked`, or `.@"inline"`.
    pub fn fromCallingConvention(cc: std.builtin.CallingConvention.Tag) []const Arch {
        return switch (cc) {
            .auto,
            .@"async",
            .naked,
            .@"inline",
            => unreachable,

            .x86_64_sysv,
            .x86_64_win,
            .x86_64_regcall_v3_sysv,
            .x86_64_regcall_v4_win,
            .x86_64_vectorcall,
            .x86_64_interrupt,
            => &.{.x86_64},

            .x86_sysv,
            .x86_win,
            .x86_stdcall,
            .x86_fastcall,
            .x86_thiscall,
            .x86_thiscall_mingw,
            .x86_regcall_v3,
            .x86_regcall_v4_win,
            .x86_vectorcall,
            .x86_interrupt,
            => &.{.x86},

            .aarch64_aapcs,
            .aarch64_aapcs_darwin,
            .aarch64_aapcs_win,
            .aarch64_vfabi,
            .aarch64_vfabi_sve,
            => &.{ .aarch64, .aarch64_be },

            .arm_apcs,
            .arm_aapcs,
            .arm_aapcs_vfp,
            .arm_aapcs16_vfp,
            .arm_interrupt,
            => &.{ .arm, .armeb, .thumb, .thumbeb },

            .mips64_n64,
            .mips64_n32,
            .mips64_interrupt,
            => &.{ .mips64, .mips64el },

            .mips_o32,
            .mips_interrupt,
            => &.{ .mips, .mipsel },

            .riscv64_lp64,
            .riscv64_lp64_v,
            .riscv64_interrupt,
            => &.{.riscv64},

            .riscv32_ilp32,
            .riscv32_ilp32_v,
            .riscv32_interrupt,
            => &.{.riscv32},

            .sparc64_sysv,
            => &.{.sparc64},

            .sparc_sysv,
            => &.{.sparc},

            .powerpc64_elf,
            .powerpc64_elf_altivec,
            .powerpc64_elf_v2,
            => &.{ .powerpc64, .powerpc64le },

            .powerpc_sysv,
            .powerpc_sysv_altivec,
            .powerpc_aix,
            .powerpc_aix_altivec,
            => &.{ .powerpc, .powerpcle },

            .wasm_watc,
            => &.{ .wasm64, .wasm32 },

            .arc_sysv,
            => &.{.arc},

            .avr_gnu,
            .avr_builtin,
            .avr_signal,
            .avr_interrupt,
            => &.{.avr},

            .bpf_std,
            => &.{ .bpfel, .bpfeb },

            .csky_sysv,
            .csky_interrupt,
            => &.{.csky},

            .hexagon_sysv,
            .hexagon_sysv_hvx,
            => &.{.hexagon},

            .lanai_sysv,
            => &.{.lanai},

            .loongarch64_lp64,
            => &.{.loongarch64},

            .loongarch32_ilp32,
            => &.{.loongarch32},

            .m68k_sysv,
            .m68k_gnu,
            .m68k_rtd,
            .m68k_interrupt,
            => &.{.m68k},

            .msp430_eabi,
            => &.{.msp430},

            .propeller1_sysv,
            => &.{.propeller1},

            .propeller2_sysv,
            => &.{.propeller2},

            .s390x_sysv,
            .s390x_sysv_vx,
            => &.{.s390x},

            .ve_sysv,
            => &.{.ve},

            .xcore_xs1,
            .xcore_xs2,
            => &.{.xcore},

            .xtensa_call0,
            .xtensa_windowed,
            => &.{.xtensa},

            .amdgcn_device,
            .amdgcn_kernel,
            .amdgcn_cs,
            => &.{.amdgcn},

            .nvptx_device,
            .nvptx_kernel,
            => &.{ .nvptx, .nvptx64 },

            .spirv_device,
            .spirv_kernel,
            .spirv_fragment,
            .spirv_vertex,
            => &.{ .spirv, .spirv32, .spirv64 },
        };
    }
};

pub const Model = struct {
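A quick illustration of the new query (a sketch, assuming a `std` that includes this commit):

```zig
const std = @import("std");

test "x86_64 callconv maps to the x86_64 arch" {
    const archs = std.Target.Cpu.Arch.fromCallingConvention(.x86_64_sysv);
    try std.testing.expectEqualSlices(std.Target.Cpu.Arch, &.{.x86_64}, archs);
}
```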
@@ -2873,6 +3032,76 @@ pub fn cTypePreferredAlignment(target: Target, c_type: CType) u16 {
    );
}

pub fn cCallingConvention(target: Target) ?std.builtin.CallingConvention {
    return switch (target.cpu.arch) {
        .x86_64 => switch (target.os.tag) {
            .windows, .uefi => .{ .x86_64_win = .{} },
            else => .{ .x86_64_sysv = .{} },
        },
        .x86 => switch (target.os.tag) {
            .windows, .uefi => .{ .x86_win = .{} },
            else => .{ .x86_sysv = .{} },
        },
        .aarch64, .aarch64_be => if (target.os.tag.isDarwin()) cc: {
            break :cc .{ .aarch64_aapcs_darwin = .{} };
        } else switch (target.os.tag) {
            .windows => .{ .aarch64_aapcs_win = .{} },
            else => .{ .aarch64_aapcs = .{} },
        },
        .arm, .armeb, .thumb, .thumbeb => switch (target.os.tag) {
            .netbsd => .{ .arm_apcs = .{} },
            else => switch (target.abi.floatAbi()) {
                .soft => .{ .arm_aapcs = .{} },
                .hard => .{ .arm_aapcs_vfp = .{} },
            },
        },
        .mips64, .mips64el => switch (target.abi) {
            .gnuabin32 => .{ .mips64_n32 = .{} },
            else => .{ .mips64_n64 = .{} },
        },
        .mips, .mipsel => .{ .mips_o32 = .{} },
        .riscv64 => .{ .riscv64_lp64 = .{} },
        .riscv32 => .{ .riscv32_ilp32 = .{} },
        .sparc64 => .{ .sparc64_sysv = .{} },
        .sparc => .{ .sparc_sysv = .{} },
        .powerpc64 => if (target.isMusl())
            .{ .powerpc64_elf_v2 = .{} }
        else
            .{ .powerpc64_elf = .{} },
        .powerpc64le => .{ .powerpc64_elf_v2 = .{} },
        .powerpc, .powerpcle => switch (target.os.tag) {
            .aix => .{ .powerpc_aix = .{} },
            else => .{ .powerpc_sysv = .{} },
        },
        .wasm32 => .{ .wasm_watc = .{} },
        .wasm64 => .{ .wasm_watc = .{} },
        .arc => .{ .arc_sysv = .{} },
        .avr => .avr_gnu,
        .bpfel, .bpfeb => .{ .bpf_std = .{} },
        .csky => .{ .csky_sysv = .{} },
        .hexagon => .{ .hexagon_sysv = .{} },
        .kalimba => null,
        .lanai => .{ .lanai_sysv = .{} },
        .loongarch64 => .{ .loongarch64_lp64 = .{} },
        .loongarch32 => .{ .loongarch32_ilp32 = .{} },
        .m68k => if (target.abi.isGnu() or target.abi.isMusl())
            .{ .m68k_gnu = .{} }
        else
            .{ .m68k_sysv = .{} },
        .msp430 => .{ .msp430_eabi = .{} },
        .propeller1 => .{ .propeller1_sysv = .{} },
        .propeller2 => .{ .propeller2_sysv = .{} },
        .s390x => .{ .s390x_sysv = .{} },
        .spu_2 => null,
        .ve => .{ .ve_sysv = .{} },
        .xcore => .{ .xcore_xs1 = .{} },
        .xtensa => .{ .xtensa_call0 = .{} },
        .amdgcn => .{ .amdgcn_device = .{} },
        .nvptx, .nvptx64 => .nvptx_device,
        .spirv, .spirv32, .spirv64 => .spirv_device,
    };
}

pub fn osArchName(target: std.Target) [:0]const u8 {
    return target.os.tag.archName(target.cpu.arch);
}

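Since some targets (`kalimba`, `spu_2`) have no C ABI, the result is optional; a hedged usage sketch:

```zig
const builtin = @import("builtin");

// On x86_64-linux this evaluates to `.{ .x86_64_sysv = .{} }`.
const c_cc = builtin.target.cCallingConvention() orelse
    @compileError("target has no C calling convention");
```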
@@ -160,54 +160,340 @@ pub const OptimizeMode = enum {
/// Deprecated; use OptimizeMode.
pub const Mode = OptimizeMode;

/// The calling convention of a function defines how arguments and return values are passed, as well
/// as any other requirements which callers and callees must respect, such as register preservation
/// and stack alignment.
///
/// This data structure is used by the Zig language code generation and
/// therefore must be kept in sync with the compiler implementation.
pub const CallingConvention = enum(u8) {
    /// This is the default Zig calling convention used when not using `export` on `fn`
    /// and no other calling convention is specified.
    Unspecified,
    /// Matches the C ABI for the target.
    /// This is the default calling convention when using `export` on `fn`
    /// and no other calling convention is specified.
    C,
    /// This makes a function not have any function prologue or epilogue,
    /// making the function itself uncallable in regular Zig code.
    /// This can be useful when integrating with assembly.
    Naked,
    /// Functions with this calling convention are called asynchronously,
    /// as if called as `async function()`.
    Async,
    /// Functions with this calling convention are inlined at all call sites.
    Inline,
    /// x86-only.
    Interrupt,
    Signal,
    /// x86-only.
    Stdcall,
    /// x86-only.
    Fastcall,
    /// x86-only.
    Vectorcall,
    /// x86-only.
    Thiscall,
pub const CallingConvention = union(enum(u8)) {
    pub const Tag = @typeInfo(CallingConvention).@"union".tag_type.?;

    /// This is an alias for the default C calling convention for this target.
    /// Functions marked as `extern` or `export` are given this calling convention by default.
    pub const c = builtin.target.cCallingConvention().?;

    pub const winapi: CallingConvention = switch (builtin.target.cpu.arch) {
        .x86_64 => .{ .x86_64_win = .{} },
        .x86 => .{ .x86_stdcall = .{} },
        .aarch64 => .{ .aarch64_aapcs_win = .{} },
        .thumb => .{ .arm_aapcs_vfp = .{} },
        else => unreachable,
    };

    pub const kernel: CallingConvention = switch (builtin.target.cpu.arch) {
        .amdgcn => .amdgcn_kernel,
        .nvptx, .nvptx64 => .nvptx_kernel,
        .spirv, .spirv32, .spirv64 => .spirv_kernel,
        else => unreachable,
    };

    /// Deprecated; use `.auto`.
    pub const Unspecified: CallingConvention = .auto;
    /// Deprecated; use `.c`.
    pub const C: CallingConvention = .c;
    /// Deprecated; use `.naked`.
    pub const Naked: CallingConvention = .naked;
    /// Deprecated; use `.@"async"`.
    pub const Async: CallingConvention = .@"async";
    /// Deprecated; use `.@"inline"`.
    pub const Inline: CallingConvention = .@"inline";
    /// Deprecated; use `.x86_64_interrupt`, `.x86_interrupt`, or `.avr_interrupt`.
    pub const Interrupt: CallingConvention = switch (builtin.target.cpu.arch) {
        .x86_64 => .{ .x86_64_interrupt = .{} },
        .x86 => .{ .x86_interrupt = .{} },
        .avr => .avr_interrupt,
        else => unreachable,
    };
    /// Deprecated; use `.avr_signal`.
    pub const Signal: CallingConvention = .avr_signal;
    /// Deprecated; use `.x86_stdcall`.
    pub const Stdcall: CallingConvention = .{ .x86_stdcall = .{} };
    /// Deprecated; use `.x86_fastcall`.
    pub const Fastcall: CallingConvention = .{ .x86_fastcall = .{} };
    /// Deprecated; use `.x86_64_vectorcall`, `.x86_vectorcall`, or `.aarch64_vfabi`.
    pub const Vectorcall: CallingConvention = switch (builtin.target.cpu.arch) {
        .x86_64 => .{ .x86_64_vectorcall = .{} },
        .x86 => .{ .x86_vectorcall = .{} },
        .aarch64, .aarch64_be => .{ .aarch64_vfabi = .{} },
        else => unreachable,
    };
    /// Deprecated; use `.x86_thiscall`.
    pub const Thiscall: CallingConvention = .{ .x86_thiscall = .{} };
    /// Deprecated; use `.arm_apcs`.
    pub const APCS: CallingConvention = .{ .arm_apcs = .{} };
    /// Deprecated; use `.arm_aapcs`.
    pub const AAPCS: CallingConvention = .{ .arm_aapcs = .{} };
    /// Deprecated; use `.arm_aapcs_vfp`.
    pub const AAPCSVFP: CallingConvention = .{ .arm_aapcs_vfp = .{} };
    /// Deprecated; use `.x86_64_sysv`.
    pub const SysV: CallingConvention = .{ .x86_64_sysv = .{} };
    /// Deprecated; use `.x86_64_win`.
    pub const Win64: CallingConvention = .{ .x86_64_win = .{} };
    /// Deprecated; use `.kernel`.
    pub const Kernel: CallingConvention = .kernel;
    /// Deprecated; use `.spirv_fragment`.
    pub const Fragment: CallingConvention = .spirv_fragment;
    /// Deprecated; use `.spirv_vertex`.
    pub const Vertex: CallingConvention = .spirv_vertex;

    /// The default Zig calling convention when neither `export` nor `inline` is specified.
    /// This calling convention makes no guarantees about stack alignment, registers, etc.
    /// It can only be used within this Zig compilation unit.
    auto,
    /// The calling convention of a function that can be called with `async` syntax. An `async` call
    /// of a runtime-known function must target a function with this calling convention.
    /// Comptime-known functions with other calling conventions may be coerced to this one.
    @"async",

    /// Functions with this calling convention have no prologue or epilogue, making the function
    /// uncallable in regular Zig code. This can be useful when integrating with assembly.
    naked,

    /// This calling convention is exactly equivalent to using the `inline` keyword on a function
    /// definition. This function will be semantically inlined by the Zig compiler at call sites.
    /// Pointers to inline functions are comptime-only.
    @"inline",

    // Calling conventions for the `x86_64` architecture.
    x86_64_sysv: CommonOptions,
    x86_64_win: CommonOptions,
    x86_64_regcall_v3_sysv: CommonOptions,
    x86_64_regcall_v4_win: CommonOptions,
    x86_64_vectorcall: CommonOptions,
    x86_64_interrupt: CommonOptions,

    // Calling conventions for the `x86` architecture.
    x86_sysv: X86RegparmOptions,
    x86_win: X86RegparmOptions,
    x86_stdcall: X86RegparmOptions,
    x86_fastcall: CommonOptions,
    x86_thiscall: CommonOptions,
    x86_thiscall_mingw: CommonOptions,
    x86_regcall_v3: CommonOptions,
    x86_regcall_v4_win: CommonOptions,
    x86_vectorcall: CommonOptions,
    x86_interrupt: CommonOptions,

    // Calling conventions for the `aarch64` and `aarch64_be` architectures.
    aarch64_aapcs: CommonOptions,
    aarch64_aapcs_darwin: CommonOptions,
    aarch64_aapcs_win: CommonOptions,
    aarch64_vfabi: CommonOptions,
    aarch64_vfabi_sve: CommonOptions,

    // Calling conventions for the `arm`, `armeb`, `thumb`, and `thumbeb` architectures.
    /// ARM Procedure Call Standard (obsolete)
    /// ARM-only.
    APCS,
    /// ARM Architecture Procedure Call Standard (current standard)
    /// ARM-only.
    AAPCS,
    arm_apcs: CommonOptions,
    /// ARM Architecture Procedure Call Standard
    arm_aapcs: CommonOptions,
    /// ARM Architecture Procedure Call Standard Vector Floating-Point
    /// ARM-only.
    AAPCSVFP,
    /// x86-64-only.
    SysV,
    /// x86-64-only.
    Win64,
    /// AMD GPU, NVPTX, or SPIR-V kernel
    Kernel,
    // Vulkan-only
    Fragment,
    Vertex,
    arm_aapcs_vfp: CommonOptions,
    arm_aapcs16_vfp: CommonOptions,
    arm_interrupt: ArmInterruptOptions,

    // Calling conventions for the `mips64` architecture.
    mips64_n64: CommonOptions,
    mips64_n32: CommonOptions,
    mips64_interrupt: MipsInterruptOptions,

    // Calling conventions for the `mips` architecture.
    mips_o32: CommonOptions,
    mips_interrupt: MipsInterruptOptions,

    // Calling conventions for the `riscv64` architecture.
    riscv64_lp64: CommonOptions,
    riscv64_lp64_v: CommonOptions,
    riscv64_interrupt: RiscvInterruptOptions,

    // Calling conventions for the `riscv32` architecture.
    riscv32_ilp32: CommonOptions,
    riscv32_ilp32_v: CommonOptions,
    riscv32_interrupt: RiscvInterruptOptions,

    // Calling conventions for the `sparc64` architecture.
    sparc64_sysv: CommonOptions,

    // Calling conventions for the `sparc` architecture.
    sparc_sysv: CommonOptions,

    // Calling conventions for the `powerpc64` and `powerpc64le` architectures.
    powerpc64_elf: CommonOptions,
    powerpc64_elf_altivec: CommonOptions,
    powerpc64_elf_v2: CommonOptions,

    // Calling conventions for the `powerpc` and `powerpcle` architectures.
    powerpc_sysv: CommonOptions,
    powerpc_sysv_altivec: CommonOptions,
    powerpc_aix: CommonOptions,
    powerpc_aix_altivec: CommonOptions,

    /// The standard `wasm32`/`wasm64` calling convention, as specified in the WebAssembly Tool Conventions.
    wasm_watc: CommonOptions,

    /// The standard `arc` calling convention.
    arc_sysv: CommonOptions,

    // Calling conventions for the `avr` architecture.
    avr_gnu,
    avr_builtin,
    avr_signal,
    avr_interrupt,

    /// The standard `bpfel`/`bpfeb` calling convention.
    bpf_std: CommonOptions,

    // Calling conventions for the `csky` architecture.
    csky_sysv: CommonOptions,
    csky_interrupt: CommonOptions,

    // Calling conventions for the `hexagon` architecture.
    hexagon_sysv: CommonOptions,
    hexagon_sysv_hvx: CommonOptions,

    /// The standard `lanai` calling convention.
    lanai_sysv: CommonOptions,

    /// The standard `loongarch64` calling convention.
    loongarch64_lp64: CommonOptions,

    /// The standard `loongarch32` calling convention.
    loongarch32_ilp32: CommonOptions,

    // Calling conventions for the `m68k` architecture.
    m68k_sysv: CommonOptions,
    m68k_gnu: CommonOptions,
    m68k_rtd: CommonOptions,
    m68k_interrupt: CommonOptions,

    /// The standard `msp430` calling convention.
    msp430_eabi: CommonOptions,

    /// The standard `propeller1` calling convention.
    propeller1_sysv: CommonOptions,

    /// The standard `propeller2` calling convention.
    propeller2_sysv: CommonOptions,

    // Calling conventions for the `s390x` architecture.
    s390x_sysv: CommonOptions,
    s390x_sysv_vx: CommonOptions,

    /// The standard `ve` calling convention.
    ve_sysv: CommonOptions,

    // Calling conventions for the `xcore` architecture.
    xcore_xs1: CommonOptions,
    xcore_xs2: CommonOptions,

    // Calling conventions for the `xtensa` architecture.
    xtensa_call0: CommonOptions,
    xtensa_windowed: CommonOptions,

    // Calling conventions for the `amdgcn` architecture.
    amdgcn_device: CommonOptions,
    amdgcn_kernel,
    amdgcn_cs: CommonOptions,

    // Calling conventions for the `nvptx` architecture.
    nvptx_device,
    nvptx_kernel,

    // Calling conventions for kernels and shaders on the `spirv`, `spirv32`, and `spirv64` architectures.
    spirv_device,
    spirv_kernel,
    spirv_fragment,
    spirv_vertex,

    /// Options shared across most calling conventions.
    pub const CommonOptions = struct {
        /// The boundary the stack is aligned to when the function is called.
        /// `null` means the default for this calling convention.
        incoming_stack_alignment: ?u64 = null,
    };

    /// Options for x86 calling conventions which support the regparm attribute to pass some
    /// arguments in registers.
    pub const X86RegparmOptions = struct {
        /// The boundary the stack is aligned to when the function is called.
        /// `null` means the default for this calling convention.
        incoming_stack_alignment: ?u64 = null,
        /// The number of arguments to pass in registers before passing the remaining arguments
        /// according to the calling convention.
        /// Equivalent to `__attribute__((regparm(x)))` in Clang and GCC.
        register_params: u2 = 0,
    };

    /// Options for the `arm_interrupt` calling convention.
    pub const ArmInterruptOptions = struct {
        /// The boundary the stack is aligned to when the function is called.
        /// `null` means the default for this calling convention.
        incoming_stack_alignment: ?u64 = null,
        /// The kind of interrupt being received.
        type: InterruptType = .generic,

        pub const InterruptType = enum(u3) {
            generic,
            irq,
            fiq,
            swi,
            abort,
            undef,
        };
    };

    /// Options for the `mips_interrupt` and `mips64_interrupt` calling conventions.
    pub const MipsInterruptOptions = struct {
        /// The boundary the stack is aligned to when the function is called.
        /// `null` means the default for this calling convention.
        incoming_stack_alignment: ?u64 = null,
        /// The interrupt mode.
        mode: InterruptMode = .eic,

        pub const InterruptMode = enum(u4) {
            eic,
            sw0,
            sw1,
            hw0,
            hw1,
            hw2,
            hw3,
            hw4,
            hw5,
        };
    };

    /// Options for the `riscv32_interrupt` and `riscv64_interrupt` calling conventions.
    pub const RiscvInterruptOptions = struct {
        /// The boundary the stack is aligned to when the function is called.
        /// `null` means the default for this calling convention.
        incoming_stack_alignment: ?u64 = null,
        /// The privilege mode.
        mode: PrivilegeMode = .machine,

        pub const PrivilegeMode = enum(u2) {
            supervisor,
            machine,
        };
    };

    /// Returns the array of `std.Target.Cpu.Arch` to which this `CallingConvention` applies.
    /// Asserts that `cc` is not `.auto`, `.@"async"`, `.naked`, or `.@"inline"`.
    pub fn archs(cc: CallingConvention) []const std.Target.Cpu.Arch {
        return std.Target.Cpu.Arch.fromCallingConvention(cc);
    }

    pub fn eql(a: CallingConvention, b: CallingConvention) bool {
        return std.meta.eql(a, b);
    }

    pub fn withStackAlign(cc: CallingConvention, incoming_stack_alignment: u64) CallingConvention {
        const tag: CallingConvention.Tag = cc;
        var result = cc;
        @field(result, @tagName(tag)).incoming_stack_alignment = incoming_stack_alignment;
        return result;
    }
};

/// This data structure is used by the Zig language code generation and

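Taken together, a hedged sketch of what the new union enables in user code (the `@setAlignStack` builtin this replaces is deleted later in this commit):

```zig
const std = @import("std");

// Previously: `fn entry() callconv(.C) void { @setAlignStack(16); ... }`.
// The incoming-stack-alignment requirement is now part of the type:
fn entry() callconv(.withStackAlign(.c, 16)) void {}

comptime {
    const cc = @typeInfo(@TypeOf(entry)).@"fn".calling_convention;
    // A union cannot be compared with `==`; use `eql`, which also compares
    // payloads, so this differs from plain `.c`:
    std.debug.assert(!cc.eql(.c));
}
```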
@@ -6,9 +6,9 @@ const NonCanonicalError = crypto.errors.NonCanonicalError;
const NotSquareError = crypto.errors.NotSquareError;

// Inline conditionally, when it can result in large code generation.
const bloaty_inline = switch (builtin.mode) {
    .ReleaseSafe, .ReleaseFast => .Inline,
    .Debug, .ReleaseSmall => .Unspecified,
const bloaty_inline: std.builtin.CallingConvention = switch (builtin.mode) {
    .ReleaseSafe, .ReleaseFast => .@"inline",
    .Debug, .ReleaseSmall => .auto,
};

pub const Fe = struct {

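Two migration details show up in this hunk: a plain literal like `.Inline` no longer infers its type, so the constant needs an explicit `std.builtin.CallingConvention` annotation, and the old capitalized names survive as deprecated `pub const` aliases that still resolve through decl-literal syntax. A sketch:

```zig
const std = @import("std");

// Needs a result type now that `.@"inline"` is a union/decl literal:
const cc: std.builtin.CallingConvention = .@"inline";

// Deprecated alias still accepted: `.C` resolves to the `C` declaration
// on the union, which is defined as `.c`.
fn legacy() callconv(.C) void {}
fn modern() callconv(.c) void {}
```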
@@ -2824,10 +2824,7 @@ pub const STD_OUTPUT_HANDLE = maxInt(DWORD) - 11 + 1;
/// The standard error device. Initially, this is the active console screen buffer, CONOUT$.
pub const STD_ERROR_HANDLE = maxInt(DWORD) - 12 + 1;

pub const WINAPI: std.builtin.CallingConvention = if (native_arch == .x86)
    .Stdcall
else
    .C;
pub const WINAPI: std.builtin.CallingConvention = .winapi;

pub const BOOL = c_int;
pub const BOOLEAN = BYTE;

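`std.os.windows.WINAPI` thereby becomes a thin alias for the `winapi` declaration added to `CallingConvention` above; a hedged sketch of a binding using it (`GetCurrentProcessId` chosen only as a familiar signature):

```zig
// stdcall on x86, the default C convention elsewhere; the per-arch
// switch now lives in std.builtin.CallingConvention.winapi.
extern "kernel32" fn GetCurrentProcessId() callconv(.winapi) u32;
```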
@@ -55,7 +55,7 @@ comptime {
    if (builtin.link_libc and @hasDecl(root, "main")) {
        if (native_arch.isWasm()) {
            @export(&mainWithoutEnv, .{ .name = "main" });
        } else if (@typeInfo(@TypeOf(root.main)).@"fn".calling_convention != .C) {
        } else if (!@typeInfo(@TypeOf(root.main)).@"fn".calling_convention.eql(.c)) {
            @export(&main, .{ .name = "main" });
        }
    } else if (native_os == .windows) {
@@ -102,12 +102,11 @@ fn main2() callconv(.C) c_int {
    return 0;
}

fn _start2() callconv(.C) noreturn {
fn _start2() callconv(.withStackAlign(.c, 1)) noreturn {
    callMain2();
}

fn callMain2() noreturn {
    @setAlignStack(16);
    root.main();
    exit2(0);
}
@@ -428,8 +427,7 @@ fn _start() callconv(.Naked) noreturn {
    );
}

fn WinStartup() callconv(std.os.windows.WINAPI) noreturn {
    @setAlignStack(16);
fn WinStartup() callconv(.withStackAlign(.winapi, 1)) noreturn {
    if (!builtin.single_threaded and !builtin.link_libc) {
        _ = @import("os/windows/tls.zig");
    }
@@ -439,8 +437,7 @@ fn WinStartup() callconv(std.os.windows.WINAPI) noreturn {
    std.os.windows.ntdll.RtlExitUserProcess(callMain());
}

fn wWinMainCRTStartup() callconv(std.os.windows.WINAPI) noreturn {
    @setAlignStack(16);
fn wWinMainCRTStartup() callconv(.withStackAlign(.winapi, 1)) noreturn {
    if (!builtin.single_threaded and !builtin.link_libc) {
        _ = @import("os/windows/tls.zig");
    }

@@ -2902,7 +2902,6 @@ fn addEnsureResult(gz: *GenZir, maybe_unused_result: Zir.Inst.Ref, statement: Ast.Node.Index
            .breakpoint,
            .disable_instrumentation,
            .set_float_mode,
            .set_align_stack,
            .branch_hint,
            => break :b true,
            else => break :b false,
@@ -9324,14 +9323,6 @@ fn builtinCall(
            });
            return rvalue(gz, ri, .void_value, node);
        },
        .set_align_stack => {
            const order = try expr(gz, scope, coerced_align_ri, params[0]);
            _ = try gz.addExtendedPayload(.set_align_stack, Zir.Inst.UnNode{
                .node = gz.nodeIndexToRelative(node),
                .operand = order,
            });
            return rvalue(gz, ri, .void_value, node);
        },

        .src => {
            // Incorporate the source location into the source hash, so that

@@ -909,7 +909,6 @@ fn builtinCall(astrl: *AstRlAnnotate, block: ?*Block, ri: ResultInfo, node: Ast.
        .wasm_memory_size,
        .splat,
        .set_float_mode,
        .set_align_stack,
        .type_info,
        .work_item_id,
        .work_group_size,

@@ -82,7 +82,6 @@ pub const Tag = enum {
    rem,
    return_address,
    select,
    set_align_stack,
    set_eval_branch_quota,
    set_float_mode,
    set_runtime_safety,
@@ -744,14 +743,6 @@ pub const list = list: {
            .param_count = 4,
        },
    },
    .{
        "@setAlignStack",
        .{
            .tag = .set_align_stack,
            .param_count = 1,
            .illegal_outside_function = true,
        },
    },
    .{
        "@setEvalBranchQuota",
        .{

@@ -1982,9 +1982,6 @@ pub const Inst = struct {
    /// Implement builtin `@setFloatMode`.
    /// `operand` is payload index to `UnNode`.
    set_float_mode,
    /// Implement builtin `@setAlignStack`.
    /// `operand` is payload index to `UnNode`.
    set_align_stack,
    /// Implements the `@errorCast` builtin.
    /// `operand` is payload index to `BinNode`. `lhs` is dest type, `rhs` is operand.
    error_cast,
@@ -4012,7 +4009,6 @@ fn findDeclsInner(
    .wasm_memory_grow,
    .prefetch,
    .set_float_mode,
    .set_align_stack,
    .error_cast,
    .await_nosuspend,
    .breakpoint,

@@ -265,4 +265,4 @@ pub fn __builtin_mul_overflow(a: anytype, b: anytype, result: *@TypeOf(a, b)) c_int
// It is used in a run-translated-c test and a test-translate-c test to ensure that non-implemented
// builtins are correctly demoted. If you implement __builtin_alloca_with_align, please update the
// run-translated-c test and the test-translate-c test to use a different non-implemented builtin.
// pub fn __builtin_alloca_with_align(size: usize, alignment: usize) callconv(.Inline) *anyopaque {}
// pub inline fn __builtin_alloca_with_align(size: usize, alignment: usize) *anyopaque {}

@@ -107,15 +107,15 @@ test "zig fmt: respect line breaks before functions" {
    );
}

test "zig fmt: rewrite callconv(.Inline) to the inline keyword" {
test "zig fmt: rewrite callconv(.@\"inline\") to the inline keyword" {
    try testTransform(
        \\fn foo() callconv(.Inline) void {}
        \\const bar = .Inline;
        \\fn foo() callconv(.@"inline") void {}
        \\const bar: @import("std").builtin.CallingConvention = .@"inline";
        \\fn foo() callconv(bar) void {}
        \\
    ,
        \\inline fn foo() void {}
        \\const bar = .Inline;
        \\const bar: @import("std").builtin.CallingConvention = .@"inline";
        \\fn foo() callconv(bar) void {}
        \\
    );
@@ -3062,7 +3062,7 @@ test "zig fmt: functions" {
        \\pub export fn puts(s: *const u8) align(2 + 2) c_int;
        \\pub inline fn puts(s: *const u8) align(2 + 2) c_int;
        \\pub noinline fn puts(s: *const u8) align(2 + 2) c_int;
        \\pub fn callInlineFn(func: fn () callconv(.Inline) void) void {
        \\pub fn callInlineFn(func: fn () callconv(.@"inline") void) void {
        \\    func();
        \\}
        \\

@@ -184,8 +184,9 @@ fn renderMember(
        tree.extraData(datas[fn_proto].lhs, Ast.Node.FnProtoOne).callconv_expr
    else
        tree.extraData(datas[fn_proto].lhs, Ast.Node.FnProto).callconv_expr;
    // Keep in sync with logic in `renderFnProto`. Search this file for the marker PROMOTE_CALLCONV_INLINE
    if (callconv_expr != 0 and tree.nodes.items(.tag)[callconv_expr] == .enum_literal) {
        if (mem.eql(u8, "Inline", tree.tokenSlice(main_tokens[callconv_expr]))) {
        if (mem.eql(u8, "@\"inline\"", tree.tokenSlice(main_tokens[callconv_expr]))) {
            try ais.writer().writeAll("inline ");
        }
    }
@@ -1839,7 +1840,8 @@ fn renderFnProto(r: *Render, fn_proto: Ast.full.FnProto, space: Space) Error!void
    try renderToken(r, section_rparen, .space); // )
}

const is_callconv_inline = mem.eql(u8, "Inline", tree.tokenSlice(tree.nodes.items(.main_token)[fn_proto.ast.callconv_expr]));
// Keep in sync with logic in `renderMember`. Search this file for the marker PROMOTE_CALLCONV_INLINE
const is_callconv_inline = mem.eql(u8, "@\"inline\"", tree.tokenSlice(tree.nodes.items(.main_token)[fn_proto.ast.callconv_expr]));
const is_declaration = fn_proto.name_token != null;
if (fn_proto.ast.callconv_expr != 0 and !(is_declaration and is_callconv_inline)) {
    const callconv_lparen = tree.firstToken(fn_proto.ast.callconv_expr) - 1;

@@ -2011,10 +2011,10 @@ pub const Key = union(enum) {
        a.return_type == b.return_type and
        a.comptime_bits == b.comptime_bits and
        a.noalias_bits == b.noalias_bits and
        a.cc == b.cc and
        a.is_var_args == b.is_var_args and
        a.is_generic == b.is_generic and
        a.is_noinline == b.is_noinline;
        a.is_noinline == b.is_noinline and
        std.meta.eql(a.cc, b.cc);
}

pub fn hash(self: FuncType, hasher: *Hash, ip: *const InternPool) void {
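The switch from `a.cc == b.cc` to `std.meta.eql` is forced by the type change: `==` is not defined for unions, and equality must account for payloads. A small sketch of the distinction:

```zig
const std = @import("std");
const Cc = std.builtin.CallingConvention;

test "payloads participate in equality" {
    const a: Cc = .{ .x86_64_sysv = .{} };
    const b: Cc = .{ .x86_64_sysv = .{ .incoming_stack_alignment = 16 } };
    try std.testing.expect(!std.meta.eql(a, b)); // same tag, different payload
}
```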
@@ -5444,7 +5444,7 @@ pub const Tag = enum(u8) {
        flags: Flags,

        pub const Flags = packed struct(u32) {
            cc: std.builtin.CallingConvention,
            cc: PackedCallingConvention,
            is_var_args: bool,
            is_generic: bool,
            has_comptime_bits: bool,
@@ -5453,7 +5453,7 @@ pub const Tag = enum(u8) {
            cc_is_generic: bool,
            section_is_generic: bool,
            addrspace_is_generic: bool,
            _: u16 = 0,
            _: u6 = 0,
        };
    };

@@ -5618,12 +5618,11 @@ pub const FuncAnalysis = packed struct(u32) {
    branch_hint: std.builtin.BranchHint,
    is_noinline: bool,
    calls_or_awaits_errorable_fn: bool,
    stack_alignment: Alignment,
    /// True if this function has an inferred error set.
    inferred_error_set: bool,
    disable_instrumentation: bool,

    _: u17 = 0,
    _: u23 = 0,

    pub const State = enum(u2) {
        /// The runtime function has never been referenced.
@@ -6912,7 +6911,7 @@ fn extraFuncType(tid: Zcu.PerThread.Id, extra: Local.Extra, extra_index: u32) Key
    .return_type = type_function.data.return_type,
    .comptime_bits = comptime_bits,
    .noalias_bits = noalias_bits,
    .cc = type_function.data.flags.cc,
    .cc = type_function.data.flags.cc.unpack(),
    .is_var_args = type_function.data.flags.is_var_args,
    .is_noinline = type_function.data.flags.is_noinline,
    .cc_is_generic = type_function.data.flags.cc_is_generic,
@@ -8526,7 +8525,7 @@ pub const GetFuncTypeKey = struct {
    comptime_bits: u32 = 0,
    noalias_bits: u32 = 0,
    /// `null` means generic.
    cc: ?std.builtin.CallingConvention = .Unspecified,
    cc: ?std.builtin.CallingConvention = .auto,
    is_var_args: bool = false,
    is_generic: bool = false,
    is_noinline: bool = false,
@@ -8564,7 +8563,7 @@ pub fn getFuncType(
    .params_len = params_len,
    .return_type = key.return_type,
    .flags = .{
        .cc = key.cc orelse .Unspecified,
        .cc = .pack(key.cc orelse .auto),
        .is_var_args = key.is_var_args,
        .has_comptime_bits = key.comptime_bits != 0,
        .has_noalias_bits = key.noalias_bits != 0,
@@ -8696,7 +8695,6 @@ pub fn getFuncDecl(
    .branch_hint = .none,
    .is_noinline = key.is_noinline,
    .calls_or_awaits_errorable_fn = false,
    .stack_alignment = .none,
    .inferred_error_set = false,
    .disable_instrumentation = false,
},
@@ -8800,7 +8798,6 @@ pub fn getFuncDeclIes(
    .branch_hint = .none,
    .is_noinline = key.is_noinline,
    .calls_or_awaits_errorable_fn = false,
    .stack_alignment = .none,
    .inferred_error_set = true,
    .disable_instrumentation = false,
},
@@ -8818,7 +8815,7 @@ pub fn getFuncDeclIes(
    .params_len = params_len,
    .return_type = error_union_type,
    .flags = .{
        .cc = key.cc orelse .Unspecified,
        .cc = .pack(key.cc orelse .auto),
        .is_var_args = key.is_var_args,
        .has_comptime_bits = key.comptime_bits != 0,
        .has_noalias_bits = key.noalias_bits != 0,
@@ -8992,7 +8989,6 @@ pub fn getFuncInstance(
    .branch_hint = .none,
    .is_noinline = arg.is_noinline,
    .calls_or_awaits_errorable_fn = false,
    .stack_alignment = .none,
    .inferred_error_set = false,
    .disable_instrumentation = false,
},
@@ -9092,7 +9088,6 @@ pub fn getFuncInstanceIes(
    .branch_hint = .none,
    .is_noinline = arg.is_noinline,
    .calls_or_awaits_errorable_fn = false,
    .stack_alignment = .none,
    .inferred_error_set = true,
    .disable_instrumentation = false,
},
@@ -9110,7 +9105,7 @@ pub fn getFuncInstanceIes(
    .params_len = params_len,
    .return_type = error_union_type,
    .flags = .{
        .cc = arg.cc,
        .cc = .pack(arg.cc),
        .is_var_args = false,
        .has_comptime_bits = false,
        .has_noalias_bits = arg.noalias_bits != 0,
@@ -11871,21 +11866,6 @@ pub fn funcAnalysisUnordered(ip: *const InternPool, func: Index) FuncAnalysis {
    return @atomicLoad(FuncAnalysis, @constCast(ip).funcAnalysisPtr(func), .unordered);
}

pub fn funcMaxStackAlignment(ip: *InternPool, func: Index, new_stack_alignment: Alignment) void {
    const unwrapped_func = func.unwrap(ip);
    const extra_mutex = &ip.getLocal(unwrapped_func.tid).mutate.extra.mutex;
    extra_mutex.lock();
    defer extra_mutex.unlock();

    const analysis_ptr = ip.funcAnalysisPtr(func);
    var analysis = analysis_ptr.*;
    analysis.stack_alignment = switch (analysis.stack_alignment) {
        .none => new_stack_alignment,
        else => |old_stack_alignment| old_stack_alignment.maxStrict(new_stack_alignment),
    };
    @atomicStore(FuncAnalysis, analysis_ptr, analysis, .release);
}

pub fn funcSetCallsOrAwaitsErrorableFn(ip: *InternPool, func: Index) void {
    const unwrapped_func = func.unwrap(ip);
    const extra_mutex = &ip.getLocal(unwrapped_func.tid).mutate.extra.mutex;
@@ -12224,3 +12204,81 @@ pub fn getErrorValue(
pub fn getErrorValueIfExists(ip: *const InternPool, name: NullTerminatedString) ?Zcu.ErrorInt {
    return @intFromEnum(ip.global_error_set.getErrorValueIfExists(name) orelse return null);
}

const PackedCallingConvention = packed struct(u18) {
    tag: std.builtin.CallingConvention.Tag,
    /// May be ignored depending on `tag`.
    incoming_stack_alignment: Alignment,
    /// Interpretation depends on `tag`.
    extra: u4,

    fn pack(cc: std.builtin.CallingConvention) PackedCallingConvention {
        return switch (cc) {
            inline else => |pl, tag| switch (@TypeOf(pl)) {
                void => .{
                    .tag = tag,
                    .incoming_stack_alignment = .none, // unused
                    .extra = 0, // unused
                },
                std.builtin.CallingConvention.CommonOptions => .{
                    .tag = tag,
                    .incoming_stack_alignment = .fromByteUnits(pl.incoming_stack_alignment orelse 0),
                    .extra = 0, // unused
                },
                std.builtin.CallingConvention.X86RegparmOptions => .{
                    .tag = tag,
                    .incoming_stack_alignment = .fromByteUnits(pl.incoming_stack_alignment orelse 0),
                    .extra = pl.register_params,
                },
                std.builtin.CallingConvention.ArmInterruptOptions => .{
                    .tag = tag,
                    .incoming_stack_alignment = .fromByteUnits(pl.incoming_stack_alignment orelse 0),
                    .extra = @intFromEnum(pl.type),
                },
                std.builtin.CallingConvention.MipsInterruptOptions => .{
                    .tag = tag,
                    .incoming_stack_alignment = .fromByteUnits(pl.incoming_stack_alignment orelse 0),
                    .extra = @intFromEnum(pl.mode),
                },
                std.builtin.CallingConvention.RiscvInterruptOptions => .{
                    .tag = tag,
                    .incoming_stack_alignment = .fromByteUnits(pl.incoming_stack_alignment orelse 0),
                    .extra = @intFromEnum(pl.mode),
                },
                else => comptime unreachable,
            },
        };
    }

    fn unpack(cc: PackedCallingConvention) std.builtin.CallingConvention {
        return switch (cc.tag) {
            inline else => |tag| @unionInit(
                std.builtin.CallingConvention,
                @tagName(tag),
                switch (@FieldType(std.builtin.CallingConvention, @tagName(tag))) {
                    void => {},
                    std.builtin.CallingConvention.CommonOptions => .{
                        .incoming_stack_alignment = cc.incoming_stack_alignment.toByteUnits(),
                    },
                    std.builtin.CallingConvention.X86RegparmOptions => .{
                        .incoming_stack_alignment = cc.incoming_stack_alignment.toByteUnits(),
                        .register_params = @intCast(cc.extra),
                    },
                    std.builtin.CallingConvention.ArmInterruptOptions => .{
                        .incoming_stack_alignment = cc.incoming_stack_alignment.toByteUnits(),
                        .type = @enumFromInt(cc.extra),
                    },
                    std.builtin.CallingConvention.MipsInterruptOptions => .{
                        .incoming_stack_alignment = cc.incoming_stack_alignment.toByteUnits(),
                        .mode = @enumFromInt(cc.extra),
                    },
                    std.builtin.CallingConvention.RiscvInterruptOptions => .{
                        .incoming_stack_alignment = cc.incoming_stack_alignment.toByteUnits(),
                        .mode = @enumFromInt(cc.extra),
                    },
                    else => comptime unreachable,
                },
            ),
        };
    }
};
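`PackedCallingConvention` exists so the full union fits back into the 32-bit `Flags` word above: the tag, a log2-encoded `Alignment` (where `.none` stands for the default/null alignment), and four tag-dependent payload bits together occupy 18 bits. Conceptually, `pack` and `unpack` maintain a round-trip; the following test is a sketch only, since both functions are private and it would have to live inside InternPool.zig:

```zig
test PackedCallingConvention {
    const cc: std.builtin.CallingConvention = .{
        .x86_64_sysv = .{ .incoming_stack_alignment = 16 },
    };
    // Alignment is stored log2-encoded; `null` round-trips through `.none`.
    try std.testing.expect(std.meta.eql(PackedCallingConvention.pack(cc).unpack(), cc));
}
```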
src/Sema.zig (397 lines changed)
@@ -26,7 +26,7 @@ owner: AnalUnit,
/// in the case of an inline or comptime function call.
/// This could be `none`, a `func_decl`, or a `func_instance`.
func_index: InternPool.Index,
/// Whether the type of func_index has a calling convention of `.Naked`.
/// Whether the type of func_index has a calling convention of `.naked`.
func_is_naked: bool,
/// Used to restore the error return trace when returning a non-error from a function.
error_return_trace_index_on_fn_entry: Air.Inst.Ref = .none,
@@ -1326,11 +1326,6 @@ fn analyzeBodyInner(
                i += 1;
                continue;
            },
            .set_align_stack => {
                try sema.zirSetAlignStack(block, extended);
                i += 1;
                continue;
            },
            .breakpoint => {
                if (!block.is_comptime) {
                    _ = try block.addNoOp(.breakpoint);
@@ -1355,7 +1350,7 @@ fn analyzeBodyInner(
            },
            .value_placeholder => unreachable, // never appears in a body
            .field_parent_ptr => try sema.zirFieldParentPtr(block, extended),
            .builtin_value => try sema.zirBuiltinValue(extended),
            .builtin_value => try sema.zirBuiltinValue(block, extended),
            .inplace_arith_result_ty => try sema.zirInplaceArithResultTy(extended),
        };
    },
@@ -2698,6 +2693,20 @@ fn analyzeAsInt(
    return try val.toUnsignedIntSema(sema.pt);
}

fn analyzeValueAsCallconv(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    unresolved_val: Value,
) !std.builtin.CallingConvention {
    const resolved_val = try sema.resolveLazyValue(unresolved_val);
    return resolved_val.interpret(std.builtin.CallingConvention, sema.pt) catch |err| switch (err) {
        error.OutOfMemory => |e| return e,
        error.UndefinedValue => return sema.failWithUseOfUndef(block, src),
        error.TypeMismatch => @panic("std.builtin is corrupt"),
    };
}

/// Given a ZIR extra index which points to a list of `Zir.Inst.Capture`,
/// resolves this into a list of `InternPool.CaptureValue` allocated by `arena`.
fn getCaptures(sema: *Sema, block: *Block, type_src: LazySrcLoc, extra_index: usize, captures_len: u32) ![]InternPool.CaptureValue {
@@ -6496,35 +6505,6 @@ pub fn analyzeExport(
    });
}

fn zirSetAlignStack(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!void {
    const pt = sema.pt;
    const zcu = pt.zcu;
    const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
    const operand_src = block.builtinCallArgSrc(extra.node, 0);
    const src = block.nodeOffset(extra.node);
    const alignment = try sema.resolveAlign(block, operand_src, extra.operand);

    const func = switch (sema.owner.unwrap()) {
        .func => |func| func,
        .cau => return sema.fail(block, src, "@setAlignStack outside of function scope", .{}),
    };

    if (alignment.order(Alignment.fromNonzeroByteUnits(256)).compare(.gt)) {
        return sema.fail(block, src, "attempt to @setAlignStack({d}); maximum is 256", .{
            alignment.toByteUnits().?,
        });
    }

    switch (Value.fromInterned(func).typeOf(zcu).fnCallingConvention(zcu)) {
        .Naked => return sema.fail(block, src, "@setAlignStack in naked function", .{}),
        .Inline => return sema.fail(block, src, "@setAlignStack in inline function", .{}),
        else => {},
    }

    zcu.intern_pool.funcMaxStackAlignment(sema.func_index, alignment);
    sema.allow_memoize = false;
}

fn zirDisableInstrumentation(sema: *Sema) CompileError!void {
    const pt = sema.pt;
    const zcu = pt.zcu;
@@ -7554,7 +7534,7 @@ fn analyzeCall(
    if (try sema.resolveValue(func)) |func_val|
        if (func_val.isUndef(zcu))
            return sema.failWithUseOfUndef(block, call_src);
    if (cc == .Naked) {
    if (cc == .naked) {
        const maybe_func_inst = try sema.funcDeclSrcInst(func);
        const msg = msg: {
            const msg = try sema.errMsg(
@@ -7587,7 +7567,7 @@ fn analyzeCall(
        .async_kw => return sema.failWithUseOfAsync(block, call_src),
    };

    if (modifier == .never_inline and func_ty_info.cc == .Inline) {
    if (modifier == .never_inline and func_ty_info.cc == .@"inline") {
        return sema.fail(block, call_src, "'never_inline' call of inline function", .{});
    }
    if (modifier == .always_inline and func_ty_info.is_noinline) {
@@ -7598,7 +7578,7 @@ fn analyzeCall(

    const is_generic_call = func_ty_info.is_generic;
    var is_comptime_call = block.is_comptime or modifier == .compile_time;
    var is_inline_call = is_comptime_call or modifier == .always_inline or func_ty_info.cc == .Inline;
    var is_inline_call = is_comptime_call or modifier == .always_inline or func_ty_info.cc == .@"inline";
    var comptime_reason: ?*const Block.ComptimeReason = null;
    if (!is_inline_call and !is_comptime_call) {
        if (try Type.fromInterned(func_ty_info.return_type).comptimeOnlySema(pt)) {
@@ -8455,7 +8435,7 @@ fn instantiateGenericCall(
    }
    // Similarly, if the call evaluated to a generic type we need to instead
    // call it inline.
    if (func_ty_info.is_generic or func_ty_info.cc == .Inline) {
    if (func_ty_info.is_generic or func_ty_info.cc == .@"inline") {
        return error.GenericPoison;
    }

@@ -9505,7 +9485,7 @@ fn zirFunc(

    // If this instruction has a body, then it's a function declaration, and we decide
    // the callconv based on whether it is exported. Otherwise, the callconv defaults
    // to `.Unspecified`.
    // to `.auto`.
    const cc: std.builtin.CallingConvention = if (has_body) cc: {
        const func_decl_cau = if (sema.generic_owner != .none) cau: {
            const generic_owner_fn = zcu.funcInfo(sema.generic_owner);
@@ -9518,8 +9498,26 @@ fn zirFunc(
            const zir_decl = sema.code.getDeclaration(decl_inst)[0];
            break :exported zir_decl.flags.is_export;
        };
        break :cc if (fn_is_exported) .C else .Unspecified;
    } else .Unspecified;
        if (fn_is_exported) {
            break :cc target.cCallingConvention() orelse {
                // This target has no default C calling convention. We sometimes trigger a similar
                // error by trying to evaluate `std.builtin.CallingConvention.c`, so for consistency,
                // let's eval that now and just get the transitive error. (It's guaranteed to error
                // because it does the exact `cCallingConvention` call we just did.)
                const cc_type = try sema.getBuiltinType("CallingConvention");
                _ = try sema.namespaceLookupVal(
                    block,
                    LazySrcLoc.unneeded,
                    cc_type.getNamespaceIndex(zcu),
                    try ip.getOrPutString(sema.gpa, pt.tid, "c", .no_embedded_nulls),
                );
                // The above should have errored.
                @panic("std.builtin is corrupt");
            };
        } else {
            break :cc .auto;
        }
    } else .auto;

    return sema.funcCommon(
        block,
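The user-visible consequence: a plain `fn` still defaults to `.auto`, while `export fn` now defaults to the target's concrete C convention, and targets without one reject `export` with the same error as evaluating `CallingConvention.c`. A sketch:

```zig
// Defaults to `.auto`: Zig-internal, no ABI guarantees.
fn private() void {}

// Defaults to the target C convention, e.g. `.{ .x86_64_sysv = .{} }`
// on x86_64-linux; errors on targets with no C ABI (e.g. spu_2).
export fn public() void {}
```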
@@ -9654,35 +9652,91 @@ fn handleExternLibName(
/// These are calling conventions that are confirmed to work with variadic functions.
/// Any calling conventions not included here are either not yet verified to work with variadic
/// functions or there are no more other calling conventions that support variadic functions.
const calling_conventions_supporting_var_args = [_]std.builtin.CallingConvention{
    .C,
const calling_conventions_supporting_var_args = [_]std.builtin.CallingConvention.Tag{
    .x86_64_sysv,
    .x86_64_win,
    .x86_sysv,
    .x86_win,
    .aarch64_aapcs,
    .aarch64_aapcs_darwin,
    .aarch64_aapcs_win,
    .aarch64_vfabi,
    .aarch64_vfabi_sve,
    .arm_apcs,
    .arm_aapcs,
    .arm_aapcs_vfp,
    .arm_aapcs16_vfp,
    .mips64_n64,
    .mips64_n32,
    .mips_o32,
    .riscv64_lp64,
    .riscv64_lp64_v,
    .riscv32_ilp32,
    .riscv32_ilp32_v,
    .sparc64_sysv,
    .sparc_sysv,
    .powerpc64_elf,
    .powerpc64_elf_altivec,
    .powerpc64_elf_v2,
    .powerpc_sysv,
    .powerpc_sysv_altivec,
    .powerpc_aix,
    .powerpc_aix_altivec,
    .wasm_watc,
    .arc_sysv,
    .avr_gnu,
    .bpf_std,
    .csky_sysv,
    .hexagon_sysv,
    .hexagon_sysv_hvx,
    .lanai_sysv,
    .loongarch64_lp64,
    .loongarch32_ilp32,
    .m68k_sysv,
    .m68k_gnu,
    .m68k_rtd,
    .msp430_eabi,
    .s390x_sysv,
    .s390x_sysv_vx,
    .ve_sysv,
    .xcore_xs1,
    .xcore_xs2,
    .xtensa_call0,
    .xtensa_windowed,
};
fn callConvSupportsVarArgs(cc: std.builtin.CallingConvention) bool {
fn callConvSupportsVarArgs(cc: std.builtin.CallingConvention.Tag) bool {
    return for (calling_conventions_supporting_var_args) |supported_cc| {
        if (cc == supported_cc) return true;
    } else false;
}
fn checkCallConvSupportsVarArgs(sema: *Sema, block: *Block, src: LazySrcLoc, cc: std.builtin.CallingConvention) CompileError!void {
fn checkCallConvSupportsVarArgs(sema: *Sema, block: *Block, src: LazySrcLoc, cc: std.builtin.CallingConvention.Tag) CompileError!void {
    const CallingConventionsSupportingVarArgsList = struct {
        pub fn format(_: @This(), comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void {
        arch: std.Target.Cpu.Arch,
        pub fn format(ctx: @This(), comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void {
            _ = fmt;
            _ = options;
            for (calling_conventions_supporting_var_args, 0..) |cc_inner, i| {
                if (i != 0)
            var first = true;
            for (calling_conventions_supporting_var_args) |cc_inner| {
                for (std.Target.Cpu.Arch.fromCallingConvention(cc_inner)) |supported_arch| {
                    if (supported_arch == ctx.arch) break;
                } else continue; // callconv not supported by this arch
                if (!first) {
                    try writer.writeAll(", ");
                    try writer.print("'.{s}'", .{@tagName(cc_inner)});
                }
                first = false;
                try writer.print("'{s}'", .{@tagName(cc_inner)});
            }
        }
    };

    if (!callConvSupportsVarArgs(cc)) {
        const msg = msg: {
            const msg = try sema.errMsg(src, "variadic function does not support '.{s}' calling convention", .{@tagName(cc)});
        return sema.failWithOwnedErrorMsg(block, msg: {
            const msg = try sema.errMsg(src, "variadic function does not support '{s}' calling convention", .{@tagName(cc)});
            errdefer msg.destroy(sema.gpa);
            try sema.errNote(src, msg, "supported calling conventions: {}", .{CallingConventionsSupportingVarArgsList{}});
            const target = sema.pt.zcu.getTarget();
            try sema.errNote(src, msg, "supported calling conventions: {}", .{CallingConventionsSupportingVarArgsList{ .arch = target.cpu.arch }});
            break :msg msg;
        };
        return sema.failWithOwnedErrorMsg(block, msg);
        });
    }
}

@ -9743,7 +9797,7 @@ fn funcCommon(
|
||||
// default values which are only meaningful for the generic function, *not*
|
||||
// the instantiation, which can depend on comptime parameters.
|
||||
// Related proposal: https://github.com/ziglang/zig/issues/11834
|
||||
const cc_resolved = cc orelse .Unspecified;
|
||||
const cc_resolved = cc orelse .auto;
|
||||
var comptime_bits: u32 = 0;
|
||||
for (block.params.items(.ty), block.params.items(.is_comptime), 0..) |param_ty_ip, param_is_comptime, i| {
|
||||
const param_ty = Type.fromInterned(param_ty_ip);
|
||||
@ -9761,10 +9815,10 @@ fn funcCommon(
|
||||
}
|
||||
const this_generic = param_ty.isGenericPoison();
|
||||
is_generic = is_generic or this_generic;
|
||||
if (param_is_comptime and !target_util.fnCallConvAllowsZigTypes(target, cc_resolved)) {
|
||||
if (param_is_comptime and !target_util.fnCallConvAllowsZigTypes(cc_resolved)) {
|
||||
return sema.fail(block, param_src, "comptime parameters not allowed in function with calling convention '{s}'", .{@tagName(cc_resolved)});
|
||||
}
|
||||
if (this_generic and !sema.no_partial_func_ty and !target_util.fnCallConvAllowsZigTypes(target, cc_resolved)) {
|
||||
if (this_generic and !sema.no_partial_func_ty and !target_util.fnCallConvAllowsZigTypes(cc_resolved)) {
|
||||
return sema.fail(block, param_src, "generic parameters not allowed in function with calling convention '{s}'", .{@tagName(cc_resolved)});
|
||||
}
|
||||
if (!param_ty.isValidParamType(zcu)) {
|
||||
@ -9773,7 +9827,7 @@ fn funcCommon(
|
||||
opaque_str, param_ty.fmt(pt),
|
||||
});
|
||||
}
|
||||
if (!this_generic and !target_util.fnCallConvAllowsZigTypes(target, cc_resolved) and !try sema.validateExternType(param_ty, .param_ty)) {
|
||||
if (!this_generic and !target_util.fnCallConvAllowsZigTypes(cc_resolved) and !try sema.validateExternType(param_ty, .param_ty)) {
|
||||
const msg = msg: {
|
||||
const msg = try sema.errMsg(param_src, "parameter of type '{}' not allowed in function with calling convention '{s}'", .{
|
||||
param_ty.fmt(pt), @tagName(cc_resolved),
|
||||
@ -9807,15 +9861,24 @@ fn funcCommon(
|
||||
return sema.fail(block, param_src, "non-pointer parameter declared noalias", .{});
|
||||
}
|
||||
switch (cc_resolved) {
|
||||
.Interrupt => if (target.cpu.arch.isX86()) {
|
||||
.x86_64_interrupt, .x86_interrupt => {
|
||||
const err_code_size = target.ptrBitWidth();
|
||||
switch (i) {
|
||||
0 => if (param_ty.zigTypeTag(zcu) != .pointer) return sema.fail(block, param_src, "first parameter of function with 'Interrupt' calling convention must be a pointer type", .{}),
|
||||
1 => if (param_ty.bitSize(zcu) != err_code_size) return sema.fail(block, param_src, "second parameter of function with 'Interrupt' calling convention must be a {d}-bit integer", .{err_code_size}),
|
||||
else => return sema.fail(block, param_src, "'Interrupt' calling convention supports up to 2 parameters, found {d}", .{i + 1}),
|
||||
0 => if (param_ty.zigTypeTag(zcu) != .pointer) return sema.fail(block, param_src, "first parameter of function with '{s}' calling convention must be a pointer type", .{@tagName(cc_resolved)}),
|
||||
1 => if (param_ty.bitSize(zcu) != err_code_size) return sema.fail(block, param_src, "second parameter of function with '{s}' calling convention must be a {d}-bit integer", .{ @tagName(cc_resolved), err_code_size }),
|
||||
else => return sema.fail(block, param_src, "'{s}' calling convention supports up to 2 parameters, found {d}", .{ @tagName(cc_resolved), i + 1 }),
|
||||
}
|
||||
} else return sema.fail(block, param_src, "parameters are not allowed with 'Interrupt' calling convention", .{}),
|
||||
.Signal => return sema.fail(block, param_src, "parameters are not allowed with 'Signal' calling convention", .{}),
|
||||
},
|
||||
.arm_interrupt,
|
||||
.mips64_interrupt,
|
||||
.mips_interrupt,
|
||||
.riscv64_interrupt,
|
||||
.riscv32_interrupt,
|
||||
.avr_interrupt,
|
||||
.csky_interrupt,
|
||||
.m68k_interrupt,
|
||||
.avr_signal,
|
||||
=> return sema.fail(block, param_src, "parameters are not allowed with '{s}' calling convention", .{@tagName(cc_resolved)}),
|
||||
else => {},
|
||||
}
|
||||
}
|
||||
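// A hedged sketch of a function satisfying the parameter rules enforced above
// for `x86_64_interrupt` (names here are illustrative, not from this commit):
// first parameter a pointer, optional second parameter a pointer-width integer.
const InterruptFrame = extern struct { ip: u64, cs: u64, flags: u64, sp: u64, ss: u64 };
fn pageFaultHandler(frame: *InterruptFrame, error_code: u64) callconv(.{ .x86_64_interrupt = .{} }) void {
    _ = frame;
    _ = error_code;
}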
@ -10064,7 +10127,6 @@ fn finishFunc(
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const gpa = sema.gpa;
const target = zcu.getTarget();

const return_type: Type = if (opt_func_index == .none or ret_poison)
bare_return_type
@ -10077,7 +10139,7 @@ fn finishFunc(
opaque_str, return_type.fmt(pt),
});
}
if (!ret_poison and !target_util.fnCallConvAllowsZigTypes(target, cc_resolved) and
if (!ret_poison and !target_util.fnCallConvAllowsZigTypes(cc_resolved) and
!try sema.validateExternType(return_type, .ret_ty))
{
const msg = msg: {
@ -10133,57 +10195,63 @@ fn finishFunc(
return sema.failWithOwnedErrorMsg(block, msg);
}

validate_incoming_stack_align: {
const a: u64 = switch (cc_resolved) {
inline else => |payload| if (@TypeOf(payload) != void and @hasField(@TypeOf(payload), "incoming_stack_alignment"))
payload.incoming_stack_alignment orelse break :validate_incoming_stack_align
else
break :validate_incoming_stack_align,
};
if (!std.math.isPowerOfTwo(a)) {
return sema.fail(block, cc_src, "calling convention incoming stack alignment '{d}' is not a power of two", .{a});
}
}

switch (cc_resolved) {
.Interrupt, .Signal => if (return_type.zigTypeTag(zcu) != .void and return_type.zigTypeTag(zcu) != .noreturn) {
.x86_64_interrupt,
.x86_interrupt,
.arm_interrupt,
.mips64_interrupt,
.mips_interrupt,
.riscv64_interrupt,
.riscv32_interrupt,
.avr_interrupt,
.csky_interrupt,
.m68k_interrupt,
.avr_signal,
=> if (return_type.zigTypeTag(zcu) != .void and return_type.zigTypeTag(zcu) != .noreturn) {
return sema.fail(block, ret_ty_src, "function with calling convention '{s}' must return 'void' or 'noreturn'", .{@tagName(cc_resolved)});
},
.Inline => if (is_noinline) {
return sema.fail(block, cc_src, "'noinline' function cannot have callconv 'Inline'", .{});
.@"inline" => if (is_noinline) {
return sema.fail(block, cc_src, "'noinline' function cannot have calling convention 'inline'", .{});
},
else => {},
}

const arch = target.cpu.arch;
if (@as(?[]const u8, switch (cc_resolved) {
.Unspecified, .C, .Naked, .Async, .Inline => null,
.Interrupt => switch (arch) {
.x86, .x86_64, .avr, .msp430 => null,
else => "x86, x86_64, AVR, and MSP430",
switch (zcu.callconvSupported(cc_resolved)) {
.ok => {},
.bad_arch => |allowed_archs| {
const ArchListFormatter = struct {
archs: []const std.Target.Cpu.Arch,
pub fn format(formatter: @This(), comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void {
_ = fmt;
_ = options;
for (formatter.archs, 0..) |arch, i| {
if (i != 0)
try writer.writeAll(", ");
try writer.print("'{s}'", .{@tagName(arch)});
}
}
};
return sema.fail(block, cc_src, "calling convention '{s}' only available on architectures {}", .{
@tagName(cc_resolved),
ArchListFormatter{ .archs = allowed_archs },
});
},
.Signal => switch (arch) {
.avr => null,
else => "AVR",
},
.Stdcall, .Fastcall, .Thiscall => switch (arch) {
.x86 => null,
else => "x86",
},
.Vectorcall => switch (arch) {
.x86, .aarch64, .aarch64_be => null,
else => "x86 and AArch64",
},
.APCS, .AAPCS, .AAPCSVFP => switch (arch) {
.arm, .armeb, .aarch64, .aarch64_be, .thumb, .thumbeb => null,
else => "ARM",
},
.SysV, .Win64 => switch (arch) {
.x86_64 => null,
else => "x86_64",
},
.Kernel => switch (arch) {
.nvptx, .nvptx64, .amdgcn, .spirv, .spirv32, .spirv64 => null,
else => "nvptx, amdgcn and SPIR-V",
},
.Fragment, .Vertex => switch (arch) {
.spirv, .spirv32, .spirv64 => null,
else => "SPIR-V",
},
})) |allowed_platform| {
return sema.fail(block, cc_src, "callconv '{s}' is only available on {s}, not {s}", .{
.bad_backend => |bad_backend| return sema.fail(block, cc_src, "calling convention '{s}' not supported by compiler backend '{s}'", .{
@tagName(cc_resolved),
allowed_platform,
@tagName(arch),
});
@tagName(bad_backend),
}),
}

if (is_generic and sema.no_partial_func_ty) return error.GenericPoison;
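// Illustrative diagnostics (assumed wording, composed from the format strings
// above; not copied from a real compile). A declaration such as:
//
//   export fn f() callconv(.{ .x86_stdcall = .{} }) void {}
//
// on an aarch64 target would now report something like:
//
//   error: calling convention 'x86_stdcall' only available on architectures 'x86'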
@ -18342,10 +18410,14 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
} });

const callconv_ty = try sema.getBuiltinType("CallingConvention");
const callconv_val = Value.uninterpret(func_ty_info.cc, callconv_ty, pt) catch |err| switch (err) {
error.TypeMismatch => @panic("std.builtin is corrupt"),
error.OutOfMemory => |e| return e,
};

const field_values = .{
const field_values: [5]InternPool.Index = .{
// calling_convention: CallingConvention,
(try pt.enumValueFieldIndex(callconv_ty, @intFromEnum(func_ty_info.cc))).toIntern(),
callconv_val.toIntern(),
// is_generic: bool,
Value.makeBool(func_ty_info.is_generic).toIntern(),
// is_var_args: bool,
@ -22171,7 +22243,7 @@ fn zirReify(
}

const is_var_args = is_var_args_val.toBool();
const cc = zcu.toEnum(std.builtin.CallingConvention, calling_convention_val);
const cc = try sema.analyzeValueAsCallconv(block, src, calling_convention_val);
if (is_var_args) {
try sema.checkCallConvSupportsVarArgs(block, src, cc);
}
@ -26670,7 +26742,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
if (val.isGenericPoison()) {
break :blk null;
}
break :blk zcu.toEnum(std.builtin.CallingConvention, val);
break :blk try sema.analyzeValueAsCallconv(block, cc_src, val);
} else if (extra.data.bits.has_cc_ref) blk: {
const cc_ref: Zir.Inst.Ref = @enumFromInt(sema.code.extra[extra_index]);
extra_index += 1;
@ -26689,7 +26761,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
error.GenericPoison => break :blk null,
else => |e| return e,
};
break :blk zcu.toEnum(std.builtin.CallingConvention, cc_val);
break :blk try sema.analyzeValueAsCallconv(block, cc_src, cc_val);
} else cc: {
if (has_body) {
const decl_inst = if (sema.generic_owner != .none) decl_inst: {
@ -26705,7 +26777,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
break :cc .C;
}
}
break :cc .Unspecified;
break :cc .auto;
};

const ret_ty: Type = if (extra.data.bits.has_ret_ty_body) blk: {
@ -27132,9 +27204,15 @@ fn zirInComptime(
return if (block.is_comptime) .bool_true else .bool_false;
}

fn zirBuiltinValue(sema: *Sema, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref {
fn zirBuiltinValue(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref {
const pt = sema.pt;
const zcu = pt.zcu;
const gpa = zcu.gpa;
const ip = &zcu.intern_pool;

const src = block.nodeOffset(@bitCast(extended.operand));
const value: Zir.Inst.BuiltinValue = @enumFromInt(extended.small);

const type_name = switch (value) {
.atomic_order => "AtomicOrder",
.atomic_rmw_op => "AtomicRmwOp",
@ -27152,21 +27230,25 @@ fn zirBuiltinValue(sema: *Sema, extended: Zir.Inst.Extended.InstData) CompileErr
// Values are handled here.
.calling_convention_c => {
const callconv_ty = try sema.getBuiltinType("CallingConvention");
comptime assert(@intFromEnum(std.builtin.CallingConvention.C) == 1);
const val = try pt.intern(.{ .enum_tag = .{
.ty = callconv_ty.toIntern(),
.int = .one_u8,
} });
return Air.internedToRef(val);
return try sema.namespaceLookupVal(
block,
src,
callconv_ty.getNamespaceIndex(zcu),
try ip.getOrPutString(gpa, pt.tid, "c", .no_embedded_nulls),
) orelse @panic("std.builtin is corrupt");
},
.calling_convention_inline => {
comptime assert(@typeInfo(std.builtin.CallingConvention.Tag).@"enum".tag_type == u8);
const callconv_ty = try sema.getBuiltinType("CallingConvention");
comptime assert(@intFromEnum(std.builtin.CallingConvention.Inline) == 4);
const val = try pt.intern(.{ .enum_tag = .{
.ty = callconv_ty.toIntern(),
.int = .four_u8,
} });
return Air.internedToRef(val);
const callconv_tag_ty = callconv_ty.unionTagType(zcu) orelse @panic("std.builtin is corrupt");
const inline_tag_val = try pt.enumValue(
callconv_tag_ty,
(try pt.intValue(
Type.u8,
@intFromEnum(std.builtin.CallingConvention.@"inline"),
)).toIntern(),
);
return sema.coerce(block, callconv_ty, Air.internedToRef(inline_tag_val.toIntern()), src);
},
};
const ty = try sema.getBuiltinType(type_name);
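// A hedged sketch (not part of this diff) of what the two builtin values above
// now produce: `.calling_convention_c` resolves the `c` declaration on
// `std.builtin.CallingConvention`, and `.calling_convention_inline` coerces the
// `inline` tag into the union.
const std = @import("std");
const cc_c: std.builtin.CallingConvention = .c; // decl lookup; the target's C convention
const cc_inline: std.builtin.CallingConvention = .@"inline"; // void-payload tag coercion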
@ -27353,7 +27435,7 @@ fn explainWhyTypeIsComptimeInner(
try sema.errNote(src_loc, msg, "function is generic", .{});
}
switch (fn_info.cc) {
.Inline => try sema.errNote(src_loc, msg, "function has inline calling convention", .{}),
.@"inline" => try sema.errNote(src_loc, msg, "function has inline calling convention", .{}),
else => {},
}
if (Type.fromInterned(fn_info.return_type).comptimeOnly(zcu)) {
@ -27461,13 +27543,12 @@ fn validateExternType(
},
.@"fn" => {
if (position != .other) return false;
const target = zcu.getTarget();
// For now we want to authorize PTX kernel to use zig objects, even if we end up exposing the ABI.
// The goal is to experiment with more integrated CPU/GPU code.
if (ty.fnCallingConvention(zcu) == .Kernel and (target.cpu.arch == .nvptx or target.cpu.arch == .nvptx64)) {
if (ty.fnCallingConvention(zcu) == .nvptx_kernel) {
return true;
}
return !target_util.fnCallConvAllowsZigTypes(target, ty.fnCallingConvention(zcu));
return !target_util.fnCallConvAllowsZigTypes(ty.fnCallingConvention(zcu));
},
.@"enum" => {
return sema.validateExternType(ty.intTagType(zcu), position);
@ -27547,9 +27628,9 @@ fn explainWhyTypeIsNotExtern(
return;
}
switch (ty.fnCallingConvention(zcu)) {
.Unspecified => try sema.errNote(src_loc, msg, "extern function must specify calling convention", .{}),
.Async => try sema.errNote(src_loc, msg, "async function cannot be extern", .{}),
.Inline => try sema.errNote(src_loc, msg, "inline function cannot be extern", .{}),
.auto => try sema.errNote(src_loc, msg, "extern function must specify calling convention", .{}),
.@"async" => try sema.errNote(src_loc, msg, "async function cannot be extern", .{}),
.@"inline" => try sema.errNote(src_loc, msg, "inline function cannot be extern", .{}),
else => return,
}
},
@ -31176,8 +31257,8 @@ fn coerceInMemoryAllowedFns(
return InMemoryCoercionResult{ .fn_generic = dest_info.is_generic };
}

if (dest_info.cc != src_info.cc) {
return InMemoryCoercionResult{ .fn_cc = .{
if (!callconvCoerceAllowed(target, src_info.cc, dest_info.cc)) {
return .{ .fn_cc = .{
.actual = src_info.cc,
.wanted = dest_info.cc,
} };
@ -31250,6 +31331,44 @@ fn coerceInMemoryAllowedFns(
return .ok;
}

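// A hedged user-level sketch (assumption, not from this commit) of what the
// coercion check below permits: function pointer types whose calling
// conventions share a tag may coerce when their optional payload fields are
// compatible, rather than requiring byte-for-byte equal conventions.
const F16 = *const fn () callconv(.{ .x86_64_sysv = .{ .incoming_stack_alignment = 16 } }) void;
const FDefault = *const fn () callconv(.{ .x86_64_sysv = .{} }) void;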
fn callconvCoerceAllowed(
target: std.Target,
src_cc: std.builtin.CallingConvention,
dest_cc: std.builtin.CallingConvention,
) bool {
const Tag = std.builtin.CallingConvention.Tag;
if (@as(Tag, src_cc) != @as(Tag, dest_cc)) return false;

switch (src_cc) {
inline else => |src_data, tag| {
const dest_data = @field(dest_cc, @tagName(tag));
if (@TypeOf(src_data) != void) {
const default_stack_align = target.stackAlignment();
const src_stack_align = src_data.incoming_stack_alignment orelse default_stack_align;
const dest_stack_align = dest_data.incoming_stack_alignment orelse default_stack_align;
if (dest_stack_align < src_stack_align) return false;
}
switch (@TypeOf(src_data)) {
void, std.builtin.CallingConvention.CommonOptions => {},
std.builtin.CallingConvention.X86RegparmOptions => {
if (src_data.register_params != dest_data.register_params) return false;
},
std.builtin.CallingConvention.ArmInterruptOptions => {
if (src_data.type != dest_data.type) return false;
},
std.builtin.CallingConvention.MipsInterruptOptions => {
if (src_data.mode != dest_data.mode) return false;
},
std.builtin.CallingConvention.RiscvInterruptOptions => {
if (src_data.mode != dest_data.mode) return false;
},
else => comptime unreachable,
}
},
}
return true;
}

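// A hedged sketch (not from this commit) of the payload being compared above:
// with `@setAlignStack` gone, a guaranteed incoming stack alignment is spelled
// directly in the calling convention.
export fn lowAlignEntry() callconv(.{ .x86_64_sysv = .{ .incoming_stack_alignment = 8 } }) void {}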
fn coerceInMemoryAllowedPtrs(
sema: *Sema,
block: *Block,
@ -36306,7 +36425,7 @@ fn resolveInferredErrorSet(
// because inline function does not create a new declaration, and the ies has been filled with analyzeCall,
// so here we can simply skip this case.
if (ies_func_info.return_type == .generic_poison_type) {
assert(ies_func_info.cc == .Inline);
assert(ies_func_info.cc == .@"inline");
} else if (ip.errorUnionSet(ies_func_info.return_type) == ies_index) {
if (ies_func_info.is_generic) {
return sema.failWithOwnedErrorMsg(block, msg: {

src/Type.zig
@ -390,10 +390,17 @@ pub fn print(ty: Type, writer: anytype, pt: Zcu.PerThread) @TypeOf(writer).Error
try writer.writeAll("...");
}
try writer.writeAll(") ");
if (fn_info.cc != .Unspecified) {
try writer.writeAll("callconv(.");
try writer.writeAll(@tagName(fn_info.cc));
try writer.writeAll(") ");
if (fn_info.cc != .auto) print_cc: {
if (zcu.getTarget().cCallingConvention()) |ccc| {
if (fn_info.cc.eql(ccc)) {
try writer.writeAll("callconv(.c) ");
break :print_cc;
}
}
switch (fn_info.cc) {
.auto, .@"async", .naked, .@"inline" => try writer.print("callconv(.{}) ", .{std.zig.fmtId(@tagName(fn_info.cc))}),
else => try writer.print("callconv({any}) ", .{fn_info.cc}),
}
}
if (fn_info.return_type == .generic_poison_type) {
try writer.writeAll("anytype");
@ -791,7 +798,7 @@ pub fn fnHasRuntimeBitsInner(
const fn_info = zcu.typeToFunc(ty).?;
if (fn_info.is_generic) return false;
if (fn_info.is_var_args) return true;
if (fn_info.cc == .Inline) return false;
if (fn_info.cc == .@"inline") return false;
return !try Type.fromInterned(fn_info.return_type).comptimeOnlyInner(strat, zcu, tid);
}

src/Value.zig
@ -4490,3 +4490,159 @@ pub fn resolveLazy(
else => return val,
}
}

/// Given a `Value` representing a comptime-known value of type `T`, unwrap it into an actual `T` known to the compiler.
/// This is useful for accessing `std.builtin` structures received from comptime logic.
/// `val` must be fully resolved.
pub fn interpret(val: Value, comptime T: type, pt: Zcu.PerThread) error{ OutOfMemory, UndefinedValue, TypeMismatch }!T {
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const ty = val.typeOf(zcu);
if (ty.zigTypeTag(zcu) != @typeInfo(T)) return error.TypeMismatch;
if (val.isUndef(zcu)) return error.UndefinedValue;

return switch (@typeInfo(T)) {
.type,
.noreturn,
.comptime_float,
.comptime_int,
.undefined,
.null,
.@"fn",
.@"opaque",
.enum_literal,
=> comptime unreachable, // comptime-only or otherwise impossible

.pointer,
.array,
.error_union,
.error_set,
.frame,
.@"anyframe",
.vector,
=> comptime unreachable, // unsupported

.void => {},

.bool => switch (val.toIntern()) {
.bool_false => false,
.bool_true => true,
else => unreachable,
},

.int => switch (ip.indexToKey(val.toIntern()).int.storage) {
.lazy_align, .lazy_size => unreachable, // `val` is fully resolved
inline .u64, .i64 => |x| std.math.cast(T, x) orelse return error.TypeMismatch,
.big_int => |big| big.to(T) catch return error.TypeMismatch,
},

.float => val.toFloat(T, zcu),

.optional => |opt| if (val.optionalValue(zcu)) |unwrapped|
try unwrapped.interpret(opt.child, pt)
else
null,

.@"enum" => zcu.toEnum(T, val),

.@"union" => |@"union"| {
const union_obj = zcu.typeToUnion(ty) orelse return error.TypeMismatch;
if (union_obj.field_types.len != @"union".fields.len) return error.TypeMismatch;
const tag_val = val.unionTag(zcu) orelse return error.TypeMismatch;
const tag = try tag_val.interpret(@"union".tag_type.?, pt);
return switch (tag) {
inline else => |tag_comptime| @unionInit(
T,
@tagName(tag_comptime),
try val.unionValue(zcu).interpret(@FieldType(T, @tagName(tag_comptime)), pt),
),
};
},

.@"struct" => |@"struct"| {
if (ty.structFieldCount(zcu) != @"struct".fields.len) return error.TypeMismatch;
var result: T = undefined;
inline for (@"struct".fields, 0..) |field, field_idx| {
const field_val = try val.fieldValue(pt, field_idx);
@field(result, field.name) = try field_val.interpret(field.type, pt);
}
return result;
},
};
}

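// A self-contained analog (assumption, not from this commit) of the
// `inline else` + `@unionInit` pattern `interpret` uses above to rebuild a
// tagged union from its comptime-expanded tag:
const std = @import("std");

const Example = union(enum) { none: void, count: u32 };

fn rebuild(tag: std.meta.Tag(Example), n: u32) Example {
    return switch (tag) {
        inline else => |t| if (@FieldType(Example, @tagName(t)) == void)
            @unionInit(Example, @tagName(t), {})
        else
            @unionInit(Example, @tagName(t), n),
    };
}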
/// Given any `val` and a `Type` corresponding to `@TypeOf(val)`, construct a `Value` representing it which can be used
/// within the compilation. This is useful for passing `std.builtin` structures in the compiler back to the compilation.
/// This is the inverse of `interpret`.
pub fn uninterpret(val: anytype, ty: Type, pt: Zcu.PerThread) error{ OutOfMemory, TypeMismatch }!Value {
const T = @TypeOf(val);

const zcu = pt.zcu;
if (ty.zigTypeTag(zcu) != @typeInfo(T)) return error.TypeMismatch;

return switch (@typeInfo(T)) {
.type,
.noreturn,
.comptime_float,
.comptime_int,
.undefined,
.null,
.@"fn",
.@"opaque",
.enum_literal,
=> comptime unreachable, // comptime-only or otherwise impossible

.pointer,
.array,
.error_union,
.error_set,
.frame,
.@"anyframe",
.vector,
=> comptime unreachable, // unsupported

.void => .void,

.bool => if (val) .true else .false,

.int => try pt.intValue(ty, val),

.float => try pt.floatValue(ty, val),

.optional => if (val) |some|
.fromInterned(try pt.intern(.{ .opt = .{
.ty = ty.toIntern(),
.val = (try uninterpret(some, ty.optionalChild(zcu), pt)).toIntern(),
} }))
else
try pt.nullValue(ty),

.@"enum" => try pt.enumValue(ty, (try uninterpret(@intFromEnum(val), ty.intTagType(zcu), pt)).toIntern()),

.@"union" => |@"union"| {
const tag: @"union".tag_type.? = val;
const tag_val = try uninterpret(tag, ty.unionTagType(zcu).?, pt);
const field_ty = ty.unionFieldType(tag_val, zcu) orelse return error.TypeMismatch;
return switch (val) {
inline else => |payload| try pt.unionValue(
ty,
tag_val,
try uninterpret(payload, field_ty, pt),
),
};
},

.@"struct" => |@"struct"| {
if (ty.structFieldCount(zcu) != @"struct".fields.len) return error.TypeMismatch;
var field_vals: [@"struct".fields.len]InternPool.Index = undefined;
inline for (&field_vals, @"struct".fields, 0..) |*field_val, field, field_idx| {
const field_ty = ty.fieldType(field_idx, zcu);
field_val.* = (try uninterpret(@field(val, field.name), field_ty, pt)).toIntern();
}
return .fromInterned(try pt.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = &field_vals },
} }));
},
};
}

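// A self-contained analog (assumption, not from this commit) of the
// field-by-field walk `uninterpret` performs above; since it is the inverse of
// `interpret`, the round trip for supported types is the identity:
const std = @import("std");

const Opts = struct { incoming_stack_alignment: ?u64 = null, register_params: u2 = 0 };

test "field walk round trip" {
    const original: Opts = .{ .register_params = 2 };
    var rebuilt: Opts = undefined;
    inline for (@typeInfo(Opts).@"struct".fields) |field| {
        @field(rebuilt, field.name) = @field(original, field.name);
    }
    try std.testing.expectEqualDeep(original, rebuilt);
}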
src/Zcu.zig
@ -3539,3 +3539,111 @@ pub fn maybeUnresolveIes(zcu: *Zcu, func_index: InternPool.Index) !void {
zcu.intern_pool.funcSetIesResolved(func_index, .none);
}
}

pub fn callconvSupported(zcu: *Zcu, cc: std.builtin.CallingConvention) union(enum) {
ok,
bad_arch: []const std.Target.Cpu.Arch, // value is allowed archs for cc
bad_backend: std.builtin.CompilerBackend, // value is current backend
} {
const target = zcu.getTarget();
const backend = target_util.zigBackend(target, zcu.comp.config.use_llvm);
switch (cc) {
.auto, .@"inline" => return .ok,
.@"async" => return .{ .bad_backend = backend }, // nothing supports async currently
.naked => {}, // depends only on backend
else => for (cc.archs()) |allowed_arch| {
if (allowed_arch == target.cpu.arch) break;
} else return .{ .bad_arch = cc.archs() },
}
const backend_ok = switch (backend) {
.stage1 => unreachable,
.other => unreachable,
_ => unreachable,

.stage2_llvm => @import("codegen/llvm.zig").toLlvmCallConv(cc, target) != null,
.stage2_c => ok: {
if (target.cCallingConvention()) |default_c| {
if (cc.eql(default_c)) {
break :ok true;
}
}
break :ok switch (cc) {
.x86_64_sysv,
.x86_64_win,
.x86_64_vectorcall,
.x86_64_regcall_v3_sysv,
.x86_64_regcall_v4_win,
.x86_fastcall,
.x86_thiscall,
.x86_vectorcall,
.x86_regcall_v3,
.x86_regcall_v4_win,
.aarch64_vfabi,
.aarch64_vfabi_sve,
.arm_aapcs,
.arm_aapcs_vfp,
.riscv64_lp64_v,
.riscv32_ilp32_v,
.m68k_rtd,
=> |opts| opts.incoming_stack_alignment == null,

.x86_sysv,
.x86_win,
.x86_stdcall,
=> |opts| opts.incoming_stack_alignment == null and opts.register_params == 0,

.naked => true,

else => false,
};
},
.stage2_wasm => switch (cc) {
.wasm_watc => |opts| opts.incoming_stack_alignment == null,
else => false,
},
.stage2_arm => switch (cc) {
.arm_aapcs => |opts| opts.incoming_stack_alignment == null,
.naked => true,
else => false,
},
.stage2_x86_64 => switch (cc) {
.x86_64_sysv, .x86_64_win, .naked => true, // incoming stack alignment supported
else => false,
},
.stage2_aarch64 => switch (cc) {
.aarch64_aapcs,
.aarch64_aapcs_darwin,
.aarch64_aapcs_win,
=> |opts| opts.incoming_stack_alignment == null,
.naked => true,
else => false,
},
.stage2_x86 => switch (cc) {
.x86_sysv,
.x86_win,
=> |opts| opts.incoming_stack_alignment == null and opts.register_params == 0,
.naked => true,
else => false,
},
.stage2_riscv64 => switch (cc) {
.riscv64_lp64 => |opts| opts.incoming_stack_alignment == null,
.naked => true,
else => false,
},
.stage2_sparc64 => switch (cc) {
.sparc64_sysv => |opts| opts.incoming_stack_alignment == null,
.naked => true,
else => false,
},
.stage2_spirv64 => switch (cc) {
.spirv_device,
.spirv_kernel,
.spirv_fragment,
.spirv_vertex,
=> true,
else => false,
},
};
if (!backend_ok) return .{ .bad_backend = backend };
return .ok;
}

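// A hedged sketch (not from this commit) of the arch gate used above: a
// concrete calling convention is usable only when the current architecture
// appears in `cc.archs()`.
const std = @import("std");

fn archAllows(cc: std.builtin.CallingConvention, arch: std.Target.Cpu.Arch) bool {
    return switch (cc) {
        .auto, .@"inline", .naked, .@"async" => true, // not tied to one architecture
        else => for (cc.archs()) |allowed| {
            if (allowed == arch) break true;
        } else false,
    };
}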
@ -2090,7 +2090,7 @@ fn analyzeFnBody(pt: Zcu.PerThread, func_index: InternPool.Index) Zcu.SemaError!
|
||||
.code = zir,
|
||||
.owner = anal_unit,
|
||||
.func_index = func_index,
|
||||
.func_is_naked = fn_ty_info.cc == .Naked,
|
||||
.func_is_naked = fn_ty_info.cc == .naked,
|
||||
.fn_ret_ty = Type.fromInterned(fn_ty_info.return_type),
|
||||
.fn_ret_ty_ies = null,
|
||||
.branch_quota = @max(func.branchQuotaUnordered(ip), Sema.default_branch_quota),
|
||||
|
||||
@ -468,7 +468,7 @@ fn gen(self: *Self) !void {
|
||||
const pt = self.pt;
|
||||
const zcu = pt.zcu;
|
||||
const cc = self.fn_type.fnCallingConvention(zcu);
|
||||
if (cc != .Naked) {
|
||||
if (cc != .naked) {
|
||||
// stp fp, lr, [sp, #-16]!
|
||||
_ = try self.addInst(.{
|
||||
.tag = .stp,
|
||||
@ -6229,14 +6229,14 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
|
||||
const ret_ty = fn_ty.fnReturnType(zcu);
|
||||
|
||||
switch (cc) {
|
||||
.Naked => {
|
||||
.naked => {
|
||||
assert(result.args.len == 0);
|
||||
result.return_value = .{ .unreach = {} };
|
||||
result.stack_byte_count = 0;
|
||||
result.stack_align = 1;
|
||||
return result;
|
||||
},
|
||||
.C => {
|
||||
.aarch64_aapcs, .aarch64_aapcs_darwin, .aarch64_aapcs_win => {
|
||||
// ARM64 Procedure Call Standard
|
||||
var ncrn: usize = 0; // Next Core Register Number
|
||||
var nsaa: u32 = 0; // Next stacked argument address
|
||||
@ -6266,7 +6266,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
|
||||
|
||||
// We round up NCRN only for non-Apple platforms which allow the 16-byte aligned
|
||||
// values to spread across odd-numbered registers.
|
||||
if (Type.fromInterned(ty).abiAlignment(zcu) == .@"16" and !self.target.isDarwin()) {
|
||||
if (Type.fromInterned(ty).abiAlignment(zcu) == .@"16" and cc != .aarch64_aapcs_darwin) {
|
||||
// Round up NCRN to the next even number
|
||||
ncrn += ncrn % 2;
|
||||
}
|
||||
@ -6298,7 +6298,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
|
||||
result.stack_byte_count = nsaa;
|
||||
result.stack_align = 16;
|
||||
},
|
||||
.Unspecified => {
|
||||
.auto => {
|
||||
if (ret_ty.zigTypeTag(zcu) == .noreturn) {
|
||||
result.return_value = .{ .unreach = {} };
|
||||
} else if (!ret_ty.hasRuntimeBitsIgnoreComptime(zcu) and !ret_ty.isError(zcu)) {
|
||||
|
||||
@ -475,7 +475,7 @@ fn gen(self: *Self) !void {
|
||||
const pt = self.pt;
|
||||
const zcu = pt.zcu;
|
||||
const cc = self.fn_type.fnCallingConvention(zcu);
|
||||
if (cc != .Naked) {
|
||||
if (cc != .naked) {
|
||||
// push {fp, lr}
|
||||
const push_reloc = try self.addNop();
|
||||
|
||||
@ -6196,14 +6196,14 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
|
||||
const ret_ty = fn_ty.fnReturnType(zcu);
|
||||
|
||||
switch (cc) {
|
||||
.Naked => {
|
||||
.naked => {
|
||||
assert(result.args.len == 0);
|
||||
result.return_value = .{ .unreach = {} };
|
||||
result.stack_byte_count = 0;
|
||||
result.stack_align = 1;
|
||||
return result;
|
||||
},
|
||||
.C => {
|
||||
.arm_aapcs => {
|
||||
// ARM Procedure Call Standard, Chapter 6.5
|
||||
var ncrn: usize = 0; // Next Core Register Number
|
||||
var nsaa: u32 = 0; // Next stacked argument address
|
||||
@ -6254,7 +6254,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
|
||||
result.stack_byte_count = nsaa;
|
||||
result.stack_align = 8;
|
||||
},
|
||||
.Unspecified => {
|
||||
.auto => {
|
||||
if (ret_ty.zigTypeTag(zcu) == .noreturn) {
|
||||
result.return_value = .{ .unreach = {} };
|
||||
} else if (!ret_ty.hasRuntimeBitsIgnoreComptime(zcu) and !ret_ty.isError(zcu)) {
|
||||
|
||||
@ -18,6 +18,7 @@ const Zcu = @import("../../Zcu.zig");
|
||||
const Package = @import("../../Package.zig");
|
||||
const InternPool = @import("../../InternPool.zig");
|
||||
const Compilation = @import("../../Compilation.zig");
|
||||
const target_util = @import("../../target.zig");
|
||||
const trace = @import("../../tracy.zig").trace;
|
||||
const codegen = @import("../../codegen.zig");
|
||||
|
||||
@ -819,10 +820,7 @@ pub fn generate(
|
||||
try function.frame_allocs.resize(gpa, FrameIndex.named_count);
|
||||
function.frame_allocs.set(
|
||||
@intFromEnum(FrameIndex.stack_frame),
|
||||
FrameAlloc.init(.{
|
||||
.size = 0,
|
||||
.alignment = func.analysisUnordered(ip).stack_alignment.max(.@"1"),
|
||||
}),
|
||||
FrameAlloc.init(.{ .size = 0, .alignment = .@"1" }),
|
||||
);
|
||||
function.frame_allocs.set(
|
||||
@intFromEnum(FrameIndex.call_frame),
|
||||
@ -977,7 +975,7 @@ pub fn generateLazy(
|
||||
.pt = pt,
|
||||
.allocator = gpa,
|
||||
.mir = mir,
|
||||
.cc = .Unspecified,
|
||||
.cc = .auto,
|
||||
.src_loc = src_loc,
|
||||
.output_mode = comp.config.output_mode,
|
||||
.link_mode = comp.config.link_mode,
|
||||
@ -1036,7 +1034,7 @@ fn formatWipMir(
|
||||
.instructions = data.func.mir_instructions.slice(),
|
||||
.frame_locs = data.func.frame_locs.slice(),
|
||||
},
|
||||
.cc = .Unspecified,
|
||||
.cc = .auto,
|
||||
.src_loc = data.func.src_loc,
|
||||
.output_mode = comp.config.output_mode,
|
||||
.link_mode = comp.config.link_mode,
|
||||
@ -1238,7 +1236,7 @@ fn gen(func: *Func) !void {
|
||||
}
|
||||
}
|
||||
|
||||
if (fn_info.cc != .Naked) {
|
||||
if (fn_info.cc != .naked) {
|
||||
_ = try func.addPseudo(.pseudo_dbg_prologue_end);
|
||||
|
||||
const backpatch_stack_alloc = try func.addPseudo(.pseudo_dead);
|
||||
@ -4894,7 +4892,7 @@ fn genCall(
|
||||
.lib => |lib| try pt.funcType(.{
|
||||
.param_types = lib.param_types,
|
||||
.return_type = lib.return_type,
|
||||
.cc = .C,
|
||||
.cc = func.target.cCallingConvention().?,
|
||||
}),
|
||||
};
|
||||
|
||||
@ -8289,12 +8287,12 @@ fn resolveCallingConventionValues(
|
||||
const ret_ty = Type.fromInterned(fn_info.return_type);
|
||||
|
||||
switch (cc) {
|
||||
.Naked => {
|
||||
.naked => {
|
||||
assert(result.args.len == 0);
|
||||
result.return_value = InstTracking.init(.unreach);
|
||||
result.stack_align = .@"8";
|
||||
},
|
||||
.C, .Unspecified => {
|
||||
.riscv64_lp64, .auto => {
|
||||
if (result.args.len > 8) {
|
||||
return func.fail("RISC-V calling convention does not support more than 8 arguments", .{});
|
||||
}
|
||||
@ -8359,7 +8357,7 @@ fn resolveCallingConventionValues(
|
||||
|
||||
for (param_types, result.args) |ty, *arg| {
|
||||
if (!ty.hasRuntimeBitsIgnoreComptime(zcu)) {
|
||||
assert(cc == .Unspecified);
|
||||
assert(cc == .auto);
|
||||
arg.* = .none;
|
||||
continue;
|
||||
}
|
||||
|
||||
@ -366,7 +366,7 @@ fn gen(self: *Self) !void {
|
||||
const pt = self.pt;
|
||||
const zcu = pt.zcu;
|
||||
const cc = self.fn_type.fnCallingConvention(zcu);
|
||||
if (cc != .Naked) {
|
||||
if (cc != .naked) {
|
||||
// TODO Finish function prologue and epilogue for sparc64.
|
||||
|
||||
// save %sp, stack_reserved_area, %sp
|
||||
@ -4441,14 +4441,14 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type, role: RegisterView)
|
||||
const ret_ty = fn_ty.fnReturnType(zcu);
|
||||
|
||||
switch (cc) {
|
||||
.Naked => {
|
||||
.naked => {
|
||||
assert(result.args.len == 0);
|
||||
result.return_value = .{ .unreach = {} };
|
||||
result.stack_byte_count = 0;
|
||||
result.stack_align = .@"1";
|
||||
return result;
|
||||
},
|
||||
.Unspecified, .C => {
|
||||
.auto, .sparc64_sysv => {
|
||||
// SPARC Compliance Definition 2.4.1, Chapter 3
|
||||
// Low-Level System Information (64-bit psABI) - Function Calling Sequence
|
||||
|
||||
|
||||
@ -710,7 +710,7 @@ stack_size: u32 = 0,
|
||||
/// The stack alignment, which is 16 bytes by default. This is specified by the
|
||||
/// tool-conventions: https://github.com/WebAssembly/tool-conventions/blob/main/BasicCABI.md
|
||||
/// and also what the llvm backend will emit.
|
||||
/// However, local variables or the usage of `@setAlignStack` can overwrite this default.
|
||||
/// However, local variables or the usage of `incoming_stack_alignment` in a `CallingConvention` can overwrite this default.
|
||||
stack_alignment: Alignment = .@"16",
|
||||
|
||||
// For each individual Wasm valtype we store a seperate free list which
|
||||
@ -1160,7 +1160,7 @@ fn genFunctype(
|
||||
if (firstParamSRet(cc, return_type, pt, target)) {
|
||||
try temp_params.append(.i32); // memory address is always a 32-bit handle
|
||||
} else if (return_type.hasRuntimeBitsIgnoreComptime(zcu)) {
|
||||
if (cc == .C) {
|
||||
if (cc == .wasm_watc) {
|
||||
const res_classes = abi.classifyType(return_type, zcu);
|
||||
assert(res_classes[0] == .direct and res_classes[1] == .none);
|
||||
const scalar_type = abi.scalarType(return_type, zcu);
|
||||
@ -1178,7 +1178,7 @@ fn genFunctype(
|
||||
if (!param_type.hasRuntimeBitsIgnoreComptime(zcu)) continue;
|
||||
|
||||
switch (cc) {
|
||||
.C => {
|
||||
.wasm_watc => {
|
||||
const param_classes = abi.classifyType(param_type, zcu);
|
||||
if (param_classes[1] == .none) {
|
||||
if (param_classes[0] == .direct) {
|
||||
@ -1367,7 +1367,7 @@ fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWV
|
||||
.args = &.{},
|
||||
.return_value = .none,
|
||||
};
|
||||
if (cc == .Naked) return result;
|
||||
if (cc == .naked) return result;
|
||||
|
||||
var args = std.ArrayList(WValue).init(func.gpa);
|
||||
defer args.deinit();
|
||||
@ -1382,7 +1382,7 @@ fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWV
|
||||
}
|
||||
|
||||
switch (cc) {
|
||||
.Unspecified => {
|
||||
.auto => {
|
||||
for (fn_info.param_types.get(ip)) |ty| {
|
||||
if (!Type.fromInterned(ty).hasRuntimeBitsIgnoreComptime(zcu)) {
|
||||
continue;
|
||||
@ -1392,7 +1392,7 @@ fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWV
|
||||
func.local_index += 1;
|
||||
}
|
||||
},
|
||||
.C => {
|
||||
.wasm_watc => {
|
||||
for (fn_info.param_types.get(ip)) |ty| {
|
||||
const ty_classes = abi.classifyType(Type.fromInterned(ty), zcu);
|
||||
for (ty_classes) |class| {
|
||||
@ -1410,8 +1410,9 @@ fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWV
|
||||
|
||||
fn firstParamSRet(cc: std.builtin.CallingConvention, return_type: Type, pt: Zcu.PerThread, target: std.Target) bool {
|
||||
switch (cc) {
|
||||
.Unspecified, .Inline => return isByRef(return_type, pt, target),
|
||||
.C => {
|
||||
.@"inline" => unreachable,
|
||||
.auto => return isByRef(return_type, pt, target),
|
||||
.wasm_watc => {
|
||||
const ty_classes = abi.classifyType(return_type, pt.zcu);
|
||||
if (ty_classes[0] == .indirect) return true;
|
||||
if (ty_classes[0] == .direct and ty_classes[1] == .direct) return true;
|
||||
@ -1424,7 +1425,7 @@ fn firstParamSRet(cc: std.builtin.CallingConvention, return_type: Type, pt: Zcu.
|
||||
/// Lowers a Zig type and its value based on a given calling convention to ensure
|
||||
/// it matches the ABI.
|
||||
fn lowerArg(func: *CodeGen, cc: std.builtin.CallingConvention, ty: Type, value: WValue) !void {
|
||||
if (cc != .C) {
|
||||
if (cc != .wasm_watc) {
|
||||
return func.lowerToStack(value);
|
||||
}
|
||||
|
||||
@ -2108,7 +2109,7 @@ fn airRet(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
|
||||
// to the stack instead
|
||||
if (func.return_value != .none) {
|
||||
try func.store(func.return_value, operand, ret_ty, 0);
|
||||
} else if (fn_info.cc == .C and ret_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
|
||||
} else if (fn_info.cc == .wasm_watc and ret_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
|
||||
switch (ret_ty.zigTypeTag(zcu)) {
|
||||
// Aggregate types can be lowered as a singular value
|
||||
.@"struct", .@"union" => {
|
||||
@ -2286,7 +2287,7 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif
|
||||
} else if (first_param_sret) {
|
||||
break :result_value sret;
|
||||
// TODO: Make this less fragile and optimize
|
||||
} else if (zcu.typeToFunc(fn_ty).?.cc == .C and ret_ty.zigTypeTag(zcu) == .@"struct" or ret_ty.zigTypeTag(zcu) == .@"union") {
|
||||
} else if (zcu.typeToFunc(fn_ty).?.cc == .wasm_watc and ret_ty.zigTypeTag(zcu) == .@"struct" or ret_ty.zigTypeTag(zcu) == .@"union") {
|
||||
const result_local = try func.allocLocal(ret_ty);
|
||||
try func.addLabel(.local_set, result_local.local.value);
|
||||
const scalar_type = abi.scalarType(ret_ty, zcu);
|
||||
@ -2565,7 +2566,7 @@ fn airArg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
|
||||
const arg = func.args[arg_index];
|
||||
const cc = zcu.typeToFunc(zcu.navValue(func.owner_nav).typeOf(zcu)).?.cc;
|
||||
const arg_ty = func.typeOfIndex(inst);
|
||||
if (cc == .C) {
|
||||
if (cc == .wasm_watc) {
|
||||
const arg_classes = abi.classifyType(arg_ty, zcu);
|
||||
for (arg_classes) |class| {
|
||||
if (class != .none) {
|
||||
@ -7175,12 +7176,12 @@ fn callIntrinsic(
|
||||
// Always pass over C-ABI
|
||||
const pt = func.pt;
|
||||
const zcu = pt.zcu;
|
||||
var func_type = try genFunctype(func.gpa, .C, param_types, return_type, pt, func.target.*);
|
||||
var func_type = try genFunctype(func.gpa, .{ .wasm_watc = .{} }, param_types, return_type, pt, func.target.*);
|
||||
defer func_type.deinit(func.gpa);
|
||||
const func_type_index = try func.bin_file.zigObjectPtr().?.putOrGetFuncType(func.gpa, func_type);
|
||||
try func.bin_file.addOrUpdateImport(name, symbol_index, null, func_type_index);
|
||||
|
||||
const want_sret_param = firstParamSRet(.C, return_type, pt, func.target.*);
|
||||
const want_sret_param = firstParamSRet(.{ .wasm_watc = .{} }, return_type, pt, func.target.*);
|
||||
// if we want return as first param, we allocate a pointer to stack,
|
||||
// and emit it as our first argument
|
||||
const sret = if (want_sret_param) blk: {
|
||||
@ -7193,7 +7194,7 @@ fn callIntrinsic(
|
||||
for (args, 0..) |arg, arg_i| {
|
||||
assert(!(want_sret_param and arg == .stack));
|
||||
assert(Type.fromInterned(param_types[arg_i]).hasRuntimeBitsIgnoreComptime(zcu));
|
||||
try func.lowerArg(.C, Type.fromInterned(param_types[arg_i]), arg);
|
||||
try func.lowerArg(.{ .wasm_watc = .{} }, Type.fromInterned(param_types[arg_i]), arg);
|
||||
}
|
||||
|
||||
// Actually call our intrinsic
|
||||
|
||||
@ -11,6 +11,7 @@ const verbose_tracking_log = std.log.scoped(.verbose_tracking);
|
||||
const wip_mir_log = std.log.scoped(.wip_mir);
|
||||
const math = std.math;
|
||||
const mem = std.mem;
|
||||
const target_util = @import("../../target.zig");
|
||||
const trace = @import("../../tracy.zig").trace;
|
||||
|
||||
const Air = @import("../../Air.zig");
|
||||
@ -870,10 +871,7 @@ pub fn generate(
|
||||
try function.frame_allocs.resize(gpa, FrameIndex.named_count);
|
||||
function.frame_allocs.set(
|
||||
@intFromEnum(FrameIndex.stack_frame),
|
||||
FrameAlloc.init(.{
|
||||
.size = 0,
|
||||
.alignment = func.analysisUnordered(ip).stack_alignment.max(.@"1"),
|
||||
}),
|
||||
FrameAlloc.init(.{ .size = 0, .alignment = .@"1" }),
|
||||
);
|
||||
function.frame_allocs.set(
|
||||
@intFromEnum(FrameIndex.call_frame),
|
||||
@ -918,13 +916,13 @@ pub fn generate(
|
||||
);
|
||||
function.va_info = switch (cc) {
|
||||
else => undefined,
|
||||
.SysV => .{ .sysv = .{
|
||||
.x86_64_sysv => .{ .sysv = .{
|
||||
.gp_count = call_info.gp_count,
|
||||
.fp_count = call_info.fp_count,
|
||||
.overflow_arg_area = .{ .index = .args_frame, .off = call_info.stack_byte_count },
|
||||
.reg_save_area = undefined,
|
||||
} },
|
||||
.Win64 => .{ .win64 = .{} },
|
||||
.x86_64_win => .{ .win64 = .{} },
|
||||
};
|
||||
|
||||
function.gen() catch |err| switch (err) {
|
||||
@ -1053,7 +1051,7 @@ pub fn generateLazy(
|
||||
.bin_file = bin_file,
|
||||
.allocator = gpa,
|
||||
.mir = mir,
|
||||
.cc = abi.resolveCallingConvention(.Unspecified, function.target.*),
|
||||
.cc = abi.resolveCallingConvention(.auto, function.target.*),
|
||||
.src_loc = src_loc,
|
||||
.output_mode = comp.config.output_mode,
|
||||
.link_mode = comp.config.link_mode,
|
||||
@ -1159,7 +1157,7 @@ fn formatWipMir(
|
||||
.extra = data.self.mir_extra.items,
|
||||
.frame_locs = (std.MultiArrayList(Mir.FrameLoc){}).slice(),
|
||||
},
|
||||
.cc = .Unspecified,
|
||||
.cc = .auto,
|
||||
.src_loc = data.self.src_loc,
|
||||
.output_mode = comp.config.output_mode,
|
||||
.link_mode = comp.config.link_mode,
|
||||
@ -2023,7 +2021,7 @@ fn gen(self: *Self) InnerError!void {
|
||||
const zcu = pt.zcu;
|
||||
const fn_info = zcu.typeToFunc(self.fn_type).?;
|
||||
const cc = abi.resolveCallingConvention(fn_info.cc, self.target.*);
|
||||
if (cc != .Naked) {
|
||||
if (cc != .naked) {
|
||||
try self.asmRegister(.{ ._, .push }, .rbp);
|
||||
try self.asmPseudoImmediate(.pseudo_cfi_adjust_cfa_offset_i_s, Immediate.s(8));
|
||||
try self.asmPseudoRegisterImmediate(.pseudo_cfi_rel_offset_ri_s, .rbp, Immediate.s(0));
|
||||
@ -2056,7 +2054,7 @@ fn gen(self: *Self) InnerError!void {
|
||||
}
|
||||
|
||||
if (fn_info.is_var_args) switch (cc) {
|
||||
.SysV => {
|
||||
.x86_64_sysv => {
|
||||
const info = &self.va_info.sysv;
|
||||
const reg_save_area_fi = try self.allocFrameIndex(FrameAlloc.init(.{
|
||||
.size = abi.SysV.c_abi_int_param_regs.len * 8 +
|
||||
@ -2089,7 +2087,7 @@ fn gen(self: *Self) InnerError!void {
|
||||
|
||||
self.performReloc(skip_sse_reloc);
|
||||
},
|
||||
.Win64 => return self.fail("TODO implement gen var arg function for Win64", .{}),
|
||||
.x86_64_win => return self.fail("TODO implement gen var arg function for Win64", .{}),
|
||||
else => unreachable,
|
||||
};
|
||||
|
||||
@ -2541,7 +2539,7 @@ fn genLazy(self: *Self, lazy_sym: link.File.LazySymbol) InnerError!void {
|
||||
const enum_ty = Type.fromInterned(lazy_sym.ty);
|
||||
wip_mir_log.debug("{}.@tagName:", .{enum_ty.fmt(pt)});
|
||||
|
||||
const resolved_cc = abi.resolveCallingConvention(.Unspecified, self.target.*);
|
||||
const resolved_cc = abi.resolveCallingConvention(.auto, self.target.*);
|
||||
const param_regs = abi.getCAbiIntParamRegs(resolved_cc);
|
||||
const param_locks = self.register_manager.lockRegsAssumeUnused(2, param_regs[0..2].*);
|
||||
defer for (param_locks) |lock| self.register_manager.unlockReg(lock);
|
||||
@ -3008,9 +3006,8 @@ pub fn spillEflagsIfOccupied(self: *Self) !void {
|
||||
|
||||
pub fn spillCallerPreservedRegs(self: *Self, cc: std.builtin.CallingConvention) !void {
|
||||
switch (cc) {
|
||||
inline .SysV, .Win64 => |known_cc| try self.spillRegisters(
|
||||
comptime abi.getCallerPreservedRegs(known_cc),
|
||||
),
|
||||
.x86_64_sysv => try self.spillRegisters(abi.getCallerPreservedRegs(.{ .x86_64_sysv = .{} })),
|
||||
.x86_64_win => try self.spillRegisters(abi.getCallerPreservedRegs(.{ .x86_64_win = .{} })),
|
||||
else => unreachable,
|
||||
}
|
||||
}
|
||||
@ -12384,7 +12381,7 @@ fn genCall(self: *Self, info: union(enum) {
|
||||
.lib => |lib| try pt.funcType(.{
|
||||
.param_types = lib.param_types,
|
||||
.return_type = lib.return_type,
|
||||
.cc = .C,
|
||||
.cc = self.target.cCallingConvention().?,
|
||||
}),
|
||||
};
|
||||
const fn_info = zcu.typeToFunc(fn_ty).?;
|
||||
@ -12543,7 +12540,7 @@ fn genCall(self: *Self, info: union(enum) {
|
||||
src_arg,
|
||||
.{},
|
||||
),
|
||||
.C, .SysV, .Win64 => {
|
||||
.x86_64_sysv, .x86_64_win => {
|
||||
const promoted_ty = self.promoteInt(arg_ty);
|
||||
const promoted_abi_size: u32 = @intCast(promoted_ty.abiSize(zcu));
|
||||
const dst_alias = registerAlias(dst_reg, promoted_abi_size);
|
||||
@ -16822,7 +16819,7 @@ fn airTagName(self: *Self, inst: Air.Inst.Index) !void {
|
||||
const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
|
||||
const inst_ty = self.typeOfIndex(inst);
|
||||
const enum_ty = self.typeOf(un_op);
|
||||
const resolved_cc = abi.resolveCallingConvention(.Unspecified, self.target.*);
|
||||
const resolved_cc = abi.resolveCallingConvention(.auto, self.target.*);
|
||||
|
||||
// We need a properly aligned and sized call frame to be able to call this function.
|
||||
{
|
||||
@ -18915,7 +18912,7 @@ fn airVaStart(self: *Self, inst: Air.Inst.Index) !void {
|
||||
self.fn_type.fnCallingConvention(zcu),
|
||||
self.target.*,
|
||||
)) {
|
||||
.SysV => result: {
|
||||
.x86_64_sysv => result: {
|
||||
const info = self.va_info.sysv;
|
||||
const dst_fi = try self.allocFrameIndex(FrameAlloc.initSpill(va_list_ty, zcu));
|
||||
var field_off: u31 = 0;
|
||||
@ -18957,7 +18954,7 @@ fn airVaStart(self: *Self, inst: Air.Inst.Index) !void {
|
||||
field_off += @intCast(ptr_anyopaque_ty.abiSize(zcu));
|
||||
break :result .{ .load_frame = .{ .index = dst_fi } };
|
||||
},
|
||||
.Win64 => return self.fail("TODO implement c_va_start for Win64", .{}),
|
||||
.x86_64_win => return self.fail("TODO implement c_va_start for Win64", .{}),
|
||||
else => unreachable,
|
||||
};
|
||||
return self.finishAir(inst, result, .{ .none, .none, .none });
|
||||
@ -18976,7 +18973,7 @@ fn airVaArg(self: *Self, inst: Air.Inst.Index) !void {
|
||||
self.fn_type.fnCallingConvention(zcu),
|
||||
self.target.*,
|
||||
)) {
|
||||
.SysV => result: {
|
||||
.x86_64_sysv => result: {
|
||||
try self.spillEflagsIfOccupied();
|
||||
|
||||
const tmp_regs =
|
||||
@ -19155,7 +19152,7 @@ fn airVaArg(self: *Self, inst: Air.Inst.Index) !void {
|
||||
);
|
||||
break :result promote_mcv;
|
||||
},
|
||||
.Win64 => return self.fail("TODO implement c_va_arg for Win64", .{}),
|
||||
.x86_64_win => return self.fail("TODO implement c_va_arg for Win64", .{}),
|
||||
else => unreachable,
|
||||
};
|
||||
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
|
||||
@ -19324,21 +19321,21 @@ fn resolveCallingConventionValues(
|
||||
|
||||
const resolved_cc = abi.resolveCallingConvention(cc, self.target.*);
|
||||
switch (cc) {
|
||||
.Naked => {
|
||||
.naked => {
|
||||
assert(result.args.len == 0);
|
||||
result.return_value = InstTracking.init(.unreach);
|
||||
result.stack_align = .@"8";
|
||||
},
|
||||
.C, .SysV, .Win64 => {
|
||||
.x86_64_sysv, .x86_64_win => |cc_opts| {
|
||||
var ret_int_reg_i: u32 = 0;
|
||||
var ret_sse_reg_i: u32 = 0;
|
||||
var param_int_reg_i: u32 = 0;
|
||||
var param_sse_reg_i: u32 = 0;
|
||||
result.stack_align = .@"16";
|
||||
result.stack_align = .fromByteUnits(cc_opts.incoming_stack_alignment orelse 16);
|
||||
|
||||
switch (resolved_cc) {
|
||||
.SysV => {},
|
||||
.Win64 => {
|
||||
.x86_64_sysv => {},
|
||||
.x86_64_win => {
|
||||
// Align the stack to 16bytes before allocating shadow stack space (if any).
|
||||
result.stack_byte_count += @intCast(4 * Type.usize.abiSize(zcu));
|
||||
},
|
||||
@ -19356,8 +19353,8 @@ fn resolveCallingConventionValues(
|
||||
var ret_tracking_i: usize = 0;
|
||||
|
||||
const classes = switch (resolved_cc) {
|
||||
.SysV => mem.sliceTo(&abi.classifySystemV(ret_ty, zcu, self.target.*, .ret), .none),
|
||||
.Win64 => &.{abi.classifyWindows(ret_ty, zcu)},
|
||||
.x86_64_sysv => mem.sliceTo(&abi.classifySystemV(ret_ty, zcu, self.target.*, .ret), .none),
|
||||
.x86_64_win => &.{abi.classifyWindows(ret_ty, zcu)},
|
||||
else => unreachable,
|
||||
};
|
||||
for (classes) |class| switch (class) {
|
||||
@ -19419,8 +19416,8 @@ fn resolveCallingConventionValues(
|
||||
for (param_types, result.args) |ty, *arg| {
|
||||
assert(ty.hasRuntimeBitsIgnoreComptime(zcu));
|
||||
switch (resolved_cc) {
|
||||
.SysV => {},
|
||||
.Win64 => {
|
||||
.x86_64_sysv => {},
|
||||
.x86_64_win => {
|
||||
param_int_reg_i = @max(param_int_reg_i, param_sse_reg_i);
|
||||
param_sse_reg_i = param_int_reg_i;
|
||||
},
|
||||
@ -19431,8 +19428,8 @@ fn resolveCallingConventionValues(
|
||||
var arg_mcv_i: usize = 0;
|
||||
|
||||
const classes = switch (resolved_cc) {
|
||||
.SysV => mem.sliceTo(&abi.classifySystemV(ty, zcu, self.target.*, .arg), .none),
|
||||
.Win64 => &.{abi.classifyWindows(ty, zcu)},
|
||||
.x86_64_sysv => mem.sliceTo(&abi.classifySystemV(ty, zcu, self.target.*, .arg), .none),
|
||||
.x86_64_win => &.{abi.classifyWindows(ty, zcu)},
|
||||
else => unreachable,
|
||||
};
|
||||
for (classes) |class| switch (class) {
|
||||
@ -19464,11 +19461,11 @@ fn resolveCallingConventionValues(
|
||||
},
|
||||
.sseup => assert(arg_mcv[arg_mcv_i - 1].register.class() == .sse),
|
||||
.x87, .x87up, .complex_x87, .memory, .win_i128 => switch (resolved_cc) {
|
||||
.SysV => switch (class) {
|
||||
.x86_64_sysv => switch (class) {
|
||||
.x87, .x87up, .complex_x87, .memory => break,
|
||||
else => unreachable,
|
||||
},
|
||||
.Win64 => if (ty.abiSize(zcu) > 8) {
|
||||
.x86_64_win => if (ty.abiSize(zcu) > 8) {
|
||||
const param_int_reg =
|
||||
abi.getCAbiIntParamRegs(resolved_cc)[param_int_reg_i].to64();
|
||||
param_int_reg_i += 1;
|
||||
@ -19515,10 +19512,13 @@ fn resolveCallingConventionValues(
|
||||
}
|
||||
|
||||
const param_size: u31 = @intCast(ty.abiSize(zcu));
|
||||
const param_align: u31 =
|
||||
@intCast(@max(ty.abiAlignment(zcu).toByteUnits().?, 8));
|
||||
result.stack_byte_count =
|
||||
mem.alignForward(u31, result.stack_byte_count, param_align);
|
||||
const param_align = ty.abiAlignment(zcu).max(.@"8");
|
||||
result.stack_byte_count = mem.alignForward(
|
||||
u31,
|
||||
result.stack_byte_count,
|
||||
@intCast(param_align.toByteUnits().?),
|
||||
);
|
||||
result.stack_align = result.stack_align.max(param_align);
|
||||
arg.* = .{ .load_frame = .{
|
||||
.index = stack_frame_base,
|
||||
.off = result.stack_byte_count,
|
||||
@ -19530,7 +19530,7 @@ fn resolveCallingConventionValues(
|
||||
assert(param_sse_reg_i <= 16);
|
||||
result.fp_count = param_sse_reg_i;
|
||||
},
|
||||
.Unspecified => {
|
||||
.auto => {
|
||||
result.stack_align = .@"16";
|
||||
|
||||
// Return values
|
||||
@ -19560,9 +19560,13 @@ fn resolveCallingConventionValues(
|
||||
continue;
|
||||
}
|
||||
const param_size: u31 = @intCast(ty.abiSize(zcu));
|
||||
const param_align: u31 = @intCast(ty.abiAlignment(zcu).toByteUnits().?);
|
||||
result.stack_byte_count =
|
||||
mem.alignForward(u31, result.stack_byte_count, param_align);
|
||||
const param_align = ty.abiAlignment(zcu);
|
||||
result.stack_byte_count = mem.alignForward(
|
||||
u31,
|
||||
result.stack_byte_count,
|
||||
@intCast(param_align.toByteUnits().?),
|
||||
);
|
||||
result.stack_align = result.stack_align.max(param_align);
|
||||
arg.* = .{ .load_frame = .{
|
||||
.index = stack_frame_base,
|
||||
.off = result.stack_byte_count,
|
||||
|
||||
@ -440,9 +440,9 @@ pub fn resolveCallingConvention(
target: std.Target,
) std.builtin.CallingConvention {
return switch (cc) {
.Unspecified, .C => switch (target.os.tag) {
else => .SysV,
.windows => .Win64,
.auto => switch (target.os.tag) {
else => .{ .x86_64_sysv = .{} },
.windows => .{ .x86_64_win = .{} },
},
else => cc,
};
@ -450,48 +450,48 @@ pub fn resolveCallingConvention(

pub fn getCalleePreservedRegs(cc: std.builtin.CallingConvention) []const Register {
return switch (cc) {
.SysV => &SysV.callee_preserved_regs,
.Win64 => &Win64.callee_preserved_regs,
.x86_64_sysv => &SysV.callee_preserved_regs,
.x86_64_win => &Win64.callee_preserved_regs,
else => unreachable,
};
}

pub fn getCallerPreservedRegs(cc: std.builtin.CallingConvention) []const Register {
return switch (cc) {
.SysV => &SysV.caller_preserved_regs,
.Win64 => &Win64.caller_preserved_regs,
.x86_64_sysv => &SysV.caller_preserved_regs,
.x86_64_win => &Win64.caller_preserved_regs,
else => unreachable,
};
}

pub fn getCAbiIntParamRegs(cc: std.builtin.CallingConvention) []const Register {
return switch (cc) {
.SysV => &SysV.c_abi_int_param_regs,
.Win64 => &Win64.c_abi_int_param_regs,
.x86_64_sysv => &SysV.c_abi_int_param_regs,
.x86_64_win => &Win64.c_abi_int_param_regs,
else => unreachable,
};
}

pub fn getCAbiSseParamRegs(cc: std.builtin.CallingConvention) []const Register {
return switch (cc) {
.SysV => &SysV.c_abi_sse_param_regs,
.Win64 => &Win64.c_abi_sse_param_regs,
.x86_64_sysv => &SysV.c_abi_sse_param_regs,
.x86_64_win => &Win64.c_abi_sse_param_regs,
else => unreachable,
};
}

pub fn getCAbiIntReturnRegs(cc: std.builtin.CallingConvention) []const Register {
return switch (cc) {
.SysV => &SysV.c_abi_int_return_regs,
.Win64 => &Win64.c_abi_int_return_regs,
.x86_64_sysv => &SysV.c_abi_int_return_regs,
.x86_64_win => &Win64.c_abi_int_return_regs,
else => unreachable,
};
}

pub fn getCAbiSseReturnRegs(cc: std.builtin.CallingConvention) []const Register {
return switch (cc) {
.SysV => &SysV.c_abi_sse_return_regs,
.Win64 => &Win64.c_abi_sse_return_regs,
.x86_64_sysv => &SysV.c_abi_sse_return_regs,
.x86_64_win => &Win64.c_abi_sse_return_regs,
else => unreachable,
};
}

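// Illustrative sketch, not part of the patch: how the tagged-union
// `CallingConvention` resolved above is spelled at declaration sites.
// Payload-free tags such as `.c` still coerce from an enum literal, while
// target-specific conventions take an (empty by default) options payload:
export fn portable() callconv(.c) void {}
export fn sysvOnly() callconv(.{ .x86_64_sysv = .{} }) void {}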
@ -1783,7 +1783,7 @@ pub const DeclGen = struct {
const fn_ctype = try dg.ctypeFromType(fn_ty, kind);

const fn_info = zcu.typeToFunc(fn_ty).?;
if (fn_info.cc == .Naked) {
if (fn_info.cc == .naked) {
switch (kind) {
.forward => try w.writeAll("zig_naked_decl "),
.complete => try w.writeAll("zig_naked "),
@ -1796,7 +1796,7 @@ pub const DeclGen = struct {

var trailing = try renderTypePrefix(dg.pass, &dg.ctype_pool, zcu, w, fn_ctype, .suffix, .{});

if (toCallingConvention(fn_info.cc)) |call_conv| {
if (toCallingConvention(fn_info.cc, zcu)) |call_conv| {
try w.print("{}zig_callconv({s})", .{ trailing, call_conv });
trailing = .maybe_space;
}
@ -7604,12 +7604,39 @@ fn writeMemoryOrder(w: anytype, order: std.builtin.AtomicOrder) !void {
return w.writeAll(toMemoryOrder(order));
}

fn toCallingConvention(call_conv: std.builtin.CallingConvention) ?[]const u8 {
return switch (call_conv) {
.Stdcall => "stdcall",
.Fastcall => "fastcall",
.Vectorcall => "vectorcall",
else => null,
fn toCallingConvention(cc: std.builtin.CallingConvention, zcu: *Zcu) ?[]const u8 {
if (zcu.getTarget().cCallingConvention()) |ccc| {
if (cc.eql(ccc)) {
return null;
}
}
return switch (cc) {
.auto, .naked => null,

.x86_64_sysv, .x86_sysv => "sysv_abi",
.x86_64_win, .x86_win => "ms_abi",
.x86_stdcall => "stdcall",
.x86_fastcall => "fastcall",
.x86_thiscall => "thiscall",

.x86_vectorcall,
.x86_64_vectorcall,
=> "vectorcall",

.x86_64_regcall_v3_sysv,
.x86_64_regcall_v4_win,
.x86_regcall_v3,
.x86_regcall_v4_win,
=> "regcall",

.aarch64_vfabi => "aarch64_vector_pcs",
.aarch64_vfabi_sve => "aarch64_sve_pcs",
.arm_aapcs => "pcs(\"aapcs\")",
.arm_aapcs_vfp => "pcs(\"aapcs-vfp\")",
.riscv64_lp64_v, .riscv32_ilp32_v => "riscv_vector_cc",
.m68k_rtd => "m68k_rtd",

else => unreachable, // `Zcu.callconvSupported`
};
}

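// Sketch of the comparison semantics this backend relies on (illustrative,
// not from the patch): `==` against an enum literal checks only the active
// tag, while `CallingConvention.eql` also compares the payload, which is why
// the default-convention check above uses `cc.eql(ccc)`.
const std = @import("std");
test "tag vs payload comparison" {
    const cc: std.builtin.CallingConvention = .{ .x86_64_sysv = .{} };
    try std.testing.expect(cc == .x86_64_sysv); // tag-only comparison
    try std.testing.expect(cc.eql(.{ .x86_64_sysv = .{} })); // tag and payload
}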
@ -1159,7 +1159,7 @@ pub const Object = struct {
}

{
var module_flags = try std.ArrayList(Builder.Metadata).initCapacity(o.gpa, 6);
var module_flags = try std.ArrayList(Builder.Metadata).initCapacity(o.gpa, 7);
defer module_flags.deinit();

const behavior_error = try o.builder.metadataConstant(try o.builder.intConst(.i32, 1));
@ -1233,6 +1233,18 @@ pub const Object = struct {
}
}

const target = comp.root_mod.resolved_target.result;
if (target.os.tag == .windows and (target.cpu.arch == .x86_64 or target.cpu.arch == .x86)) {
// Add the "RegCallv4" flag so that any functions using `x86_regcallcc` use regcall
// v4, which is essentially a requirement on Windows. See corresponding logic in
// `toLlvmCallConvTag`.
module_flags.appendAssumeCapacity(try o.builder.metadataModuleFlag(
behavior_max,
try o.builder.metadataString("RegCallv4"),
try o.builder.metadataConstant(.@"1"),
));
}

try o.builder.metadataNamed(try o.builder.metadataString("llvm.module.flags"), module_flags.items);
}

@ -1467,14 +1479,6 @@ pub const Object = struct {
_ = try attributes.removeFnAttr(.@"noinline");
}

const stack_alignment = func.analysisUnordered(ip).stack_alignment;
if (stack_alignment != .none) {
try attributes.addFnAttr(.{ .alignstack = stack_alignment.toLlvm() }, &o.builder);
try attributes.addFnAttr(.@"noinline", &o.builder);
} else {
_ = try attributes.removeFnAttr(.alignstack);
}

if (func_analysis.branch_hint == .cold) {
try attributes.addFnAttr(.cold, &o.builder);
} else {
@ -1486,7 +1490,7 @@ pub const Object = struct {
} else {
_ = try attributes.removeFnAttr(.sanitize_thread);
}
const is_naked = fn_info.cc == .Naked;
const is_naked = fn_info.cc == .naked;
if (owner_mod.fuzz and !func_analysis.disable_instrumentation and !is_naked) {
try attributes.addFnAttr(.optforfuzzing, &o.builder);
_ = try attributes.removeFnAttr(.skipprofile);
@ -1784,7 +1788,7 @@ pub const Object = struct {
.liveness = liveness,
.ng = &ng,
.wip = wip,
.is_naked = fn_info.cc == .Naked,
.is_naked = fn_info.cc == .naked,
.fuzz = fuzz,
.ret_ptr = ret_ptr,
.args = args.items,
@ -3038,14 +3042,33 @@ pub const Object = struct {
llvm_arg_i += 1;
}

switch (fn_info.cc) {
.Unspecified, .Inline => function_index.setCallConv(.fastcc, &o.builder),
.Naked => try attributes.addFnAttr(.naked, &o.builder),
.Async => {
function_index.setCallConv(.fastcc, &o.builder);
@panic("TODO: LLVM backend lower async function");
},
else => function_index.setCallConv(toLlvmCallConv(fn_info.cc, target), &o.builder),
if (fn_info.cc == .@"async") {
@panic("TODO: LLVM backend lower async function");
}

{
const cc_info = toLlvmCallConv(fn_info.cc, target).?;

function_index.setCallConv(cc_info.llvm_cc, &o.builder);

if (cc_info.align_stack) {
try attributes.addFnAttr(.{ .alignstack = .fromByteUnits(target.stackAlignment()) }, &o.builder);
} else {
_ = try attributes.removeFnAttr(.alignstack);
}

if (cc_info.naked) {
try attributes.addFnAttr(.naked, &o.builder);
} else {
_ = try attributes.removeFnAttr(.naked);
}

for (0..cc_info.inreg_param_count) |param_idx| {
try attributes.addParamAttr(param_idx, .inreg, &o.builder);
}
for (cc_info.inreg_param_count..std.math.maxInt(u2)) |param_idx| {
_ = try attributes.removeParamAttr(param_idx, .inreg);
}
}

if (resolved.alignment != .none)
@ -3061,7 +3084,7 @@ pub const Object = struct {
// suppress generation of the prologue and epilogue, and the prologue is where the
// frame pointer normally gets set up. At time of writing, this is the case for at
// least x86 and RISC-V.
owner_mod.omit_frame_pointer or fn_info.cc == .Naked,
owner_mod.omit_frame_pointer or fn_info.cc == .naked,
);

if (fn_info.return_type == .noreturn_type) try attributes.addFnAttr(.noreturn, &o.builder);
@ -4618,9 +4641,14 @@ pub const Object = struct {
if (!param_ty.isPtrLikeOptional(zcu) and !ptr_info.flags.is_allowzero) {
try attributes.addParamAttr(llvm_arg_i, .nonnull, &o.builder);
}
if (fn_info.cc == .Interrupt) {
const child_type = try lowerType(o, Type.fromInterned(ptr_info.child));
try attributes.addParamAttr(llvm_arg_i, .{ .byval = child_type }, &o.builder);
switch (fn_info.cc) {
else => {},
.x86_64_interrupt,
.x86_interrupt,
=> {
const child_type = try lowerType(o, Type.fromInterned(ptr_info.child));
try attributes.addParamAttr(llvm_arg_i, .{ .byval = child_type }, &o.builder);
},
}
if (ptr_info.flags.is_const) {
try attributes.addParamAttr(llvm_arg_i, .readonly, &o.builder);
@ -5677,7 +5705,7 @@ pub const FuncGen = struct {
.always_tail => .musttail,
.async_kw, .no_async, .always_inline, .compile_time => unreachable,
},
toLlvmCallConv(fn_info.cc, target),
toLlvmCallConvTag(fn_info.cc, target).?,
try attributes.finish(&o.builder),
try o.lowerType(zig_fn_ty),
llvm_fn,
@ -5756,7 +5784,7 @@ pub const FuncGen = struct {
_ = try fg.wip.callIntrinsicAssumeCold();
_ = try fg.wip.call(
.normal,
toLlvmCallConv(fn_info.cc, target),
toLlvmCallConvTag(fn_info.cc, target).?,
.none,
panic_global.typeOf(&o.builder),
panic_global.toValue(&o.builder),
@ -11554,36 +11582,146 @@ fn toLlvmAtomicRmwBinOp(
};
}

fn toLlvmCallConv(cc: std.builtin.CallingConvention, target: std.Target) Builder.CallConv {
return switch (cc) {
.Unspecified, .Inline, .Async => .fastcc,
.C, .Naked => .ccc,
.Stdcall => .x86_stdcallcc,
.Fastcall => .x86_fastcallcc,
.Vectorcall => return switch (target.cpu.arch) {
.x86, .x86_64 => .x86_vectorcallcc,
.aarch64, .aarch64_be => .aarch64_vector_pcs,
const CallingConventionInfo = struct {
/// The LLVM calling convention to use.
llvm_cc: Builder.CallConv,
/// Whether to use an `alignstack` attribute to forcibly re-align the stack pointer in the function's prologue.
align_stack: bool,
/// Whether the function needs a `naked` attribute.
naked: bool,
/// How many leading parameters to apply the `inreg` attribute to.
inreg_param_count: u2 = 0,
};

pub fn toLlvmCallConv(cc: std.builtin.CallingConvention, target: std.Target) ?CallingConventionInfo {
const llvm_cc = toLlvmCallConvTag(cc, target) orelse return null;
const incoming_stack_alignment: ?u64, const register_params: u2 = switch (cc) {
inline else => |pl| switch (@TypeOf(pl)) {
void => .{ null, 0 },
std.builtin.CallingConvention.CommonOptions => .{ pl.incoming_stack_alignment, 0 },
std.builtin.CallingConvention.X86RegparmOptions => .{ pl.incoming_stack_alignment, pl.register_params },
else => unreachable,
},
.Thiscall => .x86_thiscallcc,
.APCS => .arm_apcscc,
.AAPCS => .arm_aapcscc,
.AAPCSVFP => .arm_aapcs_vfpcc,
.Interrupt => return switch (target.cpu.arch) {
.x86, .x86_64 => .x86_intrcc,
.avr => .avr_intrcc,
.msp430 => .msp430_intrcc,
else => unreachable,
},
.Signal => .avr_signalcc,
.SysV => .x86_64_sysvcc,
.Win64 => .win64cc,
.Kernel => return switch (target.cpu.arch) {
.nvptx, .nvptx64 => .ptx_kernel,
.amdgcn => .amdgpu_kernel,
else => unreachable,
},
.Vertex, .Fragment => unreachable,
};
return .{
.llvm_cc = llvm_cc,
.align_stack = if (incoming_stack_alignment) |a| need_align: {
const normal_stack_align = target.stackAlignment();
break :need_align a < normal_stack_align;
} else false,
.naked = cc == .naked,
.inreg_param_count = register_params,
};
}
fn toLlvmCallConvTag(cc_tag: std.builtin.CallingConvention.Tag, target: std.Target) ?Builder.CallConv {
if (target.cCallingConvention()) |default_c| {
if (cc_tag == default_c) {
return .ccc;
}
}
return switch (cc_tag) {
.@"inline" => unreachable,
.auto, .@"async" => .fastcc,
.naked => .ccc,
.x86_64_sysv => .x86_64_sysvcc,
.x86_64_win => .win64cc,
.x86_64_regcall_v3_sysv => if (target.cpu.arch == .x86_64 and target.os.tag != .windows)
.x86_regcallcc
else
null,
.x86_64_regcall_v4_win => if (target.cpu.arch == .x86_64 and target.os.tag == .windows)
.x86_regcallcc // we use the "RegCallv4" module flag to make this correct
else
null,
.x86_64_vectorcall => .x86_vectorcallcc,
.x86_64_interrupt => .x86_intrcc,
.x86_stdcall => .x86_stdcallcc,
.x86_fastcall => .x86_fastcallcc,
.x86_thiscall => .x86_thiscallcc,
.x86_regcall_v3 => if (target.cpu.arch == .x86 and target.os.tag != .windows)
.x86_regcallcc
else
null,
.x86_regcall_v4_win => if (target.cpu.arch == .x86 and target.os.tag == .windows)
.x86_regcallcc // we use the "RegCallv4" module flag to make this correct
else
null,
.x86_vectorcall => .x86_vectorcallcc,
.x86_interrupt => .x86_intrcc,
.aarch64_vfabi => .aarch64_vector_pcs,
.aarch64_vfabi_sve => .aarch64_sve_vector_pcs,
.arm_apcs => .arm_apcscc,
.arm_aapcs => .arm_aapcscc,
.arm_aapcs_vfp => .arm_aapcs_vfpcc,
.riscv64_lp64_v => .riscv_vectorcallcc,
.riscv32_ilp32_v => .riscv_vectorcallcc,
.avr_builtin => .avr_builtincc,
.avr_signal => .avr_signalcc,
.avr_interrupt => .avr_intrcc,
.m68k_rtd => .m68k_rtdcc,
.m68k_interrupt => .m68k_intrcc,
.amdgcn_kernel => .amdgpu_kernel,
.amdgcn_cs => .amdgpu_cs,
.nvptx_device => .ptx_device,
.nvptx_kernel => .ptx_kernel,

// All the calling conventions which LLVM does not have a general representation for.
// Note that these are often still supported through the `cCallingConvention` path above via `ccc`.
.x86_sysv,
.x86_win,
.x86_thiscall_mingw,
.aarch64_aapcs,
.aarch64_aapcs_darwin,
.aarch64_aapcs_win,
.arm_aapcs16_vfp,
.arm_interrupt,
.mips64_n64,
.mips64_n32,
.mips64_interrupt,
.mips_o32,
.mips_interrupt,
.riscv64_lp64,
.riscv64_interrupt,
.riscv32_ilp32,
.riscv32_interrupt,
.sparc64_sysv,
.sparc_sysv,
.powerpc64_elf,
.powerpc64_elf_altivec,
.powerpc64_elf_v2,
.powerpc_sysv,
.powerpc_sysv_altivec,
.powerpc_aix,
.powerpc_aix_altivec,
.wasm_watc,
.arc_sysv,
.avr_gnu,
.bpf_std,
.csky_sysv,
.csky_interrupt,
.hexagon_sysv,
.hexagon_sysv_hvx,
.lanai_sysv,
.loongarch64_lp64,
.loongarch32_ilp32,
.m68k_sysv,
.m68k_gnu,
.msp430_eabi,
.propeller1_sysv,
.propeller2_sysv,
.s390x_sysv,
.s390x_sysv_vx,
.ve_sysv,
.xcore_xs1,
.xcore_xs2,
.xtensa_call0,
.xtensa_windowed,
.amdgcn_device,
.spirv_device,
.spirv_kernel,
.spirv_fragment,
.spirv_vertex,
=> null,
};
}

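// Sketch (hypothetical declaration, assuming the payload fields consumed by
// `toLlvmCallConv` above): `incoming_stack_alignment` is the replacement for
// `@setAlignStack`. Declaring a caller stack alignment below the target's
// normal one makes the backend emit the `alignstack` attribute, realigning
// in the prologue:
export fn handler() callconv(.{ .x86_64_sysv = .{ .incoming_stack_alignment = 8 } }) void {}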
@ -11711,31 +11849,27 @@ fn firstParamSRet(fn_info: InternPool.Key.FuncType, zcu: *Zcu, target: std.Targe
if (!return_type.hasRuntimeBitsIgnoreComptime(zcu)) return false;

return switch (fn_info.cc) {
.Unspecified, .Inline => returnTypeByRef(zcu, target, return_type),
.C => switch (target.cpu.arch) {
.mips, .mipsel => switch (mips_c_abi.classifyType(return_type, zcu, .ret)) {
.memory, .i32_array => true,
.byval => false,
},
.x86 => isByRef(return_type, zcu),
.x86_64 => switch (target.os.tag) {
.windows => x86_64_abi.classifyWindows(return_type, zcu) == .memory,
else => firstParamSRetSystemV(return_type, zcu, target),
},
.wasm32 => wasm_c_abi.classifyType(return_type, zcu)[0] == .indirect,
.aarch64, .aarch64_be => aarch64_c_abi.classifyType(return_type, zcu) == .memory,
.arm, .armeb => switch (arm_c_abi.classifyType(return_type, zcu, .ret)) {
.memory, .i64_array => true,
.i32_array => |size| size != 1,
.byval => false,
},
.riscv32, .riscv64 => riscv_c_abi.classifyType(return_type, zcu) == .memory,
else => false, // TODO investigate C ABI for other architectures
.auto => returnTypeByRef(zcu, target, return_type),
.x86_64_sysv => firstParamSRetSystemV(return_type, zcu, target),
.x86_64_win => x86_64_abi.classifyWindows(return_type, zcu) == .memory,
.x86_sysv, .x86_win => isByRef(return_type, zcu),
.x86_stdcall => !isScalar(zcu, return_type),
.wasm_watc => wasm_c_abi.classifyType(return_type, zcu)[0] == .indirect,
.aarch64_aapcs,
.aarch64_aapcs_darwin,
.aarch64_aapcs_win,
=> aarch64_c_abi.classifyType(return_type, zcu) == .memory,
.arm_aapcs, .arm_aapcs_vfp => switch (arm_c_abi.classifyType(return_type, zcu, .ret)) {
.memory, .i64_array => true,
.i32_array => |size| size != 1,
.byval => false,
},
.SysV => firstParamSRetSystemV(return_type, zcu, target),
.Win64 => x86_64_abi.classifyWindows(return_type, zcu) == .memory,
.Stdcall => !isScalar(zcu, return_type),
else => false,
.riscv64_lp64, .riscv32_ilp32 => riscv_c_abi.classifyType(return_type, zcu) == .memory,
.mips_o32 => switch (mips_c_abi.classifyType(return_type, zcu, .ret)) {
.memory, .i32_array => true,
.byval => false,
},
else => false, // TODO: investigate other targets/callconvs
};
}

@ -11761,82 +11895,64 @@ fn lowerFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) Allocator.Error!Bu
}
const target = zcu.getTarget();
switch (fn_info.cc) {
.Unspecified,
.Inline,
=> return if (returnTypeByRef(zcu, target, return_type)) .void else o.lowerType(return_type),
.@"inline" => unreachable,
.auto => return if (returnTypeByRef(zcu, target, return_type)) .void else o.lowerType(return_type),

.C => {
switch (target.cpu.arch) {
.mips, .mipsel => {
switch (mips_c_abi.classifyType(return_type, zcu, .ret)) {
.memory, .i32_array => return .void,
.byval => return o.lowerType(return_type),
}
},
.x86 => return if (isByRef(return_type, zcu)) .void else o.lowerType(return_type),
.x86_64 => switch (target.os.tag) {
.windows => return lowerWin64FnRetTy(o, fn_info),
else => return lowerSystemVFnRetTy(o, fn_info),
},
.wasm32 => {
if (isScalar(zcu, return_type)) {
return o.lowerType(return_type);
}
const classes = wasm_c_abi.classifyType(return_type, zcu);
if (classes[0] == .indirect or classes[0] == .none) {
return .void;
}

assert(classes[0] == .direct and classes[1] == .none);
const scalar_type = wasm_c_abi.scalarType(return_type, zcu);
return o.builder.intType(@intCast(scalar_type.abiSize(zcu) * 8));
},
.aarch64, .aarch64_be => {
switch (aarch64_c_abi.classifyType(return_type, zcu)) {
.memory => return .void,
.float_array => return o.lowerType(return_type),
.byval => return o.lowerType(return_type),
.integer => return o.builder.intType(@intCast(return_type.bitSize(zcu))),
.double_integer => return o.builder.arrayType(2, .i64),
}
},
.arm, .armeb => {
switch (arm_c_abi.classifyType(return_type, zcu, .ret)) {
.memory, .i64_array => return .void,
.i32_array => |len| return if (len == 1) .i32 else .void,
.byval => return o.lowerType(return_type),
}
},
.riscv32, .riscv64 => {
switch (riscv_c_abi.classifyType(return_type, zcu)) {
.memory => return .void,
.integer => {
return o.builder.intType(@intCast(return_type.bitSize(zcu)));
},
.double_integer => {
return o.builder.structType(.normal, &.{ .i64, .i64 });
},
.byval => return o.lowerType(return_type),
.fields => {
var types_len: usize = 0;
var types: [8]Builder.Type = undefined;
for (0..return_type.structFieldCount(zcu)) |field_index| {
const field_ty = return_type.fieldType(field_index, zcu);
if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
types[types_len] = try o.lowerType(field_ty);
types_len += 1;
}
return o.builder.structType(.normal, types[0..types_len]);
},
}
},
// TODO investigate C ABI for other architectures
else => return o.lowerType(return_type),
}
.x86_64_sysv => return lowerSystemVFnRetTy(o, fn_info),
.x86_64_win => return lowerWin64FnRetTy(o, fn_info),
.x86_stdcall => return if (isScalar(zcu, return_type)) o.lowerType(return_type) else .void,
.x86_sysv, .x86_win => return if (isByRef(return_type, zcu)) .void else o.lowerType(return_type),
.aarch64_aapcs, .aarch64_aapcs_darwin, .aarch64_aapcs_win => switch (aarch64_c_abi.classifyType(return_type, zcu)) {
.memory => return .void,
.float_array => return o.lowerType(return_type),
.byval => return o.lowerType(return_type),
.integer => return o.builder.intType(@intCast(return_type.bitSize(zcu))),
.double_integer => return o.builder.arrayType(2, .i64),
},
.Win64 => return lowerWin64FnRetTy(o, fn_info),
.SysV => return lowerSystemVFnRetTy(o, fn_info),
.Stdcall => return if (isScalar(zcu, return_type)) o.lowerType(return_type) else .void,
.arm_aapcs, .arm_aapcs_vfp => switch (arm_c_abi.classifyType(return_type, zcu, .ret)) {
.memory, .i64_array => return .void,
.i32_array => |len| return if (len == 1) .i32 else .void,
.byval => return o.lowerType(return_type),
},
.mips_o32 => switch (mips_c_abi.classifyType(return_type, zcu, .ret)) {
.memory, .i32_array => return .void,
.byval => return o.lowerType(return_type),
},
.riscv64_lp64, .riscv32_ilp32 => switch (riscv_c_abi.classifyType(return_type, zcu)) {
.memory => return .void,
.integer => {
return o.builder.intType(@intCast(return_type.bitSize(zcu)));
},
.double_integer => {
return o.builder.structType(.normal, &.{ .i64, .i64 });
},
.byval => return o.lowerType(return_type),
.fields => {
var types_len: usize = 0;
var types: [8]Builder.Type = undefined;
for (0..return_type.structFieldCount(zcu)) |field_index| {
const field_ty = return_type.fieldType(field_index, zcu);
if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
types[types_len] = try o.lowerType(field_ty);
types_len += 1;
}
return o.builder.structType(.normal, types[0..types_len]);
},
},
.wasm_watc => {
if (isScalar(zcu, return_type)) {
return o.lowerType(return_type);
}
const classes = wasm_c_abi.classifyType(return_type, zcu);
if (classes[0] == .indirect or classes[0] == .none) {
return .void;
}

assert(classes[0] == .direct and classes[1] == .none);
const scalar_type = wasm_c_abi.scalarType(return_type, zcu);
return o.builder.intType(@intCast(scalar_type.abiSize(zcu) * 8));
},
// TODO investigate other callconvs
else => return o.lowerType(return_type),
}
}
@ -11989,7 +12105,8 @@ const ParamTypeIterator = struct {
return .no_bits;
}
switch (it.fn_info.cc) {
.Unspecified, .Inline => {
.@"inline" => unreachable,
.auto => {
it.zig_index += 1;
it.llvm_index += 1;
if (ty.isSlice(zcu) or
@ -12010,97 +12127,12 @@ const ParamTypeIterator = struct {
return .byval;
}
},
.Async => {
.@"async" => {
@panic("TODO implement async function lowering in the LLVM backend");
},
.C => switch (target.cpu.arch) {
.mips, .mipsel => {
it.zig_index += 1;
it.llvm_index += 1;
switch (mips_c_abi.classifyType(ty, zcu, .arg)) {
.memory => {
it.byval_attr = true;
return .byref;
},
.byval => return .byval,
.i32_array => |size| return Lowering{ .i32_array = size },
}
},
.x86_64 => switch (target.os.tag) {
.windows => return it.nextWin64(ty),
else => return it.nextSystemV(ty),
},
.wasm32 => {
it.zig_index += 1;
it.llvm_index += 1;
if (isScalar(zcu, ty)) {
return .byval;
}
const classes = wasm_c_abi.classifyType(ty, zcu);
if (classes[0] == .indirect) {
return .byref;
}
return .abi_sized_int;
},
.aarch64, .aarch64_be => {
it.zig_index += 1;
it.llvm_index += 1;
switch (aarch64_c_abi.classifyType(ty, zcu)) {
.memory => return .byref_mut,
.float_array => |len| return Lowering{ .float_array = len },
.byval => return .byval,
.integer => {
it.types_len = 1;
it.types_buffer[0] = .i64;
return .multiple_llvm_types;
},
.double_integer => return Lowering{ .i64_array = 2 },
}
},
.arm, .armeb => {
it.zig_index += 1;
it.llvm_index += 1;
switch (arm_c_abi.classifyType(ty, zcu, .arg)) {
.memory => {
it.byval_attr = true;
return .byref;
},
.byval => return .byval,
.i32_array => |size| return Lowering{ .i32_array = size },
.i64_array => |size| return Lowering{ .i64_array = size },
}
},
.riscv32, .riscv64 => {
it.zig_index += 1;
it.llvm_index += 1;
switch (riscv_c_abi.classifyType(ty, zcu)) {
.memory => return .byref_mut,
.byval => return .byval,
.integer => return .abi_sized_int,
.double_integer => return Lowering{ .i64_array = 2 },
.fields => {
it.types_len = 0;
for (0..ty.structFieldCount(zcu)) |field_index| {
const field_ty = ty.fieldType(field_index, zcu);
if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
it.types_buffer[it.types_len] = try it.object.lowerType(field_ty);
it.types_len += 1;
}
it.llvm_index += it.types_len - 1;
return .multiple_llvm_types;
},
}
},
// TODO investigate C ABI for other architectures
else => {
it.zig_index += 1;
it.llvm_index += 1;
return .byval;
},
},
.Win64 => return it.nextWin64(ty),
.SysV => return it.nextSystemV(ty),
.Stdcall => {
.x86_64_sysv => return it.nextSystemV(ty),
.x86_64_win => return it.nextWin64(ty),
.x86_stdcall => {
it.zig_index += 1;
it.llvm_index += 1;

@ -12111,6 +12143,80 @@ const ParamTypeIterator = struct {
return .byref;
}
},
.aarch64_aapcs, .aarch64_aapcs_darwin, .aarch64_aapcs_win => {
it.zig_index += 1;
it.llvm_index += 1;
switch (aarch64_c_abi.classifyType(ty, zcu)) {
.memory => return .byref_mut,
.float_array => |len| return Lowering{ .float_array = len },
.byval => return .byval,
.integer => {
it.types_len = 1;
it.types_buffer[0] = .i64;
return .multiple_llvm_types;
},
.double_integer => return Lowering{ .i64_array = 2 },
}
},
.arm_aapcs, .arm_aapcs_vfp => {
it.zig_index += 1;
it.llvm_index += 1;
switch (arm_c_abi.classifyType(ty, zcu, .arg)) {
.memory => {
it.byval_attr = true;
return .byref;
},
.byval => return .byval,
.i32_array => |size| return Lowering{ .i32_array = size },
.i64_array => |size| return Lowering{ .i64_array = size },
}
},
.mips_o32 => {
it.zig_index += 1;
it.llvm_index += 1;
switch (mips_c_abi.classifyType(ty, zcu, .arg)) {
.memory => {
it.byval_attr = true;
return .byref;
},
.byval => return .byval,
.i32_array => |size| return Lowering{ .i32_array = size },
}
},
.riscv64_lp64, .riscv32_ilp32 => {
it.zig_index += 1;
it.llvm_index += 1;
switch (riscv_c_abi.classifyType(ty, zcu)) {
.memory => return .byref_mut,
.byval => return .byval,
.integer => return .abi_sized_int,
.double_integer => return Lowering{ .i64_array = 2 },
.fields => {
it.types_len = 0;
for (0..ty.structFieldCount(zcu)) |field_index| {
const field_ty = ty.fieldType(field_index, zcu);
if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
it.types_buffer[it.types_len] = try it.object.lowerType(field_ty);
it.types_len += 1;
}
it.llvm_index += it.types_len - 1;
return .multiple_llvm_types;
},
}
},
.wasm_watc => {
it.zig_index += 1;
it.llvm_index += 1;
if (isScalar(zcu, ty)) {
return .byval;
}
const classes = wasm_c_abi.classifyType(ty, zcu);
if (classes[0] == .indirect) {
return .byref;
}
return .abi_sized_int;
},
// TODO investigate other callconvs
else => {
it.zig_index += 1;
it.llvm_index += 1;
@ -12269,7 +12375,7 @@ fn ccAbiPromoteInt(
) ?std.builtin.Signedness {
const target = zcu.getTarget();
switch (cc) {
.Unspecified, .Inline, .Async => return null,
.auto, .@"inline", .@"async" => return null,
else => {},
}
const int_info = switch (ty.zigTypeTag(zcu)) {

@ -2052,6 +2052,7 @@ pub const CallConv = enum(u10) {
x86_intrcc,
avr_intrcc,
avr_signalcc,
avr_builtincc,

amdgpu_vs = 87,
amdgpu_gs,
@ -2060,6 +2061,7 @@ pub const CallConv = enum(u10) {
amdgpu_kernel,
x86_regcallcc,
amdgpu_hs,
msp430_builtincc,

amdgpu_ls = 95,
amdgpu_es,
@ -2068,9 +2070,15 @@ pub const CallConv = enum(u10) {

amdgpu_gfx = 100,

m68k_intrcc,

aarch64_sme_preservemost_from_x0 = 102,
aarch64_sme_preservemost_from_x2,

m68k_rtdcc = 106,

riscv_vectorcallcc = 110,

_,

pub const default = CallConv.ccc;
@ -2115,6 +2123,7 @@ pub const CallConv = enum(u10) {
.x86_intrcc,
.avr_intrcc,
.avr_signalcc,
.avr_builtincc,
.amdgpu_vs,
.amdgpu_gs,
.amdgpu_ps,
@ -2122,13 +2131,17 @@ pub const CallConv = enum(u10) {
.amdgpu_kernel,
.x86_regcallcc,
.amdgpu_hs,
.msp430_builtincc,
.amdgpu_ls,
.amdgpu_es,
.aarch64_vector_pcs,
.aarch64_sve_vector_pcs,
.amdgpu_gfx,
.m68k_intrcc,
.aarch64_sme_preservemost_from_x0,
.aarch64_sme_preservemost_from_x2,
.m68k_rtdcc,
.riscv_vectorcallcc,
=> try writer.print(" {s}", .{@tagName(self)}),
_ => try writer.print(" cc{d}", .{@intFromEnum(self)}),
}

@ -1640,8 +1640,8 @@ const NavGen = struct {

comptime assert(zig_call_abi_ver == 3);
switch (fn_info.cc) {
.Unspecified, .Kernel, .Fragment, .Vertex, .C => {},
else => unreachable, // TODO
.auto, .spirv_kernel, .spirv_fragment, .spirv_vertex => {},
else => @panic("TODO"),
}

// TODO: Put this somewhere in Sema.zig
@ -2970,7 +2970,7 @@ const NavGen = struct {
.id_result_type = return_ty_id,
.id_result = result_id,
.function_control = switch (fn_info.cc) {
.Inline => .{ .Inline = true },
.@"inline" => .{ .Inline = true },
else => .{},
},
.function_type = prototype_ty_id,

@ -217,7 +217,7 @@ pub fn updateFunc(
.mod = zcu.navFileScope(func.owner_nav).mod,
.error_msg = null,
.pass = .{ .nav = func.owner_nav },
.is_naked_fn = zcu.navValue(func.owner_nav).typeOf(zcu).fnCallingConvention(zcu) == .Naked,
.is_naked_fn = zcu.navValue(func.owner_nav).typeOf(zcu).fnCallingConvention(zcu) == .naked,
.fwd_decl = fwd_decl.toManaged(gpa),
.ctype_pool = ctype_pool.*,
.scratch = .{},

@ -1488,14 +1488,16 @@ pub fn updateExports(
const exported_nav = ip.getNav(exported_nav_index);
const exported_ty = exported_nav.typeOf(ip);
if (!ip.isFunctionType(exported_ty)) continue;
const c_cc = target.cCallingConvention().?;
const winapi_cc: std.builtin.CallingConvention = switch (target.cpu.arch) {
.x86 => .Stdcall,
else => .C,
.x86 => .{ .x86_stdcall = .{} },
else => c_cc,
};
const exported_cc = Type.fromInterned(exported_ty).fnCallingConvention(zcu);
if (exported_cc == .C and exp.opts.name.eqlSlice("main", ip) and comp.config.link_libc) {
const CcTag = std.builtin.CallingConvention.Tag;
if (@as(CcTag, exported_cc) == @as(CcTag, c_cc) and exp.opts.name.eqlSlice("main", ip) and comp.config.link_libc) {
zcu.stage1_flags.have_c_main = true;
} else if (exported_cc == winapi_cc and target.os.tag == .windows) {
} else if (@as(CcTag, exported_cc) == @as(CcTag, winapi_cc) and target.os.tag == .windows) {
if (exp.opts.name.eqlSlice("WinMain", ip)) {
zcu.stage1_flags.have_winmain = true;
} else if (exp.opts.name.eqlSlice("wWinMain", ip)) {

@ -3398,21 +3398,71 @@ fn updateType(
const is_nullary = func_type.param_types.len == 0 and !func_type.is_var_args;
try wip_nav.abbrevCode(if (is_nullary) .nullary_func_type else .func_type);
try wip_nav.strp(name);
try diw.writeByte(@intFromEnum(@as(DW.CC, switch (func_type.cc) {
.Unspecified, .C => .normal,
.Naked, .Async, .Inline => .nocall,
.Interrupt, .Signal => .nocall,
.Stdcall => .BORLAND_stdcall,
.Fastcall => .BORLAND_fastcall,
.Vectorcall => .LLVM_vectorcall,
.Thiscall => .BORLAND_thiscall,
.APCS => .nocall,
.AAPCS => .LLVM_AAPCS,
.AAPCSVFP => .LLVM_AAPCS_VFP,
.SysV => .LLVM_X86_64SysV,
.Win64 => .LLVM_Win64,
.Kernel, .Fragment, .Vertex => .nocall,
})));
const cc: DW.CC = cc: {
if (zcu.getTarget().cCallingConvention()) |cc| {
if (@as(std.builtin.CallingConvention.Tag, cc) == func_type.cc) {
break :cc .normal;
}
}
// For better or worse, we try to match what Clang emits.
break :cc switch (func_type.cc) {
.@"inline" => unreachable,
.@"async", .auto, .naked => .normal,
.x86_64_sysv => .LLVM_X86_64SysV,
.x86_64_win => .LLVM_Win64,
.x86_64_regcall_v3_sysv => .LLVM_X86RegCall,
.x86_64_regcall_v4_win => .LLVM_X86RegCall,
.x86_64_vectorcall => .LLVM_vectorcall,
.x86_sysv => .nocall,
.x86_win => .nocall,
.x86_stdcall => .BORLAND_stdcall,
.x86_fastcall => .BORLAND_msfastcall,
.x86_thiscall => .BORLAND_thiscall,
.x86_thiscall_mingw => .BORLAND_thiscall,
.x86_regcall_v3 => .LLVM_X86RegCall,
.x86_regcall_v4_win => .LLVM_X86RegCall,
.x86_vectorcall => .LLVM_vectorcall,

.aarch64_aapcs => .LLVM_AAPCS,
.aarch64_aapcs_darwin => .LLVM_AAPCS,
.aarch64_aapcs_win => .LLVM_AAPCS,
.aarch64_vfabi => .LLVM_AAPCS,
.aarch64_vfabi_sve => .LLVM_AAPCS,

.arm_apcs => .nocall,
.arm_aapcs => .LLVM_AAPCS,
.arm_aapcs_vfp => .LLVM_AAPCS_VFP,
.arm_aapcs16_vfp => .nocall,

.riscv64_lp64_v,
.riscv32_ilp32_v,
=> .LLVM_RISCVVectorCall,

.m68k_rtd => .LLVM_M68kRTD,

.amdgcn_kernel,
.nvptx_kernel,
.spirv_kernel,
=> .LLVM_OpenCLKernel,

.x86_64_interrupt,
.x86_interrupt,
.arm_interrupt,
.mips64_interrupt,
.mips_interrupt,
.riscv64_interrupt,
.riscv32_interrupt,
.avr_builtin,
.avr_signal,
.avr_interrupt,
.csky_interrupt,
.m68k_interrupt,
=> .normal,

else => .nocall,
};
};
try diw.writeByte(@intFromEnum(cc));
try wip_nav.refType(Type.fromInterned(func_type.return_type));
for (0..func_type.param_types.len) |param_index| {
try wip_nav.abbrevCode(.func_type_param);

@ -165,10 +165,9 @@ pub fn updateExports(
const target = zcu.getTarget();
const spv_decl_index = try self.object.resolveNav(zcu, nav_index);
const execution_model = switch (Type.fromInterned(nav_ty).fnCallingConvention(zcu)) {
.Vertex => spec.ExecutionModel.Vertex,
.Fragment => spec.ExecutionModel.Fragment,
.Kernel => spec.ExecutionModel.Kernel,
.C => return, // TODO: What to do here?
.spirv_vertex => spec.ExecutionModel.Vertex,
.spirv_fragment => spec.ExecutionModel.Fragment,
.spirv_kernel => spec.ExecutionModel.Kernel,
else => unreachable,
};
const is_vulkan = target.os.tag == .vulkan;

@ -567,7 +567,6 @@ const Writer = struct {
.c_undef,
.c_include,
.set_float_mode,
.set_align_stack,
.wasm_memory_size,
.int_from_error,
.error_from_int,

@ -544,13 +544,13 @@ pub fn compilerRtIntAbbrev(bits: u16) []const u8 {
};
}

pub fn fnCallConvAllowsZigTypes(target: std.Target, cc: std.builtin.CallingConvention) bool {
pub fn fnCallConvAllowsZigTypes(cc: std.builtin.CallingConvention) bool {
return switch (cc) {
.Unspecified, .Async, .Inline => true,
.auto, .@"async", .@"inline" => true,
// For now we want to authorize PTX kernel to use zig objects, even if
// we end up exposing the ABI. The goal is to experiment with more
// integrated CPU/GPU code.
.Kernel => target.cpu.arch == .nvptx or target.cpu.arch == .nvptx64,
.nvptx_kernel => true,
else => false,
};
}

@ -4,7 +4,6 @@ const assert = std.debug.assert;
const mem = std.mem;
const math = std.math;
const meta = std.meta;
const CallingConvention = std.builtin.CallingConvention;
const clang = @import("clang.zig");
const aro = @import("aro");
const CToken = aro.Tokenizer.Token;
@ -5001,17 +5000,20 @@ fn transCC(
c: *Context,
fn_ty: *const clang.FunctionType,
source_loc: clang.SourceLocation,
) !CallingConvention {
) !ast.Payload.Func.CallingConvention {
const clang_cc = fn_ty.getCallConv();
switch (clang_cc) {
.C => return CallingConvention.C,
.X86StdCall => return CallingConvention.Stdcall,
.X86FastCall => return CallingConvention.Fastcall,
.X86VectorCall, .AArch64VectorCall => return CallingConvention.Vectorcall,
.X86ThisCall => return CallingConvention.Thiscall,
.AAPCS => return CallingConvention.AAPCS,
.AAPCS_VFP => return CallingConvention.AAPCSVFP,
.X86_64SysV => return CallingConvention.SysV,
return switch (clang_cc) {
.C => .c,
.X86_64SysV => .x86_64_sysv,
.Win64 => .x86_64_win,
.X86StdCall => .x86_stdcall,
.X86FastCall => .x86_fastcall,
.X86ThisCall => .x86_thiscall,
.X86VectorCall => .x86_vectorcall,
.AArch64VectorCall => .aarch64_vfabi,
.AAPCS => .arm_aapcs,
.AAPCS_VFP => .arm_aapcs_vfp,
.M68kRTD => .m68k_rtd,
else => return fail(
c,
error.UnsupportedType,
@ -5019,7 +5021,7 @@ fn transCC(
"unsupported calling convention: {s}",
.{@tagName(clang_cc)},
),
}
};
}

fn transFnProto(
@ -5056,7 +5058,7 @@ fn finishTransFnProto(
source_loc: clang.SourceLocation,
fn_decl_context: ?FnDeclContext,
is_var_args: bool,
cc: CallingConvention,
cc: ast.Payload.Func.CallingConvention,
is_pub: bool,
) !*ast.Payload.Func {
const is_export = if (fn_decl_context) |ctx| ctx.is_export else false;
@ -5104,7 +5106,7 @@ fn finishTransFnProto(

const alignment = if (fn_decl) |decl| ClangAlignment.forFunc(c, decl).zigAlignment() else null;

const explicit_callconv = if ((is_inline or is_export or is_extern) and cc == .C) null else cc;
const explicit_callconv = if ((is_inline or is_export or is_extern) and cc == .c) null else cc;

const return_type_node = blk: {
if (fn_ty.getNoReturnAttr()) {

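// Illustrative, not from the patch: with the mapping above, translate-c now
// renders a C prototype like `__attribute__((stdcall)) void f(void);` (x86)
// roughly as:
//     pub extern fn f() callconv(.{ .x86_stdcall = .{} }) void;
// whereas plain cdecl functions omit the callconv entirely when inline,
// export, or extern, per the `explicit_callconv` logic above.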
stage1/zig.h
@ -248,37 +248,55 @@ typedef char bool;

#if zig_has_builtin(trap)
#define zig_trap() __builtin_trap()
#elif _MSC_VER && (_M_IX86 || _M_X64)
#elif defined(_MSC_VER) && (defined(_M_IX86) || defined(_M_X64))
#define zig_trap() __ud2()
#elif _MSC_VER
#define zig_trap() __fastfail(0)
#elif defined(__i386__) || defined(__x86_64__)
#define zig_trap() __asm__ volatile("ud2");
#elif defined(_MSC_VER)
#define zig_trap() __fastfail(7)
#elif defined(__thumb__)
#define zig_trap() __asm__ volatile("udf #0xfe")
#elif defined(__arm__) || defined(__aarch64__)
#define zig_trap() __asm__ volatile("udf #0");
#define zig_trap() __asm__ volatile("udf #0xfdee")
#elif defined(__loongarch__) || defined(__powerpc__)
#define zig_trap() __asm__ volatile(".word 0x0")
#elif defined(__mips__)
#define zig_trap() __asm__ volatile(".word 0x3d")
#elif defined(__riscv)
#define zig_trap() __asm__ volatile("unimp")
#elif defined(__s390__)
#define zig_trap() __asm__ volatile("j 0x2")
#elif defined(__sparc__)
#define zig_trap() __asm__ volatile("illtrap")
#elif defined(__i386__) || defined(__x86_64__)
#define zig_trap() __asm__ volatile("ud2")
#else
#include <stdlib.h>
#define zig_trap() abort()
#define zig_trap() zig_trap_unavailable
#endif

#if zig_has_builtin(debugtrap)
#define zig_breakpoint() __builtin_debugtrap()
#elif defined(_MSC_VER) || defined(__MINGW32__) || defined(__MINGW64__)
#define zig_breakpoint() __debugbreak()
#elif defined(__i386__) || defined(__x86_64__)
#define zig_breakpoint() __asm__ volatile("int $0x03");
#elif defined(__arm__)
#define zig_breakpoint() __asm__ volatile("bkpt #0");
#define zig_breakpoint() __asm__ volatile("bkpt #0x0")
#elif defined(__aarch64__)
#define zig_breakpoint() __asm__ volatile("brk #0");
#else
#include <signal.h>
#if defined(SIGTRAP)
#define zig_breakpoint() raise(SIGTRAP)
#define zig_breakpoint() __asm__ volatile("brk #0xf000")
#elif defined(__loongarch__)
#define zig_breakpoint() __asm__ volatile("break 0x0")
#elif defined(__mips__)
#define zig_breakpoint() __asm__ volatile("break")
#elif defined(__powerpc__)
#define zig_breakpoint() __asm__ volatile("trap")
#elif defined(__riscv)
#define zig_breakpoint() __asm__ volatile("ebreak")
#elif defined(__s390__)
#define zig_breakpoint() __asm__ volatile("j 0x6")
#elif defined(__sparc__)
#define zig_breakpoint() __asm__ volatile("ta 0x1")
#elif defined(__i386__) || defined(__x86_64__)
#define zig_breakpoint() __asm__ volatile("int $0x3")
#else
#define zig_breakpoint() zig_breakpoint_unavailable
#endif
#endif

#if zig_has_builtin(return_address) || defined(zig_gnuc)
#define zig_return_address() __builtin_extract_return_addr(__builtin_return_address(0))
@ -3592,7 +3610,6 @@ typedef enum memory_order zig_memory_order;
#define zig_atomicrmw_add_float zig_atomicrmw_add
#undef zig_atomicrmw_sub_float
#define zig_atomicrmw_sub_float zig_atomicrmw_sub
#define zig_fence(order) atomic_thread_fence(order)
#elif defined(__GNUC__)
typedef int zig_memory_order;
#define zig_memory_order_relaxed __ATOMIC_RELAXED
@ -3616,7 +3633,6 @@ typedef int zig_memory_order;
#define zig_atomic_load(res, obj, order, Type, ReprType) __atomic_load (obj, &(res), order)
#undef zig_atomicrmw_xchg_float
#define zig_atomicrmw_xchg_float zig_atomicrmw_xchg
#define zig_fence(order) __atomic_thread_fence(order)
#elif _MSC_VER && (_M_IX86 || _M_X64)
#define zig_memory_order_relaxed 0
#define zig_memory_order_acquire 2
@ -3637,11 +3653,6 @@ typedef int zig_memory_order;
#define zig_atomicrmw_max(res, obj, arg, order, Type, ReprType) res = zig_msvc_atomicrmw_max_ ##Type(obj, arg)
#define zig_atomic_store( obj, arg, order, Type, ReprType) zig_msvc_atomic_store_ ##Type(obj, arg)
#define zig_atomic_load(res, obj, order, Type, ReprType) res = zig_msvc_atomic_load_ ##order##_##Type(obj)
#if _M_X64
#define zig_fence(order) __faststorefence()
#else
#define zig_fence(order) zig_msvc_atomic_barrier()
#endif
/* TODO: _MSC_VER && (_M_ARM || _M_ARM64) */
#else
#define zig_memory_order_relaxed 0
@ -3663,7 +3674,6 @@ typedef int zig_memory_order;
#define zig_atomicrmw_max(res, obj, arg, order, Type, ReprType) zig_atomics_unavailable
#define zig_atomic_store( obj, arg, order, Type, ReprType) zig_atomics_unavailable
#define zig_atomic_load(res, obj, order, Type, ReprType) zig_atomics_unavailable
#define zig_fence(order) zig_fence_unavailable
#endif

#if _MSC_VER && (_M_IX86 || _M_X64)

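// Zig-side sketch (assumed pairing, not from the patch): the zig_trap() and
// zig_breakpoint() macros above back the C backend's lowering of the
// corresponding builtins, roughly:
//     export fn crash() callconv(.c) noreturn {
//         @trap(); // lowered to zig_trap() by the C backend
//     }
// On targets with no known trap/breakpoint instruction, the new definitions
// expand to an undeclared identifier, turning the old silent abort() or
// raise(SIGTRAP) fallback into a compile error.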
stage1/zig1.wasm
Binary file not shown.
@ -210,15 +210,6 @@ test "alignment and size of structs with 128-bit fields" {
}
}

test "alignstack" {
try expect(fnWithAlignedStack() == 1234);
}

fn fnWithAlignedStack() i32 {
@setAlignStack(256);
return 1234;
}

test "implicitly decreasing slice alignment" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO

@ -20,7 +20,6 @@ test {
try testing.expectEqual({}, @memset(@as([*]u8, @ptrFromInt(1))[0..0], undefined));
try testing.expectEqual(noreturn, @TypeOf(if (true) @panic("") else {}));
try testing.expectEqual({}, @prefetch(&val, .{}));
try testing.expectEqual({}, @setAlignStack(16));
try testing.expectEqual({}, @setEvalBranchQuota(0));
try testing.expectEqual({}, @setFloatMode(.optimized));
try testing.expectEqual({}, @setRuntimeSafety(true));

@ -350,6 +350,7 @@ fn testOpaque() !void {

test "type info: function type info" {
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

try testFunction();
try comptime testFunction();
@ -358,7 +359,7 @@ test "type info: function type info" {
fn testFunction() !void {
const foo_fn_type = @TypeOf(typeInfoFoo);
const foo_fn_info = @typeInfo(foo_fn_type);
try expect(foo_fn_info.@"fn".calling_convention == .C);
try expect(foo_fn_info.@"fn".calling_convention.eql(.c));
try expect(!foo_fn_info.@"fn".is_generic);
try expect(foo_fn_info.@"fn".params.len == 2);
try expect(foo_fn_info.@"fn".is_var_args);
@ -374,7 +375,7 @@ fn testFunction() !void {

const aligned_foo_fn_type = @TypeOf(typeInfoFooAligned);
const aligned_foo_fn_info = @typeInfo(aligned_foo_fn_type);
try expect(aligned_foo_fn_info.@"fn".calling_convention == .C);
try expect(aligned_foo_fn_info.@"fn".calling_convention.eql(.c));
try expect(!aligned_foo_fn_info.@"fn".is_generic);
try expect(aligned_foo_fn_info.@"fn".params.len == 2);
try expect(aligned_foo_fn_info.@"fn".is_var_args);
@ -390,8 +391,8 @@ fn testFunction() !void {
try expect(aligned_foo_ptr_fn_info.pointer.sentinel == null);
}

extern fn typeInfoFoo(a: usize, b: bool, ...) callconv(.C) usize;
extern fn typeInfoFooAligned(a: usize, b: bool, ...) align(4) callconv(.C) usize;
extern fn typeInfoFoo(a: usize, b: bool, ...) callconv(.c) usize;
extern fn typeInfoFooAligned(a: usize, b: bool, ...) align(4) callconv(.c) usize;

test "type info: generic function types" {
const G1 = @typeInfo(@TypeOf(generic1));

@ -79,9 +79,9 @@ test "basic" {
try expectEqualStrings("fn (comptime u32) void", @typeName(fn (comptime u32) void));
try expectEqualStrings("fn (noalias []u8) void", @typeName(fn (noalias []u8) void));

try expectEqualStrings("fn () callconv(.C) void", @typeName(fn () callconv(.C) void));
try expectEqualStrings("fn (...) callconv(.C) void", @typeName(fn (...) callconv(.C) void));
try expectEqualStrings("fn (u32, ...) callconv(.C) void", @typeName(fn (u32, ...) callconv(.C) void));
try expectEqualStrings("fn () callconv(.c) void", @typeName(fn () callconv(.c) void));
try expectEqualStrings("fn (...) callconv(.c) void", @typeName(fn (...) callconv(.c) void));
try expectEqualStrings("fn (u32, ...) callconv(.c) void", @typeName(fn (u32, ...) callconv(.c) void));
}

test "top level decl" {

@ -7,10 +7,9 @@ export fn zig_return_array() [10]u8 {
}

// error
// backend=stage2
// target=native
// target=x86_64-linux
//
// :1:21: error: parameter of type '[10]u8' not allowed in function with calling convention 'C'
// :1:21: error: parameter of type '[10]u8' not allowed in function with calling convention 'x86_64_sysv'
// :1:21: note: arrays are not allowed as a parameter type
// :5:30: error: return type '[10]u8' not allowed in function with calling convention 'C'
// :5:30: error: return type '[10]u8' not allowed in function with calling convention 'x86_64_sysv'
// :5:30: note: arrays are not allowed as a return type

@ -8,5 +8,5 @@ inline fn b() void {}
// backend=stage2
// target=native
//
// :2:9: error: variable of type '*const fn () callconv(.Inline) void' must be const or comptime
// :2:9: error: variable of type '*const fn () callconv(.@"inline") void' must be const or comptime
// :2:9: note: function has inline calling convention

@ -1,7 +1,7 @@
const Foo = packed struct(u32) {
x: u1,
};
fn bar(_: Foo) callconv(.C) void {}
fn bar(_: Foo) callconv(.c) void {}
pub export fn entry() void {
bar(.{ .x = 0 });
}

@ -3,9 +3,8 @@ export fn entry2() callconv(.AAPCS) void {}
export fn entry3() callconv(.AAPCSVFP) void {}

// error
// backend=stage2
// target=x86_64-linux-none
//
// :1:30: error: callconv 'APCS' is only available on ARM, not x86_64
// :2:30: error: callconv 'AAPCS' is only available on ARM, not x86_64
// :3:30: error: callconv 'AAPCSVFP' is only available on ARM, not x86_64
// :1:30: error: calling convention 'arm_apcs' only available on architectures 'arm', 'armeb', 'thumb', 'thumbeb'
// :2:30: error: calling convention 'arm_aapcs' only available on architectures 'arm', 'armeb', 'thumb', 'thumbeb'
// :3:30: error: calling convention 'arm_aapcs_vfp' only available on architectures 'arm', 'armeb', 'thumb', 'thumbeb'

@ -4,4 +4,4 @@ export fn entry() callconv(.Interrupt) void {}
// backend=stage2
// target=aarch64-linux-none
//
// :1:29: error: callconv 'Interrupt' is only available on x86, x86_64, AVR, and MSP430, not aarch64
// :1:29: error: calling convention 'Interrupt' is only available on x86, x86_64, AVR, and MSP430, not aarch64

@ -1,7 +1,7 @@
export fn entry() callconv(.Signal) void {}
export fn entry() callconv(.avr_signal) void {}

// error
// backend=stage2
// target=x86_64-linux-none
//
// :1:29: error: callconv 'Signal' is only available on AVR, not x86_64
// :1:29: error: calling convention 'avr_signal' only available on architectures 'avr'

@ -1,6 +1,6 @@
const F1 = fn () callconv(.Stdcall) void;
const F2 = fn () callconv(.Fastcall) void;
const F3 = fn () callconv(.Thiscall) void;
const F1 = fn () callconv(.{ .x86_stdcall = .{} }) void;
const F2 = fn () callconv(.{ .x86_fastcall = .{} }) void;
const F3 = fn () callconv(.{ .x86_thiscall = .{} }) void;
export fn entry1() void {
const a: F1 = undefined;
_ = a;
@ -18,6 +18,6 @@ export fn entry3() void {
// backend=stage2
// target=x86_64-linux-none
//
// :1:28: error: callconv 'Stdcall' is only available on x86, not x86_64
// :2:28: error: callconv 'Fastcall' is only available on x86, not x86_64
// :3:28: error: callconv 'Thiscall' is only available on x86, not x86_64
// :1:28: error: calling convention 'x86_stdcall' only available on architectures 'x86'
// :2:28: error: calling convention 'x86_fastcall' only available on architectures 'x86'
// :3:28: error: calling convention 'x86_thiscall' only available on architectures 'x86'

@ -1,7 +0,0 @@
export fn entry() callconv(.Vectorcall) void {}

// error
// backend=stage2
// target=x86_64-linux-none
//
// :1:29: error: callconv 'Vectorcall' is only available on x86 and AArch64, not x86_64
@ -4,7 +4,7 @@ pub inline fn requestAdapter(
comptime callbackArg: fn () callconv(.Inline) void,
) void {
_ = &(struct {
pub fn callback() callconv(.C) void {
pub fn callback() callconv(.c) void {
callbackArg();
}
}).callback;

@ -3,7 +3,6 @@ export fn foo(comptime x: anytype, y: i32) i32 {
}

// error
// backend=stage2
// target=native
// target=x86_64-linux
//
// :1:15: error: comptime parameters not allowed in function with calling convention 'C'
// :1:15: error: comptime parameters not allowed in function with calling convention 'x86_64_sysv'

@ -4,7 +4,6 @@ export fn foo(num: anytype) i32 {
}

// error
// backend=stage2
// target=native
// target=x86_64-linux
//
// :1:15: error: generic parameters not allowed in function with calling convention 'C'
// :1:15: error: generic parameters not allowed in function with calling convention 'x86_64_sysv'

@ -14,8 +14,7 @@ export fn entry() usize {
}

// error
// backend=stage2
// target=native
// target=x86_64-linux
//
// :1:38: error: expected type 'fn (i32) i32', found 'fn (i32) callconv(.C) i32'
// :1:38: note: calling convention 'C' cannot cast into calling convention 'Unspecified'
// :1:38: error: expected type 'fn (i32) i32', found 'fn (i32) callconv(.c) i32'
// :1:38: note: calling convention 'x86_64_sysv' cannot cast into calling convention 'auto'
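
A small illustration of the renaming this diagnostic reflects, assuming an x86_64-linux target: 'Unspecified' is now reported as 'auto', and '.c' resolves per target, so the compiler names it 'x86_64_sysv' here.

fn c_fn(x: i32) callconv(.c) i32 {
    return x;
}
comptime {
    // rejected under the new naming: 'x86_64_sysv' cannot cast into 'auto'
    const p: *const fn (i32) i32 = &c_fn;
    _ = p;
}
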
@ -15,9 +15,8 @@ comptime {
}

// error
// backend=stage2
// target=native
// target=x86_64-linux
//
// :1:15: error: comptime parameters not allowed in function with calling convention 'C'
// :5:30: error: comptime parameters not allowed in function with calling convention 'C'
// :6:30: error: generic parameters not allowed in function with calling convention 'C'
// :1:15: error: comptime parameters not allowed in function with calling convention 'x86_64_sysv'
// :5:30: error: comptime parameters not allowed in function with calling convention 'x86_64_sysv'
// :6:30: error: generic parameters not allowed in function with calling convention 'x86_64_sysv'

@ -1,7 +1,3 @@
comptime {
@setAlignStack(1);
}

comptime {
@branchHint(.cold);
}
@ -54,16 +50,15 @@ comptime {
// backend=stage2
// target=native
//
// :2:5: error: '@setAlignStack' outside function scope
// :6:5: error: '@branchHint' outside function scope
// :10:5: error: '@src' outside function scope
// :14:5: error: '@returnAddress' outside function scope
// :18:5: error: '@frameAddress' outside function scope
// :22:5: error: '@breakpoint' outside function scope
// :26:5: error: '@cVaArg' outside function scope
// :30:5: error: '@cVaCopy' outside function scope
// :34:5: error: '@cVaEnd' outside function scope
// :38:5: error: '@cVaStart' outside function scope
// :42:5: error: '@workItemId' outside function scope
// :46:5: error: '@workGroupSize' outside function scope
// :50:5: error: '@workGroupId' outside function scope
// :2:5: error: '@branchHint' outside function scope
// :6:5: error: '@src' outside function scope
// :10:5: error: '@returnAddress' outside function scope
// :14:5: error: '@frameAddress' outside function scope
// :18:5: error: '@breakpoint' outside function scope
// :22:5: error: '@cVaArg' outside function scope
// :26:5: error: '@cVaCopy' outside function scope
// :30:5: error: '@cVaEnd' outside function scope
// :34:5: error: '@cVaStart' outside function scope
// :38:5: error: '@workItemId' outside function scope
// :42:5: error: '@workGroupSize' outside function scope
// :46:5: error: '@workGroupId' outside function scope

@ -4,10 +4,9 @@ export fn entry(foo: Foo) void {
}

// error
// backend=stage2
// target=native
// target=x86_64-linux
//
// :2:17: error: parameter of type 'tmp.Foo' not allowed in function with calling convention 'C'
// :2:17: error: parameter of type 'tmp.Foo' not allowed in function with calling convention 'x86_64_sysv'
// :2:17: note: enum tag type 'u2' is not extern compatible
// :2:17: note: only integers with 0, 8, 16, 32, 64 and 128 bits are extern compatible
// :1:13: note: enum declared here

@ -8,9 +8,8 @@ export fn entry(foo: Foo) void {
}

// error
// backend=stage2
// target=native
// target=x86_64-linux
//
// :6:17: error: parameter of type 'tmp.Foo' not allowed in function with calling convention 'C'
// :6:17: error: parameter of type 'tmp.Foo' not allowed in function with calling convention 'x86_64_sysv'
// :6:17: note: only extern structs and ABI sized packed structs are extern compatible
// :1:13: note: struct declared here

@ -8,9 +8,8 @@ export fn entry(foo: Foo) void {
}

// error
// backend=stage2
// target=native
// target=x86_64-linux
//
// :6:17: error: parameter of type 'tmp.Foo' not allowed in function with calling convention 'C'
// :6:17: error: parameter of type 'tmp.Foo' not allowed in function with calling convention 'x86_64_sysv'
// :6:17: note: only extern unions and ABI sized packed unions are extern compatible
// :1:13: note: union declared here

@ -1,4 +1,4 @@
const x = @extern(*const fn () callconv(.C) void, .{ .name = "foo" });
const x = @extern(*const fn () callconv(.c) void, .{ .name = "foo" });

export fn entry0() void {
comptime x();

@ -9,12 +9,11 @@ export fn signal_param(_: u32) callconv(.Signal) void {}
export fn signal_ret() callconv(.Signal) noreturn {}

// error
// backend=stage2
// target=x86_64-linux
//
// :1:28: error: first parameter of function with 'Interrupt' calling convention must be a pointer type
// :2:43: error: second parameter of function with 'Interrupt' calling convention must be a 64-bit integer
// :3:51: error: 'Interrupt' calling convention supports up to 2 parameters, found 3
// :4:69: error: function with calling convention 'Interrupt' must return 'void' or 'noreturn'
// :8:24: error: parameters are not allowed with 'Signal' calling convention
// :9:34: error: callconv 'Signal' is only available on AVR, not x86_64
//
// :1:28: error: first parameter of function with 'x86_64_interrupt' calling convention must be a pointer type
// :2:43: error: second parameter of function with 'x86_64_interrupt' calling convention must be a 64-bit integer
// :3:51: error: 'x86_64_interrupt' calling convention supports up to 2 parameters, found 3
// :4:69: error: function with calling convention 'x86_64_interrupt' must return 'void' or 'noreturn'
// :8:24: error: parameters are not allowed with 'avr_signal' calling convention
// :9:34: error: calling convention 'avr_signal' only available on architectures 'avr'
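
Taken together, the constraints above imply a well-formed handler looks roughly like this sketch (hypothetical names, x86_64 target assumed); the second, error-code parameter is optional but must be a 64-bit integer when present:

fn isr(frame: *const anyopaque, error_code: u64) callconv(.{ .x86_64_interrupt = .{} }) void {
    _ = frame;
    _ = error_code;
}
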
@ -9,4 +9,4 @@ pub export fn entry() void {
// backend=llvm
// target=native
//
// :5:5: error: unable to perform tail call: type of function being called 'fn (usize) void' does not match type of calling function 'fn () callconv(.C) void'
// :5:5: error: unable to perform tail call: type of function being called 'fn (usize) void' does not match type of calling function 'fn () callconv(.c) void'

@ -13,11 +13,10 @@ comptime {
}

// error
// backend=stage2
// target=native
// target=x86_64-linux
//
// :1:1: error: variadic function does not support '.Unspecified' calling convention
// :1:1: note: supported calling conventions: '.C'
// :1:1: error: variadic function does not support '.Inline' calling convention
// :1:1: note: supported calling conventions: '.C'
// :1:1: error: variadic function does not support 'auto' calling convention
// :1:1: note: supported calling conventions: 'x86_64_sysv', 'x86_64_win'
// :1:1: error: variadic function does not support 'inline' calling convention
// :1:1: note: supported calling conventions: 'x86_64_sysv', 'x86_64_win'
// :2:1: error: generic function cannot be variadic
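
A sketch of a variadic type that satisfies the new rule, assuming an x86_64 target where '.c' resolves to 'x86_64_sysv', one of the two supported conventions:

const VarArgsFn = fn ([*:0]const u8, ...) callconv(.c) void; // accepted; 'auto' or '.@"inline"' would be rejected
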
@ -1,5 +1,4 @@
const cc = .Inline;
noinline fn foo() callconv(cc) void {}
noinline fn foo() callconv(.@"inline") void {}

comptime {
_ = foo;
@ -9,4 +8,4 @@ comptime {
// backend=stage2
// target=native
//
// :2:28: error: 'noinline' function cannot have callconv 'Inline'
// :1:29: error: 'noinline' function cannot have calling convention 'inline'
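
Since 'inline' is a keyword, the tag must be written with the '@"..."' identifier syntax when spelled as a literal; a minimal sketch of the valid form, without the conflicting 'noinline':

fn fastPath() callconv(.@"inline") void {}
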
@ -1,20 +1,20 @@
const S = extern struct {
a: fn () callconv(.C) void,
a: fn () callconv(.c) void,
};
comptime {
_ = @sizeOf(S) == 1;
}
comptime {
_ = [*c][4]fn () callconv(.C) void;
_ = [*c][4]fn () callconv(.c) void;
}

// error
// backend=stage2
// target=native
//
// :2:8: error: extern structs cannot contain fields of type 'fn () callconv(.C) void'
// :2:8: error: extern structs cannot contain fields of type 'fn () callconv(.c) void'
// :2:8: note: type has no guaranteed in-memory representation
// :2:8: note: use '*const ' to make a function pointer type
// :8:13: error: C pointers cannot point to non-C-ABI-compatible type '[4]fn () callconv(.C) void'
// :8:13: error: C pointers cannot point to non-C-ABI-compatible type '[4]fn () callconv(.c) void'
// :8:13: note: type has no guaranteed in-memory representation
// :8:13: note: use '*const ' to make a function pointer type
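
Following the note in those diagnostics, the field compiles once it is a function pointer rather than a bare function type; a minimal sketch:

const Ok = extern struct {
    a: *const fn () callconv(.c) void, // pointer type has a guaranteed in-memory representation
};
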
@ -12,8 +12,7 @@ comptime {
}

// error
// backend=stage2
// target=native
// target=x86_64-linux
//
// :1:13: error: variadic function does not support '.Unspecified' calling convention
// :1:13: note: supported calling conventions: '.C'
// :1:13: error: variadic function does not support 'auto' calling convention
// :1:13: note: supported calling conventions: 'x86_64_sysv', 'x86_64_win'

@ -1,5 +1,5 @@
const GuSettings = struct {
fin: ?fn (c_int) callconv(.C) void,
fin: ?fn (c_int) callconv(.c) void,
};
pub export fn callbackFin(id: c_int, arg: ?*anyopaque) void {
const settings: ?*GuSettings = @as(?*GuSettings, @ptrFromInt(@intFromPtr(arg)));
@ -13,4 +13,4 @@ pub export fn callbackFin(id: c_int, arg: ?*anyopaque) void {
//
// :5:54: error: pointer to comptime-only type '?*tmp.GuSettings' must be comptime-known, but operand is runtime-known
// :2:10: note: struct requires comptime because of this field
// :2:10: note: use '*const fn (c_int) callconv(.C) void' for a function pointer type
// :2:10: note: use '*const fn (c_int) callconv(.c) void' for a function pointer type

@ -1,9 +0,0 @@
export fn entry() callconv(.Naked) void {
@setAlignStack(16);
}

// error
// backend=stage2
// target=native
//
// :2:5: error: @setAlignStack in naked function
@ -1,9 +0,0 @@
export fn entry() void {
@setAlignStack(511 + 1);
}

// error
// backend=stage2
// target=native
//
// :2:5: error: attempt to @setAlignStack(512); maximum is 256
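
With '@setAlignStack' removed (hence the two deleted tests above), the same effect is expressed through the calling-convention payload; a sketch assuming the 'incoming_stack_alignment' option of the new tagged union on an x86_64-linux target:

// hypothetical example; field name assumed from the new CallingConvention options
export fn entry() callconv(.{ .x86_64_sysv = .{ .incoming_stack_alignment = 16 } }) void {}
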
@ -1,11 +1,10 @@
extern fn Text(str: []const u8, num: i32) callconv(.C) void;
extern fn Text(str: []const u8, num: i32) callconv(.c) void;
export fn entry() void {
_ = Text;
}

// error
// backend=stage2
// target=native
// target=x86_64-linux
//
// :1:16: error: parameter of type '[]const u8' not allowed in function with calling convention 'C'
// :1:16: error: parameter of type '[]const u8' not allowed in function with calling convention 'x86_64_sysv'
// :1:16: note: slices have no guaranteed in-memory representation

@ -1,4 +1,4 @@
const fn_ty = ?fn ([*c]u8, ...) callconv(.C) void;
const fn_ty = ?fn ([*c]u8, ...) callconv(.c) void;
extern fn fn_decl(fmt: [*:0]u8, ...) void;

export fn main() void {
@ -10,6 +10,6 @@ export fn main() void {
// backend=stage2
// target=native
//
// :5:22: error: expected type '?fn ([*c]u8, ...) callconv(.C) void', found 'fn ([*:0]u8, ...) callconv(.C) void'
// :5:22: error: expected type '?fn ([*c]u8, ...) callconv(.c) void', found 'fn ([*:0]u8, ...) callconv(.c) void'
// :5:22: note: parameter 0 '[*:0]u8' cannot cast into '[*c]u8'
// :5:22: note: '[*c]u8' could have null values which are illegal in type '[*:0]u8'

@ -1,4 +1,4 @@
fn entry() callconv(.C) void {}
fn entry() callconv(.c) void {}
comptime {
@export(&entry, .{ .name = "entry", .linkage = @as(u32, 1234) });
}

@ -9,7 +9,7 @@ static inline void foo() {
// c_frontend=clang
//
// pub const struct_empty_struct = extern struct {};
// pub fn foo() callconv(.C) void {
// pub fn foo() callconv(.c) void {
// const bar = struct {
// var static: struct_empty_struct = @import("std").mem.zeroes(struct_empty_struct);
// };

@ -484,11 +484,11 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
\\ fnptr_attr_ty qux;
\\};
, &[_][]const u8{
\\pub const fnptr_ty = ?*const fn () callconv(.C) void;
\\pub const fnptr_attr_ty = ?*const fn () callconv(.C) void;
\\pub const fnptr_ty = ?*const fn () callconv(.c) void;
\\pub const fnptr_attr_ty = ?*const fn () callconv(.c) void;
\\pub const struct_foo = extern struct {
\\ foo: ?*const fn () callconv(.C) void = @import("std").mem.zeroes(?*const fn () callconv(.C) void),
\\ bar: ?*const fn () callconv(.C) void = @import("std").mem.zeroes(?*const fn () callconv(.C) void),
\\ foo: ?*const fn () callconv(.c) void = @import("std").mem.zeroes(?*const fn () callconv(.c) void),
\\ bar: ?*const fn () callconv(.c) void = @import("std").mem.zeroes(?*const fn () callconv(.c) void),
\\ baz: fnptr_ty = @import("std").mem.zeroes(fnptr_ty),
\\ qux: fnptr_attr_ty = @import("std").mem.zeroes(fnptr_attr_ty),
\\};
@ -735,7 +735,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
\\static void bar(void) {}
, &[_][]const u8{
\\pub export fn foo() void {}
\\pub fn bar() callconv(.C) void {}
\\pub fn bar() callconv(.c) void {}
});

cases.add("typedef void",
@ -769,7 +769,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
\\pub export fn bar() void {
\\ var func_ptr: ?*anyopaque = @as(?*anyopaque, @ptrCast(&foo));
\\ _ = &func_ptr;
\\ var typed_func_ptr: ?*const fn () callconv(.C) void = @as(?*const fn () callconv(.C) void, @ptrFromInt(@as(c_ulong, @intCast(@intFromPtr(func_ptr)))));
\\ var typed_func_ptr: ?*const fn () callconv(.c) void = @as(?*const fn () callconv(.c) void, @ptrFromInt(@as(c_ulong, @intCast(@intFromPtr(func_ptr)))));
\\ _ = &typed_func_ptr;
\\}
});
@ -839,9 +839,9 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
\\ lws_callback_function *callback_http;
\\};
, &[_][]const u8{
\\pub const lws_callback_function = fn () callconv(.C) void;
\\pub const lws_callback_function = fn () callconv(.c) void;
\\pub const struct_Foo = extern struct {
\\ func: ?*const fn () callconv(.C) void = @import("std").mem.zeroes(?*const fn () callconv(.C) void),
\\ func: ?*const fn () callconv(.c) void = @import("std").mem.zeroes(?*const fn () callconv(.c) void),
\\ callback_http: ?*const lws_callback_function = @import("std").mem.zeroes(?*const lws_callback_function),
\\};
});
@ -867,7 +867,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
\\};
, &[_][]const u8{
\\pub const struct_Foo = extern struct {
\\ derp: ?*const fn ([*c]struct_Foo) callconv(.C) void = @import("std").mem.zeroes(?*const fn ([*c]struct_Foo) callconv(.C) void),
\\ derp: ?*const fn ([*c]struct_Foo) callconv(.c) void = @import("std").mem.zeroes(?*const fn ([*c]struct_Foo) callconv(.c) void),
\\};
,
\\pub const Foo = struct_Foo;
@ -1111,7 +1111,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
cases.add("__cdecl doesn't mess up function pointers",
\\void foo(void (__cdecl *fn_ptr)(void));
, &[_][]const u8{
\\pub extern fn foo(fn_ptr: ?*const fn () callconv(.C) void) void;
\\pub extern fn foo(fn_ptr: ?*const fn () callconv(.c) void) void;
});

cases.add("void cast",
@ -1477,8 +1477,8 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
\\typedef void (*fn0)();
\\typedef void (*fn1)(char);
, &[_][]const u8{
\\pub const fn0 = ?*const fn (...) callconv(.C) void;
\\pub const fn1 = ?*const fn (u8) callconv(.C) void;
\\pub const fn0 = ?*const fn (...) callconv(.c) void;
\\pub const fn1 = ?*const fn (u8) callconv(.c) void;
});

cases.addWithTarget("Calling convention", .{
@ -1492,11 +1492,11 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
\\void __attribute__((cdecl)) foo4(float *a);
\\void __attribute__((thiscall)) foo5(float *a);
, &[_][]const u8{
\\pub extern fn foo1(a: [*c]f32) callconv(.Fastcall) void;
\\pub extern fn foo2(a: [*c]f32) callconv(.Stdcall) void;
\\pub extern fn foo3(a: [*c]f32) callconv(.Vectorcall) void;
\\pub extern fn foo1(a: [*c]f32) callconv(.{ .x86_fastcall = .{} }) void;
\\pub extern fn foo2(a: [*c]f32) callconv(.{ .x86_stdcall = .{} }) void;
\\pub extern fn foo3(a: [*c]f32) callconv(.{ .x86_vectorcall = .{} }) void;
\\pub extern fn foo4(a: [*c]f32) void;
\\pub extern fn foo5(a: [*c]f32) callconv(.Thiscall) void;
\\pub extern fn foo5(a: [*c]f32) callconv(.{ .x86_thiscall = .{} }) void;
});

cases.addWithTarget("Calling convention", std.Target.Query.parse(.{
@ -1506,8 +1506,8 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
\\void __attribute__((pcs("aapcs"))) foo1(float *a);
\\void __attribute__((pcs("aapcs-vfp"))) foo2(float *a);
, &[_][]const u8{
\\pub extern fn foo1(a: [*c]f32) callconv(.AAPCS) void;
\\pub extern fn foo2(a: [*c]f32) callconv(.AAPCSVFP) void;
\\pub extern fn foo1(a: [*c]f32) callconv(.{ .arm_aapcs = .{} }) void;
\\pub extern fn foo2(a: [*c]f32) callconv(.{ .arm_aapcs_vfp = .{} }) void;
});

cases.addWithTarget("Calling convention", std.Target.Query.parse(.{
@ -1516,7 +1516,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
}) catch unreachable,
\\void __attribute__((aarch64_vector_pcs)) foo1(float *a);
, &[_][]const u8{
\\pub extern fn foo1(a: [*c]f32) callconv(.Vectorcall) void;
\\pub extern fn foo1(a: [*c]f32) callconv(.{ .aarch64_vfabi = .{} }) void;
});

cases.add("Parameterless function prototypes",
@ -1533,8 +1533,8 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
\\pub export fn b() void {}
\\pub extern fn c(...) void;
\\pub extern fn d() void;
\\pub fn e() callconv(.C) void {}
\\pub fn f() callconv(.C) void {}
\\pub fn e() callconv(.c) void {}
\\pub fn f() callconv(.c) void {}
\\pub extern fn g() void;
\\pub extern fn h() void;
});
@ -1555,7 +1555,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
\\ char *arr1[10] ={0};
\\}
, &[_][]const u8{
\\pub fn foo() callconv(.C) void {
\\pub fn foo() callconv(.c) void {
\\ var arr: [10]u8 = [1]u8{
\\ 1,
\\ } ++ [1]u8{0} ** 9;
@ -1686,13 +1686,13 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
\\extern char (*fn_ptr2)(int, float);
\\#define bar fn_ptr2
, &[_][]const u8{
\\pub extern var fn_ptr: ?*const fn () callconv(.C) void;
\\pub extern var fn_ptr: ?*const fn () callconv(.c) void;
,
\\pub inline fn foo() void {
\\ return fn_ptr.?();
\\}
,
\\pub extern var fn_ptr2: ?*const fn (c_int, f32) callconv(.C) u8;
\\pub extern var fn_ptr2: ?*const fn (c_int, f32) callconv(.c) u8;
,
\\pub inline fn bar(arg_1: c_int, arg_2: f32) u8 {
\\ return fn_ptr2.?(arg_1, arg_2);
@ -1714,8 +1714,8 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
\\#define glClearPFN PFNGLCLEARPROC
, &[_][]const u8{
\\pub const GLbitfield = c_uint;
\\pub const PFNGLCLEARPROC = ?*const fn (GLbitfield) callconv(.C) void;
\\pub const OpenGLProc = ?*const fn () callconv(.C) void;
\\pub const PFNGLCLEARPROC = ?*const fn (GLbitfield) callconv(.c) void;
\\pub const OpenGLProc = ?*const fn () callconv(.c) void;
\\const struct_unnamed_1 = extern struct {
\\ Clear: PFNGLCLEARPROC = @import("std").mem.zeroes(PFNGLCLEARPROC),
\\};
@ -2691,9 +2691,9 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
\\ return 0;
\\}
\\pub export fn bar() void {
\\ var f: ?*const fn () callconv(.C) void = &foo;
\\ var f: ?*const fn () callconv(.c) void = &foo;
\\ _ = &f;
\\ var b: ?*const fn () callconv(.C) c_int = &baz;
\\ var b: ?*const fn () callconv(.c) c_int = &baz;
\\ _ = &b;
\\ f.?();
\\ f.?();
@ -3048,8 +3048,8 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
\\ baz();
\\}
, &[_][]const u8{
\\pub fn bar() callconv(.C) void {}
\\pub export fn foo(arg_baz: ?*const fn () callconv(.C) [*c]c_int) void {
\\pub fn bar() callconv(.c) void {}
\\pub export fn foo(arg_baz: ?*const fn () callconv(.c) [*c]c_int) void {
\\ var baz = arg_baz;
\\ _ = &baz;
\\ bar();
@ -3112,7 +3112,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
\\ do {} while (0);
\\}
, &[_][]const u8{
\\pub fn foo() callconv(.C) void {
\\pub fn foo() callconv(.c) void {
\\ if (true) while (true) {
\\ if (!false) break;
\\ };
@ -3212,10 +3212,10 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
\\void c(void) {}
\\static void foo() {}
, &[_][]const u8{
\\pub fn a() callconv(.C) void {}
\\pub fn b() callconv(.C) void {}
\\pub fn a() callconv(.c) void {}
\\pub fn b() callconv(.c) void {}
\\pub export fn c() void {}
\\pub fn foo() callconv(.C) void {}
\\pub fn foo() callconv(.c) void {}
});

cases.add("casting away const and volatile",