Merge pull request #7749 from tadeokondrak/6429-callconv-inline

Replace inline fn with callconv(.Inline)
commit d3565ed6b4
Andrew Kelley 2021-02-11 16:01:58 -08:00, committed by GitHub
51 changed files with 322 additions and 328 deletions

View File

@ -4240,9 +4240,9 @@ fn _start() callconv(.Naked) noreturn {
abort();
}
// The inline specifier forces a function to be inlined at all call sites.
// The inline calling convention forces a function to be inlined at all call sites.
// If the function cannot be inlined, it is a compile-time error.
inline fn shiftLeftOne(a: u32) u32 {
fn shiftLeftOne(a: u32) callconv(.Inline) u32 {
return a << 1;
}
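
A minimal usage sketch of the new syntax (not part of the diff; the test name is illustrative). Calls to a callconv(.Inline) function look like ordinary calls, but every call site is guaranteed to be inlined:

const std = @import("std");

fn shiftLeftOne(a: u32) callconv(.Inline) u32 {
    return a << 1;
}

test "callconv(.Inline) call site" {
    // The call below is guaranteed to be inlined; semantically it behaves
    // like an ordinary call.
    std.testing.expectEqual(@as(u32, 4), shiftLeftOne(2));
}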

View File

@ -155,6 +155,7 @@ pub const CallingConvention = enum {
C,
Naked,
Async,
Inline,
Interrupt,
Signal,
Stdcall,
@ -404,21 +405,13 @@ pub const TypeInfo = union(enum) {
/// therefore must be kept in sync with the compiler implementation.
pub const FnDecl = struct {
fn_type: type,
inline_type: Inline,
is_noinline: bool,
is_var_args: bool,
is_extern: bool,
is_export: bool,
lib_name: ?[]const u8,
return_type: type,
arg_names: []const []const u8,
/// This data structure is used by the Zig language code generation and
/// therefore must be kept in sync with the compiler implementation.
pub const Inline = enum {
Auto,
Always,
Never,
};
};
};
};
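
A hedged sketch of how this change is visible from user code (not part of the diff; helper and test names are illustrative): the inline requirement now lives on the function type as a calling convention rather than on the declaration as FnDecl.inline_type, and FnDecl only keeps an is_noinline flag.

const std = @import("std");

fn addOne(x: u32) callconv(.Inline) u32 {
    return x + 1;
}

test "Inline is part of the function type" {
    const info = @typeInfo(@TypeOf(addOne)).Fn;
    // Before this commit, "always inline" was a property of the declaration
    // (TypeInfo.FnDecl.inline_type); now it is a calling convention.
    std.testing.expect(info.calling_convention == .Inline);
}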

View File

@ -6,70 +6,70 @@
const std = @import("std");
pub inline fn __builtin_bswap16(val: u16) callconv(.C) u16 { return @byteSwap(u16, val); }
pub inline fn __builtin_bswap32(val: u32) callconv(.C) u32 { return @byteSwap(u32, val); }
pub inline fn __builtin_bswap64(val: u64) callconv(.C) u64 { return @byteSwap(u64, val); }
pub fn __builtin_bswap16(val: u16) callconv(.Inline) u16 { return @byteSwap(u16, val); }
pub fn __builtin_bswap32(val: u32) callconv(.Inline) u32 { return @byteSwap(u32, val); }
pub fn __builtin_bswap64(val: u64) callconv(.Inline) u64 { return @byteSwap(u64, val); }
pub inline fn __builtin_signbit(val: f64) callconv(.C) c_int { return @boolToInt(std.math.signbit(val)); }
pub inline fn __builtin_signbitf(val: f32) callconv(.C) c_int { return @boolToInt(std.math.signbit(val)); }
pub fn __builtin_signbit(val: f64) callconv(.Inline) c_int { return @boolToInt(std.math.signbit(val)); }
pub fn __builtin_signbitf(val: f32) callconv(.Inline) c_int { return @boolToInt(std.math.signbit(val)); }
pub inline fn __builtin_popcount(val: c_uint) callconv(.C) c_int {
pub fn __builtin_popcount(val: c_uint) callconv(.Inline) c_int {
// popcount of a c_uint will never exceed the capacity of a c_int
@setRuntimeSafety(false);
return @bitCast(c_int, @as(c_uint, @popCount(c_uint, val)));
}
pub inline fn __builtin_ctz(val: c_uint) callconv(.C) c_int {
pub fn __builtin_ctz(val: c_uint) callconv(.Inline) c_int {
// Returns the number of trailing 0-bits in val, starting at the least significant bit position.
// In C if `val` is 0, the result is undefined; in zig it's the number of bits in a c_uint
@setRuntimeSafety(false);
return @bitCast(c_int, @as(c_uint, @ctz(c_uint, val)));
}
pub inline fn __builtin_clz(val: c_uint) callconv(.C) c_int {
pub fn __builtin_clz(val: c_uint) callconv(.Inline) c_int {
// Returns the number of leading 0-bits in x, starting at the most significant bit position.
// In C if `val` is 0, the result is undefined; in zig it's the number of bits in a c_uint
@setRuntimeSafety(false);
return @bitCast(c_int, @as(c_uint, @clz(c_uint, val)));
}
pub inline fn __builtin_sqrt(val: f64) callconv(.C) f64 { return @sqrt(val); }
pub inline fn __builtin_sqrtf(val: f32) callconv(.C) f32 { return @sqrt(val); }
pub fn __builtin_sqrt(val: f64) callconv(.Inline) f64 { return @sqrt(val); }
pub fn __builtin_sqrtf(val: f32) callconv(.Inline) f32 { return @sqrt(val); }
pub inline fn __builtin_sin(val: f64) callconv(.C) f64 { return @sin(val); }
pub inline fn __builtin_sinf(val: f32) callconv(.C) f32 { return @sin(val); }
pub inline fn __builtin_cos(val: f64) callconv(.C) f64 { return @cos(val); }
pub inline fn __builtin_cosf(val: f32) callconv(.C) f32 { return @cos(val); }
pub fn __builtin_sin(val: f64) callconv(.Inline) f64 { return @sin(val); }
pub fn __builtin_sinf(val: f32) callconv(.Inline) f32 { return @sin(val); }
pub fn __builtin_cos(val: f64) callconv(.Inline) f64 { return @cos(val); }
pub fn __builtin_cosf(val: f32) callconv(.Inline) f32 { return @cos(val); }
pub inline fn __builtin_exp(val: f64) callconv(.C) f64 { return @exp(val); }
pub inline fn __builtin_expf(val: f32) callconv(.C) f32 { return @exp(val); }
pub inline fn __builtin_exp2(val: f64) callconv(.C) f64 { return @exp2(val); }
pub inline fn __builtin_exp2f(val: f32) callconv(.C) f32 { return @exp2(val); }
pub inline fn __builtin_log(val: f64) callconv(.C) f64 { return @log(val); }
pub inline fn __builtin_logf(val: f32) callconv(.C) f32 { return @log(val); }
pub inline fn __builtin_log2(val: f64) callconv(.C) f64 { return @log2(val); }
pub inline fn __builtin_log2f(val: f32) callconv(.C) f32 { return @log2(val); }
pub inline fn __builtin_log10(val: f64) callconv(.C) f64 { return @log10(val); }
pub inline fn __builtin_log10f(val: f32) callconv(.C) f32 { return @log10(val); }
pub fn __builtin_exp(val: f64) callconv(.Inline) f64 { return @exp(val); }
pub fn __builtin_expf(val: f32) callconv(.Inline) f32 { return @exp(val); }
pub fn __builtin_exp2(val: f64) callconv(.Inline) f64 { return @exp2(val); }
pub fn __builtin_exp2f(val: f32) callconv(.Inline) f32 { return @exp2(val); }
pub fn __builtin_log(val: f64) callconv(.Inline) f64 { return @log(val); }
pub fn __builtin_logf(val: f32) callconv(.Inline) f32 { return @log(val); }
pub fn __builtin_log2(val: f64) callconv(.Inline) f64 { return @log2(val); }
pub fn __builtin_log2f(val: f32) callconv(.Inline) f32 { return @log2(val); }
pub fn __builtin_log10(val: f64) callconv(.Inline) f64 { return @log10(val); }
pub fn __builtin_log10f(val: f32) callconv(.Inline) f32 { return @log10(val); }
// Standard C Library bug: The absolute value of the most negative integer remains negative.
pub inline fn __builtin_abs(val: c_int) callconv(.C) c_int { return std.math.absInt(val) catch std.math.minInt(c_int); }
pub inline fn __builtin_fabs(val: f64) callconv(.C) f64 { return @fabs(val); }
pub inline fn __builtin_fabsf(val: f32) callconv(.C) f32 { return @fabs(val); }
pub fn __builtin_abs(val: c_int) callconv(.Inline) c_int { return std.math.absInt(val) catch std.math.minInt(c_int); }
pub fn __builtin_fabs(val: f64) callconv(.Inline) f64 { return @fabs(val); }
pub fn __builtin_fabsf(val: f32) callconv(.Inline) f32 { return @fabs(val); }
pub inline fn __builtin_floor(val: f64) callconv(.C) f64 { return @floor(val); }
pub inline fn __builtin_floorf(val: f32) callconv(.C) f32 { return @floor(val); }
pub inline fn __builtin_ceil(val: f64) callconv(.C) f64 { return @ceil(val); }
pub inline fn __builtin_ceilf(val: f32) callconv(.C) f32 { return @ceil(val); }
pub inline fn __builtin_trunc(val: f64) callconv(.C) f64 { return @trunc(val); }
pub inline fn __builtin_truncf(val: f32) callconv(.C) f32 { return @trunc(val); }
pub inline fn __builtin_round(val: f64) callconv(.C) f64 { return @round(val); }
pub inline fn __builtin_roundf(val: f32) callconv(.C) f32 { return @round(val); }
pub fn __builtin_floor(val: f64) callconv(.Inline) f64 { return @floor(val); }
pub fn __builtin_floorf(val: f32) callconv(.Inline) f32 { return @floor(val); }
pub fn __builtin_ceil(val: f64) callconv(.Inline) f64 { return @ceil(val); }
pub fn __builtin_ceilf(val: f32) callconv(.Inline) f32 { return @ceil(val); }
pub fn __builtin_trunc(val: f64) callconv(.Inline) f64 { return @trunc(val); }
pub fn __builtin_truncf(val: f32) callconv(.Inline) f32 { return @trunc(val); }
pub fn __builtin_round(val: f64) callconv(.Inline) f64 { return @round(val); }
pub fn __builtin_roundf(val: f32) callconv(.Inline) f32 { return @round(val); }
pub inline fn __builtin_strlen(s: [*c]const u8) callconv(.C) usize { return std.mem.lenZ(s); }
pub inline fn __builtin_strcmp(s1: [*c]const u8, s2: [*c]const u8) callconv(.C) c_int {
pub fn __builtin_strlen(s: [*c]const u8) callconv(.Inline) usize { return std.mem.lenZ(s); }
pub fn __builtin_strcmp(s1: [*c]const u8, s2: [*c]const u8) callconv(.Inline) c_int {
return @as(c_int, std.cstr.cmp(s1, s2));
}
pub inline fn __builtin_object_size(ptr: ?*const c_void, ty: c_int) callconv(.C) usize {
pub fn __builtin_object_size(ptr: ?*const c_void, ty: c_int) callconv(.Inline) usize {
// clang semantics match gcc's: https://gcc.gnu.org/onlinedocs/gcc/Object-Size-Checking.html
// If it is not possible to determine which objects ptr points to at compile time,
// __builtin_object_size should return (size_t) -1 for type 0 or 1 and (size_t) 0
@ -79,37 +79,37 @@ pub inline fn __builtin_object_size(ptr: ?*const c_void, ty: c_int) callconv(.C)
unreachable;
}
pub inline fn __builtin___memset_chk(
pub fn __builtin___memset_chk(
dst: ?*c_void,
val: c_int,
len: usize,
remaining: usize,
) callconv(.C) ?*c_void {
) callconv(.Inline) ?*c_void {
if (len > remaining) @panic("std.c.builtins.memset_chk called with len > remaining");
return __builtin_memset(dst, val, len);
}
pub inline fn __builtin_memset(dst: ?*c_void, val: c_int, len: usize) callconv(.C) ?*c_void {
pub fn __builtin_memset(dst: ?*c_void, val: c_int, len: usize) callconv(.Inline) ?*c_void {
const dst_cast = @ptrCast([*c]u8, dst);
@memset(dst_cast, @bitCast(u8, @truncate(i8, val)), len);
return dst;
}
pub inline fn __builtin___memcpy_chk(
pub fn __builtin___memcpy_chk(
noalias dst: ?*c_void,
noalias src: ?*const c_void,
len: usize,
remaining: usize,
) callconv(.C) ?*c_void {
) callconv(.Inline) ?*c_void {
if (len > remaining) @panic("std.c.builtins.memcpy_chk called with len > remaining");
return __builtin_memcpy(dst, src, len);
}
pub inline fn __builtin_memcpy(
pub fn __builtin_memcpy(
noalias dst: ?*c_void,
noalias src: ?*const c_void,
len: usize,
) callconv(.C) ?*c_void {
) callconv(.Inline) ?*c_void {
const dst_cast = @ptrCast([*c]u8, dst);
const src_cast = @ptrCast([*c]const u8, src);
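
These wrappers previously combined the inline keyword with callconv(.C); with inlining now expressed as a calling convention, the explicit .C is dropped, presumably because an always-inlined function never forms an external ABI boundary. A minimal sketch of the resulting shape (not part of the diff; the wrapper is a local, hypothetical copy so no std import path has to be assumed):

const std = @import("std");

// Hypothetical local copy of one wrapper, for illustration only.
fn bswap32(val: u32) callconv(.Inline) u32 {
    return @byteSwap(u32, val);
}

test "inline wrapper matches the underlying builtin" {
    std.testing.expectEqual(@byteSwap(u32, @as(u32, 0x12345678)), bswap32(0x12345678));
}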

View File

@ -209,7 +209,7 @@ pub fn InflateStream(comptime ReaderType: type) type {
// Insert a single byte into the window.
// Assumes there's enough space.
inline fn appendUnsafe(self: *WSelf, value: u8) void {
fn appendUnsafe(self: *WSelf, value: u8) callconv(.Inline) void {
self.buf[self.wi] = value;
self.wi = (self.wi + 1) & (self.buf.len - 1);
self.el += 1;

View File

@ -15,12 +15,12 @@ pub const Curve25519 = struct {
x: Fe,
/// Decode a Curve25519 point from its compressed (X) coordinates.
pub inline fn fromBytes(s: [32]u8) Curve25519 {
pub fn fromBytes(s: [32]u8) callconv(.Inline) Curve25519 {
return .{ .x = Fe.fromBytes(s) };
}
/// Encode a Curve25519 point.
pub inline fn toBytes(p: Curve25519) [32]u8 {
pub fn toBytes(p: Curve25519) callconv(.Inline) [32]u8 {
return p.x.toBytes();
}

View File

@ -92,7 +92,7 @@ pub const Edwards25519 = struct {
}
/// Flip the sign of the X coordinate.
pub inline fn neg(p: Edwards25519) Edwards25519 {
pub fn neg(p: Edwards25519) callconv(.Inline) Edwards25519 {
return .{ .x = p.x.neg(), .y = p.y, .z = p.z, .t = p.t.neg() };
}
@ -137,14 +137,14 @@ pub const Edwards25519 = struct {
return p.add(q.neg());
}
inline fn cMov(p: *Edwards25519, a: Edwards25519, c: u64) void {
fn cMov(p: *Edwards25519, a: Edwards25519, c: u64) callconv(.Inline) void {
p.x.cMov(a.x, c);
p.y.cMov(a.y, c);
p.z.cMov(a.z, c);
p.t.cMov(a.t, c);
}
inline fn pcSelect(comptime n: usize, pc: [n]Edwards25519, b: u8) Edwards25519 {
fn pcSelect(comptime n: usize, pc: [n]Edwards25519, b: u8) callconv(.Inline) Edwards25519 {
var t = Edwards25519.identityElement;
comptime var i: u8 = 1;
inline while (i < pc.len) : (i += 1) {

View File

@ -52,7 +52,7 @@ pub const Fe = struct {
pub const edwards25519sqrtam2 = Fe{ .limbs = .{ 1693982333959686, 608509411481997, 2235573344831311, 947681270984193, 266558006233600 } };
/// Return true if the field element is zero
pub inline fn isZero(fe: Fe) bool {
pub fn isZero(fe: Fe) callconv(.Inline) bool {
var reduced = fe;
reduced.reduce();
const limbs = reduced.limbs;
@ -60,7 +60,7 @@ pub const Fe = struct {
}
/// Return true if both field elements are equivalent
pub inline fn equivalent(a: Fe, b: Fe) bool {
pub fn equivalent(a: Fe, b: Fe) callconv(.Inline) bool {
return a.sub(b).isZero();
}
@ -164,7 +164,7 @@ pub const Fe = struct {
}
/// Add a field element
pub inline fn add(a: Fe, b: Fe) Fe {
pub fn add(a: Fe, b: Fe) callconv(.Inline) Fe {
var fe: Fe = undefined;
comptime var i = 0;
inline while (i < 5) : (i += 1) {
@ -174,7 +174,7 @@ pub const Fe = struct {
}
/// Subtract a field element
pub inline fn sub(a: Fe, b: Fe) Fe {
pub fn sub(a: Fe, b: Fe) callconv(.Inline) Fe {
var fe = b;
comptime var i = 0;
inline while (i < 4) : (i += 1) {
@ -193,17 +193,17 @@ pub const Fe = struct {
}
/// Negate a field element
pub inline fn neg(a: Fe) Fe {
pub fn neg(a: Fe) callconv(.Inline) Fe {
return zero.sub(a);
}
/// Return true if a field element is negative
pub inline fn isNegative(a: Fe) bool {
pub fn isNegative(a: Fe) callconv(.Inline) bool {
return (a.toBytes()[0] & 1) != 0;
}
/// Conditionally replace a field element with `a` if `c` is positive
pub inline fn cMov(fe: *Fe, a: Fe, c: u64) void {
pub fn cMov(fe: *Fe, a: Fe, c: u64) callconv(.Inline) void {
const mask: u64 = 0 -% c;
var x = fe.*;
comptime var i = 0;
@ -244,7 +244,7 @@ pub const Fe = struct {
}
}
inline fn _carry128(r: *[5]u128) Fe {
fn _carry128(r: *[5]u128) callconv(.Inline) Fe {
var rs: [5]u64 = undefined;
comptime var i = 0;
inline while (i < 4) : (i += 1) {
@ -265,7 +265,7 @@ pub const Fe = struct {
}
/// Multiply two field elements
pub inline fn mul(a: Fe, b: Fe) Fe {
pub fn mul(a: Fe, b: Fe) callconv(.Inline) Fe {
var ax: [5]u128 = undefined;
var bx: [5]u128 = undefined;
var a19: [5]u128 = undefined;
@ -288,7 +288,7 @@ pub const Fe = struct {
return _carry128(&r);
}
inline fn _sq(a: Fe, double: comptime bool) Fe {
fn _sq(a: Fe, double: comptime bool) callconv(.Inline) Fe {
var ax: [5]u128 = undefined;
var r: [5]u128 = undefined;
comptime var i = 0;
@ -317,17 +317,17 @@ pub const Fe = struct {
}
/// Square a field element
pub inline fn sq(a: Fe) Fe {
pub fn sq(a: Fe) callconv(.Inline) Fe {
return _sq(a, false);
}
/// Square and double a field element
pub inline fn sq2(a: Fe) Fe {
pub fn sq2(a: Fe) callconv(.Inline) Fe {
return _sq(a, true);
}
/// Multiply a field element with a small (32-bit) integer
pub inline fn mul32(a: Fe, comptime n: u32) Fe {
pub fn mul32(a: Fe, comptime n: u32) callconv(.Inline) Fe {
const sn = @intCast(u128, n);
var fe: Fe = undefined;
var x: u128 = 0;
@ -342,7 +342,7 @@ pub const Fe = struct {
}
/// Square a field element `n` times
inline fn sqn(a: Fe, comptime n: comptime_int) Fe {
fn sqn(a: Fe, comptime n: comptime_int) callconv(.Inline) Fe {
var i: usize = 0;
var fe = a;
while (i < n) : (i += 1) {

View File

@ -42,7 +42,7 @@ pub const Ristretto255 = struct {
}
/// Reject the neutral element.
pub inline fn rejectIdentity(p: Ristretto255) !void {
pub fn rejectIdentity(p: Ristretto255) callconv(.Inline) !void {
return p.p.rejectIdentity();
}
@ -141,19 +141,19 @@ pub const Ristretto255 = struct {
}
/// Double a Ristretto255 element.
pub inline fn dbl(p: Ristretto255) Ristretto255 {
pub fn dbl(p: Ristretto255) callconv(.Inline) Ristretto255 {
return .{ .p = p.p.dbl() };
}
/// Add two Ristretto255 elements.
pub inline fn add(p: Ristretto255, q: Ristretto255) Ristretto255 {
pub fn add(p: Ristretto255, q: Ristretto255) callconv(.Inline) Ristretto255 {
return .{ .p = p.p.add(q.p) };
}
/// Multiply a Ristretto255 element with a scalar.
/// Return error.WeakPublicKey if the resulting element is
/// the identity element.
pub inline fn mul(p: Ristretto255, s: [encoded_length]u8) !Ristretto255 {
pub fn mul(p: Ristretto255, s: [encoded_length]u8) callconv(.Inline) !Ristretto255 {
return Ristretto255{ .p = try p.p.mul(s) };
}

View File

@ -46,7 +46,7 @@ pub fn reduce64(s: [64]u8) [32]u8 {
/// Perform the X25519 "clamping" operation.
/// The scalar is then guaranteed to be a multiple of the cofactor.
pub inline fn clamp(s: *[32]u8) void {
pub fn clamp(s: *[32]u8) callconv(.Inline) void {
s[0] &= 248;
s[31] = (s[31] & 127) | 64;
}

View File

@ -35,7 +35,7 @@ const State128L = struct {
return state;
}
inline fn update(state: *State128L, d1: AesBlock, d2: AesBlock) void {
fn update(state: *State128L, d1: AesBlock, d2: AesBlock) callconv(.Inline) void {
const blocks = &state.blocks;
const tmp = blocks[7];
comptime var i: usize = 7;
@ -207,7 +207,7 @@ const State256 = struct {
return state;
}
inline fn update(state: *State256, d: AesBlock) void {
fn update(state: *State256, d: AesBlock) callconv(.Inline) void {
const blocks = &state.blocks;
const tmp = blocks[5].encrypt(blocks[0]);
comptime var i: usize = 5;

View File

@ -19,24 +19,24 @@ pub const Block = struct {
repr: BlockVec,
/// Convert a byte sequence into an internal representation.
pub inline fn fromBytes(bytes: *const [16]u8) Block {
pub fn fromBytes(bytes: *const [16]u8) callconv(.Inline) Block {
const repr = mem.bytesToValue(BlockVec, bytes);
return Block{ .repr = repr };
}
/// Convert the internal representation of a block into a byte sequence.
pub inline fn toBytes(block: Block) [16]u8 {
pub fn toBytes(block: Block) callconv(.Inline) [16]u8 {
return mem.toBytes(block.repr);
}
/// XOR the block with a byte sequence.
pub inline fn xorBytes(block: Block, bytes: *const [16]u8) [16]u8 {
pub fn xorBytes(block: Block, bytes: *const [16]u8) callconv(.Inline) [16]u8 {
const x = block.repr ^ fromBytes(bytes).repr;
return mem.toBytes(x);
}
/// Encrypt a block with a round key.
pub inline fn encrypt(block: Block, round_key: Block) Block {
pub fn encrypt(block: Block, round_key: Block) callconv(.Inline) Block {
return Block{
.repr = asm (
\\ vaesenc %[rk], %[in], %[out]
@ -48,7 +48,7 @@ pub const Block = struct {
}
/// Encrypt a block with the last round key.
pub inline fn encryptLast(block: Block, round_key: Block) Block {
pub fn encryptLast(block: Block, round_key: Block) callconv(.Inline) Block {
return Block{
.repr = asm (
\\ vaesenclast %[rk], %[in], %[out]
@ -60,7 +60,7 @@ pub const Block = struct {
}
/// Decrypt a block with a round key.
pub inline fn decrypt(block: Block, inv_round_key: Block) Block {
pub fn decrypt(block: Block, inv_round_key: Block) callconv(.Inline) Block {
return Block{
.repr = asm (
\\ vaesdec %[rk], %[in], %[out]
@ -72,7 +72,7 @@ pub const Block = struct {
}
/// Decrypt a block with the last round key.
pub inline fn decryptLast(block: Block, inv_round_key: Block) Block {
pub fn decryptLast(block: Block, inv_round_key: Block) callconv(.Inline) Block {
return Block{
.repr = asm (
\\ vaesdeclast %[rk], %[in], %[out]
@ -84,17 +84,17 @@ pub const Block = struct {
}
/// Apply the bitwise XOR operation to the content of two blocks.
pub inline fn xorBlocks(block1: Block, block2: Block) Block {
pub fn xorBlocks(block1: Block, block2: Block) callconv(.Inline) Block {
return Block{ .repr = block1.repr ^ block2.repr };
}
/// Apply the bitwise AND operation to the content of two blocks.
pub inline fn andBlocks(block1: Block, block2: Block) Block {
pub fn andBlocks(block1: Block, block2: Block) callconv(.Inline) Block {
return Block{ .repr = block1.repr & block2.repr };
}
/// Apply the bitwise OR operation to the content of two blocks.
pub inline fn orBlocks(block1: Block, block2: Block) Block {
pub fn orBlocks(block1: Block, block2: Block) callconv(.Inline) Block {
return Block{ .repr = block1.repr | block2.repr };
}
@ -114,7 +114,7 @@ pub const Block = struct {
};
/// Encrypt multiple blocks in parallel, each their own round key.
pub inline fn encryptParallel(comptime count: usize, blocks: [count]Block, round_keys: [count]Block) [count]Block {
pub fn encryptParallel(comptime count: usize, blocks: [count]Block, round_keys: [count]Block) callconv(.Inline) [count]Block {
comptime var i = 0;
var out: [count]Block = undefined;
inline while (i < count) : (i += 1) {
@ -124,7 +124,7 @@ pub const Block = struct {
}
/// Decrypt multiple blocks in parallel, each their own round key.
pub inline fn decryptParallel(comptime count: usize, blocks: [count]Block, round_keys: [count]Block) [count]Block {
pub fn decryptParallel(comptime count: usize, blocks: [count]Block, round_keys: [count]Block) callconv(.Inline) [count]Block {
comptime var i = 0;
var out: [count]Block = undefined;
inline while (i < count) : (i += 1) {
@ -134,7 +134,7 @@ pub const Block = struct {
}
/// Encrypt multiple blocks in parallel with the same round key.
pub inline fn encryptWide(comptime count: usize, blocks: [count]Block, round_key: Block) [count]Block {
pub fn encryptWide(comptime count: usize, blocks: [count]Block, round_key: Block) callconv(.Inline) [count]Block {
comptime var i = 0;
var out: [count]Block = undefined;
inline while (i < count) : (i += 1) {
@ -144,7 +144,7 @@ pub const Block = struct {
}
/// Decrypt multiple blocks in parallel with the same round key.
pub inline fn decryptWide(comptime count: usize, blocks: [count]Block, round_key: Block) [count]Block {
pub fn decryptWide(comptime count: usize, blocks: [count]Block, round_key: Block) callconv(.Inline) [count]Block {
comptime var i = 0;
var out: [count]Block = undefined;
inline while (i < count) : (i += 1) {
@ -154,7 +154,7 @@ pub const Block = struct {
}
/// Encrypt multiple blocks in parallel with the same last round key.
pub inline fn encryptLastWide(comptime count: usize, blocks: [count]Block, round_key: Block) [count]Block {
pub fn encryptLastWide(comptime count: usize, blocks: [count]Block, round_key: Block) callconv(.Inline) [count]Block {
comptime var i = 0;
var out: [count]Block = undefined;
inline while (i < count) : (i += 1) {
@ -164,7 +164,7 @@ pub const Block = struct {
}
/// Decrypt multiple blocks in parallel with the same last round key.
pub inline fn decryptLastWide(comptime count: usize, blocks: [count]Block, round_key: Block) [count]Block {
pub fn decryptLastWide(comptime count: usize, blocks: [count]Block, round_key: Block) callconv(.Inline) [count]Block {
comptime var i = 0;
var out: [count]Block = undefined;
inline while (i < count) : (i += 1) {

View File

@ -19,18 +19,18 @@ pub const Block = struct {
repr: BlockVec,
/// Convert a byte sequence into an internal representation.
pub inline fn fromBytes(bytes: *const [16]u8) Block {
pub fn fromBytes(bytes: *const [16]u8) callconv(.Inline) Block {
const repr = mem.bytesToValue(BlockVec, bytes);
return Block{ .repr = repr };
}
/// Convert the internal representation of a block into a byte sequence.
pub inline fn toBytes(block: Block) [16]u8 {
pub fn toBytes(block: Block) callconv(.Inline) [16]u8 {
return mem.toBytes(block.repr);
}
/// XOR the block with a byte sequence.
pub inline fn xorBytes(block: Block, bytes: *const [16]u8) [16]u8 {
pub fn xorBytes(block: Block, bytes: *const [16]u8) callconv(.Inline) [16]u8 {
const x = block.repr ^ fromBytes(bytes).repr;
return mem.toBytes(x);
}
@ -38,7 +38,7 @@ pub const Block = struct {
const zero = Vector(2, u64){ 0, 0 };
/// Encrypt a block with a round key.
pub inline fn encrypt(block: Block, round_key: Block) Block {
pub fn encrypt(block: Block, round_key: Block) callconv(.Inline) Block {
return Block{
.repr = asm (
\\ mov %[out].16b, %[in].16b
@ -54,7 +54,7 @@ pub const Block = struct {
}
/// Encrypt a block with the last round key.
pub inline fn encryptLast(block: Block, round_key: Block) Block {
pub fn encryptLast(block: Block, round_key: Block) callconv(.Inline) Block {
return Block{
.repr = asm (
\\ mov %[out].16b, %[in].16b
@ -69,7 +69,7 @@ pub const Block = struct {
}
/// Decrypt a block with a round key.
pub inline fn decrypt(block: Block, inv_round_key: Block) Block {
pub fn decrypt(block: Block, inv_round_key: Block) callconv(.Inline) Block {
return Block{
.repr = asm (
\\ mov %[out].16b, %[in].16b
@ -85,7 +85,7 @@ pub const Block = struct {
}
/// Decrypt a block with the last round key.
pub inline fn decryptLast(block: Block, inv_round_key: Block) Block {
pub fn decryptLast(block: Block, inv_round_key: Block) callconv(.Inline) Block {
return Block{
.repr = asm (
\\ mov %[out].16b, %[in].16b
@ -100,17 +100,17 @@ pub const Block = struct {
}
/// Apply the bitwise XOR operation to the content of two blocks.
pub inline fn xorBlocks(block1: Block, block2: Block) Block {
pub fn xorBlocks(block1: Block, block2: Block) callconv(.Inline) Block {
return Block{ .repr = block1.repr ^ block2.repr };
}
/// Apply the bitwise AND operation to the content of two blocks.
pub inline fn andBlocks(block1: Block, block2: Block) Block {
pub fn andBlocks(block1: Block, block2: Block) callconv(.Inline) Block {
return Block{ .repr = block1.repr & block2.repr };
}
/// Apply the bitwise OR operation to the content of two blocks.
pub inline fn orBlocks(block1: Block, block2: Block) Block {
pub fn orBlocks(block1: Block, block2: Block) callconv(.Inline) Block {
return Block{ .repr = block1.repr | block2.repr };
}
@ -120,7 +120,7 @@ pub const Block = struct {
pub const optimal_parallel_blocks = 8;
/// Encrypt multiple blocks in parallel, each their own round key.
pub inline fn encryptParallel(comptime count: usize, blocks: [count]Block, round_keys: [count]Block) [count]Block {
pub fn encryptParallel(comptime count: usize, blocks: [count]Block, round_keys: [count]Block) callconv(.Inline) [count]Block {
comptime var i = 0;
var out: [count]Block = undefined;
inline while (i < count) : (i += 1) {
@ -130,7 +130,7 @@ pub const Block = struct {
}
/// Decrypt multiple blocks in parallel, each their own round key.
pub inline fn decryptParallel(comptime count: usize, blocks: [count]Block, round_keys: [count]Block) [count]Block {
pub fn decryptParallel(comptime count: usize, blocks: [count]Block, round_keys: [count]Block) callconv(.Inline) [count]Block {
comptime var i = 0;
var out: [count]Block = undefined;
inline while (i < count) : (i += 1) {
@ -140,7 +140,7 @@ pub const Block = struct {
}
/// Encrypt multiple blocks in parallel with the same round key.
pub inline fn encryptWide(comptime count: usize, blocks: [count]Block, round_key: Block) [count]Block {
pub fn encryptWide(comptime count: usize, blocks: [count]Block, round_key: Block) callconv(.Inline) [count]Block {
comptime var i = 0;
var out: [count]Block = undefined;
inline while (i < count) : (i += 1) {
@ -150,7 +150,7 @@ pub const Block = struct {
}
/// Decrypt multiple blocks in parallel with the same round key.
pub inline fn decryptWide(comptime count: usize, blocks: [count]Block, round_key: Block) [count]Block {
pub fn decryptWide(comptime count: usize, blocks: [count]Block, round_key: Block) callconv(.Inline) [count]Block {
comptime var i = 0;
var out: [count]Block = undefined;
inline while (i < count) : (i += 1) {
@ -160,7 +160,7 @@ pub const Block = struct {
}
/// Encrypt multiple blocks in parallel with the same last round key.
pub inline fn encryptLastWide(comptime count: usize, blocks: [count]Block, round_key: Block) [count]Block {
pub fn encryptLastWide(comptime count: usize, blocks: [count]Block, round_key: Block) callconv(.Inline) [count]Block {
comptime var i = 0;
var out: [count]Block = undefined;
inline while (i < count) : (i += 1) {
@ -170,7 +170,7 @@ pub const Block = struct {
}
/// Decrypt multiple blocks in parallel with the same last round key.
pub inline fn decryptLastWide(comptime count: usize, blocks: [count]Block, round_key: Block) [count]Block {
pub fn decryptLastWide(comptime count: usize, blocks: [count]Block, round_key: Block) callconv(.Inline) [count]Block {
comptime var i = 0;
var out: [count]Block = undefined;
inline while (i < count) : (i += 1) {

View File

@ -18,7 +18,7 @@ pub const Block = struct {
repr: BlockVec align(16),
/// Convert a byte sequence into an internal representation.
pub inline fn fromBytes(bytes: *const [16]u8) Block {
pub fn fromBytes(bytes: *const [16]u8) callconv(.Inline) Block {
const s0 = mem.readIntBig(u32, bytes[0..4]);
const s1 = mem.readIntBig(u32, bytes[4..8]);
const s2 = mem.readIntBig(u32, bytes[8..12]);
@ -27,7 +27,7 @@ pub const Block = struct {
}
/// Convert the internal representation of a block into a byte sequence.
pub inline fn toBytes(block: Block) [16]u8 {
pub fn toBytes(block: Block) callconv(.Inline) [16]u8 {
var bytes: [16]u8 = undefined;
mem.writeIntBig(u32, bytes[0..4], block.repr[0]);
mem.writeIntBig(u32, bytes[4..8], block.repr[1]);
@ -37,7 +37,7 @@ pub const Block = struct {
}
/// XOR the block with a byte sequence.
pub inline fn xorBytes(block: Block, bytes: *const [16]u8) [16]u8 {
pub fn xorBytes(block: Block, bytes: *const [16]u8) callconv(.Inline) [16]u8 {
const block_bytes = block.toBytes();
var x: [16]u8 = undefined;
comptime var i: usize = 0;
@ -48,7 +48,7 @@ pub const Block = struct {
}
/// Encrypt a block with a round key.
pub inline fn encrypt(block: Block, round_key: Block) Block {
pub fn encrypt(block: Block, round_key: Block) callconv(.Inline) Block {
const src = &block.repr;
const s0 = block.repr[0];
@ -65,7 +65,7 @@ pub const Block = struct {
}
/// Encrypt a block with the last round key.
pub inline fn encryptLast(block: Block, round_key: Block) Block {
pub fn encryptLast(block: Block, round_key: Block) callconv(.Inline) Block {
const src = &block.repr;
const t0 = block.repr[0];
@ -87,7 +87,7 @@ pub const Block = struct {
}
/// Decrypt a block with a round key.
pub inline fn decrypt(block: Block, round_key: Block) Block {
pub fn decrypt(block: Block, round_key: Block) callconv(.Inline) Block {
const src = &block.repr;
const s0 = block.repr[0];
@ -104,7 +104,7 @@ pub const Block = struct {
}
/// Decrypt a block with the last round key.
pub inline fn decryptLast(block: Block, round_key: Block) Block {
pub fn decryptLast(block: Block, round_key: Block) callconv(.Inline) Block {
const src = &block.repr;
const t0 = block.repr[0];
@ -126,7 +126,7 @@ pub const Block = struct {
}
/// Apply the bitwise XOR operation to the content of two blocks.
pub inline fn xorBlocks(block1: Block, block2: Block) Block {
pub fn xorBlocks(block1: Block, block2: Block) callconv(.Inline) Block {
var x: BlockVec = undefined;
comptime var i = 0;
inline while (i < 4) : (i += 1) {
@ -136,7 +136,7 @@ pub const Block = struct {
}
/// Apply the bitwise AND operation to the content of two blocks.
pub inline fn andBlocks(block1: Block, block2: Block) Block {
pub fn andBlocks(block1: Block, block2: Block) callconv(.Inline) Block {
var x: BlockVec = undefined;
comptime var i = 0;
inline while (i < 4) : (i += 1) {
@ -146,7 +146,7 @@ pub const Block = struct {
}
/// Apply the bitwise OR operation to the content of two blocks.
pub inline fn orBlocks(block1: Block, block2: Block) Block {
pub fn orBlocks(block1: Block, block2: Block) callconv(.Inline) Block {
var x: BlockVec = undefined;
comptime var i = 0;
inline while (i < 4) : (i += 1) {

View File

@ -66,7 +66,7 @@ const CompressVectorized = struct {
const Lane = Vector(4, u32);
const Rows = [4]Lane;
inline fn g(comptime even: bool, rows: *Rows, m: Lane) void {
fn g(comptime even: bool, rows: *Rows, m: Lane) callconv(.Inline) void {
rows[0] +%= rows[1] +% m;
rows[3] ^= rows[0];
rows[3] = math.rotr(Lane, rows[3], if (even) 8 else 16);
@ -75,13 +75,13 @@ const CompressVectorized = struct {
rows[1] = math.rotr(Lane, rows[1], if (even) 7 else 12);
}
inline fn diagonalize(rows: *Rows) void {
fn diagonalize(rows: *Rows) callconv(.Inline) void {
rows[0] = @shuffle(u32, rows[0], undefined, [_]i32{ 3, 0, 1, 2 });
rows[3] = @shuffle(u32, rows[3], undefined, [_]i32{ 2, 3, 0, 1 });
rows[2] = @shuffle(u32, rows[2], undefined, [_]i32{ 1, 2, 3, 0 });
}
inline fn undiagonalize(rows: *Rows) void {
fn undiagonalize(rows: *Rows) callconv(.Inline) void {
rows[0] = @shuffle(u32, rows[0], undefined, [_]i32{ 1, 2, 3, 0 });
rows[3] = @shuffle(u32, rows[3], undefined, [_]i32{ 2, 3, 0, 1 });
rows[2] = @shuffle(u32, rows[2], undefined, [_]i32{ 3, 0, 1, 2 });

View File

@ -35,7 +35,7 @@ const ChaCha20VecImpl = struct {
};
}
inline fn chacha20Core(x: *BlockVec, input: BlockVec) void {
fn chacha20Core(x: *BlockVec, input: BlockVec) callconv(.Inline) void {
x.* = input;
var r: usize = 0;
@ -80,7 +80,7 @@ const ChaCha20VecImpl = struct {
}
}
inline fn hashToBytes(out: *[64]u8, x: BlockVec) void {
fn hashToBytes(out: *[64]u8, x: BlockVec) callconv(.Inline) void {
var i: usize = 0;
while (i < 4) : (i += 1) {
mem.writeIntLittle(u32, out[16 * i + 0 ..][0..4], x[i][0]);
@ -90,7 +90,7 @@ const ChaCha20VecImpl = struct {
}
}
inline fn contextFeedback(x: *BlockVec, ctx: BlockVec) void {
fn contextFeedback(x: *BlockVec, ctx: BlockVec) callconv(.Inline) void {
x[0] +%= ctx[0];
x[1] +%= ctx[1];
x[2] +%= ctx[2];
@ -190,7 +190,7 @@ const ChaCha20NonVecImpl = struct {
};
}
inline fn chacha20Core(x: *BlockVec, input: BlockVec) void {
fn chacha20Core(x: *BlockVec, input: BlockVec) callconv(.Inline) void {
x.* = input;
const rounds = comptime [_]QuarterRound{
@ -219,7 +219,7 @@ const ChaCha20NonVecImpl = struct {
}
}
inline fn hashToBytes(out: *[64]u8, x: BlockVec) void {
fn hashToBytes(out: *[64]u8, x: BlockVec) callconv(.Inline) void {
var i: usize = 0;
while (i < 4) : (i += 1) {
mem.writeIntLittle(u32, out[16 * i + 0 ..][0..4], x[i * 4 + 0]);
@ -229,7 +229,7 @@ const ChaCha20NonVecImpl = struct {
}
}
inline fn contextFeedback(x: *BlockVec, ctx: BlockVec) void {
fn contextFeedback(x: *BlockVec, ctx: BlockVec) callconv(.Inline) void {
var i: usize = 0;
while (i < 16) : (i += 1) {
x[i] +%= ctx[i];

View File

@ -95,7 +95,7 @@ pub const Ghash = struct {
}
}
inline fn clmul_pclmul(x: u64, y: u64) u64 {
fn clmul_pclmul(x: u64, y: u64) callconv(.Inline) u64 {
const Vector = std.meta.Vector;
const product = asm (
\\ vpclmulqdq $0x00, %[x], %[y], %[out]
@ -106,7 +106,7 @@ pub const Ghash = struct {
return product[0];
}
inline fn clmul_pmull(x: u64, y: u64) u64 {
fn clmul_pmull(x: u64, y: u64) callconv(.Inline) u64 {
const Vector = std.meta.Vector;
const product = asm (
\\ pmull %[out].1q, %[x].1d, %[y].1d

View File

@ -48,7 +48,7 @@ pub const State = struct {
return mem.asBytes(&self.data);
}
inline fn endianSwap(self: *Self) void {
fn endianSwap(self: *Self) callconv(.Inline) void {
for (self.data) |*w| {
w.* = mem.littleToNative(u32, w.*);
}
@ -116,7 +116,7 @@ pub const State = struct {
const Lane = Vector(4, u32);
inline fn shift(x: Lane, comptime n: comptime_int) Lane {
fn shift(x: Lane, comptime n: comptime_int) callconv(.Inline) Lane {
return x << @splat(4, @as(u5, n));
}

View File

@ -37,7 +37,7 @@ const Salsa20VecImpl = struct {
};
}
inline fn salsa20Core(x: *BlockVec, input: BlockVec, comptime feedback: bool) void {
fn salsa20Core(x: *BlockVec, input: BlockVec, comptime feedback: bool) callconv(.Inline) void {
const n1n2n3n0 = Lane{ input[3][1], input[3][2], input[3][3], input[3][0] };
const n1n2 = Half{ n1n2n3n0[0], n1n2n3n0[1] };
const n3n0 = Half{ n1n2n3n0[2], n1n2n3n0[3] };
@ -211,7 +211,7 @@ const Salsa20NonVecImpl = struct {
d: u6,
};
inline fn Rp(a: usize, b: usize, c: usize, d: u6) QuarterRound {
fn Rp(a: usize, b: usize, c: usize, d: u6) callconv(.Inline) QuarterRound {
return QuarterRound{
.a = a,
.b = b,
@ -220,7 +220,7 @@ const Salsa20NonVecImpl = struct {
};
}
inline fn salsa20Core(x: *BlockVec, input: BlockVec, comptime feedback: bool) void {
fn salsa20Core(x: *BlockVec, input: BlockVec, comptime feedback: bool) callconv(.Inline) void {
const arx_steps = comptime [_]QuarterRound{
Rp(4, 0, 12, 7), Rp(8, 4, 0, 9), Rp(12, 8, 4, 13), Rp(0, 12, 8, 18),
Rp(9, 5, 1, 7), Rp(13, 9, 5, 9), Rp(1, 13, 9, 13), Rp(5, 1, 13, 18),

View File

@ -720,10 +720,10 @@ pub const Elf32_Rel = extern struct {
r_offset: Elf32_Addr,
r_info: Elf32_Word,
pub inline fn r_sym(self: @This()) u24 {
pub fn r_sym(self: @This()) callconv(.Inline) u24 {
return @truncate(u24, self.r_info >> 8);
}
pub inline fn r_type(self: @This()) u8 {
pub fn r_type(self: @This()) callconv(.Inline) u8 {
return @truncate(u8, self.r_info & 0xff);
}
};
@ -731,10 +731,10 @@ pub const Elf64_Rel = extern struct {
r_offset: Elf64_Addr,
r_info: Elf64_Xword,
pub inline fn r_sym(self: @This()) u32 {
pub fn r_sym(self: @This()) callconv(.Inline) u32 {
return @truncate(u32, self.r_info >> 32);
}
pub inline fn r_type(self: @This()) u32 {
pub fn r_type(self: @This()) callconv(.Inline) u32 {
return @truncate(u32, self.r_info & 0xffffffff);
}
};
@ -743,10 +743,10 @@ pub const Elf32_Rela = extern struct {
r_info: Elf32_Word,
r_addend: Elf32_Sword,
pub inline fn r_sym(self: @This()) u24 {
pub fn r_sym(self: @This()) callconv(.Inline) u24 {
return @truncate(u24, self.r_info >> 8);
}
pub inline fn r_type(self: @This()) u8 {
pub fn r_type(self: @This()) callconv(.Inline) u8 {
return @truncate(u8, self.r_info & 0xff);
}
};
@ -755,10 +755,10 @@ pub const Elf64_Rela = extern struct {
r_info: Elf64_Xword,
r_addend: Elf64_Sxword,
pub inline fn r_sym(self: @This()) u32 {
pub fn r_sym(self: @This()) callconv(.Inline) u32 {
return @truncate(u32, self.r_info >> 32);
}
pub inline fn r_type(self: @This()) u32 {
pub fn r_type(self: @This()) callconv(.Inline) u32 {
return @truncate(u32, self.r_info & 0xffffffff);
}
};

View File

@ -52,21 +52,21 @@ const Z96 = struct {
d2: u32,
// d = s >> 1
inline fn shiftRight1(d: *Z96, s: Z96) void {
fn shiftRight1(d: *Z96, s: Z96) callconv(.Inline) void {
d.d0 = (s.d0 >> 1) | ((s.d1 & 1) << 31);
d.d1 = (s.d1 >> 1) | ((s.d2 & 1) << 31);
d.d2 = s.d2 >> 1;
}
// d = s << 1
inline fn shiftLeft1(d: *Z96, s: Z96) void {
fn shiftLeft1(d: *Z96, s: Z96) callconv(.Inline) void {
d.d2 = (s.d2 << 1) | ((s.d1 & (1 << 31)) >> 31);
d.d1 = (s.d1 << 1) | ((s.d0 & (1 << 31)) >> 31);
d.d0 = s.d0 << 1;
}
// d += s
inline fn add(d: *Z96, s: Z96) void {
fn add(d: *Z96, s: Z96) callconv(.Inline) void {
var w = @as(u64, d.d0) + @as(u64, s.d0);
d.d0 = @truncate(u32, w);
@ -80,7 +80,7 @@ const Z96 = struct {
}
// d -= s
inline fn sub(d: *Z96, s: Z96) void {
fn sub(d: *Z96, s: Z96) callconv(.Inline) void {
var w = @as(u64, d.d0) -% @as(u64, s.d0);
d.d0 = @truncate(u32, w);

View File

@ -6,7 +6,7 @@
const std = @import("std");
const builtin = @import("builtin");
inline fn offsetPtr(ptr: [*]const u8, offset: usize) [*]const u8 {
fn offsetPtr(ptr: [*]const u8, offset: usize) callconv(.Inline) [*]const u8 {
// ptr + offset doesn't work at comptime so we need this instead.
return @ptrCast([*]const u8, &ptr[offset]);
}

View File

@ -815,16 +815,16 @@ pub const sigval = extern union {
pub const _SIG_WORDS = 4;
pub const _SIG_MAXSIG = 128;
pub inline fn _SIG_IDX(sig: usize) usize {
pub fn _SIG_IDX(sig: usize) callconv(.Inline) usize {
return sig - 1;
}
pub inline fn _SIG_WORD(sig: usize) usize {
pub fn _SIG_WORD(sig: usize) callconv(.Inline) usize {
return _SIG_IDX(sig) >> 5;
}
pub inline fn _SIG_BIT(sig: usize) usize {
pub fn _SIG_BIT(sig: usize) callconv(.Inline) usize {
return 1 << (_SIG_IDX(sig) & 31);
}
pub inline fn _SIG_VALID(sig: usize) usize {
pub fn _SIG_VALID(sig: usize) callconv(.Inline) usize {
return sig <= _SIG_MAXSIG and sig > 0;
}

View File

@ -796,16 +796,16 @@ pub const _ksiginfo = extern struct {
pub const _SIG_WORDS = 4;
pub const _SIG_MAXSIG = 128;
pub inline fn _SIG_IDX(sig: usize) usize {
pub fn _SIG_IDX(sig: usize) callconv(.Inline) usize {
return sig - 1;
}
pub inline fn _SIG_WORD(sig: usize) usize {
pub fn _SIG_WORD(sig: usize) callconv(.Inline) usize {
return _SIG_IDX(sig) >> 5;
}
pub inline fn _SIG_BIT(sig: usize) usize {
pub fn _SIG_BIT(sig: usize) callconv(.Inline) usize {
return 1 << (_SIG_IDX(sig) & 31);
}
pub inline fn _SIG_VALID(sig: usize) usize {
pub fn _SIG_VALID(sig: usize) callconv(.Inline) usize {
return sig <= _SIG_MAXSIG and sig > 0;
}

View File

@ -126,7 +126,7 @@ pub fn fork() usize {
/// It is advised to avoid this function and use clone instead, because
/// the compiler is not aware of how vfork affects control flow and you may
/// see different results in optimized builds.
pub inline fn vfork() usize {
pub fn vfork() callconv(.Inline) usize {
return @call(.{ .modifier = .always_inline }, syscall0, .{.vfork});
}
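
For comparison, vfork forces inlining per call site with the @call modifier rather than per declaration with callconv(.Inline). A minimal sketch of that mechanism (not part of the diff; `double` and `caller` are hypothetical):

const std = @import("std");

fn double(x: u32) u32 {
    return x *% 2;
}

fn caller(x: u32) u32 {
    // Forced inlining at this one call site, the same mechanism vfork uses above.
    // Giving `double` callconv(.Inline) would instead force it at every call site.
    return @call(.{ .modifier = .always_inline }, double, .{x});
}

test "forced-inline call" {
    std.testing.expectEqual(@as(u32, 8), caller(4));
}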

View File

@ -300,7 +300,7 @@ fn initTLS() void {
};
}
inline fn alignPtrCast(comptime T: type, ptr: [*]u8) *T {
fn alignPtrCast(comptime T: type, ptr: [*]u8) callconv(.Inline) *T {
return @ptrCast(*T, @alignCast(@alignOf(*T), ptr));
}

View File

@ -1669,7 +1669,7 @@ pub fn wToPrefixedFileW(s: []const u16) !PathSpace {
return path_space;
}
inline fn MAKELANGID(p: c_ushort, s: c_ushort) LANGID {
fn MAKELANGID(p: c_ushort, s: c_ushort) callconv(.Inline) LANGID {
return (s << 10) | p;
}

View File

@ -262,7 +262,7 @@ const bad_main_ret = "expected return type of main to be 'void', '!void', 'noret
// This is marked inline because for some reason LLVM in release mode fails to inline it,
// and we want fewer call frames in stack traces.
inline fn initEventLoopAndCallMain() u8 {
fn initEventLoopAndCallMain() callconv(.Inline) u8 {
if (std.event.Loop.instance) |loop| {
if (!@hasDecl(root, "event_loop")) {
loop.init() catch |err| {
@ -291,7 +291,7 @@ inline fn initEventLoopAndCallMain() u8 {
// and we want fewer call frames in stack traces.
// TODO This function is duplicated from initEventLoopAndCallMain instead of using generics
// because it is working around stage1 compiler bugs.
inline fn initEventLoopAndCallWinMain() std.os.windows.INT {
fn initEventLoopAndCallWinMain() callconv(.Inline) std.os.windows.INT {
if (std.event.Loop.instance) |loop| {
if (!@hasDecl(root, "event_loop")) {
loop.init() catch |err| {

View File

@ -1357,6 +1357,7 @@ pub const Node = struct {
extern_export_inline_token: TokenIndex,
is_extern_prototype: void, // TODO: Remove once extern fn rewriting is
is_async: void, // TODO: remove once async fn rewriting is
is_inline: void, // TODO: remove once inline fn rewriting is
});
pub const RequiredFields = struct {
@ -1523,6 +1524,14 @@ pub const Node = struct {
self.setTrailer(.is_async, value);
}
pub fn getIsInline(self: *const FnProto) ?void {
return self.getTrailer(.is_inline);
}
pub fn setIsInline(self: *FnProto, value: void) void {
self.setTrailer(.is_inline, value);
}
fn getTrailer(self: *const FnProto, comptime field: TrailerFlags.FieldEnum) ?TrailerFlags.Field(field) {
const trailers_start = @alignCast(
@alignOf(ParamDecl),

View File

@ -493,9 +493,15 @@ const Parser = struct {
extern_export_inline_token: ?TokenIndex = null,
lib_name: ?*Node = null,
}) !?*Node {
// TODO: Remove once extern/async fn rewriting is
var is_async: ?void = null;
// TODO: Remove once extern/async/inline fn rewriting is
var is_extern_prototype: ?void = null;
var is_async: ?void = null;
var is_inline: ?void = null;
if (fields.extern_export_inline_token != null and
p.token_ids[fields.extern_export_inline_token.?] == .Keyword_inline)
{
is_inline = {};
}
const cc_token: ?TokenIndex = blk: {
if (p.eatToken(.Keyword_extern)) |token| {
is_extern_prototype = {};
@ -573,6 +579,7 @@ const Parser = struct {
.callconv_expr = callconv_expr,
.is_extern_prototype = is_extern_prototype,
.is_async = is_async,
.is_inline = is_inline,
});
std.mem.copy(Node.FnProto.ParamDecl, fn_proto_node.params(), params);

View File

@ -2355,17 +2355,17 @@ test "zig fmt: functions" {
\\extern fn puts(s: *const u8) c_int;
\\extern "c" fn puts(s: *const u8) c_int;
\\export fn puts(s: *const u8) c_int;
\\inline fn puts(s: *const u8) c_int;
\\fn puts(s: *const u8) callconv(.Inline) c_int;
\\noinline fn puts(s: *const u8) c_int;
\\pub extern fn puts(s: *const u8) c_int;
\\pub extern "c" fn puts(s: *const u8) c_int;
\\pub export fn puts(s: *const u8) c_int;
\\pub inline fn puts(s: *const u8) c_int;
\\pub fn puts(s: *const u8) callconv(.Inline) c_int;
\\pub noinline fn puts(s: *const u8) c_int;
\\pub extern fn puts(s: *const u8) align(2 + 2) c_int;
\\pub extern "c" fn puts(s: *const u8) align(2 + 2) c_int;
\\pub export fn puts(s: *const u8) align(2 + 2) c_int;
\\pub inline fn puts(s: *const u8) align(2 + 2) c_int;
\\pub fn puts(s: *const u8) align(2 + 2) callconv(.Inline) c_int;
\\pub noinline fn puts(s: *const u8) align(2 + 2) c_int;
\\
);

View File

@ -1558,7 +1558,7 @@ fn renderExpression(
}
if (fn_proto.getExternExportInlineToken()) |extern_export_inline_token| {
if (fn_proto.getIsExternPrototype() == null)
if (fn_proto.getIsExternPrototype() == null and fn_proto.getIsInline() == null)
try renderToken(tree, ais, extern_export_inline_token, Space.Space); // extern/export/inline
}
@ -1664,6 +1664,8 @@ fn renderExpression(
try ais.writer().writeAll("callconv(.C) ");
} else if (fn_proto.getIsAsync() != null) {
try ais.writer().writeAll("callconv(.Async) ");
} else if (fn_proto.getIsInline() != null) {
try ais.writer().writeAll("callconv(.Inline) ");
}
switch (fn_proto.return_type) {

View File

@ -19,11 +19,11 @@ fn setFeature(cpu: *Target.Cpu, feature: Target.x86.Feature, enabled: bool) void
if (enabled) cpu.features.addFeature(idx) else cpu.features.removeFeature(idx);
}
inline fn bit(input: u32, offset: u5) bool {
fn bit(input: u32, offset: u5) callconv(.Inline) bool {
return (input >> offset) & 1 != 0;
}
inline fn hasMask(input: u32, mask: u32) bool {
fn hasMask(input: u32, mask: u32) callconv(.Inline) bool {
return (input & mask) == mask;
}

View File

@ -1087,14 +1087,23 @@ fn astGenAndAnalyzeDecl(self: *Module, decl: *Decl) !bool {
if (fn_proto.getSectionExpr()) |sect_expr| {
return self.failNode(&fn_type_scope.base, sect_expr, "TODO implement function section expression", .{});
}
if (fn_proto.getCallconvExpr()) |callconv_expr| {
return self.failNode(
&fn_type_scope.base,
callconv_expr,
"TODO implement function calling convention expression",
.{},
);
}
const enum_literal_type = try astgen.addZIRInstConst(self, &fn_type_scope.base, fn_src, .{
.ty = Type.initTag(.type),
.val = Value.initTag(.enum_literal_type),
});
const enum_literal_type_rl: astgen.ResultLoc = .{ .ty = enum_literal_type };
const cc = if (fn_proto.getCallconvExpr()) |callconv_expr|
try astgen.expr(self, &fn_type_scope.base, enum_literal_type_rl, callconv_expr)
else
try astgen.addZIRInstConst(self, &fn_type_scope.base, fn_src, .{
.ty = Type.initTag(.enum_literal),
.val = try Value.Tag.enum_literal.create(
&fn_type_scope_arena.allocator,
try fn_type_scope_arena.allocator.dupe(u8, "Unspecified"),
),
});
const return_type_expr = switch (fn_proto.return_type) {
.Explicit => |node| node,
.InferErrorSet => |node| return self.failNode(&fn_type_scope.base, node, "TODO implement inferred error sets", .{}),
@ -1105,6 +1114,7 @@ fn astGenAndAnalyzeDecl(self: *Module, decl: *Decl) !bool {
const fn_type_inst = try astgen.addZIRInst(self, &fn_type_scope.base, fn_src, zir.Inst.FnType, .{
.return_type = return_type_inst,
.param_types = param_types,
.cc = cc,
}, .{});
if (std.builtin.mode == .Debug and self.comp.verbose_ir) {
@ -1230,14 +1240,7 @@ fn astGenAndAnalyzeDecl(self: *Module, decl: *Decl) !bool {
};
};
const is_inline = blk: {
if (fn_proto.getExternExportInlineToken()) |maybe_inline_token| {
if (tree.token_ids[maybe_inline_token] == .Keyword_inline) {
break :blk true;
}
}
break :blk false;
};
const is_inline = fn_type.fnCallingConvention() == .Inline;
const anal_state = ([2]Fn.Analysis{ .queued, .inline_only })[@boolToInt(is_inline)];
new_func.* = .{

View File

@ -2366,7 +2366,7 @@ fn allocatedSizeLinkedit(self: *MachO, start: u64) u64 {
return min_pos - start;
}
inline fn checkForCollision(start: u64, end: u64, off: u64, size: u64) ?u64 {
fn checkForCollision(start: u64, end: u64, off: u64, size: u64) callconv(.Inline) ?u64 {
const increased_size = padToIdeal(size);
const test_end = off + increased_size;
if (end > off and start < test_end) {

View File

@ -74,6 +74,7 @@ enum CallingConvention {
CallingConventionC,
CallingConventionNaked,
CallingConventionAsync,
CallingConventionInline,
CallingConventionInterrupt,
CallingConventionSignal,
CallingConventionStdcall,
@ -703,12 +704,6 @@ enum NodeType {
NodeTypeAnyTypeField,
};
enum FnInline {
FnInlineAuto,
FnInlineAlways,
FnInlineNever,
};
struct AstNodeFnProto {
Buf *name;
ZigList<AstNode *> params;
@ -725,13 +720,12 @@ struct AstNodeFnProto {
AstNode *callconv_expr;
Buf doc_comments;
FnInline fn_inline;
VisibMod visib_mod;
bool auto_err_set;
bool is_var_args;
bool is_extern;
bool is_export;
bool is_noinline;
};
struct AstNodeFnDef {
@ -1719,7 +1713,6 @@ struct ZigFn {
LLVMValueRef valgrind_client_request_array;
FnInline fn_inline;
FnAnalState anal_state;
uint32_t align_bytes;
@ -1728,6 +1721,7 @@ struct ZigFn {
bool calls_or_awaits_errorable_fn;
bool is_cold;
bool is_test;
bool is_noinline;
};
uint32_t fn_table_entry_hash(ZigFn*);

View File

@ -973,6 +973,7 @@ const char *calling_convention_name(CallingConvention cc) {
case CallingConventionAPCS: return "APCS";
case CallingConventionAAPCS: return "AAPCS";
case CallingConventionAAPCSVFP: return "AAPCSVFP";
case CallingConventionInline: return "Inline";
}
zig_unreachable();
}
@ -981,6 +982,7 @@ bool calling_convention_allows_zig_types(CallingConvention cc) {
switch (cc) {
case CallingConventionUnspecified:
case CallingConventionAsync:
case CallingConventionInline:
return true;
case CallingConventionC:
case CallingConventionNaked:
@ -1007,7 +1009,8 @@ ZigType *get_stack_trace_type(CodeGen *g) {
}
bool want_first_arg_sret(CodeGen *g, FnTypeId *fn_type_id) {
if (fn_type_id->cc == CallingConventionUnspecified) {
if (fn_type_id->cc == CallingConventionUnspecified
|| fn_type_id->cc == CallingConventionInline) {
return handle_is_ptr(g, fn_type_id->return_type);
}
if (fn_type_id->cc != CallingConventionC) {
@ -1888,6 +1891,7 @@ Error emit_error_unless_callconv_allowed_for_target(CodeGen *g, AstNode *source_
case CallingConventionC:
case CallingConventionNaked:
case CallingConventionAsync:
case CallingConventionInline:
break;
case CallingConventionInterrupt:
if (g->zig_target->arch != ZigLLVM_x86
@ -3587,7 +3591,7 @@ static void get_fully_qualified_decl_name(CodeGen *g, Buf *buf, Tld *tld, bool i
}
}
static ZigFn *create_fn_raw(CodeGen *g, FnInline inline_value) {
static ZigFn *create_fn_raw(CodeGen *g, bool is_noinline) {
ZigFn *fn_entry = heap::c_allocator.create<ZigFn>();
fn_entry->ir_executable = heap::c_allocator.create<IrExecutableSrc>();
@ -3597,7 +3601,7 @@ static ZigFn *create_fn_raw(CodeGen *g, FnInline inline_value) {
fn_entry->analyzed_executable.backward_branch_quota = &fn_entry->prealloc_backward_branch_quota;
fn_entry->analyzed_executable.fn_entry = fn_entry;
fn_entry->ir_executable->fn_entry = fn_entry;
fn_entry->fn_inline = inline_value;
fn_entry->is_noinline = is_noinline;
return fn_entry;
}
@ -3606,7 +3610,7 @@ ZigFn *create_fn(CodeGen *g, AstNode *proto_node) {
assert(proto_node->type == NodeTypeFnProto);
AstNodeFnProto *fn_proto = &proto_node->data.fn_proto;
ZigFn *fn_entry = create_fn_raw(g, fn_proto->fn_inline);
ZigFn *fn_entry = create_fn_raw(g, fn_proto->is_noinline);
fn_entry->proto_node = proto_node;
fn_entry->body_node = (proto_node->data.fn_proto.fn_def_node == nullptr) ? nullptr :
@ -3739,6 +3743,12 @@ static void resolve_decl_fn(CodeGen *g, TldFn *tld_fn) {
fn_table_entry->type_entry = g->builtin_types.entry_invalid;
tld_fn->base.resolution = TldResolutionInvalid;
return;
case CallingConventionInline:
add_node_error(g, fn_def_node,
buf_sprintf("exported function cannot be inline"));
fn_table_entry->type_entry = g->builtin_types.entry_invalid;
tld_fn->base.resolution = TldResolutionInvalid;
return;
case CallingConventionC:
case CallingConventionNaked:
case CallingConventionInterrupt:
@ -3774,7 +3784,7 @@ static void resolve_decl_fn(CodeGen *g, TldFn *tld_fn) {
fn_table_entry->inferred_async_node = fn_table_entry->proto_node;
}
} else if (source_node->type == NodeTypeTestDecl) {
ZigFn *fn_table_entry = create_fn_raw(g, FnInlineAuto);
ZigFn *fn_table_entry = create_fn_raw(g, false);
get_fully_qualified_decl_name(g, &fn_table_entry->symbol_name, &tld_fn->base, true);
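
A hedged sketch of what the new check rejects (not part of the diff; names are illustrative):

const std = @import("std");

fn step() callconv(.Inline) u32 {
    return 1;
}

// Exporting such a function is rejected by the case added above; uncommenting
// the declaration below fails with "exported function cannot be inline".
//
// export fn step_exported() callconv(.Inline) u32 {
//     return 1;
// }

test "inline functions remain callable, just not exportable" {
    std.testing.expectEqual(@as(u32, 1), step());
}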

View File

@ -123,13 +123,8 @@ static const char *export_string(bool is_export) {
// zig_unreachable();
//}
static const char *inline_string(FnInline fn_inline) {
switch (fn_inline) {
case FnInlineAlways: return "inline ";
case FnInlineNever: return "noinline ";
case FnInlineAuto: return "";
}
zig_unreachable();
static const char *inline_string(bool is_inline) {
return is_inline ? "inline" : "";
}
static const char *const_or_var_string(bool is_const) {
@ -446,7 +441,7 @@ static void render_node_extra(AstRender *ar, AstNode *node, bool grouped) {
const char *pub_str = visib_mod_string(node->data.fn_proto.visib_mod);
const char *extern_str = extern_string(node->data.fn_proto.is_extern);
const char *export_str = export_string(node->data.fn_proto.is_export);
const char *inline_str = inline_string(node->data.fn_proto.fn_inline);
const char *inline_str = inline_string(node->data.fn_proto.is_noinline);
fprintf(ar->f, "%s%s%s%sfn ", pub_str, inline_str, export_str, extern_str);
if (node->data.fn_proto.name != nullptr) {
print_symbol(ar, node->data.fn_proto.name);

View File

@ -159,6 +159,7 @@ static const char *get_mangled_name(CodeGen *g, const char *original_name) {
static ZigLLVM_CallingConv get_llvm_cc(CodeGen *g, CallingConvention cc) {
switch (cc) {
case CallingConventionUnspecified:
case CallingConventionInline:
return ZigLLVM_Fast;
case CallingConventionC:
return ZigLLVM_C;
@ -350,6 +351,7 @@ static bool cc_want_sret_attr(CallingConvention cc) {
return true;
case CallingConventionAsync:
case CallingConventionUnspecified:
case CallingConventionInline:
return false;
}
zig_unreachable();
@ -452,20 +454,11 @@ static LLVMValueRef make_fn_llvm_value(CodeGen *g, ZigFn *fn) {
}
}
switch (fn->fn_inline) {
case FnInlineAlways:
addLLVMFnAttr(llvm_fn, "alwaysinline");
g->inline_fns.append(fn);
break;
case FnInlineNever:
addLLVMFnAttr(llvm_fn, "noinline");
break;
case FnInlineAuto:
if (fn->alignstack_value != 0) {
addLLVMFnAttr(llvm_fn, "noinline");
}
break;
}
if (cc == CallingConventionInline)
addLLVMFnAttr(llvm_fn, "alwaysinline");
if (fn->is_noinline || (cc != CallingConventionInline && fn->alignstack_value != 0))
addLLVMFnAttr(llvm_fn, "noinline");
if (cc == CallingConventionNaked) {
addLLVMFnAttr(llvm_fn, "naked");
@ -532,7 +525,7 @@ static LLVMValueRef make_fn_llvm_value(CodeGen *g, ZigFn *fn) {
addLLVMFnAttr(llvm_fn, "nounwind");
add_uwtable_attr(g, llvm_fn);
addLLVMFnAttr(llvm_fn, "nobuiltin");
if (codegen_have_frame_pointer(g) && fn->fn_inline != FnInlineAlways) {
if (codegen_have_frame_pointer(g) && cc != CallingConventionInline) {
ZigLLVMAddFunctionAttr(llvm_fn, "frame-pointer", "all");
}
if (fn->section_name) {
@ -9043,19 +9036,16 @@ Buf *codegen_generate_builtin_source(CodeGen *g) {
static_assert(CallingConventionC == 1, "");
static_assert(CallingConventionNaked == 2, "");
static_assert(CallingConventionAsync == 3, "");
static_assert(CallingConventionInterrupt == 4, "");
static_assert(CallingConventionSignal == 5, "");
static_assert(CallingConventionStdcall == 6, "");
static_assert(CallingConventionFastcall == 7, "");
static_assert(CallingConventionVectorcall == 8, "");
static_assert(CallingConventionThiscall == 9, "");
static_assert(CallingConventionAPCS == 10, "");
static_assert(CallingConventionAAPCS == 11, "");
static_assert(CallingConventionAAPCSVFP == 12, "");
static_assert(FnInlineAuto == 0, "");
static_assert(FnInlineAlways == 1, "");
static_assert(FnInlineNever == 2, "");
static_assert(CallingConventionInline == 4, "");
static_assert(CallingConventionInterrupt == 5, "");
static_assert(CallingConventionSignal == 6, "");
static_assert(CallingConventionStdcall == 7, "");
static_assert(CallingConventionFastcall == 8, "");
static_assert(CallingConventionVectorcall == 9, "");
static_assert(CallingConventionThiscall == 10, "");
static_assert(CallingConventionAPCS == 11, "");
static_assert(CallingConventionAAPCS == 12, "");
static_assert(CallingConventionAAPCSVFP == 13, "");
static_assert(BuiltinPtrSizeOne == 0, "");
static_assert(BuiltinPtrSizeMany == 1, "");

View File

@ -19000,7 +19000,7 @@ static IrInstGen *ir_analyze_instruction_decl_var(IrAnalyze *ira, IrInstSrcDeclV
} else if (init_val->type->id == ZigTypeIdFn &&
init_val->special != ConstValSpecialUndef &&
init_val->data.x_ptr.special == ConstPtrSpecialFunction &&
init_val->data.x_ptr.data.fn.fn_entry->fn_inline == FnInlineAlways)
init_val->data.x_ptr.data.fn.fn_entry->type_entry->data.fn.fn_type_id.cc == CallingConventionInline)
{
var_class_requires_const = true;
if (!var->src_is_const && !is_comptime_var) {
@ -19182,6 +19182,11 @@ static IrInstGen *ir_analyze_instruction_export(IrAnalyze *ira, IrInstSrcExport
buf_sprintf("exported function cannot be async"));
add_error_note(ira->codegen, msg, fn_entry->proto_node, buf_sprintf("declared here"));
} break;
case CallingConventionInline: {
ErrorMsg *msg = ir_add_error(ira, &target->base,
buf_sprintf("exported function cannot be inline"));
add_error_note(ira->codegen, msg, fn_entry->proto_node, buf_sprintf("declared here"));
} break;
case CallingConventionC:
case CallingConventionNaked:
case CallingConventionInterrupt:
@ -21120,7 +21125,7 @@ static IrInstGen *ir_analyze_fn_call(IrAnalyze *ira, IrInst* source_instr,
if (type_is_invalid(return_type))
return ira->codegen->invalid_inst_gen;
if (fn_entry != nullptr && fn_entry->fn_inline == FnInlineAlways && modifier == CallModifierNeverInline) {
if (fn_entry != nullptr && fn_type_id->cc == CallingConventionInline && modifier == CallModifierNeverInline) {
ir_add_error(ira, source_instr,
buf_sprintf("no-inline call of inline function"));
return ira->codegen->invalid_inst_gen;
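For reference, the rejected combination from the user's side, sketched with a hypothetical function (the compile-error test further down exercises the same path):

fn tiny() callconv(.Inline) void {}
export fn entry() void {
    @call(.{ .modifier = .never_inline }, tiny, .{}); // error: no-inline call of inline function
}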
@ -25219,10 +25224,6 @@ static Error ir_make_type_info_decls(IrAnalyze *ira, IrInst* source_instr, ZigVa
if ((err = type_resolve(ira->codegen, type_info_fn_decl_type, ResolveStatusSizeKnown)))
return err;
ZigType *type_info_fn_decl_inline_type = ir_type_info_get_type(ira, "Inline", type_info_fn_decl_type);
if ((err = type_resolve(ira->codegen, type_info_fn_decl_inline_type, ResolveStatusSizeKnown)))
return err;
resolve_container_usingnamespace_decls(ira->codegen, decls_scope);
// The unresolved declarations are collected in a separate queue to avoid
@ -25365,11 +25366,11 @@ static Error ir_make_type_info_decls(IrAnalyze *ira, IrInst* source_instr, ZigVa
fn_decl_fields[0]->special = ConstValSpecialStatic;
fn_decl_fields[0]->type = ira->codegen->builtin_types.entry_type;
fn_decl_fields[0]->data.x_type = fn_entry->type_entry;
// inline_type: Data.FnDecl.Inline
ensure_field_index(fn_decl_val->type, "inline_type", 1);
// is_noinline: bool
ensure_field_index(fn_decl_val->type, "is_noinline", 1);
fn_decl_fields[1]->special = ConstValSpecialStatic;
fn_decl_fields[1]->type = type_info_fn_decl_inline_type;
bigint_init_unsigned(&fn_decl_fields[1]->data.x_enum_tag, fn_entry->fn_inline);
fn_decl_fields[1]->type = ira->codegen->builtin_types.entry_bool;
fn_decl_fields[1]->data.x_bool = fn_entry->is_noinline;
// is_var_args: bool
ensure_field_index(fn_decl_val->type, "is_var_args", 2);
bool is_varargs = fn_node->is_var_args;
@ -30957,7 +30958,7 @@ static IrInstGen *ir_analyze_instruction_set_align_stack(IrAnalyze *ira, IrInstS
return ira->codegen->invalid_inst_gen;
}
if (fn_entry->fn_inline == FnInlineAlways) {
if (fn_entry->type_entry->data.fn.fn_type_id.cc == CallingConventionInline) {
ir_add_error(ira, &instruction->base.base, buf_sprintf("@setAlignStack in inline function"));
return ira->codegen->invalid_inst_gen;
}


@ -693,8 +693,6 @@ static AstNode *ast_parse_top_level_decl(ParseContext *pc, VisibMod visib_mod, B
Token *first = eat_token_if(pc, TokenIdKeywordExport);
if (first == nullptr)
first = eat_token_if(pc, TokenIdKeywordExtern);
if (first == nullptr)
first = eat_token_if(pc, TokenIdKeywordInline);
if (first == nullptr)
first = eat_token_if(pc, TokenIdKeywordNoInline);
if (first != nullptr) {
@ -702,7 +700,7 @@ static AstNode *ast_parse_top_level_decl(ParseContext *pc, VisibMod visib_mod, B
if (first->id == TokenIdKeywordExtern)
lib_name = eat_token_if(pc, TokenIdStringLiteral);
if (first->id != TokenIdKeywordInline && first->id != TokenIdKeywordNoInline) {
if (first->id != TokenIdKeywordNoInline) {
Token *thread_local_kw = eat_token_if(pc, TokenIdKeywordThreadLocal);
AstNode *var_decl = ast_parse_var_decl(pc);
if (var_decl != nullptr) {
@ -739,17 +737,8 @@ static AstNode *ast_parse_top_level_decl(ParseContext *pc, VisibMod visib_mod, B
if (!fn_proto->data.fn_proto.is_extern)
fn_proto->data.fn_proto.is_extern = first->id == TokenIdKeywordExtern;
fn_proto->data.fn_proto.is_export = first->id == TokenIdKeywordExport;
switch (first->id) {
case TokenIdKeywordInline:
fn_proto->data.fn_proto.fn_inline = FnInlineAlways;
break;
case TokenIdKeywordNoInline:
fn_proto->data.fn_proto.fn_inline = FnInlineNever;
break;
default:
fn_proto->data.fn_proto.fn_inline = FnInlineAuto;
break;
}
if (first->id == TokenIdKeywordNoInline)
fn_proto->data.fn_proto.is_noinline = true;
fn_proto->data.fn_proto.lib_name = token_buf(lib_name);
AstNode *res = fn_proto;
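With the inline keyword gone from this branch, only noinline stays special-cased here; a hedged sketch of the surviving spelling next to its replacement:

noinline fn keepOut() void {}          // still reaches the TokenIdKeywordNoInline branch
fn forced() callconv(.Inline) void {}  // replaces the old `inline fn forced() void {}`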


@ -31,7 +31,7 @@ pub const Ctx = if (enable) ___tracy_c_zone_context else struct {
pub fn end(self: Ctx) void {}
};
pub inline fn trace(comptime src: std.builtin.SourceLocation) Ctx {
pub fn trace(comptime src: std.builtin.SourceLocation) callconv(.Inline) Ctx {
if (!enable) return .{};
const loc: ___tracy_source_location_data = .{


@ -4716,7 +4716,6 @@ fn transCreateNodeMacroFn(c: *Context, name: []const u8, ref: *ast.Node, proto_a
const scope = &c.global_scope.base;
const pub_tok = try appendToken(c, .Keyword_pub, "pub");
const inline_tok = try appendToken(c, .Keyword_inline, "inline");
const fn_tok = try appendToken(c, .Keyword_fn, "fn");
const name_tok = try appendIdentifier(c, name);
_ = try appendToken(c, .LParen, "(");
@ -4744,6 +4743,11 @@ fn transCreateNodeMacroFn(c: *Context, name: []const u8, ref: *ast.Node, proto_a
_ = try appendToken(c, .RParen, ")");
_ = try appendToken(c, .Keyword_callconv, "callconv");
_ = try appendToken(c, .LParen, "(");
const callconv_expr = try transCreateNodeEnumLiteral(c, "Inline");
_ = try appendToken(c, .RParen, ")");
const block_lbrace = try appendToken(c, .LBrace, "{");
const return_kw = try appendToken(c, .Keyword_return, "return");
@ -4783,8 +4787,8 @@ fn transCreateNodeMacroFn(c: *Context, name: []const u8, ref: *ast.Node, proto_a
}, .{
.visib_token = pub_tok,
.name_token = name_tok,
.extern_export_inline_token = inline_tok,
.body_node = &block.base,
.callconv_expr = callconv_expr,
});
mem.copy(ast.Node.FnProto.ParamDecl, fn_proto.params(), fn_params.items);
return &fn_proto.base;
@ -5734,7 +5738,6 @@ fn transMacroFnDefine(c: *Context, m: *MacroCtx) ParseError!void {
const scope = &block_scope.base;
const pub_tok = try appendToken(c, .Keyword_pub, "pub");
const inline_tok = try appendToken(c, .Keyword_inline, "inline");
const fn_tok = try appendToken(c, .Keyword_fn, "fn");
const name_tok = try appendIdentifier(c, m.name);
_ = try appendToken(c, .LParen, "(");
@ -5779,6 +5782,11 @@ fn transMacroFnDefine(c: *Context, m: *MacroCtx) ParseError!void {
_ = try appendToken(c, .RParen, ")");
_ = try appendToken(c, .Keyword_callconv, "callconv");
_ = try appendToken(c, .LParen, "(");
const callconv_expr = try transCreateNodeEnumLiteral(c, "Inline");
_ = try appendToken(c, .RParen, ")");
const type_of = try c.createBuiltinCall("@TypeOf", 1);
const return_kw = try appendToken(c, .Keyword_return, "return");
@ -5810,9 +5818,9 @@ fn transMacroFnDefine(c: *Context, m: *MacroCtx) ParseError!void {
.return_type = .{ .Explicit = &type_of.base },
}, .{
.visib_token = pub_tok,
.extern_export_inline_token = inline_tok,
.name_token = name_tok,
.body_node = block_node,
.callconv_expr = callconv_expr,
});
mem.copy(ast.Node.FnProto.ParamDecl, fn_proto.params(), fn_params.items);


@ -552,7 +552,9 @@ pub const Type = extern union {
if (i != 0) try out_stream.writeAll(", ");
try param_type.format("", .{}, out_stream);
}
try out_stream.writeAll(") ");
try out_stream.writeAll(") callconv(.");
try out_stream.writeAll(@tagName(payload.cc));
try out_stream.writeAll(")");
ty = payload.return_type;
continue;
},


@ -863,9 +863,7 @@ pub const Inst = struct {
fn_type: *Inst,
body: Body,
},
kw_args: struct {
is_inline: bool = false,
},
kw_args: struct {},
};
pub const FnType = struct {
@ -875,10 +873,9 @@ pub const Inst = struct {
positionals: struct {
param_types: []*Inst,
return_type: *Inst,
cc: *Inst,
},
kw_args: struct {
cc: std.builtin.CallingConvention = .Unspecified,
},
kw_args: struct {},
};
pub const IntType = struct {


@ -980,18 +980,8 @@ fn zirCall(mod: *Module, scope: *Scope, inst: *zir.Inst.Call) InnerError!*Inst {
const b = try mod.requireFunctionBlock(scope, inst.base.src);
const is_comptime_call = b.is_comptime or inst.kw_args.modifier == .compile_time;
const is_inline_call = is_comptime_call or inst.kw_args.modifier == .always_inline or blk: {
// This logic will get simplified by
// https://github.com/ziglang/zig/issues/6429
if (try mod.resolveDefinedValue(scope, func)) |func_val| {
const module_fn = switch (func_val.tag()) {
.function => func_val.castTag(.function).?.data,
else => break :blk false,
};
break :blk module_fn.state == .inline_only;
}
break :blk false;
};
const is_inline_call = is_comptime_call or inst.kw_args.modifier == .always_inline or
func.ty.fnCallingConvention() == .Inline;
if (is_inline_call) {
const func_val = try mod.resolveConstValue(scope, func);
const module_fn = switch (func_val.tag()) {
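The simplified condition covers the three user-visible ways a call ends up inline; a sketch with a hypothetical function:

fn small(x: u32) callconv(.Inline) u32 { // 3. the callee's calling convention
    return x + 1;
}
export fn entry() void {
    _ = comptime small(1); // 1. a comptime call
    _ = @call(.{ .modifier = .always_inline }, small, .{@as(u32, 2)}); // 2. the always_inline modifier
}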
@ -1075,7 +1065,7 @@ fn zirFn(mod: *Module, scope: *Scope, fn_inst: *zir.Inst.Fn) InnerError!*Inst {
const fn_type = try resolveType(mod, scope, fn_inst.positionals.fn_type);
const new_func = try scope.arena().create(Module.Fn);
new_func.* = .{
.state = if (fn_inst.kw_args.is_inline) .inline_only else .queued,
.state = if (fn_type.fnCallingConvention() == .Inline) .inline_only else .queued,
.zir = fn_inst.positionals.body,
.body = undefined,
.owner_decl = scope.ownerDecl().?,
@ -1305,22 +1295,26 @@ fn zirFnType(mod: *Module, scope: *Scope, fntype: *zir.Inst.FnType) InnerError!*
const tracy = trace(@src());
defer tracy.end();
const return_type = try resolveType(mod, scope, fntype.positionals.return_type);
const cc_tv = try resolveInstConst(mod, scope, fntype.positionals.cc);
const cc_str = cc_tv.val.castTag(.enum_literal).?.data;
const cc = std.meta.stringToEnum(std.builtin.CallingConvention, cc_str) orelse
return mod.fail(scope, fntype.positionals.cc.src, "Unknown calling convention {s}", .{cc_str});
// Hot path for some common function types.
if (fntype.positionals.param_types.len == 0) {
if (return_type.zigTypeTag() == .NoReturn and fntype.kw_args.cc == .Unspecified) {
if (return_type.zigTypeTag() == .NoReturn and cc == .Unspecified) {
return mod.constType(scope, fntype.base.src, Type.initTag(.fn_noreturn_no_args));
}
if (return_type.zigTypeTag() == .Void and fntype.kw_args.cc == .Unspecified) {
if (return_type.zigTypeTag() == .Void and cc == .Unspecified) {
return mod.constType(scope, fntype.base.src, Type.initTag(.fn_void_no_args));
}
if (return_type.zigTypeTag() == .NoReturn and fntype.kw_args.cc == .Naked) {
if (return_type.zigTypeTag() == .NoReturn and cc == .Naked) {
return mod.constType(scope, fntype.base.src, Type.initTag(.fn_naked_noreturn_no_args));
}
if (return_type.zigTypeTag() == .Void and fntype.kw_args.cc == .C) {
if (return_type.zigTypeTag() == .Void and cc == .C) {
return mod.constType(scope, fntype.base.src, Type.initTag(.fn_ccc_void_no_args));
}
}
@ -1337,9 +1331,9 @@ fn zirFnType(mod: *Module, scope: *Scope, fntype: *zir.Inst.FnType) InnerError!*
}
const fn_ty = try Type.Tag.function.create(arena, .{
.cc = fntype.kw_args.cc,
.return_type = return_type,
.param_types = param_types,
.return_type = return_type,
.cc = cc,
});
return mod.constType(scope, fntype.base.src, fn_ty);
}
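The cc operand now arrives as an enum literal and is resolved through std.meta.stringToEnum; in isolation that resolution looks like this (sketch, std.meta of this era assumed):

const std = @import("std");
test "resolve the cc enum literal" {
    const cc = std.meta.stringToEnum(std.builtin.CallingConvention, "Inline") orelse unreachable;
    std.debug.assert(cc == .Inline);
}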


@ -113,7 +113,7 @@ fn testGodboltApi(zig_exe: []const u8, dir_path: []const u8) anyerror!void {
\\ return num * num;
\\}
\\extern fn zig_panic() noreturn;
\\pub inline fn panic(msg: []const u8, error_return_trace: ?*@import("builtin").StackTrace) noreturn {
\\pub fn panic(msg: []const u8, error_return_trace: ?*@import("builtin").StackTrace) noreturn {
\\ zig_panic();
\\}
);


@ -1648,7 +1648,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
\\ @call(.{ .modifier = .compile_time }, baz, .{});
\\}
\\fn foo() void {}
\\inline fn bar() void {}
\\fn bar() callconv(.Inline) void {}
\\fn baz1() void {}
\\fn baz2() void {}
, &[_][]const u8{
@ -3944,7 +3944,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
\\export fn entry() void {
\\ var a = b;
\\}
\\inline fn b() void { }
\\fn b() callconv(.Inline) void { }
, &[_][]const u8{
"tmp.zig:2:5: error: functions marked inline must be stored in const or comptime var",
"tmp.zig:4:1: note: declared here",
@ -6782,11 +6782,11 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
// \\export fn foo() void {
// \\ bar();
// \\}
// \\inline fn bar() void {
// \\fn bar() callconv(.Inline) void {
// \\ baz();
// \\ quux();
// \\}
// \\inline fn baz() void {
// \\fn baz() callconv(.Inline) void {
// \\ bar();
// \\ quux();
// \\}
@ -6799,7 +6799,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
// \\export fn foo() void {
// \\ quux(@ptrToInt(bar));
// \\}
// \\inline fn bar() void { }
// \\fn bar() callconv(.Inline) void { }
// \\extern fn quux(usize) void;
//, &[_][]const u8{
// "tmp.zig:4:1: error: unable to inline function",
@ -7207,7 +7207,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
\\export fn entry() void {
\\ foo();
\\}
\\inline fn foo() void {
\\fn foo() callconv(.Inline) void {
\\ @setAlignStack(16);
\\}
, &[_][]const u8{


@ -113,7 +113,7 @@ test "assign inline fn to const variable" {
a();
}
inline fn inlineFn() void {}
fn inlineFn() callconv(.Inline) void {}
test "pass by non-copying value" {
expect(addPointCoords(Point{ .x = 1, .y = 2 }) == 3);


@ -179,7 +179,7 @@ pub fn addCases(ctx: *TestContext) !void {
\\ return y - 1;
\\}
\\
\\inline fn rec(n: usize) usize {
\\fn rec(n: usize) callconv(.Inline) usize {
\\ if (n <= 1) return n;
\\ return rec(n - 1);
\\}


@ -255,7 +255,7 @@ pub fn addCases(ctx: *TestContext) !void {
\\ exit(y - 6);
\\}
\\
\\inline fn add(a: usize, b: usize, c: usize) usize {
\\fn add(a: usize, b: usize, c: usize) callconv(.Inline) usize {
\\ return a + b + c;
\\}
\\
@ -1228,7 +1228,7 @@ pub fn addCases(ctx: *TestContext) !void {
\\ exit(y - 6);
\\}
\\
\\inline fn add(a: usize, b: usize, c: usize) usize {
\\fn add(a: usize, b: usize, c: usize) callconv(.Inline) usize {
\\ if (a == 10) @compileError("bad");
\\ return a + b + c;
\\}
@ -1251,7 +1251,7 @@ pub fn addCases(ctx: *TestContext) !void {
\\ exit(y - 6);
\\}
\\
\\inline fn add(a: usize, b: usize, c: usize) usize {
\\fn add(a: usize, b: usize, c: usize) callconv(.Inline) usize {
\\ if (a == 10) @compileError("bad");
\\ return a + b + c;
\\}
@ -1277,7 +1277,7 @@ pub fn addCases(ctx: *TestContext) !void {
\\ exit(y - 21);
\\}
\\
\\inline fn fibonacci(n: usize) usize {
\\fn fibonacci(n: usize) callconv(.Inline) usize {
\\ if (n <= 2) return n;
\\ return fibonacci(n - 2) + fibonacci(n - 1);
\\}
@ -1300,7 +1300,7 @@ pub fn addCases(ctx: *TestContext) !void {
\\ exit(y - 21);
\\}
\\
\\inline fn fibonacci(n: usize) usize {
\\fn fibonacci(n: usize) callconv(.Inline) usize {
\\ if (n <= 2) return n;
\\ return fibonacci(n - 2) + fibonacci(n - 1);
\\}


@ -43,7 +43,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
,
\\pub const VALUE = ((((1 + (2 * 3)) + (4 * 5)) + 6) << 7) | @boolToInt(8 == 9);
,
\\pub inline fn _AL_READ3BYTES(p: anytype) @TypeOf(((@import("std").meta.cast([*c]u8, p)).* | (((@import("std").meta.cast([*c]u8, p)) + 1).* << 8)) | (((@import("std").meta.cast([*c]u8, p)) + 2).* << 16)) {
\\pub fn _AL_READ3BYTES(p: anytype) callconv(.Inline) @TypeOf(((@import("std").meta.cast([*c]u8, p)).* | (((@import("std").meta.cast([*c]u8, p)) + 1).* << 8)) | (((@import("std").meta.cast([*c]u8, p)) + 2).* << 16)) {
\\ return ((@import("std").meta.cast([*c]u8, p)).* | (((@import("std").meta.cast([*c]u8, p)) + 1).* << 8)) | (((@import("std").meta.cast([*c]u8, p)) + 2).* << 16);
\\}
});
@ -116,7 +116,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
\\};
\\pub const Color = struct_Color;
,
\\pub inline fn CLITERAL(type_1: anytype) @TypeOf(type_1) {
\\pub fn CLITERAL(type_1: anytype) callconv(.Inline) @TypeOf(type_1) {
\\ return type_1;
\\}
,
@ -148,7 +148,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
cases.add("correct semicolon after infixop",
\\#define __ferror_unlocked_body(_fp) (((_fp)->_flags & _IO_ERR_SEEN) != 0)
, &[_][]const u8{
\\pub inline fn __ferror_unlocked_body(_fp: anytype) @TypeOf(((_fp.*._flags) & _IO_ERR_SEEN) != 0) {
\\pub fn __ferror_unlocked_body(_fp: anytype) callconv(.Inline) @TypeOf(((_fp.*._flags) & _IO_ERR_SEEN) != 0) {
\\ return ((_fp.*._flags) & _IO_ERR_SEEN) != 0;
\\}
});
@ -157,7 +157,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
\\#define FOO(x) ((x >= 0) + (x >= 0))
\\#define BAR 1 && 2 > 4
, &[_][]const u8{
\\pub inline fn FOO(x: anytype) @TypeOf(@boolToInt(x >= 0) + @boolToInt(x >= 0)) {
\\pub fn FOO(x: anytype) callconv(.Inline) @TypeOf(@boolToInt(x >= 0) + @boolToInt(x >= 0)) {
\\ return @boolToInt(x >= 0) + @boolToInt(x >= 0);
\\}
,
@ -208,7 +208,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
\\ break :blk bar;
\\};
,
\\pub inline fn bar(x: anytype) @TypeOf(baz(1, 2)) {
\\pub fn bar(x: anytype) callconv(.Inline) @TypeOf(baz(1, 2)) {
\\ return blk: {
\\ _ = &x;
\\ _ = 3;
@ -1590,13 +1590,13 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
, &[_][]const u8{
\\pub extern var fn_ptr: ?fn () callconv(.C) void;
,
\\pub inline fn foo() void {
\\pub fn foo() callconv(.Inline) void {
\\ return fn_ptr.?();
\\}
,
\\pub extern var fn_ptr2: ?fn (c_int, f32) callconv(.C) u8;
,
\\pub inline fn bar(arg_1: c_int, arg_2: f32) u8 {
\\pub fn bar(arg_1: c_int, arg_2: f32) callconv(.Inline) u8 {
\\ return fn_ptr2.?(arg_1, arg_2);
\\}
});
@ -1629,7 +1629,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
,
\\pub const glClearPFN = PFNGLCLEARPROC;
,
\\pub inline fn glClearUnion(arg_2: GLbitfield) void {
\\pub fn glClearUnion(arg_2: GLbitfield) callconv(.Inline) void {
\\ return glProcs.gl.Clear.?(arg_2);
\\}
,
@ -1650,15 +1650,15 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
, &[_][]const u8{
\\pub extern var c: c_int;
,
\\pub inline fn BASIC(c_1: anytype) @TypeOf(c_1 * 2) {
\\pub fn BASIC(c_1: anytype) callconv(.Inline) @TypeOf(c_1 * 2) {
\\ return c_1 * 2;
\\}
,
\\pub inline fn FOO(L: anytype, b: anytype) @TypeOf(L + b) {
\\pub fn FOO(L: anytype, b: anytype) callconv(.Inline) @TypeOf(L + b) {
\\ return L + b;
\\}
,
\\pub inline fn BAR() @TypeOf(c * c) {
\\pub fn BAR() callconv(.Inline) @TypeOf(c * c) {
\\ return c * c;
\\}
});
@ -2310,7 +2310,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
cases.add("macro call",
\\#define CALL(arg) bar(arg)
, &[_][]const u8{
\\pub inline fn CALL(arg: anytype) @TypeOf(bar(arg)) {
\\pub fn CALL(arg: anytype) callconv(.Inline) @TypeOf(bar(arg)) {
\\ return bar(arg);
\\}
});
@ -2872,7 +2872,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
\\#define BAR (void*) a
\\#define BAZ (uint32_t)(2)
, &[_][]const u8{
\\pub inline fn FOO(bar: anytype) @TypeOf(baz((@import("std").meta.cast(?*c_void, baz)))) {
\\pub fn FOO(bar: anytype) callconv(.Inline) @TypeOf(baz((@import("std").meta.cast(?*c_void, baz)))) {
\\ return baz((@import("std").meta.cast(?*c_void, baz)));
\\}
,
@ -2914,11 +2914,11 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
\\#define MIN(a, b) ((b) < (a) ? (b) : (a))
\\#define MAX(a, b) ((b) > (a) ? (b) : (a))
, &[_][]const u8{
\\pub inline fn MIN(a: anytype, b: anytype) @TypeOf(if (b < a) b else a) {
\\pub fn MIN(a: anytype, b: anytype) callconv(.Inline) @TypeOf(if (b < a) b else a) {
\\ return if (b < a) b else a;
\\}
,
\\pub inline fn MAX(a: anytype, b: anytype) @TypeOf(if (b > a) b else a) {
\\pub fn MAX(a: anytype, b: anytype) callconv(.Inline) @TypeOf(if (b > a) b else a) {
\\ return if (b > a) b else a;
\\}
});
@ -3106,7 +3106,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
\\#define DefaultScreen(dpy) (((_XPrivDisplay)(dpy))->default_screen)
\\
, &[_][]const u8{
\\pub inline fn DefaultScreen(dpy: anytype) @TypeOf((@import("std").meta.cast(_XPrivDisplay, dpy)).*.default_screen) {
\\pub fn DefaultScreen(dpy: anytype) callconv(.Inline) @TypeOf((@import("std").meta.cast(_XPrivDisplay, dpy)).*.default_screen) {
\\ return (@import("std").meta.cast(_XPrivDisplay, dpy)).*.default_screen;
\\}
});