Remove uses of deprecated callconv aliases

Linus Groh 2025-03-03 18:01:47 +00:00
parent 05937b362a
commit 79460d4a3e
251 changed files with 826 additions and 822 deletions
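
The deprecated enum-literal aliases are replaced by their current std.builtin.CallingConvention spellings. The renames exercised by the diffs below are:

    .C       -> .c
    .Naked   -> .naked
    .AAPCS   -> .{ .arm_aapcs = .{} }
    .Stdcall -> .{ .x86_stdcall = .{} }

A minimal before/after sketch (`foo` is a hypothetical name, not a function touched by this commit):

fn foo() callconv(.C) void {} // deprecated alias
fn foo() callconv(.c) void {} // replacement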

View File

@@ -2,6 +2,6 @@ comptime {
@export(&internalName, .{ .name = "foo", .linkage = .strong });
}
fn internalName() callconv(.C) void {}
fn internalName() callconv(.c) void {}
// obj

View File

@@ -2,7 +2,7 @@ const std = @import("std");
const testing = std.testing;
const builtin = @import("builtin");
fn add(count: c_int, ...) callconv(.C) c_int {
fn add(count: c_int, ...) callconv(.c) c_int {
var ap = @cVaStart();
defer @cVaEnd(&ap);
var i: usize = 0;

View File

@@ -35,7 +35,7 @@ fn abort() noreturn {
// The naked calling convention makes a function not have any function prologue or epilogue.
// This can be useful when integrating with assembly.
fn _start() callconv(.Naked) noreturn {
fn _start() callconv(.naked) noreturn {
abort();
}
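
Because a naked function has neither prologue nor epilogue, its body cannot rely on a stack frame; in practice it is typically restricted to inline assembly, as in the aarch64 outline-atomics helpers later in this commit. A minimal sketch of that shape (hypothetical helper, aarch64 assumed):

fn exampleTrap() callconv(.naked) void {
    // No frame is set up for us, so only raw instructions are safe here.
    asm volatile ("brk #0");
    unreachable;
}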

View File

@@ -2,7 +2,7 @@ const Derp = opaque {};
const Wat = opaque {};
extern fn bar(d: *Derp) void;
fn foo(w: *Wat) callconv(.C) void {
fn foo(w: *Wat) callconv(.c) void {
bar(w);
}

View File

@@ -54,11 +54,11 @@ pub fn panic(msg: []const u8, error_return_trace: ?*std.builtin.StackTrace, _: ?
}
extern fn main(argc: c_int, argv: [*:null]?[*:0]u8) c_int;
fn wasm_start() callconv(.C) void {
fn wasm_start() callconv(.c) void {
_ = main(0, undefined);
}
fn strcpy(dest: [*:0]u8, src: [*:0]const u8) callconv(.C) [*:0]u8 {
fn strcpy(dest: [*:0]u8, src: [*:0]const u8) callconv(.c) [*:0]u8 {
var i: usize = 0;
while (src[i] != 0) : (i += 1) {
dest[i] = src[i];
@@ -76,7 +76,7 @@ test "strcpy" {
try std.testing.expectEqualSlices(u8, "foobarbaz", std.mem.sliceTo(&s1, 0));
}
fn strncpy(dest: [*:0]u8, src: [*:0]const u8, n: usize) callconv(.C) [*:0]u8 {
fn strncpy(dest: [*:0]u8, src: [*:0]const u8, n: usize) callconv(.c) [*:0]u8 {
var i: usize = 0;
while (i < n and src[i] != 0) : (i += 1) {
dest[i] = src[i];
@@ -96,7 +96,7 @@ test "strncpy" {
try std.testing.expectEqualSlices(u8, "foobarbaz", std.mem.sliceTo(&s1, 0));
}
fn strcat(dest: [*:0]u8, src: [*:0]const u8) callconv(.C) [*:0]u8 {
fn strcat(dest: [*:0]u8, src: [*:0]const u8) callconv(.c) [*:0]u8 {
var dest_end: usize = 0;
while (dest[dest_end] != 0) : (dest_end += 1) {}
@@ -119,7 +119,7 @@ test "strcat" {
try std.testing.expectEqualSlices(u8, "foobarbaz", std.mem.sliceTo(&s1, 0));
}
fn strncat(dest: [*:0]u8, src: [*:0]const u8, avail: usize) callconv(.C) [*:0]u8 {
fn strncat(dest: [*:0]u8, src: [*:0]const u8, avail: usize) callconv(.c) [*:0]u8 {
var dest_end: usize = 0;
while (dest[dest_end] != 0) : (dest_end += 1) {}
@@ -142,7 +142,7 @@ test "strncat" {
try std.testing.expectEqualSlices(u8, "foobarbaz", std.mem.sliceTo(&s1, 0));
}
fn strcmp(s1: [*:0]const u8, s2: [*:0]const u8) callconv(.C) c_int {
fn strcmp(s1: [*:0]const u8, s2: [*:0]const u8) callconv(.c) c_int {
return switch (std.mem.orderZ(u8, s1, s2)) {
.lt => -1,
.eq => 0,
@@ -150,11 +150,11 @@ fn strcmp(s1: [*:0]const u8, s2: [*:0]const u8) callconv(.C) c_int {
};
}
fn strlen(s: [*:0]const u8) callconv(.C) usize {
fn strlen(s: [*:0]const u8) callconv(.c) usize {
return std.mem.len(s);
}
fn strncmp(_l: [*:0]const u8, _r: [*:0]const u8, _n: usize) callconv(.C) c_int {
fn strncmp(_l: [*:0]const u8, _r: [*:0]const u8, _n: usize) callconv(.c) c_int {
if (_n == 0) return 0;
var l = _l;
var r = _r;
@@ -167,7 +167,7 @@ fn strncmp(_l: [*:0]const u8, _r: [*:0]const u8, _n: usize) callconv(.C) c_int {
return @as(c_int, l[0]) - @as(c_int, r[0]);
}
fn strerror(errnum: c_int) callconv(.C) [*:0]const u8 {
fn strerror(errnum: c_int) callconv(.c) [*:0]const u8 {
_ = errnum;
return "TODO strerror implementation";
}

View File

@@ -350,7 +350,7 @@ var is_fuzz_test: bool = undefined;
extern fn fuzzer_set_name(name_ptr: [*]const u8, name_len: usize) void;
extern fn fuzzer_init(cache_dir: FuzzerSlice) void;
extern fn fuzzer_init_corpus_elem(input_ptr: [*]const u8, input_len: usize) void;
extern fn fuzzer_start(testOne: *const fn ([*]const u8, usize) callconv(.C) void) void;
extern fn fuzzer_start(testOne: *const fn ([*]const u8, usize) callconv(.c) void) void;
extern fn fuzzer_coverage_id() u64;
pub fn fuzz(
@@ -382,7 +382,7 @@ pub fn fuzz(
const global = struct {
var ctx: @TypeOf(context) = undefined;
fn fuzzer_one(input_ptr: [*]const u8, input_len: usize) callconv(.C) void {
fn fuzzer_one(input_ptr: [*]const u8, input_len: usize) callconv(.c) void {
@disableInstrumentation();
testing.allocator_instance = .{};
defer if (testing.allocator_instance.deinit() == .leak) std.process.exit(1);

View File

@@ -10,7 +10,7 @@ const always_has_lse = std.Target.aarch64.featureSetHas(builtin.cpu.features, .l
/// which ARM is concerned would have too much overhead.
var __aarch64_have_lse_atomics: u8 = @intFromBool(always_has_lse);
fn __aarch64_cas1_relax() align(16) callconv(.Naked) void {
fn __aarch64_cas1_relax() align(16) callconv(.naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
@@ -32,7 +32,7 @@ fn __aarch64_cas1_relax() align(16) callconv(.Naked) void {
);
unreachable;
}
fn __aarch64_swp1_relax() align(16) callconv(.Naked) void {
fn __aarch64_swp1_relax() align(16) callconv(.naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
@@ -52,7 +52,7 @@ fn __aarch64_swp1_relax() align(16) callconv(.Naked) void {
);
unreachable;
}
fn __aarch64_ldadd1_relax() align(16) callconv(.Naked) void {
fn __aarch64_ldadd1_relax() align(16) callconv(.naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
@@ -73,7 +73,7 @@ fn __aarch64_ldadd1_relax() align(16) callconv(.Naked) void {
);
unreachable;
}
fn __aarch64_ldclr1_relax() align(16) callconv(.Naked) void {
fn __aarch64_ldclr1_relax() align(16) callconv(.naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
@@ -94,7 +94,7 @@ fn __aarch64_ldclr1_relax() align(16) callconv(.Naked) void {
);
unreachable;
}
fn __aarch64_ldeor1_relax() align(16) callconv(.Naked) void {
fn __aarch64_ldeor1_relax() align(16) callconv(.naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
@@ -115,7 +115,7 @@ fn __aarch64_ldeor1_relax() align(16) callconv(.Naked) void {
);
unreachable;
}
fn __aarch64_ldset1_relax() align(16) callconv(.Naked) void {
fn __aarch64_ldset1_relax() align(16) callconv(.naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
@@ -136,7 +136,7 @@ fn __aarch64_ldset1_relax() align(16) callconv(.Naked) void {
);
unreachable;
}
fn __aarch64_cas1_acq() align(16) callconv(.Naked) void {
fn __aarch64_cas1_acq() align(16) callconv(.naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
@@ -158,7 +158,7 @@ fn __aarch64_cas1_acq() align(16) callconv(.Naked) void {
);
unreachable;
}
fn __aarch64_swp1_acq() align(16) callconv(.Naked) void {
fn __aarch64_swp1_acq() align(16) callconv(.naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
@@ -178,7 +178,7 @@ fn __aarch64_swp1_acq() align(16) callconv(.Naked) void {
);
unreachable;
}
fn __aarch64_ldadd1_acq() align(16) callconv(.Naked) void {
fn __aarch64_ldadd1_acq() align(16) callconv(.naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
@@ -199,7 +199,7 @@ fn __aarch64_ldadd1_acq() align(16) callconv(.Naked) void {
);
unreachable;
}
fn __aarch64_ldclr1_acq() align(16) callconv(.Naked) void {
fn __aarch64_ldclr1_acq() align(16) callconv(.naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
@@ -220,7 +220,7 @@ fn __aarch64_ldclr1_acq() align(16) callconv(.Naked) void {
);
unreachable;
}
fn __aarch64_ldeor1_acq() align(16) callconv(.Naked) void {
fn __aarch64_ldeor1_acq() align(16) callconv(.naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
@@ -241,7 +241,7 @@ fn __aarch64_ldeor1_acq() align(16) callconv(.Naked) void {
);
unreachable;
}
fn __aarch64_ldset1_acq() align(16) callconv(.Naked) void {
fn __aarch64_ldset1_acq() align(16) callconv(.naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
@@ -262,7 +262,7 @@ fn __aarch64_ldset1_acq() align(16) callconv(.Naked) void {
);
unreachable;
}
fn __aarch64_cas1_rel() align(16) callconv(.Naked) void {
fn __aarch64_cas1_rel() align(16) callconv(.naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
@@ -284,7 +284,7 @@ fn __aarch64_cas1_rel() align(16) callconv(.Naked) void {
);
unreachable;
}
fn __aarch64_swp1_rel() align(16) callconv(.Naked) void {
fn __aarch64_swp1_rel() align(16) callconv(.naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
@@ -304,7 +304,7 @@ fn __aarch64_swp1_rel() align(16) callconv(.Naked) void {
);
unreachable;
}
fn __aarch64_ldadd1_rel() align(16) callconv(.Naked) void {
fn __aarch64_ldadd1_rel() align(16) callconv(.naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
@@ -325,7 +325,7 @@ fn __aarch64_ldadd1_rel() align(16) callconv(.Naked) void {
);
unreachable;
}
fn __aarch64_ldclr1_rel() align(16) callconv(.Naked) void {
fn __aarch64_ldclr1_rel() align(16) callconv(.naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
@@ -346,7 +346,7 @@ fn __aarch64_ldclr1_rel() align(16) callconv(.Naked) void {
);
unreachable;
}
fn __aarch64_ldeor1_rel() align(16) callconv(.Naked) void {
fn __aarch64_ldeor1_rel() align(16) callconv(.naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
@@ -367,7 +367,7 @@ fn __aarch64_ldeor1_rel() align(16) callconv(.Naked) void {
);
unreachable;
}
fn __aarch64_ldset1_rel() align(16) callconv(.Naked) void {
fn __aarch64_ldset1_rel() align(16) callconv(.naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
@@ -388,7 +388,7 @@ fn __aarch64_ldset1_rel() align(16) callconv(.Naked) void {
);
unreachable;
}
fn __aarch64_cas1_acq_rel() align(16) callconv(.Naked) void {
fn __aarch64_cas1_acq_rel() align(16) callconv(.naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
@@ -410,7 +410,7 @@ fn __aarch64_cas1_acq_rel() align(16) callconv(.Naked) void {
);
unreachable;
}
fn __aarch64_swp1_acq_rel() align(16) callconv(.Naked) void {
fn __aarch64_swp1_acq_rel() align(16) callconv(.naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
@@ -430,7 +430,7 @@ fn __aarch64_swp1_acq_rel() align(16) callconv(.Naked) void {
);
unreachable;
}
fn __aarch64_ldadd1_acq_rel() align(16) callconv(.Naked) void {
fn __aarch64_ldadd1_acq_rel() align(16) callconv(.naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
@@ -451,7 +451,7 @@ fn __aarch64_ldadd1_acq_rel() align(16) callconv(.Naked) void {
);
unreachable;
}
fn __aarch64_ldclr1_acq_rel() align(16) callconv(.Naked) void {
fn __aarch64_ldclr1_acq_rel() align(16) callconv(.naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
@@ -472,7 +472,7 @@ fn __aarch64_ldclr1_acq_rel() align(16) callconv(.Naked) void {
);
unreachable;
}
fn __aarch64_ldeor1_acq_rel() align(16) callconv(.Naked) void {
fn __aarch64_ldeor1_acq_rel() align(16) callconv(.naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
@@ -493,7 +493,7 @@ fn __aarch64_ldeor1_acq_rel() align(16) callconv(.Naked) void {
);
unreachable;
}
fn __aarch64_ldset1_acq_rel() align(16) callconv(.Naked) void {
fn __aarch64_ldset1_acq_rel() align(16) callconv(.naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
@@ -514,7 +514,7 @@ fn __aarch64_ldset1_acq_rel() align(16) callconv(.Naked) void {
);
unreachable;
}
fn __aarch64_cas2_relax() align(16) callconv(.Naked) void {
fn __aarch64_cas2_relax() align(16) callconv(.naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
@@ -536,7 +536,7 @@ fn __aarch64_cas2_relax() align(16) callconv(.Naked) void {
);
unreachable;
}
fn __aarch64_swp2_relax() align(16) callconv(.Naked) void {
fn __aarch64_swp2_relax() align(16) callconv(.naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
@@ -556,7 +556,7 @@ fn __aarch64_swp2_relax() align(16) callconv(.Naked) void {
);
unreachable;
}
fn __aarch64_ldadd2_relax() align(16) callconv(.Naked) void {
fn __aarch64_ldadd2_relax() align(16) callconv(.naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
@@ -577,7 +577,7 @@ fn __aarch64_ldadd2_relax() align(16) callconv(.Naked) void {
);
unreachable;
}
fn __aarch64_ldclr2_relax() align(16) callconv(.Naked) void {
fn __aarch64_ldclr2_relax() align(16) callconv(.naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
@@ -598,7 +598,7 @@ fn __aarch64_ldclr2_relax() align(16) callconv(.Naked) void {
);
unreachable;
}
fn __aarch64_ldeor2_relax() align(16) callconv(.Naked) void {
fn __aarch64_ldeor2_relax() align(16) callconv(.naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
@@ -619,7 +619,7 @@ fn __aarch64_ldeor2_relax() align(16) callconv(.Naked) void {
);
unreachable;
}
fn __aarch64_ldset2_relax() align(16) callconv(.Naked) void {
fn __aarch64_ldset2_relax() align(16) callconv(.naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
@@ -640,7 +640,7 @@ fn __aarch64_ldset2_relax() align(16) callconv(.Naked) void {
);
unreachable;
}
fn __aarch64_cas2_acq() align(16) callconv(.Naked) void {
fn __aarch64_cas2_acq() align(16) callconv(.naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
@@ -662,7 +662,7 @@ fn __aarch64_cas2_acq() align(16) callconv(.Naked) void {
);
unreachable;
}
fn __aarch64_swp2_acq() align(16) callconv(.Naked) void {
fn __aarch64_swp2_acq() align(16) callconv(.naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
@@ -682,7 +682,7 @@ fn __aarch64_swp2_acq() align(16) callconv(.Naked) void {
);
unreachable;
}
fn __aarch64_ldadd2_acq() align(16) callconv(.Naked) void {
fn __aarch64_ldadd2_acq() align(16) callconv(.naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
@@ -703,7 +703,7 @@ fn __aarch64_ldadd2_acq() align(16) callconv(.Naked) void {
);
unreachable;
}
fn __aarch64_ldclr2_acq() align(16) callconv(.Naked) void {
fn __aarch64_ldclr2_acq() align(16) callconv(.naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
@@ -724,7 +724,7 @@ fn __aarch64_ldclr2_acq() align(16) callconv(.Naked) void {
);
unreachable;
}
fn __aarch64_ldeor2_acq() align(16) callconv(.Naked) void {
fn __aarch64_ldeor2_acq() align(16) callconv(.naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
@@ -745,7 +745,7 @@ fn __aarch64_ldeor2_acq() align(16) callconv(.Naked) void {
);
unreachable;
}
fn __aarch64_ldset2_acq() align(16) callconv(.Naked) void {
fn __aarch64_ldset2_acq() align(16) callconv(.naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
@@ -766,7 +766,7 @@ fn __aarch64_ldset2_acq() align(16) callconv(.Naked) void {
);
unreachable;
}
fn __aarch64_cas2_rel() align(16) callconv(.Naked) void {
fn __aarch64_cas2_rel() align(16) callconv(.naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
@@ -788,7 +788,7 @@ fn __aarch64_cas2_rel() align(16) callconv(.Naked) void {
);
unreachable;
}
fn __aarch64_swp2_rel() align(16) callconv(.Naked) void {
fn __aarch64_swp2_rel() align(16) callconv(.naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
@@ -808,7 +808,7 @@ fn __aarch64_swp2_rel() align(16) callconv(.Naked) void {
);
unreachable;
}
fn __aarch64_ldadd2_rel() align(16) callconv(.Naked) void {
fn __aarch64_ldadd2_rel() align(16) callconv(.naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
@@ -829,7 +829,7 @@ fn __aarch64_ldadd2_rel() align(16) callconv(.Naked) void {
);
unreachable;
}
fn __aarch64_ldclr2_rel() align(16) callconv(.Naked) void {
fn __aarch64_ldclr2_rel() align(16) callconv(.naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
@@ -850,7 +850,7 @@ fn __aarch64_ldclr2_rel() align(16) callconv(.Naked) void {
);
unreachable;
}
fn __aarch64_ldeor2_rel() align(16) callconv(.Naked) void {
fn __aarch64_ldeor2_rel() align(16) callconv(.naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
@@ -871,7 +871,7 @@ fn __aarch64_ldeor2_rel() align(16) callconv(.Naked) void {
);
unreachable;
}
fn __aarch64_ldset2_rel() align(16) callconv(.Naked) void {
fn __aarch64_ldset2_rel() align(16) callconv(.naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
@@ -892,7 +892,7 @@ fn __aarch64_ldset2_rel() align(16) callconv(.Naked) void {
);
unreachable;
}
fn __aarch64_cas2_acq_rel() align(16) callconv(.Naked) void {
fn __aarch64_cas2_acq_rel() align(16) callconv(.naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
@@ -914,7 +914,7 @@ fn __aarch64_cas2_acq_rel() align(16) callconv(.Naked) void {
);
unreachable;
}
fn __aarch64_swp2_acq_rel() align(16) callconv(.Naked) void {
fn __aarch64_swp2_acq_rel() align(16) callconv(.naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
@@ -934,7 +934,7 @@ fn __aarch64_swp2_acq_rel() align(16) callconv(.Naked) void {
);
unreachable;
}
fn __aarch64_ldadd2_acq_rel() align(16) callconv(.Naked) void {
fn __aarch64_ldadd2_acq_rel() align(16) callconv(.naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
@@ -955,7 +955,7 @@ fn __aarch64_ldadd2_acq_rel() align(16) callconv(.Naked) void {
);
unreachable;
}
fn __aarch64_ldclr2_acq_rel() align(16) callconv(.Naked) void {
fn __aarch64_ldclr2_acq_rel() align(16) callconv(.naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
@@ -976,7 +976,7 @@ fn __aarch64_ldclr2_acq_rel() align(16) callconv(.Naked) void {
);
unreachable;
}
fn __aarch64_ldeor2_acq_rel() align(16) callconv(.Naked) void {
fn __aarch64_ldeor2_acq_rel() align(16) callconv(.naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
@@ -997,7 +997,7 @@ fn __aarch64_ldeor2_acq_rel() align(16) callconv(.Naked) void {
);
unreachable;
}
fn __aarch64_ldset2_acq_rel() align(16) callconv(.Naked) void {
fn __aarch64_ldset2_acq_rel() align(16) callconv(.naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
@@ -1018,7 +1018,7 @@ fn __aarch64_ldset2_acq_rel() align(16) callconv(.Naked) void {
);
unreachable;
}
fn __aarch64_cas4_relax() align(16) callconv(.Naked) void {
fn __aarch64_cas4_relax() align(16) callconv(.naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
@@ -1040,7 +1040,7 @@ fn __aarch64_cas4_relax() align(16) callconv(.Naked) void {
);
unreachable;
}
fn __aarch64_swp4_relax() align(16) callconv(.Naked) void {
fn __aarch64_swp4_relax() align(16) callconv(.naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
@@ -1060,7 +1060,7 @@ fn __aarch64_swp4_relax() align(16) callconv(.Naked) void {
);
unreachable;
}
fn __aarch64_ldadd4_relax() align(16) callconv(.Naked) void {
fn __aarch64_ldadd4_relax() align(16) callconv(.naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
@@ -1081,7 +1081,7 @@ fn __aarch64_ldadd4_relax() align(16) callconv(.Naked) void {
);
unreachable;
}
fn __aarch64_ldclr4_relax() align(16) callconv(.Naked) void {
fn __aarch64_ldclr4_relax() align(16) callconv(.naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
@@ -1102,7 +1102,7 @@ fn __aarch64_ldclr4_relax() align(16) callconv(.Naked) void {
);
unreachable;
}
fn __aarch64_ldeor4_relax() align(16) callconv(.Naked) void {
fn __aarch64_ldeor4_relax() align(16) callconv(.naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
@@ -1123,7 +1123,7 @@ fn __aarch64_ldeor4_relax() align(16) callconv(.Naked) void {
);
unreachable;
}
fn __aarch64_ldset4_relax() align(16) callconv(.Naked) void {
fn __aarch64_ldset4_relax() align(16) callconv(.naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
@@ -1144,7 +1144,7 @@ fn __aarch64_ldset4_relax() align(16) callconv(.Naked) void {
);
unreachable;
}
fn __aarch64_cas4_acq() align(16) callconv(.Naked) void {
fn __aarch64_cas4_acq() align(16) callconv(.naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
@@ -1166,7 +1166,7 @@ fn __aarch64_cas4_acq() align(16) callconv(.Naked) void {
);
unreachable;
}
fn __aarch64_swp4_acq() align(16) callconv(.Naked) void {
fn __aarch64_swp4_acq() align(16) callconv(.naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
@@ -1186,7 +1186,7 @@ fn __aarch64_swp4_acq() align(16) callconv(.Naked) void {
);
unreachable;
}
fn __aarch64_ldadd4_acq() align(16) callconv(.Naked) void {
fn __aarch64_ldadd4_acq() align(16) callconv(.naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
@@ -1207,7 +1207,7 @@ fn __aarch64_ldadd4_acq() align(16) callconv(.Naked) void {
);
unreachable;
}
fn __aarch64_ldclr4_acq() align(16) callconv(.Naked) void {
fn __aarch64_ldclr4_acq() align(16) callconv(.naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
@@ -1228,7 +1228,7 @@ fn __aarch64_ldclr4_acq() align(16) callconv(.Naked) void {
);
unreachable;
}
fn __aarch64_ldeor4_acq() align(16) callconv(.Naked) void {
fn __aarch64_ldeor4_acq() align(16) callconv(.naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
@@ -1249,7 +1249,7 @@ fn __aarch64_ldeor4_acq() align(16) callconv(.Naked) void {
);
unreachable;
}
fn __aarch64_ldset4_acq() align(16) callconv(.Naked) void {
fn __aarch64_ldset4_acq() align(16) callconv(.naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
@@ -1270,7 +1270,7 @@ fn __aarch64_ldset4_acq() align(16) callconv(.Naked) void {
);
unreachable;
}
fn __aarch64_cas4_rel() align(16) callconv(.Naked) void {
fn __aarch64_cas4_rel() align(16) callconv(.naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
@@ -1292,7 +1292,7 @@ fn __aarch64_cas4_rel() align(16) callconv(.Naked) void {
);
unreachable;
}
fn __aarch64_swp4_rel() align(16) callconv(.Naked) void {
fn __aarch64_swp4_rel() align(16) callconv(.naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
@@ -1312,7 +1312,7 @@ fn __aarch64_swp4_rel() align(16) callconv(.Naked) void {
);
unreachable;
}
fn __aarch64_ldadd4_rel() align(16) callconv(.Naked) void {
fn __aarch64_ldadd4_rel() align(16) callconv(.naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
@@ -1333,7 +1333,7 @@ fn __aarch64_ldadd4_rel() align(16) callconv(.Naked) void {
);
unreachable;
}
fn __aarch64_ldclr4_rel() align(16) callconv(.Naked) void {
fn __aarch64_ldclr4_rel() align(16) callconv(.naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
@@ -1354,7 +1354,7 @@ fn __aarch64_ldclr4_rel() align(16) callconv(.Naked) void {
);
unreachable;
}
fn __aarch64_ldeor4_rel() align(16) callconv(.Naked) void {
fn __aarch64_ldeor4_rel() align(16) callconv(.naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
@@ -1375,7 +1375,7 @@ fn __aarch64_ldeor4_rel() align(16) callconv(.Naked) void {
);
unreachable;
}
fn __aarch64_ldset4_rel() align(16) callconv(.Naked) void {
fn __aarch64_ldset4_rel() align(16) callconv(.naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
@@ -1396,7 +1396,7 @@ fn __aarch64_ldset4_rel() align(16) callconv(.Naked) void {
);
unreachable;
}
fn __aarch64_cas4_acq_rel() align(16) callconv(.Naked) void {
fn __aarch64_cas4_acq_rel() align(16) callconv(.naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
@@ -1418,7 +1418,7 @@ fn __aarch64_cas4_acq_rel() align(16) callconv(.Naked) void {
);
unreachable;
}
fn __aarch64_swp4_acq_rel() align(16) callconv(.Naked) void {
fn __aarch64_swp4_acq_rel() align(16) callconv(.naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
@@ -1438,7 +1438,7 @@ fn __aarch64_swp4_acq_rel() align(16) callconv(.Naked) void {
);
unreachable;
}
fn __aarch64_ldadd4_acq_rel() align(16) callconv(.Naked) void {
fn __aarch64_ldadd4_acq_rel() align(16) callconv(.naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
@@ -1459,7 +1459,7 @@ fn __aarch64_ldadd4_acq_rel() align(16) callconv(.Naked) void {
);
unreachable;
}
fn __aarch64_ldclr4_acq_rel() align(16) callconv(.Naked) void {
fn __aarch64_ldclr4_acq_rel() align(16) callconv(.naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
@@ -1480,7 +1480,7 @@ fn __aarch64_ldclr4_acq_rel() align(16) callconv(.Naked) void {
);
unreachable;
}
fn __aarch64_ldeor4_acq_rel() align(16) callconv(.Naked) void {
fn __aarch64_ldeor4_acq_rel() align(16) callconv(.naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
@@ -1501,7 +1501,7 @@ fn __aarch64_ldeor4_acq_rel() align(16) callconv(.Naked) void {
);
unreachable;
}
fn __aarch64_ldset4_acq_rel() align(16) callconv(.Naked) void {
fn __aarch64_ldset4_acq_rel() align(16) callconv(.naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
@@ -1522,7 +1522,7 @@ fn __aarch64_ldset4_acq_rel() align(16) callconv(.Naked) void {
);
unreachable;
}
fn __aarch64_cas8_relax() align(16) callconv(.Naked) void {
fn __aarch64_cas8_relax() align(16) callconv(.naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
@@ -1544,7 +1544,7 @@ fn __aarch64_cas8_relax() align(16) callconv(.Naked) void {
);
unreachable;
}
fn __aarch64_swp8_relax() align(16) callconv(.Naked) void {
fn __aarch64_swp8_relax() align(16) callconv(.naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
@@ -1564,7 +1564,7 @@ fn __aarch64_swp8_relax() align(16) callconv(.Naked) void {
);
unreachable;
}
fn __aarch64_ldadd8_relax() align(16) callconv(.Naked) void {
fn __aarch64_ldadd8_relax() align(16) callconv(.naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
@@ -1585,7 +1585,7 @@ fn __aarch64_ldadd8_relax() align(16) callconv(.Naked) void {
);
unreachable;
}
fn __aarch64_ldclr8_relax() align(16) callconv(.Naked) void {
fn __aarch64_ldclr8_relax() align(16) callconv(.naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
@@ -1606,7 +1606,7 @@ fn __aarch64_ldclr8_relax() align(16) callconv(.Naked) void {
);
unreachable;
}
fn __aarch64_ldeor8_relax() align(16) callconv(.Naked) void {
fn __aarch64_ldeor8_relax() align(16) callconv(.naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
@@ -1627,7 +1627,7 @@ fn __aarch64_ldeor8_relax() align(16) callconv(.Naked) void {
);
unreachable;
}
fn __aarch64_ldset8_relax() align(16) callconv(.Naked) void {
fn __aarch64_ldset8_relax() align(16) callconv(.naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
@@ -1648,7 +1648,7 @@ fn __aarch64_ldset8_relax() align(16) callconv(.Naked) void {
);
unreachable;
}
fn __aarch64_cas8_acq() align(16) callconv(.Naked) void {
fn __aarch64_cas8_acq() align(16) callconv(.naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
@@ -1670,7 +1670,7 @@ fn __aarch64_cas8_acq() align(16) callconv(.Naked) void {
);
unreachable;
}
fn __aarch64_swp8_acq() align(16) callconv(.Naked) void {
fn __aarch64_swp8_acq() align(16) callconv(.naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
@@ -1690,7 +1690,7 @@ fn __aarch64_swp8_acq() align(16) callconv(.Naked) void {
);
unreachable;
}
fn __aarch64_ldadd8_acq() align(16) callconv(.Naked) void {
fn __aarch64_ldadd8_acq() align(16) callconv(.naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
@@ -1711,7 +1711,7 @@ fn __aarch64_ldadd8_acq() align(16) callconv(.Naked) void {
);
unreachable;
}
fn __aarch64_ldclr8_acq() align(16) callconv(.Naked) void {
fn __aarch64_ldclr8_acq() align(16) callconv(.naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
@@ -1732,7 +1732,7 @@ fn __aarch64_ldclr8_acq() align(16) callconv(.Naked) void {
);
unreachable;
}
fn __aarch64_ldeor8_acq() align(16) callconv(.Naked) void {
fn __aarch64_ldeor8_acq() align(16) callconv(.naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
@@ -1753,7 +1753,7 @@ fn __aarch64_ldeor8_acq() align(16) callconv(.Naked) void {
);
unreachable;
}
fn __aarch64_ldset8_acq() align(16) callconv(.Naked) void {
fn __aarch64_ldset8_acq() align(16) callconv(.naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
@@ -1774,7 +1774,7 @@ fn __aarch64_ldset8_acq() align(16) callconv(.Naked) void {
);
unreachable;
}
fn __aarch64_cas8_rel() align(16) callconv(.Naked) void {
fn __aarch64_cas8_rel() align(16) callconv(.naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
@@ -1796,7 +1796,7 @@ fn __aarch64_cas8_rel() align(16) callconv(.Naked) void {
);
unreachable;
}
fn __aarch64_swp8_rel() align(16) callconv(.Naked) void {
fn __aarch64_swp8_rel() align(16) callconv(.naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
@@ -1816,7 +1816,7 @@ fn __aarch64_swp8_rel() align(16) callconv(.Naked) void {
);
unreachable;
}
fn __aarch64_ldadd8_rel() align(16) callconv(.Naked) void {
fn __aarch64_ldadd8_rel() align(16) callconv(.naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
@@ -1837,7 +1837,7 @@ fn __aarch64_ldadd8_rel() align(16) callconv(.Naked) void {
);
unreachable;
}
fn __aarch64_ldclr8_rel() align(16) callconv(.Naked) void {
fn __aarch64_ldclr8_rel() align(16) callconv(.naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
@@ -1858,7 +1858,7 @@ fn __aarch64_ldclr8_rel() align(16) callconv(.Naked) void {
);
unreachable;
}
fn __aarch64_ldeor8_rel() align(16) callconv(.Naked) void {
fn __aarch64_ldeor8_rel() align(16) callconv(.naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
@@ -1879,7 +1879,7 @@ fn __aarch64_ldeor8_rel() align(16) callconv(.Naked) void {
);
unreachable;
}
fn __aarch64_ldset8_rel() align(16) callconv(.Naked) void {
fn __aarch64_ldset8_rel() align(16) callconv(.naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
@@ -1900,7 +1900,7 @@ fn __aarch64_ldset8_rel() align(16) callconv(.Naked) void {
);
unreachable;
}
fn __aarch64_cas8_acq_rel() align(16) callconv(.Naked) void {
fn __aarch64_cas8_acq_rel() align(16) callconv(.naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
@@ -1922,7 +1922,7 @@ fn __aarch64_cas8_acq_rel() align(16) callconv(.Naked) void {
);
unreachable;
}
fn __aarch64_swp8_acq_rel() align(16) callconv(.Naked) void {
fn __aarch64_swp8_acq_rel() align(16) callconv(.naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
@@ -1942,7 +1942,7 @@ fn __aarch64_swp8_acq_rel() align(16) callconv(.Naked) void {
);
unreachable;
}
fn __aarch64_ldadd8_acq_rel() align(16) callconv(.Naked) void {
fn __aarch64_ldadd8_acq_rel() align(16) callconv(.naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
@@ -1963,7 +1963,7 @@ fn __aarch64_ldadd8_acq_rel() align(16) callconv(.Naked) void {
);
unreachable;
}
fn __aarch64_ldclr8_acq_rel() align(16) callconv(.Naked) void {
fn __aarch64_ldclr8_acq_rel() align(16) callconv(.naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
@@ -1984,7 +1984,7 @@ fn __aarch64_ldclr8_acq_rel() align(16) callconv(.Naked) void {
);
unreachable;
}
fn __aarch64_ldeor8_acq_rel() align(16) callconv(.Naked) void {
fn __aarch64_ldeor8_acq_rel() align(16) callconv(.naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
@@ -2005,7 +2005,7 @@ fn __aarch64_ldeor8_acq_rel() align(16) callconv(.Naked) void {
);
unreachable;
}
fn __aarch64_ldset8_acq_rel() align(16) callconv(.Naked) void {
fn __aarch64_ldset8_acq_rel() align(16) callconv(.naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
@@ -2026,7 +2026,7 @@ fn __aarch64_ldset8_acq_rel() align(16) callconv(.Naked) void {
);
unreachable;
}
fn __aarch64_cas16_relax() align(16) callconv(.Naked) void {
fn __aarch64_cas16_relax() align(16) callconv(.naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
@@ -2050,7 +2050,7 @@ fn __aarch64_cas16_relax() align(16) callconv(.Naked) void {
);
unreachable;
}
fn __aarch64_cas16_acq() align(16) callconv(.Naked) void {
fn __aarch64_cas16_acq() align(16) callconv(.naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
@@ -2074,7 +2074,7 @@ fn __aarch64_cas16_acq() align(16) callconv(.Naked) void {
);
unreachable;
}
fn __aarch64_cas16_rel() align(16) callconv(.Naked) void {
fn __aarch64_cas16_rel() align(16) callconv(.naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
@@ -2098,7 +2098,7 @@ fn __aarch64_cas16_rel() align(16) callconv(.Naked) void {
);
unreachable;
}
fn __aarch64_cas16_acq_rel() align(16) callconv(.Naked) void {
fn __aarch64_cas16_acq_rel() align(16) callconv(.naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f

View File

@@ -7,6 +7,6 @@ comptime {
@export(&__absvdi2, .{ .name = "__absvdi2", .linkage = common.linkage, .visibility = common.visibility });
}
pub fn __absvdi2(a: i64) callconv(.C) i64 {
pub fn __absvdi2(a: i64) callconv(.c) i64 {
return absv(i64, a);
}

View File

@@ -7,6 +7,6 @@ comptime {
@export(&__absvsi2, .{ .name = "__absvsi2", .linkage = common.linkage, .visibility = common.visibility });
}
pub fn __absvsi2(a: i32) callconv(.C) i32 {
pub fn __absvsi2(a: i32) callconv(.c) i32 {
return absv(i32, a);
}

View File

@@ -7,6 +7,6 @@ comptime {
@export(&__absvti2, .{ .name = "__absvti2", .linkage = common.linkage, .visibility = common.visibility });
}
pub fn __absvti2(a: i128) callconv(.C) i128 {
pub fn __absvti2(a: i128) callconv(.c) i128 {
return absv(i128, a);
}

View File

@@ -11,10 +11,10 @@ comptime {
}
}
fn __adddf3(a: f64, b: f64) callconv(.C) f64 {
fn __adddf3(a: f64, b: f64) callconv(.c) f64 {
return addf3(f64, a, b);
}
fn __aeabi_dadd(a: f64, b: f64) callconv(.AAPCS) f64 {
fn __aeabi_dadd(a: f64, b: f64) callconv(.{ .arm_aapcs = .{} }) f64 {
return addf3(f64, a, b);
}

View File

@@ -7,6 +7,6 @@ comptime {
@export(&__addhf3, .{ .name = "__addhf3", .linkage = common.linkage, .visibility = common.visibility });
}
fn __addhf3(a: f16, b: f16) callconv(.C) f16 {
fn __addhf3(a: f16, b: f16) callconv(.c) f16 {
return addf3(f16, a, b);
}

View File

@@ -31,13 +31,13 @@ inline fn addoXi4_generic(comptime ST: type, a: ST, b: ST, overflow: *c_int) ST
return sum;
}
pub fn __addosi4(a: i32, b: i32, overflow: *c_int) callconv(.C) i32 {
pub fn __addosi4(a: i32, b: i32, overflow: *c_int) callconv(.c) i32 {
return addoXi4_generic(i32, a, b, overflow);
}
pub fn __addodi4(a: i64, b: i64, overflow: *c_int) callconv(.C) i64 {
pub fn __addodi4(a: i64, b: i64, overflow: *c_int) callconv(.c) i64 {
return addoXi4_generic(i64, a, b, overflow);
}
pub fn __addoti4(a: i128, b: i128, overflow: *c_int) callconv(.C) i128 {
pub fn __addoti4(a: i128, b: i128, overflow: *c_int) callconv(.c) i128 {
return addoXi4_generic(i128, a, b, overflow);
}

View File

@@ -11,10 +11,10 @@ comptime {
}
}
fn __addsf3(a: f32, b: f32) callconv(.C) f32 {
fn __addsf3(a: f32, b: f32) callconv(.c) f32 {
return addf3(f32, a, b);
}
fn __aeabi_fadd(a: f32, b: f32) callconv(.AAPCS) f32 {
fn __aeabi_fadd(a: f32, b: f32) callconv(.{ .arm_aapcs = .{} }) f32 {
return addf3(f32, a, b);
}

View File

@@ -12,10 +12,10 @@ comptime {
@export(&__addtf3, .{ .name = "__addtf3", .linkage = common.linkage, .visibility = common.visibility });
}
pub fn __addtf3(a: f128, b: f128) callconv(.C) f128 {
pub fn __addtf3(a: f128, b: f128) callconv(.c) f128 {
return addf3(f128, a, b);
}
fn _Qp_add(c: *f128, a: *f128, b: *f128) callconv(.C) void {
fn _Qp_add(c: *f128, a: *f128, b: *f128) callconv(.c) void {
c.* = addf3(f128, a.*, b.*);
}

View File

@@ -7,6 +7,6 @@ comptime {
@export(&__addxf3, .{ .name = "__addxf3", .linkage = common.linkage, .visibility = common.visibility });
}
pub fn __addxf3(a: f80, b: f80) callconv(.C) f80 {
pub fn __addxf3(a: f80, b: f80) callconv(.c) f80 {
return addf3(f80, a, b);
}

View File

@@ -57,67 +57,67 @@ extern fn memset(dest: ?[*]u8, c: i32, n: usize) ?[*]u8;
extern fn memcpy(noalias dest: ?[*]u8, noalias src: ?[*]const u8, n: usize) ?[*]u8;
extern fn memmove(dest: ?[*]u8, src: ?[*]const u8, n: usize) ?[*]u8;
pub fn __aeabi_memcpy(dest: [*]u8, src: [*]u8, n: usize) callconv(.AAPCS) void {
pub fn __aeabi_memcpy(dest: [*]u8, src: [*]u8, n: usize) callconv(.{ .arm_aapcs = .{} }) void {
@setRuntimeSafety(false);
_ = memcpy(dest, src, n);
}
pub fn __aeabi_memcpy4(dest: [*]u8, src: [*]u8, n: usize) callconv(.AAPCS) void {
pub fn __aeabi_memcpy4(dest: [*]u8, src: [*]u8, n: usize) callconv(.{ .arm_aapcs = .{} }) void {
@setRuntimeSafety(false);
_ = memcpy(dest, src, n);
}
pub fn __aeabi_memcpy8(dest: [*]u8, src: [*]u8, n: usize) callconv(.AAPCS) void {
pub fn __aeabi_memcpy8(dest: [*]u8, src: [*]u8, n: usize) callconv(.{ .arm_aapcs = .{} }) void {
@setRuntimeSafety(false);
_ = memcpy(dest, src, n);
}
pub fn __aeabi_memmove(dest: [*]u8, src: [*]u8, n: usize) callconv(.AAPCS) void {
pub fn __aeabi_memmove(dest: [*]u8, src: [*]u8, n: usize) callconv(.{ .arm_aapcs = .{} }) void {
@setRuntimeSafety(false);
_ = memmove(dest, src, n);
}
pub fn __aeabi_memmove4(dest: [*]u8, src: [*]u8, n: usize) callconv(.AAPCS) void {
pub fn __aeabi_memmove4(dest: [*]u8, src: [*]u8, n: usize) callconv(.{ .arm_aapcs = .{} }) void {
@setRuntimeSafety(false);
_ = memmove(dest, src, n);
}
pub fn __aeabi_memmove8(dest: [*]u8, src: [*]u8, n: usize) callconv(.AAPCS) void {
pub fn __aeabi_memmove8(dest: [*]u8, src: [*]u8, n: usize) callconv(.{ .arm_aapcs = .{} }) void {
@setRuntimeSafety(false);
_ = memmove(dest, src, n);
}
pub fn __aeabi_memset(dest: [*]u8, n: usize, c: i32) callconv(.AAPCS) void {
pub fn __aeabi_memset(dest: [*]u8, n: usize, c: i32) callconv(.{ .arm_aapcs = .{} }) void {
@setRuntimeSafety(false);
// This is identical to the standard `memset` definition but with the last
// two arguments swapped
_ = memset(dest, c, n);
}
pub fn __aeabi_memset4(dest: [*]u8, n: usize, c: i32) callconv(.AAPCS) void {
pub fn __aeabi_memset4(dest: [*]u8, n: usize, c: i32) callconv(.{ .arm_aapcs = .{} }) void {
@setRuntimeSafety(false);
_ = memset(dest, c, n);
}
pub fn __aeabi_memset8(dest: [*]u8, n: usize, c: i32) callconv(.AAPCS) void {
pub fn __aeabi_memset8(dest: [*]u8, n: usize, c: i32) callconv(.{ .arm_aapcs = .{} }) void {
@setRuntimeSafety(false);
_ = memset(dest, c, n);
}
pub fn __aeabi_memclr(dest: [*]u8, n: usize) callconv(.AAPCS) void {
pub fn __aeabi_memclr(dest: [*]u8, n: usize) callconv(.{ .arm_aapcs = .{} }) void {
@setRuntimeSafety(false);
_ = memset(dest, 0, n);
}
pub fn __aeabi_memclr4(dest: [*]u8, n: usize) callconv(.AAPCS) void {
pub fn __aeabi_memclr4(dest: [*]u8, n: usize) callconv(.{ .arm_aapcs = .{} }) void {
@setRuntimeSafety(false);
_ = memset(dest, 0, n);
}
pub fn __aeabi_memclr8(dest: [*]u8, n: usize) callconv(.AAPCS) void {
pub fn __aeabi_memclr8(dest: [*]u8, n: usize) callconv(.{ .arm_aapcs = .{} }) void {
@setRuntimeSafety(false);
_ = memset(dest, 0, n);
}
// Dummy functions to avoid errors during the linking phase
pub fn __aeabi_unwind_cpp_pr0() callconv(.AAPCS) void {}
pub fn __aeabi_unwind_cpp_pr1() callconv(.AAPCS) void {}
pub fn __aeabi_unwind_cpp_pr2() callconv(.AAPCS) void {}
pub fn __aeabi_unwind_cpp_pr0() callconv(.{ .arm_aapcs = .{} }) void {}
pub fn __aeabi_unwind_cpp_pr1() callconv(.{ .arm_aapcs = .{} }) void {}
pub fn __aeabi_unwind_cpp_pr2() callconv(.{ .arm_aapcs = .{} }) void {}
// This function can only clobber r0 according to the ABI
pub fn __aeabi_read_tp() callconv(.Naked) void {
pub fn __aeabi_read_tp() callconv(.naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ mrc p15, 0, r0, c13, c0, 3
@@ -129,7 +129,7 @@ pub fn __aeabi_read_tp() callconv(.Naked) void {
// The following functions are wrapped in an asm block to ensure the required
// calling convention is always respected
pub fn __aeabi_uidivmod() callconv(.Naked) void {
pub fn __aeabi_uidivmod() callconv(.naked) void {
@setRuntimeSafety(false);
// Divide r0 by r1; the quotient goes in r0, the remainder in r1
asm volatile (
@@ -147,7 +147,7 @@ pub fn __aeabi_uidivmod() callconv(.Naked) void {
unreachable;
}
pub fn __aeabi_uldivmod() callconv(.Naked) void {
pub fn __aeabi_uldivmod() callconv(.naked) void {
@setRuntimeSafety(false);
// Divide r1:r0 by r3:r2; the quotient goes in r1:r0, the remainder in r3:r2
asm volatile (
@@ -167,7 +167,7 @@ pub fn __aeabi_uldivmod() callconv(.Naked) void {
unreachable;
}
pub fn __aeabi_idivmod() callconv(.Naked) void {
pub fn __aeabi_idivmod() callconv(.naked) void {
@setRuntimeSafety(false);
// Divide r0 by r1; the quotient goes in r0, the remainder in r1
asm volatile (
@@ -185,7 +185,7 @@ pub fn __aeabi_idivmod() callconv(.Naked) void {
unreachable;
}
pub fn __aeabi_ldivmod() callconv(.Naked) void {
pub fn __aeabi_ldivmod() callconv(.naked) void {
@setRuntimeSafety(false);
// Divide r1:r0 by r3:r2; the quotient goes in r1:r0, the remainder in r3:r2
asm volatile (
@@ -207,12 +207,12 @@ pub fn __aeabi_ldivmod() callconv(.Naked) void {
// Float Arithmetic
fn __aeabi_frsub(a: f32, b: f32) callconv(.AAPCS) f32 {
fn __aeabi_frsub(a: f32, b: f32) callconv(.{ .arm_aapcs = .{} }) f32 {
const neg_a: f32 = @bitCast(@as(u32, @bitCast(a)) ^ (@as(u32, 1) << 31));
return b + neg_a;
}
fn __aeabi_drsub(a: f64, b: f64) callconv(.AAPCS) f64 {
fn __aeabi_drsub(a: f64, b: f64) callconv(.{ .arm_aapcs = .{} }) f64 {
const neg_a: f64 = @bitCast(@as(u64, @bitCast(a)) ^ (@as(u64, 1) << 63));
return b + neg_a;
}

View File

@@ -117,21 +117,21 @@ var spinlocks: SpinlockTable = SpinlockTable{};
// Generic version of GCC atomic builtin functions.
// Those work on any object no matter the pointer alignment nor its size.
fn __atomic_load(size: u32, src: [*]u8, dest: [*]u8, model: i32) callconv(.C) void {
fn __atomic_load(size: u32, src: [*]u8, dest: [*]u8, model: i32) callconv(.c) void {
_ = model;
var sl = spinlocks.get(@intFromPtr(src));
defer sl.release();
@memcpy(dest[0..size], src);
}
fn __atomic_store(size: u32, dest: [*]u8, src: [*]u8, model: i32) callconv(.C) void {
fn __atomic_store(size: u32, dest: [*]u8, src: [*]u8, model: i32) callconv(.c) void {
_ = model;
var sl = spinlocks.get(@intFromPtr(dest));
defer sl.release();
@memcpy(dest[0..size], src);
}
fn __atomic_exchange(size: u32, ptr: [*]u8, val: [*]u8, old: [*]u8, model: i32) callconv(.C) void {
fn __atomic_exchange(size: u32, ptr: [*]u8, val: [*]u8, old: [*]u8, model: i32) callconv(.c) void {
_ = model;
var sl = spinlocks.get(@intFromPtr(ptr));
defer sl.release();
@@ -146,7 +146,7 @@ fn __atomic_compare_exchange(
desired: [*]u8,
success: i32,
failure: i32,
) callconv(.C) i32 {
) callconv(.c) i32 {
_ = success;
_ = failure;
var sl = spinlocks.get(@intFromPtr(ptr));
@@ -176,23 +176,23 @@ inline fn atomic_load_N(comptime T: type, src: *T, model: i32) T {
}
}
fn __atomic_load_1(src: *u8, model: i32) callconv(.C) u8 {
fn __atomic_load_1(src: *u8, model: i32) callconv(.c) u8 {
return atomic_load_N(u8, src, model);
}
fn __atomic_load_2(src: *u16, model: i32) callconv(.C) u16 {
fn __atomic_load_2(src: *u16, model: i32) callconv(.c) u16 {
return atomic_load_N(u16, src, model);
}
fn __atomic_load_4(src: *u32, model: i32) callconv(.C) u32 {
fn __atomic_load_4(src: *u32, model: i32) callconv(.c) u32 {
return atomic_load_N(u32, src, model);
}
fn __atomic_load_8(src: *u64, model: i32) callconv(.C) u64 {
fn __atomic_load_8(src: *u64, model: i32) callconv(.c) u64 {
return atomic_load_N(u64, src, model);
}
fn __atomic_load_16(src: *u128, model: i32) callconv(.C) u128 {
fn __atomic_load_16(src: *u128, model: i32) callconv(.c) u128 {
return atomic_load_N(u128, src, model);
}
@@ -207,23 +207,23 @@ inline fn atomic_store_N(comptime T: type, dst: *T, value: T, model: i32) void {
}
}
fn __atomic_store_1(dst: *u8, value: u8, model: i32) callconv(.C) void {
fn __atomic_store_1(dst: *u8, value: u8, model: i32) callconv(.c) void {
return atomic_store_N(u8, dst, value, model);
}
fn __atomic_store_2(dst: *u16, value: u16, model: i32) callconv(.C) void {
fn __atomic_store_2(dst: *u16, value: u16, model: i32) callconv(.c) void {
return atomic_store_N(u16, dst, value, model);
}
fn __atomic_store_4(dst: *u32, value: u32, model: i32) callconv(.C) void {
fn __atomic_store_4(dst: *u32, value: u32, model: i32) callconv(.c) void {
return atomic_store_N(u32, dst, value, model);
}
fn __atomic_store_8(dst: *u64, value: u64, model: i32) callconv(.C) void {
fn __atomic_store_8(dst: *u64, value: u64, model: i32) callconv(.c) void {
return atomic_store_N(u64, dst, value, model);
}
fn __atomic_store_16(dst: *u128, value: u128, model: i32) callconv(.C) void {
fn __atomic_store_16(dst: *u128, value: u128, model: i32) callconv(.c) void {
return atomic_store_N(u128, dst, value, model);
}
@@ -274,23 +274,23 @@ inline fn atomic_exchange_N(comptime T: type, ptr: *T, val: T, model: i32) T {
}
}
fn __atomic_exchange_1(ptr: *u8, val: u8, model: i32) callconv(.C) u8 {
fn __atomic_exchange_1(ptr: *u8, val: u8, model: i32) callconv(.c) u8 {
return atomic_exchange_N(u8, ptr, val, model);
}
fn __atomic_exchange_2(ptr: *u16, val: u16, model: i32) callconv(.C) u16 {
fn __atomic_exchange_2(ptr: *u16, val: u16, model: i32) callconv(.c) u16 {
return atomic_exchange_N(u16, ptr, val, model);
}
fn __atomic_exchange_4(ptr: *u32, val: u32, model: i32) callconv(.C) u32 {
fn __atomic_exchange_4(ptr: *u32, val: u32, model: i32) callconv(.c) u32 {
return atomic_exchange_N(u32, ptr, val, model);
}
fn __atomic_exchange_8(ptr: *u64, val: u64, model: i32) callconv(.C) u64 {
fn __atomic_exchange_8(ptr: *u64, val: u64, model: i32) callconv(.c) u64 {
return atomic_exchange_N(u64, ptr, val, model);
}
fn __atomic_exchange_16(ptr: *u128, val: u128, model: i32) callconv(.C) u128 {
fn __atomic_exchange_16(ptr: *u128, val: u128, model: i32) callconv(.c) u128 {
return atomic_exchange_N(u128, ptr, val, model);
}
@@ -323,23 +323,23 @@ inline fn atomic_compare_exchange_N(
}
}
fn __atomic_compare_exchange_1(ptr: *u8, expected: *u8, desired: u8, success: i32, failure: i32) callconv(.C) i32 {
fn __atomic_compare_exchange_1(ptr: *u8, expected: *u8, desired: u8, success: i32, failure: i32) callconv(.c) i32 {
return atomic_compare_exchange_N(u8, ptr, expected, desired, success, failure);
}
fn __atomic_compare_exchange_2(ptr: *u16, expected: *u16, desired: u16, success: i32, failure: i32) callconv(.C) i32 {
fn __atomic_compare_exchange_2(ptr: *u16, expected: *u16, desired: u16, success: i32, failure: i32) callconv(.c) i32 {
return atomic_compare_exchange_N(u16, ptr, expected, desired, success, failure);
}
fn __atomic_compare_exchange_4(ptr: *u32, expected: *u32, desired: u32, success: i32, failure: i32) callconv(.C) i32 {
fn __atomic_compare_exchange_4(ptr: *u32, expected: *u32, desired: u32, success: i32, failure: i32) callconv(.c) i32 {
return atomic_compare_exchange_N(u32, ptr, expected, desired, success, failure);
}
fn __atomic_compare_exchange_8(ptr: *u64, expected: *u64, desired: u64, success: i32, failure: i32) callconv(.C) i32 {
fn __atomic_compare_exchange_8(ptr: *u64, expected: *u64, desired: u64, success: i32, failure: i32) callconv(.c) i32 {
return atomic_compare_exchange_N(u64, ptr, expected, desired, success, failure);
}
fn __atomic_compare_exchange_16(ptr: *u128, expected: *u128, desired: u128, success: i32, failure: i32) callconv(.C) i32 {
fn __atomic_compare_exchange_16(ptr: *u128, expected: *u128, desired: u128, success: i32, failure: i32) callconv(.c) i32 {
return atomic_compare_exchange_N(u128, ptr, expected, desired, success, failure);
}
@@ -376,163 +376,163 @@ inline fn fetch_op_N(comptime T: type, comptime op: std.builtin.AtomicRmwOp, ptr
return @atomicRmw(T, ptr, op, val, .seq_cst);
}
fn __atomic_fetch_add_1(ptr: *u8, val: u8, model: i32) callconv(.C) u8 {
fn __atomic_fetch_add_1(ptr: *u8, val: u8, model: i32) callconv(.c) u8 {
return fetch_op_N(u8, .Add, ptr, val, model);
}
fn __atomic_fetch_add_2(ptr: *u16, val: u16, model: i32) callconv(.C) u16 {
fn __atomic_fetch_add_2(ptr: *u16, val: u16, model: i32) callconv(.c) u16 {
return fetch_op_N(u16, .Add, ptr, val, model);
}
fn __atomic_fetch_add_4(ptr: *u32, val: u32, model: i32) callconv(.C) u32 {
fn __atomic_fetch_add_4(ptr: *u32, val: u32, model: i32) callconv(.c) u32 {
return fetch_op_N(u32, .Add, ptr, val, model);
}
fn __atomic_fetch_add_8(ptr: *u64, val: u64, model: i32) callconv(.C) u64 {
fn __atomic_fetch_add_8(ptr: *u64, val: u64, model: i32) callconv(.c) u64 {
return fetch_op_N(u64, .Add, ptr, val, model);
}
fn __atomic_fetch_add_16(ptr: *u128, val: u128, model: i32) callconv(.C) u128 {
fn __atomic_fetch_add_16(ptr: *u128, val: u128, model: i32) callconv(.c) u128 {
return fetch_op_N(u128, .Add, ptr, val, model);
}
fn __atomic_fetch_sub_1(ptr: *u8, val: u8, model: i32) callconv(.C) u8 {
fn __atomic_fetch_sub_1(ptr: *u8, val: u8, model: i32) callconv(.c) u8 {
return fetch_op_N(u8, .Sub, ptr, val, model);
}
fn __atomic_fetch_sub_2(ptr: *u16, val: u16, model: i32) callconv(.C) u16 {
fn __atomic_fetch_sub_2(ptr: *u16, val: u16, model: i32) callconv(.c) u16 {
return fetch_op_N(u16, .Sub, ptr, val, model);
}
fn __atomic_fetch_sub_4(ptr: *u32, val: u32, model: i32) callconv(.C) u32 {
fn __atomic_fetch_sub_4(ptr: *u32, val: u32, model: i32) callconv(.c) u32 {
return fetch_op_N(u32, .Sub, ptr, val, model);
}
fn __atomic_fetch_sub_8(ptr: *u64, val: u64, model: i32) callconv(.C) u64 {
fn __atomic_fetch_sub_8(ptr: *u64, val: u64, model: i32) callconv(.c) u64 {
return fetch_op_N(u64, .Sub, ptr, val, model);
}
fn __atomic_fetch_sub_16(ptr: *u128, val: u128, model: i32) callconv(.C) u128 {
fn __atomic_fetch_sub_16(ptr: *u128, val: u128, model: i32) callconv(.c) u128 {
return fetch_op_N(u128, .Sub, ptr, val, model);
}
fn __atomic_fetch_and_1(ptr: *u8, val: u8, model: i32) callconv(.C) u8 {
fn __atomic_fetch_and_1(ptr: *u8, val: u8, model: i32) callconv(.c) u8 {
return fetch_op_N(u8, .And, ptr, val, model);
}
fn __atomic_fetch_and_2(ptr: *u16, val: u16, model: i32) callconv(.C) u16 {
fn __atomic_fetch_and_2(ptr: *u16, val: u16, model: i32) callconv(.c) u16 {
return fetch_op_N(u16, .And, ptr, val, model);
}
fn __atomic_fetch_and_4(ptr: *u32, val: u32, model: i32) callconv(.C) u32 {
fn __atomic_fetch_and_4(ptr: *u32, val: u32, model: i32) callconv(.c) u32 {
return fetch_op_N(u32, .And, ptr, val, model);
}
fn __atomic_fetch_and_8(ptr: *u64, val: u64, model: i32) callconv(.C) u64 {
fn __atomic_fetch_and_8(ptr: *u64, val: u64, model: i32) callconv(.c) u64 {
return fetch_op_N(u64, .And, ptr, val, model);
}
fn __atomic_fetch_and_16(ptr: *u128, val: u128, model: i32) callconv(.C) u128 {
fn __atomic_fetch_and_16(ptr: *u128, val: u128, model: i32) callconv(.c) u128 {
return fetch_op_N(u128, .And, ptr, val, model);
}
fn __atomic_fetch_or_1(ptr: *u8, val: u8, model: i32) callconv(.C) u8 {
fn __atomic_fetch_or_1(ptr: *u8, val: u8, model: i32) callconv(.c) u8 {
return fetch_op_N(u8, .Or, ptr, val, model);
}
fn __atomic_fetch_or_2(ptr: *u16, val: u16, model: i32) callconv(.C) u16 {
fn __atomic_fetch_or_2(ptr: *u16, val: u16, model: i32) callconv(.c) u16 {
return fetch_op_N(u16, .Or, ptr, val, model);
}
fn __atomic_fetch_or_4(ptr: *u32, val: u32, model: i32) callconv(.C) u32 {
fn __atomic_fetch_or_4(ptr: *u32, val: u32, model: i32) callconv(.c) u32 {
return fetch_op_N(u32, .Or, ptr, val, model);
}
fn __atomic_fetch_or_8(ptr: *u64, val: u64, model: i32) callconv(.C) u64 {
fn __atomic_fetch_or_8(ptr: *u64, val: u64, model: i32) callconv(.c) u64 {
return fetch_op_N(u64, .Or, ptr, val, model);
}
fn __atomic_fetch_or_16(ptr: *u128, val: u128, model: i32) callconv(.C) u128 {
fn __atomic_fetch_or_16(ptr: *u128, val: u128, model: i32) callconv(.c) u128 {
return fetch_op_N(u128, .Or, ptr, val, model);
}
fn __atomic_fetch_xor_1(ptr: *u8, val: u8, model: i32) callconv(.C) u8 {
fn __atomic_fetch_xor_1(ptr: *u8, val: u8, model: i32) callconv(.c) u8 {
return fetch_op_N(u8, .Xor, ptr, val, model);
}
fn __atomic_fetch_xor_2(ptr: *u16, val: u16, model: i32) callconv(.C) u16 {
fn __atomic_fetch_xor_2(ptr: *u16, val: u16, model: i32) callconv(.c) u16 {
return fetch_op_N(u16, .Xor, ptr, val, model);
}
fn __atomic_fetch_xor_4(ptr: *u32, val: u32, model: i32) callconv(.C) u32 {
fn __atomic_fetch_xor_4(ptr: *u32, val: u32, model: i32) callconv(.c) u32 {
return fetch_op_N(u32, .Xor, ptr, val, model);
}
fn __atomic_fetch_xor_8(ptr: *u64, val: u64, model: i32) callconv(.C) u64 {
fn __atomic_fetch_xor_8(ptr: *u64, val: u64, model: i32) callconv(.c) u64 {
return fetch_op_N(u64, .Xor, ptr, val, model);
}
fn __atomic_fetch_xor_16(ptr: *u128, val: u128, model: i32) callconv(.C) u128 {
fn __atomic_fetch_xor_16(ptr: *u128, val: u128, model: i32) callconv(.c) u128 {
return fetch_op_N(u128, .Xor, ptr, val, model);
}
fn __atomic_fetch_nand_1(ptr: *u8, val: u8, model: i32) callconv(.C) u8 {
fn __atomic_fetch_nand_1(ptr: *u8, val: u8, model: i32) callconv(.c) u8 {
return fetch_op_N(u8, .Nand, ptr, val, model);
}
fn __atomic_fetch_nand_2(ptr: *u16, val: u16, model: i32) callconv(.C) u16 {
fn __atomic_fetch_nand_2(ptr: *u16, val: u16, model: i32) callconv(.c) u16 {
return fetch_op_N(u16, .Nand, ptr, val, model);
}
fn __atomic_fetch_nand_4(ptr: *u32, val: u32, model: i32) callconv(.C) u32 {
fn __atomic_fetch_nand_4(ptr: *u32, val: u32, model: i32) callconv(.c) u32 {
return fetch_op_N(u32, .Nand, ptr, val, model);
}
fn __atomic_fetch_nand_8(ptr: *u64, val: u64, model: i32) callconv(.C) u64 {
fn __atomic_fetch_nand_8(ptr: *u64, val: u64, model: i32) callconv(.c) u64 {
return fetch_op_N(u64, .Nand, ptr, val, model);
}
fn __atomic_fetch_nand_16(ptr: *u128, val: u128, model: i32) callconv(.C) u128 {
fn __atomic_fetch_nand_16(ptr: *u128, val: u128, model: i32) callconv(.c) u128 {
return fetch_op_N(u128, .Nand, ptr, val, model);
}
fn __atomic_fetch_umax_1(ptr: *u8, val: u8, model: i32) callconv(.C) u8 {
fn __atomic_fetch_umax_1(ptr: *u8, val: u8, model: i32) callconv(.c) u8 {
return fetch_op_N(u8, .Max, ptr, val, model);
}
fn __atomic_fetch_umax_2(ptr: *u16, val: u16, model: i32) callconv(.C) u16 {
fn __atomic_fetch_umax_2(ptr: *u16, val: u16, model: i32) callconv(.c) u16 {
return fetch_op_N(u16, .Max, ptr, val, model);
}
fn __atomic_fetch_umax_4(ptr: *u32, val: u32, model: i32) callconv(.C) u32 {
fn __atomic_fetch_umax_4(ptr: *u32, val: u32, model: i32) callconv(.c) u32 {
return fetch_op_N(u32, .Max, ptr, val, model);
}
fn __atomic_fetch_umax_8(ptr: *u64, val: u64, model: i32) callconv(.C) u64 {
fn __atomic_fetch_umax_8(ptr: *u64, val: u64, model: i32) callconv(.c) u64 {
return fetch_op_N(u64, .Max, ptr, val, model);
}
fn __atomic_fetch_umax_16(ptr: *u128, val: u128, model: i32) callconv(.C) u128 {
fn __atomic_fetch_umax_16(ptr: *u128, val: u128, model: i32) callconv(.c) u128 {
return fetch_op_N(u128, .Max, ptr, val, model);
}
fn __atomic_fetch_umin_1(ptr: *u8, val: u8, model: i32) callconv(.C) u8 {
fn __atomic_fetch_umin_1(ptr: *u8, val: u8, model: i32) callconv(.c) u8 {
return fetch_op_N(u8, .Min, ptr, val, model);
}
fn __atomic_fetch_umin_2(ptr: *u16, val: u16, model: i32) callconv(.C) u16 {
fn __atomic_fetch_umin_2(ptr: *u16, val: u16, model: i32) callconv(.c) u16 {
return fetch_op_N(u16, .Min, ptr, val, model);
}
fn __atomic_fetch_umin_4(ptr: *u32, val: u32, model: i32) callconv(.C) u32 {
fn __atomic_fetch_umin_4(ptr: *u32, val: u32, model: i32) callconv(.c) u32 {
return fetch_op_N(u32, .Min, ptr, val, model);
}
fn __atomic_fetch_umin_8(ptr: *u64, val: u64, model: i32) callconv(.C) u64 {
fn __atomic_fetch_umin_8(ptr: *u64, val: u64, model: i32) callconv(.c) u64 {
return fetch_op_N(u64, .Min, ptr, val, model);
}
fn __atomic_fetch_umin_16(ptr: *u128, val: u128, model: i32) callconv(.C) u128 {
fn __atomic_fetch_umin_16(ptr: *u128, val: u128, model: i32) callconv(.c) u128 {
return fetch_op_N(u128, .Min, ptr, val, model);
}
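
For context (an aside, not part of this commit): these symbols implement the libatomic call ABI that GCC and Clang fall back to when an atomic builtin cannot be lowered inline. A hedged sketch of the kind of access that reaches the generic `__atomic_load` defined above (the struct and values are illustrative; 5 is the C memory-model constant for seq_cst):

test "generic atomic load (illustrative)" {
    const Big = extern struct { a: u64, b: u64, c: u64 };
    var src: Big = .{ .a = 1, .b = 2, .c = 3 };
    var dest: Big = undefined;
    // A 24-byte object has no __atomic_load_N variant, so compilers emit
    // the generic, spinlock-guarded form instead.
    __atomic_load(@sizeOf(Big), @ptrCast(&src), @ptrCast(&dest), 5);
}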

View File

@@ -15,7 +15,7 @@ comptime {
}
}
pub fn _alldiv(a: i64, b: i64) callconv(.Stdcall) i64 {
pub fn _alldiv(a: i64, b: i64) callconv(.{ .x86_stdcall = .{} }) i64 {
const s_a = a >> (64 - 1);
const s_b = b >> (64 - 1);
@@ -27,7 +27,7 @@ pub fn _alldiv(a: i64, b: i64) callconv(.Stdcall) i64 {
return (@as(i64, @bitCast(r)) ^ s) -% s;
}
pub fn _aulldiv() callconv(.Naked) void {
pub fn _aulldiv() callconv(.naked) void {
@setRuntimeSafety(false);
// The stack layout is:

View File

@@ -15,7 +15,7 @@ comptime {
}
}
pub fn _allrem(a: i64, b: i64) callconv(.Stdcall) i64 {
pub fn _allrem(a: i64, b: i64) callconv(.{ .x86_stdcall = .{} }) i64 {
const s_a = a >> (64 - 1);
const s_b = b >> (64 - 1);
@@ -27,7 +27,7 @@ pub fn _allrem(a: i64, b: i64) callconv(.Stdcall) i64 {
return (@as(i64, @bitCast(r)) ^ s) -% s;
}
pub fn _aullrem() callconv(.Naked) void {
pub fn _aullrem() callconv(.naked) void {
@setRuntimeSafety(false);
// The stack layout is:

View File

@@ -5,7 +5,7 @@ comptime {
@export(&bcmp, .{ .name = "bcmp", .linkage = common.linkage, .visibility = common.visibility });
}
pub fn bcmp(vl: [*]allowzero const u8, vr: [*]allowzero const u8, n: usize) callconv(.C) c_int {
pub fn bcmp(vl: [*]allowzero const u8, vr: [*]allowzero const u8, n: usize) callconv(.c) c_int {
@setRuntimeSafety(false);
var index: usize = 0;

View File

@@ -46,15 +46,15 @@ inline fn bitreverseXi2(comptime T: type, a: T) T {
}
}
pub fn __bitreversesi2(a: u32) callconv(.C) u32 {
pub fn __bitreversesi2(a: u32) callconv(.c) u32 {
return bitreverseXi2(u32, a);
}
pub fn __bitreversedi2(a: u64) callconv(.C) u64 {
pub fn __bitreversedi2(a: u64) callconv(.c) u64 {
return bitreverseXi2(u64, a);
}
pub fn __bitreverseti2(a: u128) callconv(.C) u128 {
pub fn __bitreverseti2(a: u128) callconv(.c) u128 {
return bitreverseXi2(u128, a);
}
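
A hedged sanity check of the 32-bit helper (assuming __bitreversesi2 is in scope of a test in this file):

test "__bitreversesi2 mirrors bit order" {
    const testing = @import("std").testing;
    // The lowest bit mirrored becomes the highest; 0x12345678 is a
    // classic test vector whose bit reversal is 0x1E6A2C48.
    try testing.expectEqual(@as(u32, 0x80000000), __bitreversesi2(1));
    try testing.expectEqual(@as(u32, 0x1E6A2C48), __bitreversesi2(0x12345678));
}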

View File

@ -66,15 +66,15 @@ inline fn bswapXi2(comptime T: type, a: T) T {
}
}
pub fn __bswapsi2(a: u32) callconv(.C) u32 {
pub fn __bswapsi2(a: u32) callconv(.c) u32 {
return bswapXi2(u32, a);
}
pub fn __bswapdi2(a: u64) callconv(.C) u64 {
pub fn __bswapdi2(a: u64) callconv(.c) u64 {
return bswapXi2(u64, a);
}
pub fn __bswapti2(a: u128) callconv(.C) u128 {
pub fn __bswapti2(a: u128) callconv(.c) u128 {
return bswapXi2(u128, a);
}
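
Similarly hedged, for the byte-swap helper (assuming __bswapsi2 is in scope):

test "__bswapsi2 reverses byte order" {
    const testing = @import("std").testing;
    // Byte (not bit) granularity: 0x12 0x34 0x56 0x78 -> 0x78 0x56 0x34 0x12.
    try testing.expectEqual(@as(u32, 0x78563412), __bswapsi2(0x12345678));
}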

View File

@ -26,12 +26,12 @@ comptime {
@export(&ceill, .{ .name = "ceill", .linkage = common.linkage, .visibility = common.visibility });
}
pub fn __ceilh(x: f16) callconv(.C) f16 {
pub fn __ceilh(x: f16) callconv(.c) f16 {
// TODO: more efficient implementation
return @floatCast(ceilf(x));
}
pub fn ceilf(x: f32) callconv(.C) f32 {
pub fn ceilf(x: f32) callconv(.c) f32 {
var u: u32 = @bitCast(x);
const e = @as(i32, @intCast((u >> 23) & 0xFF)) - 0x7F;
var m: u32 = undefined;
@ -64,7 +64,7 @@ pub fn ceilf(x: f32) callconv(.C) f32 {
}
}
pub fn ceil(x: f64) callconv(.C) f64 {
pub fn ceil(x: f64) callconv(.c) f64 {
const f64_toint = 1.0 / math.floatEps(f64);
const u: u64 = @bitCast(x);
@ -95,12 +95,12 @@ pub fn ceil(x: f64) callconv(.C) f64 {
}
}
pub fn __ceilx(x: f80) callconv(.C) f80 {
pub fn __ceilx(x: f80) callconv(.c) f80 {
// TODO: more efficient implementation
return @floatCast(ceilq(x));
}
pub fn ceilq(x: f128) callconv(.C) f128 {
pub fn ceilq(x: f128) callconv(.c) f128 {
const f128_toint = 1.0 / math.floatEps(f128);
const u: u128 = @bitCast(x);
@ -129,7 +129,7 @@ pub fn ceilq(x: f128) callconv(.C) f128 {
}
}
pub fn ceill(x: c_longdouble) callconv(.C) c_longdouble {
pub fn ceill(x: c_longdouble) callconv(.c) c_longdouble {
switch (@typeInfo(c_longdouble).float.bits) {
16 => return __ceilh(x),
32 => return ceilf(x),

View File

@ -15,7 +15,7 @@ comptime {
_ = &clear_cache;
}
fn clear_cache(start: usize, end: usize) callconv(.C) void {
fn clear_cache(start: usize, end: usize) callconv(.c) void {
const x86 = switch (arch) {
.x86, .x86_64 => true,
else => false,

View File

@ -4,7 +4,7 @@ const testing = @import("std").testing;
fn test__clzsi2(a: u32, expected: i32) !void {
const nakedClzsi2 = clz.__clzsi2;
const actualClzsi2 = @as(*const fn (a: i32) callconv(.C) i32, @ptrCast(&nakedClzsi2));
const actualClzsi2 = @as(*const fn (a: i32) callconv(.c) i32, @ptrCast(&nakedClzsi2));
const x: i32 = @bitCast(a);
const result = actualClzsi2(x);
try testing.expectEqual(expected, result);

View File

@ -34,27 +34,27 @@ inline fn XcmpXi2(comptime T: type, a: T, b: T) i32 {
return cmp1 - cmp2 + 1;
}
pub fn __cmpsi2(a: i32, b: i32) callconv(.C) i32 {
pub fn __cmpsi2(a: i32, b: i32) callconv(.c) i32 {
return XcmpXi2(i32, a, b);
}
pub fn __cmpdi2(a: i64, b: i64) callconv(.C) i32 {
pub fn __cmpdi2(a: i64, b: i64) callconv(.c) i32 {
return XcmpXi2(i64, a, b);
}
pub fn __cmpti2(a: i128, b: i128) callconv(.C) i32 {
pub fn __cmpti2(a: i128, b: i128) callconv(.c) i32 {
return XcmpXi2(i128, a, b);
}
pub fn __ucmpsi2(a: u32, b: u32) callconv(.C) i32 {
pub fn __ucmpsi2(a: u32, b: u32) callconv(.c) i32 {
return XcmpXi2(u32, a, b);
}
pub fn __ucmpdi2(a: u64, b: u64) callconv(.C) i32 {
pub fn __ucmpdi2(a: u64, b: u64) callconv(.c) i32 {
return XcmpXi2(u64, a, b);
}
pub fn __ucmpti2(a: u128, b: u128) callconv(.C) i32 {
pub fn __ucmpti2(a: u128, b: u128) callconv(.c) i32 {
return XcmpXi2(u128, a, b);
}
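
The shared XcmpXi2 helper encodes the ordering as 0, 1, or 2 (less, equal, greater), matching the libgcc contract; a hedged sketch, assuming __cmpsi2 and __ucmpsi2 are in scope:

test "word comparison returns 0/1/2" {
    const testing = @import("std").testing;
    try testing.expectEqual(@as(i32, 0), __cmpsi2(-5, 3)); // a < b
    try testing.expectEqual(@as(i32, 1), __cmpsi2(7, 7)); // a == b
    try testing.expectEqual(@as(i32, 2), __ucmpsi2(9, 4)); // a > b
}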

View File

@ -25,44 +25,44 @@ comptime {
///
/// Note that this matches the definition of `__ledf2`, `__eqdf2`, `__nedf2`, `__cmpdf2`,
/// and `__ltdf2`.
fn __cmpdf2(a: f64, b: f64) callconv(.C) i32 {
fn __cmpdf2(a: f64, b: f64) callconv(.c) i32 {
return @intFromEnum(comparef.cmpf2(f64, comparef.LE, a, b));
}
/// "These functions return a value less than or equal to zero if neither argument is NaN,
/// and a is less than or equal to b."
pub fn __ledf2(a: f64, b: f64) callconv(.C) i32 {
pub fn __ledf2(a: f64, b: f64) callconv(.c) i32 {
return __cmpdf2(a, b);
}
/// "These functions return zero if neither argument is NaN, and a and b are equal."
/// Note that due to some kind of historical accident, __eqdf2 and __nedf2 are defined
/// to have the same return value.
pub fn __eqdf2(a: f64, b: f64) callconv(.C) i32 {
pub fn __eqdf2(a: f64, b: f64) callconv(.c) i32 {
return __cmpdf2(a, b);
}
/// "These functions return a nonzero value if either argument is NaN, or if a and b are unequal."
/// Note that due to some kind of historical accident, __eqdf2 and __nedf2 are defined
/// to have the same return value.
pub fn __nedf2(a: f64, b: f64) callconv(.C) i32 {
pub fn __nedf2(a: f64, b: f64) callconv(.c) i32 {
return __cmpdf2(a, b);
}
/// "These functions return a value less than zero if neither argument is NaN, and a
/// is strictly less than b."
pub fn __ltdf2(a: f64, b: f64) callconv(.C) i32 {
pub fn __ltdf2(a: f64, b: f64) callconv(.c) i32 {
return __cmpdf2(a, b);
}
fn __aeabi_dcmpeq(a: f64, b: f64) callconv(.AAPCS) i32 {
fn __aeabi_dcmpeq(a: f64, b: f64) callconv(.{ .arm_aapcs = .{} }) i32 {
return @intFromBool(comparef.cmpf2(f64, comparef.LE, a, b) == .Equal);
}
fn __aeabi_dcmplt(a: f64, b: f64) callconv(.AAPCS) i32 {
fn __aeabi_dcmplt(a: f64, b: f64) callconv(.{ .arm_aapcs = .{} }) i32 {
return @intFromBool(comparef.cmpf2(f64, comparef.LE, a, b) == .Less);
}
fn __aeabi_dcmple(a: f64, b: f64) callconv(.AAPCS) i32 {
fn __aeabi_dcmple(a: f64, b: f64) callconv(.{ .arm_aapcs = .{} }) i32 {
return @intFromBool(comparef.cmpf2(f64, comparef.LE, a, b) != .Greater);
}
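
Read together, the doc comments above pin down a single convention: negative means a < b, zero means equal, positive means a > b or unordered. A hedged sketch, assuming __ledf2, __eqdf2, and __ltdf2 are in scope:

test "double comparison convention" {
    const testing = @import("std").testing;
    try testing.expect(__ledf2(1.0, 2.0) < 0); // strictly less
    try testing.expect(__eqdf2(2.0, 2.0) == 0); // equal
    try testing.expect(__ltdf2(3.0, 2.0) > 0); // not less
}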

View File

@ -19,32 +19,32 @@ comptime {
///
/// Note that this matches the definition of `__lehf2`, `__eqhf2`, `__nehf2`, `__cmphf2`,
/// and `__lthf2`.
fn __cmphf2(a: f16, b: f16) callconv(.C) i32 {
fn __cmphf2(a: f16, b: f16) callconv(.c) i32 {
return @intFromEnum(comparef.cmpf2(f16, comparef.LE, a, b));
}
/// "These functions return a value less than or equal to zero if neither argument is NaN,
/// and a is less than or equal to b."
pub fn __lehf2(a: f16, b: f16) callconv(.C) i32 {
pub fn __lehf2(a: f16, b: f16) callconv(.c) i32 {
return __cmphf2(a, b);
}
/// "These functions return zero if neither argument is NaN, and a and b are equal."
/// Note that due to some kind of historical accident, __eqhf2 and __nehf2 are defined
/// to have the same return value.
pub fn __eqhf2(a: f16, b: f16) callconv(.C) i32 {
pub fn __eqhf2(a: f16, b: f16) callconv(.c) i32 {
return __cmphf2(a, b);
}
/// "These functions return a nonzero value if either argument is NaN, or if a and b are unequal."
/// Note that due to some kind of historical accident, __eqhf2 and __nehf2 are defined
/// to have the same return value.
pub fn __nehf2(a: f16, b: f16) callconv(.C) i32 {
pub fn __nehf2(a: f16, b: f16) callconv(.c) i32 {
return __cmphf2(a, b);
}
/// "These functions return a value less than zero if neither argument is NaN, and a
/// is strictly less than b."
pub fn __lthf2(a: f16, b: f16) callconv(.C) i32 {
pub fn __lthf2(a: f16, b: f16) callconv(.c) i32 {
return __cmphf2(a, b);
}

View File

@ -25,44 +25,44 @@ comptime {
///
/// Note that this matches the definition of `__lesf2`, `__eqsf2`, `__nesf2`, `__cmpsf2`,
/// and `__ltsf2`.
fn __cmpsf2(a: f32, b: f32) callconv(.C) i32 {
fn __cmpsf2(a: f32, b: f32) callconv(.c) i32 {
return @intFromEnum(comparef.cmpf2(f32, comparef.LE, a, b));
}
/// "These functions return a value less than or equal to zero if neither argument is NaN,
/// and a is less than or equal to b."
pub fn __lesf2(a: f32, b: f32) callconv(.C) i32 {
pub fn __lesf2(a: f32, b: f32) callconv(.c) i32 {
return __cmpsf2(a, b);
}
/// "These functions return zero if neither argument is NaN, and a and b are equal."
/// Note that due to some kind of historical accident, __eqsf2 and __nesf2 are defined
/// to have the same return value.
pub fn __eqsf2(a: f32, b: f32) callconv(.C) i32 {
pub fn __eqsf2(a: f32, b: f32) callconv(.c) i32 {
return __cmpsf2(a, b);
}
/// "These functions return a nonzero value if either argument is NaN, or if a and b are unequal."
/// Note that due to some kind of historical accident, __eqsf2 and __nesf2 are defined
/// to have the same return value.
pub fn __nesf2(a: f32, b: f32) callconv(.C) i32 {
pub fn __nesf2(a: f32, b: f32) callconv(.c) i32 {
return __cmpsf2(a, b);
}
/// "These functions return a value less than zero if neither argument is NaN, and a
/// is strictly less than b."
pub fn __ltsf2(a: f32, b: f32) callconv(.C) i32 {
pub fn __ltsf2(a: f32, b: f32) callconv(.c) i32 {
return __cmpsf2(a, b);
}
fn __aeabi_fcmpeq(a: f32, b: f32) callconv(.AAPCS) i32 {
fn __aeabi_fcmpeq(a: f32, b: f32) callconv(.{ .arm_aapcs = .{} }) i32 {
return @intFromBool(comparef.cmpf2(f32, comparef.LE, a, b) == .Equal);
}
fn __aeabi_fcmplt(a: f32, b: f32) callconv(.AAPCS) i32 {
fn __aeabi_fcmplt(a: f32, b: f32) callconv(.{ .arm_aapcs = .{} }) i32 {
return @intFromBool(comparef.cmpf2(f32, comparef.LE, a, b) == .Less);
}
fn __aeabi_fcmple(a: f32, b: f32) callconv(.AAPCS) i32 {
fn __aeabi_fcmple(a: f32, b: f32) callconv(.{ .arm_aapcs = .{} }) i32 {
return @intFromBool(comparef.cmpf2(f32, comparef.LE, a, b) != .Greater);
}

View File

@ -33,33 +33,33 @@ comptime {
///
/// Note that this matches the definition of `__letf2`, `__eqtf2`, `__netf2`, `__cmptf2`,
/// and `__lttf2`.
fn __cmptf2(a: f128, b: f128) callconv(.C) i32 {
fn __cmptf2(a: f128, b: f128) callconv(.c) i32 {
return @intFromEnum(comparef.cmpf2(f128, comparef.LE, a, b));
}
/// "These functions return a value less than or equal to zero if neither argument is NaN,
/// and a is less than or equal to b."
fn __letf2(a: f128, b: f128) callconv(.C) i32 {
fn __letf2(a: f128, b: f128) callconv(.c) i32 {
return __cmptf2(a, b);
}
/// "These functions return zero if neither argument is NaN, and a and b are equal."
/// Note that due to some kind of historical accident, __eqtf2 and __netf2 are defined
/// to have the same return value.
fn __eqtf2(a: f128, b: f128) callconv(.C) i32 {
fn __eqtf2(a: f128, b: f128) callconv(.c) i32 {
return __cmptf2(a, b);
}
/// "These functions return a nonzero value if either argument is NaN, or if a and b are unequal."
/// Note that due to some kind of historical accident, __eqtf2 and __netf2 are defined
/// to have the same return value.
fn __netf2(a: f128, b: f128) callconv(.C) i32 {
fn __netf2(a: f128, b: f128) callconv(.c) i32 {
return __cmptf2(a, b);
}
/// "These functions return a value less than zero if neither argument is NaN, and a
/// is strictly less than b."
fn __lttf2(a: f128, b: f128) callconv(.C) i32 {
fn __lttf2(a: f128, b: f128) callconv(.c) i32 {
return __cmptf2(a, b);
}
@ -70,34 +70,34 @@ const SparcFCMP = enum(i32) {
Unordered = 3,
};
fn _Qp_cmp(a: *const f128, b: *const f128) callconv(.C) i32 {
fn _Qp_cmp(a: *const f128, b: *const f128) callconv(.c) i32 {
return @intFromEnum(comparef.cmpf2(f128, SparcFCMP, a.*, b.*));
}
fn _Qp_feq(a: *const f128, b: *const f128) callconv(.C) bool {
fn _Qp_feq(a: *const f128, b: *const f128) callconv(.c) bool {
return @as(SparcFCMP, @enumFromInt(_Qp_cmp(a, b))) == .Equal;
}
fn _Qp_fne(a: *const f128, b: *const f128) callconv(.C) bool {
fn _Qp_fne(a: *const f128, b: *const f128) callconv(.c) bool {
return @as(SparcFCMP, @enumFromInt(_Qp_cmp(a, b))) != .Equal;
}
fn _Qp_flt(a: *const f128, b: *const f128) callconv(.C) bool {
fn _Qp_flt(a: *const f128, b: *const f128) callconv(.c) bool {
return @as(SparcFCMP, @enumFromInt(_Qp_cmp(a, b))) == .Less;
}
fn _Qp_fgt(a: *const f128, b: *const f128) callconv(.C) bool {
fn _Qp_fgt(a: *const f128, b: *const f128) callconv(.c) bool {
return @as(SparcFCMP, @enumFromInt(_Qp_cmp(a, b))) == .Greater;
}
fn _Qp_fge(a: *const f128, b: *const f128) callconv(.C) bool {
fn _Qp_fge(a: *const f128, b: *const f128) callconv(.c) bool {
return switch (@as(SparcFCMP, @enumFromInt(_Qp_cmp(a, b)))) {
.Equal, .Greater => true,
.Less, .Unordered => false,
};
}
fn _Qp_fle(a: *const f128, b: *const f128) callconv(.C) bool {
fn _Qp_fle(a: *const f128, b: *const f128) callconv(.c) bool {
return switch (@as(SparcFCMP, @enumFromInt(_Qp_cmp(a, b)))) {
.Equal, .Less => true,
.Greater, .Unordered => false,

View File

@ -19,32 +19,32 @@ comptime {
///
/// Note that this matches the definition of `__lexf2`, `__eqxf2`, `__nexf2`, `__cmpxf2`,
/// and `__ltxf2`.
fn __cmpxf2(a: f80, b: f80) callconv(.C) i32 {
fn __cmpxf2(a: f80, b: f80) callconv(.c) i32 {
return @intFromEnum(comparef.cmp_f80(comparef.LE, a, b));
}
/// "These functions return a value less than or equal to zero if neither argument is NaN,
/// and a is less than or equal to b."
fn __lexf2(a: f80, b: f80) callconv(.C) i32 {
fn __lexf2(a: f80, b: f80) callconv(.c) i32 {
return __cmpxf2(a, b);
}
/// "These functions return zero if neither argument is NaN, and a and b are equal."
/// Note that due to some kind of historical accident, __eqxf2 and __nexf2 are defined
/// to have the same return value.
fn __eqxf2(a: f80, b: f80) callconv(.C) i32 {
fn __eqxf2(a: f80, b: f80) callconv(.c) i32 {
return __cmpxf2(a, b);
}
/// "These functions return a nonzero value if either argument is NaN, or if a and b are unequal."
/// Note that due to some kind of historical accident, __eqxf2 and __nexf2 are defined
/// to have the same return value.
fn __nexf2(a: f80, b: f80) callconv(.C) i32 {
fn __nexf2(a: f80, b: f80) callconv(.c) i32 {
return __cmpxf2(a, b);
}
/// "These functions return a value less than zero if neither argument is NaN, and a
/// is strictly less than b."
fn __ltxf2(a: f80, b: f80) callconv(.C) i32 {
fn __ltxf2(a: f80, b: f80) callconv(.c) i32 {
return __cmpxf2(a, b);
}

View File

@ -22,12 +22,12 @@ comptime {
@export(&cosl, .{ .name = "cosl", .linkage = common.linkage, .visibility = common.visibility });
}
pub fn __cosh(a: f16) callconv(.C) f16 {
pub fn __cosh(a: f16) callconv(.c) f16 {
// TODO: more efficient implementation
return @floatCast(cosf(a));
}
pub fn cosf(x: f32) callconv(.C) f32 {
pub fn cosf(x: f32) callconv(.c) f32 {
// Small multiples of pi/2 rounded to double precision.
const c1pio2: f64 = 1.0 * math.pi / 2.0; // 0x3FF921FB, 0x54442D18
const c2pio2: f64 = 2.0 * math.pi / 2.0; // 0x400921FB, 0x54442D18
@ -84,7 +84,7 @@ pub fn cosf(x: f32) callconv(.C) f32 {
};
}
pub fn cos(x: f64) callconv(.C) f64 {
pub fn cos(x: f64) callconv(.c) f64 {
var ix = @as(u64, @bitCast(x)) >> 32;
ix &= 0x7fffffff;
@ -113,17 +113,17 @@ pub fn cos(x: f64) callconv(.C) f64 {
};
}
pub fn __cosx(a: f80) callconv(.C) f80 {
pub fn __cosx(a: f80) callconv(.c) f80 {
// TODO: more efficient implementation
return @floatCast(cosq(a));
}
pub fn cosq(a: f128) callconv(.C) f128 {
pub fn cosq(a: f128) callconv(.c) f128 {
// TODO: more correct implementation
return cos(@floatCast(a));
}
pub fn cosl(x: c_longdouble) callconv(.C) c_longdouble {
pub fn cosl(x: c_longdouble) callconv(.c) c_longdouble {
switch (@typeInfo(c_longdouble).float.bits) {
16 => return __cosh(x),
32 => return cosf(x),

View File

@ -52,7 +52,7 @@ inline fn clzXi2(comptime T: type, a: T) i32 {
return @intCast(n - @as(T, @bitCast(x)));
}
fn __clzsi2_thumb1() callconv(.Naked) void {
fn __clzsi2_thumb1() callconv(.naked) void {
@setRuntimeSafety(false);
// Similar to the generic version with the last two rounds replaced by a LUT
@ -86,7 +86,7 @@ fn __clzsi2_thumb1() callconv(.Naked) void {
unreachable;
}
fn __clzsi2_arm32() callconv(.Naked) void {
fn __clzsi2_arm32() callconv(.naked) void {
@setRuntimeSafety(false);
asm volatile (
@ -135,7 +135,7 @@ fn __clzsi2_arm32() callconv(.Naked) void {
unreachable;
}
fn clzsi2_generic(a: i32) callconv(.C) i32 {
fn clzsi2_generic(a: i32) callconv(.c) i32 {
return clzXi2(i32, a);
}
@ -159,11 +159,11 @@ pub const __clzsi2 = switch (builtin.cpu.arch) {
else => clzsi2_generic,
};
pub fn __clzdi2(a: i64) callconv(.C) i32 {
pub fn __clzdi2(a: i64) callconv(.c) i32 {
return clzXi2(i64, a);
}
pub fn __clzti2(a: i128) callconv(.C) i32 {
pub fn __clzti2(a: i128) callconv(.c) i32 {
return clzXi2(i128, a);
}
@ -190,15 +190,15 @@ inline fn ctzXi2(comptime T: type, a: T) i32 {
return @intCast(n - @as(T, @bitCast((x & 1))));
}
pub fn __ctzsi2(a: i32) callconv(.C) i32 {
pub fn __ctzsi2(a: i32) callconv(.c) i32 {
return ctzXi2(i32, a);
}
pub fn __ctzdi2(a: i64) callconv(.C) i32 {
pub fn __ctzdi2(a: i64) callconv(.c) i32 {
return ctzXi2(i64, a);
}
pub fn __ctzti2(a: i128) callconv(.C) i32 {
pub fn __ctzti2(a: i128) callconv(.c) i32 {
return ctzXi2(i128, a);
}
@ -222,15 +222,15 @@ inline fn ffsXi2(comptime T: type, a: T) i32 {
return @as(i32, @intCast(n - @as(T, @bitCast((x & 1))))) + 1;
}
pub fn __ffssi2(a: i32) callconv(.C) i32 {
pub fn __ffssi2(a: i32) callconv(.c) i32 {
return ffsXi2(i32, a);
}
pub fn __ffsdi2(a: i64) callconv(.C) i32 {
pub fn __ffsdi2(a: i64) callconv(.c) i32 {
return ffsXi2(i64, a);
}
pub fn __ffsti2(a: i128) callconv(.C) i32 {
pub fn __ffsti2(a: i128) callconv(.c) i32 {
return ffsXi2(i128, a);
}
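
Of the three families, ffs differs in being 1-based with 0 reserved for a zero argument; a hedged check, assuming __ffssi2 and __ctzsi2 are in scope:

test "ffs is 1-based, ctz is 0-based" {
    const testing = @import("std").testing;
    try testing.expectEqual(@as(i32, 0), __ffssi2(0));
    try testing.expectEqual(@as(i32, 8), __ffssi2(0x80));
    try testing.expectEqual(@as(i32, 7), __ctzsi2(0x80));
}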

View File

@ -17,7 +17,7 @@ test "divc3" {
try testDiv(f128, __divtc3);
}
fn testDiv(comptime T: type, comptime f: fn (T, T, T, T) callconv(.C) Complex(T)) !void {
fn testDiv(comptime T: type, comptime f: fn (T, T, T, T) callconv(.c) Complex(T)) !void {
{
const a: T = 1.0;
const b: T = 0.0;

View File

@ -8,6 +8,6 @@ comptime {
}
}
pub fn __divdc3(a: f64, b: f64, c: f64, d: f64) callconv(.C) Complex(f64) {
pub fn __divdc3(a: f64, b: f64, c: f64, d: f64) callconv(.c) Complex(f64) {
return divc3.divc3(f64, a, b, c, d);
}

View File

@ -21,11 +21,11 @@ comptime {
}
}
pub fn __divdf3(a: f64, b: f64) callconv(.C) f64 {
pub fn __divdf3(a: f64, b: f64) callconv(.c) f64 {
return div(a, b);
}
fn __aeabi_ddiv(a: f64, b: f64) callconv(.AAPCS) f64 {
fn __aeabi_ddiv(a: f64, b: f64) callconv(.{ .arm_aapcs = .{} }) f64 {
return div(a, b);
}

View File

@ -8,6 +8,6 @@ comptime {
}
}
pub fn __divhc3(a: f16, b: f16, c: f16, d: f16) callconv(.C) Complex(f16) {
pub fn __divhc3(a: f16, b: f16, c: f16, d: f16) callconv(.c) Complex(f16) {
return divc3.divc3(f16, a, b, c, d);
}

View File

@ -5,7 +5,7 @@ comptime {
@export(&__divhf3, .{ .name = "__divhf3", .linkage = common.linkage, .visibility = common.visibility });
}
pub fn __divhf3(a: f16, b: f16) callconv(.C) f16 {
pub fn __divhf3(a: f16, b: f16) callconv(.c) f16 {
// TODO: more efficient implementation
return @floatCast(divsf3.__divsf3(a, b));
}

View File

@ -33,7 +33,7 @@ fn divmod(q: ?[]u32, r: ?[]u32, u: []u32, v: []u32) !void {
if (r) |x| if (u_sign < 0) neg(x);
}
pub fn __divei4(r_q: [*]u32, u_p: [*]u32, v_p: [*]u32, bits: usize) callconv(.C) void {
pub fn __divei4(r_q: [*]u32, u_p: [*]u32, v_p: [*]u32, bits: usize) callconv(.c) void {
@setRuntimeSafety(builtin.is_test);
const u = u_p[0 .. std.math.divCeil(usize, bits, 32) catch unreachable];
const v = v_p[0 .. std.math.divCeil(usize, bits, 32) catch unreachable];
@ -41,7 +41,7 @@ pub fn __divei4(r_q: [*]u32, u_p: [*]u32, v_p: [*]u32, bits: usize) callconv(.C)
@call(.always_inline, divmod, .{ q, null, u, v }) catch unreachable;
}
pub fn __modei4(r_p: [*]u32, u_p: [*]u32, v_p: [*]u32, bits: usize) callconv(.C) void {
pub fn __modei4(r_p: [*]u32, u_p: [*]u32, v_p: [*]u32, bits: usize) callconv(.c) void {
@setRuntimeSafety(builtin.is_test);
const u = u_p[0 .. std.math.divCeil(usize, bits, 32) catch unreachable];
const v = v_p[0 .. std.math.divCeil(usize, bits, 32) catch unreachable];

View File

@ -8,6 +8,6 @@ comptime {
}
}
pub fn __divsc3(a: f32, b: f32, c: f32, d: f32) callconv(.C) Complex(f32) {
pub fn __divsc3(a: f32, b: f32, c: f32, d: f32) callconv(.c) Complex(f32) {
return divc3.divc3(f32, a, b, c, d);
}

View File

@ -19,11 +19,11 @@ comptime {
}
}
pub fn __divsf3(a: f32, b: f32) callconv(.C) f32 {
pub fn __divsf3(a: f32, b: f32) callconv(.c) f32 {
return div(a, b);
}
fn __aeabi_fdiv(a: f32, b: f32) callconv(.AAPCS) f32 {
fn __aeabi_fdiv(a: f32, b: f32) callconv(.{ .arm_aapcs = .{} }) f32 {
return div(a, b);
}

View File

@ -10,6 +10,6 @@ comptime {
}
}
pub fn __divtc3(a: f128, b: f128, c: f128, d: f128) callconv(.C) Complex(f128) {
pub fn __divtc3(a: f128, b: f128, c: f128, d: f128) callconv(.c) Complex(f128) {
return divc3.divc3(f128, a, b, c, d);
}

View File

@ -16,11 +16,11 @@ comptime {
@export(&__divtf3, .{ .name = "__divtf3", .linkage = common.linkage, .visibility = common.visibility });
}
pub fn __divtf3(a: f128, b: f128) callconv(.C) f128 {
pub fn __divtf3(a: f128, b: f128) callconv(.c) f128 {
return div(a, b);
}
fn _Qp_div(c: *f128, a: *const f128, b: *const f128) callconv(.C) void {
fn _Qp_div(c: *f128, a: *const f128, b: *const f128) callconv(.c) void {
c.* = div(a.*, b.*);
}

View File

@ -14,13 +14,13 @@ comptime {
}
}
pub fn __divti3(a: i128, b: i128) callconv(.C) i128 {
pub fn __divti3(a: i128, b: i128) callconv(.c) i128 {
return div(a, b);
}
const v128 = @Vector(2, u64);
fn __divti3_windows_x86_64(a: v128, b: v128) callconv(.C) v128 {
fn __divti3_windows_x86_64(a: v128, b: v128) callconv(.c) v128 {
return @bitCast(div(@bitCast(a), @bitCast(b)));
}
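
The Windows x86-64 variant exists because that ABI returns 128-bit integers in an SSE register, so the wrapper round-trips through @Vector(2, u64); a hedged illustration that the bitcast is lossless:

test "i128 <-> v128 round-trip" {
    const testing = @import("std").testing;
    const x: i128 = -0x1234_5678_9abc_def0;
    const v: v128 = @bitCast(x);
    try testing.expectEqual(x, @as(i128, @bitCast(v)));
}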

View File

@ -8,6 +8,6 @@ comptime {
}
}
pub fn __divxc3(a: f80, b: f80, c: f80, d: f80) callconv(.C) Complex(f80) {
pub fn __divxc3(a: f80, b: f80, c: f80, d: f80) callconv(.c) Complex(f80) {
return divc3.divc3(f80, a, b, c, d);
}

View File

@ -12,7 +12,7 @@ comptime {
@export(&__divxf3, .{ .name = "__divxf3", .linkage = common.linkage, .visibility = common.visibility });
}
pub fn __divxf3(a: f80, b: f80) callconv(.C) f80 {
pub fn __divxf3(a: f80, b: f80) callconv(.c) f80 {
const T = f80;
const Z = std.meta.Int(.unsigned, @bitSizeOf(T));

View File

@ -24,7 +24,7 @@ comptime {
}
/// Public entry point for generated code using EmulatedTLS.
pub fn __emutls_get_address(control: *emutls_control) callconv(.C) *anyopaque {
pub fn __emutls_get_address(control: *emutls_control) callconv(.c) *anyopaque {
return control.getPointer();
}
@ -191,7 +191,7 @@ const current_thread_storage = struct {
}
/// Invoked by the pthread-specific destructor; the passed argument is the ObjectArray pointer.
fn deinit(arrayPtr: *anyopaque) callconv(.C) void {
fn deinit(arrayPtr: *anyopaque) callconv(.c) void {
var array: *ObjectArray = @ptrCast(@alignCast(arrayPtr));
array.deinit();
}

View File

@ -26,12 +26,12 @@ comptime {
@export(&expl, .{ .name = "expl", .linkage = common.linkage, .visibility = common.visibility });
}
pub fn __exph(a: f16) callconv(.C) f16 {
pub fn __exph(a: f16) callconv(.c) f16 {
// TODO: more efficient implementation
return @floatCast(expf(a));
}
pub fn expf(x_: f32) callconv(.C) f32 {
pub fn expf(x_: f32) callconv(.c) f32 {
const half = [_]f32{ 0.5, -0.5 };
const ln2hi = 6.9314575195e-1;
const ln2lo = 1.4286067653e-6;
@ -106,7 +106,7 @@ pub fn expf(x_: f32) callconv(.C) f32 {
}
}
pub fn exp(x_: f64) callconv(.C) f64 {
pub fn exp(x_: f64) callconv(.c) f64 {
const half = [_]f64{ 0.5, -0.5 };
const ln2hi: f64 = 6.93147180369123816490e-01;
const ln2lo: f64 = 1.90821492927058770002e-10;
@ -190,17 +190,17 @@ pub fn exp(x_: f64) callconv(.C) f64 {
}
}
pub fn __expx(a: f80) callconv(.C) f80 {
pub fn __expx(a: f80) callconv(.c) f80 {
// TODO: more efficient implementation
return @floatCast(expq(a));
}
pub fn expq(a: f128) callconv(.C) f128 {
pub fn expq(a: f128) callconv(.c) f128 {
// TODO: more correct implementation
return exp(@floatCast(a));
}
pub fn expl(x: c_longdouble) callconv(.C) c_longdouble {
pub fn expl(x: c_longdouble) callconv(.c) c_longdouble {
switch (@typeInfo(c_longdouble).float.bits) {
16 => return __exph(x),
32 => return expf(x),

View File

@ -26,12 +26,12 @@ comptime {
@export(&exp2l, .{ .name = "exp2l", .linkage = common.linkage, .visibility = common.visibility });
}
pub fn __exp2h(x: f16) callconv(.C) f16 {
pub fn __exp2h(x: f16) callconv(.c) f16 {
// TODO: more efficient implementation
return @floatCast(exp2f(x));
}
pub fn exp2f(x: f32) callconv(.C) f32 {
pub fn exp2f(x: f32) callconv(.c) f32 {
const tblsiz: u32 = @intCast(exp2ft.len);
const redux: f32 = 0x1.8p23 / @as(f32, @floatFromInt(tblsiz));
const P1: f32 = 0x1.62e430p-1;
@ -88,7 +88,7 @@ pub fn exp2f(x: f32) callconv(.C) f32 {
return @floatCast(r * uk);
}
pub fn exp2(x: f64) callconv(.C) f64 {
pub fn exp2(x: f64) callconv(.c) f64 {
const tblsiz: u32 = @intCast(exp2dt.len / 2);
const redux: f64 = 0x1.8p52 / @as(f64, @floatFromInt(tblsiz));
const P1: f64 = 0x1.62e42fefa39efp-1;
@ -157,17 +157,17 @@ pub fn exp2(x: f64) callconv(.C) f64 {
return math.scalbn(r, ik);
}
pub fn __exp2x(x: f80) callconv(.C) f80 {
pub fn __exp2x(x: f80) callconv(.c) f80 {
// TODO: more efficient implementation
return @floatCast(exp2q(x));
}
pub fn exp2q(x: f128) callconv(.C) f128 {
pub fn exp2q(x: f128) callconv(.c) f128 {
// TODO: more correct implementation
return exp2(@floatCast(x));
}
pub fn exp2l(x: c_longdouble) callconv(.C) c_longdouble {
pub fn exp2l(x: c_longdouble) callconv(.c) c_longdouble {
switch (@typeInfo(c_longdouble).float.bits) {
16 => return __exp2h(x),
32 => return exp2f(x),

View File

@ -12,10 +12,10 @@ comptime {
@export(&__extenddftf2, .{ .name = "__extenddftf2", .linkage = common.linkage, .visibility = common.visibility });
}
pub fn __extenddftf2(a: f64) callconv(.C) f128 {
pub fn __extenddftf2(a: f64) callconv(.c) f128 {
return extendf(f128, f64, @as(u64, @bitCast(a)));
}
fn _Qp_dtoq(c: *f128, a: f64) callconv(.C) void {
fn _Qp_dtoq(c: *f128, a: f64) callconv(.c) void {
c.* = extendf(f128, f64, @as(u64, @bitCast(a)));
}

View File

@ -7,6 +7,6 @@ comptime {
@export(&__extenddfxf2, .{ .name = "__extenddfxf2", .linkage = common.linkage, .visibility = common.visibility });
}
pub fn __extenddfxf2(a: f64) callconv(.C) f80 {
pub fn __extenddfxf2(a: f64) callconv(.c) f80 {
return extend_f80(f64, @as(u64, @bitCast(a)));
}

View File

@ -7,6 +7,6 @@ comptime {
@export(&__extendhfdf2, .{ .name = "__extendhfdf2", .linkage = common.linkage, .visibility = common.visibility });
}
pub fn __extendhfdf2(a: common.F16T(f64)) callconv(.C) f64 {
pub fn __extendhfdf2(a: common.F16T(f64)) callconv(.c) f64 {
return extendf(f64, f16, @as(u16, @bitCast(a)));
}

View File

@ -12,14 +12,14 @@ comptime {
@export(&__extendhfsf2, .{ .name = "__extendhfsf2", .linkage = common.linkage, .visibility = common.visibility });
}
pub fn __extendhfsf2(a: common.F16T(f32)) callconv(.C) f32 {
pub fn __extendhfsf2(a: common.F16T(f32)) callconv(.c) f32 {
return extendf(f32, f16, @as(u16, @bitCast(a)));
}
fn __gnu_h2f_ieee(a: common.F16T(f32)) callconv(.C) f32 {
fn __gnu_h2f_ieee(a: common.F16T(f32)) callconv(.c) f32 {
return extendf(f32, f16, @as(u16, @bitCast(a)));
}
fn __aeabi_h2f(a: u16) callconv(.AAPCS) f32 {
fn __aeabi_h2f(a: u16) callconv(.{ .arm_aapcs = .{} }) f32 {
return extendf(f32, f16, @as(u16, @bitCast(a)));
}
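
F16T selects the ABI representation of f16 per target (an integer on some, a real f16 on others), which is why the parameter is bitcast before widening. A hedged illustration of the semantics themselves, exercising the language-level widening rather than the ABI wrapper:

test "f16 extends exactly to f32" {
    const testing = @import("std").testing;
    const h: f16 = 0.15625; // exactly representable in f16
    const s: f32 = h; // widening float coercion is implicit and exact
    try testing.expectEqual(@as(f32, 0.15625), s);
}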

View File

@ -7,6 +7,6 @@ comptime {
@export(&__extendhftf2, .{ .name = "__extendhftf2", .linkage = common.linkage, .visibility = common.visibility });
}
pub fn __extendhftf2(a: common.F16T(f128)) callconv(.C) f128 {
pub fn __extendhftf2(a: common.F16T(f128)) callconv(.c) f128 {
return extendf(f128, f16, @as(u16, @bitCast(a)));
}

View File

@ -7,6 +7,6 @@ comptime {
@export(&__extendhfxf2, .{ .name = "__extendhfxf2", .linkage = common.linkage, .visibility = common.visibility });
}
fn __extendhfxf2(a: common.F16T(f80)) callconv(.C) f80 {
fn __extendhfxf2(a: common.F16T(f80)) callconv(.c) f80 {
return extend_f80(f16, @as(u16, @bitCast(a)));
}

View File

@ -11,10 +11,10 @@ comptime {
}
}
fn __extendsfdf2(a: f32) callconv(.C) f64 {
fn __extendsfdf2(a: f32) callconv(.c) f64 {
return extendf(f64, f32, @as(u32, @bitCast(a)));
}
fn __aeabi_f2d(a: f32) callconv(.AAPCS) f64 {
fn __aeabi_f2d(a: f32) callconv(.{ .arm_aapcs = .{} }) f64 {
return extendf(f64, f32, @as(u32, @bitCast(a)));
}

View File

@ -12,10 +12,10 @@ comptime {
@export(&__extendsftf2, .{ .name = "__extendsftf2", .linkage = common.linkage, .visibility = common.visibility });
}
pub fn __extendsftf2(a: f32) callconv(.C) f128 {
pub fn __extendsftf2(a: f32) callconv(.c) f128 {
return extendf(f128, f32, @as(u32, @bitCast(a)));
}
fn _Qp_stoq(c: *f128, a: f32) callconv(.C) void {
fn _Qp_stoq(c: *f128, a: f32) callconv(.c) void {
c.* = extendf(f128, f32, @as(u32, @bitCast(a)));
}

View File

@ -7,6 +7,6 @@ comptime {
@export(&__extendsfxf2, .{ .name = "__extendsfxf2", .linkage = common.linkage, .visibility = common.visibility });
}
fn __extendsfxf2(a: f32) callconv(.C) f80 {
fn __extendsfxf2(a: f32) callconv(.c) f80 {
return extend_f80(f32, @as(u32, @bitCast(a)));
}

View File

@ -7,7 +7,7 @@ comptime {
@export(&__extendxftf2, .{ .name = "__extendxftf2", .linkage = common.linkage, .visibility = common.visibility });
}
fn __extendxftf2(a: f80) callconv(.C) f128 {
fn __extendxftf2(a: f80) callconv(.c) f128 {
const src_int_bit: u64 = 0x8000000000000000;
const src_sig_mask = ~src_int_bit;
const src_sig_bits = std.math.floatMantissaBits(f80) - 1; // -1 for the integer bit

View File

@ -17,27 +17,27 @@ comptime {
@export(&fabsl, .{ .name = "fabsl", .linkage = common.linkage, .visibility = common.visibility });
}
pub fn __fabsh(a: f16) callconv(.C) f16 {
pub fn __fabsh(a: f16) callconv(.c) f16 {
return generic_fabs(a);
}
pub fn fabsf(a: f32) callconv(.C) f32 {
pub fn fabsf(a: f32) callconv(.c) f32 {
return generic_fabs(a);
}
pub fn fabs(a: f64) callconv(.C) f64 {
pub fn fabs(a: f64) callconv(.c) f64 {
return generic_fabs(a);
}
pub fn __fabsx(a: f80) callconv(.C) f80 {
pub fn __fabsx(a: f80) callconv(.c) f80 {
return generic_fabs(a);
}
pub fn fabsq(a: f128) callconv(.C) f128 {
pub fn fabsq(a: f128) callconv(.c) f128 {
return generic_fabs(a);
}
pub fn fabsl(x: c_longdouble) callconv(.C) c_longdouble {
pub fn fabsl(x: c_longdouble) callconv(.c) c_longdouble {
switch (@typeInfo(c_longdouble).float.bits) {
16 => return __fabsh(x),
32 => return fabsf(x),

View File

@ -12,10 +12,10 @@ comptime {
}
}
pub fn __fixdfdi(a: f64) callconv(.C) i64 {
pub fn __fixdfdi(a: f64) callconv(.c) i64 {
return intFromFloat(i64, a);
}
fn __aeabi_d2lz(a: f64) callconv(.AAPCS) i64 {
fn __aeabi_d2lz(a: f64) callconv(.{ .arm_aapcs = .{} }) i64 {
return intFromFloat(i64, a);
}

View File

@ -11,10 +11,10 @@ comptime {
}
}
pub fn __fixdfsi(a: f64) callconv(.C) i32 {
pub fn __fixdfsi(a: f64) callconv(.c) i32 {
return intFromFloat(i32, a);
}
fn __aeabi_d2iz(a: f64) callconv(.AAPCS) i32 {
fn __aeabi_d2iz(a: f64) callconv(.{ .arm_aapcs = .{} }) i32 {
return intFromFloat(i32, a);
}
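
The __fix<src><dst> family truncates toward zero, with df/sf/tf naming the source float and si/di/ti the signed destination width; a hedged check, assuming __fixdfsi is in scope:

test "__fixdfsi truncates toward zero" {
    const testing = @import("std").testing;
    try testing.expectEqual(@as(i32, 3), __fixdfsi(3.9));
    try testing.expectEqual(@as(i32, -3), __fixdfsi(-3.9));
}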

View File

@ -15,12 +15,12 @@ comptime {
}
}
pub fn __fixdfti(a: f64) callconv(.C) i128 {
pub fn __fixdfti(a: f64) callconv(.c) i128 {
return intFromFloat(i128, a);
}
const v2u64 = @Vector(2, u64);
fn __fixdfti_windows_x86_64(a: f64) callconv(.C) v2u64 {
fn __fixdfti_windows_x86_64(a: f64) callconv(.c) v2u64 {
return @bitCast(intFromFloat(i128, a));
}

View File

@ -7,6 +7,6 @@ comptime {
@export(&__fixhfdi, .{ .name = "__fixhfdi", .linkage = common.linkage, .visibility = common.visibility });
}
fn __fixhfdi(a: f16) callconv(.C) i64 {
fn __fixhfdi(a: f16) callconv(.c) i64 {
return intFromFloat(i64, a);
}

View File

@ -7,6 +7,6 @@ comptime {
@export(&__fixhfsi, .{ .name = "__fixhfsi", .linkage = common.linkage, .visibility = common.visibility });
}
fn __fixhfsi(a: f16) callconv(.C) i32 {
fn __fixhfsi(a: f16) callconv(.c) i32 {
return intFromFloat(i32, a);
}

View File

@ -12,12 +12,12 @@ comptime {
}
}
pub fn __fixhfti(a: f16) callconv(.C) i128 {
pub fn __fixhfti(a: f16) callconv(.c) i128 {
return intFromFloat(i128, a);
}
const v2u64 = @Vector(2, u64);
fn __fixhfti_windows_x86_64(a: f16) callconv(.C) v2u64 {
fn __fixhfti_windows_x86_64(a: f16) callconv(.c) v2u64 {
return @bitCast(intFromFloat(i128, a));
}

View File

@ -12,10 +12,10 @@ comptime {
}
}
pub fn __fixsfdi(a: f32) callconv(.C) i64 {
pub fn __fixsfdi(a: f32) callconv(.c) i64 {
return intFromFloat(i64, a);
}
fn __aeabi_f2lz(a: f32) callconv(.AAPCS) i64 {
fn __aeabi_f2lz(a: f32) callconv(.{ .arm_aapcs = .{} }) i64 {
return intFromFloat(i64, a);
}

View File

@ -11,10 +11,10 @@ comptime {
}
}
pub fn __fixsfsi(a: f32) callconv(.C) i32 {
pub fn __fixsfsi(a: f32) callconv(.c) i32 {
return intFromFloat(i32, a);
}
fn __aeabi_f2iz(a: f32) callconv(.AAPCS) i32 {
fn __aeabi_f2iz(a: f32) callconv(.{ .arm_aapcs = .{} }) i32 {
return intFromFloat(i32, a);
}

View File

@ -15,12 +15,12 @@ comptime {
}
}
pub fn __fixsfti(a: f32) callconv(.C) i128 {
pub fn __fixsfti(a: f32) callconv(.c) i128 {
return intFromFloat(i128, a);
}
const v2u64 = @Vector(2, u64);
fn __fixsfti_windows_x86_64(a: f32) callconv(.C) v2u64 {
fn __fixsfti_windows_x86_64(a: f32) callconv(.c) v2u64 {
return @bitCast(intFromFloat(i128, a));
}

View File

@ -12,10 +12,10 @@ comptime {
@export(&__fixtfdi, .{ .name = "__fixtfdi", .linkage = common.linkage, .visibility = common.visibility });
}
pub fn __fixtfdi(a: f128) callconv(.C) i64 {
pub fn __fixtfdi(a: f128) callconv(.c) i64 {
return intFromFloat(i64, a);
}
fn _Qp_qtox(a: *const f128) callconv(.C) i64 {
fn _Qp_qtox(a: *const f128) callconv(.c) i64 {
return intFromFloat(i64, a.*);
}

View File

@ -12,10 +12,10 @@ comptime {
@export(&__fixtfsi, .{ .name = "__fixtfsi", .linkage = common.linkage, .visibility = common.visibility });
}
pub fn __fixtfsi(a: f128) callconv(.C) i32 {
pub fn __fixtfsi(a: f128) callconv(.c) i32 {
return intFromFloat(i32, a);
}
fn _Qp_qtoi(a: *const f128) callconv(.C) i32 {
fn _Qp_qtoi(a: *const f128) callconv(.c) i32 {
return intFromFloat(i32, a.*);
}

View File

@ -14,12 +14,12 @@ comptime {
}
}
pub fn __fixtfti(a: f128) callconv(.C) i128 {
pub fn __fixtfti(a: f128) callconv(.c) i128 {
return intFromFloat(i128, a);
}
const v2u64 = @Vector(2, u64);
fn __fixtfti_windows_x86_64(a: f128) callconv(.C) v2u64 {
fn __fixtfti_windows_x86_64(a: f128) callconv(.c) v2u64 {
return @bitCast(intFromFloat(i128, a));
}

View File

@ -12,10 +12,10 @@ comptime {
}
}
pub fn __fixunsdfdi(a: f64) callconv(.C) u64 {
pub fn __fixunsdfdi(a: f64) callconv(.c) u64 {
return intFromFloat(u64, a);
}
fn __aeabi_d2ulz(a: f64) callconv(.AAPCS) u64 {
fn __aeabi_d2ulz(a: f64) callconv(.{ .arm_aapcs = .{} }) u64 {
return intFromFloat(u64, a);
}

View File

@ -11,10 +11,10 @@ comptime {
}
}
pub fn __fixunsdfsi(a: f64) callconv(.C) u32 {
pub fn __fixunsdfsi(a: f64) callconv(.c) u32 {
return intFromFloat(u32, a);
}
fn __aeabi_d2uiz(a: f64) callconv(.AAPCS) u32 {
fn __aeabi_d2uiz(a: f64) callconv(.{ .arm_aapcs = .{} }) u32 {
return intFromFloat(u32, a);
}

View File

@ -15,12 +15,12 @@ comptime {
}
}
pub fn __fixunsdfti(a: f64) callconv(.C) u128 {
pub fn __fixunsdfti(a: f64) callconv(.c) u128 {
return intFromFloat(u128, a);
}
const v2u64 = @Vector(2, u64);
fn __fixunsdfti_windows_x86_64(a: f64) callconv(.C) v2u64 {
fn __fixunsdfti_windows_x86_64(a: f64) callconv(.c) v2u64 {
return @bitCast(intFromFloat(u128, a));
}

View File

@ -7,6 +7,6 @@ comptime {
@export(&__fixunshfdi, .{ .name = "__fixunshfdi", .linkage = common.linkage, .visibility = common.visibility });
}
fn __fixunshfdi(a: f16) callconv(.C) u64 {
fn __fixunshfdi(a: f16) callconv(.c) u64 {
return intFromFloat(u64, a);
}

View File

@ -7,6 +7,6 @@ comptime {
@export(&__fixunshfsi, .{ .name = "__fixunshfsi", .linkage = common.linkage, .visibility = common.visibility });
}
fn __fixunshfsi(a: f16) callconv(.C) u32 {
fn __fixunshfsi(a: f16) callconv(.c) u32 {
return intFromFloat(u32, a);
}

View File

@ -12,12 +12,12 @@ comptime {
}
}
pub fn __fixunshfti(a: f16) callconv(.C) u128 {
pub fn __fixunshfti(a: f16) callconv(.c) u128 {
return intFromFloat(u128, a);
}
const v2u64 = @Vector(2, u64);
fn __fixunshfti_windows_x86_64(a: f16) callconv(.C) v2u64 {
fn __fixunshfti_windows_x86_64(a: f16) callconv(.c) v2u64 {
return @bitCast(intFromFloat(u128, a));
}

View File

@ -12,10 +12,10 @@ comptime {
}
}
pub fn __fixunssfdi(a: f32) callconv(.C) u64 {
pub fn __fixunssfdi(a: f32) callconv(.c) u64 {
return intFromFloat(u64, a);
}
fn __aeabi_f2ulz(a: f32) callconv(.AAPCS) u64 {
fn __aeabi_f2ulz(a: f32) callconv(.{ .arm_aapcs = .{} }) u64 {
return intFromFloat(u64, a);
}

View File

@ -11,10 +11,10 @@ comptime {
}
}
pub fn __fixunssfsi(a: f32) callconv(.C) u32 {
pub fn __fixunssfsi(a: f32) callconv(.c) u32 {
return intFromFloat(u32, a);
}
fn __aeabi_f2uiz(a: f32) callconv(.AAPCS) u32 {
fn __aeabi_f2uiz(a: f32) callconv(.{ .arm_aapcs = .{} }) u32 {
return intFromFloat(u32, a);
}

View File

@ -15,12 +15,12 @@ comptime {
}
}
pub fn __fixunssfti(a: f32) callconv(.C) u128 {
pub fn __fixunssfti(a: f32) callconv(.c) u128 {
return intFromFloat(u128, a);
}
const v2u64 = @Vector(2, u64);
fn __fixunssfti_windows_x86_64(a: f32) callconv(.C) v2u64 {
fn __fixunssfti_windows_x86_64(a: f32) callconv(.c) v2u64 {
return @bitCast(intFromFloat(u128, a));
}

View File

@ -12,10 +12,10 @@ comptime {
@export(&__fixunstfdi, .{ .name = "__fixunstfdi", .linkage = common.linkage, .visibility = common.visibility });
}
pub fn __fixunstfdi(a: f128) callconv(.C) u64 {
pub fn __fixunstfdi(a: f128) callconv(.c) u64 {
return intFromFloat(u64, a);
}
fn _Qp_qtoux(a: *const f128) callconv(.C) u64 {
fn _Qp_qtoux(a: *const f128) callconv(.c) u64 {
return intFromFloat(u64, a.*);
}

View File

@ -12,10 +12,10 @@ comptime {
@export(&__fixunstfsi, .{ .name = "__fixunstfsi", .linkage = common.linkage, .visibility = common.visibility });
}
pub fn __fixunstfsi(a: f128) callconv(.C) u32 {
pub fn __fixunstfsi(a: f128) callconv(.c) u32 {
return intFromFloat(u32, a);
}
fn _Qp_qtoui(a: *const f128) callconv(.C) u32 {
fn _Qp_qtoui(a: *const f128) callconv(.c) u32 {
return intFromFloat(u32, a.*);
}

View File

@ -14,12 +14,12 @@ comptime {
}
}
pub fn __fixunstfti(a: f128) callconv(.C) u128 {
pub fn __fixunstfti(a: f128) callconv(.c) u128 {
return intFromFloat(u128, a);
}
const v2u64 = @Vector(2, u64);
fn __fixunstfti_windows_x86_64(a: f128) callconv(.C) v2u64 {
fn __fixunstfti_windows_x86_64(a: f128) callconv(.c) v2u64 {
return @bitCast(intFromFloat(u128, a));
}

View File

@ -7,6 +7,6 @@ comptime {
@export(&__fixunsxfdi, .{ .name = "__fixunsxfdi", .linkage = common.linkage, .visibility = common.visibility });
}
fn __fixunsxfdi(a: f80) callconv(.C) u64 {
fn __fixunsxfdi(a: f80) callconv(.c) u64 {
return intFromFloat(u64, a);
}

View File

@ -7,6 +7,6 @@ comptime {
@export(&__fixunsxfsi, .{ .name = "__fixunsxfsi", .linkage = common.linkage, .visibility = common.visibility });
}
fn __fixunsxfsi(a: f80) callconv(.C) u32 {
fn __fixunsxfsi(a: f80) callconv(.c) u32 {
return intFromFloat(u32, a);
}

View File

@ -12,12 +12,12 @@ comptime {
}
}
pub fn __fixunsxfti(a: f80) callconv(.C) u128 {
pub fn __fixunsxfti(a: f80) callconv(.c) u128 {
return intFromFloat(u128, a);
}
const v2u64 = @Vector(2, u64);
fn __fixunsxfti_windows_x86_64(a: f80) callconv(.C) v2u64 {
fn __fixunsxfti_windows_x86_64(a: f80) callconv(.c) v2u64 {
return @bitCast(intFromFloat(u128, a));
}

View File

@ -7,6 +7,6 @@ comptime {
@export(&__fixxfdi, .{ .name = "__fixxfdi", .linkage = common.linkage, .visibility = common.visibility });
}
fn __fixxfdi(a: f80) callconv(.C) i64 {
fn __fixxfdi(a: f80) callconv(.c) i64 {
return intFromFloat(i64, a);
}

View File

@ -7,6 +7,6 @@ comptime {
@export(&__fixxfsi, .{ .name = "__fixxfsi", .linkage = common.linkage, .visibility = common.visibility });
}
fn __fixxfsi(a: f80) callconv(.C) i32 {
fn __fixxfsi(a: f80) callconv(.c) i32 {
return intFromFloat(i32, a);
}

View File

@ -12,12 +12,12 @@ comptime {
}
}
pub fn __fixxfti(a: f80) callconv(.C) i128 {
pub fn __fixxfti(a: f80) callconv(.c) i128 {
return intFromFloat(i128, a);
}
const v2u64 = @Vector(2, u64);
fn __fixxfti_windows_x86_64(a: f80) callconv(.C) v2u64 {
fn __fixxfti_windows_x86_64(a: f80) callconv(.c) v2u64 {
return @bitCast(intFromFloat(i128, a));
}

View File

@ -12,10 +12,10 @@ comptime {
}
}
pub fn __floatdidf(a: i64) callconv(.C) f64 {
pub fn __floatdidf(a: i64) callconv(.c) f64 {
return floatFromInt(f64, a);
}
fn __aeabi_l2d(a: i64) callconv(.AAPCS) f64 {
fn __aeabi_l2d(a: i64) callconv(.{ .arm_aapcs = .{} }) f64 {
return floatFromInt(f64, a);
}

View File

@ -7,6 +7,6 @@ comptime {
@export(&__floatdihf, .{ .name = "__floatdihf", .linkage = common.linkage, .visibility = common.visibility });
}
fn __floatdihf(a: i64) callconv(.C) f16 {
fn __floatdihf(a: i64) callconv(.c) f16 {
return floatFromInt(f16, a);
}

View File

@ -12,10 +12,10 @@ comptime {
}
}
pub fn __floatdisf(a: i64) callconv(.C) f32 {
pub fn __floatdisf(a: i64) callconv(.c) f32 {
return floatFromInt(f32, a);
}
fn __aeabi_l2f(a: i64) callconv(.AAPCS) f32 {
fn __aeabi_l2f(a: i64) callconv(.{ .arm_aapcs = .{} }) f32 {
return floatFromInt(f32, a);
}

View File

@ -12,10 +12,10 @@ comptime {
@export(&__floatditf, .{ .name = "__floatditf", .linkage = common.linkage, .visibility = common.visibility });
}
pub fn __floatditf(a: i64) callconv(.C) f128 {
pub fn __floatditf(a: i64) callconv(.c) f128 {
return floatFromInt(f128, a);
}
fn _Qp_xtoq(c: *f128, a: i64) callconv(.C) void {
fn _Qp_xtoq(c: *f128, a: i64) callconv(.c) void {
c.* = floatFromInt(f128, a);
}

View File

@ -7,6 +7,6 @@ comptime {
@export(&__floatdixf, .{ .name = "__floatdixf", .linkage = common.linkage, .visibility = common.visibility });
}
fn __floatdixf(a: i64) callconv(.C) f80 {
fn __floatdixf(a: i64) callconv(.c) f80 {
return floatFromInt(f80, a);
}

View File

@ -11,10 +11,10 @@ comptime {
}
}
pub fn __floatsidf(a: i32) callconv(.C) f64 {
pub fn __floatsidf(a: i32) callconv(.c) f64 {
return floatFromInt(f64, a);
}
fn __aeabi_i2d(a: i32) callconv(.AAPCS) f64 {
fn __aeabi_i2d(a: i32) callconv(.{ .arm_aapcs = .{} }) f64 {
return floatFromInt(f64, a);
}

View File

@ -7,6 +7,6 @@ comptime {
@export(&__floatsihf, .{ .name = "__floatsihf", .linkage = common.linkage, .visibility = common.visibility });
}
fn __floatsihf(a: i32) callconv(.C) f16 {
fn __floatsihf(a: i32) callconv(.c) f16 {
return floatFromInt(f16, a);
}

View File

@ -11,10 +11,10 @@ comptime {
}
}
pub fn __floatsisf(a: i32) callconv(.C) f32 {
pub fn __floatsisf(a: i32) callconv(.c) f32 {
return floatFromInt(f32, a);
}
fn __aeabi_i2f(a: i32) callconv(.AAPCS) f32 {
fn __aeabi_i2f(a: i32) callconv(.{ .arm_aapcs = .{} }) f32 {
return floatFromInt(f32, a);
}

View File

@ -12,10 +12,10 @@ comptime {
@export(&__floatsitf, .{ .name = "__floatsitf", .linkage = common.linkage, .visibility = common.visibility });
}
pub fn __floatsitf(a: i32) callconv(.C) f128 {
pub fn __floatsitf(a: i32) callconv(.c) f128 {
return floatFromInt(f128, a);
}
fn _Qp_itoq(c: *f128, a: i32) callconv(.C) void {
fn _Qp_itoq(c: *f128, a: i32) callconv(.c) void {
c.* = floatFromInt(f128, a);
}
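
Conversely, the __float<src><dst> family is exact whenever the integer fits the destination significand, which every i32 trivially does for f128; a hedged check, assuming __floatsitf is in scope:

test "__floatsitf converts i32 exactly" {
    const testing = @import("std").testing;
    try testing.expectEqual(@as(f128, -123456789.0), __floatsitf(-123456789));
}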

Some files were not shown because too many files have changed in this diff.