math: make cast return optional instead of an error

Authored by Ali Chraghi on 2022-05-22 19:36:59 +04:30, committed by Andrew Kelley
parent ddd5b57045
commit 0e6285c8fc
37 changed files with 152 additions and 175 deletions
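
In short: `std.math.cast` now returns `?T` instead of `error{Overflow}!T`, so call sites switch from `catch` to `orelse`. A minimal before/after sketch in Zig (the helper below is illustrative, not part of this commit):

const std = @import("std");
const math = std.math;

// Before: overflow surfaced as an error union.
//     const byte_len = math.cast(u16, len) catch return error.NameTooLong;
// After: overflow surfaces as null.
//     const byte_len = math.cast(u16, len) orelse return error.NameTooLong;

fn clampToU8(wide: u32) u8 {
    // Saturate instead of erroring when the value doesn't fit.
    return math.cast(u8, wide) orelse math.maxInt(u8);
}

test "clampToU8 saturates on overflow" {
    try std.testing.expect(clampToU8(300) == 255);
    try std.testing.expect(clampToU8(42) == 42);
}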


@ -88,7 +88,7 @@ pub fn setName(self: Thread, name: []const u8) SetNameError!void {
.windows => {
var buf: [max_name_len]u16 = undefined;
const len = try std.unicode.utf8ToUtf16Le(&buf, name);
const byte_len = math.cast(c_ushort, len * 2) catch return error.NameTooLong;
const byte_len = math.cast(c_ushort, len * 2) orelse return error.NameTooLong;
// Note: NT allocates its own copy, no use-after-free here.
const unicode_string = os.windows.UNICODE_STRING{
@ -526,7 +526,7 @@ const WindowsThreadImpl = struct {
// Windows appears to only support a minimum stack size of SYSTEM_INFO.dwAllocationGranularity.
// Going lower makes it default to that specified in the executable (~1mb).
// It's also fine if the limit here is incorrect, as stack size is only a hint.
var stack_size = std.math.cast(u32, config.stack_size) catch std.math.maxInt(u32);
var stack_size = std.math.cast(u32, config.stack_size) orelse std.math.maxInt(u32);
stack_size = std.math.max(64 * 1024, stack_size);
instance.thread.thread_handle = windows.kernel32.CreateThread(


@ -152,7 +152,7 @@ const WindowsImpl = struct {
// Round the nanoseconds to the nearest millisecond,
// then saturating-cast it to a Windows DWORD for use in the kernel32 call.
const ms = (timeout_ns +| (std.time.ns_per_ms / 2)) / std.time.ns_per_ms;
timeout_ms = std.math.cast(os.windows.DWORD, ms) catch std.math.maxInt(os.windows.DWORD);
timeout_ms = std.math.cast(os.windows.DWORD, ms) orelse std.math.maxInt(os.windows.DWORD);
// Track if the timeout overflowed into INFINITE and make sure not to wait forever.
if (timeout_ms == os.windows.INFINITE) {


@ -193,7 +193,7 @@ const DarwinImpl = struct {
break :blk os.darwin.__ulock_wait2(flags, addr, expect, timeout_ns, 0);
}
const timeout_us = std.math.cast(u32, timeout_ns / std.time.ns_per_us) catch overflow: {
const timeout_us = std.math.cast(u32, timeout_ns / std.time.ns_per_us) orelse overflow: {
timeout_overflowed = true;
break :overflow std.math.maxInt(u32);
};
@ -274,7 +274,7 @@ const LinuxImpl = struct {
const rc = os.linux.futex_wake(
@ptrCast(*const i32, &ptr.value),
os.linux.FUTEX.PRIVATE_FLAG | os.linux.FUTEX.WAKE,
std.math.cast(i32, max_waiters) catch std.math.maxInt(i32),
std.math.cast(i32, max_waiters) orelse std.math.maxInt(i32),
);
switch (os.linux.getErrno(rc)) {
@ -379,7 +379,7 @@ const OpenbsdImpl = struct {
const rc = os.openbsd.futex(
@ptrCast(*const volatile u32, &ptr.value),
os.openbsd.FUTEX_WAKE | os.openbsd.FUTEX_PRIVATE_FLAG,
std.math.cast(c_int, max_waiters) catch std.math.maxInt(c_int),
std.math.cast(c_int, max_waiters) orelse std.math.maxInt(c_int),
null, // FUTEX_WAKE takes no timeout ptr
null, // FUTEX_WAKE takes no requeue address
);
@ -400,7 +400,7 @@ const DragonflyImpl = struct {
if (timeout) |delay| {
assert(delay != 0); // handled by timedWait().
timeout_us = std.math.cast(c_int, delay / std.time.ns_per_us) catch blk: {
timeout_us = std.math.cast(c_int, delay / std.time.ns_per_us) orelse blk: {
timeout_overflowed = true;
break :blk std.math.maxInt(c_int);
};
@ -436,7 +436,7 @@ const DragonflyImpl = struct {
fn wake(ptr: *const Atomic(u32), max_waiters: u32) void {
// A count of zero means wake all waiters.
assert(max_waiters != 0);
const to_wake = std.math.cast(c_int, max_waiters) catch 0;
const to_wake = std.math.cast(c_int, max_waiters) orelse 0;
// https://man.dragonflybsd.org/?command=umtx&section=2
// > umtx_wakeup() will generally return 0 unless the address is bad.


@ -284,7 +284,7 @@ pub const ChildProcess = struct {
const next_buf = buf.unusedCapacitySlice();
if (next_buf.len == 0) return .full;
var read_bytes: u32 = undefined;
const read_result = windows.kernel32.ReadFile(handle, next_buf.ptr, math.cast(u32, next_buf.len) catch maxInt(u32), &read_bytes, overlapped);
const read_result = windows.kernel32.ReadFile(handle, next_buf.ptr, math.cast(u32, next_buf.len) orelse maxInt(u32), &read_bytes, overlapped);
if (read_result == 0) return switch (windows.kernel32.GetLastError()) {
.IO_PENDING => .pending,
.BROKEN_PIPE => .closed,


@ -853,9 +853,9 @@ fn readCoffDebugInfo(allocator: mem.Allocator, coff_file: File) !ModuleDebugInfo
}
}
fn chopSlice(ptr: []const u8, offset: u64, size: u64) ![]const u8 {
const start = try math.cast(usize, offset);
const end = start + try math.cast(usize, size);
fn chopSlice(ptr: []const u8, offset: u64, size: u64) error{Overflow}![]const u8 {
const start = math.cast(usize, offset) orelse return error.Overflow;
const end = start + (math.cast(usize, size) orelse return error.Overflow);
return ptr[start..end];
}
@ -880,7 +880,7 @@ pub fn readElfDebugInfo(allocator: mem.Allocator, elf_file: File) !ModuleDebugIn
const str_section_off = shoff + @as(u64, hdr.e_shentsize) * @as(u64, hdr.e_shstrndx);
const str_shdr = @ptrCast(
*const elf.Shdr,
@alignCast(@alignOf(elf.Shdr), &mapped_mem[try math.cast(usize, str_section_off)]),
@alignCast(@alignOf(elf.Shdr), &mapped_mem[math.cast(usize, str_section_off) orelse return error.Overflow]),
);
const header_strings = mapped_mem[str_shdr.sh_offset .. str_shdr.sh_offset + str_shdr.sh_size];
const shdrs = @ptrCast(
@ -1119,7 +1119,7 @@ fn mapWholeFile(file: File) ![]align(mem.page_size) const u8 {
nosuspend {
defer file.close();
const file_len = try math.cast(usize, try file.getEndPos());
const file_len = math.cast(usize, try file.getEndPos()) orelse math.maxInt(usize);
const mapped_mem = try os.mmap(
null,
file_len,
@ -1248,7 +1248,7 @@ pub const DebugInfo = struct {
if (windows.kernel32.K32EnumProcessModules(
process_handle,
modules.ptr,
try math.cast(windows.DWORD, modules.len * @sizeOf(windows.HMODULE)),
math.cast(windows.DWORD, modules.len * @sizeOf(windows.HMODULE)) orelse return error.Overflow,
&bytes_needed,
) == 0)
return error.MissingDebugInfo;


@ -1068,7 +1068,7 @@ pub const DwarfInfo = struct {
});
},
else => {
const fwd_amt = math.cast(isize, op_size - 1) catch return error.InvalidDebugInfo;
const fwd_amt = math.cast(isize, op_size - 1) orelse return error.InvalidDebugInfo;
try seekable.seekBy(fwd_amt);
},
}
@ -1133,7 +1133,7 @@ pub const DwarfInfo = struct {
fn getString(di: *DwarfInfo, offset: u64) ![]const u8 {
if (offset > di.debug_str.len)
return error.InvalidDebugInfo;
const casted_offset = math.cast(usize, offset) catch
const casted_offset = math.cast(usize, offset) orelse
return error.InvalidDebugInfo;
// Valid strings always have a terminating zero byte
@ -1148,7 +1148,7 @@ pub const DwarfInfo = struct {
const debug_line_str = di.debug_line_str orelse return error.InvalidDebugInfo;
if (offset > debug_line_str.len)
return error.InvalidDebugInfo;
const casted_offset = math.cast(usize, offset) catch
const casted_offset = math.cast(usize, offset) orelse
return error.InvalidDebugInfo;
// Valid strings always have a terminating zero byte


@ -104,6 +104,7 @@ pub const ElfDynLib = struct {
memory: []align(mem.page_size) u8,
pub const Error = error{
FileTooBig,
NotElfFile,
NotDynamicLibrary,
MissingDynamicLinkingInformation,
@ -118,7 +119,7 @@ pub const ElfDynLib = struct {
defer os.close(fd);
const stat = try os.fstat(fd);
const size = try std.math.cast(usize, stat.size);
const size = std.math.cast(usize, stat.size) orelse return error.FileTooBig;
// This one is to read the ELF info. We do more mmapping later
// corresponding to the actual LOAD sections.


@ -1778,8 +1778,8 @@ fn parseWithSign(
if (c == '_') continue;
const digit = try charToDigit(c, buf_radix);
if (x != 0) x = try math.mul(T, x, try math.cast(T, buf_radix));
x = try add(T, x, try math.cast(T, digit));
if (x != 0) x = try math.mul(T, x, math.cast(T, buf_radix) orelse return error.Overflow);
x = try add(T, x, math.cast(T, digit) orelse return error.Overflow);
}
return x;
@ -1893,10 +1893,7 @@ pub fn count(comptime fmt: []const u8, args: anytype) u64 {
pub const AllocPrintError = error{OutOfMemory};
pub fn allocPrint(allocator: mem.Allocator, comptime fmt: []const u8, args: anytype) AllocPrintError![]u8 {
const size = math.cast(usize, count(fmt, args)) catch |err| switch (err) {
// Output too long. Can't possibly allocate enough memory to display it.
error.Overflow => return error.OutOfMemory,
};
const size = math.cast(usize, count(fmt, args)) orelse return error.OutOfMemory;
const buf = try allocator.alloc(u8, size);
return bufPrint(buf, fmt, args) catch |err| switch (err) {
error.NoSpaceLeft => unreachable, // we just counted the size above


@ -1899,7 +1899,7 @@ pub const Dir = struct {
// If the file size doesn't fit in a usize, it's certainly greater than
// `max_bytes`
const stat_size = size_hint orelse math.cast(usize, try file.getEndPos()) catch
const stat_size = size_hint orelse math.cast(usize, try file.getEndPos()) orelse
return error.FileTooBig;
return file.readToEndAllocOptions(allocator, max_bytes, stat_size, alignment, optional_sentinel);


@ -907,12 +907,12 @@ pub const File = struct {
}
const times = [2]os.timespec{
os.timespec{
.tv_sec = math.cast(isize, @divFloor(atime, std.time.ns_per_s)) catch maxInt(isize),
.tv_nsec = math.cast(isize, @mod(atime, std.time.ns_per_s)) catch maxInt(isize),
.tv_sec = math.cast(isize, @divFloor(atime, std.time.ns_per_s)) orelse maxInt(isize),
.tv_nsec = math.cast(isize, @mod(atime, std.time.ns_per_s)) orelse maxInt(isize),
},
os.timespec{
.tv_sec = math.cast(isize, @divFloor(mtime, std.time.ns_per_s)) catch maxInt(isize),
.tv_nsec = math.cast(isize, @mod(mtime, std.time.ns_per_s)) catch maxInt(isize),
.tv_sec = math.cast(isize, @divFloor(mtime, std.time.ns_per_s)) orelse maxInt(isize),
.tv_nsec = math.cast(isize, @mod(mtime, std.time.ns_per_s)) orelse maxInt(isize),
},
};
try os.futimens(self.handle, &times);
@ -1218,7 +1218,7 @@ pub const File = struct {
pub const CopyRangeError = os.CopyFileRangeError;
pub fn copyRange(in: File, in_offset: u64, out: File, out_offset: u64, len: u64) CopyRangeError!u64 {
const adjusted_len = math.cast(usize, len) catch math.maxInt(usize);
const adjusted_len = math.cast(usize, len) orelse math.maxInt(usize);
const result = try os.copy_file_range(in.handle, in_offset, out.handle, out_offset, adjusted_len, 0);
return result;
}


@ -76,20 +76,20 @@ pub fn FixedBufferStream(comptime Buffer: type) type {
}
pub fn seekTo(self: *Self, pos: u64) SeekError!void {
self.pos = if (std.math.cast(usize, pos)) |x| std.math.min(self.buffer.len, x) else |_| self.buffer.len;
self.pos = if (std.math.cast(usize, pos)) |x| std.math.min(self.buffer.len, x) else self.buffer.len;
}
pub fn seekBy(self: *Self, amt: i64) SeekError!void {
if (amt < 0) {
const abs_amt = std.math.absCast(amt);
const abs_amt_usize = std.math.cast(usize, abs_amt) catch std.math.maxInt(usize);
const abs_amt_usize = std.math.cast(usize, abs_amt) orelse std.math.maxInt(usize);
if (abs_amt_usize > self.pos) {
self.pos = 0;
} else {
self.pos -= abs_amt_usize;
}
} else {
const amt_usize = std.math.cast(usize, amt) catch std.math.maxInt(usize);
const amt_usize = std.math.cast(usize, amt) orelse std.math.maxInt(usize);
const new_pos = std.math.add(usize, self.pos, amt_usize) catch std.math.maxInt(usize);
self.pos = std.math.min(self.buffer.len, new_pos);
}
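
This hunk also shows the capture-syntax consequence of the change: an error union's else branch captures the error (`else |_|`), while an optional's else branch captures nothing. A self-contained sketch (immediateOrMax is an illustrative name, not from this commit):

const std = @import("std");

fn immediateOrMax(offset: u64) u12 {
    if (std.math.cast(u12, offset)) |imm| {
        return imm; // payload capture is unchanged
    } else {
        return std.math.maxInt(u12); // was `} else |_| {` before this commit
    }
}

test "immediateOrMax" {
    try std.testing.expect(immediateOrMax(100) == 100);
    try std.testing.expect(immediateOrMax(1 << 20) == std.math.maxInt(u12));
}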


@ -989,28 +989,27 @@ test "negateCast" {
}
/// Cast an integer to a different integer type. If the value doesn't fit,
/// return an error.
/// TODO make this an optional not an error.
pub fn cast(comptime T: type, x: anytype) (error{Overflow}!T) {
/// return null.
pub fn cast(comptime T: type, x: anytype) ?T {
comptime assert(@typeInfo(T) == .Int); // must pass an integer
comptime assert(@typeInfo(@TypeOf(x)) == .Int); // must pass an integer
if (maxInt(@TypeOf(x)) > maxInt(T) and x > maxInt(T)) {
return error.Overflow;
return null;
} else if (minInt(@TypeOf(x)) < minInt(T) and x < minInt(T)) {
return error.Overflow;
return null;
} else {
return @intCast(T, x);
}
}
test "cast" {
try testing.expectError(error.Overflow, cast(u8, @as(u32, 300)));
try testing.expectError(error.Overflow, cast(i8, @as(i32, -200)));
try testing.expectError(error.Overflow, cast(u8, @as(i8, -1)));
try testing.expectError(error.Overflow, cast(u64, @as(i8, -1)));
try testing.expect(cast(u8, @as(u32, 300)) == null);
try testing.expect(cast(i8, @as(i32, -200)) == null);
try testing.expect(cast(u8, @as(i8, -1)) == null);
try testing.expect(cast(u64, @as(i8, -1)) == null);
try testing.expect((try cast(u8, @as(u32, 255))) == @as(u8, 255));
try testing.expect(@TypeOf(try cast(u8, @as(u32, 255))) == u8);
try testing.expect(cast(u8, @as(u32, 255)).? == @as(u8, 255));
try testing.expect(@TypeOf(cast(u8, @as(u32, 255)).?) == u8);
}
pub const AlignCastError = error{UnalignedMemory};
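
Across the call sites, three idioms recur for handling the new null; sketched here under illustrative names (nameByteLen, timeoutMs, timeoutUs are not from this commit):

const std = @import("std");
const math = std.math;

// 1) Map overflow onto a domain error (cf. Thread.setName, faccessatW):
fn nameByteLen(len: usize) error{NameTooLong}!u16 {
    return math.cast(u16, len * 2) orelse error.NameTooLong;
}

// 2) Saturate to the target type's maximum (cf. time.sleep, File.copyRange):
fn timeoutMs(ns: u64) u32 {
    return math.cast(u32, ns / std.time.ns_per_ms) orelse math.maxInt(u32);
}

// 3) Fall back through a labeled block while recording the overflow
//    (cf. the Futex timeout paths):
fn timeoutUs(ns: u64, overflowed: *bool) u32 {
    return math.cast(u32, ns / std.time.ns_per_us) orelse blk: {
        overflowed.* = true;
        break :blk math.maxInt(u32);
    };
}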


@ -2014,7 +2014,7 @@ pub const Const = struct {
} else {
if (math.cast(T, r)) |ok| {
return -ok;
} else |_| {
} else {
return minInt(T);
}
}


@ -660,7 +660,7 @@ pub fn readv(fd: fd_t, iov: []const iovec) ReadError!usize {
else => |err| return unexpectedErrno(err),
}
}
const iov_count = math.cast(u31, iov.len) catch math.maxInt(u31);
const iov_count = math.cast(u31, iov.len) orelse math.maxInt(u31);
while (true) {
// TODO handle the case when iov_len is too large and get rid of this @intCast
const rc = system.readv(fd, iov.ptr, iov_count);
@ -877,7 +877,7 @@ pub fn preadv(fd: fd_t, iov: []const iovec, offset: u64) PReadError!usize {
}
}
const iov_count = math.cast(u31, iov.len) catch math.maxInt(u31);
const iov_count = math.cast(u31, iov.len) orelse math.maxInt(u31);
const preadv_sym = if (builtin.os.tag == .linux and builtin.link_libc)
system.preadv64
@ -4163,9 +4163,9 @@ pub fn kevent(
const rc = system.kevent(
kq,
changelist.ptr,
try math.cast(c_int, changelist.len),
math.cast(c_int, changelist.len) orelse return error.Overflow,
eventlist.ptr,
try math.cast(c_int, eventlist.len),
math.cast(c_int, eventlist.len) orelse return error.Overflow,
timeout,
);
switch (errno(rc)) {
@ -4531,9 +4531,7 @@ pub fn faccessatW(dirfd: fd_t, sub_path_w: [*:0]const u16, mode: u32, flags: u32
return;
}
const path_len_bytes = math.cast(u16, mem.sliceTo(sub_path_w, 0).len * 2) catch |err| switch (err) {
error.Overflow => return error.NameTooLong,
};
const path_len_bytes = math.cast(u16, mem.sliceTo(sub_path_w, 0).len * 2) orelse return error.NameTooLong;
var nt_name = windows.UNICODE_STRING{
.Length = path_len_bytes,
.MaximumLength = path_len_bytes,
@ -4650,7 +4648,7 @@ pub fn sysctl(
@panic("unsupported"); // TODO should be compile error, not panic
}
const name_len = math.cast(c_uint, name.len) catch return error.NameTooLong;
const name_len = math.cast(c_uint, name.len) orelse return error.NameTooLong;
switch (errno(system.sysctl(name.ptr, name_len, oldp, oldlenp, newp, newlen))) {
.SUCCESS => return,
.FAULT => unreachable,
@ -5191,8 +5189,8 @@ pub fn getFdPath(fd: fd_t, out_buffer: *[MAX_PATH_BYTES]u8) RealPathError![]u8 {
/// Spurious wakeups are possible and no precision of timing is guaranteed.
pub fn nanosleep(seconds: u64, nanoseconds: u64) void {
var req = timespec{
.tv_sec = math.cast(isize, seconds) catch math.maxInt(isize),
.tv_nsec = math.cast(isize, nanoseconds) catch math.maxInt(isize),
.tv_sec = math.cast(isize, seconds) orelse math.maxInt(isize),
.tv_nsec = math.cast(isize, nanoseconds) orelse math.maxInt(isize),
};
var rem: timespec = undefined;
while (true) {
@ -6006,10 +6004,10 @@ pub fn sendfile(
if (headers.len != 0 or trailers.len != 0) {
// Here we carefully avoid `@intCast` by returning partial writes when
// too many io vectors are provided.
const hdr_cnt = math.cast(u31, headers.len) catch math.maxInt(u31);
const hdr_cnt = math.cast(u31, headers.len) orelse math.maxInt(u31);
if (headers.len > hdr_cnt) return writev(out_fd, headers);
const trl_cnt = math.cast(u31, trailers.len) catch math.maxInt(u31);
const trl_cnt = math.cast(u31, trailers.len) orelse math.maxInt(u31);
hdtr_data = std.c.sf_hdtr{
.headers = headers.ptr,
@ -6085,10 +6083,10 @@ pub fn sendfile(
if (headers.len != 0 or trailers.len != 0) {
// Here we carefully avoid `@intCast` by returning partial writes when
// too many io vectors are provided.
const hdr_cnt = math.cast(u31, headers.len) catch math.maxInt(u31);
const hdr_cnt = math.cast(u31, headers.len) orelse math.maxInt(u31);
if (headers.len > hdr_cnt) return writev(out_fd, headers);
const trl_cnt = math.cast(u31, trailers.len) catch math.maxInt(u31);
const trl_cnt = math.cast(u31, trailers.len) orelse math.maxInt(u31);
hdtr_data = std.c.sf_hdtr{
.headers = headers.ptr,
@ -6276,7 +6274,7 @@ pub const PollError = error{
pub fn poll(fds: []pollfd, timeout: i32) PollError!usize {
while (true) {
const fds_count = math.cast(nfds_t, fds.len) catch return error.SystemResources;
const fds_count = math.cast(nfds_t, fds.len) orelse return error.SystemResources;
const rc = system.poll(fds.ptr, fds_count, timeout);
if (builtin.os.tag == .windows) {
if (rc == windows.ws2_32.SOCKET_ERROR) {
@ -6319,7 +6317,7 @@ pub fn ppoll(fds: []pollfd, timeout: ?*const timespec, mask: ?*const sigset_t) P
ts_ptr = &ts;
ts = timeout_ns.*;
}
const fds_count = math.cast(nfds_t, fds.len) catch return error.SystemResources;
const fds_count = math.cast(nfds_t, fds.len) orelse return error.SystemResources;
const rc = system.ppoll(fds.ptr, fds_count, ts_ptr, mask);
switch (errno(rc)) {
.SUCCESS => return @intCast(usize, rc),


@ -78,9 +78,7 @@ pub fn OpenFile(sub_path_w: []const u16, options: OpenFileOptions) OpenError!HAN
var result: HANDLE = undefined;
const path_len_bytes = math.cast(u16, sub_path_w.len * 2) catch |err| switch (err) {
error.Overflow => return error.NameTooLong,
};
const path_len_bytes = math.cast(u16, sub_path_w.len * 2) orelse return error.NameTooLong;
var nt_name = UNICODE_STRING{
.Length = path_len_bytes,
.MaximumLength = path_len_bytes,
@ -551,7 +549,7 @@ pub fn WriteFile(
};
loop.beginOneEvent();
suspend {
const adjusted_len = math.cast(DWORD, bytes.len) catch maxInt(DWORD);
const adjusted_len = math.cast(DWORD, bytes.len) orelse maxInt(DWORD);
_ = kernel32.WriteFile(handle, bytes.ptr, adjusted_len, null, &resume_node.base.overlapped);
}
var bytes_transferred: DWORD = undefined;
@ -589,7 +587,7 @@ pub fn WriteFile(
};
break :blk &overlapped_data;
} else null;
const adjusted_len = math.cast(u32, bytes.len) catch maxInt(u32);
const adjusted_len = math.cast(u32, bytes.len) orelse maxInt(u32);
if (kernel32.WriteFile(handle, bytes.ptr, adjusted_len, &bytes_written, overlapped) == 0) {
switch (kernel32.GetLastError()) {
.INVALID_USER_BUFFER => return error.SystemResources,
@ -618,9 +616,7 @@ pub const SetCurrentDirectoryError = error{
};
pub fn SetCurrentDirectory(path_name: []const u16) SetCurrentDirectoryError!void {
const path_len_bytes = math.cast(u16, path_name.len * 2) catch |err| switch (err) {
error.Overflow => return error.NameTooLong,
};
const path_len_bytes = math.cast(u16, path_name.len * 2) orelse return error.NameTooLong;
var nt_name = UNICODE_STRING{
.Length = path_len_bytes,
@ -753,9 +749,7 @@ pub fn ReadLink(dir: ?HANDLE, sub_path_w: []const u16, out_buffer: []u8) ReadLin
// With the latter, we'd need to call `NtCreateFile` twice, once for file symlink, and if that
// failed, again for dir symlink. Omitting any mention of file/dir flags makes it possible
// to open the symlink there and then.
const path_len_bytes = math.cast(u16, sub_path_w.len * 2) catch |err| switch (err) {
error.Overflow => return error.NameTooLong,
};
const path_len_bytes = math.cast(u16, sub_path_w.len * 2) orelse return error.NameTooLong;
var nt_name = UNICODE_STRING{
.Length = path_len_bytes,
.MaximumLength = path_len_bytes,
@ -1013,9 +1007,7 @@ pub fn QueryObjectName(
const info = @ptrCast(*OBJECT_NAME_INFORMATION, out_buffer_aligned);
//buffer size is specified in bytes
const out_buffer_len = std.math.cast(ULONG, out_buffer_aligned.len * 2) catch |e| switch (e) {
error.Overflow => std.math.maxInt(ULONG),
};
const out_buffer_len = std.math.cast(ULONG, out_buffer_aligned.len * 2) orelse std.math.maxInt(ULONG);
//last argument would return the length required for full_buffer, not exposed here
const rc = ntdll.NtQueryObject(handle, .ObjectNameInformation, info, out_buffer_len, null);
switch (rc) {
@ -1221,7 +1213,7 @@ pub fn QueryInformationFile(
out_buffer: []u8,
) QueryInformationFileError!void {
var io: IO_STATUS_BLOCK = undefined;
const len_bytes = std.math.cast(u32, out_buffer.len) catch unreachable;
const len_bytes = std.math.cast(u32, out_buffer.len) orelse unreachable;
const rc = ntdll.NtQueryInformationFile(handle, &io, out_buffer.ptr, len_bytes, info_class);
switch (rc) {
.SUCCESS => {},


@ -16,7 +16,7 @@ pub fn sleep(nanoseconds: u64) void {
if (builtin.os.tag == .windows) {
const big_ms_from_ns = nanoseconds / ns_per_ms;
const ms = math.cast(os.windows.DWORD, big_ms_from_ns) catch math.maxInt(os.windows.DWORD);
const ms = math.cast(os.windows.DWORD, big_ms_from_ns) orelse math.maxInt(os.windows.DWORD);
os.windows.kernel32.Sleep(ms);
return;
}


@ -657,9 +657,7 @@ pub fn abiAndDynamicLinkerFromFile(
const strtab_read_len = try preadMin(file, &strtab_buf, ds.offset, strtab_len);
const strtab = strtab_buf[0..strtab_read_len];
// TODO this pointer cast should not be necessary
const rpoff_usize = std.math.cast(usize, rpoff) catch |err| switch (err) {
error.Overflow => return error.InvalidElfFile,
};
const rpoff_usize = std.math.cast(usize, rpoff) orelse return error.InvalidElfFile;
const rpath_list = mem.sliceTo(std.meta.assumeSentinel(strtab[rpoff_usize..].ptr, 0), 0);
var it = mem.tokenize(u8, rpath_list, ":");
while (it.next()) |rpath| {


@ -4395,7 +4395,7 @@ pub fn embedFile(mod: *Module, cur_file: *File, rel_file_path: []const u8) !*Emb
.inode = actual_stat.inode,
.mtime = actual_stat.mtime,
};
const size_usize = try std.math.cast(usize, actual_stat.size);
const size_usize = std.math.cast(usize, actual_stat.size) orelse return error.Overflow;
const bytes = try file.readToEndAllocOptions(gpa, std.math.maxInt(u32), size_usize, 1, 0);
errdefer gpa.free(bytes);
@ -4435,7 +4435,7 @@ pub fn detectEmbedFileUpdate(mod: *Module, embed_file: *EmbedFile) !void {
if (unchanged_metadata) return;
const gpa = mod.gpa;
const size_usize = try std.math.cast(usize, stat.size);
const size_usize = std.math.cast(usize, stat.size) orelse return error.Overflow;
const bytes = try file.readToEndAllocOptions(gpa, std.math.maxInt(u32), size_usize, 1, 0);
gpa.free(embed_file.bytes);
embed_file.bytes = bytes;


@ -21786,9 +21786,7 @@ fn cmpNumeric(
const dest_ty = if (dest_float_type) |ft| ft else blk: {
const max_bits = std.math.max(lhs_bits, rhs_bits);
const casted_bits = std.math.cast(u16, max_bits) catch |err| switch (err) {
error.Overflow => return sema.fail(block, src, "{d} exceeds maximum integer bit count", .{max_bits}),
};
const casted_bits = std.math.cast(u16, max_bits) orelse return sema.fail(block, src, "{d} exceeds maximum integer bit count", .{max_bits});
const signedness: std.builtin.Signedness = if (dest_int_is_signed) .signed else .unsigned;
break :blk try Module.makeIntType(sema.arena, signedness, casted_bits);
};
@ -24073,9 +24071,7 @@ fn pointerDeref(sema: *Sema, block: *Block, src: LazySrcLoc, ptr_val: Value, ptr
/// is too big to fit.
fn usizeCast(sema: *Sema, block: *Block, src: LazySrcLoc, int: u64) CompileError!usize {
if (@bitSizeOf(u64) <= @bitSizeOf(usize)) return int;
return std.math.cast(usize, int) catch |err| switch (err) {
error.Overflow => return sema.fail(block, src, "expression produces integer value {d} which is too big for this compiler implementation to handle", .{int}),
};
return std.math.cast(usize, int) orelse return sema.fail(block, src, "expression produces integer value {d} which is too big for this compiler implementation to handle", .{int});
}
/// For pointer-like optionals, it returns the pointer type. For pointers,


@ -453,7 +453,7 @@ fn gen(self: *Self) !void {
.tag = .sub_immediate,
.data = .{ .rr_imm12_sh = .{ .rd = .sp, .rn = .sp, .imm12 = size } },
});
} else |_| {
} else {
return self.failSymbol("TODO AArch64: allow larger stacks", .{});
}
@ -860,7 +860,7 @@ fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 {
return @as(u32, 0);
}
const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) catch {
const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) orelse {
const mod = self.bin_file.options.module.?;
return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)});
};
@ -871,7 +871,7 @@ fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 {
fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue {
const elem_ty = self.air.typeOfIndex(inst);
const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) catch {
const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) orelse {
const mod = self.bin_file.options.module.?;
return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)});
};
@ -3031,7 +3031,7 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void {
// Copy registers to the stack
.register => |reg| blk: {
const mod = self.bin_file.options.module.?;
const abi_size = math.cast(u32, ty.abiSize(self.target.*)) catch {
const abi_size = math.cast(u32, ty.abiSize(self.target.*)) orelse {
return self.fail("type '{}' too big to fit into stack frame", .{ty.fmt(mod)});
};
const abi_align = ty.abiAlignment(self.target.*);
@ -4173,7 +4173,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
},
.ptr_stack_offset => |off| {
// TODO: maybe addressing from sp instead of fp
const imm12 = math.cast(u12, off) catch
const imm12 = math.cast(u12, off) orelse
return self.fail("TODO larger stack offsets", .{});
_ = try self.addInst(.{


@ -216,21 +216,21 @@ fn optimalBranchType(emit: *Emit, tag: Mir.Inst.Tag, offset: i64) !BranchType {
.cbz => {
if (std.math.cast(i19, @shrExact(offset, 2))) |_| {
return BranchType.cbz;
} else |_| {
} else {
return emit.fail("TODO support cbz branches larger than +-1 MiB", .{});
}
},
.b, .bl => {
if (std.math.cast(i26, @shrExact(offset, 2))) |_| {
return BranchType.unconditional_branch_immediate;
} else |_| {
} else {
return emit.fail("TODO support unconditional branches larger than +-128 MiB", .{});
}
},
.b_cond => {
if (std.math.cast(i19, @shrExact(offset, 2))) |_| {
return BranchType.b_cond;
} else |_| {
} else {
return emit.fail("TODO support conditional branches larger than +-1 MiB", .{});
}
},
@ -927,7 +927,7 @@ fn mirLoadStoreStack(emit: *Emit, inst: Mir.Inst.Index) !void {
.ldrb_stack, .ldrsb_stack, .strb_stack => blk: {
if (math.cast(u12, raw_offset)) |imm| {
break :blk Instruction.LoadStoreOffset.imm(imm);
} else |_| {
} else {
return emit.fail("TODO load/store stack byte with larger offset", .{});
}
},
@ -935,7 +935,7 @@ fn mirLoadStoreStack(emit: *Emit, inst: Mir.Inst.Index) !void {
assert(std.mem.isAlignedGeneric(u32, raw_offset, 2)); // misaligned stack entry
if (math.cast(u12, @divExact(raw_offset, 2))) |imm| {
break :blk Instruction.LoadStoreOffset.imm(imm);
} else |_| {
} else {
return emit.fail("TODO load/store stack halfword with larger offset", .{});
}
},
@ -949,7 +949,7 @@ fn mirLoadStoreStack(emit: *Emit, inst: Mir.Inst.Index) !void {
assert(std.mem.isAlignedGeneric(u32, raw_offset, alignment)); // misaligned stack entry
if (math.cast(u12, @divExact(raw_offset, alignment))) |imm| {
break :blk Instruction.LoadStoreOffset.imm(imm);
} else |_| {
} else {
return emit.fail("TODO load/store stack with larger offset", .{});
}
},


@ -850,7 +850,7 @@ fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 {
return @as(u32, 0);
}
const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) catch {
const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) orelse {
const mod = self.bin_file.options.module.?;
return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)});
};
@ -861,7 +861,7 @@ fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 {
fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue {
const elem_ty = self.air.typeOfIndex(inst);
const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) catch {
const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) orelse {
const mod = self.bin_file.options.module.?;
return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)});
};
@ -4299,7 +4299,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
1, 4 => {
const offset = if (math.cast(u12, stack_offset)) |imm| blk: {
break :blk Instruction.Offset.imm(imm);
} else |_| Instruction.Offset.reg(try self.copyToTmpRegister(Type.initTag(.u32), MCValue{ .immediate = stack_offset }), .none);
} else Instruction.Offset.reg(try self.copyToTmpRegister(Type.initTag(.u32), MCValue{ .immediate = stack_offset }), .none);
const tag: Mir.Inst.Tag = switch (abi_size) {
1 => .strb,
@ -4707,7 +4707,7 @@ fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) I
1, 4 => {
const offset = if (math.cast(u12, stack_offset)) |imm| blk: {
break :blk Instruction.Offset.imm(imm);
} else |_| Instruction.Offset.reg(try self.copyToTmpRegister(Type.initTag(.u32), MCValue{ .immediate = stack_offset }), .none);
} else Instruction.Offset.reg(try self.copyToTmpRegister(Type.initTag(.u32), MCValue{ .immediate = stack_offset }), .none);
const tag: Mir.Inst.Tag = switch (abi_size) {
1 => .strb,


@ -164,7 +164,7 @@ fn optimalBranchType(emit: *Emit, tag: Mir.Inst.Tag, offset: i64) !BranchType {
.b => {
if (std.math.cast(i24, @divExact(offset, 4))) |_| {
return BranchType.b;
} else |_| {
} else {
return emit.fail("TODO support larger branches", .{});
}
},


@ -779,7 +779,7 @@ fn allocMem(self: *Self, inst: Air.Inst.Index, abi_size: u32, abi_align: u32) !u
/// Use a pointer instruction as the basis for allocating stack memory.
fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 {
const elem_ty = self.air.typeOfIndex(inst).elemType();
const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) catch {
const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) orelse {
const mod = self.bin_file.options.module.?;
return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)});
};
@ -790,7 +790,7 @@ fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 {
fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue {
const elem_ty = self.air.typeOfIndex(inst);
const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) catch {
const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) orelse {
const mod = self.bin_file.options.module.?;
return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)});
};


@ -421,7 +421,7 @@ fn gen(self: *Self) !void {
},
},
});
} else |_| {
} else {
// TODO for large stacks, replace the prologue with:
// setx stack_size, %g1
// save %sp, %g1, %sp
@ -1591,7 +1591,7 @@ fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 {
return @as(u32, 0);
}
const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) catch {
const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) orelse {
const mod = self.bin_file.options.module.?;
return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)});
};
@ -1602,7 +1602,7 @@ fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 {
fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue {
const elem_ty = self.air.typeOfIndex(inst);
const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) catch {
const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) orelse {
const mod = self.bin_file.options.module.?;
return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)});
};
@ -2299,7 +2299,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
return self.genSetReg(ty, reg, .{ .immediate = 0xaaaaaaaaaaaaaaaa });
},
.ptr_stack_offset => |off| {
const simm13 = math.cast(u12, off + abi.stack_bias + abi.stack_reserved_area) catch
const simm13 = math.cast(u12, off + abi.stack_bias + abi.stack_reserved_area) orelse
return self.fail("TODO larger stack offsets", .{});
_ = try self.addInst(.{
@ -2432,7 +2432,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
},
.stack_offset => |off| {
const real_offset = off + abi.stack_bias + abi.stack_reserved_area;
const simm13 = math.cast(i13, real_offset) catch
const simm13 = math.cast(i13, real_offset) orelse
return self.fail("TODO larger stack offsets", .{});
try self.genLoad(reg, .sp, i13, simm13, ty.abiSize(self.target.*));
},
@ -2466,7 +2466,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
},
.register => |reg| {
const real_offset = stack_offset + abi.stack_bias + abi.stack_reserved_area;
const simm13 = math.cast(i13, real_offset) catch
const simm13 = math.cast(i13, real_offset) orelse
return self.fail("TODO larger stack offsets", .{});
return self.genStore(reg, .sp, i13, simm13, abi_size);
},


@ -519,14 +519,14 @@ fn optimalBranchType(emit: *Emit, tag: Mir.Inst.Tag, offset: i64) !BranchType {
.bpcc => {
if (std.math.cast(i21, offset)) |_| {
return BranchType.bpcc;
} else |_| {
} else {
return emit.fail("TODO support BPcc branches larger than +-1 MiB", .{});
}
},
.bpr => {
if (std.math.cast(i18, offset)) |_| {
return BranchType.bpr;
} else |_| {
} else {
return emit.fail("TODO support BPr branches larger than +-128 KiB", .{});
}
},


@ -1163,7 +1163,7 @@ fn allocStack(self: *Self, ty: Type) !WValue {
try self.initializeStack();
}
const abi_size = std.math.cast(u32, ty.abiSize(self.target)) catch {
const abi_size = std.math.cast(u32, ty.abiSize(self.target)) orelse {
const module = self.bin_file.base.options.module.?;
return self.fail("Type {} with ABI size of {d} exceeds stack frame size", .{
ty.fmt(module), ty.abiSize(self.target),
@ -1198,7 +1198,7 @@ fn allocStackPtr(self: *Self, inst: Air.Inst.Index) !WValue {
}
const abi_alignment = ptr_ty.ptrAlignment(self.target);
const abi_size = std.math.cast(u32, pointee_ty.abiSize(self.target)) catch {
const abi_size = std.math.cast(u32, pointee_ty.abiSize(self.target)) orelse {
const module = self.bin_file.base.options.module.?;
return self.fail("Type {} with ABI size of {d} exceeds stack frame size", .{
pointee_ty.fmt(module), pointee_ty.abiSize(self.target),
@ -2695,7 +2695,7 @@ fn airStructFieldPtr(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
const extra = self.air.extraData(Air.StructField, ty_pl.payload);
const struct_ptr = try self.resolveInst(extra.data.struct_operand);
const struct_ty = self.air.typeOf(extra.data.struct_operand).childType();
const offset = std.math.cast(u32, struct_ty.structFieldOffset(extra.data.field_index, self.target)) catch {
const offset = std.math.cast(u32, struct_ty.structFieldOffset(extra.data.field_index, self.target)) orelse {
const module = self.bin_file.base.options.module.?;
return self.fail("Field type '{}' too big to fit into stack frame", .{
struct_ty.structFieldType(extra.data.field_index).fmt(module),
@ -2709,7 +2709,7 @@ fn airStructFieldPtrIndex(self: *Self, inst: Air.Inst.Index, index: u32) InnerEr
const struct_ptr = try self.resolveInst(ty_op.operand);
const struct_ty = self.air.typeOf(ty_op.operand).childType();
const field_ty = struct_ty.structFieldType(index);
const offset = std.math.cast(u32, struct_ty.structFieldOffset(index, self.target)) catch {
const offset = std.math.cast(u32, struct_ty.structFieldOffset(index, self.target)) orelse {
const module = self.bin_file.base.options.module.?;
return self.fail("Field type '{}' too big to fit into stack frame", .{
field_ty.fmt(module),
@ -2737,7 +2737,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
const field_index = struct_field.field_index;
const field_ty = struct_ty.structFieldType(field_index);
if (!field_ty.hasRuntimeBitsIgnoreComptime()) return WValue{ .none = {} };
const offset = std.math.cast(u32, struct_ty.structFieldOffset(field_index, self.target)) catch {
const offset = std.math.cast(u32, struct_ty.structFieldOffset(field_index, self.target)) orelse {
const module = self.bin_file.base.options.module.?;
return self.fail("Field type '{}' too big to fit into stack frame", .{field_ty.fmt(module)});
};
@ -3193,7 +3193,7 @@ fn airOptionalPayloadPtrSet(self: *Self, inst: Air.Inst.Index) InnerError!WValue
return operand;
}
const offset = std.math.cast(u32, opt_ty.abiSize(self.target) - payload_ty.abiSize(self.target)) catch {
const offset = std.math.cast(u32, opt_ty.abiSize(self.target) - payload_ty.abiSize(self.target)) orelse {
const module = self.bin_file.base.options.module.?;
return self.fail("Optional type {} too big to fit into stack frame", .{opt_ty.fmt(module)});
};
@ -3223,7 +3223,7 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
if (op_ty.optionalReprIsPayload()) {
return operand;
}
const offset = std.math.cast(u32, op_ty.abiSize(self.target) - payload_ty.abiSize(self.target)) catch {
const offset = std.math.cast(u32, op_ty.abiSize(self.target) - payload_ty.abiSize(self.target)) orelse {
const module = self.bin_file.base.options.module.?;
return self.fail("Optional type {} too big to fit into stack frame", .{op_ty.fmt(module)});
};


@ -854,7 +854,7 @@ fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 {
return self.allocMem(inst, @sizeOf(usize), @alignOf(usize));
}
const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) catch {
const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) orelse {
const mod = self.bin_file.options.module.?;
return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)});
};
@ -865,7 +865,7 @@ fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 {
fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue {
const elem_ty = self.air.typeOfIndex(inst);
const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) catch {
const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) orelse {
const mod = self.bin_file.options.module.?;
return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)});
};


@ -166,7 +166,7 @@ pub fn generateSymbol(
});
if (typed_value.val.isUndefDeep()) {
const abi_size = try math.cast(usize, typed_value.ty.abiSize(target));
const abi_size = math.cast(usize, typed_value.ty.abiSize(target)) orelse return error.Overflow;
try code.appendNTimes(0xaa, abi_size);
return Result{ .appended = {} };
}
@ -452,7 +452,7 @@ pub fn generateSymbol(
if (info.bits > 64) {
var bigint_buffer: Value.BigIntSpace = undefined;
const bigint = typed_value.val.toBigInt(&bigint_buffer, target);
const abi_size = try math.cast(usize, typed_value.ty.abiSize(target));
const abi_size = math.cast(usize, typed_value.ty.abiSize(target)) orelse return error.Overflow;
const start = code.items.len;
try code.resize(start + abi_size);
bigint.writeTwosComplement(code.items[start..][0..abi_size], info.bits, abi_size, endian);
@ -571,7 +571,7 @@ pub fn generateSymbol(
// Pad struct members if required
const padded_field_end = typed_value.ty.structFieldOffset(index + 1, target);
const padding = try math.cast(usize, padded_field_end - unpadded_field_end);
const padding = math.cast(usize, padded_field_end - unpadded_field_end) orelse return error.Overflow;
if (padding > 0) {
try code.writer().writeByteNTimes(0, padding);
@ -611,7 +611,7 @@ pub fn generateSymbol(
assert(union_ty.haveFieldTypes());
const field_ty = union_ty.fields.values()[field_index].ty;
if (!field_ty.hasRuntimeBits()) {
try code.writer().writeByteNTimes(0xaa, try math.cast(usize, layout.payload_size));
try code.writer().writeByteNTimes(0xaa, math.cast(usize, layout.payload_size) orelse return error.Overflow);
} else {
switch (try generateSymbol(bin_file, src_loc, .{
.ty = field_ty,
@ -624,7 +624,7 @@ pub fn generateSymbol(
.fail => |em| return Result{ .fail = em },
}
const padding = try math.cast(usize, layout.payload_size - field_ty.abiSize(target));
const padding = math.cast(usize, layout.payload_size - field_ty.abiSize(target)) orelse return error.Overflow;
if (padding > 0) {
try code.writer().writeByteNTimes(0, padding);
}
@ -649,8 +649,8 @@ pub fn generateSymbol(
var opt_buf: Type.Payload.ElemType = undefined;
const payload_type = typed_value.ty.optionalChild(&opt_buf);
const is_pl = !typed_value.val.isNull();
const abi_size = try math.cast(usize, typed_value.ty.abiSize(target));
const offset = abi_size - try math.cast(usize, payload_type.abiSize(target));
const abi_size = math.cast(usize, typed_value.ty.abiSize(target)) orelse return error.Overflow;
const offset = abi_size - (math.cast(usize, payload_type.abiSize(target)) orelse return error.Overflow);
if (!payload_type.hasRuntimeBits()) {
try code.writer().writeByteNTimes(@boolToInt(is_pl), abi_size);
@ -758,7 +758,7 @@ pub fn generateSymbol(
}
const unpadded_end = code.items.len - begin;
const padded_end = mem.alignForwardGeneric(u64, unpadded_end, abi_align);
const padding = try math.cast(usize, padded_end - unpadded_end);
const padding = math.cast(usize, padded_end - unpadded_end) orelse return error.Overflow;
if (padding > 0) {
try code.writer().writeByteNTimes(0, padding);
@ -780,7 +780,7 @@ pub fn generateSymbol(
}
const unpadded_end = code.items.len - begin;
const padded_end = mem.alignForwardGeneric(u64, unpadded_end, abi_align);
const padding = try math.cast(usize, padded_end - unpadded_end);
const padding = math.cast(usize, padded_end - unpadded_end) orelse return error.Overflow;
if (padding > 0) {
try code.writer().writeByteNTimes(0, padding);


@ -2022,7 +2022,7 @@ pub fn getMatchingSection(self: *MachO, sect: macho.section_64) !?MatchingSectio
}
pub fn createEmptyAtom(self: *MachO, local_sym_index: u32, size: u64, alignment: u32) !*Atom {
const size_usize = try math.cast(usize, size);
const size_usize = math.cast(usize, size) orelse return error.Overflow;
const atom = try self.base.allocator.create(Atom);
errdefer self.base.allocator.destroy(atom);
atom.* = Atom.empty;
@ -2157,7 +2157,7 @@ fn writeAllAtoms(self: *MachO) !void {
var buffer = std.ArrayList(u8).init(self.base.allocator);
defer buffer.deinit();
try buffer.ensureTotalCapacity(try math.cast(usize, sect.size));
try buffer.ensureTotalCapacity(math.cast(usize, sect.size) orelse return error.Overflow);
log.debug("writing atoms in {s},{s}", .{ sect.segName(), sect.sectName() });
@ -2170,7 +2170,7 @@ fn writeAllAtoms(self: *MachO) !void {
const padding_size: usize = if (atom.next) |next| blk: {
const next_sym = self.locals.items[next.local_sym_index];
const size = next_sym.n_value - (atom_sym.n_value + atom.size);
break :blk try math.cast(usize, size);
break :blk math.cast(usize, size) orelse return error.Overflow;
} else 0;
log.debug(" (adding atom {s} to buffer: {})", .{ self.getString(atom_sym.n_strx), atom_sym });
@ -2507,7 +2507,7 @@ pub fn createStubHelperAtom(self: *MachO) !*Atom {
.aarch64 => {
const literal = blk: {
const div_res = try math.divExact(u64, stub_size - @sizeOf(u32), 4);
break :blk try math.cast(u18, div_res);
break :blk math.cast(u18, div_res) orelse return error.Overflow;
};
// ldr w16, literal
mem.writeIntLittle(u32, atom.code.items[0..4], aarch64.Instruction.ldrLiteral(
@ -5080,7 +5080,7 @@ fn growSegment(self: *MachO, seg_id: u16, new_size: u64) !void {
self.base.file.?,
next_seg.inner.fileoff,
next_seg.inner.fileoff + offset_amt,
try math.cast(usize, next_seg.inner.filesize),
math.cast(usize, next_seg.inner.filesize) orelse return error.Overflow,
);
next_seg.inner.fileoff += offset_amt;
@ -5165,7 +5165,7 @@ fn growSection(self: *MachO, match: MatchingSection, new_size: u32) !void {
self.base.file.?,
next_sect.offset,
next_sect.offset + offset_amt,
try math.cast(usize, total_size),
math.cast(usize, total_size) orelse return error.Overflow,
);
var next = match.sect + 1;
@ -5950,7 +5950,7 @@ fn writeDices(self: *MachO) !void {
while (true) {
if (atom.dices.items.len > 0) {
const sym = self.locals.items[atom.local_sym_index];
const base_off = try math.cast(u32, sym.n_value - text_sect.addr + text_sect.offset);
const base_off = math.cast(u32, sym.n_value - text_sect.addr + text_sect.offset) orelse return error.Overflow;
try buf.ensureUnusedCapacity(atom.dices.items.len * @sizeOf(macho.data_in_code_entry));
for (atom.dices.items) |dice| {


@ -763,17 +763,15 @@ pub fn resolveRelocs(self: *Atom, macho_file: *MachO) !void {
const displacement = math.cast(
i28,
@intCast(i64, target_addr) - @intCast(i64, source_addr),
) catch |err| switch (err) {
error.Overflow => {
log.err("jump too big to encode as i28 displacement value", .{});
log.err(" (target - source) = displacement => 0x{x} - 0x{x} = 0x{x}", .{
target_addr,
source_addr,
@intCast(i64, target_addr) - @intCast(i64, source_addr),
});
log.err(" TODO implement branch islands to extend jump distance for arm64", .{});
return error.TODOImplementBranchIslands;
},
) orelse {
log.err("jump too big to encode as i28 displacement value", .{});
log.err(" (target - source) = displacement => 0x{x} - 0x{x} = 0x{x}", .{
target_addr,
source_addr,
@intCast(i64, target_addr) - @intCast(i64, source_addr),
});
log.err(" TODO implement branch islands to extend jump distance for arm64", .{});
return error.TODOImplementBranchIslands;
};
const code = self.code.items[rel.offset..][0..4];
var inst = aarch64.Instruction{
@ -915,7 +913,7 @@ pub fn resolveRelocs(self: *Atom, macho_file: *MachO) !void {
mem.writeIntLittle(u32, code, inst.toU32());
},
.ARM64_RELOC_POINTER_TO_GOT => {
const result = try math.cast(i32, @intCast(i64, target_addr) - @intCast(i64, source_addr));
const result = math.cast(i32, @intCast(i64, target_addr) - @intCast(i64, source_addr)) orelse return error.Overflow;
mem.writeIntLittle(u32, self.code.items[rel.offset..][0..4], @bitCast(u32, result));
},
.ARM64_RELOC_UNSIGNED => {
@ -945,17 +943,17 @@ pub fn resolveRelocs(self: *Atom, macho_file: *MachO) !void {
.x86_64 => {
switch (@intToEnum(macho.reloc_type_x86_64, rel.@"type")) {
.X86_64_RELOC_BRANCH => {
const displacement = try math.cast(
const displacement = math.cast(
i32,
@intCast(i64, target_addr) - @intCast(i64, source_addr) - 4 + rel.addend,
);
) orelse return error.Overflow;
mem.writeIntLittle(u32, self.code.items[rel.offset..][0..4], @bitCast(u32, displacement));
},
.X86_64_RELOC_GOT, .X86_64_RELOC_GOT_LOAD => {
const displacement = try math.cast(
const displacement = math.cast(
i32,
@intCast(i64, target_addr) - @intCast(i64, source_addr) - 4 + rel.addend,
);
) orelse return error.Overflow;
mem.writeIntLittle(u32, self.code.items[rel.offset..][0..4], @bitCast(u32, displacement));
},
.X86_64_RELOC_TLV => {
@ -963,10 +961,10 @@ pub fn resolveRelocs(self: *Atom, macho_file: *MachO) !void {
// We need to rewrite the opcode from movq to leaq.
self.code.items[rel.offset - 2] = 0x8d;
}
const displacement = try math.cast(
const displacement = math.cast(
i32,
@intCast(i64, target_addr) - @intCast(i64, source_addr) - 4 + rel.addend,
);
) orelse return error.Overflow;
mem.writeIntLittle(u32, self.code.items[rel.offset..][0..4], @bitCast(u32, displacement));
},
.X86_64_RELOC_SIGNED,
@ -982,10 +980,10 @@ pub fn resolveRelocs(self: *Atom, macho_file: *MachO) !void {
else => unreachable,
};
const actual_target_addr = @intCast(i64, target_addr) + rel.addend;
const displacement = try math.cast(
const displacement = math.cast(
i32,
actual_target_addr - @intCast(i64, source_addr + correction + 4),
);
) orelse return error.Overflow;
mem.writeIntLittle(u32, self.code.items[rel.offset..][0..4], @bitCast(u32, displacement));
},
.X86_64_RELOC_UNSIGNED => {


@ -616,7 +616,7 @@ fn writeSymbolTable(self: *DebugSymbols) !void {
self.file,
dwarf_seg.inner.fileoff,
dwarf_seg.inner.fileoff + diff,
try math.cast(usize, dwarf_seg.inner.filesize),
math.cast(usize, dwarf_seg.inner.filesize) orelse return error.Overflow,
);
const old_seg_fileoff = dwarf_seg.inner.fileoff;
@ -669,7 +669,7 @@ fn writeStringTable(self: *DebugSymbols) !void {
self.file,
dwarf_seg.inner.fileoff,
dwarf_seg.inner.fileoff + diff,
try math.cast(usize, dwarf_seg.inner.filesize),
math.cast(usize, dwarf_seg.inner.filesize) orelse return error.Overflow,
);
const old_seg_fileoff = dwarf_seg.inner.fileoff;


@ -83,7 +83,7 @@ pub const Id = struct {
switch (version) {
.int => |int| {
var out: u32 = 0;
const major = try math.cast(u16, int);
const major = math.cast(u16, int) orelse return error.Overflow;
out += @intCast(u32, major) << 16;
return out;
},


@ -504,7 +504,7 @@ pub fn parseIntoAtoms(self: *Object, allocator: Allocator, macho_file: *MachO) !
for (dices) |dice| {
atom.dices.appendAssumeCapacity(.{
.offset = dice.offset - try math.cast(u32, sect.addr),
.offset = dice.offset - (math.cast(u32, sect.addr) orelse return error.Overflow),
.length = dice.length,
.kind = dice.kind,
});


@ -316,7 +316,7 @@ pub const Yaml = struct {
fn parseValue(self: *Yaml, comptime T: type, value: Value) Error!T {
return switch (@typeInfo(T)) {
.Int => math.cast(T, try value.asInt()),
.Int => math.cast(T, try value.asInt()) orelse error.Overflow,
.Float => math.lossyCast(T, try value.asFloat()),
.Struct => self.parseStruct(T, try value.asMap()),
.Union => self.parseUnion(T, value),


@ -4067,7 +4067,7 @@ fn fmtPathFile(
const source_code = try readSourceFileToEndAlloc(
fmt.gpa,
&source_file,
std.math.cast(usize, stat.size) catch return error.FileTooBig,
std.math.cast(usize, stat.size) orelse return error.FileTooBig,
);
defer fmt.gpa.free(source_code);


@ -4526,9 +4526,7 @@ fn transCreateNodeBoolInfixOp(
}
fn transCreateNodeAPInt(c: *Context, int: *const clang.APSInt) !Node {
const num_limbs = math.cast(usize, int.getNumWords()) catch |err| switch (err) {
error.Overflow => return error.OutOfMemory,
};
const num_limbs = math.cast(usize, int.getNumWords()) orelse return error.OutOfMemory;
var aps_int = int;
const is_negative = int.isSigned() and int.isNegative();
if (is_negative) aps_int = aps_int.negate();
@ -5627,12 +5625,12 @@ fn parseCNumLit(c: *Context, m: *MacroCtx) ParseError!Node {
// make the output less noisy by skipping promoteIntLiteral where
// it's guaranteed to not be required because of C standard type constraints
const guaranteed_to_fit = switch (suffix) {
.none => !meta.isError(math.cast(i16, value)),
.u => !meta.isError(math.cast(u16, value)),
.l => !meta.isError(math.cast(i32, value)),
.lu => !meta.isError(math.cast(u32, value)),
.ll => !meta.isError(math.cast(i64, value)),
.llu => !meta.isError(math.cast(u64, value)),
.none => math.cast(i16, value) != null,
.u => math.cast(u16, value) != null,
.l => math.cast(i32, value) != null,
.lu => math.cast(u32, value) != null,
.ll => math.cast(i64, value) != null,
.llu => math.cast(u64, value) != null,
.f => unreachable,
};
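
The final hunk replaces `meta.isError(math.cast(...))` with a plain null check; "does this value fit?" now reads as below (fitsIn is an illustrative helper, not from this commit):

const std = @import("std");

fn fitsIn(comptime T: type, value: i128) bool {
    return std.math.cast(T, value) != null;
}

test "fitsIn" {
    try std.testing.expect(fitsIn(i16, 32767));
    try std.testing.expect(!fitsIn(i16, 32768));
}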