Merge pull request #4516 from xackus/remove-bytes-to-slice

remove @bytesToSlice, @sliceToBytes from the language
Andrew Kelley 2020-02-24 13:51:47 -05:00 committed by GitHub
commit 1d06c82c3b
28 changed files with 225 additions and 589 deletions
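
The diff below is a mechanical migration: every use of the two removed builtins is rewritten to the std.mem.bytesAsSlice and std.mem.sliceAsBytes functions that this commit adds to std/mem.zig. A minimal before/after sketch of the rewrite (illustrative only, not taken from the diff):

const std = @import("std");

test "migration sketch: builtins to std.mem" {
    var bytes align(@alignOf(u32)) = [_]u8{ 0x12, 0x12, 0x12, 0x12 };

    // Previously: const words = @bytesToSlice(u32, bytes[0..]);
    const words = std.mem.bytesAsSlice(u32, bytes[0..]);
    std.testing.expect(words[0] == 0x12121212);

    // Previously: const raw = @sliceToBytes(words);
    const raw = std.mem.sliceAsBytes(words);
    std.testing.expect(raw.len == 4);
}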

View File

@ -2025,7 +2025,8 @@ test "volatile" {
conversions are not possible.
</p>
{#code_begin|test#}
const assert = @import("std").debug.assert;
const std = @import("std");
const assert = std.debug.assert;
test "pointer casting" {
const bytes align(@alignOf(u32)) = [_]u8{ 0x12, 0x12, 0x12, 0x12 };
@ -2034,7 +2035,7 @@ test "pointer casting" {
// Even this example is contrived - there are better ways to do the above than
// pointer casting. For example, using a slice narrowing cast:
const u32_value = @bytesToSlice(u32, bytes[0..])[0];
const u32_value = std.mem.bytesAsSlice(u32, bytes[0..])[0];
assert(u32_value == 0x12121212);
// And even another way, the most straightforward way to do it:
@ -2114,16 +2115,16 @@ test "function alignment" {
{#link|safety check|Incorrect Pointer Alignment#}:
</p>
{#code_begin|test_safety|incorrect alignment#}
const assert = @import("std").debug.assert;
const std = @import("std");
test "pointer alignment safety" {
var array align(4) = [_]u32{ 0x11111111, 0x11111111 };
const bytes = @sliceToBytes(array[0..]);
assert(foo(bytes) == 0x11111111);
const bytes = std.mem.sliceAsBytes(array[0..]);
std.debug.assert(foo(bytes) == 0x11111111);
}
fn foo(bytes: []u8) u32 {
const slice4 = bytes[1..5];
const int_slice = @bytesToSlice(u32, @alignCast(4, slice4));
const int_slice = std.mem.bytesAsSlice(u32, @alignCast(4, slice4));
return int_slice[0];
}
{#code_end#}
@ -2249,7 +2250,7 @@ test "slice widening" {
// Zig supports slice widening and slice narrowing. Cast a slice of u8
// to a slice of anything else, and Zig will perform the length conversion.
const array align(@alignOf(u32)) = [_]u8{ 0x12, 0x12, 0x12, 0x12, 0x13, 0x13, 0x13, 0x13 };
const slice = @bytesToSlice(u32, array[0..]);
const slice = mem.bytesAsSlice(u32, array[0..]);
assert(slice.len == 2);
assert(slice[0] == 0x12121212);
assert(slice[1] == 0x13131313);
@ -5186,7 +5187,6 @@ test "coercion of zero bit types" {
<li>{#link|@bitCast#} - change type but maintain bit representation</li>
<li>{#link|@alignCast#} - make a pointer have more alignment</li>
<li>{#link|@boolToInt#} - convert true to 1 and false to 0</li>
<li>{#link|@bytesToSlice#} - convert a slice of bytes to a slice of another type</li>
<li>{#link|@enumToInt#} - obtain the integer tag value of an enum or tagged union</li>
<li>{#link|@errSetCast#} - convert to a smaller error set</li>
<li>{#link|@errorToInt#} - obtain the integer value of an error code</li>
@ -5199,7 +5199,6 @@ test "coercion of zero bit types" {
<li>{#link|@intToPtr#} - convert an address to a pointer</li>
<li>{#link|@ptrCast#} - convert between pointer types</li>
<li>{#link|@ptrToInt#} - obtain the address of a pointer</li>
<li>{#link|@sliceToBytes#} - convert a slice of anything to a slice of bytes</li>
<li>{#link|@truncate#} - convert between integer types, chopping off bits</li>
</ul>
{#header_close#}
@ -6929,18 +6928,6 @@ async fn func(y: *i32) void {
{#see_also|@bitOffsetOf#}
{#header_close#}
{#header_open|@bytesToSlice#}
<pre>{#syntax#}@bytesToSlice(comptime Element: type, bytes: []u8) []Element{#endsyntax#}</pre>
<p>
Converts a slice of bytes or array of bytes into a slice of {#syntax#}Element{#endsyntax#}.
The resulting slice has the same {#link|pointer|Pointers#} properties as the parameter.
</p>
<p>
Attempting to convert a number of bytes with a length that does not evenly divide into a slice of
elements results in safety-protected {#link|Undefined Behavior#}.
</p>
{#header_close#}
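
After this change, the failure mode described above belongs to std.mem.bytesAsSlice instead of a dedicated builtin: a pointer to an array whose byte count is not a multiple of @sizeOf(Element) is rejected at compile time by BytesAsSliceReturnType, and a runtime slice of mismatched length hits the @divExact safety check inside bytesAsSlice (the runtime-safety test updated later in this diff still expects a panic). A sketch mirroring that test, not taken verbatim from the commit:

const std = @import("std");

pub fn main() !void {
    // 5 bytes cannot be reinterpreted as []i32; with runtime safety enabled
    // the @divExact inside bytesAsSlice panics, replacing the old
    // "slice widening size mismatch" check that this commit removes.
    const x = widenSlice(&[_]u8{ 1, 2, 3, 4, 5 });
    if (x.len == 0) return error.Whatever;
}

fn widenSlice(slice: []align(1) const u8) []align(1) const i32 {
    return std.mem.bytesAsSlice(i32, slice);
}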
{#header_open|@call#}
<pre>{#syntax#}@call(options: std.builtin.CallOptions, function: var, args: var) var{#endsyntax#}</pre>
<p>
@ -8101,14 +8088,6 @@ test "@setRuntimeSafety" {
{#see_also|@bitSizeOf|@typeInfo#}
{#header_close#}
{#header_open|@sliceToBytes#}
<pre>{#syntax#}@sliceToBytes(value: var) []u8{#endsyntax#}</pre>
<p>
Converts a slice or array to a slice of {#syntax#}u8{#endsyntax#}. The resulting slice has the same
{#link|pointer|Pointers#} properties as the parameter.
</p>
{#header_close#}
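
The replacement for this builtin is std.mem.sliceAsBytes, added later in this diff. As the removed paragraph notes for the builtin, the resulting byte slice keeps the pointer properties of the input (mutability and alignment), which is what SliceAsBytesReturnType computes. An illustrative sketch, not taken from the commit:

const std = @import("std");

test "sliceAsBytes keeps pointer properties (sketch)" {
    var words = [_]u32{ 0x11111111, 0x22222222 };
    const bytes = std.mem.sliceAsBytes(words[0..]);
    std.testing.expect(bytes.len == 8);
    // The byte slice inherits the u32 alignment of the source slice.
    comptime std.testing.expect(@TypeOf(bytes) == []align(@alignOf(u32)) u8);
    // It is also writable, because the source elements were mutable.
    bytes[0] = 0x22;
    std.testing.expect(words[0] != 0x11111111);
}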
{#header_open|@splat#}
<pre>{#syntax#}@splat(comptime len: u32, scalar: var) @Vector(len, @TypeOf(scalar)){#endsyntax#}</pre>
<p>
@ -8919,25 +8898,6 @@ pub fn main() void {
var b: u32 = 3;
var c = @divExact(a, b);
std.debug.warn("value: {}\n", .{c});
}
{#code_end#}
{#header_close#}
{#header_open|Slice Widen Remainder#}
<p>At compile-time:</p>
{#code_begin|test_err|unable to convert#}
comptime {
var bytes = [5]u8{ 1, 2, 3, 4, 5 };
var slice = @bytesToSlice(u32, bytes[0..]);
}
{#code_end#}
<p>At runtime:</p>
{#code_begin|exe_err#}
const std = @import("std");
pub fn main() void {
var bytes = [5]u8{ 1, 2, 3, 4, 5 };
var slice = @bytesToSlice(u32, bytes[0..]);
std.debug.warn("value: {}\n", .{slice[0]});
}
{#code_end#}
{#header_close#}
@ -9119,14 +9079,15 @@ comptime {
{#code_end#}
<p>At runtime:</p>
{#code_begin|exe_err#}
const mem = @import("std").mem;
pub fn main() !void {
var array align(4) = [_]u32{ 0x11111111, 0x11111111 };
const bytes = @sliceToBytes(array[0..]);
const bytes = mem.sliceAsBytes(array[0..]);
if (foo(bytes) != 0x11111111) return error.Wrong;
}
fn foo(bytes: []u8) u32 {
const slice4 = bytes[1..5];
const int_slice = @bytesToSlice(u32, @alignCast(4, slice4));
const int_slice = mem.bytesAsSlice(u32, @alignCast(4, slice4));
return int_slice[0];
}
{#code_end#}

View File

@ -24,11 +24,11 @@ pub const State = struct {
const Self = @This();
pub fn toSlice(self: *Self) []u8 {
return @sliceToBytes(self.data[0..]);
return mem.sliceAsBytes(self.data[0..]);
}
pub fn toSliceConst(self: *Self) []const u8 {
return @sliceToBytes(self.data[0..]);
return mem.sliceAsBytes(self.data[0..]);
}
pub fn permute(self: *Self) void {

View File

@ -72,7 +72,7 @@ pub const NullTerminated2DArray = struct {
errdefer allocator.free(buf);
var write_index = index_size;
const index_buf = @bytesToSlice(?[*]u8, buf);
const index_buf = mem.bytesAsSlice(?[*]u8, buf);
var i: usize = 0;
for (slices) |slice| {

View File

@ -101,7 +101,7 @@ pub fn LinearFifo(
}
}
{ // set unused area to undefined
const unused = @sliceToBytes(self.buf[self.count..]);
const unused = mem.sliceAsBytes(self.buf[self.count..]);
@memset(unused.ptr, undefined, unused.len);
}
}
@ -166,12 +166,12 @@ pub fn LinearFifo(
{ // set old range to undefined. Note: may be wrapped around
const slice = self.readableSliceMut(0);
if (slice.len >= count) {
const unused = @sliceToBytes(slice[0..count]);
const unused = mem.sliceAsBytes(slice[0..count]);
@memset(unused.ptr, undefined, unused.len);
} else {
const unused = @sliceToBytes(slice[0..]);
const unused = mem.sliceAsBytes(slice[0..]);
@memset(unused.ptr, undefined, unused.len);
const unused2 = @sliceToBytes(self.readableSliceMut(slice.len)[0 .. count - slice.len]);
const unused2 = mem.sliceAsBytes(self.readableSliceMut(slice.len)[0 .. count - slice.len]);
@memset(unused2.ptr, undefined, unused2.len);
}
}

View File

@ -26,7 +26,7 @@ fn eqlString(a: []const u16, b: []const u16) bool {
}
fn hashString(s: []const u16) u32 {
return @truncate(u32, std.hash.Wyhash.hash(0, @sliceToBytes(s)));
return @truncate(u32, std.hash.Wyhash.hash(0, mem.sliceAsBytes(s)));
}
const WatchEventError = error{

View File

@ -283,14 +283,14 @@ const WasmPageAllocator = struct {
fn getBit(self: FreeBlock, idx: usize) PageStatus {
const bit_offset = 0;
return @intToEnum(PageStatus, Io.get(@sliceToBytes(self.data), idx, bit_offset));
return @intToEnum(PageStatus, Io.get(mem.sliceAsBytes(self.data), idx, bit_offset));
}
fn setBits(self: FreeBlock, start_idx: usize, len: usize, val: PageStatus) void {
const bit_offset = 0;
var i: usize = 0;
while (i < len) : (i += 1) {
Io.set(@sliceToBytes(self.data), start_idx + i, bit_offset, @enumToInt(val));
Io.set(mem.sliceAsBytes(self.data), start_idx + i, bit_offset, @enumToInt(val));
}
}
@ -552,7 +552,7 @@ pub const ArenaAllocator = struct {
if (len >= actual_min_size) break;
}
const buf = try self.child_allocator.alignedAlloc(u8, @alignOf(BufNode), len);
const buf_node_slice = @bytesToSlice(BufNode, buf[0..@sizeOf(BufNode)]);
const buf_node_slice = mem.bytesAsSlice(BufNode, buf[0..@sizeOf(BufNode)]);
const buf_node = &buf_node_slice[0];
buf_node.* = BufNode{
.data = buf,

View File

@ -235,7 +235,7 @@ pub fn InStream(comptime ReadError: type) type {
// Only extern and packed structs have defined in-memory layout.
comptime assert(@typeInfo(T).Struct.layout != builtin.TypeInfo.ContainerLayout.Auto);
var res: [1]T = undefined;
try self.readNoEof(@sliceToBytes(res[0..]));
try self.readNoEof(mem.sliceAsBytes(res[0..]));
return res[0];
}

View File

@ -132,7 +132,7 @@ pub const Allocator = struct {
// their own frame with @Frame(func).
return @intToPtr([*]T, @ptrToInt(byte_slice.ptr))[0..n];
} else {
return @bytesToSlice(T, @alignCast(a, byte_slice));
return mem.bytesAsSlice(T, @alignCast(a, byte_slice));
}
}
@ -173,7 +173,7 @@ pub const Allocator = struct {
return @as([*]align(new_alignment) T, undefined)[0..0];
}
const old_byte_slice = @sliceToBytes(old_mem);
const old_byte_slice = mem.sliceAsBytes(old_mem);
const byte_count = math.mul(usize, @sizeOf(T), new_n) catch return Error.OutOfMemory;
// Note: can't set shrunk memory to undefined as memory shouldn't be modified on realloc failure
const byte_slice = try self.reallocFn(self, old_byte_slice, Slice.alignment, byte_count, new_alignment);
@ -181,7 +181,7 @@ pub const Allocator = struct {
if (new_n > old_mem.len) {
@memset(byte_slice.ptr + old_byte_slice.len, undefined, byte_slice.len - old_byte_slice.len);
}
return @bytesToSlice(T, @alignCast(new_alignment, byte_slice));
return mem.bytesAsSlice(T, @alignCast(new_alignment, byte_slice));
}
/// Prefer calling realloc to shrink if you can tolerate failure, such as
@ -221,18 +221,18 @@ pub const Allocator = struct {
// new_n <= old_mem.len and the multiplication didn't overflow for that operation.
const byte_count = @sizeOf(T) * new_n;
const old_byte_slice = @sliceToBytes(old_mem);
const old_byte_slice = mem.sliceAsBytes(old_mem);
@memset(old_byte_slice.ptr + byte_count, undefined, old_byte_slice.len - byte_count);
const byte_slice = self.shrinkFn(self, old_byte_slice, Slice.alignment, byte_count, new_alignment);
assert(byte_slice.len == byte_count);
return @bytesToSlice(T, @alignCast(new_alignment, byte_slice));
return mem.bytesAsSlice(T, @alignCast(new_alignment, byte_slice));
}
/// Free an array allocated with `alloc`. To free a single item,
/// see `destroy`.
pub fn free(self: *Allocator, memory: var) void {
const Slice = @typeInfo(@TypeOf(memory)).Pointer;
const bytes = @sliceToBytes(memory);
const bytes = mem.sliceAsBytes(memory);
const bytes_len = bytes.len + if (Slice.sentinel != null) @sizeOf(Slice.child) else 0;
if (bytes_len == 0) return;
const non_const_ptr = @intToPtr([*]u8, @ptrToInt(bytes.ptr));
@ -1486,6 +1486,162 @@ test "bytesToValue" {
testing.expect(deadbeef == @as(u32, 0xDEADBEEF));
}
//TODO copy also is_volatile, etc. I tried to use @typeInfo, modify child type, use @Type, but ran into issues.
fn BytesAsSliceReturnType(comptime T: type, comptime bytesType: type) type {
if (!(trait.isSlice(bytesType) and meta.Child(bytesType) == u8) and !(trait.isPtrTo(.Array)(bytesType) and meta.Child(meta.Child(bytesType)) == u8)) {
@compileError("expected []u8 or *[_]u8, passed " ++ @typeName(bytesType));
}
if (trait.isPtrTo(.Array)(bytesType) and @typeInfo(meta.Child(bytesType)).Array.len % @sizeOf(T) != 0) {
@compileError("number of bytes in " ++ @typeName(bytesType) ++ " is not divisible by size of " ++ @typeName(T));
}
const alignment = meta.alignment(bytesType);
return if (trait.isConstPtr(bytesType)) []align(alignment) const T else []align(alignment) T;
}
pub fn bytesAsSlice(comptime T: type, bytes: var) BytesAsSliceReturnType(T, @TypeOf(bytes)) {
const bytesSlice = if (comptime trait.isPtrTo(.Array)(@TypeOf(bytes))) bytes[0..] else bytes;
// let's not give an undefined pointer to @ptrCast
// it may be equal to zero and fail a null check
if (bytesSlice.len == 0) {
return &[0]T{};
}
const bytesType = @TypeOf(bytesSlice);
const alignment = comptime meta.alignment(bytesType);
const castTarget = if (comptime trait.isConstPtr(bytesType)) [*]align(alignment) const T else [*]align(alignment) T;
return @ptrCast(castTarget, bytesSlice.ptr)[0..@divExact(bytes.len, @sizeOf(T))];
}
test "bytesAsSlice" {
const bytes = [_]u8{ 0xDE, 0xAD, 0xBE, 0xEF };
const slice = bytesAsSlice(u16, bytes[0..]);
testing.expect(slice.len == 2);
testing.expect(bigToNative(u16, slice[0]) == 0xDEAD);
testing.expect(bigToNative(u16, slice[1]) == 0xBEEF);
}
test "bytesAsSlice keeps pointer alignment" {
var bytes = [_]u8{ 0x01, 0x02, 0x03, 0x04 };
const numbers = bytesAsSlice(u32, bytes[0..]);
comptime testing.expect(@TypeOf(numbers) == []align(@alignOf(@TypeOf(bytes))) u32);
}
test "bytesAsSlice on a packed struct" {
const F = packed struct {
a: u8,
};
var b = [1]u8{9};
var f = bytesAsSlice(F, &b);
testing.expect(f[0].a == 9);
}
test "bytesAsSlice with specified alignment" {
var bytes align(4) = [_]u8{
0x33,
0x33,
0x33,
0x33,
};
const slice: []u32 = std.mem.bytesAsSlice(u32, bytes[0..]);
testing.expect(slice[0] == 0x33333333);
}
//TODO copy also is_volatile, etc. I tried to use @typeInfo, modify child type, use @Type, but ran into issues.
fn SliceAsBytesReturnType(comptime sliceType: type) type {
if (!trait.isSlice(sliceType) and !trait.isPtrTo(.Array)(sliceType)) {
@compileError("expected []T or *[_]T, passed " ++ @typeName(sliceType));
}
const alignment = meta.alignment(sliceType);
return if (trait.isConstPtr(sliceType)) []align(alignment) const u8 else []align(alignment) u8;
}
pub fn sliceAsBytes(slice: var) SliceAsBytesReturnType(@TypeOf(slice)) {
const actualSlice = if (comptime trait.isPtrTo(.Array)(@TypeOf(slice))) slice[0..] else slice;
// let's not give an undefined pointer to @ptrCast
// it may be equal to zero and fail a null check
if (actualSlice.len == 0) {
return &[0]u8{};
}
const sliceType = @TypeOf(actualSlice);
const alignment = comptime meta.alignment(sliceType);
const castTarget = if (comptime trait.isConstPtr(sliceType)) [*]align(alignment) const u8 else [*]align(alignment) u8;
return @ptrCast(castTarget, actualSlice.ptr)[0 .. actualSlice.len * @sizeOf(comptime meta.Child(sliceType))];
}
test "sliceAsBytes" {
const bytes = [_]u16{ 0xDEAD, 0xBEEF };
const slice = sliceAsBytes(bytes[0..]);
testing.expect(slice.len == 4);
testing.expect(eql(u8, slice, switch (builtin.endian) {
.Big => "\xDE\xAD\xBE\xEF",
.Little => "\xAD\xDE\xEF\xBE",
}));
}
test "sliceAsBytes packed struct at runtime and comptime" {
const Foo = packed struct {
a: u4,
b: u4,
};
const S = struct {
fn doTheTest() void {
var foo: Foo = undefined;
var slice = sliceAsBytes(@as(*[1]Foo, &foo)[0..1]);
slice[0] = 0x13;
switch (builtin.endian) {
.Big => {
testing.expect(foo.a == 0x1);
testing.expect(foo.b == 0x3);
},
.Little => {
testing.expect(foo.a == 0x3);
testing.expect(foo.b == 0x1);
},
}
}
};
S.doTheTest();
comptime S.doTheTest();
}
test "sliceAsBytes and bytesAsSlice back" {
testing.expect(@sizeOf(i32) == 4);
var big_thing_array = [_]i32{ 1, 2, 3, 4 };
const big_thing_slice: []i32 = big_thing_array[0..];
const bytes = sliceAsBytes(big_thing_slice);
testing.expect(bytes.len == 4 * 4);
bytes[4] = 0;
bytes[5] = 0;
bytes[6] = 0;
bytes[7] = 0;
testing.expect(big_thing_slice[1] == 0);
const big_thing_again = bytesAsSlice(i32, bytes);
testing.expect(big_thing_again[2] == 3);
big_thing_again[2] = -1;
testing.expect(bytes[8] == math.maxInt(u8));
testing.expect(bytes[9] == math.maxInt(u8));
testing.expect(bytes[10] == math.maxInt(u8));
testing.expect(bytes[11] == math.maxInt(u8));
}
fn SubArrayPtrReturnType(comptime T: type, comptime length: usize) type {
if (trait.isConstPtr(T))
return *const [length]meta.Child(meta.Child(T));

View File

@ -135,6 +135,22 @@ test "std.meta.trait.isPtrTo" {
testing.expect(!isPtrTo(.Struct)(**struct {}));
}
pub fn isSliceOf(comptime id: builtin.TypeId) TraitFn {
const Closure = struct {
pub fn trait(comptime T: type) bool {
if (!comptime isSlice(T)) return false;
return id == @typeId(meta.Child(T));
}
};
return Closure.trait;
}
test "std.meta.trait.isSliceOf" {
testing.expect(!isSliceOf(.Struct)(struct {}));
testing.expect(isSliceOf(.Struct)([]struct {}));
testing.expect(!isSliceOf(.Struct)([][]struct {}));
}
///////////Strait trait Fns
//@TODO:

View File

@ -120,7 +120,7 @@ pub const Address = extern union {
ip_slice[10] = 0xff;
ip_slice[11] = 0xff;
const ptr = @sliceToBytes(@as(*const [1]u32, &addr)[0..]);
const ptr = mem.sliceAsBytes(@as(*const [1]u32, &addr)[0..]);
ip_slice[12] = ptr[0];
ip_slice[13] = ptr[1];
@ -164,7 +164,7 @@ pub const Address = extern union {
.addr = undefined,
},
};
const out_ptr = @sliceToBytes(@as(*[1]u32, &result.in.addr)[0..]);
const out_ptr = mem.sliceAsBytes(@as(*[1]u32, &result.in.addr)[0..]);
var x: u8 = 0;
var index: u8 = 0;

View File

@ -1852,7 +1852,7 @@ pub fn isCygwinPty(handle: fd_t) bool {
const name_info = @ptrCast(*const windows.FILE_NAME_INFO, &name_info_bytes[0]);
const name_bytes = name_info_bytes[size .. size + @as(usize, name_info.FileNameLength)];
const name_wide = @bytesToSlice(u16, name_bytes);
const name_wide = mem.bytesAsSlice(u16, name_bytes);
return mem.indexOf(u16, name_wide, &[_]u16{ 'm', 's', 'y', 's', '-' }) != null or
mem.indexOf(u16, name_wide, &[_]u16{ '-', 'p', 't', 'y' }) != null;
}

View File

@ -430,7 +430,7 @@ pub fn argsAlloc(allocator: *mem.Allocator) ![][]u8 {
const buf = try allocator.alignedAlloc(u8, @alignOf([]u8), total_bytes);
errdefer allocator.free(buf);
const result_slice_list = @bytesToSlice([]u8, buf[0..slice_list_bytes]);
const result_slice_list = mem.bytesAsSlice([]u8, buf[0..slice_list_bytes]);
const result_contents = buf[slice_list_bytes..];
mem.copy(u8, result_contents, contents_slice);

View File

@ -243,7 +243,7 @@ pub const Utf16LeIterator = struct {
pub fn init(s: []const u16) Utf16LeIterator {
return Utf16LeIterator{
.bytes = @sliceToBytes(s),
.bytes = mem.sliceAsBytes(s),
.i = 0,
};
}
@ -496,7 +496,7 @@ pub fn utf16leToUtf8(utf8: []u8, utf16le: []const u16) !usize {
test "utf16leToUtf8" {
var utf16le: [2]u16 = undefined;
const utf16le_as_bytes = @sliceToBytes(utf16le[0..]);
const utf16le_as_bytes = mem.sliceAsBytes(utf16le[0..]);
{
mem.writeIntSliceLittle(u16, utf16le_as_bytes[0..], 'A');
@ -606,12 +606,12 @@ test "utf8ToUtf16Le" {
{
const length = try utf8ToUtf16Le(utf16le[0..], "𐐷");
testing.expectEqual(@as(usize, 2), length);
testing.expectEqualSlices(u8, "\x01\xd8\x37\xdc", @sliceToBytes(utf16le[0..]));
testing.expectEqualSlices(u8, "\x01\xd8\x37\xdc", mem.sliceAsBytes(utf16le[0..]));
}
{
const length = try utf8ToUtf16Le(utf16le[0..], "\u{10FFFF}");
testing.expectEqual(@as(usize, 2), length);
testing.expectEqualSlices(u8, "\xff\xdb\xff\xdf", @sliceToBytes(utf16le[0..]));
testing.expectEqualSlices(u8, "\xff\xdb\xff\xdf", mem.sliceAsBytes(utf16le[0..]));
}
}
@ -619,13 +619,13 @@ test "utf8ToUtf16LeWithNull" {
{
const utf16 = try utf8ToUtf16LeWithNull(testing.allocator, "𐐷");
defer testing.allocator.free(utf16);
testing.expectEqualSlices(u8, "\x01\xd8\x37\xdc", @sliceToBytes(utf16[0..]));
testing.expectEqualSlices(u8, "\x01\xd8\x37\xdc", mem.sliceAsBytes(utf16[0..]));
testing.expect(utf16[2] == 0);
}
{
const utf16 = try utf8ToUtf16LeWithNull(testing.allocator, "\u{10FFFF}");
defer testing.allocator.free(utf16);
testing.expectEqualSlices(u8, "\xff\xdb\xff\xdf", @sliceToBytes(utf16[0..]));
testing.expectEqualSlices(u8, "\xff\xdb\xff\xdf", mem.sliceAsBytes(utf16[0..]));
testing.expect(utf16[2] == 0);
}
}

View File

@ -1750,8 +1750,6 @@ enum BuiltinFnId {
BuiltinFnIdIntCast,
BuiltinFnIdFloatCast,
BuiltinFnIdErrSetCast,
BuiltinFnIdToBytes,
BuiltinFnIdFromBytes,
BuiltinFnIdIntToFloat,
BuiltinFnIdFloatToInt,
BuiltinFnIdBoolToInt,
@ -1821,7 +1819,6 @@ enum PanicMsgId {
PanicMsgIdDivisionByZero,
PanicMsgIdRemainderDivisionByZero,
PanicMsgIdExactDivisionRemainder,
PanicMsgIdSliceWidenRemainder,
PanicMsgIdUnwrapOptionalFail,
PanicMsgIdInvalidErrorCode,
PanicMsgIdIncorrectAlignment,
@ -2699,8 +2696,6 @@ enum IrInstSrcId {
IrInstSrcIdSaveErrRetAddr,
IrInstSrcIdAddImplicitReturnType,
IrInstSrcIdErrSetCast,
IrInstSrcIdToBytes,
IrInstSrcIdFromBytes,
IrInstSrcIdCheckRuntimeScope,
IrInstSrcIdHasDecl,
IrInstSrcIdUndeclaredIdent,
@ -2739,7 +2734,6 @@ enum IrInstGenId {
IrInstGenIdCall,
IrInstGenIdReturn,
IrInstGenIdCast,
IrInstGenIdResizeSlice,
IrInstGenIdUnreachable,
IrInstGenIdAsm,
IrInstGenIdTestNonNull,
@ -3271,13 +3265,6 @@ struct IrInstGenCast {
CastOp cast_op;
};
struct IrInstGenResizeSlice {
IrInstGen base;
IrInstGen *operand;
IrInstGen *result_loc;
};
struct IrInstSrcContainerInitList {
IrInstSrc base;
@ -3629,21 +3616,6 @@ struct IrInstSrcErrSetCast {
IrInstSrc *target;
};
struct IrInstSrcToBytes {
IrInstSrc base;
IrInstSrc *target;
ResultLoc *result_loc;
};
struct IrInstSrcFromBytes {
IrInstSrc base;
IrInstSrc *dest_child_type;
IrInstSrc *target;
ResultLoc *result_loc;
};
struct IrInstSrcIntToFloat {
IrInstSrc base;

View File

@ -972,8 +972,6 @@ static Buf *panic_msg_buf(PanicMsgId msg_id) {
return buf_create_from_str("remainder division by zero or negative value");
case PanicMsgIdExactDivisionRemainder:
return buf_create_from_str("exact division produced remainder");
case PanicMsgIdSliceWidenRemainder:
return buf_create_from_str("slice widening size mismatch");
case PanicMsgIdUnwrapOptionalFail:
return buf_create_from_str("attempt to unwrap null");
case PanicMsgIdUnreachable:
@ -3085,74 +3083,6 @@ static void add_error_range_check(CodeGen *g, ZigType *err_set_type, ZigType *in
}
}
static LLVMValueRef ir_render_resize_slice(CodeGen *g, IrExecutableGen *executable,
IrInstGenResizeSlice *instruction)
{
ZigType *actual_type = instruction->operand->value->type;
ZigType *wanted_type = instruction->base.value->type;
LLVMValueRef expr_val = ir_llvm_value(g, instruction->operand);
assert(expr_val);
LLVMValueRef result_loc = ir_llvm_value(g, instruction->result_loc);
assert(wanted_type->id == ZigTypeIdStruct);
assert(wanted_type->data.structure.special == StructSpecialSlice);
assert(actual_type->id == ZigTypeIdStruct);
assert(actual_type->data.structure.special == StructSpecialSlice);
ZigType *actual_pointer_type = actual_type->data.structure.fields[0]->type_entry;
ZigType *actual_child_type = actual_pointer_type->data.pointer.child_type;
ZigType *wanted_pointer_type = wanted_type->data.structure.fields[0]->type_entry;
ZigType *wanted_child_type = wanted_pointer_type->data.pointer.child_type;
size_t actual_ptr_index = actual_type->data.structure.fields[slice_ptr_index]->gen_index;
size_t actual_len_index = actual_type->data.structure.fields[slice_len_index]->gen_index;
size_t wanted_ptr_index = wanted_type->data.structure.fields[slice_ptr_index]->gen_index;
size_t wanted_len_index = wanted_type->data.structure.fields[slice_len_index]->gen_index;
LLVMValueRef src_ptr_ptr = LLVMBuildStructGEP(g->builder, expr_val, (unsigned)actual_ptr_index, "");
LLVMValueRef src_ptr = gen_load_untyped(g, src_ptr_ptr, 0, false, "");
LLVMValueRef src_ptr_casted = LLVMBuildBitCast(g->builder, src_ptr,
get_llvm_type(g, wanted_type->data.structure.fields[0]->type_entry), "");
LLVMValueRef dest_ptr_ptr = LLVMBuildStructGEP(g->builder, result_loc,
(unsigned)wanted_ptr_index, "");
gen_store_untyped(g, src_ptr_casted, dest_ptr_ptr, 0, false);
LLVMValueRef src_len_ptr = LLVMBuildStructGEP(g->builder, expr_val, (unsigned)actual_len_index, "");
LLVMValueRef src_len = gen_load_untyped(g, src_len_ptr, 0, false, "");
uint64_t src_size = type_size(g, actual_child_type);
uint64_t dest_size = type_size(g, wanted_child_type);
LLVMValueRef new_len;
if (dest_size == 1) {
LLVMValueRef src_size_val = LLVMConstInt(g->builtin_types.entry_usize->llvm_type, src_size, false);
new_len = LLVMBuildMul(g->builder, src_len, src_size_val, "");
} else if (src_size == 1) {
LLVMValueRef dest_size_val = LLVMConstInt(g->builtin_types.entry_usize->llvm_type, dest_size, false);
if (ir_want_runtime_safety(g, &instruction->base)) {
LLVMValueRef remainder_val = LLVMBuildURem(g->builder, src_len, dest_size_val, "");
LLVMValueRef zero = LLVMConstNull(g->builtin_types.entry_usize->llvm_type);
LLVMValueRef ok_bit = LLVMBuildICmp(g->builder, LLVMIntEQ, remainder_val, zero, "");
LLVMBasicBlockRef ok_block = LLVMAppendBasicBlock(g->cur_fn_val, "SliceWidenOk");
LLVMBasicBlockRef fail_block = LLVMAppendBasicBlock(g->cur_fn_val, "SliceWidenFail");
LLVMBuildCondBr(g->builder, ok_bit, ok_block, fail_block);
LLVMPositionBuilderAtEnd(g->builder, fail_block);
gen_safety_crash(g, PanicMsgIdSliceWidenRemainder);
LLVMPositionBuilderAtEnd(g->builder, ok_block);
}
new_len = LLVMBuildExactUDiv(g->builder, src_len, dest_size_val, "");
} else {
zig_unreachable();
}
LLVMValueRef dest_len_ptr = LLVMBuildStructGEP(g->builder, result_loc, (unsigned)wanted_len_index, "");
gen_store_untyped(g, new_len, dest_len_ptr, 0, false);
return result_loc;
}
static LLVMValueRef ir_render_cast(CodeGen *g, IrExecutableGen *executable,
IrInstGenCast *cast_instruction)
{
@ -6485,8 +6415,6 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutableGen *executabl
return ir_render_assert_zero(g, executable, (IrInstGenAssertZero *)instruction);
case IrInstGenIdAssertNonNull:
return ir_render_assert_non_null(g, executable, (IrInstGenAssertNonNull *)instruction);
case IrInstGenIdResizeSlice:
return ir_render_resize_slice(g, executable, (IrInstGenResizeSlice *)instruction);
case IrInstGenIdPtrOfArrayToSlice:
return ir_render_ptr_of_array_to_slice(g, executable, (IrInstGenPtrOfArrayToSlice *)instruction);
case IrInstGenIdSuspendBegin:
@ -8317,8 +8245,6 @@ static void define_builtin_fns(CodeGen *g) {
create_builtin_fn(g, BuiltinFnIdAtomicLoad, "atomicLoad", 3);
create_builtin_fn(g, BuiltinFnIdAtomicStore, "atomicStore", 4);
create_builtin_fn(g, BuiltinFnIdErrSetCast, "errSetCast", 2);
create_builtin_fn(g, BuiltinFnIdToBytes, "sliceToBytes", 1);
create_builtin_fn(g, BuiltinFnIdFromBytes, "bytesToSlice", 2);
create_builtin_fn(g, BuiltinFnIdThis, "This", 0);
create_builtin_fn(g, BuiltinFnIdHasDecl, "hasDecl", 2);
create_builtin_fn(g, BuiltinFnIdUnionInit, "unionInit", 3);

View File

@ -383,10 +383,6 @@ static void destroy_instruction_src(IrInstSrc *inst) {
return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcFloatCast *>(inst));
case IrInstSrcIdErrSetCast:
return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcErrSetCast *>(inst));
case IrInstSrcIdFromBytes:
return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcFromBytes *>(inst));
case IrInstSrcIdToBytes:
return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcToBytes *>(inst));
case IrInstSrcIdIntToFloat:
return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcIntToFloat *>(inst));
case IrInstSrcIdFloatToInt:
@ -707,8 +703,6 @@ void destroy_instruction_gen(IrInstGen *inst) {
return heap::c_allocator.destroy(reinterpret_cast<IrInstGenAssertZero *>(inst));
case IrInstGenIdAssertNonNull:
return heap::c_allocator.destroy(reinterpret_cast<IrInstGenAssertNonNull *>(inst));
case IrInstGenIdResizeSlice:
return heap::c_allocator.destroy(reinterpret_cast<IrInstGenResizeSlice *>(inst));
case IrInstGenIdAlloca:
return heap::c_allocator.destroy(reinterpret_cast<IrInstGenAlloca *>(inst));
case IrInstGenIdSuspendBegin:
@ -1563,14 +1557,6 @@ static constexpr IrInstSrcId ir_inst_id(IrInstSrcErrSetCast *) {
return IrInstSrcIdErrSetCast;
}
static constexpr IrInstSrcId ir_inst_id(IrInstSrcToBytes *) {
return IrInstSrcIdToBytes;
}
static constexpr IrInstSrcId ir_inst_id(IrInstSrcFromBytes *) {
return IrInstSrcIdFromBytes;
}
static constexpr IrInstSrcId ir_inst_id(IrInstSrcCheckRuntimeScope *) {
return IrInstSrcIdCheckRuntimeScope;
}
@ -1700,10 +1686,6 @@ static constexpr IrInstGenId ir_inst_id(IrInstGenCast *) {
return IrInstGenIdCast;
}
static constexpr IrInstGenId ir_inst_id(IrInstGenResizeSlice *) {
return IrInstGenIdResizeSlice;
}
static constexpr IrInstGenId ir_inst_id(IrInstGenUnreachable *) {
return IrInstGenIdUnreachable;
}
@ -2759,21 +2741,6 @@ static IrInstGen *ir_build_var_decl_gen(IrAnalyze *ira, IrInst *source_instructi
return &inst->base;
}
static IrInstGen *ir_build_resize_slice(IrAnalyze *ira, IrInst *source_instruction,
IrInstGen *operand, ZigType *ty, IrInstGen *result_loc)
{
IrInstGenResizeSlice *instruction = ir_build_inst_gen<IrInstGenResizeSlice>(&ira->new_irb,
source_instruction->scope, source_instruction->source_node);
instruction->base.value->type = ty;
instruction->operand = operand;
instruction->result_loc = result_loc;
ir_ref_inst_gen(operand, ira->new_irb.current_basic_block);
if (result_loc != nullptr) ir_ref_inst_gen(result_loc, ira->new_irb.current_basic_block);
return &instruction->base;
}
static IrInstSrc *ir_build_export(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
IrInstSrc *target, IrInstSrc *options)
{
@ -3540,32 +3507,6 @@ static IrInstSrc *ir_build_err_set_cast(IrBuilderSrc *irb, Scope *scope, AstNode
return &instruction->base;
}
static IrInstSrc *ir_build_to_bytes(IrBuilderSrc *irb, Scope *scope, AstNode *source_node, IrInstSrc *target,
ResultLoc *result_loc)
{
IrInstSrcToBytes *instruction = ir_build_instruction<IrInstSrcToBytes>(irb, scope, source_node);
instruction->target = target;
instruction->result_loc = result_loc;
ir_ref_instruction(target, irb->current_basic_block);
return &instruction->base;
}
static IrInstSrc *ir_build_from_bytes(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
IrInstSrc *dest_child_type, IrInstSrc *target, ResultLoc *result_loc)
{
IrInstSrcFromBytes *instruction = ir_build_instruction<IrInstSrcFromBytes>(irb, scope, source_node);
instruction->dest_child_type = dest_child_type;
instruction->target = target;
instruction->result_loc = result_loc;
ir_ref_instruction(dest_child_type, irb->current_basic_block);
ir_ref_instruction(target, irb->current_basic_block);
return &instruction->base;
}
static IrInstSrc *ir_build_int_to_float(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
IrInstSrc *dest_type, IrInstSrc *target)
{
@ -6597,31 +6538,6 @@ static IrInstSrc *ir_gen_builtin_fn_call(IrBuilderSrc *irb, Scope *scope, AstNod
IrInstSrc *result = ir_build_err_set_cast(irb, scope, node, arg0_value, arg1_value);
return ir_lval_wrap(irb, scope, result, lval, result_loc);
}
case BuiltinFnIdFromBytes:
{
AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
if (arg0_value == irb->codegen->invalid_inst_src)
return arg0_value;
AstNode *arg1_node = node->data.fn_call_expr.params.at(1);
IrInstSrc *arg1_value = ir_gen_node(irb, arg1_node, scope);
if (arg1_value == irb->codegen->invalid_inst_src)
return arg1_value;
IrInstSrc *result = ir_build_from_bytes(irb, scope, node, arg0_value, arg1_value, result_loc);
return ir_lval_wrap(irb, scope, result, lval, result_loc);
}
case BuiltinFnIdToBytes:
{
AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
IrInstSrc *arg0_value = ir_gen_node(irb, arg0_node, scope);
if (arg0_value == irb->codegen->invalid_inst_src)
return arg0_value;
IrInstSrc *result = ir_build_to_bytes(irb, scope, node, arg0_value, result_loc);
return ir_lval_wrap(irb, scope, result, lval, result_loc);
}
case BuiltinFnIdIntToFloat:
{
AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
@ -25489,171 +25405,6 @@ static IrInstGen *ir_analyze_instruction_err_set_cast(IrAnalyze *ira, IrInstSrcE
return ir_analyze_err_set_cast(ira, &instruction->base.base, target, dest_type);
}
static IrInstGen *ir_analyze_instruction_from_bytes(IrAnalyze *ira, IrInstSrcFromBytes *instruction) {
Error err;
ZigType *dest_child_type = ir_resolve_type(ira, instruction->dest_child_type->child);
if (type_is_invalid(dest_child_type))
return ira->codegen->invalid_inst_gen;
IrInstGen *target = instruction->target->child;
if (type_is_invalid(target->value->type))
return ira->codegen->invalid_inst_gen;
bool src_ptr_const;
bool src_ptr_volatile;
uint32_t src_ptr_align;
if (target->value->type->id == ZigTypeIdPointer) {
src_ptr_const = target->value->type->data.pointer.is_const;
src_ptr_volatile = target->value->type->data.pointer.is_volatile;
if ((err = resolve_ptr_align(ira, target->value->type, &src_ptr_align)))
return ira->codegen->invalid_inst_gen;
} else if (is_slice(target->value->type)) {
ZigType *src_ptr_type = target->value->type->data.structure.fields[slice_ptr_index]->type_entry;
src_ptr_const = src_ptr_type->data.pointer.is_const;
src_ptr_volatile = src_ptr_type->data.pointer.is_volatile;
if ((err = resolve_ptr_align(ira, src_ptr_type, &src_ptr_align)))
return ira->codegen->invalid_inst_gen;
} else {
src_ptr_const = true;
src_ptr_volatile = false;
if ((err = type_resolve(ira->codegen, target->value->type, ResolveStatusAlignmentKnown)))
return ira->codegen->invalid_inst_gen;
src_ptr_align = get_abi_alignment(ira->codegen, target->value->type);
}
if (src_ptr_align != 0) {
if ((err = type_resolve(ira->codegen, dest_child_type, ResolveStatusAlignmentKnown)))
return ira->codegen->invalid_inst_gen;
}
ZigType *dest_ptr_type = get_pointer_to_type_extra(ira->codegen, dest_child_type,
src_ptr_const, src_ptr_volatile, PtrLenUnknown,
src_ptr_align, 0, 0, false);
ZigType *dest_slice_type = get_slice_type(ira->codegen, dest_ptr_type);
ZigType *u8_ptr = get_pointer_to_type_extra(ira->codegen, ira->codegen->builtin_types.entry_u8,
src_ptr_const, src_ptr_volatile, PtrLenUnknown,
src_ptr_align, 0, 0, false);
ZigType *u8_slice = get_slice_type(ira->codegen, u8_ptr);
IrInstGen *casted_value = ir_implicit_cast2(ira, &instruction->target->base, target, u8_slice);
if (type_is_invalid(casted_value->value->type))
return ira->codegen->invalid_inst_gen;
bool have_known_len = false;
uint64_t known_len;
if (instr_is_comptime(casted_value)) {
ZigValue *val = ir_resolve_const(ira, casted_value, UndefBad);
if (!val)
return ira->codegen->invalid_inst_gen;
ZigValue *len_val = val->data.x_struct.fields[slice_len_index];
if (value_is_comptime(len_val)) {
known_len = bigint_as_u64(&len_val->data.x_bigint);
have_known_len = true;
}
}
IrInstGen *result_loc = ir_resolve_result(ira, &instruction->base.base, instruction->result_loc,
dest_slice_type, nullptr, true, true);
if (result_loc != nullptr && (type_is_invalid(result_loc->value->type) || result_loc->value->type->id == ZigTypeIdUnreachable)) {
return result_loc;
}
if (target->value->type->id == ZigTypeIdPointer &&
target->value->type->data.pointer.ptr_len == PtrLenSingle &&
target->value->type->data.pointer.child_type->id == ZigTypeIdArray)
{
known_len = target->value->type->data.pointer.child_type->data.array.len;
have_known_len = true;
} else if (casted_value->value->data.rh_slice.id == RuntimeHintSliceIdLen) {
known_len = casted_value->value->data.rh_slice.len;
have_known_len = true;
}
if (have_known_len) {
if ((err = type_resolve(ira->codegen, dest_child_type, ResolveStatusSizeKnown)))
return ira->codegen->invalid_inst_gen;
uint64_t child_type_size = type_size(ira->codegen, dest_child_type);
uint64_t remainder = known_len % child_type_size;
if (remainder != 0) {
ErrorMsg *msg = ir_add_error(ira, &instruction->base.base,
buf_sprintf("unable to convert [%" ZIG_PRI_u64 "]u8 to %s: size mismatch",
known_len, buf_ptr(&dest_slice_type->name)));
add_error_note(ira->codegen, msg, instruction->dest_child_type->base.source_node,
buf_sprintf("%s has size %" ZIG_PRI_u64 "; remaining bytes: %" ZIG_PRI_u64,
buf_ptr(&dest_child_type->name), child_type_size, remainder));
return ira->codegen->invalid_inst_gen;
}
}
return ir_build_resize_slice(ira, &instruction->base.base, casted_value, dest_slice_type, result_loc);
}
static IrInstGen *ir_analyze_instruction_to_bytes(IrAnalyze *ira, IrInstSrcToBytes *instruction) {
Error err;
IrInstGen *target = instruction->target->child;
if (type_is_invalid(target->value->type))
return ira->codegen->invalid_inst_gen;
if (!is_slice(target->value->type)) {
ir_add_error(ira, &instruction->target->base,
buf_sprintf("expected slice, found '%s'", buf_ptr(&target->value->type->name)));
return ira->codegen->invalid_inst_gen;
}
ZigType *src_ptr_type = target->value->type->data.structure.fields[slice_ptr_index]->type_entry;
uint32_t alignment;
if ((err = resolve_ptr_align(ira, src_ptr_type, &alignment)))
return ira->codegen->invalid_inst_gen;
ZigType *dest_ptr_type = get_pointer_to_type_extra(ira->codegen, ira->codegen->builtin_types.entry_u8,
src_ptr_type->data.pointer.is_const, src_ptr_type->data.pointer.is_volatile, PtrLenUnknown,
alignment, 0, 0, false);
ZigType *dest_slice_type = get_slice_type(ira->codegen, dest_ptr_type);
if (instr_is_comptime(target)) {
ZigValue *target_val = ir_resolve_const(ira, target, UndefBad);
if (target_val == nullptr)
return ira->codegen->invalid_inst_gen;
IrInstGen *result = ir_const(ira, &instruction->base.base, dest_slice_type);
result->value->data.x_struct.fields = alloc_const_vals_ptrs(ira->codegen, 2);
ZigValue *ptr_val = result->value->data.x_struct.fields[slice_ptr_index];
ZigValue *target_ptr_val = target_val->data.x_struct.fields[slice_ptr_index];
copy_const_val(ira->codegen, ptr_val, target_ptr_val);
ptr_val->type = dest_ptr_type;
ZigValue *len_val = result->value->data.x_struct.fields[slice_len_index];
len_val->special = ConstValSpecialStatic;
len_val->type = ira->codegen->builtin_types.entry_usize;
ZigValue *target_len_val = target_val->data.x_struct.fields[slice_len_index];
ZigType *elem_type = src_ptr_type->data.pointer.child_type;
BigInt elem_size_bigint;
bigint_init_unsigned(&elem_size_bigint, type_size(ira->codegen, elem_type));
bigint_mul(&len_val->data.x_bigint, &target_len_val->data.x_bigint, &elem_size_bigint);
return result;
}
IrInstGen *result_loc = ir_resolve_result(ira, &instruction->base.base, instruction->result_loc,
dest_slice_type, nullptr, true, true);
if (type_is_invalid(result_loc->value->type) || result_loc->value->type->id == ZigTypeIdUnreachable) {
return result_loc;
}
return ir_build_resize_slice(ira, &instruction->base.base, target, dest_slice_type, result_loc);
}
static Error resolve_ptr_align(IrAnalyze *ira, ZigType *ty, uint32_t *result_align) {
Error err;
@ -29783,10 +29534,6 @@ static IrInstGen *ir_analyze_instruction_base(IrAnalyze *ira, IrInstSrc *instruc
return ir_analyze_instruction_float_cast(ira, (IrInstSrcFloatCast *)instruction);
case IrInstSrcIdErrSetCast:
return ir_analyze_instruction_err_set_cast(ira, (IrInstSrcErrSetCast *)instruction);
case IrInstSrcIdFromBytes:
return ir_analyze_instruction_from_bytes(ira, (IrInstSrcFromBytes *)instruction);
case IrInstSrcIdToBytes:
return ir_analyze_instruction_to_bytes(ira, (IrInstSrcToBytes *)instruction);
case IrInstSrcIdIntToFloat:
return ir_analyze_instruction_int_to_float(ira, (IrInstSrcIntToFloat *)instruction);
case IrInstSrcIdFloatToInt:
@ -30113,7 +29860,6 @@ bool ir_inst_gen_has_side_effects(IrInstGen *instruction) {
case IrInstGenIdCmpxchg:
case IrInstGenIdAssertZero:
case IrInstGenIdAssertNonNull:
case IrInstGenIdResizeSlice:
case IrInstGenIdPtrOfArrayToSlice:
case IrInstGenIdSlice:
case IrInstGenIdOptionalWrap:
@ -30339,8 +30085,6 @@ bool ir_inst_src_has_side_effects(IrInstSrc *instruction) {
case IrInstSrcIdIntToFloat:
case IrInstSrcIdFloatToInt:
case IrInstSrcIdBoolToInt:
case IrInstSrcIdFromBytes:
case IrInstSrcIdToBytes:
case IrInstSrcIdEnumToInt:
case IrInstSrcIdHasDecl:
case IrInstSrcIdAlloca:

View File

@ -307,10 +307,6 @@ const char* ir_inst_src_type_str(IrInstSrcId id) {
return "SrcAddImplicitReturnType";
case IrInstSrcIdErrSetCast:
return "SrcErrSetCast";
case IrInstSrcIdToBytes:
return "SrcToBytes";
case IrInstSrcIdFromBytes:
return "SrcFromBytes";
case IrInstSrcIdCheckRuntimeScope:
return "SrcCheckRuntimeScope";
case IrInstSrcIdHasDecl:
@ -383,8 +379,6 @@ const char* ir_inst_gen_type_str(IrInstGenId id) {
return "GenReturn";
case IrInstGenIdCast:
return "GenCast";
case IrInstGenIdResizeSlice:
return "GenResizeSlice";
case IrInstGenIdUnreachable:
return "GenUnreachable";
case IrInstGenIdAsm:
@ -1644,20 +1638,6 @@ static void ir_print_err_set_cast(IrPrintSrc *irp, IrInstSrcErrSetCast *instruct
fprintf(irp->f, ")");
}
static void ir_print_from_bytes(IrPrintSrc *irp, IrInstSrcFromBytes *instruction) {
fprintf(irp->f, "@bytesToSlice(");
ir_print_other_inst_src(irp, instruction->dest_child_type);
fprintf(irp->f, ", ");
ir_print_other_inst_src(irp, instruction->target);
fprintf(irp->f, ")");
}
static void ir_print_to_bytes(IrPrintSrc *irp, IrInstSrcToBytes *instruction) {
fprintf(irp->f, "@sliceToBytes(");
ir_print_other_inst_src(irp, instruction->target);
fprintf(irp->f, ")");
}
static void ir_print_int_to_float(IrPrintSrc *irp, IrInstSrcIntToFloat *instruction) {
fprintf(irp->f, "@intToFloat(");
ir_print_other_inst_src(irp, instruction->dest_type);
@ -2142,13 +2122,6 @@ static void ir_print_assert_non_null(IrPrintGen *irp, IrInstGenAssertNonNull *in
fprintf(irp->f, ")");
}
static void ir_print_resize_slice(IrPrintGen *irp, IrInstGenResizeSlice *instruction) {
fprintf(irp->f, "@resizeSlice(");
ir_print_other_inst_gen(irp, instruction->operand);
fprintf(irp->f, ")result=");
ir_print_other_inst_gen(irp, instruction->result_loc);
}
static void ir_print_alloca_src(IrPrintSrc *irp, IrInstSrcAlloca *instruction) {
fprintf(irp->f, "Alloca(align=");
ir_print_other_inst_src(irp, instruction->align);
@ -2793,12 +2766,6 @@ static void ir_print_inst_src(IrPrintSrc *irp, IrInstSrc *instruction, bool trai
case IrInstSrcIdErrSetCast:
ir_print_err_set_cast(irp, (IrInstSrcErrSetCast *)instruction);
break;
case IrInstSrcIdFromBytes:
ir_print_from_bytes(irp, (IrInstSrcFromBytes *)instruction);
break;
case IrInstSrcIdToBytes:
ir_print_to_bytes(irp, (IrInstSrcToBytes *)instruction);
break;
case IrInstSrcIdIntToFloat:
ir_print_int_to_float(irp, (IrInstSrcIntToFloat *)instruction);
break;
@ -3273,9 +3240,6 @@ static void ir_print_inst_gen(IrPrintGen *irp, IrInstGen *instruction, bool trai
case IrInstGenIdAssertNonNull:
ir_print_assert_non_null(irp, (IrInstGenAssertNonNull *)instruction);
break;
case IrInstGenIdResizeSlice:
ir_print_resize_slice(irp, (IrInstGenResizeSlice *)instruction);
break;
case IrInstGenIdAlloca:
ir_print_alloca_gen(irp, (IrInstGenAlloca *)instruction);
break;

View File

@ -4745,16 +4745,6 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
"tmp.zig:1:15: error: comptime parameter not allowed in function with calling convention 'C'",
});
cases.add("convert fixed size array to slice with invalid size",
\\export fn f() void {
\\ var array: [5]u8 = undefined;
\\ var foo = @bytesToSlice(u32, &array)[0];
\\}
, &[_][]const u8{
"tmp.zig:3:15: error: unable to convert [5]u8 to []align(1) u32: size mismatch",
"tmp.zig:3:29: note: u32 has size 4; remaining bytes: 1",
});
cases.add("non-pure function returns type",
\\var a: u32 = 0;
\\pub fn List(comptime T: type) type {

View File

@ -553,15 +553,16 @@ pub fn addCases(cases: *tests.CompareOutputContext) void {
);
cases.addRuntimeSafety("cast []u8 to bigger slice of wrong size",
\\pub fn panic(message: []const u8, stack_trace: ?*@import("builtin").StackTrace) noreturn {
\\ @import("std").os.exit(126);
\\const std = @import("std");
\\pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace) noreturn {
\\ std.os.exit(126);
\\}
\\pub fn main() !void {
\\ const x = widenSlice(&[_]u8{1, 2, 3, 4, 5});
\\ if (x.len == 0) return error.Whatever;
\\}
\\fn widenSlice(slice: []align(1) const u8) []align(1) const i32 {
\\ return @bytesToSlice(i32, slice);
\\ return std.mem.bytesAsSlice(i32, slice);
\\}
);
@ -656,17 +657,18 @@ pub fn addCases(cases: *tests.CompareOutputContext) void {
);
cases.addRuntimeSafety("@alignCast misaligned",
\\pub fn panic(message: []const u8, stack_trace: ?*@import("builtin").StackTrace) noreturn {
\\ @import("std").os.exit(126);
\\const std = @import("std");
\\pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace) noreturn {
\\ std.os.exit(126);
\\}
\\pub fn main() !void {
\\ var array align(4) = [_]u32{0x11111111, 0x11111111};
\\ const bytes = @sliceToBytes(array[0..]);
\\ const bytes = std.mem.sliceAsBytes(array[0..]);
\\ if (foo(bytes) != 0x11111111) return error.Wrong;
\\}
\\fn foo(bytes: []u8) u32 {
\\ const slice4 = bytes[1..5];
\\ const int_slice = @bytesToSlice(u32, @alignCast(4, slice4));
\\ const int_slice = std.mem.bytesAsSlice(u32, @alignCast(4, slice4));
\\ return int_slice[0];
\\}
);

View File

@ -93,7 +93,6 @@ comptime {
_ = @import("behavior/shuffle.zig");
_ = @import("behavior/sizeof_and_typeof.zig");
_ = @import("behavior/slice.zig");
_ = @import("behavior/slicetobytes.zig");
_ = @import("behavior/struct.zig");
_ = @import("behavior/struct_contains_null_ptr_itself.zig");
_ = @import("behavior/struct_contains_slice_of_itself.zig");

View File

@ -81,20 +81,6 @@ fn testBytesAlign(b: u8) void {
expect(ptr.* == 0x33333333);
}
test "specifying alignment allows slice cast" {
testBytesAlignSlice(0x33);
}
fn testBytesAlignSlice(b: u8) void {
var bytes align(4) = [_]u8{
b,
b,
b,
b,
};
const slice: []u32 = @bytesToSlice(u32, bytes[0..]);
expect(slice[0] == 0x33333333);
}
test "@alignCast pointers" {
var x: u32 align(4) = 1;
expectsOnly1(&x);

View File

@ -334,7 +334,7 @@ test "async fn with inferred error set" {
var frame: [1]@Frame(middle) = undefined;
var fn_ptr = middle;
var result: @TypeOf(fn_ptr).ReturnType.ErrorSet!void = undefined;
_ = @asyncCall(@sliceToBytes(frame[0..]), &result, fn_ptr);
_ = @asyncCall(std.mem.sliceAsBytes(frame[0..]), &result, fn_ptr);
resume global_frame;
std.testing.expectError(error.Fail, result);
}
@ -954,7 +954,7 @@ test "@asyncCall with comptime-known function, but not awaited directly" {
fn doTheTest() void {
var frame: [1]@Frame(middle) = undefined;
var result: @TypeOf(middle).ReturnType.ErrorSet!void = undefined;
_ = @asyncCall(@sliceToBytes(frame[0..]), &result, middle);
_ = @asyncCall(std.mem.sliceAsBytes(frame[0..]), &result, middle);
resume global_frame;
std.testing.expectError(error.Fail, result);
}

View File

@ -13,7 +13,7 @@ test "allocation and looping over 3-byte integer" {
x[0] = 0xFFFFFF;
x[1] = 0xFFFFFF;
const bytes = @sliceToBytes(x);
const bytes = std.mem.sliceAsBytes(x);
expect(@TypeOf(bytes) == []align(4) u8);
expect(bytes.len == 8);

View File

@ -304,20 +304,6 @@ fn cast128Float(x: u128) f128 {
return @bitCast(f128, x);
}
test "const slice widen cast" {
const bytes align(4) = [_]u8{
0x12,
0x12,
0x12,
0x12,
};
const u32_value = @bytesToSlice(u32, bytes[0..])[0];
expect(u32_value == 0x12121212);
expect(@bitCast(u32, bytes) == 0x12121212);
}
test "single-item pointer of array to slice and to unknown length pointer" {
testCastPtrOfArrayToSliceAndPtr();
comptime testCastPtrOfArrayToSliceAndPtr();
@ -392,12 +378,6 @@ test "comptime_int @intToFloat" {
}
}
test "@bytesToSlice keeps pointer alignment" {
var bytes = [_]u8{ 0x01, 0x02, 0x03, 0x04 };
const numbers = @bytesToSlice(u32, bytes[0..]);
comptime expect(@TypeOf(numbers) == []align(@alignOf(@TypeOf(bytes))) u32);
}
test "@intCast i32 to u7" {
var x: u128 = maxInt(u128);
var y: i32 = 120;

View File

@ -711,16 +711,6 @@ test "bit shift a u1" {
expect(y == 1);
}
test "@bytesToslice on a packed struct" {
const F = packed struct {
a: u8,
};
var b = [1]u8{9};
var f = @bytesToSlice(F, &b);
expect(f[0].a == 9);
}
test "comptime pointer cast array and then slice" {
const array = [_]u8{ 1, 2, 3, 4, 5, 6, 7, 8 };

View File

@ -3,7 +3,6 @@ const expect = std.testing.expect;
const expectEqualSlices = std.testing.expectEqualSlices;
const mem = std.mem;
const builtin = @import("builtin");
const maxInt = std.math.maxInt;
// normal comment
@ -377,26 +376,6 @@ test "string concatenation" {
expect(b[len] == 0);
}
test "cast slice to u8 slice" {
expect(@sizeOf(i32) == 4);
var big_thing_array = [_]i32{ 1, 2, 3, 4 };
const big_thing_slice: []i32 = big_thing_array[0..];
const bytes = @sliceToBytes(big_thing_slice);
expect(bytes.len == 4 * 4);
bytes[4] = 0;
bytes[5] = 0;
bytes[6] = 0;
bytes[7] = 0;
expect(big_thing_slice[1] == 0);
const big_thing_again = @bytesToSlice(i32, bytes);
expect(big_thing_again[2] == 3);
big_thing_again[2] = -1;
expect(bytes[8] == maxInt(u8));
expect(bytes[9] == maxInt(u8));
expect(bytes[10] == maxInt(u8));
expect(bytes[11] == maxInt(u8));
}
test "pointer to void return type" {
testPointerToVoidReturnType() catch unreachable;
}

View File

@ -1,29 +0,0 @@
const std = @import("std");
const builtin = @import("builtin");
const expect = std.testing.expect;
test "@sliceToBytes packed struct at runtime and comptime" {
const Foo = packed struct {
a: u4,
b: u4,
};
const S = struct {
fn doTheTest() void {
var foo: Foo = undefined;
var slice = @sliceToBytes(@as(*[1]Foo, &foo)[0..1]);
slice[0] = 0x13;
switch (builtin.endian) {
builtin.Endian.Big => {
expect(foo.a == 0x1);
expect(foo.b == 0x3);
},
builtin.Endian.Little => {
expect(foo.a == 0x3);
expect(foo.b == 0x1);
},
}
}
};
S.doTheTest();
comptime S.doTheTest();
}

View File

@ -315,7 +315,7 @@ test "packed array 24bits" {
var bytes = [_]u8{0} ** (@sizeOf(FooArray24Bits) + 1);
bytes[bytes.len - 1] = 0xaa;
const ptr = &@bytesToSlice(FooArray24Bits, bytes[0 .. bytes.len - 1])[0];
const ptr = &std.mem.bytesAsSlice(FooArray24Bits, bytes[0 .. bytes.len - 1])[0];
expect(ptr.a == 0);
expect(ptr.b[0].field == 0);
expect(ptr.b[1].field == 0);
@ -364,7 +364,7 @@ test "aligned array of packed struct" {
}
var bytes = [_]u8{0xbb} ** @sizeOf(FooArrayOfAligned);
const ptr = &@bytesToSlice(FooArrayOfAligned, bytes[0..bytes.len])[0];
const ptr = &std.mem.bytesAsSlice(FooArrayOfAligned, bytes[0..])[0];
expect(ptr.a[0].a == 0xbb);
expect(ptr.a[0].b == 0xbb);