Merge pull request #15278 from ziglang/memcpy-memset
change semantics of `@memcpy` and `@memset`
commit 3c66850e42
@@ -8681,40 +8681,30 @@ test "integer cast panic" {
     {#header_close#}

     {#header_open|@memcpy#}
-    <pre>{#syntax#}@memcpy(noalias dest: [*]u8, noalias source: [*]const u8, byte_count: usize) void{#endsyntax#}</pre>
-    <p>
-    This function copies bytes from one region of memory to another. {#syntax#}dest{#endsyntax#} and
-    {#syntax#}source{#endsyntax#} are both pointers and must not overlap.
-    </p>
-    <p>
-    This function is a low level intrinsic with no safety mechanisms. Most code
-    should not use this function, instead using something like this:
-    </p>
-    <pre>{#syntax#}for (dest, source[0..byte_count]) |*d, s| d.* = s;{#endsyntax#}</pre>
-    <p>
-    The optimizer is intelligent enough to turn the above snippet into a memcpy.
-    </p>
-    <p>There is also a standard library function for this:</p>
-    <pre>{#syntax#}const mem = @import("std").mem;
-    mem.copy(u8, dest[0..byte_count], source[0..byte_count]);{#endsyntax#}</pre>
+    <pre>{#syntax#}@memcpy(noalias dest, noalias source) void{#endsyntax#}</pre>
+    <p>This function copies bytes from one region of memory to another.</p>
+    <p>{#syntax#}dest{#endsyntax#} must be a mutable slice, a mutable pointer to an array, or
+    a mutable many-item {#link|pointer|Pointers#}. It may have any
+    alignment, and it may have any element type.</p>
+    <p>Likewise, {#syntax#}source{#endsyntax#} must be a slice, a
+    pointer to an array, or a many-item
+    {#link|pointer|Pointers#}. It may have any alignment, and it may have any
+    element type.</p>
+    <p>The {#syntax#}source{#endsyntax#} element type must support {#link|Type Coercion#}
+    into the {#syntax#}dest{#endsyntax#} element type. The element types may have
+    different ABI sizes; however, that may incur a performance penalty.</p>
+    <p>Similar to {#link|for#} loops, at least one of {#syntax#}source{#endsyntax#} and
+    {#syntax#}dest{#endsyntax#} must provide a length, and if two lengths are provided,
+    they must be equal.</p>
+    <p>Finally, the two memory regions must not overlap.</p>
    {#header_close#}

    {#header_open|@memset#}
-    <pre>{#syntax#}@memset(dest: [*]u8, c: u8, byte_count: usize) void{#endsyntax#}</pre>
-    <p>
-    This function sets a region of memory to {#syntax#}c{#endsyntax#}. {#syntax#}dest{#endsyntax#} is a pointer.
-    </p>
-    <p>
-    This function is a low level intrinsic with no safety mechanisms. Most
-    code should not use this function, instead using something like this:
-    </p>
-    <pre>{#syntax#}for (dest[0..byte_count]) |*b| b.* = c;{#endsyntax#}</pre>
-    <p>
-    The optimizer is intelligent enough to turn the above snippet into a memset.
-    </p>
-    <p>There is also a standard library function for this:</p>
-    <pre>{#syntax#}const mem = @import("std").mem;
-    mem.set(u8, dest, c);{#endsyntax#}</pre>
+    <pre>{#syntax#}@memset(dest, elem) void{#endsyntax#}</pre>
+    <p>This function sets all the elements of a memory region to {#syntax#}elem{#endsyntax#}.</p>
+    <p>{#syntax#}dest{#endsyntax#} must be a mutable slice or a mutable pointer to an array.
+    It may have any alignment, and it may have any element type.</p>
+    <p>{#syntax#}elem{#endsyntax#} is coerced to the element type of {#syntax#}dest{#endsyntax#}.</p>
    {#header_close#}

    {#header_open|@min#}
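The new calling convention in practice; a minimal sketch assuming a compiler with these semantics (the buffer and its contents are illustrative, not from the commit):

    var dest: [5]u8 = undefined;
    const source = "hello"; // *const [5:0]u8: provides a comptime-known length
    @memcpy(&dest, source); // both sides provide a length, and the lengths match
    @memset(&dest, 0); // 0 is coerced to dest's element type, u8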
@@ -121,22 +121,22 @@ fn __atomic_load(size: u32, src: [*]u8, dest: [*]u8, model: i32) callconv(.C) vo
     _ = model;
     var sl = spinlocks.get(@ptrToInt(src));
     defer sl.release();
-    @memcpy(dest, src, size);
+    @memcpy(dest[0..size], src);
 }

 fn __atomic_store(size: u32, dest: [*]u8, src: [*]u8, model: i32) callconv(.C) void {
     _ = model;
     var sl = spinlocks.get(@ptrToInt(dest));
     defer sl.release();
-    @memcpy(dest, src, size);
+    @memcpy(dest[0..size], src);
 }

 fn __atomic_exchange(size: u32, ptr: [*]u8, val: [*]u8, old: [*]u8, model: i32) callconv(.C) void {
     _ = model;
     var sl = spinlocks.get(@ptrToInt(ptr));
     defer sl.release();
-    @memcpy(old, ptr, size);
-    @memcpy(ptr, val, size);
+    @memcpy(old[0..size], ptr);
+    @memcpy(ptr[0..size], val);
 }

 fn __atomic_compare_exchange(
@@ -155,10 +155,10 @@ fn __atomic_compare_exchange(
         if (expected[i] != b) break;
     } else {
         // The two objects, ptr and expected, are equal
-        @memcpy(ptr, desired, size);
+        @memcpy(ptr[0..size], desired);
         return 1;
     }
-    @memcpy(expected, ptr, size);
+    @memcpy(expected[0..size], ptr);
     return 0;
 }
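The migration pattern used throughout this commit: when only a many-item pointer and a byte count are available, slicing the destination supplies the length that the two-argument @memcpy requires, while the source can stay a bare many-item pointer and inherit that length. A hedged sketch (the helper is illustrative, not part of the commit):

    fn copyBytes(dest: [*]u8, src: [*]const u8, len: usize) void {
        // dest[0..len] is a slice, so it carries the length;
        // src, a many-item pointer, inherits the destination's length.
        @memcpy(dest[0..len], src);
    }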
@@ -139,10 +139,10 @@ const ObjectArray = struct {

             if (control.default_value) |value| {
                 // default value: copy the content to newly allocated object.
-                @memcpy(data, @ptrCast([*]const u8, value), size);
+                @memcpy(data[0..size], @ptrCast([*]const u8, value));
             } else {
                 // no default: return zeroed memory.
-                @memset(data, 0, size);
+                @memset(data[0..size], 0);
             }

             self.slots[index] = @ptrCast(*anyopaque, data);
@@ -1893,7 +1893,7 @@ const IndexHeader = struct {
         const index_size = hash_map.capacityIndexSize(new_bit_index);
         const nbytes = @sizeOf(IndexHeader) + index_size * len;
         const bytes = try allocator.alignedAlloc(u8, @alignOf(IndexHeader), nbytes);
-        @memset(bytes.ptr + @sizeOf(IndexHeader), 0xff, bytes.len - @sizeOf(IndexHeader));
+        @memset(bytes[@sizeOf(IndexHeader)..], 0xff);
         const result = @ptrCast(*IndexHeader, bytes.ptr);
         result.* = .{
             .bit_index = new_bit_index,
@@ -1914,7 +1914,7 @@ const IndexHeader = struct {
         const index_size = hash_map.capacityIndexSize(header.bit_index);
         const ptr = @ptrCast([*]align(@alignOf(IndexHeader)) u8, header);
         const nbytes = @sizeOf(IndexHeader) + header.length() * index_size;
-        @memset(ptr + @sizeOf(IndexHeader), 0xff, nbytes - @sizeOf(IndexHeader));
+        @memset(ptr[@sizeOf(IndexHeader)..nbytes], 0xff);
     }

     // Verify that the header has sufficient alignment to produce aligned arrays.
@@ -121,7 +121,7 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type {

             const new_memory = try allocator.alignedAlloc(T, alignment, self.items.len);
             mem.copy(T, new_memory, self.items);
-            @memset(@ptrCast([*]u8, self.items.ptr), undefined, self.items.len * @sizeOf(T));
+            @memset(self.items, undefined);
             self.clearAndFree();
             return new_memory;
         }
@@ -281,11 +281,7 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type {
             const new_len = old_len + items.len;
             assert(new_len <= self.capacity);
             self.items.len = new_len;
-            @memcpy(
-                @ptrCast([*]align(@alignOf(T)) u8, self.items.ptr + old_len),
-                @ptrCast([*]const u8, items.ptr),
-                items.len * @sizeOf(T),
-            );
+            @memcpy(self.items[old_len..][0..items.len], items);
         }

         pub const Writer = if (T != u8)
@@ -601,7 +597,7 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ

             const new_memory = try allocator.alignedAlloc(T, alignment, self.items.len);
             mem.copy(T, new_memory, self.items);
-            @memset(@ptrCast([*]u8, self.items.ptr), undefined, self.items.len * @sizeOf(T));
+            @memset(self.items, undefined);
             self.clearAndFree(allocator);
             return new_memory;
         }
@@ -740,11 +736,7 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
             const new_len = old_len + items.len;
             assert(new_len <= self.capacity);
             self.items.len = new_len;
-            @memcpy(
-                @ptrCast([*]align(@alignOf(T)) u8, self.items.ptr + old_len),
-                @ptrCast([*]const u8, items.ptr),
-                items.len * @sizeOf(T),
-            );
+            @memcpy(self.items[old_len..][0..items.len], items);
         }

         pub const WriterContext = struct {
@@ -1002,6 +1002,8 @@ pub const panic_messages = struct {
     pub const index_out_of_bounds = "index out of bounds";
     pub const start_index_greater_than_end = "start index is larger than end index";
     pub const for_len_mismatch = "for loop over objects with non-equal lengths";
+    pub const memcpy_len_mismatch = "@memcpy arguments have non-equal lengths";
+    pub const memcpy_alias = "@memcpy arguments alias";
 };

 pub noinline fn returnError(st: *StackTrace) void {
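These two messages back the runtime checks added in Sema later in this diff. A sketch of code that would trip each of them in a safety-checked build (names and sizes are illustrative):

    var n: usize = 3; // runtime-known length
    var a: [4]u8 = undefined;
    var b: [8]u8 = undefined;
    @memcpy(a[0..], b[0..n]); // panic: @memcpy arguments have non-equal lengths

    var buf: [8]u8 = undefined;
    @memcpy(buf[0..4], buf[2..6]); // panic: @memcpy arguments alias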
@@ -3670,7 +3670,7 @@ pub const MachTask = extern struct {
                 else => |err| return unexpectedKernError(err),
             }

-            @memcpy(out_buf[0..].ptr, @intToPtr([*]const u8, vm_memory), curr_bytes_read);
+            @memcpy(out_buf[0..curr_bytes_read], @intToPtr([*]const u8, vm_memory));
             _ = vm_deallocate(mach_task_self(), vm_memory, curr_bytes_read);

             out_buf = out_buf[curr_bytes_read..];
@@ -209,7 +209,7 @@ fn Aegis128LGeneric(comptime tag_bits: u9) type {
                 acc |= (computed_tag[j] ^ tag[j]);
             }
             if (acc != 0) {
-                @memset(m.ptr, undefined, m.len);
+                @memset(m, undefined);
                 return error.AuthenticationFailed;
             }
         }
@@ -390,7 +390,7 @@ fn Aegis256Generic(comptime tag_bits: u9) type {
                 acc |= (computed_tag[j] ^ tag[j]);
             }
             if (acc != 0) {
-                @memset(m.ptr, undefined, m.len);
+                @memset(m, undefined);
                 return error.AuthenticationFailed;
             }
         }

@@ -91,7 +91,7 @@ fn AesGcm(comptime Aes: anytype) type {
             acc |= (computed_tag[p] ^ tag[p]);
         }
         if (acc != 0) {
-            @memset(m.ptr, undefined, m.len);
+            @memset(m, undefined);
             return error.AuthenticationFailed;
         }
@@ -531,7 +531,7 @@ pub fn init(stream: anytype, ca_bundle: Certificate.Bundle, host: []const u8) In
                 const pub_key = subject.pubKey();
                 if (pub_key.len > main_cert_pub_key_buf.len)
                     return error.CertificatePublicKeyInvalid;
-                @memcpy(&main_cert_pub_key_buf, pub_key.ptr, pub_key.len);
+                @memcpy(main_cert_pub_key_buf[0..pub_key.len], pub_key);
                 main_cert_pub_key_len = @intCast(@TypeOf(main_cert_pub_key_len), pub_key.len);
             } else {
                 try prev_cert.verify(subject, now_sec);
@@ -135,11 +135,11 @@ pub fn timingSafeSub(comptime T: type, a: []const T, b: []const T, result: []T,
 /// Sets a slice to zeroes.
 /// Prevents the store from being optimized out.
 pub fn secureZero(comptime T: type, s: []T) void {
     // NOTE: We do not use a volatile slice cast here since LLVM cannot
     // see that it can be replaced by a memset.
     // TODO: implement `@memset` for non-byte-sized element type in the llvm backend
     //@memset(@as([]volatile T, s), 0);
     const ptr = @ptrCast([*]volatile u8, s.ptr);
     const length = s.len * @sizeOf(T);
-    @memset(ptr, 0, length);
+    @memset(ptr[0..length], 0);
 }

 test "crypto.utils.timingSafeEql" {
@@ -104,7 +104,7 @@ pub fn LinearFifo(
         }
         { // set unused area to undefined
             const unused = mem.sliceAsBytes(self.buf[self.count..]);
-            @memset(unused.ptr, undefined, unused.len);
+            @memset(unused, undefined);
         }
     }

@@ -182,12 +182,12 @@ pub fn LinearFifo(
         const slice = self.readableSliceMut(0);
         if (slice.len >= count) {
             const unused = mem.sliceAsBytes(slice[0..count]);
-            @memset(unused.ptr, undefined, unused.len);
+            @memset(unused, undefined);
         } else {
             const unused = mem.sliceAsBytes(slice[0..]);
-            @memset(unused.ptr, undefined, unused.len);
+            @memset(unused, undefined);
             const unused2 = mem.sliceAsBytes(self.readableSliceMut(slice.len)[0 .. count - slice.len]);
-            @memset(unused2.ptr, undefined, unused2.len);
+            @memset(unused2, undefined);
         }
     }
     if (autoalign and self.count == count) {
@@ -99,9 +99,8 @@ pub const Murmur2_64 = struct {

     pub fn hashWithSeed(str: []const u8, seed: u64) u64 {
         const m: u64 = 0xc6a4a7935bd1e995;
-        const len = @as(u64, str.len);
-        var h1: u64 = seed ^ (len *% m);
-        for (@ptrCast([*]align(1) const u64, str.ptr)[0..@intCast(usize, len >> 3)]) |v| {
+        var h1: u64 = seed ^ (@as(u64, str.len) *% m);
+        for (@ptrCast([*]align(1) const u64, str.ptr)[0 .. str.len / 8]) |v| {
             var k1: u64 = v;
             if (native_endian == .Big)
                 k1 = @byteSwap(k1);
@@ -111,11 +110,11 @@ pub const Murmur2_64 = struct {
             h1 ^= k1;
             h1 *%= m;
         }
-        const rest = len & 7;
-        const offset = len - rest;
+        const rest = str.len & 7;
+        const offset = str.len - rest;
         if (rest > 0) {
             var k1: u64 = 0;
-            @memcpy(@ptrCast([*]u8, &k1), @ptrCast([*]const u8, &str[@intCast(usize, offset)]), @intCast(usize, rest));
+            @memcpy(@ptrCast([*]u8, &k1)[0..rest], str[offset..]);
             if (native_endian == .Big)
                 k1 = @byteSwap(k1);
             h1 ^= k1;
@@ -282,13 +281,8 @@ pub const Murmur3_32 = struct {

 fn SMHasherTest(comptime hash_fn: anytype, comptime hashbits: u32) u32 {
     const hashbytes = hashbits / 8;
-    var key: [256]u8 = undefined;
-    var hashes: [hashbytes * 256]u8 = undefined;
-    var final: [hashbytes]u8 = undefined;
-
-    @memset(@ptrCast([*]u8, &key[0]), 0, @sizeOf(@TypeOf(key)));
-    @memset(@ptrCast([*]u8, &hashes[0]), 0, @sizeOf(@TypeOf(hashes)));
-    @memset(@ptrCast([*]u8, &final[0]), 0, @sizeOf(@TypeOf(final)));
+    var key: [256]u8 = [1]u8{0} ** 256;
+    var hashes: [hashbytes * 256]u8 = [1]u8{0} ** (hashbytes * 256);

     var i: u32 = 0;
     while (i < 256) : (i += 1) {
@@ -297,7 +291,7 @@ fn SMHasherTest(comptime hash_fn: anytype, comptime hashbits: u32) u32 {
         var h = hash_fn(key[0..i], 256 - i);
         if (native_endian == .Big)
             h = @byteSwap(h);
-        @memcpy(@ptrCast([*]u8, &hashes[i * hashbytes]), @ptrCast([*]u8, &h), hashbytes);
+        @memcpy(hashes[i * hashbytes ..][0..hashbytes], @ptrCast([*]u8, &h));
     }

     return @truncate(u32, hash_fn(&hashes, 0));
@@ -1449,7 +1449,7 @@ pub fn HashMapUnmanaged(
     }

     fn initMetadatas(self: *Self) void {
-        @memset(@ptrCast([*]u8, self.metadata.?), 0, @sizeOf(Metadata) * self.capacity());
+        @memset(@ptrCast([*]u8, self.metadata.?)[0 .. @sizeOf(Metadata) * self.capacity()], 0);
     }

     // This counts the number of occupied slots (not counting tombstones), which is
@@ -759,7 +759,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
             const new_size_class = math.ceilPowerOfTwoAssert(usize, new_aligned_size);
             if (new_size_class <= size_class) {
                 if (old_mem.len > new_size) {
-                    @memset(old_mem.ptr + new_size, undefined, old_mem.len - new_size);
+                    @memset(old_mem[new_size..], undefined);
                 }
                 if (config.verbose_log) {
                     log.info("small resize {d} bytes at {*} to {d}", .{
@@ -911,7 +911,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
                     self.empty_buckets = bucket;
                 }
             } else {
-                @memset(old_mem.ptr, undefined, old_mem.len);
+                @memset(old_mem, undefined);
             }
             if (config.safety) {
                 assert(self.small_allocations.remove(@ptrToInt(old_mem.ptr)));
@@ -1011,7 +1011,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
             };
             self.buckets[bucket_index] = ptr;
             // Set the used bits to all zeroes
-            @memset(@as(*[1]u8, ptr.usedBits(0)), 0, usedBitsCount(size_class));
+            @memset(@as([*]u8, @as(*[1]u8, ptr.usedBits(0)))[0..usedBitsCount(size_class)], 0);
             return ptr;
         }
     };
@@ -2756,7 +2756,7 @@ test "big int conversion read twos complement with padding" {

     var buffer1 = try testing.allocator.alloc(u8, 16);
     defer testing.allocator.free(buffer1);
-    @memset(buffer1.ptr, 0xaa, buffer1.len);
+    @memset(buffer1, 0xaa);

     // writeTwosComplement:
     // (1) should not write beyond buffer[0..abi_size]
@@ -2773,7 +2773,7 @@ test "big int conversion read twos complement with padding" {
     a.toConst().writeTwosComplement(buffer1[0..16], .Big);
     try testing.expect(std.mem.eql(u8, buffer1, &[_]u8{ 0x0, 0x0, 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd }));

-    @memset(buffer1.ptr, 0xaa, buffer1.len);
+    @memset(buffer1, 0xaa);
     try a.set(-0x01_02030405_06070809_0a0b0c0d);
     bit_count = 12 * 8 + 2;

@@ -2794,7 +2794,7 @@ test "big int write twos complement +/- zero" {

     var buffer1 = try testing.allocator.alloc(u8, 16);
     defer testing.allocator.free(buffer1);
-    @memset(buffer1.ptr, 0xaa, buffer1.len);
+    @memset(buffer1, 0xaa);

     // Test zero

@@ -2807,7 +2807,7 @@ test "big int write twos complement +/- zero" {
     m.toConst().writeTwosComplement(buffer1[0..16], .Big);
     try testing.expect(std.mem.eql(u8, buffer1, &(([_]u8{0} ** 16))));

-    @memset(buffer1.ptr, 0xaa, buffer1.len);
+    @memset(buffer1, 0xaa);
     m.positive = false;

     // Test negative zero
@@ -215,7 +215,7 @@ pub fn allocAdvancedWithRetAddr(
     const byte_count = math.mul(usize, @sizeOf(T), n) catch return Error.OutOfMemory;
     const byte_ptr = self.rawAlloc(byte_count, log2a(a), return_address) orelse return Error.OutOfMemory;
     // TODO: https://github.com/ziglang/zig/issues/4298
-    @memset(byte_ptr, undefined, byte_count);
+    @memset(byte_ptr[0..byte_count], undefined);
     const byte_slice = byte_ptr[0..byte_count];
     return mem.bytesAsSlice(T, @alignCast(a, byte_slice));
 }
@@ -282,9 +282,10 @@ pub fn reallocAdvanced(

     const new_mem = self.rawAlloc(byte_count, log2a(Slice.alignment), return_address) orelse
         return error.OutOfMemory;
-    @memcpy(new_mem, old_byte_slice.ptr, @min(byte_count, old_byte_slice.len));
+    const copy_len = @min(byte_count, old_byte_slice.len);
+    @memcpy(new_mem[0..copy_len], old_byte_slice[0..copy_len]);
     // TODO https://github.com/ziglang/zig/issues/4298
-    @memset(old_byte_slice.ptr, undefined, old_byte_slice.len);
+    @memset(old_byte_slice, undefined);
     self.rawFree(old_byte_slice, log2a(Slice.alignment), return_address);

     return mem.bytesAsSlice(T, @alignCast(Slice.alignment, new_mem[0..byte_count]));
@@ -299,7 +300,7 @@ pub fn free(self: Allocator, memory: anytype) void {
     if (bytes_len == 0) return;
     const non_const_ptr = @constCast(bytes.ptr);
     // TODO: https://github.com/ziglang/zig/issues/4298
-    @memset(non_const_ptr, undefined, bytes_len);
+    @memset(non_const_ptr[0..bytes_len], undefined);
     self.rawFree(non_const_ptr[0..bytes_len], log2a(Slice.alignment), @returnAddress());
 }
@@ -360,11 +360,10 @@ pub fn MultiArrayList(comptime T: type) type {
             if (@sizeOf(field_info.type) != 0) {
                 const field = @intToEnum(Field, i);
                 const dest_slice = self_slice.items(field)[new_len..];
-                const byte_count = dest_slice.len * @sizeOf(field_info.type);
                 // We use memset here for more efficient codegen in safety-checked,
                 // valgrind-enabled builds. Otherwise the valgrind client request
                 // will be repeated for every element.
-                @memset(@ptrCast([*]u8, dest_slice.ptr), undefined, byte_count);
+                @memset(dest_slice, undefined);
             }
         }
         self.len = new_len;
@@ -1020,7 +1020,7 @@ fn linuxLookupName(
     for (addrs.items, 0..) |*addr, i| {
         var key: i32 = 0;
         var sa6: os.sockaddr.in6 = undefined;
-        @memset(@ptrCast([*]u8, &sa6), 0, @sizeOf(os.sockaddr.in6));
+        @memset(@ptrCast([*]u8, &sa6)[0..@sizeOf(os.sockaddr.in6)], 0);
         var da6 = os.sockaddr.in6{
             .family = os.AF.INET6,
             .scope_id = addr.addr.in6.sa.scope_id,
@@ -1029,7 +1029,7 @@ fn linuxLookupName(
             .addr = [1]u8{0} ** 16,
         };
         var sa4: os.sockaddr.in = undefined;
-        @memset(@ptrCast([*]u8, &sa4), 0, @sizeOf(os.sockaddr.in));
+        @memset(@ptrCast([*]u8, &sa4)[0..@sizeOf(os.sockaddr.in)], 0);
         var da4 = os.sockaddr.in{
             .family = os.AF.INET,
             .port = 65535,
@@ -1577,7 +1577,7 @@ fn resMSendRc(

     // Get local address and open/bind a socket
     var sa: Address = undefined;
-    @memset(@ptrCast([*]u8, &sa), 0, @sizeOf(Address));
+    @memset(@ptrCast([*]u8, &sa)[0..@sizeOf(Address)], 0);
     sa.any.family = family;
     try os.bind(fd, &sa.any, sl);
@@ -5217,7 +5217,7 @@ pub fn getFdPath(fd: fd_t, out_buffer: *[MAX_PATH_BYTES]u8) RealPathError![]u8 {
         .macos, .ios, .watchos, .tvos => {
             // On macOS, we can use F.GETPATH fcntl command to query the OS for
             // the path to the file descriptor.
-            @memset(out_buffer, 0, MAX_PATH_BYTES);
+            @memset(out_buffer[0..MAX_PATH_BYTES], 0);
             switch (errno(system.fcntl(fd, F.GETPATH, out_buffer))) {
                 .SUCCESS => {},
                 .BADF => return error.FileNotFound,
@@ -5308,7 +5308,7 @@ pub fn getFdPath(fd: fd_t, out_buffer: *[MAX_PATH_BYTES]u8) RealPathError![]u8 {
             if (comptime builtin.os.version_range.semver.max.order(.{ .major = 6, .minor = 0 }) == .lt) {
                 @compileError("querying for canonical path of a handle is unsupported on this host");
             }
-            @memset(out_buffer, 0, MAX_PATH_BYTES);
+            @memset(out_buffer[0..MAX_PATH_BYTES], 0);
             switch (errno(system.fcntl(fd, F.GETPATH, out_buffer))) {
                 .SUCCESS => {},
                 .BADF => return error.FileNotFound,
@@ -5322,7 +5322,7 @@ pub fn getFdPath(fd: fd_t, out_buffer: *[MAX_PATH_BYTES]u8) RealPathError![]u8 {
             if (comptime builtin.os.version_range.semver.max.order(.{ .major = 10, .minor = 0 }) == .lt) {
                 @compileError("querying for canonical path of a handle is unsupported on this host");
             }
-            @memset(out_buffer, 0, MAX_PATH_BYTES);
+            @memset(out_buffer[0..MAX_PATH_BYTES], 0);
             switch (errno(system.fcntl(fd, F.GETPATH, out_buffer))) {
                 .SUCCESS => {},
                 .ACCES => return error.AccessDenied,
@@ -5548,7 +5548,7 @@ pub fn toPosixPath(file_path: []const u8) ![MAX_PATH_BYTES - 1:0]u8 {
     var path_with_null: [MAX_PATH_BYTES - 1:0]u8 = undefined;
     // >= rather than > to make room for the null byte
     if (file_path.len >= MAX_PATH_BYTES) return error.NameTooLong;
-    mem.copy(u8, &path_with_null, file_path);
+    @memcpy(path_with_null[0..file_path.len], file_path);
     path_with_null[file_path.len] = 0;
     return path_with_null;
 }
@@ -5720,7 +5720,7 @@ pub fn res_mkquery(

     // Construct query template - ID will be filled later
     var q: [280]u8 = undefined;
-    @memset(&q, 0, n);
+    @memset(q[0..n], 0);
     q[2] = @as(u8, op) * 8 + 1;
     q[5] = 1;
     mem.copy(u8, q[13..], name);
@@ -1184,7 +1184,7 @@ pub fn sigaction(sig: u6, noalias act: ?*const Sigaction, noalias oact: ?*Sigact
             .mask = undefined,
             .restorer = @ptrCast(k_sigaction_funcs.restorer, restorer_fn),
         };
-        @memcpy(@ptrCast([*]u8, &ksa.mask), @ptrCast([*]const u8, &new.mask), mask_size);
+        @memcpy(@ptrCast([*]u8, &ksa.mask)[0..mask_size], @ptrCast([*]const u8, &new.mask));
     }

     const ksa_arg = if (act != null) @ptrToInt(&ksa) else 0;
@@ -1200,7 +1200,7 @@ pub fn sigaction(sig: u6, noalias act: ?*const Sigaction, noalias oact: ?*Sigact
     if (oact) |old| {
         old.handler.handler = oldksa.handler;
         old.flags = @truncate(c_uint, oldksa.flags);
-        @memcpy(@ptrCast([*]u8, &old.mask), @ptrCast([*]const u8, &oldksa.mask), mask_size);
+        @memcpy(@ptrCast([*]u8, &old.mask)[0..mask_size], @ptrCast([*]const u8, &oldksa.mask));
     }

     return 0;
@@ -1515,7 +1515,7 @@ pub fn sched_yield() usize {
 pub fn sched_getaffinity(pid: pid_t, size: usize, set: *cpu_set_t) usize {
     const rc = syscall3(.sched_getaffinity, @bitCast(usize, @as(isize, pid)), size, @ptrToInt(set));
     if (@bitCast(isize, rc) < 0) return rc;
-    if (rc < size) @memset(@ptrCast([*]u8, set) + rc, 0, size - rc);
+    if (rc < size) @memset(@ptrCast([*]u8, set)[rc..size], 0);
     return 0;
 }
@@ -755,9 +755,9 @@ pub fn CreateSymbolicLink(
     };

     std.mem.copy(u8, buffer[0..], std.mem.asBytes(&symlink_data));
-    @memcpy(buffer[@sizeOf(SYMLINK_DATA)..], @ptrCast([*]const u8, target_path), target_path.len * 2);
+    @memcpy(buffer[@sizeOf(SYMLINK_DATA)..][0 .. target_path.len * 2], @ptrCast([*]const u8, target_path));
     const paths_start = @sizeOf(SYMLINK_DATA) + target_path.len * 2;
-    @memcpy(buffer[paths_start..].ptr, @ptrCast([*]const u8, target_path), target_path.len * 2);
+    @memcpy(buffer[paths_start..][0 .. target_path.len * 2], @ptrCast([*]const u8, target_path));
     _ = try DeviceIoControl(symlink_handle, FSCTL_SET_REPARSE_POINT, buffer[0..buf_len], null);
 }

@@ -1179,7 +1179,7 @@ pub fn GetFinalPathNameByHandle(
     var input_struct = @ptrCast(*MOUNTMGR_MOUNT_POINT, &input_buf[0]);
     input_struct.DeviceNameOffset = @sizeOf(MOUNTMGR_MOUNT_POINT);
     input_struct.DeviceNameLength = @intCast(USHORT, volume_name_u16.len * 2);
-    @memcpy(input_buf[@sizeOf(MOUNTMGR_MOUNT_POINT)..], @ptrCast([*]const u8, volume_name_u16.ptr), volume_name_u16.len * 2);
+    @memcpy(input_buf[@sizeOf(MOUNTMGR_MOUNT_POINT)..][0 .. volume_name_u16.len * 2], @ptrCast([*]const u8, volume_name_u16.ptr));

     DeviceIoControl(mgmt_handle, IOCTL_MOUNTMGR_QUERY_POINTS, &input_buf, &output_buf) catch |err| switch (err) {
         error.AccessDenied => unreachable,
@@ -152,7 +152,7 @@ pub inline fn __builtin___memset_chk(

 pub inline fn __builtin_memset(dst: ?*anyopaque, val: c_int, len: usize) ?*anyopaque {
     const dst_cast = @ptrCast([*c]u8, dst);
-    @memset(dst_cast, @bitCast(u8, @truncate(i8, val)), len);
+    @memset(dst_cast[0..len], @bitCast(u8, @truncate(i8, val)));
     return dst;
 }

@@ -171,10 +171,10 @@ pub inline fn __builtin_memcpy(
     noalias src: ?*const anyopaque,
     len: usize,
 ) ?*anyopaque {
-    const dst_cast = @ptrCast([*c]u8, dst);
-    const src_cast = @ptrCast([*c]const u8, src);
-
-    @memcpy(dst_cast, src_cast, len);
+    if (len > 0) @memcpy(
+        @ptrCast([*]u8, dst.?)[0..len],
+        @ptrCast([*]const u8, src.?),
+    );
     return dst;
 }
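The new `if (len > 0)` guard appears to be load-bearing: the rewritten shim unwraps the optional C pointers with `dst.?` and `src.?`, and C callers sometimes pass NULL together with a zero length (technically undefined in C, but common in practice). Skipping the zero-length call avoids unwrapping null. An illustrative call:

    _ = __builtin_memcpy(null, null, 0); // a no-op thanks to the guard, rather than a null unwrap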
src/Air.zig
@@ -138,12 +138,14 @@ pub const Inst = struct {
         /// The offset is in element type units, not bytes.
         /// Wrapping is undefined behavior.
         /// The lhs is the pointer, rhs is the offset. Result type is the same as lhs.
+        /// The pointer may be a slice.
         /// Uses the `ty_pl` field. Payload is `Bin`.
         ptr_add,
         /// Subtract an offset from a pointer, returning a new pointer.
         /// The offset is in element type units, not bytes.
         /// Wrapping is undefined behavior.
         /// The lhs is the pointer, rhs is the offset. Result type is the same as lhs.
+        /// The pointer may be a slice.
         /// Uses the `ty_pl` field. Payload is `Bin`.
         ptr_sub,
         /// Given two operands which can be floats, integers, or vectors, returns the
@@ -462,6 +464,7 @@ pub const Inst = struct {
         /// Uses the `ty_op` field.
         load,
         /// Converts a pointer to its address. Result type is always `usize`.
+        /// Pointer type size may be any, including slice.
         /// Uses the `un_op` field.
         ptrtoint,
         /// Given a boolean, returns 0 or 1.
@@ -484,7 +487,16 @@ pub const Inst = struct {
         /// Write a value to a pointer. LHS is pointer, RHS is value.
         /// Result type is always void.
         /// Uses the `bin_op` field.
+        /// The value to store may be undefined, in which case the destination
+        /// memory region has undefined bytes after this instruction is
+        /// evaluated. In such case ignoring this instruction is legal
+        /// lowering.
         store,
+        /// Same as `store`, except if the value to store is undefined, the
+        /// memory region should be filled with 0xaa bytes, and any other
+        /// safety metadata such as Valgrind integrations should be notified of
+        /// this memory region being undefined.
+        store_safe,
         /// Indicates the program counter will never get to this instruction.
         /// Result type is always noreturn; no instructions in a block follow this one.
         unreach,
@@ -632,17 +644,33 @@ pub const Inst = struct {
         /// Uses the `pl_op` field with `pred` as operand, and payload `Bin`.
         select,

-        /// Given dest ptr, value, and len, set all elements at dest to value.
+        /// Given dest pointer and value, set all elements at dest to value.
+        /// Dest pointer is either a slice or a pointer to array.
+        /// The element type may be any type, and the slice may have any alignment.
         /// Result type is always void.
-        /// Uses the `pl_op` field. Operand is the dest ptr. Payload is `Bin`. `lhs` is the
-        /// value, `rhs` is the length.
-        /// The element type may be any type, not just u8.
+        /// Uses the `bin_op` field. LHS is the dest slice. RHS is the element value.
+        /// The element value may be undefined, in which case the destination
+        /// memory region has undefined bytes after this instruction is
+        /// evaluated. In such case ignoring this instruction is legal
+        /// lowering.
+        /// If the length is compile-time known (due to the destination being a
+        /// pointer-to-array), then it is guaranteed to be greater than zero.
         memset,
-        /// Given dest ptr, src ptr, and len, copy len elements from src to dest.
+        /// Same as `memset`, except if the element value is undefined, the memory region
+        /// should be filled with 0xaa bytes, and any other safety metadata such as Valgrind
+        /// integrations should be notified of this memory region being undefined.
+        memset_safe,
+        /// Given dest pointer and source pointer, copy elements from source to dest.
+        /// Dest pointer is either a slice or a pointer to array.
+        /// The dest element type may be any type.
+        /// Source pointer must have same element type as dest element type.
+        /// Dest slice may have any alignment; source pointer may have any alignment.
+        /// The two memory regions must not overlap.
         /// Result type is always void.
-        /// Uses the `pl_op` field. Operand is the dest ptr. Payload is `Bin`. `lhs` is the
-        /// src ptr, `rhs` is the length.
-        /// The element type may be any type, not just u8.
+        /// Uses the `bin_op` field. LHS is the dest slice. RHS is the source pointer.
+        /// If the length is compile-time known (due to the destination or
+        /// source being a pointer-to-array), then it is guaranteed to be
+        /// greater than zero.
         memcpy,

         /// Uses the `ty_pl` field with payload `Cmpxchg`.
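A consequence of the new encoding that these doc comments describe: the destination's type, not a separate operand, carries the length, so a pointer-to-array destination makes the length comptime-known (and then guaranteed nonzero). A hedged user-level sketch (variable names are illustrative):

    var buf: [16]u8 = undefined;
    @memset(&buf, 0); // dest is *[16]u8: length 16 is comptime-known
    const s: []u8 = buf[0..];
    @memset(s, 0); // dest is a slice: length is a runtime value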
@@ -1226,12 +1254,14 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type {
         .dbg_var_ptr,
         .dbg_var_val,
         .store,
+        .store_safe,
         .fence,
         .atomic_store_unordered,
         .atomic_store_monotonic,
         .atomic_store_release,
         .atomic_store_seq_cst,
         .memset,
+        .memset_safe,
         .memcpy,
         .set_union_tag,
         .prefetch,
@@ -1406,11 +1436,13 @@ pub fn mustLower(air: Air, inst: Air.Inst.Index) bool {
         .ret,
         .ret_load,
         .store,
+        .store_safe,
         .unreach,
         .optional_payload_ptr_set,
         .errunion_payload_ptr_set,
         .set_union_tag,
         .memset,
+        .memset_safe,
         .memcpy,
         .cmpxchg_weak,
         .cmpxchg_strong,
@@ -8453,18 +8453,16 @@ fn builtinCall(
             return rvalue(gz, ri, result, node);
         },
         .memcpy => {
-            _ = try gz.addPlNode(.memcpy, node, Zir.Inst.Memcpy{
-                .dest = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .manyptr_u8_type } }, params[0]),
-                .source = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .manyptr_const_u8_type } }, params[1]),
-                .byte_count = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .usize_type } }, params[2]),
+            _ = try gz.addPlNode(.memcpy, node, Zir.Inst.Bin{
+                .lhs = try expr(gz, scope, .{ .rl = .none }, params[0]),
+                .rhs = try expr(gz, scope, .{ .rl = .none }, params[1]),
             });
             return rvalue(gz, ri, .void_value, node);
         },
         .memset => {
-            _ = try gz.addPlNode(.memset, node, Zir.Inst.Memset{
-                .dest = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .manyptr_u8_type } }, params[0]),
-                .byte = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .u8_type } }, params[1]),
-                .byte_count = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .usize_type } }, params[2]),
+            _ = try gz.addPlNode(.memset, node, Zir.Inst.Bin{
+                .lhs = try expr(gz, scope, .{ .rl = .none }, params[0]),
+                .rhs = try expr(gz, scope, .{ .rl = .none }, params[1]),
             });
             return rvalue(gz, ri, .void_value, node);
         },
@@ -615,14 +615,14 @@ pub const list = list: {
         "@memcpy",
         .{
             .tag = .memcpy,
-            .param_count = 3,
+            .param_count = 2,
         },
     },
     .{
         "@memset",
         .{
             .tag = .memset,
-            .param_count = 3,
+            .param_count = 2,
         },
     },
     .{
@@ -299,11 +299,15 @@ pub fn categorizeOperand(
         },

         .store,
+        .store_safe,
         .atomic_store_unordered,
         .atomic_store_monotonic,
         .atomic_store_release,
         .atomic_store_seq_cst,
         .set_union_tag,
+        .memset,
+        .memset_safe,
+        .memcpy,
         => {
             const o = air_datas[inst].bin_op;
             if (o.lhs == operand_ref) return matchOperandSmallIndex(l, inst, 0, .write);
@@ -597,16 +601,6 @@ pub fn categorizeOperand(
             if (extra.operand == operand_ref) return matchOperandSmallIndex(l, inst, 1, .write);
             return .write;
         },
-        .memset,
-        .memcpy,
-        => {
-            const pl_op = air_datas[inst].pl_op;
-            const extra = air.extraData(Air.Bin, pl_op.payload).data;
-            if (pl_op.operand == operand_ref) return matchOperandSmallIndex(l, inst, 0, .write);
-            if (extra.lhs == operand_ref) return matchOperandSmallIndex(l, inst, 1, .write);
-            if (extra.rhs == operand_ref) return matchOperandSmallIndex(l, inst, 2, .write);
-            return .write;
-        },

         .br => {
             const br = air_datas[inst].br;
@@ -972,6 +966,7 @@ fn analyzeInst(
         .bool_and,
         .bool_or,
         .store,
+        .store_safe,
         .array_elem_val,
         .slice_elem_val,
         .ptr_elem_val,
@@ -987,6 +982,9 @@ fn analyzeInst(
         .set_union_tag,
         .min,
         .max,
+        .memset,
+        .memset_safe,
+        .memcpy,
         => {
             const o = inst_datas[inst].bin_op;
             return analyzeOperands(a, pass, data, inst, .{ o.lhs, o.rhs, .none });
@@ -1234,13 +1232,6 @@ fn analyzeInst(
             const extra = a.air.extraData(Air.AtomicRmw, pl_op.payload).data;
             return analyzeOperands(a, pass, data, inst, .{ pl_op.operand, extra.operand, .none });
         },
-        .memset,
-        .memcpy,
-        => {
-            const pl_op = inst_datas[inst].pl_op;
-            const extra = a.air.extraData(Air.Bin, pl_op.payload).data;
-            return analyzeOperands(a, pass, data, inst, .{ pl_op.operand, extra.lhs, extra.rhs });
-        },

         .br => return analyzeInstBr(a, pass, data, inst),
@@ -239,6 +239,7 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void {
             .bool_and,
             .bool_or,
             .store,
+            .store_safe,
             .array_elem_val,
             .slice_elem_val,
             .ptr_elem_val,
@@ -254,6 +255,9 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void {
             .set_union_tag,
             .min,
             .max,
+            .memset,
+            .memset_safe,
+            .memcpy,
             => {
                 const bin_op = data[inst].bin_op;
                 try self.verifyInst(inst, .{ bin_op.lhs, bin_op.rhs, .none });
@@ -306,13 +310,6 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void {
                 const extra = self.air.extraData(Air.Bin, vector_store_elem.payload).data;
                 try self.verifyInst(inst, .{ vector_store_elem.vector_ptr, extra.lhs, extra.rhs });
             },
-            .memset,
-            .memcpy,
-            => {
-                const pl_op = data[inst].pl_op;
-                const extra = self.air.extraData(Air.Bin, pl_op.payload).data;
-                try self.verifyInst(inst, .{ pl_op.operand, extra.lhs, extra.rhs });
-            },
             .cmpxchg_strong,
             .cmpxchg_weak,
             => {
src/Sema.zig
@@ -2500,7 +2500,7 @@ fn coerceResultPtr(

     // The last one is always `store`.
     const trash_inst = trash_block.instructions.items[trash_block.instructions.items.len - 1];
-    if (air_tags[trash_inst] != .store) {
+    if (air_tags[trash_inst] != .store and air_tags[trash_inst] != .store_safe) {
         // no store instruction is generated for zero sized types
         assert((try sema.typeHasOnePossibleValue(pointee_ty)) != null);
     } else {
@@ -3386,17 +3386,39 @@ fn zirIndexablePtrLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const src = inst_data.src();
     const object = try sema.resolveInst(inst_data.operand);

+    return indexablePtrLen(sema, block, src, object);
+}
+
+fn indexablePtrLen(
+    sema: *Sema,
+    block: *Block,
+    src: LazySrcLoc,
+    object: Air.Inst.Ref,
+) CompileError!Air.Inst.Ref {
     const object_ty = sema.typeOf(object);

     const is_pointer_to = object_ty.isSinglePointer();

-    const array_ty = if (is_pointer_to)
-        object_ty.childType()
-    else
-        object_ty;
-
+    const array_ty = if (is_pointer_to) object_ty.childType() else object_ty;
     try checkIndexable(sema, block, src, array_ty);
     return sema.fieldVal(block, src, object, "len", src);
 }

+fn indexablePtrLenOrNone(
+    sema: *Sema,
+    block: *Block,
+    src: LazySrcLoc,
+    object: Air.Inst.Ref,
+) CompileError!Air.Inst.Ref {
+    const object_ty = sema.typeOf(object);
+    const array_ty = t: {
+        const ptr_size = object_ty.ptrSizeOrNull() orelse break :t object_ty;
+        break :t switch (ptr_size) {
+            .Many => return .none,
+            .One => object_ty.childType(),
+            else => object_ty,
+        };
+    };
+    try checkIndexable(sema, block, src, array_ty);
+    return sema.fieldVal(block, src, object, "len", src);
+}
@@ -3502,7 +3524,7 @@ fn zirMakePtrConst(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro
         const candidate = block.instructions.items[search_index];
         switch (air_tags[candidate]) {
             .dbg_stmt, .dbg_block_begin, .dbg_block_end => continue,
-            .store => break candidate,
+            .store, .store_safe => break candidate,
             else => break :ct,
         }
     };
@@ -3728,7 +3750,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com
         const candidate = block.instructions.items[search_index];
         switch (air_tags[candidate]) {
             .dbg_stmt, .dbg_block_begin, .dbg_block_end => continue,
-            .store => break candidate,
+            .store, .store_safe => break candidate,
             else => break :ct,
         }
     };
@@ -3838,7 +3860,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com
             assert(replacement_block.instructions.items.len > 0);
             break :result sub_ptr;
         },
-        .store => result: {
+        .store, .store_safe => result: {
             const bin_op = sema.air_instructions.items(.data)[placeholder_inst].bin_op;
             try sema.storePtr2(&replacement_block, src, bin_op.lhs, src, bin_op.rhs, src, .bitcast);
             break :result .void_value;
@@ -4220,7 +4242,10 @@ fn validateUnionInit(
     while (block_index > 0) : (block_index -= 1) {
         const store_inst = block.instructions.items[block_index];
         if (store_inst == field_ptr_air_inst) break;
-        if (air_tags[store_inst] != .store) continue;
+        switch (air_tags[store_inst]) {
+            .store, .store_safe => {},
+            else => continue,
+        }
         const bin_op = air_datas[store_inst].bin_op;
         var lhs = bin_op.lhs;
         if (Air.refToIndex(lhs)) |lhs_index| {
@@ -4432,7 +4457,10 @@ fn validateStructInit(
                 struct_is_comptime = false;
                 continue :field;
             }
-            if (air_tags[store_inst] != .store) continue;
+            switch (air_tags[store_inst]) {
+                .store, .store_safe => {},
+                else => continue,
+            }
             const bin_op = air_datas[store_inst].bin_op;
             var lhs = bin_op.lhs;
             {
@@ -4660,7 +4688,10 @@ fn zirValidateArrayInit(
                 array_is_comptime = false;
                 continue :outer;
             }
-            if (air_tags[store_inst] != .store) continue;
+            switch (air_tags[store_inst]) {
+                .store, .store_safe => {},
+                else => continue,
+            }
             const bin_op = air_datas[store_inst].bin_op;
             var lhs = bin_op.lhs;
             {
@@ -5003,7 +5034,12 @@ fn zirStoreNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!v

     const ptr_src: LazySrcLoc = .{ .node_offset_store_ptr = inst_data.src_node };
     const operand_src: LazySrcLoc = .{ .node_offset_store_operand = inst_data.src_node };
-    const air_tag: Air.Inst.Tag = if (is_ret) .ret_ptr else .store;
+    const air_tag: Air.Inst.Tag = if (is_ret)
+        .ret_ptr
+    else if (block.wantSafety())
+        .store_safe
+    else
+        .store;
     return sema.storePtr2(block, src, ptr, ptr_src, operand, operand_src, air_tag);
 }
@@ -9861,8 +9897,11 @@ fn zirSliceStart(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
     const extra = sema.code.extraData(Zir.Inst.SliceStart, inst_data.payload_index).data;
     const array_ptr = try sema.resolveInst(extra.lhs);
     const start = try sema.resolveInst(extra.start);
+    const ptr_src: LazySrcLoc = .{ .node_offset_slice_ptr = inst_data.src_node };
+    const start_src: LazySrcLoc = .{ .node_offset_slice_start = inst_data.src_node };
+    const end_src: LazySrcLoc = .{ .node_offset_slice_end = inst_data.src_node };

-    return sema.analyzeSlice(block, src, array_ptr, start, .none, .none, .unneeded);
+    return sema.analyzeSlice(block, src, array_ptr, start, .none, .none, .unneeded, ptr_src, start_src, end_src);
 }

 fn zirSliceEnd(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -9875,8 +9914,11 @@ fn zirSliceEnd(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
     const array_ptr = try sema.resolveInst(extra.lhs);
     const start = try sema.resolveInst(extra.start);
     const end = try sema.resolveInst(extra.end);
+    const ptr_src: LazySrcLoc = .{ .node_offset_slice_ptr = inst_data.src_node };
+    const start_src: LazySrcLoc = .{ .node_offset_slice_start = inst_data.src_node };
+    const end_src: LazySrcLoc = .{ .node_offset_slice_end = inst_data.src_node };

-    return sema.analyzeSlice(block, src, array_ptr, start, end, .none, .unneeded);
+    return sema.analyzeSlice(block, src, array_ptr, start, end, .none, .unneeded, ptr_src, start_src, end_src);
 }

 fn zirSliceSentinel(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErr
@@ -9891,8 +9933,11 @@ fn zirSliceSentinel(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErr
     const start = try sema.resolveInst(extra.start);
     const end = try sema.resolveInst(extra.end);
     const sentinel = try sema.resolveInst(extra.sentinel);
+    const ptr_src: LazySrcLoc = .{ .node_offset_slice_ptr = inst_data.src_node };
+    const start_src: LazySrcLoc = .{ .node_offset_slice_start = inst_data.src_node };
+    const end_src: LazySrcLoc = .{ .node_offset_slice_end = inst_data.src_node };

-    return sema.analyzeSlice(block, src, array_ptr, start, end, sentinel, sentinel_src);
+    return sema.analyzeSlice(block, src, array_ptr, start, end, sentinel, sentinel_src, ptr_src, start_src, end_src);
 }

 fn zirSwitchCapture(
@ -21748,90 +21793,270 @@ fn analyzeMinMax(
|
||||
return block.addBinOp(air_tag, simd_op.lhs, simd_op.rhs);
|
||||
}
|
||||
|
||||
fn upgradeToArrayPtr(sema: *Sema, block: *Block, ptr: Air.Inst.Ref, len: u64) !Air.Inst.Ref {
|
||||
const mod = sema.mod;
|
||||
const info = sema.typeOf(ptr).ptrInfo().data;
|
||||
if (info.size == .One) {
|
||||
// Already an array pointer.
|
||||
return ptr;
|
||||
}
|
||||
const new_ty = try Type.ptr(sema.arena, mod, .{
|
||||
.pointee_type = try Type.array(sema.arena, len, info.sentinel, info.pointee_type, mod),
|
||||
.sentinel = null,
|
||||
.@"align" = info.@"align",
|
||||
.@"addrspace" = info.@"addrspace",
|
||||
.mutable = info.mutable,
|
||||
.@"allowzero" = info.@"allowzero",
|
||||
.@"volatile" = info.@"volatile",
|
||||
.size = .One,
|
||||
});
|
||||
if (info.size == .Slice) {
|
||||
return block.addTyOp(.slice_ptr, new_ty, ptr);
|
||||
}
|
||||
return block.addBitCast(new_ty, ptr);
|
||||
}
|
||||
|
||||
fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
|
||||
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
|
||||
const extra = sema.code.extraData(Zir.Inst.Memcpy, inst_data.payload_index).data;
|
||||
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
|
||||
const src = inst_data.src();
|
||||
const dest_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
|
||||
const src_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
|
||||
const len_src: LazySrcLoc = .{ .node_offset_builtin_call_arg2 = inst_data.src_node };
|
||||
const uncasted_dest_ptr = try sema.resolveInst(extra.dest);
|
||||
const dest_ptr = try sema.resolveInst(extra.lhs);
|
||||
const src_ptr = try sema.resolveInst(extra.rhs);
|
||||
const dest_len = try indexablePtrLenOrNone(sema, block, dest_src, dest_ptr);
|
||||
const src_len = try indexablePtrLenOrNone(sema, block, src_src, src_ptr);
|
||||
const target = sema.mod.getTarget();
|
||||
|
||||
// TODO AstGen's coerced_ty cannot handle volatile here
|
||||
var dest_ptr_info = Type.initTag(.manyptr_u8).ptrInfo().data;
|
||||
dest_ptr_info.@"volatile" = sema.typeOf(uncasted_dest_ptr).isVolatilePtr();
|
||||
const dest_ptr_ty = try Type.ptr(sema.arena, sema.mod, dest_ptr_info);
|
||||
const dest_ptr = try sema.coerce(block, dest_ptr_ty, uncasted_dest_ptr, dest_src);
|
||||
if (dest_len == .none and src_len == .none) {
|
||||
const msg = msg: {
|
||||
const msg = try sema.errMsg(block, src, "unknown @memcpy length", .{});
|
||||
errdefer msg.destroy(sema.gpa);
|
||||
try sema.errNote(block, dest_src, msg, "destination type {} provides no length", .{
|
||||
sema.typeOf(dest_ptr).fmt(sema.mod),
|
||||
});
|
||||
try sema.errNote(block, src_src, msg, "source type {} provides no length", .{
|
||||
sema.typeOf(src_ptr).fmt(sema.mod),
|
||||
});
|
||||
break :msg msg;
|
||||
};
|
||||
return sema.failWithOwnedErrorMsg(msg);
|
||||
}
|
||||
|
||||
const uncasted_src_ptr = try sema.resolveInst(extra.source);
|
||||
var src_ptr_info = Type.initTag(.manyptr_const_u8).ptrInfo().data;
|
||||
src_ptr_info.@"volatile" = sema.typeOf(uncasted_src_ptr).isVolatilePtr();
|
||||
const src_ptr_ty = try Type.ptr(sema.arena, sema.mod, src_ptr_info);
|
||||
const src_ptr = try sema.coerce(block, src_ptr_ty, uncasted_src_ptr, src_src);
|
||||
const len = try sema.coerce(block, Type.usize, try sema.resolveInst(extra.byte_count), len_src);
|
||||
var len_val: ?Value = null;
|
||||
|
||||
if (dest_len != .none and src_len != .none) check: {
|
||||
// If we can check at compile-time, no need for runtime safety.
|
||||
if (try sema.resolveDefinedValue(block, dest_src, dest_len)) |dest_len_val| {
|
||||
len_val = dest_len_val;
|
||||
if (try sema.resolveDefinedValue(block, src_src, src_len)) |src_len_val| {
|
||||
if (!(try sema.valuesEqual(dest_len_val, src_len_val, Type.usize))) {
|
||||
const msg = msg: {
|
||||
const msg = try sema.errMsg(block, src, "non-matching @memcpy lengths", .{});
|
||||
errdefer msg.destroy(sema.gpa);
|
||||
try sema.errNote(block, dest_src, msg, "length {} here", .{
|
||||
dest_len_val.fmtValue(Type.usize, sema.mod),
|
||||
});
|
||||
try sema.errNote(block, src_src, msg, "length {} here", .{
|
||||
src_len_val.fmtValue(Type.usize, sema.mod),
|
||||
});
|
||||
break :msg msg;
|
||||
};
|
||||
return sema.failWithOwnedErrorMsg(msg);
|
||||
}
|
||||
break :check;
|
||||
}
|
||||
} else if (try sema.resolveDefinedValue(block, src_src, src_len)) |src_len_val| {
|
||||
len_val = src_len_val;
|
||||
}
|
||||
|
||||
if (block.wantSafety()) {
|
||||
const ok = try block.addBinOp(.cmp_eq, dest_len, src_len);
|
||||
try sema.addSafetyCheck(block, ok, .memcpy_len_mismatch);
|
||||
}
|
||||
}
|
||||
|
||||
const runtime_src = if (try sema.resolveDefinedValue(block, dest_src, dest_ptr)) |dest_ptr_val| rs: {
|
||||
if (!dest_ptr_val.isComptimeMutablePtr()) break :rs dest_src;
|
||||
if (try sema.resolveDefinedValue(block, src_src, src_ptr)) |src_ptr_val| {
|
||||
if (!src_ptr_val.isComptimeMutablePtr()) break :rs src_src;
|
||||
if (try sema.resolveDefinedValue(block, len_src, len)) |len_val| {
|
||||
_ = len_val;
|
||||
return sema.fail(block, src, "TODO: Sema.zirMemcpy at comptime", .{});
|
||||
} else break :rs len_src;
|
||||
if (try sema.resolveDefinedValue(block, src_src, src_ptr)) |_| {
|
||||
const len_u64 = (try len_val.?.getUnsignedIntAdvanced(target, sema)).?;
|
||||
const len = try sema.usizeCast(block, dest_src, len_u64);
|
||||
for (0..len) |i| {
|
||||
const elem_index = try sema.addIntUnsigned(Type.usize, i);
|
||||
const dest_elem_ptr = try sema.elemPtr(
|
||||
block,
|
||||
src,
|
||||
dest_ptr,
|
||||
elem_index,
|
||||
src,
|
||||
true, // init
|
||||
false, // oob_safety
|
||||
);
|
||||
const src_elem_ptr = try sema.elemPtr(
|
||||
block,
|
||||
src,
|
||||
src_ptr,
|
||||
elem_index,
|
||||
src,
|
||||
false, // init
|
||||
false, // oob_safety
|
||||
);
|
||||
const uncoerced_elem = try sema.analyzeLoad(block, src, src_elem_ptr, src_src);
|
||||
try sema.storePtr2(
|
||||
block,
|
||||
src,
|
||||
dest_elem_ptr,
|
||||
dest_src,
|
||||
uncoerced_elem,
|
||||
src_src,
|
||||
.store,
|
||||
);
|
||||
}
|
||||
return;
|
||||
} else break :rs src_src;
|
||||
} else dest_src;
|
||||
|
||||
const dest_ty = sema.typeOf(dest_ptr);
|
||||
const src_ty = sema.typeOf(src_ptr);
|
||||
|
||||
// If in-memory coercion is not allowed, explode this memcpy call into a
|
||||
// for loop that copies element-wise.
|
||||
// Likewise if this is an iterable rather than a pointer, do the same
|
||||
// lowering. The AIR instruction requires pointers with element types of
|
||||
// equal ABI size.
|
||||
|
||||
if (dest_ty.zigTypeTag() != .Pointer or src_ty.zigTypeTag() != .Pointer) {
|
||||
return sema.fail(block, src, "TODO: lower @memcpy to a for loop because the source or destination iterable is a tuple", .{});
|
||||
}
|
||||
|
||||
const dest_elem_ty = dest_ty.elemType2();
|
||||
const src_elem_ty = src_ty.elemType2();
|
||||
if (.ok != try sema.coerceInMemoryAllowed(block, dest_elem_ty, src_elem_ty, true, target, dest_src, src_src)) {
|
||||
return sema.fail(block, src, "TODO: lower @memcpy to a for loop because the element types have different ABI sizes", .{});
|
||||
}
|
||||
|
||||
// If the length is comptime-known, then upgrade src and destination types
|
||||
// into pointer-to-array. At this point we know they are both pointers
|
||||
// already.
|
||||
var new_dest_ptr = dest_ptr;
|
||||
var new_src_ptr = src_ptr;
|
||||
if (len_val) |val| {
|
||||
const len = val.toUnsignedInt(target);
|
||||
if (len == 0) {
|
||||
// This AIR instruction guarantees length > 0 if it is comptime-known.
|
||||
return;
|
||||
}
|
||||
        new_dest_ptr = try upgradeToArrayPtr(sema, block, dest_ptr, len);
        new_src_ptr = try upgradeToArrayPtr(sema, block, src_ptr, len);
    }

    if (dest_len != .none) {
        // Change the src from slice to a many pointer, to avoid multiple ptr
        // slice extractions in AIR instructions.
        const new_src_ptr_ty = sema.typeOf(new_src_ptr);
        if (new_src_ptr_ty.isSlice()) {
            new_src_ptr = try sema.analyzeSlicePtr(block, src_src, new_src_ptr, new_src_ptr_ty);
        }
    }

    try sema.requireRuntimeBlock(block, src, runtime_src);

    // Aliasing safety check.
    if (block.wantSafety()) {
        const len = if (len_val) |v|
            try sema.addConstant(Type.usize, v)
        else if (dest_len != .none)
            dest_len
        else
            src_len;

        // Extract raw pointer from dest slice. The AIR instructions could support them, but
        // it would cause redundant machine code instructions.
        const new_dest_ptr_ty = sema.typeOf(new_dest_ptr);
        const raw_dest_ptr = if (new_dest_ptr_ty.isSlice())
            try sema.analyzeSlicePtr(block, dest_src, new_dest_ptr, new_dest_ptr_ty)
        else
            new_dest_ptr;

        // ok1: dest >= src + len
        // ok2: src >= dest + len
        const src_plus_len = try sema.analyzePtrArithmetic(block, src, new_src_ptr, len, .ptr_add, src_src, src);
        const dest_plus_len = try sema.analyzePtrArithmetic(block, src, raw_dest_ptr, len, .ptr_add, dest_src, src);
        const ok1 = try block.addBinOp(.cmp_gte, raw_dest_ptr, src_plus_len);
        const ok2 = try block.addBinOp(.cmp_gte, new_src_ptr, dest_plus_len);
        const ok = try block.addBinOp(.bit_or, ok1, ok2);
        try sema.addSafetyCheck(block, ok, .memcpy_alias);
    }

    _ = try block.addInst(.{
        .tag = .memcpy,
        .data = .{ .pl_op = .{
            .operand = dest_ptr,
            .payload = try sema.addExtra(Air.Bin{
                .lhs = src_ptr,
                .rhs = len,
            }),
        .data = .{ .bin_op = .{
            .lhs = new_dest_ptr,
            .rhs = new_src_ptr,
        } },
    });
}

fn zirMemset(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const extra = sema.code.extraData(Zir.Inst.Memset, inst_data.payload_index).data;
    const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
    const src = inst_data.src();
    const dest_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    const value_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
    const len_src: LazySrcLoc = .{ .node_offset_builtin_call_arg2 = inst_data.src_node };
    const uncasted_dest_ptr = try sema.resolveInst(extra.dest);
    const dest_ptr = try sema.resolveInst(extra.lhs);
    const uncoerced_elem = try sema.resolveInst(extra.rhs);
    const dest_ptr_ty = sema.typeOf(dest_ptr);
    try checkIndexable(sema, block, dest_src, dest_ptr_ty);

    // TODO AstGen's coerced_ty cannot handle volatile here
    var ptr_info = Type.initTag(.manyptr_u8).ptrInfo().data;
    ptr_info.@"volatile" = sema.typeOf(uncasted_dest_ptr).isVolatilePtr();
    const dest_ptr_ty = try Type.ptr(sema.arena, sema.mod, ptr_info);
    const dest_ptr = try sema.coerce(block, dest_ptr_ty, uncasted_dest_ptr, dest_src);

    const value = try sema.coerce(block, Type.u8, try sema.resolveInst(extra.byte), value_src);
    const len = try sema.coerce(block, Type.usize, try sema.resolveInst(extra.byte_count), len_src);
    const dest_elem_ty = dest_ptr_ty.elemType2();
    const target = sema.mod.getTarget();

    const runtime_src = if (try sema.resolveDefinedValue(block, dest_src, dest_ptr)) |ptr_val| rs: {
        const len_air_ref = try sema.fieldVal(block, src, dest_ptr, "len", dest_src);
        const len_val = (try sema.resolveDefinedValue(block, dest_src, len_air_ref)) orelse
            break :rs dest_src;
        const len_u64 = (try len_val.getUnsignedIntAdvanced(target, sema)).?;
        const len = try sema.usizeCast(block, dest_src, len_u64);
        if (len == 0) {
            // This AIR instruction guarantees length > 0 if it is comptime-known.
            return;
        }

        if (!ptr_val.isComptimeMutablePtr()) break :rs dest_src;
        if (try sema.resolveDefinedValue(block, len_src, len)) |len_val| {
            if (try sema.resolveMaybeUndefVal(value)) |val| {
                _ = len_val;
                _ = val;
                return sema.fail(block, src, "TODO: Sema.zirMemset at comptime", .{});
            } else break :rs value_src;
        } else break :rs len_src;
        if (try sema.resolveMaybeUndefVal(uncoerced_elem)) |_| {
            for (0..len) |i| {
                const elem_index = try sema.addIntUnsigned(Type.usize, i);
                const elem_ptr = try sema.elemPtr(
                    block,
                    src,
                    dest_ptr,
                    elem_index,
                    src,
                    true, // init
                    false, // oob_safety
                );
                try sema.storePtr2(
                    block,
                    src,
                    elem_ptr,
                    dest_src,
                    uncoerced_elem,
                    value_src,
                    .store,
                );
            }
            return;
        } else break :rs value_src;
    } else dest_src;

    const elem = try sema.coerce(block, dest_elem_ty, uncoerced_elem, value_src);

    try sema.requireRuntimeBlock(block, src, runtime_src);
    _ = try block.addInst(.{
        .tag = .memset,
        .data = .{ .pl_op = .{
            .operand = dest_ptr,
            .payload = try sema.addExtra(Air.Bin{
                .lhs = value,
                .rhs = len,
            }),
        .tag = if (block.wantSafety()) .memset_safe else .memset,
        .data = .{ .bin_op = .{
            .lhs = dest_ptr,
            .rhs = elem,
        } },
    });
}
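
A minimal sketch of the call shapes the semantic analysis above accepts (buffer names are illustrative, not taken from this commit). Note the aliasing check built just above: overlapping operands trip the new `.memcpy_alias` safety panic.

    test "memcpy/memset call shapes (sketch)" {
        var src = [_]u8{ 1, 2, 3, 4 };
        var dst: [4]u8 = undefined;
        @memcpy(&dst, &src); // two pointers to arrays; the lengths must agree
        @memcpy(dst[0..2], src[2..4]); // two slices; lengths are checked in safe builds
        @memset(&dst, 0); // dest supplies the length; 0 coerces to the element type
    }
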
@ -22948,6 +23173,8 @@ pub const PanicId = enum {
    index_out_of_bounds,
    start_index_greater_than_end,
    for_len_mismatch,
    memcpy_len_mismatch,
    memcpy_alias,
};

fn addSafetyCheck(
@ -26521,7 +26748,8 @@ fn storePtr(
    ptr: Air.Inst.Ref,
    uncasted_operand: Air.Inst.Ref,
) CompileError!void {
    return sema.storePtr2(block, src, ptr, src, uncasted_operand, src, .store);
    const air_tag: Air.Inst.Tag = if (block.wantSafety()) .store_safe else .store;
    return sema.storePtr2(block, src, ptr, src, uncasted_operand, src, air_tag);
}

fn storePtr2(
@ -28768,10 +28996,10 @@ fn analyzeSlice(
    uncasted_end_opt: Air.Inst.Ref,
    sentinel_opt: Air.Inst.Ref,
    sentinel_src: LazySrcLoc,
    ptr_src: LazySrcLoc,
    start_src: LazySrcLoc,
    end_src: LazySrcLoc,
) CompileError!Air.Inst.Ref {
    const ptr_src: LazySrcLoc = .{ .node_offset_slice_ptr = src.node_offset.x };
    const start_src: LazySrcLoc = .{ .node_offset_slice_start = src.node_offset.x };
    const end_src: LazySrcLoc = .{ .node_offset_slice_end = src.node_offset.x };
    // Slice expressions can operate on a variable whose type is an array. This requires
    // the slice operand to be a pointer. In the case of a non-array, it will be a double pointer.
    const ptr_ptr_ty = sema.typeOf(ptr_ptr);
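
The two panic ids added above back the runtime checks that zirMemcpy builds. The non-overlap test reduces to a pair of address comparisons; a minimal sketch, assuming byte addresses with the length already scaled to bytes (the function name is hypothetical):

    // Mirrors ok1/ok2 in zirMemcpy: regions may be adjacent but not overlap.
    fn memcpyRegionsOk(dest: usize, src: usize, len: usize) bool {
        return dest >= src + len or src >= dest + len;
    }
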
src/Zir.zig

@ -922,10 +922,10 @@ pub const Inst = struct {
        /// Uses the `pl_node` union field with payload `FieldParentPtr`.
        field_parent_ptr,
        /// Implements the `@memcpy` builtin.
        /// Uses the `pl_node` union field with payload `Memcpy`.
        /// Uses the `pl_node` union field with payload `Bin`.
        memcpy,
        /// Implements the `@memset` builtin.
        /// Uses the `pl_node` union field with payload `Memset`.
        /// Uses the `pl_node` union field with payload `Bin`.
        memset,
        /// Implements the `@min` builtin.
        /// Uses the `pl_node` union field with payload `Bin`
@ -3501,18 +3501,6 @@ pub const Inst = struct {
        field_ptr: Ref,
    };

    pub const Memcpy = struct {
        dest: Ref,
        source: Ref,
        byte_count: Ref,
    };

    pub const Memset = struct {
        dest: Ref,
        byte: Ref,
        byte_count: Ref,
    };

    pub const Shuffle = struct {
        elem_type: Ref,
        a: Ref,

@ -764,7 +764,8 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
            .ptrtoint => try self.airPtrToInt(inst),
            .ret => try self.airRet(inst),
            .ret_load => try self.airRetLoad(inst),
            .store => try self.airStore(inst),
            .store => try self.airStore(inst, false),
            .store_safe => try self.airStore(inst, true),
            .struct_field_ptr=> try self.airStructFieldPtr(inst),
            .struct_field_val=> try self.airStructFieldVal(inst),
            .array_to_slice => try self.airArrayToSlice(inst),
@ -775,7 +776,8 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
            .atomic_rmw => try self.airAtomicRmw(inst),
            .atomic_load => try self.airAtomicLoad(inst),
            .memcpy => try self.airMemcpy(inst),
            .memset => try self.airMemset(inst),
            .memset => try self.airMemset(inst, false),
            .memset_safe => try self.airMemset(inst, true),
            .set_union_tag => try self.airSetUnionTag(inst),
            .get_union_tag => try self.airGetUnionTag(inst),
            .clz => try self.airClz(inst),
@ -4035,7 +4037,12 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type
    }
}

fn airStore(self: *Self, inst: Air.Inst.Index) !void {
fn airStore(self: *Self, inst: Air.Inst.Index, safety: bool) !void {
    if (safety) {
        // TODO if the value is undef, write 0xaa bytes to dest
    } else {
        // TODO if the value is undef, don't lower this instruction
    }
    const bin_op = self.air.instructions.items(.data)[inst].bin_op;
    const ptr = try self.resolveInst(bin_op.lhs);
    const value = try self.resolveInst(bin_op.rhs);
@ -5975,8 +5982,13 @@ fn airAtomicStore(self: *Self, inst: Air.Inst.Index, order: std.builtin.AtomicOr
    return self.fail("TODO implement airAtomicStore for {}", .{self.target.cpu.arch});
}

fn airMemset(self: *Self, inst: Air.Inst.Index) !void {
fn airMemset(self: *Self, inst: Air.Inst.Index, safety: bool) !void {
    _ = inst;
    if (safety) {
        // TODO if the value is undef, write 0xaa bytes to dest
    } else {
        // TODO if the value is undef, don't lower this instruction
    }
    return self.fail("TODO implement airMemset for {}", .{self.target.cpu.arch});
}

@ -748,7 +748,8 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
            .ptrtoint => try self.airPtrToInt(inst),
            .ret => try self.airRet(inst),
            .ret_load => try self.airRetLoad(inst),
            .store => try self.airStore(inst),
            .store => try self.airStore(inst, false),
            .store_safe => try self.airStore(inst, true),
            .struct_field_ptr=> try self.airStructFieldPtr(inst),
            .struct_field_val=> try self.airStructFieldVal(inst),
            .array_to_slice => try self.airArrayToSlice(inst),
@ -759,7 +760,8 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
            .atomic_rmw => try self.airAtomicRmw(inst),
            .atomic_load => try self.airAtomicLoad(inst),
            .memcpy => try self.airMemcpy(inst),
            .memset => try self.airMemset(inst),
            .memset => try self.airMemset(inst, false),
            .memset_safe => try self.airMemset(inst, true),
            .set_union_tag => try self.airSetUnionTag(inst),
            .get_union_tag => try self.airGetUnionTag(inst),
            .clz => try self.airClz(inst),
@ -2835,7 +2837,12 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type
    }
}

fn airStore(self: *Self, inst: Air.Inst.Index) !void {
fn airStore(self: *Self, inst: Air.Inst.Index, safety: bool) !void {
    if (safety) {
        // TODO if the value is undef, write 0xaa bytes to dest
    } else {
        // TODO if the value is undef, don't lower this instruction
    }
    const bin_op = self.air.instructions.items(.data)[inst].bin_op;
    const ptr = try self.resolveInst(bin_op.lhs);
    const value = try self.resolveInst(bin_op.rhs);
@ -5921,7 +5928,12 @@ fn airAtomicStore(self: *Self, inst: Air.Inst.Index, order: std.builtin.AtomicOr
    return self.fail("TODO implement airAtomicStore for {}", .{self.target.cpu.arch});
}

fn airMemset(self: *Self, inst: Air.Inst.Index) !void {
fn airMemset(self: *Self, inst: Air.Inst.Index, safety: bool) !void {
    if (safety) {
        // TODO if the value is undef, write 0xaa bytes to dest
    } else {
        // TODO if the value is undef, don't lower this instruction
    }
    _ = inst;
    return self.fail("TODO implement airMemset for {}", .{self.target.cpu.arch});
}

@ -578,7 +578,8 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
            .ptrtoint => try self.airPtrToInt(inst),
            .ret => try self.airRet(inst),
            .ret_load => try self.airRetLoad(inst),
            .store => try self.airStore(inst),
            .store => try self.airStore(inst, false),
            .store_safe => try self.airStore(inst, true),
            .struct_field_ptr=> try self.airStructFieldPtr(inst),
            .struct_field_val=> try self.airStructFieldVal(inst),
            .array_to_slice => try self.airArrayToSlice(inst),
@ -589,7 +590,8 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
            .atomic_rmw => try self.airAtomicRmw(inst),
            .atomic_load => try self.airAtomicLoad(inst),
            .memcpy => try self.airMemcpy(inst),
            .memset => try self.airMemset(inst),
            .memset => try self.airMemset(inst, false),
            .memset_safe => try self.airMemset(inst, true),
            .set_union_tag => try self.airSetUnionTag(inst),
            .get_union_tag => try self.airGetUnionTag(inst),
            .clz => try self.airClz(inst),
@ -1572,7 +1574,12 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type
    }
}

fn airStore(self: *Self, inst: Air.Inst.Index) !void {
fn airStore(self: *Self, inst: Air.Inst.Index, safety: bool) !void {
    if (safety) {
        // TODO if the value is undef, write 0xaa bytes to dest
    } else {
        // TODO if the value is undef, don't lower this instruction
    }
    const bin_op = self.air.instructions.items(.data)[inst].bin_op;
    const ptr = try self.resolveInst(bin_op.lhs);
    const value = try self.resolveInst(bin_op.rhs);
@ -2421,8 +2428,13 @@ fn airAtomicStore(self: *Self, inst: Air.Inst.Index, order: std.builtin.AtomicOr
    return self.fail("TODO implement airAtomicStore for {}", .{self.target.cpu.arch});
}

fn airMemset(self: *Self, inst: Air.Inst.Index) !void {
fn airMemset(self: *Self, inst: Air.Inst.Index, safety: bool) !void {
    _ = inst;
    if (safety) {
        // TODO if the value is undef, write 0xaa bytes to dest
    } else {
        // TODO if the value is undef, don't lower this instruction
    }
    return self.fail("TODO implement airMemset for {}", .{self.target.cpu.arch});
}

@ -593,7 +593,8 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
            .ptrtoint => try self.airPtrToInt(inst),
            .ret => try self.airRet(inst),
            .ret_load => try self.airRetLoad(inst),
            .store => try self.airStore(inst),
            .store => try self.airStore(inst, false),
            .store_safe => try self.airStore(inst, true),
            .struct_field_ptr=> @panic("TODO try self.airStructFieldPtr(inst)"),
            .struct_field_val=> try self.airStructFieldVal(inst),
            .array_to_slice => try self.airArrayToSlice(inst),
@ -605,7 +606,8 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
            .atomic_rmw => try self.airAtomicRmw(inst),
            .atomic_load => try self.airAtomicLoad(inst),
            .memcpy => @panic("TODO try self.airMemcpy(inst)"),
            .memset => try self.airMemset(inst),
            .memset => try self.airMemset(inst, false),
            .memset_safe => try self.airMemset(inst, true),
            .set_union_tag => try self.airSetUnionTag(inst),
            .get_union_tag => try self.airGetUnionTag(inst),
            .clz => try self.airClz(inst),
@ -1764,7 +1766,12 @@ fn airLoop(self: *Self, inst: Air.Inst.Index) !void {
    return self.finishAirBookkeeping();
}

fn airMemset(self: *Self, inst: Air.Inst.Index) !void {
fn airMemset(self: *Self, inst: Air.Inst.Index, safety: bool) !void {
    if (safety) {
        // TODO if the value is undef, write 0xaa bytes to dest
    } else {
        // TODO if the value is undef, don't lower this instruction
    }
    const pl_op = self.air.instructions.items(.data)[inst].pl_op;
    const extra = self.air.extraData(Air.Bin, pl_op.payload);

@ -2401,7 +2408,12 @@ fn airSplat(self: *Self, inst: Air.Inst.Index) !void {
    return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}

fn airStore(self: *Self, inst: Air.Inst.Index) !void {
fn airStore(self: *Self, inst: Air.Inst.Index, safety: bool) !void {
    if (safety) {
        // TODO if the value is undef, write 0xaa bytes to dest
    } else {
        // TODO if the value is undef, don't lower this instruction
    }
    const bin_op = self.air.instructions.items(.data)[inst].bin_op;
    const ptr = try self.resolveInst(bin_op.lhs);
    const value = try self.resolveInst(bin_op.rhs);

@ -1883,7 +1883,8 @@ fn genInst(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {

        .load => func.airLoad(inst),
        .loop => func.airLoop(inst),
        .memset => func.airMemset(inst),
        .memset => func.airMemset(inst, false),
        .memset_safe => func.airMemset(inst, true),
        .not => func.airNot(inst),
        .optional_payload => func.airOptionalPayload(inst),
        .optional_payload_ptr => func.airOptionalPayloadPtr(inst),
@ -1913,7 +1914,8 @@ fn genInst(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
        .slice_ptr => func.airSlicePtr(inst),
        .ptr_slice_len_ptr => func.airPtrSliceFieldPtr(inst, func.ptrSize()),
        .ptr_slice_ptr_ptr => func.airPtrSliceFieldPtr(inst, 0),
        .store => func.airStore(inst),
        .store => func.airStore(inst, false),
        .store_safe => func.airStore(inst, true),

        .set_union_tag => func.airSetUnionTag(inst),
        .struct_field_ptr => func.airStructFieldPtr(inst),
@ -2221,7 +2223,12 @@ fn airAlloc(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
    func.finishAir(inst, value, &.{});
}

fn airStore(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
fn airStore(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void {
    if (safety) {
        // TODO if the value is undef, write 0xaa bytes to dest
    } else {
        // TODO if the value is undef, don't lower this instruction
    }
    const bin_op = func.air.instructions.items(.data)[inst].bin_op;

    const lhs = try func.resolveInst(bin_op.lhs);
@ -4148,9 +4155,7 @@ fn airSliceLen(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
    const ty_op = func.air.instructions.items(.data)[inst].ty_op;

    const operand = try func.resolveInst(ty_op.operand);
    const len = try func.load(operand, Type.usize, func.ptrSize());
    const result = try len.toLocal(func, Type.usize);
    func.finishAir(inst, result, &.{ty_op.operand});
    func.finishAir(inst, try func.sliceLen(operand), &.{ty_op.operand});
}

fn airSliceElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
@ -4208,9 +4213,17 @@ fn airSliceElemPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
fn airSlicePtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
    const ty_op = func.air.instructions.items(.data)[inst].ty_op;
    const operand = try func.resolveInst(ty_op.operand);
    func.finishAir(inst, try func.slicePtr(operand), &.{ty_op.operand});
}

fn slicePtr(func: *CodeGen, operand: WValue) InnerError!WValue {
    const ptr = try func.load(operand, Type.usize, 0);
    const result = try ptr.toLocal(func, Type.usize);
    func.finishAir(inst, result, &.{ty_op.operand});
    return ptr.toLocal(func, Type.usize);
}

fn sliceLen(func: *CodeGen, operand: WValue) InnerError!WValue {
    const len = try func.load(operand, Type.usize, func.ptrSize());
    return len.toLocal(func, Type.usize);
}

fn airTrunc(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
@ -4274,8 +4287,10 @@ fn airArrayToSlice(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
fn airPtrToInt(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
    const un_op = func.air.instructions.items(.data)[inst].un_op;
    const operand = try func.resolveInst(un_op);

    const result = switch (operand) {
    const ptr_ty = func.air.typeOf(un_op);
    const result = if (ptr_ty.isSlice())
        try func.slicePtr(operand)
    else switch (operand) {
        // for stack offset, return a pointer to this offset.
        .stack_offset => try func.buildPointerOffset(operand, 0, .new),
        else => func.reuseOperand(un_op, operand),
@ -4375,16 +4390,25 @@ fn airPtrBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
    func.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs });
}

fn airMemset(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
    const pl_op = func.air.instructions.items(.data)[inst].pl_op;
    const bin_op = func.air.extraData(Air.Bin, pl_op.payload).data;
fn airMemset(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void {
    if (safety) {
        // TODO if the value is undef, write 0xaa bytes to dest
    } else {
        // TODO if the value is undef, don't lower this instruction
    }
    const bin_op = func.air.instructions.items(.data)[inst].bin_op;

    const ptr = try func.resolveInst(pl_op.operand);
    const value = try func.resolveInst(bin_op.lhs);
    const len = try func.resolveInst(bin_op.rhs);
    const ptr = try func.resolveInst(bin_op.lhs);
    const ptr_ty = func.air.typeOf(bin_op.lhs);
    const value = try func.resolveInst(bin_op.rhs);
    const len = switch (ptr_ty.ptrSize()) {
        .Slice => try func.sliceLen(ptr),
        .One => @as(WValue, .{ .imm32 = @intCast(u32, ptr_ty.childType().arrayLen()) }),
        .C, .Many => unreachable,
    };
    try func.memset(ptr, len, value);

    func.finishAir(inst, .none, &.{ pl_op.operand, bin_op.lhs, bin_op.rhs });
    func.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
}

/// Sets a region of memory at `ptr` to the value of `value`
@ -5155,15 +5179,30 @@ fn airFieldParentPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
    func.finishAir(inst, result, &.{extra.field_ptr});
}

fn airMemcpy(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
    const pl_op = func.air.instructions.items(.data)[inst].pl_op;
    const bin_op = func.air.extraData(Air.Bin, pl_op.payload).data;
    const dst = try func.resolveInst(pl_op.operand);
    const src = try func.resolveInst(bin_op.lhs);
    const len = try func.resolveInst(bin_op.rhs);
    try func.memcpy(dst, src, len);
fn sliceOrArrayPtr(func: *CodeGen, ptr: WValue, ptr_ty: Type) InnerError!WValue {
    if (ptr_ty.isSlice()) {
        return func.slicePtr(ptr);
    } else {
        return ptr;
    }
}

    func.finishAir(inst, .none, &.{ pl_op.operand, bin_op.lhs, bin_op.rhs });
fn airMemcpy(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
    const bin_op = func.air.instructions.items(.data)[inst].bin_op;
    const dst = try func.resolveInst(bin_op.lhs);
    const dst_ty = func.air.typeOf(bin_op.lhs);
    const src = try func.resolveInst(bin_op.rhs);
    const src_ty = func.air.typeOf(bin_op.rhs);
    const len = switch (dst_ty.ptrSize()) {
        .Slice => try func.sliceLen(dst),
        .One => @as(WValue, .{ .imm64 = dst_ty.childType().arrayLen() }),
        .C, .Many => unreachable,
    };
    const dst_ptr = try func.sliceOrArrayPtr(dst, dst_ty);
    const src_ptr = try func.sliceOrArrayPtr(src, src_ty);
    try func.memcpy(dst_ptr, src_ptr, len);

    func.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
}

fn airRetAddr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
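
The wasm lowering above shows the length rule that every backend in this commit repeats: with the explicit byte count gone from the AIR instruction, the element count is recovered from the destination operand itself. A compact sketch of that rule in plain Zig (the function name is illustrative):

    fn destElemCount(comptime T: type, dest: T) usize {
        const info = @typeInfo(T).Pointer;
        return switch (info.size) {
            .Slice => dest.len, // a slice carries its own element count
            .One => @typeInfo(info.child).Array.len, // *[N]T carries N in the type
            .Many, .C => unreachable, // rejected earlier: no length available
        };
    }
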
@ -1035,7 +1035,8 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
            .ptrtoint => try self.airPtrToInt(inst),
            .ret => try self.airRet(inst),
            .ret_load => try self.airRetLoad(inst),
            .store => try self.airStore(inst),
            .store => try self.airStore(inst, false),
            .store_safe => try self.airStore(inst, true),
            .struct_field_ptr=> try self.airStructFieldPtr(inst),
            .struct_field_val=> try self.airStructFieldVal(inst),
            .array_to_slice => try self.airArrayToSlice(inst),
@ -1046,7 +1047,8 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
            .atomic_rmw => try self.airAtomicRmw(inst),
            .atomic_load => try self.airAtomicLoad(inst),
            .memcpy => try self.airMemcpy(inst),
            .memset => try self.airMemset(inst),
            .memset => try self.airMemset(inst, false),
            .memset_safe => try self.airMemset(inst, true),
            .set_union_tag => try self.airSetUnionTag(inst),
            .get_union_tag => try self.airGetUnionTag(inst),
            .clz => try self.airClz(inst),
@ -3935,7 +3937,12 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type
    }
}

fn airStore(self: *Self, inst: Air.Inst.Index) !void {
fn airStore(self: *Self, inst: Air.Inst.Index, safety: bool) !void {
    if (safety) {
        // TODO if the value is undef, write 0xaa bytes to dest
    } else {
        // TODO if the value is undef, don't lower this instruction
    }
    const bin_op = self.air.instructions.items(.data)[inst].bin_op;
    const ptr = try self.resolveInst(bin_op.lhs);
    const ptr_ty = self.air.typeOf(bin_op.lhs);
@ -7678,6 +7685,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
fn airPtrToInt(self: *Self, inst: Air.Inst.Index) !void {
    const un_op = self.air.instructions.items(.data)[inst].un_op;
    const result = result: {
        // TODO: handle case where the operand is a slice not a raw pointer
        const src_mcv = try self.resolveInst(un_op);
        if (self.reuseOperand(inst, un_op, 0, src_mcv)) break :result src_mcv;

@ -8148,64 +8156,164 @@ fn airAtomicStore(self: *Self, inst: Air.Inst.Index, order: std.builtin.AtomicOr
    return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}

fn airMemset(self: *Self, inst: Air.Inst.Index) !void {
    const pl_op = self.air.instructions.items(.data)[inst].pl_op;
    const extra = self.air.extraData(Air.Bin, pl_op.payload).data;
fn airMemset(self: *Self, inst: Air.Inst.Index, safety: bool) !void {
    if (safety) {
        // TODO if the value is undef, write 0xaa bytes to dest
    } else {
        // TODO if the value is undef, don't lower this instruction
    }

    const dst_ptr = try self.resolveInst(pl_op.operand);
    const bin_op = self.air.instructions.items(.data)[inst].bin_op;

    const dst_ptr = try self.resolveInst(bin_op.lhs);
    const dst_ptr_ty = self.air.typeOf(bin_op.lhs);
    const dst_ptr_lock: ?RegisterLock = switch (dst_ptr) {
        .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
        else => null,
    };
    defer if (dst_ptr_lock) |lock| self.register_manager.unlockReg(lock);

    const src_val = try self.resolveInst(extra.lhs);
    const src_val = try self.resolveInst(bin_op.rhs);
    const elem_ty = self.air.typeOf(bin_op.rhs);
    const src_val_lock: ?RegisterLock = switch (src_val) {
        .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
        else => null,
    };
    defer if (src_val_lock) |lock| self.register_manager.unlockReg(lock);

    const len = try self.resolveInst(extra.rhs);
    const len_lock: ?RegisterLock = switch (len) {
        .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
        else => null,
    };
    defer if (len_lock) |lock| self.register_manager.unlockReg(lock);
    const elem_abi_size = @intCast(u31, elem_ty.abiSize(self.target.*));

    try self.genInlineMemset(dst_ptr, src_val, len, .{});
    if (elem_abi_size == 1) {
        const ptr = switch (dst_ptr_ty.ptrSize()) {
            // TODO: this only handles slices stored in the stack
            .Slice => @as(MCValue, .{ .stack_offset = dst_ptr.stack_offset - 0 }),
            .One => dst_ptr,
            .C, .Many => unreachable,
        };
        const len = switch (dst_ptr_ty.ptrSize()) {
            // TODO: this only handles slices stored in the stack
            .Slice => @as(MCValue, .{ .stack_offset = dst_ptr.stack_offset - 8 }),
            .One => @as(MCValue, .{ .immediate = dst_ptr_ty.childType().arrayLen() }),
            .C, .Many => unreachable,
        };
        const len_lock: ?RegisterLock = switch (len) {
            .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
            else => null,
        };
        defer if (len_lock) |lock| self.register_manager.unlockReg(lock);

        return self.finishAir(inst, .unreach, .{ pl_op.operand, extra.lhs, extra.rhs });
        try self.genInlineMemset(ptr, src_val, len, .{});
        return self.finishAir(inst, .unreach, .{ bin_op.lhs, bin_op.rhs, .none });
    }

    // Store the first element, and then rely on memcpy copying forwards.
    // Length zero requires a runtime check - so we handle arrays specially
    // here to elide it.
    switch (dst_ptr_ty.ptrSize()) {
        .Slice => {
            var buf: Type.SlicePtrFieldTypeBuffer = undefined;
            const slice_ptr_ty = dst_ptr_ty.slicePtrFieldType(&buf);

            // TODO: this only handles slices stored in the stack
            const ptr = @as(MCValue, .{ .stack_offset = dst_ptr.stack_offset - 0 });
            const len = @as(MCValue, .{ .stack_offset = dst_ptr.stack_offset - 8 });

            // Used to store the number of elements for comparison.
            // After comparison, updated to store number of bytes needed to copy.
            const len_reg = try self.register_manager.allocReg(null, gp);
            const len_mcv: MCValue = .{ .register = len_reg };
            const len_lock = self.register_manager.lockRegAssumeUnused(len_reg);
            defer self.register_manager.unlockReg(len_lock);

            try self.asmRegisterMemory(.mov, len_reg, Memory.sib(.qword, .{
                .base = .rbp,
                .disp = -len.stack_offset,
            }));

            const skip_reloc = try self.asmJccReloc(undefined, .z);
            try self.store(ptr, src_val, slice_ptr_ty, elem_ty);

            const second_elem_ptr_reg = try self.register_manager.allocReg(null, gp);
            const second_elem_ptr_mcv: MCValue = .{ .register = second_elem_ptr_reg };
            const second_elem_ptr_lock = self.register_manager.lockRegAssumeUnused(second_elem_ptr_reg);
            defer self.register_manager.unlockReg(second_elem_ptr_lock);

            try self.asmRegisterMemory(
                .lea,
                second_elem_ptr_reg,
                Memory.sib(.qword, .{
                    .base = try self.copyToTmpRegister(Type.usize, ptr),
                    .disp = elem_abi_size,
                }),
            );

            try self.genBinOpMir(.sub, Type.usize, len_mcv, .{ .immediate = 1 });
            try self.asmRegisterRegisterImmediate(.imul, len_reg, len_reg, Immediate.u(elem_abi_size));
            try self.genInlineMemcpy(second_elem_ptr_mcv, ptr, len_mcv, .{});

            try self.performReloc(skip_reloc);
        },
        .One => {
            const len = dst_ptr_ty.childType().arrayLen();
            assert(len != 0); // prevented by Sema
            try self.store(dst_ptr, src_val, dst_ptr_ty, elem_ty);

            const second_elem_ptr_reg = try self.register_manager.allocReg(null, gp);
            const second_elem_ptr_mcv: MCValue = .{ .register = second_elem_ptr_reg };
            const second_elem_ptr_lock = self.register_manager.lockRegAssumeUnused(second_elem_ptr_reg);
            defer self.register_manager.unlockReg(second_elem_ptr_lock);

            try self.asmRegisterMemory(
                .lea,
                second_elem_ptr_reg,
                Memory.sib(.qword, .{
                    .base = try self.copyToTmpRegister(Type.usize, dst_ptr),
                    .disp = elem_abi_size,
                }),
            );

            const bytes_to_copy: MCValue = .{ .immediate = elem_abi_size * (len - 1) };
            try self.genInlineMemcpy(second_elem_ptr_mcv, dst_ptr, bytes_to_copy, .{});
        },
        .C, .Many => unreachable,
    }

    return self.finishAir(inst, .unreach, .{ bin_op.lhs, bin_op.rhs, .none });
}

fn airMemcpy(self: *Self, inst: Air.Inst.Index) !void {
    const pl_op = self.air.instructions.items(.data)[inst].pl_op;
    const extra = self.air.extraData(Air.Bin, pl_op.payload).data;
    const bin_op = self.air.instructions.items(.data)[inst].bin_op;

    const dst_ptr = try self.resolveInst(pl_op.operand);
    const dst_ptr = try self.resolveInst(bin_op.lhs);
    const dst_ptr_ty = self.air.typeOf(bin_op.lhs);
    const dst_ptr_lock: ?RegisterLock = switch (dst_ptr) {
        .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
        else => null,
    };
    defer if (dst_ptr_lock) |lock| self.register_manager.unlockReg(lock);

    const src_ptr = try self.resolveInst(extra.lhs);
    const src_ptr = try self.resolveInst(bin_op.rhs);
    const src_ptr_lock: ?RegisterLock = switch (src_ptr) {
        .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
        else => null,
    };
    defer if (src_ptr_lock) |lock| self.register_manager.unlockReg(lock);

    const len = try self.resolveInst(extra.rhs);
    const len = switch (dst_ptr_ty.ptrSize()) {
        .Slice => @as(MCValue, .{ .stack_offset = dst_ptr.stack_offset - 8 }),
        .One => @as(MCValue, .{ .immediate = dst_ptr_ty.childType().arrayLen() }),
        .C, .Many => unreachable,
    };
    const len_lock: ?RegisterLock = switch (len) {
        .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
        else => null,
    };
    defer if (len_lock) |lock| self.register_manager.unlockReg(lock);

    // TODO: dst_ptr and src_ptr could be slices rather than raw pointers
    try self.genInlineMemcpy(dst_ptr, src_ptr, len, .{});

    return self.finishAir(inst, .unreach, .{ pl_op.operand, extra.lhs, extra.rhs });
    return self.finishAir(inst, .unreach, .{ bin_op.lhs, bin_op.rhs, .none });
}

fn airTagName(self: *Self, inst: Air.Inst.Index) !void {
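
The x86_64 memset lowering above relies on a trick worth spelling out: store the pattern once, then run an inline memcpy whose source trails its destination by one element, so the forward-moving copy replicates the pattern across the buffer. A minimal sketch of the idea in plain Zig (illustrative; the backend emits this as machine code, and it only works because the copy strictly moves forward):

    fn splatForward(buf: []u8, elem_size: usize) void {
        // buf[0..elem_size] already holds the pattern; each byte written is
        // read from elem_size bytes behind it, so the pattern propagates.
        var i: usize = elem_size;
        while (i < buf.len) : (i += 1) {
            buf[i] = buf[i - elem_size];
        }
    }
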
@ -2924,7 +2924,8 @@ fn genBodyInner(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail,
        .load => try airLoad(f, inst),
        .ret => try airRet(f, inst, false),
        .ret_load => try airRet(f, inst, true),
        .store => try airStore(f, inst),
        .store => try airStore(f, inst, false),
        .store_safe => try airStore(f, inst, true),
        .loop => try airLoop(f, inst),
        .cond_br => try airCondBr(f, inst),
        .br => try airBr(f, inst),
@ -2935,7 +2936,8 @@ fn genBodyInner(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail,
        .cmpxchg_strong => try airCmpxchg(f, inst, "strong"),
        .atomic_rmw => try airAtomicRmw(f, inst),
        .atomic_load => try airAtomicLoad(f, inst),
        .memset => try airMemset(f, inst),
        .memset => try airMemset(f, inst, false),
        .memset_safe => try airMemset(f, inst, true),
        .memcpy => try airMemcpy(f, inst),
        .set_union_tag => try airSetUnionTag(f, inst),
        .get_union_tag => try airGetUnionTag(f, inst),
@ -3574,19 +3576,7 @@ fn airBoolToInt(f: *Function, inst: Air.Inst.Index) !CValue {
    return local;
}

fn storeUndefined(f: *Function, lhs_child_ty: Type, dest_ptr: CValue) !CValue {
    if (f.wantSafety()) {
        const writer = f.object.writer();
        try writer.writeAll("memset(");
        try f.writeCValue(writer, dest_ptr, .FunctionArgument);
        try writer.print(", {x}, sizeof(", .{try f.fmtIntLiteral(Type.u8, Value.undef)});
        try f.renderType(writer, lhs_child_ty);
        try writer.writeAll("));\n");
    }
    return .none;
}

fn airStore(f: *Function, inst: Air.Inst.Index) !CValue {
fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
    // *a = b;
    const bin_op = f.air.instructions.items(.data)[inst].bin_op;

@ -3597,18 +3587,19 @@ fn airStore(f: *Function, inst: Air.Inst.Index) !CValue {
    const ptr_val = try f.resolveInst(bin_op.lhs);
    const src_ty = f.air.typeOf(bin_op.rhs);

    // TODO Sema should emit a different instruction when the store should
    // possibly do the safety 0xaa bytes for undefined.
    const src_val_is_undefined =
        if (f.air.value(bin_op.rhs)) |v| v.isUndefDeep() else false;
    if (src_val_is_undefined) {
        if (ptr_info.host_size == 0) {
            try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
            return try storeUndefined(f, ptr_info.pointee_type, ptr_val);
        } else if (!f.wantSafety()) {
            try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
            return .none;
    const val_is_undef = if (f.air.value(bin_op.rhs)) |v| v.isUndefDeep() else false;

    if (val_is_undef) {
        try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
        if (safety and ptr_info.host_size == 0) {
            const writer = f.object.writer();
            try writer.writeAll("memset(");
            try f.writeCValue(writer, ptr_val, .FunctionArgument);
            try writer.writeAll(", 0xaa, sizeof(");
            try f.renderType(writer, ptr_info.pointee_type);
            try writer.writeAll("));\n");
        }
        return .none;
    }

    const target = f.object.dg.module.getTarget();
@ -3844,8 +3835,8 @@ fn airCmpOp(
    data: anytype,
    operator: std.math.CompareOperator,
) !CValue {
    const operand_ty = f.air.typeOf(data.lhs);
    const scalar_ty = operand_ty.scalarType();
    const lhs_ty = f.air.typeOf(data.lhs);
    const scalar_ty = lhs_ty.scalarType();

    const target = f.object.dg.module.getTarget();
    const scalar_bits = scalar_ty.bitSize(target);
@ -3866,17 +3857,21 @@ fn airCmpOp(
    const rhs = try f.resolveInst(data.rhs);
    try reap(f, inst, &.{ data.lhs, data.rhs });

    const rhs_ty = f.air.typeOf(data.rhs);
    const need_cast = lhs_ty.isSinglePointer() != rhs_ty.isSinglePointer();
    const writer = f.object.writer();
    const local = try f.allocLocal(inst, inst_ty);
    const v = try Vectorize.start(f, inst, writer, operand_ty);
    const v = try Vectorize.start(f, inst, writer, lhs_ty);
    try f.writeCValue(writer, local, .Other);
    try v.elem(f, writer);
    try writer.writeAll(" = ");
    if (need_cast) try writer.writeAll("(void*)");
    try f.writeCValue(writer, lhs, .Other);
    try v.elem(f, writer);
    try writer.writeByte(' ');
    try writer.writeAll(compareOperatorC(operator));
    try writer.writeByte(' ');
    if (need_cast) try writer.writeAll("(void*)");
    try f.writeCValue(writer, rhs, .Other);
    try v.elem(f, writer);
    try writer.writeAll(";\n");
@ -5784,6 +5779,7 @@ fn airPtrToInt(f: *Function, inst: Air.Inst.Index) !CValue {
    const un_op = f.air.instructions.items(.data)[inst].un_op;

    const operand = try f.resolveInst(un_op);
    const operand_ty = f.air.typeOf(un_op);
    try reap(f, inst, &.{un_op});
    const inst_ty = f.air.typeOfIndex(inst);
    const writer = f.object.writer();
@ -5793,7 +5789,11 @@ fn airPtrToInt(f: *Function, inst: Air.Inst.Index) !CValue {
    try writer.writeAll(" = (");
    try f.renderType(writer, inst_ty);
    try writer.writeByte(')');
    try f.writeCValue(writer, operand, .Other);
    if (operand_ty.isSlice()) {
        try f.writeCValueMember(writer, operand, .{ .identifier = "ptr" });
    } else {
        try f.writeCValue(writer, operand, .Other);
    }
    try writer.writeAll(";\n");
    return local;
}
@ -6186,19 +6186,66 @@ fn airAtomicStore(f: *Function, inst: Air.Inst.Index, order: [*:0]const u8) !CVa
    return .none;
}

fn airMemset(f: *Function, inst: Air.Inst.Index) !CValue {
    const pl_op = f.air.instructions.items(.data)[inst].pl_op;
    const extra = f.air.extraData(Air.Bin, pl_op.payload).data;
    const dest_ty = f.air.typeOf(pl_op.operand);
    const dest_ptr = try f.resolveInst(pl_op.operand);
    const value = try f.resolveInst(extra.lhs);
    const len = try f.resolveInst(extra.rhs);
fn writeSliceOrPtr(f: *Function, writer: anytype, ptr: CValue, ptr_ty: Type) !void {
    if (ptr_ty.isSlice()) {
        try f.writeCValueMember(writer, ptr, .{ .identifier = "ptr" });
    } else {
        try f.writeCValue(writer, ptr, .FunctionArgument);
    }
}

fn airMemset(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
    const bin_op = f.air.instructions.items(.data)[inst].bin_op;
    const dest_ty = f.air.typeOf(bin_op.lhs);
    const dest_slice = try f.resolveInst(bin_op.lhs);
    const value = try f.resolveInst(bin_op.rhs);
    const elem_ty = f.air.typeOf(bin_op.rhs);
    const target = f.object.dg.module.getTarget();
    const elem_abi_size = elem_ty.abiSize(target);
    const val_is_undef = if (f.air.value(bin_op.rhs)) |val| val.isUndefDeep() else false;
    const writer = f.object.writer();
    if (dest_ty.isVolatilePtr()) {
        var u8_ptr_pl = dest_ty.ptrInfo();
        u8_ptr_pl.data.pointee_type = Type.u8;
        const u8_ptr_ty = Type.initPayload(&u8_ptr_pl.base);

    if (val_is_undef) {
        if (!safety) {
            try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
            return .none;
        }

        try writer.writeAll("memset(");
        switch (dest_ty.ptrSize()) {
            .Slice => {
                try f.writeCValueMember(writer, dest_slice, .{ .identifier = "ptr" });
                try writer.writeAll(", 0xaa, ");
                try f.writeCValueMember(writer, dest_slice, .{ .identifier = "len" });
                if (elem_abi_size > 1) {
                    try writer.print(" * {d});\n", .{elem_abi_size});
                } else {
                    try writer.writeAll(");\n");
                }
            },
            .One => {
                const array_ty = dest_ty.childType();
                const len = array_ty.arrayLen() * elem_abi_size;

                try f.writeCValue(writer, dest_slice, .FunctionArgument);
                try writer.print(", 0xaa, {d});\n", .{len});
            },
            .Many, .C => unreachable,
        }
        try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
        return .none;
    }

    if (elem_abi_size > 1 or dest_ty.isVolatilePtr()) {
        // For the assignment in this loop, the array pointer needs to get
        // casted to a regular pointer, otherwise an error like this occurs:
        // error: array type 'uint32_t[20]' (aka 'unsigned int[20]') is not assignable
        var elem_ptr_ty_pl: Type.Payload.ElemType = .{
            .base = .{ .tag = .c_mut_pointer },
            .data = elem_ty,
        };
        const elem_ptr_ty = Type.initPayload(&elem_ptr_ty_pl.base);

        const index = try f.allocLocal(inst, Type.usize);

        try writer.writeAll("for (");
@ -6208,56 +6255,95 @@ fn airMemset(f: *Function, inst: Air.Inst.Index) !CValue {
        try writer.writeAll("; ");
        try f.writeCValue(writer, index, .Other);
        try writer.writeAll(" != ");
        try f.writeCValue(writer, len, .Other);
        try writer.writeAll("; ");
        switch (dest_ty.ptrSize()) {
            .Slice => {
                try f.writeCValueMember(writer, dest_slice, .{ .identifier = "len" });
            },
            .One => {
                const array_ty = dest_ty.childType();
                try writer.print("{d}", .{array_ty.arrayLen()});
            },
            .Many, .C => unreachable,
        }
        try writer.writeAll("; ++");
        try f.writeCValue(writer, index, .Other);
        try writer.writeAll(" += ");
        try f.object.dg.renderValue(writer, Type.usize, Value.one, .Other);
        try writer.writeAll(") ((");
        try f.renderType(writer, u8_ptr_ty);
        try f.renderType(writer, elem_ptr_ty);
        try writer.writeByte(')');
        try f.writeCValue(writer, dest_ptr, .FunctionArgument);
        try writeSliceOrPtr(f, writer, dest_slice, dest_ty);
        try writer.writeAll(")[");
        try f.writeCValue(writer, index, .Other);
        try writer.writeAll("] = ");
        try f.writeCValue(writer, value, .FunctionArgument);
        try writer.writeAll(";\n");

        try reap(f, inst, &.{ pl_op.operand, extra.lhs, extra.rhs });
        try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
        try freeLocal(f, inst, index.new_local, 0);

        return .none;
    }

    try reap(f, inst, &.{ pl_op.operand, extra.lhs, extra.rhs });
    try writer.writeAll("memset(");
    try f.writeCValue(writer, dest_ptr, .FunctionArgument);
    try writer.writeAll(", ");
    try f.writeCValue(writer, value, .FunctionArgument);
    try writer.writeAll(", ");
    try f.writeCValue(writer, len, .FunctionArgument);
    try writer.writeAll(");\n");
    switch (dest_ty.ptrSize()) {
        .Slice => {
            try f.writeCValueMember(writer, dest_slice, .{ .identifier = "ptr" });
            try writer.writeAll(", ");
            try f.writeCValue(writer, value, .FunctionArgument);
            try writer.writeAll(", ");
            try f.writeCValueMember(writer, dest_slice, .{ .identifier = "len" });
            try writer.writeAll(");\n");
        },
        .One => {
            const array_ty = dest_ty.childType();
            const len = array_ty.arrayLen() * elem_abi_size;

            try f.writeCValue(writer, dest_slice, .FunctionArgument);
            try writer.writeAll(", ");
            try f.writeCValue(writer, value, .FunctionArgument);
            try writer.print(", {d});\n", .{len});
        },
        .Many, .C => unreachable,
    }
    try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
    return .none;
}

fn airMemcpy(f: *Function, inst: Air.Inst.Index) !CValue {
    const pl_op = f.air.instructions.items(.data)[inst].pl_op;
    const extra = f.air.extraData(Air.Bin, pl_op.payload).data;
    const dest_ptr = try f.resolveInst(pl_op.operand);
    const src_ptr = try f.resolveInst(extra.lhs);
    const len = try f.resolveInst(extra.rhs);
    try reap(f, inst, &.{ pl_op.operand, extra.lhs, extra.rhs });
    const bin_op = f.air.instructions.items(.data)[inst].bin_op;
    const dest_ptr = try f.resolveInst(bin_op.lhs);
    const src_ptr = try f.resolveInst(bin_op.rhs);
    const dest_ty = f.air.typeOf(bin_op.lhs);
    const src_ty = f.air.typeOf(bin_op.rhs);
    const target = f.object.dg.module.getTarget();
    const writer = f.object.writer();

    try writer.writeAll("memcpy(");
    try f.writeCValue(writer, dest_ptr, .FunctionArgument);
    try writeSliceOrPtr(f, writer, dest_ptr, dest_ty);
    try writer.writeAll(", ");
    try f.writeCValue(writer, src_ptr, .FunctionArgument);
    try writeSliceOrPtr(f, writer, src_ptr, src_ty);
    try writer.writeAll(", ");
    try f.writeCValue(writer, len, .FunctionArgument);
    try writer.writeAll(");\n");
    switch (dest_ty.ptrSize()) {
        .Slice => {
            const elem_ty = dest_ty.childType();
            const elem_abi_size = elem_ty.abiSize(target);
            try f.writeCValueMember(writer, dest_ptr, .{ .identifier = "len" });
            if (elem_abi_size > 1) {
                try writer.print(" * {d});\n", .{elem_abi_size});
            } else {
                try writer.writeAll(");\n");
            }
        },
        .One => {
            const array_ty = dest_ty.childType();
            const elem_ty = array_ty.childType();
            const elem_abi_size = elem_ty.abiSize(target);
            const len = array_ty.arrayLen() * elem_abi_size;
            try writer.print("{d});\n", .{len});
        },
        .Many, .C => unreachable,
    }

    try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
    return .none;
}
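
Roughly, the C backend paths above emit the following; this is an illustration of the code paths, not verbatim compiler output:

    @memset(slice_of_u8, b)   ->  memset(s.ptr, b, s.len);
    @memset(slice_of_u32, w)  ->  for (i = 0; i != s.len; ++i) ((uint32_t *)s.ptr)[i] = w;
    @memset(&array_of_u8, b)  ->  memset(a, b, N); /* N = array length in bytes */
    @memcpy(dest, src)        ->  memcpy(dest_ptr, src_ptr, len_in_bytes);
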
@ -4649,7 +4649,8 @@ pub const FuncGen = struct {
|
||||
.not => try self.airNot(inst),
|
||||
.ret => try self.airRet(inst),
|
||||
.ret_load => try self.airRetLoad(inst),
|
||||
.store => try self.airStore(inst),
|
||||
.store => try self.airStore(inst, false),
|
||||
.store_safe => try self.airStore(inst, true),
|
||||
.assembly => try self.airAssembly(inst),
|
||||
.slice_ptr => try self.airSliceField(inst, 0),
|
||||
.slice_len => try self.airSliceField(inst, 1),
|
||||
@ -4672,7 +4673,8 @@ pub const FuncGen = struct {
|
||||
.fence => try self.airFence(inst),
|
||||
.atomic_rmw => try self.airAtomicRmw(inst),
|
||||
.atomic_load => try self.airAtomicLoad(inst),
|
||||
.memset => try self.airMemset(inst),
|
||||
.memset => try self.airMemset(inst, false),
|
||||
.memset_safe => try self.airMemset(inst, true),
|
||||
.memcpy => try self.airMemcpy(inst),
|
||||
.set_union_tag => try self.airSetUnionTag(inst),
|
||||
.get_union_tag => try self.airGetUnionTag(inst),
|
||||
@ -5776,6 +5778,36 @@ pub const FuncGen = struct {
|
||||
return result;
|
||||
}
|
||||
|
||||
fn sliceOrArrayPtr(fg: *FuncGen, ptr: *llvm.Value, ty: Type) *llvm.Value {
|
||||
if (ty.isSlice()) {
|
||||
return fg.builder.buildExtractValue(ptr, 0, "");
|
||||
} else {
|
||||
return ptr;
|
||||
}
|
||||
}
|
||||
|
||||
fn sliceOrArrayLenInBytes(fg: *FuncGen, ptr: *llvm.Value, ty: Type) *llvm.Value {
|
||||
const target = fg.dg.module.getTarget();
|
||||
const llvm_usize_ty = fg.context.intType(target.cpu.arch.ptrBitWidth());
|
||||
switch (ty.ptrSize()) {
|
||||
.Slice => {
|
||||
const len = fg.builder.buildExtractValue(ptr, 1, "");
|
||||
const elem_ty = ty.childType();
|
||||
const abi_size = elem_ty.abiSize(target);
|
||||
if (abi_size == 1) return len;
|
||||
const abi_size_llvm_val = llvm_usize_ty.constInt(abi_size, .False);
|
||||
return fg.builder.buildMul(len, abi_size_llvm_val, "");
|
||||
},
|
||||
.One => {
|
||||
const array_ty = ty.childType();
|
||||
const elem_ty = array_ty.childType();
|
||||
const abi_size = elem_ty.abiSize(target);
|
||||
return llvm_usize_ty.constInt(array_ty.arrayLen() * abi_size, .False);
|
||||
},
|
||||
.Many, .C => unreachable,
|
||||
}
|
||||
}
|
||||
|
||||
fn airSliceField(self: *FuncGen, inst: Air.Inst.Index, index: c_uint) !?*llvm.Value {
|
||||
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
|
||||
const operand = try self.resolveInst(ty_op.operand);
|
||||
@ -7261,39 +7293,53 @@ pub const FuncGen = struct {
|
||||
fn airPtrAdd(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
|
||||
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
|
||||
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
|
||||
const base_ptr = try self.resolveInst(bin_op.lhs);
|
||||
const ptr = try self.resolveInst(bin_op.lhs);
|
||||
const offset = try self.resolveInst(bin_op.rhs);
|
||||
const ptr_ty = self.air.typeOf(bin_op.lhs);
|
||||
const llvm_elem_ty = try self.dg.lowerPtrElemTy(ptr_ty.childType());
|
||||
if (ptr_ty.ptrSize() == .One) {
|
||||
// It's a pointer to an array, so according to LLVM we need an extra GEP index.
|
||||
const indices: [2]*llvm.Value = .{
|
||||
self.context.intType(32).constNull(), offset,
|
||||
};
|
||||
return self.builder.buildInBoundsGEP(llvm_elem_ty, base_ptr, &indices, indices.len, "");
|
||||
} else {
|
||||
const indices: [1]*llvm.Value = .{offset};
|
||||
return self.builder.buildInBoundsGEP(llvm_elem_ty, base_ptr, &indices, indices.len, "");
|
||||
switch (ptr_ty.ptrSize()) {
|
||||
.One => {
|
||||
// It's a pointer to an array, so according to LLVM we need an extra GEP index.
|
||||
const indices: [2]*llvm.Value = .{ self.context.intType(32).constNull(), offset };
|
||||
return self.builder.buildInBoundsGEP(llvm_elem_ty, ptr, &indices, indices.len, "");
|
||||
},
|
||||
.C, .Many => {
|
||||
const indices: [1]*llvm.Value = .{offset};
|
||||
return self.builder.buildInBoundsGEP(llvm_elem_ty, ptr, &indices, indices.len, "");
|
||||
},
|
||||
.Slice => {
|
||||
const base = self.builder.buildExtractValue(ptr, 0, "");
|
||||
const indices: [1]*llvm.Value = .{offset};
|
||||
return self.builder.buildInBoundsGEP(llvm_elem_ty, base, &indices, indices.len, "");
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
fn airPtrSub(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
|
||||
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
|
||||
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
|
||||
const base_ptr = try self.resolveInst(bin_op.lhs);
|
||||
const ptr = try self.resolveInst(bin_op.lhs);
|
||||
const offset = try self.resolveInst(bin_op.rhs);
|
||||
const negative_offset = self.builder.buildNeg(offset, "");
|
||||
const ptr_ty = self.air.typeOf(bin_op.lhs);
|
||||
const llvm_elem_ty = try self.dg.lowerPtrElemTy(ptr_ty.childType());
|
||||
if (ptr_ty.ptrSize() == .One) {
|
||||
// It's a pointer to an array, so according to LLVM we need an extra GEP index.
|
||||
const indices: [2]*llvm.Value = .{
|
||||
self.context.intType(32).constNull(), negative_offset,
|
||||
};
|
||||
return self.builder.buildInBoundsGEP(llvm_elem_ty, base_ptr, &indices, indices.len, "");
|
||||
} else {
|
||||
const indices: [1]*llvm.Value = .{negative_offset};
|
||||
return self.builder.buildInBoundsGEP(llvm_elem_ty, base_ptr, &indices, indices.len, "");
|
||||
switch (ptr_ty.ptrSize()) {
|
||||
.One => {
|
||||
// It's a pointer to an array, so according to LLVM we need an extra GEP index.
|
||||
const indices: [2]*llvm.Value = .{
|
||||
self.context.intType(32).constNull(), negative_offset,
|
||||
};
|
||||
return self.builder.buildInBoundsGEP(llvm_elem_ty, ptr, &indices, indices.len, "");
|
||||
},
|
||||
.C, .Many => {
|
||||
const indices: [1]*llvm.Value = .{negative_offset};
|
||||
return self.builder.buildInBoundsGEP(llvm_elem_ty, ptr, &indices, indices.len, "");
|
||||
},
|
||||
.Slice => {
|
||||
const base = self.builder.buildExtractValue(ptr, 0, "");
|
||||
const indices: [1]*llvm.Value = .{negative_offset};
|
||||
return self.builder.buildInBoundsGEP(llvm_elem_ty, base, &indices, indices.len, "");
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
@ -7887,8 +7933,10 @@ pub const FuncGen = struct {
|
||||
fn airPtrToInt(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
|
||||
const un_op = self.air.instructions.items(.data)[inst].un_op;
|
||||
const operand = try self.resolveInst(un_op);
|
||||
const ptr_ty = self.air.typeOf(un_op);
|
||||
const operand_ptr = self.sliceOrArrayPtr(operand, ptr_ty);
|
||||
const dest_llvm_ty = try self.dg.lowerType(self.air.typeOfIndex(inst));
|
||||
return self.builder.buildPtrToInt(operand, dest_llvm_ty, "");
|
||||
return self.builder.buildPtrToInt(operand_ptr, dest_llvm_ty, "");
|
||||
}
|
||||
|
||||
fn airBitCast(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
|
||||
@ -8082,48 +8130,36 @@ pub const FuncGen = struct {
|
||||
return buildAllocaInner(self.context, self.builder, self.llvm_func, self.di_scope != null, llvm_ty, alignment, self.dg.module.getTarget());
|
||||
}
|
||||
|
||||
fn airStore(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
|
||||
fn airStore(self: *FuncGen, inst: Air.Inst.Index, safety: bool) !?*llvm.Value {
|
||||
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
|
||||
const dest_ptr = try self.resolveInst(bin_op.lhs);
|
||||
const ptr_ty = self.air.typeOf(bin_op.lhs);
|
||||
const operand_ty = ptr_ty.childType();
|
||||
|
||||
// TODO Sema should emit a different instruction when the store should
|
||||
// possibly do the safety 0xaa bytes for undefined.
|
||||
const val_is_undef = if (self.air.value(bin_op.rhs)) |val| val.isUndefDeep() else false;
|
||||
if (val_is_undef) {
|
||||
{
|
||||
// TODO let's handle this in AIR rather than by having each backend
|
||||
// check the optimization mode of the compilation because the plan is
|
||||
// to support setting the optimization mode at finer grained scopes
|
||||
// which happens in Sema. Codegen should not be aware of this logic.
|
||||
// I think this comment is basically the same as the other TODO comment just
|
||||
// above but I'm leaving them both here to make it look super messy and
|
||||
// thereby bait contributors (or let's be honest, probably myself) into
|
||||
// fixing this instead of letting it rot.
|
||||
const safety = switch (self.dg.module.comp.bin_file.options.optimize_mode) {
|
||||
.ReleaseSmall, .ReleaseFast => false,
|
||||
.Debug, .ReleaseSafe => true,
|
||||
};
|
||||
if (!safety) {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
// Even if safety is disabled, we still emit a memset to undefined since it conveys
|
||||
// extra information to LLVM. However, safety makes the difference between using
|
||||
// 0xaa or actual undefined for the fill byte.
|
||||
const u8_llvm_ty = self.context.intType(8);
|
||||
const fill_byte = if (safety)
|
||||
u8_llvm_ty.constInt(0xaa, .False)
|
||||
else
|
||||
u8_llvm_ty.getUndef();
|
||||
const target = self.dg.module.getTarget();
|
||||
const operand_size = operand_ty.abiSize(target);
|
||||
const u8_llvm_ty = self.context.intType(8);
|
||||
const fill_char = u8_llvm_ty.constInt(0xaa, .False);
|
||||
const dest_ptr_align = ptr_ty.ptrAlignment(target);
|
||||
const usize_llvm_ty = try self.dg.lowerType(Type.usize);
|
||||
const len = usize_llvm_ty.constInt(operand_size, .False);
|
||||
_ = self.builder.buildMemSet(dest_ptr, fill_char, len, dest_ptr_align, ptr_ty.isVolatilePtr());
|
||||
if (self.dg.module.comp.bin_file.options.valgrind) {
|
||||
const dest_ptr_align = ptr_ty.ptrAlignment(target);
|
||||
_ = self.builder.buildMemSet(dest_ptr, fill_byte, len, dest_ptr_align, ptr_ty.isVolatilePtr());
|
||||
if (safety and self.dg.module.comp.bin_file.options.valgrind) {
|
||||
self.valgrindMarkUndef(dest_ptr, len);
|
||||
}
|
||||
} else {
|
||||
const src_operand = try self.resolveInst(bin_op.rhs);
|
||||
try self.store(dest_ptr, ptr_ty, src_operand, .NotAtomic);
|
||||
return null;
|
||||
}
|
||||
|
||||
const src_operand = try self.resolveInst(bin_op.rhs);
|
||||
try self.store(dest_ptr, ptr_ty, src_operand, .NotAtomic);
|
||||
return null;
|
||||
}

@@ -8373,34 +8409,107 @@ pub const FuncGen = struct {
return null;
}

fn airMemset(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
const pl_op = self.air.instructions.items(.data)[inst].pl_op;
const extra = self.air.extraData(Air.Bin, pl_op.payload).data;
const dest_ptr = try self.resolveInst(pl_op.operand);
const ptr_ty = self.air.typeOf(pl_op.operand);
const value = try self.resolveInst(extra.lhs);
const val_is_undef = if (self.air.value(extra.lhs)) |val| val.isUndefDeep() else false;
const len = try self.resolveInst(extra.rhs);
const u8_llvm_ty = self.context.intType(8);
const fill_char = if (val_is_undef) u8_llvm_ty.constInt(0xaa, .False) else value;
fn airMemset(self: *FuncGen, inst: Air.Inst.Index, safety: bool) !?*llvm.Value {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const dest_slice = try self.resolveInst(bin_op.lhs);
const ptr_ty = self.air.typeOf(bin_op.lhs);
const elem_ty = self.air.typeOf(bin_op.rhs);
const target = self.dg.module.getTarget();
const val_is_undef = if (self.air.value(bin_op.rhs)) |val| val.isUndefDeep() else false;
const dest_ptr_align = ptr_ty.ptrAlignment(target);
_ = self.builder.buildMemSet(dest_ptr, fill_char, len, dest_ptr_align, ptr_ty.isVolatilePtr());
const u8_llvm_ty = self.context.intType(8);
const dest_ptr = self.sliceOrArrayPtr(dest_slice, ptr_ty);

if (val_is_undef and self.dg.module.comp.bin_file.options.valgrind) {
self.valgrindMarkUndef(dest_ptr, len);
if (val_is_undef) {
// Even if safety is disabled, we still emit a memset to undefined since it conveys
// extra information to LLVM. However, safety makes the difference between using
// 0xaa or actual undefined for the fill byte.
const fill_byte = if (safety)
u8_llvm_ty.constInt(0xaa, .False)
else
u8_llvm_ty.getUndef();
const len = self.sliceOrArrayLenInBytes(dest_slice, ptr_ty);
_ = self.builder.buildMemSet(dest_ptr, fill_byte, len, dest_ptr_align, ptr_ty.isVolatilePtr());

if (safety and self.dg.module.comp.bin_file.options.valgrind) {
self.valgrindMarkUndef(dest_ptr, len);
}
return null;
}

const value = try self.resolveInst(bin_op.rhs);
const elem_abi_size = elem_ty.abiSize(target);

if (elem_abi_size == 1) {
// In this case we can take advantage of LLVM's intrinsic.
const fill_byte = self.builder.buildBitCast(value, u8_llvm_ty, "");
const len = self.sliceOrArrayLenInBytes(dest_slice, ptr_ty);
_ = self.builder.buildMemSet(dest_ptr, fill_byte, len, dest_ptr_align, ptr_ty.isVolatilePtr());
return null;
}

// non-byte-sized element. lower with a loop. something like this:

// entry:
//   ...
//   %end_ptr = getelementptr %ptr, %len
//   br loop
// loop:
//   %it_ptr = phi body %next_ptr, entry %ptr
//   %end = cmp eq %it_ptr, %end_ptr
//   cond_br %end body, end
// body:
//   store %it_ptr, %value
//   %next_ptr = getelementptr %it_ptr, 1
//   br loop
// end:
//   ...
const entry_block = self.builder.getInsertBlock();
const loop_block = self.context.appendBasicBlock(self.llvm_func, "InlineMemsetLoop");
const body_block = self.context.appendBasicBlock(self.llvm_func, "InlineMemsetBody");
const end_block = self.context.appendBasicBlock(self.llvm_func, "InlineMemsetEnd");

const llvm_usize_ty = self.context.intType(target.cpu.arch.ptrBitWidth());
const len = switch (ptr_ty.ptrSize()) {
.Slice => self.builder.buildExtractValue(dest_slice, 1, ""),
.One => llvm_usize_ty.constInt(ptr_ty.childType().arrayLen(), .False),
.Many, .C => unreachable,
};
const elem_llvm_ty = try self.dg.lowerType(elem_ty);
const len_gep = [_]*llvm.Value{len};
const end_ptr = self.builder.buildInBoundsGEP(elem_llvm_ty, dest_ptr, &len_gep, len_gep.len, "");
_ = self.builder.buildBr(loop_block);

self.builder.positionBuilderAtEnd(loop_block);
const it_ptr = self.builder.buildPhi(self.context.pointerType(0), "");
const end = self.builder.buildICmp(.NE, it_ptr, end_ptr, "");
_ = self.builder.buildCondBr(end, body_block, end_block);

self.builder.positionBuilderAtEnd(body_block);
const store_inst = self.builder.buildStore(value, it_ptr);
store_inst.setAlignment(@min(elem_ty.abiAlignment(target), dest_ptr_align));
const one_gep = [_]*llvm.Value{llvm_usize_ty.constInt(1, .False)};
const next_ptr = self.builder.buildInBoundsGEP(elem_llvm_ty, it_ptr, &one_gep, one_gep.len, "");
_ = self.builder.buildBr(loop_block);

self.builder.positionBuilderAtEnd(end_block);

const incoming_values: [2]*llvm.Value = .{ next_ptr, dest_ptr };
const incoming_blocks: [2]*llvm.BasicBlock = .{ body_block, entry_block };
it_ptr.addIncoming(&incoming_values, &incoming_blocks, 2);

return null;
}
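
Taken together, the rewritten airMemset picks one of three lowerings. A hedged sketch of user code that exercises each path (values are illustrative):

    var a: [8]u8 = undefined;
    @memset(&a, 0xff); // element ABI size == 1: lowers to the llvm.memset intrinsic
    var b: [8]u32 = undefined;
    @memset(&b, 1234); // element ABI size > 1: lowers to the inline loop above
    @memset(&b, undefined); // undef value: 0xaa fill under safety, LLVM undef otherwise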

fn airMemcpy(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
const pl_op = self.air.instructions.items(.data)[inst].pl_op;
const extra = self.air.extraData(Air.Bin, pl_op.payload).data;
const dest_ptr = try self.resolveInst(pl_op.operand);
const dest_ptr_ty = self.air.typeOf(pl_op.operand);
const src_ptr = try self.resolveInst(extra.lhs);
const src_ptr_ty = self.air.typeOf(extra.lhs);
const len = try self.resolveInst(extra.rhs);
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const dest_slice = try self.resolveInst(bin_op.lhs);
const dest_ptr_ty = self.air.typeOf(bin_op.lhs);
const src_slice = try self.resolveInst(bin_op.rhs);
const src_ptr_ty = self.air.typeOf(bin_op.rhs);
const src_ptr = self.sliceOrArrayPtr(src_slice, src_ptr_ty);
const len = self.sliceOrArrayLenInBytes(dest_slice, dest_ptr_ty);
const dest_ptr = self.sliceOrArrayPtr(dest_slice, dest_ptr_ty);
const is_volatile = src_ptr_ty.isVolatilePtr() or dest_ptr_ty.isVolatilePtr();
const target = self.dg.module.getTarget();
_ = self.builder.buildMemCpy(

@@ -140,6 +140,7 @@ const Writer = struct {
.bool_and,
.bool_or,
.store,
.store_safe,
.array_elem_val,
.slice_elem_val,
.ptr_elem_val,
@@ -169,6 +170,9 @@ const Writer = struct {
.cmp_gte_optimized,
.cmp_gt_optimized,
.cmp_neq_optimized,
.memcpy,
.memset,
.memset_safe,
=> try w.writeBinOp(s, inst),

.is_null,
@@ -315,8 +319,6 @@ const Writer = struct {
.atomic_store_release => try w.writeAtomicStore(s, inst, .Release),
.atomic_store_seq_cst => try w.writeAtomicStore(s, inst, .SeqCst),
.atomic_rmw => try w.writeAtomicRmw(s, inst),
.memcpy => try w.writeMemcpy(s, inst),
.memset => try w.writeMemset(s, inst),
.field_parent_ptr => try w.writeFieldParentPtr(s, inst),
.wasm_memory_size => try w.writeWasmMemorySize(s, inst),
.wasm_memory_grow => try w.writeWasmMemoryGrow(s, inst),
@@ -591,17 +593,6 @@ const Writer = struct {
try s.print(", {s}, {s}", .{ @tagName(extra.op()), @tagName(extra.ordering()) });
}

fn writeMemset(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void {
const pl_op = w.air.instructions.items(.data)[inst].pl_op;
const extra = w.air.extraData(Air.Bin, pl_op.payload).data;

try w.writeOperand(s, inst, 0, pl_op.operand);
try s.writeAll(", ");
try w.writeOperand(s, inst, 1, extra.lhs);
try s.writeAll(", ");
try w.writeOperand(s, inst, 2, extra.rhs);
}

fn writeFieldParentPtr(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void {
const ty_pl = w.air.instructions.items(.data)[inst].ty_pl;
const extra = w.air.extraData(Air.FieldParentPtr, ty_pl.payload).data;
@@ -610,17 +601,6 @@ const Writer = struct {
try s.print(", {d}", .{extra.field_index});
}

fn writeMemcpy(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void {
const pl_op = w.air.instructions.items(.data)[inst].pl_op;
const extra = w.air.extraData(Air.Bin, pl_op.payload).data;

try w.writeOperand(s, inst, 0, pl_op.operand);
try s.writeAll(", ");
try w.writeOperand(s, inst, 1, extra.lhs);
try s.writeAll(", ");
try w.writeOperand(s, inst, 2, extra.rhs);
}

fn writeConstant(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void {
const ty_pl = w.air.instructions.items(.data)[inst].ty_pl;
const val = w.air.values[ty_pl.payload];

@@ -277,8 +277,6 @@ const Writer = struct {
.atomic_load => try self.writeAtomicLoad(stream, inst),
.atomic_store => try self.writeAtomicStore(stream, inst),
.atomic_rmw => try self.writeAtomicRmw(stream, inst),
.memcpy => try self.writeMemcpy(stream, inst),
.memset => try self.writeMemset(stream, inst),
.shuffle => try self.writeShuffle(stream, inst),
.mul_add => try self.writeMulAdd(stream, inst),
.field_parent_ptr => try self.writeFieldParentPtr(stream, inst),
@@ -346,6 +344,8 @@ const Writer = struct {
.vector_type,
.max,
.min,
.memcpy,
.memset,
.elem_ptr_node,
.elem_val_node,
.elem_ptr,
@@ -1000,32 +1000,6 @@ const Writer = struct {
try self.writeSrc(stream, inst_data.src());
}

fn writeMemcpy(self: *Writer, stream: anytype, inst: Zir.Inst.Index) !void {
const inst_data = self.code.instructions.items(.data)[inst].pl_node;
const extra = self.code.extraData(Zir.Inst.Memcpy, inst_data.payload_index).data;

try self.writeInstRef(stream, extra.dest);
try stream.writeAll(", ");
try self.writeInstRef(stream, extra.source);
try stream.writeAll(", ");
try self.writeInstRef(stream, extra.byte_count);
try stream.writeAll(") ");
try self.writeSrc(stream, inst_data.src());
}

fn writeMemset(self: *Writer, stream: anytype, inst: Zir.Inst.Index) !void {
const inst_data = self.code.instructions.items(.data)[inst].pl_node;
const extra = self.code.extraData(Zir.Inst.Memset, inst_data.payload_index).data;

try self.writeInstRef(stream, extra.dest);
try stream.writeAll(", ");
try self.writeInstRef(stream, extra.byte);
try stream.writeAll(", ");
try self.writeInstRef(stream, extra.byte_count);
try stream.writeAll(") ");
try self.writeSrc(stream, inst_data.src());
}

fn writeStructInitAnon(self: *Writer, stream: anytype, inst: Zir.Inst.Index) !void {
const inst_data = self.code.instructions.items(.data)[inst].pl_node;
const extra = self.code.extraData(Zir.Inst.StructInitAnon, inst_data.payload_index);

15 src/type.zig
@@ -3843,9 +3843,14 @@ pub const Type = extern union {
};
}

/// Asserts the `Type` is a pointer.
pub fn ptrSize(self: Type) std.builtin.Type.Pointer.Size {
return switch (self.tag()) {
/// Asserts `ty` is a pointer.
pub fn ptrSize(ty: Type) std.builtin.Type.Pointer.Size {
return ptrSizeOrNull(ty).?;
}

/// Returns `null` if `ty` is not a pointer.
pub fn ptrSizeOrNull(ty: Type) ?std.builtin.Type.Pointer.Size {
return switch (ty.tag()) {
.const_slice,
.mut_slice,
.const_slice_u8,
@@ -3870,9 +3875,9 @@ pub const Type = extern union {
.inferred_alloc_mut,
=> .One,

.pointer => self.castTag(.pointer).?.data.size,
.pointer => ty.castTag(.pointer).?.data.size,

else => unreachable,
else => null,
};
}
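
A hedged usage sketch of the new split (only the two signatures come from the diff; the caller is hypothetical): ptrSize keeps the asserting behavior, while ptrSizeOrNull lets callers probe a type that may not be a pointer.

    if (ty.ptrSizeOrNull()) |size| {
        switch (size) {
            .Slice => {}, // carries a runtime length
            .One, .Many, .C => {},
        }
    } else {
        // not a pointer type; the asserting ptrSize() would be illegal here
    }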

BIN stage1/zig1.wasm
Binary file not shown.
@@ -353,22 +353,90 @@ fn f2(x: bool) []const u8 {
return (if (x) &fA else &fB)();
}

test "@memset on array pointers" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) {
// TODO: implement memset when element ABI size > 1
return error.SkipZigTest;
}

try testMemsetArray();
try comptime testMemsetArray();
}

fn testMemsetArray() !void {
{
// memset array to non-undefined, ABI size == 1
var foo: [20]u8 = undefined;
@memset(&foo, 'A');
try expect(foo[0] == 'A');
try expect(foo[11] == 'A');
try expect(foo[19] == 'A');
}
{
// memset array to non-undefined, ABI size > 1
var foo: [20]u32 = undefined;
@memset(&foo, 1234);
try expect(foo[0] == 1234);
try expect(foo[11] == 1234);
try expect(foo[19] == 1234);
}
}

test "@memset on slices" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) {
// TODO: implement memset when element ABI size > 1
// TODO: implement memset on slices
return error.SkipZigTest;
}

try testMemsetSlice();
try comptime testMemsetSlice();
}

fn testMemsetSlice() !void {
{
// memset slice to non-undefined, ABI size == 1
var array: [20]u8 = undefined;
var len = array.len;
var slice = array[0..len];
@memset(slice, 'A');
try expect(slice[0] == 'A');
try expect(slice[11] == 'A');
try expect(slice[19] == 'A');
}
{
// memset slice to non-undefined, ABI size > 1
var array: [20]u32 = undefined;
var len = array.len;
var slice = array[0..len];
@memset(slice, 1234);
try expect(slice[0] == 1234);
try expect(slice[11] == 1234);
try expect(slice[19] == 1234);
}
}

test "memcpy and memset intrinsics" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO

try testMemcpyMemset();
// TODO add comptime test coverage
//comptime try testMemcpyMemset();
try comptime testMemcpyMemset();
}

fn testMemcpyMemset() !void {
var foo: [20]u8 = undefined;
var bar: [20]u8 = undefined;

@memset(&foo, 'A', foo.len);
@memcpy(&bar, &foo, bar.len);
@memset(&foo, 'A');
@memcpy(&bar, &foo);

try expect(bar[0] == 'A');
try expect(bar[11] == 'A');

@@ -14,7 +14,7 @@ test "zero keys with @memset" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO

@memset(@ptrCast([*]u8, &keys), 0, @sizeOf(@TypeOf(keys)));
@memset(@ptrCast([*]u8, &keys)[0..@sizeOf(@TypeOf(keys))], 0);
try expect(!keys.up);
try expect(!keys.down);
try expect(!keys.left);

@@ -17,8 +17,8 @@ test {
try testing.expectEqual(void, @TypeOf(@breakpoint()));
try testing.expectEqual({}, @export(x, .{ .name = "x" }));
try testing.expectEqual({}, @fence(.Acquire));
try testing.expectEqual({}, @memcpy(@intToPtr([*]u8, 1), @intToPtr([*]u8, 1), 0));
try testing.expectEqual({}, @memset(@intToPtr([*]u8, 1), undefined, 0));
try testing.expectEqual({}, @memcpy(@intToPtr([*]u8, 1)[0..0], @intToPtr([*]u8, 1)[0..0]));
try testing.expectEqual({}, @memset(@intToPtr([*]u8, 1)[0..0], undefined));
try testing.expectEqual(noreturn, @TypeOf(if (true) @panic("") else {}));
try testing.expectEqual({}, @prefetch(&val, .{}));
try testing.expectEqual({}, @setAlignStack(16));

@@ -91,7 +91,7 @@ test "structs" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO

var foo: StructFoo = undefined;
@memset(@ptrCast([*]u8, &foo), 0, @sizeOf(StructFoo));
@memset(@ptrCast([*]u8, &foo)[0..@sizeOf(StructFoo)], 0);
foo.a += 1;
foo.b = foo.a == 1;
try testFoo(foo);
@@ -498,7 +498,7 @@ test "packed struct fields are ordered from LSB to MSB" {

var all: u64 = 0x7765443322221111;
var bytes: [8]u8 align(@alignOf(Bitfields)) = undefined;
@memcpy(&bytes, @ptrCast([*]u8, &all), 8);
@memcpy(bytes[0..8], @ptrCast([*]u8, &all));
var bitfields = @ptrCast(*Bitfields, &bytes).*;

try expect(bitfields.f1 == 0x1111);

@@ -2,18 +2,35 @@ pub export fn entry() void {
var buf: [5]u8 = .{ 1, 2, 3, 4, 5 };
var slice: []u8 = &buf;
const a: u32 = 1234;
@memcpy(slice, @ptrCast([*]const u8, &a), 4);
@memcpy(slice.ptr, @ptrCast([*]const u8, &a));
}
pub export fn entry1() void {
var buf: [5]u8 = .{ 1, 2, 3, 4, 5 };
var ptr: *u8 = &buf[0];
@memcpy(ptr, 0, 4);
@memcpy(ptr, 0);
}
pub export fn entry2() void {
var buf: [5]u8 = .{ 1, 2, 3, 4, 5 };
var ptr: *u8 = &buf[0];
@memset(ptr, 0);
}
pub export fn non_matching_lengths() void {
var buf1: [5]u8 = .{ 1, 2, 3, 4, 5 };
var buf2: [6]u8 = .{ 1, 2, 3, 4, 5, 6 };
@memcpy(&buf2, &buf1);
}

// error
// backend=stage2
// target=native
//
// :5:13: error: expected type '[*]u8', found '[]u8'
// :10:13: error: expected type '[*]u8', found '*u8'
// :10:13: note: a single pointer cannot cast into a many pointer
// :5:5: error: unknown @memcpy length
// :5:18: note: destination type [*]u8 provides no length
// :5:24: note: source type [*]align(4) const u8 provides no length
// :10:13: error: type 'u8' does not support indexing
// :10:13: note: for loop operand must be an array, slice, tuple, or vector
// :15:13: error: type '*u8' does not support indexing
// :15:13: note: for loop operand must be an array, slice, tuple, or vector
// :20:5: error: non-matching @memcpy lengths
// :20:13: note: length 6 here
// :20:20: note: length 5 here
|
||||
|
||||
pub fn main() !void {
|
||||
var e: E = undefined;
|
||||
@memset(@ptrCast([*]u8, &e), 0x55, @sizeOf(E));
|
||||
@memset(@ptrCast([*]u8, &e)[0..@sizeOf(E)], 0x55);
|
||||
var n = @tagName(e);
|
||||
_ = n;
|
||||
return error.TestFailed;
|
||||
|
||||
@ -15,7 +15,7 @@ const U = union(enum(u32)) {
|
||||
|
||||
pub fn main() !void {
|
||||
var u: U = undefined;
|
||||
@memset(@ptrCast([*]u8, &u), 0x55, @sizeOf(U));
|
||||
@memset(@ptrCast([*]u8, &u)[0..@sizeOf(U)], 0x55);
|
||||
var t: @typeInfo(U).Union.tag_type.? = u;
|
||||
var n = @tagName(t);
|
||||
_ = n;
|
||||
|
||||
17
test/cases/safety/memcpy_alias.zig
Normal file
17
test/cases/safety/memcpy_alias.zig
Normal file
@ -0,0 +1,17 @@
|
||||
const std = @import("std");
|
||||
|
||||
pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace, _: ?usize) noreturn {
|
||||
_ = stack_trace;
|
||||
if (std.mem.eql(u8, message, "@memcpy arguments alias")) {
|
||||
std.process.exit(0);
|
||||
}
|
||||
std.process.exit(1);
|
||||
}
|
||||
pub fn main() !void {
|
||||
var buffer = [2]u8{ 1, 2 } ** 5;
|
||||
var len: usize = 5;
|
||||
@memcpy(buffer[0..len], buffer[4 .. 4 + len]);
|
||||
}
|
||||
// run
|
||||
// backend=llvm
|
||||
// target=native
|
||||
17
test/cases/safety/memcpy_len_mismatch.zig
Normal file
17
test/cases/safety/memcpy_len_mismatch.zig
Normal file
@ -0,0 +1,17 @@
|
||||
const std = @import("std");
|
||||
|
||||
pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace, _: ?usize) noreturn {
|
||||
_ = stack_trace;
|
||||
if (std.mem.eql(u8, message, "@memcpy arguments have non-equal lengths")) {
|
||||
std.process.exit(0);
|
||||
}
|
||||
std.process.exit(1);
|
||||
}
|
||||
pub fn main() !void {
|
||||
var buffer = [2]u8{ 1, 2 } ** 5;
|
||||
var len: usize = 5;
|
||||
@memcpy(buffer[0..len], buffer[len .. len + 4]);
|
||||
}
|
||||
// run
|
||||
// backend=llvm
|
||||
// target=native
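
Both new safety tests are expected to panic at runtime. A hedged sketch of the passing counterpart (buffer contents are illustrative): keep the regions disjoint and the lengths equal.

    var src = [2]u8{ 1, 2 } ** 5;
    var dst: [10]u8 = undefined;
    var len: usize = 5;
    @memcpy(dst[0..len], src[0..len]); // disjoint regions, equal lengths: no panic
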
18 test/cases/safety/memset_array_undefined_bytes.zig Normal file
@@ -0,0 +1,18 @@
const std = @import("std");

pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace, _: ?usize) noreturn {
_ = stack_trace;
if (std.mem.eql(u8, message, "integer overflow")) {
std.process.exit(0);
}
std.process.exit(1);
}
pub fn main() !void {
var buffer = [6]u8{ 1, 2, 3, 4, 5, 6 };
@memset(&buffer, undefined);
var x: u8 = buffer[1];
x += buffer[2];
}
// run
// backend=llvm
// target=native

18 test/cases/safety/memset_array_undefined_large.zig Normal file
@@ -0,0 +1,18 @@
const std = @import("std");

pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace, _: ?usize) noreturn {
_ = stack_trace;
if (std.mem.eql(u8, message, "integer overflow")) {
std.process.exit(0);
}
std.process.exit(1);
}
pub fn main() !void {
var buffer = [6]i32{ 1, 2, 3, 4, 5, 6 };
@memset(&buffer, undefined);
var x: i32 = buffer[1];
x += buffer[2];
}
// run
// backend=llvm
// target=native

19 test/cases/safety/memset_slice_undefined_bytes.zig Normal file
@@ -0,0 +1,19 @@
const std = @import("std");

pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace, _: ?usize) noreturn {
_ = stack_trace;
if (std.mem.eql(u8, message, "integer overflow")) {
std.process.exit(0);
}
std.process.exit(1);
}
pub fn main() !void {
var buffer = [6]u8{ 1, 2, 3, 4, 5, 6 };
var len = buffer.len;
@memset(buffer[0..len], undefined);
var x: u8 = buffer[1];
x += buffer[2];
}
// run
// backend=llvm
// target=native

19 test/cases/safety/memset_slice_undefined_large.zig Normal file
@@ -0,0 +1,19 @@
const std = @import("std");

pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace, _: ?usize) noreturn {
_ = stack_trace;
if (std.mem.eql(u8, message, "integer overflow")) {
std.process.exit(0);
}
std.process.exit(1);
}
pub fn main() !void {
var buffer = [6]i32{ 1, 2, 3, 4, 5, 6 };
var len = buffer.len;
@memset(buffer[0..len], undefined);
var x: i32 = buffer[1];
x += buffer[2];
}
// run
// backend=llvm
// target=native
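
A note on why these four tests expect an "integer overflow" panic: with safety on, @memset(..., undefined) fills the buffer with the 0xaa byte pattern, so buffer[1] and buffer[2] read back as 0xaa (170 as u8) rather than their original values, and 170 + 170 = 340 overflows u8 (likewise, adding two 0xaaaaaaaa values overflows i32).
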
@@ -15,7 +15,7 @@ const E = enum(u32) {

pub fn main() !void {
var e: E = undefined;
@memset(@ptrCast([*]u8, &e), 0x55, @sizeOf(E));
@memset(@ptrCast([*]u8, &e)[0..@sizeOf(E)], 0x55);
switch (e) {
.X, .Y => @breakpoint(),
}

@@ -15,7 +15,7 @@ const U = union(enum(u32)) {

pub fn main() !void {
var u: U = undefined;
@memset(@ptrCast([*]u8, &u), 0x55, @sizeOf(U));
@memset(@ptrCast([*]u8, &u)[0..@sizeOf(U)], 0x55);
switch (u) {
.X, .Y => @breakpoint(),
}