Merge pull request #9960 from Snektron/bit-not

Some not and vector stuff
commit ad17108bdd
Andrew Kelley 2021-10-17 21:59:10 -04:00 committed by GitHub
10 changed files with 285 additions and 49 deletions

View File

@ -53,7 +53,8 @@ cmake .. \
-DCMAKE_BUILD_TYPE=Release \
-DZIG_TARGET_TRIPLE="$HOST_TARGET" \
-DZIG_TARGET_MCPU="$HOST_MCPU" \
-DZIG_STATIC=ON
-DZIG_STATIC=ON \
-DZIG_OMIT_STAGE2=ON
unset CC
unset CXX

View File

@ -39,7 +39,8 @@ cmake .. \
-DCMAKE_BUILD_TYPE=Release \
-DZIG_TARGET_TRIPLE="$TARGET" \
-DZIG_TARGET_MCPU="$MCPU" \
-DZIG_STATIC=ON
-DZIG_STATIC=ON \
-DZIG_OMIT_STAGE2=ON
# Now cmake will use zig as the C/C++ compiler. We reset the environment variables
# so that installation and testing do not get affected by them.

View File

@ -825,7 +825,7 @@ pub const Mutable = struct {
///
/// Asserts there is enough memory to fit the result. The upper bound Limb count
/// is `calcTwosCompLimbCount(bit_count)`.
pub fn shiftLeftSat(r: *Mutable, a: Const, shift: usize, signedness: std.builtin.Signedness, bit_count: usize) void {
pub fn shiftLeftSat(r: *Mutable, a: Const, shift: usize, signedness: Signedness, bit_count: usize) void {
// Special case: When the argument is negative, but the result is supposed to be unsigned,
// return 0 in all cases.
if (!a.positive and signedness == .unsigned) {
@ -906,6 +906,17 @@ pub const Mutable = struct {
r.positive = a.positive;
}
/// r = ~a under 2s complement wrapping semantics.
/// r may alias with a.
///
/// Asserts that r has enough limbs to store the result. The upper bound Limb count
/// is `calcTwosCompLimbCount(bit_count)`.
pub fn bitNotWrap(r: *Mutable, a: Const, signedness: Signedness, bit_count: usize) void {
r.copy(a.negate());
const negative_one = Const{ .limbs = &.{1}, .positive = false };
r.addWrap(r.toConst(), negative_one, signedness, bit_count);
}
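The body above relies on the two's complement identity ~a == -a - 1: copy the negation of a, then add -1 with wrapping at the requested width. A minimal standalone sketch of that identity in plain Zig (a hypothetical test for illustration, not part of this diff):

const std = @import("std");

test "bit-not identity sketch" {
    // In two's complement, ~a == -%a -% 1 at any fixed width; this is
    // exactly the rewrite bitNotWrap performs on the big integer.
    const a: i32 = 123;
    try std.testing.expect(~a == -%a -% 1);
    const b: u8 = 0b1010_1010;
    try std.testing.expect(~b == -%b -% 1);
}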
/// r = a | b under 2s complement semantics.
/// r may alias with a or b.
///
@ -2455,7 +2466,7 @@ pub const Managed = struct {
}
/// r = a <<| shift with 2s-complement saturating semantics.
pub fn shiftLeftSat(r: *Managed, a: Managed, shift: usize, signedness: std.builtin.Signedness, bit_count: usize) !void {
pub fn shiftLeftSat(r: *Managed, a: Managed, shift: usize, signedness: Signedness, bit_count: usize) !void {
try r.ensureTwosCompCapacity(bit_count);
var m = r.toMutable();
m.shiftLeftSat(a.toConst(), shift, signedness, bit_count);
@ -2476,6 +2487,14 @@ pub const Managed = struct {
r.setMetadata(m.positive, m.len);
}
/// r = ~a under 2s-complement wrapping semantics.
pub fn bitNotWrap(r: *Managed, a: Managed, signedness: Signedness, bit_count: usize) !void {
try r.ensureTwosCompCapacity(bit_count);
var m = r.toMutable();
m.bitNotWrap(a.toConst(), signedness, bit_count);
r.setMetadata(m.positive, m.len);
}
/// r = a | b
///
/// a and b are zero-extended to the longer of a or b.

View File

@ -1866,6 +1866,42 @@ test "big.int sat shift-left signed multi negative" {
try testing.expect((try a.to(SignedDoubleLimb)) == @as(SignedDoubleLimb, x) <<| shift);
}
test "big.int bitNotWrap unsigned simple" {
var a = try Managed.initSet(testing.allocator, 123);
defer a.deinit();
try a.bitNotWrap(a, .unsigned, 10);
try testing.expect((try a.to(u10)) == ~@as(u10, 123));
}
test "big.int bitNotWrap unsigned multi" {
var a = try Managed.initSet(testing.allocator, 0);
defer a.deinit();
try a.bitNotWrap(a, .unsigned, @bitSizeOf(DoubleLimb));
try testing.expect((try a.to(DoubleLimb)) == maxInt(DoubleLimb));
}
test "big.int bitNotWrap signed simple" {
var a = try Managed.initSet(testing.allocator, -456);
defer a.deinit();
try a.bitNotWrap(a, .signed, 11);
try testing.expect((try a.to(i11)) == ~@as(i11, -456));
}
test "big.int bitNotWrap signed multi" {
var a = try Managed.initSet(testing.allocator, 0);
defer a.deinit();
try a.bitNotWrap(a, .signed, @bitSizeOf(SignedDoubleLimb));
try testing.expect((try a.to(SignedDoubleLimb)) == -1);
}
test "big.int bitwise and simple" {
var a = try Managed.initSet(testing.allocator, 0xffffffff11111111);
defer a.deinit();

View File

@ -6629,8 +6629,42 @@ fn zirBitNot(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
const tracy = trace(@src());
defer tracy.end();
_ = inst;
return sema.fail(block, sema.src, "TODO implement zirBitNot", .{});
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const src = inst_data.src();
const operand_src = src; // TODO put this on the operand, not the '~'
const operand = sema.resolveInst(inst_data.operand);
const operand_type = sema.typeOf(operand);
const scalar_type = operand_type.scalarType();
if (scalar_type.zigTypeTag() != .Int) {
return sema.fail(block, src, "unable to perform binary not operation on type '{}'", .{operand_type});
}
if (try sema.resolveMaybeUndefVal(block, operand_src, operand)) |val| {
const target = sema.mod.getTarget();
if (val.isUndef()) {
return sema.addConstUndef(scalar_type);
} else if (operand_type.zigTypeTag() == .Vector) {
const vec_len = operand_type.arrayLen();
var elem_val_buf: Value.ElemValueBuffer = undefined;
const elems = try sema.arena.alloc(Value, vec_len);
for (elems) |*elem, i| {
const elem_val = val.elemValueBuffer(i, &elem_val_buf);
elem.* = try elem_val.bitwiseNot(scalar_type, sema.arena, target);
}
return sema.addConstant(
operand_type,
try Value.Tag.array.create(sema.arena, elems),
);
} else {
const result_val = try val.bitwiseNot(scalar_type, sema.arena, target);
return sema.addConstant(scalar_type, result_val);
}
}
try sema.requireRuntimeBlock(block, src);
return block.addTyOp(.not, operand_type, operand);
}
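The .Vector branch above folds `~` lane by lane when the operand is comptime-known; runtime operands fall through to a single .not AIR instruction. A hedged sketch of the behavior this enables (hypothetical test, not part of this diff):

const std = @import("std");

test "bit-not over a vector (sketch)" {
    // Each lane is inverted independently, mirroring the per-element
    // bitwiseNot loop in zirBitNot above.
    const arr = [2]u8{ 0x0f, 0xf0 };
    const v: @Vector(2, u8) = arr; // array -> vector coercion, also added in this PR
    const r = ~v;
    try std.testing.expect(r[0] == 0xf0 and r[1] == 0x0f);
}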
fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@ -8239,12 +8273,13 @@ fn zirBoolNot(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
const bool_type = Type.initTag(.bool);
const operand = try sema.coerce(block, bool_type, uncasted_operand, operand_src);
if (try sema.resolveDefinedValue(block, operand_src, operand)) |val| {
if (val.toBool()) {
return Air.Inst.Ref.bool_false;
} else {
return Air.Inst.Ref.bool_true;
}
if (try sema.resolveMaybeUndefVal(block, operand_src, operand)) |val| {
return if (val.isUndef())
sema.addConstUndef(bool_type)
else if (val.toBool())
Air.Inst.Ref.bool_false
else
Air.Inst.Ref.bool_true;
}
try sema.requireRuntimeBlock(block, src);
return block.addTyOp(.not, bool_type, operand);
@ -11640,7 +11675,11 @@ fn coerce(
else => {},
},
.Array => switch (inst_ty.zigTypeTag()) {
.Vector => return sema.coerceVectorToArray(block, dest_ty, dest_ty_src, inst, inst_src),
.Vector => return sema.coerceVectorInMemory(block, dest_ty, dest_ty_src, inst, inst_src),
else => {},
},
.Vector => switch (inst_ty.zigTypeTag()) {
.Array => return sema.coerceVectorInMemory(block, dest_ty, dest_ty_src, inst, inst_src),
else => {},
},
else => {},
@ -12224,46 +12263,49 @@ fn coerceEnumToUnion(
return sema.failWithOwnedErrorMsg(msg);
}
fn coerceVectorToArray(
// Coerces between arrays and vectors that share the same in-memory layout.
// Handles both directions: array to vector and vector to array.
fn coerceVectorInMemory(
sema: *Sema,
block: *Block,
array_ty: Type,
array_ty_src: LazySrcLoc,
vector: Air.Inst.Ref,
vector_src: LazySrcLoc,
dest_ty: Type,
dest_ty_src: LazySrcLoc,
inst: Air.Inst.Ref,
inst_src: LazySrcLoc,
) !Air.Inst.Ref {
const vector_ty = sema.typeOf(vector);
const array_len = array_ty.arrayLen();
const vector_len = vector_ty.arrayLen();
if (array_len != vector_len) {
const inst_ty = sema.typeOf(inst);
const inst_len = inst_ty.arrayLen();
const dest_len = dest_ty.arrayLen();
if (dest_len != inst_len) {
const msg = msg: {
const msg = try sema.errMsg(block, vector_src, "expected {}, found {}", .{
array_ty, vector_ty,
const msg = try sema.errMsg(block, inst_src, "expected {}, found {}", .{
dest_ty, inst_ty,
});
errdefer msg.destroy(sema.gpa);
try sema.errNote(block, array_ty_src, msg, "array has length {d}", .{array_len});
try sema.errNote(block, vector_src, msg, "vector has length {d}", .{vector_len});
try sema.errNote(block, dest_ty_src, msg, "destination has length {d}", .{dest_len});
try sema.errNote(block, inst_src, msg, "source has length {d}", .{inst_len});
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
const target = sema.mod.getTarget();
const array_elem_ty = array_ty.childType();
const vector_elem_ty = vector_ty.childType();
const in_memory_result = coerceInMemoryAllowed(array_elem_ty, vector_elem_ty, false, target);
const dest_elem_ty = dest_ty.childType();
const inst_elem_ty = inst_ty.childType();
const in_memory_result = coerceInMemoryAllowed(dest_elem_ty, inst_elem_ty, false, target);
if (in_memory_result != .ok) {
// TODO recursive error notes for coerceInMemoryAllowed failure
return sema.fail(block, vector_src, "expected {}, found {}", .{ array_ty, vector_ty });
return sema.fail(block, inst_src, "expected {}, found {}", .{ dest_ty, inst_ty });
}
if (try sema.resolveMaybeUndefVal(block, vector_src, vector)) |vector_val| {
if (try sema.resolveMaybeUndefVal(block, inst_src, inst)) |inst_val| {
// These types share the same comptime value representation.
return sema.addConstant(array_ty, vector_val);
return sema.addConstant(dest_ty, inst_val);
}
try sema.requireRuntimeBlock(block, vector_src);
return block.addTyOp(.bitcast, array_ty, vector);
try sema.requireRuntimeBlock(block, inst_src);
return block.addTyOp(.bitcast, dest_ty, inst);
}
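Since the renamed function only checks length and in-memory element compatibility, the coercion is now symmetric. A hedged round-trip sketch (hypothetical test, not part of this diff):

const std = @import("std");

test "array <-> vector coercion (sketch)" {
    const arr: [4]u8 = .{ 1, 2, 3, 4 };
    const vec: @Vector(4, u8) = arr; // array -> vector
    const back: [4]u8 = vec; // vector -> array
    try std.testing.expect(back[3] == 4);
}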
fn coerceCompatibleErrorSets(

View File

@ -1299,6 +1299,66 @@ pub const DeclGen = struct {
}
return llvm_union_ty.constNamedStruct(&fields, fields.len);
},
.Vector => switch (tv.val.tag()) {
.bytes => {
// Note, sentinel is not stored even if the type has a sentinel.
const bytes = tv.val.castTag(.bytes).?.data;
const vector_len = tv.ty.arrayLen();
assert(vector_len == bytes.len or vector_len + 1 == bytes.len);
const elem_ty = tv.ty.elemType();
const llvm_elems = try self.gpa.alloc(*const llvm.Value, vector_len);
defer self.gpa.free(llvm_elems);
for (llvm_elems) |*elem, i| {
var byte_payload: Value.Payload.U64 = .{
.base = .{ .tag = .int_u64 },
.data = bytes[i],
};
elem.* = try self.genTypedValue(.{
.ty = elem_ty,
.val = Value.initPayload(&byte_payload.base),
});
}
return llvm.constVector(
llvm_elems.ptr,
@intCast(c_uint, llvm_elems.len),
);
},
.array => {
// Note, sentinel is not stored even if the type has a sentinel.
// The value includes the sentinel in those cases.
const elem_vals = tv.val.castTag(.array).?.data;
const vector_len = tv.ty.arrayLen();
assert(vector_len == elem_vals.len or vector_len + 1 == elem_vals.len);
const elem_ty = tv.ty.elemType();
const llvm_elems = try self.gpa.alloc(*const llvm.Value, vector_len);
defer self.gpa.free(llvm_elems);
for (llvm_elems) |*elem, i| {
elem.* = try self.genTypedValue(.{ .ty = elem_ty, .val = elem_vals[i] });
}
return llvm.constVector(
llvm_elems.ptr,
@intCast(c_uint, llvm_elems.len),
);
},
.repeated => {
// Note, sentinel is not stored even if the type has a sentinel.
const val = tv.val.castTag(.repeated).?.data;
const elem_ty = tv.ty.elemType();
const len = tv.ty.arrayLen();
const llvm_elems = try self.gpa.alloc(*const llvm.Value, len);
defer self.gpa.free(llvm_elems);
for (llvm_elems) |*elem| {
elem.* = try self.genTypedValue(.{ .ty = elem_ty, .val = val });
}
return llvm.constVector(
llvm_elems.ptr,
@intCast(c_uint, llvm_elems.len),
);
},
else => unreachable,
},
.ComptimeInt => unreachable,
.ComptimeFloat => unreachable,
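The three .Vector branches above cover the comptime representations a vector constant can take: raw bytes, an explicit element list, and one repeated element. As a hedged illustration, a splatted constant would plausibly lower through the .repeated branch (hypothetical test, not part of this diff; uses the @splat(len, scalar) builtin form current at the time of this commit):

const std = @import("std");

test "splatted vector constant (sketch)" {
    const v: @Vector(4, u8) = @splat(4, @as(u8, 7));
    try std.testing.expect(v[0] == 7 and v[3] == 7);
}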
@ -1313,7 +1373,6 @@ pub const DeclGen = struct {
.Frame,
.AnyFrame,
.Vector,
=> return self.todo("implement const of type '{}'", .{tv.ty}),
}
}
@ -2992,6 +3051,43 @@ pub const FuncGen = struct {
}
}
return array_ptr;
} else if (operand_ty.zigTypeTag() == .Array and inst_ty.zigTypeTag() == .Vector) {
const target = self.dg.module.getTarget();
const elem_ty = operand_ty.childType();
const llvm_vector_ty = try self.dg.llvmType(inst_ty);
if (!isByRef(operand_ty)) {
return self.dg.todo("implement bitcast non-ref array to vector", .{});
}
const bitcast_ok = elem_ty.bitSize(target) == elem_ty.abiSize(target) * 8;
if (bitcast_ok) {
const llvm_vector_ptr_ty = llvm_vector_ty.pointerType(0);
const casted_ptr = self.builder.buildBitCast(operand, llvm_vector_ptr_ty, "");
const vector = self.builder.buildLoad(casted_ptr, "");
// The array is aligned to the element's alignment, while the vector might have a completely
// different alignment. This means we need to enforce the alignment of this load.
vector.setAlignment(elem_ty.abiAlignment(target));
return vector;
} else {
// If the element type's size in bits does not match its ABI size in bits (e.g.
// u24, which occupies four bytes in memory on most targets), the array and
// vector layouts differ, so a simple bitcast will not work. Fall back to
// loading each element and building the vector with insertelement.
const llvm_usize = try self.dg.llvmType(Type.usize);
const llvm_u32 = self.context.intType(32);
const zero = llvm_usize.constNull();
const vector_len = operand_ty.arrayLen();
var vector = llvm_vector_ty.getUndef();
var i: u64 = 0;
while (i < vector_len) : (i += 1) {
const index_usize = llvm_usize.constInt(i, .False);
const index_u32 = llvm_u32.constInt(i, .False);
const indexes: [2]*const llvm.Value = .{ zero, index_usize };
const elem_ptr = self.builder.buildInBoundsGEP(operand, &indexes, indexes.len, "");
const elem = self.builder.buildLoad(elem_ptr, "");
vector = self.builder.buildInsertElement(vector, elem, index_u32, "");
}
return vector;
}
}
return self.builder.buildBitCast(operand, llvm_dest_ty, "");
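This lowering is reached when the array operand is only runtime-known, so Sema emits the .bitcast AIR instruction instead of folding the coercion at comptime. A hedged sketch of code that would exercise the path (hypothetical test, not part of this diff):

const std = @import("std");

test "runtime array -> vector (sketch)" {
    var arr: [4]u8 = .{ 1, 2, 3, 4 };
    arr[0] = 5; // runtime mutation keeps the value out of comptime folding
    const v: @Vector(4, u8) = arr;
    try std.testing.expect(v[0] == 5);
}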

View File

@ -313,6 +313,12 @@ pub const VerifierFailureAction = enum(c_int) {
pub const constNeg = LLVMConstNeg;
extern fn LLVMConstNeg(ConstantVal: *const Value) *const Value;
pub const constVector = LLVMConstVector;
extern fn LLVMConstVector(
ScalarConstantVals: [*]*const Value,
Size: c_uint,
) *const Value;
pub const getEnumAttributeKindForName = LLVMGetEnumAttributeKindForName;
extern fn LLVMGetEnumAttributeKindForName(Name: [*]const u8, SLen: usize) c_uint;
@ -567,6 +573,15 @@ pub const Builder = opaque {
Name: [*:0]const u8,
) *const Value;
pub const buildInsertElement = LLVMBuildInsertElement;
extern fn LLVMBuildInsertElement(
*const Builder,
VecVal: *const Value,
EltVal: *const Value,
Index: *const Value,
Name: [*:0]const u8,
) *const Value;
pub const buildPtrToInt = LLVMBuildPtrToInt;
extern fn LLVMBuildPtrToInt(
*const Builder,

View File

@ -2081,6 +2081,32 @@ pub const Value = extern union {
};
}
/// operand must be an integer; handles undefined.
pub fn bitwiseNot(val: Value, ty: Type, arena: *Allocator, target: Target) !Value {
if (val.isUndef()) return Value.initTag(.undef);
const info = ty.intInfo(target);
// TODO is this a performance issue? maybe we should try the operation without
// resorting to BigInt first.
var val_space: Value.BigIntSpace = undefined;
const val_bigint = val.toBigInt(&val_space);
const limbs = try arena.alloc(
std.math.big.Limb,
std.math.big.int.calcTwosCompLimbCount(info.bits),
);
var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
result_bigint.bitNotWrap(val_bigint, info.signedness, info.bits);
const result_limbs = result_bigint.limbs[0..result_bigint.len];
if (result_bigint.positive) {
return Value.Tag.int_big_positive.create(arena, result_limbs);
} else {
return Value.Tag.int_big_negative.create(arena, result_limbs);
}
}
/// operands must be integers; handles undefined.
pub fn bitwiseAnd(lhs: Value, rhs: Value, arena: *Allocator) !Value {
if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef);

View File

@ -235,3 +235,17 @@ test "comptime_int param and return" {
fn comptimeAdd(comptime a: comptime_int, comptime b: comptime_int) comptime_int {
return a + b;
}
test "binary not" {
try expect(comptime x: {
break :x ~@as(u16, 0b1010101010101010) == 0b0101010101010101;
});
try expect(comptime x: {
break :x ~@as(u64, 2147483647) == 18446744071562067968;
});
try testBinaryNot(0b1010101010101010);
}
fn testBinaryNot(x: u16) !void {
try expect(~x == 0b0101010101010101);
}

View File

@ -219,20 +219,6 @@ const DivResult = struct {
remainder: u64,
};
test "binary not" {
try expect(comptime x: {
break :x ~@as(u16, 0b1010101010101010) == 0b0101010101010101;
});
try expect(comptime x: {
break :x ~@as(u64, 2147483647) == 18446744071562067968;
});
try testBinaryNot(0b1010101010101010);
}
fn testBinaryNot(x: u16) !void {
try expect(~x == 0b0101010101010101);
}
test "small int addition" {
var x: u2 = 0;
try expect(x == 0);