Merge pull request #10532 from Hejsil/stage2-bit-shifting-passing

Stage2 bit_shifting.zig passing
This commit is contained in:
Andrew Kelley 2022-01-08 01:00:59 -05:00 committed by GitHub
commit c4ab8d9e12
7 changed files with 171 additions and 42 deletions


@ -831,6 +831,10 @@ pub const Struct = struct {
have_field_types,
layout_wip,
have_layout,
fully_resolved_wip,
// The type and all of its fields have had their layout resolved, even through
// pointers, which `have_layout` does not ensure.
fully_resolved,
},
/// If true, definitely nonzero size at runtime. If false, resolving the fields
/// is necessary to determine whether it has bits at runtime.
@ -889,6 +893,22 @@ pub const Struct = struct {
.have_field_types,
.layout_wip,
.have_layout,
.fully_resolved_wip,
.fully_resolved,
=> true,
};
}
pub fn haveLayout(s: Struct) bool {
return switch (s.status) {
.none,
.field_types_wip,
.have_field_types,
.layout_wip,
=> false,
.have_layout,
.fully_resolved_wip,
.fully_resolved,
=> true,
};
}
@ -1003,6 +1023,10 @@ pub const Union = struct {
have_field_types,
layout_wip,
have_layout,
fully_resolved_wip,
// The type and all of its fields have had their layout resolved, even through
// pointers, which `have_layout` does not ensure.
fully_resolved,
},
pub const Field = struct {
@ -1033,6 +1057,8 @@ pub const Union = struct {
.have_field_types,
.layout_wip,
.have_layout,
.fully_resolved_wip,
.fully_resolved,
=> true,
};
}
@ -1102,8 +1128,22 @@ pub const Union = struct {
tag_size: u64,
};
pub fn haveLayout(u: Union) bool {
return switch (u.status) {
.none,
.field_types_wip,
.have_field_types,
.layout_wip,
=> false,
.have_layout,
.fully_resolved_wip,
.fully_resolved,
=> true,
};
}
pub fn getLayout(u: Union, target: Target, have_tag: bool) Layout {
assert(u.status == .have_layout);
assert(u.haveLayout());
var most_aligned_field: u32 = undefined;
var most_aligned_field_size: u64 = undefined;
var biggest_field: u32 = undefined;
@ -4397,6 +4437,7 @@ pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn, arena: Allocator) Sem
const arg = try sema.addConstant(param_type, opv);
sema.inst_map.putAssumeCapacityNoClobber(inst, arg);
total_param_index += 1;
runtime_param_index += 1;
continue;
}
const ty_ref = try sema.addType(param_type);


@ -4503,14 +4503,14 @@ fn analyzeCall(
const arg_src = call_src; // TODO: better source location
if (i < fn_params_len) {
const param_ty = func_ty.fnParamType(i);
try sema.resolveTypeForCodegen(block, arg_src, param_ty);
try sema.resolveTypeFully(block, arg_src, param_ty);
args[i] = try sema.coerce(block, param_ty, uncasted_arg, arg_src);
} else {
args[i] = uncasted_arg;
}
}
try sema.resolveTypeForCodegen(block, call_src, func_ty_info.return_type);
try sema.resolveTypeFully(block, call_src, func_ty_info.return_type);
try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Call).Struct.fields.len +
args.len);
@ -4580,7 +4580,7 @@ fn finishGenericCall(
const param_ty = new_fn_ty.fnParamType(runtime_i);
const arg_src = call_src; // TODO: better source location
const uncasted_arg = uncasted_args[total_i];
try sema.resolveTypeForCodegen(block, arg_src, param_ty);
try sema.resolveTypeFully(block, arg_src, param_ty);
const casted_arg = try sema.coerce(block, param_ty, uncasted_arg, arg_src);
runtime_args[runtime_i] = casted_arg;
runtime_i += 1;
@ -4588,7 +4588,7 @@ fn finishGenericCall(
total_i += 1;
}
try sema.resolveTypeForCodegen(block, call_src, new_fn_ty.fnReturnType());
try sema.resolveTypeFully(block, call_src, new_fn_ty.fnReturnType());
}
try sema.air_extra.ensureUnusedCapacity(sema.gpa, @typeInfo(Air.Call).Struct.fields.len +
runtime_args_len);
@ -7318,8 +7318,8 @@ fn zirShr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
const lhs = sema.resolveInst(extra.lhs);
const rhs = sema.resolveInst(extra.rhs);
if (try sema.resolveMaybeUndefVal(block, lhs_src, lhs)) |lhs_val| {
if (try sema.resolveMaybeUndefVal(block, rhs_src, rhs)) |rhs_val| {
if (try sema.resolveMaybeUndefVal(block, rhs_src, rhs)) |rhs_val| {
if (try sema.resolveMaybeUndefVal(block, lhs_src, lhs)) |lhs_val| {
const lhs_ty = sema.typeOf(lhs);
if (lhs_val.isUndef() or rhs_val.isUndef()) {
return sema.addConstUndef(lhs_ty);
@ -7331,6 +7331,12 @@ fn zirShr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
const val = try lhs_val.shr(rhs_val, sema.arena);
return sema.addConstant(lhs_ty, val);
}
// Even if lhs is not comptime known, we can still deduce certain things based
// on rhs.
// If rhs is 0, return lhs without doing any calculations.
else if (rhs_val.compareWithZero(.eq)) {
return lhs;
}
}
try sema.requireRuntimeBlock(block, src);
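As an aside on the shortcut above, a behavior-test-style sketch (illustrative only, not part of this diff): when the right-hand side is a comptime-known zero, the shift can fold to the left operand even if that operand is only runtime-known.
const std = @import("std");
test "shift right by comptime-known zero returns lhs unchanged" {
    var x: u8 = 0b1010_0110; // runtime-known lhs
    const shifted = x >> 0; // rhs is 0, so zirShr can return lhs directly
    try std.testing.expectEqual(x, shifted);
}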
@ -15222,7 +15228,7 @@ fn resolveStructLayout(
.field_types_wip, .layout_wip => {
return sema.fail(block, src, "struct {} depends on itself", .{ty});
},
.have_layout => return,
.have_layout, .fully_resolved_wip, .fully_resolved => return,
}
struct_obj.status = .layout_wip;
for (struct_obj.fields.values()) |field| {
@ -15244,7 +15250,7 @@ fn resolveUnionLayout(
.field_types_wip, .layout_wip => {
return sema.fail(block, src, "union {} depends on itself", .{ty});
},
.have_layout => return,
.have_layout, .fully_resolved_wip, .fully_resolved => return,
}
union_obj.status = .layout_wip;
for (union_obj.fields.values()) |field| {
@ -15253,7 +15259,7 @@ fn resolveUnionLayout(
union_obj.status = .have_layout;
}
fn resolveTypeForCodegen(
fn resolveTypeFully(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
@ -15262,20 +15268,67 @@ fn resolveTypeForCodegen(
switch (ty.zigTypeTag()) {
.Pointer => {
const child_ty = try sema.resolveTypeFields(block, src, ty.childType());
return resolveTypeForCodegen(sema, block, src, child_ty);
return resolveTypeFully(sema, block, src, child_ty);
},
.Struct => return resolveStructLayout(sema, block, src, ty),
.Union => return resolveUnionLayout(sema, block, src, ty),
.Array => return resolveTypeForCodegen(sema, block, src, ty.childType()),
.Struct => return resolveStructFully(sema, block, src, ty),
.Union => return resolveUnionFully(sema, block, src, ty),
.Array => return resolveTypeFully(sema, block, src, ty.childType()),
.Optional => {
var buf: Type.Payload.ElemType = undefined;
return resolveTypeForCodegen(sema, block, src, ty.optionalChild(&buf));
return resolveTypeFully(sema, block, src, ty.optionalChild(&buf));
},
.ErrorUnion => return resolveTypeForCodegen(sema, block, src, ty.errorUnionPayload()),
.ErrorUnion => return resolveTypeFully(sema, block, src, ty.errorUnionPayload()),
else => {},
}
}
fn resolveStructFully(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
ty: Type,
) CompileError!void {
try resolveStructLayout(sema, block, src, ty);
const resolved_ty = try sema.resolveTypeFields(block, src, ty);
const struct_obj = resolved_ty.castTag(.@"struct").?.data;
switch (struct_obj.status) {
.none, .have_field_types, .field_types_wip, .layout_wip, .have_layout => {},
.fully_resolved_wip, .fully_resolved => return,
}
// After the struct layout has been resolved, we have to go over the fields again
// to make sure pointer fields get their child types resolved as well.
struct_obj.status = .fully_resolved_wip;
for (struct_obj.fields.values()) |field| {
try sema.resolveTypeFully(block, src, field.ty);
}
struct_obj.status = .fully_resolved;
}
fn resolveUnionFully(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
ty: Type,
) CompileError!void {
try resolveUnionLayout(sema, block, src, ty);
const resolved_ty = try sema.resolveTypeFields(block, src, ty);
const union_obj = resolved_ty.cast(Type.Payload.Union).?.data;
switch (union_obj.status) {
.none, .have_field_types, .field_types_wip, .layout_wip, .have_layout => {},
.fully_resolved_wip, .fully_resolved => return,
}
// The same goes for unions (see the comment in resolveStructFully above).
union_obj.status = .fully_resolved_wip;
for (union_obj.fields.values()) |field| {
try sema.resolveTypeFully(block, src, field.ty);
}
union_obj.status = .fully_resolved;
}
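For intuition, a hypothetical pair of types (not from this PR) showing what `fully_resolved` adds over `have_layout`: laying out `Outer` only needs the pointer field's own size and alignment, so `Inner` behind the pointer may still be unresolved until `resolveStructFully` walks the field types as above.
// `have_layout` on Outer: pointer size/alignment known, Inner possibly not laid out.
// `fully_resolved` on Outer: Inner's layout is resolved too, through the pointer field.
const Inner = struct { a: u32, b: u16 };
const Outer = struct {
    ptr: *Inner,
    flag: bool,
};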
fn resolveTypeFields(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError!Type {
switch (ty.tag()) {
.@"struct" => {
@ -15285,7 +15338,12 @@ fn resolveTypeFields(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) Comp
.field_types_wip => {
return sema.fail(block, src, "struct {} depends on itself", .{ty});
},
.have_field_types, .have_layout, .layout_wip => return ty,
.have_field_types,
.have_layout,
.layout_wip,
.fully_resolved_wip,
.fully_resolved,
=> return ty,
}
struct_obj.status = .field_types_wip;
@ -15318,7 +15376,12 @@ fn resolveTypeFields(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) Comp
.field_types_wip => {
return sema.fail(block, src, "union {} depends on itself", .{ty});
},
.have_field_types, .have_layout, .layout_wip => return ty,
.have_field_types,
.have_layout,
.layout_wip,
.fully_resolved_wip,
.fully_resolved,
=> return ty,
}
union_obj.status = .field_types_wip;


@ -2647,7 +2647,7 @@ pub const FuncGen = struct {
switch (struct_ty.zigTypeTag()) {
.Struct => {
var ptr_ty_buf: Type.Payload.Pointer = undefined;
const llvm_field_index = llvmFieldIndex(struct_ty, field_index, target, &ptr_ty_buf);
const llvm_field_index = llvmFieldIndex(struct_ty, field_index, target, &ptr_ty_buf).?;
const field_ptr = self.builder.buildStructGEP(struct_llvm_val, llvm_field_index, "");
const field_ptr_ty = Type.initPayload(&ptr_ty_buf.base);
return self.load(field_ptr, field_ptr_ty);
@ -4354,8 +4354,18 @@ pub const FuncGen = struct {
.Struct => {
const target = self.dg.module.getTarget();
var ty_buf: Type.Payload.Pointer = undefined;
const llvm_field_index = llvmFieldIndex(struct_ty, field_index, target, &ty_buf);
return self.builder.buildStructGEP(struct_ptr, llvm_field_index, "");
if (llvmFieldIndex(struct_ty, field_index, target, &ty_buf)) |llvm_field_index| {
return self.builder.buildStructGEP(struct_ptr, llvm_field_index, "");
} else {
// If we found no index, this is a zero-sized field at the end of the
// struct. Treat the struct pointer as if it pointed into an array and
// take the element at index `1`, which is a pointer to the end of
// the struct.
const llvm_usize = try self.dg.llvmType(Type.usize);
const llvm_index = llvm_usize.constInt(1, .False);
const indices: [1]*const llvm.Value = .{llvm_index};
return self.builder.buildInBoundsGEP(struct_ptr, &indices, indices.len, "");
}
},
.Union => return self.unionFieldPtr(inst, struct_ptr, struct_ty, field_index),
else => unreachable,
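A hypothetical case (names are illustrative, not from this diff) that exercises the new fallback branch: taking the address of a zero-sized field declared last in a struct, where no LLVM field exists and the pointer must point to the struct's end.
const S = struct {
    data: u64,
    tail: void, // zero-sized: no corresponding LLVM field
};
test "pointer to trailing zero-sized field" {
    var s: S = .{ .data = 1, .tail = {} };
    // Lowered via the new in-bounds GEP to element 1, i.e. the end of the struct.
    const end_ptr = &s.tail;
    _ = end_ptr;
}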
@ -4750,32 +4760,37 @@ fn toLlvmCallConv(cc: std.builtin.CallingConvention, target: std.Target) llvm.Ca
};
}
/// Take into account 0 bit fields.
/// Takes 0-bit fields into account. Returns null if no LLVM field could be found. This only
/// happens when asking for the field index of a zero-sized field at the end of the struct.
fn llvmFieldIndex(
ty: Type,
field_index: u32,
target: std.Target,
ptr_pl_buf: *Type.Payload.Pointer,
) c_uint {
) ?c_uint {
const struct_obj = ty.castTag(.@"struct").?.data;
if (struct_obj.layout != .Packed) {
var llvm_field_index: c_uint = 0;
for (struct_obj.fields.values()) |field, i| {
if (!field.ty.hasCodeGenBits()) continue;
if (i == field_index) {
ptr_pl_buf.* = .{
.data = .{
.pointee_type = field.ty,
.@"align" = field.normalAlignment(target),
.@"addrspace" = .generic,
},
};
return llvm_field_index;
if (!field.ty.hasCodeGenBits())
continue;
if (field_index > i) {
llvm_field_index += 1;
continue;
}
llvm_field_index += 1;
ptr_pl_buf.* = .{
.data = .{
.pointee_type = field.ty,
.@"align" = field.normalAlignment(target),
.@"addrspace" = .generic,
},
};
return llvm_field_index;
} else {
// We did not find an LLVM field that corresponds to this Zig field.
return null;
}
unreachable;
}
// Our job here is to return the host integer field index.
@ -4784,7 +4799,8 @@ fn llvmFieldIndex(
var running_bits: u16 = 0;
var llvm_field_index: c_uint = 0;
for (struct_obj.fields.values()) |field, i| {
if (!field.ty.hasCodeGenBits()) continue;
if (!field.ty.hasCodeGenBits())
continue;
const field_align = field.packedAlignment();
if (field_align == 0) {
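To make the index mapping concrete, a hypothetical non-packed struct (not from this diff) and the values `llvmFieldIndex` now reports for it: zero-bit fields get no LLVM field, so the Zig and LLVM indices diverge after one appears.
const Example = struct {
    a: u32, // Zig index 0 -> LLVM index 0
    b: void, // Zig index 1 -> zero-bit, skipped; asking for it returns null
    c: u16, // Zig index 2 -> LLVM index 1
};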


@ -1916,7 +1916,7 @@ pub const Type = extern union {
const fields = self.structFields();
const is_packed = if (self.castTag(.@"struct")) |payload| p: {
const struct_obj = payload.data;
assert(struct_obj.status == .have_layout);
assert(struct_obj.haveLayout());
break :p struct_obj.layout == .Packed;
} else false;
@ -2220,7 +2220,7 @@ pub const Type = extern union {
if (field_count == 0) return 0;
const struct_obj = ty.castTag(.@"struct").?.data;
assert(struct_obj.status == .have_layout);
assert(struct_obj.haveLayout());
var total: u64 = 0;
for (struct_obj.fields.values()) |field| {
@ -3771,7 +3771,7 @@ pub const Type = extern union {
switch (ty.tag()) {
.@"struct" => {
const struct_obj = ty.castTag(.@"struct").?.data;
assert(struct_obj.status == .have_layout);
assert(struct_obj.haveLayout());
const is_packed = struct_obj.layout == .Packed;
if (!is_packed) {
var offset: u64 = 0;


@ -2635,9 +2635,17 @@ pub const Value = extern union {
var lhs_space: Value.BigIntSpace = undefined;
const lhs_bigint = lhs.toBigInt(&lhs_space);
const shift = @intCast(usize, rhs.toUnsignedInt());
const result_limbs = lhs_bigint.limbs.len -| (shift / (@sizeOf(std.math.big.Limb) * 8));
if (result_limbs == 0) {
// The shift is enough to remove all the bits from the number, which means the
// result is zero.
return Value.zero;
}
const limbs = try allocator.alloc(
std.math.big.Limb,
lhs_bigint.limbs.len - (shift / (@sizeOf(std.math.big.Limb) * 8)),
result_limbs,
);
var result_bigint = BigIntMutable{
.limbs = limbs,
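For intuition about the new `result_limbs` computation, a comptime sketch assuming 64-bit limbs (the exact limb width depends on the target):
const std = @import("std");
comptime {
    // 1 occupies a single limb; shifting right by 200 bits drops 200 / 64 = 3
    // limbs, so `result_limbs` saturates at 0 and the value folds to zero.
    std.debug.assert((1 >> 200) == 0);
}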


@ -73,6 +73,7 @@ test {
_ = @import("behavior/array_llvm.zig");
_ = @import("behavior/atomics.zig");
_ = @import("behavior/basic_llvm.zig");
_ = @import("behavior/bit_shifting.zig");
_ = @import("behavior/bugs/394.zig");
_ = @import("behavior/bugs/656.zig");
_ = @import("behavior/bugs/1277.zig");
@ -120,7 +121,6 @@ test {
_ = @import("behavior/async_fn.zig");
}
_ = @import("behavior/await_struct.zig");
_ = @import("behavior/bit_shifting.zig");
_ = @import("behavior/bitcast_stage1.zig");
_ = @import("behavior/bitreverse.zig");
_ = @import("behavior/bugs/421.zig");


@ -72,6 +72,7 @@ test "sharded table" {
try testShardedTable(u0, 0, 1);
}
fn testShardedTable(comptime Key: type, comptime mask_bit_count: comptime_int, comptime node_count: comptime_int) !void {
const Table = ShardedTable(Key, mask_bit_count, void);