Mirror of https://github.com/ziglang/zig.git (synced 2026-02-13 12:59:04 +00:00)

Merge pull request #12337 from Vexu/stage2-safety
Stage2: implement remaining runtime safety checks

Commit b3d463c9e6
@@ -660,6 +660,10 @@ pub const Inst = struct {
/// Uses the `pl_op` field with payload `AtomicRmw`. Operand is `ptr`.
atomic_rmw,

/// Returns true if enum tag value has a name.
/// Uses the `un_op` field.
is_named_enum_value,

/// Given an enum tag value, returns the tag name. The enum type may be non-exhaustive.
/// Result type is always `[:0]const u8`.
/// Uses the `un_op` field.

@@ -1057,6 +1061,7 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type {
.is_non_err,
.is_err_ptr,
.is_non_err_ptr,
.is_named_enum_value,
=> return Type.bool,

.const_ty => return Type.type,

@@ -291,6 +291,7 @@ pub fn categorizeOperand(
.is_non_err_ptr,
.ptrtoint,
.bool_to_int,
.is_named_enum_value,
.tag_name,
.error_name,
.sqrt,

@@ -858,6 +859,7 @@ fn analyzeInst(
.bool_to_int,
.ret,
.ret_load,
.is_named_enum_value,
.tag_name,
.error_name,
.sqrt,
212 src/Sema.zig
@@ -1578,8 +1578,7 @@ pub fn setupErrorReturnTrace(sema: *Sema, block: *Block, last_arg_index: usize)

// st.index = 0;
const index_field_ptr = try sema.fieldPtr(&err_trace_block, src, st_ptr, "index", src, true);
const zero = try sema.addConstant(Type.usize, Value.zero);
try sema.storePtr2(&err_trace_block, src, index_field_ptr, src, zero, src, .store);
try sema.storePtr2(&err_trace_block, src, index_field_ptr, src, .zero_usize, src, .store);

// @errorReturnTrace() = &st;
_ = try err_trace_block.addUnOp(.set_err_return_trace, st_ptr);
@@ -6949,8 +6948,12 @@ fn zirIntToEnum(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
}

try sema.requireRuntimeBlock(block, src, operand_src);
// TODO insert safety check to make sure the value matches an enum value
return block.addTyOp(.intcast, dest_ty, operand);
const result = try block.addTyOp(.intcast, dest_ty, operand);
if (block.wantSafety() and !dest_ty.isNonexhaustiveEnum() and sema.mod.comp.bin_file.options.use_llvm) {
const ok = try block.addUnOp(.is_named_enum_value, result);
try sema.addSafetyCheck(block, ok, .invalid_enum_value);
}
return result;
}

/// Pointer in, pointer out.
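With the new `is_named_enum_value` check, `@intToEnum` on an exhaustive enum panics with "invalid enum value" in safe build modes when the integer matches no declared tag (LLVM backend only, per the `use_llvm` condition above). A minimal sketch of the user-facing behavior, modeled on the @intToEnum test case added later in this PR; the exact field list of `Foo` here is illustrative:

const Foo = enum { A, B, C };

fn bar(a: u2) Foo {
    // 3 has no corresponding tag in Foo, so with runtime safety enabled
    // this now panics with "invalid enum value".
    return @intToEnum(Foo, a);
}

pub fn main() void {
    var a: u2 = 3;
    _ = bar(a);
}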
@@ -9707,7 +9710,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
}

var final_else_body: []const Air.Inst.Index = &.{};
if (special.body.len != 0 or !is_first) {
if (special.body.len != 0 or !is_first or case_block.wantSafety()) {
var wip_captures = try WipCaptureScope.init(gpa, sema.perm_arena, child_block.wip_capture_scope);
defer wip_captures.deinit();

@@ -9730,9 +9733,11 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
} else {
// We still need a terminator in this block, but we have proven
// that it is unreachable.
// TODO this should be a special safety panic other than unreachable, something
// like "panic: switch operand had corrupt value not allowed by the type"
try case_block.addUnreachable(src, true);
if (case_block.wantSafety()) {
_ = try sema.safetyPanic(&case_block, src, .corrupt_switch);
} else {
_ = try case_block.addNoOp(.unreach);
}
}

try wip_captures.finalize();
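The implicit else prong of an exhaustive switch now emits a `corrupt_switch` safety panic instead of plain `unreachable` when safety is enabled, resolving the TODO removed above. A sketch of the behavior, taken almost verbatim from the "switch on corrupt value" test case added at the end of this PR:

const E = enum(u32) { X = 1, Y = 2 };

pub fn main() void {
    var e: E = undefined;
    // Fill the backing integer with a value that is not a valid tag of E.
    @memset(@ptrCast([*]u8, &e), 0x55, @sizeOf(E));
    switch (e) {
        // With safety on, reaching the implicit else prong now panics with
        // "switch on corrupt value" instead of hitting unreachable.
        .X, .Y => @breakpoint(),
    }
}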
@@ -10241,34 +10246,57 @@ fn zirShl(
} else rhs;

try sema.requireRuntimeBlock(block, src, runtime_src);
if (block.wantSafety() and air_tag == .shl_exact) {
const op_ov_tuple_ty = try sema.overflowArithmeticTupleType(lhs_ty);
const op_ov = try block.addInst(.{
.tag = .shl_with_overflow,
.data = .{ .ty_pl = .{
.ty = try sema.addType(op_ov_tuple_ty),
.payload = try sema.addExtra(Air.Bin{
.lhs = lhs,
.rhs = rhs,
}),
} },
});
const ov_bit = try sema.tupleFieldValByIndex(block, src, op_ov, 1, op_ov_tuple_ty);
const any_ov_bit = if (lhs_ty.zigTypeTag() == .Vector)
try block.addInst(.{
.tag = if (block.float_mode == .Optimized) .reduce_optimized else .reduce,
.data = .{ .reduce = .{
.operand = ov_bit,
.operation = .Or,
} },
})
else
ov_bit;
const zero_ov = try sema.addConstant(Type.@"u1", Value.zero);
const no_ov = try block.addBinOp(.cmp_eq, any_ov_bit, zero_ov);
if (block.wantSafety()) {
const bit_count = scalar_ty.intInfo(target).bits;
if (!std.math.isPowerOfTwo(bit_count)) {
const bit_count_val = try Value.Tag.int_u64.create(sema.arena, bit_count);

try sema.addSafetyCheck(block, no_ov, .shl_overflow);
return sema.tupleFieldValByIndex(block, src, op_ov, 0, op_ov_tuple_ty);
const ok = if (rhs_ty.zigTypeTag() == .Vector) ok: {
const bit_count_inst = try sema.addConstant(rhs_ty, try Value.Tag.repeated.create(sema.arena, bit_count_val));
const lt = try block.addCmpVector(rhs, bit_count_inst, .lt, try sema.addType(rhs_ty));
break :ok try block.addInst(.{
.tag = .reduce,
.data = .{ .reduce = .{
.operand = lt,
.operation = .And,
} },
});
} else ok: {
const bit_count_inst = try sema.addConstant(rhs_ty, bit_count_val);
break :ok try block.addBinOp(.cmp_lt, rhs, bit_count_inst);
};
try sema.addSafetyCheck(block, ok, .shift_rhs_too_big);
}

if (air_tag == .shl_exact) {
const op_ov_tuple_ty = try sema.overflowArithmeticTupleType(lhs_ty);
const op_ov = try block.addInst(.{
.tag = .shl_with_overflow,
.data = .{ .ty_pl = .{
.ty = try sema.addType(op_ov_tuple_ty),
.payload = try sema.addExtra(Air.Bin{
.lhs = lhs,
.rhs = rhs,
}),
} },
});
const ov_bit = try sema.tupleFieldValByIndex(block, src, op_ov, 1, op_ov_tuple_ty);
const any_ov_bit = if (lhs_ty.zigTypeTag() == .Vector)
try block.addInst(.{
.tag = if (block.float_mode == .Optimized) .reduce_optimized else .reduce,
.data = .{ .reduce = .{
.operand = ov_bit,
.operation = .Or,
} },
})
else
ov_bit;
const zero_ov = try sema.addConstant(Type.@"u1", Value.zero);
const no_ov = try block.addBinOp(.cmp_eq, any_ov_bit, zero_ov);

try sema.addSafetyCheck(block, no_ov, .shl_overflow);
return sema.tupleFieldValByIndex(block, src, op_ov, 0, op_ov_tuple_ty);
}
}
return block.addBinOp(air_tag, lhs, new_rhs);
}
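The new `shift_rhs_too_big` check only applies to integer types whose bit width is not a power of two; for power-of-two widths the right-hand-side type already cannot represent an out-of-range amount. A hedged sketch of a program that should trip it (the specific types and values are illustrative, not taken from the PR's tests):

pub fn main() void {
    var x: u24 = 1;
    var amt: u5 = 24; // representable in u5, but not a valid shift amount for u24
    // With safety on, this now panics with
    // "shift amount is greater than the type size".
    _ = x << amt;
}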
@@ -10347,20 +10375,43 @@ fn zirShr(

try sema.requireRuntimeBlock(block, src, runtime_src);
const result = try block.addBinOp(air_tag, lhs, rhs);
if (block.wantSafety() and air_tag == .shr_exact) {
const back = try block.addBinOp(.shl, result, rhs);
if (block.wantSafety()) {
const bit_count = scalar_ty.intInfo(target).bits;
if (!std.math.isPowerOfTwo(bit_count)) {
const bit_count_val = try Value.Tag.int_u64.create(sema.arena, bit_count);

const ok = if (rhs_ty.zigTypeTag() == .Vector) ok: {
const eql = try block.addCmpVector(lhs, back, .eq, try sema.addType(rhs_ty));
break :ok try block.addInst(.{
.tag = if (block.float_mode == .Optimized) .reduce_optimized else .reduce,
.data = .{ .reduce = .{
.operand = eql,
.operation = .And,
} },
});
} else try block.addBinOp(.cmp_eq, lhs, back);
try sema.addSafetyCheck(block, ok, .shr_overflow);
const ok = if (rhs_ty.zigTypeTag() == .Vector) ok: {
const bit_count_inst = try sema.addConstant(rhs_ty, try Value.Tag.repeated.create(sema.arena, bit_count_val));
const lt = try block.addCmpVector(rhs, bit_count_inst, .lt, try sema.addType(rhs_ty));
break :ok try block.addInst(.{
.tag = .reduce,
.data = .{ .reduce = .{
.operand = lt,
.operation = .And,
} },
});
} else ok: {
const bit_count_inst = try sema.addConstant(rhs_ty, bit_count_val);
break :ok try block.addBinOp(.cmp_lt, rhs, bit_count_inst);
};
try sema.addSafetyCheck(block, ok, .shift_rhs_too_big);
}

if (air_tag == .shr_exact) {
const back = try block.addBinOp(.shl, result, rhs);

const ok = if (rhs_ty.zigTypeTag() == .Vector) ok: {
const eql = try block.addCmpVector(lhs, back, .eq, try sema.addType(rhs_ty));
break :ok try block.addInst(.{
.tag = if (block.float_mode == .Optimized) .reduce_optimized else .reduce,
.data = .{ .reduce = .{
.operand = eql,
.operation = .And,
} },
});
} else try block.addBinOp(.cmp_eq, lhs, back);
try sema.addSafetyCheck(block, ok, .shr_overflow);
}
}
return result;
}
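Right shifts get the same bound check on the shift amount; the pre-existing @shrExact check (that no set bits were shifted out) is kept and simply nested inside the safety block. An illustrative sketch, analogous to the left-shift example above:

pub fn main() void {
    var x: u24 = 0xABCDEF;
    var amt: u5 = 24;
    // Panics with "shift amount is greater than the type size" in safe modes.
    _ = x >> amt;
}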
@@ -15961,6 +16012,11 @@ fn zirTagName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
const field_name = enum_ty.enumFieldName(field_index);
return sema.addStrLit(block, field_name);
}
try sema.requireRuntimeBlock(block, src, operand_src);
if (block.wantSafety() and sema.mod.comp.bin_file.options.use_llvm) {
const ok = try block.addUnOp(.is_named_enum_value, casted_operand);
try sema.addSafetyCheck(block, ok, .invalid_enum_value);
}
// In case the value is runtime-known, we have an AIR instruction for this instead
// of trying to lower it in Sema because an optimization pass may result in the operand
// being comptime-known, which would let us elide the `tag_name` AIR instruction.
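@tagName on a runtime-known enum value is guarded by the same `is_named_enum_value` check. A sketch reusing the corrupt-value pattern from the PR's own test cases (the enum E mirrors the one used there):

const E = enum(u32) { X = 1, Y = 2 };

pub fn main() void {
    var e: E = undefined;
    @memset(@ptrCast([*]u8, &e), 0x55, @sizeOf(E));
    // With safety enabled (LLVM backend), @tagName on a value that matches
    // no declared tag panics with "invalid enum value".
    _ = @tagName(e);
}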
@@ -16942,7 +16998,7 @@ fn zirIntToPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
}

try sema.requireRuntimeBlock(block, src, operand_src);
if (block.wantSafety()) {
if (block.wantSafety() and try sema.typeHasRuntimeBits(block, sema.src, type_res.elemType2())) {
if (!type_res.isAllowzeroPtr()) {
const is_non_zero = try block.addBinOp(.cmp_neq, operand_coerced, .zero_usize);
try sema.addSafetyCheck(block, is_non_zero, .cast_to_null);
@@ -17234,7 +17290,9 @@ fn zirAlignCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
}

try sema.requireRuntimeBlock(block, inst_data.src(), ptr_src);
if (block.wantSafety() and dest_align > 1) {
if (block.wantSafety() and dest_align > 1 and
try sema.typeHasRuntimeBits(block, sema.src, dest_ty.elemType2()))
{
const val_payload = try sema.arena.create(Value.Payload.U64);
val_payload.* = .{
.base = .{ .tag = .int_u64 },

@@ -17253,7 +17311,7 @@ fn zirAlignCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
const is_aligned = try block.addBinOp(.cmp_eq, remainder, .zero_usize);
const ok = if (ptr_ty.isSlice()) ok: {
const len = try sema.analyzeSliceLen(block, ptr_src, ptr);
const len_zero = try block.addBinOp(.cmp_eq, len, try sema.addConstant(Type.usize, Value.zero));
const len_zero = try block.addBinOp(.cmp_eq, len, .zero_usize);
break :ok try block.addBinOp(.bit_or, len_zero, is_aligned);
} else is_aligned;
try sema.addSafetyCheck(block, ok, .incorrect_alignment);
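The @alignCast check is unchanged in spirit (the address modulo the destination alignment must be zero, or the slice must be empty), but it is now skipped for zero-bit element types. A hedged sketch of a program that should still trip it; the buffer and offset are illustrative:

pub fn main() void {
    var buf: [8]u8 align(4) = undefined;
    var offset: usize = 1;
    // The address of buf[1] is misaligned for the requested alignment, so
    // with safety on this panics with "incorrect alignment".
    const ptr = @alignCast(4, &buf[offset]);
    _ = ptr;
}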
@@ -20114,6 +20172,9 @@ pub const PanicId = enum {
/// TODO make this call `std.builtin.panicInactiveUnionField`.
inactive_union_field,
integer_part_out_of_bounds,
corrupt_switch,
shift_rhs_too_big,
invalid_enum_value,
};

fn addSafetyCheck(
@@ -20408,6 +20469,9 @@ fn safetyPanic(
.exact_division_remainder => "exact division produced remainder",
.inactive_union_field => "access of inactive union field",
.integer_part_out_of_bounds => "integer part of floating point value out of bounds",
.corrupt_switch => "switch on corrupt value",
.shift_rhs_too_big => "shift amount is greater than the type size",
.invalid_enum_value => "invalid enum value",
};

const msg_inst = msg_inst: {
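The new PanicId values surface to user code as ordinary panic messages, which is how the test cases added in this PR detect them: a root-level panic override compares the message text and exits accordingly. A sketch combining that pattern with the union variant of the corrupt-switch test (the main body mirrors that test):

const std = @import("std");

pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace) noreturn {
    _ = stack_trace;
    // The three messages added by this PR.
    if (std.mem.eql(u8, message, "switch on corrupt value") or
        std.mem.eql(u8, message, "shift amount is greater than the type size") or
        std.mem.eql(u8, message, "invalid enum value"))
    {
        std.process.exit(0);
    }
    std.process.exit(1);
}

const U = union(enum(u32)) { X: u8, Y: i8 };

pub fn main() !void {
    var u: U = undefined;
    @memset(@ptrCast([*]u8, &u), 0x55, @sizeOf(U));
    switch (u) {
        .X, .Y => @breakpoint(),
    }
    return error.TestFailed;
}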
@@ -22096,7 +22160,6 @@ fn coerceExtra(
.ok => {},
else => break :src_c_ptr,
}
// TODO add safety check for null pointer
return sema.coerceCompatiblePtrs(block, dest_ty, inst, inst_src);
}
@@ -24569,6 +24632,24 @@ fn coerceCompatiblePtrs(
return sema.addConstant(dest_ty, val);
}
try sema.requireRuntimeBlock(block, inst_src, null);
const inst_ty = sema.typeOf(inst);
const inst_allows_zero = (inst_ty.zigTypeTag() == .Pointer and inst_ty.ptrAllowsZero()) or true;
if (block.wantSafety() and inst_allows_zero and !dest_ty.ptrAllowsZero() and
try sema.typeHasRuntimeBits(block, sema.src, dest_ty.elemType2()))
{
const actual_ptr = if (inst_ty.isSlice())
try sema.analyzeSlicePtr(block, inst_src, inst, inst_ty)
else
inst;
const ptr_int = try block.addUnOp(.ptrtoint, actual_ptr);
const is_non_zero = try block.addBinOp(.cmp_neq, ptr_int, .zero_usize);
const ok = if (inst_ty.isSlice()) ok: {
const len = try sema.analyzeSliceLen(block, inst_src, inst);
const len_zero = try block.addBinOp(.cmp_eq, len, .zero_usize);
break :ok try block.addBinOp(.bit_or, len_zero, is_non_zero);
} else is_non_zero;
try sema.addSafetyCheck(block, ok, .cast_to_null);
}
return sema.bitCast(block, dest_ty, inst, inst_src);
}
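This resolves the "TODO add safety check for null pointer" removed in coerceExtra above: coercing a possibly-null [*c] pointer to a non-allowzero pointer now gets a runtime null check (skipped for zero-bit element types and for empty slices). The behavior matches the test case added below:

pub fn main() !void {
    var c_ptr: [*c]u8 = 0;
    // With safety on, this coercion panics with "cast causes pointer to be null".
    var zig_ptr: *u8 = c_ptr;
    _ = zig_ptr;
    return error.TestFailed;
}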
@@ -25708,6 +25789,27 @@ fn analyzeSlice(
const new_ptr_val = opt_new_ptr_val orelse {
const result = try block.addBitCast(return_ty, new_ptr);
if (block.wantSafety()) {
// requirement: slicing C ptr is non-null
if (ptr_ptr_child_ty.isCPtr()) {
const is_non_null = try sema.analyzeIsNull(block, ptr_src, ptr, true);
try sema.addSafetyCheck(block, is_non_null, .unwrap_null);
}

if (slice_ty.isSlice()) {
const slice_len_inst = try block.addTyOp(.slice_len, Type.usize, ptr_or_slice);
const actual_len = if (slice_ty.sentinel() == null)
slice_len_inst
else
try sema.analyzeArithmetic(block, .add, slice_len_inst, .one, src, end_src, end_src);

const actual_end = if (slice_sentinel != null)
try sema.analyzeArithmetic(block, .add, end, .one, src, end_src, end_src)
else
end;

try sema.panicIndexOutOfBounds(block, src, actual_end, actual_len, .cmp_lte);
}

// requirement: result[new_len] == slice_sentinel
try sema.panicSentinelMismatch(block, src, slice_sentinel, elem_ty, result, new_len);
}

@@ -25769,7 +25871,11 @@ fn analyzeSlice(
break :blk try sema.analyzeArithmetic(block, .add, slice_len_inst, .one, src, end_src, end_src);
} else null;
if (opt_len_inst) |len_inst| {
try sema.panicIndexOutOfBounds(block, src, end, len_inst, .cmp_lte);
const actual_end = if (slice_sentinel != null)
try sema.analyzeArithmetic(block, .add, end, .one, src, end_src, end_src)
else
end;
try sema.panicIndexOutOfBounds(block, src, actual_end, len_inst, .cmp_lte);
}

// requirement: start <= end
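For slices with a sentinel, the upper bound is now checked against the sentinel-adjusted length and the element at the sentinel position is compared against the expected sentinel, which is where the more specific "attempt to index out of bound: ..." and "sentinel mismatch: expected ..., found ..." messages exercised by the updated tests come from. A sketch taken from one of those tests:

pub fn main() !void {
    var buf: [4]u8 = .{ 1, 2, 3, 4 };
    const ptr: [*]u8 = &buf;
    // buf[3] is 4, not the requested sentinel 0, so with safety on this
    // panics with "sentinel mismatch: expected 0, found 4".
    const slice = ptr[0..3 :0];
    _ = slice;
    return error.TestFailed;
}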
@@ -777,6 +777,8 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.float_to_int_optimized,
=> return self.fail("TODO implement optimized float mode", .{}),

.is_named_enum_value => return self.fail("TODO implement is_named_enum_value", .{}),

.wasm_memory_size => unreachable,
.wasm_memory_grow => unreachable,
// zig fmt: on

@@ -768,6 +768,8 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.float_to_int_optimized,
=> return self.fail("TODO implement optimized float mode", .{}),

.is_named_enum_value => return self.fail("TODO implement is_named_enum_value", .{}),

.wasm_memory_size => unreachable,
.wasm_memory_grow => unreachable,
// zig fmt: on

@@ -693,6 +693,8 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.float_to_int_optimized,
=> return self.fail("TODO implement optimized float mode", .{}),

.is_named_enum_value => return self.fail("TODO implement is_named_enum_value", .{}),

.wasm_memory_size => unreachable,
.wasm_memory_grow => unreachable,
// zig fmt: on

@@ -705,6 +705,8 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.float_to_int_optimized,
=> @panic("TODO implement optimized float mode"),

.is_named_enum_value => @panic("TODO implement is_named_enum_value"),

.wasm_memory_size => unreachable,
.wasm_memory_grow => unreachable,
// zig fmt: on

@@ -1621,6 +1621,7 @@ fn genInst(self: *Self, inst: Air.Inst.Index) !WValue {
.tag_name,
.err_return_trace,
.set_err_return_trace,
.is_named_enum_value,
=> |tag| return self.fail("TODO: Implement wasm inst: {s}", .{@tagName(tag)}),

.add_optimized,

@@ -775,6 +775,8 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.float_to_int_optimized,
=> return self.fail("TODO implement optimized float mode", .{}),

.is_named_enum_value => return self.fail("TODO implement is_named_enum_value", .{}),

.wasm_memory_size => unreachable,
.wasm_memory_grow => unreachable,
// zig fmt: on

@@ -1952,6 +1952,8 @@ fn genBody(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, OutO
.reduce_optimized,
.float_to_int_optimized,
=> return f.fail("TODO implement optimized float mode", .{}),

.is_named_enum_value => return f.fail("TODO: C backend: implement is_named_enum_value", .{}),
// zig fmt: on
};
switch (result_value) {

@@ -3250,7 +3252,7 @@ fn airIsNull(

const ty = f.air.typeOf(un_op);
var opt_buf: Type.Payload.ElemType = undefined;
const payload_ty = if (ty.zigTypeTag() == .Pointer)
const payload_ty = if (deref_suffix[0] != 0)
ty.childType().optionalChild(&opt_buf)
else
ty.optionalChild(&opt_buf);

@@ -201,6 +201,8 @@ pub const Object = struct {
/// * it works for functions not all globals.
/// Therefore, this table keeps track of the mapping.
decl_map: std.AutoHashMapUnmanaged(Module.Decl.Index, *const llvm.Value),
/// Serves the same purpose as `decl_map` but only used for the `is_named_enum_value` instruction.
named_enum_map: std.AutoHashMapUnmanaged(Module.Decl.Index, *const llvm.Value),
/// Maps Zig types to LLVM types. The table memory itself is backed by the GPA of
/// the compiler, but the Type/Value memory here is backed by `type_map_arena`.
/// TODO we need to remove entries from this map in response to incremental compilation

@@ -377,6 +379,7 @@ pub const Object = struct {
.target_data = target_data,
.target = options.target,
.decl_map = .{},
.named_enum_map = .{},
.type_map = .{},
.type_map_arena = std.heap.ArenaAllocator.init(gpa),
.di_type_map = .{},

@@ -396,6 +399,7 @@ pub const Object = struct {
self.llvm_module.dispose();
self.context.dispose();
self.decl_map.deinit(gpa);
self.named_enum_map.deinit(gpa);
self.type_map.deinit(gpa);
self.type_map_arena.deinit();
self.extern_collisions.deinit(gpa);

@@ -4180,6 +4184,8 @@ pub const FuncGen = struct {
.union_init => try self.airUnionInit(inst),
.prefetch => try self.airPrefetch(inst),

.is_named_enum_value => try self.airIsNamedEnumValue(inst),

.reduce => try self.airReduce(inst, false),
.reduce_optimized => try self.airReduce(inst, true),
@@ -7882,6 +7888,87 @@ pub const FuncGen = struct {
}
}

fn airIsNamedEnumValue(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
if (self.liveness.isUnused(inst)) return null;

const un_op = self.air.instructions.items(.data)[inst].un_op;
const operand = try self.resolveInst(un_op);
const enum_ty = self.air.typeOf(un_op);

const llvm_fn = try self.getIsNamedEnumValueFunction(enum_ty);
const params = [_]*const llvm.Value{operand};
return self.builder.buildCall(llvm_fn, &params, params.len, .Fast, .Auto, "");
}

fn getIsNamedEnumValueFunction(self: *FuncGen, enum_ty: Type) !*const llvm.Value {
const enum_decl = enum_ty.getOwnerDecl();

// TODO: detect when the type changes and re-emit this function.
const gop = try self.dg.object.named_enum_map.getOrPut(self.dg.gpa, enum_decl);
if (gop.found_existing) return gop.value_ptr.*;
errdefer assert(self.dg.object.named_enum_map.remove(enum_decl));

var arena_allocator = std.heap.ArenaAllocator.init(self.gpa);
defer arena_allocator.deinit();
const arena = arena_allocator.allocator();

const mod = self.dg.module;
const llvm_fn_name = try std.fmt.allocPrintZ(arena, "__zig_is_named_enum_value_{s}", .{
try mod.declPtr(enum_decl).getFullyQualifiedName(mod),
});

var int_tag_type_buffer: Type.Payload.Bits = undefined;
const int_tag_ty = enum_ty.intTagType(&int_tag_type_buffer);
const param_types = [_]*const llvm.Type{try self.dg.lowerType(int_tag_ty)};

const llvm_ret_ty = try self.dg.lowerType(Type.bool);
const fn_type = llvm.functionType(llvm_ret_ty, &param_types, param_types.len, .False);
const fn_val = self.dg.object.llvm_module.addFunction(llvm_fn_name, fn_type);
fn_val.setLinkage(.Internal);
fn_val.setFunctionCallConv(.Fast);
self.dg.addCommonFnAttributes(fn_val);
gop.value_ptr.* = fn_val;

const prev_block = self.builder.getInsertBlock();
const prev_debug_location = self.builder.getCurrentDebugLocation2();
defer {
self.builder.positionBuilderAtEnd(prev_block);
if (self.di_scope != null) {
self.builder.setCurrentDebugLocation2(prev_debug_location);
}
}

const entry_block = self.dg.context.appendBasicBlock(fn_val, "Entry");
self.builder.positionBuilderAtEnd(entry_block);
self.builder.clearCurrentDebugLocation();

const fields = enum_ty.enumFields();
const named_block = self.dg.context.appendBasicBlock(fn_val, "Named");
const unnamed_block = self.dg.context.appendBasicBlock(fn_val, "Unnamed");
const tag_int_value = fn_val.getParam(0);
const switch_instr = self.builder.buildSwitch(tag_int_value, unnamed_block, @intCast(c_uint, fields.count()));

for (fields.keys()) |_, field_index| {
const this_tag_int_value = int: {
var tag_val_payload: Value.Payload.U32 = .{
.base = .{ .tag = .enum_field_index },
.data = @intCast(u32, field_index),
};
break :int try self.dg.lowerValue(.{
.ty = enum_ty,
.val = Value.initPayload(&tag_val_payload.base),
});
};
switch_instr.addCase(this_tag_int_value, named_block);
}
self.builder.positionBuilderAtEnd(named_block);
_ = self.builder.buildRet(self.dg.context.intType(1).constInt(1, .False));

self.builder.positionBuilderAtEnd(unnamed_block);
_ = self.builder.buildRet(self.dg.context.intType(1).constInt(0, .False));
return fn_val;
}

fn airTagName(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
if (self.liveness.isUnused(inst)) return null;
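The generated __zig_is_named_enum_value_<name> helper is a per-enum function that switches on the integer tag value and returns true only for declared tags, branching to the "Named"/"Unnamed" blocks built above. In Zig terms it behaves roughly like the following hypothetical helper (not part of the PR, shown only to illustrate the lowering):

const std = @import("std");

fn isNamedEnumValue(comptime E: type, int: std.meta.Tag(E)) bool {
    // One comparison per declared field; anything else is "unnamed".
    inline for (std.meta.fields(E)) |field| {
        if (int == field.value) return true;
    }
    return false;
}

test "mirrors the generated helper" {
    const E = enum(u8) { X = 1, Y = 2, _ };
    try std.testing.expect(isNamedEnumValue(E, 1));
    try std.testing.expect(!isNamedEnumValue(E, 3));
}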
@@ -170,6 +170,7 @@ const Writer = struct {
.bool_to_int,
.ret,
.ret_load,
.is_named_enum_value,
.tag_name,
.error_name,
.sqrt,
@@ -531,6 +531,7 @@ test "switch with null and T peer types and inferred result location type" {
test "switch prongs with cases with identical payload types" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO

const Union = union(enum) {
A: usize,
@@ -1,9 +1,11 @@
const std = @import("std");

pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace) noreturn {
_ = message;
_ = stack_trace;
std.process.exit(0);
if (std.mem.eql(u8, message, "invalid enum value")) {
std.process.exit(0);
}
std.process.exit(1);
}
const Foo = enum {
A,

@@ -18,6 +20,7 @@ fn bar(a: u2) Foo {
return @intToEnum(Foo, a);
}
fn baz(_: Foo) void {}

// run
// backend=stage1
// backend=llvm
// target=native
@@ -10,6 +10,7 @@ pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace) noretur

const E = enum(u32) {
X = 1,
Y = 2,
};

pub fn main() !void {

@@ -21,5 +22,5 @@ pub fn main() !void {
}

// run
// backend=stage1
// backend=llvm
// target=native
@@ -10,6 +10,7 @@ pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace) noretur

const U = union(enum(u32)) {
X: u8,
Y: i8,
};

pub fn main() !void {

@@ -22,5 +23,5 @@ pub fn main() !void {
}

// run
// backend=stage1
// backend=llvm
// target=native
@@ -1,9 +1,11 @@
const std = @import("std");

pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace) noreturn {
_ = message;
_ = stack_trace;
std.process.exit(0);
if (std.mem.eql(u8, message, "exact division produced remainder")) {
std.process.exit(0);
}
std.process.exit(1);
}

pub fn main() !void {

@@ -15,5 +17,5 @@ fn widenSlice(slice: []align(1) const u8) []align(1) const i32 {
return std.mem.bytesAsSlice(i32, slice);
}
// run
// backend=stage1
// target=native
// backend=llvm
// target=native
@@ -2,7 +2,7 @@ const std = @import("std");

pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace) noreturn {
_ = stack_trace;
if (std.mem.eql(u8, message, "index out of bounds")) {
if (std.mem.eql(u8, message, "attempt to index out of bound: index 1, len 0")) {
std.process.exit(0);
}
std.process.exit(1);

@@ -17,5 +17,5 @@ pub fn main() !void {
}

// run
// backend=stage1
// backend=llvm
// target=native
@@ -1,16 +1,20 @@
const std = @import("std");

pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace) noreturn {
_ = message;
_ = stack_trace;
std.process.exit(0);
if (std.mem.eql(u8, message, "cast causes pointer to be null")) {
std.process.exit(0);
}
std.process.exit(1);
}

pub fn main() !void {
var c_ptr: [*c]u8 = 0;
var zig_ptr: *u8 = c_ptr;
_ = zig_ptr;
return error.TestFailed;
}

// run
// backend=stage1
// backend=llvm
// target=native
@@ -2,14 +2,14 @@ const std = @import("std");

pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace) noreturn {
_ = stack_trace;
if (std.mem.eql(u8, message, "sentinel mismatch")) {
if (std.mem.eql(u8, message, "sentinel mismatch: expected 0, found 4")) {
std.process.exit(0);
}
std.process.exit(1);
}

pub fn main() !void {
var buf: [4]u8 = undefined;
var buf: [4]u8 = .{ 1, 2, 3, 4 };
const ptr: [*]u8 = &buf;
const slice = ptr[0..3 :0];
_ = slice;

@@ -17,5 +17,5 @@ pub fn main() !void {
}

// run
// backend=stage1
// backend=llvm
// target=native
@@ -17,5 +17,5 @@ pub fn main() !void {
}

// run
// backend=stage1
// backend=llvm
// target=native
@@ -17,5 +17,5 @@ pub fn main() !void {
}

// run
// backend=stage1
// backend=llvm
// target=native
@@ -1,9 +1,11 @@
const std = @import("std");

pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace) noreturn {
_ = message;
_ = stack_trace;
std.process.exit(0);
if (std.mem.eql(u8, message, "integer overflow")) {
std.process.exit(0);
}
std.process.exit(1);
}

pub fn main() !void {

@@ -17,5 +19,5 @@ fn div(a: @Vector(4, i16), b: @Vector(4, i16)) @Vector(4, i16) {
return @divTrunc(a, b);
}
// run
// backend=stage1
// target=native
// backend=llvm
// target=native
@@ -1,9 +1,11 @@
const std = @import("std");

pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace) noreturn {
_ = message;
_ = stack_trace;
std.process.exit(0);
if (std.mem.eql(u8, message, "integer overflow")) {
std.process.exit(0);
}
std.process.exit(1);
}

pub fn main() !void {

@@ -15,5 +17,5 @@ fn div(a: i16, b: i16) i16 {
return @divTrunc(a, b);
}
// run
// backend=stage1
// target=native
// backend=llvm
// target=native
@@ -2,19 +2,19 @@ const std = @import("std");

pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace) noreturn {
_ = stack_trace;
if (std.mem.eql(u8, message, "sentinel mismatch")) {
if (std.mem.eql(u8, message, "sentinel mismatch: expected 1.20000004e+00, found 4.0e+00")) {
std.process.exit(0);
}
std.process.exit(1);
}

pub fn main() !void {
var buf: [4]f32 = undefined;
var buf: [4]f32 = .{ 1, 2, 3, 4 };
const slice = buf[0..3 :1.2];
_ = slice;
return error.TestFailed;
}

// run
// backend=stage1
// backend=llvm
// target=native
@@ -2,19 +2,19 @@ const std = @import("std");

pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace) noreturn {
_ = stack_trace;
if (std.mem.eql(u8, message, "sentinel mismatch")) {
if (std.mem.eql(u8, message, "sentinel mismatch: expected null, found i32@10")) {
std.process.exit(0);
}
std.process.exit(1);
}

pub fn main() !void {
var buf: [4]?*i32 = undefined;
var buf: [4]?*i32 = .{ @intToPtr(*i32, 4), @intToPtr(*i32, 8), @intToPtr(*i32, 12), @intToPtr(*i32, 16) };
const slice = buf[0..3 :null];
_ = slice;
return error.TestFailed;
}

// run
// backend=stage1
// backend=llvm
// target=native
@@ -2,18 +2,18 @@ const std = @import("std");

pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace) noreturn {
_ = stack_trace;
if (std.mem.eql(u8, message, "sentinel mismatch")) {
if (std.mem.eql(u8, message, "sentinel mismatch: expected 0, found 4")) {
std.process.exit(0);
}
std.process.exit(1);
}
pub fn main() !void {
var buf: [4]u8 = undefined;
var buf: [4]u8 = .{ 1, 2, 3, 4 };
const slice = buf[0..];
const slice2 = slice[0..3 :0];
_ = slice2;
return error.TestFailed;
}
// run
// backend=stage1
// backend=llvm
// target=native
@@ -0,0 +1,22 @@
const std = @import("std");

pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace) noreturn {
_ = stack_trace;
if (std.mem.eql(u8, message, "attempt to index out of bound: index 5, len 4")) {
std.process.exit(0);
}
std.process.exit(1);
}

pub fn main() !void {
var buf = [4]u8{ 'a', 'b', 'c', 0 };
const input: []u8 = &buf;
var len: usize = 4;
const slice = input[0..len :0];
_ = slice;
return error.TestFailed;
}

// run
// backend=llvm
// target=native
@@ -2,7 +2,7 @@ const std = @import("std");

pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace) noreturn {
_ = stack_trace;
if (std.mem.eql(u8, message, "index out of bounds")) {
if (std.mem.eql(u8, message, "attempt to index out of bound: index 5, len 4")) {
std.process.exit(0);
}
std.process.exit(1);

@@ -17,5 +17,5 @@ pub fn main() !void {
}

// run
// backend=stage1
// backend=llvm
// target=native
20 test/cases/safety/slicing null C pointer - runtime len.zig Normal file
@@ -0,0 +1,20 @@
const std = @import("std");

pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace) noreturn {
_ = stack_trace;
if (std.mem.eql(u8, message, "attempt to use null value")) {
std.process.exit(0);
}
std.process.exit(1);
}

pub fn main() !void {
var ptr: [*c]const u32 = null;
var len: usize = 3;
var slice = ptr[0..len];
_ = slice;
return error.TestFailed;
}
// run
// backend=llvm
// target=native
@@ -1,9 +1,11 @@
const std = @import("std");

pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace) noreturn {
_ = message;
_ = stack_trace;
std.process.exit(0);
if (std.mem.eql(u8, message, "attempt to use null value")) {
std.process.exit(0);
}
std.process.exit(1);
}

pub fn main() !void {

@@ -13,5 +15,5 @@ pub fn main() !void {
return error.TestFailed;
}
// run
// backend=stage1
// target=native
// backend=llvm
// target=native
@@ -2,7 +2,7 @@ const std = @import("std");

pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace) noreturn {
_ = stack_trace;
if (std.mem.eql(u8, message, "reached unreachable code")) {
if (std.mem.eql(u8, message, "switch on corrupt value")) {
std.process.exit(0);
}
std.process.exit(1);

@@ -10,17 +10,18 @@ pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace) noretur

const E = enum(u32) {
X = 1,
Y = 2,
};

pub fn main() !void {
var e: E = undefined;
@memset(@ptrCast([*]u8, &e), 0x55, @sizeOf(E));
switch (e) {
.X => @breakpoint(),
.X, .Y => @breakpoint(),
}
return error.TestFailed;
}

// run
// backend=stage1
// backend=llvm
// target=native
@@ -2,7 +2,7 @@ const std = @import("std");

pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace) noreturn {
_ = stack_trace;
if (std.mem.eql(u8, message, "reached unreachable code")) {
if (std.mem.eql(u8, message, "switch on corrupt value")) {
std.process.exit(0);
}
std.process.exit(1);

@@ -10,17 +10,18 @@ pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace) noretur

const U = union(enum(u32)) {
X: u8,
Y: i8,
};

pub fn main() !void {
var u: U = undefined;
@memset(@ptrCast([*]u8, &u), 0x55, @sizeOf(U));
switch (u) {
.X => @breakpoint(),
.X, .Y => @breakpoint(),
}
return error.TestFailed;
}

// run
// backend=stage1
// backend=llvm
// target=native