Mirror of https://github.com/ziglang/zig.git, synced 2026-02-13 04:48:20 +00:00
fixes in semantic analysis needed to support this feature
parent 2182d28cb0
commit 0707be8de8
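The feature referenced in the commit message is the change to slicing semantics visible in the test updates below: when the start and end indexes of a slice expression are comptime-known, the result is now a pointer to an array rather than a slice. A minimal sketch of that behavior (not part of the commit; the test name and values are illustrative, following the pattern of the updated behavior tests):

    const std = @import("std");
    const expect = std.testing.expect;

    test "comptime-known slice bounds give a pointer to an array" {
        var array = [_]u8{ 1, 2, 3, 4, 5 };
        // Both bounds comptime-known: the slice expression is a pointer to an array.
        comptime expect(@TypeOf(array[0..2]) == *[2]u8);
        // A runtime-known start index still produces a slice.
        var runtime_zero: usize = 0;
        comptime expect(@TypeOf(array[runtime_zero..]) == []u8);
    }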
@@ -560,7 +560,7 @@ pub fn span(ptr: var) Span(@TypeOf(ptr)) {
test "span" {
var array: [5]u16 = [_]u16{ 1, 2, 3, 4, 5 };
const ptr = array[0..2 :3].ptr;
const ptr = @as([*:3]u16, array[0..2 :3]);
testing.expect(eql(u16, span(ptr), &[_]u16{ 1, 2 }));
testing.expect(eql(u16, span(&array), &[_]u16{ 1, 2, 3, 4, 5 }));
}

@@ -602,7 +602,7 @@ test "len" {
testing.expect(len(&array) == 5);
testing.expect(len(array[0..3]) == 3);
array[2] = 0;
const ptr = array[0..2 :0].ptr;
const ptr = @as([*:0]u16, array[0..2 :0]);
testing.expect(len(ptr) == 2);
}
{
@@ -128,7 +128,7 @@ export fn stage2_translate_c(
args_end: [*]?[*]const u8,
resources_path: [*:0]const u8,
) Error {
var errors = @as([*]translate_c.ClangErrMsg, undefined)[0..0];
var errors: []translate_c.ClangErrMsg = &[0]translate_c.ClangErrMsg{};
out_ast.* = translate_c.translate(std.heap.c_allocator, args_begin, args_end, &errors, resources_path) catch |err| switch (err) {
error.SemanticAnalyzeFail => {
out_errors_ptr.* = errors.ptr;
@@ -231,6 +231,7 @@ enum ConstPtrSpecial {
// The pointer is a reference to a single object.
ConstPtrSpecialRef,
// The pointer points to an element in an underlying array.
// Not to be confused with ConstPtrSpecialSubArray.
ConstPtrSpecialBaseArray,
// The pointer points to a field in an underlying struct.
ConstPtrSpecialBaseStruct,

@@ -257,6 +258,10 @@ enum ConstPtrSpecial {
// types to be the same, so all optionals of pointer types use x_ptr
// instead of x_optional.
ConstPtrSpecialNull,
// The pointer points to a sub-array (not an individual element).
// Not to be confused with ConstPtrSpecialBaseArray. However, it uses the same
// union payload struct (base_array).
ConstPtrSpecialSubArray,
};

enum ConstPtrMut {
@@ -5280,6 +5280,11 @@ static uint32_t hash_const_val_ptr(ZigValue *const_val) {
hash_val += hash_ptr(const_val->data.x_ptr.data.base_array.array_val);
hash_val += hash_size(const_val->data.x_ptr.data.base_array.elem_index);
return hash_val;
case ConstPtrSpecialSubArray:
hash_val += (uint32_t)2643358777;
hash_val += hash_ptr(const_val->data.x_ptr.data.base_array.array_val);
hash_val += hash_size(const_val->data.x_ptr.data.base_array.elem_index);
return hash_val;
case ConstPtrSpecialBaseStruct:
hash_val += (uint32_t)3518317043;
hash_val += hash_ptr(const_val->data.x_ptr.data.base_struct.struct_val);

@@ -6746,6 +6751,7 @@ bool const_values_equal_ptr(ZigValue *a, ZigValue *b) {
return false;
return true;
case ConstPtrSpecialBaseArray:
case ConstPtrSpecialSubArray:
if (a->data.x_ptr.data.base_array.array_val != b->data.x_ptr.data.base_array.array_val) {
return false;
}

@@ -7003,6 +7009,7 @@ static void render_const_val_ptr(CodeGen *g, Buf *buf, ZigValue *const_val, ZigT
render_const_value(g, buf, const_ptr_pointee(nullptr, g, const_val, nullptr));
return;
case ConstPtrSpecialBaseArray:
case ConstPtrSpecialSubArray:
buf_appendf(buf, "*");
// TODO we need a source node for const_ptr_pointee because it can generate compile errors
render_const_value(g, buf, const_ptr_pointee(nullptr, g, const_val, nullptr));
@@ -5418,8 +5418,6 @@ static LLVMValueRef ir_render_slice(CodeGen *g, IrExecutableGen *executable, IrI
ZigType *array_type = array_ptr_type->data.pointer.child_type;
LLVMValueRef array_ptr = get_handle_value(g, array_ptr_ptr, array_type, array_ptr_type);
LLVMValueRef tmp_struct_ptr = ir_llvm_value(g, instruction->result_loc);
bool want_runtime_safety = instruction->safety_check_on && ir_want_runtime_safety(g, &instruction->base);
ZigType *result_type = instruction->base.value->type;

@@ -5472,6 +5470,8 @@ static LLVMValueRef ir_render_slice(CodeGen *g, IrExecutableGen *executable, IrI
}
}
if (!type_has_bits(g, array_type)) {
LLVMValueRef tmp_struct_ptr = ir_llvm_value(g, instruction->result_loc);
LLVMValueRef len_field_ptr = LLVMBuildStructGEP(g->builder, tmp_struct_ptr, slice_len_index, "");
// TODO if runtime safety is on, store 0xaaaaaaa in ptr field

@@ -5486,20 +5486,20 @@ static LLVMValueRef ir_render_slice(CodeGen *g, IrExecutableGen *executable, IrI
};
LLVMValueRef slice_start_ptr = LLVMBuildInBoundsGEP(g->builder, array_ptr, indices, 2, "");
if (result_type->id == ZigTypeIdPointer) {
ir_assert(instruction->result_loc == nullptr, &instruction->base);
LLVMTypeRef result_ptr_type = get_llvm_type(g, result_type);
LLVMValueRef bitcasted = LLVMBuildBitCast(g->builder, slice_start_ptr, result_ptr_type, "");
gen_store_untyped(g, bitcasted, tmp_struct_ptr, 0, false);
return slice_start_ptr;
return LLVMBuildBitCast(g->builder, slice_start_ptr, result_ptr_type, "");
} else {
LLVMValueRef tmp_struct_ptr = ir_llvm_value(g, instruction->result_loc);
LLVMValueRef ptr_field_ptr = LLVMBuildStructGEP(g->builder, tmp_struct_ptr, slice_ptr_index, "");
gen_store_untyped(g, slice_start_ptr, ptr_field_ptr, 0, false);
LLVMValueRef len_field_ptr = LLVMBuildStructGEP(g->builder, tmp_struct_ptr, slice_len_index, "");
LLVMValueRef len_value = LLVMBuildNSWSub(g->builder, end_val, start_val, "");
gen_store_untyped(g, len_value, len_field_ptr, 0, false);
}
return tmp_struct_ptr;
return tmp_struct_ptr;
}
} else if (array_type->id == ZigTypeIdPointer) {
assert(array_type->data.pointer.ptr_len != PtrLenSingle);
LLVMValueRef start_val = ir_llvm_value(g, instruction->start);

@@ -5515,12 +5515,12 @@ static LLVMValueRef ir_render_slice(CodeGen *g, IrExecutableGen *executable, IrI
LLVMValueRef slice_start_ptr = LLVMBuildInBoundsGEP(g->builder, array_ptr, &start_val, 1, "");
if (result_type->id == ZigTypeIdPointer) {
ir_assert(instruction->result_loc == nullptr, &instruction->base);
LLVMTypeRef result_ptr_type = get_llvm_type(g, result_type);
LLVMValueRef bitcasted = LLVMBuildBitCast(g->builder, slice_start_ptr, result_ptr_type, "");
gen_store_untyped(g, bitcasted, tmp_struct_ptr, 0, false);
return bitcasted;
return LLVMBuildBitCast(g->builder, slice_start_ptr, result_ptr_type, "");
}
LLVMValueRef tmp_struct_ptr = ir_llvm_value(g, instruction->result_loc);
if (type_has_bits(g, array_type)) {
size_t gen_ptr_index = result_type->data.structure.fields[slice_ptr_index]->gen_index;
LLVMValueRef ptr_field_ptr = LLVMBuildStructGEP(g->builder, tmp_struct_ptr, gen_ptr_index, "");

@@ -5537,9 +5537,6 @@ static LLVMValueRef ir_render_slice(CodeGen *g, IrExecutableGen *executable, IrI
assert(array_type->data.structure.special == StructSpecialSlice);
assert(LLVMGetTypeKind(LLVMTypeOf(array_ptr)) == LLVMPointerTypeKind);
assert(LLVMGetTypeKind(LLVMGetElementType(LLVMTypeOf(array_ptr))) == LLVMStructTypeKind);
if (result_type->id != ZigTypeIdPointer) {
assert(LLVMGetTypeKind(LLVMGetElementType(LLVMTypeOf(tmp_struct_ptr))) == LLVMStructTypeKind);
}
size_t ptr_index = array_type->data.structure.fields[slice_ptr_index]->gen_index;
assert(ptr_index != SIZE_MAX);

@@ -5578,11 +5575,11 @@ static LLVMValueRef ir_render_slice(CodeGen *g, IrExecutableGen *executable, IrI
LLVMValueRef slice_start_ptr = LLVMBuildInBoundsGEP(g->builder, src_ptr, &start_val, 1, "");
if (result_type->id == ZigTypeIdPointer) {
ir_assert(instruction->result_loc == nullptr, &instruction->base);
LLVMTypeRef result_ptr_type = get_llvm_type(g, result_type);
LLVMValueRef bitcasted = LLVMBuildBitCast(g->builder, slice_start_ptr, result_ptr_type, "");
gen_store_untyped(g, bitcasted, tmp_struct_ptr, 0, false);
return bitcasted;
return LLVMBuildBitCast(g->builder, slice_start_ptr, result_ptr_type, "");
} else {
LLVMValueRef tmp_struct_ptr = ir_llvm_value(g, instruction->result_loc);
LLVMValueRef ptr_field_ptr = LLVMBuildStructGEP(g->builder, tmp_struct_ptr, (unsigned)ptr_index, "");
gen_store_untyped(g, slice_start_ptr, ptr_field_ptr, 0, false);

@@ -6676,7 +6673,6 @@ static LLVMValueRef gen_const_ptr_array_recursive(CodeGen *g, ZigValue *array_co
};
return LLVMConstInBoundsGEP(base_ptr, indices, 2);
} else {
assert(parent->id == ConstParentIdScalar);
return base_ptr;
}
}

@@ -6904,6 +6900,7 @@ static LLVMValueRef gen_const_val_ptr(CodeGen *g, ZigValue *const_val, const cha
return const_val->llvm_value;
}
case ConstPtrSpecialBaseArray:
case ConstPtrSpecialSubArray:
{
ZigValue *array_const_val = const_val->data.x_ptr.data.base_array.array_val;
assert(array_const_val->type->id == ZigTypeIdArray);
src/ir.cpp
@@ -784,14 +784,32 @@ static ZigValue *const_ptr_pointee_unchecked_no_isf(CodeGen *g, ZigValue *const_
break;
case ConstPtrSpecialBaseArray: {
ZigValue *array_val = const_val->data.x_ptr.data.base_array.array_val;
if (const_val->data.x_ptr.data.base_array.elem_index == array_val->type->data.array.len) {
size_t elem_index = const_val->data.x_ptr.data.base_array.elem_index;
if (elem_index == array_val->type->data.array.len) {
result = array_val->type->data.array.sentinel;
} else {
expand_undef_array(g, array_val);
result = &array_val->data.x_array.data.s_none.elements[const_val->data.x_ptr.data.base_array.elem_index];
result = &array_val->data.x_array.data.s_none.elements[elem_index];
}
break;
}
case ConstPtrSpecialSubArray: {
ZigValue *array_val = const_val->data.x_ptr.data.base_array.array_val;
size_t elem_index = const_val->data.x_ptr.data.base_array.elem_index;
// TODO handle sentinel terminated arrays
expand_undef_array(g, array_val);
result = g->pass1_arena->create<ZigValue>();
result->special = array_val->special;
result->type = get_array_type(g, array_val->type->data.array.child_type,
array_val->type->data.array.len - elem_index, nullptr);
result->data.x_array.special = ConstArraySpecialNone;
result->data.x_array.data.s_none.elements = &array_val->data.x_array.data.s_none.elements[elem_index];
result->parent.id = ConstParentIdArray;
result->parent.data.p_array.array_val = array_val;
result->parent.data.p_array.elem_index = elem_index;
break;
}
case ConstPtrSpecialBaseStruct: {
ZigValue *struct_val = const_val->data.x_ptr.data.base_struct.struct_val;
expand_undef_struct(g, struct_val);

@@ -3727,8 +3745,8 @@ static IrInstGen *ir_build_slice_gen(IrAnalyze *ira, IrInst *source_instruction,
ir_ref_inst_gen(ptr, ira->new_irb.current_basic_block);
ir_ref_inst_gen(start, ira->new_irb.current_basic_block);
if (end) ir_ref_inst_gen(end, ira->new_irb.current_basic_block);
ir_ref_inst_gen(result_loc, ira->new_irb.current_basic_block);
if (end != nullptr) ir_ref_inst_gen(end, ira->new_irb.current_basic_block);
if (result_loc != nullptr) ir_ref_inst_gen(result_loc, ira->new_irb.current_basic_block);
return &instruction->base;
}
@@ -12672,40 +12690,63 @@ static IrInstGen *ir_resolve_ptr_of_array_to_slice(IrAnalyze *ira, IrInst* sourc
Error err;
assert(array_ptr->value->type->id == ZigTypeIdPointer);
assert(array_ptr->value->type->data.pointer.child_type->id == ZigTypeIdArray);
ZigType *array_type = array_ptr->value->type->data.pointer.child_type;
const size_t array_len = array_type->data.array.len;
// A zero-sized array can be casted regardless of the destination alignment, or
// whether the pointer is undefined, and the result is always comptime known.
if (array_len == 0) {
ZigValue *undef_array = ira->codegen->pass1_arena->create<ZigValue>();
undef_array->special = ConstValSpecialUndef;
undef_array->type = array_type;
IrInstGen *result = ir_const(ira, source_instr, wanted_type);
init_const_slice(ira->codegen, result->value, undef_array, 0, 0, false);
result->value->data.x_struct.fields[slice_ptr_index]->data.x_ptr.mut = ConstPtrMutComptimeConst;
result->value->type = wanted_type;
return result;
}
if ((err = type_resolve(ira->codegen, array_ptr->value->type, ResolveStatusAlignmentKnown))) {
return ira->codegen->invalid_inst_gen;
}
assert(array_ptr->value->type->data.pointer.child_type->id == ZigTypeIdArray);
const size_t array_len = array_ptr->value->type->data.pointer.child_type->data.array.len;
// A zero-sized array can always be casted irregardless of the destination
// alignment
if (array_len != 0) {
wanted_type = adjust_slice_align(ira->codegen, wanted_type,
get_ptr_align(ira->codegen, array_ptr->value->type));
}
wanted_type = adjust_slice_align(ira->codegen, wanted_type,
get_ptr_align(ira->codegen, array_ptr->value->type));
if (instr_is_comptime(array_ptr)) {
ZigValue *array_ptr_val = ir_resolve_const(ira, array_ptr, UndefBad);
if (array_ptr_val == nullptr)
return ira->codegen->invalid_inst_gen;
ZigValue *pointee = const_ptr_pointee(ira, ira->codegen, array_ptr_val, source_instr->source_node);
if (pointee == nullptr)
return ira->codegen->invalid_inst_gen;
if (pointee->special != ConstValSpecialRuntime) {
assert(array_ptr_val->type->id == ZigTypeIdPointer);
ZigType *array_type = array_ptr_val->type->data.pointer.child_type;
assert(is_slice(wanted_type));
bool is_const = wanted_type->data.structure.fields[slice_ptr_index]->type_entry->data.pointer.is_const;
ir_assert(is_slice(wanted_type), source_instr);
bool wanted_const = wanted_type->data.structure.fields[slice_ptr_index]->type_entry->data.pointer.is_const;
// Optimization to avoid creating unnecessary ZigValue in const_ptr_pointee
if (array_ptr_val->data.x_ptr.special == ConstPtrSpecialSubArray) {
ZigValue *array_val = array_ptr_val->data.x_ptr.data.base_array.array_val;
if (array_val->special != ConstValSpecialRuntime) {
IrInstGen *result = ir_const(ira, source_instr, wanted_type);
init_const_slice(ira->codegen, result->value, array_val,
array_ptr_val->data.x_ptr.data.base_array.elem_index,
array_type->data.array.len, wanted_const);
result->value->data.x_struct.fields[slice_ptr_index]->data.x_ptr.mut = array_ptr_val->data.x_ptr.mut;
result->value->type = wanted_type;
return result;
}
} else {
ZigValue *pointee = const_ptr_pointee(ira, ira->codegen, array_ptr_val, source_instr->source_node);
if (pointee == nullptr)
return ira->codegen->invalid_inst_gen;
if (pointee->special != ConstValSpecialRuntime) {
assert(array_ptr_val->type->id == ZigTypeIdPointer);
IrInstGen *result = ir_const(ira, source_instr, wanted_type);
init_const_slice(ira->codegen, result->value, pointee, 0, array_type->data.array.len, is_const);
result->value->data.x_struct.fields[slice_ptr_index]->data.x_ptr.mut = array_ptr_val->data.x_ptr.mut;
result->value->type = wanted_type;
return result;
IrInstGen *result = ir_const(ira, source_instr, wanted_type);
init_const_slice(ira->codegen, result->value, pointee, 0, array_type->data.array.len, wanted_const);
result->value->data.x_struct.fields[slice_ptr_index]->data.x_ptr.mut = array_ptr_val->data.x_ptr.mut;
result->value->type = wanted_type;
return result;
}
}
}
@@ -19931,6 +19972,7 @@ static Error ir_read_const_ptr(IrAnalyze *ira, CodeGen *codegen, AstNode *source
dst_size, buf_ptr(&pointee->type->name), src_size));
return ErrorSemanticAnalyzeFail;
}
case ConstPtrSpecialSubArray:
case ConstPtrSpecialBaseArray: {
ZigValue *array_val = ptr_val->data.x_ptr.data.base_array.array_val;
assert(array_val->type->id == ZigTypeIdArray);

@@ -20814,6 +20856,7 @@ static IrInstGen *ir_analyze_instruction_elem_ptr(IrAnalyze *ira, IrInstSrcElemP
}
break;
case ConstPtrSpecialBaseArray:
case ConstPtrSpecialSubArray:
{
size_t offset = array_ptr_val->data.x_ptr.data.base_array.elem_index;
new_index = offset + index;

@@ -20884,6 +20927,7 @@ static IrInstGen *ir_analyze_instruction_elem_ptr(IrAnalyze *ira, IrInstSrcElemP
out_val->data.x_ptr.special = ConstPtrSpecialRef;
out_val->data.x_ptr.data.ref.pointee = ptr_field->data.x_ptr.data.ref.pointee;
break;
case ConstPtrSpecialSubArray:
case ConstPtrSpecialBaseArray:
{
size_t offset = ptr_field->data.x_ptr.data.base_array.elem_index;

@@ -25894,6 +25938,7 @@ static IrInstGen *ir_analyze_instruction_memset(IrAnalyze *ira, IrInstSrcMemset
start = 0;
bound_end = 1;
break;
case ConstPtrSpecialSubArray:
case ConstPtrSpecialBaseArray:
{
ZigValue *array_val = dest_ptr_val->data.x_ptr.data.base_array.array_val;

@@ -26027,6 +26072,7 @@ static IrInstGen *ir_analyze_instruction_memcpy(IrAnalyze *ira, IrInstSrcMemcpy
dest_start = 0;
dest_end = 1;
break;
case ConstPtrSpecialSubArray:
case ConstPtrSpecialBaseArray:
{
ZigValue *array_val = dest_ptr_val->data.x_ptr.data.base_array.array_val;

@@ -26070,6 +26116,7 @@ static IrInstGen *ir_analyze_instruction_memcpy(IrAnalyze *ira, IrInstSrcMemcpy
src_start = 0;
src_end = 1;
break;
case ConstPtrSpecialSubArray:
case ConstPtrSpecialBaseArray:
{
ZigValue *array_val = src_ptr_val->data.x_ptr.data.base_array.array_val;
@@ -26219,7 +26266,8 @@ static IrInstGen *ir_analyze_instruction_slice(IrAnalyze *ira, IrInstSrcSlice *i
ZigType *return_type;
if (value_is_comptime(casted_start->value) &&
((end != nullptr && value_is_comptime(end->value)) || array_type->id == ZigTypeIdArray ))
((end != nullptr && value_is_comptime(end->value)) ||
(end == nullptr && array_type->id == ZigTypeIdArray)))
{
ZigValue *start_val = ir_resolve_const(ira, casted_start, UndefBad);
if (!start_val)

@@ -26244,13 +26292,16 @@ static IrInstGen *ir_analyze_instruction_slice(IrAnalyze *ira, IrInstSrcSlice *i
return ira->codegen->invalid_inst_gen;
}
// TODO in the case of non-zero start index, the byte alignment should be smarter here.
// we should be able to use the same logic as indexing.
uint32_t ptr_byte_alignment = ((end_scalar - start_scalar != 0) && start_scalar == 0) ?
non_sentinel_slice_ptr_type->data.pointer.explicit_alignment : 0;
ZigType *return_array_type = get_array_type(ira->codegen, elem_type, end_scalar - start_scalar,
array_sentinel);
return_type = get_pointer_to_type_extra(ira->codegen, return_array_type,
non_sentinel_slice_ptr_type->data.pointer.is_const,
non_sentinel_slice_ptr_type->data.pointer.is_volatile,
PtrLenSingle,
0, 0, 0, false);
PtrLenSingle, ptr_byte_alignment, 0, 0, false);
} else if (sentinel_val != nullptr) {
ZigType *slice_ptr_type = adjust_ptr_sentinel(ira->codegen, non_sentinel_slice_ptr_type, sentinel_val);
return_type = get_slice_type(ira->codegen, slice_ptr_type);

@@ -26283,6 +26334,10 @@ static IrInstGen *ir_analyze_instruction_slice(IrAnalyze *ira, IrInstSrcSlice *i
abs_offset = 0;
rel_end = SIZE_MAX;
ptr_is_undef = true;
} else if (parent_ptr->data.x_ptr.special == ConstPtrSpecialHardCodedAddr) {
array_val = nullptr;
abs_offset = 0;
rel_end = SIZE_MAX;
} else {
array_val = const_ptr_pointee(ira, ira->codegen, parent_ptr, instruction->base.base.source_node);
if (array_val == nullptr)

@@ -26325,6 +26380,7 @@ static IrInstGen *ir_analyze_instruction_slice(IrAnalyze *ira, IrInstSrcSlice *i
rel_end = 1;
}
break;
case ConstPtrSpecialSubArray:
case ConstPtrSpecialBaseArray:
array_val = parent_ptr->data.x_ptr.data.base_array.array_val;
abs_offset = parent_ptr->data.x_ptr.data.base_array.elem_index;

@@ -26375,6 +26431,7 @@ static IrInstGen *ir_analyze_instruction_slice(IrAnalyze *ira, IrInstSrcSlice *i
abs_offset = SIZE_MAX;
rel_end = 1;
break;
case ConstPtrSpecialSubArray:
case ConstPtrSpecialBaseArray:
array_val = parent_ptr->data.x_ptr.data.base_array.array_val;
abs_offset = parent_ptr->data.x_ptr.data.base_array.elem_index;

@@ -26454,6 +26511,9 @@ static IrInstGen *ir_analyze_instruction_slice(IrAnalyze *ira, IrInstSrcSlice *i
if (array_val) {
size_t index = abs_offset + start_scalar;
init_const_ptr_array(ira->codegen, ptr_val, array_val, index, return_type_is_const, PtrLenUnknown);
if (return_type->id == ZigTypeIdPointer) {
ptr_val->data.x_ptr.special = ConstPtrSpecialSubArray;
}
if (array_type->id == ZigTypeIdArray) {
ptr_val->data.x_ptr.mut = ptr_ptr->value->data.x_ptr.mut;
} else if (is_slice(array_type)) {

@@ -26463,7 +26523,7 @@ static IrInstGen *ir_analyze_instruction_slice(IrAnalyze *ira, IrInstSrcSlice *i
}
} else if (ptr_is_undef) {
ptr_val->type = get_pointer_to_type(ira->codegen, parent_ptr->type->data.pointer.child_type,
return_type_is_const);
return_type_is_const);
ptr_val->special = ConstValSpecialUndef;
} else switch (parent_ptr->data.x_ptr.special) {
case ConstPtrSpecialInvalid:

@@ -26473,6 +26533,7 @@ static IrInstGen *ir_analyze_instruction_slice(IrAnalyze *ira, IrInstSrcSlice *i
init_const_ptr_ref(ira->codegen, ptr_val, parent_ptr->data.x_ptr.data.ref.pointee,
return_type_is_const);
break;
case ConstPtrSpecialSubArray:
case ConstPtrSpecialBaseArray:
zig_unreachable();
case ConstPtrSpecialBaseStruct:

@@ -26500,20 +26561,6 @@ static IrInstGen *ir_analyze_instruction_slice(IrAnalyze *ira, IrInstSrcSlice *i
return result;
}
IrInstGen *result_loc = ir_resolve_result(ira, &instruction->base.base, instruction->result_loc,
return_type, nullptr, true, true);
if (result_loc != nullptr) {
if (type_is_invalid(result_loc->value->type) || result_loc->value->type->id == ZigTypeIdUnreachable) {
return result_loc;
}
IrInstGen *dummy_value = ir_const(ira, &instruction->base.base, return_type);
dummy_value->value->special = ConstValSpecialRuntime;
IrInstGen *dummy_result = ir_implicit_cast2(ira, &instruction->base.base,
dummy_value, result_loc->value->type->data.pointer.child_type);
if (type_is_invalid(dummy_result->value->type))
return ira->codegen->invalid_inst_gen;
}
if (generate_non_null_assert) {
IrInstGen *ptr_val = ir_get_deref(ira, &instruction->base.base, ptr_ptr, nullptr);

@@ -26523,6 +26570,24 @@ static IrInstGen *ir_analyze_instruction_slice(IrAnalyze *ira, IrInstSrcSlice *i
ir_build_assert_non_null(ira, &instruction->base.base, ptr_val);
}
IrInstGen *result_loc = nullptr;
if (return_type->id != ZigTypeIdPointer) {
result_loc = ir_resolve_result(ira, &instruction->base.base, instruction->result_loc,
return_type, nullptr, true, true);
if (result_loc != nullptr) {
if (type_is_invalid(result_loc->value->type) || result_loc->value->type->id == ZigTypeIdUnreachable) {
return result_loc;
}
IrInstGen *dummy_value = ir_const(ira, &instruction->base.base, return_type);
dummy_value->value->special = ConstValSpecialRuntime;
IrInstGen *dummy_result = ir_implicit_cast2(ira, &instruction->base.base,
dummy_value, result_loc->value->type->data.pointer.child_type);
if (type_is_invalid(dummy_result->value->type))
return ira->codegen->invalid_inst_gen;
}
}
return ir_build_slice_gen(ira, &instruction->base.base, return_type, ptr_ptr,
casted_start, end, instruction->safety_check_on, result_loc);
}
@@ -171,18 +171,19 @@ test "runtime known array index has best alignment possible" {
// because pointer is align 2 and u32 align % 2 == 0 we can assume align 2
var smaller align(2) = [_]u32{ 1, 2, 3, 4 };
comptime expect(@TypeOf(smaller[0..]) == []align(2) u32);
comptime expect(@TypeOf(smaller[0..].ptr) == [*]align(2) u32);
testIndex(smaller[0..].ptr, 0, *align(2) u32);
testIndex(smaller[0..].ptr, 1, *align(2) u32);
testIndex(smaller[0..].ptr, 2, *align(2) u32);
testIndex(smaller[0..].ptr, 3, *align(2) u32);
var runtime_zero: usize = 0;
comptime expect(@TypeOf(smaller[runtime_zero..]) == []align(2) u32);
comptime expect(@TypeOf(smaller[runtime_zero..].ptr) == [*]align(2) u32);
testIndex(smaller[runtime_zero..].ptr, 0, *align(2) u32);
testIndex(smaller[runtime_zero..].ptr, 1, *align(2) u32);
testIndex(smaller[runtime_zero..].ptr, 2, *align(2) u32);
testIndex(smaller[runtime_zero..].ptr, 3, *align(2) u32);
// has to use ABI alignment because index known at runtime only
testIndex2(array[0..].ptr, 0, *u8);
testIndex2(array[0..].ptr, 1, *u8);
testIndex2(array[0..].ptr, 2, *u8);
testIndex2(array[0..].ptr, 3, *u8);
testIndex2(array[runtime_zero..].ptr, 0, *u8);
testIndex2(array[runtime_zero..].ptr, 1, *u8);
testIndex2(array[runtime_zero..].ptr, 2, *u8);
testIndex2(array[runtime_zero..].ptr, 3, *u8);
}
fn testIndex(smaller: [*]align(2) u32, index: usize, comptime T: type) void {
comptime expect(@TypeOf(&smaller[index]) == T);

@@ -435,7 +435,8 @@ fn incrementVoidPtrValue(value: ?*c_void) void {
test "implicit cast from [*]T to ?*c_void" {
var a = [_]u8{ 3, 2, 1 };
incrementVoidPtrArray(a[0..].ptr, 3);
var runtime_zero: usize = 0;
incrementVoidPtrArray(a[runtime_zero..].ptr, 3);
expect(std.mem.eql(u8, &a, &[_]u8{ 4, 3, 2 }));
}

@@ -524,7 +524,7 @@ test "comptime slice of slice preserves comptime var" {
test "comptime slice of pointer preserves comptime var" {
comptime {
var buff: [10]u8 = undefined;
var a = buff[0..].ptr;
var a = @ptrCast([*]u8, &buff);
a[0..1][0] = 1;
expect(buff[0..][0..][0] == 1);
}
@@ -102,8 +102,8 @@ test "memcpy and memset intrinsics" {
var foo: [20]u8 = undefined;
var bar: [20]u8 = undefined;
@memset(foo[0..].ptr, 'A', foo.len);
@memcpy(bar[0..].ptr, foo[0..].ptr, bar.len);
@memset(&foo, 'A', foo.len);
@memcpy(&bar, &foo, bar.len);
if (bar[11] != 'A') unreachable;
}

@@ -565,12 +565,16 @@ test "volatile load and store" {
expect(ptr.* == 1235);
}
test "slice string literal has type []const u8" {
test "slice string literal has correct type" {
comptime {
expect(@TypeOf("aoeu"[0..]) == []const u8);
expect(@TypeOf("aoeu"[0..]) == *const [4:0]u8);
const array = [_]i32{ 1, 2, 3, 4 };
expect(@TypeOf(array[0..]) == []const i32);
expect(@TypeOf(array[0..]) == *const [4]i32);
}
var runtime_zero: usize = 0;
expect(@TypeOf("aoeu"[runtime_zero..]) == [:0]const u8);
const array = [_]i32{ 1, 2, 3, 4 };
expect(@TypeOf(array[runtime_zero..]) == []const u8);
}
test "pointer child field" {
@@ -13,7 +13,7 @@ fn testReinterpretBytesAsInteger() void {
builtin.Endian.Little => 0xab785634,
builtin.Endian.Big => 0x345678ab,
};
expect(@ptrCast(*align(1) const u32, bytes[1..5].ptr).* == expected);
expect(@ptrCast(*align(1) const u32, bytes[1..5]).* == expected);
}
test "reinterpret bytes of an array into an extern struct" {

@@ -7,7 +7,7 @@ const mem = std.mem;
const x = @intToPtr([*]i32, 0x1000)[0..0x500];
const y = x[0x100..];
test "compile time slice of pointer to hard coded address" {
expect(@ptrToInt(x.ptr) == 0x1000);
expect(@ptrToInt(x) == 0x1000);
expect(x.len == 0x500);
expect(@ptrToInt(y.ptr) == 0x1100);

@@ -47,7 +47,9 @@ test "C pointer slice access" {
var buf: [10]u32 = [1]u32{42} ** 10;
const c_ptr = @ptrCast([*c]const u32, &buf);
comptime expectEqual([]const u32, @TypeOf(c_ptr[0..1]));
var runtime_zero: usize = 0;
comptime expectEqual([]const u32, @TypeOf(c_ptr[runtime_zero..1]));
comptime expectEqual(*const [1]u32, @TypeOf(c_ptr[0..1]));
for (c_ptr[0..5]) |*cl| {
expectEqual(@as(u32, 42), cl.*);
@@ -107,7 +109,9 @@ test "obtaining a null terminated slice" {
const ptr2 = buf[0..runtime_len :0];
// ptr2 is a null-terminated slice
comptime expect(@TypeOf(ptr2) == [:0]u8);
comptime expect(@TypeOf(ptr2[0..2]) == []u8);
comptime expect(@TypeOf(ptr2[0..2]) == *[2]u8);
var runtime_zero: usize = 0;
comptime expect(@TypeOf(ptr2[runtime_zero..2]) == []u8);
}
test "empty array to slice" {

@@ -409,8 +409,8 @@ const Bitfields = packed struct {
test "native bit field understands endianness" {
var all: u64 = 0x7765443322221111;
var bytes: [8]u8 = undefined;
@memcpy(bytes[0..].ptr, @ptrCast([*]u8, &all), 8);
var bitfields = @ptrCast(*Bitfields, bytes[0..].ptr).*;
@memcpy(&bytes, @ptrCast([*]u8, &all), 8);
var bitfields = @ptrCast(*Bitfields, &bytes).*;
expect(bitfields.f1 == 0x1111);
expect(bitfields.f2 == 0x2222);