Mirror of https://github.com/ziglang/zig.git, synced 2025-12-06 06:13:07 +00:00
replace most aggregate interns in x86_64/CodeGen
commit 76d2782149
parent 79e5c138c6
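The change swaps direct `pt.intern(.{ .aggregate = ... })` calls for two helpers, `pt.aggregateValue` (explicit elements) and `pt.aggregateSplatValue` (repeated element), which also hand back a Value directly rather than an InternPool.Index, so the `.fromInterned(...)` wrapping at the call sites disappears. The helpers' definitions are not part of this diff; the sketch below is only what the call sites imply, and the parameter types (`Zcu.PerThread`, `Type`, `Value`) are assumptions.

    // Minimal sketch, inferred from the call sites in this diff; the real
    // implementations live elsewhere in the compiler and may differ.
    // `pt` is assumed to be the per-thread interning context (Zcu.PerThread).

    /// Interns an aggregate value from explicit elements and wraps it as a Value.
    fn aggregateValue(pt: Zcu.PerThread, ty: Type, elems: []const InternPool.Index) !Value {
        return Value.fromInterned(try pt.intern(.{ .aggregate = .{
            .ty = ty.toIntern(),
            .storage = .{ .elems = elems },
        } }));
    }

    /// Interns an aggregate value that repeats a single element (a splat).
    fn aggregateSplatValue(pt: Zcu.PerThread, ty: Type, elem: Value) !Value {
        return Value.fromInterned(try pt.intern(.{ .aggregate = .{
            .ty = ty.toIntern(),
            .storage = .{ .repeated_elem = elem.toIntern() },
        } }));
    }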
@@ -170058,12 +170058,9 @@ fn airTrunc(self: *CodeGen, inst: Air.Inst.Index) !void {
 });
 const splat_abi_size: u32 = @intCast(splat_ty.abiSize(zcu));

-const splat_val = try pt.intern(.{ .aggregate = .{
-    .ty = splat_ty.ip_index,
-    .storage = .{ .repeated_elem = mask_val.ip_index },
-} });
+const splat_val = try pt.aggregateSplatValue(splat_ty, mask_val);

-const splat_mcv = try self.lowerValue(.fromInterned(splat_val));
+const splat_mcv = try self.lowerValue(splat_val);
 const splat_addr_mcv: MCValue = switch (splat_mcv) {
     .memory, .indirect, .load_frame => splat_mcv.address(),
     else => .{ .register = try self.copyToTmpRegister(.usize, splat_mcv.address()) },
@@ -171693,12 +171690,12 @@ fn airShlShrBinOp(self: *CodeGen, inst: Air.Inst.Index) !void {
 defer self.register_manager.unlockReg(shift_lock);

 const mask_ty = try pt.vectorType(.{ .len = 16, .child = .u8_type });
-const mask_mcv = try self.lowerValue(.fromInterned(try pt.intern(.{ .aggregate = .{
-    .ty = mask_ty.toIntern(),
-    .storage = .{ .elems = &([1]InternPool.Index{
+const mask_mcv = try self.lowerValue(try pt.aggregateValue(
+    mask_ty,
+    &([1]InternPool.Index{
         (try rhs_ty.childType(zcu).maxIntScalar(pt, .u8)).toIntern(),
-    } ++ [1]InternPool.Index{.zero_u8} ** 15) },
-} })));
+    } ++ [1]InternPool.Index{.zero_u8} ** 15),
+));
 const mask_addr_reg = try self.copyToTmpRegister(.usize, mask_mcv.address());
 const mask_addr_lock = self.register_manager.lockRegAssumeUnused(mask_addr_reg);
 defer self.register_manager.unlockReg(mask_addr_lock);
@@ -181139,10 +181136,7 @@ fn genSetReg(
     .child = .u8_type,
 });
 try self.genSetReg(dst_reg, full_ty, try self.lowerValue(
-    .fromInterned(try pt.intern(.{ .aggregate = .{
-        .ty = full_ty.toIntern(),
-        .storage = .{ .repeated_elem = (try pt.intValue(.u8, 0xaa)).toIntern() },
-    } })),
+    try pt.aggregateSplatValue(full_ty, try pt.intValue(.u8, 0xaa)),
 ), opts);
 },
 .x87 => try self.genSetReg(dst_reg, .f80, try self.lowerValue(
@@ -183565,10 +183559,7 @@ fn airSelect(self: *CodeGen, inst: Air.Inst.Index) !void {
     mask_elem_ty,
     @as(u8, 1) << @truncate(bit),
 )).toIntern();
-const mask_mcv = try self.lowerValue(.fromInterned(try pt.intern(.{ .aggregate = .{
-    .ty = mask_ty.toIntern(),
-    .storage = .{ .elems = mask_elems },
-} })));
+const mask_mcv = try self.lowerValue(try pt.aggregateValue(mask_ty, mask_elems));
 const mask_mem: Memory = .{
     .base = .{ .reg = try self.copyToTmpRegister(.usize, mask_mcv.address()) },
     .mod = .{ .rm = .{ .size = self.memSize(ty) } },
@@ -184296,10 +184287,9 @@ fn airShuffle(self: *CodeGen, inst: Air.Inst.Index) !void {
 else
     try select_mask_elem_ty.minIntScalar(pt, select_mask_elem_ty)).toIntern();
 }
-const select_mask_mcv = try self.lowerValue(.fromInterned(try pt.intern(.{ .aggregate = .{
-    .ty = select_mask_ty.toIntern(),
-    .storage = .{ .elems = select_mask_elems[0..mask_elems.len] },
-} })));
+const select_mask_mcv = try self.lowerValue(
+    try pt.aggregateValue(select_mask_ty, select_mask_elems[0..mask_elems.len]),
+);

 if (self.hasFeature(.sse4_1)) {
     const mir_tag: Mir.Inst.FixedTag = .{
@@ -184441,10 +184431,9 @@ fn airShuffle(self: *CodeGen, inst: Air.Inst.Index) !void {
     })).toIntern();
 }
 const lhs_mask_ty = try pt.vectorType(.{ .len = max_abi_size, .child = .u8_type });
-const lhs_mask_mcv = try self.lowerValue(.fromInterned(try pt.intern(.{ .aggregate = .{
-    .ty = lhs_mask_ty.toIntern(),
-    .storage = .{ .elems = lhs_mask_elems[0..max_abi_size] },
-} })));
+const lhs_mask_mcv = try self.lowerValue(
+    try pt.aggregateValue(lhs_mask_ty, lhs_mask_elems[0..max_abi_size]),
+);
 const lhs_mask_mem: Memory = .{
     .base = .{ .reg = try self.copyToTmpRegister(.usize, lhs_mask_mcv.address()) },
     .mod = .{ .rm = .{ .size = .fromSize(@max(max_abi_size, 16)) } },
@@ -184472,10 +184461,9 @@ fn airShuffle(self: *CodeGen, inst: Air.Inst.Index) !void {
     })).toIntern();
 }
 const rhs_mask_ty = try pt.vectorType(.{ .len = max_abi_size, .child = .u8_type });
-const rhs_mask_mcv = try self.lowerValue(.fromInterned(try pt.intern(.{ .aggregate = .{
-    .ty = rhs_mask_ty.toIntern(),
-    .storage = .{ .elems = rhs_mask_elems[0..max_abi_size] },
-} })));
+const rhs_mask_mcv = try self.lowerValue(
+    try pt.aggregateValue(rhs_mask_ty, rhs_mask_elems[0..max_abi_size]),
+);
 const rhs_mask_mem: Memory = .{
     .base = .{ .reg = try self.copyToTmpRegister(.usize, rhs_mask_mcv.address()) },
     .mod = .{ .rm = .{ .size = .fromSize(@max(max_abi_size, 16)) } },
@@ -192924,36 +192912,30 @@ const Select = struct {
     break :res_scalar .{ res_scalar_ty, try pt.intValue_big(res_scalar_ty, res_big_int.toConst()) };
 },
 };
-const res_val: Value = if (res_vector_len) |len| .fromInterned(try pt.intern(.{ .aggregate = .{
-    .ty = (try pt.vectorType(.{
-        .len = len,
-        .child = res_scalar_ty.toIntern(),
-    })).toIntern(),
-    .storage = .{ .repeated_elem = res_scalar_val.toIntern() },
-} })) else res_scalar_val;
+const res_val = if (res_vector_len) |len| try pt.aggregateSplatValue(try pt.vectorType(.{
+    .len = len,
+    .child = res_scalar_ty.toIntern(),
+}), res_scalar_val) else res_scalar_val;
 return .{ try cg.tempMemFromValue(res_val), true };
 },
-.f64_0x1p52_0x1p84_mem => .{ try cg.tempMemFromValue(.fromInterned(try pt.intern(.{ .aggregate = .{
-    .ty = (try pt.vectorType(.{ .len = 2, .child = .f64_type })).toIntern(),
-    .storage = .{ .elems = &.{
+.f64_0x1p52_0x1p84_mem => .{ try cg.tempMemFromValue(
+    try pt.aggregateValue(try pt.vectorType(.{ .len = 2, .child = .f64_type }), &.{
         (try pt.floatValue(.f64, @as(f64, 0x1p52))).toIntern(),
         (try pt.floatValue(.f64, @as(f64, 0x1p84))).toIntern(),
-    } },
-} }))), true },
-.u32_0x1p52_hi_0x1p84_hi_0_0_mem => .{ try cg.tempMemFromValue(.fromInterned(try pt.intern(.{ .aggregate = .{
-    .ty = (try pt.vectorType(.{ .len = 4, .child = .u32_type })).toIntern(),
-    .storage = .{ .elems = &(.{
+    }),
+), true },
+.u32_0x1p52_hi_0x1p84_hi_0_0_mem => .{ try cg.tempMemFromValue(
+    try pt.aggregateValue(try pt.vectorType(.{ .len = 4, .child = .u32_type }), &(.{
         (try pt.intValue(.u32, @as(u64, @bitCast(@as(f64, 0x1p52))) >> 32)).toIntern(),
         (try pt.intValue(.u32, @as(u64, @bitCast(@as(f64, 0x1p84))) >> 32)).toIntern(),
-    } ++ .{(try pt.intValue(.u32, 0)).toIntern()} ** 2) },
-} }))), true },
-.f32_0_0x1p64_mem => .{ try cg.tempMemFromValue(.fromInterned(try pt.intern(.{ .aggregate = .{
-    .ty = (try pt.vectorType(.{ .len = 2, .child = .f32_type })).toIntern(),
-    .storage = .{ .elems = &.{
+    } ++ .{(try pt.intValue(.u32, 0)).toIntern()} ** 2)),
+), true },
+.f32_0_0x1p64_mem => .{ try cg.tempMemFromValue(
+    try pt.aggregateValue(try pt.vectorType(.{ .len = 2, .child = .f32_type }), &.{
         (try pt.floatValue(.f32, @as(f32, 0))).toIntern(),
         (try pt.floatValue(.f32, @as(f32, 0x1p64))).toIntern(),
-    } },
-} }))), true },
+    }),
+), true },
 .pshufb_splat_mem => |splat_spec| {
     const zcu = pt.zcu;
     assert(spec.type.isVector(zcu) and spec.type.childType(zcu).toIntern() == .u8_type);
@@ -193110,13 +193092,10 @@ const Select = struct {
 const mem_size = cg.unalignedSize(spec.type);
 return .{ try cg.tempMemFromAlignedValue(
     if (mem_size < 16) .fromByteUnits(mem_size) else .none,
-    .fromInterned(try pt.intern(.{ .aggregate = .{
-        .ty = if (mem_size < 16)
-            (try pt.arrayType(.{ .len = elems.len, .child = elem_ty.toIntern() })).toIntern()
-        else
-            spec.type.toIntern(),
-        .storage = .{ .elems = elems },
-    } })),
+    try pt.aggregateValue(if (mem_size < 16) try pt.arrayType(.{
+        .len = elems.len,
+        .child = elem_ty.toIntern(),
+    }) else spec.type, elems),
 ), true };
 },
 .splat_float_mem => |splat_spec| {
@@ -193133,10 +193112,7 @@ const Select = struct {
     .zero => 0.0,
 }))).toIntern());
 @memset(elems[inside_len..], (try pt.floatValue(elem_ty, splat_spec.outside)).toIntern());
-return .{ try cg.tempMemFromValue(.fromInterned(try pt.intern(.{ .aggregate = .{
-    .ty = spec.type.toIntern(),
-    .storage = .{ .elems = elems },
-} }))), true };
+return .{ try cg.tempMemFromValue(try pt.aggregateValue(spec.type, elems)), true };
 },
 .frame => |frame_index| .{ try cg.tempInit(spec.type, .{ .load_frame = .{
     .index = frame_index,