std.Target: Introduce Cpu convenience functions for feature tests.

Before:

* std.Target.arm.featureSetHas(target.cpu.features, .has_v7)
* std.Target.x86.featureSetHasAny(target.cpu.features, .{ .sse, .avx, .cmov })
* std.Target.wasm.featureSetHasAll(target.cpu.features, .{ .atomics, .bulk_memory })

After:

* target.cpu.has(.arm, .has_v7)
* target.cpu.hasAny(.x86, &.{ .sse, .avx, .cmov })
* target.cpu.hasAll(.wasm, &.{ .atomics, .bulk_memory })
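
In other words, the family tag is now passed explicitly, and querying a family other than the target's own is not an error. A minimal sketch of the semantics (a hypothetical x86_64 build is assumed):

const builtin = @import("builtin");

// True or false depending on the actual CPU:
const has_avx2 = builtin.cpu.has(.x86, .avx2);
// Also compiles, but is always false here, since .aarch64 != cpu.arch.family():
const has_neon = builtin.cpu.has(.aarch64, .neon);
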
Alex Rønne Petersen 2025-02-18 05:25:36 +01:00
parent 14873f9a34
commit 9d534790eb
53 changed files with 373 additions and 393 deletions


@ -162,7 +162,7 @@ pub fn ignoreNonZeroSizedBitfieldTypeAlignment(target: std.Target) bool {
switch (target.cpu.arch) {
.avr => return true,
.arm => {
if (std.Target.arm.featureSetHas(target.cpu.features, .has_v7)) {
if (target.cpu.has(.arm, .has_v7)) {
switch (target.os.tag) {
.ios => return true,
else => return false,
@ -185,7 +185,7 @@ pub fn minZeroWidthBitfieldAlignment(target: std.Target) ?u29 {
switch (target.cpu.arch) {
.avr => return 8,
.arm => {
if (std.Target.arm.featureSetHas(target.cpu.features, .has_v7)) {
if (target.cpu.has(.arm, .has_v7)) {
switch (target.os.tag) {
.ios => return 32,
else => return null,
@ -203,7 +203,7 @@ pub fn unnamedFieldAffectsAlignment(target: std.Target) bool {
return true;
},
.armeb => {
if (std.Target.arm.featureSetHas(target.cpu.features, .has_v7)) {
if (target.cpu.has(.arm, .has_v7)) {
if (std.Target.Abi.default(target.cpu.arch, target.os.tag) == .eabi) return true;
}
},
@ -230,7 +230,7 @@ pub fn defaultAlignment(target: std.Target) u29 {
switch (target.cpu.arch) {
.avr => return 1,
.arm => if (target.abi.isAndroid() or target.os.tag == .ios) return 16 else return 8,
.sparc => if (std.Target.sparc.featureSetHas(target.cpu.features, .v9)) return 16 else return 8,
.sparc => if (target.cpu.has(.sparc, .v9)) return 16 else return 8,
.mips, .mipsel => switch (target.abi) {
.none, .gnuabi64 => return 16,
else => return 8,
@ -268,7 +268,7 @@ pub fn systemCompiler(target: std.Target) LangOpts.Compiler {
pub fn hasFloat128(target: std.Target) bool {
if (target.cpu.arch.isWasm()) return true;
if (target.os.tag.isDarwin()) return false;
if (target.cpu.arch.isPowerPC()) return std.Target.powerpc.featureSetHas(target.cpu.features, .float128);
if (target.cpu.arch.isPowerPC()) return target.cpu.has(.powerpc, .float128);
return switch (target.os.tag) {
.dragonfly,
.haiku,
@ -334,7 +334,7 @@ pub const FPSemantics = enum {
.spirv32,
.spirv64,
=> return .IEEEHalf,
.x86, .x86_64 => if (std.Target.x86.featureSetHas(target.cpu.features, .sse2)) return .IEEEHalf,
.x86, .x86_64 => if (target.cpu.has(.x86, .sse2)) return .IEEEHalf,
else => {},
}
return null;
@ -399,7 +399,7 @@ pub fn defaultFpEvalMethod(target: std.Target) LangOpts.FPEvalMethod {
return .double;
}
}
if (std.Target.x86.featureSetHas(target.cpu.features, .sse)) {
if (target.cpu.has(.x86, .sse)) {
return .source;
}
return .extended;
@ -765,7 +765,7 @@ test "target size/align tests" {
.specifier = .char,
};
try std.testing.expectEqual(true, std.Target.arm.featureSetHas(comp.target.cpu.features, .has_v7));
try std.testing.expectEqual(true, comp.target.cpu.has(.arm, .has_v7));
try std.testing.expectEqual(@as(u64, 1), ct.sizeof(&comp).?);
try std.testing.expectEqual(@as(u64, 1), ct.alignof(&comp));
try std.testing.expectEqual(true, ignoreNonZeroSizedBitfieldTypeAlignment(comp.target));


@ -318,7 +318,7 @@ pub fn buildLinkerArgs(self: *const Linux, tc: *const Toolchain, argv: *std.Arra
fn getMultiarchTriple(target: std.Target) ?[]const u8 {
const is_android = target.abi.isAndroid();
const is_mips_r6 = std.Target.mips.featureSetHas(target.cpu.features, .mips32r6);
const is_mips_r6 = target.cpu.has(.mips, .mips32r6);
return switch (target.cpu.arch) {
.arm, .thumb => if (is_android) "arm-linux-androideabi" else if (target.abi == .gnueabihf) "arm-linux-gnueabihf" else "arm-linux-gnueabi",
.armeb, .thumbeb => if (target.abi == .gnueabihf) "armeb-linux-gnueabihf" else "armeb-linux-gnueabi",


@ -2,7 +2,7 @@
const builtin = @import("builtin");
const std = @import("std");
const common = @import("common.zig");
const always_has_lse = std.Target.aarch64.featureSetHas(builtin.cpu.features, .lse);
const always_has_lse = builtin.cpu.has(.aarch64, .lse);
/// This default is overridden at runtime after inspecting CPU properties.
/// It is intentionally not exported in order to make the machine code that


@ -19,7 +19,7 @@ const supports_atomic_ops = switch (arch) {
// operations (unless we're targeting Linux, the kernel provides a way to
// perform CAS operations).
// XXX: The Linux code path is not implemented yet.
!std.Target.arm.featureSetHas(builtin.cpu.features, .has_v6m),
!builtin.cpu.has(.arm, .has_v6m),
else => true,
};
@ -30,7 +30,7 @@ const largest_atomic_size = switch (arch) {
// On SPARC systems that lacks CAS and/or swap instructions, the only
// available atomic operation is a test-and-set (`ldstub`), so we force
// every atomic memory access to go through the lock.
.sparc => if (std.Target.sparc.featureSetHas(builtin.cpu.features, .hasleoncasa)) @sizeOf(usize) else 0,
.sparc => if (builtin.cpu.has(.sparc, .hasleoncasa)) @sizeOf(usize) else 0,
// XXX: On x86/x86_64 we could check the presence of cmpxchg8b/cmpxchg16b
// and set this parameter accordingly.


@ -124,7 +124,7 @@ pub fn F16T(comptime OtherType: type) type {
.spirv32,
.spirv64,
=> f16,
.hexagon => if (std.Target.hexagon.featureSetHas(builtin.target.cpu.features, .v68)) f16 else u16,
.hexagon => if (builtin.target.cpu.has(.hexagon, .v68)) f16 else u16,
.x86, .x86_64 => if (builtin.target.os.tag.isDarwin()) switch (OtherType) {
// Starting with LLVM 16, Darwin uses different abi for f16
// depending on the type of the other return/argument..???


@ -142,9 +142,7 @@ fn clzsi2_generic(a: i32) callconv(.c) i32 {
pub const __clzsi2 = switch (builtin.cpu.arch) {
.arm, .armeb, .thumb, .thumbeb => impl: {
const use_thumb1 =
(builtin.cpu.arch.isThumb() or
std.Target.arm.featureSetHas(builtin.cpu.features, .noarm)) and
!std.Target.arm.featureSetHas(builtin.cpu.features, .thumb2);
(builtin.cpu.arch.isThumb() or builtin.cpu.has(.arm, .noarm)) and !builtin.cpu.has(.arm, .thumb2);
if (use_thumb1) {
break :impl __clzsi2_thumb1;


@ -767,25 +767,27 @@ pub const Os = struct {
};
pub const aarch64 = @import("Target/aarch64.zig");
pub const arc = @import("Target/arc.zig");
pub const amdgcn = @import("Target/amdgcn.zig");
pub const arc = @import("Target/arc.zig");
pub const arm = @import("Target/arm.zig");
pub const avr = @import("Target/avr.zig");
pub const bpf = @import("Target/bpf.zig");
pub const csky = @import("Target/csky.zig");
pub const hexagon = @import("Target/hexagon.zig");
pub const kalimba = @import("Target/generic.zig");
pub const lanai = @import("Target/lanai.zig");
pub const loongarch = @import("Target/loongarch.zig");
pub const m68k = @import("Target/m68k.zig");
pub const mips = @import("Target/mips.zig");
pub const msp430 = @import("Target/msp430.zig");
pub const nvptx = @import("Target/nvptx.zig");
pub const or1k = @import("Target/generic.zig");
pub const powerpc = @import("Target/powerpc.zig");
pub const propeller = @import("Target/propeller.zig");
pub const riscv = @import("Target/riscv.zig");
pub const s390x = @import("Target/s390x.zig");
pub const sparc = @import("Target/sparc.zig");
pub const spirv = @import("Target/spirv.zig");
pub const s390x = @import("Target/s390x.zig");
pub const ve = @import("Target/ve.zig");
pub const wasm = @import("Target/wasm.zig");
pub const x86 = @import("Target/x86.zig");
@ -1094,7 +1096,7 @@ pub fn toElfMachine(target: Target) std.elf.EM {
.propeller => .PROPELLER,
.riscv32, .riscv64 => .RISCV,
.s390x => .S390,
.sparc => if (Target.sparc.featureSetHas(target.cpu.features, .v9)) .SPARC32PLUS else .SPARC,
.sparc => if (target.cpu.has(.sparc, .v9)) .SPARC32PLUS else .SPARC,
.sparc64 => .SPARCV9,
.ve => .VE,
.x86 => .@"386",
@ -1396,6 +1398,71 @@ pub const Cpu = struct {
// - tce
// - tcele
/// An architecture family can encompass multiple architectures as represented by `Arch`.
/// For a given family tag, it is guaranteed that an `std.Target.<tag>` namespace exists
/// containing CPU model and feature data.
pub const Family = enum {
amdgcn,
arc,
arm,
aarch64,
avr,
bpf,
csky,
hexagon,
kalimba,
lanai,
loongarch,
m68k,
mips,
msp430,
nvptx,
or1k,
powerpc,
propeller,
riscv,
s390x,
sparc,
spirv,
ve,
wasm,
x86,
xcore,
xtensa,
};
pub inline fn family(arch: Arch) Family {
return switch (arch) {
.amdgcn => .amdgcn,
.arc => .arc,
.arm, .armeb, .thumb, .thumbeb => .arm,
.aarch64, .aarch64_be => .aarch64,
.avr => .avr,
.bpfel, .bpfeb => .bpf,
.csky => .csky,
.hexagon => .hexagon,
.kalimba => .kalimba,
.lanai => .lanai,
.loongarch32, .loongarch64 => .loongarch,
.m68k => .m68k,
.mips, .mipsel, .mips64, .mips64el => .mips,
.msp430 => .msp430,
.or1k => .or1k,
.nvptx, .nvptx64 => .nvptx,
.powerpc, .powerpcle, .powerpc64, .powerpc64le => .powerpc,
.propeller => .propeller,
.riscv32, .riscv64 => .riscv,
.s390x => .s390x,
.sparc, .sparc64 => .sparc,
.spirv, .spirv32, .spirv64 => .spirv,
.ve => .ve,
.wasm32, .wasm64 => .wasm,
.x86, .x86_64 => .x86,
.xcore => .xcore,
.xtensa => .xtensa,
};
}
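// For example, family(.thumbeb) == .arm and family(.mips64el) == .mips, so
// every variant of an architecture shares one feature/model namespace.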
pub inline fn isX86(arch: Arch) bool {
return switch (arch) {
.x86, .x86_64 => true,
@ -1574,88 +1641,17 @@ pub const Cpu = struct {
};
}
/// Returns a name that matches the lib/std/target/* source file name.
pub fn genericName(arch: Arch) [:0]const u8 {
return switch (arch) {
.arm, .armeb, .thumb, .thumbeb => "arm",
.aarch64, .aarch64_be => "aarch64",
.bpfel, .bpfeb => "bpf",
.loongarch32, .loongarch64 => "loongarch",
.mips, .mipsel, .mips64, .mips64el => "mips",
.powerpc, .powerpcle, .powerpc64, .powerpc64le => "powerpc",
.propeller => "propeller",
.riscv32, .riscv64 => "riscv",
.sparc, .sparc64 => "sparc",
.s390x => "s390x",
.x86, .x86_64 => "x86",
.nvptx, .nvptx64 => "nvptx",
.wasm32, .wasm64 => "wasm",
.spirv, .spirv32, .spirv64 => "spirv",
else => @tagName(arch),
};
}
/// All CPU features Zig is aware of, sorted lexicographically by name.
pub fn allFeaturesList(arch: Arch) []const Cpu.Feature {
return switch (arch) {
.arm, .armeb, .thumb, .thumbeb => &arm.all_features,
.aarch64, .aarch64_be => &aarch64.all_features,
.arc => &arc.all_features,
.avr => &avr.all_features,
.bpfel, .bpfeb => &bpf.all_features,
.csky => &csky.all_features,
.hexagon => &hexagon.all_features,
.lanai => &lanai.all_features,
.loongarch32, .loongarch64 => &loongarch.all_features,
.m68k => &m68k.all_features,
.mips, .mipsel, .mips64, .mips64el => &mips.all_features,
.msp430 => &msp430.all_features,
.powerpc, .powerpcle, .powerpc64, .powerpc64le => &powerpc.all_features,
.amdgcn => &amdgcn.all_features,
.riscv32, .riscv64 => &riscv.all_features,
.sparc, .sparc64 => &sparc.all_features,
.spirv, .spirv32, .spirv64 => &spirv.all_features,
.s390x => &s390x.all_features,
.x86, .x86_64 => &x86.all_features,
.xcore => &xcore.all_features,
.xtensa => &xtensa.all_features,
.nvptx, .nvptx64 => &nvptx.all_features,
.ve => &ve.all_features,
.wasm32, .wasm64 => &wasm.all_features,
else => &[0]Cpu.Feature{},
return switch (arch.family()) {
inline else => |f| &@field(Target, @tagName(f)).all_features,
};
}
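// Note: `inline else` instantiates one prong per `Family` tag, so `f` is
// comptime-known and `@field(Target, @tagName(f))` resolves to the matching
// per-arch namespace (e.g. `Target.riscv`) at compile time.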
/// All processors Zig is aware of, sorted lexicographically by name.
pub fn allCpuModels(arch: Arch) []const *const Cpu.Model {
return switch (arch) {
.arc => comptime allCpusFromDecls(arc.cpu),
.arm, .armeb, .thumb, .thumbeb => comptime allCpusFromDecls(arm.cpu),
.aarch64, .aarch64_be => comptime allCpusFromDecls(aarch64.cpu),
.avr => comptime allCpusFromDecls(avr.cpu),
.bpfel, .bpfeb => comptime allCpusFromDecls(bpf.cpu),
.csky => comptime allCpusFromDecls(csky.cpu),
.hexagon => comptime allCpusFromDecls(hexagon.cpu),
.lanai => comptime allCpusFromDecls(lanai.cpu),
.loongarch32, .loongarch64 => comptime allCpusFromDecls(loongarch.cpu),
.m68k => comptime allCpusFromDecls(m68k.cpu),
.mips, .mipsel, .mips64, .mips64el => comptime allCpusFromDecls(mips.cpu),
.msp430 => comptime allCpusFromDecls(msp430.cpu),
.powerpc, .powerpcle, .powerpc64, .powerpc64le => comptime allCpusFromDecls(powerpc.cpu),
.amdgcn => comptime allCpusFromDecls(amdgcn.cpu),
.riscv32, .riscv64 => comptime allCpusFromDecls(riscv.cpu),
.sparc, .sparc64 => comptime allCpusFromDecls(sparc.cpu),
.spirv, .spirv32, .spirv64 => comptime allCpusFromDecls(spirv.cpu),
.s390x => comptime allCpusFromDecls(s390x.cpu),
.x86, .x86_64 => comptime allCpusFromDecls(x86.cpu),
.xcore => comptime allCpusFromDecls(xcore.cpu),
.xtensa => comptime allCpusFromDecls(xtensa.cpu),
.nvptx, .nvptx64 => comptime allCpusFromDecls(nvptx.cpu),
.ve => comptime allCpusFromDecls(ve.cpu),
.wasm32, .wasm64 => comptime allCpusFromDecls(wasm.cpu),
else => &[0]*const Model{},
return switch (arch.family()) {
inline else => |f| comptime allCpusFromDecls(@field(Target, @tagName(f)).cpu),
};
}
@ -1871,49 +1867,24 @@ pub const Cpu = struct {
/// can return CPU models that are understood by LLVM, but *not* understood by Clang. If
/// Clang compatibility is important, consider using `baseline` instead.
pub fn generic(arch: Arch) *const Model {
const S = struct {
const generic_model = Model{
.name = "generic",
.llvm_name = null,
.features = Cpu.Feature.Set.empty,
};
};
return switch (arch) {
.amdgcn => &amdgcn.cpu.gfx600,
.arc => &arc.cpu.generic,
.arm, .armeb, .thumb, .thumbeb => &arm.cpu.generic,
.aarch64, .aarch64_be => &aarch64.cpu.generic,
.avr => &avr.cpu.avr1,
.bpfel, .bpfeb => &bpf.cpu.generic,
.csky => &csky.cpu.generic,
.hexagon => &hexagon.cpu.generic,
.lanai => &lanai.cpu.generic,
.loongarch32 => &loongarch.cpu.generic_la32,
.loongarch64 => &loongarch.cpu.generic_la64,
.m68k => &m68k.cpu.generic,
.mips, .mipsel => &mips.cpu.mips32,
.mips64, .mips64el => &mips.cpu.mips64,
.msp430 => &msp430.cpu.generic,
.nvptx, .nvptx64 => &nvptx.cpu.sm_20,
.powerpc, .powerpcle => &powerpc.cpu.ppc,
.powerpc64, .powerpc64le => &powerpc.cpu.ppc64,
.propeller => &propeller.cpu.p1,
.riscv32 => &riscv.cpu.generic_rv32,
.riscv64 => &riscv.cpu.generic_rv64,
.spirv, .spirv32, .spirv64 => &spirv.cpu.generic,
.sparc => &sparc.cpu.generic,
.sparc64 => &sparc.cpu.v9, // 64-bit SPARC needs v9 as the baseline
.s390x => &s390x.cpu.generic,
.sparc64 => &sparc.cpu.v9, // SPARC can only be 64-bit from v9 and up.
.wasm32, .wasm64 => &wasm.cpu.mvp,
.x86 => &x86.cpu.i386,
.x86_64 => &x86.cpu.x86_64,
.nvptx, .nvptx64 => &nvptx.cpu.sm_20,
.ve => &ve.cpu.generic,
.wasm32, .wasm64 => &wasm.cpu.mvp,
.xcore => &xcore.cpu.generic,
.xtensa => &xtensa.cpu.generic,
.kalimba,
.or1k,
=> &S.generic_model,
inline else => |a| &@field(Target, @tagName(a.family())).cpu.generic,
};
}
@ -1994,7 +1965,7 @@ pub const Cpu = struct {
.fs, .gs, .ss => (arch == .x86_64 or arch == .x86) and (context == null or context == .pointer),
.flash, .flash1, .flash2, .flash3, .flash4, .flash5 => arch == .avr, // TODO this should also check how many flash banks the cpu has
.cog, .hub => arch == .propeller,
.lut => arch == .propeller and std.Target.propeller.featureSetHas(cpu.features, .p2),
.lut => arch == .propeller and cpu.has(.propeller, .p2),
.global, .local, .shared => is_gpu,
.constant => is_gpu and (context == null or context == .constant),
@ -2002,6 +1973,30 @@ pub const Cpu = struct {
.input, .output, .uniform, .push_constant, .storage_buffer, .physical_storage_buffer => is_spirv,
};
}
/// Returns true if `feature` is enabled.
pub fn has(cpu: Cpu, comptime family: Arch.Family, feature: @field(Target, @tagName(family)).Feature) bool {
if (family != cpu.arch.family()) return false;
return cpu.features.isEnabled(@intFromEnum(feature));
}
/// Returns true if any feature in `features` is enabled.
pub fn hasAny(cpu: Cpu, comptime family: Arch.Family, features: []const @field(Target, @tagName(family)).Feature) bool {
if (family != cpu.arch.family()) return false;
for (features) |feature| {
if (cpu.features.isEnabled(@intFromEnum(feature))) return true;
}
return false;
}
/// Returns true if all features in `features` are enabled.
pub fn hasAll(cpu: Cpu, comptime family: Arch.Family, features: []const @field(Target, @tagName(family)).Feature) bool {
if (family != cpu.arch.family()) return false;
for (features) |feature| {
if (!cpu.features.isEnabled(@intFromEnum(feature))) return false;
}
return true;
}
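// Usage sketch (any `std.Target.Cpu` value works, e.g. `builtin.cpu`):
//   if (cpu.has(.riscv, .d)) { ... }
//   if (cpu.hasAny(.x86, &.{ .sse, .avx })) { ... }
//   if (cpu.hasAll(.wasm, &.{ .atomics, .bulk_memory })) { ... }
// A query against a family other than `cpu.arch.family()` returns false.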
};
pub fn zigTriple(target: Target, allocator: Allocator) Allocator.Error![]u8 {
@ -2295,7 +2290,7 @@ pub const DynamicLinker = struct {
.mips,
.mipsel,
=> |arch| initFmt("/lib/ld-musl-mips{s}{s}{s}.so.1", .{
if (mips.featureSetHas(cpu.features, .mips32r6)) "r6" else "",
if (cpu.has(.mips, .mips32r6)) "r6" else "",
if (arch == .mipsel) "el" else "",
switch (abi) {
.musleabi => "-sf",
@ -2312,7 +2307,7 @@ pub const DynamicLinker = struct {
.muslabin32 => "n32",
else => return none,
},
if (mips.featureSetHas(cpu.features, .mips64r6)) "r6" else "",
if (cpu.has(.mips, .mips64r6)) "r6" else "",
if (arch == .mips64el) "el" else "",
}),
@ -2326,9 +2321,9 @@ pub const DynamicLinker = struct {
.riscv64,
=> |arch| if (abi == .musl) initFmt("/lib/ld-musl-{s}{s}.so.1", .{
@tagName(arch),
if (riscv.featureSetHas(cpu.features, .d))
if (cpu.has(.riscv, .d))
""
else if (riscv.featureSetHas(cpu.features, .f))
else if (cpu.has(.riscv, .f))
"-sp"
else
"-sf",
@ -2385,7 +2380,7 @@ pub const DynamicLinker = struct {
.gnueabi,
.gnueabihf,
=> initFmt("/lib/ld{s}.so.1", .{
if (mips.featureSetHas(cpu.features, .nan2008)) "-linux-mipsn8" else "",
if (cpu.has(.mips, .nan2008)) "-linux-mipsn8" else "",
}),
else => none,
},
@ -2398,7 +2393,7 @@ pub const DynamicLinker = struct {
.gnuabin32 => "32",
else => return none,
},
if (mips.featureSetHas(cpu.features, .nan2008)) "-linux-mipsn8" else "",
if (cpu.has(.mips, .nan2008)) "-linux-mipsn8" else "",
}),
.powerpc => switch (abi) {
@ -2419,9 +2414,9 @@ pub const DynamicLinker = struct {
.riscv64 => "riscv64-lp64",
else => unreachable,
},
if (riscv.featureSetHas(cpu.features, .d))
if (cpu.has(.riscv, .d))
"d"
else if (riscv.featureSetHas(cpu.features, .f))
else if (cpu.has(.riscv, .f))
"f"
else
"",
@ -2686,7 +2681,7 @@ pub fn stackAlignment(target: Target) u16 {
=> if (target.os.tag == .linux or target.os.tag == .aix) return 16,
.riscv32,
.riscv64,
=> if (!Target.riscv.featureSetHas(target.cpu.features, .e)) return 16,
=> if (!target.cpu.has(.riscv, .e)) return 16,
.x86 => if (target.os.tag != .windows and target.os.tag != .uefi) return 16,
.x86_64 => return 16,
else => {},
@ -3398,6 +3393,7 @@ const Target = @This();
const std = @import("std.zig");
const builtin = @import("builtin");
const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
test {
std.testing.refAllDecls(Cpu.Arch);


@ -653,16 +653,16 @@ test parse {
try std.testing.expect(target.os.tag == .linux);
try std.testing.expect(target.abi == .gnu);
try std.testing.expect(target.cpu.arch == .x86_64);
try std.testing.expect(!Target.x86.featureSetHas(target.cpu.features, .sse));
try std.testing.expect(!Target.x86.featureSetHas(target.cpu.features, .avx));
try std.testing.expect(!Target.x86.featureSetHas(target.cpu.features, .cx8));
try std.testing.expect(Target.x86.featureSetHas(target.cpu.features, .cmov));
try std.testing.expect(Target.x86.featureSetHas(target.cpu.features, .fxsr));
try std.testing.expect(!target.cpu.has(.x86, .sse));
try std.testing.expect(!target.cpu.has(.x86, .avx));
try std.testing.expect(!target.cpu.has(.x86, .cx8));
try std.testing.expect(target.cpu.has(.x86, .cmov));
try std.testing.expect(target.cpu.has(.x86, .fxsr));
try std.testing.expect(Target.x86.featureSetHasAny(target.cpu.features, .{ .sse, .avx, .cmov }));
try std.testing.expect(!Target.x86.featureSetHasAny(target.cpu.features, .{ .sse, .avx }));
try std.testing.expect(Target.x86.featureSetHasAll(target.cpu.features, .{ .mmx, .x87 }));
try std.testing.expect(!Target.x86.featureSetHasAll(target.cpu.features, .{ .mmx, .x87, .sse }));
try std.testing.expect(target.cpu.hasAny(.x86, &.{ .sse, .avx, .cmov }));
try std.testing.expect(!target.cpu.hasAny(.x86, &.{ .sse, .avx }));
try std.testing.expect(target.cpu.hasAll(.x86, &.{ .mmx, .x87 }));
try std.testing.expect(!target.cpu.hasAll(.x86, &.{ .mmx, .x87, .sse }));
const text = try query.zigTriple(std.testing.allocator);
defer std.testing.allocator.free(text);
@ -679,7 +679,7 @@ test parse {
try std.testing.expect(target.abi == .musleabihf);
try std.testing.expect(target.cpu.arch == .arm);
try std.testing.expect(target.cpu.model == &Target.arm.cpu.generic);
try std.testing.expect(Target.arm.featureSetHas(target.cpu.features, .v8a));
try std.testing.expect(target.cpu.has(.arm, .v8a));
const text = try query.zigTriple(std.testing.allocator);
defer std.testing.allocator.free(text);


@ -0,0 +1,20 @@
const std = @import("../std.zig");
const CpuFeature = std.Target.Cpu.Feature;
const CpuModel = std.Target.Cpu.Model;
pub const Feature = enum {};
pub const featureSet = CpuFeature.FeatureSetFns(Feature).featureSet;
pub const featureSetHas = CpuFeature.FeatureSetFns(Feature).featureSetHas;
pub const featureSetHasAny = CpuFeature.FeatureSetFns(Feature).featureSetHasAny;
pub const featureSetHasAll = CpuFeature.FeatureSetFns(Feature).featureSetHasAll;
pub const all_features: [0]CpuFeature = .{};
pub const cpu = struct {
pub const generic: CpuModel = .{
.name = "generic",
.llvm_name = null,
.features = featureSet(&.{}),
};
};
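
This new file is evidently lib/std/Target/generic.zig: an empty feature set plus a lone generic model. The kalimba and or1k entries in the Target.zig import list above both point at it:

pub const kalimba = @import("Target/generic.zig");
pub const or1k = @import("Target/generic.zig");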


@ -461,9 +461,8 @@ const DragonflyImpl = struct {
const WasmImpl = struct {
fn wait(ptr: *const atomic.Value(u32), expect: u32, timeout: ?u64) error{Timeout}!void {
if (!comptime std.Target.wasm.featureSetHas(builtin.target.cpu.features, .atomics)) {
@compileError("WASI target missing cpu feature 'atomics'");
}
if (!comptime builtin.cpu.has(.wasm, .atomics)) @compileError("WASI target missing cpu feature 'atomics'");
const to: i64 = if (timeout) |to| @intCast(to) else -1;
const result = asm volatile (
\\local.get %[ptr]
@ -485,9 +484,8 @@ const WasmImpl = struct {
}
fn wake(ptr: *const atomic.Value(u32), max_waiters: u32) void {
if (!comptime std.Target.wasm.featureSetHas(builtin.target.cpu.features, .atomics)) {
@compileError("WASI target missing cpu feature 'atomics'");
}
if (!comptime builtin.cpu.has(.wasm, .atomics)) @compileError("WASI target missing cpu feature 'atomics'");
assert(max_waiters != 0);
const woken_count = asm volatile (
\\local.get %[ptr]


@ -378,13 +378,8 @@ pub inline fn spinLoopHint() void {
.armeb,
.thumb,
.thumbeb,
=> {
const can_yield = comptime std.Target.arm.featureSetHasAny(builtin.target.cpu.features, .{
.has_v6k, .has_v6m,
});
if (can_yield) {
=> if (comptime builtin.cpu.hasAny(.arm, &.{ .has_v6k, .has_v6m })) {
asm volatile ("yield");
}
},
// The 8-bit immediate specifies the amount of cycles to pause for. We can't really be too
@ -394,7 +389,7 @@ pub inline fn spinLoopHint() void {
.riscv32,
.riscv64,
=> if (comptime std.Target.riscv.featureSetHas(builtin.target.cpu.features, .zihintpause)) {
=> if (comptime builtin.cpu.has(.riscv, .zihintpause)) {
asm volatile ("pause");
},
@ -430,7 +425,7 @@ pub fn cacheLineForCpu(cpu: std.Target.Cpu) u16 {
// https://github.com/llvm/llvm-project/blob/e379094328e49731a606304f7e3559d4f1fa96f9/clang/lib/Basic/Targets/Hexagon.h#L145-L151
.hexagon,
=> if (std.Target.hexagon.featureSetHas(cpu.features, .v73)) 64 else 32,
=> if (cpu.has(.hexagon, .v73)) 64 else 32,
// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_arm.go#L7
// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_mips.go#L7


@ -2,9 +2,9 @@ const std = @import("../std.zig");
const builtin = @import("builtin");
const testing = std.testing;
const has_aesni = std.Target.x86.featureSetHas(builtin.cpu.features, .aes);
const has_avx = std.Target.x86.featureSetHas(builtin.cpu.features, .avx);
const has_armaes = std.Target.aarch64.featureSetHas(builtin.cpu.features, .aes);
const has_aesni = builtin.cpu.has(.x86, .aes);
const has_avx = builtin.cpu.has(.x86, .avx);
const has_armaes = builtin.cpu.has(.aarch64, .aes);
// C backend doesn't currently support passing vectors to inline asm.
const impl = if (builtin.cpu.arch == .x86_64 and builtin.zig_backend != .stage2_c and has_aesni and has_avx) impl: {
break :impl @import("aes/aesni.zig");


@ -3,8 +3,8 @@ const builtin = @import("builtin");
const mem = std.mem;
const debug = std.debug;
const has_vaes = builtin.cpu.arch == .x86_64 and std.Target.x86.featureSetHas(builtin.cpu.features, .vaes);
const has_avx512f = builtin.cpu.arch == .x86_64 and builtin.zig_backend != .stage2_x86_64 and std.Target.x86.featureSetHas(builtin.cpu.features, .avx512f);
const has_vaes = builtin.cpu.arch == .x86_64 and builtin.cpu.has(.x86, .vaes);
const has_avx512f = builtin.cpu.arch == .x86_64 and builtin.zig_backend != .stage2_x86_64 and builtin.cpu.has(.x86, .avx512f);
/// A single AES block.
pub const Block = struct {


@ -101,8 +101,8 @@ fn AesOcb(comptime Aes: anytype) type {
return offset;
}
const has_aesni = std.Target.x86.featureSetHas(builtin.cpu.features, .aes);
const has_armaes = std.Target.aarch64.featureSetHas(builtin.cpu.features, .aes);
const has_aesni = builtin.cpu.has(.x86, .aes);
const has_armaes = builtin.cpu.has(.aarch64, .aes);
const wb: usize = if ((builtin.cpu.arch == .x86_64 and has_aesni) or (builtin.cpu.arch == .aarch64 and has_armaes)) 4 else 0;
/// c: ciphertext: output buffer should be of size m.len


@ -499,12 +499,12 @@ fn ChaChaNonVecImpl(comptime rounds_nb: usize) type {
fn ChaChaImpl(comptime rounds_nb: usize) type {
switch (builtin.cpu.arch) {
.x86_64 => {
if (builtin.zig_backend != .stage2_x86_64 and std.Target.x86.featureSetHas(builtin.cpu.features, .avx512f)) return ChaChaVecImpl(rounds_nb, 4);
if (std.Target.x86.featureSetHas(builtin.cpu.features, .avx2)) return ChaChaVecImpl(rounds_nb, 2);
if (builtin.zig_backend != .stage2_x86_64 and builtin.cpu.has(.x86, .avx512f)) return ChaChaVecImpl(rounds_nb, 4);
if (builtin.cpu.has(.x86, .avx2)) return ChaChaVecImpl(rounds_nb, 2);
return ChaChaVecImpl(rounds_nb, 1);
},
.aarch64 => {
if (builtin.zig_backend != .stage2_aarch64 and std.Target.aarch64.featureSetHas(builtin.cpu.features, .neon)) return ChaChaVecImpl(rounds_nb, 4);
if (builtin.zig_backend != .stage2_aarch64 and builtin.cpu.has(.aarch64, .neon)) return ChaChaVecImpl(rounds_nb, 4);
return ChaChaNonVecImpl(rounds_nb);
},
else => return ChaChaNonVecImpl(rounds_nb),


@ -284,9 +284,9 @@ fn Hash(comptime endian: std.builtin.Endian, comptime shift_key: bool) type {
return d ^ hi;
}
const has_pclmul = std.Target.x86.featureSetHas(builtin.cpu.features, .pclmul);
const has_avx = std.Target.x86.featureSetHas(builtin.cpu.features, .avx);
const has_armaes = std.Target.aarch64.featureSetHas(builtin.cpu.features, .aes);
const has_pclmul = builtin.cpu.has(.x86, .pclmul);
const has_avx = builtin.cpu.has(.x86, .avx);
const has_armaes = builtin.cpu.has(.aarch64, .aes);
// C backend doesn't currently support passing vectors to inline asm.
const clmul = if (builtin.cpu.arch == .x86_64 and builtin.zig_backend != .stage2_c and has_pclmul and has_avx) impl: {
break :impl clmulPclmul;


@ -200,7 +200,7 @@ fn Sha2x32(comptime iv: Iv32, digest_bits: comptime_int) type {
if (!@inComptime()) {
const V4u32 = @Vector(4, u32);
switch (builtin.cpu.arch) {
.aarch64 => if (builtin.zig_backend != .stage2_c and comptime std.Target.aarch64.featureSetHas(builtin.cpu.features, .sha2)) {
.aarch64 => if (builtin.zig_backend != .stage2_c and comptime builtin.cpu.has(.aarch64, .sha2)) {
var x: V4u32 = d.s[0..4].*;
var y: V4u32 = d.s[4..8].*;
const s_v = @as(*[16]V4u32, @ptrCast(&s));
@ -238,7 +238,7 @@ fn Sha2x32(comptime iv: Iv32, digest_bits: comptime_int) type {
return;
},
// C backend doesn't currently support passing vectors to inline asm.
.x86_64 => if (builtin.zig_backend != .stage2_c and comptime std.Target.x86.featureSetHasAll(builtin.cpu.features, .{ .sha, .avx2 })) {
.x86_64 => if (builtin.zig_backend != .stage2_c and comptime builtin.cpu.hasAll(.x86, &.{ .sha, .avx2 })) {
var x: V4u32 = [_]u32{ d.s[5], d.s[4], d.s[1], d.s[0] };
var y: V4u32 = [_]u32{ d.s[7], d.s[6], d.s[3], d.s[2] };
const s_v = @as(*[16]V4u32, @ptrCast(&s));


@ -773,7 +773,7 @@ pub const StackIterator = struct {
pub fn init(first_address: ?usize, fp: ?usize) StackIterator {
if (native_arch.isSPARC()) {
// Flush all the register windows on stack.
asm volatile (if (std.Target.sparc.featureSetHas(builtin.cpu.features, .v9))
asm volatile (if (builtin.cpu.has(.sparc, .v9))
"flushw"
else
"ta 3" // ST_FLUSH_WINDOWS


@ -1365,8 +1365,7 @@ pub fn lerp(a: anytype, b: anytype, t: anytype) @TypeOf(a, b, t) {
test lerp {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/17884
if (builtin.zig_backend == .stage2_x86_64 and
!comptime std.Target.x86.featureSetHas(builtin.cpu.features, .fma)) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/17884
if (builtin.zig_backend == .stage2_x86_64 and !comptime builtin.cpu.has(.x86, .fma)) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/17884
try testing.expectEqual(@as(f64, 75), lerp(50, 100, 0.5));
try testing.expectEqual(@as(f32, 43.75), lerp(50, 25, 0.25));


@ -13,68 +13,59 @@ pub fn suggestVectorLengthForCpu(comptime T: type, comptime cpu: std.Target.Cpu)
const element_bit_size = @max(8, std.math.ceilPowerOfTwo(u16, @bitSizeOf(T)) catch unreachable);
const vector_bit_size: u16 = blk: {
if (cpu.arch.isX86()) {
if (T == bool and std.Target.x86.featureSetHas(cpu.features, .prefer_mask_registers)) return 64;
if (builtin.zig_backend != .stage2_x86_64 and std.Target.x86.featureSetHas(cpu.features, .avx512f) and !std.Target.x86.featureSetHasAny(cpu.features, .{ .prefer_256_bit, .prefer_128_bit })) break :blk 512;
if (std.Target.x86.featureSetHasAny(cpu.features, .{ .prefer_256_bit, .avx2 }) and !std.Target.x86.featureSetHas(cpu.features, .prefer_128_bit)) break :blk 256;
if (std.Target.x86.featureSetHas(cpu.features, .sse)) break :blk 128;
if (std.Target.x86.featureSetHasAny(cpu.features, .{ .mmx, .@"3dnow" })) break :blk 64;
if (T == bool and cpu.has(.x86, .prefer_mask_registers)) return 64;
if (builtin.zig_backend != .stage2_x86_64 and cpu.has(.x86, .avx512f) and !cpu.hasAny(.x86, &.{ .prefer_256_bit, .prefer_128_bit })) break :blk 512;
if (cpu.hasAny(.x86, &.{ .prefer_256_bit, .avx2 }) and !cpu.has(.x86, .prefer_128_bit)) break :blk 256;
if (cpu.has(.x86, .sse)) break :blk 128;
if (cpu.hasAny(.x86, &.{ .mmx, .@"3dnow" })) break :blk 64;
} else if (cpu.arch.isArm()) {
if (std.Target.arm.featureSetHas(cpu.features, .neon)) break :blk 128;
if (cpu.has(.arm, .neon)) break :blk 128;
} else if (cpu.arch.isAARCH64()) {
// SVE allows up to 2048 bits in the specification, as of 2022 the most powerful machine has implemented 512-bit
// I think is safer to just be on 128 until is more common
// TODO: Check on this return when bigger values are more common
if (std.Target.aarch64.featureSetHas(cpu.features, .sve)) break :blk 128;
if (std.Target.aarch64.featureSetHas(cpu.features, .neon)) break :blk 128;
if (cpu.has(.aarch64, .sve)) break :blk 128;
if (cpu.has(.aarch64, .neon)) break :blk 128;
} else if (cpu.arch.isPowerPC()) {
if (std.Target.powerpc.featureSetHas(cpu.features, .altivec)) break :blk 128;
if (cpu.has(.powerpc, .altivec)) break :blk 128;
} else if (cpu.arch.isMIPS()) {
if (std.Target.mips.featureSetHas(cpu.features, .msa)) break :blk 128;
if (cpu.has(.mips, .msa)) break :blk 128;
// TODO: Test MIPS capability to handle bigger vectors
// In theory MDMX and by extension mips3d have 32 registers of 64 bits which can use in parallel
// for multiple processing, but I don't know what's optimal here, if using
// the 2048 bits or using just 64 per vector or something in between
if (std.Target.mips.featureSetHas(cpu.features, std.Target.mips.Feature.mips3d)) break :blk 64;
if (cpu.has(.mips, .mips3d)) break :blk 64;
} else if (cpu.arch.isRISCV()) {
// In RISC-V Vector Registers are length agnostic so there's no good way to determine the best size.
// The usual vector length in most RISC-V cpus is 256 bits, however it can get to multiple kB.
if (std.Target.riscv.featureSetHas(cpu.features, .v)) {
var vec_bit_length: u32 = 256;
if (std.Target.riscv.featureSetHas(cpu.features, .zvl32b)) {
vec_bit_length = 32;
} else if (std.Target.riscv.featureSetHas(cpu.features, .zvl64b)) {
vec_bit_length = 64;
} else if (std.Target.riscv.featureSetHas(cpu.features, .zvl128b)) {
vec_bit_length = 128;
} else if (std.Target.riscv.featureSetHas(cpu.features, .zvl256b)) {
vec_bit_length = 256;
} else if (std.Target.riscv.featureSetHas(cpu.features, .zvl512b)) {
vec_bit_length = 512;
} else if (std.Target.riscv.featureSetHas(cpu.features, .zvl1024b)) {
vec_bit_length = 1024;
} else if (std.Target.riscv.featureSetHas(cpu.features, .zvl2048b)) {
vec_bit_length = 2048;
} else if (std.Target.riscv.featureSetHas(cpu.features, .zvl4096b)) {
vec_bit_length = 4096;
} else if (std.Target.riscv.featureSetHas(cpu.features, .zvl8192b)) {
vec_bit_length = 8192;
} else if (std.Target.riscv.featureSetHas(cpu.features, .zvl16384b)) {
vec_bit_length = 16384;
} else if (std.Target.riscv.featureSetHas(cpu.features, .zvl32768b)) {
vec_bit_length = 32768;
} else if (std.Target.riscv.featureSetHas(cpu.features, .zvl65536b)) {
vec_bit_length = 65536;
if (cpu.has(.riscv, .v)) {
inline for (.{
.{ .zvl65536b, 65536 },
.{ .zvl32768b, 32768 },
.{ .zvl16384b, 16384 },
.{ .zvl8192b, 8192 },
.{ .zvl4096b, 4096 },
.{ .zvl2048b, 2048 },
.{ .zvl1024b, 1024 },
.{ .zvl512b, 512 },
.{ .zvl256b, 256 },
.{ .zvl128b, 128 },
.{ .zvl64b, 64 },
.{ .zvl32b, 32 },
}) |mapping| {
if (cpu.has(.riscv, mapping[0])) break :blk mapping[1];
}
break :blk vec_bit_length;
break :blk 256;
}
} else if (cpu.arch.isSPARC()) {
// TODO: Test Sparc capability to handle bigger vectors
// In theory Sparc have 32 registers of 64 bits which can use in parallel
// for multiple processing, but I don't know what's optimal here, if using
// the 2048 bits or using just 64 per vector or something in between
if (std.Target.sparc.featureSetHasAny(cpu.features, .{ .vis, .vis2, .vis3 })) break :blk 64;
if (cpu.hasAny(.sparc, &.{ .vis, .vis2, .vis3 })) break :blk 64;
} else if (cpu.arch.isWasm()) {
if (std.Target.wasm.featureSetHas(cpu.features, .simd128)) break :blk 128;
if (cpu.has(.wasm, .simd128)) break :blk 128;
}
return null;
};


@ -109,7 +109,7 @@ pub fn getExternalExecutor(
.riscv64 => Executor{ .qemu = "qemu-riscv64" },
.s390x => Executor{ .qemu = "qemu-s390x" },
.sparc => Executor{
.qemu = if (std.Target.sparc.featureSetHas(candidate.cpu.features, .v9))
.qemu = if (candidate.cpu.has(.sparc, .v9))
"qemu-sparc32plus"
else
"qemu-sparc",


@ -77,7 +77,7 @@ pub fn detectNativeCpuAndFeatures(arch: Target.Cpu.Arch, os: Target.Os, query: T
detectIntelProcessor(&cpu, family, model, brand_id);
},
0x68747541 => {
if (detectAMDProcessor(cpu.features, family, model)) |m| cpu.model = m;
if (detectAMDProcessor(cpu, family, model)) |m| cpu.model = m;
},
else => {},
}
@ -107,7 +107,7 @@ fn detectIntelProcessor(cpu: *Target.Cpu, family: u32, model: u32, brand_id: u32
return;
},
5 => {
if (Target.x86.featureSetHas(cpu.features, .mmx)) {
if (cpu.has(.x86, .mmx)) {
cpu.model = &Target.x86.cpu.pentium_mmx;
return;
}
@ -177,10 +177,10 @@ fn detectIntelProcessor(cpu: *Target.Cpu, family: u32, model: u32, brand_id: u32
return;
},
0x55 => {
if (Target.x86.featureSetHas(cpu.features, .avx512bf16)) {
if (cpu.has(.x86, .avx512bf16)) {
cpu.model = &Target.x86.cpu.cooperlake;
return;
} else if (Target.x86.featureSetHas(cpu.features, .avx512vnni)) {
} else if (cpu.has(.x86, .avx512vnni)) {
cpu.model = &Target.x86.cpu.cascadelake;
return;
} else {
@ -296,11 +296,11 @@ fn detectIntelProcessor(cpu: *Target.Cpu, family: u32, model: u32, brand_id: u32
}
},
15 => {
if (Target.x86.featureSetHas(cpu.features, .@"64bit")) {
if (cpu.has(.x86, .@"64bit")) {
cpu.model = &Target.x86.cpu.nocona;
return;
}
if (Target.x86.featureSetHas(cpu.features, .sse3)) {
if (cpu.has(.x86, .sse3)) {
cpu.model = &Target.x86.cpu.prescott;
return;
}
@ -311,7 +311,7 @@ fn detectIntelProcessor(cpu: *Target.Cpu, family: u32, model: u32, brand_id: u32
}
}
fn detectAMDProcessor(features: Target.Cpu.Feature.Set, family: u32, model: u32) ?*const Target.Cpu.Model {
fn detectAMDProcessor(cpu: Target.Cpu, family: u32, model: u32) ?*const Target.Cpu.Model {
return switch (family) {
4 => &Target.x86.cpu.i486,
5 => switch (model) {
@ -321,11 +321,11 @@ fn detectAMDProcessor(features: Target.Cpu.Feature.Set, family: u32, model: u32)
10 => &Target.x86.cpu.geode,
else => &Target.x86.cpu.pentium,
},
6 => if (Target.x86.featureSetHas(features, .sse))
6 => if (cpu.has(.x86, .sse))
&Target.x86.cpu.athlon_xp
else
&Target.x86.cpu.athlon,
15 => if (Target.x86.featureSetHas(features, .sse3))
15 => if (cpu.has(.x86, .sse3))
&Target.x86.cpu.k8_sse3
else
&Target.x86.cpu.k8,


@ -47,7 +47,7 @@ pub fn generate(opts: @This(), allocator: Allocator) Allocator.Error![:0]u8 {
pub fn append(opts: @This(), buffer: *std.ArrayList(u8)) Allocator.Error!void {
const target = opts.target;
const generic_arch_name = target.cpu.arch.genericName();
const arch_family_name = @tagName(target.cpu.arch.family());
const zig_backend = opts.zig_backend;
@setEvalBranchQuota(4000);
@ -80,9 +80,9 @@ pub fn append(opts: @This(), buffer: *std.ArrayList(u8)) Allocator.Error!void {
opts.single_threaded,
std.zig.fmtId(@tagName(target.abi)),
std.zig.fmtId(@tagName(target.cpu.arch)),
std.zig.fmtId(generic_arch_name),
std.zig.fmtId(arch_family_name),
std.zig.fmtId(target.cpu.model.name),
std.zig.fmtId(generic_arch_name),
std.zig.fmtId(arch_family_name),
});
for (target.cpu.arch.allFeaturesList(), 0..) |feature, index_usize| {


@ -6469,7 +6469,7 @@ pub fn addCCArgs(
var march_index: usize = prefix_len;
@memcpy(march_buf[0..prefix.len], prefix);
if (std.Target.riscv.featureSetHas(target.cpu.features, .e)) {
if (target.cpu.has(.riscv, .e)) {
march_buf[march_index] = 'e';
} else {
march_buf[march_index] = 'i';
@ -6477,7 +6477,7 @@ pub fn addCCArgs(
march_index += 1;
for (letters) |letter| {
if (std.Target.riscv.featureSetHas(target.cpu.features, letter.feat)) {
if (target.cpu.has(.riscv, letter.feat)) {
march_buf[march_index] = letter.char;
march_index += 1;
}
@ -6488,12 +6488,12 @@ pub fn addCCArgs(
});
try argv.append(march_arg);
if (std.Target.riscv.featureSetHas(target.cpu.features, .relax)) {
if (target.cpu.has(.riscv, .relax)) {
try argv.append("-mrelax");
} else {
try argv.append("-mno-relax");
}
if (std.Target.riscv.featureSetHas(target.cpu.features, .save_restore)) {
if (target.cpu.has(.riscv, .save_restore)) {
try argv.append("-msave-restore");
} else {
try argv.append("-mno-save-restore");


@ -166,7 +166,7 @@ pub fn resolve(options: Options) ResolveError!Config {
if (options.shared_memory == true) return error.ObjectFilesCannotShareMemory;
break :b false;
}
if (!std.Target.wasm.featureSetHasAll(target.cpu.features, .{ .atomics, .bulk_memory })) {
if (!target.cpu.hasAll(.wasm, &.{ .atomics, .bulk_memory })) {
if (options.shared_memory == true)
return error.SharedMemoryRequiresAtomicsAndBulkMemory;
break :b false;


@ -9594,7 +9594,7 @@ fn checkMergeAllowed(sema: *Sema, block: *Block, src: LazySrcLoc, peer_ty: Type)
const backend = target_util.zigBackend(target, zcu.comp.config.use_llvm);
try sema.errNote(src, msg, "pointers with address space '{s}' cannot be returned from a branch on target {s}-{s} by compiler backend {s}", .{
@tagName(as),
target.cpu.arch.genericName(),
@tagName(target.cpu.arch.family()),
@tagName(target.os.tag),
@tagName(backend),
});
@ -23604,7 +23604,7 @@ fn checkLogicalPtrOperation(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Typ
"cannot perform arithmetic on pointers with address space '{s}' on target {s}-{s}",
.{
@tagName(as),
target.cpu.arch.genericName(),
@tagName(target.cpu.arch.family()),
@tagName(target.os.tag),
},
);
@ -36719,7 +36719,7 @@ pub fn analyzeAsAddressSpace(
block,
src,
"{s} with address space '{s}' are not supported on {s}",
.{ entity, @tagName(address_space), target.cpu.arch.genericName() },
.{ entity, @tagName(address_space), @tagName(target.cpu.arch.family()) },
);
}


@ -993,8 +993,8 @@ pub fn abiAlignmentInner(
},
.stage2_x86_64 => {
if (vector_type.child == .bool_type) {
if (vector_type.len > 256 and std.Target.x86.featureSetHas(target.cpu.features, .avx512f)) return .{ .scalar = .@"64" };
if (vector_type.len > 128 and std.Target.x86.featureSetHas(target.cpu.features, .avx)) return .{ .scalar = .@"32" };
if (vector_type.len > 256 and target.cpu.has(.x86, .avx512f)) return .{ .scalar = .@"64" };
if (vector_type.len > 128 and target.cpu.has(.x86, .avx)) return .{ .scalar = .@"32" };
if (vector_type.len > 64) return .{ .scalar = .@"16" };
const bytes = std.math.divCeil(u32, vector_type.len, 8) catch unreachable;
const alignment = std.math.ceilPowerOfTwoAssert(u32, bytes);
@ -1003,8 +1003,8 @@ pub fn abiAlignmentInner(
const elem_bytes: u32 = @intCast((try Type.fromInterned(vector_type.child).abiSizeInner(strat, zcu, tid)).scalar);
if (elem_bytes == 0) return .{ .scalar = .@"1" };
const bytes = elem_bytes * vector_type.len;
if (bytes > 32 and std.Target.x86.featureSetHas(target.cpu.features, .avx512f)) return .{ .scalar = .@"64" };
if (bytes > 16 and std.Target.x86.featureSetHas(target.cpu.features, .avx)) return .{ .scalar = .@"32" };
if (bytes > 32 and target.cpu.has(.x86, .avx512f)) return .{ .scalar = .@"64" };
if (bytes > 16 and target.cpu.has(.x86, .avx)) return .{ .scalar = .@"32" };
return .{ .scalar = .@"16" };
},
}


@ -3741,7 +3741,7 @@ pub fn errorSetBits(zcu: *const Zcu) u16 {
if (zcu.error_limit == 0) return 0;
if (target.cpu.arch.isSpirV()) {
if (!std.Target.spirv.featureSetHas(target.cpu.features, .storage_push_constant16)) {
if (!target.cpu.has(.spirv, .storage_push_constant16)) {
return 32;
}
}
@ -3911,7 +3911,7 @@ pub fn atomicPtrAlignment(
.aarch64_be,
=> 128,
.x86_64 => if (std.Target.x86.featureSetHas(target.cpu.features, .cx16)) 128 else 64,
.x86_64 => if (target.cpu.has(.x86, .cx16)) 128 else 64,
};
if (ty.toIntern() == .bool_type) return .none;


@ -4344,7 +4344,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
// TODO: add Instruction.supportedOn
// function for ARM
if (Target.arm.featureSetHas(self.target.cpu.features, .has_v5t)) {
if (self.target.cpu.has(.arm, .has_v5t)) {
_ = try self.addInst(.{
.tag = .blx,
.data = .{ .reg = .lr },
@ -5578,7 +5578,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
} },
});
} else if (x <= math.maxInt(u16)) {
if (Target.arm.featureSetHas(self.target.cpu.features, .has_v7)) {
if (self.target.cpu.has(.arm, .has_v7)) {
_ = try self.addInst(.{
.tag = .movw,
.data = .{ .r_imm16 = .{
@ -5606,7 +5606,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
} else {
// TODO write constant to code and load
// relative to pc
if (Target.arm.featureSetHas(self.target.cpu.features, .has_v7)) {
if (self.target.cpu.has(.arm, .has_v7)) {
// immediate: 0xaaaabbbb
// movw reg, #0xbbbb
// movt reg, #0xaaaa


@ -203,7 +203,7 @@ fn instructionSize(emit: *Emit, inst: Mir.Inst.Index) usize {
} else if (Instruction.Operand.fromU32(imm32) != null) {
// sub
return 1 * 4;
} else if (Target.arm.featureSetHas(emit.target.cpu.features, .has_v7)) {
} else if (emit.target.cpu.has(.arm, .has_v7)) {
// movw; movt; sub
return 3 * 4;
} else {
@ -452,7 +452,7 @@ fn mirSubStackPointer(emit: *Emit, inst: Mir.Inst.Index) !void {
const operand = Instruction.Operand.fromU32(imm32) orelse blk: {
const scratch: Register = .r4;
if (Target.arm.featureSetHas(emit.target.cpu.features, .has_v7)) {
if (emit.target.cpu.has(.arm, .has_v7)) {
try emit.writeInstruction(Instruction.movw(cond, scratch, @as(u16, @truncate(imm32))));
try emit.writeInstruction(Instruction.movt(cond, scratch, @as(u16, @truncate(imm32 >> 16))));
} else {


@ -8455,7 +8455,7 @@ fn typeOfIndex(func: *Func, inst: Air.Inst.Index) Type {
}
fn hasFeature(func: *Func, feature: Target.riscv.Feature) bool {
return Target.riscv.featureSetHas(func.target.cpu.features, feature);
return func.target.cpu.has(.riscv, feature);
}
pub fn errUnionPayloadOffset(payload_ty: Type, zcu: *Zcu) u64 {


@ -590,9 +590,7 @@ pub fn fail(lower: *Lower, comptime format: []const u8, args: anytype) Error {
}
fn hasFeature(lower: *Lower, feature: std.Target.riscv.Feature) bool {
const target = lower.pt.zcu.getTarget();
const features = target.cpu.features;
return std.Target.riscv.featureSetHas(features, feature);
return lower.pt.zcu.getTarget().cpu.has(.riscv, feature);
}
const Lower = @This();


@ -22,7 +22,7 @@ pub fn classifyType(ty: Type, zcu: *Zcu) Class {
return .byval;
}
if (std.Target.riscv.featureSetHas(target.cpu.features, .d)) fields: {
if (target.cpu.has(.riscv, .d)) fields: {
var any_fp = false;
var field_count: usize = 0;
for (0..ty.structFieldCount(zcu)) |field_index| {
@ -141,10 +141,9 @@ pub fn classifySystem(ty: Type, zcu: *Zcu) [8]SystemClass {
},
.float => {
const target = zcu.getTarget();
const features = target.cpu.features;
const float_bits = ty.floatBits(zcu.getTarget());
const float_reg_size: u32 = if (std.Target.riscv.featureSetHas(features, .d)) 64 else 32;
const float_bits = ty.floatBits(target);
const float_reg_size: u32 = if (target.cpu.has(.riscv, .d)) 64 else 32;
if (float_bits <= float_reg_size) {
result[0] = .float;
return result;


@ -211,12 +211,10 @@ pub const Register = enum(u8) {
}
pub fn bitSize(reg: Register, zcu: *const Zcu) u32 {
const features = zcu.getTarget().cpu.features;
return switch (@intFromEnum(reg)) {
// zig fmt: off
@intFromEnum(Register.zero) ... @intFromEnum(Register.x31) => 64,
@intFromEnum(Register.ft0) ... @intFromEnum(Register.f31) => if (Target.riscv.featureSetHas(features, .d)) 64 else 32,
@intFromEnum(Register.ft0) ... @intFromEnum(Register.f31) => if (zcu.getTarget().cpu.has(.riscv, .d)) 64 else 32,
@intFromEnum(Register.v0) ... @intFromEnum(Register.v31) => 256, // TODO: look at suggestVectorSize
else => unreachable,
// zig fmt: on


@ -1616,8 +1616,8 @@ fn memcpy(cg: *CodeGen, dst: WValue, src: WValue, len: WValue) !void {
};
// When bulk_memory is enabled, we lower it to wasm's memcpy instruction.
// If not, we lower it ourselves manually
if (std.Target.wasm.featureSetHas(cg.target.cpu.features, .bulk_memory)) {
const len0_ok = std.Target.wasm.featureSetHas(cg.target.cpu.features, .nontrapping_bulk_memory_len0);
if (cg.target.cpu.has(.wasm, .bulk_memory)) {
const len0_ok = cg.target.cpu.has(.wasm, .nontrapping_bulk_memory_len0);
const emit_check = !(len0_ok or len_known_neq_0);
if (emit_check) {
@ -1839,9 +1839,7 @@ const SimdStoreStrategy = enum {
pub fn determineSimdStoreStrategy(ty: Type, zcu: *const Zcu, target: *const std.Target) SimdStoreStrategy {
assert(ty.zigTypeTag(zcu) == .vector);
if (ty.bitSize(zcu) != 128) return .unrolled;
const hasFeature = std.Target.wasm.featureSetHas;
const features = target.cpu.features;
if (hasFeature(features, .relaxed_simd) or hasFeature(features, .simd128)) {
if (target.cpu.has(.wasm, .relaxed_simd) or target.cpu.has(.wasm, .simd128)) {
return .direct;
}
return .unrolled;
@ -4838,8 +4836,8 @@ fn memset(cg: *CodeGen, elem_ty: Type, ptr: WValue, len: WValue, value: WValue)
// When bulk_memory is enabled, we lower it to wasm's memset instruction.
// If not, we lower it ourselves.
if (std.Target.wasm.featureSetHas(cg.target.cpu.features, .bulk_memory) and abi_size == 1) {
const len0_ok = std.Target.wasm.featureSetHas(cg.target.cpu.features, .nontrapping_bulk_memory_len0);
if (cg.target.cpu.has(.wasm, .bulk_memory) and abi_size == 1) {
const len0_ok = cg.target.cpu.has(.wasm, .nontrapping_bulk_memory_len0);
if (!len0_ok) {
try cg.startBlock(.block, .empty);
@ -7304,7 +7302,7 @@ fn airErrorSetHasValue(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
inline fn useAtomicFeature(cg: *const CodeGen) bool {
return std.Target.wasm.featureSetHas(cg.target.cpu.features, .atomics);
return cg.target.cpu.has(.wasm, .atomics);
}
fn airCmpxchg(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {


@ -182035,7 +182035,7 @@ fn hasFeature(cg: *CodeGen, feature: std.Target.x86.Feature) bool {
.x86_64 => null,
},
else => null,
} orelse std.Target.x86.featureSetHas(cg.target.cpu.features, feature);
} orelse cg.target.cpu.has(.x86, feature);
}
fn typeOf(self: *CodeGen, inst: Air.Inst.Ref) Type {


@ -72,8 +72,8 @@ pub fn findByMnemonic(
},
inline .@"invpcid 32bit", .@"rdpid 32bit" => |tag| switch (target.cpu.arch) {
else => unreachable,
.x86 => std.Target.x86.featureSetHas(
target.cpu.features,
.x86 => target.cpu.has(
.x86,
@field(std.Target.x86.Feature, @tagName(tag)[0 .. @tagName(tag).len - " 32bit".len]),
),
.x86_64 => false,
@ -81,17 +81,17 @@ pub fn findByMnemonic(
inline .@"invpcid 64bit", .@"rdpid 64bit", .@"prefetchi 64bit" => |tag| switch (target.cpu.arch) {
else => unreachable,
.x86 => false,
.x86_64 => std.Target.x86.featureSetHas(
target.cpu.features,
.x86_64 => target.cpu.has(
.x86,
@field(std.Target.x86.Feature, @tagName(tag)[0 .. @tagName(tag).len - " 64bit".len]),
),
},
.prefetch => std.Target.x86.featureSetHasAny(target.cpu.features, .{ .sse, .prfchw, .prefetchi, .prefetchwt1 }),
.prefetch => target.cpu.hasAny(.x86, &.{ .sse, .prfchw, .prefetchi, .prefetchwt1 }),
inline else => |tag| has_features: {
comptime var feature_it = std.mem.splitScalar(u8, @tagName(tag), ' ');
comptime var features: []const std.Target.x86.Feature = &.{};
inline while (comptime feature_it.next()) |feature| features = features ++ .{@field(std.Target.x86.Feature, feature)};
break :has_features std.Target.x86.featureSetHasAll(target.cpu.features, features[0..].*);
break :has_features target.cpu.hasAll(.x86, features);
},
}) continue;


@ -201,11 +201,11 @@ pub fn classifySystemV(ty: Type, zcu: *Zcu, target: *const std.Target, ctx: Cont
.integer_per_element, .none, .none, .none,
.none, .none, .none, .none,
};
if (bits <= 256 and std.Target.x86.featureSetHas(target.cpu.features, .avx)) return .{
if (bits <= 256 and target.cpu.has(.x86, .avx)) return .{
.integer_per_element, .none, .none, .none,
.none, .none, .none, .none,
};
if (bits <= 512 and std.Target.x86.featureSetHas(target.cpu.features, .avx512f)) return .{
if (bits <= 512 and target.cpu.has(.x86, .avx512f)) return .{
.integer_per_element, .none, .none, .none,
.none, .none, .none, .none,
};
@ -220,7 +220,7 @@ pub fn classifySystemV(ty: Type, zcu: *Zcu, target: *const std.Target, ctx: Cont
.sse, .sseup, .none, .none,
.none, .none, .none, .none,
};
if (ctx == .arg and !std.Target.x86.featureSetHas(target.cpu.features, .avx)) return memory_class;
if (ctx == .arg and !target.cpu.has(.x86, .avx)) return memory_class;
if (bits <= 192) return .{
.sse, .sseup, .sseup, .none,
.none, .none, .none, .none,
@ -229,7 +229,7 @@ pub fn classifySystemV(ty: Type, zcu: *Zcu, target: *const std.Target, ctx: Cont
.sse, .sseup, .sseup, .sseup,
.none, .none, .none, .none,
};
if (ctx == .arg and !std.Target.x86.featureSetHas(target.cpu.features, .avx512f)) return memory_class;
if (ctx == .arg and !target.cpu.has(.x86, .avx512f)) return memory_class;
if (bits <= 320) return .{
.sse, .sseup, .sseup, .sseup,
.sseup, .none, .none, .none,
@ -242,9 +242,9 @@ pub fn classifySystemV(ty: Type, zcu: *Zcu, target: *const std.Target, ctx: Cont
.sse, .sseup, .sseup, .sseup,
.sseup, .sseup, .sseup, .none,
};
if (bits <= 512 or (ctx == .ret and bits <= @as(u64, if (std.Target.x86.featureSetHas(target.cpu.features, .avx512f))
if (bits <= 512 or (ctx == .ret and bits <= @as(u64, if (target.cpu.has(.x86, .avx512f))
2048
else if (std.Target.x86.featureSetHas(target.cpu.features, .avx))
else if (target.cpu.has(.x86, .avx))
1024
else
512))) return .{


@ -40,9 +40,9 @@ pub fn legalizeFeatures(_: *const std.Target) ?*const Air.Legalize.Features {
return null;
}
fn subArchName(features: std.Target.Cpu.Feature.Set, arch: anytype, mappings: anytype) ?[]const u8 {
fn subArchName(target: std.Target, comptime family: std.Target.Cpu.Arch.Family, mappings: anytype) ?[]const u8 {
inline for (mappings) |mapping| {
if (arch.featureSetHas(features, mapping[0])) return mapping[1];
if (target.cpu.has(family, mapping[0])) return mapping[1];
}
return null;
@ -52,8 +52,6 @@ pub fn targetTriple(allocator: Allocator, target: std.Target) ![]const u8 {
var llvm_triple = std.ArrayList(u8).init(allocator);
defer llvm_triple.deinit();
const features = target.cpu.features;
const llvm_arch = switch (target.cpu.arch) {
.arm => "arm",
.armeb => "armeb",
@ -69,10 +67,10 @@ pub fn targetTriple(allocator: Allocator, target: std.Target) ![]const u8 {
.loongarch64 => "loongarch64",
.m68k => "m68k",
// MIPS sub-architectures are a bit irregular, so we handle them manually here.
.mips => if (std.Target.mips.featureSetHas(features, .mips32r6)) "mipsisa32r6" else "mips",
.mipsel => if (std.Target.mips.featureSetHas(features, .mips32r6)) "mipsisa32r6el" else "mipsel",
.mips64 => if (std.Target.mips.featureSetHas(features, .mips64r6)) "mipsisa64r6" else "mips64",
.mips64el => if (std.Target.mips.featureSetHas(features, .mips64r6)) "mipsisa64r6el" else "mips64el",
.mips => if (target.cpu.has(.mips, .mips32r6)) "mipsisa32r6" else "mips",
.mipsel => if (target.cpu.has(.mips, .mips32r6)) "mipsisa32r6el" else "mipsel",
.mips64 => if (target.cpu.has(.mips, .mips64r6)) "mipsisa64r6" else "mips64",
.mips64el => if (target.cpu.has(.mips, .mips64r6)) "mipsisa64r6el" else "mips64el",
.msp430 => "msp430",
.powerpc => "powerpc",
.powerpcle => "powerpcle",
@ -109,7 +107,7 @@ pub fn targetTriple(allocator: Allocator, target: std.Target) ![]const u8 {
try llvm_triple.appendSlice(llvm_arch);
const llvm_sub_arch: ?[]const u8 = switch (target.cpu.arch) {
.arm, .armeb, .thumb, .thumbeb => subArchName(features, std.Target.arm, .{
.arm, .armeb, .thumb, .thumbeb => subArchName(target, .arm, .{
.{ .v4t, "v4t" },
.{ .v5t, "v5t" },
.{ .v5te, "v5te" },
@ -146,13 +144,13 @@ pub fn targetTriple(allocator: Allocator, target: std.Target) ![]const u8 {
.{ .v9_5a, "v9.5a" },
.{ .v9_6a, "v9.6a" },
}),
.powerpc => subArchName(features, std.Target.powerpc, .{
.powerpc => subArchName(target, .powerpc, .{
.{ .spe, "spe" },
}),
.spirv => subArchName(features, std.Target.spirv, .{
.spirv => subArchName(target, .spirv, .{
.{ .v1_5, "1.5" },
}),
.spirv32, .spirv64 => subArchName(features, std.Target.spirv, .{
.spirv32, .spirv64 => subArchName(target, .spirv, .{
.{ .v1_5, "1.5" },
.{ .v1_4, "1.4" },
.{ .v1_3, "1.3" },
@ -309,13 +307,13 @@ pub fn targetTriple(allocator: Allocator, target: std.Target) ![]const u8 {
}
pub fn supportsTailCall(target: std.Target) bool {
switch (target.cpu.arch) {
.wasm32, .wasm64 => return std.Target.wasm.featureSetHas(target.cpu.features, .tail_call),
return switch (target.cpu.arch) {
.wasm32, .wasm64 => target.cpu.has(.wasm, .tail_call),
// Although these ISAs support tail calls, LLVM does not support tail calls on them.
.mips, .mipsel, .mips64, .mips64el => return false,
.powerpc, .powerpcle, .powerpc64, .powerpc64le => return false,
else => return true,
}
.mips, .mipsel, .mips64, .mips64el => false,
.powerpc, .powerpcle, .powerpc64, .powerpc64le => false,
else => true,
};
}
pub fn dataLayout(target: std.Target) []const u8 {
@ -391,11 +389,11 @@ pub fn dataLayout(target: std.Target) []const u8 {
.nvptx => "e-p:32:32-i64:64-i128:128-v16:16-v32:32-n16:32:64",
.nvptx64 => "e-i64:64-i128:128-v16:16-v32:32-n16:32:64",
.amdgcn => "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-p7:160:256:256:32-p8:128:128-p9:192:256:256:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-G1-ni:7:8:9",
.riscv32 => if (std.Target.riscv.featureSetHas(target.cpu.features, .e))
.riscv32 => if (target.cpu.has(.riscv, .e))
"e-m:e-p:32:32-i64:64-n32-S32"
else
"e-m:e-p:32:32-i64:64-n32-S128",
.riscv64 => if (std.Target.riscv.featureSetHas(target.cpu.features, .e))
.riscv64 => if (target.cpu.has(.riscv, .e))
"e-m:e-p:64:64-i64:64-i128:128-n32:64-S64"
else
"e-m:e-p:64:64-i64:64-i128:128-n32:64-S128",
@ -12047,7 +12045,7 @@ fn returnTypeByRef(zcu: *Zcu, target: std.Target, ty: Type) bool {
if (isByRef(ty, zcu)) {
return true;
} else if (target.cpu.arch.isX86() and
!std.Target.x86.featureSetHas(target.cpu.features, .evex512) and
!target.cpu.has(.x86, .evex512) and
ty.totalVectorBits(zcu) >= 512)
{
// As of LLVM 18, passing a vector byval with fastcc that is 512 bits or more returns
@ -12322,7 +12320,7 @@ const ParamTypeIterator = struct {
} else if (isByRef(ty, zcu)) {
return .byref;
} else if (target.cpu.arch.isX86() and
!std.Target.x86.featureSetHas(target.cpu.features, .evex512) and
!target.cpu.has(.x86, .evex512) and
ty.totalVectorBits(zcu) >= 512)
{
// As of LLVM 18, passing a vector byval with fastcc that is 512 bits or more returns
@ -12746,7 +12744,7 @@ fn isScalar(zcu: *Zcu, ty: Type) bool {
/// or if it produces miscompilations.
fn backendSupportsF80(target: std.Target) bool {
return switch (target.cpu.arch) {
.x86_64, .x86 => !std.Target.x86.featureSetHas(target.cpu.features, .soft_float),
.x86, .x86_64 => !target.cpu.has(.x86, .soft_float),
else => false,
};
}
@ -12778,11 +12776,11 @@ fn backendSupportsF16(target: std.Target) bool {
.armeb,
.thumb,
.thumbeb,
=> target.abi.float() == .soft or std.Target.arm.featureSetHas(target.cpu.features, .fullfp16),
=> target.abi.float() == .soft or target.cpu.has(.arm, .fullfp16),
// https://github.com/llvm/llvm-project/issues/129394
.aarch64,
.aarch64_be,
=> std.Target.aarch64.featureSetHas(target.cpu.features, .fp_armv8),
=> target.cpu.has(.aarch64, .fp_armv8),
else => true,
};
}
@ -12813,7 +12811,7 @@ fn backendSupportsF128(target: std.Target) bool {
.armeb,
.thumb,
.thumbeb,
=> target.abi.float() == .soft or std.Target.arm.featureSetHas(target.cpu.features, .fp_armv8),
=> target.abi.float() == .soft or target.cpu.has(.arm, .fp_armv8),
else => true,
};
}
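
The three backendSupports* predicates share a shape; a hedged sketch of how a caller might combine them when deciding whether a float width needs a soft lowering (wantSoftFloatLowering is hypothetical and assumes the predicates above are in scope, along with the file's std import):

fn wantSoftFloatLowering(target: std.Target, bits: u16) bool {
    return switch (bits) {
        16 => !backendSupportsF16(target),
        80 => !backendSupportsF80(target),
        128 => !backendSupportsF128(target),
        else => false,
    };
}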


@ -190,12 +190,12 @@ entry_points: std.AutoArrayHashMapUnmanaged(IdRef, EntryPoint) = .empty,
pub fn init(gpa: Allocator, target: std.Target) Module {
const version_minor: u8 = blk: {
// Prefer higher versions
if (std.Target.spirv.featureSetHas(target.cpu.features, .v1_6)) break :blk 6;
if (std.Target.spirv.featureSetHas(target.cpu.features, .v1_5)) break :blk 5;
if (std.Target.spirv.featureSetHas(target.cpu.features, .v1_4)) break :blk 4;
if (std.Target.spirv.featureSetHas(target.cpu.features, .v1_3)) break :blk 3;
if (std.Target.spirv.featureSetHas(target.cpu.features, .v1_2)) break :blk 2;
if (std.Target.spirv.featureSetHas(target.cpu.features, .v1_1)) break :blk 1;
if (target.cpu.has(.spirv, .v1_6)) break :blk 6;
if (target.cpu.has(.spirv, .v1_5)) break :blk 5;
if (target.cpu.has(.spirv, .v1_4)) break :blk 4;
if (target.cpu.has(.spirv, .v1_3)) break :blk 3;
if (target.cpu.has(.spirv, .v1_2)) break :blk 2;
if (target.cpu.has(.spirv, .v1_1)) break :blk 1;
break :blk 0;
};
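
The descending version cascade could equivalently be written as a loop over (feature, minor) pairs; a sketch, under the assumption that a for/else expression is acceptable at this point in init:

const pairs = [_]struct { std.Target.spirv.Feature, u8 }{
    .{ .v1_6, 6 }, .{ .v1_5, 5 }, .{ .v1_4, 4 },
    .{ .v1_3, 3 }, .{ .v1_2, 2 }, .{ .v1_1, 1 },
};
// Prefer the highest available minor version; fall back to 1.0.
const minor: u8 = for (pairs) |p| {
    if (target.cpu.has(.spirv, p[0])) break p[1];
} else 0;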
@ -268,7 +268,7 @@ pub fn idBound(self: Module) Word {
}
pub fn hasFeature(self: *Module, feature: std.Target.spirv.Feature) bool {
return std.Target.spirv.featureSetHas(self.target.cpu.features, feature);
return self.target.cpu.has(.spirv, feature);
}
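
Callers that hold only a std.Target can ask the same question without going through the Module; a tiny sketch:

fn spirv14Supported(target: std.Target) bool {
    // Equivalent to module.hasFeature(.v1_4) for the module's own target.
    return target.cpu.has(.spirv, .v1_4);
}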
fn addEntryPointDeps(


@ -187,7 +187,6 @@ pub fn validateEFlags(
) !void {
switch (target.cpu.arch) {
.riscv64 => {
const features = target.cpu.features;
const flags: riscv.Eflags = @bitCast(e_flags);
var any_errors: bool = false;
@ -196,7 +195,7 @@ pub fn validateEFlags(
// Invalid when
// 1. The input uses C and we do not.
if (flags.rvc and !std.Target.riscv.featureSetHas(features, .c)) {
if (flags.rvc and !target.cpu.has(.riscv, .c)) {
any_errors = true;
diags.addParseError(
path,
@ -208,7 +207,7 @@ pub fn validateEFlags(
// Invalid when
// 1. We use E and the input does not.
// 2. The input uses E and we do not.
if (std.Target.riscv.featureSetHas(features, .e) != flags.rve) {
if (target.cpu.has(.riscv, .e) != flags.rve) {
any_errors = true;
diags.addParseError(
path,
@ -225,7 +224,7 @@ pub fn validateEFlags(
// Invalid when
// 1. We use total store order and the input does not.
// 2. The input uses total store order and we do not.
if (flags.tso != std.Target.riscv.featureSetHas(features, .ztso)) {
if (flags.tso != target.cpu.has(.riscv, .ztso)) {
any_errors = true;
diags.addParseError(
path,
@ -235,9 +234,9 @@ pub fn validateEFlags(
}
const fabi: riscv.Eflags.FloatAbi =
if (std.Target.riscv.featureSetHas(features, .d))
if (target.cpu.has(.riscv, .d))
.double
else if (std.Target.riscv.featureSetHas(features, .f))
else if (target.cpu.has(.riscv, .f))
.single
else
.soft;


@ -1159,7 +1159,7 @@ fn emitFeaturesSection(
var safety_count = feature_count;
for (target.cpu.arch.allFeaturesList(), 0..) |*feature, i| {
if (!std.Target.wasm.featureSetHas(target.cpu.features, @enumFromInt(i))) continue;
if (!target.cpu.has(.wasm, @as(std.Target.wasm.Feature, @enumFromInt(i)))) continue;
safety_count -= 1;
try leb.writeUleb128(writer, @as(u32, '+'));


@ -917,12 +917,12 @@ pub fn parse(
}
if (!saw_linking_section) return error.MissingLinkingSection;
const target_features = comp.root_mod.resolved_target.result.cpu.features;
const cpu = comp.root_mod.resolved_target.result.cpu;
if (has_tls) {
if (!std.Target.wasm.featureSetHas(target_features, .atomics))
if (!cpu.has(.wasm, .atomics))
return diags.failParse(path, "object has TLS segment but target CPU feature atomics is disabled", .{});
if (!std.Target.wasm.featureSetHas(target_features, .bulk_memory))
if (!cpu.has(.wasm, .bulk_memory))
return diags.failParse(path, "object has TLS segment but target CPU feature bulk_memory is disabled", .{});
}
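
Since both features are required together, the pair of checks collapses naturally onto the new hasAll convenience; the diff presumably keeps two separate checks only so each diagnostic can name the missing feature. A sketch:

fn tlsSupported(cpu: std.Target.Cpu) bool {
    // TLS segments need atomics and bulk_memory at the same time.
    return cpu.hasAll(.wasm, &.{ .atomics, .bulk_memory });
}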
@ -937,7 +937,7 @@ pub fn parse(
},
else => {
const f = feat.tag.toCpuFeature().?;
if (std.Target.wasm.featureSetHas(target_features, f)) {
if (cpu.has(.wasm, f)) {
return diags.failParse(
path,
"object forbids {s} but specified target features include {s}",
@ -952,7 +952,7 @@ pub fn parse(
},
else => {
const f = feat.tag.toCpuFeature().?;
if (!std.Target.wasm.featureSetHas(target_features, f)) {
if (!cpu.has(.wasm, f)) {
return diags.failParse(
path,
"object requires {s} but specified target features exclude {s}",


@ -297,18 +297,21 @@ pub fn classifyCompilerRtLibName(name: []const u8) CompilerRtClassification {
pub fn hasDebugInfo(target: std.Target) bool {
return switch (target.cpu.arch) {
.nvptx, .nvptx64 => std.Target.nvptx.featureSetHas(target.cpu.features, .ptx75) or
std.Target.nvptx.featureSetHas(target.cpu.features, .ptx76) or
std.Target.nvptx.featureSetHas(target.cpu.features, .ptx77) or
std.Target.nvptx.featureSetHas(target.cpu.features, .ptx78) or
std.Target.nvptx.featureSetHas(target.cpu.features, .ptx80) or
std.Target.nvptx.featureSetHas(target.cpu.features, .ptx81) or
std.Target.nvptx.featureSetHas(target.cpu.features, .ptx82) or
std.Target.nvptx.featureSetHas(target.cpu.features, .ptx83) or
std.Target.nvptx.featureSetHas(target.cpu.features, .ptx84) or
std.Target.nvptx.featureSetHas(target.cpu.features, .ptx85) or
std.Target.nvptx.featureSetHas(target.cpu.features, .ptx86) or
std.Target.nvptx.featureSetHas(target.cpu.features, .ptx87),
// TODO: We should make newer PTX versions depend on older ones so we'd just check `ptx75`.
.nvptx, .nvptx64 => target.cpu.hasAny(.nvptx, &.{
.ptx75,
.ptx76,
.ptx77,
.ptx78,
.ptx80,
.ptx81,
.ptx82,
.ptx83,
.ptx84,
.ptx85,
.ptx86,
.ptx87,
}),
.bpfel, .bpfeb => false,
else => true,
};
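
hasAny here replaces a chain of twelve featureSetHas calls. In terms of the older helpers it is roughly the following loop (a sketch, not the actual std implementation):

fn anyNvptxFeature(cpu: std.Target.Cpu, features: []const std.Target.nvptx.Feature) bool {
    for (features) |f| {
        if (std.Target.nvptx.featureSetHas(cpu.features, f)) return true;
    }
    return false;
}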
@ -613,28 +616,22 @@ pub fn llvmMachineAbi(target: std.Target) ?[:0]const u8 {
else => if (target.abi.isMusl()) "elfv2" else "elfv1",
},
.powerpc64le => "elfv2",
.riscv64 => b: {
const featureSetHas = std.Target.riscv.featureSetHas;
break :b if (featureSetHas(target.cpu.features, .e))
.riscv64 => if (target.cpu.has(.riscv, .e))
"lp64e"
else if (featureSetHas(target.cpu.features, .d))
else if (target.cpu.has(.riscv, .d))
"lp64d"
else if (featureSetHas(target.cpu.features, .f))
else if (target.cpu.has(.riscv, .f))
"lp64f"
else
"lp64";
},
.riscv32 => b: {
const featureSetHas = std.Target.riscv.featureSetHas;
break :b if (featureSetHas(target.cpu.features, .e))
"lp64",
.riscv32 => if (target.cpu.has(.riscv, .e))
"ilp32e"
else if (featureSetHas(target.cpu.features, .d))
else if (target.cpu.has(.riscv, .d))
"ilp32d"
else if (featureSetHas(target.cpu.features, .f))
else if (target.cpu.has(.riscv, .f))
"ilp32f"
else
"ilp32";
},
"ilp32",
else => null,
};
}
@ -672,7 +669,7 @@ pub fn minFunctionAlignment(target: std.Target) Alignment {
return switch (target.cpu.arch) {
.riscv32,
.riscv64,
=> if (std.Target.riscv.featureSetHasAny(target.cpu.features, .{ .c, .zca })) .@"2" else .@"4",
=> if (target.cpu.hasAny(.riscv, &.{ .c, .zca })) .@"2" else .@"4",
.thumb,
.thumbeb,
.csky,


@ -7,7 +7,7 @@ const supports_128_bit_atomics = switch (builtin.cpu.arch) {
// TODO: Ideally this could be synced with the logic in Sema.
.aarch64 => true,
.aarch64_be => false, // Fails due to LLVM issues.
.x86_64 => std.Target.x86.featureSetHas(builtin.cpu.features, .cx16),
.x86_64 => builtin.cpu.has(.x86, .cx16),
else => false,
};
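
A minimal use of the constant in a test (a sketch; assumes std is imported at file scope, as in the surrounding file):

test "128-bit cmpxchg only runs when supported" {
    if (!supports_128_bit_atomics) return error.SkipZigTest;
    var value: u128 = 0;
    // Succeeds (returns null) because the expected value matches.
    try std.testing.expectEqual(@as(?u128, null), @cmpxchgStrong(u128, &value, 0, 1, .seq_cst, .seq_cst));
}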


@ -17,7 +17,7 @@ test "add f16" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff and
!comptime std.Target.x86.featureSetHas(builtin.cpu.features, .f16c)) return error.SkipZigTest;
!comptime builtin.cpu.has(.x86, .f16c)) return error.SkipZigTest;
try testAdd(f16);
try comptime testAdd(f16);
@ -129,7 +129,7 @@ test "cmp f16" {
if (builtin.cpu.arch.isArm() and builtin.target.abi.float() == .soft) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/21234
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff and
!comptime std.Target.x86.featureSetHas(builtin.cpu.features, .f16c)) return error.SkipZigTest;
!comptime builtin.cpu.has(.x86, .f16c)) return error.SkipZigTest;
try testCmp(f16);
try comptime testCmp(f16);
@ -345,7 +345,7 @@ test "different sized float comparisons" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff and
!comptime std.Target.x86.featureSetHas(builtin.cpu.features, .f16c)) return error.SkipZigTest;
!comptime builtin.cpu.has(.x86, .f16c)) return error.SkipZigTest;
try testDifferentSizedFloatComparisons();
try comptime testDifferentSizedFloatComparisons();
@ -396,7 +396,7 @@ test "@sqrt f16" {
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff and
!comptime std.Target.x86.featureSetHas(builtin.cpu.features, .f16c)) return error.SkipZigTest;
!comptime builtin.cpu.has(.x86, .f16c)) return error.SkipZigTest;
try testSqrt(f16);
try comptime testSqrt(f16);
@ -1140,7 +1140,7 @@ test "@abs f16" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff and
!comptime std.Target.x86.featureSetHas(builtin.cpu.features, .f16c)) return error.SkipZigTest;
!comptime builtin.cpu.has(.x86, .f16c)) return error.SkipZigTest;
try testFabs(f16);
try comptime testFabs(f16);
@ -1276,7 +1276,7 @@ test "@floor f32/f64" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff and
!comptime std.Target.x86.featureSetHas(builtin.cpu.features, .sse4_1)) return error.SkipZigTest;
!comptime builtin.cpu.has(.x86, .sse4_1)) return error.SkipZigTest;
try testFloor(f32);
try comptime testFloor(f32);
@ -1343,7 +1343,7 @@ test "@floor with vectors" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff and
!comptime std.Target.x86.featureSetHas(builtin.cpu.features, .sse4_1)) return error.SkipZigTest;
!comptime builtin.cpu.has(.x86, .sse4_1)) return error.SkipZigTest;
try testFloorWithVectors();
try comptime testFloorWithVectors();
@ -1377,7 +1377,7 @@ test "@ceil f32/f64" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff and
!comptime std.Target.x86.featureSetHas(builtin.cpu.features, .sse4_1)) return error.SkipZigTest;
!comptime builtin.cpu.has(.x86, .sse4_1)) return error.SkipZigTest;
try testCeil(f32);
try comptime testCeil(f32);
@ -1444,7 +1444,7 @@ test "@ceil with vectors" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff and
!comptime std.Target.x86.featureSetHas(builtin.cpu.features, .sse4_1)) return error.SkipZigTest;
!comptime builtin.cpu.has(.x86, .sse4_1)) return error.SkipZigTest;
try testCeilWithVectors();
try comptime testCeilWithVectors();
@ -1478,7 +1478,7 @@ test "@trunc f32/f64" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff and
!comptime std.Target.x86.featureSetHas(builtin.cpu.features, .sse4_1)) return error.SkipZigTest;
!comptime builtin.cpu.has(.x86, .sse4_1)) return error.SkipZigTest;
try testTrunc(f32);
try comptime testTrunc(f32);
@ -1545,7 +1545,7 @@ test "@trunc with vectors" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff and
!comptime std.Target.x86.featureSetHas(builtin.cpu.features, .sse4_1)) return error.SkipZigTest;
!comptime builtin.cpu.has(.x86, .sse4_1)) return error.SkipZigTest;
try testTruncWithVectors();
try comptime testTruncWithVectors();
@ -1568,7 +1568,7 @@ test "neg f16" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff and
!comptime std.Target.x86.featureSetHas(builtin.cpu.features, .f16c)) return error.SkipZigTest;
!comptime builtin.cpu.has(.x86, .f16c)) return error.SkipZigTest;
if (builtin.os.tag == .freebsd) {
// TODO file issue to track this failure


@ -475,7 +475,7 @@ test "division" {
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff and
!comptime std.Target.x86.featureSetHas(builtin.cpu.features, .f16c)) return error.SkipZigTest;
!comptime builtin.cpu.has(.x86, .f16c)) return error.SkipZigTest;
try testIntDivision();
try comptime testIntDivision();
@ -1930,7 +1930,7 @@ test "float vector division of comptime zero by runtime nan is nan" {
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff and
!comptime std.Target.x86.featureSetHas(builtin.cpu.features, .sse4_1)) return error.SkipZigTest;
!comptime builtin.cpu.has(.x86, .sse4_1)) return error.SkipZigTest;
const ct_zero: @Vector(1, f32) = .{0};
var rt_nan: @Vector(1, f32) = .{math.nan(f32)};


@ -10,7 +10,7 @@ test "@mulAdd" {
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff and
!comptime std.Target.x86.featureSetHas(builtin.cpu.features, .fma)) return error.SkipZigTest;
!comptime builtin.cpu.has(.x86, .fma)) return error.SkipZigTest;
try comptime testMulAdd();
try testMulAdd();
@ -143,7 +143,7 @@ test "vector f32" {
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff and
!comptime std.Target.x86.featureSetHas(builtin.cpu.features, .fma)) return error.SkipZigTest;
!comptime builtin.cpu.has(.x86, .fma)) return error.SkipZigTest;
try comptime vector32();
try vector32();
@ -171,7 +171,7 @@ test "vector f64" {
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff and
!comptime std.Target.x86.featureSetHas(builtin.cpu.features, .fma)) return error.SkipZigTest;
!comptime builtin.cpu.has(.x86, .fma)) return error.SkipZigTest;
try comptime vector64();
try vector64();


@ -251,7 +251,7 @@ test "array to vector with element type coercion" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff and
!comptime std.Target.x86.featureSetHas(builtin.cpu.features, .f16c)) return error.SkipZigTest;
!comptime builtin.cpu.has(.x86, .f16c)) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@ -1259,9 +1259,7 @@ test "byte vector initialized in inline function" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.cpu.arch == .aarch64_be and builtin.zig_backend == .stage2_llvm) return error.SkipZigTest;
if (comptime builtin.zig_backend == .stage2_llvm and builtin.cpu.arch == .x86_64 and
std.Target.x86.featureSetHas(builtin.cpu.features, .avx512f))
{
if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch == .x86_64 and comptime builtin.cpu.has(.x86, .avx512f)) {
// TODO https://github.com/ziglang/zig/issues/13279
return error.SkipZigTest;
}


@ -133,7 +133,7 @@ pub fn build(b: *std.Build) void {
.use_lld = false,
.root_module = test_mod,
});
if (!std.Target.x86.featureSetHas(target.result.cpu.features, .sse2)) {
if (!target.result.cpu.has(.x86, .sse2)) {
test_exe.bundle_compiler_rt = false;
test_mod.linkLibrary(compiler_rt_lib);
}
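
In a build script the same query runs on the resolved target; a hedged sketch of where target comes from, assuming the usual standardTargetOptions setup:

const std = @import("std");

pub fn build(b: *std.Build) void {
    const target = b.standardTargetOptions(.{});
    // Query the resolved target exactly as the compiler does internally.
    const needs_compiler_rt_lib = !target.result.cpu.has(.x86, .sse2);
    _ = needs_compiler_rt_lib;
}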


@ -17,7 +17,7 @@ pub const Gpr = switch (builtin.cpu.arch) {
.x86 => u32,
.x86_64 => u64,
};
pub const Sse = if (std.Target.x86.featureSetHas(builtin.cpu.features, .avx))
pub const Sse = if (builtin.cpu.has(.x86, .avx))
@Vector(32, u8)
else
@Vector(16, u8);
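
Because builtin.cpu is comptime-known, the branch resolves at compile time; a small sanity check (a sketch, assuming the Sse definition above and the file's builtin import):

comptime {
    if (builtin.cpu.has(.x86, .avx)) {
        if (@sizeOf(Sse) != 32) @compileError("expected 32-byte vectors with AVX");
    } else {
        if (@sizeOf(Sse) != 16) @compileError("expected 16-byte vectors without AVX");
    }
}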


@ -1087,9 +1087,7 @@ extern fn c_medium_vec(MediumVec) void;
extern fn c_ret_medium_vec() MediumVec;
test "medium simd vector" {
if (builtin.zig_backend == .stage2_x86_64 and
!comptime std.Target.x86.featureSetHas(builtin.cpu.features, .avx)) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and !comptime builtin.cpu.has(.x86, .avx)) return error.SkipZigTest;
if (builtin.cpu.arch.isPowerPC64()) return error.SkipZigTest;
c_medium_vec(.{ 1, 2, 3, 4 });


@ -25,7 +25,7 @@ pub fn main() !void {
\\const builtin = @import("builtin");
\\const std = @import("std");
\\const common = @import("common.zig");
\\const always_has_lse = std.Target.aarch64.featureSetHas(builtin.cpu.features, .lse);
\\const always_has_lse = builtin.cpu.has(.aarch64, .lse);
\\
\\/// This default is overridden at runtime after inspecting CPU properties.
\\/// It is intentionally not exported in order to make the machine code that