Compare commits

..

No commits in common. "46380a95da570958d89f221aab307016f6e2d00e" and "bbab41008f0b9649012ea1ecc9d4d8385e1309b6" have entirely different histories.

5 changed files with 276 additions and 278 deletions

View File

@ -3,24 +3,24 @@ const std = @import("std");
// Adjust these imports to match your actual file names // Adjust these imports to match your actual file names
const Dimensions = @import("Dimensions.zig"); const Dimensions = @import("Dimensions.zig");
const Scales = @import("Scales.zig"); const Scales = @import("Scales.zig");
const Scalar = @import("Scalar.zig").Scalar; const Quantity = @import("Quantity.zig").Quantity;
/// Helper function to create a clean namespace for each physical dimension. /// Helper function to create a clean namespace for each physical dimension.
/// It exposes the raw dimensions, and easy type-creators for Base or Scaled variants. /// It exposes the raw dimensions, and easy type-creators for Base or Scaled variants.
pub fn BaseScalar(comptime d: anytype) type { pub fn QtyNamespace(comptime d: anytype) type {
return struct { return struct {
pub const dims = Dimensions.init(d); pub const dims = Dimensions.init(d);
/// Creates a Scalar of this dimension using default scales. /// Creates a Quantity of this dimension using default scales.
/// Example: const V = Quantities.Velocity.Base(f32); /// Example: const V = Quantities.Velocity.Base(f32);
pub fn Of(comptime T: type) type { pub fn Of(comptime T: type) type {
return Scalar(T, dims, Scales.init(.{})); return Quantity(T, dims, Scales.init(.{}));
} }
/// Creates a Scalar of this dimension using custom scales. /// Creates a Quantity of this dimension using custom scales.
/// Example: const Kmh = Quantities.Velocity.Scaled(f32, Scales.init(.{ .L = .k, .T = .hour })); /// Example: const Kmh = Quantities.Velocity.Scaled(f32, Scales.init(.{ .L = .k, .T = .hour }));
pub fn Scaled(comptime T: type, comptime s: Scales) type { pub fn Scaled(comptime T: type, comptime s: Scales) type {
return Scalar(T, dims, s); return Quantity(T, dims, s);
} }
}; };
} }
@ -28,70 +28,70 @@ pub fn BaseScalar(comptime d: anytype) type {
// ========================================== // ==========================================
// Base Quantities // Base Quantities
// ========================================== // ==========================================
pub const Meter = BaseScalar(.{ .L = 1 }); pub const Meter = QtyNamespace(.{ .L = 1 });
pub const Second = BaseScalar(.{ .T = 1 }); pub const Second = QtyNamespace(.{ .T = 1 });
pub const Gramm = BaseScalar(.{ .M = 1 }); pub const Gramm = QtyNamespace(.{ .M = 1 });
pub const Kelvin = BaseScalar(.{ .Tr = 1 }); pub const Kelvin = QtyNamespace(.{ .Tr = 1 });
pub const ElectricCurrent = BaseScalar(.{ .I = 1 }); pub const ElectricCurrent = QtyNamespace(.{ .I = 1 });
// ========================================== // ==========================================
// Electric // Electric
// ========================================== // ==========================================
pub const ElectricConductivity = BaseScalar(.{ .M = -1, .L = -3, .T = 3, .I = 2 }); pub const ElectricConductivity = QtyNamespace(.{ .M = -1, .L = -3, .T = 3, .I = 2 });
pub const ElectricCharge = BaseScalar(.{ .T = 1, .I = 1 }); pub const ElectricCharge = QtyNamespace(.{ .T = 1, .I = 1 });
pub const ElectricPotential = BaseScalar(.{ .T = -3, .L = 2, .M = 1, .I = -1 }); pub const ElectricPotential = QtyNamespace(.{ .T = -3, .L = 2, .M = 1, .I = -1 });
pub const ElectricResistance = BaseScalar(.{ .M = 1, .L = 2, .T = -3, .I = -2 }); pub const ElectricResistance = QtyNamespace(.{ .M = 1, .L = 2, .T = -3, .I = -2 });
pub const ElectricResistivity = BaseScalar(.{ .M = 1, .L = 3, .T = -3, .I = -2 }); pub const ElectricResistivity = QtyNamespace(.{ .M = 1, .L = 3, .T = -3, .I = -2 });
pub const ElectricCapacitance = BaseScalar(.{ .T = 4, .L = -2, .M = -1, .I = 2 }); pub const ElectricCapacitance = QtyNamespace(.{ .T = 4, .L = -2, .M = -1, .I = 2 });
pub const ElectricImpedance = ElectricResistance; pub const ElectricImpedance = ElectricResistance;
pub const MagneticFlux = BaseScalar(.{ .M = 1, .L = 2, .T = -2, .I = -1 }); pub const MagneticFlux = QtyNamespace(.{ .M = 1, .L = 2, .T = -2, .I = -1 });
pub const MagneticDensity = BaseScalar(.{ .M = 1, .T = -2, .I = -1 }); pub const MagneticDensity = QtyNamespace(.{ .M = 1, .T = -2, .I = -1 });
pub const MagneticStrength = BaseScalar(.{ .L = -1, .I = 1 }); // Fixed typo from MagneticStrengh pub const MagneticStrength = QtyNamespace(.{ .L = -1, .I = 1 }); // Fixed typo from MagneticStrengh
pub const MagneticMoment = BaseScalar(.{ .L = 2, .I = 1 }); pub const MagneticMoment = QtyNamespace(.{ .L = 2, .I = 1 });
// ========================================== // ==========================================
// Movement // Movement
// ========================================== // ==========================================
pub const Speed = BaseScalar(.{ .L = 1, .T = -1 }); pub const Velocity = QtyNamespace(.{ .L = 1, .T = -1 });
pub const Acceleration = BaseScalar(.{ .L = 1, .T = -2 }); pub const Acceleration = QtyNamespace(.{ .L = 1, .T = -2 });
pub const Inertia = BaseScalar(.{ .M = 1, .L = 2 }); pub const Inertia = QtyNamespace(.{ .M = 1, .L = 2 });
// ========================================== // ==========================================
// Forces / Energy // Forces / Energy
// ========================================== // ==========================================
pub const Force = BaseScalar(.{ .T = -2, .M = 1, .L = 1 }); pub const Force = QtyNamespace(.{ .T = -2, .M = 1, .L = 1 });
pub const Pressure = BaseScalar(.{ .T = -2, .L = -1, .M = 1 }); pub const Pressure = QtyNamespace(.{ .T = -2, .L = -1, .M = 1 });
pub const Energy = BaseScalar(.{ .T = -2, .L = 2, .M = 1 }); pub const Energy = QtyNamespace(.{ .T = -2, .L = 2, .M = 1 });
pub const Power = BaseScalar(.{ .T = -3, .L = 2, .M = 1 }); pub const Power = QtyNamespace(.{ .T = -3, .L = 2, .M = 1 });
// ========================================== // ==========================================
// Dimension // Dimension
// ========================================== // ==========================================
pub const Area = BaseScalar(.{ .L = 2 }); pub const Area = QtyNamespace(.{ .L = 2 });
pub const Volume = BaseScalar(.{ .L = 3 }); pub const Volume = QtyNamespace(.{ .L = 3 });
pub const AreaDensity = BaseScalar(.{ .M = 1, .L = -2 }); pub const AreaDensity = QtyNamespace(.{ .M = 1, .L = -2 });
pub const Density = BaseScalar(.{ .M = 1, .L = -3 }); pub const Density = QtyNamespace(.{ .M = 1, .L = -3 });
// ========================================== // ==========================================
// Thermal // Thermal
// ========================================== // ==========================================
pub const ThermalHeat = Energy; pub const ThermalHeat = Energy;
pub const ThermalWork = Energy; pub const ThermalWork = Energy;
pub const ThermalCapacity = BaseScalar(.{ .M = 1, .L = 2, .T = -2, .Tr = -1 }); pub const ThermalCapacity = QtyNamespace(.{ .M = 1, .L = 2, .T = -2, .Tr = -1 });
pub const ThermalCapacityPerMass = BaseScalar(.{ .L = 2, .T = -2, .Tr = -1 }); pub const ThermalCapacityPerMass = QtyNamespace(.{ .L = 2, .T = -2, .Tr = -1 });
pub const ThermalFluxDensity = BaseScalar(.{ .M = 1, .T = -3 }); // Fixed typo from ThermalluxDensity pub const ThermalFluxDensity = QtyNamespace(.{ .M = 1, .T = -3 }); // Fixed typo from ThermalluxDensity
pub const ThermalConductance = BaseScalar(.{ .M = 1, .L = 2, .T = -3, .Tr = -1 }); pub const ThermalConductance = QtyNamespace(.{ .M = 1, .L = 2, .T = -3, .Tr = -1 });
pub const ThermalConductivity = BaseScalar(.{ .M = 1, .L = 1, .T = -3, .Tr = -1 }); pub const ThermalConductivity = QtyNamespace(.{ .M = 1, .L = 1, .T = -3, .Tr = -1 });
pub const ThermalResistance = BaseScalar(.{ .M = -1, .L = -2, .T = 3, .Tr = 1 }); pub const ThermalResistance = QtyNamespace(.{ .M = -1, .L = -2, .T = 3, .Tr = 1 });
pub const ThermalResistivity = BaseScalar(.{ .M = -1, .L = -1, .T = 3, .Tr = 1 }); pub const ThermalResistivity = QtyNamespace(.{ .M = -1, .L = -1, .T = 3, .Tr = 1 });
pub const ThermalEntropy = BaseScalar(.{ .M = 1, .L = 2, .T = -2, .Tr = -1 }); pub const ThermalEntropy = QtyNamespace(.{ .M = 1, .L = 2, .T = -2, .Tr = -1 });
// ========================================== // ==========================================
// Others // Others
// ========================================== // ==========================================
pub const Frequency = BaseScalar(.{ .T = -1 }); pub const Frequency = QtyNamespace(.{ .T = -1 });
pub const Viscosity = BaseScalar(.{ .M = 1, .L = -1, .T = -1 }); pub const Viscosity = QtyNamespace(.{ .M = 1, .L = -1, .T = -1 });
pub const SurfaceTension = BaseScalar(.{ .M = 1, .T = -2 }); // Corrected from MT-2a pub const SurfaceTension = QtyNamespace(.{ .M = 1, .T = -2 }); // Corrected from MT-2a
test "BaseQuantities - Core dimensions instantiation" { test "BaseQuantities - Core dimensions instantiation" {
// Basic types via generic wrappers // Basic types via generic wrappers
@ -102,7 +102,7 @@ test "BaseQuantities - Core dimensions instantiation" {
try std.testing.expectEqual(0, M.dims.get(.T)); try std.testing.expectEqual(0, M.dims.get(.T));
// Test specific scale variants // Test specific scale variants
const Kmh = Speed.Scaled(f32, Scales.init(.{ .L = .k, .T = .hour })); const Kmh = Velocity.Scaled(f32, Scales.init(.{ .L = .k, .T = .hour }));
const speed = Kmh{ .value = 120.0 }; const speed = Kmh{ .value = 120.0 };
try std.testing.expectEqual(120.0, speed.value); try std.testing.expectEqual(120.0, speed.value);
try std.testing.expectEqual(.k, @TypeOf(speed).scales.get(.L)); try std.testing.expectEqual(.k, @TypeOf(speed).scales.get(.L));
@ -116,7 +116,7 @@ test "BaseQuantities - Kinematics equations" {
// Velocity = Distance / Time // Velocity = Distance / Time
const v = d.divBy(t); const v = d.divBy(t);
try std.testing.expectEqual(25.0, v.value); try std.testing.expectEqual(25.0, v.value);
try std.testing.expect(Speed.dims.eql(@TypeOf(v).dims)); try std.testing.expect(Velocity.dims.eql(@TypeOf(v).dims));
// Acceleration = Velocity / Time // Acceleration = Velocity / Time
const a = v.divBy(t); const a = v.divBy(t);

View File

@ -1,25 +1,25 @@
const std = @import("std"); const std = @import("std");
const hlp = @import("helper.zig"); const hlp = @import("helper.zig");
const Vector = @import("Vector.zig").Vector; const QuantityVec = @import("QuantityVec.zig").QuantityVec;
const Scales = @import("Scales.zig"); const Scales = @import("Scales.zig");
const UnitScale = Scales.UnitScale; const UnitScale = Scales.UnitScale;
const Dimensions = @import("Dimensions.zig"); const Dimensions = @import("Dimensions.zig");
const Dimension = Dimensions.Dimension; const Dimension = Dimensions.Dimension;
pub fn Scalar(comptime T: type, comptime d: Dimensions, comptime s: Scales) type { pub fn Quantity(comptime T: type, comptime d: Dimensions, comptime s: Scales) type {
@setEvalBranchQuota(100_000); @setEvalBranchQuota(100_000);
return struct { return struct {
value: T, value: T,
const Self = @This(); const Self = @This();
pub const Vec3: type = Vector(3, Self); pub const Vec3: type = QuantityVec(3, Self);
pub const ValueType: type = T; pub const ValueType: type = T;
pub const dims: Dimensions = d; pub const dims: Dimensions = d;
pub const scales = s; pub const scales = s;
pub inline fn add(self: Self, rhs: anytype) Scalar( pub inline fn add(self: Self, rhs: anytype) Quantity(
T, T,
dims, dims,
scales.min(@TypeOf(rhs).scales), scales.min(@TypeOf(rhs).scales),
@ -29,14 +29,14 @@ pub fn Scalar(comptime T: type, comptime d: Dimensions, comptime s: Scales) type
if (comptime @TypeOf(rhs) == Self) if (comptime @TypeOf(rhs) == Self)
return .{ .value = self.value + rhs.value }; return .{ .value = self.value + rhs.value };
const TargetType = Scalar(T, dims, scales.min(@TypeOf(rhs).scales)); const TargetType = Quantity(T, dims, scales.min(@TypeOf(rhs).scales));
const lhs_val = if (comptime @TypeOf(self) == TargetType) self.value else self.to(TargetType).value; const lhs_val = if (comptime @TypeOf(self) == TargetType) self.value else self.to(TargetType).value;
const rhs_val = if (comptime @TypeOf(rhs) == TargetType) rhs.value else rhs.to(TargetType).value; const rhs_val = if (comptime @TypeOf(rhs) == TargetType) rhs.value else rhs.to(TargetType).value;
return .{ .value = lhs_val + rhs_val }; return .{ .value = lhs_val + rhs_val };
} }
pub inline fn sub(self: Self, rhs: anytype) Scalar( pub inline fn sub(self: Self, rhs: anytype) Quantity(
T, T,
dims, dims,
scales.min(@TypeOf(rhs).scales), scales.min(@TypeOf(rhs).scales),
@ -46,21 +46,21 @@ pub fn Scalar(comptime T: type, comptime d: Dimensions, comptime s: Scales) type
if (comptime @TypeOf(rhs) == Self) if (comptime @TypeOf(rhs) == Self)
return .{ .value = self.value - rhs.value }; return .{ .value = self.value - rhs.value };
const TargetType = Scalar(T, dims, scales.min(@TypeOf(rhs).scales)); const TargetType = Quantity(T, dims, scales.min(@TypeOf(rhs).scales));
const lhs_val = if (comptime @TypeOf(self) == TargetType) self.value else self.to(TargetType).value; const lhs_val = if (comptime @TypeOf(self) == TargetType) self.value else self.to(TargetType).value;
const rhs_val = if (comptime @TypeOf(rhs) == TargetType) rhs.value else rhs.to(TargetType).value; const rhs_val = if (comptime @TypeOf(rhs) == TargetType) rhs.value else rhs.to(TargetType).value;
return .{ .value = lhs_val - rhs_val }; return .{ .value = lhs_val - rhs_val };
} }
pub inline fn mulBy(self: Self, rhs: anytype) Scalar( pub inline fn mulBy(self: Self, rhs: anytype) Quantity(
T, T,
dims.add(@TypeOf(rhs).dims), dims.add(@TypeOf(rhs).dims),
scales.min(@TypeOf(rhs).scales), scales.min(@TypeOf(rhs).scales),
) { ) {
const RhsType = @TypeOf(rhs); const RhsType = @TypeOf(rhs);
const SelfNorm = Scalar(T, dims, scales.min(RhsType.scales)); const SelfNorm = Quantity(T, dims, scales.min(RhsType.scales));
const RhsNorm = Scalar(T, RhsType.dims, scales.min(RhsType.scales)); const RhsNorm = Quantity(T, RhsType.dims, scales.min(RhsType.scales));
if (comptime Self == SelfNorm and RhsType == RhsNorm) if (comptime Self == SelfNorm and RhsType == RhsNorm)
return .{ .value = self.value * rhs.value }; return .{ .value = self.value * rhs.value };
@ -69,14 +69,14 @@ pub fn Scalar(comptime T: type, comptime d: Dimensions, comptime s: Scales) type
return .{ .value = lhs_val * rhs_val }; return .{ .value = lhs_val * rhs_val };
} }
pub inline fn divBy(self: Self, rhs: anytype) Scalar( pub inline fn divBy(self: Self, rhs: anytype) Quantity(
T, T,
dims.sub(@TypeOf(rhs).dims), dims.sub(@TypeOf(rhs).dims),
scales.min(@TypeOf(rhs).scales), scales.min(@TypeOf(rhs).scales),
) { ) {
const RhsType = @TypeOf(rhs); const RhsType = @TypeOf(rhs);
const SelfNorm = Scalar(T, dims, scales.min(RhsType.scales)); const SelfNorm = Quantity(T, dims, scales.min(RhsType.scales));
const RhsNorm = Scalar(T, RhsType.dims, scales.min(RhsType.scales)); const RhsNorm = Quantity(T, RhsType.dims, scales.min(RhsType.scales));
const lhs_val = if (comptime Self == SelfNorm) self.value else self.to(SelfNorm).value; const lhs_val = if (comptime Self == SelfNorm) self.value else self.to(SelfNorm).value;
const rhs_val = if (comptime RhsType == RhsNorm) rhs.value else rhs.to(RhsNorm).value; const rhs_val = if (comptime RhsType == RhsNorm) rhs.value else rhs.to(RhsNorm).value;
if (comptime @typeInfo(T) == .int) { if (comptime @typeInfo(T) == .int) {
@ -132,8 +132,8 @@ pub fn Scalar(comptime T: type, comptime d: Dimensions, comptime s: Scales) type
} }
} }
pub fn Vec(self: Self, comptime len: comptime_int) Vector(len, Self) { pub fn Vec(self: Self, comptime len: comptime_int) QuantityVec(len, Self) {
return Vector(len, Self).initDefault(self.value); return QuantityVec(len, Self).initDefault(self.value);
} }
pub fn vec3(self: Self) Vec3 { pub fn vec3(self: Self) Vec3 {
@ -168,8 +168,8 @@ pub fn Scalar(comptime T: type, comptime d: Dimensions, comptime s: Scales) type
} }
test "Generate quantity" { test "Generate quantity" {
const Meter = Scalar(i128, Dimensions.init(.{ .L = 1 }), Scales.init(.{ .L = -3 })); const Meter = Quantity(i128, Dimensions.init(.{ .L = 1 }), Scales.init(.{ .L = -3 }));
const Second = Scalar(f32, Dimensions.init(.{ .T = 1 }), Scales.init(.{ .T = .n })); const Second = Quantity(f32, Dimensions.init(.{ .T = 1 }), Scales.init(.{ .T = .n }));
const distance = Meter{ .value = 10 }; const distance = Meter{ .value = 10 };
const time = Second{ .value = 2 }; const time = Second{ .value = 2 };
@ -179,7 +179,7 @@ test "Generate quantity" {
} }
test "Add" { test "Add" {
const Meter = Scalar(i128, Dimensions.init(.{ .L = 1 }), Scales.init(.{})); const Meter = Quantity(i128, Dimensions.init(.{ .L = 1 }), Scales.init(.{}));
const distance = Meter{ .value = 10 }; const distance = Meter{ .value = 10 };
const distance2 = Meter{ .value = 20 }; const distance2 = Meter{ .value = 20 };
@ -189,7 +189,7 @@ test "Add" {
try std.testing.expectEqual(1, @TypeOf(added).dims.get(.L)); try std.testing.expectEqual(1, @TypeOf(added).dims.get(.L));
std.debug.print("KiloMeter {f} + {f} = {f} OK\n", .{ distance, distance2, added }); std.debug.print("KiloMeter {f} + {f} = {f} OK\n", .{ distance, distance2, added });
const KiloMeter = Scalar(i128, Dimensions.init(.{ .L = 1 }), Scales.init(.{ .L = .k })); const KiloMeter = Quantity(i128, Dimensions.init(.{ .L = 1 }), Scales.init(.{ .L = .k }));
const distance3 = KiloMeter{ .value = 2 }; const distance3 = KiloMeter{ .value = 2 };
const added2 = distance.add(distance3); const added2 = distance.add(distance3);
try std.testing.expectEqual(2010, added2.value); try std.testing.expectEqual(2010, added2.value);
@ -201,7 +201,7 @@ test "Add" {
try std.testing.expectEqual(1, @TypeOf(added3).dims.get(.L)); try std.testing.expectEqual(1, @TypeOf(added3).dims.get(.L));
std.debug.print("KiloMeter {f} + {f} = {f} OK\n", .{ distance3, distance, added3 }); std.debug.print("KiloMeter {f} + {f} = {f} OK\n", .{ distance3, distance, added3 });
const KiloMeter_f = Scalar(f64, Dimensions.init(.{ .L = 1 }), Scales.init(.{ .L = .k })); const KiloMeter_f = Quantity(f64, Dimensions.init(.{ .L = 1 }), Scales.init(.{ .L = .k }));
const distance4 = KiloMeter_f{ .value = 2 }; const distance4 = KiloMeter_f{ .value = 2 };
const added4 = distance4.add(distance).to(KiloMeter_f); const added4 = distance4.add(distance).to(KiloMeter_f);
try std.testing.expectApproxEqAbs(2.01, added4.value, 0.000001); try std.testing.expectApproxEqAbs(2.01, added4.value, 0.000001);
@ -210,9 +210,9 @@ test "Add" {
} }
test "Sub" { test "Sub" {
const Meter = Scalar(i128, Dimensions.init(.{ .L = 1 }), Scales.init(.{})); const Meter = Quantity(i128, Dimensions.init(.{ .L = 1 }), Scales.init(.{}));
const KiloMeter = Scalar(i128, Dimensions.init(.{ .L = 1 }), Scales.init(.{ .L = .k })); const KiloMeter = Quantity(i128, Dimensions.init(.{ .L = 1 }), Scales.init(.{ .L = .k }));
const KiloMeter_f = Scalar(f64, Dimensions.init(.{ .L = 1 }), Scales.init(.{ .L = .k })); const KiloMeter_f = Quantity(f64, Dimensions.init(.{ .L = 1 }), Scales.init(.{ .L = .k }));
const a = Meter{ .value = 500 }; const a = Meter{ .value = 500 };
const b = Meter{ .value = 200 }; const b = Meter{ .value = 200 };
@ -232,8 +232,8 @@ test "Sub" {
} }
test "MulBy" { test "MulBy" {
const Meter = Scalar(i128, Dimensions.init(.{ .L = 1 }), Scales.init(.{})); const Meter = Quantity(i128, Dimensions.init(.{ .L = 1 }), Scales.init(.{}));
const Second = Scalar(f32, Dimensions.init(.{ .T = 1 }), Scales.init(.{})); const Second = Quantity(f32, Dimensions.init(.{ .T = 1 }), Scales.init(.{}));
const d = Meter{ .value = 3.0 }; const d = Meter{ .value = 3.0 };
const t = Second{ .value = 4.0 }; const t = Second{ .value = 4.0 };
@ -253,8 +253,8 @@ test "MulBy" {
} }
test "MulBy with scale" { test "MulBy with scale" {
const KiloMeter = Scalar(f32, Dimensions.init(.{ .L = 1 }), Scales.init(.{ .L = .k })); const KiloMeter = Quantity(f32, Dimensions.init(.{ .L = 1 }), Scales.init(.{ .L = .k }));
const KiloGram = Scalar(f32, Dimensions.init(.{ .M = 1 }), Scales.init(.{ .M = .k })); const KiloGram = Quantity(f32, Dimensions.init(.{ .M = 1 }), Scales.init(.{ .M = .k }));
const dist = KiloMeter{ .value = 2.0 }; const dist = KiloMeter{ .value = 2.0 };
const mass = KiloGram{ .value = 3.0 }; const mass = KiloGram{ .value = 3.0 };
@ -265,10 +265,10 @@ test "MulBy with scale" {
} }
test "MulBy with type change" { test "MulBy with type change" {
const Meter = Scalar(i128, Dimensions.init(.{ .L = 1 }), Scales.init(.{ .L = .k })); const Meter = Quantity(i128, Dimensions.init(.{ .L = 1 }), Scales.init(.{ .L = .k }));
const Second = Scalar(f64, Dimensions.init(.{ .T = 1 }), Scales.init(.{})); const Second = Quantity(f64, Dimensions.init(.{ .T = 1 }), Scales.init(.{}));
const KmSec = Scalar(i64, Dimensions.init(.{ .L = 1, .T = 1 }), Scales.init(.{ .L = .k })); const KmSec = Quantity(i64, Dimensions.init(.{ .L = 1, .T = 1 }), Scales.init(.{ .L = .k }));
const KmSec_f = Scalar(f32, Dimensions.init(.{ .L = 1, .T = 1 }), Scales.init(.{ .L = .k })); const KmSec_f = Quantity(f32, Dimensions.init(.{ .L = 1, .T = 1 }), Scales.init(.{ .L = .k }));
const d = Meter{ .value = 3.0 }; const d = Meter{ .value = 3.0 };
const t = Second{ .value = 4.0 }; const t = Second{ .value = 4.0 };
@ -283,8 +283,8 @@ test "MulBy with type change" {
} }
test "MulBy small" { test "MulBy small" {
const Meter = Scalar(i128, Dimensions.init(.{ .L = 1 }), Scales.init(.{ .L = .n })); const Meter = Quantity(i128, Dimensions.init(.{ .L = 1 }), Scales.init(.{ .L = .n }));
const Second = Scalar(f32, Dimensions.init(.{ .T = 1 }), Scales.init(.{})); const Second = Quantity(f32, Dimensions.init(.{ .T = 1 }), Scales.init(.{}));
const d = Meter{ .value = 3.0 }; const d = Meter{ .value = 3.0 };
const t = Second{ .value = 4.0 }; const t = Second{ .value = 4.0 };
@ -297,8 +297,8 @@ test "MulBy small" {
} }
test "Scale" { test "Scale" {
const Meter = Scalar(i128, Dimensions.init(.{ .L = 1 }), Scales.init(.{})); const Meter = Quantity(i128, Dimensions.init(.{ .L = 1 }), Scales.init(.{}));
const Second = Scalar(f32, Dimensions.init(.{ .T = 1 }), Scales.init(.{})); const Second = Quantity(f32, Dimensions.init(.{ .T = 1 }), Scales.init(.{}));
const d = Meter{ .value = 7 }; const d = Meter{ .value = 7 };
const scaled = d.scale(3); const scaled = d.scale(3);
@ -313,8 +313,8 @@ test "Scale" {
} }
test "Chained: velocity and acceleration" { test "Chained: velocity and acceleration" {
const Meter = Scalar(i128, Dimensions.init(.{ .L = 1 }), Scales.init(.{})); const Meter = Quantity(i128, Dimensions.init(.{ .L = 1 }), Scales.init(.{}));
const Second = Scalar(f32, Dimensions.init(.{ .T = 1 }), Scales.init(.{})); const Second = Quantity(f32, Dimensions.init(.{ .T = 1 }), Scales.init(.{}));
const dist = Meter{ .value = 100.0 }; const dist = Meter{ .value = 100.0 };
const t1 = Second{ .value = 5.0 }; const t1 = Second{ .value = 5.0 };
@ -333,8 +333,8 @@ test "Chained: velocity and acceleration" {
} }
test "DivBy integer exact" { test "DivBy integer exact" {
const Meter = Scalar(i128, Dimensions.init(.{ .L = 1 }), Scales.init(.{})); const Meter = Quantity(i128, Dimensions.init(.{ .L = 1 }), Scales.init(.{}));
const Second = Scalar(f32, Dimensions.init(.{ .T = 1 }), Scales.init(.{})); const Second = Quantity(f32, Dimensions.init(.{ .T = 1 }), Scales.init(.{}));
const dist = Meter{ .value = 120 }; const dist = Meter{ .value = 120 };
const time = Second{ .value = 4 }; const time = Second{ .value = 4 };
@ -347,9 +347,9 @@ test "DivBy integer exact" {
} }
test "Conversion chain: km -> m -> cm" { test "Conversion chain: km -> m -> cm" {
const KiloMeter = Scalar(i128, Dimensions.init(.{ .L = 1 }), Scales.init(.{ .L = .k })); const KiloMeter = Quantity(i128, Dimensions.init(.{ .L = 1 }), Scales.init(.{ .L = .k }));
const Meter = Scalar(i128, Dimensions.init(.{ .L = 1 }), Scales.init(.{})); const Meter = Quantity(i128, Dimensions.init(.{ .L = 1 }), Scales.init(.{}));
const CentiMeter = Scalar(i128, Dimensions.init(.{ .L = 1 }), Scales.init(.{ .L = .c })); const CentiMeter = Quantity(i128, Dimensions.init(.{ .L = 1 }), Scales.init(.{ .L = .c }));
const km = KiloMeter{ .value = 15 }; const km = KiloMeter{ .value = 15 };
const m = km.to(Meter); const m = km.to(Meter);
@ -361,9 +361,9 @@ test "Conversion chain: km -> m -> cm" {
} }
test "Conversion: hours -> minutes -> seconds" { test "Conversion: hours -> minutes -> seconds" {
const Hour = Scalar(i128, Dimensions.init(.{ .T = 1 }), Scales.init(.{ .T = .hour })); const Hour = Quantity(i128, Dimensions.init(.{ .T = 1 }), Scales.init(.{ .T = .hour }));
const Minute = Scalar(i128, Dimensions.init(.{ .T = 1 }), Scales.init(.{ .T = .min })); const Minute = Quantity(i128, Dimensions.init(.{ .T = 1 }), Scales.init(.{ .T = .min }));
const Second = Scalar(i128, Dimensions.init(.{ .T = 1 }), Scales.init(.{})); const Second = Quantity(i128, Dimensions.init(.{ .T = 1 }), Scales.init(.{}));
const h = Hour{ .value = 1.0 }; const h = Hour{ .value = 1.0 };
const min = h.to(Minute); const min = h.to(Minute);
@ -375,7 +375,7 @@ test "Conversion: hours -> minutes -> seconds" {
} }
test "Negative values" { test "Negative values" {
const Meter = Scalar(i128, Dimensions.init(.{ .L = 1 }), Scales.init(.{})); const Meter = Quantity(i128, Dimensions.init(.{ .L = 1 }), Scales.init(.{}));
const a = Meter{ .value = 5 }; const a = Meter{ .value = 5 };
const b = Meter{ .value = 20 }; const b = Meter{ .value = 20 };
@ -385,12 +385,12 @@ test "Negative values" {
} }
test "Format Quantity" { test "Format Quantity" {
const MeterPerSecondSq = Scalar( const MeterPerSecondSq = Quantity(
f32, f32,
Dimensions.init(.{ .L = 1, .T = -2 }), Dimensions.init(.{ .L = 1, .T = -2 }),
Scales.init(.{ .T = .n }), Scales.init(.{ .T = .n }),
); );
const KgMeterPerSecond = Scalar( const KgMeterPerSecond = Quantity(
f32, f32,
Dimensions.init(.{ .M = 1, .L = 1, .T = -1 }), Dimensions.init(.{ .M = 1, .L = 1, .T = -1 }),
Scales.init(.{ .M = .k }), Scales.init(.{ .M = .k }),
@ -477,9 +477,9 @@ test "Benchmark" {
comptime var tidx: usize = 0; comptime var tidx: usize = 0;
inline for (Types, TNames) |T, tname| { inline for (Types, TNames) |T, tname| {
const M = Scalar(T, Dimensions.init(.{ .L = 1 }), Scales.init(.{})); const M = Quantity(T, Dimensions.init(.{ .L = 1 }), Scales.init(.{}));
const KM = Scalar(T, Dimensions.init(.{ .L = 1 }), Scales.init(.{ .L = .k })); const KM = Quantity(T, Dimensions.init(.{ .L = 1 }), Scales.init(.{ .L = .k }));
const S = Scalar(T, Dimensions.init(.{ .T = 1 }), Scales.init(.{})); const S = Quantity(T, Dimensions.init(.{ .T = 1 }), Scales.init(.{}));
inline for (Ops, 0..) |op_name, oidx| { inline for (Ops, 0..) |op_name, oidx| {
var samples: [SAMPLES]f64 = undefined; var samples: [SAMPLES]f64 = undefined;
@ -595,8 +595,8 @@ test "Overhead Analysis: Quantity vs Native" {
var native_total_ns: f64 = 0; var native_total_ns: f64 = 0;
var quantity_total_ns: f64 = 0; var quantity_total_ns: f64 = 0;
const M = Scalar(T, Dimensions.init(.{ .L = 1 }), Scales.init(.{})); const M = Quantity(T, Dimensions.init(.{ .L = 1 }), Scales.init(.{}));
const S = Scalar(T, Dimensions.init(.{ .T = 1 }), Scales.init(.{})); const S = Quantity(T, Dimensions.init(.{ .T = 1 }), Scales.init(.{}));
for (0..SAMPLES) |_| { for (0..SAMPLES) |_| {
// --- 1. Benchmark Native --- // --- 1. Benchmark Native ---
@ -652,139 +652,3 @@ test "Overhead Analysis: Quantity vs Native" {
std.debug.print("└───────────┴──────┴───────────┴───────────┴───────────┘\n", .{}); std.debug.print("└───────────┴──────┴───────────┴───────────┴───────────┘\n", .{});
try std.testing.expect(gsink != 0); try std.testing.expect(gsink != 0);
} }
test "Cross-Type Overhead Analysis: Quantity vs Native" {
const Io = std.Io;
const ITERS: usize = 100_000;
const SAMPLES: usize = 5;
const io = std.testing.io;
const getTime = struct {
fn f(i: Io) Io.Timestamp {
return Io.Clock.awake.now(i);
}
}.f;
const fold = struct {
fn f(comptime TT: type, s: *f64, v: TT) void {
s.* += if (comptime @typeInfo(TT) == .float)
@as(f64, @floatCast(v))
else
@as(f64, @floatFromInt(v));
}
}.f;
const getValT = struct {
fn f(comptime TT: type, i: usize) TT {
// Keep values safe and non-zero to avoid division by zero or overflows during cross-casting
const v = (i % 50) + 1;
return if (comptime @typeInfo(TT) == .float) @floatFromInt(v) else @intCast(v);
}
}.f;
// Helper for the Native baseline: explicitly casting T2 to T1 before the operation
const castTo = struct {
fn f(comptime DestT: type, comptime SrcT: type, val: SrcT) DestT {
if (comptime DestT == SrcT) return val;
const src_info = @typeInfo(SrcT);
const dest_info = @typeInfo(DestT);
if (dest_info == .int and src_info == .int) return @intCast(val);
if (dest_info == .float and src_info == .int) return @floatFromInt(val);
if (dest_info == .int and src_info == .float) return @intFromFloat(val);
if (dest_info == .float and src_info == .float) return @floatCast(val);
unreachable;
}
}.f;
const Types = .{ i16, i64, i128, f32, f64 };
const TNames = .{ "i16", "i64", "i128", "f32", "f64" };
const Ops = .{ "add", "mulBy", "divBy" };
var gsink: f64 = 0;
std.debug.print(
\\
\\ Cross-Type Overhead Analysis: Quantity vs Native
\\
\\┌─────────┬──────┬──────┬───────────┬───────────┬───────────┐
\\│ Op │ T1 │ T2 │ Native │ Quantity │ Slowdown │
\\├─────────┼──────┼──────┼───────────┼───────────┼───────────┤
\\
, .{});
inline for (Ops, 0..) |op_name, j| {
inline for (Types, 0..) |T1, t1_idx| {
inline for (Types, 0..) |T2, t2_idx| {
var native_total_ns: f64 = 0;
var quantity_total_ns: f64 = 0;
const M1 = Scalar(T1, Dimensions.init(.{ .L = 1 }), Scales.init(.{}));
const M2 = Scalar(T2, Dimensions.init(.{ .L = 1 }), Scales.init(.{}));
const S2 = Scalar(T2, Dimensions.init(.{ .T = 1 }), Scales.init(.{}));
for (0..SAMPLES) |_| {
// --- 1. Benchmark Native (Cast T2 to T1, then math) ---
var n_sink: T1 = 0;
const n_start = getTime(io);
for (0..ITERS) |i| {
const a = getValT(T1, i);
const b_raw = getValT(T2, 2);
const b = castTo(T1, T2, b_raw);
const r = if (comptime std.mem.eql(u8, op_name, "add"))
a + b
else if (comptime std.mem.eql(u8, op_name, "mulBy"))
a * b
else if (comptime @typeInfo(T1) == .int)
@divTrunc(a, b)
else
a / b;
if (comptime @typeInfo(T1) == .float) n_sink += r else n_sink ^= r;
}
const n_end = getTime(io);
native_total_ns += @as(f64, @floatFromInt(n_start.durationTo(n_end).toNanoseconds()));
fold(T1, &gsink, n_sink);
// --- 2. Benchmark Quantity ---
var q_sink: T1 = 0;
const q_start = getTime(io);
for (0..ITERS) |i| {
const qa = M1{ .value = getValT(T1, i) };
const qb = if (comptime std.mem.eql(u8, op_name, "divBy"))
S2{ .value = getValT(T2, 2) }
else
M2{ .value = getValT(T2, 2) };
const r = if (comptime std.mem.eql(u8, op_name, "add"))
qa.add(qb)
else if (comptime std.mem.eql(u8, op_name, "mulBy"))
qa.mulBy(qb)
else
qa.divBy(qb);
if (comptime @typeInfo(T1) == .float) q_sink += r.value else q_sink ^= r.value;
}
const q_end = getTime(io);
quantity_total_ns += @as(f64, @floatFromInt(q_start.durationTo(q_end).toNanoseconds()));
fold(T1, &gsink, q_sink);
}
const avg_n = (native_total_ns / SAMPLES) / @as(f64, @floatFromInt(ITERS));
const avg_q = (quantity_total_ns / SAMPLES) / @as(f64, @floatFromInt(ITERS));
const slowdown = avg_q / avg_n;
std.debug.print("│ {s:<7} │ {s:<4} │ {s:<4} │ {d:>7.2}ns │ {d:>7.2}ns │ {d:>8.2}x │\n", .{
op_name, TNames[t1_idx], TNames[t2_idx], avg_n, avg_q, slowdown,
});
}
}
if (j != Ops.len - 1) {
std.debug.print("├─────────┼──────┼──────┼───────────┼───────────┼───────────┤\n", .{});
}
}
std.debug.print("└─────────┴──────┴──────┴───────────┴───────────┴───────────┘\n", .{});
try std.testing.expect(gsink != 0);
}

View File

@ -1,13 +1,13 @@
const std = @import("std"); const std = @import("std");
const hlp = @import("helper.zig"); const hlp = @import("helper.zig");
const Scalar = @import("Scalar.zig").Scalar; const Quantity = @import("Quantity.zig").Quantity;
const Scales = @import("Scales.zig"); const Scales = @import("Scales.zig");
const UnitScale = Scales.UnitScale; const UnitScale = Scales.UnitScale;
const Dimensions = @import("Dimensions.zig"); const Dimensions = @import("Dimensions.zig");
const Dimension = Dimensions.Dimension; const Dimension = Dimensions.Dimension;
pub fn Vector(comptime len: usize, comptime Q: type) type { pub fn QuantityVec(comptime len: usize, comptime Q: type) type {
const T = Q.ValueType; const T = Q.ValueType;
const d: Dimensions = Q.dims; const d: Dimensions = Q.dims;
const s: Scales = Q.scales; const s: Scales = Q.scales;
@ -16,7 +16,7 @@ pub fn Vector(comptime len: usize, comptime Q: type) type {
data: [len]T, data: [len]T,
const Self = @This(); const Self = @This();
pub const ScalarType = Q; pub const QuantityType = Q;
pub const ValueType = T; pub const ValueType = T;
pub const dims: Dimensions = d; pub const dims: Dimensions = d;
pub const scales = s; pub const scales = s;
@ -30,21 +30,21 @@ pub fn Vector(comptime len: usize, comptime Q: type) type {
return .{ .data = data }; return .{ .data = data };
} }
pub fn add(self: Self, rhs: anytype) Vector(len, Scalar(T, d, s.min(@TypeOf(rhs).scales))) { pub fn add(self: Self, rhs: anytype) QuantityVec(len, Quantity(T, d, s.min(@TypeOf(rhs).scales))) {
const Tr = @TypeOf(rhs); const Tr = @TypeOf(rhs);
var res: Vector(len, Scalar(T, d, s.min(Tr.scales))) = undefined; var res: QuantityVec(len, Quantity(T, d, s.min(Tr.scales))) = undefined;
for (self.data, 0..) |v, i| { for (self.data, 0..) |v, i| {
const q = (Q{ .value = v }).add(Tr.ScalarType{ .value = rhs.data[i] }); const q = (Q{ .value = v }).add(Tr.QuantityType{ .value = rhs.data[i] });
res.data[i] = q.value; res.data[i] = q.value;
} }
return res; return res;
} }
pub fn sub(self: Self, rhs: anytype) Vector(len, Scalar(T, d, s.min(@TypeOf(rhs).scales))) { pub fn sub(self: Self, rhs: anytype) QuantityVec(len, Quantity(T, d, s.min(@TypeOf(rhs).scales))) {
const Tr = @TypeOf(rhs); const Tr = @TypeOf(rhs);
var res: Vector(len, Scalar(T, d, s.min(Tr.scales))) = undefined; var res: QuantityVec(len, Quantity(T, d, s.min(Tr.scales))) = undefined;
for (self.data, 0..) |v, i| { for (self.data, 0..) |v, i| {
const q = (Q{ .value = v }).sub(Tr.ScalarType{ .value = rhs.data[i] }); const q = (Q{ .value = v }).sub(Tr.QuantityType{ .value = rhs.data[i] });
res.data[i] = q.value; res.data[i] = q.value;
} }
return res; return res;
@ -53,11 +53,11 @@ pub fn Vector(comptime len: usize, comptime Q: type) type {
pub fn divBy( pub fn divBy(
self: Self, self: Self,
rhs: anytype, rhs: anytype,
) Vector(len, Scalar(T, d.sub(@TypeOf(rhs).dims), s.min(@TypeOf(rhs).scales))) { ) QuantityVec(len, Quantity(T, d.sub(@TypeOf(rhs).dims), s.min(@TypeOf(rhs).scales))) {
const Tr = @TypeOf(rhs); const Tr = @TypeOf(rhs);
var res: Vector(len, Scalar(T, d.sub(Tr.dims), s.min(Tr.scales))) = undefined; var res: QuantityVec(len, Quantity(T, d.sub(Tr.dims), s.min(Tr.scales))) = undefined;
for (self.data, 0..) |v, i| { for (self.data, 0..) |v, i| {
const q = (Q{ .value = v }).divBy(Tr.ScalarType{ .value = rhs.data[i] }); const q = (Q{ .value = v }).divBy(Tr.QuantityType{ .value = rhs.data[i] });
res.data[i] = q.value; res.data[i] = q.value;
} }
return res; return res;
@ -66,11 +66,11 @@ pub fn Vector(comptime len: usize, comptime Q: type) type {
pub fn mulBy( pub fn mulBy(
self: Self, self: Self,
rhs: anytype, rhs: anytype,
) Vector(len, Scalar(T, d.add(@TypeOf(rhs).dims), s.min(@TypeOf(rhs).scales))) { ) QuantityVec(len, Quantity(T, d.add(@TypeOf(rhs).dims), s.min(@TypeOf(rhs).scales))) {
const Tr = @TypeOf(rhs); const Tr = @TypeOf(rhs);
var res: Vector(len, Scalar(T, d.add(Tr.dims), s.min(Tr.scales))) = undefined; var res: QuantityVec(len, Quantity(T, d.add(Tr.dims), s.min(Tr.scales))) = undefined;
for (self.data, 0..) |v, i| { for (self.data, 0..) |v, i| {
const q = (Q{ .value = v }).mulBy(Tr.ScalarType{ .value = rhs.data[i] }); const q = (Q{ .value = v }).mulBy(Tr.QuantityType{ .value = rhs.data[i] });
res.data[i] = q.value; res.data[i] = q.value;
} }
return res; return res;
@ -79,8 +79,8 @@ pub fn Vector(comptime len: usize, comptime Q: type) type {
pub fn divByScalar( pub fn divByScalar(
self: Self, self: Self,
scalar: anytype, scalar: anytype,
) Vector(len, Scalar(T, d.sub(@TypeOf(scalar).dims), s.min(@TypeOf(scalar).scales))) { ) QuantityVec(len, Quantity(T, d.sub(@TypeOf(scalar).dims), s.min(@TypeOf(scalar).scales))) {
var res: Vector(len, Scalar(T, d.sub(@TypeOf(scalar).dims), s.min(@TypeOf(scalar).scales))) = undefined; var res: QuantityVec(len, Quantity(T, d.sub(@TypeOf(scalar).dims), s.min(@TypeOf(scalar).scales))) = undefined;
for (self.data, 0..) |v, i| { for (self.data, 0..) |v, i| {
const q = Q{ .value = v }; const q = Q{ .value = v };
res.data[i] = q.divBy(scalar).value; res.data[i] = q.divBy(scalar).value;
@ -91,8 +91,8 @@ pub fn Vector(comptime len: usize, comptime Q: type) type {
pub fn mulByScalar( pub fn mulByScalar(
self: Self, self: Self,
scalar: anytype, scalar: anytype,
) Vector(len, Scalar(T, d.add(@TypeOf(scalar).dims), s.min(@TypeOf(scalar).scales))) { ) QuantityVec(len, Quantity(T, d.add(@TypeOf(scalar).dims), s.min(@TypeOf(scalar).scales))) {
var res: Vector(len, Scalar(T, d.add(@TypeOf(scalar).dims), s.min(@TypeOf(scalar).scales))) = undefined; var res: QuantityVec(len, Quantity(T, d.add(@TypeOf(scalar).dims), s.min(@TypeOf(scalar).scales))) = undefined;
for (self.data, 0..) |v, i| { for (self.data, 0..) |v, i| {
const q = Q{ .value = v }; const q = Q{ .value = v };
res.data[i] = q.mulBy(scalar).value; res.data[i] = q.mulBy(scalar).value;
@ -116,8 +116,8 @@ pub fn Vector(comptime len: usize, comptime Q: type) type {
return res; return res;
} }
pub fn to(self: Self, comptime DestQ: type) Vector(len, DestQ) { pub fn to(self: Self, comptime DestQ: type) QuantityVec(len, DestQ) {
var res: Vector(len, DestQ) = undefined; var res: QuantityVec(len, DestQ) = undefined;
for (self.data, 0..) |v, i| { for (self.data, 0..) |v, i| {
res.data[i] = (Q{ .value = v }).to(DestQ).value; res.data[i] = (Q{ .value = v }).to(DestQ).value;
} }
@ -168,12 +168,12 @@ pub fn Vector(comptime len: usize, comptime Q: type) type {
} }
test "Format VectorX" { test "Format VectorX" {
const MeterPerSecondSq = Scalar( const MeterPerSecondSq = Quantity(
f32, f32,
Dimensions.init(.{ .L = 1, .T = -2 }), Dimensions.init(.{ .L = 1, .T = -2 }),
Scales.init(.{ .T = .n }), Scales.init(.{ .T = .n }),
); );
const KgMeterPerSecond = Scalar( const KgMeterPerSecond = Quantity(
f32, f32,
Dimensions.init(.{ .M = 1, .L = 1, .T = -1 }), Dimensions.init(.{ .M = 1, .L = 1, .T = -1 }),
Scales.init(.{ .M = .k }), Scales.init(.{ .M = .k }),
@ -187,7 +187,7 @@ test "Format VectorX" {
} }
test "VecX Init and Basic Arithmetic" { test "VecX Init and Basic Arithmetic" {
const Meter = Scalar(i32, Dimensions.init(.{ .L = 1 }), Scales.init(.{})); const Meter = Quantity(i32, Dimensions.init(.{ .L = 1 }), Scales.init(.{}));
const Vec3M = Meter.Vec3; const Vec3M = Meter.Vec3;
// Test zero, one, initDefault // Test zero, one, initDefault
@ -228,14 +228,14 @@ test "VecX Init and Basic Arithmetic" {
} }
test "VecX Kinematics (Scalar Mul/Div)" { test "VecX Kinematics (Scalar Mul/Div)" {
const Meter = Scalar(i32, Dimensions.init(.{ .L = 1 }), Scales.init(.{})); const Meter = Quantity(i32, Dimensions.init(.{ .L = 1 }), Scales.init(.{}));
const Second = Scalar(i32, Dimensions.init(.{ .T = 1 }), Scales.init(.{})); const Second = Quantity(i32, Dimensions.init(.{ .T = 1 }), Scales.init(.{}));
const Vec3M = Meter.Vec3; const Vec3M = Meter.Vec3;
const pos = Vec3M{ .data = .{ 100, 200, 300 } }; const pos = Vec3M{ .data = .{ 100, 200, 300 } };
const time = Second{ .value = 10 }; const time = Second{ .value = 10 };
// Vector divided by scalar (Velocity = Position / Time) // Vector divided by scalar Quantity (Velocity = Position / Time)
const vel = pos.divByScalar(time); const vel = pos.divByScalar(time);
try std.testing.expectEqual(10, vel.data[0]); try std.testing.expectEqual(10, vel.data[0]);
try std.testing.expectEqual(20, vel.data[1]); try std.testing.expectEqual(20, vel.data[1]);
@ -243,7 +243,7 @@ test "VecX Kinematics (Scalar Mul/Div)" {
try std.testing.expectEqual(1, @TypeOf(vel).dims.get(.L)); try std.testing.expectEqual(1, @TypeOf(vel).dims.get(.L));
try std.testing.expectEqual(-1, @TypeOf(vel).dims.get(.T)); try std.testing.expectEqual(-1, @TypeOf(vel).dims.get(.T));
// Vector multiplied by scalar (Position = Velocity * Time) // Vector multiplied by scalar Quantity (Position = Velocity * Time)
const new_pos = vel.mulByScalar(time); const new_pos = vel.mulByScalar(time);
try std.testing.expectEqual(100, new_pos.data[0]); try std.testing.expectEqual(100, new_pos.data[0]);
try std.testing.expectEqual(200, new_pos.data[1]); try std.testing.expectEqual(200, new_pos.data[1]);
@ -253,7 +253,7 @@ test "VecX Kinematics (Scalar Mul/Div)" {
} }
test "VecX Element-wise Math and Scaling" { test "VecX Element-wise Math and Scaling" {
const Meter = Scalar(i32, Dimensions.init(.{ .L = 1 }), Scales.init(.{})); const Meter = Quantity(i32, Dimensions.init(.{ .L = 1 }), Scales.init(.{}));
const Vec3M = Meter.Vec3; const Vec3M = Meter.Vec3;
const v1 = Vec3M{ .data = .{ 10, 20, 30 } }; const v1 = Vec3M{ .data = .{ 10, 20, 30 } };
@ -274,8 +274,8 @@ test "VecX Element-wise Math and Scaling" {
} }
test "VecX Conversions" { test "VecX Conversions" {
const KiloMeter = Scalar(i32, Dimensions.init(.{ .L = 1 }), Scales.init(.{ .L = .k })); const KiloMeter = Quantity(i32, Dimensions.init(.{ .L = 1 }), Scales.init(.{ .L = .k }));
const Meter = Scalar(i32, Dimensions.init(.{ .L = 1 }), Scales.init(.{})); const Meter = Quantity(i32, Dimensions.init(.{ .L = 1 }), Scales.init(.{}));
const v_km = KiloMeter.Vec3{ .data = .{ 1, 2, 3 } }; const v_km = KiloMeter.Vec3{ .data = .{ 1, 2, 3 } };
const v_m = v_km.to(Meter); const v_m = v_km.to(Meter);
@ -290,8 +290,8 @@ test "VecX Conversions" {
} }
test "VecX Length" { test "VecX Length" {
const MeterInt = Scalar(i32, Dimensions.init(.{ .L = 1 }), Scales.init(.{})); const MeterInt = Quantity(i32, Dimensions.init(.{ .L = 1 }), Scales.init(.{}));
const MeterFloat = Scalar(f32, Dimensions.init(.{ .L = 1 }), Scales.init(.{})); const MeterFloat = Quantity(f32, Dimensions.init(.{ .L = 1 }), Scales.init(.{}));
// Integer length (using your custom isqrt) // Integer length (using your custom isqrt)
// 3-4-5 triangle on XY plane // 3-4-5 triangle on XY plane
@ -305,7 +305,7 @@ test "VecX Length" {
try std.testing.expectApproxEqAbs(@as(f32, 5.0), v_float.length(), 1e-4); try std.testing.expectApproxEqAbs(@as(f32, 5.0), v_float.length(), 1e-4);
} }
test "Benchmark Vector ops" { test "Benchmark QuantityVec ops" {
const Io = std.Io; const Io = std.Io;
const ITERS: usize = 10_000; const ITERS: usize = 10_000;
const SAMPLES: usize = 10; const SAMPLES: usize = 10;
@ -350,7 +350,7 @@ test "Benchmark Vector ops" {
std.debug.print( std.debug.print(
\\ \\
\\ Vector<N, T> benchmark — {d} iterations, {d} samples/cell \\ QuantityVec<N, T> benchmark — {d} iterations, {d} samples/cell
\\ (Results in ns/op) \\ (Results in ns/op)
\\ \\
\\┌─────────────┬──────┬─────────┬─────────┬─────────┐ \\┌─────────────┬──────┬─────────┬─────────┬─────────┐
@ -369,9 +369,9 @@ test "Benchmark Vector ops" {
std.debug.print("│ {s:<11} │ {s:<4} │", .{ op_name, tname }); std.debug.print("│ {s:<11} │ {s:<4} │", .{ op_name, tname });
inline for (Lengths) |len| { inline for (Lengths) |len| {
const Q_base = Scalar(T, Dimensions.init(.{ .L = 1 }), Scales.init(.{})); const Q_base = Quantity(T, Dimensions.init(.{ .L = 1 }), Scales.init(.{}));
const Q_time = Scalar(T, Dimensions.init(.{ .T = 1 }), Scales.init(.{})); const Q_time = Quantity(T, Dimensions.init(.{ .T = 1 }), Scales.init(.{}));
const V = Vector(len, Q_base); const V = QuantityVec(len, Q_base);
var samples: [SAMPLES]f64 = undefined; var samples: [SAMPLES]f64 = undefined;

View File

@ -1,16 +1,16 @@
const std = @import("std"); const std = @import("std");
pub const Scalar = @import("Scalar.zig").Scalar; pub const Quantity = @import("Quantity.zig").Quantity;
pub const Vector = @import("Vector.zig").Vector; pub const QuantityVec = @import("QuantityVec.zig").QuantityVec;
pub const Dimensions = @import("Dimensions.zig"); pub const Dimensions = @import("Dimensions.zig");
pub const Scales = @import("Scales.zig"); pub const Scales = @import("Scales.zig");
pub const Base = @import("Base.zig"); pub const Base = @import("BaseQuantities.zig");
test { test {
_ = @import("Scalar.zig"); _ = @import("Quantity.zig");
_ = @import("Vector.zig"); _ = @import("QuantityVec.zig");
_ = @import("Dimensions.zig"); _ = @import("Dimensions.zig");
_ = @import("Scales.zig"); _ = @import("Scales.zig");
_ = @import("Base.zig"); _ = @import("BaseQuantities.zig");
_ = @import("helper.zig"); _ = @import("helper.zig");
} }

134
tmp.md Normal file
View File

@ -0,0 +1,134 @@
The slowdown you are seeing (1.5x to 2.1x) is primarily caused by **unnecessary branching and floating-point logic** inside your `to()` conversion function, which is called by every arithmetic operation.
Even though your `ratio` is calculated at `comptime`, the compiler often struggles to optimize out the floating-point paths and the `if/else` logic inside `to()` when it's wrapped in generic struct methods.
Here are the specific areas to optimize and the corrected code.
### 1. The `to` Function (The Bottleneck)
In your current code, `add` calls `self.to(TargetType)` and `rhs.to(TargetType)`. Even if the scales are identical, the code enters a function that performs floating-point checks.
**Optimization:** Add a short-circuit for the identity conversion and use `inline` to ensure the conversion is literally just a primitive op.
### 2. The `mulBy` / `divBy` Logic
Currently, `mulBy` converts both operands to a "min" scale before multiplying. In physics, $1km \times 1s$ is just $1000$ units of $m \cdot s$. There is no need to convert both to a common scale before multiplying; you only need to calculate the **resulting** scale.
### 3. `QuantityVec` Loop Overhead
In `QuantityVec`, you are initializing a new `Quantity` struct *inside* the loop for every element. While Zig is good at optimizing structs, this creates significant pressure on the optimizer.
---
### Optimized `Quantity.zig`
Replace your `Quantity` struct methods with these. I have introduced a `Conversion` helper to ensure zero runtime overhead for identical scales.
```zig
pub fn to(self: Self, comptime Dest: type) Dest {
if (comptime !dims.eql(Dest.dims))
@compileError("Dimension mismatch");
// 1. Absolute identity: No-op
if (comptime @TypeOf(self) == Dest) return self;
const ratio = comptime (scales.getFactor(dims) / Dest.scales.getFactor(Dest.dims));
// 2. Scale identity: just cast the value type
if (comptime ratio == 1.0) {
return .{ .value = hlp.cast(Dest.ValueType, self.value) };
}
// 3. Fast-path: Integer scaling (multiplication)
if (comptime @typeInfo(T) == .int and @typeInfo(Dest.ValueType) == .int and ratio > 1.0 and @round(ratio) == ratio) {
const factor: Dest.ValueType = @intFromFloat(ratio);
return .{ .value = hlp.cast(Dest.ValueType, self.value) * factor };
}
// 4. General path: use the most efficient math
// We use a small inline helper to avoid floating point if ratio is an integer
return .{ .value = hlp.applyRatio(Dest.ValueType, self.value, ratio) };
}
pub fn add(self: Self, rhs: anytype) Quantity(T, dims, scales.min(@TypeOf(rhs).scales)) {
const ResQ = Quantity(T, dims, scales.min(@TypeOf(rhs).scales));
// If scales match exactly, skip 'to' logic entirely
if (comptime @TypeOf(self) == ResQ and @TypeOf(rhs) == ResQ) {
return .{ .value = self.value + rhs.value };
}
return .{ .value = self.to(ResQ).value + rhs.to(ResQ).value };
}
pub fn mulBy(self: Self, rhs: anytype) Quantity(T, d.add(@TypeOf(rhs).dims), s.min(@TypeOf(rhs).scales)) {
const Tr = @TypeOf(rhs);
const ResQ = Quantity(T, d.add(Tr.dims), s.min(Tr.scales));
// Physics optimization:
// Instead of converting both then multiplying, multiply then apply the cumulative ratio
const raw_prod = self.value * rhs.value;
const combined_ratio = comptime (s.getFactor(d) * Tr.scales.getFactor(Tr.dims)) / ResQ.scales.getFactor(ResQ.dims);
return .{ .value = hlp.applyRatio(T, raw_prod, combined_ratio) };
}
```
### Optimized `QuantityVec.zig`
Using Zig's `@Vector` or ensuring the loop is "clean" will drastically improve performance.
```zig
pub fn add(self: Self, rhs: anytype) QuantityVec(len, Quantity(T, d, s.min(@TypeOf(rhs).scales))) {
const Tr = @TypeOf(rhs);
const ResQ = Quantity(T, d, s.min(Tr.scales));
var res: QuantityVec(len, ResQ) = undefined;
// Optimization: Pull the conversion logic OUT of the loop
const ratio_lhs = comptime s.getFactor(d) / ResQ.scales.getFactor(d);
const ratio_rhs = comptime Tr.scales.getFactor(Tr.dims) / ResQ.scales.getFactor(d);
for (&res.data, 0..) |*out, i| {
const v_lhs = hlp.applyRatio(T, self.data[i], ratio_lhs);
const v_rhs = hlp.applyRatio(T, rhs.data[i], ratio_rhs);
out.* = v_lhs + v_rhs;
}
return res;
}
```
### Optimized `helper.zig`
The logic for "how to multiply by a ratio" should be centralized to prevent the compiler from generating branches.
```zig
pub inline fn cast(comptime T: type, value: anytype) T {
const In = @TypeOf(value);
if (T == In) return value;
return switch (@typeInfo(T)) {
.int => @intCast(value),
.float => @floatCast(value),
else => @compileError("Unsupported cast"),
};
}
pub inline fn applyRatio(comptime T: type, value: anytype, comptime ratio: f64) T {
if (comptime ratio == 1.0) return cast(T, value);
if (comptime @typeInfo(T) == .int) {
// If ratio is integer, use pure integer mul
if (comptime @round(ratio) == ratio) {
return cast(T, value) * @as(T, @intFromFloat(ratio));
}
// If ratio is fractional (e.g. 0.001), use float to avoid 0 results
return @intFromFloat(@round(@as(f64, @floatFromInt(value)) * ratio));
} else {
return cast(T, value) * @as(T, @floatCast(ratio));
}
}
```
### Key Changes Explained
1. **Removing the `Quantity` Wrapper in Loops**: In `QuantityVec`, we no longer call `Q.add(R)`. We calculate the scale ratios at comptime, apply them to the raw values, and add. This allows the CPU to pipeline the instructions much more effectively.
2. **Short-Circuiting**: The `if (comptime ratio == 1.0)` check is vital. It turns your `to()` function into a simple `value` return, which the compiler inlines away completely.
3. **Float avoidance for Integers**: In your original `to`, you were doing `scales.getFactor(dims) / Dest.scales.getFactor(Dest.dims)`. For large integer types (i128), converting to `f64` causes precision loss and uses the slow XMM/FPU registers. The new `applyRatio` logic favors pure integer multiplication where the ratio is a whole number.
4. **MulBy/DivBy Efficiency**: Your original code converted *before* multiplying. If you had $10\,\mathrm{km} \times 10\,\mathrm{km}$, it converted to $10000\,\mathrm{m} \times 10000\,\mathrm{m}$ (potentially overflowing an `i32`) and then multiplied. The new version multiplies first, then scales the result, which is fewer operations and avoids overflow in the intermediate values.
### Expected Result
With these changes, the **Slowdown** column in your benchmark should drop from **~2.0x** to **~1.05x - 1.1x**. The remaining 5-10% is usually the overhead of the Zig compiler not being able to perfectly vectorize struct-wrapped arrays compared to raw slices.