Added test to BaseQuantity and a benchmark for Vectors

This commit is contained in:
AdrienBouvais 2026-04-21 14:19:22 +02:00
parent f2e18da797
commit 52e58829eb
4 changed files with 196 additions and 9 deletions

View File

@ -3,8 +3,7 @@ const std = @import("std");
// Adjust these imports to match your actual file names
const Dimensions = @import("Dimensions.zig");
const Scales = @import("Scales.zig");
const quantity = @import("quantity.zig");
const Quantity = quantity.Quantity;
const Quantity = @import("Quantity.zig").Quantity;
/// Helper function to create a clean namespace for each physical dimension.
/// It exposes the raw dimensions, and easy type-creators for Base or Scaled variants.
@ -93,3 +92,62 @@ pub const ThermalEntropy = QtyNamespace(.{ .M = 1, .L = 2, .T = -2, .Tr = -1 });
pub const Frequency = QtyNamespace(.{ .T = -1 });
pub const Viscosity = QtyNamespace(.{ .M = 1, .L = -1, .T = -1 });
pub const SurfaceTension = QtyNamespace(.{ .M = 1, .T = -2 }); // Corrected from MT-2a
test "BaseQuantities - Core dimensions instantiation" {
// Basic types via generic wrappers
const M = Meter.Base(f32);
const distance = M{ .value = 100.0 };
try std.testing.expectEqual(100.0, distance.value);
try std.testing.expectEqual(1, M.dims.get(.L));
try std.testing.expectEqual(0, M.dims.get(.T));
// Test specific scale variants
const Kmh = Velocity.Scaled(f32, Scales.init(.{ .L = .k, .T = .hour }));
const speed = Kmh{ .value = 120.0 };
try std.testing.expectEqual(120.0, speed.value);
try std.testing.expectEqual(.k, @TypeOf(speed).scales.get(.L));
try std.testing.expectEqual(.hour, @TypeOf(speed).scales.get(.T));
}
test "BaseQuantities - Kinematics equations" {
const d = Meter.Base(f32){ .value = 50.0 };
const t = Second.Base(f32){ .value = 2.0 };
// Velocity = Distance / Time
const v = d.divBy(t);
try std.testing.expectEqual(25.0, v.value);
try std.testing.expect(Velocity.dims.eql(@TypeOf(v).dims));
// Acceleration = Velocity / Time
const a = v.divBy(t);
try std.testing.expectEqual(12.5, a.value);
try std.testing.expect(Acceleration.dims.eql(@TypeOf(a).dims));
}
test "BaseQuantities - Dynamics (Force and Work)" {
// 10 kg
const m = Gramm.Scaled(f32, Scales.init(.{ .M = .k })){ .value = 10.0 };
// 9.8 m/s^2
const a = Acceleration.Base(f32){ .value = 9.8 };
// Force = mass * acceleration
const f = m.mulBy(a);
try std.testing.expectEqual(98000, f.value);
try std.testing.expect(Force.dims.eql(@TypeOf(f).dims));
// Energy (Work) = Force * distance
const distance = Meter.Base(f32){ .value = 5.0 };
const energy = f.mulBy(distance);
try std.testing.expectEqual(490000, energy.value);
try std.testing.expect(Energy.dims.eql(@TypeOf(energy).dims));
}
test "BaseQuantities - Electric combinations" {
const current = ElectricCurrent.Base(f32){ .value = 2.0 }; // 2 A
const time = Second.Base(f32){ .value = 3.0 }; // 3 s
// Charge = Current * time
const charge = current.mulBy(time);
try std.testing.expectEqual(6.0, charge.value);
try std.testing.expect(ElectricCharge.dims.eql(@TypeOf(charge).dims));
}

View File

@ -1,7 +1,7 @@
const std = @import("std");
const hlp = @import("helper.zig");
const Quantity = @import("Quantity.zig");
const Quantity = @import("Quantity.zig").Quantity;
const Scales = @import("Scales.zig");
const UnitScale = Scales.UnitScale;
const Dimensions = @import("Dimensions.zig");
@ -305,3 +305,124 @@ test "VecX Length" {
try std.testing.expectApproxEqAbs(@as(f32, 25.0), v_float.lengthSqr(), 1e-4);
try std.testing.expectApproxEqAbs(@as(f32, 5.0), v_float.length(), 1e-4);
}
test "Benchmark QuantityVec ops" {
const Io = std.Io;
const ITERS: usize = 10_000;
const SAMPLES: usize = 10;
var gsink: f64 = 0;
// In Zig 0.14+, we use the testing IO for clock access in tests
const io = std.testing.io;
const getTime = struct {
fn f(i: Io) Io.Timestamp {
return Io.Clock.awake.now(i);
}
}.f;
const getVal = struct {
fn f(comptime TT: type, i: usize, comptime mask: u7) TT {
const v: u8 = @as(u8, @truncate(i & @as(usize, mask))) + 1;
return if (comptime @typeInfo(TT) == .float) @floatFromInt(v) else @intCast(v);
}
}.f;
const fold = struct {
fn f(comptime TT: type, s: *f64, v: TT) void {
s.* += if (comptime @typeInfo(TT) == .float)
@as(f64, @floatCast(v))
else
@as(f64, @floatFromInt(v));
}
}.f;
const computeStats = struct {
fn f(samples: []f64, iters: usize) f64 {
std.mem.sort(f64, samples, {}, std.sort.asc(f64));
const mid = samples.len / 2;
const median_ns = if (samples.len % 2 == 0)
(samples[mid - 1] + samples[mid]) / 2.0
else
samples[mid];
return median_ns / @as(f64, @floatFromInt(iters));
}
}.f;
std.debug.print(
\\
\\ QuantityVec<N, T> benchmark — {d} iterations, {d} samples/cell
\\ (Results in ns/op)
\\
\\┌─────────────┬──────┬─────────┬─────────┬─────────┐
\\│ Operation │ Type │ Len=3 │ Len=4 │ Len=16 │
\\├─────────────┼──────┼─────────┼─────────┼─────────┤
\\
, .{ ITERS, SAMPLES });
const Types = .{ i32, i64, i128, f32, f64 };
const TNames = .{ "i32", "i64", "i128", "f32", "f64" };
const Lengths = .{ 3, 4, 16 };
const Ops = .{ "add", "scale", "mulByScalar", "length" };
inline for (Ops, 0..) |op_name, o_idx| {
inline for (Types, TNames) |T, tname| {
std.debug.print("│ {s:<11} │ {s:<4} │", .{ op_name, tname });
inline for (Lengths) |len| {
const Q_base = Quantity(T, Dimensions.init(.{ .L = 1 }), Scales.init(.{}));
const Q_time = Quantity(T, Dimensions.init(.{ .T = 1 }), Scales.init(.{}));
const V = QuantityVec(len, Q_base);
var samples: [SAMPLES]f64 = undefined;
for (0..SAMPLES) |s_idx| {
var sink: T = 0;
const t_start = getTime(io);
for (0..ITERS) |i| {
const v1 = V.initDefault(getVal(T, i, 63));
if (comptime std.mem.eql(u8, op_name, "add")) {
const v2 = V.initDefault(getVal(T, i +% 7, 63));
const res = v1.add(v2);
for (res.data) |val| {
if (comptime @typeInfo(T) == .float) sink += val else sink ^= val;
}
} else if (comptime std.mem.eql(u8, op_name, "scale")) {
const sc = getVal(T, i +% 2, 63);
const res = v1.scale(sc);
for (res.data) |val| {
if (comptime @typeInfo(T) == .float) sink += val else sink ^= val;
}
} else if (comptime std.mem.eql(u8, op_name, "mulByScalar")) {
const s_val = Q_time{ .value = getVal(T, i +% 2, 63) };
const res = v1.mulByScalar(s_val);
for (res.data) |val| {
if (comptime @typeInfo(T) == .float) sink += val else sink ^= val;
}
} else if (comptime std.mem.eql(u8, op_name, "length")) {
const r = v1.length();
if (comptime @typeInfo(T) == .float) sink += r else sink ^= r;
}
}
const t_end = getTime(io);
samples[s_idx] = @as(f64, @floatFromInt(t_start.durationTo(t_end).toNanoseconds()));
fold(T, &gsink, sink);
}
const median_ns_per_op = computeStats(&samples, ITERS);
std.debug.print(" {d:>7.1} │", .{median_ns_per_op});
}
std.debug.print("\n", .{});
}
if (o_idx < Ops.len - 1) {
std.debug.print("├─────────────┼──────┼─────────┼─────────┼─────────┤\n", .{});
}
}
std.debug.print("└─────────────┴──────┴─────────┴─────────┴─────────┘\n", .{});
std.debug.print("\nAnti-optimisation sink: {d:.4}\n", .{gsink});
try std.testing.expect(gsink != 0);
}

View File

@ -82,6 +82,7 @@ pub fn set(self: *Scales, key: Dimension, val: UnitScale) void {
}
pub fn min(comptime s1: Scales, comptime s2: Scales) Scales {
@setEvalBranchQuota(10_000);
var out = Scales.initFill(.none);
for (std.enums.values(Dimension)) |dim|
out.set(dim, if (s1.get(dim).getFactorInt() > s2.get(dim).getFactorInt()) s2.get(dim) else s1.get(dim));

View File

@ -1,9 +1,16 @@
const std = @import("std");
const hlp = @import("helper.zig");
const Scales = @import("Scales.zig");
const UnitScale = Scales.UnitScale;
const Dimensions = @import("Dimensions.zig");
const Dimension = Dimensions.Dimension;
pub const Quantity = @import("Quantity.zig").Quantity;
pub const QuantityVec = @import("QuantityVec.zig").QuantityVec;
pub const Dimensions = @import("Dimensions.zig");
pub const Scales = @import("Scales.zig");
pub const Base = @import("BaseQuantities.zig");
pub fn main(_: std.process.Init) void {}
test {
_ = @import("Quantity.zig");
_ = @import("QuantityVec.zig");
_ = @import("Dimensions.zig");
_ = @import("Scales.zig");
_ = @import("BaseQuantities.zig");
_ = @import("helper.zig");
}