Merge branch 'tensor'
All checks were successful
Deploy MkDocs to Garage / build-and-deploy (push) Successful in 40s
All checks were successful
Deploy MkDocs to Garage / build-and-deploy (push) Successful in 40s
This commit is contained in:
commit
de2e9cce68
@ -2,7 +2,7 @@ const std = @import("std");
|
||||
|
||||
pub fn build(b: *std.Build) void {
|
||||
const target = b.standardTargetOptions(.{});
|
||||
const optimize = b.standardOptimizeOption(.{});
|
||||
const optimize = b.standardOptimizeOption(.{ .preferred_optimize_mode = .ReleaseFast });
|
||||
|
||||
// 1. Define the module so other projects can import it
|
||||
_ = b.addModule("dimal", .{
|
||||
|
||||
11
current release.md
Normal file
11
current release.md
Normal file
@ -0,0 +1,11 @@
|
||||
- Changed Quantity to Tensor, which can take any shape and is backed by a single @Vector.
|
||||
The point is to make it easy to add WebGPU support on top of this.
|
||||
Scalars suffer in performance though; I will work on that.
|
||||
|
||||
Maybe I can build a Jupyter-like web interface with cells for dimensional analysis.
|
||||
I could:
|
||||
- Use cells with a toy language
|
||||
- A nice debugger to display current variables with their dimensions, types and values
|
||||
- Realtime errors (compile on every change and display errors on the cell)
|
||||
- Integrate a small graphics API that uses a Raylib canvas
|
||||
- Could generate templates at comptime =o
|
||||
69
src/Base.zig
69
src/Base.zig
@ -3,34 +3,39 @@ const std = @import("std");
|
||||
// Adjust these imports to match your actual file names
|
||||
const Dimensions = @import("Dimensions.zig");
|
||||
const Scales = @import("Scales.zig");
|
||||
const Scalar = @import("Quantity.zig").Scalar;
|
||||
const Tensor = @import("Tensor.zig").Tensor;
|
||||
|
||||
fn PhysicalConstant(comptime d: Dimensions.ArgOpts, comptime val: f64, comptime s: Scales.ArgOpts) type {
|
||||
return struct {
|
||||
const dims = Dimensions.init(d);
|
||||
const scales = Scales.init(s);
|
||||
pub const dims = Dimensions.init(d);
|
||||
pub const scales = Scales.init(s);
|
||||
|
||||
/// Instantiates the constant into a specific numeric type.
|
||||
pub fn Of(comptime T: type) Scalar(T, d, s) {
|
||||
return .{ .data = @splat(@as(T, @floatCast(val))) };
|
||||
pub fn Of(comptime T: type) Tensor(T, d, s, &.{1}) {
|
||||
const casted_val: T = switch (@typeInfo(T)) {
|
||||
.float => @floatCast(val),
|
||||
.int => @intFromFloat(val),
|
||||
else => @compileError("Unsupported type for PhysicalConstant"),
|
||||
};
|
||||
return Tensor(T, d, s, &.{1}).splat(casted_val);
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
fn BaseScalar(comptime d: Dimensions.ArgOpts) type {
|
||||
return struct {
|
||||
const dims = Dimensions.init(d);
|
||||
pub const dims = Dimensions.init(d);
|
||||
|
||||
/// Creates a Scalar of this dimension using default scales.
|
||||
/// Example: const V = Quantities.Velocity.Base(f32);
|
||||
/// Example: const V = Quantities.Velocity.Of(f32);
|
||||
pub fn Of(comptime T: type) type {
|
||||
return Scalar(T, d, .{});
|
||||
return Tensor(T, d, .{}, &.{1});
|
||||
}
|
||||
|
||||
/// Creates a Scalar of this dimension using custom scales.
|
||||
/// Example: const Kmh = Quantities.Velocity.Scaled(f32, Scales.init(.{ .L = .k, .T = .hour }));
|
||||
/// Example: const Kmh = Quantities.Velocity.Scaled(f32, .{ .L = .k, .T = .hour });
|
||||
pub fn Scaled(comptime T: type, comptime s: Scales.ArgOpts) type {
|
||||
return Scalar(T, d, s);
|
||||
return Tensor(T, d, s, &.{1});
|
||||
}
|
||||
};
|
||||
}
|
||||
@ -107,7 +112,7 @@ pub const ElectricCapacitance = BaseScalar(.{ .T = 4, .L = -2, .M = -1, .I = 2 }
|
||||
pub const ElectricImpedance = ElectricResistance;
|
||||
pub const MagneticFlux = BaseScalar(.{ .M = 1, .L = 2, .T = -2, .I = -1 });
|
||||
pub const MagneticDensity = BaseScalar(.{ .M = 1, .T = -2, .I = -1 });
|
||||
pub const MagneticStrength = BaseScalar(.{ .L = -1, .I = 1 }); // Fixed typo from MagneticStrengh
|
||||
pub const MagneticStrength = BaseScalar(.{ .L = -1, .I = 1 });
|
||||
pub const MagneticMoment = BaseScalar(.{ .L = 2, .I = 1 });
|
||||
|
||||
// ==========================================
|
||||
@ -140,7 +145,7 @@ pub const ThermalHeat = Energy;
|
||||
pub const ThermalWork = Energy;
|
||||
pub const ThermalCapacity = BaseScalar(.{ .M = 1, .L = 2, .T = -2, .Tr = -1 });
|
||||
pub const ThermalCapacityPerMass = BaseScalar(.{ .L = 2, .T = -2, .Tr = -1 });
|
||||
pub const ThermalFluxDensity = BaseScalar(.{ .M = 1, .T = -3 }); // Fixed typo from ThermalluxDensity
|
||||
pub const ThermalFluxDensity = BaseScalar(.{ .M = 1, .T = -3 });
|
||||
pub const ThermalConductance = BaseScalar(.{ .M = 1, .L = 2, .T = -3, .Tr = -1 });
|
||||
pub const ThermalConductivity = BaseScalar(.{ .M = 1, .L = 1, .T = -3, .Tr = -1 });
|
||||
pub const ThermalResistance = BaseScalar(.{ .M = -1, .L = -2, .T = 3, .Tr = 1 });
|
||||
@ -152,20 +157,24 @@ pub const ThermalEntropy = BaseScalar(.{ .M = 1, .L = 2, .T = -2, .Tr = -1 });
|
||||
// ==========================================
|
||||
pub const Frequency = BaseScalar(.{ .T = -1 });
|
||||
pub const Viscosity = BaseScalar(.{ .M = 1, .L = -1, .T = -1 });
|
||||
pub const SurfaceTension = BaseScalar(.{ .M = 1, .T = -2 }); // Corrected from MT-2a
|
||||
pub const SurfaceTension = BaseScalar(.{ .M = 1, .T = -2 });
|
||||
|
||||
// ==========================================
|
||||
// Tests
|
||||
// ==========================================
|
||||
|
||||
test "BaseQuantities - Core dimensions instantiation" {
|
||||
// Basic types via generic wrappers
|
||||
const M = Meter.Of(f32);
|
||||
const distance = M.splat(100);
|
||||
try std.testing.expectEqual(100.0, distance.value());
|
||||
try std.testing.expectEqual(100.0, distance.data[0]);
|
||||
try std.testing.expectEqual(1, M.dims.get(.L));
|
||||
try std.testing.expectEqual(0, M.dims.get(.T));
|
||||
|
||||
// Test specific scale variants
|
||||
const Kmh = Speed.Scaled(f32, .{ .L = .k, .T = .hour });
|
||||
const speed = Kmh.splat(120);
|
||||
try std.testing.expectEqual(120.0, speed.value());
|
||||
try std.testing.expectEqual(120.0, speed.data[0]);
|
||||
try std.testing.expectEqual(.k, @TypeOf(speed).scales.get(.L));
|
||||
try std.testing.expectEqual(.hour, @TypeOf(speed).scales.get(.T));
|
||||
}
|
||||
@ -176,13 +185,13 @@ test "BaseQuantities - Kinematics equations" {
|
||||
|
||||
// Velocity = Distance / Time
|
||||
const v = d.div(t);
|
||||
try std.testing.expectEqual(25.0, v.value());
|
||||
try std.testing.expect(Speed.dims.eql(@TypeOf(v).dims));
|
||||
try std.testing.expectEqual(25.0, v.data[0]);
|
||||
try comptime std.testing.expect(Speed.dims.eql(@TypeOf(v).dims));
|
||||
|
||||
// Acceleration = Velocity / Time
|
||||
const a = v.div(t);
|
||||
try std.testing.expectEqual(12.5, a.value());
|
||||
try std.testing.expect(Acceleration.dims.eql(@TypeOf(a).dims));
|
||||
try std.testing.expectEqual(12.5, a.data[0]);
|
||||
try comptime std.testing.expect(Acceleration.dims.eql(@TypeOf(a).dims));
|
||||
}
|
||||
|
||||
test "BaseQuantities - Dynamics (Force and Work)" {
|
||||
@ -193,14 +202,14 @@ test "BaseQuantities - Dynamics (Force and Work)" {
|
||||
|
||||
// Force = mass * acceleration
|
||||
const f = m.mul(a);
|
||||
try std.testing.expectEqual(98, f.value());
|
||||
try std.testing.expect(Force.dims.eql(@TypeOf(f).dims));
|
||||
try std.testing.expectEqual(98, f.data[0]);
|
||||
try comptime std.testing.expect(Force.dims.eql(@TypeOf(f).dims));
|
||||
|
||||
// Energy (Work) = Force * distance
|
||||
const distance = Meter.Of(f32).splat(5.0);
|
||||
const energy = f.mul(distance);
|
||||
try std.testing.expectEqual(490, energy.value());
|
||||
try std.testing.expect(Energy.dims.eql(@TypeOf(energy).dims));
|
||||
try std.testing.expectEqual(490, energy.data[0]);
|
||||
try comptime std.testing.expect(Energy.dims.eql(@TypeOf(energy).dims));
|
||||
}
|
||||
|
||||
test "BaseQuantities - Electric combinations" {
|
||||
@ -209,26 +218,26 @@ test "BaseQuantities - Electric combinations" {
|
||||
|
||||
// Charge = Current * time
|
||||
const charge = current.mul(time);
|
||||
try std.testing.expectEqual(6.0, charge.value());
|
||||
try std.testing.expect(ElectricCharge.dims.eql(@TypeOf(charge).dims));
|
||||
try std.testing.expectEqual(6.0, charge.data[0]);
|
||||
try comptime std.testing.expect(ElectricCharge.dims.eql(@TypeOf(charge).dims));
|
||||
}
|
||||
|
||||
test "Constants - Initialization and dimension checks" {
|
||||
// Speed of Light
|
||||
const c = Constants.SpeedOfLight.Of(f64);
|
||||
try std.testing.expectEqual(299792458.0, c.value());
|
||||
try std.testing.expectEqual(299792458.0, c.data[0]);
|
||||
try std.testing.expectEqual(1, @TypeOf(c).dims.get(.L));
|
||||
try std.testing.expectEqual(-1, @TypeOf(c).dims.get(.T));
|
||||
|
||||
// Electron Mass (verifying scale as well)
|
||||
const me = Constants.ElectronMass.Of(f64);
|
||||
try std.testing.expectEqual(9.1093837139e-31, me.value());
|
||||
try std.testing.expectEqual(9.1093837139e-31, me.data[0]);
|
||||
try std.testing.expectEqual(1, @TypeOf(me).dims.get(.M));
|
||||
try std.testing.expectEqual(.k, @TypeOf(me).scales.get(.M)); // Should be scaled to kg
|
||||
|
||||
// Boltzmann Constant (Complex derived dimensions)
|
||||
const kb = Constants.Boltzmann.Of(f64);
|
||||
try std.testing.expectEqual(1.380649e-23, kb.value());
|
||||
try std.testing.expectEqual(1.380649e-23, kb.data[0]);
|
||||
try std.testing.expectEqual(1, @TypeOf(kb).dims.get(.M));
|
||||
try std.testing.expectEqual(2, @TypeOf(kb).dims.get(.L));
|
||||
try std.testing.expectEqual(-2, @TypeOf(kb).dims.get(.T));
|
||||
@ -237,7 +246,7 @@ test "Constants - Initialization and dimension checks" {
|
||||
|
||||
// Vacuum Permittivity
|
||||
const eps0 = Constants.VacuumPermittivity.Of(f64);
|
||||
try std.testing.expectEqual(8.8541878188e-12, eps0.value());
|
||||
try std.testing.expectEqual(8.8541878188e-12, eps0.data[0]);
|
||||
try std.testing.expectEqual(-1, @TypeOf(eps0).dims.get(.M));
|
||||
try std.testing.expectEqual(-3, @TypeOf(eps0).dims.get(.L));
|
||||
try std.testing.expectEqual(4, @TypeOf(eps0).dims.get(.T));
|
||||
@ -245,7 +254,7 @@ test "Constants - Initialization and dimension checks" {
|
||||
|
||||
// Fine Structure Constant (Dimensionless)
|
||||
const alpha = Constants.FineStructure.Of(f64);
|
||||
try std.testing.expectEqual(0.0072973525643, alpha.value());
|
||||
try std.testing.expectEqual(0.0072973525643, alpha.data[0]);
|
||||
try std.testing.expectEqual(0, @TypeOf(alpha).dims.get(.M));
|
||||
try std.testing.expectEqual(0, @TypeOf(alpha).dims.get(.L));
|
||||
}
|
||||
|
||||
@ -49,81 +49,81 @@ data: std.EnumArray(Dimension, comptime_int),
|
||||
|
||||
/// Create a `Dimensions` from a struct literal, e.g. `.{ .L = 1, .T = -1 }`.
|
||||
/// Unspecified dimensions default to 0.
|
||||
pub fn init(comptime init_val: ArgOpts) Self {
|
||||
pub fn init(init_val: ArgOpts) Self {
|
||||
var s = Self{ .data = std.EnumArray(Dimension, comptime_int).initFill(0) };
|
||||
inline for (std.meta.fields(@TypeOf(init_val))) |f|
|
||||
for (std.meta.fields(@TypeOf(init_val))) |f|
|
||||
s.data.set(@field(Dimension, f.name), @field(init_val, f.name));
|
||||
return s;
|
||||
}
|
||||
|
||||
pub fn initFill(comptime val: comptime_int) Self {
|
||||
pub fn initFill(val: comptime_int) Self {
|
||||
return .{ .data = std.EnumArray(Dimension, comptime_int).initFill(val) };
|
||||
}
|
||||
|
||||
pub fn get(comptime self: Self, comptime key: Dimension) comptime_int {
|
||||
pub fn get(self: Self, key: Dimension) comptime_int {
|
||||
return self.data.get(key);
|
||||
}
|
||||
|
||||
pub fn set(comptime self: *Self, comptime key: Dimension, comptime val: i8) void {
|
||||
pub fn set(self: *Self, key: Dimension, val: i8) void {
|
||||
self.data.set(key, val);
|
||||
}
|
||||
|
||||
pub fn argsOpt(self: Self) ArgOpts {
|
||||
var args: ArgOpts = undefined;
|
||||
inline for (std.enums.values(Dimension)) |d|
|
||||
for (std.enums.values(Dimension)) |d|
|
||||
@field(args, @tagName(d)) = self.get(d);
|
||||
return args;
|
||||
}
|
||||
|
||||
/// Add exponents component-wise. Used internally by `mul`.
|
||||
pub fn add(comptime a: Self, comptime b: Self) Self {
|
||||
pub fn add(a: Self, b: Self) Self {
|
||||
var result = Self.initFill(0);
|
||||
inline for (std.enums.values(Dimension)) |d|
|
||||
for (std.enums.values(Dimension)) |d|
|
||||
result.set(d, a.get(d) + b.get(d));
|
||||
return result;
|
||||
}
|
||||
|
||||
/// Subtract exponents component-wise. Used internally by `div`.
|
||||
pub fn sub(comptime a: Self, comptime b: Self) Self {
|
||||
pub fn sub(a: Self, b: Self) Self {
|
||||
var result = Self.initFill(0);
|
||||
inline for (std.enums.values(Dimension)) |d|
|
||||
for (std.enums.values(Dimension)) |d|
|
||||
result.set(d, a.get(d) - b.get(d));
|
||||
return result;
|
||||
}
|
||||
|
||||
/// Multiply exponents by a scalar integer. Used internally by `pow` in Scalar.
|
||||
pub fn scale(comptime a: Self, comptime exp: comptime_int) Self {
|
||||
pub fn scale(a: Self, exp: comptime_int) Self {
|
||||
var result = Self.initFill(0);
|
||||
inline for (std.enums.values(Dimension)) |d|
|
||||
for (std.enums.values(Dimension)) |d|
|
||||
result.set(d, a.get(d) * exp);
|
||||
return result;
|
||||
}
|
||||
|
||||
pub fn div(comptime a: Self, comptime exp: comptime_int) Self {
|
||||
pub fn div(a: Self, exp: comptime_int) Self {
|
||||
var result = Self.initFill(0);
|
||||
inline for (std.enums.values(Dimension)) |d|
|
||||
for (std.enums.values(Dimension)) |d|
|
||||
result.set(d, a.get(d) / exp);
|
||||
return result;
|
||||
}
|
||||
|
||||
/// Returns true if every dimension exponent is equal. Used to enforce type compatibility in `add`, `sub`, `to`.
|
||||
pub fn eql(comptime a: Self, comptime b: Self) bool {
|
||||
inline for (std.enums.values(Dimension)) |d|
|
||||
pub fn eql(a: Self, b: Self) bool {
|
||||
for (std.enums.values(Dimension)) |d|
|
||||
if (a.get(d) != b.get(d)) return false;
|
||||
return true;
|
||||
}
|
||||
|
||||
pub fn isSquare(comptime a: Self) bool {
|
||||
inline for (std.enums.values(Dimension)) |d|
|
||||
pub fn isSquare(a: Self) bool {
|
||||
for (std.enums.values(Dimension)) |d|
|
||||
if (a.get(d) % 2 != 0) return false;
|
||||
return true;
|
||||
}
|
||||
|
||||
pub fn str(comptime a: Self) []const u8 {
|
||||
pub fn str(a: Self) []const u8 {
|
||||
var out: []const u8 = "";
|
||||
const dims = std.enums.values(Dimension);
|
||||
|
||||
inline for (dims) |d| {
|
||||
for (dims) |d| {
|
||||
const val = a.get(d);
|
||||
if (val != 0) {
|
||||
out = out ++ @tagName(d) ++ std.fmt.comptimePrint("{d}", .{val});
|
||||
|
||||
1259
src/Quantity.zig
1259
src/Quantity.zig
File diff suppressed because it is too large
Load Diff
@ -1,5 +1,4 @@
|
||||
const std = @import("std");
|
||||
const hlp = @import("helper.zig");
|
||||
const Dimensions = @import("Dimensions.zig");
|
||||
const Dimension = @import("Dimensions.zig").Dimension;
|
||||
|
||||
@ -56,35 +55,35 @@ pub const UnitScale = enum(isize) {
|
||||
// Undefined
|
||||
_,
|
||||
|
||||
pub inline fn str(self: @This()) []const u8 {
|
||||
pub fn str(self: @This()) []const u8 {
|
||||
var buf: [16]u8 = undefined;
|
||||
return switch (self) {
|
||||
inline .none => "",
|
||||
inline .P, .T, .G, .M, .k, .h, .da, .d, .c, .m, .u, .n, .p, .f, .min, .hour, .year, .inch, .ft, .yd, .mi, .oz, .lb, .st => @tagName(self),
|
||||
.none => "",
|
||||
.P, .T, .G, .M, .k, .h, .da, .d, .c, .m, .u, .n, .p, .f, .min, .hour, .year, .inch, .ft, .yd, .mi, .oz, .lb, .st => @tagName(self),
|
||||
else => std.fmt.bufPrint(&buf, "[{d}]", .{@intFromEnum(self)}) catch "[]", // This cannot be inline because of non exhaustive enum, but that's ok, it is just str, not calculation
|
||||
};
|
||||
}
|
||||
|
||||
pub inline fn getFactor(self: @This()) comptime_float {
|
||||
return comptime switch (self) {
|
||||
pub fn getFactor(self: @This()) comptime_float {
|
||||
return switch (self) {
|
||||
// Standard SI Exponents
|
||||
inline .P, .T, .G, .M, .k, .h, .da, .none, .d, .c, .m, .u, .n, .p, .f => std.math.pow(f64, 10.0, @floatFromInt(@intFromEnum(self))),
|
||||
.P, .T, .G, .M, .k, .h, .da, .none, .d, .c, .m, .u, .n, .p, .f => std.math.pow(f64, 10.0, @floatFromInt(@intFromEnum(self))),
|
||||
|
||||
// Time Factors
|
||||
inline .min, .hour, .year => @floatFromInt(@intFromEnum(self)),
|
||||
.min, .hour, .year => @floatFromInt(@intFromEnum(self)),
|
||||
|
||||
// Imperial Length (metres)
|
||||
inline .inch => 0.0254,
|
||||
inline .ft => 0.3048,
|
||||
inline .yd => 0.9144,
|
||||
inline .mi => 1609.344,
|
||||
.inch => 0.0254,
|
||||
.ft => 0.3048,
|
||||
.yd => 0.9144,
|
||||
.mi => 1609.344,
|
||||
|
||||
// Imperial Mass (grams — base unit for M is gram, i.e. .none = 1 g)
|
||||
inline .oz => 28.3495231,
|
||||
inline .lb => 453.59237,
|
||||
inline .st => 6350.29318,
|
||||
.oz => 28.3495231,
|
||||
.lb => 453.59237,
|
||||
.st => 6350.29318,
|
||||
|
||||
inline else => @floatFromInt(@intFromEnum(self)),
|
||||
else => @floatFromInt(@intFromEnum(self)),
|
||||
};
|
||||
}
|
||||
};
|
||||
@ -98,40 +97,46 @@ data: std.EnumArray(Dimension, UnitScale),
|
||||
/// Unspecified dimensions default to `.none` (factor 1).
|
||||
pub fn init(comptime init_val: ArgOpts) Self {
|
||||
comptime var s = Self{ .data = std.EnumArray(Dimension, UnitScale).initFill(.none) };
|
||||
inline for (std.meta.fields(@TypeOf(init_val))) |f| {
|
||||
if (comptime hlp.isInt(@TypeOf(@field(init_val, f.name))))
|
||||
for (std.meta.fields(@TypeOf(init_val))) |f| {
|
||||
if (comptime @typeInfo(@TypeOf(@field(init_val, f.name))) == .comptime_int)
|
||||
s.data.set(@field(Dimension, f.name), @enumFromInt(@field(init_val, f.name)))
|
||||
else
|
||||
s.data.set(@field(Dimension, f.name), @field(init_val, f.name));
|
||||
}
|
||||
return s;
|
||||
return comptime s;
|
||||
}
|
||||
|
||||
pub fn initFill(comptime val: UnitScale) Self {
|
||||
return comptime .{ .data = std.EnumArray(Dimension, UnitScale).initFill(val) };
|
||||
pub fn initFill(val: UnitScale) Self {
|
||||
return .{ .data = std.EnumArray(Dimension, UnitScale).initFill(val) };
|
||||
}
|
||||
|
||||
pub fn get(comptime self: Self, comptime key: Dimension) UnitScale {
|
||||
return comptime self.data.get(key);
|
||||
pub fn get(self: Self, key: Dimension) UnitScale {
|
||||
return self.data.get(key);
|
||||
}
|
||||
|
||||
pub fn set(comptime self: *Self, comptime key: Dimension, comptime val: UnitScale) void {
|
||||
comptime self.data.set(key, val);
|
||||
pub fn set(self: *Self, key: Dimension, val: UnitScale) void {
|
||||
self.data.set(key, val);
|
||||
}
|
||||
|
||||
pub fn eql(self: Self, other: Self) bool {
|
||||
for (self.data.values, other.data.values) |l, r|
|
||||
if (l != r) return false;
|
||||
return true;
|
||||
}
|
||||
|
||||
pub fn argsOpt(self: Self) ArgOpts {
|
||||
var args: ArgOpts = undefined;
|
||||
inline for (std.enums.values(Dimension)) |d|
|
||||
for (std.enums.values(Dimension)) |d|
|
||||
@field(args, @tagName(d)) = self.get(d);
|
||||
return args;
|
||||
}
|
||||
|
||||
/// Compute the combined scale factor for a given dimension signature.
|
||||
/// Each dimension's prefix is raised to its exponent and multiplied together.
|
||||
pub inline fn getFactor(comptime s: Self, comptime d: Dimensions) comptime_float {
|
||||
pub fn getFactor(s: Self, d: Dimensions) comptime_float {
|
||||
var factor: f64 = 1.0;
|
||||
for (std.enums.values(Dimension)) |dim| {
|
||||
const power = comptime d.get(dim);
|
||||
const power = d.get(dim);
|
||||
if (power == 0) continue;
|
||||
|
||||
const base = s.get(dim).getFactor();
|
||||
@ -145,5 +150,5 @@ pub inline fn getFactor(comptime s: Self, comptime d: Dimensions) comptime_float
|
||||
factor /= base;
|
||||
}
|
||||
}
|
||||
return comptime factor;
|
||||
return factor;
|
||||
}
|
||||
|
||||
1395
src/Tensor.zig
Normal file
1395
src/Tensor.zig
Normal file
File diff suppressed because it is too large
Load Diff
@ -1,7 +1,6 @@
|
||||
const std = @import("std");
|
||||
const Io = std.Io;
|
||||
const Scalar = @import("Quantity.zig").Scalar;
|
||||
const Vector = @import("Quantity.zig").Vector;
|
||||
const Tensor = @import("Tensor.zig").Tensor;
|
||||
|
||||
var io: Io = undefined;
|
||||
pub fn main(init: std.process.Init) !void {
|
||||
@ -21,15 +20,17 @@ pub fn main(init: std.process.Init) !void {
|
||||
// try stdout_writer.flush();
|
||||
// try vectorSIMDvsNative(i128, &stdout_writer.interface);
|
||||
// try stdout_writer.flush();
|
||||
|
||||
try bench_Scalar(&stdout_writer.interface);
|
||||
try stdout_writer.flush();
|
||||
//
|
||||
// try bench_Scalar(&stdout_writer.interface);
|
||||
// try stdout_writer.flush();
|
||||
try bench_vsNative(&stdout_writer.interface);
|
||||
try stdout_writer.flush();
|
||||
try bench_crossTypeVsNative(&stdout_writer.interface);
|
||||
// try bench_crossTypeVsNative(&stdout_writer.interface);
|
||||
try stdout_writer.flush();
|
||||
try bench_Vector(&stdout_writer.interface);
|
||||
try stdout_writer.flush();
|
||||
try bench_HighDimTensor(&stdout_writer.interface);
|
||||
try stdout_writer.flush();
|
||||
}
|
||||
|
||||
fn getTime() Io.Timestamp {
|
||||
@ -97,9 +98,9 @@ fn bench_Scalar(writer: *std.Io.Writer) !void {
|
||||
|
||||
comptime var tidx: usize = 0;
|
||||
inline for (Types, TNames) |T, tname| {
|
||||
const M = Scalar(T, .{ .L = 1 }, .{});
|
||||
const KM = Scalar(T, .{ .L = 1 }, .{ .L = .k });
|
||||
const S = Scalar(T, .{ .T = 1 }, .{});
|
||||
const M = Tensor(T, .{ .L = 1 }, .{}, &.{1});
|
||||
const KM = Tensor(T, .{ .L = 1 }, .{ .L = .k }, &.{1});
|
||||
const S = Tensor(T, .{ .T = 1 }, .{}, &.{1});
|
||||
|
||||
inline for (Ops, 0..) |op_name, oidx| {
|
||||
var samples: [SAMPLES]f64 = undefined;
|
||||
@ -170,7 +171,7 @@ fn bench_Scalar(writer: *std.Io.Writer) !void {
|
||||
|
||||
fn bench_vsNative(writer: *std.Io.Writer) !void {
|
||||
const ITERS: usize = 100_000;
|
||||
const SAMPLES: usize = 5;
|
||||
const SAMPLES: usize = 100;
|
||||
|
||||
const getValT = struct {
|
||||
fn f(comptime TT: type, i: usize) TT {
|
||||
@ -179,8 +180,8 @@ fn bench_vsNative(writer: *std.Io.Writer) !void {
|
||||
}
|
||||
}.f;
|
||||
|
||||
const Types = .{ i32, i64, i128, f32, f64 };
|
||||
const TNames = .{ "i32", "i64", "i128", "f32", "f64" };
|
||||
const Types = .{ f64, i64, i128, f32, f64 };
|
||||
const TNames = .{ "f64", "i64", "i128", "f32", "f64" };
|
||||
// Expanded Ops to match bench_Scalar
|
||||
const Ops = .{ "add", "sub", "mul", "div", "abs", "eq", "gt" };
|
||||
|
||||
@ -188,35 +189,34 @@ fn bench_vsNative(writer: *std.Io.Writer) !void {
|
||||
\\
|
||||
\\ Scalar vs Native Overhead Analysis
|
||||
\\
|
||||
\\┌───────────┬──────┬───────────┬───────────┬───────────┐
|
||||
\\│ Operation │ Type │ Native │ Scalar │ Slowdown │
|
||||
\\├───────────┼──────┼───────────┼───────────┼───────────┤
|
||||
\\┌───────────┬──────┬───────────┬───────────┬───────────┬───────────────────────┐
|
||||
\\│ Operation │ Type │ Native │ @Vector │ Tensor{{1}} │ Slowdown Nat | Vec │
|
||||
\\├───────────┼──────┼───────────┼───────────┼───────────┼───────────────────────┤
|
||||
\\
|
||||
, .{});
|
||||
|
||||
inline for (Ops, 0..) |op_name, j| {
|
||||
inline for (Types, 0..) |T, tidx| {
|
||||
var native_total_ns: f64 = 0;
|
||||
var quantity_total_ns: f64 = 0;
|
||||
var vector_total_ns: f64 = 0;
|
||||
var tensor_total_ns: f64 = 0;
|
||||
|
||||
const M = Scalar(T, .{ .L = 1 }, .{});
|
||||
const S = Scalar(T, .{ .T = 1 }, .{});
|
||||
const M = Tensor(T, .{}, .{}, &.{1});
|
||||
|
||||
std.mem.doNotOptimizeAway({
|
||||
for (0..SAMPLES) |_| {
|
||||
// --- 1. Benchmark Native ---
|
||||
const n_start = getTime();
|
||||
for (0..ITERS) |i| {
|
||||
const a = getValT(T, i);
|
||||
const a = getValT(T, 10);
|
||||
const b = getValT(T, 2);
|
||||
|
||||
for (0..ITERS) |_| {
|
||||
// Native logic branch
|
||||
_ = if (comptime std.mem.eql(u8, op_name, "add"))
|
||||
a + b
|
||||
if (comptime @typeInfo(T) == .int) a +| b else a + b
|
||||
else if (comptime std.mem.eql(u8, op_name, "sub"))
|
||||
a - b
|
||||
if (comptime @typeInfo(T) == .int) a -| b else a - b
|
||||
else if (comptime std.mem.eql(u8, op_name, "mul"))
|
||||
a * b
|
||||
if (comptime @typeInfo(T) == .int) a *| b else a * b
|
||||
else if (comptime std.mem.eql(u8, op_name, "div"))
|
||||
if (comptime @typeInfo(T) == .int) @divTrunc(a, b) else a / b
|
||||
else if (comptime std.mem.eql(u8, op_name, "abs"))
|
||||
@ -231,12 +231,36 @@ fn bench_vsNative(writer: *std.Io.Writer) !void {
|
||||
const n_end = getTime();
|
||||
native_total_ns += @as(f64, @floatFromInt(n_start.durationTo(n_end).toNanoseconds()));
|
||||
|
||||
const v_start = getTime();
|
||||
const va = getValT(T, 10);
|
||||
const vb = getValT(T, 2);
|
||||
for (0..ITERS) |_| {
|
||||
// Native logic branch
|
||||
_ = if (comptime std.mem.eql(u8, op_name, "add"))
|
||||
if (comptime @typeInfo(T) == .int) va +| vb else va + vb
|
||||
else if (comptime std.mem.eql(u8, op_name, "sub"))
|
||||
if (comptime @typeInfo(T) == .int) va -| vb else va - vb
|
||||
else if (comptime std.mem.eql(u8, op_name, "mul"))
|
||||
if (comptime @typeInfo(T) == .int) va *| vb else va * vb
|
||||
else if (comptime std.mem.eql(u8, op_name, "div"))
|
||||
if (comptime @typeInfo(T) == .int) @divTrunc(va, vb) else va / vb
|
||||
else if (comptime std.mem.eql(u8, op_name, "abs"))
|
||||
if (comptime @typeInfo(T) == .int) @abs(va) else @as(T, @abs(va))
|
||||
else if (comptime std.mem.eql(u8, op_name, "eq"))
|
||||
va == vb
|
||||
else if (comptime std.mem.eql(u8, op_name, "gt"))
|
||||
va > vb
|
||||
else
|
||||
unreachable;
|
||||
}
|
||||
const v_end = getTime();
|
||||
vector_total_ns += @as(f64, @floatFromInt(v_start.durationTo(v_end).toNanoseconds()));
|
||||
|
||||
// --- 2. Benchmark Scalar ---
|
||||
const q_start = getTime();
|
||||
for (0..ITERS) |i| {
|
||||
const qa = M.splat(getValT(T, i));
|
||||
const qb = if (comptime std.mem.eql(u8, op_name, "div")) S.splat(getValT(T, 2)) else M.splat(getValT(T, 2));
|
||||
|
||||
const qa = M.splat(getValT(T, 10));
|
||||
const qb = M.splat(getValT(T, 2));
|
||||
for (0..ITERS) |_| {
|
||||
// Scalar logic branch
|
||||
_ = if (comptime std.mem.eql(u8, op_name, "add"))
|
||||
qa.add(qb)
|
||||
@ -256,22 +280,24 @@ fn bench_vsNative(writer: *std.Io.Writer) !void {
|
||||
unreachable;
|
||||
}
|
||||
const q_end = getTime();
|
||||
quantity_total_ns += @as(f64, @floatFromInt(q_start.durationTo(q_end).toNanoseconds()));
|
||||
tensor_total_ns += @as(f64, @floatFromInt(q_start.durationTo(q_end).toNanoseconds()));
|
||||
}
|
||||
});
|
||||
|
||||
const avg_n = (native_total_ns / SAMPLES) / @as(f64, @floatFromInt(ITERS));
|
||||
const avg_q = (quantity_total_ns / SAMPLES) / @as(f64, @floatFromInt(ITERS));
|
||||
const slowdown = avg_q / avg_n;
|
||||
const avg_v = (vector_total_ns / SAMPLES) / @as(f64, @floatFromInt(ITERS));
|
||||
const avg_t = (tensor_total_ns / SAMPLES) / @as(f64, @floatFromInt(ITERS));
|
||||
const slowdown_nt = avg_t / avg_n;
|
||||
const slowdown_vt = avg_t / avg_v;
|
||||
|
||||
try writer.print("│ {s:<9} │ {s:<4} │ {d:>7.2}ns │ {d:>7.2}ns │ {d:>8.2}x │\n", .{
|
||||
op_name, TNames[tidx], avg_n, avg_q, slowdown,
|
||||
try writer.print("│ {s:<9} │ {s:<4} │ {d:>7.2}ns │ {d:>7.2}ns │ {d:>7.2}ns │ {d:>8.2}x {d:>8.2}x │\n", .{
|
||||
op_name, TNames[tidx], avg_n, avg_v, avg_t, slowdown_nt, slowdown_vt,
|
||||
});
|
||||
}
|
||||
if (j != Ops.len - 1) try writer.print("├───────────┼──────┼───────────┼───────────┼───────────┤\n", .{});
|
||||
if (j != Ops.len - 1) try writer.print("├───────────┼──────┼───────────┼───────────┼───────────┼───────────────────────┤\n", .{});
|
||||
}
|
||||
|
||||
try writer.print("└───────────┴──────┴───────────┴───────────┴───────────┘\n", .{});
|
||||
try writer.print("└───────────┴──────┴───────────┴───────────┴───────────┴───────────────────────┘\n", .{});
|
||||
}
|
||||
|
||||
fn bench_crossTypeVsNative(writer: *std.Io.Writer) !void {
|
||||
@ -321,9 +347,9 @@ fn bench_crossTypeVsNative(writer: *std.Io.Writer) !void {
|
||||
var native_total_ns: f64 = 0;
|
||||
var quantity_total_ns: f64 = 0;
|
||||
|
||||
const M1 = Scalar(T1, .{ .L = 1 }, .{});
|
||||
const M2 = Scalar(T2, .{ .L = 1 }, .{});
|
||||
const S2 = Scalar(T2, .{ .T = 1 }, .{});
|
||||
const M1 = Tensor(T1, .{ .L = 1 }, .{}, &.{1});
|
||||
const M2 = Tensor(T2, .{ .L = 1 }, .{}, &.{1});
|
||||
const S2 = Tensor(T2, .{ .T = 1 }, .{}, &.{1});
|
||||
|
||||
std.mem.doNotOptimizeAway({
|
||||
for (0..SAMPLES) |_| {
|
||||
@ -429,9 +455,8 @@ fn bench_Vector(writer: *std.Io.Writer) !void {
|
||||
try writer.print("│ {s:<16} │ {s:<4} │", .{ op_name, tname });
|
||||
|
||||
inline for (Lengths) |len| {
|
||||
const Q_base = Scalar(T, .{ .L = 1 }, .{});
|
||||
const Q_time = Scalar(T, .{ .T = 1 }, .{});
|
||||
const V = Vector(len, Q_base);
|
||||
const Q_time = Tensor(T, .{ .T = 1 }, .{}, &.{1});
|
||||
const V = Tensor(T, .{ .L = 1 }, .{}, &.{len});
|
||||
|
||||
// cross product is only defined for len == 3
|
||||
const is_cross = comptime std.mem.eql(u8, op_name, "cross");
|
||||
@ -455,10 +480,10 @@ fn bench_Vector(writer: *std.Io.Writer) !void {
|
||||
_ = v1.div(V.splat(getVal(T, i +% 2, 63)));
|
||||
} else if (comptime std.mem.eql(u8, op_name, "mulScalar")) {
|
||||
const s_val = Q_time.splat(getVal(T, i +% 2, 63));
|
||||
_ = v1.mulScalar(s_val);
|
||||
_ = v1.mul(s_val);
|
||||
} else if (comptime std.mem.eql(u8, op_name, "dot")) {
|
||||
const v2 = V.splat(getVal(T, i +% 5, 63));
|
||||
_ = v1.dot(v2);
|
||||
_ = v1.contract(v2, 0, 0);
|
||||
} else if (comptime std.mem.eql(u8, op_name, "cross")) {
|
||||
// len == 3 guaranteed by the guard above
|
||||
const v2 = V.splat(getVal(T, i +% 5, 63));
|
||||
@ -490,6 +515,102 @@ fn bench_Vector(writer: *std.Io.Writer) !void {
|
||||
try writer.print("└──────────────────┴──────┴─────────┴─────────┴─────────┴─────────┴─────────┘\n", .{});
|
||||
}
|
||||
|
||||
fn bench_HighDimTensor(writer: *std.Io.Writer) !void {
|
||||
const ITERS: usize = 5_000;
|
||||
const SAMPLES: usize = 5;
|
||||
|
||||
const getVal = struct {
|
||||
fn f(comptime TT: type, i: usize, comptime mask: u7) TT {
|
||||
const v: u8 = @as(u8, @truncate(i & @as(usize, mask))) + 1;
|
||||
return if (comptime @typeInfo(TT) == .float) @floatFromInt(v) else @intCast(v);
|
||||
}
|
||||
}.f;
|
||||
|
||||
const computeStats = struct {
|
||||
fn f(samples: []f64, iters: usize) f64 {
|
||||
std.mem.sort(f64, samples, {}, std.sort.asc(f64));
|
||||
const mid = samples.len / 2;
|
||||
const median_ns = if (samples.len % 2 == 0)
|
||||
(samples[mid - 1] + samples[mid]) / 2.0
|
||||
else
|
||||
samples[mid];
|
||||
return median_ns / @as(f64, @floatFromInt(iters));
|
||||
}
|
||||
}.f;
|
||||
|
||||
try writer.print(
|
||||
\\
|
||||
\\ High Dimension Tensor benchmark — {d} iterations, {d} samples/cell
|
||||
\\ (Results in ns/op)
|
||||
\\
|
||||
\\┌─────────────────┬──────┬──────────────┬──────────────┬──────────────┬──────────────┐
|
||||
\\│ Operation │ Type │ 2x2x2 │ 3x3x3 │ 4x4x4 │ 10x10x10x10 │
|
||||
\\├─────────────────┼──────┼──────────────┼──────────────┼──────────────┼──────────────┤
|
||||
\\
|
||||
, .{ ITERS, SAMPLES });
|
||||
|
||||
const Types = .{ i32, i64, f32, f64 };
|
||||
const TNames = .{ "i32", "i64", "f32", "f64" };
|
||||
|
||||
// Testing multiple structural bounds
|
||||
const Shapes = .{
|
||||
&.{ 2, 2, 2 },
|
||||
&.{ 3, 3, 3 },
|
||||
&.{ 4, 4, 4 },
|
||||
&.{ 10, 10, 10, 10 },
|
||||
};
|
||||
|
||||
const Ops = .{ "add", "sub", "mulElem", "mulScalar", "abs" };
|
||||
|
||||
inline for (Ops, 0..) |op_name, o_idx| {
|
||||
inline for (Types, TNames) |T, tname| {
|
||||
try writer.print("│ {s:<15} │ {s:<4} │", .{ op_name, tname });
|
||||
|
||||
inline for (Shapes) |shape| {
|
||||
const V = Tensor(T, .{ .L = 1 }, .{}, shape);
|
||||
const Q = Tensor(T, .{ .T = 1 }, .{}, &.{1}); // For scalar broadcasting operations
|
||||
|
||||
var samples: [SAMPLES]f64 = undefined;
|
||||
|
||||
for (0..SAMPLES) |s_idx| {
|
||||
const t_start = getTime();
|
||||
|
||||
for (0..ITERS) |i| {
|
||||
std.mem.doNotOptimizeAway({
|
||||
const t1 = V.splat(getVal(T, i, 63));
|
||||
|
||||
_ = if (comptime std.mem.eql(u8, op_name, "add"))
|
||||
t1.add(V.splat(getVal(T, i +% 7, 63)))
|
||||
else if (comptime std.mem.eql(u8, op_name, "sub"))
|
||||
t1.sub(V.splat(getVal(T, i +% 3, 63)))
|
||||
else if (comptime std.mem.eql(u8, op_name, "mulElem"))
|
||||
t1.mul(V.splat(getVal(T, i +% 5, 63)))
|
||||
else if (comptime std.mem.eql(u8, op_name, "mulScalar"))
|
||||
t1.mul(Q.splat(getVal(T, i +% 2, 63)))
|
||||
else if (comptime std.mem.eql(u8, op_name, "abs"))
|
||||
t1.abs()
|
||||
else
|
||||
unreachable;
|
||||
});
|
||||
}
|
||||
|
||||
const t_end = getTime();
|
||||
samples[s_idx] = @as(f64, @floatFromInt(t_start.durationTo(t_end).toNanoseconds()));
|
||||
}
|
||||
|
||||
const median_ns_per_op = computeStats(&samples, ITERS);
|
||||
try writer.print(" {d:>12.1} │", .{median_ns_per_op});
|
||||
}
|
||||
try writer.print("\n", .{});
|
||||
}
|
||||
|
||||
if (o_idx < Ops.len - 1) {
|
||||
try writer.print("├─────────────────┼──────┼──────────────┼──────────────┼──────────────┼──────────────┤\n", .{});
|
||||
}
|
||||
}
|
||||
try writer.print("└─────────────────┴──────┴──────────────┴──────────────┴──────────────┴──────────────┘\n", .{});
|
||||
}
|
||||
|
||||
fn vectorSIMDvsNative(comptime T: type, writer: *std.Io.Writer) !void {
|
||||
const iterations: u64 = 10_000;
|
||||
const lens = [_]u32{ 1, 2, 3, 4, 5, 10, 100, 1_000, 10_000 };
|
||||
|
||||
@ -1,97 +0,0 @@
|
||||
const std = @import("std");
|
||||
|
||||
/// True when `T` is any integer type, including `comptime_int`.
pub fn isInt(comptime T: type) bool {
    return switch (@typeInfo(T)) {
        .int, .comptime_int => true,
        else => false,
    };
}
|
||||
|
||||
/// Writes `n` as Unicode superscript digits, preceded by a superscript
/// minus (U+207B) for negative values. Writes nothing when `n == 0`
/// (presumably so a zero exponent renders as an absent unit — confirm
/// against callers). Errors from the writer are propagated.
pub fn printSuperscript(writer: *std.Io.Writer, n: i32) !void {
    if (n == 0) return;
    if (n < 0) try writer.writeAll("\u{207B}"); // superscript minus
    // @abs avoids the overflow that `-n` would hit for n == minInt(i32),
    // which the previous `val = -val` negation did not handle.
    const magnitude: u32 = @abs(n);
    var buf: [10]u8 = undefined; // u32 needs at most 10 decimal digits
    // Cannot fail: the buffer provably fits any u32.
    const str = std.fmt.bufPrint(&buf, "{d}", .{magnitude}) catch unreachable;
    for (str) |c| {
        // Map each ASCII digit to its Unicode superscript form.
        const s = switch (c) {
            '0' => "\u{2070}",
            '1' => "\u{00B9}",
            '2' => "\u{00B2}",
            '3' => "\u{00B3}",
            '4' => "\u{2074}",
            '5' => "\u{2075}",
            '6' => "\u{2076}",
            '7' => "\u{2077}",
            '8' => "\u{2078}",
            '9' => "\u{2079}",
            else => unreachable, // bufPrint("{d}") only emits '0'..'9'
        };
        try writer.writeAll(s);
    }
}
|
||||
|
||||
const Scales = @import("Scales.zig");
|
||||
const Dimensions = @import("Dimensions.zig");
|
||||
const Dimension = @import("Dimensions.zig").Dimension;
|
||||
|
||||
/// Computes, entirely at comptime, the per-dimension scale set for the
/// result of a binary operation between quantity types `T1` and `T2`
/// (each must declare `dims: Dimensions` and `scales: Scales`).
///
/// For every base dimension:
///  - absent on both sides          -> `.none`
///  - present on only one side      -> that side's scale
///  - present on both               -> the scale with the smaller factor
///    (assumes `getFactor()` orders scales by magnitude, so "smaller
///    factor" means the finer unit — TODO confirm against Scales.zig)
pub fn finerScales(comptime T1: type, comptime T2: type) Scales {
    const d1: Dimensions = T1.dims;
    const d2: Dimensions = T2.dims;
    const s1: Scales = T1.scales;
    const s2: Scales = T2.scales;
    // Comptime-mutable accumulator; filled one dimension at a time below.
    comptime var out = Scales.initFill(.none);
    inline for (std.enums.values(Dimension)) |dim| {
        const scale1 = comptime s1.get(dim);
        const scale2 = comptime s2.get(dim);
        out.set(dim, if (comptime d1.get(dim) == 0 and d2.get(dim) == 0)
            .none
        else if (comptime d1.get(dim) == 0)
            scale2
        else if (comptime d2.get(dim) == 0)
            scale1
        else if (comptime scale1.getFactor() > scale2.getFactor())
            scale2
        else
            scale1);
    }
    // Force the fully-built value to be a comptime result.
    comptime return out;
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// RHS normalisation helpers
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
const Quantity = @import("Quantity.zig").Quantity;
|
||||
|
||||
/// Returns true if `T` is a quantity type, i.e. a struct that declares
/// `ISQUANTITY` and sets it to `true`.
pub fn isScalarType(comptime T: type) bool {
    if (@typeInfo(T) != .@"struct") return false;
    if (!@hasDecl(T, "ISQUANTITY")) return false;
    return @field(T, "ISQUANTITY");
}
|
||||
|
||||
/// Resolve the quantity type that `rhs` will be treated as.
///
/// Accepted rhs types:
/// - Any quantity type (per `isScalarType`)    -> returned unchanged
/// - `comptime_int` / `comptime_float`         -> dimensionless `Quantity(ValueType, N, {}, {})`
/// - `ValueType` itself                        -> dimensionless `Quantity(ValueType, N, {}, {})`
///
/// Any other type — including other int/float types — is a compile error.
pub fn rhsQuantityType(comptime ValueType: type, N: usize, comptime RhsT: type) type {
    if (comptime isScalarType(RhsT)) return RhsT;
    const is_plain_number = RhsT == comptime_int or RhsT == comptime_float or RhsT == ValueType;
    if (comptime is_plain_number) return Quantity(ValueType, N, .{}, .{});
    @compileError(
        "rhs must be a Scalar, " ++ @typeName(ValueType) ++
            ", comptime_int, or comptime_float; got " ++ @typeName(RhsT),
    );
}
|
||||
|
||||
/// Normalise `rhs` into the type computed by `rhsQuantityType`:
/// quantities pass through untouched; plain numbers are splatted into a
/// dimensionless N-lane quantity of `BaseT`.
pub inline fn toRhsQuantity(comptime BaseT: type, N: usize, rhs: anytype) rhsQuantityType(BaseT, N, @TypeOf(rhs)) {
    const RhsT = @TypeOf(rhs);
    if (comptime isScalarType(RhsT)) return rhs;
    // Result-location coercion targets the dimensionless Quantity the
    // return type resolves to; broadcast the scalar across all N lanes.
    return .{ .data = @splat(@as(BaseT, rhs)) };
}
|
||||
@ -1,15 +1,13 @@
|
||||
const std = @import("std");
|
||||
|
||||
pub const Vector = @import("Quantity.zig").Vector;
|
||||
pub const Scalar = @import("Quantity.zig").Scalar;
|
||||
pub const Tensor = @import("Tensor.zig").Tensor;
|
||||
pub const Dimensions = @import("Dimensions.zig");
|
||||
pub const Scales = @import("Scales.zig");
|
||||
pub const Base = @import("Base.zig");
|
||||
|
||||
// Reference every sub-module so their `test` blocks are included when
// this root file is the test target (`zig build test`).
test {
    _ = @import("Quantity.zig");
    _ = @import("Tensor.zig");
    _ = @import("Dimensions.zig");
    _ = @import("Scales.zig");
    _ = @import("Base.zig");
    _ = @import("helper.zig");
}
|
||||
|
||||
Loading…
x
Reference in New Issue
Block a user