Add number of files to db metrics

Adrien Bouvais 2025-01-14 22:31:55 +01:00
parent 77e1197894
commit 173b302976
4 changed files with 23 additions and 25 deletions

View File

@@ -29,6 +29,7 @@ pub fn parse(self: *Self, null_term_line_str: [:0]const u8) !bool {
     var state = State.expect_main_command;
     defer _ = arena.reset(.free_all);
+    errdefer arena.deinit();
     var last_token: cliToken = undefined;
@@ -88,6 +89,10 @@ pub fn parse(self: *Self, null_term_line_str: [:0]const u8) !bool {
             .keyword_csv => state = .expect_path_to_dump,
             .keyword_json => state = .expect_path_to_dump,
             .keyword_zid => state = .expect_path_to_dump,
+            .keyword_help => {
+                send("{s}", .{config.HELP_MESSAGE.dump});
+                state = .end;
+            },
             else => {
                 send("Error: format available: csv, json, zid", .{});
                 state = .end;
@@ -95,7 +100,7 @@ pub fn parse(self: *Self, null_term_line_str: [:0]const u8) !bool {
         },
         .expect_db_command => switch (token.tag) {
-            .keyword_new, .keyword_use => state = .expect_path_to_db, //TODO: When new, create the dir. If use, dont create the dir
+            .keyword_use => state = .expect_path_to_db,
             .keyword_metrics => {
                 if (self.state == .MissingFileEngine) {
                     send("{s}", .{config.HELP_MESSAGE.no_engine});
@@ -124,7 +129,7 @@ pub fn parse(self: *Self, null_term_line_str: [:0]const u8) !bool {
                 state = .end;
             },
             else => {
-                send("Error: db commands available: new, metrics, swap & help", .{});
+                send("Error: db commands available: use, metrics & help", .{});
                 state = .end;
             },
         },
@@ -165,8 +170,8 @@ pub fn parse(self: *Self, null_term_line_str: [:0]const u8) !bool {
                 send("Schema:\n {s}", .{self.schema_engine.null_terminated});
                 state = .end;
             },
-            .keyword_init => {
-                if (self.state == .MissingFileEngine) send("Error: No database selected. Please use 'db new' or 'db use'.", .{});
+            .keyword_use => {
+                if (self.state == .MissingFileEngine) send("Error: No database selected. Please use 'db use'.", .{});
                 state = .expect_path_to_schema;
             },
             .keyword_help => {
@@ -213,6 +218,7 @@ pub fn parse(self: *Self, null_term_line_str: [:0]const u8) !bool {
     };
     if (state == .quit) {
+        arena.deinit();
        log.info("Bye bye\n", .{});
        return true;
    }

View File

@@ -19,6 +19,7 @@ pub usingnamespace @import("read.zig");
 pub usingnamespace @import("write.zig");
 pub usingnamespace @import("dump.zig");
+allocator: std.mem.Allocator = std.heap.page_allocator,
 path_to_ZipponDB_dir: []const u8,
 thread_pool: *Pool, // same pool as the ThreadEngine
 schema_engine: SchemaEngine = undefined, // This is init after the FileEngine and I attach after. Do I need to init after tho ?

View File

@@ -26,27 +26,16 @@ var path_buffer: [1024]u8 = undefined;
 /// Use a struct name to populate a list with all UUID of this struct
 /// TODO: Multi thread that too
-pub fn getNumberOfEntity(self: *Self, struct_name: []const u8) ZipponError!usize {
-    var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
+pub fn getNumberOfEntityAndFile(self: *Self, struct_name: []const u8) ZipponError!struct { entity: usize, file: usize } {
+    var arena = std.heap.ArenaAllocator.init(self.allocator);
     defer arena.deinit();
     const allocator = arena.allocator();
     const sstruct = try self.schema_engine.structName2SchemaStruct(struct_name);
-    const max_file_index = try self.maxFileIndex(sstruct.name);
-    var count: usize = 0;
-    const dir = try self.printOpenDir("{s}/DATA/{s}", .{ self.path_to_ZipponDB_dir, sstruct.name }, .{});
-    for (0..(max_file_index + 1)) |i| {
-        const path_buff = std.fmt.bufPrint(&path_buffer, "{d}.zid", .{i}) catch return ZipponError.MemoryError;
-        var iter = zid.DataIterator.init(allocator, path_buff, dir, sstruct.zid_schema) catch return ZipponError.ZipponDataError;
-        defer iter.deinit();
-        while (iter.next() catch return ZipponError.ZipponDataError) |_| count += 1;
-    }
-    return count;
+    const to_parse = try self.allFileIndex(allocator, struct_name);
+    defer allocator.free(to_parse);
+    return .{ .entity = sstruct.uuid_file_index.map.count(), .file = to_parse.len };
 }
 /// Populate a map with all UUID bytes as key and file index as value
@@ -56,7 +45,7 @@ pub fn populateFileIndexUUIDMap(
     sstruct: SchemaStruct,
     map: *UUIDFileIndex,
 ) ZipponError!void {
-    var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
+    var arena = std.heap.ArenaAllocator.init(self.allocator);
     defer arena.deinit();
     const allocator = arena.allocator();
@@ -146,7 +135,7 @@ pub fn populateVoidUUIDMap(
     map: *std.AutoHashMap(UUID, void),
     additional_data: *AdditionalData,
 ) ZipponError!void {
-    var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
+    var arena = std.heap.ArenaAllocator.init(self.allocator);
     defer arena.deinit();
     const allocator = arena.allocator();
@@ -250,7 +239,7 @@ pub fn parseEntities(
     additional_data: *AdditionalData,
     entry_allocator: Allocator,
 ) ZipponError![]const u8 {
-    var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
+    var arena = std.heap.ArenaAllocator.init(self.allocator);
     defer arena.deinit();
     const allocator = arena.allocator();

View File

@@ -45,10 +45,12 @@ pub fn writeDbMetrics(self: *Self, buffer: *std.ArrayList(u8)) ZipponError!void
         if (entry.kind != .directory) continue;
         const sub_dir = data_dir.openDir(entry.name, .{ .iterate = true }) catch return ZipponError.CantOpenDir;
         const size = getDirTotalSize(sub_dir) catch 0;
-        writer.print(" {s}: {d:.}Mb {d} entities\n", .{
+        const result = try self.getNumberOfEntityAndFile(entry.name);
+        writer.print(" {s}: {d:.2}Mb | {d} entities | {d} files\n", .{
             entry.name,
             @as(f64, @floatFromInt(size)) / 1024.0 / 1024.0,
-            try self.getNumberOfEntity(entry.name),
+            result.entity,
+            result.file,
         }) catch return ZipponError.WriteError;
     }
 }
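
For illustration only (not part of the commit), a minimal standalone Zig sketch of the metrics line format that the updated writeDbMetrics emits. The struct name, directory size, and counts are hypothetical placeholders; in the real code the counts come from getNumberOfEntityAndFile.

const std = @import("std");

// Standalone sketch: formats one metrics line the same way the updated
// writeDbMetrics does. All values below are hypothetical placeholders.
pub fn main() void {
    const size_bytes: u64 = 548_864; // hypothetical on-disk size of one struct's DATA dir
    const entity_count: usize = 1024; // hypothetical, would come from getNumberOfEntityAndFile
    const file_count: usize = 2; // hypothetical, would come from getNumberOfEntityAndFile
    std.debug.print(" {s}: {d:.2}Mb | {d} entities | {d} files\n", .{
        "User", // hypothetical struct name
        @as(f64, @floatFromInt(size_bytes)) / 1024.0 / 1024.0,
        entity_count,
        file_count,
    });
}

With the placeholder values above this prints a line like " User: 0.52Mb | 1024 entities | 2 files".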