Moved fileEngine to use the root as struct

Adrien Bouvais 2025-01-11 17:55:56 +01:00
parent bd4f0aab7f
commit 0f6f34e706
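The diff below drops the explicit `pub const FileEngine = struct { ... };` wrapper and instead declares the fields and functions at the top level of the file, with `pub const FileEngine = @This();` naming the file's own root struct. In Zig, a source file is itself a struct, so a file with top-level fields becomes an instantiable type. A minimal sketch of the pattern, using a hypothetical `Engine.zig` (illustrative only, not code from this repository):

    //! Engine.zig -- the whole file is one struct type.

    // Name the file's root struct so code inside the file can refer to it.
    pub const Engine = @This();

    // A top-level field: this is what turns the file into an instantiable struct.
    path: []const u8,

    // Top-level functions act as the struct's declarations (methods).
    pub fn init(path: []const u8) Engine {
        return .{ .path = path };
    }

    // Callers import the file itself as the type:
    //     const Engine = @import("Engine.zig");
    //     var engine = Engine.init("data");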


@@ -40,29 +40,30 @@ var path_to_ZipponDB_dir_buffer: [1024]u8 = undefined;
/// Manage everything that is relate to read or write in files
/// Or even get stats, whatever. If it touch files, it's here
-pub const FileEngine = struct {
-    path_to_ZipponDB_dir: []const u8,
-    thread_pool: *Pool, // same pool as the ThreadEngine
-    schema_engine: SchemaEngine = undefined, // This is init after the FileEngine and I attach after. Do I need to init after tho ?
-    pub fn init(path: []const u8, thread_pool: *Pool) ZipponError!FileEngine {
+pub const FileEngine = @This();
+path_to_ZipponDB_dir: []const u8,
+thread_pool: *Pool, // same pool as the ThreadEngine
+schema_engine: SchemaEngine = undefined, // This is init after the FileEngine and I attach after. Do I need to init after tho ?
+pub fn init(path: []const u8, thread_pool: *Pool) ZipponError!FileEngine {
return FileEngine{
.path_to_ZipponDB_dir = std.fmt.bufPrint(&path_to_ZipponDB_dir_buffer, "{s}", .{path}) catch return ZipponError.MemoryError,
.thread_pool = thread_pool,
};
-    }
+}
-    // --------------------Other--------------------
+// --------------------Other--------------------
-    pub fn readSchemaFile(sub_path: []const u8, buffer: []u8) ZipponError!usize {
+pub fn readSchemaFile(sub_path: []const u8, buffer: []u8) ZipponError!usize {
const file = std.fs.cwd().openFile(sub_path, .{}) catch return ZipponError.CantOpenFile;
defer file.close();
const len = file.readAll(buffer) catch return FileEngineError.ReadError;
return len;
-    }
+}
-    pub fn writeDbMetrics(self: *FileEngine, buffer: *std.ArrayList(u8)) ZipponError!void {
+pub fn writeDbMetrics(self: *FileEngine, buffer: *std.ArrayList(u8)) ZipponError!void {
const main_dir = std.fs.cwd().openDir(self.path_to_ZipponDB_dir, .{ .iterate = true }) catch return FileEngineError.CantOpenDir;
const writer = buffer.writer();
@@ -93,12 +94,12 @@ pub const FileEngine = struct {
try self.getNumberOfEntity(entry.name),
}) catch return FileEngineError.WriteError;
}
-    }
+}
-    // --------------------Init folder and files--------------------
+// --------------------Init folder and files--------------------
-    /// Create the main folder. Including DATA, LOG and BACKUP
-    pub fn createMainDirectories(self: *FileEngine) ZipponError!void {
+/// Create the main folder. Including DATA, LOG and BACKUP
+pub fn createMainDirectories(self: *FileEngine) ZipponError!void {
var path_buff = std.fmt.bufPrint(&path_buffer, "{s}", .{self.path_to_ZipponDB_dir}) catch return FileEngineError.MemoryError;
const cwd = std.fs.cwd();
@@ -138,11 +139,11 @@ pub const FileEngine = struct {
_ = cwd.createFile(path_buff, .{}) catch return FileEngineError.CantMakeFile;
};
}
-    }
+}
-    /// Request a path to a schema file and then create the struct folder
-    /// TODO: Check if some data already exist and if so ask if the user want to delete it and make a backup
-    pub fn createStructDirectories(self: *FileEngine, struct_array: []SchemaStruct) ZipponError!void {
+/// Request a path to a schema file and then create the struct folder
+/// TODO: Check if some data already exist and if so ask if the user want to delete it and make a backup
+pub fn createStructDirectories(self: *FileEngine, struct_array: []SchemaStruct) ZipponError!void {
var data_dir = try utils.printOpenDir("{s}/DATA", .{self.path_to_ZipponDB_dir}, .{});
defer data_dir.close();
@@ -155,13 +156,13 @@ pub const FileEngine = struct {
zid.createFile("0.zid", struct_dir) catch return FileEngineError.CantMakeFile;
}
-    }
+}
-    // --------------------Read and parse files--------------------
+// --------------------Read and parse files--------------------
-    /// Use a struct name to populate a list with all UUID of this struct
-    /// TODO: Multi thread that too
-    pub fn getNumberOfEntity(self: *FileEngine, struct_name: []const u8) ZipponError!usize {
+/// Use a struct name to populate a list with all UUID of this struct
+/// TODO: Multi thread that too
+pub fn getNumberOfEntity(self: *FileEngine, struct_name: []const u8) ZipponError!usize {
var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena.deinit();
const allocator = arena.allocator();
@@ -182,17 +183,17 @@ pub const FileEngine = struct {
}
return count;
-    }
+}
-    const UUIDFileIndex = @import("stuffs/UUIDFileIndex.zig").UUIDIndexMap;
+const UUIDFileIndex = @import("stuffs/UUIDFileIndex.zig").UUIDIndexMap;
-    /// Populate a map with all UUID bytes as key and file index as value
-    /// This map is store in the SchemaStruct to then by using a list of UUID, get a list of file_index to parse
-    pub fn populateFileIndexUUIDMap(
+/// Populate a map with all UUID bytes as key and file index as value
+/// This map is store in the SchemaStruct to then by using a list of UUID, get a list of file_index to parse
+pub fn populateFileIndexUUIDMap(
self: *FileEngine,
sstruct: SchemaStruct,
map: *UUIDFileIndex,
-    ) ZipponError!void {
+) ZipponError!void {
var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena.deinit();
const allocator = arena.allocator();
@@ -238,15 +239,15 @@ pub const FileEngine = struct {
for (thread_writer_list, 0..) |list, file_index| {
for (list.items) |uuid| map.put(uuid, file_index) catch return ZipponError.MemoryError;
}
-    }
+}
-    fn populateFileIndexUUIDMapOneFile(
+fn populateFileIndexUUIDMapOneFile(
sstruct: SchemaStruct,
list: *std.ArrayList(UUID),
file_index: u64,
dir: std.fs.Dir,
sync_context: *ThreadSyncContext,
-    ) void {
+) void {
var data_buffer: [BUFFER_SIZE]u8 = undefined;
var fa = std.heap.FixedBufferAllocator.init(&data_buffer);
defer fa.reset();
@@ -274,17 +275,17 @@ pub const FileEngine = struct {
}
_ = sync_context.completeThread();
-    }
+}
-    /// Use a struct name and filter to populate a map with all UUID bytes as key and void as value
-    /// This map is use as value for the ConditionValue of links, so I can do a `contains` on it.
-    pub fn populateVoidUUIDMap(
+/// Use a struct name and filter to populate a map with all UUID bytes as key and void as value
+/// This map is use as value for the ConditionValue of links, so I can do a `contains` on it.
+pub fn populateVoidUUIDMap(
self: *FileEngine,
struct_name: []const u8,
filter: ?Filter,
map: *std.AutoHashMap(UUID, void),
additional_data: *AdditionalData,
-    ) ZipponError!void {
+) ZipponError!void {
var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena.deinit();
const allocator = arena.allocator();
@@ -338,16 +339,16 @@ pub const FileEngine = struct {
log.debug("{s}", .{UUID.format_bytes(entry.key_ptr.bytes)});
}
}
-    }
+}
-    fn populateVoidUUIDMapOneFile(
+fn populateVoidUUIDMapOneFile(
sstruct: SchemaStruct,
filter: ?Filter,
list: *std.ArrayList(UUID),
file_index: u64,
dir: std.fs.Dir,
sync_context: *ThreadSyncContext,
-    ) void {
+) void {
var data_buffer: [BUFFER_SIZE]u8 = undefined;
var fa = std.heap.FixedBufferAllocator.init(&data_buffer);
defer fa.reset();
@@ -380,17 +381,17 @@ pub const FileEngine = struct {
}
_ = sync_context.completeThread();
-    }
+}
-    /// Take a filter, parse all file and if one struct if validate by the filter, write it in a JSON format to the writer
-    /// filter can be null. This will return all of them
-    pub fn parseEntities(
+/// Take a filter, parse all file and if one struct if validate by the filter, write it in a JSON format to the writer
+/// filter can be null. This will return all of them
+pub fn parseEntities(
self: *FileEngine,
struct_name: []const u8,
filter: ?Filter,
additional_data: *AdditionalData,
entry_allocator: Allocator,
-    ) ZipponError![]const u8 {
+) ZipponError![]const u8 {
var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena.deinit();
const allocator = arena.allocator();
@@ -464,9 +465,9 @@ pub const FileEngine = struct {
for (relation_maps) |*relation_map| try self.parseEntitiesRelationMap(allocator, relation_map.struct_name, relation_map, &buff);
return buff.toOwnedSlice() catch return ZipponError.MemoryError;
-    }
+}
-    fn parseEntitiesOneFile(
+fn parseEntitiesOneFile(
writer: anytype,
file_index: u64,
dir: std.fs.Dir,
@@ -475,7 +476,7 @@ pub const FileEngine = struct {
additional_data: AdditionalData,
data_types: []const DataType,
sync_context: *ThreadSyncContext,
-    ) void {
+) void {
var data_buffer: [BUFFER_SIZE]u8 = undefined;
var fa = std.heap.FixedBufferAllocator.init(&data_buffer);
defer fa.reset();
@@ -511,21 +512,21 @@ pub const FileEngine = struct {
}
_ = sync_context.completeThread();
-    }
+}
-    // Receive a map of UUID -> empty JsonString
-    // Will parse the files and update the value to the JSON string of the entity that represent the key
-    // Will then write the input with the JSON in the map looking for {|<>|}
-    // Once the new input received, call parseEntitiesRelationMap again the string still contain {|<>|} because of sub relationship
-    // The buffer contain the string with {|<>|} and need to be updated at the end
-    // TODO: Use the new function in SchemaEngine to reduce the number of files to parse
-    pub fn parseEntitiesRelationMap(
+// Receive a map of UUID -> empty JsonString
+// Will parse the files and update the value to the JSON string of the entity that represent the key
+// Will then write the input with the JSON in the map looking for {|<>|}
+// Once the new input received, call parseEntitiesRelationMap again the string still contain {|<>|} because of sub relationship
+// The buffer contain the string with {|<>|} and need to be updated at the end
+// TODO: Use the new function in SchemaEngine to reduce the number of files to parse
+pub fn parseEntitiesRelationMap(
self: *FileEngine,
parent_allocator: Allocator,
struct_name: []const u8,
relation_map: *RelationMap,
buff: *std.ArrayList(u8),
-    ) ZipponError!void {
+) ZipponError!void {
var arena = std.heap.ArenaAllocator.init(parent_allocator);
defer arena.deinit();
const allocator = arena.allocator();
@@ -613,9 +614,9 @@ pub const FileEngine = struct {
// I then call parseEntitiesRelationMap on each
// This will update the buff items to be the same Json but with {|<[16]u8>|} replaced with the right Json
for (relation_maps) |*sub_relation_map| try self.parseEntitiesRelationMap(allocator, sub_relation_map.struct_name, sub_relation_map, buff);
-    }
+}
-    fn parseEntitiesRelationMapOneFile(
+fn parseEntitiesRelationMapOneFile(
map: *std.AutoHashMap([16]u8, JsonString),
file_index: u64,
dir: std.fs.Dir,
@@ -623,7 +624,7 @@ pub const FileEngine = struct {
additional_data: AdditionalData,
data_types: []const DataType,
sync_context: *ThreadSyncContext,
-    ) void {
+) void {
var data_buffer: [BUFFER_SIZE]u8 = undefined;
var fa = std.heap.FixedBufferAllocator.init(&data_buffer);
defer fa.reset();
@@ -675,17 +676,17 @@ pub const FileEngine = struct {
}
_ = sync_context.completeThread();
-    }
+}
-    // --------------------Change existing files--------------------
+// --------------------Change existing files--------------------
-    // TODO: Make it in batch too
-    pub fn addEntity(
+// TODO: Make it in batch too
+pub fn addEntity(
self: *FileEngine,
struct_name: []const u8,
maps: []std.StringHashMap(ConditionValue),
writer: anytype,
-    ) ZipponError!void {
+) ZipponError!void {
var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena.deinit();
const allocator = arena.allocator();
@@ -716,16 +717,16 @@ pub const FileEngine = struct {
}
data_writer.flush() catch return FileEngineError.ZipponDataError;
-    }
+}
-    pub fn updateEntities(
+pub fn updateEntities(
self: *FileEngine,
struct_name: []const u8,
filter: ?Filter,
map: std.StringHashMap(ConditionValue),
writer: anytype,
additional_data: *AdditionalData,
-    ) ZipponError!void {
+) ZipponError!void {
var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena.deinit();
const allocator = arena.allocator();
@@ -780,9 +781,9 @@ pub const FileEngine = struct {
writer.writeAll(list.items) catch return FileEngineError.WriteError;
}
writer.writeByte(']') catch return FileEngineError.WriteError;
-    }
+}
-    fn updateEntitiesOneFile(
+fn updateEntitiesOneFile(
new_data_buff: []zid.Data,
sstruct: SchemaStruct,
filter: ?Filter,
@@ -791,7 +792,7 @@ pub const FileEngine = struct {
file_index: u64,
dir: std.fs.Dir,
sync_context: *ThreadSyncContext,
-    ) void {
+) void {
log.debug("{any}\n", .{@TypeOf(writer)});
var data_buffer: [BUFFER_SIZE]u8 = undefined;
var fa = std.heap.FixedBufferAllocator.init(&data_buffer);
@@ -881,16 +882,16 @@ pub const FileEngine = struct {
};
_ = sync_context.completeThread();
-    }
+}
-    /// Delete all entity based on the filter. Will also write a JSON format list of all UUID deleted into the buffer
-    pub fn deleteEntities(
+/// Delete all entity based on the filter. Will also write a JSON format list of all UUID deleted into the buffer
+pub fn deleteEntities(
self: *FileEngine,
struct_name: []const u8,
filter: ?Filter,
writer: anytype,
additional_data: *AdditionalData,
-    ) ZipponError!void {
+) ZipponError!void {
var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena.deinit();
const allocator = arena.allocator();
@@ -940,16 +941,16 @@ pub const FileEngine = struct {
sstruct.uuid_file_index.map.clearRetainingCapacity();
_ = sstruct.uuid_file_index.arena.reset(.free_all);
try self.populateFileIndexUUIDMap(sstruct, sstruct.uuid_file_index);
-    }
+}
-    fn deleteEntitiesOneFile(
+fn deleteEntitiesOneFile(
sstruct: SchemaStruct,
filter: ?Filter,
writer: anytype,
file_index: u64,
dir: std.fs.Dir,
sync_context: *ThreadSyncContext,
-    ) void {
+) void {
var data_buffer: [BUFFER_SIZE]u8 = undefined;
var fa = std.heap.FixedBufferAllocator.init(&data_buffer);
defer fa.reset();
@@ -1029,15 +1030,15 @@ pub const FileEngine = struct {
}
sync_context.completeThread();
-    }
+}
-    // TODO: Make a function that take a list of UUID and remove all instance in relationship
-    // It is to remove when they are deleted
+// TODO: Make a function that take a list of UUID and remove all instance in relationship
+// It is to remove when they are deleted
-    // --------------------ZipponData utils--------------------
+// --------------------ZipponData utils--------------------
-    //TODO: Update to make it use ConditionValue
-    fn string2Data(allocator: Allocator, value: ConditionValue) ZipponError!zid.Data {
+//TODO: Update to make it use ConditionValue
+fn string2Data(allocator: Allocator, value: ConditionValue) ZipponError!zid.Data {
switch (value) {
.int => |v| return zid.Data.initInt(v),
.float => |v| return zid.Data.initFloat(v),
@@ -1070,16 +1071,16 @@ pub const FileEngine = struct {
.bool_array => |v| return zid.Data.initBoolArray(zid.allocEncodArray.Bool(allocator, v) catch return FileEngineError.AllocEncodError),
.unix_array => |v| return zid.Data.initUnixArray(zid.allocEncodArray.Unix(allocator, v) catch return FileEngineError.AllocEncodError),
}
-    }
+}
-    /// Take a map from the parseNewData and return an ordered array of Data to be use in a DataWriter
-    /// TODO: Optimize and maybe put it somewhere else than fileEngine
-    fn orderedNewData(
+/// Take a map from the parseNewData and return an ordered array of Data to be use in a DataWriter
+/// TODO: Optimize and maybe put it somewhere else than fileEngine
+fn orderedNewData(
self: *FileEngine,
allocator: Allocator,
struct_name: []const u8,
map: std.StringHashMap(ConditionValue),
-    ) ZipponError![]zid.Data {
+) ZipponError![]zid.Data {
const members = try self.schema_engine.structName2structMembers(struct_name);
var datas = allocator.alloc(zid.Data, (members.len)) catch return FileEngineError.MemoryError;
@@ -1092,13 +1093,13 @@ pub const FileEngine = struct {
}
return datas;
-    }
+}
-    // --------------------Schema utils--------------------
+// --------------------Schema utils--------------------
-    /// Get the index of the first file that is bellow the size limit. If not found, create a new file
-    /// TODO: Need some serious speed up. I should keep in memory a file->size as a hashmap and use that instead
-    fn getFirstUsableIndexFile(self: FileEngine, struct_name: []const u8) ZipponError!usize {
+/// Get the index of the first file that is bellow the size limit. If not found, create a new file
+/// TODO: Need some serious speed up. I should keep in memory a file->size as a hashmap and use that instead
+fn getFirstUsableIndexFile(self: FileEngine, struct_name: []const u8) ZipponError!usize {
var member_dir = try utils.printOpenDir("{s}/DATA/{s}", .{ self.path_to_ZipponDB_dir, struct_name }, .{ .iterate = true });
defer member_dir.close();
@@ -1118,11 +1119,11 @@ pub const FileEngine = struct {
zid.createFile(path, null) catch return FileEngineError.ZipponDataError;
return i;
-    }
+}
-    /// Iterate over all file of a struct and return the index of the last file.
-    /// E.g. a struct with 0.csv and 1.csv it return 1.
-    fn maxFileIndex(self: FileEngine, struct_name: []const u8) ZipponError!usize {
+/// Iterate over all file of a struct and return the index of the last file.
+/// E.g. a struct with 0.csv and 1.csv it return 1.
+fn maxFileIndex(self: FileEngine, struct_name: []const u8) ZipponError!usize {
var member_dir = try utils.printOpenDir("{s}/DATA/{s}", .{ self.path_to_ZipponDB_dir, struct_name }, .{ .iterate = true });
defer member_dir.close();
@@ -1134,14 +1135,14 @@ pub const FileEngine = struct {
count += 1;
}
return count - 1;
-    }
+}
-    pub fn isSchemaFileInDir(self: *FileEngine) bool {
+pub fn isSchemaFileInDir(self: *FileEngine) bool {
_ = utils.printOpenFile("{s}/schema", .{self.path_to_ZipponDB_dir}, .{}) catch return false;
return true;
-    }
+}
-    pub fn writeSchemaFile(self: *FileEngine, null_terminated_schema_buff: [:0]const u8) ZipponError!void {
+pub fn writeSchemaFile(self: *FileEngine, null_terminated_schema_buff: [:0]const u8) ZipponError!void {
var zippon_dir = std.fs.cwd().openDir(self.path_to_ZipponDB_dir, .{}) catch return FileEngineError.MemoryError;
defer zippon_dir.close();
@@ -1153,5 +1154,4 @@ pub const FileEngine = struct {
var file = zippon_dir.createFile("schema", .{}) catch return ZipponError.CantMakeFile;
defer file.close();
file.writeAll(null_terminated_schema_buff) catch return ZipponError.WriteError;
-    }
-};
+}
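
Because the file now re-exports its root struct under the old name (`pub const FileEngine = @This();`), call sites that reach the type through the module should keep compiling unchanged. A hypothetical example (the actual import path is not visible in this commit view):

    // Illustrative only -- the real file path is not shown in this diff.
    const FileEngine = @import("file.zig").FileEngine;

    var file_engine = try FileEngine.init(path, thread_pool);

`init` still returns `ZipponError!FileEngine`, so the calling shape with `try` is unchanged by the refactor.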