Moved fileEngine to use the root as struct

Adrien Bouvais 2025-01-11 17:55:56 +01:00
parent bd4f0aab7f
commit 0f6f34e706

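The whole diff below is one mechanical refactor: the named container `pub const FileEngine = struct { ... };` is dropped, the file itself becomes the struct, and `@This()` names it, so every member loses one level of indentation. A minimal sketch of the file-as-struct pattern (the fields here are illustrative, not the real FileEngine):

```zig
// FileEngine.zig (sketch) — in Zig, every file is itself a struct type.
// @This() returns the type of the innermost container, here the file.
pub const FileEngine = @This();

// Fields now live at the top level of the file instead of inside a
// `pub const FileEngine = struct { ... };` wrapper.
path: []const u8,

pub fn init(path: []const u8) FileEngine {
    return FileEngine{ .path = path };
}
```

Callers keep using `FileEngine` exactly as before, since the constant re-exports the file's own type.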

@@ -40,29 +40,30 @@ var path_to_ZipponDB_dir_buffer: [1024]u8 = undefined;
/// Manage everything that relates to reading or writing files
/// Or even get stats, whatever. If it touches files, it's here
-pub const FileEngine = struct {
+pub const FileEngine = @This();

path_to_ZipponDB_dir: []const u8,
thread_pool: *Pool, // same pool as the ThreadEngine
schema_engine: SchemaEngine = undefined, // This is initialized after the FileEngine and attached afterward. Do I need to init it after though?

pub fn init(path: []const u8, thread_pool: *Pool) ZipponError!FileEngine {
    return FileEngine{
        .path_to_ZipponDB_dir = std.fmt.bufPrint(&path_to_ZipponDB_dir_buffer, "{s}", .{path}) catch return ZipponError.MemoryError,
        .thread_pool = thread_pool,
    };
}
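init copies the path into a file-scope buffer with std.fmt.bufPrint, so the stored slice stays valid without an allocator; the possible NoSpaceLeft is folded into the project's error set. A minimal, self-contained sketch of that idiom (the names here are illustrative, not from the repo):

```zig
const std = @import("std");

var path_buffer: [1024]u8 = undefined; // file-scope, so the returned slice outlives the call

const SetupError = error{MemoryError};

fn rememberPath(path: []const u8) SetupError![]const u8 {
    // bufPrint writes into the fixed buffer and fails with
    // error.NoSpaceLeft if the path does not fit; map that onto
    // the single caller-facing error.
    return std.fmt.bufPrint(&path_buffer, "{s}", .{path}) catch return SetupError.MemoryError;
}
```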
// --------------------Other--------------------
pub fn readSchemaFile(sub_path: []const u8, buffer: []u8) ZipponError!usize {
    const file = std.fs.cwd().openFile(sub_path, .{}) catch return ZipponError.CantOpenFile;
    defer file.close();
    const len = file.readAll(buffer) catch return FileEngineError.ReadError;
    return len;
}

pub fn writeDbMetrics(self: *FileEngine, buffer: *std.ArrayList(u8)) ZipponError!void {
    const main_dir = std.fs.cwd().openDir(self.path_to_ZipponDB_dir, .{ .iterate = true }) catch return FileEngineError.CantOpenDir;
    const writer = buffer.writer();
@@ -93,12 +94,12 @@ pub const FileEngine = struct {
            try self.getNumberOfEntity(entry.name),
        }) catch return FileEngineError.WriteError;
    }
}

// --------------------Init folder and files--------------------
/// Create the main folder, including DATA, LOG and BACKUP
pub fn createMainDirectories(self: *FileEngine) ZipponError!void {
    var path_buff = std.fmt.bufPrint(&path_buffer, "{s}", .{self.path_to_ZipponDB_dir}) catch return FileEngineError.MemoryError;
    const cwd = std.fs.cwd();
@@ -138,11 +139,11 @@ pub const FileEngine = struct {
        _ = cwd.createFile(path_buff, .{}) catch return FileEngineError.CantMakeFile;
        };
    }
}

/// Request a path to a schema file and then create the struct folder
/// TODO: Check if some data already exists and, if so, ask whether the user wants to delete it and make a backup
pub fn createStructDirectories(self: *FileEngine, struct_array: []SchemaStruct) ZipponError!void {
    var data_dir = try utils.printOpenDir("{s}/DATA", .{self.path_to_ZipponDB_dir}, .{});
    defer data_dir.close();
@@ -155,13 +156,13 @@ pub const FileEngine = struct {
        zid.createFile("0.zid", struct_dir) catch return FileEngineError.CantMakeFile;
    }
}

// --------------------Read and parse files--------------------
/// Use a struct name to populate a list with all UUIDs of this struct
/// TODO: Multi-thread that too
pub fn getNumberOfEntity(self: *FileEngine, struct_name: []const u8) ZipponError!usize {
    var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
    defer arena.deinit();
    const allocator = arena.allocator();
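getNumberOfEntity, like most entry points below, opens a throwaway arena over std.heap.page_allocator and frees all temporaries in one deinit. The idiom in isolation, as a sketch:

```zig
const std = @import("std");

fn withScratchArena() !void {
    // One arena per operation: individual allocations are never freed;
    // the whole region is released by the single deinit below.
    var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
    defer arena.deinit();
    const allocator = arena.allocator();

    const scratch = try allocator.alloc(u8, 4096);
    _ = scratch; // use it; no allocator.free(scratch) needed
}
```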
@@ -182,17 +183,17 @@ pub const FileEngine = struct {
    }
    return count;
}

const UUIDFileIndex = @import("stuffs/UUIDFileIndex.zig").UUIDIndexMap;

/// Populate a map with all UUID bytes as key and file index as value
/// This map is stored in the SchemaStruct so that, from a list of UUIDs, we can get the list of file_index to parse
pub fn populateFileIndexUUIDMap(
    self: *FileEngine,
    sstruct: SchemaStruct,
    map: *UUIDFileIndex,
) ZipponError!void {
    var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
    defer arena.deinit();
    const allocator = arena.allocator();
@@ -238,15 +239,15 @@ pub const FileEngine = struct {
    for (thread_writer_list, 0..) |list, file_index| {
        for (list.items) |uuid| map.put(uuid, file_index) catch return ZipponError.MemoryError;
    }
}

fn populateFileIndexUUIDMapOneFile(
    sstruct: SchemaStruct,
    list: *std.ArrayList(UUID),
    file_index: u64,
    dir: std.fs.Dir,
    sync_context: *ThreadSyncContext,
) void {
    var data_buffer: [BUFFER_SIZE]u8 = undefined;
    var fa = std.heap.FixedBufferAllocator.init(&data_buffer);
    defer fa.reset();
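Each `...OneFile` worker carves its scratch memory out of a fixed stack buffer via std.heap.FixedBufferAllocator, so worker threads never contend on a shared heap. A sketch of the pattern; BUFFER_SIZE is defined elsewhere in the repo, so the value here is only an assumption:

```zig
const std = @import("std");

const BUFFER_SIZE = 64 * 1024; // assumed value for the sketch

fn perFileWorkerScratch() !void {
    var data_buffer: [BUFFER_SIZE]u8 = undefined;
    var fa = std.heap.FixedBufferAllocator.init(&data_buffer);
    defer fa.reset(); // reclaim the whole buffer in O(1)
    const allocator = fa.allocator();

    // Allocation failure here means the buffer is exhausted
    // (error.OutOfMemory), not that the system is out of memory.
    const row = try allocator.alloc(u8, 256);
    _ = row;
}
```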
@@ -274,17 +275,17 @@ pub const FileEngine = struct {
    }
    _ = sync_context.completeThread();
}

/// Use a struct name and filter to populate a map with all UUID bytes as key and void as value
/// This map is used as the value for the ConditionValue of links, so I can do a `contains` on it.
pub fn populateVoidUUIDMap(
    self: *FileEngine,
    struct_name: []const u8,
    filter: ?Filter,
    map: *std.AutoHashMap(UUID, void),
    additional_data: *AdditionalData,
) ZipponError!void {
    var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
    defer arena.deinit();
    const allocator = arena.allocator();
@@ -338,16 +339,16 @@ pub const FileEngine = struct {
            log.debug("{s}", .{UUID.format_bytes(entry.key_ptr.bytes)});
        }
    }
}

fn populateVoidUUIDMapOneFile(
    sstruct: SchemaStruct,
    filter: ?Filter,
    list: *std.ArrayList(UUID),
    file_index: u64,
    dir: std.fs.Dir,
    sync_context: *ThreadSyncContext,
) void {
    var data_buffer: [BUFFER_SIZE]u8 = undefined;
    var fa = std.heap.FixedBufferAllocator.init(&data_buffer);
    defer fa.reset();
@@ -380,17 +381,17 @@ pub const FileEngine = struct {
    }
    _ = sync_context.completeThread();
}

/// Take a filter, parse all files and, if a struct is validated by the filter, write it in JSON format to the writer
/// The filter can be null; this will return all entities
pub fn parseEntities(
    self: *FileEngine,
    struct_name: []const u8,
    filter: ?Filter,
    additional_data: *AdditionalData,
    entry_allocator: Allocator,
) ZipponError![]const u8 {
    var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
    defer arena.deinit();
    const allocator = arena.allocator();
@@ -464,9 +465,9 @@ pub const FileEngine = struct {
    for (relation_maps) |*relation_map| try self.parseEntitiesRelationMap(allocator, relation_map.struct_name, relation_map, &buff);
    return buff.toOwnedSlice() catch return ZipponError.MemoryError;
}

fn parseEntitiesOneFile(
    writer: anytype,
    file_index: u64,
    dir: std.fs.Dir,
@@ -475,7 +476,7 @@ pub const FileEngine = struct {
    additional_data: AdditionalData,
    data_types: []const DataType,
    sync_context: *ThreadSyncContext,
) void {
    var data_buffer: [BUFFER_SIZE]u8 = undefined;
    var fa = std.heap.FixedBufferAllocator.init(&data_buffer);
    defer fa.reset();
@@ -511,21 +512,21 @@ pub const FileEngine = struct {
    }
    _ = sync_context.completeThread();
}

// Receive a map of UUID -> empty JsonString
// Will parse the files and update the value to the JSON string of the entity that represents the key
// Will then write the input with the JSON in the map, looking for {|<>|}
// Once the new input is received, call parseEntitiesRelationMap again if the string still contains {|<>|} because of sub-relationships
// The buffer contains the string with {|<>|} and needs to be updated at the end
// TODO: Use the new function in SchemaEngine to reduce the number of files to parse
pub fn parseEntitiesRelationMap(
    self: *FileEngine,
    parent_allocator: Allocator,
    struct_name: []const u8,
    relation_map: *RelationMap,
    buff: *std.ArrayList(u8),
) ZipponError!void {
    var arena = std.heap.ArenaAllocator.init(parent_allocator);
    defer arena.deinit();
    const allocator = arena.allocator();
@@ -613,9 +614,9 @@ pub const FileEngine = struct {
    // I then call parseEntitiesRelationMap on each
    // This will update the buff items to be the same JSON but with {|<[16]u8>|} replaced with the right JSON
    for (relation_maps) |*sub_relation_map| try self.parseEntitiesRelationMap(allocator, sub_relation_map.struct_name, sub_relation_map, buff);
}

fn parseEntitiesRelationMapOneFile(
    map: *std.AutoHashMap([16]u8, JsonString),
    file_index: u64,
    dir: std.fs.Dir,
@@ -623,7 +624,7 @@ pub const FileEngine = struct {
    additional_data: AdditionalData,
    data_types: []const DataType,
    sync_context: *ThreadSyncContext,
) void {
    var data_buffer: [BUFFER_SIZE]u8 = undefined;
    var fa = std.heap.FixedBufferAllocator.init(&data_buffer);
    defer fa.reset();
@@ -675,17 +676,17 @@ pub const FileEngine = struct {
    }
    _ = sync_context.completeThread();
}
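Every worker signals the coordinator with `sync_context.completeThread()` when its file is done. The ThreadSyncContext itself is not part of this diff; purely as an assumption, a minimal completion counter in the same spirit could look like:

```zig
const std = @import("std");

// Hypothetical reconstruction, NOT the repo's ThreadSyncContext.
const ThreadSyncContext = struct {
    completed: std.atomic.Value(u64) = std.atomic.Value(u64).init(0),
    total: u64,

    fn completeThread(self: *ThreadSyncContext) u64 {
        // Each worker marks itself done exactly once.
        return self.completed.fetchAdd(1, .release) + 1;
    }

    fn isComplete(self: *ThreadSyncContext) bool {
        return self.completed.load(.acquire) == self.total;
    }
};
```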
// --------------------Change existing files--------------------
// TODO: Make it work in batches too
pub fn addEntity(
    self: *FileEngine,
    struct_name: []const u8,
    maps: []std.StringHashMap(ConditionValue),
    writer: anytype,
) ZipponError!void {
    var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
    defer arena.deinit();
    const allocator = arena.allocator();
@@ -716,16 +717,16 @@ pub const FileEngine = struct {
    }
    data_writer.flush() catch return FileEngineError.ZipponDataError;
}

pub fn updateEntities(
    self: *FileEngine,
    struct_name: []const u8,
    filter: ?Filter,
    map: std.StringHashMap(ConditionValue),
    writer: anytype,
    additional_data: *AdditionalData,
) ZipponError!void {
    var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
    defer arena.deinit();
    const allocator = arena.allocator();
@@ -780,9 +781,9 @@ pub const FileEngine = struct {
        writer.writeAll(list.items) catch return FileEngineError.WriteError;
    }
    writer.writeByte(']') catch return FileEngineError.WriteError;
}

fn updateEntitiesOneFile(
    new_data_buff: []zid.Data,
    sstruct: SchemaStruct,
    filter: ?Filter,
@@ -791,7 +792,7 @@ pub const FileEngine = struct {
    file_index: u64,
    dir: std.fs.Dir,
    sync_context: *ThreadSyncContext,
) void {
    log.debug("{any}\n", .{@TypeOf(writer)});
    var data_buffer: [BUFFER_SIZE]u8 = undefined;
    var fa = std.heap.FixedBufferAllocator.init(&data_buffer);
@@ -881,16 +882,16 @@ pub const FileEngine = struct {
    };
    _ = sync_context.completeThread();
}

/// Delete all entities matching the filter. Will also write a JSON-format list of all deleted UUIDs into the buffer
pub fn deleteEntities(
    self: *FileEngine,
    struct_name: []const u8,
    filter: ?Filter,
    writer: anytype,
    additional_data: *AdditionalData,
) ZipponError!void {
    var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
    defer arena.deinit();
    const allocator = arena.allocator();
@@ -940,16 +941,16 @@ pub const FileEngine = struct {
    sstruct.uuid_file_index.map.clearRetainingCapacity();
    _ = sstruct.uuid_file_index.arena.reset(.free_all);
    try self.populateFileIndexUUIDMap(sstruct, sstruct.uuid_file_index);
}

fn deleteEntitiesOneFile(
    sstruct: SchemaStruct,
    filter: ?Filter,
    writer: anytype,
    file_index: u64,
    dir: std.fs.Dir,
    sync_context: *ThreadSyncContext,
) void {
    var data_buffer: [BUFFER_SIZE]u8 = undefined;
    var fa = std.heap.FixedBufferAllocator.init(&data_buffer);
    defer fa.reset();
@@ -1029,15 +1030,15 @@ pub const FileEngine = struct {
    }
    sync_context.completeThread();
}

// TODO: Make a function that takes a list of UUIDs and removes every instance of them in relationships
// (to be run when entities are deleted)

// --------------------ZipponData utils--------------------
// TODO: Update to make it use ConditionValue
fn string2Data(allocator: Allocator, value: ConditionValue) ZipponError!zid.Data {
    switch (value) {
        .int => |v| return zid.Data.initInt(v),
        .float => |v| return zid.Data.initFloat(v),
@@ -1070,16 +1071,16 @@ pub const FileEngine = struct {
        .bool_array => |v| return zid.Data.initBoolArray(zid.allocEncodArray.Bool(allocator, v) catch return FileEngineError.AllocEncodError),
        .unix_array => |v| return zid.Data.initUnixArray(zid.allocEncodArray.Unix(allocator, v) catch return FileEngineError.AllocEncodError),
    }
}
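string2Data is a plain tagged-union dispatch: switch on the ConditionValue tag, unwrap the payload, and wrap it in the matching zid.Data constructor. The shape of the idiom, with stand-in types for the sketch:

```zig
// Stand-ins for ConditionValue and zid.Data, just to show the dispatch shape.
const Value = union(enum) {
    int: i64,
    float: f64,
    bool_: bool,
};

const Data = union(enum) {
    Int: i64,
    Float: f64,
    Bool: bool,
};

fn toData(value: Value) Data {
    return switch (value) {
        .int => |v| .{ .Int = v },
        .float => |v| .{ .Float = v },
        .bool_ => |v| .{ .Bool = v },
    };
}
```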
/// Take a map from parseNewData and return an ordered array of Data to be used in a DataWriter
/// TODO: Optimize, and maybe put it somewhere other than fileEngine
fn orderedNewData(
    self: *FileEngine,
    allocator: Allocator,
    struct_name: []const u8,
    map: std.StringHashMap(ConditionValue),
) ZipponError![]zid.Data {
    const members = try self.schema_engine.structName2structMembers(struct_name);
    var datas = allocator.alloc(zid.Data, (members.len)) catch return FileEngineError.MemoryError;
@@ -1092,13 +1093,13 @@ pub const FileEngine = struct {
    }
    return datas;
}

// --------------------Schema utils--------------------
/// Get the index of the first file that is below the size limit. If none is found, create a new file
/// TODO: Needs some serious speed-up. I should keep a file->size hashmap in memory and use that instead
fn getFirstUsableIndexFile(self: FileEngine, struct_name: []const u8) ZipponError!usize {
    var member_dir = try utils.printOpenDir("{s}/DATA/{s}", .{ self.path_to_ZipponDB_dir, struct_name }, .{ .iterate = true });
    defer member_dir.close();
@@ -1118,11 +1119,11 @@ pub const FileEngine = struct {
    zid.createFile(path, null) catch return FileEngineError.ZipponDataError;
    return i;
}

/// Iterate over all files of a struct and return the index of the last file.
/// E.g. for a struct with 0.csv and 1.csv, it returns 1.
fn maxFileIndex(self: FileEngine, struct_name: []const u8) ZipponError!usize {
    var member_dir = try utils.printOpenDir("{s}/DATA/{s}", .{ self.path_to_ZipponDB_dir, struct_name }, .{ .iterate = true });
    defer member_dir.close();
@@ -1134,14 +1135,14 @@ pub const FileEngine = struct {
        count += 1;
    }
    return count - 1;
}
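The body of maxFileIndex is mostly elided by the hunk, but the visible pieces (an `.iterate = true` open, a `count += 1`, and `return count - 1`) imply a directory walk. A sketch of the elided loop, under the assumption that every counted entry is a data file:

```zig
const std = @import("std");

fn maxIndexOf(dir: std.fs.Dir) !usize {
    var count: usize = 0;
    var iter = dir.iterate(); // requires the dir to be opened with .iterate = true
    while (try iter.next()) |entry| {
        if (entry.kind != .file) continue; // skip sub-directories etc.
        count += 1;
    }
    return count - 1; // files are named 0.zid, 1.zid, ... so the last index is count - 1
}
```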
pub fn isSchemaFileInDir(self: *FileEngine) bool {
    _ = utils.printOpenFile("{s}/schema", .{self.path_to_ZipponDB_dir}, .{}) catch return false;
    return true;
}
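isSchemaFileInDir uses the "open it and see" existence check through the project's utils.printOpenFile helper. The same idiom with only std, as a sketch:

```zig
const std = @import("std");

// Same idea using only std (utils.printOpenFile is a project helper):
fn schemaFileExists(db_dir_path: []const u8) bool {
    var buf: [1024]u8 = undefined;
    const path = std.fmt.bufPrint(&buf, "{s}/schema", .{db_dir_path}) catch return false;
    const file = std.fs.cwd().openFile(path, .{}) catch return false;
    file.close();
    return true;
}
```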
pub fn writeSchemaFile(self: *FileEngine, null_terminated_schema_buff: [:0]const u8) ZipponError!void {
    var zippon_dir = std.fs.cwd().openDir(self.path_to_ZipponDB_dir, .{}) catch return FileEngineError.MemoryError;
    defer zippon_dir.close();
@@ -1153,5 +1154,4 @@ pub const FileEngine = struct {
    var file = zippon_dir.createFile("schema", .{}) catch return ZipponError.CantMakeFile;
    defer file.close();
    file.writeAll(null_terminated_schema_buff) catch return ZipponError.WriteError;
}
-};