Working new SchemaEngine

This commit is contained in:
Adrien Bouvais 2024-11-10 17:10:42 +01:00
parent dd65f5f03e
commit 91b1f61e66
4 changed files with 64 additions and 37 deletions

View File

@ -36,12 +36,12 @@ pub const FileEngine = struct {
allocator: Allocator,
state: FileEngineState,
path_to_ZipponDB_dir: []const u8,
schema_engine: *SchemaEngine = undefined, // I don't really like that; what if I never define it before using it? Should I use ?*SchemaEngine instead?
schema_engine: SchemaEngine = undefined, // I don't really like that here
pub fn init(allocator: Allocator, path: []const u8) ZipponError!FileEngine {
return FileEngine{
.allocator = allocator,
.path_to_ZipponDB_dir = path,
.path_to_ZipponDB_dir = allocator.dupe(u8, path) catch return ZipponError.MemoryError,
.state = .Waiting,
};
}
@ -57,7 +57,7 @@ pub const FileEngine = struct {
// --------------------Other--------------------
pub fn readSchemaFile(sub_path: []const u8, buffer: []u8) ZipponError!usize {
const file = try utils.printOpenFile("{s}/schema", .{sub_path}, .{});
const file = std.fs.cwd().openFile(sub_path, .{}) catch return ZipponError.CantOpenFile;
defer file.close();
const len = file.readAll(buffer) catch return FileEngineError.ReadError;
@ -157,7 +157,7 @@ pub const FileEngine = struct {
for (struct_array) |schema_struct| {
data_dir.makeDir(schema_struct.name) catch |err| switch (err) {
error.PathAlreadyExists => {},
error.PathAlreadyExists => continue,
else => return FileEngineError.CantMakeDir,
};
const struct_dir = data_dir.openDir(schema_struct.name, .{}) catch return FileEngineError.CantOpenDir;
@ -222,6 +222,8 @@ pub const FileEngine = struct {
const sstruct = try self.schema_engine.structName2SchemaStruct(struct_name);
const max_file_index = try self.maxFileIndex(sstruct.name);
log.debug("Max file index {d}", .{max_file_index});
// If there is no member to find, that means we need to return all members, so let's populate additional data with all of them
if (additional_data.member_to_find.items.len == 0) {
additional_data.populateWithEverything(self.allocator, sstruct.members) catch return FileEngineError.MemoryError;
@ -285,6 +287,8 @@ pub const FileEngine = struct {
std.time.sleep(10_000_000); // Check every 10ms
}
if (error_count.load(.acquire) > 0) log.warn("Thread ended with an error {d}", .{error_count.load(.acquire)});
// Append all writers' buffers together
writer.writeByte('[') catch return FileEngineError.WriteError;
for (thread_writer_list) |list| writer.writeAll(list.items) catch return FileEngineError.WriteError;
@ -308,7 +312,7 @@ pub const FileEngine = struct {
defer fa.reset();
const allocator = fa.allocator();
var path_buffer: [128]u8 = undefined;
var path_buffer: [16]u8 = undefined;
const path = std.fmt.bufPrint(&path_buffer, "{d}.zid", .{file_index}) catch |err| {
logErrorAndIncrementCount("Error creating file path", err, error_count);
return;
@ -319,7 +323,10 @@ pub const FileEngine = struct {
return;
};
while (iter.next() catch return) |row| {
while (iter.next() catch |err| {
logErrorAndIncrementCount("Error in iter next", err, error_count);
return;
}) |row| {
if (filter) |f| if (!f.evaluate(row)) continue;
if (writeEntity(writer, row, additional_data, data_types)) |_| {

View File

@ -40,13 +40,13 @@ pub const std_options = .{
const DBEngineState = enum { MissingFileEngine, MissingSchemaEngine, Ok, Init };
const DBEngine = struct {
pub const DBEngine = struct {
allocator: Allocator,
state: DBEngineState = .Init,
file_engine: FileEngine = undefined,
schema_engine: SchemaEngine = undefined,
fn init(allocator: std.mem.Allocator, potential_main_path: ?[]const u8, potential_schema_path: ?[]const u8) DBEngine {
pub fn init(allocator: std.mem.Allocator, potential_main_path: ?[]const u8, potential_schema_path: ?[]const u8) DBEngine {
var self = DBEngine{ .allocator = allocator };
const potential_main_path_or_environment_variable = potential_main_path orelse utils.getEnvVariable(allocator, "ZIPPONDB_PATH");
@ -79,19 +79,21 @@ const DBEngine = struct {
defer allocator.free(schema_path);
log.info("Schema founded in the database directory.", .{});
self.schema_engine = SchemaEngine.init(self.allocator, schema_path) catch {
log.err("Error when init SchemaEngine", .{});
self.schema_engine = SchemaEngine.init(self.allocator, schema_path) catch |err| {
log.err("Error when init SchemaEngine: {any}", .{err});
self.state = .MissingSchemaEngine;
return self;
};
self.file_engine.createStructDirectories(self.schema_engine.struct_array) catch {
log.err("Error when creating struct directories", .{});
self.file_engine.createStructDirectories(self.schema_engine.struct_array) catch |err| {
log.err("Error when creating struct directories: {any}", .{err});
self.schema_engine.deinit();
self.state = .MissingSchemaEngine;
return self;
};
self.file_engine.schema_engine = &self.schema_engine;
log.debug("SchemaEngine created in DBEngine with {d} struct", .{self.schema_engine.struct_array.len});
self.file_engine.schema_engine = self.schema_engine;
self.state = .Ok;
return self;
}
@ -100,18 +102,18 @@ const DBEngine = struct {
const potential_schema_path_or_environment_variable = potential_schema_path orelse utils.getEnvVariable(allocator, "ZIPPONDB_SCHEMA");
if (potential_schema_path_or_environment_variable) |schema_path| {
log.info("Found schema path {s}.", .{schema_path});
self.schema_engine = SchemaEngine.init(self.allocator, schema_path) catch {
log.err("Error when init SchemaEngine", .{});
self.schema_engine = SchemaEngine.init(self.allocator, schema_path) catch |err| {
log.err("Error when init SchemaEngine: {any}", .{err});
self.state = .MissingSchemaEngine;
return self;
};
self.file_engine.createStructDirectories(self.schema_engine.struct_array) catch {
log.err("Error when creating struct directories", .{});
self.file_engine.createStructDirectories(self.schema_engine.struct_array) catch |err| {
log.err("Error when creating struct directories: {any}", .{err});
self.schema_engine.deinit();
self.state = .MissingSchemaEngine;
return self;
};
self.file_engine.schema_engine = &self.schema_engine;
self.file_engine.schema_engine = self.schema_engine;
self.state = .Ok;
} else {
log.info(HELP_MESSAGE.no_schema, .{self.file_engine.path_to_ZipponDB_dir});
@ -119,7 +121,7 @@ const DBEngine = struct {
return self;
}
fn deinit(self: *DBEngine) void {
pub fn deinit(self: *DBEngine) void {
if (self.state == .Ok or self.state == .MissingSchemaEngine) self.file_engine.deinit(); // Pretty sure I can use like state > 2 because enum of just number
if (self.state == .Ok) self.schema_engine.deinit();
}
@ -222,7 +224,7 @@ pub fn main() !void {
continue;
}
if (db_engine.state == .MissingSchemaEngine) {
send("{s}", .{HELP_MESSAGE.no_schema});
send(HELP_MESSAGE.no_schema, .{db_engine.file_engine.path_to_ZipponDB_dir});
state = .end;
continue;
}
@ -258,7 +260,7 @@ pub fn main() !void {
continue;
}
if (db_engine.state == .MissingSchemaEngine) {
send("{s}", .{HELP_MESSAGE.no_schema});
send(HELP_MESSAGE.no_schema, .{db_engine.file_engine.path_to_ZipponDB_dir});
state = .end;
continue;
}

View File

@ -11,10 +11,11 @@ const FileEngine = @import("fileEngine.zig").FileEngine;
const config = @import("config.zig");
const BUFFER_SIZE = config.BUFFER_SIZE;
const log = std.log.scoped(.fileEngine);
const log = std.log.scoped(.schemaEngine);
/// Manages everything that is related to the schema.
/// This includes keeping the schema and schema file in memory, and some functions to query it, e.g. getting all members of a specific struct.
/// For now it is a bit empty, but this is where I will manage migration.
pub const SchemaEngine = struct {
allocator: Allocator,
null_terminated_schema_buff: [:0]u8,
@ -22,6 +23,7 @@ pub const SchemaEngine = struct {
// The path is the path to the schema file
pub fn init(allocator: Allocator, path: []const u8) ZipponError!SchemaEngine {
log.debug("Trying to init a SchemaEngine with path {s}", .{path});
var schema_buf = allocator.alloc(u8, BUFFER_SIZE) catch return ZipponError.MemoryError;
defer allocator.free(schema_buf);
@ -36,6 +38,8 @@ pub const SchemaEngine = struct {
errdefer struct_array.deinit();
parser.parse(&struct_array) catch return ZipponError.SchemaNotConform;
log.debug("SchemaEngine init with {d} SchemaStruct.", .{struct_array.items.len});
return SchemaEngine{
.allocator = allocator,
.null_terminated_schema_buff = null_terminated_schema_buff,

View File

@ -1058,16 +1058,22 @@ test "Synthax error" {
try expectParsingError("GRAB User {name < 'Hello'}", ZiQlParserError.ConditionError);
}
const DBEngine = @import("main.zig").DBEngine;
fn testParsing(source: [:0]const u8) !void {
const TEST_DATA_DIR = @import("config.zig").TEST_DATA_DIR;
const allocator = std.testing.allocator;
const path = try allocator.dupe(u8, TEST_DATA_DIR);
var file_engine = try FileEngine.init(allocator, path);
defer file_engine.deinit();
var db_engine = DBEngine.init(allocator, TEST_DATA_DIR, null);
defer db_engine.deinit();
var tokenizer = Tokenizer.init(source);
var parser = Parser.init(allocator, &tokenizer, &file_engine);
var toker = Tokenizer.init(source);
var parser = Parser.init(
allocator,
&toker,
&db_engine.file_engine,
&db_engine.schema_engine,
);
try parser.parse();
}
@ -1076,12 +1082,16 @@ fn expectParsingError(source: [:0]const u8, err: ZiQlParserError) !void {
const TEST_DATA_DIR = @import("config.zig").TEST_DATA_DIR;
const allocator = std.testing.allocator;
const path = try allocator.dupe(u8, TEST_DATA_DIR);
var file_engine = try FileEngine.init(allocator, path);
defer file_engine.deinit();
var db_engine = DBEngine.init(allocator, TEST_DATA_DIR, null);
defer db_engine.deinit();
var tokenizer = Tokenizer.init(source);
var parser = Parser.init(allocator, &tokenizer, &file_engine);
var toker = Tokenizer.init(source);
var parser = Parser.init(
allocator,
&toker,
&db_engine.file_engine,
&db_engine.schema_engine,
);
try std.testing.expectError(err, parser.parse());
}
@ -1099,12 +1109,16 @@ fn testParseFilter(source: [:0]const u8) !void {
const TEST_DATA_DIR = @import("config.zig").TEST_DATA_DIR;
const allocator = std.testing.allocator;
const path = try allocator.dupe(u8, TEST_DATA_DIR);
var file_engine = try FileEngine.init(allocator, path);
defer file_engine.deinit();
var db_engine = DBEngine.init(allocator, TEST_DATA_DIR, null);
defer db_engine.deinit();
var tokenizer = Tokenizer.init(source);
var parser = Parser.init(allocator, &tokenizer, &file_engine);
var toker = Tokenizer.init(source);
var parser = Parser.init(
allocator,
&toker,
&db_engine.file_engine,
&db_engine.schema_engine,
);
var filter = try parser.parseFilter("User", false);
defer filter.deinit();