diff --git a/src/cliParser.zig b/src/cliParser.zig
index 1015e8f..21a4f55 100644
--- a/src/cliParser.zig
+++ b/src/cliParser.zig
@@ -1,13 +1,12 @@
 const std = @import("std");
 const Allocator = std.mem.Allocator;
+const DataEngine = @import("fileEngine.zig").FileEngine;
 const cliTokenizer = @import("tokenizers/cli.zig").Tokenizer;
 const cliToken = @import("tokenizers/cli.zig").Token;
 const ziqlTokenizer = @import("tokenizers/ziql.zig").Tokenizer;
 const ziqlToken = @import("tokenizers/ziql.zig").Token;
 const ziqlParser = @import("ziqlParser.zig").Parser;
 
-const stdout = std.io.getStdOut().writer();
-
 pub fn main() !void {
     // TODO: Use an environment variable for the path of the DB
     checkAndCreateDirectories();
@@ -28,6 +27,7 @@ pub fn main() !void {
     const line_buf = try allocator.alloc(u8, 1024 * 50);
     defer allocator.free(line_buf);
 
+    // TODO: Use a State to prevent first_token and second_token
     while (true) {
         std.debug.print("> ", .{});
         const line = try std.io.getStdIn().reader().readUntilDelimiterOrEof(line_buf, '\n');
@@ -47,7 +47,7 @@ pub fn main() !void {
                         defer allocator.free(null_term_query_str);
                         try runCommand(null_term_query_str);
                     },
-                    .keyword_help => std.debug.print("The run command will take a ZiQL query between \" and run it. eg: run \"GRAB User\"\n"),
+                    .keyword_help => std.debug.print("The run command will take a ZiQL query between \" and run it. eg: run \"GRAB User\"\n", .{}),
                     else => std.debug.print("After command run, need a string of a query, eg: \"GRAB User\"\n", .{}),
                 }
             },
@@ -55,8 +55,20 @@ pub fn main() !void {
                 const second_token = cliToker.next();
                 switch (second_token.tag) {
-                    .keyword_describe => try runCommand("__DESCRIBE__"),
-                    .keyword_build => std.debug.print("Need to do the SchemaEngine tu update and migrate the schema"),
+                    .keyword_describe => std.debug.print("{s}\n", .{ // TODO: Change that to use the SchemaEngine
+                        \\User (
+                        \\ name: str,
+                        \\ email: str,
+                        \\)
+                        \\
+                        \\Message (
+                        \\ content: str,
+                        \\)
+                    }),
+                    .keyword_build => { // Maybe rename that in init now that I don't build binary anymore
+                        const data_engine = DataEngine.init(allocator, null);
+                        try data_engine.initDataFolder();
+                    },
                     .keyword_help => {
                         std.debug.print("{s}", .{
                             \\Here are all available options to use with the schema command:
@@ -75,8 +87,7 @@ pub fn main() !void {
                 \\
                 \\run To run a query. Args => query: str, the query to execute.
                 \\schema Build a new engine and print current schema.
-                \\kill To stop the process without saving
-                \\save Save the database to the normal files.
+                \\quit To stop the process without saving
                 \\dump Create a new folder with all data as copy. Args => foldername: str, the name of the folder.
                 \\bump Replace current data with a previous dump. Args => foldername: str, the name of the folder.
                 \\
@@ -115,7 +126,12 @@ fn checkAndCreateDirectories() void {
         else => @panic("Error other than path already exists when trying to create the DATA directory.\n"),
     };
 
-    cwd.makeDir("ZipponDB/ENGINE") catch |err| switch (err) {
+    cwd.makeDir("ZipponDB/BACKUP") catch |err| switch (err) {
+        error.PathAlreadyExists => {},
+        else => @panic("Error other than path already exists when trying to create the BACKUP directory.\n"),
+    };
+
+    cwd.makeDir("ZipponDB/LOG") catch |err| switch (err) {
         error.PathAlreadyExists => {},
         else => @panic("Error other than path already exists when trying to create the ENGINE directory.\n"),
     };
diff --git a/src/fileEngine.zig b/src/fileEngine.zig
index 61f79ef..d6b4020 100644
--- a/src/fileEngine.zig
+++ b/src/fileEngine.zig
@@ -4,7 +4,6 @@ const schemaEngine = @import("schemaEngine.zig");
 const Allocator = std.mem.Allocator;
 const UUID = @import("types/uuid.zig").UUID;
 const DataType = @import("types/dataType.zig").DataType;
-const stdout = std.io.getStdOut().writer();
 
 //TODO: Create a union class and chose between file and memory
 
@@ -12,9 +11,9 @@ const stdout = std.io.getStdOut().writer();
 /// Or even get stats, whatever. If it touch files, it's here
 pub const FileEngine = struct {
     allocator: Allocator,
-    dir: std.fs.Dir, // The path to the DATA folder
-    max_file_size: usize = 1e+8, // 100mb
-    //
+    path_to_DATA_dir: []const u8, // The path to the DATA folder
+    max_file_size: usize = 1e+7, // 10mb
+
     const DataEngineError = error{
         ErrorCreateDataFolder,
         ErrorCreateStructFolder,
         ErrorCreateMemberFolder,
         ErrorCreateMainFile,
         ErrorCreateDataFile,
     };
 
-    /// Suported operation for the filter
-    /// TODO: Add more operation, like IN for array and LIKE for regex
-    const Operation = enum {
-        equal,
-        different,
-        superior,
-        superior_or_equal,
-        inferior,
-        inferior_or_equal,
-    };
-
     const ComparisonValue = union {
         int: i64,
         float: f64,
@@ -48,12 +36,11 @@
     /// use to parse file. It take a struct name and member name to know what to parse.
     /// An Operation from equal, different, superior, superior_or_equal, ...
     /// The DataType from int, float and str
-    /// TODO: Change the value to be the right type and not just a string all the time
     pub const Condition = struct {
         struct_name: []const u8,
         member_name: []const u8 = undefined,
         value: []const u8 = undefined,
-        operation: Operation = undefined,
+        operation: enum { equal, different, superior, superior_or_equal, inferior, inferior_or_equal } = undefined, // Add more stuff like IN
         data_type: DataType = undefined,
 
         pub fn init(struct_name: []const u8) Condition {
@@ -62,20 +49,15 @@
     };
 
     pub fn init(allocator: Allocator, DATA_path: ?[]const u8) FileEngine {
-        const path = DATA_path orelse "ZipponDB/DATA";
-        const dir = std.fs.cwd().openDir(path, .{}) catch @panic("Error opening ZipponDB/DATA");
+        // I think use env variable for the path, idk, something better at least than just that 😕
         return FileEngine{
             .allocator = allocator,
-            .dir = dir,
+            .path_to_DATA_dir = DATA_path orelse "ZipponDB/DATA",
         };
     }
 
-    pub fn deinit(self: *FileEngine) void {
-        self.dir.close();
-    }
-
     /// Take a condition and an array of UUID and fill the array with all UUID that match the condition
-    pub fn getUUIDListUsingCondition(self: *FileEngine, condition: Condition, uuid_array: *std.ArrayList(UUID)) !void {
+    pub fn getUUIDListUsingCondition(self: FileEngine, condition: Condition, uuid_array: *std.ArrayList(UUID)) !void {
         var file_names = std.ArrayList([]const u8).init(self.allocator);
         self.getFilesNames(condition.struct_name, condition.member_name, &file_names) catch @panic("Can't get list of files");
         defer {
@@ -85,14 +67,11 @@
             file_names.deinit();
         }
 
-        const sub_path = std.fmt.allocPrint(
-            self.allocator,
-            "{s}/{s}/{s}",
-            .{ condition.struct_name, condition.member_name, file_names.items[0] },
-        ) catch @panic("Can't create sub_path for init a DataIterator");
+        const sub_path = std.fmt.allocPrint(self.allocator, "{s}/{s}/{s}/{s}", .{ self.path_to_DATA_dir, condition.struct_name, condition.member_name, file_names.items[0] }) catch @panic("Can't create sub_path for init a DataIterator");
         defer self.allocator.free(sub_path);
 
-        var file = self.dir.openFile(sub_path, .{}) catch @panic("Can't open first file to init a data iterator");
+        var file = std.fs.cwd().openFile(sub_path, .{}) catch @panic("Can't open first file to init a data iterator");
+        defer file.close();
 
         var output: [1024 * 50]u8 = undefined; // Maybe need to increase that as it limit the size of a line in files
         var output_fbs = std.io.fixedBufferStream(&output);
@@ -133,7 +112,8 @@
                     if (file_index == file_names.items.len) break;
 
-                    // TODO: Update the file and reader to be the next file of the list
+                    // FIXME: Update the file and reader to be the next file of the list
+                    std.debug.print("End of stream\n", .{});
                     break;
                 }, // file read till the end
@@ -151,7 +131,7 @@
             switch (condition.data_type) {
                 .int => if (compare_value.int == dataParsing.parseInt(output_fbs.getWritten()[37..])) try uuid_array.append(try UUID.parse(output_fbs.getWritten()[0..36])),
                 .float => if (compare_value.float == dataParsing.parseFloat(output_fbs.getWritten()[37..])) try uuid_array.append(try UUID.parse(output_fbs.getWritten()[0..36])),
-                .str => if (std.mem.eql(u8, compare_value.str, output_fbs.getWritten()[38 .. output_fbs.getWritten().len - 1])) try uuid_array.append(try UUID.parse(output_fbs.getWritten()[0..36])),
+                .str => if (std.mem.eql(u8, compare_value.str, output_fbs.getWritten()[37..output_fbs.getWritten().len])) try uuid_array.append(try UUID.parse(output_fbs.getWritten()[0..36])),
                 .bool => if (compare_value.bool_ == dataParsing.parseBool(output_fbs.getWritten()[37..])) try uuid_array.append(try UUID.parse(output_fbs.getWritten()[0..36])),
                 // TODO: Implement for array too
                 else => {},
@@ -203,13 +183,15 @@
             }
         }
 
-    // TODO: Test leak on that
-    pub fn writeEntity(self: *FileEngine, struct_name: []const u8, data_map: std.StringHashMap([]const u8)) !void {
+    // TODO: Clean a bit the code
+    // Do I need multiple files too ? I mean it duplicate UUID a lot, if it's just to save a name like 'Bob', storing a long UUID is overkill
+    // I could just use a tabular data format with separator using space
+    pub fn writeEntity(self: FileEngine, struct_name: []const u8, data_map: std.StringHashMap([]const u8)) !void {
         const uuid_str = UUID.init().format_uuid();
-        defer stdout.print("Added new {s} successfully using UUID: {s}\n", .{
+        defer std.debug.print("Added new {s} successfully using UUID: {s}\n", .{
             struct_name,
             uuid_str,
-        }) catch {};
+        });
 
         const member_names = schemaEngine.structName2structMembers(struct_name);
         for (member_names) |member_name| {
@@ -218,122 +200,48 @@
             if (potential_file_name_to_use) |file_name| {
                 defer self.allocator.free(file_name);
 
-                const file_index = self.fileName2Index(file_name);
-
-                const path = try std.fmt.allocPrint(self.allocator, "{s}/{s}/{s}", .{
-                    struct_name,
-                    member_name,
-                    file_name,
-                });
+                const path = try std.fmt.allocPrint(self.allocator, "{s}/{s}/{s}/{s}", .{ self.path_to_DATA_dir, struct_name, member_name, file_name });
                 defer self.allocator.free(path);
 
-                var file = self.dir.openFile(path, .{
+                var file = std.fs.cwd().openFile(path, .{
                     .mode = .read_write,
                 }) catch {
-                    try stdout.print("Error opening data file.", .{});
+                    std.debug.print("Error opening data file.", .{});
                     return;
                 };
                 defer file.close();
 
                 try file.seekFromEnd(0);
                 try file.writer().print("{s} {s}\n", .{ uuid_str, data_map.get(member_name).? });
-
-                const path_to_main = try std.fmt.allocPrint(self.allocator, "{s}/{s}/main.zippondata", .{
-                    struct_name,
-                    member_name,
-                });
-                defer self.allocator.free(path_to_main);
-
-                var file_main = self.dir.openFile(path_to_main, .{
-                    .mode = .read_write,
-                }) catch {
-                    try stdout.print("Error opening data file.", .{});
-                    return;
-                };
-                defer file_main.close();
-
-                try self.addUUIDToMainFile(file_main, file_index + 1, &uuid_str);
             } else {
                 const max_index = try self.maxFileIndex(struct_name, member_name);
 
-                const new_file_path = try std.fmt.allocPrint(self.allocator, "{s}/{s}/{d}.zippondata", .{
-                    struct_name,
-                    member_name,
-                    max_index + 1,
-                });
+                const new_file_path = try std.fmt.allocPrint(self.allocator, "{s}/{s}/{s}/{d}.zippondata", .{ self.path_to_DATA_dir, struct_name, member_name, max_index + 1 });
                 defer self.allocator.free(new_file_path);
 
-                try stdout.print("new file path: {s}\n", .{new_file_path});
+                std.debug.print("new file path: {s}\n", .{new_file_path});
 
-                const new_file = self.dir.createFile(new_file_path, .{}) catch @panic("Error creating new data file");
+                const new_file = std.fs.cwd().createFile(new_file_path, .{}) catch @panic("Error creating new data file");
                 defer new_file.close();
 
                 try new_file.writer().print("{s} {s}\n", .{ &uuid_str, data_map.get(member_name).? });
-
-                const path_to_main = try std.fmt.allocPrint(self.allocator, "ZipponDB/DATA/{s}/{s}/main.zippondata", .{
-                    struct_name,
-                    member_name,
-                });
-                defer self.allocator.free(path_to_main);
-
-                var file_main = self.dir.openFile(path_to_main, .{
-                    .mode = .read_write,
-                }) catch {
-                    try stdout.print("Error opening data file.", .{});
-                    @panic("");
-                };
-                defer file_main.close();
-
-                try file_main.seekFromEnd(0);
-                try file_main.writeAll("\n ");
-                try file_main.seekTo(0);
-                try self.addUUIDToMainFile(file_main, max_index + 1, &uuid_str);
             }
         }
     }
 
     /// Use a filename in the format 1.zippondata and return the 1
-    fn fileName2Index(_: *FileEngine, file_name: []const u8) usize {
+    fn fileName2Index(_: FileEngine, file_name: []const u8) usize {
         var iter_file_name = std.mem.tokenize(u8, file_name, ".");
         const num_str = iter_file_name.next().?;
         const num: usize = std.fmt.parseInt(usize, num_str, 10) catch @panic("Couln't parse the int of a zippondata file.");
         return num;
     }
 
-    /// Add an UUID at a specific index of a file
-    /// Used when some data are deleted from previous zippondata files and are now bellow the file size limit
-    fn addUUIDToMainFile(_: *FileEngine, file: std.fs.File, index: usize, uuid_str: []const u8) !void {
-        var output: [1024 * 50]u8 = undefined; // Maybe need to increase that as it limit the size of a line in files
-        var output_fbs = std.io.fixedBufferStream(&output);
-        const writer = output_fbs.writer();
+    fn getFilesNames(self: FileEngine, struct_name: []const u8, member_name: []const u8, file_names: *std.ArrayList([]const u8)) !void {
+        const path = try std.fmt.allocPrint(self.allocator, "{s}/{s}/{s}", .{ self.path_to_DATA_dir, struct_name, member_name });
+        defer self.allocator.free(path);
 
-        var reader = file.reader();
-
-        var line_num: usize = 1;
-        while (true) {
-            output_fbs.reset();
-            reader.streamUntilDelimiter(writer, '\n', null) catch |err| switch (err) { // Maybe do a better error handeling. Because if an error happend here, data are already written in files but not in main
-                error.EndOfStream => {
-                    output_fbs.reset(); // clear buffer before exit
-                    break;
-                }, // file read till the end
-                else => break,
-            };
-
-            if (line_num == index) {
-                try file.seekBy(-1);
-                try file.writer().print("{s} ", .{uuid_str});
-                return;
-            }
-            line_num += 1;
-        }
-    }
-
-    fn getFilesNames(self: *FileEngine, struct_name: []const u8, member_name: []const u8, file_names: *std.ArrayList([]const u8)) !void {
-        const sub_path = try std.fmt.allocPrint(self.allocator, "{s}/{s}", .{ struct_name, member_name });
-        defer self.allocator.free(sub_path);
-
-        var member_dir = try self.dir.openDir(sub_path, .{ .iterate = true });
+        var member_dir = try std.fs.cwd().openDir(path, .{ .iterate = true });
         defer member_dir.close();
 
         var iter = member_dir.iterate();
@@ -345,11 +253,11 @@
     /// Use the map of file stat to find the first file with under the bytes limit.
     /// return the name of the file. If none is found, return null.
-    fn getFirstUsableFile(self: *FileEngine, struct_name: []const u8, member_name: []const u8) !?[]const u8 {
-        const sub_path = try std.fmt.allocPrint(self.allocator, "{s}/{s}", .{ struct_name, member_name });
-        defer self.allocator.free(sub_path);
+    fn getFirstUsableFile(self: FileEngine, struct_name: []const u8, member_name: []const u8) !?[]const u8 {
+        const path = try std.fmt.allocPrint(self.allocator, "{s}/{s}/{s}", .{ self.path_to_DATA_dir, struct_name, member_name });
+        defer self.allocator.free(path);
 
-        var member_dir = try self.dir.openDir(sub_path, .{ .iterate = true });
+        var member_dir = try std.fs.cwd().openDir(path, .{ .iterate = true });
         defer member_dir.close();
 
         var iter = member_dir.iterate();
@@ -364,12 +272,11 @@
     /// Iter over all file and get the max name and return the value of it as usize
     /// So for example if there is 1.zippondata and 2.zippondata it return 2.
-    fn maxFileIndex(self: *FileEngine, struct_name: []const u8, member_name: []const u8) !usize {
-        const buffer = try self.allocator.alloc(u8, 1024); // Adjust the size as needed
-        defer self.allocator.free(buffer);
+    fn maxFileIndex(self: FileEngine, struct_name: []const u8, member_name: []const u8) !usize {
+        const path = try std.fmt.allocPrint(self.allocator, "{s}/{s}/{s}", .{ self.path_to_DATA_dir, struct_name, member_name });
+        defer self.allocator.free(path);
 
-        const sub_path = try std.fmt.bufPrint(buffer, "{s}/{s}", .{ struct_name, member_name });
-        const member_dir = try self.dir.openDir(sub_path, .{ .iterate = true });
+        const member_dir = try std.fs.cwd().openDir(path, .{ .iterate = true });
 
         var count: usize = 0;
         var iter = member_dir.iterate();
@@ -381,14 +288,16 @@
     }
 
     // TODO: Give the option to keep , dump or erase the data
-    pub fn initDataFolder(self: *FileEngine) !void {
+    pub fn initDataFolder(self: FileEngine) !void {
+        var data_dir = try std.fs.cwd().openDir(self.path_to_DATA_dir, .{});
+        defer data_dir.close();
+
         for (schemaEngine.struct_name_list) |struct_name| {
-            self.dir.makeDir(struct_name) catch |err| switch (err) {
+            data_dir.makeDir(struct_name) catch |err| switch (err) {
                 error.PathAlreadyExists => {},
                 else => return DataEngineError.ErrorCreateStructFolder,
             };
-            const struct_dir = try self.dir.openDir(struct_name, .{});
-            defer struct_dir.close();
+            const struct_dir = try data_dir.openDir(struct_name, .{});
 
             const member_names = schemaEngine.structName2structMembers(struct_name);
             for (member_names) |member_name| {
@@ -397,15 +306,7 @@
                     else => return DataEngineError.ErrorCreateMemberFolder,
                 };
                 const member_dir = try struct_dir.openDir(member_name, .{});
-                defer member_dir.close();
 
-                blk: {
-                    const file = member_dir.createFile("main.zippondata", .{}) catch |err| switch (err) {
-                        error.PathAlreadyExists => break :blk,
-                        else => return DataEngineError.ErrorCreateMainFile,
-                    };
-                    try file.writeAll("\n");
-                }
                 _ = member_dir.createFile("0.zippondata", .{}) catch |err| switch (err) {
                     error.PathAlreadyExists => {},
                     else => return DataEngineError.ErrorCreateDataFile,
@@ -425,3 +326,9 @@
 }
 
 test "Get list of UUID using condition" {
     const condition = FileEngine.Condition{ .struct_name = "User", .member_name = "email", .value = "adrien@mail.com", .operation = .equal, .data_type = .str };
     try data_engine.getUUIDListUsingCondition(condition, &uuid_array);
 }
+
+test "Open dir" {
+    const dir = std.fs.cwd();
+    const sub_dir = try dir.openDir("src/types", .{});
+    _ = sub_dir;
+}
diff --git a/src/schema.zipponschema b/src/schema.zipponschema
deleted file mode 100644
index cc18a46..0000000
--- a/src/schema.zipponschema
+++ /dev/null
@@ -1,8 +0,0 @@
-User (
- name: str,
- email: str,
-)
-
-Message (
- content: str,
-)
diff --git a/src/schemaEngine.zig b/src/schemaEngine.zig
index dbfc67b..3175727 100644
--- a/src/schemaEngine.zig
+++ b/src/schemaEngine.zig
@@ -5,17 +5,17 @@ const std = @import("std");
 const DataType = @import("types/dataType.zig").DataType;
 
-const struct_name_list: [2][]const u8 = .{
+pub const struct_name_list: [2][]const u8 = .{
     "User",
     "Message",
 };
 
-const struct_member_list: [2][]const []const u8 = .{
+pub const struct_member_list: [2][]const []const u8 = .{
     &[_][]const u8{ "name", "email", "age", "scores", "friends" },
     &[_][]const u8{"content"},
 };
 
-const struct_type_list: [2][]const DataType = .{
+pub const struct_type_list: [2][]const DataType = .{
     &[_]DataType{ .str, .str, .int, .int_array, .bool_array },
     &[_]DataType{.str},
 };
@@ -93,7 +93,7 @@ pub fn checkIfAllMemberInMap(struct_name: []const u8, map: *std.StringHashMap([]
     var count: u16 = 0;
 
     for (all_struct_member) |key| {
-        if (map.contains(key)) count += 1;
+        if (map.contains(key)) count += 1 else std.debug.print("Missing: {s}\n", .{key});
     }
 
     return ((count == all_struct_member.len) and (count == map.count()));
diff --git a/src/tokenizers/ziql.zig b/src/tokenizers/ziql.zig
index 93ba2c1..36a717a 100644
--- a/src/tokenizers/ziql.zig
+++ b/src/tokenizers/ziql.zig
@@ -17,8 +17,6 @@ pub const Token = struct {
         .{ "ADD", .keyword_add },
         .{ "IN", .keyword_in },
         .{ "null", .keyword_null },
-        .{ "__DESCRIBE__", .keyword__describe__ },
-        .{ "__INIT__", .keyword__init__ },
         .{ "true", .bool_literal_true },
         .{ "false", .bool_literal_false },
         .{ "AND", .keyword_and },
@@ -41,8 +39,6 @@
         keyword_null,
         keyword_and,
         keyword_or,
-        keyword__describe__,
-        keyword__init__,
 
         string_literal,
         int_literal,
diff --git a/src/ziqlParser.zig b/src/ziqlParser.zig
index c6d7cfc..87a38dc 100644
--- a/src/ziqlParser.zig
+++ b/src/ziqlParser.zig
@@ -9,36 +9,33 @@ const Allocator = std.mem.Allocator;
 
 pub const Parser = struct {
     allocator: Allocator,
-    toker: *Tokenizer,
     state: State,
-    data_engine: *DataEngine,
+    toker: *Tokenizer,
+    data_engine: DataEngine,
     additional_data: AdditionalData,
     struct_name: []const u8 = undefined,
-    action: Action = undefined,
+    action: enum { GRAB, ADD, UPDATE, DELETE } = undefined,
 
     pub fn init(allocator: Allocator, toker: *Tokenizer) Parser {
-        var data_engine = DataEngine.init(allocator, null);
+        // Do I need to init a DataEngine at each Parser, can't I put it in the CLI parser instead ?
+        const data_engine = DataEngine.init(allocator, null);
 
         return Parser{
             .allocator = allocator,
             .toker = toker,
             .state = State.start,
-            .data_engine = &data_engine,
+            .data_engine = data_engine,
             .additional_data = AdditionalData.init(allocator),
         };
     }
 
     pub fn deinit(self: *Parser) void {
         self.additional_data.deinit();
-        //self.allocator.free(self.struct_name);
-        //self.data_engine.deinit();
     }
 
-    const Action = enum {
-        GRAB,
-        ADD,
-        UPDATE,
-        DELETE,
+    const Options = struct {
+        members_for_ordering: std.ArrayList([]const u8), // The list in the right order of member name to use to order the result
+        sense_for_ordering: enum { ASC, DESC },
     };
 
     const State = enum {
@@ -123,29 +120,21 @@
             .start => {
                 switch (token.tag) {
                     .keyword_grab => {
-                        self.action = Action.GRAB;
+                        self.action = .GRAB;
                         self.state = State.expect_struct_name;
                     },
                     .keyword_add => {
-                        self.action = Action.ADD;
+                        self.action = .ADD;
                         self.state = State.expect_struct_name;
                     },
                     .keyword_update => {
-                        self.action = Action.UPDATE;
+                        self.action = .UPDATE;
                         self.state = State.expect_struct_name;
                     },
                     .keyword_delete => {
-                        self.action = Action.DELETE;
+                        self.action = .DELETE;
                         self.state = State.expect_struct_name;
                     },
-                    .keyword__describe__ => {
-                        std.debug.print("{s}", .{@embedFile("schema.zipponschema")});
-                        self.state = State.end;
-                    },
-                    .keyword__init__ => {
-                        try self.data_engine.initDataFolder();
-                        self.state = State.end;
-                    },
                     else => {
                         self.printError("Error: Expected action keyword. Available: GRAB ADD DELETE UPDATE", &token);
                         self.state = State.end;
@@ -176,7 +165,7 @@
             .filter_and_send => {
                 var array = std.ArrayList(UUID).init(self.allocator);
                 defer array.deinit();
-                try self.parseFilter(&array);
+                try self.parseFilter(&array, self.struct_name, true);
                 self.sendEntity(array.items);
                 self.state = State.end;
             },
@@ -192,10 +181,12 @@
             .parse_new_data_and_add_data => {
                 switch (self.action) {
                     .ADD => {
-                        const data_map = std.StringHashMap([]const u8).init(self.allocator);
+                        var data_map = std.StringHashMap([]const u8).init(self.allocator);
                         defer data_map.deinit();
                         self.parseNewData(&data_map);
-                        if (!schemaEngine.checkIfAllMemberInMap(self.struct_name, data_map)) {}
+
+                        // TODO: Print the list of missing
+                        if (!schemaEngine.checkIfAllMemberInMap(self.struct_name, &data_map)) self.printError("Error: Missing member", &token);
                         try self.data_engine.writeEntity(self.struct_name, data_map);
                         self.state = State.end;
                     },
@@ -214,7 +205,13 @@
     fn sendEntity(self: *Parser, uuid_array: []UUID) void {
         _ = self;
-        std.debug.print("Number of uuid to send: {d}", .{uuid_array.len});
+        std.debug.print("Number of uuid to send: {d}\n", .{uuid_array.len});
+    }
+
+    // TODO: The parser that check what is between ||
+    // For now only |ASC name, age|
+    fn parseOptions(self: *Parser) void {
+        _ = self;
     }
 
     /// Take an array of UUID and populate it to be the array that represent filter between {}
@@ -234,9 +231,10 @@
         }) {
             switch (self.state) {
                 .expect_left_condition => {
-                    self.parseCondition(&left_condition, &token);
+                    token = self.parseCondition(&left_condition, &token);
                     try self.data_engine.getUUIDListUsingCondition(left_condition, left_array);
                     self.state = State.expect_ANDOR_OR_end;
+                    keep_next = true;
                 },
                 .expect_ANDOR_OR_end => {
                     switch (token.tag) {
@@ -274,7 +272,8 @@
                         .identifier => {
                             var right_condition = Condition.init(struct_name);
-                            self.parseCondition(&right_condition, &token);
+                            token = self.parseCondition(&right_condition, &token);
+                            keep_next = true;
                             try self.data_engine.getUUIDListUsingCondition(right_condition, &right_array);
                         }, // Create a new condition and compare it
                         else => self.printError("Error: Expecting ( or member name.", &token),
@@ -288,6 +287,7 @@
                             try OR(left_array, &right_array);
                         },
                     }
+                    std.debug.print("Token here {any}\n", .{token});
                     self.state = .expect_ANDOR_OR_end;
                 },
                 else => unreachable,
@@ -295,7 +295,7 @@
         }
     }
 
-    fn parseCondition(self: *Parser, condition: *Condition, token_ptr: *Token) void {
+    fn parseCondition(self: *Parser, condition: *Condition, token_ptr: *Token) Token {
         var keep_next = false;
        self.state = State.expect_member;
        var token = token_ptr.*;
@@ -406,6 +406,7 @@
                else => unreachable,
            }
        }
+        return token;
    }
 
    /// When this function is call, the tokenizer last token retrieved should be [.
@@ -653,6 +654,7 @@
        }
    }
 
+    // TODO: Stop panicking !
    fn printError(self: *Parser, message: []const u8, token: *Token) void {
        std.debug.print("\n", .{});
        std.debug.print("{s}\n", .{self.toker.buffer});
@@ -768,18 +770,6 @@ fn compareUUIDArray(arr1: std.ArrayList(UUID), arr2: std.ArrayList(UUID)) bool {
    return true;
 }
 
-test "Parse filter" {
-    const allocator = std.testing.allocator;
-    var tokenizer = Tokenizer.init("{name = 'Adrien'}");
-    var parser = Parser.init(allocator, &tokenizer);
-    _ = tokenizer.next();
-
-    var uuid_array = std.ArrayList(UUID).init(allocator);
-    defer uuid_array.deinit();
-
-    try parser.parseFilter(&uuid_array, "User", true);
-}
-
 test "Parse condition" {
     const condition1 = Condition{ .data_type = .int, .member_name = "age", .operation = .superior_or_equal, .struct_name = "User", .value = "26" };
     try testConditionParsing("age >= 26", condition1);
@@ -798,7 +788,7 @@
     var token = tokenizer.next();
     var condition = Condition.init("User");
 
-    parser.parseCondition(&condition, &token);
+    _ = parser.parseCondition(&condition, &token);
 
     try std.testing.expect(compareCondition(expected_condition, condition));
 }
@@ -807,8 +797,6 @@
     return ((std.mem.eql(u8, c1.value, c2.value)) and (std.mem.eql(u8, c1.struct_name, c2.struct_name)) and (std.mem.eql(u8, c1.member_name, c2.member_name)) and (c1.operation == c2.operation) and (c1.data_type == c2.data_type));
 }
 
-// TODO: Test Filter parser
-
 test "Parse new data" {
     const allocator = std.testing.allocator;
 
@@ -864,6 +852,20 @@
     if ((error_found) or (expected_total_count != found_count)) @panic("=(");
 }
 
+test "Parse filter" {
+    const allocator = std.testing.allocator;
+
+    var tokenizer = Tokenizer.init("{name = 'Adrien'}");
+    var parser = Parser.init(allocator, &tokenizer);
+    defer parser.deinit();
+    _ = tokenizer.next(); // Start at name
+
+    var uuid_array = std.ArrayList(UUID).init(allocator);
+    defer uuid_array.deinit();
+
+    try parser.parseFilter(&uuid_array, "User", true);
+}
+
 test "Parse additional data" {
     const allocator = std.testing.allocator;