Start cleaning before working on 0.2

This commit is contained in:
Adrien Bouvais 2024-10-16 20:26:40 +02:00
parent c93538f4b0
commit 29af5a7ac0
8 changed files with 1063 additions and 1092 deletions

View File

@ -85,187 +85,185 @@ pub fn main() !void {
var token = cliToker.next();
state = .expect_main_command;
while ((state != .end) and (state != .quit)) : (token = cliToker.next()) {
switch (state) {
.expect_main_command => switch (token.tag) {
.keyword_run => {
if (!file_engine.usable) {
send("Error: No database selected. Please use db new or db use.", .{});
while ((state != .end) and (state != .quit)) : (token = cliToker.next()) switch (state) {
.expect_main_command => switch (token.tag) {
.keyword_run => {
if (!file_engine.usable) {
send("Error: No database selected. Please use db new or db use.", .{});
state = .end;
continue;
}
state = .expect_query;
},
.keyword_db => state = .expect_db_command,
.keyword_schema => {
if (!file_engine.usable) {
send("Error: No database selected. Please use db new or db use.", .{});
state = .end;
continue;
}
state = .expect_schema_command;
},
.keyword_help => {
send("{s}", .{
\\Welcome to ZipponDB v0.1!
\\
\\Available commands:
\\run To run a query.
\\db Create or chose a database.
\\schema Initialize the database schema.
\\quit Stop the CLI with memory safety.
\\
\\ For more informations: https://github.com/MrBounty/ZipponDB
\\
});
state = .end;
},
.keyword_quit => state = .quit,
.eof => state = .end,
else => {
send("Command need to start with a keyword, including: run, db, schema, help and quit", .{});
state = .end;
},
},
.expect_db_command => switch (token.tag) {
.keyword_new => state = .expect_path_to_new_db,
.keyword_use => state = .expect_path_to_db,
.keyword_metrics => {
if (!file_engine.usable) {
send("Error: No database selected. Please use db new or db use.", .{});
state = .end;
continue;
}
var buffer = std.ArrayList(u8).init(allocator);
defer buffer.deinit();
try file_engine.writeDbMetrics(&buffer);
send("{s}", .{buffer.items});
state = .end;
},
.keyword_help => {
send("{s}", .{
\\Available commands:
\\new Create a new database using a path to a sub folder.
\\use Select another ZipponDB folder to use as database.
\\metrics Print some metrics of the current database.
\\
\\ For more informations: https://github.com/MrBounty/ZipponDB
\\
});
state = .end;
},
else => {
send("Error: db commands available: new, metrics, swap & help", .{});
state = .end;
},
},
.expect_path_to_db => switch (token.tag) {
.identifier => {
file_engine.deinit();
file_engine = FileEngine.init(allocator, try allocator.dupe(u8, cliToker.getTokenSlice(token)));
send("Successfully started using the database!", .{});
state = .end;
},
else => {
send("Error Expect a path to a ZipponDB folder.", .{});
state = .end;
},
},
.expect_path_to_new_db => switch (token.tag) {
.identifier => {
checkAndCreateDirectories(cliToker.getTokenSlice(token), allocator) catch |err| {
send("Error: Coulnt create database directories: {any}", .{err});
state = .end;
continue;
};
file_engine.deinit();
file_engine = FileEngine.init(allocator, try allocator.dupe(u8, cliToker.getTokenSlice(token)));
send("Successfully initialized the database!", .{});
state = .end;
},
else => {
send("Error Expect a path to a folder.", .{});
state = .end;
},
},
.expect_query => switch (token.tag) {
.string_literal => {
const null_term_query_str = try allocator.dupeZ(u8, line_str[token.loc.start + 1 .. token.loc.end - 1]);
defer allocator.free(null_term_query_str);
runQuery(null_term_query_str, &file_engine);
state = .end;
},
.keyword_help => {
send("The run command take a ZiQL query between \" and run it. eg: run \"GRAB User\"", .{});
state = .end;
},
else => {
send("Error: After command run, need a query, eg: \"GRAB User\"", .{});
state = .end;
},
},
.expect_schema_command => switch (token.tag) {
.keyword_describe => {
if (std.mem.eql(u8, file_engine.path_to_ZipponDB_dir, "")) send("Error: No database selected. Please use db bew or db use.", .{});
if (file_engine.null_terminated_schema_buff.len == 0) {
send("Need to init the schema first. Please use the schema init path/to/schema command to start.", .{});
} else {
send("Schema:\n {s}", .{file_engine.null_terminated_schema_buff});
}
state = .end;
},
.keyword_init => state = .expect_path_to_schema,
.keyword_help => {
send("{s}", .{
\\Available commands:
\\describe Print the schema use by the currently selected database.
\\init Take the path to a schema file and initialize the database.
\\
\\ For more informations: https://github.com/MrBounty/ZipponDB
\\
});
state = .end;
},
else => {
send("Error: schema commands available: describe, init & help", .{});
state = .end;
},
},
.expect_path_to_schema => switch (token.tag) {
.identifier => {
file_engine.initDataFolder(cliToker.getTokenSlice(token)) catch |err| switch (err) {
error.SchemaFileNotFound => {
send("Coulnt find the schema file at {s}", .{cliToker.getTokenSlice(token)});
state = .end;
continue;
}
state = .expect_query;
},
.keyword_db => state = .expect_db_command,
.keyword_schema => {
if (!file_engine.usable) {
send("Error: No database selected. Please use db new or db use.", .{});
},
else => {
send("Error initializing the schema", .{});
state = .end;
continue;
}
state = .expect_schema_command;
},
.keyword_help => {
send("{s}", .{
\\Welcome to ZipponDB v0.1!
\\
\\Available commands:
\\run To run a query.
\\db Create or chose a database.
\\schema Initialize the database schema.
\\quit Stop the CLI with memory safety.
\\
\\ For more informations: https://github.com/MrBounty/ZipponDB
\\
});
state = .end;
},
.keyword_quit => state = .quit,
.eof => state = .end,
else => {
send("Command need to start with a keyword, including: run, db, schema, help and quit", .{});
state = .end;
},
},
};
send("Successfully initialized the database schema!", .{});
state = .end;
},
.expect_db_command => switch (token.tag) {
.keyword_new => state = .expect_path_to_new_db,
.keyword_use => state = .expect_path_to_db,
.keyword_metrics => {
if (!file_engine.usable) {
send("Error: No database selected. Please use db new or db use.", .{});
state = .end;
continue;
}
var buffer = std.ArrayList(u8).init(allocator);
defer buffer.deinit();
try file_engine.writeDbMetrics(&buffer);
send("{s}", .{buffer.items});
state = .end;
},
.keyword_help => {
send("{s}", .{
\\Available commands:
\\new Create a new database using a path to a sub folder.
\\use Select another ZipponDB folder to use as database.
\\metrics Print some metrics of the current database.
\\
\\ For more informations: https://github.com/MrBounty/ZipponDB
\\
});
state = .end;
},
else => {
send("Error: db commands available: new, metrics, swap & help", .{});
state = .end;
},
else => {
send("Error: Expect path to schema file.", .{});
state = .end;
},
},
.expect_path_to_db => switch (token.tag) {
.identifier => {
file_engine.deinit();
file_engine = FileEngine.init(allocator, try allocator.dupe(u8, cliToker.getTokenSlice(token)));
send("Successfully started using the database!", .{});
state = .end;
},
else => {
send("Error Expect a path to a ZipponDB folder.", .{});
state = .end;
},
},
.expect_path_to_new_db => switch (token.tag) {
.identifier => {
checkAndCreateDirectories(cliToker.getTokenSlice(token), allocator) catch |err| {
send("Error: Coulnt create database directories: {any}", .{err});
state = .end;
continue;
};
file_engine.deinit();
file_engine = FileEngine.init(allocator, try allocator.dupe(u8, cliToker.getTokenSlice(token)));
send("Successfully initialized the database!", .{});
state = .end;
},
else => {
send("Error Expect a path to a folder.", .{});
state = .end;
},
},
.expect_query => switch (token.tag) {
.string_literal => {
const null_term_query_str = try allocator.dupeZ(u8, line_str[token.loc.start + 1 .. token.loc.end - 1]);
defer allocator.free(null_term_query_str);
runQuery(null_term_query_str, &file_engine);
state = .end;
},
.keyword_help => {
send("The run command take a ZiQL query between \" and run it. eg: run \"GRAB User\"", .{});
state = .end;
},
else => {
send("Error: After command run, need a query, eg: \"GRAB User\"", .{});
state = .end;
},
},
.expect_schema_command => switch (token.tag) {
.keyword_describe => {
if (std.mem.eql(u8, file_engine.path_to_ZipponDB_dir, "")) send("Error: No database selected. Please use db bew or db use.", .{});
if (file_engine.null_terminated_schema_buff.len == 0) {
send("Need to init the schema first. Please use the schema init path/to/schema command to start.", .{});
} else {
send("Schema:\n {s}", .{file_engine.null_terminated_schema_buff});
}
state = .end;
},
.keyword_init => state = .expect_path_to_schema,
.keyword_help => {
send("{s}", .{
\\Available commands:
\\describe Print the schema use by the currently selected database.
\\init Take the path to a schema file and initialize the database.
\\
\\ For more informations: https://github.com/MrBounty/ZipponDB
\\
});
state = .end;
},
else => {
send("Error: schema commands available: describe, init & help", .{});
state = .end;
},
},
.expect_path_to_schema => switch (token.tag) {
.identifier => {
file_engine.initDataFolder(cliToker.getTokenSlice(token)) catch |err| switch (err) {
error.SchemaFileNotFound => {
send("Coulnt find the schema file at {s}", .{cliToker.getTokenSlice(token)});
state = .end;
},
else => {
send("Error initializing the schema", .{});
state = .end;
},
};
send("Successfully initialized the database schema!", .{});
state = .end;
},
else => {
send("Error: Expect path to schema file.", .{});
state = .end;
},
},
.quit, .end => break,
}
}
if (state == .quit) break;
.quit, .end => break,
};
}
if (state == .quit) break;
}
}

View File

@ -8,17 +8,25 @@ const SchemaStruct = @import("schemaParser.zig").Parser.SchemaStruct;
const SchemaParser = @import("schemaParser.zig").Parser;
const SchemaTokenizer = @import("tokenizers/schema.zig").Tokenizer;
const SchemaToken = @import("tokenizers/schema.zig").Token;
const AdditionalData = @import("ziqlParser.zig").Parser.AdditionalData;
const AdditionalData = @import("parsing-tools/additionalData.zig").AdditionalData;
//TODO: Create a union class and chose between file and memory
// TODO: Use those errors everywhere in this file
/// Error set returned by FileEngine file-system operations (schema loading
/// and data-folder initialization). See the TODO above: intended to be used
/// everywhere in this file.
const FileEngineError = error{
SchemaFileNotFound, // schema file missing at the given path
SchemaNotConform, // schema file exists but failed parsing
DATAFolderNotFound, // <db>/DATA directory missing
StructFolderNotFound, // per-struct sub-directory missing under DATA
CantMakeDir, // directory creation failed for a reason other than already-exists
CantMakeFile, // file creation failed for a reason other than already-exists
};
/// Manage everything that is relate to read or write in files
/// Or even get stats, whatever. If it touch files, it's here
pub const FileEngine = struct {
allocator: Allocator,
usable: bool,
path_to_ZipponDB_dir: []const u8, // Make that into a list
max_file_size: usize = 5e+4, // 50kb TODO: Change
path_to_ZipponDB_dir: []const u8, // TODO: Put in config file
max_file_size: usize = 5e+4, // 50kb TODO: Put in config file
null_terminated_schema_buff: [:0]u8,
struct_array: std.ArrayList(SchemaStruct),
@ -81,10 +89,6 @@ pub const FileEngine = struct {
}
};
pub fn setPath(self: *FileEngine, path: []const u8) void {
self.path_to_ZipponDB_dir = path;
}
/// Take a list of UUID and, a buffer array and the additional data to write into the buffer the JSON to send
/// TODO: Optimize
/// FIXME: Array of string are not working
@ -161,62 +165,55 @@ pub const FileEngine = struct {
}
}
if (founded) {
try out_writer.writeAll("{");
try out_writer.writeAll("id:\"");
try out_writer.print("{s}", .{output_fbs.getWritten()[0..36]});
try out_writer.writeAll("\", ");
for (self.structName2structMembers(struct_name), self.structName2DataType(struct_name)) |member_name, member_type| {
token = data_toker.next();
// FIXME: When relationship will be implemented, need to check if the len of NON link is 0
if ((additional_data.member_to_find.items.len == 0) or (self.isMemberNameInAdditionalData(self.locToSlice(member_name), additional_data))) {
// write the member name and = sign
try out_writer.print("{s}: ", .{self.locToSlice(member_name)});
if (!founded) continue;
switch (member_type) {
.str => {
const str_slice = data_toker.getTokenSlice(token);
try out_writer.print("\"{s}\"", .{str_slice[1 .. str_slice.len - 1]});
},
.str_array => {
try out_writer.writeAll(data_toker.getTokenSlice(token));
token = data_toker.next();
while (token.tag != .r_bracket) : (token = data_toker.next()) {
try out_writer.writeAll("\"");
try out_writer.writeAll(data_toker.getTokenSlice(token)[1..(token.loc.end - token.loc.start)]);
try out_writer.writeAll("\"");
try out_writer.writeAll(" ");
}
try out_writer.writeAll(data_toker.getTokenSlice(token));
},
.int_array, .float_array, .bool_array, .id_array => {
while (token.tag != .r_bracket) : (token = data_toker.next()) {
try out_writer.writeAll(data_toker.getTokenSlice(token));
try out_writer.writeAll(" ");
}
try out_writer.writeAll(data_toker.getTokenSlice(token));
},
else => try out_writer.writeAll(data_toker.getTokenSlice(token)), //write the value as if
try out_writer.writeAll("{");
try out_writer.writeAll("id:\"");
try out_writer.print("{s}", .{output_fbs.getWritten()[0..36]});
try out_writer.writeAll("\", ");
for (self.structName2structMembers(struct_name), self.structName2DataType(struct_name)) |member_name, member_type| {
token = data_toker.next();
// FIXME: When relationship will be implemented, need to check if the len of NON link is 0
if (!(additional_data.member_to_find.items.len == 0) or !(additional_data.contains(self.locToSlice(member_name)))) continue;
// write the member name and = sign
try out_writer.print("{s}: ", .{self.locToSlice(member_name)});
switch (member_type) {
.str => {
const str_slice = data_toker.getTokenSlice(token);
try out_writer.print("\"{s}\"", .{str_slice[1 .. str_slice.len - 1]});
},
.str_array => {
try out_writer.writeAll(data_toker.getTokenSlice(token));
token = data_toker.next();
while (token.tag != .r_bracket) : (token = data_toker.next()) {
try out_writer.writeAll("\"");
try out_writer.writeAll(data_toker.getTokenSlice(token)[1..(token.loc.end - token.loc.start)]);
try out_writer.writeAll("\"");
try out_writer.writeAll(" ");
}
try out_writer.writeAll(", ");
}
try out_writer.writeAll(data_toker.getTokenSlice(token));
},
.int_array, .float_array, .bool_array, .id_array => {
while (token.tag != .r_bracket) : (token = data_toker.next()) {
try out_writer.writeAll(data_toker.getTokenSlice(token));
try out_writer.writeAll(" ");
}
try out_writer.writeAll(data_toker.getTokenSlice(token));
},
else => try out_writer.writeAll(data_toker.getTokenSlice(token)), //write the value as if
}
try out_writer.writeAll("}");
try out_writer.writeAll(", ");
}
try out_writer.writeAll("}");
try out_writer.writeAll(", ");
}
// Write the end }
try out_writer.writeAll("]");
}
fn isMemberNameInAdditionalData(_: *FileEngine, member_name: []const u8, additional_data: AdditionalData) bool {
for (additional_data.member_to_find.items) |elem| {
if (std.mem.eql(u8, member_name, elem.name)) return true;
}
return false;
}
/// Use a struct name to populate a list with all UUID of this struct
/// TODO: Optimize this, I'm sure I can do better than that
pub fn getAllUUIDList(self: *FileEngine, struct_name: []const u8, uuid_array: *std.ArrayList(UUID)) !void {
const max_file_index = try self.maxFileIndex(struct_name);
var current_index: usize = 0;
@ -431,9 +428,7 @@ pub const FileEngine = struct {
}
}
// TODO: Clean a bit the code
// Do I need multiple files too ? I mean it duplicate UUID a lot, if it's just to save a name like 'Bob', storing a long UUID is overkill
// I could just use a tabular data format with separator using space - Or maybe I encode the uuid to take a minimum space as I always know it size
// Do I need a map here ? Cant I use something else ?
pub fn writeEntity(self: *FileEngine, struct_name: []const u8, data_map: std.StringHashMap([]const u8)) !UUID {
const uuid = UUID.init();
@ -566,7 +561,14 @@ pub const FileEngine = struct {
}
}
if (founded) {
if (!founded) {
// stream until the delimiter
output_fbs.reset();
try new_file.writeAll(" ");
try reader.streamUntilDelimiter(writer, '\n', null);
try new_file.writeAll(output_fbs.getWritten());
try new_file.writeAll("\n");
} else {
for (self.structName2structMembers(struct_name), self.structName2DataType(struct_name)) |member_name, member_type| {
// For all collum in the right order, check if the key is in the map, if so use it to write the new value, otherwise use the old file
output_fbs.reset();
@ -610,13 +612,6 @@ pub const FileEngine = struct {
try reader.streamUntilDelimiter(writer, '\n', null);
try new_file.writeAll("\n");
} else {
// stream until the delimiter
output_fbs.reset();
try new_file.writeAll(" ");
try reader.streamUntilDelimiter(writer, '\n', null);
try new_file.writeAll(output_fbs.getWritten());
try new_file.writeAll("\n");
}
}
}
@ -807,40 +802,13 @@ pub const FileEngine = struct {
}
}
// Maybe make it so it use itself to search if it find a directory
fn getDirTotalSize(self: FileEngine, dir: std.fs.Dir) !u64 {
var total: u64 = 0;
var stat: std.fs.File.Stat = undefined;
var iter = dir.iterate();
while (try iter.next()) |entry| {
if (entry.kind == .directory) {
const sub_dir = try dir.openDir(entry.name, .{ .iterate = true });
total += try self.getDirTotalSize(sub_dir);
}
if (entry.kind != .file) continue;
stat = try dir.statFile(entry.name);
total += stat.size;
}
return total;
}
const FileError = error{
SchemaFileNotFound,
SchemaNotConform,
DATAFolderNotFound,
StructFolderNotFound,
CantMakeDir,
CantMakeFile,
};
/// Request a path to a schema file and then create the struct folder
/// TODO: Check if some data already exist and if so ask if the user want to delete it and make a backup
pub fn initDataFolder(self: *FileEngine, path_to_schema_file: []const u8) FileError!void {
pub fn initDataFolder(self: *FileEngine, path_to_schema_file: []const u8) FileEngineError!void {
var schema_buf = self.allocator.alloc(u8, 1024 * 50) catch @panic("Cant allocate the schema buffer");
defer self.allocator.free(schema_buf);
const file = std.fs.cwd().openFile(path_to_schema_file, .{}) catch return FileError.SchemaFileNotFound;
const file = std.fs.cwd().openFile(path_to_schema_file, .{}) catch return FileEngineError.SchemaFileNotFound;
defer file.close();
const len = file.readAll(schema_buf) catch @panic("Can't read schema file");
@ -860,19 +828,19 @@ pub const FileEngine = struct {
const path = std.fmt.allocPrint(self.allocator, "{s}/DATA", .{self.path_to_ZipponDB_dir}) catch @panic("Cant allocate path");
defer self.allocator.free(path);
var data_dir = std.fs.cwd().openDir(path, .{}) catch return FileError.DATAFolderNotFound;
var data_dir = std.fs.cwd().openDir(path, .{}) catch return FileEngineError.DATAFolderNotFound;
defer data_dir.close();
for (self.struct_array.items) |struct_item| {
data_dir.makeDir(self.locToSlice(struct_item.name)) catch |err| switch (err) {
error.PathAlreadyExists => {},
else => return FileError.CantMakeDir,
else => return FileEngineError.CantMakeDir,
};
const struct_dir = data_dir.openDir(self.locToSlice(struct_item.name), .{}) catch return FileError.StructFolderNotFound;
const struct_dir = data_dir.openDir(self.locToSlice(struct_item.name), .{}) catch return FileEngineError.StructFolderNotFound;
_ = struct_dir.createFile("0.zippondata", .{}) catch |err| switch (err) {
error.PathAlreadyExists => {},
else => return FileError.CantMakeFile,
else => return FileEngineError.CantMakeFile,
};
}
@ -880,6 +848,7 @@ pub const FileEngine = struct {
}
// Stuff for schema
// TODO: Check all those functions and remove if not use
pub fn readSchemaFile(allocator: Allocator, sub_path: []const u8, buffer: []u8) !usize {
const path = try std.fmt.allocPrint(allocator, "{s}/schema.zipponschema", .{sub_path});
@ -1011,6 +980,7 @@ test "Get list of UUID using condition" {
// Series of functions to use just before creating an entity.
// Will transform the string of data into data of the right type./
// TODO: Put those functions somewhere else
pub fn parseInt(value_str: []const u8) i64 {
return std.fmt.parseInt(i64, value_str, 10) catch return 0;
}

View File

@ -0,0 +1,40 @@
const std = @import("std");
const Allocator = std.mem.Allocator;

/// Represents the bracketed `[...]` part of a query: how many entities to
/// return and which members to include.
/// IDK if saving it into the Parser struct is a good idea.
pub const AdditionalData = struct {
    entity_count_to_find: usize = 0,
    member_to_find: std.ArrayList(AdditionalDataMember),

    pub fn init(allocator: Allocator) AdditionalData {
        return .{ .member_to_find = std.ArrayList(AdditionalDataMember).init(allocator) };
    }

    pub fn deinit(self: *AdditionalData) void {
        // Nested members own their own AdditionalData; release those first,
        // then the list itself.
        for (self.member_to_find.items) |*member| {
            member.additional_data.deinit();
        }
        self.member_to_find.deinit();
    }

    /// True if `member_name` is one of the requested members.
    pub fn contains(additional_data: AdditionalData, member_name: []const u8) bool {
        return for (additional_data.member_to_find.items) |member| {
            if (std.mem.eql(u8, member_name, member.name)) break true;
        } else false;
    }
};

/// One requested member inside `[...]`, e.g. `name` in `[name]`.
/// Carries its own AdditionalData because requests can nest: `[friend [1; name]]`.
pub const AdditionalDataMember = struct {
    name: []const u8,
    additional_data: AdditionalData,

    pub fn init(allocator: Allocator, name: []const u8) AdditionalDataMember {
        return .{ .name = name, .additional_data = AdditionalData.init(allocator) };
    }
};

View File

@ -0,0 +1,31 @@
const std = @import("std");
/// Return a caller-owned copy of the value of the environment variable
/// `variable`, or null if it is unset or any allocation fails.
/// Caller owns (and must free) the returned slice.
pub fn getEnvVariables(allocator: std.mem.Allocator, variable: []const u8) ?[]const u8 {
    var env_map = std.process.getEnvMap(allocator) catch return null;
    defer env_map.deinit();

    // Use the map's own lookup instead of a hand-rolled linear scan.
    const value = env_map.get(variable) orelse return null;
    // The map is freed on return, so hand back a duplicate the caller owns.
    return allocator.dupe(u8, value) catch null;
}
/// Recursively compute the total size in bytes of all regular files under
/// `dir`. `dir` must have been opened with `.iterate = true`.
/// Non-file, non-directory entries (symlinks, pipes, ...) are ignored.
pub fn getDirTotalSize(dir: std.fs.Dir) !u64 {
    var total: u64 = 0;
    var iter = dir.iterate();
    while (try iter.next()) |entry| {
        switch (entry.kind) {
            .directory => {
                // Fix: the original leaked this handle — it was never closed.
                var sub_dir = try dir.openDir(entry.name, .{ .iterate = true });
                defer sub_dir.close();
                total += try getDirTotalSize(sub_dir);
            },
            .file => {
                const stat = try dir.statFile(entry.name);
                total += stat.size;
            },
            else => {},
        }
    }
    return total;
}

View File

@ -6,6 +6,7 @@ const Token = @import("tokenizers/schema.zig").Token;
const stdout = std.io.getStdOut().writer();
// Fuse this with the same function in the ZiQL parser
fn send(comptime format: []const u8, args: anytype) void {
stdout.print(format, args) catch |err| {
std.log.err("Can't send: {any}", .{err});
@ -68,124 +69,122 @@ pub const Parser = struct {
while ((state != .end) and (state != .invalid)) : ({
token = if (!keep_next) self.toker.next() else token;
keep_next = false;
}) {
switch (state) {
.expect_struct_name_OR_end => switch (token.tag) {
.identifier => {
state = .expect_l_paren;
struct_array.append(SchemaStruct.init(self.allocator, token.loc)) catch @panic("Error appending a struct name.");
},
.eof => state = .end,
else => {
self.printError("Error parsing schema: Expected a struct name", &token);
state = .invalid;
},
}) switch (state) {
.expect_struct_name_OR_end => switch (token.tag) {
.identifier => {
state = .expect_l_paren;
struct_array.append(SchemaStruct.init(self.allocator, token.loc)) catch @panic("Error appending a struct name.");
},
.expect_l_paren => switch (token.tag) {
.l_paren => state = .expect_member_name,
else => {
self.printError("Error parsing schema: Expected (", &token);
state = .invalid;
},
.eof => state = .end,
else => {
self.printError("Error parsing schema: Expected a struct name", &token);
state = .invalid;
},
},
.expect_member_name_OR_r_paren => switch (token.tag) {
.identifier => {
state = .expect_member_name;
keep_next = true;
},
.r_paren => {
state = .expect_struct_name_OR_end;
index += 1;
},
else => {
self.printError("Error parsing schema: Expected member name or )", &token);
state = .invalid;
},
.expect_l_paren => switch (token.tag) {
.l_paren => state = .expect_member_name,
else => {
self.printError("Error parsing schema: Expected (", &token);
state = .invalid;
},
},
.expect_member_name => {
state = .expect_two_dot;
struct_array.items[index].members.append(token.loc) catch @panic("Error appending a member name.");
.expect_member_name_OR_r_paren => switch (token.tag) {
.identifier => {
state = .expect_member_name;
keep_next = true;
},
.expect_two_dot => switch (token.tag) {
.two_dot => state = .expect_value_type,
else => {
self.printError("Error parsing schema: Expected :", &token);
state = .invalid;
},
.r_paren => {
state = .expect_struct_name_OR_end;
index += 1;
},
.expect_value_type => switch (token.tag) {
.type_int => {
state = .expect_comma;
struct_array.items[index].types.append(DataType.int) catch @panic("Error appending a type.");
},
.type_str => {
state = .expect_comma;
struct_array.items[index].types.append(DataType.str) catch @panic("Error appending a type.");
},
.type_float => {
state = .expect_comma;
struct_array.items[index].types.append(DataType.float) catch @panic("Error appending a type.");
},
.type_bool => {
state = .expect_comma;
struct_array.items[index].types.append(DataType.bool) catch @panic("Error appending a type.");
},
.type_date => @panic("Date not yet implemented"),
.identifier => @panic("Link not yet implemented"),
.lr_bracket => state = .expext_array_type,
else => {
self.printError("Error parsing schema: Expected data type", &token);
state = .invalid;
},
else => {
self.printError("Error parsing schema: Expected member name or )", &token);
state = .invalid;
},
},
.expext_array_type => switch (token.tag) {
.type_int => {
state = .expect_comma;
struct_array.items[index].types.append(DataType.int_array) catch @panic("Error appending a type.");
},
.type_str => {
state = .expect_comma;
struct_array.items[index].types.append(DataType.str_array) catch @panic("Error appending a type.");
},
.type_float => {
state = .expect_comma;
struct_array.items[index].types.append(DataType.float_array) catch @panic("Error appending a type.");
},
.type_bool => {
state = .expect_comma;
struct_array.items[index].types.append(DataType.bool_array) catch @panic("Error appending a type.");
},
.type_date => {
self.printError("Error parsing schema: Data not yet implemented", &token);
state = .invalid;
},
.identifier => {
self.printError("Error parsing schema: Relationship not yet implemented", &token);
state = .invalid;
},
else => {
self.printError("Error parsing schema: Expected data type", &token);
state = .invalid;
},
.expect_member_name => {
state = .expect_two_dot;
struct_array.items[index].members.append(token.loc) catch @panic("Error appending a member name.");
},
.expect_two_dot => switch (token.tag) {
.two_dot => state = .expect_value_type,
else => {
self.printError("Error parsing schema: Expected :", &token);
state = .invalid;
},
},
.expect_comma => switch (token.tag) {
.comma => state = .expect_member_name_OR_r_paren,
else => {
self.printError("Error parsing schema: Expected ,", &token);
state = .invalid;
},
.expect_value_type => switch (token.tag) {
.type_int => {
state = .expect_comma;
struct_array.items[index].types.append(DataType.int) catch @panic("Error appending a type.");
},
.type_str => {
state = .expect_comma;
struct_array.items[index].types.append(DataType.str) catch @panic("Error appending a type.");
},
.type_float => {
state = .expect_comma;
struct_array.items[index].types.append(DataType.float) catch @panic("Error appending a type.");
},
.type_bool => {
state = .expect_comma;
struct_array.items[index].types.append(DataType.bool) catch @panic("Error appending a type.");
},
.type_date => @panic("Date not yet implemented"),
.identifier => @panic("Link not yet implemented"),
.lr_bracket => state = .expext_array_type,
else => {
self.printError("Error parsing schema: Expected data type", &token);
state = .invalid;
},
},
else => unreachable,
}
}
.expext_array_type => switch (token.tag) {
.type_int => {
state = .expect_comma;
struct_array.items[index].types.append(DataType.int_array) catch @panic("Error appending a type.");
},
.type_str => {
state = .expect_comma;
struct_array.items[index].types.append(DataType.str_array) catch @panic("Error appending a type.");
},
.type_float => {
state = .expect_comma;
struct_array.items[index].types.append(DataType.float_array) catch @panic("Error appending a type.");
},
.type_bool => {
state = .expect_comma;
struct_array.items[index].types.append(DataType.bool_array) catch @panic("Error appending a type.");
},
.type_date => {
self.printError("Error parsing schema: Data not yet implemented", &token);
state = .invalid;
},
.identifier => {
self.printError("Error parsing schema: Relationship not yet implemented", &token);
state = .invalid;
},
else => {
self.printError("Error parsing schema: Expected data type", &token);
state = .invalid;
},
},
.expect_comma => switch (token.tag) {
.comma => state = .expect_member_name_OR_r_paren,
else => {
self.printError("Error parsing schema: Expected ,", &token);
state = .invalid;
},
},
else => unreachable,
};
// if invalid, empty the list
if (state == .invalid) {

View File

@ -123,9 +123,94 @@ pub const UUID = struct {
// Zero UUID
pub const zero: UUID = .{ .bytes = .{0} ** 16 };
// Convenience function to return a new v4 UUID.
pub fn newV4() UUID {
return UUID.init();
// TODO: Optimize both
/// Set union in place: append to `arr1` every UUID of `arr2` that `arr1`
/// does not already contain. TODO: Optimize.
pub fn OR(arr1: *std.ArrayList(UUID), arr2: *std.ArrayList(UUID)) !void {
    for (arr2.items) |candidate| {
        if (containUUID(arr1.*, candidate)) continue;
        try arr1.append(candidate);
    }
}
/// Set intersection in place: keep only the UUIDs of `arr1` that also appear
/// in `arr2`. TODO: Optimize.
pub fn AND(arr1: *std.ArrayList(UUID), arr2: *std.ArrayList(UUID)) !void {
    // Walk the original length once; the cursor only advances past kept
    // elements, since orderedRemove shifts the rest left.
    var keep_index: usize = 0;
    var remaining = arr1.items.len;
    while (remaining > 0) : (remaining -= 1) {
        if (containUUID(arr2.*, arr1.items[keep_index])) {
            keep_index += 1;
        } else {
            _ = arr1.orderedRemove(keep_index);
        }
    }
}
test "OR & AND" {
    const allocator = std.testing.allocator;

    // Right-hand operand shared by both checks: {0, 1, 5, 6, 7}.
    const right_ids = [_][]const u8{
        "00000000-0000-0000-0000-000000000000",
        "00000000-0000-0000-0000-000000000001",
        "00000000-0000-0000-0000-000000000005",
        "00000000-0000-0000-0000-000000000006",
        "00000000-0000-0000-0000-000000000007",
    };
    var right_arr = std.ArrayList(UUID).init(allocator);
    defer right_arr.deinit();
    for (right_ids) |id| try right_arr.append(try UUID.parse(id));

    // Left-hand operand used by both checks: {0, 1, 2, 3, 4}.
    const left_ids = [_][]const u8{
        "00000000-0000-0000-0000-000000000000",
        "00000000-0000-0000-0000-000000000001",
        "00000000-0000-0000-0000-000000000002",
        "00000000-0000-0000-0000-000000000003",
        "00000000-0000-0000-0000-000000000004",
    };

    // AND keeps the intersection: {0, 1}.
    var left_arr1 = std.ArrayList(UUID).init(allocator);
    defer left_arr1.deinit();
    for (left_ids) |id| try left_arr1.append(try UUID.parse(id));

    var expected_arr1 = std.ArrayList(UUID).init(allocator);
    defer expected_arr1.deinit();
    for (right_ids[0..2]) |id| try expected_arr1.append(try UUID.parse(id));

    try AND(&left_arr1, &right_arr);
    try std.testing.expect(compareUUIDArray(left_arr1, expected_arr1));

    // OR appends what is missing on the left: {0..7}.
    var left_arr2 = std.ArrayList(UUID).init(allocator);
    defer left_arr2.deinit();
    for (left_ids) |id| try left_arr2.append(try UUID.parse(id));

    var expected_arr2 = std.ArrayList(UUID).init(allocator);
    defer expected_arr2.deinit();
    for (left_ids) |id| try expected_arr2.append(try UUID.parse(id));
    for (right_ids[2..]) |id| try expected_arr2.append(try UUID.parse(id));

    try OR(&left_arr2, &right_arr);
    try std.testing.expect(compareUUIDArray(left_arr2, expected_arr2));
}
/// Linear scan: true if `value` is present in `arr`.
fn containUUID(arr: std.ArrayList(UUID), value: UUID) bool {
    for (arr.items) |elem| {
        if (value.compare(elem)) return true;
    }
    return false;
}
/// Compare two UUID lists ignoring order: same length and every element of
/// `arr1` present in `arr2`. Prints a diagnostic on length mismatch.
fn compareUUIDArray(arr1: std.ArrayList(UUID), arr2: std.ArrayList(UUID)) bool {
    if (arr1.items.len != arr2.items.len) {
        std.debug.print("Not same array len when comparing UUID. arr1: {d} arr2: {d}\n", .{ arr1.items.len, arr2.items.len });
        return false;
    }
    return for (arr1.items) |elem| {
        if (!containUUID(arr2, elem)) break false;
    } else true;
}
test "parse and format" {

View File

@ -1,14 +0,0 @@
const std = @import("std");
pub fn getEnvVariables(allocator: std.mem.Allocator, variable: []const u8) ?[]const u8 {
var env_map = std.process.getEnvMap(allocator) catch return null;
defer env_map.deinit();
var iter = env_map.iterator();
while (iter.next()) |entry| {
if (std.mem.eql(u8, entry.key_ptr.*, variable)) return allocator.dupe(u8, entry.value_ptr.*) catch return null;
}
return null;
}

File diff suppressed because it is too large Load Diff