The function to get a list of UUIDs based on a condition is working; still need to finish the GRAB parser so AND / OR can be applied to the left / right arrays

Adrien Bouvais 2024-10-04 00:06:55 +02:00
parent c66bfe285a
commit 9b773588e7
14 changed files with 662 additions and 411 deletions
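For context on the commit message: below is a minimal, hypothetical sketch of how the planned AND / OR step could combine the left and right UUID arrays once the GRAB filter parser is finished. It is not part of this commit and only assumes the UUID type from uuid.zig and the std.ArrayList(UUID) arrays already used in GRAB.zig.

const std = @import("std");
const UUID = @import("uuid.zig").UUID;

// AND: intersection, keep only the UUIDs present in both arrays.
fn andArrays(allocator: std.mem.Allocator, left: []const UUID, right: []const UUID) !std.ArrayList(UUID) {
    var out = std.ArrayList(UUID).init(allocator);
    for (left) |l| {
        for (right) |r| {
            if (std.meta.eql(l, r)) {
                try out.append(l);
                break;
            }
        }
    }
    return out;
}

// OR: union, keep every UUID that appears in either array, without duplicates.
fn orArrays(allocator: std.mem.Allocator, left: []const UUID, right: []const UUID) !std.ArrayList(UUID) {
    var out = std.ArrayList(UUID).init(allocator);
    try out.appendSlice(left);
    outer: for (right) |r| {
        for (left) |l| {
            if (std.meta.eql(l, r)) continue :outer;
        }
        try out.append(r);
    }
    return out;
}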

View File

@ -116,7 +116,7 @@ Zippon has its own query language. Here are the key points to remember:
| GRAB User \| ASCENDING name \| | Get all users ordered by name |
| GRAB User [name] { age > 10 AND name != 'Adrien' } \| DESCENDING age \| | Get just the name of all users who are more than 10 years old and not named Adrien |
| GRAB User [1] { bestfriend = { name = 'Adrien' } } | Get one user that has a best friend named Adrien |
| GRAB User [10; friends [1]] { age > 10 } | ASC name | | Get one friend of the 10th user above 10 years old in ascending name |
| GRAB User [10; friends [1]] { age > 10 } | Get one friend of the 10th user above 10 years old |
### Not yet implemented
| Command | Description |

6
TODO.md Normal file
View File

@ -0,0 +1,6 @@
[ ] Finish the filter parsing
[ ] Create a TEST_DATA dataset without any links
[ ] Do some tests on filters using TEST_DATA (see the sketch after this list)
[ ] Do some tests on the DataEngine to check that it can add data to TEST_DATA
[ ] Check for memory leaks in the Parser
[ ] Do testing on the parser
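A rough sketch of what one of the filter tests above could look like once TEST_DATA exists, written as if it lived in dataEngine.zig next to the existing "File iterator" test (so std, UUID, and DataEngine are already in scope). The TEST_DATA path is hypothetical; the Condition API is the one introduced in this commit.

test "Filter TEST_DATA using a condition" {
    const allocator = std.testing.allocator;
    // Hypothetical folder holding the link-free TEST_DATA dataset.
    var data_engine = DataEngine.init(allocator, "ZipponDB/TEST_DATA");
    defer data_engine.deinit();

    var uuids = std.ArrayList(UUID).init(allocator);
    defer uuids.deinit();

    const condition = DataEngine.Condition{
        .struct_name = "User",
        .member_name = "age",
        .value = "10",
        .operation = .superior,
        .data_type = .int,
    };
    try data_engine.getUUIDListUsingCondition(condition, &uuids);

    // Every returned UUID should belong to a User with age > 10.
    try std.testing.expect(uuids.items.len > 0);
}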

View File

@ -1,5 +1,5 @@
const std = @import("std");
const dtypes = @import("dtypes.zig");
const metadata = @import("metadata.zig");
const UUID = @import("uuid.zig").UUID;
const Tokenizer = @import("ziqlTokenizer.zig").Tokenizer;
const Token = @import("ziqlTokenizer.zig").Token;
@ -25,144 +25,56 @@ const stdout = std.io.getStdOut().writer();
/// If no file is found, a new one is created.
/// Take the main.zippondata file, the index of the file where the data is saved and the string to add at the end of the line
pub const Parser = struct {
arena: std.heap.ArenaAllocator,
allocator: Allocator,
toker: *Tokenizer,
data_engine: *DataEngine,
pub fn init(allocator: Allocator, toker: *Tokenizer, data_engine: *DataEngine) Parser {
var arena = std.heap.ArenaAllocator.init(allocator);
pub fn init(allocator: Allocator, toker: *Tokenizer) Parser {
return Parser{
.arena = arena,
.allocator = arena.allocator(),
.allocator = allocator,
.toker = toker,
.data_engine = data_engine,
};
}
pub fn deinit(self: *Parser) void {
self.arena.deinit();
}
pub fn parse(self: *Parser) !void {
var data_engine = DataEngine.init(self.allocator, null);
defer data_engine.deinit();
var struct_name_token = self.toker.next();
const struct_name = self.toker.getTokenSlice(struct_name_token);
if (!metadata.isStructNameExists(struct_name)) self.print_error("Struct not found in current schema", &struct_name_token);
pub fn parse(self: *Parser, struct_name: []const u8) !void {
var token = self.toker.next();
switch (token.tag) {
.l_paren => {},
else => {
try self.print_error("Error: Expected (", &token);
self.print_error("Error: Expected (", &token);
},
}
const buffer = try self.allocator.alloc(u8, 1024 * 100);
defer self.allocator.free(buffer);
var data_map = self.parseData(struct_name);
defer data_map.deinit();
var data = self.parseData(); // data is a map with key as member name and value as str of the value inserted in the query. So age = 12 is the string 12 here
defer data.deinit();
if (!self.checkIfAllMemberInMap(struct_name, &data)) return;
const entity = try dtypes.createEntityFromMap(self.allocator, struct_name, data);
const uuid_str = entity.User.*.id.format_uuid();
defer stdout.print("Added new {s} successfully using UUID: {s}\n", .{
struct_name,
uuid_str,
}) catch {};
const member_names = dtypes.structName2structMembers(struct_name);
for (member_names) |member_name| {
var file_map = self.data_engine.getFilesStat(struct_name, member_name) catch {
try stdout.print("Error: File stat error", .{});
return;
};
const potential_file_name_to_use = self.data_engine.getFirstUsableFile(file_map);
if (potential_file_name_to_use) |file_name| {
const file_index = self.data_engine.fileName2Index(file_name);
try stdout.print("Using file: {s} with a size of {d}\n", .{ file_name, file_map.get(file_name).?.size });
const path = try std.fmt.bufPrint(buffer, "ZipponDB/DATA/{s}/{s}/{s}", .{
struct_name,
member_name,
file_name,
});
var file = std.fs.cwd().openFile(path, .{
.mode = .read_write,
}) catch {
try stdout.print("Error opening data file.", .{});
return;
};
defer file.close();
try file.seekFromEnd(0);
try file.writer().print("{s} {s}\n", .{ uuid_str, data.get(member_name).? });
const path_to_main = try std.fmt.bufPrint(buffer, "ZipponDB/DATA/{s}/{s}/main.zippondata", .{
struct_name,
member_name,
});
var file_main = std.fs.cwd().openFile(path_to_main, .{
.mode = .read_write,
}) catch {
try stdout.print("Error opening data file.", .{});
return;
};
defer file_main.close();
try self.data_engine.appendToLineAtIndex(file_main, file_index, &uuid_str);
} else {
const max_index = self.data_engine.maxFileIndex(file_map);
const new_file_path = try std.fmt.bufPrint(buffer, "ZipponDB/DATA/{s}/{s}/{d}.zippondata", .{
struct_name,
member_name,
max_index + 1,
});
try stdout.print("new file path: {s}\n", .{new_file_path});
// TODO: Create new file and save the data inside
const new_file = std.fs.cwd().createFile(new_file_path, .{}) catch @panic("Error creating new data file");
defer new_file.close();
try new_file.writer().print("{s} {s}\n", .{ &uuid_str, data.get(member_name).? });
const path_to_main = try std.fmt.bufPrint(buffer, "ZipponDB/DATA/{s}/{s}/main.zippondata", .{
struct_name,
member_name,
});
var file_main = std.fs.cwd().openFile(path_to_main, .{
.mode = .read_write,
}) catch {
try stdout.print("Error opening data file.", .{});
@panic("");
};
defer file_main.close();
try file_main.seekFromEnd(0);
try file_main.writeAll("\n ");
try file_main.seekTo(0);
try self.data_engine.appendToLineAtIndex(file_main, max_index + 1, &uuid_str);
}
}
if (self.checkIfAllMemberInMap(struct_name, &data_map)) {
try data_engine.writeEntity(struct_name, data_map);
} else |_| {}
}
/// Take the tokenizer and return a map of the query for the ADD command.
/// Keys are the member names and values are the value strings from the query, e.g. 'Adrien' or '10'
pub fn parseData(self: *Parser) std.StringHashMap([]const u8) {
/// TODO: Make it clean using a State like the other parsers
pub fn parseData(self: *Parser, struct_name: []const u8) std.StringHashMap([]const u8) {
var token = self.toker.next();
var member_map = std.StringHashMap([]const u8).init(
self.allocator,
);
var member_map = std.StringHashMap([]const u8).init(self.allocator);
while (token.tag != Token.Tag.eof) : (token = self.toker.next()) {
switch (token.tag) {
.r_paren => continue,
.identifier => {
const member_name_str = self.toker.getTokenSlice(token);
if (!metadata.isMemberNameInStruct(struct_name, member_name_str)) self.print_error("Member not found in struct.", &token);
token = self.toker.next();
switch (token.tag) {
.equal => {
@ -170,22 +82,23 @@ pub const Parser = struct {
switch (token.tag) {
.string_literal, .number_literal => {
const value_str = self.toker.getTokenSlice(token);
member_map.put(member_name_str, value_str) catch @panic("Could not add member name and value to map in getMapOfMember");
member_map.put(member_name_str, value_str) catch self.print_error("Could not add member name and value to map in getMapOfMember", &token);
token = self.toker.next();
switch (token.tag) {
.comma, .r_paren => continue,
else => self.print_error("Error: Expected , after string or number. E.g. ADD User (name='bob', age=10)", &token) catch {},
else => self.print_error("Error: Expected , after string or number. E.g. ADD User (name='bob', age=10)", &token),
}
},
.keyword_null => {
const value_str = "null";
member_map.put(member_name_str, value_str) catch self.print_error("Error: 001", &token) catch {};
member_map.put(member_name_str, value_str) catch self.print_error("Error: 001", &token);
token = self.toker.next();
switch (token.tag) {
.comma, .r_paren => continue,
else => self.print_error("Error: Expected , after string or number. E.g. ADD User (name='bob', age=10)", &token) catch {},
else => self.print_error("Error: Expected , after string or number. E.g. ADD User (name='bob', age=10)", &token),
}
},
// Create a tag to avoid creating an array and then joining it. Instead, just read the buffer from [ to ] in the tokenizer itself
.l_bracket => {
var array_values = std.ArrayList([]const u8).init(self.allocator);
token = self.toker.next();
@ -193,60 +106,78 @@ pub const Parser = struct {
switch (token.tag) {
.string_literal, .number_literal => {
const value_str = self.toker.getTokenSlice(token);
array_values.append(value_str) catch @panic("Could not add value to array in getMapOfMember");
array_values.append(value_str) catch self.print_error("Could not add value to array in getMapOfMember", &token);
},
else => self.print_error("Error: Expected string or number in array. E.g. ADD User (scores=[10 20 30])", &token) catch {},
else => self.print_error("Error: Expected string or number in array. E.g. ADD User (scores=[10 20 30])", &token),
}
}
// Maybe change that as it just recreates a string that is already in the buffer
const array_str = std.mem.join(self.allocator, " ", array_values.items) catch @panic("Couldn't join the value of array");
member_map.put(member_name_str, array_str) catch @panic("Could not add member name and value to map in getMapOfMember");
},
else => self.print_error("Error: Expected string or number after =. E.g. ADD User (name='bob')", &token) catch {},
const array_str = std.mem.join(self.allocator, " ", array_values.items) catch {
self.print_error("Couln't join the value of array", &token);
@panic("=)");
};
member_map.put(member_name_str, array_str) catch self.print_error("Could not add member name and value to map in getMapOfMember", &token);
token = self.toker.next();
switch (token.tag) {
.comma, .r_paren => continue,
else => self.print_error("Error: Expected , after string or number. E.g. ADD User (name='bob', age=10)", &token),
}
},
else => self.print_error("Error: Expected = after a member declaration. E.g. ADD User (name='bob')", &token) catch {},
else => self.print_error("Error: Expected string or number after =. E.g. ADD User (name='bob')", &token),
}
},
else => self.print_error("Error: Unknow token. This should be the name of a member. E.g. name in ADD User (name='bob')", &token) catch {},
else => self.print_error("Error: Expected = after a member declaration. E.g. ADD User (name='bob')", &token),
}
},
else => self.print_error("Error: Unknow token. This should be the name of a member. E.g. name in ADD User (name='bob')", &token),
}
}
return member_map;
}
fn checkIfAllMemberInMap(_: *Parser, struct_name: []const u8, map: *std.StringHashMap([]const u8)) bool {
const all_struct_member = dtypes.structName2structMembers(struct_name);
const AddError = error{NotAllMemberInMap};
fn checkIfAllMemberInMap(_: *Parser, struct_name: []const u8, map: *std.StringHashMap([]const u8)) !void {
const all_struct_member = metadata.structName2structMembers(struct_name);
var count: u16 = 0;
var started_printing = false;
for (all_struct_member) |key| {
if (map.contains(key)) count += 1 else stdout.print("Error: ADD query of struct: {s}; missing member: {s}\n", .{
struct_name,
key,
}) catch {};
if (map.contains(key)) count += 1 else {
if (!started_printing) {
try stdout.print("Error: ADD query of struct: {s}; missing member: {s}", .{ struct_name, key });
started_printing = true;
} else {
try stdout.print(" {s}", .{key});
}
}
}
return ((count == all_struct_member.len) and (count == map.count()));
if (started_printing) try stdout.print("\n", .{});
if (!((count == all_struct_member.len) and (count == map.count()))) return error.NotAllMemberInMap;
}
fn print_error(self: *Parser, message: []const u8, token: *Token) !void {
try stdout.print("\n", .{});
try stdout.print("{s}\n", .{self.toker.buffer});
fn print_error(self: *Parser, message: []const u8, token: *Token) void {
stdout.print("\n", .{}) catch {};
stdout.print("{s}\n", .{self.toker.buffer}) catch {};
// Calculate the number of spaces needed to reach the start position.
var spaces: usize = 0;
while (spaces < token.loc.start) : (spaces += 1) {
try stdout.print(" ", .{});
stdout.print(" ", .{}) catch {};
}
// Print the '^' characters for the error span.
var i: usize = token.loc.start;
while (i < token.loc.end) : (i += 1) {
try stdout.print("^", .{});
stdout.print("^", .{}) catch {};
}
try stdout.print(" \n", .{}); // Align with the message
stdout.print(" \n", .{}) catch {}; // Align with the message
try stdout.print("{s}\n", .{message});
stdout.print("{s}\n", .{message}) catch {};
@panic("");
}

View File

@ -1,4 +1,5 @@
const std = @import("std");
const metadata = @import("metadata.zig");
const Allocator = std.mem.Allocator;
const Tokenizer = @import("ziqlTokenizer.zig").Tokenizer;
const Token = @import("ziqlTokenizer.zig").Token;
@ -20,18 +21,16 @@ pub const Parser = struct {
arena: std.heap.ArenaAllocator,
allocator: Allocator,
toker: *Tokenizer,
data_engine: *DataEngine,
state: State,
additional_data: AdditionalData,
pub fn init(allocator: Allocator, toker: *Tokenizer, data_engine: *DataEngine) Parser {
pub fn init(allocator: Allocator, toker: *Tokenizer) Parser {
var arena = std.heap.ArenaAllocator.init(allocator);
return Parser{
.arena = arena,
.allocator = arena.allocator(),
.toker = toker,
.data_engine = data_engine,
.state = State.start,
.additional_data = AdditionalData.init(allocator),
};
@ -52,8 +51,9 @@ pub const Parser = struct {
}
pub fn deinit(self: *AdditionalData) void {
for (self.member_to_find.items) |elem| {
elem.additional_data.deinit();
for (0..self.member_to_find.items.len) |i| {
std.debug.print("{d}\n", .{i});
self.member_to_find.items[i].additional_data.deinit();
}
self.member_to_find.deinit();
@ -77,32 +77,84 @@ pub const Parser = struct {
invalid,
end,
// For the additional data
// For the main parse function
expect_filter,
// For the additional data parser
expect_count_of_entity_to_find,
expect_semicolon_OR_right_bracket,
expect_member,
next_member_OR_end_OR_new_additional_data,
next_member_OR_end,
expect_filter,
// For the filter parser
expect_condition,
};
pub fn parse(self: *Parser) !void {
var data_engine = DataEngine.init(self.allocator, null);
defer data_engine.deinit();
var struct_name_token = self.toker.next();
if (!self.isStructInSchema(self.toker.getTokenSlice(struct_name_token))) {
try self.printError("Error: Struct name not in current shema.", &struct_name_token);
return;
}
var token = self.toker.next();
while (self.state != State.end) : (token = self.toker.next()) {
var keep_next = false;
while (self.state != State.end) : ({
token = if (!keep_next) self.toker.next() else token;
keep_next = false;
}) {
switch (self.state) {
.start => {
switch (token.tag) {
.l_bracket => {
try self.parse_additional_data(&self.additional_data);
try self.parseAdditionalData(&self.additional_data);
self.state = State.expect_filter;
},
.l_brace => {
self.state = State.expect_filter;
keep_next = true;
},
else => {
try stdout.print("Found {any}\n", .{token.tag});
try self.printError("Error: Expected filter starting with {} or what to return starting with []", &token);
return;
},
}
},
.expect_filter => {
var array = std.ArrayList(UUID).init(self.allocator);
try self.parseFilter(&array, struct_name_token);
self.state = State.end;
},
else => return,
}
}
}
fn parseFilter(self: *Parser, left_array: *std.ArrayList(UUID), struct_name_token: Token) !void {
const right_array = std.ArrayList(UUID).init(self.allocator);
var token = self.toker.next();
var keep_next = false;
self.state = State.expect_member;
_ = right_array;
_ = left_array;
while (self.state != State.end) : ({
token = if (!keep_next) self.toker.next() else token;
keep_next = false;
}) {
switch (self.state) {
.expect_member => {
if (!self.isMemberPartOfStruct(self.toker.getTokenSlice(struct_name_token), self.toker.getTokenSlice(token))) {
try self.printError("Error: Member not part of struct.", &token);
}
self.state = State.expect_condition;
},
else => return,
}
}
@ -110,24 +162,21 @@ pub const Parser = struct {
/// When this function is called, the last token retrieved by the tokenizer should be [.
/// Check if an int is here -> check if ; is here -> check if member is here -> check if [ is here -> loop
pub fn parse_additional_data(self: *Parser, additional_data: *AdditionalData) !void {
pub fn parseAdditionalData(self: *Parser, additional_data: *AdditionalData) !void {
var token = self.toker.next();
var skip_next = false;
var keep_next = false;
self.state = State.expect_count_of_entity_to_find;
while (self.state != State.end) : ({
token = if (!skip_next) self.toker.next() else token;
skip_next = false;
token = if (!keep_next) self.toker.next() else token;
keep_next = false;
}) {
switch (self.state) {
.expect_count_of_entity_to_find => {
switch (token.tag) {
.number_literal => {
const count = std.fmt.parseInt(usize, self.toker.getTokenSlice(token), 10) catch {
try stdout.print(
"Error parsing query: {s} need to be a number.",
.{self.toker.getTokenSlice(token)},
);
try self.printError("Error while transforming this into a integer.", &token);
self.state = .invalid;
continue;
};
@ -136,7 +185,7 @@ pub const Parser = struct {
},
else => {
self.state = .expect_member;
skip_next = true;
keep_next = true;
},
}
},
@ -149,7 +198,7 @@ pub const Parser = struct {
return;
},
else => {
try self.print_error(
try self.printError(
"Error: Expect ';' or ']'.",
&token,
);
@ -171,7 +220,7 @@ pub const Parser = struct {
self.state = .next_member_OR_end_OR_new_additional_data;
},
else => {
try self.print_error(
try self.printError(
"Error: A member name should be here.",
&token,
);
@ -187,13 +236,13 @@ pub const Parser = struct {
return;
},
.l_bracket => {
try self.parse_additional_data(
try self.parseAdditionalData(
&additional_data.member_to_find.items[additional_data.member_to_find.items.len - 1].additional_data,
);
self.state = .next_member_OR_end;
},
else => {
try self.print_error(
try self.printError(
"Error: Expected a comma ',' or the end or a new list of member to return.",
&token,
);
@ -203,15 +252,14 @@ pub const Parser = struct {
.next_member_OR_end => {
switch (token.tag) {
.comma => {
try stdout.print("Expected new member\n", .{});
self.state = .expect_member;
},
.r_bracket => {
return;
},
else => {
try self.print_error(
"Error: Expected a new member name or the end of the list of member name to return.",
try self.printError(
"Error: Expected a comma or the end of the list of member name to return.",
&token,
);
},
@ -221,7 +269,7 @@ pub const Parser = struct {
@panic("=)");
},
else => {
try self.print_error(
try self.printError(
"Error: Unknow state.",
&token,
);
@ -230,7 +278,7 @@ pub const Parser = struct {
}
}
fn print_error(self: *Parser, message: []const u8, token: *Token) !void {
fn printError(self: *Parser, message: []const u8, token: *Token) !void {
try stdout.print("\n", .{});
try stdout.print("{s}\n", .{self.toker.buffer});
@ -251,6 +299,27 @@ pub const Parser = struct {
@panic("");
}
/// Take a struct name and a member name and return true if the member name is part of the struct
fn isMemberPartOfStruct(_: *Parser, struct_name: []const u8, member_name: []const u8) bool {
const all_struct_member = metadata.structName2structMembers(struct_name);
for (all_struct_member) |key| {
if (std.mem.eql(u8, key, member_name)) return true;
}
return false;
}
/// Check if a string is the name of a struct in the currently used engine
fn isStructInSchema(_: *Parser, struct_name_to_check: []const u8) bool {
for (metadata.struct_name_list) |struct_name| {
if (std.mem.eql(u8, struct_name_to_check, struct_name)) {
return true;
}
}
return false;
}
};
// TODO: Optimize. Maybe just do a new list and return it instead
@ -292,7 +361,7 @@ test "Test AdditionalData" {
testAdditionalData("[1]", additional_data1);
var additional_data2 = Parser.AdditionalData.init(allocator);
defer additional_data2.member_to_find.deinit();
defer additional_data2.deinit();
try additional_data2.member_to_find.append(
Parser.AdditionalDataMember.init(
allocator,
@ -303,7 +372,7 @@ test "Test AdditionalData" {
var additional_data3 = Parser.AdditionalData.init(allocator);
additional_data3.entity_count_to_find = 1;
defer additional_data3.member_to_find.deinit();
defer additional_data3.deinit();
try additional_data3.member_to_find.append(
Parser.AdditionalDataMember.init(
allocator,
@ -314,7 +383,7 @@ test "Test AdditionalData" {
var additional_data4 = Parser.AdditionalData.init(allocator);
additional_data4.entity_count_to_find = 100;
defer additional_data4.member_to_find.deinit();
defer additional_data4.deinit();
try additional_data4.member_to_find.append(
Parser.AdditionalDataMember.init(
allocator,

View File

@ -19,6 +19,48 @@ pub fn parseArrayInt(allocator: std.mem.Allocator, array_str: []const u8) std.Ar
return array;
}
pub fn parseFloat(value_str: []const u8) f64 {
return std.fmt.parseFloat(f64, value_str) catch return 0;
}
pub fn parseArrayFloat(allocator: std.mem.Allocator, array_str: []const u8) std.ArrayList(f64) {
var array = std.ArrayList(f64).init(allocator);
var it = std.mem.splitAny(u8, array_str[1 .. array_str.len - 1], " ");
while (it.next()) |x| {
array.append(parseFloat(x)) catch {};
}
return array;
}
pub fn parseBool(value_str: []const u8) bool {
return (value_str[0] != '0');
}
pub fn parseArrayBool(allocator: std.mem.Allocator, array_str: []const u8) std.ArrayList(bool) {
var array = std.ArrayList(bool).init(allocator);
var it = std.mem.splitAny(u8, array_str[1 .. array_str.len - 1], " ");
while (it.next()) |x| {
array.append(parseBool(x)) catch {};
}
return array;
}
pub fn parseArrayStr(allocator: std.mem.Allocator, array_str: []const u8) std.ArrayList([]const u8) {
var array = std.ArrayList([]const u8).init(allocator);
var it = std.mem.splitAny(u8, array_str[1 .. array_str.len - 1], " ");
while (it.next()) |x| {
const x_copy = allocator.dupe(u8, x) catch @panic("=(");
array.append(x_copy) catch {};
}
return array;
}
test "Data parsing" {
const allocator = std.testing.allocator;
@ -35,4 +77,32 @@ test "Data parsing" {
defer out2.deinit();
const expected_out2: [5]i64 = .{ 1, 14, 44, 42, 0 };
try std.testing.expect(std.mem.eql(i64, out2.items, &expected_out2));
// Float
const in3: [3][]const u8 = .{ "1.3", "65.991", "Hello" };
const expected_out3: [3]f64 = .{ 1.3, 65.991, 0 };
for (in3, 0..) |value, i| {
try std.testing.expect(parseFloat(value) == expected_out3[i]);
}
// Int array
const in4 = "[1.5 14.3 44.9999 42 hello]";
const out4 = parseArrayFloat(allocator, in4);
defer out4.deinit();
const expected_out4: [5]f64 = .{ 1.5, 14.3, 44.9999, 42, 0 };
try std.testing.expect(std.mem.eql(f64, out4.items, &expected_out4));
// Float
const in5: [3][]const u8 = .{ "1", "Hello", "0" };
const expected_out5: [3]bool = .{ true, true, false };
for (in5, 0..) |value, i| {
try std.testing.expect(parseBool(value) == expected_out5[i]);
}
// Int array
const in6 = "[1 0 0 1 1]";
const out6 = parseArrayBool(allocator, in6);
defer out6.deinit();
const expected_out6: [5]bool = .{ true, false, false, true, true };
try std.testing.expect(std.mem.eql(bool, out6.items, &expected_out6));
}

View File

@ -1,42 +1,286 @@
const std = @import("std");
const dataParsing = @import("data-parsing.zig");
const metadata = @import("metadata.zig");
const Allocator = std.mem.Allocator;
const Tokenizer = @import("ziqlTokenizer.zig").Tokenizer;
const UUID = @import("uuid.zig").UUID;
const stdout = std.io.getStdOut().writer();
/// Manage everything related to reading or writing files,
/// or even getting stats, whatever. If it touches files, it's here.
pub const DataEngine = struct {
arena: std.heap.ArenaAllocator,
allocator: Allocator,
dir: std.fs.Dir, // The path to the DATA folder
max_file_size: usize = 1e+8, // 100mb
//
const DataEngineError = error{
ErrorCreateDataFolder,
ErrorCreateStructFolder,
ErrorCreateMemberFolder,
ErrorCreateMainFile,
ErrorCreateDataFile,
};
pub fn init(allocator: Allocator) DataEngine {
var arena = std.heap.ArenaAllocator.init(allocator);
const dir = std.fs.cwd().openDir("ZipponDB/DATA", .{}) catch @panic("Error opening ZipponDB/DATA");
/// Supported operations for the filter
/// TODO: Add more operations, like IN for arrays and LIKE for regex
const Operation = enum {
equal,
different,
superior,
superior_or_equal,
inferior,
inferior_or_equal,
};
/// Supported data types for the DB
const DataType = enum {
int,
float,
str,
bool_,
int_array,
float_array,
str_array,
bool_array,
};
const ComparisonValue = union {
int: i64,
float: f64,
str: []const u8,
bool_: bool,
int_array: std.ArrayList(i64),
str_array: std.ArrayList([]const u8),
float_array: std.ArrayList(f64),
bool_array: std.ArrayList(bool),
};
/// Used to parse files. It takes a struct name and a member name to know what to parse,
/// an Operation (equal, different, superior, superior_or_equal, ...),
/// and the DataType (int, float, str).
/// TODO: Change the value to be the right type and not just a string all the time
const Condition = struct {
struct_name: []const u8,
member_name: []const u8,
value: []const u8,
operation: Operation,
data_type: DataType,
};
pub fn init(allocator: Allocator, DATA_path: ?[]const u8) DataEngine {
const path = DATA_path orelse "ZipponDB/DATA";
const dir = std.fs.cwd().openDir(path, .{}) catch @panic("Error opening ZipponDB/DATA");
return DataEngine{
.arena = arena,
.allocator = arena.allocator(),
.allocator = allocator,
.dir = dir,
};
}
pub fn deinit(self: *DataEngine) void {
self.arena.deinit();
self.dir.close();
}
/// Iterate over all files, get the max file name, and return its value as usize.
/// For example, if there are 1.zippondata and 2.zippondata, it returns 2.
fn maxFileIndex(_: *DataEngine, map: *std.StringHashMap(std.fs.File.Stat)) usize {
var iter = map.keyIterator();
var index_max: usize = 0;
while (iter.next()) |key| {
if (std.mem.eql(u8, key.*, "main.zippondata")) continue;
var iter_file_name = std.mem.tokenize(u8, key.*, ".");
const num_str = iter_file_name.next().?;
const num: usize = std.fmt.parseInt(usize, num_str, 10) catch @panic("Error parsing file name into usize");
if (num > index_max) index_max = num;
/// Take a condition and an array of UUIDs and fill the array with all UUIDs that match the condition
pub fn getUUIDListUsingCondition(self: *DataEngine, condition: Condition, uuid_array: *std.ArrayList(UUID)) !void {
const file_names = self.getFilesNames(condition.struct_name, condition.member_name) catch @panic("Can't get list of files");
defer self.deinitFilesNames(&file_names);
const sub_path = std.fmt.allocPrint(
self.allocator,
"{s}/{s}/{s}",
.{ condition.struct_name, condition.member_name, file_names.items[0] },
) catch @panic("Can't create sub_path for init a DataIterator");
defer self.allocator.free(sub_path);
var file = self.dir.openFile(sub_path, .{}) catch @panic("Can't open first file to init a data iterator");
// defer self.allocator.free(sub_path);
var output: [1024 * 50]u8 = undefined; // Maybe need to increase this, as it limits the size of a line in the files
var output_fbs = std.io.fixedBufferStream(&output);
const writer = output_fbs.writer();
var buffered = std.io.bufferedReader(file.reader());
var reader = buffered.reader();
var file_index: usize = 0;
var compare_value: ComparisonValue = undefined;
switch (condition.data_type) {
.int => compare_value = ComparisonValue{ .int = dataParsing.parseInt(condition.value) },
.str => compare_value = ComparisonValue{ .str = condition.value },
.float => compare_value = ComparisonValue{ .float = dataParsing.parseFloat(condition.value) },
.bool_ => compare_value = ComparisonValue{ .bool_ = dataParsing.parseBool(condition.value) },
.int_array => compare_value = ComparisonValue{ .int_array = dataParsing.parseArrayInt(self.allocator, condition.value) },
.str_array => compare_value = ComparisonValue{ .str_array = dataParsing.parseArrayStr(self.allocator, condition.value) },
.float_array => compare_value = ComparisonValue{ .float_array = dataParsing.parseArrayFloat(self.allocator, condition.value) },
.bool_array => compare_value = ComparisonValue{ .bool_array = dataParsing.parseArrayBool(self.allocator, condition.value) },
}
while (true) {
output_fbs.reset();
reader.streamUntilDelimiter(writer, '\n', null) catch |err| switch (err) {
error.EndOfStream => {
output_fbs.reset(); // clear buffer before exit
file_index += 1;
if (file_index == file_names.items.len) break;
// TODO: Update the file and reader to be the next file of the list
break;
}, // file read till the end
else => {
std.debug.print("Error while reading file: {any}\n", .{err});
break;
},
};
// TODO: Maybe put the comparison directly inside the union type, as a compare function (a sketch follows this function).
// Could also do the switch directly on the compare_value.
// TODO: Add an error for invalid conditions, like superior between two strings or arrays.
switch (condition.operation) {
.equal => {
switch (condition.data_type) {
.int => if (compare_value.int == dataParsing.parseInt(output_fbs.getWritten()[37..])) try uuid_array.append(try UUID.parse(output_fbs.getWritten()[0..36])),
.float => if (compare_value.float == dataParsing.parseFloat(output_fbs.getWritten()[37..])) try uuid_array.append(try UUID.parse(output_fbs.getWritten()[0..36])),
.str => if (std.mem.eql(u8, compare_value.str, output_fbs.getWritten()[38 .. output_fbs.getWritten().len - 1])) try uuid_array.append(try UUID.parse(output_fbs.getWritten()[0..36])),
.bool_ => if (compare_value.bool_ == dataParsing.parseBool(output_fbs.getWritten()[37..])) try uuid_array.append(try UUID.parse(output_fbs.getWritten()[0..36])),
// TODO: Implement for array too
else => {},
}
},
.different => {
switch (condition.data_type) {
.int => if (compare_value.int != dataParsing.parseInt(output_fbs.getWritten()[37..])) try uuid_array.append(try UUID.parse(output_fbs.getWritten()[0..36])),
.float => if (compare_value.float != dataParsing.parseFloat(output_fbs.getWritten()[37..])) try uuid_array.append(try UUID.parse(output_fbs.getWritten()[0..36])),
.str => if (!std.mem.eql(u8, compare_value.str, output_fbs.getWritten()[38 .. output_fbs.getWritten().len - 1])) try uuid_array.append(try UUID.parse(output_fbs.getWritten()[0..36])),
.bool_ => if (compare_value.bool_ != dataParsing.parseBool(output_fbs.getWritten()[37..])) try uuid_array.append(try UUID.parse(output_fbs.getWritten()[0..36])),
// TODO: Implement for array too
else => {},
}
},
.superior_or_equal => {
switch (condition.data_type) {
.int => if (compare_value.int <= dataParsing.parseInt(output_fbs.getWritten()[37..])) try uuid_array.append(try UUID.parse(output_fbs.getWritten()[0..36])),
.float => if (compare_value.float <= dataParsing.parseFloat(output_fbs.getWritten()[37..])) try uuid_array.append(try UUID.parse(output_fbs.getWritten()[0..36])),
// TODO: Implement for array too
else => {},
}
},
.superior => {
switch (condition.data_type) {
.int => if (compare_value.int < dataParsing.parseInt(output_fbs.getWritten()[37..])) try uuid_array.append(try UUID.parse(output_fbs.getWritten()[0..36])),
.float => if (compare_value.float < dataParsing.parseFloat(output_fbs.getWritten()[37..])) try uuid_array.append(try UUID.parse(output_fbs.getWritten()[0..36])),
// TODO: Implement for array too
else => {},
}
},
.inferior_or_equal => {
switch (condition.data_type) {
.int => if (compare_value.int >= dataParsing.parseInt(output_fbs.getWritten()[37..])) try uuid_array.append(try UUID.parse(output_fbs.getWritten()[0..36])),
.float => if (compare_value.float >= dataParsing.parseFloat(output_fbs.getWritten()[37..])) try uuid_array.append(try UUID.parse(output_fbs.getWritten()[0..36])),
// TODO: Implement for array too
else => {},
}
},
.inferior => {
switch (condition.data_type) {
.int => if (compare_value.int > dataParsing.parseInt(output_fbs.getWritten()[37..])) try uuid_array.append(try UUID.parse(output_fbs.getWritten()[0..36])),
.float => if (compare_value.float > dataParsing.parseFloat(output_fbs.getWritten()[37..])) try uuid_array.append(try UUID.parse(output_fbs.getWritten()[0..36])),
// TODO: Implement for array too
else => {},
}
},
}
}
}
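Regarding the TODO near the top of the loop (putting the comparison inside the value type): a hypothetical sketch of what that could look like with a tagged union, so getUUIDListUsingCondition only has to call one method per line. It is not part of this commit, reuses the existing Operation enum and dataParsing import, and leaves the array types out.

// Hypothetical tagged-union alternative to the bare ComparisonValue above.
const ComparisonValueTagged = union(enum) {
    int: i64,
    float: f64,
    str: []const u8,
    bool_: bool,

    /// Compare the condition value (self) against the raw string read from the data file.
    /// The file value sits on the right, matching the switches above.
    fn compare(self: @This(), operation: Operation, file_value: []const u8) bool {
        return switch (self) {
            .int => |v| switch (operation) {
                .equal => v == dataParsing.parseInt(file_value),
                .different => v != dataParsing.parseInt(file_value),
                .superior => v < dataParsing.parseInt(file_value),
                .superior_or_equal => v <= dataParsing.parseInt(file_value),
                .inferior => v > dataParsing.parseInt(file_value),
                .inferior_or_equal => v >= dataParsing.parseInt(file_value),
            },
            .float => |v| switch (operation) {
                .equal => v == dataParsing.parseFloat(file_value),
                .different => v != dataParsing.parseFloat(file_value),
                .superior => v < dataParsing.parseFloat(file_value),
                .superior_or_equal => v <= dataParsing.parseFloat(file_value),
                .inferior => v > dataParsing.parseFloat(file_value),
                .inferior_or_equal => v >= dataParsing.parseFloat(file_value),
            },
            .str => |v| switch (operation) {
                .equal => std.mem.eql(u8, v, file_value),
                .different => !std.mem.eql(u8, v, file_value),
                else => false, // ordering on strings is not supported yet
            },
            .bool_ => |v| switch (operation) {
                .equal => v == dataParsing.parseBool(file_value),
                .different => v != dataParsing.parseBool(file_value),
                else => false,
            },
        };
    }
};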
// TODO: Test leak on that
pub fn writeEntity(self: *DataEngine, struct_name: []const u8, data_map: std.StringHashMap([]const u8)) !void {
const uuid_str = UUID.init().format_uuid();
defer stdout.print("Added new {s} successfully using UUID: {s}\n", .{
struct_name,
uuid_str,
}) catch {};
const member_names = metadata.structName2structMembers(struct_name);
for (member_names) |member_name| {
const potential_file_name_to_use = try self.getFirstUsableFile(struct_name, member_name);
if (potential_file_name_to_use) |file_name| {
defer self.allocator.free(file_name);
const file_index = self.fileName2Index(file_name);
const path = try std.fmt.allocPrint(self.allocator, "{s}/{s}/{s}", .{
struct_name,
member_name,
file_name,
});
defer self.allocator.free(path);
var file = self.dir.openFile(path, .{
.mode = .read_write,
}) catch {
try stdout.print("Error opening data file.", .{});
return;
};
defer file.close();
try file.seekFromEnd(0);
try file.writer().print("{s} {s}\n", .{ uuid_str, data_map.get(member_name).? });
const path_to_main = try std.fmt.allocPrint(self.allocator, "{s}/{s}/main.zippondata", .{
struct_name,
member_name,
});
defer self.allocator.free(path_to_main);
var file_main = self.dir.openFile(path_to_main, .{
.mode = .read_write,
}) catch {
try stdout.print("Error opening data file.", .{});
return;
};
defer file_main.close();
try self.addUUIDToMainFile(file_main, file_index + 1, &uuid_str);
} else {
const max_index = try self.maxFileIndex(struct_name, member_name);
const new_file_path = try std.fmt.allocPrint(self.allocator, "{s}/{s}/{d}.zippondata", .{
struct_name,
member_name,
max_index + 1,
});
try stdout.print("new file path: {s}\n", .{new_file_path});
// TODO: Create new file and save the data inside
const new_file = self.dir.createFile(new_file_path, .{}) catch @panic("Error creating new data file");
defer new_file.close();
try new_file.writer().print("{s} {s}\n", .{ &uuid_str, data_map.get(member_name).? });
const path_to_main = try std.fmt.allocPrint(self.allocator, "ZipponDB/DATA/{s}/{s}/main.zippondata", .{
struct_name,
member_name,
});
defer self.allocator.free(path_to_main);
var file_main = self.dir.openFile(path_to_main, .{
.mode = .read_write,
}) catch {
try stdout.print("Error opening data file.", .{});
@panic("");
};
defer file_main.close();
try file_main.seekFromEnd(0);
try file_main.writeAll("\n ");
try file_main.seekTo(0);
try self.addUUIDToMainFile(file_main, max_index + 1, &uuid_str);
}
}
return index_max;
}
/// Use a filename in the format 1.zippondata and return the 1
@ -49,55 +293,135 @@ pub const DataEngine = struct {
/// Add a UUID at a specific index of a file
/// Used when some data was deleted from previous zippondata files and they are now below the file size limit
fn appendToLineAtIndex(self: *DataEngine, file: std.fs.File, index: usize, str: []const u8) !void {
const buffer = try self.allocator.alloc(u8, 1024 * 100);
defer self.allocator.free(buffer);
fn addUUIDToMainFile(_: *DataEngine, file: std.fs.File, index: usize, uuid_str: []const u8) !void {
var output: [1024 * 50]u8 = undefined; // Maybe need to increase this, as it limits the size of a line in the files
var output_fbs = std.io.fixedBufferStream(&output);
const writer = output_fbs.writer();
var reader = file.reader();
var line_num: usize = 1;
while (try reader.readUntilDelimiterOrEof(buffer, '\n')) |_| {
while (true) {
output_fbs.reset();
reader.streamUntilDelimiter(writer, '\n', null) catch |err| switch (err) { // Maybe do better error handling, because if an error happens here, data is already written in the files but not in main
error.EndOfStream => {
output_fbs.reset(); // clear buffer before exit
break;
}, // file read till the end
else => break,
};
if (line_num == index) {
try file.seekBy(-1);
try file.writer().print("{s} ", .{str});
try file.writer().print("{s} ", .{uuid_str});
return;
}
line_num += 1;
}
}
/// Return a map of file path => Stat; for one struct and member name
/// E.g. for User & name
fn getFilesStat(self: *DataEngine, struct_name: []const u8, member_name: []const u8) !*std.StringHashMap(std.fs.File.Stat) {
const buffer = try self.allocator.alloc(u8, 1024); // Adjust the size as needed
defer self.allocator.free(buffer);
fn getFilesNames(self: *DataEngine, struct_name: []const u8, member_name: []const u8) !std.ArrayList([]const u8) {
const sub_path = try std.fmt.allocPrint(self.allocator, "{s}/{s}", .{ struct_name, member_name });
const path = try std.fmt.bufPrint(buffer, "{s}{s}/{s}", .{ self.path.basename(), struct_name, member_name });
var file_names = std.ArrayList([]const u8).init(self.allocator);
var file_map = std.StringHashMap(std.fs.File.Stat).init(self.allocator);
const member_dir = self.path.openDir(path, .{ .iterate = true }) catch @panic("Error opening struct directory");
const member_dir = self.dir.openDir(sub_path, .{ .iterate = true }) catch @panic("Error opening member directory");
defer self.allocator.free(sub_path);
var iter = member_dir.iterate();
while (try iter.next()) |entry| {
if (entry.kind != std.fs.Dir.Entry.Kind.file) continue;
const file_stat = member_dir.statFile(entry.name) catch @panic("Error getting stat of a file");
file_map.put(entry.name, file_stat) catch @panic("Error adding stat to map");
if ((entry.kind != std.fs.Dir.Entry.Kind.file) or (std.mem.eql(u8, "main.zippondata", entry.name))) continue;
try file_names.append(try self.allocator.dupe(u8, entry.name));
}
return &file_map;
return file_names;
}
fn deinitFilesNames(self: *DataEngine, array: *const std.ArrayList([]const u8)) void {
for (array.items) |elem| {
self.allocator.free(elem);
}
array.deinit();
}
/// Use the file stats to find the first file under the byte size limit.
/// Return the name of the file. If none is found, return null.
fn getFirstUsableFile(self: *DataEngine, map: *std.StringHashMap(std.fs.File.Stat)) ?[]const u8 {
var iter = map.keyIterator();
while (iter.next()) |key| {
if (std.mem.eql(u8, key.*, "main.zippondata")) continue;
if (map.get(key.*).?.size < self.max_file_size) return key.*;
fn getFirstUsableFile(self: *DataEngine, struct_name: []const u8, member_name: []const u8) !?[]const u8 {
const sub_path = try std.fmt.allocPrint(self.allocator, "{s}/{s}", .{ struct_name, member_name });
defer self.allocator.free(sub_path);
var member_dir = try self.dir.openDir(sub_path, .{ .iterate = true });
defer member_dir.close();
var iter = member_dir.iterate();
while (try iter.next()) |entry| {
if ((entry.kind != std.fs.Dir.Entry.Kind.file) or (std.mem.eql(u8, "main.zippondata", entry.name))) continue;
const file_stat = try member_dir.statFile(entry.name);
if (file_stat.size < self.max_file_size) return try self.allocator.dupe(u8, entry.name);
}
return null;
}
/// Iterate over all files, get the max file name, and return its value as usize.
/// For example, if there are 1.zippondata and 2.zippondata, it returns 2.
fn maxFileIndex(self: *DataEngine, struct_name: []const u8, member_name: []const u8) !usize {
const buffer = try self.allocator.alloc(u8, 1024); // Adjust the size as needed
defer self.allocator.free(buffer);
const sub_path = try std.fmt.bufPrint(buffer, "{s}/{s}", .{ struct_name, member_name });
const member_dir = try self.dir.openDir(sub_path, .{ .iterate = true });
var count: usize = 0;
var iter = member_dir.iterate();
while (try iter.next()) |entry| {
if ((entry.kind != std.fs.Dir.Entry.Kind.file) or (std.mem.eql(u8, "main.zippondata", entry.name))) continue;
count += 1;
}
return count;
}
// TODO: Give the option to keep, dump, or erase the data
pub fn initDataFolder(self: *DataEngine) !void {
for (metadata.struct_name_list) |struct_name| {
self.dir.makeDir(struct_name) catch |err| switch (err) {
error.PathAlreadyExists => {},
else => return DataEngineError.ErrorCreateStructFolder,
};
const struct_dir = try self.dir.openDir(struct_name, .{});
const member_names = metadata.structName2structMembers(struct_name);
for (member_names) |member_name| {
struct_dir.makeDir(member_name) catch |err| switch (err) {
error.PathAlreadyExists => continue,
else => return DataEngineError.ErrorCreateMemberFolder,
};
const member_dir = try struct_dir.openDir(member_name, .{});
blk: {
const file = member_dir.createFile("main.zippondata", .{}) catch |err| switch (err) {
error.PathAlreadyExists => break :blk,
else => return DataEngineError.ErrorCreateMainFile,
};
try file.writeAll("\n");
}
_ = member_dir.createFile("0.zippondata", .{}) catch |err| switch (err) {
error.PathAlreadyExists => {},
else => return DataEngineError.ErrorCreateDataFile,
};
}
}
}
};
test "File iterator" {
const allocator = std.testing.allocator;
var data_engine = DataEngine.init(allocator, null);
defer data_engine.deinit();
var uuid_array = std.ArrayList(UUID).init(allocator);
defer uuid_array.deinit();
const condition = DataEngine.Condition{ .struct_name = "User", .member_name = "email", .value = "adrien@mail.com", .operation = .equal, .data_type = .str };
try data_engine.getUUIDListUsingCondition(condition, &uuid_array);
std.debug.print("Found {d} uuid with first as {any}\n\n", .{ uuid_array.items.len, uuid_array.items[0] });
}

View File

@ -152,37 +152,7 @@ fn buildEngine() !void {
try child.spawn();
_ = try child.wait();
const dtypes = @import("dtypes.zig");
const data_dir = try std.fs.cwd().openDir("ZipponDB/DATA", .{});
for (dtypes.struct_name_list) |struct_name| {
data_dir.makeDir(struct_name) catch |err| switch (err) {
error.PathAlreadyExists => {},
else => @panic("Error other than path already exists when trying to create a struct directory.\n"),
};
const struct_dir = try data_dir.openDir(struct_name, .{});
const member_names = dtypes.structName2structMembers(struct_name);
for (member_names) |member_name| {
struct_dir.makeDir(member_name) catch |err| switch (err) {
error.PathAlreadyExists => return,
else => @panic("Error other than path already exists when trying to create a member directory.\n"),
};
const member_dir = try struct_dir.openDir(member_name, .{});
blk: {
const file = member_dir.createFile("main.zippondata", .{}) catch |err| switch (err) {
error.PathAlreadyExists => break :blk,
else => @panic("Error: can't create main.zippondata"),
};
try file.writeAll("\n");
}
_ = member_dir.createFile("1.zippondata", .{}) catch |err| switch (err) {
error.PathAlreadyExists => {},
else => @panic("Error: can't create 1.zippondata"),
};
}
}
runCommand("__INIT__");
}
fn runCommand(null_term_query_str: [:0]const u8) void {
@ -192,7 +162,7 @@ fn runCommand(null_term_query_str: [:0]const u8) void {
// TODO: Use the folder ENGINE
const args = &[_][]const u8{ "./engine", null_term_query_str };
const result = std.process.Child.run(.{ .allocator = allocator, .argv = args, .max_output_bytes = 4084 }) catch |err| switch (err) {
const result = std.process.Child.run(.{ .allocator = allocator, .argv = args }) catch |err| switch (err) {
error.FileNotFound => {
std.debug.print("No engine found, please use `schema build` to make one.\n", .{});
return;

View File

@ -1,12 +1,8 @@
const std = @import("std");
const dtypes = @import("dtypes.zig");
const UUID = @import("uuid.zig").UUID;
const DataEngine = @import("dataEngine.zig").DataEngine;
const Tokenizer = @import("ziqlTokenizer.zig").Tokenizer;
const Token = @import("ziqlTokenizer.zig").Token;
const grabParser = @import("GRAB.zig").Parser;
const addParser = @import("ADD.zig").Parser;
const DataEngine = @import("dataEngine.zig").DataEngine;
const Allocator = std.mem.Allocator;
pub const Error = error{UUIDNotFound};
const stdout = std.io.getStdOut().writer();
@ -27,26 +23,17 @@ pub fn main() !void {
var toker = Tokenizer.init(null_term_query_str);
const first_token = toker.next();
const struct_name_token = toker.next();
var data_engine = DataEngine.init(allocator);
switch (first_token.tag) {
.keyword_grab => {
if (!isStructInSchema(toker.getTokenSlice(struct_name_token))) {
try stdout.print("Error: No struct named '{s}' in current schema.", .{toker.getTokenSlice(struct_name_token)});
return;
}
var parser = grabParser.init(allocator, &toker, &data_engine);
var parser = grabParser.init(allocator, &toker);
try parser.parse();
},
.keyword_add => {
if (!isStructInSchema(toker.getTokenSlice(struct_name_token))) {
try stdout.print("Error: No struct named '{s}' in current schema.", .{toker.getTokenSlice(struct_name_token)});
return;
}
var parser = addParser.init(allocator, &toker, &data_engine);
try parser.parse(toker.getTokenSlice(struct_name_token));
var parser = addParser.init(allocator, &toker);
parser.parse() catch |err| {
try stdout.print("Error: {any} while parsin ADD.\n", .{err});
};
},
.keyword_update => {
try stdout.print("Not yet implemented.\n", .{});
@ -55,22 +42,14 @@ pub fn main() !void {
try stdout.print("Not yet implemented.\n", .{});
},
.keyword__describe__ => {
try stdout.print("{s}", .{dtypes.describe_str});
try stdout.print("{s}", .{@embedFile("schema.zipponschema")});
},
.keyword__init__ => {
var data_engine = DataEngine.init(allocator, null);
try data_engine.initDataFolder();
},
else => {
try stdout.print("Query need to start with a keyword, including: GRAB ADD UPDATE DELETE\n", .{});
},
}
}
/// Check if a string is a name of a struct in the currently use engine
fn isStructInSchema(struct_name_to_check: []const u8) bool {
if (std.mem.eql(u8, struct_name_to_check, "describe")) return true;
for (dtypes.struct_name_list) |struct_name| {
if (std.mem.eql(u8, struct_name_to_check, struct_name)) {
return true;
}
}
return false;
}

View File

@ -1,70 +0,0 @@
const std = @import("std");
const UUID = @import("uuid.zig").UUID;
const dataParsing = @import("data-parsing.zig");
pub const User = struct {
id: UUID,
name: []const u8,
email: []const u8,
age: i64,
scores: []i64,
pub fn init(name: []const u8, email: []const u8, age: i64, scores: []i64) User {
return User{ .id = UUID.init(), .name = name, .email = email, .age = age, .scores = scores };
}
};
pub const Message = struct {
id: UUID,
content: []const u8,
pub fn init(content: []const u8) Message {
return Message{ .id = UUID.init(), .content = content };
}
};
pub const Types = union {
User: *const User,
Message: *const Message,
};
// Maybe create a struct like StructMetadata for the string list of member and name, ect
pub const struct_name_list: [2][]const u8 = .{
"User",
"Message",
};
pub const struct_member_list: [2][]const []const u8 = .{
&[_][]const u8{ "name", "email", "age", "scores" },
&[_][]const u8{"content"},
};
// For now there is 4 types of data: str, int, float, bool
const MemberTypes = enum { int, float, bool, str };
pub const describe_str = "User (\n name: str,\n email: str,\n)\n\nMessage (\n content: str,\n)\n";
/// User a map of member name / value string to create a new entity of a type
/// The goal being an array of map while parsing files to then return an array of entities and do some fileting on it.
pub fn createEntityFromMap(allocator: std.mem.Allocator, struct_name: []const u8, map: std.StringHashMap([]const u8)) !*Types {
var t = try allocator.create(Types);
if (std.mem.eql(u8, struct_name, "User")) {
const age = try std.fmt.parseInt(i64, map.get("age").?, 10);
const scores = dataParsing.parseArrayInt(allocator, map.get("scores").?);
t.User = &User.init(map.get("name").?, map.get("email").?, age, scores.items);
} else {
return error.UnknowStructName;
}
return t;
}
/// Get the list of all member name for a struct name
pub fn structName2structMembers(struct_name: []const u8) []const []const u8 {
var i: u16 = 0;
while (i < struct_name_list.len) : (i += 1) {
if (std.mem.eql(u8, struct_name_list[i], struct_name)) break;
}
return struct_member_list[i];
}

View File

@ -1,62 +0,0 @@
const std = @import("std");
const UUID = @import("uuid.zig").UUID;
pub const parameter_max_file_size_in_bytes = 500; // THe number of bytes than each file can be before splitting
pub const User = struct {
id: UUID,
name: []const u8,
email: []const u8,
pub fn init(name: []const u8, email: []const u8) User {
return User{ .id = UUID.init(), .name = name, .email = email };
}
};
pub const Message = struct {
id: UUID,
content: []const u8,
pub fn init(content: []const u8) Message {
return Message{ .id = UUID.init(), .content = content };
}
};
pub const Types = union {
User: *const User,
Message: *const Message,
};
// Maybe create a struct like StructMetadata for the string list of member and name, ect
pub const struct_name_list: [2][]const u8 = .{
"User",
"Message",
};
pub const struct_member_list: [2][]const []const u8 = .{
&[_][]const u8{ "name", "email" },
&[_][]const u8{"content"},
};
pub const describe_str = "User (\n name: str,\n email: str,\n)\n\nMessage (\n content: str,\n)\n";
pub fn createEntityFromMap(allocator: std.mem.Allocator, struct_name: []const u8, map: std.StringHashMap([]const u8)) !*Types {
var t = try allocator.create(Types);
if (std.mem.eql(u8, struct_name, "User")) {
t.User = &User.init(map.get("name").?, map.get("email").?);
} else {
return error.UnknowStructName;
}
return t;
}
/// Get the list of all member name for a struct name
pub fn structName2structMembers(struct_name: []const u8) []const []const u8 {
var i: u16 = 0;
while (i < struct_name_list.len) : (i += 1) {
if (std.mem.eql(u8, struct_name_list[i], struct_name)) break;
}
return struct_member_list[i];
}

31
src/metadata.zig Normal file
View File

@ -0,0 +1,31 @@
const std = @import("std");
// Maybe create a struct like StructMetadata for the string list of members and names, etc.
pub const struct_name_list: [2][]const u8 = .{
"User",
"Message",
};
pub const struct_member_list: [2][]const []const u8 = .{
&[_][]const u8{ "name", "email", "age", "scores" },
&[_][]const u8{"content"},
};
/// Get the list of all member names for a struct name
pub fn structName2structMembers(struct_name: []const u8) []const []const u8 {
var i: u16 = 0;
while (i < struct_name_list.len) : (i += 1) if (std.mem.eql(u8, struct_name_list[i], struct_name)) break;
return struct_member_list[i];
}
pub fn isStructNameExists(struct_name: []const u8) bool {
for (struct_name_list) |sn| if (std.mem.eql(u8, sn, struct_name)) return true;
return false;
}
pub fn isMemberNameInStruct(struct_name: []const u8, member_name: []const u8) bool {
for (structName2structMembers(struct_name)) |mn| if (std.mem.eql(u8, mn, member_name)) return true;
return false;
}
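A tiny usage sketch of these helpers, based only on the struct_name_list and struct_member_list defined above (illustrative, not part of this commit):

const std = @import("std");
const metadata = @import("metadata.zig");

test "metadata helpers" {
    try std.testing.expect(metadata.isStructNameExists("User"));
    try std.testing.expect(!metadata.isStructNameExists("Comment"));
    try std.testing.expect(metadata.isMemberNameInStruct("User", "age"));
    try std.testing.expect(!metadata.isMemberNameInStruct("Message", "age"));

    // For "User" this returns .{ "name", "email", "age", "scores" }.
    const members = metadata.structName2structMembers("User");
    try std.testing.expect(members.len == 4);
}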

View File

@ -32,6 +32,7 @@ pub const Parser = struct {
_ = bytes_written;
}
// TODO: Pass this to the DataEngine and generate the metadata.zig file instead
pub fn parse(self: *Parser, toker: *Toker, buffer: []u8) void {
var gpa = std.heap.GeneralPurposeAllocator(.{}){};
const allocator = gpa.allocator();
@ -39,9 +40,9 @@ pub const Parser = struct {
var state: State = .start;
std.fs.cwd().deleteFile("src/dtypes.zig") catch {};
std.fs.cwd().deleteFile("src/metadata.zig") catch {};
self.file = std.fs.cwd().createFile("src/dtypes.zig", .{}) catch |err| {
self.file = std.fs.cwd().createFile("src/metadata.zig", .{}) catch |err| {
std.debug.print("Error when writing dtypes.zig: {}", .{err});
return;
};

View File

@ -18,6 +18,7 @@ pub const Token = struct {
.{ "IN", .keyword_in },
.{ "null", .keyword_null },
.{ "__DESCRIBE__", .keyword__describe__ },
.{ "__INIT__", .keyword__init__ },
});
pub fn getKeyword(bytes: []const u8) ?Tag {
@ -35,6 +36,7 @@ pub const Token = struct {
keyword_in,
keyword_null,
keyword__describe__,
keyword__init__,
string_literal,
number_literal,