Switched to csv

This commit is contained in:
Adrien Bouvais 2024-10-19 23:24:55 +02:00
parent d851c8f908
commit ebb3eec4f4
7 changed files with 166 additions and 99 deletions

View File

@@ -1,6 +1,7 @@
pub const BUFFER_SIZE = 1024 * 50; // Line limit when parsing file
pub const MAX_FILE_SIZE = 5e+4; // 50kb TODO: Put in config file
pub const CSV_DELIMITER = ';';
// Testing
pub const TEST_DATA_DIR = "test_data/v0.1.1"; // Maybe put that directly in the build
pub const TEST_DATA_DIR = "test_data/v0.1.2"; // Maybe put that directly in the build
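Since BUFFER_SIZE (51200 bytes) is larger than MAX_FILE_SIZE (50000 bytes), a whole data file always fits in one parse buffer. A hypothetical comptime guard, not part of this commit, could pin that invariant down:

```zig
const std = @import("std");

const BUFFER_SIZE = 1024 * 50; // 51200 bytes
const MAX_FILE_SIZE = 5e+4; // 50000 bytes

// Hypothetical guard, not in the commit: the parse buffer must be able
// to hold an entire maximally-sized data file.
comptime {
    std.debug.assert(BUFFER_SIZE >= MAX_FILE_SIZE);
}
```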

View File

@@ -17,6 +17,7 @@ const FileEngineError = @import("stuffs/errors.zig").FileEngineError;
const BUFFER_SIZE = @import("config.zig").BUFFER_SIZE;
const MAX_FILE_SIZE = @import("config.zig").MAX_FILE_SIZE;
const CSV_DELIMITER = @import("config.zig").CSV_DELIMITER;
/// Manage everything related to reading or writing files,
/// or even getting stats, whatever. If it touches files, it's here
@@ -214,7 +215,7 @@ pub const FileEngine = struct {
};
const struct_dir = data_dir.openDir(self.locToSlice(struct_item.name), .{}) catch return FileEngineError.CantOpenDir;
_ = struct_dir.createFile("0.zippondata", .{}) catch |err| switch (err) {
_ = struct_dir.createFile("0.csv", .{}) catch |err| switch (err) {
error.PathAlreadyExists => {},
else => return FileEngineError.CantMakeFile,
};
@@ -234,7 +235,7 @@ pub const FileEngine = struct {
var path_buff = std.fmt.allocPrint(
self.allocator,
"{s}/DATA/{s}/{d}.zippondata",
"{s}/DATA/{s}/{d}.csv",
.{ self.path_to_ZipponDB_dir, struct_name, current_index },
) catch return FileEngineError.MemoryError;
defer self.allocator.free(path_buff);
@@ -269,7 +270,11 @@ pub const FileEngine = struct {
current_index += 1;
self.allocator.free(path_buff);
path_buff = std.fmt.allocPrint(self.allocator, "{s}/DATA/{s}/{d}.zippondata", .{ self.path_to_ZipponDB_dir, struct_name, current_index }) catch @panic("Can't create sub_path for init a DataIterator");
path_buff = std.fmt.allocPrint(
self.allocator,
"{s}/DATA/{s}/{d}.csv",
.{ self.path_to_ZipponDB_dir, struct_name, current_index },
) catch @panic("Can't create sub_path for init a DataIterator");
file.close(); // Do I need to close? I think so
file = std.fs.cwd().openFile(path_buff, .{}) catch {
@@ -370,7 +375,7 @@ pub const FileEngine = struct {
var path_buff = std.fmt.allocPrint(
self.allocator,
"{s}/DATA/{s}/{d}.zippondata",
"{s}/DATA/{s}/{d}.csv",
.{ self.path_to_ZipponDB_dir, struct_name, current_index },
) catch return FileEngineError.MemoryError;
defer self.allocator.free(path_buff);
@@ -400,7 +405,7 @@ pub const FileEngine = struct {
self.allocator.free(path_buff);
path_buff = std.fmt.allocPrint(
self.allocator,
"{s}/DATA/{s}/{d}.zippondata",
"{s}/DATA/{s}/{d}.csv",
.{ self.path_to_ZipponDB_dir, struct_name, current_index },
) catch return FileEngineError.MemoryError;
@@ -434,11 +439,13 @@ pub const FileEngine = struct {
var path_buff = std.fmt.allocPrint(
self.allocator,
"{s}/DATA/{s}/{d}.zippondata",
"{s}/DATA/{s}/{d}.csv",
.{ self.path_to_ZipponDB_dir, condition.struct_name, current_index },
) catch return FileEngineError.MemoryError;
defer self.allocator.free(path_buff);
std.debug.print("{s}\n", .{path_buff});
var file = std.fs.cwd().openFile(path_buff, .{}) catch return FileEngineError.CantOpenFile;
defer file.close();
@@ -501,7 +508,7 @@ pub const FileEngine = struct {
self.allocator.free(path_buff);
path_buff = std.fmt.allocPrint(
self.allocator,
"{s}/DATA/{s}/{d}.zippondata",
"{s}/DATA/{s}/{d}.csv",
.{ self.path_to_ZipponDB_dir, condition.struct_name, current_index },
) catch return FileEngineError.MemoryError;
@@ -532,67 +539,71 @@ pub const FileEngine = struct {
const row_value = data_toker.getTokenSlice(token);
switch (condition.operation) {
found = switch (condition.operation) {
.equal => switch (condition.data_type) {
.int => found = compare_value.int == s2t.parseInt(row_value),
.float => found = compare_value.float == s2t.parseFloat(row_value),
.str => found = std.mem.eql(u8, compare_value.str, row_value),
.bool => found = compare_value.bool_ == s2t.parseBool(row_value),
.id => found = compare_value.id.compare(uuid),
.date => found = compare_value.datetime.compareDate(s2t.parseDate(row_value)),
.time => found = compare_value.datetime.compareTime(s2t.parseTime(row_value)),
.datetime => found = compare_value.datetime.compareDatetime(s2t.parseDatetime(row_value)),
.int => compare_value.int == s2t.parseInt(row_value),
.float => compare_value.float == s2t.parseFloat(row_value),
.str => std.mem.eql(u8, compare_value.str, row_value),
.bool => compare_value.bool_ == s2t.parseBool(row_value),
.id => compare_value.id.compare(uuid),
.date => compare_value.datetime.compareDate(s2t.parseDate(row_value)),
.time => compare_value.datetime.compareTime(s2t.parseTime(row_value)),
.datetime => compare_value.datetime.compareDatetime(s2t.parseDatetime(row_value)),
else => unreachable,
},
.different => switch (condition.data_type) {
.int => found = compare_value.int != s2t.parseInt(row_value),
.float => found = compare_value.float != s2t.parseFloat(row_value),
.str => found = !std.mem.eql(u8, compare_value.str, row_value),
.bool => found = compare_value.bool_ != s2t.parseBool(row_value),
.date => found = !compare_value.datetime.compareDate(s2t.parseDate(row_value)),
.time => found = !compare_value.datetime.compareTime(s2t.parseTime(row_value)),
.datetime => found = !compare_value.datetime.compareDatetime(s2t.parseDatetime(row_value)),
.int => compare_value.int != s2t.parseInt(row_value),
.float => compare_value.float != s2t.parseFloat(row_value),
.str => !std.mem.eql(u8, compare_value.str, row_value),
.bool => compare_value.bool_ != s2t.parseBool(row_value),
.date => !compare_value.datetime.compareDate(s2t.parseDate(row_value)),
.time => !compare_value.datetime.compareTime(s2t.parseTime(row_value)),
.datetime => !compare_value.datetime.compareDatetime(s2t.parseDatetime(row_value)),
else => unreachable,
},
.superior_or_equal => switch (condition.data_type) {
.int => found = compare_value.int <= s2t.parseInt(data_toker.getTokenSlice(token)),
.float => found = compare_value.float <= s2t.parseFloat(data_toker.getTokenSlice(token)),
.date => found = compare_value.datetime.toUnix() <= s2t.parseDate(row_value).toUnix(),
.time => found = compare_value.datetime.toUnix() <= s2t.parseTime(row_value).toUnix(),
.datetime => found = compare_value.datetime.toUnix() <= s2t.parseDatetime(row_value).toUnix(),
.int => compare_value.int <= s2t.parseInt(data_toker.getTokenSlice(token)),
.float => compare_value.float <= s2t.parseFloat(data_toker.getTokenSlice(token)),
.date => compare_value.datetime.toUnix() <= s2t.parseDate(row_value).toUnix(),
.time => compare_value.datetime.toUnix() <= s2t.parseTime(row_value).toUnix(),
.datetime => compare_value.datetime.toUnix() <= s2t.parseDatetime(row_value).toUnix(),
else => unreachable,
},
.superior => switch (condition.data_type) {
.int => found = compare_value.int < s2t.parseInt(data_toker.getTokenSlice(token)),
.float => found = compare_value.float < s2t.parseFloat(data_toker.getTokenSlice(token)),
.date => found = compare_value.datetime.toUnix() < s2t.parseDate(row_value).toUnix(),
.time => found = compare_value.datetime.toUnix() < s2t.parseTime(row_value).toUnix(),
.datetime => found = compare_value.datetime.toUnix() < s2t.parseDatetime(row_value).toUnix(),
.int => compare_value.int < s2t.parseInt(data_toker.getTokenSlice(token)),
.float => compare_value.float < s2t.parseFloat(data_toker.getTokenSlice(token)),
.date => compare_value.datetime.toUnix() < s2t.parseDate(row_value).toUnix(),
.time => compare_value.datetime.toUnix() < s2t.parseTime(row_value).toUnix(),
.datetime => compare_value.datetime.toUnix() < s2t.parseDatetime(row_value).toUnix(),
else => unreachable,
},
.inferior_or_equal => switch (condition.data_type) {
.int => found = compare_value.int >= s2t.parseInt(data_toker.getTokenSlice(token)),
.float => found = compare_value.float >= s2t.parseFloat(data_toker.getTokenSlice(token)),
.date => found = compare_value.datetime.toUnix() >= s2t.parseDate(row_value).toUnix(),
.time => found = compare_value.datetime.toUnix() >= s2t.parseTime(row_value).toUnix(),
.datetime => found = compare_value.datetime.toUnix() >= s2t.parseDatetime(row_value).toUnix(),
.int => compare_value.int >= s2t.parseInt(data_toker.getTokenSlice(token)),
.float => compare_value.float >= s2t.parseFloat(data_toker.getTokenSlice(token)),
.date => compare_value.datetime.toUnix() >= s2t.parseDate(row_value).toUnix(),
.time => compare_value.datetime.toUnix() >= s2t.parseTime(row_value).toUnix(),
.datetime => compare_value.datetime.toUnix() >= s2t.parseDatetime(row_value).toUnix(),
else => unreachable,
},
.inferior => switch (condition.data_type) {
.int => found = compare_value.int > s2t.parseInt(data_toker.getTokenSlice(token)),
.float => found = compare_value.float > s2t.parseFloat(data_toker.getTokenSlice(token)),
.date => found = compare_value.datetime.toUnix() > s2t.parseDate(row_value).toUnix(),
.time => found = compare_value.datetime.toUnix() > s2t.parseTime(row_value).toUnix(),
.datetime => found = compare_value.datetime.toUnix() > s2t.parseDatetime(row_value).toUnix(),
.int => compare_value.int > s2t.parseInt(data_toker.getTokenSlice(token)),
.float => compare_value.float > s2t.parseFloat(data_toker.getTokenSlice(token)),
.date => compare_value.datetime.toUnix() > s2t.parseDate(row_value).toUnix(),
.time => compare_value.datetime.toUnix() > s2t.parseTime(row_value).toUnix(),
.datetime => compare_value.datetime.toUnix() > s2t.parseDatetime(row_value).toUnix(),
else => unreachable,
},
// TODO: Do it for other arrays and implement in the query language
else => false,
};
// TODO: Do it for other arrays and implement in the query language
switch (condition.operation) {
.in => switch (condition.data_type) {
.id_array => {
for (compare_value.id_array.items) |elem| {
@@ -601,6 +612,7 @@ pub const FileEngine = struct {
},
else => unreachable,
},
else => {},
}
if (found) uuid_array.append(uuid) catch return FileEngineError.MemoryError;
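The rewrite above folds the per-branch `found = ...` assignments into a single `found = switch (...)` expression, so every branch must yield a bool. A minimal standalone sketch of the pattern, with a hypothetical Op enum standing in for condition.operation:

```zig
const std = @import("std");

const Op = enum { equal, different };

// Each branch yields a value; the switch itself is the expression.
fn matches(op: Op, a: i64, b: i64) bool {
    return switch (op) {
        .equal => a == b,
        .different => a != b,
    };
}

test "switch as expression" {
    try std.testing.expect(matches(.equal, 1, 1));
    try std.testing.expect(matches(.different, 1, 2));
}
```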
@@ -623,7 +635,7 @@ pub const FileEngine = struct {
if (potential_file_index) |file_index| {
path = std.fmt.allocPrint(
self.allocator,
"{s}/DATA/{s}/{d}.zippondata",
"{s}/DATA/{s}/{d}.csv",
.{ self.path_to_ZipponDB_dir, struct_name, file_index },
) catch return FileEngineError.MemoryError;
file = std.fs.cwd().openFile(path, .{ .mode = .read_write }) catch return FileEngineError.CantOpenFile;
@@ -632,20 +644,22 @@ pub const FileEngine = struct {
path = std.fmt.allocPrint(
self.allocator,
"{s}/DATA/{s}/{d}.zippondata",
"{s}/DATA/{s}/{d}.csv",
.{ self.path_to_ZipponDB_dir, struct_name, max_index + 1 },
) catch return FileEngineError.MemoryError;
file = std.fs.cwd().createFile(path, .{}) catch return FileEngineError.CantMakeFile;
}
file.seekFromEnd(0) catch return FileEngineError.WriteError; // Not really a write error tho
file.writer().print("{s}", .{uuid.format_uuid()}) catch return FileEngineError.WriteError;
const writer = file.writer();
writer.print("{s}", .{uuid.format_uuid()}) catch return FileEngineError.WriteError;
for (try self.structName2structMembers(struct_name)) |member_name| {
file.writer().print(" {s}", .{data_map.get(self.locToSlice(member_name)).?}) catch return FileEngineError.WriteError; // Change that for csv
writer.writeByte(CSV_DELIMITER) catch return FileEngineError.WriteError;
writer.print("{s}", .{data_map.get(self.locToSlice(member_name)).?}) catch return FileEngineError.WriteError; // Change that for csv
}
file.writer().print("\n", .{}) catch return FileEngineError.WriteError;
writer.print("\n", .{}) catch return FileEngineError.WriteError;
return uuid;
}
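The insert path now caches `file.writer()` once and emits the UUID followed by one CSV_DELIMITER-prefixed value per member. A self-contained sketch of the resulting row layout, writing into a fixed buffer instead of a file and using sample values from the test data:

```zig
const std = @import("std");

const CSV_DELIMITER = ';';

test "csv row layout: uuid, then delimiter-prefixed values" {
    var buf: [256]u8 = undefined;
    var fbs = std.io.fixedBufferStream(&buf);
    const writer = fbs.writer(); // cached once, as in the commit

    const uuid = "ad854d72-a495-42d1-9e84-d5afd0a85bdb";
    const values = [_][]const u8{ "'Thomas Barr'", "75" };

    try writer.print("{s}", .{uuid});
    for (values) |value| {
        try writer.writeByte(CSV_DELIMITER);
        try writer.print("{s}", .{value});
    }
    try writer.writeByte('\n');

    try std.testing.expectEqualStrings(
        "ad854d72-a495-42d1-9e84-d5afd0a85bdb;'Thomas Barr';75\n",
        fbs.getWritten(),
    );
}
```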
@@ -661,14 +675,14 @@ pub const FileEngine = struct {
var path_buff = std.fmt.allocPrint(
self.allocator,
"{s}/DATA/{s}/{d}.zippondata",
"{s}/DATA/{s}/{d}.csv",
.{ self.path_to_ZipponDB_dir, struct_name, current_file_index },
) catch return FileEngineError.MemoryError;
defer self.allocator.free(path_buff);
var path_buff2 = std.fmt.allocPrint(
self.allocator,
"{s}/DATA/{s}/{d}.zippondata",
"{s}/DATA/{s}/{d}.csv",
.{ self.path_to_ZipponDB_dir, struct_name, current_file_index },
) catch return FileEngineError.MemoryError;
defer self.allocator.free(path_buff2);
@@ -678,7 +692,7 @@ pub const FileEngine = struct {
self.allocator.free(path_buff);
path_buff = std.fmt.allocPrint(
self.allocator,
"{s}/DATA/{s}/{d}.zippondata.new",
"{s}/DATA/{s}/{d}.csv.new",
.{ self.path_to_ZipponDB_dir, struct_name, current_file_index },
) catch return FileEngineError.MemoryError;
@@ -692,10 +706,11 @@ pub const FileEngine = struct {
var buffered = std.io.bufferedReader(old_file.reader());
var reader = buffered.reader();
var founded = false;
const number_of_member_in_struct = try self.numberOfMemberInStruct(struct_name);
while (true) {
output_fbs.reset();
reader.streamUntilDelimiter(writer, ' ', null) catch |err| switch (err) {
reader.streamUntilDelimiter(writer, CSV_DELIMITER, null) catch |err| switch (err) {
error.EndOfStream => {
// At end of file, check whether all files were parsed; if not, point the reader at the next file
// TODO: Be able to take an array of file indexes from the B+Tree and only parse those
@@ -705,14 +720,14 @@ pub const FileEngine = struct {
self.allocator.free(path_buff);
path_buff = std.fmt.allocPrint(
self.allocator,
"{s}/DATA/{s}/{d}.zippondata",
"{s}/DATA/{s}/{d}.csv",
.{ self.path_to_ZipponDB_dir, struct_name, current_file_index },
) catch return FileEngineError.MemoryError;
self.allocator.free(path_buff2);
path_buff2 = std.fmt.allocPrint(
self.allocator,
"{s}/DATA/{s}/{d}.zippondata.new",
"{s}/DATA/{s}/{d}.csv.new",
.{ self.path_to_ZipponDB_dir, struct_name, current_file_index },
) catch return FileEngineError.MemoryError;
@@ -727,12 +742,12 @@ pub const FileEngine = struct {
self.allocator.free(path_buff);
path_buff = std.fmt.allocPrint(
self.allocator,
"{s}/DATA/{s}/{d}.zippondata",
"{s}/DATA/{s}/{d}.csv",
.{ self.path_to_ZipponDB_dir, struct_name, current_file_index },
) catch return FileEngineError.MemoryError;
self.allocator.free(path_buff2);
path_buff2 = std.fmt.allocPrint(self.allocator, "{s}/DATA/{s}/{d}.zippondata.new", .{
path_buff2 = std.fmt.allocPrint(self.allocator, "{s}/DATA/{s}/{d}.csv.new", .{
self.path_to_ZipponDB_dir,
struct_name,
current_file_index,
@@ -749,7 +764,9 @@ pub const FileEngine = struct {
else => return FileEngineError.StreamError,
};
new_file.writeAll(output_fbs.getWritten()) catch return FileEngineError.WriteError;
const new_writer = new_file.writer();
new_writer.writeAll(output_fbs.getWritten()) catch return FileEngineError.WriteError;
// This is the uuid of the current row
const uuid = UUID.parse(output_fbs.getWritten()[0..36]) catch return FileEngineError.InvalidUUID;
@@ -766,12 +783,12 @@ pub const FileEngine = struct {
if (!founded) {
// stream until the delimiter
output_fbs.reset();
new_file.writeAll(" ") catch return FileEngineError.WriteError;
new_writer.writeByte(CSV_DELIMITER) catch return FileEngineError.WriteError;
reader.streamUntilDelimiter(writer, '\n', null) catch return FileEngineError.WriteError;
new_file.writeAll(output_fbs.getWritten()) catch return FileEngineError.WriteError;
new_file.writeAll("\n") catch return FileEngineError.WriteError;
new_writer.writeAll(output_fbs.getWritten()) catch return FileEngineError.WriteError;
new_writer.writeAll("\n") catch return FileEngineError.WriteError;
} else {
for (try self.structName2structMembers(struct_name), try self.structName2DataType(struct_name)) |member_name, member_type| {
for (try self.structName2structMembers(struct_name), try self.structName2DataType(struct_name), 0..) |member_name, member_type, i| {
// For all columns in the right order, check if the key is in the map; if so, use it to write the new value, otherwise keep the old file's value
output_fbs.reset();
switch (member_type) {
@@ -779,45 +796,51 @@ pub const FileEngine = struct {
reader.streamUntilDelimiter(writer, '\'', null) catch return FileEngineError.StreamError;
reader.streamUntilDelimiter(writer, '\'', null) catch return FileEngineError.StreamError;
},
.int_array, .float_array, .bool_array, .id_array => {
.int_array, .float_array, .bool_array, .id_array, .date_array, .time_array, .datetime_array => {
reader.streamUntilDelimiter(writer, ']', null) catch return FileEngineError.StreamError;
},
.str_array => {
reader.streamUntilDelimiter(writer, ']', null) catch return FileEngineError.StreamError;
}, // FIXME: If the string itself contains ], this will be a problem
else => {
reader.streamUntilDelimiter(writer, ' ', null) catch return FileEngineError.StreamError;
reader.streamUntilDelimiter(writer, ' ', null) catch return FileEngineError.StreamError;
reader.streamUntilDelimiter(writer, CSV_DELIMITER, null) catch return FileEngineError.StreamError;
},
}
new_writer.writeByte(CSV_DELIMITER) catch return FileEngineError.WriteError;
if (new_data_map.contains(self.locToSlice(member_name))) {
// Write the new data
new_file.writer().print(" {s}", .{new_data_map.get(self.locToSlice(member_name)).?}) catch return FileEngineError.WriteError;
new_writer.print("{s}", .{new_data_map.get(self.locToSlice(member_name)).?}) catch return FileEngineError.WriteError;
} else {
// Write the old data
switch (member_type) {
.str => new_file.writeAll(" \'") catch return FileEngineError.WriteError,
.int_array => new_file.writeAll(" ") catch return FileEngineError.WriteError,
.float_array => new_file.writeAll(" ") catch return FileEngineError.WriteError,
.str_array => new_file.writeAll(" ") catch return FileEngineError.WriteError,
.bool_array => new_file.writeAll(" ") catch return FileEngineError.WriteError,
.id_array => new_file.writeAll(" ") catch return FileEngineError.WriteError,
else => new_file.writeAll(" ") catch return FileEngineError.WriteError,
.str => new_writer.writeByte('\'') catch return FileEngineError.WriteError,
else => {},
}
new_file.writeAll(output_fbs.getWritten()) catch return FileEngineError.WriteError;
new_writer.writeAll(output_fbs.getWritten()) catch return FileEngineError.WriteError;
switch (member_type) {
.str => new_file.writeAll("\'") catch return FileEngineError.WriteError,
.int_array, .float_array, .bool_array, .id_array => new_file.writeAll("]") catch return FileEngineError.WriteError,
.str => {
new_writer.writeByte('\'') catch return FileEngineError.WriteError;
},
.int_array, .float_array, .bool_array, .id_array, .date_array, .str_array, .time_array, .datetime_array => {
new_writer.writeByte(']') catch return FileEngineError.WriteError;
},
else => {},
}
}
if (i == number_of_member_in_struct - 1) continue;
switch (member_type) {
.str, .int_array, .float_array, .bool_array, .id_array, .date_array, .str_array, .time_array, .datetime_array => {
reader.streamUntilDelimiter(writer, CSV_DELIMITER, null) catch return FileEngineError.StreamError;
},
else => {},
}
}
reader.streamUntilDelimiter(writer, '\n', null) catch return FileEngineError.WriteError;
new_file.writeAll("\n") catch return FileEngineError.WriteError;
reader.streamUntilDelimiter(writer, '\n', null) catch return FileEngineError.StreamError;
new_writer.writeAll("\n") catch return FileEngineError.WriteError;
}
}
}
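The update loop now zips member names and types together with a running index (`for (..., ..., 0..) |member_name, member_type, i|`) so the delimiter read can be skipped after the last column. A minimal sketch of that loop shape with made-up slices:

```zig
const std = @import("std");

// Sketch of the loop shape the commit introduces: two slices zipped
// with a running index via `0..`, so the last column can be detected.
test "zipped for loop with index" {
    const names = [_][]const u8{ "name", "age", "email" };
    const types = [_][]const u8{ "str", "int", "str" };

    var seen: usize = 0;
    for (names, types, 0..) |name, ty, i| {
        _ = name;
        _ = ty;
        if (i == names.len - 1) seen = i; // last column: skip the delimiter read
    }
    try std.testing.expectEqual(@as(usize, 2), seen);
}
```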
@@ -830,14 +853,14 @@ pub const FileEngine = struct {
var path_buff = std.fmt.allocPrint(
self.allocator,
"{s}/DATA/{s}/{d}.zippondata",
"{s}/DATA/{s}/{d}.csv",
.{ self.path_to_ZipponDB_dir, struct_name, current_file_index },
) catch return FileEngineError.MemoryError;
defer self.allocator.free(path_buff);
var path_buff2 = std.fmt.allocPrint(
self.allocator,
"{s}/DATA/{s}/{d}.zippondata",
"{s}/DATA/{s}/{d}.csv",
.{ self.path_to_ZipponDB_dir, struct_name, current_file_index },
) catch return FileEngineError.MemoryError;
defer self.allocator.free(path_buff2);
@@ -847,7 +870,7 @@ pub const FileEngine = struct {
self.allocator.free(path_buff);
path_buff = std.fmt.allocPrint(
self.allocator,
"{s}/DATA/{s}/{d}.zippondata.new",
"{s}/DATA/{s}/{d}.csv.new",
.{ self.path_to_ZipponDB_dir, struct_name, current_file_index },
) catch return FileEngineError.MemoryError;
@@ -865,7 +888,7 @@ pub const FileEngine = struct {
while (true) {
output_fbs.reset();
reader.streamUntilDelimiter(writer, ' ', null) catch |err| switch (err) {
reader.streamUntilDelimiter(writer, CSV_DELIMITER, null) catch |err| switch (err) {
error.EndOfStream => {
// At end of file, check whether all files were parsed; if not, point the reader at the next file
// TODO: Be able to take an array of file indexes from the B+Tree and only parse those
@@ -875,14 +898,14 @@ pub const FileEngine = struct {
self.allocator.free(path_buff);
path_buff = std.fmt.allocPrint(
self.allocator,
"{s}/DATA/{s}/{d}.zippondata",
"{s}/DATA/{s}/{d}.csv",
.{ self.path_to_ZipponDB_dir, struct_name, current_file_index },
) catch return FileEngineError.MemoryError;
self.allocator.free(path_buff2);
path_buff2 = std.fmt.allocPrint(
self.allocator,
"{s}/DATA/{s}/{d}.zippondata.new",
"{s}/DATA/{s}/{d}.csv.new",
.{ self.path_to_ZipponDB_dir, struct_name, current_file_index },
) catch return FileEngineError.MemoryError;
@@ -897,14 +920,14 @@ pub const FileEngine = struct {
self.allocator.free(path_buff);
path_buff = std.fmt.allocPrint(
self.allocator,
"{s}/DATA/{s}/{d}.zippondata",
"{s}/DATA/{s}/{d}.csv",
.{ self.path_to_ZipponDB_dir, struct_name, current_file_index },
) catch return FileEngineError.MemoryError;
self.allocator.free(path_buff2);
path_buff2 = std.fmt.allocPrint(
self.allocator,
"{s}/DATA/{s}/{d}.zippondata.new",
"{s}/DATA/{s}/{d}.csv.new",
.{ self.path_to_ZipponDB_dir, struct_name, current_file_index },
) catch return FileEngineError.MemoryError;
@@ -922,6 +945,8 @@ pub const FileEngine = struct {
},
};
const new_writer = new_file.writer();
// This is the uuid of the current row
const uuid = UUID.parse(output_fbs.getWritten()[0..36]) catch return FileEngineError.InvalidUUID;
founded = false;
@@ -937,13 +962,13 @@ pub const FileEngine = struct {
if (!founded) {
// stream until the delimiter
new_file.writeAll(output_fbs.getWritten()) catch return FileEngineError.WriteError;
new_writer.writeAll(output_fbs.getWritten()) catch return FileEngineError.WriteError;
output_fbs.reset();
new_file.writeAll(" ") catch return FileEngineError.WriteError;
new_writer.writeByte(CSV_DELIMITER) catch return FileEngineError.WriteError;
reader.streamUntilDelimiter(writer, '\n', null) catch return FileEngineError.WriteError;
new_file.writeAll(output_fbs.getWritten()) catch return FileEngineError.WriteError;
new_file.writeAll("\n") catch return FileEngineError.WriteError;
new_writer.writeAll(output_fbs.getWritten()) catch return FileEngineError.WriteError;
new_writer.writeByte('\n') catch return FileEngineError.WriteError;
} else {
reader.streamUntilDelimiter(writer, '\n', null) catch return FileEngineError.WriteError;
}
@@ -970,7 +995,7 @@ pub const FileEngine = struct {
while (iter.next() catch return FileEngineError.DirIterError) |entry| {
const file_stat = member_dir.statFile(entry.name) catch return FileEngineError.FileStatError;
if (file_stat.size < MAX_FILE_SIZE) {
return std.fmt.parseInt(usize, entry.name[0..(entry.name.len - 11)], 10) catch return FileEngineError.InvalidFileIndex; // TODO: Change the slice when we start using CSV
return std.fmt.parseInt(usize, entry.name[0..(entry.name.len - 4)], 10) catch return FileEngineError.InvalidFileIndex; // TODO: Change the slice when we start using CSV
}
}
return null;
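With the rename, the file index is recovered by slicing off the 4-byte ".csv" suffix rather than the 11-byte ".zippondata" one. A sketch of that parse with a hypothetical suffix guard (the guard is not in the commit):

```zig
const std = @import("std");

// Hypothetical helper, not in the commit: parse "12.csv" -> 12,
// rejecting names without the expected suffix.
fn fileIndex(name: []const u8) !usize {
    if (!std.mem.endsWith(u8, name, ".csv")) return error.NotACsvFile;
    return std.fmt.parseInt(usize, name[0 .. name.len - 4], 10);
}

test "parse file index from csv name" {
    try std.testing.expectEqual(@as(usize, 12), try fileIndex("12.csv"));
    try std.testing.expectError(error.NotACsvFile, fileIndex("schema.txt"));
}
```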
@@ -1055,6 +1080,17 @@ pub const FileEngine = struct {
return self.struct_array.items[i].types.items;
}
/// Return the number of members of a struct
fn numberOfMemberInStruct(self: *FileEngine, struct_name: []const u8) FileEngineError!usize {
var i: usize = 0;
for (try self.structName2structMembers(struct_name)) |_| {
i += 1;
}
return i;
}
/// Check if the name of a struct is in the current schema
pub fn isStructNameExists(self: *FileEngine, struct_name: []const u8) bool {
var i: u16 = 0;
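Side note on numberOfMemberInStruct above: since structName2structMembers is iterated like a slice, the counting loop could likely be replaced by the slice's .len; a trivial illustration:

```zig
const std = @import("std");

test "slice length instead of a counting loop" {
    const members = [_][]const u8{ "name", "age", "email" };
    const slice: []const []const u8 = &members;

    // Same result as iterating and incrementing a counter.
    try std.testing.expectEqual(@as(usize, 3), slice.len);
}
```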

View File

@@ -269,9 +269,9 @@ pub fn runQuery(null_term_query_str: [:0]const u8, file_engine: *FileEngine) void {
}
}
parser.parse() catch |err| switch (err) {
error.SynthaxError => {},
else => {},
parser.parse() catch |err| {
std.debug.print("Error: {any}\n", .{err});
@panic("=9");
};
}

View File

@@ -51,7 +51,8 @@ pub const Tokenizer = struct {
pub fn next(self: *Tokenizer) Token {
// That's ugly but it works
if (self.buffer[self.index] == ' ') self.index += 1;
if (self.buffer[self.index] == ';') self.index += 1; // Hardcoded delimiter
if (self.buffer[self.index] == ' ') self.index += 1; // Hardcoded delimiter
var state: State = .start;
var result: Token = .{

View File

@@ -29,7 +29,7 @@ def run(process, command):
from tqdm import tqdm
for i in tqdm(range(100)):
for i in tqdm(range(10)):
process = subprocess.Popen(
["zig-out/bin/zippon"],
stdin=subprocess.PIPE,

View File

@@ -0,0 +1,19 @@
ad854d72-a495-42d1-9e84-d5afd0a85bdb;'Thomas Barr';75;'richardjeffrey@example.org';2004/03/24;1973/05/12-06:11:49.402846;01:36:23.182050;[11 4 72];[]
5644e3e0-2aa4-43cd-ba3a-27bda3da2c78;'Candace Hoover';61;'fpreston@example.org';2022/02/14;1981/10/08-04:53:28.203478;09:36:00.607259;[48];[]
73d8883e-b9c9-4615-8e98-cda7549be469;'Teresa Barnett';8;'mitchelldavid@example.net';2007/01/17;1976/06/26-12:21:44.257940;22:54:55.671784;[];[]
f1e66a48-065c-4419-b263-603fa43ef282;'Roberto Perkins';30;'samantha08@example.org';2021/01/23;1987/03/31-14:26:22.379328;17:46:23.039578;[75];[]
14b78b9e-3e83-41fc-a175-f50c40c9abb1;'Mary Ford';92;'jacquelinejohnson@example.net';1971/03/03;1970/02/25-13:14:36.542439;06:51:18.587125;[43];[]
96e1ed91-e159-42e2-b310-db890e541f9d;'Betty Nelson';84;'lisalawrence@example.net';1991/10/27;2001/09/11-10:04:29.907911;01:49:00.702671;[62 53 98 51 71];[]
5495efc6-e35c-405d-ac1a-469d7792aa39;'Jasmine Moore';32;'fred90@example.net';1970/04/15;1986/11/17-03:17:30.946545;03:04:57.600928;[-1 33 16 3 99 61 57];[]
6b281af0-4a68-42a1-ac94-453bb0f00c6b;'Carlos Mckay';56;'rrodriguez@example.net';2019/10/17;2010/03/12-16:42:00.463897;09:10:27.083629;[29 31 81 94 2 33 30];[]
6695f24d-0e06-4f6a-964d-908ab1ba28af;'Benjamin Perkins';96;'lcoleman@example.com';2003/06/20;2003/06/22-10:49:56.595647;03:23:20.199569;[15 85];[]
198311c9-e24b-40f7-925c-81d4e3c991f3;'Alison Walls';31;'david68@example.com';1999/04/11;2020/12/12-03:36:47.659589;18:09:29.650812;[];[]
64fc7b4a-0f3b-40a4-b48c-0c434f36c519;'Justin Sanders';99;'kenneth82@example.com';2018/12/26;2019/12/25-10:42:35.139683;04:13:35.694618;[];[]
36aa4acb-22cf-4b46-b149-b59955103d45;'Katherine Pierce';95;'leecervantes@example.com';1984/09/09;1997/06/29-07:32:07.899469;19:41:22.891744;[19 31 97 14];[]
b2da9c44-b66d-4d1a-8d14-12eecb32b167;'Ryan Holmes';60;'eramsey@example.com';1992/02/14;1997/03/07-09:31:27.186316;01:30:21.142382;[];[]
bed93461-295a-4f61-85f9-da51ccad135e;'Brenda Clark';49;'donna76@example.org';1975/04/25;1993/04/26-09:59:47.795232;11:04:00.306824;[];[]
7b408ab7-fbac-42d0-800d-decabc221f21;'Danny Rush';100;'benderholly@example.net';1970/05/26;1987/10/23-03:01:10.020161;01:10:10.023141;[8];[]
faf97a04-d448-44c0-9d18-5d5e99e28282;'Steven Caldwell';11;'kevin52@example.org';1994/01/07;1985/02/05-06:31:57.487465;06:53:39.729573;[];[]
f20fb937-805b-4159-8922-66ee9cd2d0e2;'Ronnie Peck';10;'cherylchandler@example.net';2009/05/07;2007/12/16-11:51:27.218355;08:52:37.577366;[40 61];[]
c1ea5791-5022-40f3-9f77-39dd475249c9;'Candace Gallegos';65;'morganstephen@example.org';2020/07/02;2018/06/11-06:05:19.667902;08:29:57.369812;[55 47 39 34 56 79 77 54 4];[]
12267ebf-4877-485e-b1a0-fb91c0253236;'Mark Murphy';27;'damon68@example.net';2002/09/05;1984/11/26-02:13:54.492933;09:05:11.668720;[90 74 17 90 92 14 7 92 78 55];[]

View File

@@ -0,0 +1,10 @@
User (
name: str,
age: int,
email: str,
bday: date,
last_order: datetime,
a_time: time,
scores: []int,
friends: []str,
)
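Each row in the test data above is the record UUID followed by these eight fields in declaration order, separated by ';': nine columns in total. A quick check against the first sample row, assuming a Zig std that provides splitScalar:

```zig
const std = @import("std");

test "sample row: uuid + 8 schema fields = 9 csv columns" {
    const row = "ad854d72-a495-42d1-9e84-d5afd0a85bdb;'Thomas Barr';75;" ++
        "'richardjeffrey@example.org';2004/03/24;" ++
        "1973/05/12-06:11:49.402846;01:36:23.182050;[11 4 72];[]";

    var it = std.mem.splitScalar(u8, row, ';');
    var columns: usize = 0;
    while (it.next()) |_| columns += 1;

    try std.testing.expectEqual(@as(usize, 9), columns);
}
```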