Fix, perfect

- Added a new data type, self, that represents the id of the entity itself
- Fixed multi-threading for parsing: each thread now uses its own writer,
and I concat them at the end (see the sketch below)
- Added the id member to the SchemaStruct list
- Other fixes and stuff to go with the rest

Next step: multi-threading for all functions, then finally relationships.
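
A minimal sketch of the per-thread writer pattern, for illustration only (assumptions: plain std.Thread with join() instead of the std.Thread.Pool and the ended_count/error_count polling used in FileEngine, and a dummy worker payload standing in for parseEntitiesOneFile):

    const std = @import("std");

    // Each worker owns its own ArrayList writer, so threads never share a
    // writer and the output for one file can never interleave with another.
    fn worker(list: *std.ArrayList(u8), file_index: usize) void {
        // Dummy payload; FileEngine would parse "{d}.zid" here instead.
        list.writer().print("{{file: {d}}}, ", .{file_index}) catch return;
    }

    pub fn main() !void {
        var gpa = std.heap.GeneralPurposeAllocator(.{}){};
        defer _ = gpa.deinit();
        const allocator = gpa.allocator();

        const file_count = 4; // one thread per data file, like max_file_index + 1

        var lists = try allocator.alloc(std.ArrayList(u8), file_count);
        defer {
            for (lists) |list| list.deinit();
            allocator.free(lists);
        }
        for (lists) |*list| list.* = std.ArrayList(u8).init(allocator);

        var threads: [file_count]std.Thread = undefined;
        for (&threads, 0..) |*t, i| t.* = try std.Thread.spawn(.{}, worker, .{ &lists[i], i });
        for (threads) |t| t.join(); // no shared writer to guard, just wait

        // Concatenate every per-thread buffer into the real writer, in order.
        const stdout = std.io.getStdOut().writer();
        try stdout.writeByte('[');
        for (lists) |list| try stdout.writeAll(list.items);
        try stdout.writeAll("]\n");
    }

Because each thread owns its buffer, writes never race, and concatenating in file-index order keeps the output deterministic even though files finish in any order.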
Adrien Bouvais 2024-11-03 19:18:25 +01:00
parent 72b001b72c
commit 3539dd685c
6 changed files with 191 additions and 128 deletions

View File

@@ -14,6 +14,7 @@ pub const DataType = enum {
     str,
     bool,
     link,
+    self, // self represents the entity itself; it is the id
     date,
     time,
     datetime,

View File

@@ -26,6 +26,7 @@ const Condition = @import("stuffs/filter.zig").Condition;
 const SchemaStruct = @import("schemaParser.zig").Parser.SchemaStruct;
 const SchemaParser = @import("schemaParser.zig").Parser;
+const ZipponError = @import("stuffs/errors.zig").ZipponError;
 const FileEngineError = @import("stuffs/errors.zig").FileEngineError;
 const config = @import("config.zig");
@@ -39,6 +40,7 @@ const log = std.log.scoped(.fileEngine);
 /// Manage everything that is relate to read or write in files
 /// Or even get stats, whatever. If it touch files, it's here
+/// TODO: Keep all struct dirs in a hashmap so I don't need to use an allocPrint every time
 pub const FileEngine = struct {
     allocator: Allocator,
     path_to_ZipponDB_dir: []const u8,
@@ -84,6 +86,33 @@ pub const FileEngine = struct {
         return !std.mem.eql(u8, "", self.path_to_ZipponDB_dir);
     }

+    // For all structs in the schema, add the UUID/file_index pairs into the map
+    pub fn populateAllUUIDToFileIndexMap(self: *FileEngine) FileEngineError!void {
+        for (self.struct_array) |*sstruct| { // Stands for schema struct
+            const max_file_index = try self.maxFileIndex(sstruct.name);
+
+            var path_buff = std.fmt.allocPrint(
+                self.allocator,
+                "{s}/DATA/{s}",
+                .{ self.path_to_ZipponDB_dir, sstruct.name },
+            ) catch return FileEngineError.MemoryError;
+            defer self.allocator.free(path_buff);
+
+            const dir = std.fs.cwd().openDir(path_buff, .{}) catch return FileEngineError.CantOpenDir;
+
+            for (0..(max_file_index + 1)) |i| {
+                self.allocator.free(path_buff);
+                path_buff = std.fmt.allocPrint(self.allocator, "{d}.zid", .{i}) catch return FileEngineError.MemoryError;
+
+                var iter = zid.DataIterator.init(self.allocator, path_buff, dir, sstruct.zid_schema) catch return FileEngineError.ZipponDataError;
+                defer iter.deinit();
+
+                while (iter.next() catch return FileEngineError.ZipponDataError) |row| {
+                    sstruct.uuid_file_index.put(row[0].UUID, i) catch return FileEngineError.MemoryError;
+                }
+            }
+        }
+    }
+
     // --------------------Other--------------------

     pub fn readSchemaFile(allocator: Allocator, sub_path: []const u8, buffer: []u8) FileEngineError!usize {
@@ -231,33 +260,6 @@ pub const FileEngine = struct {
     // --------------------Read and parse files--------------------

-    // For all struct in shema, add the UUID/index_file into the map
-    pub fn populateAllUUIDToFileIndexMap(self: *FileEngine) FileEngineError!void {
-        for (self.struct_array) |*sstruct| { // Stand for schema struct
-            const max_file_index = try self.maxFileIndex(sstruct.name);
-
-            var path_buff = std.fmt.allocPrint(
-                self.allocator,
-                "{s}/DATA/{s}",
-                .{ self.path_to_ZipponDB_dir, sstruct.name },
-            ) catch return FileEngineError.MemoryError;
-            defer self.allocator.free(path_buff);
-
-            const dir = std.fs.cwd().openDir(path_buff, .{}) catch return FileEngineError.CantOpenDir;
-
-            for (0..(max_file_index + 1)) |i| {
-                self.allocator.free(path_buff);
-                path_buff = std.fmt.allocPrint(self.allocator, "{d}.zid", .{i}) catch return FileEngineError.MemoryError;
-
-                var iter = zid.DataIterator.init(self.allocator, path_buff, dir, sstruct.zid_schema) catch return FileEngineError.ZipponDataError;
-                defer iter.deinit();
-
-                while (iter.next() catch return FileEngineError.ZipponDataError) |row| {
-                    sstruct.uuid_file_index.put(row[0].UUID, i) catch return FileEngineError.MemoryError;
-                }
-            }
-        }
-    }
-
     /// Use a struct name to populate a list with all UUID of this struct
     pub fn getAllUUIDList(self: *FileEngine, struct_name: []const u8, uuid_list: *std.ArrayList(UUID)) FileEngineError!void {
         var sstruct = try self.structName2SchemaStruct(struct_name);
@@ -303,35 +305,30 @@ pub const FileEngine = struct {
         filter: ?Filter,
         writer: anytype,
         additional_data: *AdditionalData,
-    ) FileEngineError!void {
+    ) ZipponError!void {
         const sstruct = try self.structName2SchemaStruct(struct_name);
         const max_file_index = try self.maxFileIndex(sstruct.name);

-        var path_buff = std.fmt.allocPrint(
-            self.allocator,
-            "{s}/DATA/{s}",
-            .{ self.path_to_ZipponDB_dir, sstruct.name },
-        ) catch return FileEngineError.MemoryError;
-        defer self.allocator.free(path_buff);
-
-        const dir = std.fs.cwd().openDir(path_buff, .{}) catch return FileEngineError.CantOpenDir;
-
         // If there is no member to find, that mean we need to return all members, so let's populate additional data with all of them
         if (additional_data.member_to_find.items.len == 0) {
             additional_data.populateWithEverything(self.allocator, sstruct.members) catch return FileEngineError.MemoryError;
         }

+        // Open the dir that contains all files
+        const dir = try utils.printOpenDir("{s}/DATA/{s}", .{ self.path_to_ZipponDB_dir, sstruct.name }, .{ .access_sub_paths = false });
+
         // Multi thread stuffs
         var total_entity_found: U64 = U64.init(0);
-        var finished_count: U64 = U64.init(0);
-
-        var single_threaded_arena = std.heap.ArenaAllocator.init(self.allocator);
-        defer single_threaded_arena.deinit();
+        var ended_count: U64 = U64.init(0);
+        var error_count: U64 = U64.init(0);

         var thread_safe_arena: std.heap.ThreadSafeAllocator = .{
-            .child_allocator = single_threaded_arena.allocator(),
+            .child_allocator = self.allocator,
         };
         const arena = thread_safe_arena.allocator();

         // INFO: Pool cant return error so far :/ That is annoying. Maybe I can do something similar myself that hundle error
+        // TODO: Put that in the file engine itself, so I don't need to init the Pool every time
         var thread_pool: Pool = undefined;
         thread_pool.init(Pool.Options{
             .allocator = arena, // this is an arena allocator from `std.heap.ArenaAllocator`
@@ -339,92 +336,128 @@ pub const FileEngine = struct {
         }) catch return FileEngineError.ThreadError;
         defer thread_pool.deinit();

-        writer.writeAll("[") catch return FileEngineError.WriteError;
-        for (0..(max_file_index + 1)) |file_index| { // TODO: Multi thread that
-            self.allocator.free(path_buff);
-            path_buff = std.fmt.allocPrint(self.allocator, "{d}.zid", .{file_index}) catch return FileEngineError.MemoryError;
+        // Do one array and writer for each thread, otherwise they create errors by writing at the same time
+        // Maybe use a fixed-length buffer for speed here
+        var thread_writer_list = self.allocator.alloc(std.ArrayList(u8), max_file_index + 1) catch return FileEngineError.MemoryError;
+        defer {
+            for (thread_writer_list) |list| list.deinit();
+            self.allocator.free(thread_writer_list);
+        }
+
+        // Start parsing all files in multiple threads
+        for (0..(max_file_index + 1)) |file_index| {
+            thread_writer_list[file_index] = std.ArrayList(u8).init(self.allocator);

             thread_pool.spawn(parseEntitiesOneFile, .{
-                writer,
-                path_buff,
+                thread_writer_list[file_index].writer(),
+                file_index,
                 dir,
                 sstruct.zid_schema,
                 filter,
                 additional_data,
                 try self.structName2DataType(struct_name),
                 &total_entity_found,
-                &finished_count,
+                &ended_count,
+                &error_count,
             }) catch return FileEngineError.ThreadError;
         }

-        while (finished_count.load(.acquire) < max_file_index) {
+        // Wait for all threads to either finish or return an error
+        while ((ended_count.load(.acquire) + error_count.load(.acquire)) < max_file_index + 1) {
             std.time.sleep(10_000_000); // Check every 10ms
         }

-        writer.writeAll("]") catch return FileEngineError.WriteError;
+        // Append all writers to each other
+        writer.writeByte('[') catch return FileEngineError.WriteError;
+        for (thread_writer_list) |list| writer.writeAll(list.items) catch return FileEngineError.WriteError;
+        writer.writeByte(']') catch return FileEngineError.WriteError;
     }

+    // TODO: Add a bufferedWriter for performance and to prevent writing to the real writer if an error happened
     fn parseEntitiesOneFile(
         writer: anytype,
-        path: []const u8,
+        file_index: u64,
         dir: std.fs.Dir,
         zid_schema: []zid.DType,
         filter: ?Filter,
         additional_data: *AdditionalData,
         data_types: []const DataType,
         total_entity_found: *U64,
-        finished_count: *U64,
+        ended_count: *U64,
+        error_count: *U64,
     ) void {
         var data_buffer: [BUFFER_SIZE]u8 = undefined;
         var fa = std.heap.FixedBufferAllocator.init(&data_buffer);
         defer fa.reset();
         const allocator = fa.allocator();

-        var iter = zid.DataIterator.init(allocator, path, dir, zid_schema) catch return;
-        defer iter.deinit();
+        var path_buffer: [128]u8 = undefined;
+        const path = std.fmt.bufPrint(&path_buffer, "{d}.zid", .{file_index}) catch |err| {
+            logErrorAndIncrementCount("Error creating file path", err, error_count);
+            return;
+        };

-        blk: while (iter.next() catch return) |row| {
-            if (filter != null) if (!filter.?.evaluate(row)) continue;
+        var iter = zid.DataIterator.init(allocator, path, dir, zid_schema) catch |err| {
+            logErrorAndIncrementCount("Error initializing DataIterator", err, error_count);
+            return;
+        };

-            writer.writeByte('{') catch return;
+        while (iter.next() catch return) |row| {
+            if (filter) |f| if (!f.evaluate(row)) continue;

-            for (additional_data.member_to_find.items) |member| {
-                // write the member name and = sign
-                writer.print("{s}: ", .{member.name}) catch return;
-
-                switch (row[member.index]) {
-                    .Int => |v| writer.print("{d}", .{v}) catch return,
-                    .Float => |v| writer.print("{d}", .{v}) catch return,
-                    .Str => |v| writer.print("\"{s}\"", .{v}) catch return,
-                    .UUID => |v| writer.print("\"{s}\"", .{UUID.format_bytes(v)}) catch return,
-                    .Bool => |v| writer.print("{any}", .{v}) catch return,
-                    .Unix => |v| {
-                        const datetime = DateTime.initUnix(v);
-                        writer.writeByte('"') catch return;
-                        switch (data_types[member.index - 1]) {
-                            .date => datetime.format("YYYY/MM/DD", writer) catch return,
-                            .time => datetime.format("HH:mm:ss.SSSS", writer) catch return,
-                            .datetime => datetime.format("YYYY/MM/DD-HH:mm:ss.SSSS", writer) catch return,
-                            else => unreachable,
-                        }
-                        writer.writeByte('"') catch return;
-                    },
-                    .IntArray, .FloatArray, .StrArray, .UUIDArray, .BoolArray => writeArray(&row[member.index], writer, null) catch return,
-                    .UnixArray => writeArray(&row[member.index], writer, data_types[member.index]) catch return,
-                }
-                writer.writeAll(", ") catch return;
+            if (writeEntity(writer, row, additional_data, data_types)) |_| {
+                if (incrementAndCheckLimit(total_entity_found, additional_data.entity_count_to_find)) break;
+            } else |err| {
+                logErrorAndIncrementCount("Error writing entity", err, error_count);
+                return;
             }
-            writer.writeAll("}, ") catch return;
-            _ = total_entity_found.fetchAdd(1, .monotonic);
-            if (additional_data.entity_count_to_find != 0 and total_entity_found.load(.monotonic) >= additional_data.entity_count_to_find) break :blk;
         }
-        _ = finished_count.fetchAdd(1, .acquire);
+
+        _ = ended_count.fetchAdd(1, .acquire);
     }

+    fn writeEntity(
+        writer: anytype,
+        row: []zid.Data,
+        additional_data: *AdditionalData,
+        data_types: []const DataType,
+    ) !void {
+        try writer.writeByte('{');
+        for (additional_data.member_to_find.items) |member| {
+            try writer.print("{s}: ", .{member.name});
+            try writeValue(writer, row[member.index], data_types[member.index]);
+            try writer.writeAll(", ");
+        }
+        try writer.writeAll("}, ");
+    }
+
+    fn writeValue(writer: anytype, value: zid.Data, data_type: DataType) !void {
+        switch (value) {
+            .Float => |v| try writer.print("{d}", .{v}),
+            .Int => |v| try writer.print("{d}", .{v}),
+            .Str => |v| try writer.print("\"{s}\"", .{v}),
+            .UUID => |v| try writer.print("\"{s}\"", .{UUID.format_bytes(v)}),
+            .Bool => |v| try writer.print("{any}", .{v}),
+            .Unix => |v| try writeDateTime(writer, v, data_type),
+            .IntArray, .FloatArray, .StrArray, .UUIDArray, .BoolArray, .UnixArray => try writeArray(writer, value, data_type),
+        }
+    }
+
+    fn writeDateTime(writer: anytype, unix_time: u64, data_type: DataType) !void {
+        const datetime = DateTime.initUnix(unix_time);
+        try writer.writeByte('"');
+        switch (data_type) {
+            .date => try datetime.format("YYYY/MM/DD", writer),
+            .time => try datetime.format("HH:mm:ss.SSSS", writer),
+            .datetime => try datetime.format("YYYY/MM/DD-HH:mm:ss.SSSS", writer),
+            else => unreachable,
+        }
+        try writer.writeByte('"');
+    }
-    fn writeArray(data: *zid.Data, writer: anytype, datatype: ?DataType) FileEngineError!void {
+    fn writeArray(writer: anytype, data: zid.Data, data_type: DataType) FileEngineError!void {
         writer.writeByte('[') catch return FileEngineError.WriteError;
         var iter = zid.ArrayIterator.init(data) catch return FileEngineError.ZipponDataError;
-        switch (data.*) {
+        switch (data) {
             .IntArray => while (iter.next()) |v| writer.print("{d}, ", .{v.Int}) catch return FileEngineError.WriteError,
             .FloatArray => while (iter.next()) |v| writer.print("{d}", .{v.Float}) catch return FileEngineError.WriteError,
             .StrArray => while (iter.next()) |v| writer.print("\"{s}\"", .{v.Str}) catch return FileEngineError.WriteError,
@@ -434,7 +467,7 @@ pub const FileEngine = struct {
             while (iter.next()) |v| {
                 const datetime = DateTime.initUnix(v.Unix);
                 writer.writeByte('"') catch return FileEngineError.WriteError;
-                switch (datatype.?) {
+                switch (data_type) {
                     .date => datetime.format("YYYY/MM/DD", writer) catch return FileEngineError.WriteError,
                     .time => datetime.format("HH:mm:ss.SSSS", writer) catch return FileEngineError.WriteError,
                     .datetime => datetime.format("YYYY/MM/DD-HH:mm:ss.SSSS", writer) catch return FileEngineError.WriteError,
@@ -448,10 +481,20 @@ pub const FileEngine = struct {
         writer.writeByte(']') catch return FileEngineError.WriteError;
     }

+    fn incrementAndCheckLimit(counter: *U64, limit: u64) bool {
+        const new_count = counter.fetchAdd(1, .monotonic) + 1;
+        return limit != 0 and new_count >= limit;
+    }
+
+    fn logErrorAndIncrementCount(message: []const u8, err: anyerror, error_count: *U64) void {
+        log.err("{s}: {any}", .{ message, err });
+        _ = error_count.fetchAdd(1, .acquire);
+    }
+
     // --------------------Change existing files--------------------

     // TODO: Make it in batch too
-    pub fn writeEntity(
+    pub fn addEntity(
         self: *FileEngine,
         struct_name: []const u8,
         map: std.StringHashMap([]const u8),
@@ -504,10 +547,11 @@ pub const FileEngine = struct {
         defer self.allocator.free(path_buff);
         const dir = std.fs.cwd().openDir(path_buff, .{}) catch return FileEngineError.CantOpenDir;

-        var new_data_buff = self.allocator.alloc(zid.Data, try self.numberOfMemberInStruct(struct_name) + 1) catch return FileEngineError.MemoryError;
+        var new_data_buff = self.allocator.alloc(zid.Data, try self.numberOfMemberInStruct(struct_name)) catch return FileEngineError.MemoryError;
         defer self.allocator.free(new_data_buff);

-        for (try self.structName2structMembers(struct_name), 1..) |member, i| {
+        // Add the new data
+        for (try self.structName2structMembers(struct_name), 0..) |member, i| {
             if (!map.contains(member)) continue;

             const dt = try self.memberName2DataType(struct_name, member);
@@ -535,7 +579,7 @@ pub const FileEngine = struct {
             if (filter == null or filter.?.evaluate(row)) {
                 // Add the unchanged Data in the new_data_buff
                 new_data_buff[0] = row[0];
-                for (try self.structName2structMembers(struct_name), 1..) |member, i| {
+                for (try self.structName2structMembers(struct_name), 0..) |member, i| {
                     if (map.contains(member)) continue;

                     new_data_buff[i] = row[i];
                 }
@@ -638,7 +682,7 @@ pub const FileEngine = struct {
             .time => return zid.Data.initUnix(s2t.parseTime(value).toUnix()),
             .datetime => return zid.Data.initUnix(s2t.parseDatetime(value).toUnix()),
             .str => return zid.Data.initStr(value),
-            .link => {
+            .link, .self => {
                 const uuid = UUID.parse(value) catch return FileEngineError.InvalidUUID;
                 return zid.Data{ .UUID = uuid.bytes };
             },
@@ -704,13 +748,15 @@ pub const FileEngine = struct {
         const members = try self.structName2structMembers(struct_name);
         const types = try self.structName2DataType(struct_name);

-        var datas = allocator.alloc(zid.Data, (members.len + 1)) catch return FileEngineError.MemoryError;
+        var datas = allocator.alloc(zid.Data, (members.len)) catch return FileEngineError.MemoryError;

         const new_uuid = UUID.init();
         datas[0] = zid.Data.initUUID(new_uuid.bytes);

-        for (members, types, 1..) |member, dt, i|
+        for (members, types, 0..) |member, dt, i| {
+            if (i == 0) continue;
             datas[i] = try string2Data(allocator, dt, map.get(member).?);
+        }

         return datas;
     }
@@ -814,7 +860,7 @@ pub const FileEngine = struct {
     }

     pub fn memberName2DataIndex(self: *FileEngine, struct_name: []const u8, member_name: []const u8) FileEngineError!usize {
-        var i: usize = 1; // Start at 1 because there is the id
+        var i: usize = 0;

         for (try self.structName2structMembers(struct_name)) |mn| {
             if (std.mem.eql(u8, mn, member_name)) return i;
@@ -897,9 +943,10 @@ pub const FileEngine = struct {
         const writer = error_message_buffer.writer();

         for (all_struct_member) |mn| {
+            if (std.mem.eql(u8, mn, "id")) continue;
             if (map.contains(mn)) count += 1 else writer.print(" {s},", .{mn}) catch return FileEngineError.WriteError; // TODO: Handle missing print better
         }

-        return ((count == all_struct_member.len) and (count == map.count()));
+        return ((count == all_struct_member.len - 1) and (count == map.count()));
     }
 };

View File

@@ -73,26 +73,25 @@ pub const Parser = struct {
     fn fileDataSchema(allocator: Allocator, dtypes: []DataType) SchemaParserError![]zid.DType {
         var schema = std.ArrayList(zid.DType).init(allocator);

-        schema.append(zid.DType.UUID) catch return SchemaParserError.MemoryError;
         for (dtypes) |dt| {
             schema.append(switch (dt) {
-                DataType.int => zid.DType.Int,
-                DataType.float => zid.DType.Float,
-                DataType.str => zid.DType.Str,
-                DataType.bool => zid.DType.Bool,
-                DataType.link => zid.DType.UUID,
-                DataType.date => zid.DType.Unix,
-                DataType.time => zid.DType.Unix,
-                DataType.datetime => zid.DType.Unix,
-                DataType.int_array => zid.DType.IntArray,
-                DataType.float_array => zid.DType.FloatArray,
-                DataType.str_array => zid.DType.StrArray,
-                DataType.bool_array => zid.DType.BoolArray,
-                DataType.link_array => zid.DType.UUIDArray,
-                DataType.date_array => zid.DType.UnixArray,
-                DataType.time_array => zid.DType.UnixArray,
-                DataType.datetime_array => zid.DType.UnixArray,
+                .int => .Int,
+                .float => .Float,
+                .str => .Str,
+                .bool => .Bool,
+                .link, .self => .UUID,
+                .date => .Unix,
+                .time => .Unix,
+                .datetime => .Unix,
+                .int_array => .IntArray,
+                .float_array => .FloatArray,
+                .str_array => .StrArray,
+                .bool_array => .BoolArray,
+                .link_array => .UUIDArray,
+                .date_array => .UnixArray,
+                .time_array => .UnixArray,
+                .datetime_array => .UnixArray,
             }) catch return SchemaParserError.MemoryError;
         }
         return schema.toOwnedSlice() catch return SchemaParserError.MemoryError;
@@ -132,6 +131,8 @@ pub const Parser = struct {
                 .identifier => {
                     state = .expect_l_paren;
                     name = self.toker.getTokenSlice(token);
+                    member_list.append("id") catch return SchemaParserError.MemoryError;
+                    type_list.append(.self) catch return SchemaParserError.MemoryError;
                 },
                 .eof => state = .end,
                 else => return printError(
@@ -251,35 +252,35 @@
                 .expext_array_type => switch (token.tag) {
                     .type_int => {
                         state = .expect_comma;
-                        type_list.append(DataType.int_array) catch return SchemaParserError.MemoryError;
+                        type_list.append(.int_array) catch return SchemaParserError.MemoryError;
                     },
                     .type_str => {
                         state = .expect_comma;
-                        type_list.append(DataType.str_array) catch return SchemaParserError.MemoryError;
+                        type_list.append(.str_array) catch return SchemaParserError.MemoryError;
                     },
                     .type_float => {
                         state = .expect_comma;
-                        type_list.append(DataType.float_array) catch return SchemaParserError.MemoryError;
+                        type_list.append(.float_array) catch return SchemaParserError.MemoryError;
                     },
                     .type_bool => {
                         state = .expect_comma;
-                        type_list.append(DataType.bool_array) catch return SchemaParserError.MemoryError;
+                        type_list.append(.bool_array) catch return SchemaParserError.MemoryError;
                     },
                     .type_date => {
                         state = .expect_comma;
-                        type_list.append(DataType.date_array) catch return SchemaParserError.MemoryError;
+                        type_list.append(.date_array) catch return SchemaParserError.MemoryError;
                     },
                     .type_time => {
                         state = .expect_comma;
-                        type_list.append(DataType.time_array) catch return SchemaParserError.MemoryError;
+                        type_list.append(.time_array) catch return SchemaParserError.MemoryError;
                     },
                     .type_datetime => {
                         state = .expect_comma;
-                        type_list.append(DataType.datetime_array) catch return SchemaParserError.MemoryError;
+                        type_list.append(.datetime_array) catch return SchemaParserError.MemoryError;
                     },
                     .identifier => {
                         state = .expect_comma;
-                        type_list.append(.link) catch return SchemaParserError.MemoryError;
+                        type_list.append(.link_array) catch return SchemaParserError.MemoryError;
                         links.put(self.toker.getTokenSlice(member_token), self.toker.getTokenSlice(token)) catch return SchemaParserError.MemoryError;
                     },
                     else => return printError(

View File

@@ -20,8 +20,7 @@ pub const AdditionalData = struct {
     }

     pub fn populateWithEverything(self: *AdditionalData, allocator: Allocator, members: [][]const u8) !void {
-        try self.member_to_find.append(AdditionalDataMember.init(allocator, "id", 0));
-        for (members, 1..) |member, i| {
+        for (members, 0..) |member, i| {
             try self.member_to_find.append(AdditionalDataMember.init(allocator, member, i));
         }
     }

View File

@@ -84,3 +84,17 @@ pub fn printError(message: []const u8, err: ZipponError, query: ?[]const u8, sta
     send("{s}", .{buffer.items});
     return err;
 }
+
+pub fn printOpenDir(comptime format: []const u8, args: anytype, options: std.fs.Dir.OpenDirOptions) ZipponError!std.fs.Dir {
+    var buff: [1024 * 16]u8 = undefined; // INFO: Hard-coded buffer size
+    const path = std.fmt.bufPrint(&buff, format, args) catch return ZipponError.CantOpenDir;
+    return std.fs.cwd().openDir(path, options) catch ZipponError.CantOpenDir;
+}
+
+pub fn printOpenFile(comptime format: []const u8, args: anytype, options: std.fs.File.OpenFlags) ZipponError!std.fs.File {
+    var buff: [1024 * 16]u8 = undefined; // INFO: Hard-coded buffer size
+    const path = std.fmt.bufPrint(&buff, format, args) catch return ZipponError.CantOpenFile;
+    return std.fs.cwd().openFile(path, options) catch ZipponError.CantOpenFile;
+}

View File

@@ -353,9 +353,9 @@
             token = self.toker.last_token;
             log.info("Token end of add: {s} {any}\n", .{ self.toker.getTokenSlice(token), token.tag });
             if (token.tag == .identifier and std.mem.eql(u8, self.toker.getTokenSlice(token), "MULTIPLE")) {
-                for (0..1_000_000) |_| self.file_engine.writeEntity(struct_name, data_map, &buff.writer()) catch return ZipponError.CantWriteEntity;
+                for (0..1_000_000) |_| self.file_engine.addEntity(struct_name, data_map, &buff.writer()) catch return ZipponError.CantWriteEntity;
             } else {
-                self.file_engine.writeEntity(struct_name, data_map, &buff.writer()) catch return ZipponError.CantWriteEntity;
+                self.file_engine.addEntity(struct_name, data_map, &buff.writer()) catch return ZipponError.CantWriteEntity;
             }
             send("{s}", .{buff.items});
             state = .end;
@@ -559,6 +559,7 @@
                 .float => .float_literal,
                 .str => .string_literal,
                 .link => .uuid_literal,
+                .self => .uuid_literal,
                 .date => .date_literal,
                 .time => .time_literal,
                 .datetime => .datetime_literal,
@@ -622,7 +623,7 @@
                     .time => ConditionValue.initTime(self.toker.buffer[start_index..token.loc.end]),
                     .datetime => ConditionValue.initDateTime(self.toker.buffer[start_index..token.loc.end]),
                     .bool => ConditionValue.initBool(self.toker.buffer[start_index..token.loc.end]),
-                    else => unreachable, // TODO: Make for link and array =|
+                    else => unreachable, // TODO: Make for link and array =/
                 };
                 state = .end;
             },
@@ -876,7 +877,7 @@
                 .int => .int_literal,
                 .float => .float_literal,
                 .str => .string_literal,
-                .link => .uuid_literal,
+                .link, .self => .uuid_literal,
                 .date => .date_literal,
                 .time => .time_literal,
                 .datetime => .datetime_literal,