Base reparsing

This doesn't work yet, but I implemented the reparsing of the files to
return relationships.

So GRAB User [name, friends] should return all the info of each friend
and not just the UUIDs of those friends like before.
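
For illustration, a minimal sketch with hypothetical data (names and
UUIDs invented for the example):

run "GRAB User [name, friends]"
// Before: [{"name": "Bob", "friends": ["a1b2c3d4-..."]}]
// After:  [{"name": "Bob", "friends": [{"name": "Alice"}]}]
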
This commit is contained in:
Adrien Bouvais 2024-12-27 12:42:51 +01:00
parent 0127daa330
commit 3b47007ca4
7 changed files with 223 additions and 110 deletions

View File

@ -1,5 +1,4 @@
pub const BUFFER_SIZE = 1024 * 10; // Used a bit everywhere. The size for the schema for example. 10kB
pub const OUT_BUFFER_SIZE = 1024 * 1024 * 16; // Mostly used in the fileEngine for parsing; basically the limit of what can be written to be sent. 16MB
pub const MAX_FILE_SIZE = 1024 * 1024; // 1MB
pub const CPU_CORE = 16;
@ -14,7 +13,7 @@ pub const RESET_LOG_AT_RESTART = false; // If true, will reset the log file at t
// Help message
pub const HELP_MESSAGE = struct {
pub const main: []const u8 =
\\Welcome to ZipponDB v0.1.1!
\\Welcome to ZipponDB v0.2!
\\
\\Available commands:
\\run To run a query.
@ -22,24 +21,24 @@ pub const HELP_MESSAGE = struct {
\\schema Initialize the database schema.
\\quit Stop the CLI with memory safety.
\\
\\For more information: https://github.com/MrBounty/ZipponDB
\\For more information: https://mrbounty.github.io/ZipponDB/cli
\\
;
pub const db: []const u8 =
\\Available commands:
\\new Create a new database using a path to a sub folder.
\\use Select another ZipponDB folder to use as the database.
\\use Select an existing folder to use as the database.
\\metrics Print some metrics of the current database.
\\
\\For more information: https://github.com/MrBounty/ZipponDB
\\For more information: https://mrbounty.github.io/ZipponDB/cli
\\
;
pub const schema: []const u8 =
\\Available commands:
\\describe Print the schema used by the currently selected database.
\\describe Print the schema used by the current database.
\\init Take the path to a schema file and initialize the database.
\\
\\For more information: https://github.com/MrBounty/ZipponDB
\\For more information: https://mrbounty.github.io/ZipponDB/cli
\\
;
pub const no_engine: []const u8 =
@ -50,17 +49,17 @@ pub const HELP_MESSAGE = struct {
\\
\\You can also set the environment variable ZIPPONDB_PATH to the desired path.
\\
\\For more information: https://github.com/MrBounty/ZipponDB
\\For more information: https://mrbounty.github.io/ZipponDB/cli
\\
;
pub const no_schema: []const u8 =
\\A database was found here {s} but no schema found inside.
\\To start using the database, you need to attach a schema to it using a schema file.
\\A database was found here `{s}` but no schema found inside.
\\To start a database, you need to attach a schema to it using a schema file.
\\By using 'schema init path/to/schema'. For more information on how to create a schema: TODO add link
\\
\\You can also set the environment variable ZIPPONDB_SCHEMA to the path to a schema file.
\\
\\For more information: https://github.com/MrBounty/ZipponDB
\\For more information: https://mrbounty.github.io/ZipponDB/Schema
\\
;
};

View File

@ -2,6 +2,7 @@ const std = @import("std");
const utils = @import("stuffs/utils.zig");
const zid = @import("ZipponData");
const AdditionalData = @import("stuffs/additionalData.zig").AdditionalData;
const JsonString = @import("stuffs/relationMap.zig").JsonString;
const dtype = @import("dtype");
const DataType = dtype.DataType;
const DateTime = dtype.DateTime;
@ -15,7 +16,7 @@ pub const EntityWriter = struct {
pub fn writeEntityTable(
writer: anytype,
row: []zid.Data,
additional_data: *AdditionalData,
additional_data: AdditionalData,
data_types: []const DataType,
) !void {
try writer.writeAll("| ");
@ -29,7 +30,7 @@ pub const EntityWriter = struct {
pub fn writeEntityJSON(
writer: anytype,
row: []zid.Data,
additional_data: *AdditionalData,
additional_data: AdditionalData,
data_types: []const DataType,
) !void {
try writer.writeByte('{');
@ -47,6 +48,10 @@ pub const EntityWriter = struct {
.Int => |v| try writer.print("{d}", .{v}),
.Str => |v| try writer.print("\"{s}\"", .{v}),
.UUID => |v| {
if (data_type == .self) {
try writer.print("\"{s}\"", .{UUID.format_bytes(v)});
return;
}
const uuid = try UUID.parse("00000000-0000-0000-0000-000000000000"); // Maybe pass that comptime to prevent parsing it every time
if (!std.meta.eql(v, uuid.bytes)) {
try writer.print("\"{{|<{s}>|}}\"", .{v});
@ -95,10 +100,10 @@ pub const EntityWriter = struct {
writer.writeByte(']') catch return ZipponError.WriteError;
}
/// TODO:
/// Take a string in the JSON format and look for {|<[16]u8>|}, then look into the map and check if it can find this UUID
/// If it finds it, it will replace the {|<[16]u8>|} with the value
pub fn updateWithRelation(writer: anytype, input: []const u8, to_add: std.AutoHashMap([16]u8, []const u8)) ZipponError!void {
pub fn updateWithRelation(writer: anytype, input: []const u8, to_add: std.AutoHashMap([16]u8, JsonString)) ZipponError!void {
var uuid_bytes: [16]u8 = undefined;
var start: usize = 0;
while (std.mem.indexOf(u8, input[start..], "{|<[")) |pos| {
const pattern_start = start + pos;
@ -106,14 +111,18 @@ pub const EntityWriter = struct {
const full_pattern_end = pattern_start + pattern_end + 4;
// Write the text before the pattern
try writer.writeAll(input[start..pattern_start]);
writer.writeAll(input[start..pattern_start]) catch return ZipponError.WriteError;
const uuid_bytes = input[pattern_start + 3 .. full_pattern_end - 3];
writer.writeAll(to_add.get(uuid_bytes).?);
for (pattern_start + 3..pattern_end - 3, 0..) |i, j| uuid_bytes[j] = input[i];
if (to_add.get(uuid_bytes)) |json_string| {
writer.writeAll(json_string.slice) catch return ZipponError.WriteError;
} else {
writer.writeAll(input[pattern_start..pattern_end]) catch return ZipponError.WriteError;
}
start = full_pattern_end;
}
// Write any remaining text
try writer.writeAll(input[start..]);
writer.writeAll(input[start..]) catch return ZipponError.WriteError;
}
};
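
To illustrate updateWithRelation, a minimal sketch with a hypothetical
input (<bytes> stands for the 16 raw UUID bytes inside the pattern):

input:  [{"name": "Bob", "friends": "{|<bytes>|}"}]
to_add: <bytes> -> {"name": "Alice"}
output: [{"name": "Bob", "friends": {"name": "Alice"}}]

If a UUID is missing from the map, the pattern is written back unchanged
(the else branch above).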

View File

@ -31,9 +31,10 @@ const MAX_FILE_SIZE = config.MAX_FILE_SIZE;
const RESET_LOG_AT_RESTART = config.RESET_LOG_AT_RESTART;
const CPU_CORE = config.CPU_CORE;
// TODO: Cut this into smaller modules: core, file_management, data_operation, utils
const log = std.log.scoped(.fileEngine);
var parsing_buffer: [OUT_BUFFER_SIZE]u8 = undefined; // Maybe use an arena but this is faster
var path_buffer: [1024]u8 = undefined;
var path_to_ZipponDB_dir_buffer: [1024]u8 = undefined;
@ -162,9 +163,9 @@ pub const FileEngine = struct {
/// Use a struct name to populate a list with all UUIDs of this struct
/// TODO: Multi thread that too
pub fn getNumberOfEntity(self: *FileEngine, struct_name: []const u8) ZipponError!usize {
var fa = std.heap.FixedBufferAllocator.init(&parsing_buffer);
fa.reset();
const allocator = fa.allocator();
var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena.deinit();
const allocator = arena.allocator();
const sstruct = try self.schema_engine.structName2SchemaStruct(struct_name);
const max_file_index = try self.maxFileIndex(sstruct.name);
@ -193,9 +194,9 @@ pub const FileEngine = struct {
sstruct: SchemaStruct,
map: *UUIDFileIndex,
) ZipponError!void {
var fa = std.heap.FixedBufferAllocator.init(&parsing_buffer);
fa.reset();
const allocator = fa.allocator();
var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena.deinit();
const allocator = arena.allocator();
const max_file_index = try self.maxFileIndex(sstruct.name);
@ -285,9 +286,9 @@ pub const FileEngine = struct {
map: *std.AutoHashMap(UUID, void),
additional_data: *AdditionalData,
) ZipponError!void {
var fa = std.heap.FixedBufferAllocator.init(&parsing_buffer);
fa.reset();
const allocator = fa.allocator();
var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena.deinit();
const allocator = arena.allocator();
const sstruct = try self.schema_engine.structName2SchemaStruct(struct_name);
const max_file_index = try self.maxFileIndex(sstruct.name);
@ -391,9 +392,9 @@ pub const FileEngine = struct {
additional_data: *AdditionalData,
entry_allocator: Allocator,
) ZipponError![]const u8 {
var fa = std.heap.FixedBufferAllocator.init(&parsing_buffer);
fa.reset();
const allocator = fa.allocator();
var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena.deinit();
const allocator = arena.allocator();
var buff = std.ArrayList(u8).init(entry_allocator);
defer buff.deinit();
@ -408,6 +409,10 @@ pub const FileEngine = struct {
if (additional_data.childrens.items.len == 0)
additional_data.populateWithEverythingExceptLink(sstruct.members, sstruct.types) catch return FileEngineError.MemoryError;
// Do I populate the relationMap directly in the thread or do I do it on the string at the end?
// I think it is better at the end; that way I don't need to create a duplicate of each map for every file
const relation_maps = try self.schema_engine.relationMapArrayInit(allocator, struct_name, additional_data.*);
// Open the dir that contains all files
const dir = try utils.printOpenDir("{s}/DATA/{s}", .{ self.path_to_ZipponDB_dir, sstruct.name }, .{ .access_sub_paths = false });
@ -417,8 +422,10 @@ pub const FileEngine = struct {
max_file_index + 1,
);
// Do one array and writer for each thread, otherwise they create errors by writing at the same time
// Maybe use a fixed-length buffer for speed here
// Do an array of writers, one for each thread
// Could I create just the max number of CPUs? Because if I have 1000 files, I create 1000 lists
// But in the end, only as many lists as there are CPUs/threads in use are used simultaneously
// So I could technically pass a list from one thread to another
var thread_writer_list = allocator.alloc(std.ArrayList(u8), max_file_index + 1) catch return FileEngineError.MemoryError;
// Start parsing all files in multiple threads
@ -431,7 +438,7 @@ pub const FileEngine = struct {
dir,
sstruct.zid_schema,
filter,
additional_data,
additional_data.*,
try self.schema_engine.structName2DataType(struct_name),
&sync_context,
}) catch return FileEngineError.ThreadError;
@ -439,7 +446,7 @@ pub const FileEngine = struct {
// Wait for all threads to either finish or return an error
while (!sync_context.isComplete()) {
std.time.sleep(10_000_000); // Check every 10ms
std.time.sleep(100_000); // Check every 0.1ms
}
// Append all writers to each other
@ -447,10 +454,17 @@ pub const FileEngine = struct {
for (thread_writer_list) |list| writer.writeAll(list.items) catch return FileEngineError.WriteError;
writer.writeByte(']') catch return FileEngineError.WriteError;
// Here now I need to already have a populated list of RelationMap
// I will then call parseEntitiesRelationMap on each
// Now I need to do the relation stuff, meaning parsing new files to get the relationship values
// Without relationships to return, this function is basically finished here
return buff.toOwnedSlice();
// Here I take the JSON string and I parse it to find all {|<>|} and add them to the relation map with an empty JsonString
for (relation_maps) |*relation_map| try relation_map.populate(buff.items);
// I then call parseEntitiesRelationMap on each
// This will update the buff items to be the same Json but with {|<[16]u8>|} replaced with the right Json
for (relation_maps) |*relation_map| try self.parseEntitiesRelationMap(struct_name, relation_map, &buff);
return buff.toOwnedSlice() catch return ZipponError.MemoryError;
}
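
As a hedged end-to-end sketch of the relation pass this function now
drives (hypothetical data, <bytes> for 16 raw UUID bytes):

1. parseEntities writes the first JSON with placeholders: [{"name": "Bob", "friends": "{|<bytes>|}"}]
2. relation_map.populate(buff.items) registers each <bytes> with an empty JsonString
3. parseEntitiesRelationMap re-parses the files and fills the map: <bytes> -> {"name": "Alice"}
4. EntityWriter.updateWithRelation rewrites the buffer: [{"name": "Bob", "friends": {"name": "Alice"}}]

Sub-relationships repeat the same cycle until no placeholder remains.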
fn parseEntitiesOneFile(
@ -459,7 +473,7 @@ pub const FileEngine = struct {
dir: std.fs.Dir,
zid_schema: []zid.DType,
filter: ?Filter,
additional_data: *AdditionalData,
additional_data: AdditionalData,
data_types: []const DataType,
sync_context: *ThreadSyncContext,
) void {
@ -500,37 +514,52 @@ pub const FileEngine = struct {
_ = sync_context.completeThread();
}
// Receive a map of UUID -> null
// Receive a map of UUID -> empty JsonString
// Will parse the files and update the value to the JSON string of the entity that represents the key
// Will then rewrite the input with the JSON in the map, looking for {|<>|}
// Once the new input is received, call parseEntitiesRelationMap again if the string still contains {|<>|} because of sub-relationships
// The buffer contains the string with {|<>|} and needs to be updated at the end
// TODO: Filter files that need to be parsed to prevent parsing everything all the time
// TODO: Use the new function in SchemaEngine to reduce the number of files to parse
// TODO: Add recursion, taking parseEntities as an example
pub fn parseEntitiesRelationMap(
self: *FileEngine,
struct_name: []const u8,
relation_map: *RelationMap,
buff: *std.ArrayList(u8),
) ZipponError!void {
var fa = std.heap.FixedBufferAllocator.init(&parsing_buffer);
fa.reset();
const allocator = fa.allocator();
var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena.deinit();
const allocator = arena.allocator();
var new_buff = std.ArrayList(u8).init(allocator);
defer new_buff.deinit();
const writer = new_buff.writer();
const sstruct = try self.schema_engine.structName2SchemaStruct(relation_map.struct_name);
const max_file_index = try self.maxFileIndex(sstruct.name);
const relation_maps = try self.schema_engine.relationMapArrayInit(
allocator,
struct_name,
relation_map.additional_data,
);
const sstruct = try self.schema_engine.structName2SchemaStruct(struct_name);
const max_file_index = try self.maxFileIndex(sstruct.name); // Change to use a list of file indices
log.debug("Max file index {d}", .{max_file_index});
// If there is no member to find, that means we need to return all members, so let's populate additional data with all of them
if (relation_map.additional_data.childrens.items.len == 0) {
relation_map.additional_data.populateWithEverythingExceptLink(sstruct.members, sstruct.types) catch return FileEngineError.MemoryError;
relation_map.additional_data.populateWithEverythingExceptLink(
sstruct.members,
sstruct.types,
) catch return FileEngineError.MemoryError;
}
// Open the dir that contains all files
const dir = try utils.printOpenDir("{s}/DATA/{s}", .{ self.path_to_ZipponDB_dir, sstruct.name }, .{ .access_sub_paths = false });
const dir = try utils.printOpenDir(
"{s}/DATA/{s}",
.{ self.path_to_ZipponDB_dir, sstruct.name },
.{ .access_sub_paths = false },
);
// Multi-thread stuff
var sync_context = ThreadSyncContext.init(
@ -539,11 +568,14 @@ pub const FileEngine = struct {
);
// Do one writer for each thread, otherwise it creates errors by writing at the same time
var thread_map_list = allocator.alloc(std.AutoHashMap([16]u8, JsonString), max_file_index + 1) catch return FileEngineError.MemoryError;
var thread_map_list = allocator.alloc(
std.AutoHashMap([16]u8, JsonString),
max_file_index + 1,
) catch return FileEngineError.MemoryError;
// Start parsing all files in multiple threads
for (0..(max_file_index + 1)) |file_index| {
thread_map_list[file_index] = relation_map.map.cloneWithAllocator(allocator);
thread_map_list[file_index] = relation_map.map.cloneWithAllocator(allocator) catch return ZipponError.MemoryError;
self.thread_pool.spawn(parseEntitiesRelationMapOneFile, .{
&thread_map_list[file_index],
@ -551,14 +583,14 @@ pub const FileEngine = struct {
dir,
sstruct.zid_schema,
relation_map.additional_data,
try self.schema_engine.structName2DataType(relation_map.struct_name),
try self.schema_engine.structName2DataType(struct_name),
&sync_context,
}) catch return FileEngineError.ThreadError;
}
// Wait for all threads to either finish or return an error
while (!sync_context.isComplete()) {
std.time.sleep(10_000_000); // Check every 10ms
std.time.sleep(100_000); // Check every 0.1ms
}
// Now here I should have a list of copies of the map, with all the UUIDs spread a bit everywhere
@ -567,24 +599,31 @@ pub const FileEngine = struct {
for (thread_map_list) |map| {
var iter = map.iterator();
while (iter.next()) |entry| {
if (entry.value_ptr.*) |json_string| relation_map.map.put(entry.key_ptr.*, json_string);
if (entry.value_ptr.init) relation_map.map.put(entry.key_ptr.*, entry.value_ptr.*) catch return ZipponError.MemoryError;
}
}
// Here I write the new string and update the buff to have the new version
EntityWriter.updateWithRelation(writer, buff.items, relation_map.map);
try EntityWriter.updateWithRelation(writer, buff.items, relation_map.map.*);
buff.clearRetainingCapacity();
buff.writer().writeAll(new_buff.items);
buff.writer().writeAll(new_buff.items) catch return ZipponError.WriteError;
// Now I need to iterate again if buff.items still contains {|<>|}
// Here I take the JSON string and I parse it to find all {|<>|} and add them to the relation map with an empty JsonString
for (relation_maps) |*sub_relation_map| try sub_relation_map.populate(buff.items);
// I then call parseEntitiesRelationMap on each
// This will update the buff items to be the same Json but with {|<[16]u8>|} replaced with the right Json
for (relation_maps) |*sub_relation_map| try self.parseEntitiesRelationMap(struct_name, sub_relation_map, buff);
}
fn parseEntitiesRelationMapOneFile(
map: *std.AutoHashMap([16]u8, []const u8),
map: *std.AutoHashMap([16]u8, JsonString),
file_index: u64,
dir: std.fs.Dir,
zid_schema: []zid.DType,
additional_data: *AdditionalData,
additional_data: AdditionalData,
data_types: []const DataType,
sync_context: *ThreadSyncContext,
) void {
@ -624,7 +663,13 @@ pub const FileEngine = struct {
sync_context.logError("Error writing entity", err);
return;
};
map.put(row[0].UUID, parent_alloc.dupe(u8, string_list.items)) catch |err| {
map.put(row[0].UUID, JsonString{
.slice = parent_alloc.dupe(u8, string_list.items) catch |err| {
sync_context.logError("Error duping data", err);
return;
},
.init = true,
}) catch |err| {
sync_context.logError("Error writing entity", err);
return;
};
@ -645,9 +690,9 @@ pub const FileEngine = struct {
writer: anytype,
n: usize,
) ZipponError!void {
var fa = std.heap.FixedBufferAllocator.init(&parsing_buffer);
fa.reset();
const allocator = fa.allocator();
var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena.deinit();
const allocator = arena.allocator();
const file_index = try self.getFirstUsableIndexFile(struct_name);
@ -671,9 +716,9 @@ pub const FileEngine = struct {
writer: anytype,
additional_data: *AdditionalData,
) ZipponError!void {
var fa = std.heap.FixedBufferAllocator.init(&parsing_buffer);
fa.reset();
const allocator = fa.allocator();
var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena.deinit();
const allocator = arena.allocator();
const sstruct = try self.schema_engine.structName2SchemaStruct(struct_name);
const max_file_index = try self.maxFileIndex(sstruct.name);
@ -716,7 +761,7 @@ pub const FileEngine = struct {
// Wait for all threads to complete
while (!sync_context.isComplete()) {
std.time.sleep(10_000_000); // Check every 10ms
std.time.sleep(100_000); // Check every 0.1ms
}
// Combine results
@ -836,9 +881,9 @@ pub const FileEngine = struct {
writer: anytype,
additional_data: *AdditionalData,
) ZipponError!void {
var fa = std.heap.FixedBufferAllocator.init(&parsing_buffer);
fa.reset();
const allocator = fa.allocator();
var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena.deinit();
const allocator = arena.allocator();
const sstruct = try self.schema_engine.structName2SchemaStruct(struct_name);
const max_file_index = try self.maxFileIndex(sstruct.name);
@ -871,7 +916,7 @@ pub const FileEngine = struct {
// Wait for all threads to complete
while (!sync_context.isComplete()) {
std.time.sleep(10_000_000); // Check every 10ms
std.time.sleep(100_000); // Check every 0.1ms
}
// Combine results

View File

@ -6,23 +6,27 @@ const Tokenizer = @import("tokenizers/schema.zig").Tokenizer;
const ZipponError = @import("stuffs/errors.zig").ZipponError;
const dtype = @import("dtype");
const DataType = dtype.DataType;
const AdditionalData = @import("stuffs/additionalData.zig").AdditionalData;
const RelationMap = @import("stuffs/relationMap.zig").RelationMap;
const JsonString = @import("stuffs/relationMap.zig").JsonString;
const ConditionValue = @import("stuffs/filter.zig").ConditionValue;
const UUID = dtype.UUID;
const UUIDFileIndex = @import("stuffs/UUIDFileIndex.zig").UUIDIndexMap;
const FileEngine = @import("fileEngine.zig").FileEngine;
// TODO: Create a schemaEngine directory and add this as its core, with the parser alongside it
const config = @import("config.zig");
const BUFFER_SIZE = config.BUFFER_SIZE;
var schema_buffer: [BUFFER_SIZE]u8 = undefined;
// TODO: Stop keeping the allocator at the root of the file
var arena: std.heap.ArenaAllocator = undefined;
var allocator: Allocator = undefined;
const log = std.log.scoped(.schemaEngine);
// TODO: Make better memory management
pub const SchemaStruct = struct {
name: []const u8,
members: [][]const u8,
@ -122,30 +126,24 @@ pub const SchemaEngine = struct {
/// Get the type of the member
pub fn memberName2DataType(self: *SchemaEngine, struct_name: []const u8, member_name: []const u8) ZipponError!DataType {
var i: usize = 0;
for (try self.structName2structMembers(struct_name)) |mn| {
for (try self.structName2structMembers(struct_name), 0..) |mn, i| {
const dtypes = try self.structName2DataType(struct_name);
if (std.mem.eql(u8, mn, member_name)) return dtypes[i];
i += 1;
}
return ZipponError.MemberNotFound;
}
pub fn memberName2DataIndex(self: *SchemaEngine, struct_name: []const u8, member_name: []const u8) ZipponError!usize {
var i: usize = 0;
for (try self.structName2structMembers(struct_name)) |mn| {
for (try self.structName2structMembers(struct_name), 0..) |mn, i| {
if (std.mem.eql(u8, mn, member_name)) return i;
i += 1;
}
return ZipponError.MemberNotFound;
}
/// Get the list of all member names for a struct name
pub fn structName2structMembers(self: *SchemaEngine, struct_name: []const u8) ZipponError![][]const u8 {
pub fn structName2structMembers(self: SchemaEngine, struct_name: []const u8) ZipponError![][]const u8 {
var i: usize = 0;
while (i < self.struct_array.len) : (i += 1) if (std.mem.eql(u8, self.struct_array[i].name, struct_name)) break;
@ -157,7 +155,7 @@ pub const SchemaEngine = struct {
return self.struct_array[i].members;
}
pub fn structName2SchemaStruct(self: *SchemaEngine, struct_name: []const u8) ZipponError!SchemaStruct {
pub fn structName2SchemaStruct(self: SchemaEngine, struct_name: []const u8) ZipponError!SchemaStruct {
var i: usize = 0;
while (i < self.struct_array.len) : (i += 1) if (std.mem.eql(u8, self.struct_array[i].name, struct_name)) break;
@ -169,7 +167,7 @@ pub const SchemaEngine = struct {
return self.struct_array[i];
}
pub fn structName2DataType(self: *SchemaEngine, struct_name: []const u8) ZipponError![]const DataType {
pub fn structName2DataType(self: SchemaEngine, struct_name: []const u8) ZipponError![]const DataType {
var i: u16 = 0;
while (i < self.struct_array.len) : (i += 1) {
@ -184,14 +182,14 @@ pub const SchemaEngine = struct {
}
/// Check if the name of a struct is in the current schema
pub fn isStructNameExists(self: *SchemaEngine, struct_name: []const u8) bool {
pub fn isStructNameExists(self: SchemaEngine, struct_name: []const u8) bool {
var i: u16 = 0;
while (i < self.struct_array.len) : (i += 1) if (std.mem.eql(u8, self.struct_array[i].name, struct_name)) return true;
return false;
}
/// Check if a struct has the member name
pub fn isMemberNameInStruct(self: *SchemaEngine, struct_name: []const u8, member_name: []const u8) ZipponError!bool {
pub fn isMemberNameInStruct(self: SchemaEngine, struct_name: []const u8, member_name: []const u8) ZipponError!bool {
for (try self.structName2structMembers(struct_name)) |mn| {
if (std.mem.eql(u8, mn, member_name)) return true;
}
@ -200,7 +198,7 @@ pub const SchemaEngine = struct {
// Return true if the map has all the member names as keys and nothing more
pub fn checkIfAllMemberInMap(
self: *SchemaEngine,
self: SchemaEngine,
struct_name: []const u8,
map: *std.StringHashMap(ConditionValue),
error_message_buffer: *std.ArrayList(u8),
@ -218,8 +216,57 @@ pub const SchemaEngine = struct {
return ((count == all_struct_member.len - 1) and (count == map.count()));
}
pub fn isUUIDExist(self: *SchemaEngine, struct_name: []const u8, uuid: UUID) bool {
pub fn isUUIDExist(self: SchemaEngine, struct_name: []const u8, uuid: UUID) bool {
const sstruct = self.structName2SchemaStruct(struct_name) catch return false;
return sstruct.uuid_file_index.contains(uuid);
}
/// Create an array of empty RelationMap based on the additionalData
pub fn relationMapArrayInit(
self: SchemaEngine,
alloc: Allocator,
struct_name: []const u8,
additional_data: AdditionalData,
) ZipponError![]RelationMap {
// So here I should have relationships if children are relations
var array = std.ArrayList(RelationMap).init(alloc);
const sstruct = try self.structName2SchemaStruct(struct_name);
for (additional_data.childrens.items) |child| if (sstruct.links.contains(child.name)) {
const map = alloc.create(std.AutoHashMap([16]u8, JsonString)) catch return ZipponError.MemoryError;
map.* = std.AutoHashMap([16]u8, JsonString).init(alloc);
array.append(RelationMap{
.member_name = child.name,
.additional_data = child.additional_data, // Maybe I need to check if it exists, I'm not sure it always exists
.map = map,
}) catch return ZipponError.MemoryError;
};
return array.toOwnedSlice() catch return ZipponError.MemoryError;
}
pub fn fileListToParse(
self: SchemaEngine,
alloc: Allocator,
struct_name: []const u8,
map: std.AutoHashMap([16]u8, JsonString),
) ![]usize {
const sstruct = try self.structName2SchemaStruct(struct_name);
var unique_indices = std.AutoHashMap(usize, void).init(alloc);
var iter = map.keyIterator();
while (iter.next()) |uuid| {
if (sstruct.uuid_file_index.get(uuid.*)) |file_index| {
try unique_indices.put(file_index, {});
}
}
var result = try alloc.alloc(usize, unique_indices.count());
var i: usize = 0;
var index_iter = unique_indices.keyIterator();
while (index_iter.next()) |index| {
result[i] = index.*;
i += 1;
}
return result;
}
};
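
The TODO above parseEntitiesRelationMap hints at how fileListToParse is
meant to be used to cut down the files parsed; a minimal sketch of a
hypothetical call site, not yet wired in by this commit:

// Inside parseEntitiesRelationMap, instead of looping over every file index:
// const file_indices = try self.schema_engine.fileListToParse(allocator, struct_name, relation_map.map.*);
// for (file_indices) |file_index| { ... spawn parseEntitiesRelationMapOneFile for this index only ... }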

View File

@ -4,6 +4,8 @@ const RelationMap = @import("relationMap.zig").RelationMap;
const dtype = @import("dtype");
const DataType = dtype.DataType;
// TODO: Put this in a data structure directory
const ZipponError = @import("errors.zig").ZipponError;
/// This is the [] part
@ -22,27 +24,13 @@ pub const AdditionalData = struct {
pub fn populateWithEverythingExceptLink(self: *AdditionalData, members: [][]const u8, dtypes: []DataType) !void {
for (members, dtypes, 0..) |member, dt, i| {
if (dt == .link or dt == .link_array) continue;
try self.childrens.append(AdditionalDataMember.init(member, i));
try self.childrens.append(AdditionalDataMember.init(self.allocator, member, i));
}
}
pub fn addMember(self: *AdditionalData, name: []const u8, index: usize) ZipponError!void {
self.childrens.append(AdditionalDataMember.init(name, index)) catch return ZipponError.MemoryError;
self.childrens.append(AdditionalDataMember.init(self.allocator, name, index)) catch return ZipponError.MemoryError;
}
pub fn initAdditionalDataOfLastChildren(self: *AdditionalData) *AdditionalData {
self.childrens.items[self.childrens.items.len - 1].additional_data = AdditionalData.init(self.allocator);
return &self.childrens.items[self.childrens.items.len - 1].additional_data.?;
}
/// Create an array of empty RelationMap based on the additionalData
pub fn relationMapArrayInit(self: AdditionalData, allocator: Allocator) ZipponError!?[]RelationMap {
// So here I should have relationship if children are relations
var array = std.ArrayList(RelationMap).init(allocator);
for (self.childrens.items) |child| {
child.
}
}
};
// This is name in: [name]
@ -50,9 +38,9 @@ pub const AdditionalData = struct {
pub const AdditionalDataMember = struct {
name: []const u8,
index: usize, // Index place in the schema
additional_data: ?AdditionalData = null,
additional_data: AdditionalData,
pub fn init(name: []const u8, index: usize) AdditionalDataMember {
return AdditionalDataMember{ .name = name, .index = index };
pub fn init(allocator: Allocator, name: []const u8, index: usize) AdditionalDataMember {
return AdditionalDataMember{ .name = name, .index = index, .additional_data = AdditionalData.init(allocator) };
}
};
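
Since every AdditionalDataMember now carries its own (initially empty)
AdditionalData instead of a nullable one, init takes the allocator; a
minimal usage sketch based on the signatures above (allocator is
hypothetical):

var additional_data = AdditionalData.init(allocator);
try additional_data.addMember("friends", 1); // the child starts with an empty AdditionalData rather than null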

View File

@ -18,7 +18,8 @@
// So I need an option in parseEntity to either write the first JSON or update the existing one
const std = @import("std");
const AdditionalData = @import("stuffs/additionalData.zig").AdditionalData;
const AdditionalData = @import("additionalData.zig").AdditionalData;
const ZipponError = @import("errors.zig").ZipponError;
pub const JsonString = struct {
slice: []const u8 = "",
@ -26,7 +27,30 @@ pub const JsonString = struct {
};
pub const RelationMap = struct {
struct_name: []const u8,
member_name: []const u8,
additional_data: AdditionalData,
map: *std.AutoHashMap([16]u8, JsonString),
/// Will use a string in the JSON format and look for {|<[16]u8>|}
/// It will then check if it is for the right member name and if so, add an empty JSON string at the key
pub fn populate(self: *RelationMap, input: []const u8) ZipponError!void {
var uuid_bytes: [16]u8 = undefined;
var start: usize = 0;
while (std.mem.indexOf(u8, input[start..], "{|<")) |pos| {
const pattern_start = start + pos;
const pattern_end = std.mem.indexOf(u8, input[pattern_start..], ">|}") orelse break;
const full_pattern_end = pattern_start + pattern_end + 3;
const member_end = pattern_start - 3; // This should be " = "
var member_start = member_end - 1;
while (input[member_start] != ' ') : (member_start -= 1) {}
if (!std.mem.eql(u8, input[member_start..member_end], self.member_name)) continue;
for (pattern_start + 3..pattern_end - 3, 0..) |i, j| uuid_bytes[j] = input[i];
self.map.put(uuid_bytes, JsonString{}) catch return ZipponError.MemoryError;
start = full_pattern_end;
}
}
};
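
A minimal sketch of populate in action, with the fields set the way
relationMapArrayInit builds them (allocator and buff are hypothetical;
<bytes> stands for 16 raw UUID bytes in the buffer):

var map = std.AutoHashMap([16]u8, JsonString).init(allocator);
var relation_map = RelationMap{
    .member_name = "friends",
    .additional_data = AdditionalData.init(allocator),
    .map = &map,
};
try relation_map.populate(buff.items); // each friends UUID now maps to an empty JsonString

Patterns that follow a different member name are skipped, so one
RelationMap per relation member can scan the same buffer independently.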

View File

@ -710,7 +710,7 @@ pub const Parser = struct {
try self.parseAdditionalData(
allocator,
additional_data.initAdditionalDataOfLastChildren(),
&additional_data.childrens.items[additional_data.childrens.items.len - 1].additional_data,
struct_name,
);
state = .expect_comma_OR_r_bracket;
@ -1081,7 +1081,8 @@ test "GRAB with additional data" {
}
test "UPDATE" {
try testParsing("UPDATE User {name = 'Bob'} TO (email='new@gmail.com')");
try testParsing("UPDATE User [1] {name = 'Bob'} TO (email='new@gmail.com')");
try testParsing("GRAB User {}");
}
test "GRAB filter with int" {