Started cleaning by creating a SchemaEngine
This commit is contained in:
parent
1d365adff6
commit
e1c957b3b6
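In short: schema parsing and all schema lookups move out of FileEngine into a new SchemaEngine (src/schemaEngine.zig), and a DBEngine in src/main.zig now owns both engines and wires them together. A minimal sketch of the new wiring, based only on the APIs shown in the diff below; the allocator and the "data" paths are placeholders, and error handling and cleanup are omitted:

// Hypothetical wiring sketch (simplified from DBEngine.init in src/main.zig below).
var file_engine = try FileEngine.init(allocator, "data"); // storage: directories and .zid files
var schema_engine = try SchemaEngine.init(allocator, "data/schema"); // schema: parsing and lookups
file_engine.schema_engine = &schema_engine; // FileEngine now asks the SchemaEngine for struct members/types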
@@ -26,14 +26,6 @@ pub fn build(b: *std.Build) void {
run_step.dependOn(&run_cmd.step);

// All tests
const tests1 = b.addTest(.{
.root_source_file = b.path("src/tokenizers/file.zig"),
.target = target,
.optimize = optimize,
.name = "File tokenizer",
.test_runner = b.path("test_runner.zig"),
});
const run_tests1 = b.addRunArtifact(tests1);

const tests2 = b.addTest(.{
.root_source_file = b.path("src/tokenizers/cli.zig"),
@@ -86,7 +78,6 @@ pub fn build(b: *std.Build) void {
const run_tests6 = b.addRunArtifact(tests6);

const test_step = b.step("test", "Run unit tests");
test_step.dependOn(&run_tests1.step);
test_step.dependOn(&run_tests2.step);
test_step.dependOn(&run_tests3.step);
test_step.dependOn(&run_tests4.step);
@@ -40,4 +40,25 @@ pub const HELP_MESSAGE = struct {
\\For more information: https://github.com/MrBounty/ZipponDB
\\
;
pub const no_engine: []const u8 =
\\To start using ZipponDB you need to create a new database.
\\This is a directory/folder that will be used to store data, logs, backups, etc.
\\To create one use 'db new path/to/directory'. E.g. 'db new data'.
\\Or use an existing one with 'db use'.
\\
\\You can also set the environment variable ZIPPONDB_PATH to the desired path.
\\
\\For more information: https://github.com/MrBounty/ZipponDB
\\
;
pub const no_schema: []const u8 =
\\A database was found here {s} but no schema was found inside.
\\To start using the database, you need to attach a schema to it using a schema file.
\\By using 'schema init path/to/schema'. For more information on how to create a schema: TODO add link
\\
\\You can also set the environment variable ZIPPONDB_SCHEMA to the path to a schema file.
\\
\\For more information: https://github.com/MrBounty/ZipponDB
\\
;
};
@@ -1,30 +1,20 @@
const std = @import("std");
const utils = @import("stuffs/utils.zig");
const dtype = @import("dtype");
const s2t = dtype.s2t;
const zid = @import("ZipponData");
const time = std.time;
const U64 = std.atomic.Value(u64);
const Pool = std.Thread.Pool;
const Allocator = std.mem.Allocator;
const SchemaEngine = @import("schemaEngine.zig").SchemaEngine;
const SchemaStruct = @import("schemaParser.zig").Parser.SchemaStruct;

const dtype = @import("dtype");
const s2t = dtype.s2t;
const UUID = dtype.UUID;
const DateTime = dtype.DateTime;
const DataType = dtype.DataType;

const FileTokenizer = @import("tokenizers/file.zig").Tokenizer;
const FileToken = @import("tokenizers/file.zig").Token;
const SchemaTokenizer = @import("tokenizers/schema.zig").Tokenizer;
const SchemaToken = @import("tokenizers/schema.zig").Token;
const AdditionalData = @import("stuffs/additionalData.zig").AdditionalData;
const Filter = @import("stuffs/filter.zig").Filter;
const Loc = @import("tokenizers/shared/loc.zig").Loc;

const Condition = @import("stuffs/filter.zig").Condition;

// TODO: Move that to another struct, not in the file engine
const SchemaStruct = @import("schemaParser.zig").Parser.SchemaStruct;
const SchemaParser = @import("schemaParser.zig").Parser;

const ZipponError = @import("stuffs/errors.zig").ZipponError;
const FileEngineError = @import("stuffs/errors.zig").FileEngineError;
@@ -37,53 +27,31 @@ const CPU_CORE = config.CPU_CORE;

const log = std.log.scoped(.fileEngine);

const FileEngineState = enum { MissingPath, MissingSchema, Ok };
// TODO: Start using State at the start and end of each function for debugging
const FileEngineState = enum { Parsing, Waiting };

/// Manage everything that is related to reading or writing files,
/// or even getting stats, whatever. If it touches files, it's here.
pub const FileEngine = struct {
allocator: Allocator,
path_to_ZipponDB_dir: []const u8,
null_terminated_schema_buff: [:0]u8,
struct_array: []SchemaStruct,
state: FileEngineState,
path_to_ZipponDB_dir: []const u8,
schema_engine: *SchemaEngine = undefined, // I don't really like that; what if I never define it before using it? Should I use ?*SchemaEngine instead?

pub fn init(allocator: Allocator, path: []const u8) ZipponError!FileEngine {
var schema_buf = allocator.alloc(u8, BUFFER_SIZE) catch return FileEngineError.MemoryError;
defer allocator.free(schema_buf);

const len: usize = FileEngine.readSchemaFile(path, schema_buf) catch 0;
const null_terminated_schema_buff = allocator.dupeZ(u8, schema_buf[0..len]) catch return FileEngineError.MemoryError;
errdefer allocator.free(null_terminated_schema_buff);

var toker = SchemaTokenizer.init(null_terminated_schema_buff);
var parser = SchemaParser.init(&toker, allocator);

var struct_array = std.ArrayList(SchemaStruct).init(allocator);
parser.parse(&struct_array) catch return FileEngineError.SchemaNotConform;

var state: FileEngineState = .Ok;
if (len == 0) state = .MissingSchema;
if (std.mem.eql(u8, "", path)) state = .MissingPath;

return FileEngine{
.allocator = allocator,
.path_to_ZipponDB_dir = path,
.null_terminated_schema_buff = null_terminated_schema_buff,
.struct_array = struct_array.toOwnedSlice() catch return FileEngineError.MemoryError,
.state = state,
.state = .Waiting,
};
}

pub fn deinit(self: *FileEngine) void {
for (self.struct_array) |*elem| elem.deinit();
self.allocator.free(self.struct_array);
self.allocator.free(self.null_terminated_schema_buff);
self.allocator.free(self.path_to_ZipponDB_dir);
}

pub fn usable(self: FileEngine) bool {
return self.state == .Ok;
return self.state == .Waiting;
}

// --------------------Other--------------------
@@ -133,7 +101,8 @@ pub const FileEngine = struct {
// --------------------Init folder and files--------------------

/// Create the main folder. Including DATA, LOG and BACKUP
pub fn checkAndCreateDirectories(self: *FileEngine) ZipponError!void {
/// TODO: Maybe start using a fixed length buffer instead of freeing every time, but that's not that important
pub fn createMainDirectories(self: *FileEngine) ZipponError!void {
var path_buff = std.fmt.allocPrint(self.allocator, "{s}", .{self.path_to_ZipponDB_dir}) catch return FileEngineError.MemoryError;
defer self.allocator.free(path_buff);

@@ -174,49 +143,19 @@ pub const FileEngine = struct {
if (RESET_LOG_AT_RESTART) {
_ = cwd.createFile(path_buff, .{}) catch return FileEngineError.CantMakeFile;
} else {
const log_dir = cwd.openDir(path_buff[0..(path_buff.len - 4)], .{ .iterate = true }) catch return FileEngineError.CantOpenDir;
var iter = log_dir.iterate();

var founded = false;
while (iter.next() catch return FileEngineError.DirIterError) |entry| {
if (std.mem.eql(u8, entry.name, "log")) founded = true;
}
if (!founded) _ = cwd.createFile(path_buff, .{}) catch return FileEngineError.CantMakeFile;
_ = std.fs.cwd().openFile(path_buff, .{}) catch {
_ = cwd.createFile(path_buff, .{}) catch return FileEngineError.CantMakeFile;
};
}
}

/// Request a path to a schema file and then create the struct folder
/// TODO: Check if some data already exists and if so ask if the user wants to delete it and make a backup
pub fn initDataFolder(self: *FileEngine, path_to_schema_file: []const u8) ZipponError!void {
var schema_buf = self.allocator.alloc(u8, BUFFER_SIZE) catch return FileEngineError.MemoryError;
defer self.allocator.free(schema_buf);

const file = std.fs.cwd().openFile(path_to_schema_file, .{}) catch return FileEngineError.SchemaFileNotFound;
defer file.close();

const len = file.readAll(schema_buf) catch return FileEngineError.ReadError;

self.allocator.free(self.null_terminated_schema_buff);
self.null_terminated_schema_buff = self.allocator.dupeZ(u8, schema_buf[0..len]) catch return FileEngineError.MemoryError;

var toker = SchemaTokenizer.init(self.null_terminated_schema_buff);
var parser = SchemaParser.init(&toker, self.allocator);

// Deinit the struct array before creating a new one
for (self.struct_array) |*elem| elem.deinit();
self.allocator.free(self.struct_array);

var struct_array = std.ArrayList(SchemaStruct).init(self.allocator);
parser.parse(&struct_array) catch return error.SchemaNotConform;
self.struct_array = struct_array.toOwnedSlice() catch return FileEngineError.MemoryError;

const path = std.fmt.allocPrint(self.allocator, "{s}/DATA", .{self.path_to_ZipponDB_dir}) catch return FileEngineError.MemoryError;
defer self.allocator.free(path);

var data_dir = std.fs.cwd().openDir(path, .{}) catch return FileEngineError.CantOpenDir;
pub fn createStructDirectories(self: *FileEngine, struct_array: []SchemaStruct) ZipponError!void {
var data_dir = try utils.printOpenDir("{s}/DATA", .{self.path_to_ZipponDB_dir}, .{});
defer data_dir.close();

for (self.struct_array) |schema_struct| {
for (struct_array) |schema_struct| {
data_dir.makeDir(schema_struct.name) catch |err| switch (err) {
error.PathAlreadyExists => {},
else => return FileEngineError.CantMakeDir,
@@ -225,15 +164,14 @@ pub const FileEngine = struct {

zid.createFile("0.zid", struct_dir) catch return FileEngineError.CantMakeFile;
}

try self.writeSchemaFile();
}

// --------------------Read and parse files--------------------

/// Use a struct name to populate a list with all UUID of this struct
/// TODO: Multi thread that too
pub fn getNumberOfEntity(self: *FileEngine, struct_name: []const u8) ZipponError!usize {
const sstruct = try self.structName2SchemaStruct(struct_name);
const sstruct = try self.schema_engine.structName2SchemaStruct(struct_name);
const max_file_index = try self.maxFileIndex(sstruct.name);
var count: usize = 0;

@@ -253,8 +191,10 @@ pub const FileEngine = struct {
}

/// Use a struct name to populate a list with all UUID of this struct
/// TODO: Use a radix trie or something in that style to keep UUID and file position in memory
/// TODO: Multi thread that too
pub fn getAllUUIDList(self: *FileEngine, struct_name: []const u8, uuid_list: *std.ArrayList(UUID)) ZipponError!void {
const sstruct = try self.structName2SchemaStruct(struct_name);
const sstruct = try self.schema_engine.structName2SchemaStruct(struct_name);
const max_file_index = try self.maxFileIndex(sstruct.name);

const dir = try utils.printOpenDir("{s}/DATA/{s}", .{ self.path_to_ZipponDB_dir, sstruct.name }, .{});
@@ -270,36 +210,16 @@ pub const FileEngine = struct {
}
}

/// Take a condition and an array of UUID and fill the array with all UUID that match the condition
pub fn getUUIDListUsingFilter(self: *FileEngine, struct_name: []const u8, filter: Filter, uuid_list: *std.ArrayList(UUID)) ZipponError!void {
const sstruct = try self.structName2SchemaStruct(struct_name);
const max_file_index = try self.maxFileIndex(sstruct.name);

const dir = try utils.printOpenDir("{s}/DATA/{s}", .{ self.path_to_ZipponDB_dir, sstruct.name }, .{});

for (0..(max_file_index + 1)) |i| {
const path_buff = std.fmt.allocPrint(self.allocator, "{d}.zid", .{i}) catch return FileEngineError.MemoryError;
defer self.allocator.free(path_buff);

var iter = zid.DataIterator.init(self.allocator, path_buff, dir, sstruct.zid_schema) catch return FileEngineError.ZipponDataError;
defer iter.deinit();

while (iter.next() catch return FileEngineError.ZipponDataError) |row| {
if (!filter.evaluate(row)) uuid_list.append(UUID{ .bytes = row[0] });
}
}
}

/// Take a filter, parse all files and, if one struct is validated by the filter, write it in a JSON format to the writer
/// filter can be null. This will return all of them
pub fn parseEntities(
self: *FileEngine,
struct_name: []const u8,
filter: ?Filter,
writer: anytype,
additional_data: *AdditionalData,
writer: anytype,
) ZipponError!void {
const sstruct = try self.structName2SchemaStruct(struct_name);
const sstruct = try self.schema_engine.structName2SchemaStruct(struct_name);
const max_file_index = try self.maxFileIndex(sstruct.name);

// If there is no member to find, that means we need to return all members, so let's populate additional data with all of them
@@ -328,7 +248,7 @@ pub const FileEngine = struct {
};
const arena = thread_safe_arena.allocator();

// TODO: Put that in the file engine members, so I don't need to init the Pool every time
// TODO: Put that in the db engine, so I don't need to init the Pool every time
var thread_pool: Pool = undefined;
thread_pool.init(Pool.Options{
.allocator = arena, // this is an arena allocator from `std.heap.ArenaAllocator`
@@ -353,7 +273,7 @@ pub const FileEngine = struct {
sstruct.zid_schema,
filter,
additional_data,
try self.structName2DataType(struct_name),
try self.schema_engine.structName2DataType(struct_name),
&total_entity_found,
&ended_count,
&error_count,
@@ -435,23 +355,21 @@ pub const FileEngine = struct {
.Str => |v| try writer.print("\"{s}\"", .{v}),
.UUID => |v| try writer.print("\"{s}\"", .{UUID.format_bytes(v)}),
.Bool => |v| try writer.print("{any}", .{v}),
.Unix => |v| try writeDateTime(writer, v, data_type),
.Unix => |v| {
const datetime = DateTime.initUnix(v);
try writer.writeByte('"');
switch (data_type) {
.date => try datetime.format("YYYY/MM/DD", writer),
.time => try datetime.format("HH:mm:ss.SSSS", writer),
.datetime => try datetime.format("YYYY/MM/DD-HH:mm:ss.SSSS", writer),
else => unreachable,
}
try writer.writeByte('"');
},
.IntArray, .FloatArray, .StrArray, .UUIDArray, .BoolArray, .UnixArray => try writeArray(writer, value, data_type),
}
}
fn writeDateTime(writer: anytype, unix_time: u64, data_type: DataType) !void {
const datetime = DateTime.initUnix(unix_time);
try writer.writeByte('"');
switch (data_type) {
.date => try datetime.format("YYYY/MM/DD", writer),
.time => try datetime.format("HH:mm:ss.SSSS", writer),
.datetime => try datetime.format("YYYY/MM/DD-HH:mm:ss.SSSS", writer),
else => unreachable,
}
try writer.writeByte('"');
}

fn writeArray(writer: anytype, data: zid.Data, data_type: DataType) ZipponError!void {
writer.writeByte('[') catch return FileEngineError.WriteError;
var iter = zid.ArrayIterator.init(data) catch return FileEngineError.ZipponDataError;
@@ -479,16 +397,6 @@ pub const FileEngine = struct {
writer.writeByte(']') catch return FileEngineError.WriteError;
}

fn incrementAndCheckLimit(counter: *U64, limit: u64) bool {
const new_count = counter.fetchAdd(1, .monotonic) + 1;
return limit != 0 and new_count >= limit;
}

fn logErrorAndIncrementCount(message: []const u8, err: anyerror, error_count: *U64) void {
log.err("{s}: {any}", .{ message, err });
_ = error_count.fetchAdd(1, .acquire);
}

// --------------------Change existing files--------------------

// TODO: Make it in batch too
@@ -532,7 +440,7 @@ pub const FileEngine = struct {
writer: anytype,
additional_data: *AdditionalData,
) ZipponError!void {
const sstruct = try self.structName2SchemaStruct(struct_name);
const sstruct = try self.schema_engine.structName2SchemaStruct(struct_name);
const max_file_index = try self.maxFileIndex(sstruct.name);

const dir = try utils.printOpenDir("{s}/DATA/{s}", .{ self.path_to_ZipponDB_dir, sstruct.name }, .{});
@@ -565,11 +473,25 @@ pub const FileEngine = struct {
list.* = std.ArrayList(u8).init(self.allocator);
}

var data_arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer data_arena.deinit();
const data_allocator = data_arena.allocator();

var new_data_buff = data_allocator.alloc(zid.Data, sstruct.members.len) catch return ZipponError.MemoryError;

// Convert the map to an array of ZipponData Data type, to be used with the ZipponData writer
for (sstruct.members, 0..) |member, i| {
if (!map.contains(member)) continue;

const dt = try self.schema_engine.memberName2DataType(struct_name, member);
new_data_buff[i] = try string2Data(data_allocator, dt, map.get(member).?);
}

// Spawn threads for each file
for (0..(max_file_index + 1)) |file_index| {
thread_pool.spawn(updateEntitiesOneFile, .{
self,
struct_name,
new_data_buff,
sstruct,
filter,
&map,
thread_writer_list[file_index].writer(),
@@ -596,8 +518,8 @@ pub const FileEngine = struct {
}

fn updateEntitiesOneFile(
file_engine: *FileEngine,
struct_name: []const u8,
new_data_buff: []zid.Data,
sstruct: SchemaStruct,
filter: ?Filter,
map: *const std.StringHashMap([]const u8),
writer: anytype,
@@ -613,25 +535,6 @@ pub const FileEngine = struct {
defer fa.reset();
const allocator = fa.allocator();

const sstruct = file_engine.structName2SchemaStruct(struct_name) catch |err| {
logErrorAndIncrementCount("Error getting schema struct", err, error_count);
return;
};

var new_data_buff = allocator.alloc(zid.Data, file_engine.numberOfMemberInStruct(struct_name) catch return) catch {
logErrorAndIncrementCount("Memory allocation error", error.OutOfMemory, error_count);
return;
};
defer allocator.free(new_data_buff);

// Add the new data
for (file_engine.structName2structMembers(struct_name) catch return, 0..) |member, i| {
if (!map.contains(member)) continue;

const dt = file_engine.memberName2DataType(struct_name, member) catch return;
new_data_buff[i] = string2Data(allocator, dt, map.get(member).?) catch return;
}

var path_buffer: [128]u8 = undefined;
const path = std.fmt.bufPrint(&path_buffer, "{d}.zid", .{file_index}) catch |err| {
logErrorAndIncrementCount("Error creating file path", err, error_count);
@@ -665,7 +568,7 @@ pub const FileEngine = struct {
if (filter == null or filter.?.evaluate(row)) {
// Add the unchanged Data in the new_data_buff
new_data_buff[0] = row[0];
for (file_engine.structName2structMembers(struct_name) catch return, 0..) |member, i| {
for (sstruct.members, 0..) |member, i| {
if (map.contains(member)) continue;
new_data_buff[i] = row[i];
}
@@ -714,7 +617,7 @@ pub const FileEngine = struct {
writer: anytype,
additional_data: *AdditionalData,
) ZipponError!void {
const sstruct = try self.structName2SchemaStruct(struct_name);
const sstruct = try self.schema_engine.structName2SchemaStruct(struct_name);
const max_file_index = try self.maxFileIndex(sstruct.name);

const dir = try utils.printOpenDir("{s}/DATA/{s}", .{ self.path_to_ZipponDB_dir, sstruct.name }, .{});
@@ -750,8 +653,7 @@ pub const FileEngine = struct {
// Spawn threads for each file
for (0..(max_file_index + 1)) |file_index| {
thread_pool.spawn(deleteEntitiesOneFile, .{
self,
struct_name,
sstruct,
filter,
thread_writer_list[file_index].writer(),
additional_data,
@@ -777,8 +679,7 @@ pub const FileEngine = struct {
}

fn deleteEntitiesOneFile(
file_engine: *FileEngine,
struct_name: []const u8,
sstruct: SchemaStruct,
filter: ?Filter,
writer: anytype,
additional_data: *AdditionalData,
@@ -793,11 +694,6 @@ pub const FileEngine = struct {
defer fa.reset();
const allocator = fa.allocator();

const sstruct = file_engine.structName2SchemaStruct(struct_name) catch |err| {
logErrorAndIncrementCount("Error getting schema struct", err, error_count);
return;
};

var path_buffer: [128]u8 = undefined;
const path = std.fmt.bufPrint(&path_buffer, "{d}.zid", .{file_index}) catch |err| {
logErrorAndIncrementCount("Error creating file path", err, error_count);
@@ -859,6 +755,19 @@ pub const FileEngine = struct {

_ = ended_count.fetchAdd(1, .acquire);
}

// --------------------Shared multi threading methods--------------------

fn incrementAndCheckLimit(counter: *U64, limit: u64) bool {
const new_count = counter.fetchAdd(1, .monotonic) + 1;
return limit != 0 and new_count >= limit;
}

fn logErrorAndIncrementCount(message: []const u8, err: anyerror, error_count: *U64) void {
log.err("{s}: {any}", .{ message, err });
_ = error_count.fetchAdd(1, .acquire);
}

// --------------------ZipponData utils--------------------

fn string2Data(allocator: Allocator, dt: DataType, value: []const u8) ZipponError!zid.Data {
@@ -896,7 +805,7 @@ pub const FileEngine = struct {
const array = s2t.parseArrayBool(allocator, value) catch return FileEngineError.MemoryError;
defer allocator.free(array);

return zid.Data.initFloatArray(zid.allocEncodArray.Bool(allocator, array) catch return FileEngineError.AllocEncodError);
return zid.Data.initBoolArray(zid.allocEncodArray.Bool(allocator, array) catch return FileEngineError.AllocEncodError);
},
.link_array => {
const array = s2t.parseArrayUUIDBytes(allocator, value) catch return FileEngineError.MemoryError;
@@ -933,8 +842,8 @@ pub const FileEngine = struct {
struct_name: []const u8,
map: std.StringHashMap([]const u8),
) ZipponError![]zid.Data {
const members = try self.structName2structMembers(struct_name);
const types = try self.structName2DataType(struct_name);
const members = try self.schema_engine.structName2structMembers(struct_name);
const types = try self.schema_engine.structName2DataType(struct_name);

var datas = allocator.alloc(zid.Data, (members.len)) catch return FileEngineError.MemoryError;

@@ -997,18 +906,11 @@ pub const FileEngine = struct {
}

pub fn isSchemaFileInDir(self: *FileEngine) bool {
const path = std.fmt.allocPrint(
self.allocator,
"{s}/schema",
.{self.path_to_ZipponDB_dir},
) catch return false;
defer self.allocator.free(path);

_ = std.fs.cwd().openFile(path, .{}) catch return false;
_ = utils.printOpenFile("{s}/schema", .{self.path_to_ZipponDB_dir}, .{}) catch return false;
return true;
}

pub fn writeSchemaFile(self: *FileEngine) FileEngineError!void {
pub fn writeSchemaFile(self: *FileEngine, null_terminated_schema_buff: [:0]const u8) FileEngineError!void {
var zippon_dir = std.fs.cwd().openDir(self.path_to_ZipponDB_dir, .{}) catch return FileEngineError.MemoryError;
defer zippon_dir.close();

@@ -1019,115 +921,6 @@ pub const FileEngine = struct {

var file = zippon_dir.createFile("schema", .{}) catch return FileEngineError.CantMakeFile;
defer file.close();
file.writeAll(self.null_terminated_schema_buff) catch return FileEngineError.WriteError;
}

/// Get the type of the member
pub fn memberName2DataType(self: *FileEngine, struct_name: []const u8, member_name: []const u8) ZipponError!DataType {
var i: usize = 0;

for (try self.structName2structMembers(struct_name)) |mn| {
const dtypes = try self.structName2DataType(struct_name);
if (std.mem.eql(u8, mn, member_name)) return dtypes[i];
i += 1;
}

return FileEngineError.MemberNotFound;
}

pub fn memberName2DataIndex(self: *FileEngine, struct_name: []const u8, member_name: []const u8) ZipponError!usize {
var i: usize = 0;

for (try self.structName2structMembers(struct_name)) |mn| {
if (std.mem.eql(u8, mn, member_name)) return i;
i += 1;
}

return FileEngineError.MemberNotFound;
}

/// Get the list of all member names for a struct name
pub fn structName2structMembers(self: *FileEngine, struct_name: []const u8) ZipponError![][]const u8 {
var i: usize = 0;

while (i < self.struct_array.len) : (i += 1) if (std.mem.eql(u8, self.struct_array[i].name, struct_name)) break;

if (i == self.struct_array.len) {
return FileEngineError.StructNotFound;
}

return self.struct_array[i].members;
}

pub fn structName2SchemaStruct(self: *FileEngine, struct_name: []const u8) ZipponError!SchemaStruct {
var i: usize = 0;

while (i < self.struct_array.len) : (i += 1) if (std.mem.eql(u8, self.struct_array[i].name, struct_name)) break;

if (i == self.struct_array.len) {
return FileEngineError.StructNotFound;
}

return self.struct_array[i];
}

pub fn structName2DataType(self: *FileEngine, struct_name: []const u8) ZipponError![]const DataType {
var i: u16 = 0;

while (i < self.struct_array.len) : (i += 1) {
if (std.mem.eql(u8, self.struct_array[i].name, struct_name)) break;
}

if (i == self.struct_array.len and !std.mem.eql(u8, self.struct_array[i].name, struct_name)) {
return FileEngineError.StructNotFound;
}

return self.struct_array[i].types;
}

/// Return the number of members of a struct
fn numberOfMemberInStruct(self: *FileEngine, struct_name: []const u8) ZipponError!usize {
var i: usize = 0;

for (try self.structName2structMembers(struct_name)) |_| {
i += 1;
}

return i;
}

/// Check if the name of a struct is in the current schema
pub fn isStructNameExists(self: *FileEngine, struct_name: []const u8) bool {
var i: u16 = 0;
while (i < self.struct_array.len) : (i += 1) if (std.mem.eql(u8, self.struct_array[i].name, struct_name)) return true;
return false;
}

/// Check if a struct has the member name
pub fn isMemberNameInStruct(self: *FileEngine, struct_name: []const u8, member_name: []const u8) ZipponError!bool {
for (try self.structName2structMembers(struct_name)) |mn| { // I do not return an error here because I should have already checked that the struct exists
if (std.mem.eql(u8, mn, member_name)) return true;
}
return false;
}

// Return true if the map has all the member names as keys and no more
pub fn checkIfAllMemberInMap(
self: *FileEngine,
struct_name: []const u8,
map: *std.StringHashMap([]const u8),
error_message_buffer: *std.ArrayList(u8),
) ZipponError!bool {
const all_struct_member = try self.structName2structMembers(struct_name);
var count: u16 = 0;

const writer = error_message_buffer.writer();

for (all_struct_member) |mn| {
if (std.mem.eql(u8, mn, "id")) continue;
if (map.contains(mn)) count += 1 else writer.print(" {s},", .{mn}) catch return FileEngineError.WriteError;
}

return ((count == all_struct_member.len - 1) and (count == map.count()));
file.writeAll(null_terminated_schema_buff) catch return FileEngineError.WriteError;
}
};
282 src/main.zig
@@ -4,6 +4,7 @@ const send = utils.send;
const Allocator = std.mem.Allocator;

const FileEngine = @import("fileEngine.zig").FileEngine;
const SchemaEngine = @import("schemaEngine.zig").SchemaEngine;

const cliTokenizer = @import("tokenizers/cli.zig").Tokenizer;
const cliToken = @import("tokenizers/cli.zig").Token;
@@ -12,6 +13,8 @@ const ziqlTokenizer = @import("tokenizers/ziql.zig").Tokenizer;
const ziqlToken = @import("tokenizers/ziql.zig").Token;
const ziqlParser = @import("ziqlParser.zig").Parser;

const ZipponError = @import("stuffs/errors.zig").ZipponError;

const BUFFER_SIZE = @import("config.zig").BUFFER_SIZE;
const HELP_MESSAGE = @import("config.zig").HELP_MESSAGE;

@@ -21,7 +24,6 @@ const State = enum {
expect_schema_command,
expect_path_to_schema,
expect_db_command,
expect_path_to_new_db,
expect_path_to_db,
quit,
end,
@@ -36,6 +38,116 @@ pub const std_options = .{
.logFn = myLog,
};

const DBEngineState = enum { MissingFileEngine, MissingSchemaEngine, Ok, Init };

const DBEngine = struct {
allocator: Allocator,
state: DBEngineState = .Init,
file_engine: FileEngine = undefined,
schema_engine: SchemaEngine = undefined,

fn init(allocator: std.mem.Allocator, potential_main_path: ?[]const u8, potential_schema_path: ?[]const u8) DBEngine {
var self = DBEngine{ .allocator = allocator };
const potential_main_path_or_environment_variable = potential_main_path orelse utils.getEnvVariable(allocator, "ZIPPONDB_PATH");

if (potential_main_path_or_environment_variable) |main_path| {
log_path = std.fmt.bufPrint(&log_buff, "{s}/LOG/log", .{main_path}) catch "";
log.info("Found ZIPPONDB_PATH: {s}.", .{main_path});
self.file_engine = FileEngine.init(self.allocator, main_path) catch {
log.err("Error when init FileEngine", .{});
self.state = .MissingFileEngine;
return self;
};
self.file_engine.createMainDirectories() catch {
log.err("Error when creating main directories", .{});
self.state = .MissingFileEngine;
return self;
};

self.state = .MissingSchemaEngine;
} else {
log.info("No ZIPPONDB_PATH found.", .{});
self.state = .MissingFileEngine;
return self;
}

if (self.file_engine.isSchemaFileInDir() and potential_schema_path == null) {
const schema_path = std.fmt.allocPrint(allocator, "{s}/schema", .{self.file_engine.path_to_ZipponDB_dir}) catch {
self.state = .MissingSchemaEngine;
return self;
};
defer allocator.free(schema_path);

log.info("Schema found in the database directory.", .{});
self.schema_engine = SchemaEngine.init(self.allocator, schema_path) catch {
log.err("Error when init SchemaEngine", .{});
self.state = .MissingSchemaEngine;
return self;
};
self.file_engine.createStructDirectories(self.schema_engine.struct_array) catch {
log.err("Error when creating struct directories", .{});
self.schema_engine.deinit();
self.state = .MissingSchemaEngine;
return self;
};

self.file_engine.schema_engine = &self.schema_engine;
self.state = .Ok;
return self;
}

log.info("Database doesn't have any schema yet, trying to add one.", .{});
const potential_schema_path_or_environment_variable = potential_schema_path orelse utils.getEnvVariable(allocator, "ZIPPONDB_SCHEMA");
if (potential_schema_path_or_environment_variable) |schema_path| {
log.info("Found schema path {s}.", .{schema_path});
self.schema_engine = SchemaEngine.init(self.allocator, schema_path) catch {
log.err("Error when init SchemaEngine", .{});
self.state = .MissingSchemaEngine;
return self;
};
self.file_engine.createStructDirectories(self.schema_engine.struct_array) catch {
log.err("Error when creating struct directories", .{});
self.schema_engine.deinit();
self.state = .MissingSchemaEngine;
return self;
};
self.file_engine.schema_engine = &self.schema_engine;
self.state = .Ok;
} else {
log.info(HELP_MESSAGE.no_schema, .{self.file_engine.path_to_ZipponDB_dir});
}
return self;
}

fn deinit(self: *DBEngine) void {
if (self.state == .Ok or self.state == .MissingSchemaEngine) self.file_engine.deinit(); // Pretty sure I could use something like state > 2 because the enum is just numbers
if (self.state == .Ok) self.schema_engine.deinit();
}

pub fn runQuery(self: *DBEngine, null_term_query_str: [:0]const u8) void {
var gpa = std.heap.GeneralPurposeAllocator(.{}){}; // Maybe use an arena here
const allocator = gpa.allocator();

var toker = ziqlTokenizer.init(null_term_query_str);

var parser = ziqlParser.init(
allocator,
&toker,
&self.file_engine,
&self.schema_engine,
);

parser.parse() catch |err| {
log.err("Error parsing: {any}", .{err});
};

switch (gpa.deinit()) {
.ok => {},
.leak => std.log.debug("We fucked it up bro...\n", .{}),
}
}
};
pub fn myLog(
comptime message_level: std.log.Level,
comptime scope: @Type(.EnumLiteral),
@@ -80,8 +192,8 @@ pub fn main() !void {
.leak => log.debug("We fucked it up bro...\n", .{}),
};

var file_engine = try initFileEngine.init(allocator, null);
defer file_engine.deinit();
var db_engine = DBEngine.init(allocator, null, null);
defer db_engine.deinit();

const line_buf = try allocator.alloc(u8, BUFFER_SIZE);
defer allocator.free(line_buf);
@@ -104,8 +216,13 @@ pub fn main() !void {
while ((state != .end) and (state != .quit)) : (token = toker.next()) switch (state) {
.expect_main_command => switch (token.tag) {
.keyword_run => {
if (!file_engine.usable()) {
send("Error: No database selected. Please use db new or db use.", .{});
if (db_engine.state == .MissingFileEngine) {
send("{s}", .{HELP_MESSAGE.no_engine});
state = .end;
continue;
}
if (db_engine.state == .MissingSchemaEngine) {
send("{s}", .{HELP_MESSAGE.no_schema});
state = .end;
continue;
}
@@ -113,8 +230,8 @@ pub fn main() !void {
},
.keyword_db => state = .expect_db_command,
.keyword_schema => {
if (!file_engine.usable()) {
send("Error: No database selected. Please use db new or db use.", .{});
if (db_engine.state == .MissingFileEngine) {
send("{s}", .{HELP_MESSAGE.no_engine});
state = .end;
continue;
}
@@ -133,11 +250,15 @@ pub fn main() !void {
},

.expect_db_command => switch (token.tag) {
.keyword_new => state = .expect_path_to_new_db,
.keyword_use => state = .expect_path_to_db,
.keyword_new, .keyword_use => state = .expect_path_to_db, // TODO: When new, create the dir. If use, don't create the dir
.keyword_metrics => {
if (!file_engine.usable()) {
send("Error: No database selected. Please use db new or db use.", .{});
if (db_engine.state == .MissingFileEngine) {
send("{s}", .{HELP_MESSAGE.no_engine});
state = .end;
continue;
}
if (db_engine.state == .MissingSchemaEngine) {
send("{s}", .{HELP_MESSAGE.no_schema});
state = .end;
continue;
}
@@ -145,7 +266,7 @@ pub fn main() !void {
var buffer = std.ArrayList(u8).init(allocator);
defer buffer.deinit();

try file_engine.writeDbMetrics(&buffer);
try db_engine.file_engine.writeDbMetrics(&buffer);
send("{s}", .{buffer.items});
state = .end;
},
@@ -161,9 +282,8 @@ pub fn main() !void {

.expect_path_to_db => switch (token.tag) {
.identifier => {
file_engine.deinit();
file_engine = try initFileEngine.init(allocator, try allocator.dupe(u8, toker.getTokenSlice(token)));
send("Successfully started using the database!", .{});
db_engine.deinit();
db_engine = DBEngine.init(allocator, try allocator.dupe(u8, toker.getTokenSlice(token)), null);
state = .end;
},
else => {
@@ -172,29 +292,11 @@ pub fn main() !void {
},
},

.expect_path_to_new_db => switch (token.tag) {
.identifier => {
file_engine.deinit();
file_engine = try FileEngine.init(allocator, try allocator.dupe(u8, toker.getTokenSlice(token)));
file_engine.checkAndCreateDirectories() catch |err| {
send("Error: Couldn't create database directories: {any}", .{err});
state = .end;
continue;
};
send("Successfully initialized the database!", .{});
state = .end;
},
else => {
send("Error: Expect a path to a folder.", .{});
state = .end;
},
},

.expect_query => switch (token.tag) {
.string_literal => {
const null_term_query_str = try allocator.dupeZ(u8, toker.buffer[token.loc.start + 1 .. token.loc.end - 1]);
defer allocator.free(null_term_query_str);
runQuery(null_term_query_str, &file_engine);
db_engine.runQuery(null_term_query_str);
state = .end;
},
.keyword_help => {
@@ -209,39 +311,30 @@ pub fn main() !void {

.expect_schema_command => switch (token.tag) {
.keyword_describe => {
if (std.mem.eql(u8, file_engine.path_to_ZipponDB_dir, "")) send("Error: No database selected. Please use db new or db use.", .{});

if (file_engine.null_terminated_schema_buff.len == 0) {
send("Need to init the schema first. Please use the schema init path/to/schema command to start.", .{});
} else {
send("Schema:\n {s}", .{file_engine.null_terminated_schema_buff});
}
if (db_engine.state == .MissingFileEngine) send("Error: No database selected. Please use 'db new' or 'db use'.", .{});
if (db_engine.state == .MissingSchemaEngine) send("Error: No schema in database. Please use 'schema init'.", .{});
send("Schema:\n {s}", .{db_engine.schema_engine.null_terminated_schema_buff});
state = .end;
},
.keyword_init => state = .expect_path_to_schema,
.keyword_init => {
if (db_engine.state == .MissingFileEngine) send("Error: No database selected. Please use 'db new' or 'db use'.", .{});
state = .expect_path_to_schema;
},
.keyword_help => {
send("{s}", .{HELP_MESSAGE.schema});
state = .end;
},
else => {
send("Error: schema commands available: describe, init & help", .{});
send("{s}", .{HELP_MESSAGE.schema});
state = .end;
},
},

.expect_path_to_schema => switch (token.tag) {
.identifier => {
file_engine.initDataFolder(toker.getTokenSlice(token)) catch |err| switch (err) {
error.SchemaFileNotFound => {
send("Couldn't find the schema file at {s}", .{toker.getTokenSlice(token)});
state = .end;
},
else => {
send("Error initializing the schema", .{});
state = .end;
},
};
send("Successfully initialized the database schema!", .{});
const main_path = try allocator.dupe(u8, db_engine.file_engine.path_to_ZipponDB_dir);
db_engine.deinit();
db_engine = DBEngine.init(allocator, main_path, toker.getTokenSlice(token));
state = .end;
},
else => {
@@ -262,84 +355,3 @@ pub fn main() !void {
}
}
}
pub fn runQuery(null_term_query_str: [:0]const u8, file_engine: *FileEngine) void {
var gpa = std.heap.GeneralPurposeAllocator(.{}){};
const allocator = gpa.allocator();

var toker = ziqlTokenizer.init(null_term_query_str);

var parser = ziqlParser.init(allocator, &toker, file_engine);

parser.parse() catch |err| {
log.err("Error parsing: {any}", .{err});
};

switch (gpa.deinit()) {
.ok => {},
.leak => std.log.debug("We fucked it up bro...\n", .{}),
}
}

/// Simple struct to manage the init of the FileEngine, mostly managing whether the env path is here and initializing the directories, etc.
const initFileEngine = struct {
fn init(allocator: std.mem.Allocator, potential_path: ?[]const u8) !FileEngine {
if (potential_path) |p| {
log_path = try std.fmt.bufPrint(&log_buff, "{s}/LOG/log", .{p});
log.info("Start using database path: {s}.", .{p});
return try initWithPath(allocator, p);
}

const path = utils.getEnvVariable(allocator, "ZIPPONDB_PATH");

if (path) |p| {
log_path = try std.fmt.bufPrint(&log_buff, "{s}/LOG/log", .{p});
log.info("Found environment variable ZIPPONDB_PATH: {s}.", .{p});
return try initWithPath(allocator, p);
} else {
log.info("No environment variable ZIPPONDB_PATH found.", .{});
return try FileEngine.init(allocator, "");
}
}

fn initWithPath(allocator: std.mem.Allocator, path: []const u8) !FileEngine {
try ensureDirectoryExists(path);
var file_engine = try FileEngine.init(allocator, path);
try file_engine.checkAndCreateDirectories();

if (!file_engine.isSchemaFileInDir()) {
try initSchema(allocator, &file_engine);
} else {
log.info("Database has a schema.", .{});
}

return file_engine;
}

fn ensureDirectoryExists(path: []const u8) !void {
_ = std.fs.cwd().openDir(path, .{}) catch |err| {
if (err == error.FileNotFound) {
log.info("{s} directory not found, creating it", .{path});
try std.fs.cwd().makeDir(path);
return;
} else {
return err;
}
};
}

fn initSchema(allocator: std.mem.Allocator, file_engine: *FileEngine) !void {
log.debug("Database doesn't have any schema. Checking if ZIPPONDB_SCHEMA env variable exists.", .{});
const schema = utils.getEnvVariable(allocator, "ZIPPONDB_SCHEMA");
defer if (schema) |s| allocator.free(s);

if (schema) |s| {
log.debug("Found environment variable ZIPPONDB_SCHEMA: {s}.", .{s});
file_engine.initDataFolder(s) catch {
log.warn("Couldn't use {s} as schema.\n", .{s});
};
} else {
log.debug("No environment variable ZIPPONDB_SCHEMA found.", .{});
}
}
};
149 src/schemaEngine.zig Normal file
@@ -0,0 +1,149 @@
const std = @import("std");
const Allocator = std.mem.Allocator;
const SchemaStruct = @import("schemaParser.zig").Parser.SchemaStruct;
const Parser = @import("schemaParser.zig").Parser;
const Tokenizer = @import("tokenizers/schema.zig").Tokenizer;
const ZipponError = @import("stuffs/errors.zig").ZipponError;
const dtype = @import("dtype");
const DataType = dtype.DataType;
const FileEngine = @import("fileEngine.zig").FileEngine;

const config = @import("config.zig");
const BUFFER_SIZE = config.BUFFER_SIZE;

const log = std.log.scoped(.fileEngine);

/// Manage everything that is related to the schema.
/// This includes keeping the schema and schema file in memory, and some functions to get things like all members of a specific struct.
pub const SchemaEngine = struct {
allocator: Allocator,
null_terminated_schema_buff: [:0]u8,
struct_array: []SchemaStruct,

// The path is the path to the schema file
pub fn init(allocator: Allocator, path: []const u8) ZipponError!SchemaEngine {
var schema_buf = allocator.alloc(u8, BUFFER_SIZE) catch return ZipponError.MemoryError;
defer allocator.free(schema_buf);

const len: usize = try FileEngine.readSchemaFile(path, schema_buf);
const null_terminated_schema_buff = allocator.dupeZ(u8, schema_buf[0..len]) catch return ZipponError.MemoryError;
errdefer allocator.free(null_terminated_schema_buff);

var toker = Tokenizer.init(null_terminated_schema_buff);
var parser = Parser.init(&toker, allocator);

var struct_array = std.ArrayList(SchemaStruct).init(allocator);
errdefer struct_array.deinit();
parser.parse(&struct_array) catch return ZipponError.SchemaNotConform;

return SchemaEngine{
.allocator = allocator,
.null_terminated_schema_buff = null_terminated_schema_buff,
.struct_array = struct_array.toOwnedSlice() catch return ZipponError.MemoryError,
};
}

pub fn deinit(self: *SchemaEngine) void {
for (self.struct_array) |*elem| elem.deinit();
self.allocator.free(self.struct_array);
self.allocator.free(self.null_terminated_schema_buff);
}

/// Get the type of the member
pub fn memberName2DataType(self: *SchemaEngine, struct_name: []const u8, member_name: []const u8) ZipponError!DataType {
var i: usize = 0;

for (try self.structName2structMembers(struct_name)) |mn| {
const dtypes = try self.structName2DataType(struct_name);
if (std.mem.eql(u8, mn, member_name)) return dtypes[i];
i += 1;
}

return ZipponError.MemberNotFound;
}

pub fn memberName2DataIndex(self: *SchemaEngine, struct_name: []const u8, member_name: []const u8) ZipponError!usize {
var i: usize = 0;

for (try self.structName2structMembers(struct_name)) |mn| {
if (std.mem.eql(u8, mn, member_name)) return i;
i += 1;
}

return ZipponError.MemberNotFound;
}

/// Get the list of all member names for a struct name
pub fn structName2structMembers(self: *SchemaEngine, struct_name: []const u8) ZipponError![][]const u8 {
var i: usize = 0;

while (i < self.struct_array.len) : (i += 1) if (std.mem.eql(u8, self.struct_array[i].name, struct_name)) break;

if (i == self.struct_array.len) {
return ZipponError.StructNotFound;
}

return self.struct_array[i].members;
}

pub fn structName2SchemaStruct(self: *SchemaEngine, struct_name: []const u8) ZipponError!SchemaStruct {
var i: usize = 0;

while (i < self.struct_array.len) : (i += 1) if (std.mem.eql(u8, self.struct_array[i].name, struct_name)) break;

if (i == self.struct_array.len) {
return ZipponError.StructNotFound;
}

return self.struct_array[i];
}

pub fn structName2DataType(self: *SchemaEngine, struct_name: []const u8) ZipponError![]const DataType {
var i: u16 = 0;

while (i < self.struct_array.len) : (i += 1) {
if (std.mem.eql(u8, self.struct_array[i].name, struct_name)) break;
}

if (i == self.struct_array.len and !std.mem.eql(u8, self.struct_array[i].name, struct_name)) {
return ZipponError.StructNotFound;
}

return self.struct_array[i].types;
}

/// Check if the name of a struct is in the current schema
pub fn isStructNameExists(self: *SchemaEngine, struct_name: []const u8) bool {
var i: u16 = 0;
while (i < self.struct_array.len) : (i += 1) if (std.mem.eql(u8, self.struct_array[i].name, struct_name)) return true;
return false;
}

/// Check if a struct has the member name
pub fn isMemberNameInStruct(self: *SchemaEngine, struct_name: []const u8, member_name: []const u8) ZipponError!bool {
for (try self.structName2structMembers(struct_name)) |mn| { // I do not return an error here because I should have already checked that the struct exists
if (std.mem.eql(u8, mn, member_name)) return true;
}
return false;
}

// Return true if the map has all the member names as keys and no more
pub fn checkIfAllMemberInMap(
self: *SchemaEngine,
struct_name: []const u8,
map: *std.StringHashMap([]const u8),
error_message_buffer: *std.ArrayList(u8),
) ZipponError!bool {
const all_struct_member = try self.structName2structMembers(struct_name);
var count: u16 = 0;

const writer = error_message_buffer.writer();

for (all_struct_member) |mn| {
if (std.mem.eql(u8, mn, "id")) continue;
if (map.contains(mn)) count += 1 else writer.print(" {s},", .{mn}) catch return ZipponError.WriteError;
}

return ((count == all_struct_member.len - 1) and (count == map.count()));
}
};
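For reference, a minimal usage sketch of the SchemaEngine API added above; the allocator, the schema path, and the User/name identifiers are hypothetical placeholders, not part of this commit.

// Hypothetical example of the new lookup helpers.
var schema_engine = try SchemaEngine.init(allocator, "my_db/schema");
defer schema_engine.deinit();

// List the members of a struct declared in the schema file.
for (try schema_engine.structName2structMembers("User")) |member_name| {
    std.debug.print("member: {s}\n", .{member_name});
}

// Resolve the DataType and column index of a single member.
const name_type = try schema_engine.memberName2DataType("User", "name");
const name_index = try schema_engine.memberName2DataIndex("User", "name");
_ = name_type;
_ = name_index;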
@@ -55,6 +55,7 @@ pub fn printError(message: []const u8, err: ZipponError, query: ?[]const u8, sta

var writer = buffer.writer();

writer.print("{{\"error\": \"", .{}) catch {};
writer.print("\n", .{}) catch {}; // Maybe use writeAll; not sure if it affects performance in any considerable way
writer.print("{s}\n", .{message}) catch {};

@@ -78,6 +79,7 @@ pub fn printError(message: []const u8, err: ZipponError, query: ?[]const u8, sta
}
writer.print(" \n", .{}) catch {}; // Align with the message
}
writer.print("\"}}", .{}) catch {};

// log.debug("Parsing error: {s}", .{buffer.items});
@@ -1,168 +0,0 @@
const std = @import("std");
const Loc = @import("shared/loc.zig").Loc;

pub const Token = struct {
tag: Tag,
loc: Loc,

pub const Tag = enum {
string_literal,
int_literal,
float_literal,
uuid_literal,
date_literal,
time_literal,
datetime_literal,
l_bracket, // [
r_bracket, // ]
};
};

pub const Tokenizer = struct {
buffer: [:0]const u8,
index: usize,

// Maybe change that to use the stream directly so I don't have to read the line 2 times
pub fn init(buffer: [:0]const u8) Tokenizer {
// Skip the UTF-8 BOM if present.
return .{
.buffer = buffer,
.index = if (std.mem.startsWith(u8, buffer, "\xEF\xBB\xBF")) 3 else 0, // I guess some OS adds that or something like that
};
}

const State = enum {
start,
string_literal,
float,
int,
uuid_literal,
date_literal,
time_literal,
};

pub fn getTokenSlice(self: *Tokenizer, token: Token) []const u8 {
return self.buffer[token.loc.start..token.loc.end];
}

pub fn next(self: *Tokenizer) Token {
// That's ugly but it works
if (self.buffer[self.index] == ';') self.index += 1; // Hardcoded delimiter
if (self.buffer[self.index] == ' ') self.index += 1; // Hardcoded delimiter

var state: State = .start;
var result: Token = .{
.tag = undefined,
.loc = .{
.start = self.index,
.end = undefined,
},
};
while (true) : (self.index += 1) {
const c = self.buffer[self.index];

if (self.index == self.buffer.len) break;

switch (state) {
.start => switch (c) {
'\'' => {
state = .string_literal;
result.tag = .string_literal;
},
'a'...'z' => {
state = .uuid_literal;
result.tag = .uuid_literal;
},

'0'...'9', '-' => {
state = .int;
result.tag = .int_literal;
},
'[' => {
result.tag = .l_bracket;
self.index += 1;
break;
},
']' => {
result.tag = .r_bracket;
self.index += 1;
break;
},
else => std.debug.print("Unknown character: {c}\n", .{c}),
},

.string_literal => switch (c) {
'\'' => {
self.index += 1;
break;
},
else => continue,
},

.int => switch (c) {
'.' => {
state = .float;
result.tag = .float_literal;
},
'a'...'z', '-' => {
state = .uuid_literal;
result.tag = .uuid_literal;
},
'/' => {
state = .date_literal;
result.tag = .date_literal;
},
':' => {
state = .time_literal;
result.tag = .time_literal;
},
'_', '0'...'9' => continue,
else => break,
},

.float => switch (c) {
'0'...'9' => {
continue;
},
else => {
break;
},
},

.date_literal => switch (c) {
'-' => {
state = .time_literal;
result.tag = .datetime_literal;
},
'0'...'9', '/' => continue,
else => break,
},

.time_literal => switch (c) {
'0'...'9', ':', '.' => continue,
else => break,
},

.uuid_literal => switch (c) {
'0'...'9', 'a'...'z', '-' => continue,
else => break,
},
}
}

result.loc.end = self.index;
return result;
}
};

test "Basics" {
try testTokenize("193 88.92 [ 123] 'hello mommy'", &.{ .int_literal, .float_literal, .l_bracket, .int_literal, .r_bracket });
}

fn testTokenize(source: [:0]const u8, expected_token_tags: []const Token.Tag) !void {
var tokenizer = Tokenizer.init(source);
for (expected_token_tags) |expected_token_tag| {
const token = tokenizer.next();
try std.testing.expectEqual(expected_token_tag, token.tag);
}
}
@@ -1,15 +1,12 @@
const std = @import("std");
const Allocator = std.mem.Allocator;
const FileEngine = @import("fileEngine.zig").FileEngine;
const SchemaEngine = @import("schemaEngine.zig").SchemaEngine;
const Tokenizer = @import("tokenizers/ziql.zig").Tokenizer;
const Token = @import("tokenizers/ziql.zig").Token;

const dtype = @import("dtype");
const s2t = dtype.s2t;
const UUID = dtype.UUID;
const AND = dtype.AND;
const OR = dtype.OR;
const DataType = dtype.DataType;

const Filter = @import("stuffs/filter.zig").Filter;
const Condition = @import("stuffs/filter.zig").Condition;
@@ -72,13 +69,15 @@ pub const Parser = struct {
allocator: Allocator,
toker: *Tokenizer,
file_engine: *FileEngine,
schema_engine: *SchemaEngine,

pub fn init(allocator: Allocator, toker: *Tokenizer, file_engine: *FileEngine) Parser {
pub fn init(allocator: Allocator, toker: *Tokenizer, file_engine: *FileEngine, schema_engine: *SchemaEngine) Parser {
// Do I need to init a FileEngine at each Parser? Can't I put it in the CLI parser instead?
return Parser{
.allocator = allocator,
.toker = toker,
.file_engine = file_engine,
.schema_engine = schema_engine,
};
}

@@ -89,9 +88,6 @@ pub const Parser = struct {
var struct_name: []const u8 = undefined;
var action: enum { GRAB, ADD, UPDATE, DELETE } = undefined;

var out_buff: [BUFFER_SIZE]u8 = undefined;
var fa = std.heap.FixedBufferAllocator.init(&out_buff);
defer fa.reset();
const out_allocator = self.allocator;

var token = self.toker.next();
@@ -137,7 +133,7 @@ pub const Parser = struct {
token.loc.start,
token.loc.end,
);
if (!self.file_engine.isStructNameExists(struct_name)) return printError(
if (!self.schema_engine.isStructNameExists(struct_name)) return printError(
"Error: struct name not found in schema.",
ZiQlParserError.StructNotFound,
self.toker.buffer,
@@ -189,7 +185,7 @@ pub const Parser = struct {
var buff = std.ArrayList(u8).init(out_allocator);
defer buff.deinit();

try self.file_engine.parseEntities(struct_name, filter, &buff.writer(), &additional_data);
try self.file_engine.parseEntities(struct_name, filter, &additional_data, &buff.writer());
send("{s}", .{buff.items});
state = .end;
},
@@ -197,7 +193,7 @@ pub const Parser = struct {
var buff = std.ArrayList(u8).init(out_allocator);
defer buff.deinit();

try self.file_engine.parseEntities(struct_name, null, &buff.writer(), &additional_data);
try self.file_engine.parseEntities(struct_name, null, &additional_data, &buff.writer());
send("{s}", .{buff.items});
state = .end;
},
@@ -334,7 +330,7 @@ pub const Parser = struct {
const error_message_buffer_writer = error_message_buffer.writer();
error_message_buffer_writer.writeAll("Error missing: ") catch return ZipponError.WriteError;

if (!(self.file_engine.checkIfAllMemberInMap(struct_name, &data_map, &error_message_buffer) catch {
if (!(self.schema_engine.checkIfAllMemberInMap(struct_name, &data_map, &error_message_buffer) catch {
return ZiQlParserError.StructNotFound;
})) {
_ = error_message_buffer.pop();
@@ -496,7 +492,7 @@ pub const Parser = struct {
}) switch (state) {
.expect_member => switch (token.tag) {
.identifier => {
if (!(self.file_engine.isMemberNameInStruct(struct_name, self.toker.getTokenSlice(token)) catch {
if (!(self.schema_engine.isMemberNameInStruct(struct_name, self.toker.getTokenSlice(token)) catch {
return printError(
"Error: Struct not found.",
ZiQlParserError.StructNotFound,
@@ -513,11 +509,11 @@ pub const Parser = struct {
token.loc.end,
);
}
condition.data_type = self.file_engine.memberName2DataType(
condition.data_type = self.schema_engine.memberName2DataType(
struct_name,
self.toker.getTokenSlice(token),
) catch return ZiQlParserError.MemberNotFound;
condition.data_index = self.file_engine.memberName2DataIndex(
condition.data_index = self.schema_engine.memberName2DataIndex(
struct_name,
self.toker.getTokenSlice(token),
) catch return ZiQlParserError.MemberNotFound;
@@ -749,7 +745,7 @@ pub const Parser = struct {

.expect_member => switch (token.tag) {
.identifier => {
if (!(self.file_engine.isMemberNameInStruct(struct_name, self.toker.getTokenSlice(token)) catch {
if (!(self.schema_engine.isMemberNameInStruct(struct_name, self.toker.getTokenSlice(token)) catch {
return printError(
"Struct not found.",
ZiQlParserError.StructNotFound,
@@ -770,7 +766,7 @@ pub const Parser = struct {
AdditionalDataMember.init(
self.allocator,
self.toker.getTokenSlice(token),
try self.file_engine.memberName2DataIndex(struct_name, self.toker.getTokenSlice(token)),
try self.schema_engine.memberName2DataIndex(struct_name, self.toker.getTokenSlice(token)),
),
) catch return ZipponError.MemoryError;

@@ -836,7 +832,7 @@ pub const Parser = struct {
.expect_member => switch (token.tag) {
.identifier => {
member_name = self.toker.getTokenSlice(token);
if (!(self.file_engine.isMemberNameInStruct(struct_name, member_name) catch {
if (!(self.schema_engine.isMemberNameInStruct(struct_name, member_name) catch {
return ZiQlParserError.StructNotFound;
})) return printError(
"Member not found in struct.",
@@ -869,7 +865,7 @@ pub const Parser = struct {
},

.expect_new_value => {
const data_type = self.file_engine.memberName2DataType(struct_name, member_name) catch return ZiQlParserError.StructNotFound;
const data_type = self.schema_engine.memberName2DataType(struct_name, member_name) catch return ZiQlParserError.StructNotFound;
const start_index = token.loc.start;

const expected_tag: ?Token.Tag = switch (data_type) {