Changed a bit how the memory works
Now SchemaEngine has an arena at the root of the file, so UUIDFileIndex can be as big as we want. The idea is nice; maybe I will use it for the FileEngine too.
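In short: all of the SchemaEngine's allocations now go through a single ArenaAllocator stored at file scope, so nothing has to be freed piece by piece and one deinit() releases everything, UUIDFileIndex included. A minimal sketch of the pattern (same names as in the diff below; the struct body is trimmed to the memory-related parts):

    const std = @import("std");
    const Allocator = std.mem.Allocator;

    // File-scoped arena: every allocation the engine makes goes through it,
    // so the UUID index can grow freely and a single deinit() frees it all.
    var arena: std.heap.ArenaAllocator = undefined;
    var allocator: Allocator = undefined;

    pub const SchemaEngine = struct {
        pub fn init() SchemaEngine {
            arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
            allocator = arena.allocator();
            // ... build struct_array, the UUIDFileIndex, etc. with `allocator` ...
            return SchemaEngine{};
        }

        pub fn deinit(_: SchemaEngine) void {
            arena.deinit(); // frees everything at once, including the UUIDFileIndex
        }
    };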
This commit is contained in:
parent 3f5c929a11
commit 23e8ed8709
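The FileEngine keeps its static buffers instead, with one twist visible in the first hunks below: the FixedBufferAllocator over parsing_buffer is now reset() at the start of each call rather than in a defer, so what was built in the buffer stays valid after the function returns and is only reclaimed by the next call. A minimal sketch of that pattern (buffer size shrunk here; parseSomething is a hypothetical stand-in for the real parse functions):

    const std = @import("std");

    var parsing_buffer: [4096]u8 = undefined; // OUT_BUFFER_SIZE in the real code

    fn parseSomething() !void {
        // Fresh FixedBufferAllocator over the same static buffer: the previous
        // call's allocations are simply overwritten, so nothing is freed on exit.
        var fa = std.heap.FixedBufferAllocator.init(&parsing_buffer);
        fa.reset(); // as in the commit: reset at entry instead of `defer fa.reset()`
        const allocator = fa.allocator();

        const scratch = try allocator.alloc(u8, 128);
        _ = scratch; // valid until the next call reuses the buffer
    }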
@@ -1,6 +1,6 @@
 pub const BUFFER_SIZE = 1024 * 10; // Used a bit everywhere, e.g. the size of the schema buffer. 10kB
 pub const OUT_BUFFER_SIZE = 1024 * 1024 * 16; // Mostly used in the FileEngine for parsing; basically the limit of what can be written to be sent. 16MB
-pub const MAX_FILE_SIZE = 1024 * 1024 * 64; // 64MB
+pub const MAX_FILE_SIZE = 1024 * 1024; // 1MB
 pub const CPU_CORE = 16;

 // Testing
@@ -29,8 +29,7 @@ const CPU_CORE = config.CPU_CORE;
 const log = std.log.scoped(.fileEngine);

 // I really like this: just some buffers in each file. That way I know EXACTLY how much memory I give the DB.
-var parsing_buffer: [OUT_BUFFER_SIZE]u8 = undefined;
+var parsing_buffer: [OUT_BUFFER_SIZE]u8 = undefined; // Maybe use an arena, but this is faster
 var path_buffer: [1024]u8 = undefined;
 var path_to_ZipponDB_dir_buffer: [1024]u8 = undefined;

@@ -191,7 +190,7 @@ pub const FileEngine = struct {
         map: *UUIDFileIndex,
     ) ZipponError!void {
         var fa = std.heap.FixedBufferAllocator.init(&parsing_buffer);
-        defer fa.reset();
+        fa.reset();
         const allocator = fa.allocator();

         const max_file_index = try self.maxFileIndex(sstruct.name);
@@ -386,7 +385,7 @@ pub const FileEngine = struct {
         writer: anytype,
     ) ZipponError!void {
         var fa = std.heap.FixedBufferAllocator.init(&parsing_buffer);
-        defer fa.reset();
+        fa.reset();
         const allocator = fa.allocator();

         const sstruct = try self.schema_engine.structName2SchemaStruct(struct_name);
@@ -411,10 +410,6 @@ pub const FileEngine = struct {
         // Do one array and writer for each thread, otherwise they create errors by writing at the same time
         // Maybe use a fixed-length buffer for speed here
         var thread_writer_list = allocator.alloc(std.ArrayList(u8), max_file_index + 1) catch return FileEngineError.MemoryError;
-        defer {
-            for (thread_writer_list) |list| list.deinit();
-            allocator.free(thread_writer_list);
-        }

         // Start parsing all files in multiple threads
         for (0..(max_file_index + 1)) |file_index| {
src/main.zig
@@ -33,9 +33,13 @@ const State = enum {
     end,
 };

-const log_allocator = std.heap.page_allocator;
+// Ends up using about 302kB of memory here
 var log_buff: [1024]u8 = undefined;
 var log_path: []const u8 = undefined;
+var date_buffer: [64]u8 = undefined;
+var date_fa = std.heap.FixedBufferAllocator.init(&date_buffer);
+const date_allocator = date_fa.allocator();

 var path_buffer: [1024]u8 = undefined;
 var line_buffer: [BUFFER_SIZE]u8 = undefined;
 var in_buffer: [BUFFER_SIZE]u8 = undefined;
@@ -46,6 +50,40 @@ pub const std_options = .{
     .logFn = myLog,
 };

+pub fn myLog(
+    comptime message_level: std.log.Level,
+    comptime scope: @Type(.EnumLiteral),
+    comptime format: []const u8,
+    args: anytype,
+) void {
+    const level_txt = comptime message_level.asText();
+    const prefix = if (scope == .default) " - " else "(" ++ @tagName(scope) ++ ") - ";
+
+    const potential_file: ?std.fs.File = std.fs.cwd().openFile(log_path, .{ .mode = .write_only }) catch null;
+
+    date_fa.reset();
+    const now = @import("dtype").DateTime.now();
+    var date_format_buffer = std.ArrayList(u8).init(date_allocator);
+    defer date_format_buffer.deinit();
+    now.format("YYYY/MM/DD-HH:mm:ss.SSSS", date_format_buffer.writer()) catch return;
+
+    if (potential_file) |file| {
+        file.seekFromEnd(0) catch return;
+        const writer = file.writer();
+
+        writer.print("{s}{s}Time: {s} - ", .{ level_txt, prefix, date_format_buffer.items }) catch return;
+        writer.print(format, args) catch return;
+        writer.writeByte('\n') catch return;
+        file.close();
+    } else {
+        const writer = std.io.getStdErr().writer();
+
+        writer.print("{s}{s}Time: {s} - ", .{ level_txt, prefix, date_format_buffer.items }) catch return;
+        writer.print(format, args) catch return;
+        writer.writeByte('\n') catch return;
+    }
+}
+
 const DBEngineState = enum { MissingFileEngine, MissingSchemaEngine, Ok, Init };

 pub const DBEngine = struct {
@@ -95,7 +133,6 @@ pub const DBEngine = struct {
         };
         self.file_engine.createStructDirectories(self.schema_engine.struct_array) catch |err| {
             log.err("Error when creating struct directories: {any}", .{err});
-            self.schema_engine.deinit();
             self.state = .MissingSchemaEngine;
             return self;
         };
@@ -118,7 +155,6 @@ pub const DBEngine = struct {
         };
         self.file_engine.createStructDirectories(self.schema_engine.struct_array) catch |err| {
             log.err("Error when creating struct directories: {any}", .{err});
-            self.schema_engine.deinit();
             self.state = .MissingSchemaEngine;
             return self;
         };
@@ -131,67 +167,18 @@ pub const DBEngine = struct {
         return self;
     }

-    pub fn deinit(self: *DBEngine) void {
-        if (self.state == .Ok) self.schema_engine.deinit();
-    }
-
     pub fn runQuery(self: *DBEngine, null_term_query_str: [:0]const u8) void {
-        var gpa = std.heap.GeneralPurposeAllocator(.{}){}; // Maybe use an arena here
-        const allocator = gpa.allocator();
-
         var toker = ziqlTokenizer.init(null_term_query_str);
-
-        var parser = ziqlParser.init(
-            allocator,
-            &toker,
-            &self.file_engine,
-            &self.schema_engine,
-        );
-
-        parser.parse() catch |err| {
-            log.err("Error parsing: {any}", .{err});
-        };
-
-        switch (gpa.deinit()) {
-            .ok => {},
-            .leak => std.log.debug("We fucked it up bro...\n", .{}),
-        }
+        var parser = ziqlParser.init(&toker, &self.file_engine, &self.schema_engine);
+        parser.parse() catch |err| log.err("Error parsing: {any}", .{err});
     }

+    pub fn deinit(self: *DBEngine) void {
+        self.thread_engine.deinit();
+        self.schema_engine.deinit();
+    }
 };

-pub fn myLog(
-    comptime message_level: std.log.Level,
-    comptime scope: @Type(.EnumLiteral),
-    comptime format: []const u8,
-    args: anytype,
-) void {
-    const level_txt = comptime message_level.asText();
-    const prefix = if (scope == .default) " - " else "(" ++ @tagName(scope) ++ ") - ";
-
-    const potential_file: ?std.fs.File = std.fs.cwd().openFile(log_path, .{ .mode = .write_only }) catch null;
-
-    const now = @import("dtype").DateTime.now();
-    var date_format_buffer = std.ArrayList(u8).init(log_allocator);
-    defer date_format_buffer.deinit();
-    now.format("YYYY/MM/DD-HH:mm:ss.SSSS", date_format_buffer.writer()) catch return;
-
-    if (potential_file) |file| {
-        file.seekFromEnd(0) catch return;
-        const writer = file.writer();
-
-        writer.print("{s}{s}Time: {s} - ", .{ level_txt, prefix, date_format_buffer.items }) catch return;
-        writer.print(format, args) catch return;
-        writer.writeByte('\n') catch return;
-        file.close();
-    } else {
-        const writer = std.io.getStdErr().writer();
-
-        writer.print("{s}{s}Time: {s} - ", .{ level_txt, prefix, date_format_buffer.items }) catch return;
-        writer.print(format, args) catch return;
-        writer.writeByte('\n') catch return;
-    }
-}
-
 // TODO: If an argument is given when starting the binary, it is the db path
 pub fn main() !void {
     var db_engine = DBEngine.init(null, null);
@@ -202,7 +189,7 @@ pub fn main() !void {

     while (true) {
-        fa.reset();
+        db_engine.thread_engine.reset();

         std.debug.print("> ", .{}); // TODO: Find something better than just std.debug.print
         const line = std.io.getStdIn().reader().readUntilDelimiterOrEof(&in_buffer, '\n') catch {
             log.debug("Command too long for buffer", .{});
@@ -14,12 +14,14 @@ const BUFFER_SIZE = config.BUFFER_SIZE;
 var schema_buffer: [BUFFER_SIZE]u8 = undefined;

+var arena: std.heap.ArenaAllocator = undefined;
+var allocator: Allocator = undefined;

 const log = std.log.scoped(.schemaEngine);

 // TODO: Make better memory management

 pub const SchemaStruct = struct {
     allocator: Allocator,
     name: []const u8,
     members: [][]const u8,
     types: []DataType,
@@ -28,7 +30,6 @@ pub const SchemaStruct = struct {
     uuid_file_index: *UUIDFileIndex, // Maps a UUID to the index of the file it is stored in

     pub fn init(
-        allocator: Allocator,
         name: []const u8,
         members: [][]const u8,
         types: []DataType,
@@ -37,26 +38,16 @@ pub const SchemaStruct = struct {
         const uuid_file_index = allocator.create(UUIDFileIndex) catch return ZipponError.MemoryError;
         uuid_file_index.* = UUIDFileIndex.init(allocator) catch return ZipponError.MemoryError;
         return SchemaStruct{
             .allocator = allocator,
             .name = name,
             .members = members,
             .types = types,
-            .zid_schema = SchemaStruct.fileDataSchema(allocator, types) catch return ZipponError.MemoryError,
+            .zid_schema = SchemaStruct.fileDataSchema(types) catch return ZipponError.MemoryError,
             .links = links,
             .uuid_file_index = uuid_file_index,
         };
     }

-    pub fn deinit(self: *SchemaStruct) void {
-        self.allocator.free(self.members);
-        self.allocator.free(self.types);
-        self.allocator.free(self.zid_schema);
-        self.links.deinit();
-        self.uuid_file_index.deinit();
-        self.allocator.destroy(self.uuid_file_index);
-    }
-
-    fn fileDataSchema(allocator: Allocator, dtypes: []DataType) ZipponError![]zid.DType {
+    fn fileDataSchema(dtypes: []DataType) ZipponError![]zid.DType {
         var schema = std.ArrayList(zid.DType).init(allocator);

         for (dtypes) |dt| {
@@ -88,13 +79,13 @@ pub const SchemaStruct = struct {
 /// This includes keeping the schema and schema file in memory, and some helpers, e.g. to get all members of a specific struct.
 /// For now it is a bit empty, but this is where I will manage migrations.
 pub const SchemaEngine = struct {
-    allocator: Allocator,
     struct_array: []SchemaStruct,
     null_terminated: [:0]u8,

     // The path is the path to the schema file
     pub fn init(path: []const u8, file_engine: *FileEngine) ZipponError!SchemaEngine {
-        const allocator = std.heap.page_allocator;
+        arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
+        allocator = arena.allocator();

         var buffer: [BUFFER_SIZE]u8 = undefined;

@@ -118,15 +109,13 @@
         }

         return SchemaEngine{
-            .allocator = allocator,
             .struct_array = struct_array.toOwnedSlice() catch return ZipponError.MemoryError,
             .null_terminated = null_terminated,
         };
     }

-    pub fn deinit(self: *SchemaEngine) void {
-        for (self.struct_array) |*elem| elem.deinit();
-        self.allocator.free(self.struct_array);
+    pub fn deinit(_: SchemaEngine) void {
+        arena.deinit();
     }

     /// Get the type of the member
@@ -43,16 +43,6 @@ pub const Parser = struct {
     var state: State = .expect_struct_name_OR_end;
     var keep_next = false;

-    errdefer {
-        for (0..struct_array.items.len) |i| {
-            struct_array.items[i].deinit();
-        }
-
-        for (0..struct_array.items.len) |_| {
-            _ = struct_array.pop();
-        }
-    }
-
     var member_token: Token = undefined;

     var name: []const u8 = undefined;
@@ -113,7 +103,6 @@
     .add_struct => {
         struct_array.append(try SchemaStruct.init(
-            self.allocator,
             name,
             member_list.toOwnedSlice() catch return SchemaParserError.MemoryError,
             type_list.toOwnedSlice() catch return SchemaParserError.MemoryError,
@@ -7,12 +7,13 @@ const Allocator = std.mem.Allocator;
 const ZipponError = @import("stuffs/errors.zig").ZipponError;
 const CPU_CORE = @import("config.zig").CPU_CORE;
-const BUFFER_SIZE = @import("config.zig").BUFFER_SIZE;
+const OUT_BUFFER_SIZE = @import("config.zig").OUT_BUFFER_SIZE;
 const log = std.log.scoped(.thread);

-var alloc_buff: [BUFFER_SIZE]u8 = undefined;
-var fa = std.heap.FixedBufferAllocator.init(&alloc_buff);
-const allocator = fa.allocator();
+const allocator = std.heap.page_allocator;
+
+var thread_arena: std.heap.ThreadSafeAllocator = undefined;
+var thread_pool: Pool = undefined;

 pub const ThreadSyncContext = struct {
     processed_struct: std.atomic.Value(u64) = std.atomic.Value(u64).init(0),
@@ -55,28 +56,26 @@ pub const ThreadSyncContext = struct {
 };

 pub const ThreadEngine = struct {
-    thread_arena: *std.heap.ThreadSafeAllocator = undefined,
-    thread_pool: *Pool = undefined,
+    thread_arena: *std.heap.ThreadSafeAllocator,
+    thread_pool: *Pool,

     pub fn init() ThreadEngine {
-        const thread_arena = allocator.create(std.heap.ThreadSafeAllocator) catch @panic("=(");
-        thread_arena.* = std.heap.ThreadSafeAllocator{
+        thread_arena = std.heap.ThreadSafeAllocator{
             .child_allocator = allocator,
         };

-        const thread_pool = allocator.create(Pool) catch @panic("=(");
-        thread_pool.*.init(std.Thread.Pool.Options{
+        thread_pool.init(std.Thread.Pool.Options{
             .allocator = thread_arena.allocator(),
             .n_jobs = CPU_CORE,
         }) catch @panic("=(");

         return ThreadEngine{
-            .thread_pool = thread_pool,
-            .thread_arena = thread_arena,
+            .thread_pool = &thread_pool,
+            .thread_arena = &thread_arena,
         };
     }

-    pub fn reset(_: ThreadEngine) void {
-        fa.reset();
+    pub fn deinit(_: ThreadEngine) void {
+        thread_pool.deinit();
     }
 };
@@ -20,8 +20,6 @@ const printError = @import("stuffs/utils.zig").printError;
 const ZiQlParserError = @import("stuffs/errors.zig").ZiQlParserError;
 const ZipponError = @import("stuffs/errors.zig").ZipponError;

-const BUFFER_SIZE = @import("config.zig").BUFFER_SIZE;
-
 const log = std.log.scoped(.ziqlParser);

 const State = enum {
@@ -66,16 +64,14 @@ const State = enum {
 };

 pub const Parser = struct {
-    allocator: Allocator,
     toker: *Tokenizer,
     file_engine: *FileEngine,
     schema_engine: *SchemaEngine,

     // TODO: Improve memory management, stop using an alloc in init maybe
-    pub fn init(allocator: Allocator, toker: *Tokenizer, file_engine: *FileEngine, schema_engine: *SchemaEngine) Parser {
+    pub fn init(toker: *Tokenizer, file_engine: *FileEngine, schema_engine: *SchemaEngine) Parser {
         // Do I need to init a FileEngine for each Parser? Can't I put it in the CLI parser instead?
         return Parser{
-            .allocator = allocator,
             .toker = toker,
             .file_engine = file_engine,
             .schema_engine = schema_engine,
@@ -83,14 +79,16 @@ pub const Parser = struct {
     }

     pub fn parse(self: Parser) ZipponError!void {
+        var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
+        defer arena.deinit();
+        const allocator = arena.allocator();
+
         var state: State = .start;
-        var additional_data = AdditionalData.init(self.allocator);
+        var additional_data = AdditionalData.init(allocator);
         defer additional_data.deinit();
         var struct_name: []const u8 = undefined;
         var action: enum { GRAB, ADD, UPDATE, DELETE } = undefined;

-        const out_allocator = self.allocator;
-
         var token = self.toker.next();
         var keep_next = false; // Used in the loop to avoid fetching the next token on continue; just set it to true, it is reset every iteration
@@ -169,7 +167,7 @@ pub const Parser = struct {
         },

         .parse_additional_data => {
-            try self.parseAdditionalData(&additional_data, struct_name);
+            try self.parseAdditionalData(allocator, &additional_data, struct_name);
             state = switch (action) {
                 .GRAB => .filter_and_send,
                 .UPDATE => .filter_and_update,
@@ -180,10 +178,10 @@ pub const Parser = struct {

         .filter_and_send => switch (token.tag) {
             .l_brace => {
-                var filter = try self.parseFilter(struct_name, false);
+                var filter = try self.parseFilter(allocator, struct_name, false);
                 defer filter.deinit();

-                var buff = std.ArrayList(u8).init(out_allocator);
+                var buff = std.ArrayList(u8).init(allocator);
                 defer buff.deinit();

                 try self.file_engine.parseEntities(struct_name, filter, &additional_data, &buff.writer());
@@ -191,7 +189,7 @@ pub const Parser = struct {
                 state = .end;
             },
             .eof => {
-                var buff = std.ArrayList(u8).init(out_allocator);
+                var buff = std.ArrayList(u8).init(allocator);
                 defer buff.deinit();

                 try self.file_engine.parseEntities(struct_name, null, &additional_data, &buff.writer());
@@ -210,7 +208,7 @@ pub const Parser = struct {
         // TODO: Optimize so it doesn't use parseFilter but parses the file and checks the condition directly. Here I end up parsing twice.
         .filter_and_update => switch (token.tag) {
             .l_brace => {
-                var filter = try self.parseFilter(struct_name, false);
+                var filter = try self.parseFilter(allocator, struct_name, false);
                 defer filter.deinit();

                 token = self.toker.last();
@@ -232,11 +230,11 @@ pub const Parser = struct {
                     token.loc.end,
                 );

-                var data_map = std.StringHashMap([]const u8).init(self.allocator);
+                var data_map = std.StringHashMap([]const u8).init(allocator);
                 defer data_map.deinit();
                 try self.parseNewData(&data_map, struct_name);

-                var buff = std.ArrayList(u8).init(out_allocator);
+                var buff = std.ArrayList(u8).init(allocator);
                 defer buff.deinit();

                 try self.file_engine.updateEntities(struct_name, filter, data_map, &buff.writer(), &additional_data);
@@ -253,11 +251,11 @@ pub const Parser = struct {
                     token.loc.end,
                 );

-                var data_map = std.StringHashMap([]const u8).init(self.allocator);
+                var data_map = std.StringHashMap([]const u8).init(allocator);
                 defer data_map.deinit();
                 try self.parseNewData(&data_map, struct_name);

-                var buff = std.ArrayList(u8).init(out_allocator);
+                var buff = std.ArrayList(u8).init(allocator);
                 defer buff.deinit();

                 try self.file_engine.updateEntities(struct_name, null, data_map, &buff.writer(), &additional_data);
@@ -275,10 +273,10 @@ pub const Parser = struct {

         .filter_and_delete => switch (token.tag) {
             .l_brace => {
-                var filter = try self.parseFilter(struct_name, false);
+                var filter = try self.parseFilter(allocator, struct_name, false);
                 defer filter.deinit();

-                var buff = std.ArrayList(u8).init(out_allocator);
+                var buff = std.ArrayList(u8).init(allocator);
                 defer buff.deinit();

                 try self.file_engine.deleteEntities(struct_name, filter, &buff.writer(), &additional_data);
@@ -286,7 +284,7 @@ pub const Parser = struct {
                 state = .end;
             },
             .eof => {
-                var buff = std.ArrayList(u8).init(out_allocator);
+                var buff = std.ArrayList(u8).init(allocator);
                 defer buff.deinit();

                 try self.file_engine.deleteEntities(struct_name, null, &buff.writer(), &additional_data);
@@ -317,11 +315,11 @@ pub const Parser = struct {
         },

         .parse_new_data_and_add_data => {
-            var data_map = std.StringHashMap([]const u8).init(self.allocator);
+            var data_map = std.StringHashMap([]const u8).init(allocator);
             defer data_map.deinit();
             try self.parseNewData(&data_map, struct_name);

-            var error_message_buffer = std.ArrayList(u8).init(self.allocator);
+            var error_message_buffer = std.ArrayList(u8).init(allocator);
             defer error_message_buffer.deinit();

             const error_message_buffer_writer = error_message_buffer.writer();
@@ -340,7 +338,7 @@ pub const Parser = struct {
                     token.loc.end,
                 );
             }
-            var buff = std.ArrayList(u8).init(out_allocator);
+            var buff = std.ArrayList(u8).init(allocator);
             defer buff.deinit();

             token = self.toker.last_token;
@@ -359,8 +357,8 @@ pub const Parser = struct {

     /// Take an array of UUID and populate it with what matches what is between {}
     /// "main" tells whether we are between {} or (); main is true if between {}, otherwise between () inside {}
-    fn parseFilter(self: Parser, struct_name: []const u8, is_sub: bool) ZipponError!Filter {
-        var filter = try Filter.init(self.allocator);
+    fn parseFilter(self: Parser, allocator: Allocator, struct_name: []const u8, is_sub: bool) ZipponError!Filter {
+        var filter = try Filter.init(allocator);
         errdefer filter.deinit();

         var keep_next = false;
@@ -400,14 +398,14 @@ pub const Parser = struct {
             }
         },
         .l_paren => {
-            var sub_filter = try self.parseFilter(struct_name, true);
+            var sub_filter = try self.parseFilter(allocator, struct_name, true);
             filter.addSubFilter(&sub_filter);
             token = self.toker.last();
             keep_next = true;
             state = .expect_ANDOR_OR_end;
         },
         .identifier => {
-            const condition = try self.parseCondition(&token, struct_name);
+            const condition = try self.parseCondition(allocator, &token, struct_name);
             try filter.addCondition(condition);
             token = self.toker.last();
             keep_next = true;
@@ -475,7 +473,7 @@ pub const Parser = struct {

     /// Parse to get a Condition, a struct used by the FileEngine to retrieve data.
     /// In the query, it is the part like: name = 'Bob' or age <= 10
-    fn parseCondition(self: Parser, token_ptr: *Token, struct_name: []const u8) ZipponError!Condition {
+    fn parseCondition(self: Parser, allocator: Allocator, token_ptr: *Token, struct_name: []const u8) ZipponError!Condition {
         var keep_next = false;
         var state: State = .expect_member;
         var token = token_ptr.*;
@@ -566,7 +564,7 @@ pub const Parser = struct {

         var filter: ?Filter = null;
         defer if (filter != null) filter.?.deinit();
-        var additional_data = AdditionalData.init(self.allocator);
+        var additional_data = AdditionalData.init(allocator);
         defer additional_data.deinit();

         if (expected_tag) |tag| {
@@ -612,10 +610,7 @@ pub const Parser = struct {
             .link, .link_array => {
                 switch (token.tag) {
                     .l_bracket => {
-                        try self.parseAdditionalData(
-                            &additional_data,
-                            struct_name,
-                        );
+                        try self.parseAdditionalData(allocator, &additional_data, struct_name);
                     },
                     .uuid_literal => {},
                     else => {},
@@ -625,7 +620,7 @@ pub const Parser = struct {

         switch (token.tag) {
             .l_brace => {
-                filter = try self.parseFilter(struct_name, false);
+                filter = try self.parseFilter(allocator, struct_name, false);
             },
             .uuid_literal => {},
             else => return printError(
@@ -650,8 +645,8 @@ pub const Parser = struct {
             .bool => condition.value = ConditionValue.initBool(self.toker.buffer[start_index..token.loc.end]),
             .link_array, .link => switch (token.tag) {
                 .l_brace, .l_bracket => {
-                    const map = self.allocator.create(std.AutoHashMap(UUID, void)) catch return ZipponError.MemoryError;
-                    map.* = std.AutoHashMap(UUID, void).init(self.allocator);
+                    const map = allocator.create(std.AutoHashMap(UUID, void)) catch return ZipponError.MemoryError;
+                    map.* = std.AutoHashMap(UUID, void).init(allocator);
                     try self.file_engine.populateVoidUUIDMap(
                         struct_name,
                         filter,
@@ -677,7 +672,13 @@ pub const Parser = struct {
             else => unreachable,
         };

+        // Check if the condition is valid
+        try self.checkConditionValidity(condition, token);
+
+        return condition;
+    }
+
+    /// Will check that what is compared is valid; e.g. comparing whether a string is superior to another string is not.
+    fn checkConditionValidity(self: Parser, condition: Condition, token: Token) ZipponError!void {
         switch (condition.operation) {
             .equal => switch (condition.data_type) {
                 .int, .float, .str, .bool, .link, .date, .time, .datetime => {},
@@ -758,13 +759,11 @@ pub const Parser = struct {

             else => unreachable,
         }
-
-        return condition;
     }

     /// When this function is called, the next token should be [
     /// Check if an int is here -> check if ; is here -> check if a member is here -> check if [ is here -> loop
-    fn parseAdditionalData(self: Parser, additional_data: *AdditionalData, struct_name: []const u8) ZipponError!void {
+    fn parseAdditionalData(self: Parser, allocator: Allocator, additional_data: *AdditionalData, struct_name: []const u8) ZipponError!void {
         var token = self.toker.next();
         var keep_next = false;
         var state: State = .expect_count_of_entity_to_find;
@@ -826,7 +825,7 @@ pub const Parser = struct {
             }
             additional_data.member_to_find.append(
                 AdditionalDataMember.init(
-                    self.allocator,
+                    allocator,
                     self.toker.getTokenSlice(token),
                     try self.schema_engine.memberName2DataIndex(struct_name, self.toker.getTokenSlice(token)),
                 ),
@@ -848,6 +847,7 @@ pub const Parser = struct {
             .r_bracket => state = .end,
             .l_bracket => {
                 try self.parseAdditionalData(
+                    allocator,
                     &additional_data.member_to_find.items[additional_data.member_to_find.items.len - 1].additional_data,
                     struct_name,
                 );
@@ -973,12 +973,10 @@ pub const Parser = struct {
             }

             switch (data_type) {
-                .str => member_map.put(member_name, self.toker.buffer[start_index + 1 .. token.loc.end - 1]) catch return ZipponError.MemoryError,
+                .str => member_map.put(member_name, self.toker.buffer[start_index + 1 .. token.loc.end - 1]) catch return ZipponError.MemoryError, // To remove the ' on each side
                 else => member_map.put(member_name, self.toker.buffer[start_index..token.loc.end]) catch return ZipponError.MemoryError,
             }
-        } else {
-            // Handle bool and bool array
-            switch (data_type) {
+        } else switch (data_type) {
             .bool => {
                 switch (token.tag) {
                     .bool_literal_true => {
@@ -1042,7 +1040,6 @@ pub const Parser = struct {
                 .link_array => {},
                 else => unreachable,
             }
-        }

             state = .expect_comma_OR_end;
         },
@@ -1065,8 +1062,6 @@ pub const Parser = struct {
         };
     }

-    // Utils
-
     /// Check that all tokens in an array are of one specific type
     fn checkTokensInArray(self: Parser, tag: Token.Tag) ZipponError!Token {
         var token = self.toker.next();
@@ -1160,14 +1155,12 @@ const DBEngine = @import("main.zig").DBEngine;

 fn testParsing(source: [:0]const u8) !void {
     const TEST_DATA_DIR = @import("config.zig").TEST_DATA_DIR;
-    const allocator = std.testing.allocator;

     var db_engine = DBEngine.init(TEST_DATA_DIR, null);
     defer db_engine.deinit();

     var toker = Tokenizer.init(source);
     var parser = Parser.init(
-        allocator,
         &toker,
         &db_engine.file_engine,
         &db_engine.schema_engine,
@@ -1178,14 +1171,12 @@ fn testParsing(source: [:0]const u8) !void {

 fn expectParsingError(source: [:0]const u8, err: ZiQlParserError) !void {
     const TEST_DATA_DIR = @import("config.zig").TEST_DATA_DIR;
-    const allocator = std.testing.allocator;

     var db_engine = DBEngine.init(TEST_DATA_DIR, null);
     defer db_engine.deinit();

     var toker = Tokenizer.init(source);
     var parser = Parser.init(
-        allocator,
         &toker,
         &db_engine.file_engine,
         &db_engine.schema_engine,
@@ -1205,20 +1196,22 @@ test "Parse filter" {

 fn testParseFilter(source: [:0]const u8) !void {
     const TEST_DATA_DIR = @import("config.zig").TEST_DATA_DIR;
-    const allocator = std.testing.allocator;
+
+    var arena = std.heap.ArenaAllocator.init(std.testing.allocator);
+    defer arena.deinit();
+    const allocator = arena.allocator();

     var db_engine = DBEngine.init(TEST_DATA_DIR, null);
     defer db_engine.deinit();

     var toker = Tokenizer.init(source);
     var parser = Parser.init(
-        allocator,
         &toker,
         &db_engine.file_engine,
         &db_engine.schema_engine,
     );

-    var filter = try parser.parseFilter("User", false);
+    var filter = try parser.parseFilter(allocator, "User", false);
     defer filter.deinit();
     std.debug.print("{s}\n", .{source});
     filter.debugPrint();