Moved stuff into a schema dir and created empty dirs
parent 83ff27c3f2
commit 1c8384dec6
0    src/cli/core.zig         Normal file
0    src/cli/engine.zig       Normal file
0    src/cli/parser.zig       Normal file
0    src/cli/tokenizer.zig    Normal file
@@ -1,6 +1,6 @@
const std = @import("std");
const Pool = std.Thread.Pool;
const SchemaEngine = @import("../schemaEngine.zig");
const SchemaEngine = @import("../schema/core.zig");

const ZipponError = @import("error").ZipponError;
const log = std.log.scoped(.fileEngine);
@@ -11,6 +11,8 @@ var path_to_ZipponDB_dir_buffer: [1024]u8 = undefined;
/// Or even get stats, whatever. If it touch files, it's here
pub const Self = @This();

// This basically expend the file with other
// So I can define function in other file for the same struct
pub usingnamespace @import("utils.zig");
pub usingnamespace @import("directory.zig");
pub usingnamespace @import("read.zig");

@@ -4,7 +4,7 @@ const utils = @import("../utils.zig");
const zid = @import("ZipponData");
const Self = @import("core.zig").Self;
const ZipponError = @import("error").ZipponError;
const SchemaStruct = @import("../schemaEngine.zig").SchemaStruct;
const SchemaStruct = @import("../schema/struct.zig");

var path_buffer: [1024]u8 = undefined;

@@ -54,7 +54,7 @@ pub fn createMainDirectories(self: *Self) ZipponError!void {
/// Request a path to a schema file and then create the struct folder
/// TODO: Check if some data already exist and if so ask if the user want to delete it and make a backup
pub fn createStructDirectories(self: *Self, struct_array: []SchemaStruct) ZipponError!void {
var data_dir = try utils.printOpenDir("{s}/DATA", .{self.path_to_ZipponDB_dir}, .{});
var data_dir = try self.printOpenDir("{s}/DATA", .{self.path_to_ZipponDB_dir}, .{});
defer data_dir.close();

for (struct_array) |schema_struct| {

@@ -29,7 +29,7 @@ pub fn dumpDb(self: Self, parent_allocator: Allocator, path: []const u8, format:
var writer = std.io.bufferedWriter(file.writer());
EntityWriter.writeHeaderCsv(writer.writer(), sstruct.members, ';') catch return ZipponError.WriteError;

const struct_dir = try utils.printOpenDir("{s}/DATA/{s}", .{ self.path_to_ZipponDB_dir, sstruct.name }, .{});
const struct_dir = try self.printOpenDir("{s}/DATA/{s}", .{ self.path_to_ZipponDB_dir, sstruct.name }, .{});

const file_indexs = try self.allFileIndex(allocator, sstruct.name);
for (file_indexs) |file_index| {

@@ -6,7 +6,7 @@ const Allocator = std.mem.Allocator;
const Self = @import("core.zig").Self;
const ZipponError = @import("error").ZipponError;

const SchemaStruct = @import("../schemaEngine.zig").SchemaStruct;
const SchemaStruct = @import("../schema/struct.zig");
const Filter = @import("../dataStructure/filter.zig").Filter;
const AdditionalData = @import("../dataStructure/additionalData.zig");
const RelationMap = @import("../dataStructure/relationMap.zig");
@@ -35,7 +35,7 @@ pub fn getNumberOfEntity(self: *Self, struct_name: []const u8) ZipponError!usize
const max_file_index = try self.maxFileIndex(sstruct.name);
var count: usize = 0;

const dir = try utils.printOpenDir("{s}/DATA/{s}", .{ self.path_to_ZipponDB_dir, sstruct.name }, .{});
const dir = try self.printOpenDir("{s}/DATA/{s}", .{ self.path_to_ZipponDB_dir, sstruct.name }, .{});

for (0..(max_file_index + 1)) |i| {
const path_buff = std.fmt.bufPrint(&path_buffer, "{d}.zid", .{i}) catch return ZipponError.MemoryError;
@@ -62,7 +62,7 @@ pub fn populateFileIndexUUIDMap(

const max_file_index = try self.maxFileIndex(sstruct.name);

const dir = try utils.printOpenDir("{s}/DATA/{s}", .{ self.path_to_ZipponDB_dir, sstruct.name }, .{});
const dir = try self.printOpenDir("{s}/DATA/{s}", .{ self.path_to_ZipponDB_dir, sstruct.name }, .{});

// Multi-threading setup
var sync_context = ThreadSyncContext.init(
@@ -93,9 +93,7 @@ pub fn populateFileIndexUUIDMap(
}

// Wait for all threads to complete
while (!sync_context.isComplete()) {
std.time.sleep(10_000_000);
}
while (!sync_context.isComplete()) std.time.sleep(10_000_000);

// Combine results
for (thread_writer_list, 0..) |list, file_index| {
@@ -155,7 +153,7 @@ pub fn populateVoidUUIDMap(
const sstruct = try self.schema_engine.structName2SchemaStruct(struct_name);
const max_file_index = try self.maxFileIndex(sstruct.name);

const dir = try utils.printOpenDir("{s}/DATA/{s}", .{ self.path_to_ZipponDB_dir, sstruct.name }, .{});
const dir = try self.printOpenDir("{s}/DATA/{s}", .{ self.path_to_ZipponDB_dir, sstruct.name }, .{});

// Multi-threading setup
var sync_context = ThreadSyncContext.init(
@@ -183,9 +181,7 @@ pub fn populateVoidUUIDMap(
}

// Wait for all threads to complete
while (!sync_context.isComplete()) {
std.time.sleep(10_000_000);
}
while (!sync_context.isComplete()) std.time.sleep(10_000_000);

// Combine results
for (thread_writer_list) |list| {
@@ -276,7 +272,7 @@ pub fn parseEntities(
const relation_maps = try self.schema_engine.relationMapArrayInit(allocator, struct_name, additional_data.*);

// Open the dir that contain all files
const dir = try utils.printOpenDir("{s}/DATA/{s}", .{ self.path_to_ZipponDB_dir, sstruct.name }, .{ .access_sub_paths = false });
const dir = try self.printOpenDir("{s}/DATA/{s}", .{ self.path_to_ZipponDB_dir, sstruct.name }, .{ .access_sub_paths = false });

// Multi thread stuffs
var sync_context = ThreadSyncContext.init(
@@ -307,9 +303,7 @@ pub fn parseEntities(
}

// Wait for all thread to either finish or return an error
while (!sync_context.isComplete()) {
std.time.sleep(100_000); // Check every 0.1ms
}
while (!sync_context.isComplete()) std.time.sleep(100_000); // Check every 0.1ms

// Append all writer to each other
writer.writeByte('[') catch return ZipponError.WriteError;
@@ -414,7 +408,7 @@ pub fn parseEntitiesRelationMap(
}

// Open the dir that contain all files
const dir = try utils.printOpenDir(
const dir = try self.printOpenDir(
"{s}/DATA/{s}",
.{ self.path_to_ZipponDB_dir, sstruct.name },
.{ .access_sub_paths = false },
@@ -448,9 +442,7 @@ pub fn parseEntitiesRelationMap(
}

// Wait for all thread to either finish or return an error
while (!sync_context.isComplete()) {
std.time.sleep(100_000); // Check every 0.1ms
}
while (!sync_context.isComplete()) std.time.sleep(100_000); // Check every 0.1ms

// Now here I should have a list of copy of the map with all UUID a bit everywhere

@@ -130,7 +130,7 @@ pub fn orderedNewData(
/// Get the index of the first file that is bellow the size limit. If not found, create a new file
/// TODO: Need some serious speed up. I should keep in memory a file->size as a hashmap and use that instead
pub fn getFirstUsableIndexFile(self: Self, struct_name: []const u8) ZipponError!usize {
var member_dir = try utils.printOpenDir("{s}/DATA/{s}", .{ self.path_to_ZipponDB_dir, struct_name }, .{ .iterate = true });
var member_dir = try self.printOpenDir("{s}/DATA/{s}", .{ self.path_to_ZipponDB_dir, struct_name }, .{ .iterate = true });
defer member_dir.close();

var i: usize = 0;
@@ -156,7 +156,7 @@ pub fn getFirstUsableIndexFile(self: Self, struct_name: []const u8) ZipponError!
/// FIXME: I use 0..file_index but because now I delete empty file, I can end up trying to parse an empty file. So I need to delete that
/// And do something that return a list of file to parse instead
pub fn maxFileIndex(self: Self, struct_name: []const u8) ZipponError!usize {
var dir = try utils.printOpenDir("{s}/DATA/{s}", .{ self.path_to_ZipponDB_dir, struct_name }, .{ .iterate = true });
var dir = try self.printOpenDir("{s}/DATA/{s}", .{ self.path_to_ZipponDB_dir, struct_name }, .{ .iterate = true });
defer dir.close();

var count: usize = 0;
@@ -170,7 +170,7 @@ pub fn maxFileIndex(self: Self, struct_name: []const u8) ZipponError!usize {
}

pub fn allFileIndex(self: Self, allocator: Allocator, struct_name: []const u8) ZipponError![]usize {
var dir = try utils.printOpenDir("{s}/DATA/{s}", .{ self.path_to_ZipponDB_dir, struct_name }, .{ .iterate = true });
var dir = try self.printOpenDir("{s}/DATA/{s}", .{ self.path_to_ZipponDB_dir, struct_name }, .{ .iterate = true });
defer dir.close();

var array = std.ArrayList(usize).init(allocator);
@@ -185,7 +185,7 @@ pub fn allFileIndex(self: Self, allocator: Allocator, struct_name: []const u8) Z
}

pub fn isSchemaFileInDir(self: *Self) bool {
_ = utils.printOpenFile("{s}/schema", .{self.path_to_ZipponDB_dir}, .{}) catch return false;
_ = self.printOpenFile("{s}/schema", .{self.path_to_ZipponDB_dir}, .{}) catch return false;
return true;
}

@@ -202,3 +202,13 @@ pub fn writeSchemaFile(self: *Self, null_terminated_schema_buff: [:0]const u8) Z
defer file.close();
file.writeAll(null_terminated_schema_buff) catch return ZipponError.WriteError;
}

pub fn printOpenDir(_: Self, comptime format: []const u8, args: anytype, options: std.fs.Dir.OpenDirOptions) ZipponError!std.fs.Dir {
const path = std.fmt.bufPrint(&path_buffer, format, args) catch return ZipponError.CantOpenDir;
return std.fs.cwd().openDir(path, options) catch ZipponError.CantOpenDir;
}

pub fn printOpenFile(_: Self, comptime format: []const u8, args: anytype, options: std.fs.File.OpenFlags) ZipponError!std.fs.File {
const path = std.fmt.bufPrint(&path_buffer, format, args) catch return ZipponError.CantOpenDir;
return std.fs.cwd().openFile(path, options) catch ZipponError.CantOpenFile;
}

@@ -6,12 +6,12 @@ const Allocator = std.mem.Allocator;
const Self = @import("core.zig").Self;
const ZipponError = @import("error").ZipponError;

const SchemaStruct = @import("../schemaEngine.zig").SchemaStruct;
const SchemaStruct = @import("../schema/struct.zig");
const Filter = @import("../dataStructure/filter.zig").Filter;
const ConditionValue = @import("../dataStructure/filter.zig").ConditionValue;
const AdditionalData = @import("../dataStructure/additionalData.zig");
const RelationMap = @import("../dataStructure/relationMap.zig");
const JsonString = @import("../dataStructure/relationMap.zig").JsonString;
const JsonString = RelationMap.JsonString;
const EntityWriter = @import("../entityWriter.zig");
const ThreadSyncContext = @import("../thread/context.zig");

@@ -78,7 +78,7 @@ pub fn updateEntities(
const sstruct = try self.schema_engine.structName2SchemaStruct(struct_name);
const max_file_index = try self.maxFileIndex(sstruct.name);

const dir = try utils.printOpenDir("{s}/DATA/{s}", .{ self.path_to_ZipponDB_dir, sstruct.name }, .{});
const dir = try self.printOpenDir("{s}/DATA/{s}", .{ self.path_to_ZipponDB_dir, sstruct.name }, .{});

// Multi-threading setup
var sync_context = ThreadSyncContext.init(
@@ -115,9 +115,7 @@ pub fn updateEntities(
}

// Wait for all threads to complete
while (!sync_context.isComplete()) {
std.time.sleep(100_000); // Check every 0.1ms
}
while (!sync_context.isComplete()) std.time.sleep(100_000); // Check every 0.1ms

// Combine results
writer.writeByte('[') catch return ZipponError.WriteError;
@@ -243,7 +241,7 @@ pub fn deleteEntities(
const sstruct = try self.schema_engine.structName2SchemaStruct(struct_name);
const max_file_index = try self.maxFileIndex(sstruct.name);

const dir = try utils.printOpenDir("{s}/DATA/{s}", .{ self.path_to_ZipponDB_dir, sstruct.name }, .{});
const dir = try self.printOpenDir("{s}/DATA/{s}", .{ self.path_to_ZipponDB_dir, sstruct.name }, .{});

// Multi-threading setup
var sync_context = ThreadSyncContext.init(
@@ -270,9 +268,7 @@ pub fn deleteEntities(
}

// Wait for all threads to complete
while (!sync_context.isComplete()) {
std.time.sleep(100_000); // Check every 0.1ms
}
while (!sync_context.isComplete()) std.time.sleep(100_000); // Check every 0.1ms

// Combine results
writer.writeByte('[') catch return ZipponError.WriteError;

@@ -4,8 +4,8 @@ const send = utils.send;
const Allocator = std.mem.Allocator;
const Pool = std.Thread.Pool;

const FileEngine = @import("fileEngine/core.zig");
const SchemaEngine = @import("schemaEngine.zig");
const FileEngine = @import("file/core.zig");
const SchemaEngine = @import("schema/core.zig");
const ThreadEngine = @import("thread/engine.zig");

const cliTokenizer = @import("tokenizers/cli.zig").Tokenizer;

62   src/schema/core.zig      Normal file
@@ -0,0 +1,62 @@
const std = @import("std");
const config = @import("config");
const Allocator = std.mem.Allocator;
const SchemaStruct = @import("struct.zig");
const FileEngine = @import("../file/core.zig");
const Tokenizer = @import("tokenizer.zig").Tokenizer;
const Parser = @import("parser.zig").Parser;

const ZipponError = @import("error").ZipponError;
const log = std.log.scoped(.schemaEngine);

/// Manage everything that is relate to the schema
/// This include keeping in memory the schema and schema file, and some functions to get like all members of a specific struct.
pub const Self = @This();

var arena: std.heap.ArenaAllocator = undefined;
pub var allocator: Allocator = undefined;
var schema_buffer: [config.BUFFER_SIZE]u8 = undefined;

pub usingnamespace @import("utils.zig");

struct_array: []SchemaStruct,
null_terminated: [:0]u8,

pub fn init(path: []const u8, file_engine: *FileEngine) ZipponError!Self {
arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
allocator = arena.allocator();

var buffer: [config.BUFFER_SIZE]u8 = undefined;

log.debug("Trying to init a SchemaEngine with path {s}", .{path});
const len: usize = try FileEngine.readSchemaFile(path, &buffer);
const null_terminated = std.fmt.bufPrintZ(&schema_buffer, "{s}", .{buffer[0..len]}) catch return ZipponError.MemoryError;

var toker = Tokenizer.init(null_terminated);
var parser = Parser.init(&toker, allocator);

var struct_array = std.ArrayList(SchemaStruct).init(allocator);
errdefer struct_array.deinit();
parser.parse(&struct_array) catch return ZipponError.SchemaNotConform;

log.debug("SchemaEngine init with {d} SchemaStruct.", .{struct_array.items.len});

for (struct_array.items) |sstruct| {
file_engine.populateFileIndexUUIDMap(sstruct, sstruct.uuid_file_index) catch |err| {
log.err("Error populate file index UUID map {any}", .{err});
};
}

return Self{
.struct_array = struct_array.toOwnedSlice() catch return ZipponError.MemoryError,
.null_terminated = null_terminated,
};
}

pub fn deinit(_: Self) void {
arena.deinit();
}

pub fn getAllocator() Allocator {
return allocator;
}

0    src/schema/migration.zig Normal file
@@ -1,14 +1,14 @@
const std = @import("std");
const zid = @import("ZipponData");
const SchemaStruct = @import("schemaEngine.zig").SchemaStruct;
const SchemaStruct = @import("struct.zig");
const Allocator = std.mem.Allocator;
const DataType = @import("dtype").DataType;
const UUID = @import("dtype").UUID;
const Toker = @import("tokenizers/schema.zig").Tokenizer;
const Token = @import("tokenizers/schema.zig").Token;
const Loc = @import("tokenizers/shared/loc.zig").Loc;
const send = @import("utils.zig").send;
const printError = @import("utils.zig").printError;
const Toker = @import("tokenizer.zig").Tokenizer;
const Token = @import("tokenizer.zig").Token;
const Loc = @import("../dataStructure/loc.zig");
const send = @import("../utils.zig").send;
const printError = @import("../utils.zig").printError;

const ZipponError = @import("error").ZipponError;

@@ -27,6 +27,7 @@ const State = enum {
};

pub const Parser = @This();

toker: *Toker,
allocator: Allocator,

@@ -104,6 +105,7 @@ pub fn parse(self: *Parser, struct_array: *std.ArrayList(SchemaStruct)) !void {

.add_struct => {
struct_array.append(try SchemaStruct.init(
struct_array.allocator,
name,
member_list.toOwnedSlice() catch return ZipponError.MemoryError,
type_list.toOwnedSlice() catch return ZipponError.MemoryError,

64   src/schema/struct.zig    Normal file
@@ -0,0 +1,64 @@
const std = @import("std");
const dtype = @import("dtype");
const zid = @import("ZipponData");
const Allocator = std.mem.Allocator;
const DataType = dtype.DataType;
const UUIDFileIndex = @import("../dataStructure/UUIDFileIndex.zig");

const ZipponError = @import("error").ZipponError;

/// Represent one struct in the schema
pub const Self = @This();

name: []const u8,
members: [][]const u8,
types: []DataType,
zid_schema: []zid.DType,
links: std.StringHashMap([]const u8), // Map key as member_name and value as struct_name of the link
uuid_file_index: *UUIDFileIndex, // Map UUID to the index of the file store in

pub fn init(
allocator: Allocator,
name: []const u8,
members: [][]const u8,
types: []DataType,
links: std.StringHashMap([]const u8),
) ZipponError!Self {
const uuid_file_index = allocator.create(UUIDFileIndex) catch return ZipponError.MemoryError;
uuid_file_index.* = UUIDFileIndex.init(allocator) catch return ZipponError.MemoryError;
return Self{
.name = name,
.members = members,
.types = types,
.zid_schema = Self.fileDataSchema(allocator, types) catch return ZipponError.MemoryError,
.links = links,
.uuid_file_index = uuid_file_index,
};
}

fn fileDataSchema(allocator: Allocator, dtypes: []DataType) ZipponError![]zid.DType {
var schema = std.ArrayList(zid.DType).init(allocator);

for (dtypes) |dt| {
schema.append(switch (dt) {
.int => .Int,
.float => .Float,
.str => .Str,
.bool => .Bool,
.link => .UUID,
.self => .UUID,
.date => .Unix,
.time => .Unix,
.datetime => .Unix,
.int_array => .IntArray,
.float_array => .FloatArray,
.str_array => .StrArray,
.bool_array => .BoolArray,
.date_array => .UnixArray,
.time_array => .UnixArray,
.datetime_array => .UnixArray,
.link_array => .UUIDArray,
}) catch return ZipponError.MemoryError;
}
return schema.toOwnedSlice() catch return ZipponError.MemoryError;
}

@@ -1,6 +1,6 @@
// From https://github.com/ziglang/zig/blob/master/lib/std/zig/tokenizer.zig
const std = @import("std");
const Loc = @import("shared/loc.zig").Loc;
const Loc = @import("../dataStructure/loc.zig");

pub const Token = struct {
tag: Tag,

168  src/schema/utils.zig     Normal file
@@ -0,0 +1,168 @@
pub const Self = @import("core.zig");
const std = @import("std");
const dtype = @import("dtype");
const Allocator = std.mem.Allocator;
const UUID = dtype.UUID;
const DataType = dtype.DataType;
const SchemaStruct = @import("struct.zig");
const ConditionValue = @import("../dataStructure/filter.zig").ConditionValue;
const AdditionalData = @import("../dataStructure/additionalData.zig");
const RelationMap = @import("../dataStructure/relationMap.zig");
const JsonString = RelationMap.JsonString;

const ZipponError = @import("error").ZipponError;

pub fn memberName2DataType(self: *Self, struct_name: []const u8, member_name: []const u8) ZipponError!DataType {
for (try self.structName2structMembers(struct_name), 0..) |mn, i| {
const dtypes = try self.structName2DataType(struct_name);
if (std.mem.eql(u8, mn, member_name)) return dtypes[i];
}

return ZipponError.MemberNotFound;
}

pub fn memberName2DataIndex(self: *Self, struct_name: []const u8, member_name: []const u8) ZipponError!usize {
for (try self.structName2structMembers(struct_name), 0..) |mn, i| {
if (std.mem.eql(u8, mn, member_name)) return i;
}

return ZipponError.MemberNotFound;
}

/// Get the list of all member name for a struct name
pub fn structName2structMembers(self: Self, struct_name: []const u8) ZipponError![][]const u8 {
var i: usize = 0;

while (i < self.struct_array.len) : (i += 1) if (std.mem.eql(u8, self.struct_array[i].name, struct_name)) break;

if (i == self.struct_array.len) {
return ZipponError.StructNotFound;
}

return self.struct_array[i].members;
}

pub fn structName2SchemaStruct(self: Self, struct_name: []const u8) ZipponError!SchemaStruct {
var i: usize = 0;

while (i < self.struct_array.len) : (i += 1) if (std.mem.eql(u8, self.struct_array[i].name, struct_name)) break;

if (i == self.struct_array.len) {
return ZipponError.StructNotFound;
}

return self.struct_array[i];
}

pub fn structName2DataType(self: Self, struct_name: []const u8) ZipponError![]const DataType {
var i: u16 = 0;

while (i < self.struct_array.len) : (i += 1) {
if (std.mem.eql(u8, self.struct_array[i].name, struct_name)) break;
}

if (i == self.struct_array.len and !std.mem.eql(u8, self.struct_array[i].name, struct_name)) {
return ZipponError.StructNotFound;
}

return self.struct_array[i].types;
}

/// Chech if the name of a struct is in the current schema
pub fn isStructNameExists(self: Self, struct_name: []const u8) bool {
var i: u16 = 0;
while (i < self.struct_array.len) : (i += 1) if (std.mem.eql(u8, self.struct_array[i].name, struct_name)) return true;
return false;
}

/// Check if a struct have the member name
pub fn isMemberNameInStruct(self: Self, struct_name: []const u8, member_name: []const u8) ZipponError!bool {
for (try self.structName2structMembers(struct_name)) |mn| {
if (std.mem.eql(u8, mn, member_name)) return true;
}
return false;
}

/// Return the SchemaStruct of the struct that the member is linked. So if it is not a link, it is itself, if it is a link, it the the sstruct of the link
pub fn linkedStructName(self: Self, struct_name: []const u8, member_name: []const u8) ZipponError!SchemaStruct {
const sstruct = try self.structName2SchemaStruct(struct_name);
if (sstruct.links.get(member_name)) |struct_link_name| {
return try self.structName2SchemaStruct(struct_link_name);
}
return sstruct;
}

pub fn checkIfAllMemberInMap(
self: Self,
struct_name: []const u8,
map: *std.StringHashMap(ConditionValue),
error_message_buffer: *std.ArrayList(u8),
) ZipponError!bool {
const all_struct_member = try self.structName2structMembers(struct_name);
var count: u16 = 0;

const writer = error_message_buffer.writer();

for (all_struct_member) |mn| {
if (std.mem.eql(u8, mn, "id")) continue;
if (map.contains(mn)) count += 1 else writer.print(" {s},", .{mn}) catch return ZipponError.WriteError;
}

return ((count == all_struct_member.len - 1) and (count == map.count()));
}

pub fn isUUIDExist(self: Self, struct_name: []const u8, uuid: UUID) bool {
const sstruct = self.structName2SchemaStruct(struct_name) catch return false;
return sstruct.uuid_file_index.contains(uuid);
}

/// Create an array of empty RelationMap based on the additionalData
pub fn relationMapArrayInit(
self: Self,
alloc: Allocator,
struct_name: []const u8,
additional_data: AdditionalData,
) ZipponError![]RelationMap {
// So here I should have relationship if children are relations
var array = std.ArrayList(RelationMap).init(alloc);
const sstruct = try self.structName2SchemaStruct(struct_name);
for (additional_data.childrens.items) |child| if (sstruct.links.contains(child.name)) {
const map = alloc.create(std.AutoHashMap([16]u8, JsonString)) catch return ZipponError.MemoryError;
map.* = std.AutoHashMap([16]u8, JsonString).init(alloc);
array.append(RelationMap{
.struct_name = sstruct.links.get(child.name).?,
.member_name = child.name,
.additional_data = child.additional_data, // Maybe I need to check if it exist, im not sure it always exist
.map = map,
}) catch return ZipponError.MemoryError;
};
return array.toOwnedSlice() catch return ZipponError.MemoryError;
}

pub fn fileListToParse(
self: Self,
alloc: Allocator,
struct_name: []const u8,
map: std.AutoHashMap([16]u8, JsonString),
) ZipponError![]usize {
const sstruct = try self.structName2SchemaStruct(struct_name);
var unique_indices = std.AutoHashMap(usize, void).init(alloc);
defer unique_indices.deinit();

var iter = map.keyIterator();
while (iter.next()) |uuid| {
if (sstruct.uuid_file_index.get(UUID{ .bytes = uuid.* })) |file_index| {
unique_indices.put(file_index, {}) catch return ZipponError.MemoryError;
}
}

var result = alloc.alloc(usize, unique_indices.count()) catch return ZipponError.MemoryError;
var i: usize = 0;
var index_iter = unique_indices.keyIterator();
while (index_iter.next()) |index| {
result[i] = index.*;
i += 1;
}

return result;
}

@@ -1,284 +0,0 @@
const std = @import("std");
const zid = @import("ZipponData");
const Allocator = std.mem.Allocator;
const Parser = @import("schemaParser.zig");
const Tokenizer = @import("tokenizers/schema.zig").Tokenizer;
const dtype = @import("dtype");
const DataType = dtype.DataType;
const AdditionalData = @import("dataStructure/additionalData.zig");
const RelationMap = @import("dataStructure/relationMap.zig");
const JsonString = @import("dataStructure/relationMap.zig").JsonString;
const ConditionValue = @import("dataStructure/filter.zig").ConditionValue;
const UUID = dtype.UUID;
const UUIDFileIndex = @import("dataStructure/UUIDFileIndex.zig");
const FileEngine = @import("fileEngine/core.zig");

const ZipponError = @import("error").ZipponError;

// TODO: Create a schemaEngine directory and add this as core and the parser with it

const config = @import("config");
const BUFFER_SIZE = config.BUFFER_SIZE;

var schema_buffer: [BUFFER_SIZE]u8 = undefined;

// TODO: Stop keeping the allocator at the root of the file
var arena: std.heap.ArenaAllocator = undefined;
var allocator: Allocator = undefined;

const log = std.log.scoped(.schemaEngine);

pub const SchemaStruct = struct {
name: []const u8,
members: [][]const u8,
types: []DataType,
zid_schema: []zid.DType,
links: std.StringHashMap([]const u8), // Map key as member_name and value as struct_name of the link
uuid_file_index: *UUIDFileIndex, // Map UUID to the index of the file store in

pub fn init(
name: []const u8,
members: [][]const u8,
types: []DataType,
links: std.StringHashMap([]const u8),
) ZipponError!SchemaStruct {
const uuid_file_index = allocator.create(UUIDFileIndex) catch return ZipponError.MemoryError;
uuid_file_index.* = UUIDFileIndex.init(allocator) catch return ZipponError.MemoryError;
return SchemaStruct{
.name = name,
.members = members,
.types = types,
.zid_schema = SchemaStruct.fileDataSchema(types) catch return ZipponError.MemoryError,
.links = links,
.uuid_file_index = uuid_file_index,
};
}

fn fileDataSchema(dtypes: []DataType) ZipponError![]zid.DType {
var schema = std.ArrayList(zid.DType).init(allocator);

for (dtypes) |dt| {
schema.append(switch (dt) {
.int => .Int,
.float => .Float,
.str => .Str,
.bool => .Bool,
.link => .UUID,
.self => .UUID,
.date => .Unix,
.time => .Unix,
.datetime => .Unix,
.int_array => .IntArray,
.float_array => .FloatArray,
.str_array => .StrArray,
.bool_array => .BoolArray,
.date_array => .UnixArray,
.time_array => .UnixArray,
.datetime_array => .UnixArray,
.link_array => .UUIDArray,
}) catch return ZipponError.MemoryError;
}
return schema.toOwnedSlice() catch return ZipponError.MemoryError;
}
};

/// Manage everything that is relate to the schema
/// This include keeping in memory the schema and schema file, and some functions to get like all members of a specific struct.
/// For now it is a bit empty. But this is where I will manage migration
pub const SchemaEngine = @This();

struct_array: []SchemaStruct,
null_terminated: [:0]u8,

// The path is the path to the schema file
pub fn init(path: []const u8, file_engine: *FileEngine) ZipponError!SchemaEngine {
arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
allocator = arena.allocator();

var buffer: [BUFFER_SIZE]u8 = undefined;

log.debug("Trying to init a SchemaEngine with path {s}", .{path});
const len: usize = try FileEngine.readSchemaFile(path, &buffer);
const null_terminated = std.fmt.bufPrintZ(&schema_buffer, "{s}", .{buffer[0..len]}) catch return ZipponError.MemoryError;

var toker = Tokenizer.init(null_terminated);
var parser = Parser.init(&toker, allocator);

var struct_array = std.ArrayList(SchemaStruct).init(allocator);
errdefer struct_array.deinit();
parser.parse(&struct_array) catch return ZipponError.SchemaNotConform;

log.debug("SchemaEngine init with {d} SchemaStruct.", .{struct_array.items.len});

for (struct_array.items) |sstruct| {
file_engine.populateFileIndexUUIDMap(sstruct, sstruct.uuid_file_index) catch |err| {
log.err("Error populate file index UUID map {any}", .{err});
};
}

return SchemaEngine{
.struct_array = struct_array.toOwnedSlice() catch return ZipponError.MemoryError,
.null_terminated = null_terminated,
};
}

pub fn deinit(_: SchemaEngine) void {
arena.deinit();
}

/// Get the type of the member
pub fn memberName2DataType(self: *SchemaEngine, struct_name: []const u8, member_name: []const u8) ZipponError!DataType {
for (try self.structName2structMembers(struct_name), 0..) |mn, i| {
const dtypes = try self.structName2DataType(struct_name);
if (std.mem.eql(u8, mn, member_name)) return dtypes[i];
}

return ZipponError.MemberNotFound;
}

pub fn memberName2DataIndex(self: *SchemaEngine, struct_name: []const u8, member_name: []const u8) ZipponError!usize {
for (try self.structName2structMembers(struct_name), 0..) |mn, i| {
if (std.mem.eql(u8, mn, member_name)) return i;
}

return ZipponError.MemberNotFound;
}

/// Get the list of all member name for a struct name
pub fn structName2structMembers(self: SchemaEngine, struct_name: []const u8) ZipponError![][]const u8 {
var i: usize = 0;

while (i < self.struct_array.len) : (i += 1) if (std.mem.eql(u8, self.struct_array[i].name, struct_name)) break;

if (i == self.struct_array.len) {
return ZipponError.StructNotFound;
}

return self.struct_array[i].members;
}

pub fn structName2SchemaStruct(self: SchemaEngine, struct_name: []const u8) ZipponError!SchemaStruct {
var i: usize = 0;

while (i < self.struct_array.len) : (i += 1) if (std.mem.eql(u8, self.struct_array[i].name, struct_name)) break;

if (i == self.struct_array.len) {
return ZipponError.StructNotFound;
}

return self.struct_array[i];
}

pub fn structName2DataType(self: SchemaEngine, struct_name: []const u8) ZipponError![]const DataType {
var i: u16 = 0;

while (i < self.struct_array.len) : (i += 1) {
if (std.mem.eql(u8, self.struct_array[i].name, struct_name)) break;
}

if (i == self.struct_array.len and !std.mem.eql(u8, self.struct_array[i].name, struct_name)) {
return ZipponError.StructNotFound;
}

return self.struct_array[i].types;
}

/// Chech if the name of a struct is in the current schema
pub fn isStructNameExists(self: SchemaEngine, struct_name: []const u8) bool {
var i: u16 = 0;
while (i < self.struct_array.len) : (i += 1) if (std.mem.eql(u8, self.struct_array[i].name, struct_name)) return true;
return false;
}

/// Check if a struct have the member name
pub fn isMemberNameInStruct(self: SchemaEngine, struct_name: []const u8, member_name: []const u8) ZipponError!bool {
for (try self.structName2structMembers(struct_name)) |mn| {
if (std.mem.eql(u8, mn, member_name)) return true;
}
return false;
}

/// Return the SchemaStruct of the struct that the member is linked. So if it is not a link, it is itself, if it is a link, it the the sstruct of the link
pub fn linkedStructName(self: SchemaEngine, struct_name: []const u8, member_name: []const u8) ZipponError!SchemaStruct {
const sstruct = try self.structName2SchemaStruct(struct_name);
if (sstruct.links.get(member_name)) |struct_link_name| {
return try self.structName2SchemaStruct(struct_link_name);
}
return sstruct;
}

// Return true if the map have all the member name as key and not more
pub fn checkIfAllMemberInMap(
self: SchemaEngine,
struct_name: []const u8,
map: *std.StringHashMap(ConditionValue),
error_message_buffer: *std.ArrayList(u8),
) ZipponError!bool {
const all_struct_member = try self.structName2structMembers(struct_name);
var count: u16 = 0;

const writer = error_message_buffer.writer();

for (all_struct_member) |mn| {
if (std.mem.eql(u8, mn, "id")) continue;
if (map.contains(mn)) count += 1 else writer.print(" {s},", .{mn}) catch return ZipponError.WriteError;
}

return ((count == all_struct_member.len - 1) and (count == map.count()));
}

pub fn isUUIDExist(self: SchemaEngine, struct_name: []const u8, uuid: UUID) bool {
const sstruct = self.structName2SchemaStruct(struct_name) catch return false;
return sstruct.uuid_file_index.contains(uuid);
}

/// Create an array of empty RelationMap based on the additionalData
pub fn relationMapArrayInit(
self: SchemaEngine,
alloc: Allocator,
struct_name: []const u8,
additional_data: AdditionalData,
) ZipponError![]RelationMap {
// So here I should have relationship if children are relations
var array = std.ArrayList(RelationMap).init(alloc);
const sstruct = try self.structName2SchemaStruct(struct_name);
for (additional_data.childrens.items) |child| if (sstruct.links.contains(child.name)) {
const map = alloc.create(std.AutoHashMap([16]u8, JsonString)) catch return ZipponError.MemoryError;
map.* = std.AutoHashMap([16]u8, JsonString).init(alloc);
array.append(RelationMap{
.struct_name = sstruct.links.get(child.name).?,
.member_name = child.name,
.additional_data = child.additional_data, // Maybe I need to check if it exist, im not sure it always exist
.map = map,
}) catch return ZipponError.MemoryError;
};
return array.toOwnedSlice() catch return ZipponError.MemoryError;
}

pub fn fileListToParse(
self: SchemaEngine,
alloc: Allocator,
struct_name: []const u8,
map: std.AutoHashMap([16]u8, JsonString),
) ZipponError![]usize {
const sstruct = try self.structName2SchemaStruct(struct_name);
var unique_indices = std.AutoHashMap(usize, void).init(alloc);
defer unique_indices.deinit();

var iter = map.keyIterator();
while (iter.next()) |uuid| {
if (sstruct.uuid_file_index.get(UUID{ .bytes = uuid.* })) |file_index| {
unique_indices.put(file_index, {}) catch return ZipponError.MemoryError;
}
}

var result = alloc.alloc(usize, unique_indices.count()) catch return ZipponError.MemoryError;
var i: usize = 0;
var index_iter = unique_indices.keyIterator();
while (index_iter.next()) |index| {
result[i] = index.*;
i += 1;
}

return result;
}

@@ -1,6 +1,6 @@
// From https://github.com/ziglang/zig/blob/master/lib/std/zig/tokenizer.zig
const std = @import("std");
const Loc = @import("shared/loc.zig").Loc;
const Loc = @import("../dataStructure/loc.zig");

pub const Token = struct {
tag: Tag,

@@ -1,6 +1,6 @@
// From https://github.com/ziglang/zig/blob/master/lib/std/zig/tokenizer.zig
const std = @import("std");
const Loc = @import("shared/loc.zig").Loc;
const Loc = @import("../dataStructure/loc.zig");

pub const Token = struct {
tag: Tag,

@@ -76,13 +76,3 @@ pub fn printError(message: []const u8, err: ZipponError, query: ?[]const u8, sta
if (config.DONT_SEND and !config.DONT_SEND_ERROR) std.debug.print("{s}", .{buffer.items});
return err;
}

pub fn printOpenDir(comptime format: []const u8, args: anytype, options: std.fs.Dir.OpenDirOptions) ZipponError!std.fs.Dir {
const path = std.fmt.bufPrint(&path_buffer, format, args) catch return ZipponError.CantOpenDir;
return std.fs.cwd().openDir(path, options) catch ZipponError.CantOpenDir;
}

pub fn printOpenFile(comptime format: []const u8, args: anytype, options: std.fs.File.OpenFlags) ZipponError!std.fs.File {
const path = std.fmt.bufPrint(&path_buffer, format, args) catch return ZipponError.CantOpenDir;
return std.fs.cwd().openFile(path, options) catch ZipponError.CantOpenFile;
}

0    src/ziql/core.zig        Normal file
0    src/ziql/parser.zig      Normal file
0    src/ziql/tokenizer.zig   Normal file
@@ -1,7 +1,7 @@
const std = @import("std");
const Allocator = std.mem.Allocator;
const FileEngine = @import("fileEngine/core.zig");
const SchemaEngine = @import("schemaEngine.zig").SchemaEngine;
const FileEngine = @import("file/core.zig");
const SchemaEngine = @import("schema/core.zig");
const Tokenizer = @import("tokenizers/ziql.zig").Tokenizer;
const Token = @import("tokenizers/ziql.zig").Token;