Temporarily moved everything to the root for testing. Also small changes
This commit is contained in:
parent 0ee197b564
commit 475e00b13e
12 TODO.md (deleted)
@@ -1,12 +0,0 @@
-How to create a new entity ?
-
-## Engine roadmap
-[ ] Generate empty data file
-[ ] Parse data file
-[ ] Add one User in the file
-[ ] Can get all User with GRAB User
-[ ] Filter User on name
-[ ] Filter User on name and age
-[ ] Get all Message of one User
-[ ] Delete User
-[ ] Update User
27 build.zig
@@ -20,7 +20,7 @@ pub fn build(b: *std.Build) void {
 
     // Test step
     const tests1 = b.addTest(.{
-        .root_source_file = b.path("src/parsers/data-parsing.zig"),
+        .root_source_file = b.path("src/data-parsing.zig"),
         .target = target,
         .optimize = optimize,
         .name = "Data parsing",
@@ -28,7 +28,7 @@ pub fn build(b: *std.Build) void {
     const run_tests1 = b.addRunArtifact(tests1);
 
     const tests2 = b.addTest(.{
-        .root_source_file = b.path("src/tokenizers/cliTokenizer.zig"),
+        .root_source_file = b.path("src/cliTokenizer.zig"),
         .target = target,
         .optimize = optimize,
         .name = "CLI tokenizer",
@@ -36,7 +36,7 @@ pub fn build(b: *std.Build) void {
     const run_tests2 = b.addRunArtifact(tests2);
 
     const tests3 = b.addTest(.{
-        .root_source_file = b.path("src/tokenizers/ziqlTokenizer.zig"),
+        .root_source_file = b.path("src/ziqlTokenizer.zig"),
         .target = target,
         .optimize = optimize,
         .name = "ZiQL tokenizer",
@@ -44,25 +44,34 @@ pub fn build(b: *std.Build) void {
     const run_tests3 = b.addRunArtifact(tests3);
 
     const tests4 = b.addTest(.{
-        .root_source_file = b.path("src/tokenizers/schemaTokenizer.zig"),
+        .root_source_file = b.path("src/schemaTokenizer.zig"),
         .target = target,
         .optimize = optimize,
         .name = "Schema tokenizer",
     });
    const run_tests4 = b.addRunArtifact(tests4);
 
-    const tests5 = b.addTest(.{
-        .root_source_file = b.path("src/test.zig"),
+    //const tests5 = b.addTest(.{
+    //    .root_source_file = b.path("src/ADD.zig"),
+    //    .target = target,
+    //    .optimize = optimize,
+    //    .name = "ADD",
+    //});
+    //const run_tests5 = b.addRunArtifact(tests5);
+
+    const tests6 = b.addTest(.{
+        .root_source_file = b.path("src/GRAB.zig"),
         .target = target,
         .optimize = optimize,
-        .name = "ADD functions",
+        .name = "GRAB",
     });
-    const run_tests5 = b.addRunArtifact(tests5);
+    const run_tests6 = b.addRunArtifact(tests6);
 
     const test_step = b.step("test", "Run unit tests");
     test_step.dependOn(&run_tests1.step);
     test_step.dependOn(&run_tests2.step);
     test_step.dependOn(&run_tests3.step);
     test_step.dependOn(&run_tests4.step);
-    test_step.dependOn(&run_tests5.step);
+    //test_step.dependOn(&run_tests5.step);
+    test_step.dependOn(&run_tests6.step);
 }
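Note on the pattern above: every test binary in build.zig is wired up the same way: declare it with b.addTest, wrap it with b.addRunArtifact, and attach it to the shared "test" step. A minimal sketch for one more hypothetical target (the src/uuid.zig path is an assumption for illustration, not part of this commit):

    const tests7 = b.addTest(.{
        .root_source_file = b.path("src/uuid.zig"), // hypothetical test target
        .target = target,
        .optimize = optimize,
        .name = "UUID",
    });
    const run_tests7 = b.addRunArtifact(tests7);
    test_step.dependOn(&run_tests7.step);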
src/ADD.zig (moved from src/query_functions/ADD.zig)
@@ -1,15 +1,13 @@
 const std = @import("std");
-const dtypes = @import("../dtypes.zig");
-const UUID = @import("../uuid.zig").UUID;
-const ziqlTokenizer = @import("../tokenizers/ziqlTokenizer.zig").Tokenizer;
-const ziqlToken = @import("../tokenizers/ziqlTokenizer.zig").Token;
+const dtypes = @import("dtypes.zig");
+const UUID = @import("uuid.zig").UUID;
+const ziqlTokenizer = @import("ziqlTokenizer.zig").Tokenizer;
+const ziqlToken = @import("ziqlTokenizer.zig").Token;
 const Allocator = std.mem.Allocator;
 
 const stdout = std.io.getStdOut().writer();
 
-// TODO to improve this part of the code:
-// 1. Use logging
-// 2. Create a struct that manage files with member: stdout, folder (e.g. the User folder),
+// TODO: Use a Parser struct like in GRAB
 
 // Query that need to work now
 // ADD User (name='Adrien', email='adrien.bouvais@gmail.com') OK
@@ -236,7 +234,10 @@ pub fn getMapOfMember(allocator: Allocator, toker: *ziqlTokenizer) !std.StringHa
 
     std.debug.print("OK \n\n", .{});
 
-    while (token.tag != ziqlToken.Tag.eof) : (token = toker.next()) {
+    while (token.tag != ziqlToken.Tag.eof) : ({
+        token = toker.next();
+        std.debug.print("{any}", .{token});
+    }) {
         std.debug.print("{any}\n\n", .{token});
         switch (token.tag) {
             .r_paren => continue,
@@ -333,3 +334,20 @@ fn checkIfAllMemberInMap(struct_name: []const u8, map: *std.StringHashMap([]cons
 
     return ((count == all_struct_member.len) and (count == map.count()));
 }
+
+test "Get map of members" {
+    // std.testing.refAllDecls(@This());
+    // _ = @import("query_functions/ADD.zig").getMapOfMember;
+
+    const allocator = std.testing.allocator;
+
+    const in = "(name='Adrien', email='adrien@gmail.com', age=26, scores=[42 100 5])";
+    const null_term_in = try allocator.dupeZ(u8, in);
+
+    var toker = ziqlTokenizer.init(null_term_in);
+
+    const member_map = try getMapOfMember(allocator, &toker);
+    std.debug.print("{s}", .{member_map.get("name").?});
+
+    allocator.free(null_term_in);
+}
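Note on the getMapOfMember hunk above: the change replaces the single continue expression with a parenthesized block, a Zig idiom that runs several statements at the end of each iteration. A self-contained sketch of the same loop shape:

    const std = @import("std");

    pub fn main() void {
        var i: usize = 0;
        // The ({ ... }) after ':' is the continue expression; it runs after
        // every iteration, keeping the advance and the trace print together.
        while (i < 3) : ({
            i += 1;
            std.debug.print("i = {d}\n", .{i});
        }) {}
    }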
284 src/GRAB.zig (new file)
@@ -0,0 +1,284 @@
+const std = @import("std");
+const Allocator = std.mem.Allocator;
+const Tokenizer = @import("ziqlTokenizer.zig").Tokenizer;
+const Token = @import("ziqlTokenizer.zig").Token;
+
+// To work now
+// GRAB User {}
+// GRAB User {name = 'Adrien'}
+// GRAB User {name='Adrien' AND age < 30}
+// GRAB User [1] {}
+// GRAB User [10; name] {age < 30}
+//
+// For later
+
+const stdout = std.io.getStdOut().writer();
+
+pub const Parser = struct {
+    allocator: Allocator,
+    toker: *Tokenizer,
+    state: State,
+
+    additional_data: AdditionalData,
+
+    // This is the [] part
+    pub const AdditionalData = struct {
+        entity_count_to_find: usize = 0,
+        member_to_find: std.ArrayList(AdditionalDataMember),
+
+        pub fn init(allocator: Allocator) AdditionalData {
+            return AdditionalData{ .member_to_find = std.ArrayList(AdditionalDataMember).init(allocator) };
+        }
+
+        pub fn deinit(self: *AdditionalData) void {
+            // Get all additional data that are in the list to also deinit them
+            self.member_to_find.deinit();
+        }
+    };
+
+    // This is name in: [name]
+    // There is an additional data because it can be [friend [1; name]]
+    const AdditionalDataMember = struct {
+        name: []const u8,
+        additional_data: AdditionalData,
+
+        pub fn init(allocator: Allocator, name: []const u8) AdditionalDataMember {
+            const additional_data = AdditionalData.init(allocator);
+            return AdditionalDataMember{ .name = name, .additional_data = additional_data };
+        }
+    };
+
+    const State = enum {
+        start,
+        invalid,
+        end,
+
+        // For the additional data
+        expect_count_of_entity_to_find,
+        expect_semicolon_OR_right_bracket,
+        expect_member,
+        next_member_OR_end_OR_new_additional_data,
+        next_member_OR_end,
+
+        expect_filter,
+    };
+
+    pub fn init(allocator: Allocator, toker: *Tokenizer) Parser {
+        return Parser{
+            .allocator = allocator,
+            .toker = toker,
+            .state = State.start,
+            .additional_data = AdditionalData.init(allocator),
+        };
+    }
+
+    pub fn deinit(self: *Parser) void {
+        // FIXME: I think additionalData inside additionalData are not deinit
+        self.additional_data.deinit();
+    }
+
+    pub fn parse(self: *Parser) !void {
+        var token = self.toker.next();
+        while (self.state != State.end) : (token = self.toker.next()) {
+            switch (self.state) {
+                .start => {
+                    switch (token.tag) {
+                        .l_bracket => {
+                            try self.parse_additional_data(&self.additional_data);
+                        },
+                        else => {
+                            try stdout.print("Found {any}\n", .{token.tag});
+
+                            return;
+                        },
+                    }
+                },
+                else => return,
+            }
+        }
+    }
+
+    /// When this function is call, the tokenizer last token retrieved should be [.
+    /// Check if an int is here -> check if ; is here -> check if member is here -> check if [ is here -> loop
+    pub fn parse_additional_data(self: *Parser, additional_data: *AdditionalData) !void {
+        var token = self.toker.next();
+        var skip_next = false;
+        self.state = State.expect_count_of_entity_to_find;
+
+        while (self.state != State.end) : ({
+            token = if (!skip_next) self.toker.next() else token;
+            skip_next = false;
+        }) {
+            switch (self.state) {
+                .expect_count_of_entity_to_find => {
+                    switch (token.tag) {
+                        .number_literal => {
+                            const count = std.fmt.parseInt(usize, self.toker.getTokenSlice(token), 10) catch {
+                                try stdout.print(
+                                    "Error parsing query: {s} need to be a number.",
+                                    .{self.toker.getTokenSlice(token)},
+                                );
+                                self.state = .invalid;
+                                continue;
+                            };
+                            additional_data.entity_count_to_find = count;
+                            self.state = .expect_semicolon_OR_right_bracket;
+                        },
+                        else => {
+                            self.state = .expect_member;
+                            skip_next = true;
+                        },
+                    }
+                },
+                .expect_semicolon_OR_right_bracket => {
+                    switch (token.tag) {
+                        .semicolon => {
+                            self.state = .expect_member;
+                        },
+                        .r_bracket => {
+                            return;
+                        },
+                        else => {
+                            try self.print_error(
+                                "Error: Expect ';' or ']'.",
+                                &token,
+                            );
+                            self.state = .invalid;
+                        },
+                    }
+                },
+                .expect_member => {
+                    switch (token.tag) {
+                        .identifier => {
+                            // TODO: Check if the member name exist
+                            try additional_data.member_to_find.append(
+                                AdditionalDataMember.init(
+                                    self.allocator,
+                                    self.toker.getTokenSlice(token),
+                                ),
+                            );
+
+                            self.state = .next_member_OR_end_OR_new_additional_data;
+                        },
+                        else => {
+                            try self.print_error(
+                                "Error: A member name should be here.",
+                                &token,
+                            );
+                        },
+                    }
+                },
+                .next_member_OR_end_OR_new_additional_data => {
+                    switch (token.tag) {
+                        .comma => {
+                            self.state = .expect_member;
+                        },
+                        .r_bracket => {
+                            return;
+                        },
+                        .l_bracket => {
+                            try self.parse_additional_data(
+                                &additional_data.member_to_find.items[additional_data.member_to_find.items.len - 1].additional_data,
+                            );
+                            self.state = .next_member_OR_end;
+                        },
+                        else => {
+                            try self.print_error(
+                                "Error: Expected a comma ',' or the end or a new list of member to return.",
+                                &token,
+                            );
+                        },
+                    }
+                },
+                .next_member_OR_end => {
+                    switch (token.tag) {
+                        .comma => {
+                            try stdout.print("Expected new member\n", .{});
+                            self.state = .expect_member;
+                        },
+                        .r_bracket => {
+                            return;
+                        },
+                        else => {
+                            try self.print_error(
+                                "Error: Expected a new member name or the end of the list of member name to return.",
+                                &token,
+                            );
+                        },
+                    }
+                },
+                .invalid => {
+                    @panic("=)");
+                },
+                else => {
+                    try self.print_error(
+                        "Error: Unknow state.",
+                        &token,
+                    );
+                },
+            }
+        }
+    }
+
+    fn print_error(self: *Parser, message: []const u8, token: *Token) !void {
+        try stdout.print("\n", .{});
+        try stdout.print("{s}\n", .{self.toker.buffer});
+
+        // Calculate the number of spaces needed to reach the start position.
+        var spaces: usize = 0;
+        while (spaces < token.loc.start) : (spaces += 1) {
+            try stdout.print(" ", .{});
+        }
+
+        // Print the '^' characters for the error span.
+        var i: usize = token.loc.start;
+        while (i < token.loc.end) : (i += 1) {
+            try stdout.print("^", .{});
+        }
+        try stdout.print(" \n", .{}); // Align with the message
+
+        try stdout.print("{s}\n", .{message});
+
+        @panic("");
+    }
+};
+
+test "Test AdditionalData" {
+    const allocator = std.testing.allocator;
+
+    var additional_data1 = Parser.AdditionalData.init(allocator);
+    additional_data1.entity_count_to_find = 1;
+    testAdditionalData("[1]", additional_data1);
+
+    var additional_data2 = Parser.AdditionalData.init(allocator);
+    defer additional_data2.deinit();
+    try additional_data2.member_to_find.append(
+        Parser.AdditionalDataMember.init(
+            allocator,
+            "name",
+        ),
+    );
+    testAdditionalData("[name]", additional_data2);
+
+    std.debug.print("AdditionalData Parsing OK \n", .{});
+}
+
+fn testAdditionalData(source: [:0]const u8, expected_AdditionalData: Parser.AdditionalData) void {
+    const allocator = std.testing.allocator;
+    var tokenizer = Tokenizer.init(source);
+    var additional_data = Parser.AdditionalData.init(allocator);
+
+    _ = tokenizer.next();
+    var parser = Parser.init(allocator, &tokenizer);
+    parser.parse_additional_data(&additional_data) catch |err| {
+        std.debug.print("Error parsing additional data: {any}\n", .{err});
+    };
+
+    std.debug.print("{any}\n\n", .{additional_data});
+
+    std.testing.expectEqual(expected_AdditionalData, additional_data) catch {
+        std.debug.print("Additional data are not equal for: {s}\n", .{source});
+    };
+
+    parser.deinit();
+}
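Note on the new parser: the bracket syntax nests, which is why AdditionalData is recursive. [10; name [1; email]] asks for ten entities, returns their name, and for each related entity behind name, one email. A sketch of driving parse_additional_data directly; the arena sidesteps the FIXME about nested lists not being deinited, and the expected counts are simply the values implied by the query string:

    const std = @import("std");
    const Tokenizer = @import("ziqlTokenizer.zig").Tokenizer;
    const Parser = @import("GRAB.zig").Parser;

    test "nested additional data (sketch)" {
        var arena = std.heap.ArenaAllocator.init(std.testing.allocator);
        defer arena.deinit(); // frees outer and nested lists in one go
        const allocator = arena.allocator();

        var toker = Tokenizer.init("[10; name [1; email]]");
        _ = toker.next(); // consume '[', as parse_additional_data expects

        var parser = Parser.init(allocator, &toker);
        try parser.parse_additional_data(&parser.additional_data);

        try std.testing.expectEqual(@as(usize, 10), parser.additional_data.entity_count_to_find);
        const name_member = parser.additional_data.member_to_find.items[0];
        try std.testing.expectEqual(@as(usize, 1), name_member.additional_data.entity_count_to_find);
    }

On a malformed query, print_error echoes the buffer and underlines the offending token before panicking, roughly:

    [10 name]
        ^^^^
    Error: Expect ';' or ']'.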
@@ -1,10 +1,10 @@
 const std = @import("std");
 const Allocator = std.mem.Allocator;
-const cliTokenizer = @import("tokenizers/cliTokenizer.zig").Tokenizer;
-const cliToken = @import("tokenizers/cliTokenizer.zig").Token;
-const schemaTokenizer = @import("tokenizers/schemaTokenizer.zig").Tokenizer;
-const schemaToken = @import("tokenizers/schemaTokenizer.zig").Token;
-const schemaParser = @import("parsers/schemaParser.zig").Parser;
+const cliTokenizer = @import("cliTokenizer.zig").Tokenizer;
+const cliToken = @import("cliTokenizer.zig").Token;
+const schemaTokenizer = @import("schemaTokenizer.zig").Tokenizer;
+const schemaToken = @import("schemaTokenizer.zig").Token;
+const schemaParser = @import("schemaParser.zig").Parser;
 
 pub fn main() !void {
     checkAndCreateDirectories();
@@ -189,9 +189,10 @@ fn runCommand(null_term_query_str: [:0]const u8) void {
     var gpa = std.heap.GeneralPurposeAllocator(.{}){};
     const allocator = gpa.allocator();
 
-    const argv = &[_][]const u8{ "./engine", null_term_query_str };
+    // TODO: Use the folder ENGINE
+    const args = &[_][]const u8{ "./engine", null_term_query_str };
 
-    const result = std.process.Child.run(.{ .allocator = allocator, .argv = argv }) catch |err| switch (err) {
+    const result = std.process.Child.run(.{ .allocator = allocator, .argv = args, .max_output_bytes = 4084 }) catch |err| switch (err) {
         error.FileNotFound => {
             std.debug.print("No engine found, please use `schema build` to make one.\n", .{});
             return;
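Note on the runCommand hunk above: the CLI shells out to a separate engine binary and now caps captured output at 4084 bytes. A reduced sketch of that std.process.Child.run call; the hard-coded query is illustrative, the diff passes null_term_query_str:

    const std = @import("std");

    pub fn main() !void {
        var gpa = std.heap.GeneralPurposeAllocator(.{}){};
        const allocator = gpa.allocator();

        const args = &[_][]const u8{ "./engine", "GRAB User {}" };
        const result = std.process.Child.run(.{
            .allocator = allocator,
            .argv = args,
            .max_output_bytes = 4084, // cap taken from the diff
        }) catch |err| switch (err) {
            error.FileNotFound => {
                std.debug.print("No engine found, please use `schema build` to make one.\n", .{});
                return;
            },
            else => return err,
        };
        // Child.run allocates the captured streams; the caller frees them.
        defer allocator.free(result.stdout);
        defer allocator.free(result.stderr);

        std.debug.print("{s}", .{result.stdout});
    }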
@@ -1,11 +1,11 @@
 const std = @import("std");
 const dtypes = @import("dtypes.zig");
 const UUID = @import("uuid.zig").UUID;
-const ziqlTokenizer = @import("tokenizers/ziqlTokenizer.zig").Tokenizer;
-const ziqlToken = @import("tokenizers/ziqlTokenizer.zig").Token;
-const grabParser = @import("query_functions/GRAB.zig").Parser;
+const ziqlTokenizer = @import("ziqlTokenizer.zig").Tokenizer;
+const ziqlToken = @import("ziqlTokenizer.zig").Token;
+const grabParser = @import("GRAB.zig").Parser;
 const Allocator = std.mem.Allocator;
-const parseDataAndAddToFile = @import("query_functions/ADD.zig").parseDataAndAddToFile;
+const parseDataAndAddToFile = @import("ADD.zig").parseDataAndAddToFile;
 
 pub const Error = error{UUIDNotFound};
 const stdout = std.io.getStdOut().writer();
@@ -46,8 +46,8 @@ pub fn main() !void {
 
     switch (first_token.tag) {
         .keyword_grab => {
-            var parser = grabParser.init(&ziqlToker);
-            try parser.parse_additional_data();
+            var parser = grabParser.init(allocator, &ziqlToker);
+            try parser.parse();
         },
         .keyword_add => {
             if (!isStructInSchema(ziqlToker.getTokenSlice(struct_name_token))) {
@@ -1,6 +1,6 @@
 const std = @import("std");
 const UUID = @import("uuid.zig").UUID;
-const dataParsing = @import("parsers/data-parsing.zig");
+const dataParsing = @import("data-parsing.zig");
 
 pub const parameter_max_file_size_in_bytes = 500; // THe number of bytes than each file can be before splitting
 
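Note: parameter_max_file_size_in_bytes caps each data file at 500 bytes before the engine splits to a new one. A hedged sketch of the check this implies; the helper name and its use of File.stat are assumptions, not code from this commit:

    const std = @import("std");
    const parameter_max_file_size_in_bytes = 500;

    /// Hypothetical helper: true when appending `len` more bytes would push
    /// `file` past the split threshold.
    fn needsNewFile(file: std.fs.File, len: usize) !bool {
        const stat = try file.stat();
        return stat.size + len > parameter_max_file_size_in_bytes;
    }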
src/query_functions/GRAB.zig (deleted)
@@ -1,100 +0,0 @@
-const std = @import("std");
-const Allocator = std.mem.Allocator;
-const ziqlTokenizer = @import("../tokenizers/ziqlTokenizer.zig").Tokenizer;
-
-// To work now
-// GRAB User {}
-// GRAB User {name = 'Adrien'}
-// GRAB User {name='Adrien' AND age < 30}
-// GRAB User [1] {}
-// GRAB User [10; name] {age < 30}
-//
-// For later
-
-const stdout = std.io.getStdOut().writer();
-
-const AdditionalData = struct {
-    entity_to_find: usize = 0,
-    member_to_find: std.ArrayList([]const u8),
-
-    pub fn init(allocator: Allocator) AdditionalData {
-        return AdditionalData{ .member_to_find = std.ArrayList(u8).init(allocator) };
-    }
-};
-
-pub const Parser = struct {
-    allocator: Allocator,
-    additional_data: AdditionalData,
-    toker: *ziqlTokenizer,
-    state: State,
-
-    const State = enum {
-        start,
-        invalid,
-        end,
-
-        expect_additional_data,
-        expect_count_of_entity_to_find,
-        expect_semicolon,
-
-        expect_filter,
-    };
-
-    pub fn init(allocator: Allocator, toker: *ziqlTokenizer) Parser {
-        return Parser{ .allocator = allocator, .toker = toker, .state = State.expect_additional_data, .additional_data = AdditionalData.init(allocator) };
-    }
-
-    pub fn deinit(self: *Parser) void {
-        self.additional_data.member_to_find.deinit();
-    }
-
-    pub fn parse_additional_data(self: *Parser) !void {
-        var token = self.toker.next();
-        while (self.state != State.end) : (token = self.toker.next()) {
-            switch (self.state) {
-                .expect_additional_data => {
-                    switch (token.tag) {
-                        .l_bracket => {
-                            try stdout.print("Additional data found.\n", .{});
-                            self.state = State.expect_count_of_entity_to_find;
-                        },
-                        else => {
-                            try stdout.print("No additional data found.\n", .{});
-                            self.state = State.expect_filter;
-                        },
-                    }
-                },
-                .expect_count_of_entity_to_find => {
-                    switch (token.tag) {
-                        .number_literal => {
-                            try stdout.print("Count of entity found.\n", .{});
-                            self.state = State.expect_semicolon;
-                        },
-                        else => {
-                            try stdout.print("No count of entity found.\n", .{});
-                            self.state = State.expect_filter;
-                        },
-                    }
-                },
-                .expect_semicolon => {
-                    switch (token.tag) {
-                        .semicolon => {
-                            try stdout.print("Found semiconlon.\n", .{});
-                            self.state = State.expect_semicolon;
-                        },
-                        else => {
-                            try stdout.print("Expected semicon here: {s}.\n", .{self.toker.buffer[token.loc.start - 5 .. token.loc.end + 5]});
-                            self.state = State.invalid;
-                        },
-                    }
-                },
-                .invalid => {
-                    return;
-                },
-                else => {
-                    try stdout.print("End\n", .{});
-                },
-            }
-        }
-    }
-};
@@ -1,7 +1,7 @@
 const std = @import("std");
 const Allocator = std.mem.Allocator;
-const Toker = @import("../tokenizers/schemaTokenizer.zig").Tokenizer;
-const Token = @import("../tokenizers/schemaTokenizer.zig").Token;
+const Toker = @import("tokenizers/schemaTokenizer.zig").Tokenizer;
+const Token = @import("tokenizers/schemaTokenizer.zig").Token;
 
 pub const Parser = struct {
     file: std.fs.File,
22 src/test.zig (deleted)
@@ -1,22 +0,0 @@
-const std = @import("std");
-const dtypes = @import("dtypes.zig");
-const UUID = @import("uuid.zig").UUID;
-const ziqlTokenizer = @import("tokenizers/ziqlTokenizer.zig").Tokenizer;
-const ziqlToken = @import("tokenizers/ziqlTokenizer.zig").Token;
-
-// Test for functions in for_add.zig
-const getMapOfMember = @import("query_functions/ADD.zig").getMapOfMember;
-
-test "Get map of members" {
-    const allocator = std.testing.allocator;
-
-    const in = "(name='Adrien', email='adrien@gmail.com', age=26, scores=[42 100 5])";
-    const null_term_in = try allocator.dupeZ(u8, in);
-
-    var toker = ziqlTokenizer.init(null_term_in);
-
-    const member_map = try getMapOfMember(allocator, &toker);
-    std.debug.print("{s}", .{member_map.get("name").?});
-
-    allocator.free(null_term_in);
-}