Compare commits

...

10 Commits

Author SHA1 Message Date
281e77ea64 Use specific version of 0.14 because stuff changed, to switch to just 0.14 when release
Some checks failed
mkdocs / deploy (push) Failing after 2m1s
2025-02-12 17:08:02 +01:00
f0fc12d1b8 Remove test from auto release because it is buggy, to check later 2025-02-12 17:04:50 +01:00
1bfdad9a28 Update release action 2025-02-12 17:02:50 +01:00
2021f95ffd Update roadmap 2025-02-12 16:51:07 +01:00
fe2f407b7f Added empty test for later 2025-02-12 16:42:14 +01:00
ca5d806f0b Changed a bit test and removed comments 2025-02-12 16:39:35 +01:00
d7b8dec378 Fix
Start index still changes if the member name is not right; otherwise it runs
in a loop forever
2025-02-12 16:39:18 +01:00
7352ec45c7 Removed old comment and move an array a bit later in function 2025-02-12 14:54:10 +01:00
84f5749aa2 Fix
Now str array doesn't take ' on both sides when creating the array;
otherwise I compare Bob with 'Bob' and it doesn't work
2025-02-12 14:53:39 +01:00
3e234396bf Fix
Now delete .new file if I find one when I shouldn't in allFileIndex
2025-02-12 14:52:48 +01:00
13 changed files with 46 additions and 53 deletions

View File

@ -18,11 +18,7 @@ jobs:
- name: Setup Zig
uses: mlugg/setup-zig@v1
with:
version: 0.13.0
- name: Test code
run: |
zig build -Doptimize=ReleaseSmall test
version: 0.14.0-dev.1510+fb0028a0d
- name: Build release artifacts
run: |

View File

@ -17,17 +17,17 @@
- [X] Date
- [X] Logs
- [X] Query multi threading
- [ ] Arrays manipulation
- [ ] Ordering
- [X] Arrays manipulation
#### v0.3 - QoL
- [X] Docs website
- [ ] Ordering
- [ ] Dot operator
- [ ] Linked query
- [ ] Schema migration
- [~] Dump/Bump data
- [ ] Dump/Bump data
- [ ] Recovery
- [ ] Better CLI
- [ ] Linked query
#### v0.4 - Usability
- [ ] Single file

View File

@ -4,8 +4,6 @@ User (
email: str,
bday: date,
friends: []User,
posts: []Post,
comments: []Comment,
)
Post (

View File

@ -183,7 +183,7 @@ pub fn benchmark(allocator: std.mem.Allocator) !void {
// Linked array not yet implemented and array manipulation not tested
const null_term_query_str = try std.fmt.bufPrintZ(
&line_buffer, // I dont like 'category = {name='Book'}'. Maybe att a IS keyword ?
\\ADD Order (from={{}}, at=NOW, items={{name IN ['Food1', 'Food2']}}, quantity=[5 22])
\\ADD Order (from={{}}, at=NOW, items={{name IN ['Food1', 'Food2']}}, quantity=[5, 22])
,
.{},
);

View File

@ -312,8 +312,6 @@ pub const Filter = struct {
else => unreachable,
},
// TODO: Also be able to it for array to array. Like names in ['something']
// And it is true if at least one value are shared between array.
.in => switch (condition.data_type) {
.link => condition.value.link.contains(UUID{ .bytes = row_value.UUID }),
.int => in(i32, row_value.Int, condition.value.int_array),

View File

@ -16,8 +16,6 @@
// Then for each RelationMap, I parse the files again this time to update the first JSON that now have {<||>}
// With a sub additionalData. If there is an additional data relation, I recurcive.
// So I need an option in parseEntity to either write the first JSON or update the existing one
//
// FIXME: I think if 2 different struct have the same member name it can cause issue but maybe not tho
const std = @import("std");
const AdditionalData = @import("additionalData.zig").AdditionalData;
@ -42,6 +40,7 @@ pub fn populate(self: *RelationMap, input: []const u8) ZipponError!void {
while (std.mem.indexOf(u8, input[start..], "{<|")) |pos| {
const pattern_start = start + pos + 3;
const pattern_end = pattern_start + 16;
defer start = pattern_end + 3;
const member_end = if (input[pattern_start - 4] == '[') pattern_start - 6 else pattern_start - 5; // This should be ": {<|"
var member_start = member_end - 1;
@ -58,7 +57,6 @@ pub fn populate(self: *RelationMap, input: []const u8) ZipponError!void {
@memcpy(uuid_bytes[0..], input[pattern_start..pattern_end]);
self.map.put(uuid_bytes, JsonString{}) catch return ZipponError.MemoryError;
start = pattern_end + 3;
}
}

View File

@ -9,6 +9,8 @@ const DateTime = dtype.DateTime;
const UUID = dtype.UUID;
// TODO: Move this outside of FileEngine and make it faster
// And add some features like
// - Option to pretty print (add \n and \t)
const ZipponError = @import("error").ZipponError;

View File

@ -190,9 +190,6 @@ pub fn parseEntities(
var safe_allocator = std.heap.ThreadSafeAllocator{ .child_allocator = arena.allocator() };
const allocator = safe_allocator.allocator();
var buff = std.ArrayList(u8).init(entry_allocator);
const writer = buff.writer();
const sstruct = try self.schema_engine.structName2SchemaStruct(struct_name);
const to_parse = try self.allFileIndex(allocator, struct_name);
@ -235,6 +232,9 @@ pub fn parseEntities(
wg.wait();
// Append all writer to each other
var buff = std.ArrayList(u8).init(entry_allocator);
const writer = buff.writer();
writer.writeByte('[') catch return ZipponError.WriteError;
for (thread_writer_list) |list| writer.writeAll(list.items) catch return ZipponError.WriteError;
writer.writeByte(']') catch return ZipponError.WriteError;
@ -242,7 +242,6 @@ pub fn parseEntities(
// Now I need to do the relation stuff, meaning parsing new files to get the relationship value
// Without relationship to return, this function is basically finish here
//
// Here I take the JSON string and I parse it to find all {<||>} and add them to the relation map with an empty JsonString
for (relation_maps) |*relation_map| try relation_map.populate(buff.items);

View File

@ -164,7 +164,11 @@ pub fn allFileIndex(self: Self, allocator: Allocator, struct_name: []const u8) Z
var iter = dir.iterate();
while (iter.next() catch return ZipponError.DirIterError) |entry| {
if (entry.kind != .file) continue;
if (std.mem.eql(u8, entry.name[0..(entry.name.len - 4)], ".new")) continue; // TODO: Delete the file, shouldn't be here
if (std.mem.eql(u8, entry.name[0..(entry.name.len - 4)], ".new")) {
dir.deleteFile(entry.name) catch return ZipponError.DeleteFileError;
continue;
}
const index = std.fmt.parseInt(usize, entry.name[0..(entry.name.len - 4)], 10) catch return ZipponError.InvalidFileIndex;
array.append(index) catch return ZipponError.MemoryError;
}

View File

@ -268,7 +268,7 @@ pub fn deleteEntities(
writer.writeByte(']') catch return ZipponError.WriteError;
// FIXME: Stop doing that and just remove UUID from the map itself instead of reparsing everything at the end
// It's just that I can't do it in deleteEntitiesOneFile itself
// It's just that I can't do it in deleteEntitiesOneFile itself because of multi thread that update the map at same time
sstruct.uuid_file_index.map.clearRetainingCapacity();
_ = sstruct.uuid_file_index.reset();
try self.populateFileIndexUUIDMap(sstruct, sstruct.uuid_file_index);

View File

@ -72,7 +72,7 @@ test "GRAB filter with string" { // OK
test "GRAB with additional data" { // OK
const db = DB{ .path = "test1", .schema = "schema/test" };
try testParsing(db, "GRAB User [1] {age < 18}"); // FIXME: Return nothing
try testParsing(db, "GRAB User [1] {age < 18}");
try testParsing(db, "GRAB User [id, name] {age < 18}");
try testParsing(db, "GRAB User [100; name, age] {age < 18}");
}
@ -100,21 +100,20 @@ test "GRAB filter with date" { // OK
try testParsing(db, "GRAB User {last_order > 2000/01/01-12:45}");
}
// FIXME: GRAB User [1] return nothing
test "Specific query" { // NOT OK
const db = DB{ .path = "test1", .schema = "schema/test" };
try testParsing(db, "GRAB User");
try testParsing(db, "GRAB User {}");
try testParsing(db, "GRAB User [1]"); // FIXME: Return nothing
try testParsing(db, "GRAB User [1]");
try testParsing(db, "GRAB User [*, friends]");
}
test "Specific query ADD" { // OK
test "Specific query ADD" { // OK - Test if array and relationship are empty by default if not specify
const db = DB{ .path = "test1", .schema = "schema/test" };
try testParsing(db, "ADD User (name = 'Bob1', email='bob@email.com', age=55, best_friend=none, friends=none, bday=2000/01/01, a_time=12:04, last_order=2000/01/01-12:45)");
try testParsing(db, "ADD User (name = 'Bob2', email='bob@email.com', age=55, best_friend=none, bday=2000/01/01, a_time=12:04, last_order=2000/01/01-12:45)");
try testParsing(db, "ADD User (name = 'Bob3', email='bob@email.com', age=55, bday=2000/01/01, a_time=12:04, last_order=2000/01/01-12:45)");
try testParsing(db, "GRAB User {name IN ['Bob1', 'Bob2', 'Bob3']}"); // FIXME: Return nothing
try testParsing(db, "GRAB User {name IN ['Bob1', 'Bob2', 'Bob3']}");
}
// Array manipulation
@ -122,7 +121,7 @@ test "Specific query ADD" { // OK
test "GRAB name IN" { // OK
const db = DB{ .path = "test1", .schema = "schema/test" };
try testParsing(db, "GRAB User {name IN ['Bob', 'Bobibou']}"); // FIXME: Return nothing
try testParsing(db, "GRAB User {name IN ['Bob', 'Bobibou']}");
}
test "UPDATE APPEND" { // OK
@ -171,7 +170,7 @@ test "UPDATE REMOVEAT" { // OK
test "UPDATE relationship" { // OK
const db = DB{ .path = "test1", .schema = "schema/test" };
try testParsing(db, "UPDATE User [1] {name='Bob'} TO (best_friend = {name='Boba'} )");
try testParsing(db, "GRAB User {}");
try testParsing(db, "GRAB User [1; name, best_friend] {}");
}
test "GRAB Relationship Filter" { // OK
@ -199,8 +198,7 @@ test "GRAB Relationship AdditionalData Filtered" { // FIXME: NOT OK
try testParsing(db, "GRAB User [2; name, best_friend] {best_friend !IN {}}");
}
test "GRAB Relationship dot" { // TODO: Make this a reality
// DO I add this ? I'm not sure about this feature
test "GRAB Relationship dot" { // TODO: Make this a reality, but need to think a bit more about it
const db = DB{ .path = "test1", .schema = "schema/test" };
// try testParsing(db, "GRAB User.best_friend {}");
// try testParsing(db, "GRAB User.best_friend.best_friend {}");
@ -209,6 +207,14 @@ test "GRAB Relationship dot" { // TODO: Make this a reality
try testParsing(db, "GRAB User [1] {}");
}
test "GRAB Ordering" { // TODO: Make this a reality
const db = DB{ .path = "test1", .schema = "schema/test" };
// try testParsing(db, "GRAB User {} |age|");
// try testParsing(db, "GRAB User {} |name|");
// try testParsing(db, "GRAB User {} |name, age|");
try testParsing(db, "GRAB User [1] {}");
}
// 3 Struct Relationship
// ===============================================================
@ -216,25 +222,15 @@ test "3 struct base" {
const db = DB{ .path = "test2", .schema = "schema/test-3struct" };
try testParsing(db, "DELETE User {}");
try testParsing(db, "DELETE Post {}");
try testParsing(db, "ADD User (name = 'Bob', email='bob@email.com', age=55, friends=none, posts=none, comments=none, bday=2000/01/01)");
try testParsing(db, "ADD Post (text = 'Hello every body', at=NOW, from={}, comments=none)");
try testParsing(db, "ADD Post (text = 'Hello every body', at=NOW, from={}, comments=none)");
try testParsing(db, "GRAB Post [id, text, at, from [id, name]] {}");
}
test "3 struct both side" {
const db = DB{ .path = "test2", .schema = "schema/test-3struct" };
try testParsing(db, "DELETE User {}");
try testParsing(db, "DELETE Post {}");
try testParsing(db, "ADD User (name = 'Bob', email='bob@email.com', age=55, friends=none, posts=none, comments=none, bday=2000/01/01)");
try testParsing(db, "ADD Post (text = 'Hello every body', at=NOW, from=none, comments=none)");
//try testParsing(db, "ADD Post (text = 'Hello every body', at=NOW, from={}, comments=none) -> new_post -> UPDATE User {} TO (posts APPEND new_post)");
// try testParsing(db, "ADD Post (text = 'Hello every body', at=NOW, from={} APPEND TO posts, comments=none)"); Maybe I can use that to be like the above query
// ADD Post (text = 'Hello every body', at=NOW, from={} TO last_post, comments=none) And this for a single link
// try testParsing(db, "ADD Post (text = 'Hello every body', at=NOW, from={} APPEND TO [posts, last_post], comments=none)"); Can be an array to add it to multiple list
// last_post is replaced instead of append
try testParsing(db, "GRAB Post [id, text, at, from [id, name]] {}");
try testParsing(db, "GRAB User [id, name] {}");
try testParsing(db, "ADD User (name = 'Bob', email='bob@email.com', age=55, bday=2000/01/01)");
try testParsing(db, "ADD User (name = 'Roger', email='roger@email.com', age=22, bday=2000/01/01)");
try testParsing(db, "ADD Post (text = 'Hello everybody', at=NOW, from={name = 'Bob'})");
try testParsing(db, "ADD Post (text = 'Look at this thing !', at=NOW, from={name = 'Bob'})");
try testParsing(db, "ADD Post (text = 'I love animals.', at=NOW, from={name = 'Roger'})");
try testParsing(db, "ADD Comment (text = 'Hey man !', at=NOW, from={name = 'Roger'}, of={text = 'Hello everybody'})");
try testParsing(db, "ADD Comment (text = 'Me too :)', at=NOW, from={name = 'Bob'}, of={text = 'I love animals.'})");
try testParsing(db, "GRAB Post [text, at, from [name]] {}");
try testParsing(db, "GRAB Comment [text, at, from [name], of [id]] {}");
}
fn testParsing(db: DB, source: [:0]const u8) !void {

View File

@ -21,7 +21,7 @@ pub fn init(allocator: std.mem.Allocator) !ThreadEngine {
const cpu_core = if (CPU_CORE == 0) std.Thread.getCpuCount() catch 1 else CPU_CORE;
log.debug("Using {d} cpu core.", .{cpu_core});
log.debug("Using {d}Mb stack size.", .{std.Thread.SpawnConfig.default_stack_size / 1024 / 1024});
// log.debug("Using {d}Mb stack size.", .{std.Thread.SpawnConfig.default_stack_size / 1024 / 1024});
const thread_pool = try allocator.create(std.Thread.Pool);
try thread_pool.init(std.Thread.Pool.Options{

View File

@ -246,7 +246,9 @@ pub fn parseConditionValue(
token.* = self.toker.next();
} else first = false;
if (token.tag == .string_literal) array.append(self.toker.getTokenSlice(token.*)) catch return ZipponError.MemoryError;
if (token.tag == .string_literal) array.append(
self.toker.getTokenSlice(token.*)[1 .. (token.loc.end - token.loc.start) - 1],
) catch return ZipponError.MemoryError;
if (token.tag == .string_literal or token.tag == .comma) continue;
if (token.tag == .r_bracket) break;
return printError(