Removed useless code and old comment

This commit is contained in:
Adrien Bouvais 2025-01-16 22:34:19 +01:00
parent d1b430a3d5
commit a09a368528
7 changed files with 3 additions and 98 deletions

View File

@ -136,77 +136,6 @@ pub const UUID = struct {
// Zero UUID
pub const zero: UUID = .{ .bytes = .{0} ** 16 };
// TODO: Optimize both
/// Set-union in place: append to `arr1` every UUID from `arr2` that it does
/// not already contain. `arr2` is left untouched.
pub fn OR(arr1: *std.ArrayList(UUID), arr2: *std.ArrayList(UUID)) !void {
    for (arr2.items) |candidate| {
        if (containUUID(arr1.*, candidate)) continue;
        try arr1.append(candidate);
    }
}
/// Set-intersection in place: keep in `arr1` only the UUIDs that also appear
/// in `arr2`, preserving the relative order of the survivors.
pub fn AND(arr1: *std.ArrayList(UUID), arr2: *std.ArrayList(UUID)) !void {
    var keep_index: usize = 0;
    while (keep_index < arr1.items.len) {
        if (containUUID(arr2.*, arr1.items[keep_index])) {
            keep_index += 1;
        } else {
            // orderedRemove shifts the tail left, so the same index now
            // holds the next unexamined element.
            _ = arr1.orderedRemove(keep_index);
        }
    }
}
test "OR & AND" {
    const allocator = std.testing.allocator;

    // Shared right-hand side for both operations.
    const right_ids = [_][:0]const u8{
        "00000000-0000-0000-0000-000000000000",
        "00000000-0000-0000-0000-000000000001",
        "00000000-0000-0000-0000-000000000005",
        "00000000-0000-0000-0000-000000000006",
        "00000000-0000-0000-0000-000000000007",
    };
    // Left-hand side, rebuilt fresh for each operation.
    const left_ids = [_][:0]const u8{
        "00000000-0000-0000-0000-000000000000",
        "00000000-0000-0000-0000-000000000001",
        "00000000-0000-0000-0000-000000000002",
        "00000000-0000-0000-0000-000000000003",
        "00000000-0000-0000-0000-000000000004",
    };
    // AND keeps only the two UUIDs present on both sides.
    const and_expected_ids = [_][:0]const u8{
        "00000000-0000-0000-0000-000000000000",
        "00000000-0000-0000-0000-000000000001",
    };
    // OR appends the right-hand UUIDs missing on the left, in order.
    const or_expected_ids = [_][:0]const u8{
        "00000000-0000-0000-0000-000000000000",
        "00000000-0000-0000-0000-000000000001",
        "00000000-0000-0000-0000-000000000002",
        "00000000-0000-0000-0000-000000000003",
        "00000000-0000-0000-0000-000000000004",
        "00000000-0000-0000-0000-000000000005",
        "00000000-0000-0000-0000-000000000006",
        "00000000-0000-0000-0000-000000000007",
    };

    var right_arr = std.ArrayList(UUID).init(allocator);
    defer right_arr.deinit();
    for (right_ids) |id| try right_arr.append(try UUID.parse(id));

    var and_arr = std.ArrayList(UUID).init(allocator);
    defer and_arr.deinit();
    for (left_ids) |id| try and_arr.append(try UUID.parse(id));
    var and_expected = std.ArrayList(UUID).init(allocator);
    defer and_expected.deinit();
    for (and_expected_ids) |id| try and_expected.append(try UUID.parse(id));
    try AND(&and_arr, &right_arr);
    try std.testing.expect(compareUUIDArray(and_arr, and_expected));

    var or_arr = std.ArrayList(UUID).init(allocator);
    defer or_arr.deinit();
    for (left_ids) |id| try or_arr.append(try UUID.parse(id));
    var or_expected = std.ArrayList(UUID).init(allocator);
    defer or_expected.deinit();
    for (or_expected_ids) |id| try or_expected.append(try UUID.parse(id));
    try OR(&or_arr, &right_arr);
    try std.testing.expect(compareUUIDArray(or_arr, or_expected));
}
fn containUUID(arr: std.ArrayList(UUID), value: UUID) bool {
return for (arr.items) |elem| {
if (value.compare(elem)) break true;

View File

@ -150,7 +150,7 @@ pub fn parse(self: *Self, null_term_line_str: [:0]const u8) !bool {
.string_literal => {
const null_term_query_str = try allocator.dupeZ(u8, toker.buffer[token.loc.start + 1 .. token.loc.end - 1]);
defer allocator.free(null_term_query_str);
self.runQuery(null_term_query_str); // TODO: THis should return something and I should send from here, not from the parser
self.runQuery(null_term_query_str); // This should most probably return something and I should send from here, not from the parser
state = .end;
},
.keyword_help => {

View File

@ -4,8 +4,6 @@ const RelationMap = @import("relationMap.zig").RelationMap;
const dtype = @import("dtype");
const DataType = dtype.DataType;
// TODO: Put this in a data structure directory
const ZipponError = @import("error").ZipponError;
/// This is the [] part

View File

@ -52,7 +52,6 @@ pub fn createMainDirectories(self: *Self) ZipponError!void {
}
/// Request a path to a schema file and then create the struct folder
/// TODO: Check if some data already exist and if so ask if the user want to delete it and make a backup
pub fn createStructDirectories(self: *Self, struct_array: []SchemaStruct) ZipponError!void {
var data_dir = try self.printOpenDir("{s}/DATA", .{self.path_to_ZipponDB_dir}, .{});
defer data_dir.close();

View File

@ -132,7 +132,6 @@ pub fn orderedNewData(
}
/// Get the index of the first file that is below the size limit. If not found, create a new file
/// TODO: Need some serious speed up. I should keep in memory a file->size as a hashmap and use that instead
pub fn getFirstUsableIndexFile(self: Self, struct_name: []const u8) ZipponError!usize {
var member_dir = try self.printOpenDir("{s}/DATA/{s}", .{ self.path_to_ZipponDB_dir, struct_name }, .{ .iterate = true });
defer member_dir.close();
@ -144,7 +143,7 @@ pub fn getFirstUsableIndexFile(self: Self, struct_name: []const u8) ZipponError!
const file_stat = member_dir.statFile(entry.name) catch return ZipponError.FileStatError;
if (file_stat.size < config.MAX_FILE_SIZE) {
// Can't I just return i? It is supposed that files are ordered. I think I already checked and it is not.
log.debug("{s}\n\n", .{entry.name});
log.debug("First usable file found for `{s}`: {s}", .{ struct_name, entry.name });
return std.fmt.parseInt(usize, entry.name[0..(entry.name.len - 4)], 10) catch return ZipponError.InvalidFileIndex; // INFO: Hardcoded len of file extension
}
}
@ -155,24 +154,6 @@ pub fn getFirstUsableIndexFile(self: Self, struct_name: []const u8) ZipponError!
return i;
}
/// Iterate over all files of a struct and return the index of the last file.
/// E.g. a struct with 0.csv and 1.csv returns 1.
/// Assumes data files are named contiguously 0..N with no gaps, so the last
/// index equals the file count minus one.
/// FIXME: I use 0..file_index but because now I delete empty file, I can end up trying to parse an empty file. So I need to delete that
/// And do something that return a list of file to parse instead
pub fn maxFileIndex(self: Self, struct_name: []const u8) ZipponError!usize {
    var dir = try self.printOpenDir("{s}/DATA/{s}", .{ self.path_to_ZipponDB_dir, struct_name }, .{ .iterate = true });
    defer dir.close();

    // Count regular files only; sub-directories and other entry kinds are ignored.
    var count: usize = 0;
    var iter = dir.iterate();
    while (iter.next() catch return ZipponError.DirIterError) |entry| {
        if (entry.kind != .file) continue;
        count += 1;
    }
    // Bug fix: `count - 1` underflows usize when the directory holds no files
    // (safety panic in Debug, UB in ReleaseFast). Fall back to index 0.
    // NOTE(review): confirm callers treat an empty struct directory as index 0.
    if (count == 0) return 0;
    return count - 1;
}
pub fn allFileIndex(self: Self, allocator: Allocator, struct_name: []const u8) ZipponError![]usize {
var dir = try self.printOpenDir("{s}/DATA/{s}", .{ self.path_to_ZipponDB_dir, struct_name }, .{ .iterate = true });
defer dir.close();

View File

@ -24,7 +24,6 @@ const log = std.log.scoped(.fileEngine);
var path_buffer: [1024]u8 = undefined;
// TODO: Make it in batch too
pub fn addEntity(
self: *Self,
struct_name: []const u8,
@ -35,7 +34,7 @@ pub fn addEntity(
defer arena.deinit();
const allocator = arena.allocator();
var file_index = try self.getFirstUsableIndexFile(struct_name); // TODO: Speed up this
var file_index = try self.getFirstUsableIndexFile(struct_name);
var path = std.fmt.bufPrint(&path_buffer, "{s}/DATA/{s}/{d}.zid", .{ self.path_to_ZipponDB_dir, struct_name, file_index }) catch return ZipponError.MemoryError;
var data_writer = zid.DataWriter.init(path, null) catch return ZipponError.ZipponDataError;

View File

@ -210,7 +210,6 @@ pub fn parse(self: *Self, buffer: [:0]const u8) ZipponError!void {
),
},
// TODO: Optimize so it doesnt use parseFilter but just parse the file and directly check the condition. Here I end up parsing 2 times.
.filter_and_update => switch (token.tag) {
.l_brace => {
var filter = try self.parseFilter(allocator, struct_name, false);