Moved thread stuff into a directory
parent: d775ff1d0a
commit: c0e8b07025
src/dataStructure/UUIDFileIndex.zig
@@ -28,14 +28,7 @@ pub fn deinit(self: *UUIDIndexMap) void {
 }
 
 pub fn put(self: *UUIDIndexMap, uuid: UUID, file_index: usize) !void {
-    const allocator = self.arena.allocator();
-    const new_uuid = try allocator.create(UUID);
-    new_uuid.* = uuid;
-
-    const new_file_index = try allocator.create(usize);
-    new_file_index.* = file_index;
-
-    try self.map.*.put(new_uuid.*, new_file_index.*);
+    try self.map.*.put(uuid, file_index);
 }
 
 pub fn contains(self: UUIDIndexMap, uuid: UUID) bool {
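The dropped allocations were dead weight: `std.AutoHashMap` copies fixed-size keys and values into its own storage on `put`, so arena-allocating a `UUID` and a `usize` only to dereference them immediately bought nothing. A minimal sketch of the by-value behavior (standalone; a plain `[16]u8` stands in for `dtype.UUID`):

    const std = @import("std");

    test "AutoHashMap stores fixed-size keys and values by copy" {
        var map = std.AutoHashMap([16]u8, usize).init(std.testing.allocator);
        defer map.deinit();

        var key: [16]u8 = [_]u8{0xAB} ** 16;
        try map.put(key, 42); // key and value are copied into the map's storage

        key[0] = 0x00; // mutating the local afterwards changes nothing in the map
        try std.testing.expectEqual(@as(usize, 42), map.get([_]u8{0xAB} ** 16).?);
    }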
src/entityWriter.zig
@@ -10,160 +10,158 @@ const UUID = dtype.UUID;
 const ZipponError = @import("errors.zig").ZipponError;
 
-pub const EntityWriter = struct {
 pub fn writeEntityTable(
     writer: anytype,
     row: []zid.Data,
     additional_data: AdditionalData,
     data_types: []const DataType,
 ) !void {
     try writer.writeAll("| ");
     for (additional_data.childrens.items) |member| {
         try writeValue(writer, row[member.index], data_types[member.index]);
         try writer.writeAll(" \t| ");
     }
     try writer.writeByte('\n');
 }
 
 pub fn writeHeaderCsv(
     writer: anytype,
     members: [][]const u8,
     delimiter: u8,
 ) !void {
     for (members, 0..) |member, i| {
         try writer.writeAll(member);
         if (i < members.len - 1) try writer.writeByte(delimiter);
     }
     try writer.writeByte('\n');
 }
 
 pub fn writeEntityCsv( // FIXME: I think if one value str contains a \n this will break. I need to quote like """
     writer: anytype,
     row: []zid.Data,
     data_types: []const DataType,
     delimiter: u8,
 ) !void {
     for (0..row.len) |i| {
         try writeValue(writer, row[i], data_types[i]);
         if (i < row.len - 1) try writer.writeByte(delimiter);
     }
     try writer.writeByte('\n');
 }
 
 pub fn writeEntityJSON(
     writer: anytype,
     row: []zid.Data,
     additional_data: AdditionalData,
     data_types: []const DataType,
 ) !void {
     try writer.writeByte('{');
     for (additional_data.childrens.items) |member| {
         try writer.print("{s}: ", .{member.name});
         try writeValue(writer, row[member.index], data_types[member.index]);
         try writer.writeAll(", ");
     }
     try writer.writeAll("}, ");
 }
 
 fn writeValue(writer: anytype, value: zid.Data, data_type: DataType) !void {
     switch (value) {
         .Float => |v| try writer.print("{d}", .{v}),
         .Int => |v| try writer.print("{d}", .{v}),
         .Str => |v| try writer.print("\"{s}\"", .{v}),
         .UUID => |v| {
             if (data_type == .self) {
                 try writer.print("\"{s}\"", .{UUID.format_bytes(v)});
                 return;
             }
             const uuid = try UUID.parse("00000000-0000-0000-0000-000000000000"); // Maybe pass that comptime to prevent parsing it everytime
             if (!std.meta.eql(v, uuid.bytes)) {
                 try writer.print("{{|<{s}>|}}", .{v});
             } else {
                 try writer.print("{{}}", .{});
             }
         },
         .Bool => |v| try writer.print("{any}", .{v}),
         .Unix => |v| {
             const datetime = DateTime.initUnix(v);
             try writer.writeByte('"');
             switch (data_type) {
                 .date => try datetime.format("YYYY/MM/DD", writer),
                 .time => try datetime.format("HH:mm:ss.SSSS", writer),
                 .datetime => try datetime.format("YYYY/MM/DD-HH:mm:ss.SSSS", writer),
                 else => unreachable,
             }
             try writer.writeByte('"');
         },
         .IntArray, .FloatArray, .StrArray, .UUIDArray, .BoolArray, .UnixArray => try writeArray(writer, value, data_type),
     }
 }
 
 fn writeArray(writer: anytype, data: zid.Data, data_type: DataType) ZipponError!void {
     writer.writeByte('[') catch return ZipponError.WriteError;
     var iter = zid.ArrayIterator.init(data) catch return ZipponError.ZipponDataError;
     switch (data) {
         .IntArray => while (iter.next()) |v| writer.print("{d}, ", .{v.Int}) catch return ZipponError.WriteError,
         .FloatArray => while (iter.next()) |v| writer.print("{d}", .{v.Float}) catch return ZipponError.WriteError,
         .StrArray => while (iter.next()) |v| writer.print("\"{s}\"", .{v.Str}) catch return ZipponError.WriteError,
         .UUIDArray => while (iter.next()) |v| writer.print("{{|<{s}>|}},", .{v.UUID}) catch return ZipponError.WriteError,
         .BoolArray => while (iter.next()) |v| writer.print("{any}", .{v.Bool}) catch return ZipponError.WriteError,
         .UnixArray => while (iter.next()) |v| {
             const datetime = DateTime.initUnix(v.Unix);
             writer.writeByte('"') catch return ZipponError.WriteError;
             switch (data_type) {
                 .date => datetime.format("YYYY/MM/DD", writer) catch return ZipponError.WriteError,
                 .time => datetime.format("HH:mm:ss.SSSS", writer) catch return ZipponError.WriteError,
                 .datetime => datetime.format("YYYY/MM/DD-HH:mm:ss.SSSS", writer) catch return ZipponError.WriteError,
                 else => unreachable,
             }
             writer.writeAll("\", ") catch return ZipponError.WriteError;
         },
         else => unreachable,
     }
     writer.writeByte(']') catch return ZipponError.WriteError;
 }
 
 /// Takes a string in JSON format and looks for {|<[16]u8>|}, then looks into the map to check if it can find this UUID.
 /// If it finds it, it will replace the {|<[16]u8>|} with the value.
 pub fn updateWithRelation(writer: anytype, input: []const u8, map: std.AutoHashMap([16]u8, JsonString)) ZipponError!void {
     var uuid_bytes: [16]u8 = undefined;
     var start: usize = 0;
     while (std.mem.indexOf(u8, input[start..], "{|<")) |pos| {
         const pattern_start = start + pos + 3;
         const pattern_end = pattern_start + 16;
 
         // Write the text before the pattern
         writer.writeAll(input[start .. pattern_start - 3]) catch return ZipponError.WriteError;
 
         if (input[pattern_start - 4] == '[') {
             start = try updateArray(writer, input, map, pattern_start - 3);
             continue;
         }
 
         @memcpy(uuid_bytes[0..], input[pattern_start..pattern_end]);
         if (map.get(uuid_bytes)) |json_string| {
             writer.writeAll(json_string.slice) catch return ZipponError.WriteError;
         } else {
             writer.writeAll(input[pattern_start - 3 .. pattern_end + 3]) catch return ZipponError.WriteError;
         }
         start = pattern_end + 5;
     }
 
     // Write any remaining text
     writer.writeAll(input[start..]) catch return ZipponError.WriteError;
 }
 
 fn updateArray(writer: anytype, input: []const u8, map: std.AutoHashMap([16]u8, JsonString), origin: usize) ZipponError!usize {
     var uuid_bytes: [16]u8 = undefined;
     var start = origin;
     while (input.len > start + 23 and std.mem.eql(u8, input[start .. start + 3], "{|<") and std.mem.eql(u8, input[start + 19 .. start + 23], ">|},")) : (start += 23) {
         @memcpy(uuid_bytes[0..], input[start + 3 .. start + 19]);
         if (map.get(uuid_bytes)) |json_string| {
             writer.writeAll(json_string.slice) catch return ZipponError.WriteError;
         } else {
             writer.writeAll(input[start .. start + 23]) catch return ZipponError.WriteError;
         }
     }
     return start;
 }
-};
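The `{|<` + 16 raw UUID bytes + `>|}` token is this file's private wire format: `writeValue` emits it for a not-yet-resolved relation, and `updateWithRelation` later splices in the related entity's JSON from the map. A simplified, single-token sketch of that replacement path (illustrative only; `spliceOne` is not from the commit, and `JsonString` is re-declared here as a stand-in for the one in dataStructure/relationMap.zig):

    const std = @import("std");

    const JsonString = struct { slice: []const u8 }; // stand-in for relationMap's JsonString

    // Illustrative single-token version of updateWithRelation's scalar path.
    fn spliceOne(writer: anytype, input: []const u8, map: std.AutoHashMap([16]u8, JsonString)) !void {
        const pos = std.mem.indexOf(u8, input, "{|<") orelse return writer.writeAll(input);
        const key_start = pos + 3; // skip "{|<"
        try writer.writeAll(input[0..pos]); // text before the token
        var key: [16]u8 = undefined;
        @memcpy(&key, input[key_start .. key_start + 16]);
        if (map.get(key)) |json| {
            try writer.writeAll(json.slice); // splice the related entity's JSON
        } else {
            try writer.writeAll(input[pos .. key_start + 19]); // unknown UUID: keep the raw token
        }
        try writer.writeAll(input[key_start + 19 ..]); // text after the token
    }

    test "splice a relation placeholder" {
        var map = std.AutoHashMap([16]u8, JsonString).init(std.testing.allocator);
        defer map.deinit();
        try map.put([_]u8{1} ** 16, .{ .slice = "{\"name\": \"Bob\"}" });

        var out = std.ArrayList(u8).init(std.testing.allocator);
        defer out.deinit();
        try spliceOne(out.writer(), "{friend: {|<" ++ [_]u8{1} ** 16 ++ ">|}, }", map);
        try std.testing.expectEqualStrings("{friend: {\"name\": \"Bob\"}, }", out.items);
    }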
src/fileEngine.zig
@@ -4,10 +4,10 @@ const zid = @import("ZipponData");
 const U64 = std.atomic.Value(u64);
 const Pool = std.Thread.Pool;
 const Allocator = std.mem.Allocator;
-const SchemaEngine = @import("schemaEngine.zig").SchemaEngine;
+const SchemaEngine = @import("schemaEngine.zig");
 const SchemaStruct = @import("schemaEngine.zig").SchemaStruct;
-const ThreadSyncContext = @import("threadEngine.zig").ThreadSyncContext;
-const EntityWriter = @import("entityWriter.zig").EntityWriter;
+const ThreadSyncContext = @import("thread/context.zig");
+const EntityWriter = @import("entityWriter.zig");
 
 const dtype = @import("dtype");
 const s2t = dtype.s2t;
@@ -15,9 +15,9 @@ const UUID = dtype.UUID;
 const DateTime = dtype.DateTime;
 const DataType = dtype.DataType;
 
-const AdditionalData = @import("dataStructure/additionalData.zig").AdditionalData;
+const AdditionalData = @import("dataStructure/additionalData.zig");
 const Filter = @import("dataStructure/filter.zig").Filter;
-const RelationMap = @import("dataStructure/relationMap.zig").RelationMap;
+const RelationMap = @import("dataStructure/relationMap.zig");
 const JsonString = @import("dataStructure/relationMap.zig").JsonString;
 const ConditionValue = @import("dataStructure/filter.zig").ConditionValue;
 
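The recurring import change in these hunks drops the trailing field access because each of those files now declares its type at the top level: in Zig a source file is itself a struct, so once a file holds the fields and methods directly (often with a `pub const Self = @This();`), the `@import` result is the type. A minimal illustration (point.zig is hypothetical, not a file in this repository):

    const std = @import("std");

    // point.zig — the file itself is the struct type:
    //     x: f32,
    //     y: f32,
    //     pub fn norm(self: @This()) f32 {
    //         return @sqrt(self.x * self.x + self.y * self.y);
    //     }

    const Point = @import("point.zig"); // no `.Point` field access needed

    pub fn main() void {
        const p = Point{ .x = 3, .y = 4 };
        std.debug.print("{d}\n", .{p.norm()}); // 5
    }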
src/main.zig (10 changed lines)
@@ -4,16 +4,16 @@ const send = utils.send;
 const Allocator = std.mem.Allocator;
 const Pool = std.Thread.Pool;
 
-const FileEngine = @import("fileEngine.zig").FileEngine;
-const SchemaEngine = @import("schemaEngine.zig").SchemaEngine;
-const ThreadEngine = @import("threadEngine.zig").ThreadEngine;
+const FileEngine = @import("fileEngine.zig");
+const SchemaEngine = @import("schemaEngine.zig");
+const ThreadEngine = @import("thread/engine.zig");
 
 const cliTokenizer = @import("tokenizers/cli.zig").Tokenizer;
 const cliToken = @import("tokenizers/cli.zig").Token;
 
 const ziqlTokenizer = @import("tokenizers/ziql.zig").Tokenizer;
 const ziqlToken = @import("tokenizers/ziql.zig").Token;
-const ziqlParser = @import("ziqlParser.zig").Parser;
+const ziqlParser = @import("ziqlParser.zig");
 
 const ZipponError = @import("errors.zig").ZipponError;
 
@@ -86,7 +86,7 @@ pub const DBEngine = struct {
     pub fn init(potential_main_path: ?[]const u8, potential_schema_path: ?[]const u8) DBEngine {
         var self = DBEngine{};
 
-        self.thread_engine = ThreadEngine.init();
+        self.thread_engine = ThreadEngine.init() catch @panic("TODO");
 
         const potential_main_path_or_environment_variable = potential_main_path orelse utils.getEnvVariable("ZIPPONDB_PATH");
         if (potential_main_path_or_environment_variable) |main_path| {
src/schemaEngine.zig
@@ -1,18 +1,18 @@
 const std = @import("std");
 const zid = @import("ZipponData");
 const Allocator = std.mem.Allocator;
-const Parser = @import("schemaParser.zig").Parser;
+const Parser = @import("schemaParser.zig");
 const Tokenizer = @import("tokenizers/schema.zig").Tokenizer;
 const ZipponError = @import("errors.zig").ZipponError;
 const dtype = @import("dtype");
 const DataType = dtype.DataType;
-const AdditionalData = @import("dataStructure/additionalData.zig").AdditionalData;
-const RelationMap = @import("dataStructure/relationMap.zig").RelationMap;
+const AdditionalData = @import("dataStructure/additionalData.zig");
+const RelationMap = @import("dataStructure/relationMap.zig");
 const JsonString = @import("dataStructure/relationMap.zig").JsonString;
 const ConditionValue = @import("dataStructure/filter.zig").ConditionValue;
 const UUID = dtype.UUID;
-const UUIDFileIndex = @import("dataStructure/UUIDFileIndex.zig").UUIDIndexMap;
-const FileEngine = @import("fileEngine.zig").FileEngine;
+const UUIDFileIndex = @import("dataStructure/UUIDFileIndex.zig");
+const FileEngine = @import("fileEngine.zig");
 
 // TODO: Create a schemaEngine directory and add this as core and the parser with it
 
src/thread/context.zig (new file, 43 lines)
@@ -0,0 +1,43 @@
+const std = @import("std");
+const log = std.log.scoped(.thread);
+const U64 = std.atomic.Value(u64);
+
+pub const Self = @This();
+
+processed_struct: U64 = U64.init(0),
+error_file: U64 = U64.init(0),
+completed_file: U64 = U64.init(0),
+max_struct: u64,
+max_file: u64,
+
+pub fn init(max_struct: u64, max_file: u64) Self {
+    return Self{
+        .max_struct = max_struct,
+        .max_file = max_file,
+    };
+}
+
+pub fn isComplete(self: *Self) bool {
+    return (self.completed_file.load(.acquire) + self.error_file.load(.acquire)) >= self.max_file;
+}
+
+pub fn completeThread(self: *Self) void {
+    _ = self.completed_file.fetchAdd(1, .release);
+}
+
+pub fn incrementAndCheckStructLimit(self: *Self) bool {
+    if (self.max_struct == 0) return false;
+    const new_count = self.processed_struct.fetchAdd(1, .monotonic);
+    return (new_count + 1) >= self.max_struct;
+}
+
+pub fn checkStructLimit(self: *Self) bool {
+    if (self.max_struct == 0) return false;
+    const count = self.processed_struct.load(.monotonic);
+    return (count) >= self.max_struct;
+}
+
+pub fn logError(self: *Self, message: []const u8, err: anyerror) void {
+    log.err("{s}: {any}", .{ message, err });
+    _ = self.error_file.fetchAdd(1, .acquire);
+}
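A hedged usage sketch of the context (the `worker` and `dispatch` functions are illustrative, not part of the commit): the coordinator sizes the context by file count, each pooled task reports completion or an error, and the dispatcher spins until every file is accounted for.

    const std = @import("std");
    const ThreadSyncContext = @import("thread/context.zig"); // path assumed relative to src/

    fn worker(ctx: *ThreadSyncContext) void {
        defer ctx.completeThread(); // always count the file, even on early return
        if (ctx.checkStructLimit()) return; // another thread already hit the cap
        // ... parse one data file; then, per matching struct:
        if (ctx.incrementAndCheckStructLimit()) return;
    }

    fn dispatch(pool: *std.Thread.Pool, file_count: u64) !void {
        var ctx = ThreadSyncContext.init(0, file_count); // max_struct == 0 means "no limit"
        for (0..file_count) |_| try pool.spawn(worker, .{&ctx});
        while (!ctx.isComplete()) std.Thread.yield() catch {}; // completed + errored == max_file
    }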
src/thread/engine.zig (new file, 38 lines)
@@ -0,0 +1,38 @@
+const std = @import("std");
+const Pool = std.Thread.Pool;
+const Allocator = std.mem.Allocator;
+
+const CPU_CORE = @import("config").CPU_CORE;
+const log = std.log.scoped(.thread);
+const ZipponError = @import("../errors.zig").ZipponError;
+
+pub const Self = @This();
+
+var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
+const allocator = arena.allocator();
+
+thread_arena: *std.heap.ThreadSafeAllocator,
+thread_pool: *Pool,
+
+pub fn init() ZipponError!Self {
+    const thread_arena = allocator.create(std.heap.ThreadSafeAllocator) catch return ZipponError.MemoryError;
+    thread_arena.* = std.heap.ThreadSafeAllocator{
+        .child_allocator = allocator,
+    };
+
+    const thread_pool = allocator.create(Pool) catch return ZipponError.MemoryError;
+    thread_pool.init(Pool.Options{
+        .allocator = thread_arena.allocator(),
+        .n_jobs = CPU_CORE,
+    }) catch return ZipponError.ThreadError;
+
+    return Self{
+        .thread_pool = thread_pool,
+        .thread_arena = thread_arena,
+    };
+}
+
+pub fn deinit(self: *Self) void {
+    self.thread_pool.deinit();
+    arena.deinit();
+}
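The rewrite replaces the old file-scope `var thread_pool` / `var thread_arena` globals with heap allocations owned by the returned value, and `init` now surfaces failure as a `ZipponError` instead of `@panic("=(")`, which is why `DBEngine.init` earlier in the diff gained the `catch`. A hedged caller sketch (the `main` and `task` functions are illustrative, not from the commit):

    const std = @import("std");
    const ThreadEngine = @import("thread/engine.zig"); // path assumed relative to src/

    pub fn main() !void {
        var thread_engine = try ThreadEngine.init(); // MemoryError/ThreadError instead of a panic
        defer thread_engine.deinit();

        // The pool is ready for fileEngine-style parallel work:
        try thread_engine.thread_pool.spawn(task, .{});
    }

    fn task() void {}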
src/threadEngine.zig (deleted, 81 lines)
@@ -1,81 +0,0 @@
-// TODO: Put the ThreadSynx stuff and create a ThreadEngine with the arena, pool, and some methods
-
-const std = @import("std");
-const U64 = std.atomic.Value(u64);
-const Pool = std.Thread.Pool;
-const Allocator = std.mem.Allocator;
-
-const ZipponError = @import("errors.zig").ZipponError;
-const CPU_CORE = @import("config").CPU_CORE;
-const OUT_BUFFER_SIZE = @import("config").OUT_BUFFER_SIZE;
-const log = std.log.scoped(.thread);
-
-const allocator = std.heap.page_allocator;
-
-var thread_arena: std.heap.ThreadSafeAllocator = undefined;
-var thread_pool: Pool = undefined;
-
-pub const ThreadSyncContext = struct {
-    processed_struct: std.atomic.Value(u64) = std.atomic.Value(u64).init(0),
-    error_file: std.atomic.Value(u64) = std.atomic.Value(u64).init(0),
-    completed_file: std.atomic.Value(u64) = std.atomic.Value(u64).init(0),
-    max_struct: u64,
-    max_file: u64,
-
-    pub fn init(max_struct: u64, max_file: u64) ThreadSyncContext {
-        return ThreadSyncContext{
-            .max_struct = max_struct,
-            .max_file = max_file,
-        };
-    }
-
-    pub fn isComplete(self: *ThreadSyncContext) bool {
-        return (self.completed_file.load(.acquire) + self.error_file.load(.acquire)) >= self.max_file;
-    }
-
-    pub fn completeThread(self: *ThreadSyncContext) void {
-        _ = self.completed_file.fetchAdd(1, .release);
-    }
-
-    pub fn incrementAndCheckStructLimit(self: *ThreadSyncContext) bool {
-        if (self.max_struct == 0) return false;
-        const new_count = self.processed_struct.fetchAdd(1, .monotonic);
-        return (new_count + 1) >= self.max_struct;
-    }
-
-    pub fn checkStructLimit(self: *ThreadSyncContext) bool {
-        if (self.max_struct == 0) return false;
-        const count = self.processed_struct.load(.monotonic);
-        return (count) >= self.max_struct;
-    }
-
-    pub fn logError(self: *ThreadSyncContext, message: []const u8, err: anyerror) void {
-        log.err("{s}: {any}", .{ message, err });
-        _ = self.error_file.fetchAdd(1, .acquire);
-    }
-};
-
-pub const ThreadEngine = @This();
-
-thread_arena: *std.heap.ThreadSafeAllocator,
-thread_pool: *Pool,
-
-pub fn init() ThreadEngine {
-    thread_arena = std.heap.ThreadSafeAllocator{
-        .child_allocator = allocator,
-    };
-
-    thread_pool.init(std.Thread.Pool.Options{
-        .allocator = thread_arena.allocator(),
-        .n_jobs = CPU_CORE,
-    }) catch @panic("=(");
-
-    return ThreadEngine{
-        .thread_pool = &thread_pool,
-        .thread_arena = &thread_arena,
-    };
-}
-
-pub fn deinit(_: ThreadEngine) void {
-    thread_pool.deinit();
-}
src/ziqlParser.zig
@@ -66,6 +66,7 @@ const State = enum {
 };
 
 pub const Parser = @This();
 
 toker: *Tokenizer,
 file_engine: *FileEngine,
 schema_engine: *SchemaEngine,