Merge remote-tracking branch 'origin/master' into llvm6

This commit is contained in:
Andrew Kelley 2018-01-17 13:11:21 -05:00
commit 48cd808185
18 changed files with 1374 additions and 1515 deletions

View File

@@ -368,6 +368,7 @@ set(ZIG_STD_FILES
"crypto/md5.zig"
"crypto/sha1.zig"
"crypto/sha2.zig"
"crypto/sha3.zig"
"crypto/blake2.zig"
"cstr.zig"
"debug/failing_allocator.zig"

View File

@@ -15,23 +15,17 @@ pub fn build(b: &Builder) -> %void {
var docgen_exe = b.addExecutable("docgen", "doc/docgen.zig");
const rel_zig_exe = try os.path.relative(b.allocator, b.build_root, b.zig_exe);
var docgen_cmd = b.addCommand(null, b.env_map, [][]const u8 {
docgen_exe.getOutputPath(),
rel_zig_exe,
"doc/langref.html.in",
os.path.join(b.allocator, b.cache_root, "langref.html") catch unreachable,
});
docgen_cmd.step.dependOn(&docgen_exe.step);
var docgen_home_cmd = b.addCommand(null, b.env_map, [][]const u8 {
docgen_exe.getOutputPath(),
"doc/home.html.in",
os.path.join(b.allocator, b.cache_root, "home.html") catch unreachable,
});
docgen_home_cmd.step.dependOn(&docgen_exe.step);
const docs_step = b.step("docs", "Build documentation");
docs_step.dependOn(&docgen_cmd.step);
docs_step.dependOn(&docgen_home_cmd.step);
const test_step = b.step("test", "Run all the tests");

View File

@@ -1,10 +1,16 @@
const std = @import("std");
const io = std.io;
const os = std.os;
const warn = std.debug.warn;
const mem = std.mem;
const max_doc_file_size = 10 * 1024 * 1024;
const exe_ext = std.build.Target(std.build.Target.Native).exeFileExt();
pub fn main() -> %void {
// TODO use a more general purpose allocator here
var inc_allocator = try std.heap.IncrementingAllocator.init(5 * 1024 * 1024);
var inc_allocator = try std.heap.IncrementingAllocator.init(max_doc_file_size);
defer inc_allocator.deinit();
const allocator = &inc_allocator.allocator;
@@ -12,6 +18,9 @@ pub fn main() -> %void {
if (!args_it.skip()) @panic("expected self arg");
const zig_exe = try (args_it.next(allocator) ?? @panic("expected zig exe arg"));
defer allocator.free(zig_exe);
const in_file_name = try (args_it.next(allocator) ?? @panic("expected input arg"));
defer allocator.free(in_file_name);
@@ -25,39 +34,533 @@ pub fn main() -> %void {
defer out_file.close();
var file_in_stream = io.FileInStream.init(&in_file);
var buffered_in_stream = io.BufferedInStream.init(&file_in_stream.stream);
const input_file_bytes = try file_in_stream.stream.readAllAlloc(allocator, max_doc_file_size);
var file_out_stream = io.FileOutStream.init(&out_file);
var buffered_out_stream = io.BufferedOutStream.init(&file_out_stream.stream);
gen(&buffered_in_stream.stream, &buffered_out_stream.stream);
try buffered_out_stream.flush();
var tokenizer = Tokenizer.init(in_file_name, input_file_bytes);
var toc = try genToc(allocator, &tokenizer);
try genHtml(allocator, &tokenizer, &toc, &buffered_out_stream.stream, zig_exe);
try buffered_out_stream.flush();
}
const State = enum {
Start,
Derp,
// One lexical token produced by Tokenizer.next().
// `start`/`end` are byte offsets into Tokenizer.buffer; the token's text
// is buffer[start..end] (end is exclusive).
const Token = struct {
id: Id,
start: usize,
end: usize,
const Id = enum {
Invalid,      // malformed input (e.g. EOF in the middle of a tag)
Content,      // raw text outside of any {# ... #} tag
BracketOpen,  // the "{#" that opens a tag
TagContent,   // text inside a tag (tag name or argument)
Separator,    // the "|" between tag arguments
BracketClose, // the "#}" that closes a tag
Eof,          // end of input
};
};
// TODO look for code segments
// Streaming tokenizer for the docgen template language: walks the input
// buffer once, emitting Content runs and the pieces of {#...#} tags.
// NOTE(review): this region is rendered from a diff without +/- markers;
// the partial `gen` function and the trailing `switch (state)` fragment
// below appear to be removed-code residue, not part of the new Tokenizer —
// verify against the actual doc/docgen.zig in the repository.
const Tokenizer = struct {
buffer: []const u8,            // entire input file contents
index: usize,                  // current byte offset into `buffer`
state: State,                  // lexer state carried across next() calls
source_file_name: []const u8,  // used only for diagnostics in parseError
fn gen(in: &io.InStream, out: &io.OutStream) {
var state = State.Start;
while (true) {
const byte = in.readByte() catch |err| {
if (err == error.EndOfStream) {
return;
}
std.debug.panic("{}", err);
// Lexer states; see next() for the transitions.
const State = enum {
Start,    // in plain content
LBracket, // saw '{', deciding whether a tag starts
Hash,     // inside a tag, saw '#', deciding whether it closes
TagName,  // inside a tag body
Eof,      // input exhausted; next() must not run again in this state
};
// Create a tokenizer over `buffer`; `source_file_name` is kept only for
// error messages. No allocation is performed.
fn init(source_file_name: []const u8, buffer: []const u8) -> Tokenizer {
return Tokenizer {
.buffer = buffer,
.index = 0,
.state = State.Start,
.source_file_name = source_file_name,
};
// Return the next token. Tokens are accumulated greedily: a run of
// content bytes becomes one Content token; the loop `break`s as soon as
// a completed token would otherwise be extended past its boundary.
// On end of input, emits a final Eof token (or Invalid if the input
// ended in the middle of a tag).
fn next(self: &Tokenizer) -> Token {
var result = Token {
.id = Token.Id.Eof,     // stays Eof if no byte changes it
.start = self.index,
.end = undefined,       // filled in before returning
};
while (self.index < self.buffer.len) : (self.index += 1) {
const c = self.buffer[self.index];
switch (self.state) {
State.Start => switch (c) {
'{' => {
self.state = State.LBracket;
},
else => {
result.id = Token.Id.Content;
},
},
// NOTE(review): the stray "}," and "State.Derp" lines below look like
// removed-diff residue interleaved into this hunk — confirm against
// the real file.
},
State.Derp => unreachable,
State.LBracket => switch (c) {
'#' => {
// "{#" found. If we were mid-Content, first finish that token
// (back up so '{' is re-examined next call); otherwise emit
// BracketOpen and move into the tag body.
if (result.id != Token.Id.Eof) {
self.index -= 1;
self.state = State.Start;
break;
} else {
result.id = Token.Id.BracketOpen;
self.index += 1;
self.state = State.TagName;
break;
}
},
else => {
// Lone '{' is ordinary content.
result.id = Token.Id.Content;
self.state = State.Start;
},
},
State.TagName => switch (c) {
'|' => {
// Separator ends any pending TagContent token first.
if (result.id != Token.Id.Eof) {
break;
} else {
result.id = Token.Id.Separator;
self.index += 1;
break;
}
},
'#' => {
self.state = State.Hash;
},
else => {
result.id = Token.Id.TagContent;
},
},
State.Hash => switch (c) {
'}' => {
// "#}" found — same finish-pending-token dance as "{#" above.
if (result.id != Token.Id.Eof) {
self.index -= 1;
self.state = State.TagName;
break;
} else {
result.id = Token.Id.BracketClose;
self.index += 1;
self.state = State.Start;
break;
}
},
else => {
// '#' was part of the tag content, not a closer.
result.id = Token.Id.TagContent;
self.state = State.TagName;
},
},
State.Eof => unreachable,
}
} else {
// Ran off the end of the buffer: EOF inside a tag is Invalid.
switch (self.state) {
State.Start, State.LBracket, State.Eof => {},
else => {
result.id = Token.Id.Invalid;
},
}
self.state = State.Eof;
}
result.end = self.index;
return result;
}
// Human-readable position of a token, computed by getTokenLocation().
// `line`/`column` are 0-based; callers add 1 when printing.
const Location = struct {
line: usize,
column: usize,
line_start: usize, // byte offset of the first char of the token's line
line_end: usize,   // byte offset of that line's '\n' (or buffer end)
};
// Scan the buffer from the start to find the line/column of `token.start`,
// and the byte range of the containing line (for diagnostic display).
// O(n) in the token offset, which is fine for error paths.
fn getTokenLocation(self: &Tokenizer, token: &const Token) -> Location {
var loc = Location {
.line = 0,
.column = 0,
.line_start = 0,
.line_end = 0,
};
for (self.buffer) |c, i| {
if (i == token.start) {
// Found the token; extend line_end to the end of this line.
loc.line_end = i;
while (loc.line_end < self.buffer.len and self.buffer[loc.line_end] != '\n') : (loc.line_end += 1) {}
return loc;
}
if (c == '\n') {
loc.line += 1;
loc.column = 0;
loc.line_start = i + 1;
} else {
loc.column += 1;
}
}
// token.start was at/past the end of the buffer (e.g. an Eof token).
return loc;
}
};
error ParseError;
// Print a compiler-style diagnostic to stderr — "file:line:col: error: ..."
// followed by the offending source line and a tilde underline — then return
// error.ParseError so callers can propagate with `try` or `return`.
fn parseError(tokenizer: &Tokenizer, token: &const Token, comptime fmt: []const u8, args: ...) -> error {
const loc = tokenizer.getTokenLocation(token);
warn("{}:{}:{}: error: " ++ fmt ++ "\n", tokenizer.source_file_name, loc.line + 1, loc.column + 1, args);
if (loc.line_start <= loc.line_end) {
// Echo the source line itself.
warn("{}\n", tokenizer.buffer[loc.line_start..loc.line_end]);
{
// Pad up to the token's column...
var i: usize = 0;
while (i < loc.column) : (i += 1) {
warn(" ");
}
}
{
// ...then underline the token's full extent with '~'.
const caret_count = token.end - token.start;
var i: usize = 0;
while (i < caret_count) : (i += 1) {
warn("~");
}
}
warn("\n");
}
return error.ParseError;
}
// Require `token` to have the expected id; on mismatch, emit a diagnostic
// via parseError and return error.ParseError.
fn assertToken(tokenizer: &Tokenizer, token: &const Token, id: Token.Id) -> %void {
    if (token.id == id) return;
    return parseError(tokenizer, token, "expected {}, found {}", @tagName(id), @tagName(token.id));
}
// Pull the next token from the stream and require it to match `id`;
// returns the token on success, error.ParseError otherwise.
fn eatToken(tokenizer: &Tokenizer, id: Token.Id) -> %Token {
    const next_token = tokenizer.next();
    try assertToken(tokenizer, next_token, id);
    return next_token;
}
// A {#header_open|...#} tag: section title, its urlized anchor, and its
// nesting depth `n` (used as the <hN> heading level).
const HeaderOpen = struct {
name: []const u8,
url: []const u8,
n: usize,
};
// One entry of a {#see_also|...#} list; the token is kept so genHtml can
// report a precise location if the referenced anchor does not exist.
const SeeAlsoItem = struct {
name: []const u8,
token: Token,
};
// A {#code_begin|...#} ... {#code_end#} block: how to build/run it (`id`),
// the output base name, and the token spanning the raw source text.
const Code = struct {
id: Id,
name: []const u8,
source_token: Token,
const Id = enum {
Test,
Exe,
Error,
};
};
// One parsed element of the document, in source order.
const Node = union(enum) {
Content: []const u8,
Nav,
HeaderOpen: HeaderOpen,
SeeAlso: []const SeeAlsoItem,
Code: Code,
};
// Result of genToc(): the parsed node list, the rendered table-of-contents
// HTML, and the set of header anchor urls (for see_also validation).
const Toc = struct {
nodes: []Node,
toc: []u8,
urls: std.HashMap([]const u8, Token, mem.hash_slice_u8, mem.eql_slice_u8),
};
// Tracks whether the last header tag opened or closed a section, so the
// TOC writer knows when to start a new <ul>.
const Action = enum {
Open,
Close,
};
// First pass over the input: tokenize the whole template, build the Node
// list, render the table-of-contents HTML, and collect header anchor urls.
// Returns error.ParseError (with a printed diagnostic) on malformed input.
// Caller takes ownership of the returned Toc's slices and hash map.
fn genToc(allocator: &mem.Allocator, tokenizer: &Tokenizer) -> %Toc {
var urls = std.HashMap([]const u8, Token, mem.hash_slice_u8, mem.eql_slice_u8).init(allocator);
%defer urls.deinit();
// Depth of currently-open {#header_open#} sections; must be 0 at EOF.
var header_stack_size: usize = 0;
var last_action = Action.Open;
var toc_buf = try std.Buffer.initSize(allocator, 0);
defer toc_buf.deinit();
var toc_buf_adapter = io.BufferOutStream.init(&toc_buf);
var toc = &toc_buf_adapter.stream;
var nodes = std.ArrayList(Node).init(allocator);
defer nodes.deinit();
try toc.writeByte('\n');
while (true) {
const token = tokenizer.next();
switch (token.id) {
Token.Id.Eof => {
if (header_stack_size != 0) {
return parseError(tokenizer, token, "unbalanced headers");
}
try toc.write("    </ul>\n");
break;
},
Token.Id.Content => {
try nodes.append(Node {.Content = tokenizer.buffer[token.start..token.end] });
},
Token.Id.BracketOpen => {
// A tag begins; dispatch on its name.
const tag_token = try eatToken(tokenizer, Token.Id.TagContent);
const tag_name = tokenizer.buffer[tag_token.start..tag_token.end];
if (mem.eql(u8, tag_name, "nav")) {
_ = try eatToken(tokenizer, Token.Id.BracketClose);
try nodes.append(Node.Nav);
} else if (mem.eql(u8, tag_name, "header_open")) {
_ = try eatToken(tokenizer, Token.Id.Separator);
const content_token = try eatToken(tokenizer, Token.Id.TagContent);
const content = tokenizer.buffer[content_token.start..content_token.end];
_ = try eatToken(tokenizer, Token.Id.BracketClose);
header_stack_size += 1;
const urlized = try urlize(allocator, content);
try nodes.append(Node{.HeaderOpen = HeaderOpen {
.name = content,
.url = urlized,
.n = header_stack_size,
}});
// put() returns the previous value for a duplicate key; two headers
// must not urlize to the same anchor.
if (try urls.put(urlized, tag_token)) |other_tag_token| {
parseError(tokenizer, tag_token, "duplicate header url: #{}", urlized) catch {};
parseError(tokenizer, other_tag_token, "other tag here") catch {};
return error.ParseError;
}
// Opening right after another open means descending a level:
// start a nested <ul>.
if (last_action == Action.Open) {
try toc.writeByte('\n');
try toc.writeByteNTimes(' ', header_stack_size * 4);
try toc.write("<ul>\n");
} else {
last_action = Action.Open;
}
try toc.writeByteNTimes(' ', 4 + header_stack_size * 4);
try toc.print("<li><a href=\"#{}\">{}</a>", urlized, content);
} else if (mem.eql(u8, tag_name, "header_close")) {
if (header_stack_size == 0) {
return parseError(tokenizer, tag_token, "unbalanced close header");
}
header_stack_size -= 1;
_ = try eatToken(tokenizer, Token.Id.BracketClose);
// Two closes in a row means ascending a level: end the nested <ul>.
if (last_action == Action.Close) {
try toc.writeByteNTimes(' ', 8 + header_stack_size * 4);
try toc.write("</ul></li>\n");
} else {
try toc.write("</li>\n");
last_action = Action.Close;
}
} else if (mem.eql(u8, tag_name, "see_also")) {
// {#see_also|Name|Name|...#} — collect items until the close bracket.
var list = std.ArrayList(SeeAlsoItem).init(allocator);
%defer list.deinit();
while (true) {
const see_also_tok = tokenizer.next();
switch (see_also_tok.id) {
Token.Id.TagContent => {
const content = tokenizer.buffer[see_also_tok.start..see_also_tok.end];
try list.append(SeeAlsoItem {
.name = content,
.token = see_also_tok,
});
},
Token.Id.Separator => {},
Token.Id.BracketClose => {
try nodes.append(Node {.SeeAlso = list.toOwnedSlice() } );
break;
},
else => return parseError(tokenizer, see_also_tok, "invalid see_also token"),
}
}
} else if (mem.eql(u8, tag_name, "code_begin")) {
// {#code_begin|kind[|name]#} ...source... {#code_end#}
_ = try eatToken(tokenizer, Token.Id.Separator);
const code_kind_tok = try eatToken(tokenizer, Token.Id.TagContent);
var name: []const u8 = "test"; // default output name if none given
const maybe_sep = tokenizer.next();
switch (maybe_sep.id) {
Token.Id.Separator => {
const name_tok = try eatToken(tokenizer, Token.Id.TagContent);
name = tokenizer.buffer[name_tok.start..name_tok.end];
_ = try eatToken(tokenizer, Token.Id.BracketClose);
},
Token.Id.BracketClose => {},
else => return parseError(tokenizer, token, "invalid token"),
}
const code_kind_str = tokenizer.buffer[code_kind_tok.start..code_kind_tok.end];
var code_kind_id: Code.Id = undefined;
if (mem.eql(u8, code_kind_str, "exe")) {
code_kind_id = Code.Id.Exe;
} else if (mem.eql(u8, code_kind_str, "test")) {
code_kind_id = Code.Id.Test;
} else if (mem.eql(u8, code_kind_str, "error")) {
code_kind_id = Code.Id.Error;
} else {
return parseError(tokenizer, code_kind_tok, "unrecognized code kind: {}", code_kind_str);
}
const source_token = try eatToken(tokenizer, Token.Id.Content);
_ = try eatToken(tokenizer, Token.Id.BracketOpen);
const end_code_tag = try eatToken(tokenizer, Token.Id.TagContent);
const end_tag_name = tokenizer.buffer[end_code_tag.start..end_code_tag.end];
if (!mem.eql(u8, end_tag_name, "code_end")) {
return parseError(tokenizer, end_code_tag, "expected code_end token");
}
_ = try eatToken(tokenizer, Token.Id.BracketClose);
try nodes.append(Node {.Code = Code{
.id = code_kind_id,
.name = name,
.source_token = source_token,
}});
} else {
return parseError(tokenizer, tag_token, "unrecognized tag name: {}", tag_name);
}
},
else => return parseError(tokenizer, token, "invalid token"),
}
}
return Toc {
.nodes = nodes.toOwnedSlice(),
.toc = toc_buf.toOwnedSlice(),
.urls = urls,
};
}
// Turn a header title into an HTML anchor id: letters, '_' and '-' pass
// through, spaces become '-', everything else is dropped.
// Caller owns the returned slice.
fn urlize(allocator: &mem.Allocator, input: []const u8) -> %[]u8 {
    var out_buf = try std.Buffer.initSize(allocator, 0);
    defer out_buf.deinit();
    var out_adapter = io.BufferOutStream.init(&out_buf);
    var stream = &out_adapter.stream;
    for (input) |byte| {
        switch (byte) {
            ' ' => {
                try stream.writeByte('-');
            },
            'a'...'z', 'A'...'Z', '_', '-' => {
                try stream.writeByte(byte);
            },
            else => {},
        }
    }
    return out_buf.toOwnedSlice();
}
// HTML-escape `input`: '&', '<', '>' and '"' become entities, all other
// bytes pass through unchanged. Caller owns the returned slice.
fn escapeHtml(allocator: &mem.Allocator, input: []const u8) -> %[]u8 {
    var out_buf = try std.Buffer.initSize(allocator, 0);
    defer out_buf.deinit();
    var out_adapter = io.BufferOutStream.init(&out_buf);
    var stream = &out_adapter.stream;
    for (input) |byte| {
        switch (byte) {
            '&' => try stream.write("&amp;"),
            '<' => try stream.write("&lt;"),
            '>' => try stream.write("&gt;"),
            '"' => try stream.write("&quot;"),
            else => try stream.writeByte(byte),
        }
    }
    return out_buf.toOwnedSlice();
}
error ExampleFailedToCompile;
// Second pass: walk the parsed nodes and write the final HTML to `out`.
// For Code nodes this also writes the example to docgen_tmp/, invokes
// `zig build-exe` as a child process, runs the produced binary, and embeds
// its output — so a broken example fails the docs build with a diagnostic.
fn genHtml(allocator: &mem.Allocator, tokenizer: &Tokenizer, toc: &Toc, out: &io.OutStream, zig_exe: []const u8) -> %void {
for (toc.nodes) |node| {
switch (node) {
Node.Content => |data| {
// Raw template text: copied through verbatim (already HTML).
try out.write(data);
},
Node.Nav => {
// Splice in the table of contents built by genToc.
try out.write(toc.toc);
},
Node.HeaderOpen => |info| {
try out.print("<h{} id=\"{}\">{}</h{}>\n", info.n, info.url, info.name, info.n);
},
Node.SeeAlso => |items| {
try out.write("<p>See also:</p><ul>\n");
for (items) |item| {
const url = try urlize(allocator, item.name);
// Every see_also entry must resolve to a known header anchor.
if (!toc.urls.contains(url)) {
return parseError(tokenizer, item.token, "url not found: {}", url);
}
try out.print("<li><a href=\"#{}\">{}</a></li>\n", url, item.name);
}
try out.write("</ul>\n");
},
Node.Code => |code| {
// Render the source listing, then actually build/run it below.
const raw_source = tokenizer.buffer[code.source_token.start..code.source_token.end];
const trimmed_raw_source = mem.trim(u8, raw_source, " \n");
const escaped_source = try escapeHtml(allocator, trimmed_raw_source);
try out.print("<pre><code class=\"zig\">{}</code></pre>", escaped_source);
const tmp_dir_name = "docgen_tmp";
try os.makePath(allocator, tmp_dir_name);
const name_plus_ext = try std.fmt.allocPrint(allocator, "{}.zig", code.name);
const name_plus_bin_ext = try std.fmt.allocPrint(allocator, "{}{}", code.name, exe_ext);
const tmp_source_file_name = try os.path.join(allocator, tmp_dir_name, name_plus_ext);
const tmp_bin_file_name = try os.path.join(allocator, tmp_dir_name, name_plus_bin_ext);
try io.writeFile(tmp_source_file_name, trimmed_raw_source, null);
switch (code.id) {
Code.Id.Exe => {
{
// Compile the example; any non-zero exit or crash is a doc error.
const args = [][]const u8 {zig_exe, "build-exe", tmp_source_file_name, "--output", tmp_bin_file_name};
const result = try os.ChildProcess.exec(allocator, args, null, null, max_doc_file_size);
switch (result.term) {
os.ChildProcess.Term.Exited => |exit_code| {
if (exit_code != 0) {
warn("{}\nThe following command exited with code {}:\n", result.stderr, exit_code);
for (args) |arg| warn("{} ", arg) else warn("\n");
return parseError(tokenizer, code.source_token, "example failed to compile");
}
},
else => {
warn("{}\nThe following command crashed:\n", result.stderr);
for (args) |arg| warn("{} ", arg) else warn("\n");
return parseError(tokenizer, code.source_token, "example failed to compile");
},
}
}
// Run the freshly-built binary and capture its output.
const args = [][]const u8 {tmp_bin_file_name};
const result = try os.ChildProcess.exec(allocator, args, null, null, max_doc_file_size);
switch (result.term) {
os.ChildProcess.Term.Exited => |exit_code| {
if (exit_code != 0) {
warn("The following command exited with code {}:\n", exit_code);
for (args) |arg| warn("{} ", arg) else warn("\n");
return parseError(tokenizer, code.source_token, "example exited with code {}", exit_code);
}
},
else => {
warn("The following command crashed:\n");
for (args) |arg| warn("{} ", arg) else warn("\n");
return parseError(tokenizer, code.source_token, "example crashed");
},
}
// Embed a shell transcript of the build + run, including the
// example's stderr and stdout.
try out.print("<pre><code class=\"sh\">$ zig build-exe {}.zig\n$ ./{}\n{}{}</code></pre>\n", code.name, code.name, result.stderr, result.stdout);
},
Code.Id.Test => {
@panic("TODO");
},
Code.Id.Error => {
@panic("TODO");
},
}
},
}
}
}

View File

@@ -1,724 +0,0 @@
<!doctype html>
<html>
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1, user-scalable=no" />
<title>The Zig Programming Language</title>
<link rel="stylesheet" type="text/css" href="highlight/styles/default.css">
<style type="text/css">
img {
max-width: 100%;
}
</style>
</head>
<body>
<img src="zig-logo.svg">
<p>
Zig is an open-source programming language designed for <strong>robustness</strong>,
<strong>optimality</strong>, and <strong>clarity</strong>.
</p>
<p>
<a href="download/">Download</a> |
<a href="documentation/master/">Documentation</a> |
<a href="https://github.com/zig-lang/zig">Source Code</a> |
<a href="https://github.com/zig-lang/zig/issues">Bug Tracker</a> |
<a href="https://webchat.freenode.net/?channels=%23zig">IRC</a> |
<a href="https://www.patreon.com/andrewrk">Donate $1/month</a>
</p>
<h2>Feature Highlights</h2>
<ul>
<li>Manual memory management. Memory allocation failure is handled correctly. Edge cases matter!</li>
<li>Zig competes with C instead of depending on it. The Zig Standard Library does not depend on libc.</li>
<li>Small, simple language. Focus on debugging your application rather than debugging your knowledge of your programming language.</li>
<li>A fresh take on error handling that resembles what well-written C error handling looks like,
minus the boilerplate and verbosity.</li>
<li>Debug mode optimizes for fast compilation time and crashing with a stack trace when undefined behavior
<em>would</em> happen.</li>
<li>ReleaseFast mode produces heavily optimized code. What other projects call
"Link Time Optimization" Zig does automatically.</li>
<li>ReleaseSafe mode produces optimized code but keeps safety checks enabled. Disable safety checks in the bottlenecks of your code.</li>
<li>Generic data structures and functions.</li>
<li>Compile-time reflection and compile-time code execution.</li>
<li>Import .h files and directly use C types, variables, and functions.</li>
<li>Export functions, variables, and types for C code to depend on. Automatically generate .h files.</li>
<li>Nullable type instead of null pointers.</li>
<li>Order independent top level declarations.</li>
<li>Friendly toward package maintainers. Reproducible build, bootstrapping process carefully documented. Issues filed by package maintainers are considered especially important.</li>
<li>Cross-compiling is a first-class use case.</li>
<li>No preprocessor. Instead Zig has a few carefully designed features that
provide a way to accomplish things you might do with a preprocessor.</li>
</ul>
<h2 id="reading-material">Reading Material</h2>
<ul>
<li>2018-01-03 - <a href="http://andrewkelley.me/post/zig-december-2017-in-review.html">December 2017 in Review</a></li>
<li>2017-10-17 - <a href="download/0.1.1/release-notes.html">Zig 0.1.1 Release Notes</a></li>
<li>2017-07-19 - <a href="http://tiehuis.github.io/iterative-replacement-of-c-with-zig">Iterative Replacement of C with Zig</a></li>
<li>2017-02-16 - <a href="http://andrewkelley.me/post/a-better-way-to-implement-bit-fields.html">A Better Way to Implement Bit-Fields</a></li>
<li>2017-02-13 - <a href="http://andrewkelley.me/post/zig-already-more-knowable-than-c.html">Zig: Already More Knowable Than C</a></li>
<li>2017-01-30 - <a href="http://andrewkelley.me/post/zig-programming-language-blurs-line-compile-time-run-time.html">Zig Programming Language Blurs the Line Between Compile-Time and Run-Time</a></li>
<li>2016-02-08 - <a href="http://andrewkelley.me/post/intro-to-zig.html">Introduction to the Zig Programming Language</a></li>
</ul>
<h2 id="source-examples">Source Code Examples</h2>
<ul>
<li><a href="#hello">Hello World</a></li>
<li><a href="#hello_libc">Hello World with libc</a></li>
<li><a href="#parse">Parsing Unsigned Integers</a></li>
<li><a href="#hashmap">HashMap with Custom Allocator</a></li>
<li><a href="#tetris">Tetris Clone</a></li>
<li><a href="#clashos">Bare Bones Operating System</a></li>
<li><a href="#cat">Cat Utility</a></li>
<li><a href="#multiline-strings">Multiline String Syntax</a></li>
<li><a href="#mersenne">Mersenne Twister Random Number Generator</a></li>
</ul>
<h3 id="hello">Hello World</h3>
<pre><code class="zig">const std = @import("std");
pub fn main() -&gt; %void {
// If this program is run without stdout attached, exit with an error.
var stdout_file = try std.io.getStdOut();
// If this program encounters pipe failure when printing to stdout, exit
// with an error.
try stdout_file.write("Hello, world!\n");
}</code></pre>
<p>Build this with:</p>
<pre>zig build-exe hello.zig</pre>
<h3 id="hello_libc">Hello World with libc</h3>
<pre><code class="zig">const c = @cImport({
// See https://github.com/zig-lang/zig/issues/515
@cDefine("_NO_CRT_STDIO_INLINE", "1");
@cInclude("stdio.h");
@cInclude("string.h");
});
const msg = c"Hello, world!\n";
export fn main(argc: c_int, argv: &amp;&amp;u8) -&gt; c_int {
if (c.printf(msg) != c_int(c.strlen(msg)))
return -1;
return 0;
}</code></pre>
<p>Build this with:</p>
<pre>zig build-exe hello.zig --library c</pre>
<h3 id="parse">Parsing Unsigned Integers</h3>
<pre><code class="zig">pub fn parseUnsigned(comptime T: type, buf: []u8, radix: u8) -&gt; %T {
var x: T = 0;
for (buf) |c| {
const digit = try charToDigit(c, radix);
x = try mulOverflow(T, x, radix);
x = try addOverflow(T, x, digit);
}
return x;
}
error InvalidChar;
fn charToDigit(c: u8, radix: u8) -&gt; %u8 {
const value = switch (c) {
'0' ... '9' =&gt; c - '0',
'A' ... 'Z' =&gt; c - 'A' + 10,
'a' ... 'z' =&gt; c - 'a' + 10,
else =&gt; return error.InvalidChar,
};
if (value &gt;= radix)
return error.InvalidChar;
return value;
}
error Overflow;
pub fn mulOverflow(comptime T: type, a: T, b: T) -&gt; %T {
var answer: T = undefined;
if (@mulWithOverflow(T, a, b, &amp;answer)) error.Overflow else answer
}
pub fn addOverflow(comptime T: type, a: T, b: T) -&gt; %T {
var answer: T = undefined;
if (@addWithOverflow(T, a, b, &amp;answer)) error.Overflow else answer
}
fn getNumberWithDefault(s: []u8) -&gt; u32 {
parseUnsigned(u32, s, 10) catch 42
}
fn getNumberOrCrash(s: []u8) -&gt; u32 {
%%parseUnsigned(u32, s, 10)
}
fn addTwoTogetherOrReturnErr(a_str: []u8, b_str: []u8) -&gt; %u32 {
const a = parseUnsigned(u32, a_str, 10) catch |err| return err;
const b = parseUnsigned(u32, b_str, 10) catch |err| return err;
return a + b;
}</code></pre>
<h3 id="hashmap">HashMap with Custom Allocator</h3>
<pre><code class="zig">const debug = @import(&quot;debug.zig&quot;);
const assert = debug.assert;
const math = @import(&quot;math.zig&quot;);
const mem = @import(&quot;mem.zig&quot;);
const Allocator = mem.Allocator;
const want_modification_safety = !@compileVar(&quot;is_release&quot;);
const debug_u32 = if (want_modification_safety) u32 else void;
pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn(key: K)-&gt;u32,
comptime eql: fn(a: K, b: K)-&gt;bool) -&gt; type
{
struct {
entries: []Entry,
size: usize,
max_distance_from_start_index: usize,
allocator: &amp;Allocator,
// this is used to detect bugs where a hashtable is edited while an iterator is running.
modification_count: debug_u32,
const Self = this;
pub const Entry = struct {
used: bool,
distance_from_start_index: usize,
key: K,
value: V,
};
pub const Iterator = struct {
hm: &amp;Self,
// how many items have we returned
count: usize,
// iterator through the entry array
index: usize,
// used to detect concurrent modification
initial_modification_count: debug_u32,
pub fn next(it: &amp;Iterator) -&gt; ?&amp;Entry {
if (want_modification_safety) {
assert(it.initial_modification_count == it.hm.modification_count); // concurrent modification
}
if (it.count &gt;= it.hm.size) return null;
while (it.index &lt; it.hm.entries.len) : (it.index += 1) {
const entry = &amp;it.hm.entries[it.index];
if (entry.used) {
it.index += 1;
it.count += 1;
return entry;
}
}
unreachable // no next item
}
};
pub fn init(hm: &amp;Self, allocator: &amp;Allocator) {
hm.entries = []Entry{};
hm.allocator = allocator;
hm.size = 0;
hm.max_distance_from_start_index = 0;
// it doesn't actually matter what we set this to since we use wrapping integer arithmetic
hm.modification_count = undefined;
}
pub fn deinit(hm: &amp;Self) {
hm.allocator.free(Entry, hm.entries);
}
pub fn clear(hm: &amp;Self) {
for (hm.entries) |*entry| {
entry.used = false;
}
hm.size = 0;
hm.max_distance_from_start_index = 0;
hm.incrementModificationCount();
}
pub fn put(hm: &amp;Self, key: K, value: V) -&gt; %void {
if (hm.entries.len == 0) {
try hm.initCapacity(16);
}
hm.incrementModificationCount();
// if we get too full (60%), double the capacity
if (hm.size * 5 &gt;= hm.entries.len * 3) {
const old_entries = hm.entries;
try hm.initCapacity(hm.entries.len * 2);
// dump all of the old elements into the new table
for (old_entries) |*old_entry| {
if (old_entry.used) {
hm.internalPut(old_entry.key, old_entry.value);
}
}
hm.allocator.free(Entry, old_entries);
}
hm.internalPut(key, value);
}
pub fn get(hm: &amp;Self, key: K) -&gt; ?&amp;Entry {
return hm.internalGet(key);
}
pub fn remove(hm: &amp;Self, key: K) {
hm.incrementModificationCount();
const start_index = hm.keyToIndex(key);
{var roll_over: usize = 0; while (roll_over &lt;= hm.max_distance_from_start_index) : (roll_over += 1) {
const index = (start_index + roll_over) % hm.entries.len;
var entry = &amp;hm.entries[index];
assert(entry.used); // key not found
if (!eql(entry.key, key)) continue;
while (roll_over &lt; hm.entries.len) : (roll_over += 1) {
const next_index = (start_index + roll_over + 1) % hm.entries.len;
const next_entry = &amp;hm.entries[next_index];
if (!next_entry.used or next_entry.distance_from_start_index == 0) {
entry.used = false;
hm.size -= 1;
return;
}
*entry = *next_entry;
entry.distance_from_start_index -= 1;
entry = next_entry;
}
unreachable // shifting everything in the table
}}
unreachable // key not found
}
pub fn entryIterator(hm: &amp;Self) -&gt; Iterator {
return Iterator {
.hm = hm,
.count = 0,
.index = 0,
.initial_modification_count = hm.modification_count,
};
}
fn initCapacity(hm: &amp;Self, capacity: usize) -&gt; %void {
hm.entries = try hm.allocator.alloc(Entry, capacity);
hm.size = 0;
hm.max_distance_from_start_index = 0;
for (hm.entries) |*entry| {
entry.used = false;
}
}
fn incrementModificationCount(hm: &amp;Self) {
if (want_modification_safety) {
hm.modification_count +%= 1;
}
}
fn internalPut(hm: &amp;Self, orig_key: K, orig_value: V) {
var key = orig_key;
var value = orig_value;
const start_index = hm.keyToIndex(key);
var roll_over: usize = 0;
var distance_from_start_index: usize = 0;
while (roll_over &lt; hm.entries.len) : ({roll_over += 1; distance_from_start_index += 1}) {
const index = (start_index + roll_over) % hm.entries.len;
const entry = &amp;hm.entries[index];
if (entry.used and !eql(entry.key, key)) {
if (entry.distance_from_start_index &lt; distance_from_start_index) {
// robin hood to the rescue
const tmp = *entry;
hm.max_distance_from_start_index = math.max(hm.max_distance_from_start_index,
distance_from_start_index);
*entry = Entry {
.used = true,
.distance_from_start_index = distance_from_start_index,
.key = key,
.value = value,
};
key = tmp.key;
value = tmp.value;
distance_from_start_index = tmp.distance_from_start_index;
}
continue;
}
if (!entry.used) {
// adding an entry. otherwise overwriting old value with
// same key
hm.size += 1;
}
hm.max_distance_from_start_index = math.max(distance_from_start_index, hm.max_distance_from_start_index);
*entry = Entry {
.used = true,
.distance_from_start_index = distance_from_start_index,
.key = key,
.value = value,
};
return;
}
unreachable // put into a full map
}
fn internalGet(hm: &amp;Self, key: K) -&gt; ?&amp;Entry {
const start_index = hm.keyToIndex(key);
{var roll_over: usize = 0; while (roll_over &lt;= hm.max_distance_from_start_index) : (roll_over += 1) {
const index = (start_index + roll_over) % hm.entries.len;
const entry = &amp;hm.entries[index];
if (!entry.used) return null;
if (eql(entry.key, key)) return entry;
}}
return null;
}
fn keyToIndex(hm: &amp;Self, key: K) -&gt; usize {
return usize(hash(key)) % hm.entries.len;
}
}
}
test "basic hash map test" {
var map: HashMap(i32, i32, hash_i32, eql_i32) = undefined;
map.init(&amp;debug.global_allocator);
defer map.deinit();
%%map.put(1, 11);
%%map.put(2, 22);
%%map.put(3, 33);
%%map.put(4, 44);
%%map.put(5, 55);
assert((??map.get(2)).value == 22);
map.remove(2);
assert(if (const entry ?= map.get(2)) false else true);
}
fn hash_i32(x: i32) -&gt; u32 {
*(&amp;u32)(&amp;x)
}
fn eql_i32(a: i32, b: i32) -&gt; bool {
a == b
}</code></pre>
<h3 id="tetris">Tetris Clone</h3>
<img src="tetris-screenshot.png">
<p>
<a href="https://github.com/andrewrk/tetris">Source Code on GitHub</a>
</p>
<h3 id="clashos">Bare Bones Operating System</h3>
<p>
<a href="https://github.com/andrewrk/clashos">Source Code on GitHub</a>
</p>
<h3 id="cat">Cat Utility</h3>
<pre><code class="zig">const std = @import("std");
const io = std.io;
const mem = std.mem;
const os = std.os;
pub fn main() -&gt; %void {
const exe = os.args.at(0);
var catted_anything = false;
var arg_i: usize = 1;
while (arg_i &lt; os.args.count()) : (arg_i += 1) {
const arg = os.args.at(arg_i);
if (mem.eql(u8, arg, "-")) {
catted_anything = true;
try cat_stream(&amp;io.stdin);
} else if (arg[0] == '-') {
return usage(exe);
} else {
var is = io.InStream.open(arg, null) catch |err| {
%%io.stderr.printf("Unable to open file: {}\n", @errorName(err));
return err;
};
defer is.close();
catted_anything = true;
try cat_stream(&amp;is);
}
}
if (!catted_anything) {
try cat_stream(&amp;io.stdin);
}
try io.stdout.flush();
}
fn usage(exe: []const u8) -&gt; %void {
%%io.stderr.printf("Usage: {} [FILE]...\n", exe);
return error.Invalid;
}
fn cat_stream(is: &amp;io.InStream) -&gt; %void {
var buf: [1024 * 4]u8 = undefined;
while (true) {
const bytes_read = is.read(buf[0..]) catch |err| {
%%io.stderr.printf("Unable to read from stream: {}\n", @errorName(err));
return err;
};
if (bytes_read == 0) {
break;
}
io.stdout.write(buf[0..bytes_read]) catch |err| {
%%io.stderr.printf("Unable to write to stdout: {}\n", @errorName(err));
return err;
};
}
}</code></pre>
<h3 id="multiline-strings">Multiline String Syntax</h3>
<pre><code class="zig">pub fn createAllShaders() -&gt; AllShaders {
var as : AllShaders = undefined;
as.primitive = createShader(
\\#version 150 core
\\
\\in vec3 VertexPosition;
\\
\\uniform mat4 MVP;
\\
\\void main(void) {
\\ gl_Position = vec4(VertexPosition, 1.0) * MVP;
\\}
,
\\#version 150 core
\\
\\out vec4 FragColor;
\\
\\uniform vec4 Color;
\\
\\void main(void) {
\\ FragColor = Color;
\\}
, null);
as.primitive_attrib_position = as.primitive.attrib_location(c&quot;VertexPosition&quot;);
as.primitive_uniform_mvp = as.primitive.uniform_location(c&quot;MVP&quot;);
as.primitive_uniform_color = as.primitive.uniform_location(c&quot;Color&quot;);
as.texture = createShader(
\\#version 150 core
\\
\\in vec3 VertexPosition;
\\in vec2 TexCoord;
\\
\\out vec2 FragTexCoord;
\\
\\uniform mat4 MVP;
\\
\\void main(void)
\\{
\\ FragTexCoord = TexCoord;
\\ gl_Position = vec4(VertexPosition, 1.0) * MVP;
\\}
,
\\#version 150 core
\\
\\in vec2 FragTexCoord;
\\out vec4 FragColor;
\\
\\uniform sampler2D Tex;
\\
\\void main(void)
\\{
\\ FragColor = texture(Tex, FragTexCoord);
\\}
, null);
as.texture_attrib_tex_coord = as.texture.attrib_location(c&quot;TexCoord&quot;);
as.texture_attrib_position = as.texture.attrib_location(c&quot;VertexPosition&quot;);
as.texture_uniform_mvp = as.texture.uniform_location(c&quot;MVP&quot;);
as.texture_uniform_tex = as.texture.uniform_location(c&quot;Tex&quot;);
debug_gl.assert_no_error();
return as;
}</code></pre>
<h3 id="mersenne">Mersenne Twister Random Number Generator</h3>
<pre><code class="zig">const assert = @import(&quot;debug.zig&quot;).assert;
const rand_test = @import(&quot;rand_test.zig&quot;);
pub const MT19937_32 = MersenneTwister(
u32, 624, 397, 31,
0x9908B0DF,
11, 0xFFFFFFFF,
7, 0x9D2C5680,
15, 0xEFC60000,
18, 1812433253);
pub const MT19937_64 = MersenneTwister(
u64, 312, 156, 31,
0xB5026F5AA96619E9,
29, 0x5555555555555555,
17, 0x71D67FFFEDA60000,
37, 0xFFF7EEE000000000,
43, 6364136223846793005);
/// Use `init` to initialize this state.
pub const Rand = struct {
const Rng = if (@sizeOf(usize) &gt;= 8) MT19937_64 else MT19937_32;
rng: Rng,
/// Initialize random state with the given seed.
pub fn init(r: &amp;Rand, seed: usize) {
r.rng.init(seed);
}
/// Get an integer with random bits.
pub fn scalar(r: &amp;Rand, comptime T: type) -&gt; T {
if (T == usize) {
return r.rng.get();
} else {
var result: [@sizeOf(T)]u8 = undefined;
r.fillBytes(result);
return ([]T)(result)[0];
}
}
/// Fill `buf` with randomness.
pub fn fillBytes(r: &amp;Rand, buf: []u8) {
var bytes_left = buf.len;
while (bytes_left &gt;= @sizeOf(usize)) {
([]usize)(buf[buf.len - bytes_left...])[0] = r.rng.get();
bytes_left -= @sizeOf(usize);
}
if (bytes_left &gt; 0) {
var rand_val_array : [@sizeOf(usize)]u8 = undefined;
([]usize)(rand_val_array)[0] = r.rng.get();
while (bytes_left &gt; 0) {
buf[buf.len - bytes_left] = rand_val_array[@sizeOf(usize) - bytes_left];
bytes_left -= 1;
}
}
}
/// Get a random unsigned integer with even distribution between `start`
/// inclusive and `end` exclusive.
// TODO support signed integers and then rename to &quot;range&quot;
pub fn rangeUnsigned(r: &amp;Rand, comptime T: type, start: T, end: T) -&gt; T {
const range = end - start;
const leftover = @maxValue(T) % range;
const upper_bound = @maxValue(T) - leftover;
var rand_val_array : [@sizeOf(T)]u8 = undefined;
while (true) {
r.fillBytes(rand_val_array);
const rand_val = ([]T)(rand_val_array)[0];
if (rand_val &lt; upper_bound) {
return start + (rand_val % range);
}
}
}
/// Get a floating point value in the range 0.0..1.0.
pub fn float(r: &amp;Rand, comptime T: type) -&gt; T {
// TODO Implement this way instead:
// const int = @int_type(false, @sizeOf(T) * 8);
// const mask = ((1 &lt;&lt; @float_mantissa_bit_count(T)) - 1);
// const rand_bits = r.rng.scalar(int) &amp; mask;
// return @float_compose(T, false, 0, rand_bits) - 1.0
const int_type = @intType(false, @sizeOf(T) * 8);
const precision = if (T == f32) {
16777216
} else if (T == f64) {
9007199254740992
} else {
@compileError(&quot;unknown floating point type&quot;)
};
return T(r.rangeUnsigned(int_type, 0, precision)) / T(precision);
}
};
fn MersenneTwister(
comptime int: type, comptime n: usize, comptime m: usize, comptime r: int,
comptime a: int,
comptime u: int, comptime d: int,
comptime s: int, comptime b: int,
comptime t: int, comptime c: int,
comptime l: int, comptime f: int) -&gt; type
{
struct {
const Self = this;
array: [n]int,
index: usize,
pub fn init(mt: &amp;Self, seed: int) {
mt.index = n;
var prev_value = seed;
mt.array[0] = prev_value;
{var i: usize = 1; while (i &lt; n) : (i += 1) {
prev_value = int(i) +% f *% (prev_value ^ (prev_value &gt;&gt; (int.bit_count - 2)));
mt.array[i] = prev_value;
}};
}
pub fn get(mt: &amp;Self) -&gt; int {
const mag01 = []int{0, a};
const LM: int = (1 &lt;&lt; r) - 1;
const UM = ~LM;
if (mt.index &gt;= n) {
var i: usize = 0;
while (i &lt; n - m) : (i += 1) {
const x = (mt.array[i] &amp; UM) | (mt.array[i + 1] &amp; LM);
mt.array[i] = mt.array[i + m] ^ (x &gt;&gt; 1) ^ mag01[x &amp; 0x1];
}
while (i &lt; n - 1) : (i += 1) {
const x = (mt.array[i] &amp; UM) | (mt.array[i + 1] &amp; LM);
mt.array[i] = mt.array[i + m - n] ^ (x &gt;&gt; 1) ^ mag01[x &amp; 0x1];
}
const x = (mt.array[i] &amp; UM) | (mt.array[0] &amp; LM);
mt.array[i] = mt.array[m - 1] ^ (x &gt;&gt; 1) ^ mag01[x &amp; 0x1];
mt.index = 0;
}
var x = mt.array[mt.index];
mt.index += 1;
x ^= ((x &gt;&gt; u) &amp; d);
x ^= ((x &lt;&lt;% s) &amp; b);
x ^= ((x &lt;&lt;% t) &amp; c);
x ^= (x &gt;&gt; l);
return x;
}
}
}
test "float 32" {
var r: Rand = undefined;
r.init(42);
{var i: usize = 0; while (i &lt; 1000) : (i += 1) {
const val = r.float(f32);
assert(val &gt;= 0.0);
assert(val &lt; 1.0);
}}
}
test "MT19937_64" {
var rng: MT19937_64 = undefined;
rng.init(rand_test.mt64_seed);
for (rand_test.mt64_data) |value| {
assert(value == rng.get());
}
}
test "MT19937_32" {
var rng: MT19937_32 = undefined;
rng.init(rand_test.mt32_seed);
for (rand_test.mt32_data) |value| {
assert(value == rng.get());
}
}</code></pre>
<script src="highlight/highlight.pack.js"></script>
<script>hljs.initHighlightingOnLoad();</script>
</body>
</html>

File diff suppressed because it is too large Load Diff

View File

@ -1271,6 +1271,12 @@ void bigint_and(BigInt *dest, const BigInt *op1, const BigInt *op2) {
}
void bigint_xor(BigInt *dest, const BigInt *op1, const BigInt *op2) {
if (op1->digit_count == 0) {
return bigint_init_bigint(dest, op2);
}
if (op2->digit_count == 0) {
return bigint_init_bigint(dest, op1);
}
if (op1->is_negative || op2->is_negative) {
// TODO this code path is untested
size_t big_bit_count = max(bigint_bits_needed(op1), bigint_bits_needed(op2));
@ -1289,14 +1295,16 @@ void bigint_xor(BigInt *dest, const BigInt *op1, const BigInt *op2) {
dest->is_negative = false;
const uint64_t *op1_digits = bigint_ptr(op1);
const uint64_t *op2_digits = bigint_ptr(op2);
assert(op1->digit_count > 0 && op2->digit_count > 0);
uint64_t first_digit = op1_digits[0] ^ op2_digits[0];
if (op1->digit_count == 1 && op2->digit_count == 1) {
dest->digit_count = 1;
dest->data.digit = op1_digits[0] ^ op2_digits[0];
dest->data.digit = first_digit;
bigint_normalize(dest);
return;
}
// TODO this code path is untested
uint64_t first_digit = dest->data.digit;
dest->digit_count = max(op1->digit_count, op2->digit_count);
dest->data.digits = allocate_nonzero<uint64_t>(dest->digit_count);
dest->data.digits[0] = first_digit;

View File

@ -921,31 +921,41 @@ static LLVMValueRef get_memcpy_fn_val(CodeGen *g) {
return g->memcpy_fn_val;
}
static LLVMValueRef get_return_address_fn_val(CodeGen *g) {
if (g->return_address_fn_val)
return g->return_address_fn_val;
TypeTableEntry *return_type = get_pointer_to_type(g, g->builtin_types.entry_u8, true);
LLVMTypeRef fn_type = LLVMFunctionType(return_type->type_ref,
&g->builtin_types.entry_i32->type_ref, 1, false);
g->return_address_fn_val = LLVMAddFunction(g->module, "llvm.returnaddress", fn_type);
assert(LLVMGetIntrinsicID(g->return_address_fn_val));
return g->return_address_fn_val;
}
static LLVMValueRef get_return_err_fn(CodeGen *g) {
if (g->return_err_fn != nullptr)
return g->return_err_fn;
assert(g->err_tag_type != nullptr);
LLVMTypeRef ptr_u8 = LLVMPointerType(LLVMInt8Type(), 0);
LLVMTypeRef arg_types[] = {
// error return trace pointer
get_ptr_to_stack_trace_type(g)->type_ref,
// return address
ptr_u8,
};
LLVMTypeRef fn_type_ref = LLVMFunctionType(LLVMVoidType(), arg_types, 2, false);
LLVMTypeRef fn_type_ref = LLVMFunctionType(LLVMVoidType(), arg_types, 1, false);
Buf *fn_name = get_mangled_name(g, buf_create_from_str("__zig_return_error"), false);
LLVMValueRef fn_val = LLVMAddFunction(g->module, buf_ptr(fn_name), fn_type_ref);
addLLVMFnAttr(fn_val, "noinline"); // so that we can look at return address
addLLVMFnAttr(fn_val, "cold");
LLVMSetLinkage(fn_val, LLVMInternalLinkage);
LLVMSetFunctionCallConv(fn_val, get_llvm_cc(g, CallingConventionUnspecified));
addLLVMFnAttr(fn_val, "nounwind");
add_uwtable_attr(g, fn_val);
addLLVMArgAttr(fn_val, (unsigned)0, "nonnull");
addLLVMArgAttr(fn_val, (unsigned)1, "nonnull");
if (g->build_mode == BuildModeDebug) {
ZigLLVMAddFunctionAttr(fn_val, "no-frame-pointer-elim", "true");
ZigLLVMAddFunctionAttr(fn_val, "no-frame-pointer-elim-non-leaf", nullptr);
@ -983,7 +993,9 @@ static LLVMValueRef get_return_err_fn(CodeGen *g) {
LLVMValueRef ptr_value = gen_load_untyped(g, ptr_field_ptr, 0, false, "");
LLVMValueRef address_slot = LLVMBuildInBoundsGEP(g->builder, ptr_value, address_indices, 1, "");
LLVMValueRef return_address = LLVMBuildPtrToInt(g->builder, LLVMGetParam(fn_val, 1), usize_type_ref, "");
LLVMValueRef zero = LLVMConstNull(g->builtin_types.entry_i32->type_ref);
LLVMValueRef return_address_ptr = LLVMBuildCall(g->builder, get_return_address_fn_val(g), &zero, 1, "");
LLVMValueRef return_address = LLVMBuildPtrToInt(g->builder, return_address_ptr, usize_type_ref, "");
LLVMValueRef address_value = LLVMBuildPtrToInt(g->builder, return_address, usize_type_ref, "");
gen_store_untyped(g, address_value, address_slot, 0, false);
@ -1431,17 +1443,11 @@ static LLVMValueRef ir_render_return(CodeGen *g, IrExecutable *executable, IrIns
is_err_return = true;
}
if (is_err_return) {
LLVMBasicBlockRef return_block = LLVMAppendBasicBlock(g->cur_fn_val, "ReturnError");
LLVMValueRef block_address = LLVMBlockAddress(g->cur_fn_val, return_block);
LLVMValueRef return_err_fn = get_return_err_fn(g);
LLVMValueRef args[] = {
g->cur_err_ret_trace_val,
block_address,
};
LLVMBuildBr(g->builder, return_block);
LLVMPositionBuilderAtEnd(g->builder, return_block);
LLVMValueRef call_instruction = ZigLLVMBuildCall(g->builder, return_err_fn, args, 2,
LLVMValueRef call_instruction = ZigLLVMBuildCall(g->builder, return_err_fn, args, 1,
get_llvm_cc(g, CallingConventionUnspecified), ZigLLVM_FnInlineAuto, "");
LLVMSetTailCall(call_instruction, true);
}
@ -3291,20 +3297,6 @@ static LLVMValueRef ir_render_breakpoint(CodeGen *g, IrExecutable *executable, I
return nullptr;
}
static LLVMValueRef get_return_address_fn_val(CodeGen *g) {
if (g->return_address_fn_val)
return g->return_address_fn_val;
TypeTableEntry *return_type = get_pointer_to_type(g, g->builtin_types.entry_u8, true);
LLVMTypeRef fn_type = LLVMFunctionType(return_type->type_ref,
&g->builtin_types.entry_i32->type_ref, 1, false);
g->return_address_fn_val = LLVMAddFunction(g->module, "llvm.returnaddress", fn_type);
assert(LLVMGetIntrinsicID(g->return_address_fn_val));
return g->return_address_fn_val;
}
static LLVMValueRef ir_render_return_address(CodeGen *g, IrExecutable *executable,
IrInstructionReturnAddress *instruction)
{

View File

@ -760,7 +760,7 @@ const CrossTarget = struct {
environ: builtin.Environ,
};
const Target = union(enum) {
pub const Target = union(enum) {
Native: void,
Cross: CrossTarget,

View File

@ -21,6 +21,8 @@ pub const Blake2s256 = Blake2s(256);
fn Blake2s(comptime out_len: usize) -> type { return struct {
const Self = this;
const block_size = 64;
const digest_size = out_len / 8;
const iv = [8]u32 {
0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A,
@ -236,6 +238,8 @@ pub const Blake2b512 = Blake2b(512);
fn Blake2b(comptime out_len: usize) -> type { return struct {
const Self = this;
const block_size = 128;
const digest_size = out_len / 8;
const iv = [8]u64 {
0x6a09e667f3bcc908, 0xbb67ae8584caa73b,

View File

@ -1,5 +1,5 @@
pub const Md5 = @import("sha1.zig").Md5;
pub const Sha1 = @import("md5.zig").Sha1;
pub const Md5 = @import("md5.zig").Md5;
pub const Sha1 = @import("sha1.zig").Sha1;
const sha2 = @import("sha2.zig");
pub const Sha224 = sha2.Sha224;
@ -7,6 +7,12 @@ pub const Sha256 = sha2.Sha256;
pub const Sha384 = sha2.Sha384;
pub const Sha512 = sha2.Sha512;
const sha3 = @import("sha3.zig");
pub const Sha3_224 = sha3.Sha3_224;
pub const Sha3_256 = sha3.Sha3_256;
pub const Sha3_384 = sha3.Sha3_384;
pub const Sha3_512 = sha3.Sha3_512;
const blake2 = @import("blake2.zig");
pub const Blake2s224 = blake2.Blake2s224;
pub const Blake2s256 = blake2.Blake2s256;
@ -17,5 +23,6 @@ test "crypto" {
_ = @import("md5.zig");
_ = @import("sha1.zig");
_ = @import("sha2.zig");
_ = @import("sha3.zig");
_ = @import("blake2.zig");
}

View File

@ -14,14 +14,10 @@ fn Rp(a: usize, b: usize, c: usize, d: usize, k: usize, s: u32, t: u32) -> Round
return RoundParam { .a = a, .b = b, .c = c, .d = d, .k = k, .s = s, .t = t };
}
/// const hash1 = Md5.hash("my input");
///
/// const hasher = Md5.init();
/// hasher.update("my ");
/// hasher.update("input");
/// const hash2 = hasher.final();
pub const Md5 = struct {
const Self = this;
const block_size = 64;
const digest_size = 16;
s: [4]u32,
// Streaming Cache

View File

@ -16,6 +16,8 @@ fn Rp(a: usize, b: usize, c: usize, d: usize, e: usize, i: u32) -> RoundParam {
pub const Sha1 = struct {
const Self = this;
const block_size = 64;
const digest_size = 20;
s: [5]u32,
// Streaming Cache

View File

@ -58,6 +58,8 @@ pub const Sha256 = Sha2_32(Sha256Params);
fn Sha2_32(comptime params: Sha2Params32) -> type { return struct {
const Self = this;
const block_size = 64;
const digest_size = params.out_len / 8;
s: [8]u32,
// Streaming Cache
@ -372,7 +374,8 @@ pub const Sha512 = Sha2_64(Sha512Params);
fn Sha2_64(comptime params: Sha2Params64) -> type { return struct {
const Self = this;
const u9 = @IntType(false, 9);
const block_size = 128;
const digest_size = params.out_len / 8;
s: [8]u64,
// Streaming Cache

281
std/crypto/sha3.zig Normal file
View File

@ -0,0 +1,281 @@
const mem = @import("../mem.zig");
const math = @import("../math/index.zig");
const endian = @import("../endian.zig");
const debug = @import("../debug/index.zig");
const builtin = @import("builtin");
const htest = @import("test.zig");
// SHA-3 fixed-length hash functions: Keccak with a digest of `bits` bits and
// the SHA-3 domain-separation padding byte 0x06.
pub const Sha3_224 = Keccak(224, 0x06);
pub const Sha3_256 = Keccak(256, 0x06);
pub const Sha3_384 = Keccak(384, 0x06);
pub const Sha3_512 = Keccak(512, 0x06);
/// Generic Keccak sponge hash. `bits` is the digest length in bits and
/// `delim` is the domain-separation byte XORed in during padding
/// (0x06 selects the SHA-3 variants above).
fn Keccak(comptime bits: usize, comptime delim: u8) -> type { return struct {
    const Self = this;
    const block_size = 200;
    const digest_size = bits / 8;

    // 1600-bit (200-byte) Keccak state.
    s: [200]u8,
    // Number of state bytes already XORed with input since the last permutation.
    offset: usize,
    // Sponge rate in bytes: 200 - bits/4, so the capacity is twice the digest size.
    rate: usize,

    pub fn init() -> Self {
        var d: Self = undefined;
        d.reset();
        return d;
    }

    /// Zero the state so the hasher can be reused.
    pub fn reset(d: &Self) {
        mem.set(u8, d.s[0..], 0);
        d.offset = 0;
        d.rate = 200 - (bits / 4);
    }

    /// One-shot helper: hash `b` into `out` (`out` must hold digest_size bytes).
    pub fn hash(b: []const u8, out: []u8) {
        var d = Self.init();
        d.update(b);
        d.final(out);
    }

    /// Absorb `b` into the sponge, permuting whenever a full rate block fills up.
    pub fn update(d: &Self, b: []const u8) {
        var ip: usize = 0;
        var len = b.len;
        var rate = d.rate - d.offset;
        var offset = d.offset;

        // absorb
        while (len >= rate) {
            for (d.s[offset .. offset + rate]) |*r, i|
                *r ^= b[ip..][i];

            keccak_f(1600, d.s[0..]);

            ip += rate;
            len -= rate;
            rate = d.rate;
            offset = 0;
        }

        // Fold the remaining partial block into the state; it is completed on
        // the next update() or by final().
        for (d.s[offset .. offset + len]) |*r, i|
            *r ^= b[ip..][i];
        d.offset = offset + len;
    }

    /// Apply SHA-3 padding, then squeeze `bits / 8` digest bytes into `out`.
    pub fn final(d: &Self, out: []u8) {
        // padding: domain byte at the current offset, 0x80 at the end of the rate.
        d.s[d.offset] ^= delim;
        d.s[d.rate - 1] ^= 0x80;
        keccak_f(1600, d.s[0..]);

        // squeeze
        var op: usize = 0;
        var len: usize = bits / 8;
        while (len >= d.rate) {
            mem.copy(u8, out[op..], d.s[0..d.rate]);
            keccak_f(1600, d.s[0..]);
            op += d.rate;
            len -= d.rate;
        }

        mem.copy(u8, out[op..], d.s[0..len]);
    }
};}
// Keccak round constants, XORed into lane 0 by the iota step (one per round).
const RC = []const u64 {
    0x0000000000000001, 0x0000000000008082, 0x800000000000808a, 0x8000000080008000,
    0x000000000000808b, 0x0000000080000001, 0x8000000080008081, 0x8000000000008009,
    0x000000000000008a, 0x0000000000000088, 0x0000000080008009, 0x000000008000000a,
    0x000000008000808b, 0x800000000000008b, 0x8000000000008089, 0x8000000000008003,
    0x8000000000008002, 0x8000000000000080, 0x000000000000800a, 0x800000008000000a,
    0x8000000080008081, 0x8000000000008080, 0x0000000080000001, 0x8000000080008008,
};

// Per-lane rotation offsets used by the rho step.
const ROTC = []const usize {
     1,  3,  6, 10, 15, 21, 28, 36,
    45, 55,  2, 14, 27, 41, 56,  8,
    25, 43, 62, 18, 39, 61, 20, 44
};

// Lane visiting order for the pi permutation.
const PIL = []const usize {
    10,  7, 11, 17, 18,  3,  5, 16,
     8, 21, 24,  4, 15, 23, 19, 13,
    12,  2, 20, 14, 22,  9,  6,  1
};

// Lookup table used to index c[] as (x + k) mod 5 without a runtime modulo.
const M5 = []const usize {
    0, 1, 2, 3, 4, 0, 1, 2, 3, 4
};
/// The Keccak-f permutation applied in place to the `F`-bit state in `d`.
/// For F == 1600 the lane width B is 64 and 12 + 2*log2(64) == 24 rounds run.
fn keccak_f(comptime F: usize, d: []u8) {
    debug.assert(d.len == F / 8);

    // Lane width in bits.
    const B = F / 25;
    const no_rounds = comptime x: { break :x 12 + 2 * math.log2(B); };

    var s = []const u64 {0} ** 25;
    var t = []const u64 {0} ** 1;
    var c = []const u64 {0} ** 5;

    // Load the byte state as 25 little-endian u64 lanes.
    for (s) |*r, i| {
        *r = mem.readIntLE(u64, d[8*i .. 8*i + 8]);
    }

    var x: usize = 0;
    var y: usize = 0;

    // TODO: Cannot unroll all loops here due to comptime differences.
    inline for (RC[0..no_rounds]) |round| {
        // theta
        x = 0; while (x < 5) : (x += 1) {
            c[x] = s[x] ^ s[x+5] ^ s[x+10] ^ s[x+15] ^ s[x+20];
        }
        x = 0; while (x < 5) : (x += 1) {
            t[0] = c[M5[x+4]] ^ math.rotl(u64, c[M5[x+1]], usize(1));
            y = 0; while (y < 5) : (y += 1) {
                s[x + y*5] ^= t[0];
            }
        }

        // rho+pi
        t[0] = s[1];
        x = 0; while (x < 24) : (x += 1) {
            c[0] = s[PIL[x]];
            s[PIL[x]] = math.rotl(u64, t[0], ROTC[x]);
            t[0] = c[0];
        }

        // chi
        y = 0; while (y < 5) : (y += 1) {
            x = 0; while (x < 5) : (x += 1) {
                c[x] = s[x + y*5];
            }
            x = 0; while (x < 5) : (x += 1) {
                s[x + y*5] = c[x] ^ (~c[M5[x+1]] & c[M5[x+2]]);
            }
        }

        // iota
        s[0] ^= round;
    }

    // Store the lanes back into the byte state, little-endian.
    for (s) |r, i| {
        mem.writeInt(d[8*i .. 8*i + 8], r, builtin.Endian.Little);
    }
}
// Known SHA3-224 digests for the empty string, "abc", and the 112-byte message.
test "sha3-224 single" {
    htest.assertEqualHash(Sha3_224, "6b4e03423667dbb73b6e15454f0eb1abd4597f9a1b078e3f5b5a6bc7", "");
    htest.assertEqualHash(Sha3_224, "e642824c3f8cf24ad09234ee7d3c766fc9a3a5168d0c94ad73b46fdf", "abc");
    htest.assertEqualHash(Sha3_224, "543e6868e1666c1a643630df77367ae5a62a85070a51c14cbf665cbc", "abcdefghbcdefghicdefghijdefghijkefghijklfghijklmghijklmnhijklmnoijklmnopjklmnopqklmnopqrlmnopqrsmnopqrstnopqrstu");
}

// Same vectors fed incrementally; also exercises reset() between hashes.
test "sha3-224 streaming" {
    var h = Sha3_224.init();
    var out: [28]u8 = undefined;

    h.final(out[0..]);
    htest.assertEqual("6b4e03423667dbb73b6e15454f0eb1abd4597f9a1b078e3f5b5a6bc7", out[0..]);

    h.reset();
    h.update("abc");
    h.final(out[0..]);
    htest.assertEqual("e642824c3f8cf24ad09234ee7d3c766fc9a3a5168d0c94ad73b46fdf", out[0..]);

    h.reset();
    h.update("a");
    h.update("b");
    h.update("c");
    h.final(out[0..]);
    htest.assertEqual("e642824c3f8cf24ad09234ee7d3c766fc9a3a5168d0c94ad73b46fdf", out[0..]);
}
// Known SHA3-256 digests for the empty string, "abc", and the 112-byte message.
test "sha3-256 single" {
    htest.assertEqualHash(Sha3_256, "a7ffc6f8bf1ed76651c14756a061d662f580ff4de43b49fa82d80a4b80f8434a" , "");
    htest.assertEqualHash(Sha3_256, "3a985da74fe225b2045c172d6bd390bd855f086e3e9d525b46bfe24511431532", "abc");
    htest.assertEqualHash(Sha3_256, "916f6061fe879741ca6469b43971dfdb28b1a32dc36cb3254e812be27aad1d18", "abcdefghbcdefghicdefghijdefghijkefghijklfghijklmghijklmnhijklmnoijklmnopjklmnopqklmnopqrlmnopqrsmnopqrstnopqrstu");
}

// Same vectors fed incrementally; also exercises reset() between hashes.
test "sha3-256 streaming" {
    var h = Sha3_256.init();
    var out: [32]u8 = undefined;

    h.final(out[0..]);
    htest.assertEqual("a7ffc6f8bf1ed76651c14756a061d662f580ff4de43b49fa82d80a4b80f8434a", out[0..]);

    h.reset();
    h.update("abc");
    h.final(out[0..]);
    htest.assertEqual("3a985da74fe225b2045c172d6bd390bd855f086e3e9d525b46bfe24511431532", out[0..]);

    h.reset();
    h.update("a");
    h.update("b");
    h.update("c");
    h.final(out[0..]);
    htest.assertEqual("3a985da74fe225b2045c172d6bd390bd855f086e3e9d525b46bfe24511431532", out[0..]);
}
// Known SHA3-384 digests for the empty string, "abc", and the 112-byte message.
test "sha3-384 single" {
    const h1 = "0c63a75b845e4f7d01107d852e4c2485c51a50aaaa94fc61995e71bbee983a2ac3713831264adb47fb6bd1e058d5f004";
    htest.assertEqualHash(Sha3_384, h1 , "");
    const h2 = "ec01498288516fc926459f58e2c6ad8df9b473cb0fc08c2596da7cf0e49be4b298d88cea927ac7f539f1edf228376d25";
    htest.assertEqualHash(Sha3_384, h2, "abc");
    const h3 = "79407d3b5916b59c3e30b09822974791c313fb9ecc849e406f23592d04f625dc8c709b98b43b3852b337216179aa7fc7";
    htest.assertEqualHash(Sha3_384, h3, "abcdefghbcdefghicdefghijdefghijkefghijklfghijklmghijklmnhijklmnoijklmnopjklmnopqklmnopqrlmnopqrsmnopqrstnopqrstu");
}

// Same vectors fed incrementally; also exercises reset() between hashes.
test "sha3-384 streaming" {
    var h = Sha3_384.init();
    var out: [48]u8 = undefined;

    const h1 = "0c63a75b845e4f7d01107d852e4c2485c51a50aaaa94fc61995e71bbee983a2ac3713831264adb47fb6bd1e058d5f004";
    h.final(out[0..]);
    htest.assertEqual(h1, out[0..]);

    const h2 = "ec01498288516fc926459f58e2c6ad8df9b473cb0fc08c2596da7cf0e49be4b298d88cea927ac7f539f1edf228376d25";
    h.reset();
    h.update("abc");
    h.final(out[0..]);
    htest.assertEqual(h2, out[0..]);

    h.reset();
    h.update("a");
    h.update("b");
    h.update("c");
    h.final(out[0..]);
    htest.assertEqual(h2, out[0..]);
}
// Known SHA3-512 digests for the empty string, "abc", and the 112-byte message.
test "sha3-512 single" {
    const h1 = "a69f73cca23a9ac5c8b567dc185a756e97c982164fe25859e0d1dcc1475c80a615b2123af1f5f94c11e3e9402c3ac558f500199d95b6d3e301758586281dcd26";
    htest.assertEqualHash(Sha3_512, h1 , "");
    const h2 = "b751850b1a57168a5693cd924b6b096e08f621827444f70d884f5d0240d2712e10e116e9192af3c91a7ec57647e3934057340b4cf408d5a56592f8274eec53f0";
    htest.assertEqualHash(Sha3_512, h2, "abc");
    const h3 = "afebb2ef542e6579c50cad06d2e578f9f8dd6881d7dc824d26360feebf18a4fa73e3261122948efcfd492e74e82e2189ed0fb440d187f382270cb455f21dd185";
    htest.assertEqualHash(Sha3_512, h3, "abcdefghbcdefghicdefghijdefghijkefghijklfghijklmghijklmnhijklmnoijklmnopjklmnopqklmnopqrlmnopqrsmnopqrstnopqrstu");
}

// Same vectors fed incrementally; also exercises reset() between hashes.
test "sha3-512 streaming" {
    var h = Sha3_512.init();
    var out: [64]u8 = undefined;

    const h1 = "a69f73cca23a9ac5c8b567dc185a756e97c982164fe25859e0d1dcc1475c80a615b2123af1f5f94c11e3e9402c3ac558f500199d95b6d3e301758586281dcd26";
    h.final(out[0..]);
    htest.assertEqual(h1, out[0..]);

    const h2 = "b751850b1a57168a5693cd924b6b096e08f621827444f70d884f5d0240d2712e10e116e9192af3c91a7ec57647e3934057340b4cf408d5a56592f8274eec53f0";
    h.reset();
    h.update("abc");
    h.final(out[0..]);
    htest.assertEqual(h2, out[0..]);

    h.reset();
    h.update("a");
    h.update("b");
    h.update("c");
    h.final(out[0..]);
    htest.assertEqual(h2, out[0..]);
}

View File

@ -0,0 +1,43 @@
// Modify the HashFunction variable to the one wanted to test.
//
// NOTE: The throughput measurement may be slightly lower than other measurements since we run
// through our block alignment functions as well. Be aware when comparing against other tests.
//
// ```
// zig build-exe --release-fast --library c throughput_test.zig
// ./throughput_test
// ```
const HashFunction = @import("md5.zig").Md5;
const BytesToHash = 1024 * Mb;
const std = @import("std");
const c = @cImport({
@cInclude("time.h");
});
const Mb = 1024 * 1024;
/// Hash `BytesToHash` zero bytes with `HashFunction` and report megabytes/second.
pub fn main() -> %void {
    var stdout_file = try std.io.getStdOut();
    var stdout_out_stream = std.io.FileOutStream.init(&stdout_file);
    const stdout = &stdout_out_stream.stream;

    // A zeroed block-sized buffer; only throughput matters, not the contents.
    var block: [HashFunction.block_size]u8 = undefined;
    std.mem.set(u8, block[0..], 0);

    var h = HashFunction.init();
    var offset: usize = 0;
    const start = c.clock();
    while (offset < BytesToHash) : (offset += block.len) {
        h.update(block[0..]);
    }
    const end = c.clock();

    // clock() returns ticks; elapsed seconds = ticks / CLOCKS_PER_SEC. The
    // previous code multiplied by CLOCKS_PER_SEC and divided by 1e6, which is
    // only coincidentally close when CLOCKS_PER_SEC happens to be 1e6.
    const elapsed_s = f64(end - start) / f64(c.CLOCKS_PER_SEC);
    // Convert bytes/s to Mb/s so the value matches the printed unit.
    const throughput = u64(f64(BytesToHash / Mb) / elapsed_s);

    try stdout.print("{}: ", @typeName(HashFunction));
    try stdout.print("{} Mb/s\n", throughput);
}

View File

@ -62,8 +62,7 @@ pub fn HashMap(comptime K: type, comptime V: type,
.allocator = allocator,
.size = 0,
.max_distance_from_start_index = 0,
// it doesn't actually matter what we set this to since we use wrapping integer arithmetic
.modification_count = undefined,
.modification_count = if (want_modification_safety) 0 else {},
};
}
@ -110,6 +109,10 @@ pub fn HashMap(comptime K: type, comptime V: type,
return hm.internalGet(key);
}
    /// Return true if `key` has an entry in the map.
    pub fn contains(hm: &Self, key: K) -> bool {
        return hm.get(key) != null;
    }
pub fn remove(hm: &Self, key: K) -> ?&Entry {
hm.incrementModificationCount();
const start_index = hm.keyToIndex(key);

View File

@ -203,6 +203,20 @@ pub fn dupe(allocator: &Allocator, comptime T: type, m: []const T) -> %[]T {
return new_buf;
}
/// Return `slice` with any leading and trailing elements that appear in
/// `values_to_strip` removed. The result is a sub-slice of `slice`; no copy.
pub fn trim(comptime T: type, slice: []const T, values_to_strip: []const T) -> []const T {
    var start: usize = 0;
    var stop: usize = slice.len;
    // Advance past stripped values at the front.
    while (start < stop) : (start += 1) {
        if (indexOfScalar(T, values_to_strip, slice[start]) == null) break;
    }
    // Back up over stripped values at the end.
    while (stop > start) : (stop -= 1) {
        if (indexOfScalar(T, values_to_strip, slice[stop - 1]) == null) break;
    }
    return slice[start..stop];
}
test "mem.trim" {
    // No-op when nothing matches, and whitespace stripped from both ends.
    assert(eql(u8, trim(u8, "foo", " \n"), "foo"));
    assert(eql(u8, trim(u8, " foo\n ", " \n"), "foo"));
}
/// Linear search for the index of a scalar value inside a slice.
pub fn indexOfScalar(comptime T: type, slice: []const T, value: T) -> ?usize {
return indexOfScalarPos(T, slice, 0, value);

View File

@ -349,6 +349,32 @@ test "big number shifting" {
}
}
// Run the xor checks both at runtime and at comptime.
test "xor" {
    test_xor();
    comptime test_xor();
}
// Basic single-byte xor identities.
fn test_xor() {
    assert(0xFF ^ 0x00 == 0xFF);
    assert(0xF0 ^ 0x0F == 0xFF);
    assert(0xFF ^ 0xF0 == 0x0F);
    assert(0xFF ^ 0x0F == 0xF0);
    assert(0xFF ^ 0xFF == 0x00);
}
// xor on 128-bit comptime integers (wider than any machine word).
test "big number xor" {
    comptime {
        assert(0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF ^ 0x00000000000000000000000000000000 == 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF);
        assert(0xFFFFFFFFFFFFFFFF0000000000000000 ^ 0x0000000000000000FFFFFFFFFFFFFFFF == 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF);
        assert(0xFFFFFFFFFFFFFFFF0000000000000000 ^ 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF == 0x0000000000000000FFFFFFFFFFFFFFFF);
        assert(0x0000000000000000FFFFFFFFFFFFFFFF ^ 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF == 0xFFFFFFFFFFFFFFFF0000000000000000);
        assert(0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF ^ 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF == 0x00000000000000000000000000000000);
        assert(0xFFFFFFFF00000000FFFFFFFF00000000 ^ 0x00000000FFFFFFFF00000000FFFFFFFF == 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF);
        assert(0xFFFFFFFF00000000FFFFFFFF00000000 ^ 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF == 0x00000000FFFFFFFF00000000FFFFFFFF);
        assert(0x00000000FFFFFFFF00000000FFFFFFFF ^ 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF == 0xFFFFFFFF00000000FFFFFFFF00000000);
    }
}
test "f128" {
test_f128();
comptime test_f128();
@ -368,4 +394,4 @@ fn test_f128() {
// Asserts its f128 argument is nonzero.
fn should_not_be_zero(x: f128) {
    assert(x != 0.0);
}
}