From 76fa6cdce36671bd9ad54248d77a3941f2eb3e34 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sat, 31 Dec 2016 01:31:23 -0500
Subject: [PATCH] eradicate use of zeroes in std

---
 src/all_types.hpp |  2 +-
 src/ir.cpp        | 11 ++++++-----
 std/hash_map.zig  | 34 +++++++---------------------------
 std/list.zig      |  2 +-
 std/mem.zig       |  2 +-
 5 files changed, 16 insertions(+), 35 deletions(-)

diff --git a/src/all_types.hpp b/src/all_types.hpp
index 9f470996f3..ef6e2ae654 100644
--- a/src/all_types.hpp
+++ b/src/all_types.hpp
@@ -1382,7 +1382,7 @@ enum AtomicOrder {
 // A basic block contains no branching. Branches send control flow
 // to another basic block.
 // Phi instructions must be first in a basic block.
-// The last instruction in a basic block must be an expression of type unreachable.
+// The last instruction in a basic block must be of type unreachable.
 struct IrBasicBlock {
     ZigList<IrInstruction *> instruction_list;
     IrBasicBlock *other;
diff --git a/src/ir.cpp b/src/ir.cpp
index edb2789c5d..33bfb0fe9f 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -2741,10 +2741,11 @@ static void ir_count_defers(IrBuilder *irb, Scope *inner_scope, Scope *outer_sco
 static void ir_gen_defers_for_block(IrBuilder *irb, Scope *inner_scope, Scope *outer_scope,
         bool gen_error_defers, bool gen_maybe_defers)
 {
-    while (inner_scope != outer_scope) {
-        assert(inner_scope);
-        if (inner_scope->id == ScopeIdDefer) {
-            AstNode *defer_node = inner_scope->source_node;
+    Scope *scope = inner_scope;
+    while (scope != outer_scope) {
+        assert(scope);
+        if (scope->id == ScopeIdDefer) {
+            AstNode *defer_node = scope->source_node;
             assert(defer_node->type == NodeTypeDefer);
             ReturnKind defer_kind = defer_node->data.defer.kind;
             if (defer_kind == ReturnKindUnconditional ||
@@ -2756,7 +2757,7 @@ static void ir_gen_defers_for_block(IrBuilder *irb, Scope *inner_scope, Scope *o
             }
         }
 
-        inner_scope = inner_scope->parent;
+        scope = scope->parent;
     }
 }
 
diff --git a/std/hash_map.zig b/std/hash_map.zig
index 35ede91981..3fe840f4e8 100644
--- a/std/hash_map.zig
+++ b/std/hash_map.zig
@@ -9,22 +9,12 @@ const debug_u32 = if (want_modification_safety) u32 else void;
 
 pub fn HashMap(inline K: type, inline V: type,
     inline hash: fn(key: K)->u32, inline eql: fn(a: K, b: K)->bool) -> type
-{
-    SmallHashMap(K, V, hash, eql, @sizeOf(usize))
-}
-
-pub fn SmallHashMap(inline K: type, inline V: type,
-    inline hash: fn(key: K)->u32, inline eql: fn(a: K, b: K)->bool,
-    inline static_size: usize) -> type
 {
     struct {
         entries: []Entry,
         size: usize,
         max_distance_from_start_index: usize,
         allocator: &Allocator,
-        // if the hash map is small enough, we use linear search through these
-        // entries instead of allocating memory
-        prealloc_entries: [static_size]Entry,
         // this is used to detect bugs where a hashtable is edited while an iterator is running.
         modification_count: debug_u32,
@@ -64,18 +54,16 @@ pub fn SmallHashMap(inline K: type, inline V: type,
         };
 
         pub fn init(hm: &Self, allocator: &Allocator) {
-            hm.entries = hm.prealloc_entries[0...];
+            hm.entries = []Entry{};
             hm.allocator = allocator;
             hm.size = 0;
             hm.max_distance_from_start_index = 0;
-            hm.prealloc_entries = zeroes; // sets used to false for all entries
-            hm.modification_count = zeroes;
+            // it doesn't actually matter what we set this to since we use wrapping integer arithmetic
+            hm.modification_count = undefined;
         }
 
         pub fn deinit(hm: &Self) {
-            if (hm.entries.ptr != &hm.prealloc_entries[0]) {
-                hm.allocator.free(Entry, hm.entries);
-            }
+            hm.allocator.free(Entry, hm.entries);
         }
 
         pub fn clear(hm: &Self) {
@@ -90,14 +78,8 @@ pub fn SmallHashMap(inline K: type, inline V: type,
         pub fn put(hm: &Self, key: K, value: V) -> %void {
             hm.incrementModificationCount();
 
-            const resize = if (hm.entries.ptr == &hm.prealloc_entries[0]) {
-                // preallocated entries table is full
-                hm.size == hm.entries.len
-            } else {
-                // if we get too full (60%), double the capacity
-                hm.size * 5 >= hm.entries.len * 3
-            };
-            if (resize) {
+            // if we get too full (60%), double the capacity
+            if (hm.size * 5 >= hm.entries.len * 3) {
                 const old_entries = hm.entries;
                 %return hm.initCapacity(hm.entries.len * 2);
                 // dump all of the old elements into the new table
@@ -106,9 +88,7 @@ pub fn SmallHashMap(inline K: type, inline V: type,
                         hm.internalPut(old_entry.key, old_entry.value);
                     }
                 }
-                if (old_entries.ptr != &hm.prealloc_entries[0]) {
-                    hm.allocator.free(Entry, old_entries);
-                }
+                hm.allocator.free(Entry, old_entries);
             }
 
             hm.internalPut(key, value);
diff --git a/std/list.zig b/std/list.zig
index 4269f7b1e0..08b766224c 100644
--- a/std/list.zig
+++ b/std/list.zig
@@ -13,7 +13,7 @@ pub fn List(inline T: type) -> type{
 
         pub fn init(allocator: &Allocator) -> Self {
             Self {
-                .items = zeroes,
+                .items = []T{},
                 .len = 0,
                 .allocator = allocator,
             }
diff --git a/std/mem.zig b/std/mem.zig
index 451e2c2167..2a72efb264 100644
--- a/std/mem.zig
+++ b/std/mem.zig
@@ -67,7 +67,7 @@ pub fn cmp(inline T: type, a: []const T, b: []const T) -> Cmp {
 }
 
 pub fn sliceAsInt(buf: []u8, is_be: bool, inline T: type) -> T {
-    var result: T = zeroes;
+    var result: T = undefined;
     const result_slice = ([]u8)((&result)[0...1]);
     const padding = @sizeOf(T) - buf.len;