213 Commits

Author SHA1 Message Date
Andrew Kelley
4e182c7e9e
inferred comptime array inits 2019-06-17 17:46:03 -04:00
Andrew Kelley
74250e434e
inferred comptime union inits 2019-06-17 16:27:45 -04:00
Andrew Kelley
9564c05cd5
better result location handling of inline loops 2019-06-15 19:19:13 -04:00
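A minimal hypothetical sketch (not from the commit) of the kind of inline-loop code whose result locations this refines:

```zig
export fn entry() void {
    comptime var i = 0;
    var total: i32 = 0;
    // the inline loop is unrolled at compile time; `total` is the
    // runtime result location being written through
    inline while (i < 3) : (i += 1) {
        total += i;
    }
}
```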
Andrew Kelley
6bf193af19
better result location semantics with optionals and return locations
somewhere along this branch, #1901 has been fixed.
2019-06-15 12:28:21 -04:00
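A hypothetical sketch of code combining an optional return type with a struct init (assumed illustration, not from the commit):

```zig
const Foo = struct {
    x: i32,
};

fn maybeMake(c: bool) ?Foo {
    if (!c) return null;
    // the struct init can be written into the payload of the optional
    // return location instead of a temporary
    return Foo{ .x = 1 };
}
```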
Andrew Kelley
e1d14e73b5
fix @bitCast semantics when there is no parent result loc 2019-06-11 15:44:06 -04:00
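Hypothetical example of a @bitCast whose result has no parent result location (it is passed directly as an argument):

```zig
export fn entry() void {
    var x: f32 = 12.5;
    // no surrounding variable provides a result location for the @bitCast
    useBits(@bitCast(u32, x));
}

fn useBits(bits: u32) void {}
```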
Andrew Kelley
06f307ff77
fix implicit casting return value struct/array init to optional 2019-06-11 12:19:57 -04:00
Andrew Kelley
f6d4e2565e
use result loc for ref instruction 2019-06-10 23:51:43 -04:00
Andrew Kelley
4582ec518f
result location semantics for vector to array
```zig
export fn entry() void {
    var x: @Vector(4, i32) = undefined;
    var y: [4]i32 = x;
}
```

```llvm
define void @entry() #2 !dbg !35 {
Entry:
  %x = alloca <4 x i32>, align 16
  %y = alloca [4 x i32], align 4
  %0 = bitcast <4 x i32>* %x to i8*, !dbg !47
  call void @llvm.memset.p0i8.i64(i8* align 16 %0, i8 -86, i64 16, i1 false), !dbg !47
  call void @llvm.dbg.declare(metadata <4 x i32>* %x, metadata !39, metadata !DIExpression()), !dbg !47
  %1 = load <4 x i32>, <4 x i32>* %x, align 16, !dbg !48
  %2 = bitcast [4 x i32]* %y to <4 x i32>*, !dbg !48
  store <4 x i32> %1, <4 x i32>* %2, align 16, !dbg !48
  call void @llvm.dbg.declare(metadata [4 x i32]* %y, metadata !45, metadata !DIExpression()), !dbg !49
  ret void, !dbg !50
}
```
2019-06-10 19:49:24 -04:00
Andrew Kelley
9a324ecb42
result loc semantics for loading packed struct pointer to packed struct
```zig
export fn entry() void {
    var x = foo();
    var ptr = &x.b;
    var y = ptr.*;
}
const Foo = packed struct {
    a: u24 = 1,
    b: Bar = Bar{},
};

const Bar = packed struct {
    a: u4 = 2,
    b: u4 = 3,
};
```

```llvm
define void @entry() #2 !dbg !35 {
Entry:
  %x = alloca %Foo, align 1
  %ptr = alloca i32*, align 8
  %y = alloca %Bar, align 1
  call fastcc void @foo(%Foo* sret %x), !dbg !55
  call void @llvm.dbg.declare(metadata %Foo* %x, metadata !39, metadata !DIExpression()), !dbg !56
  %0 = getelementptr inbounds %Foo, %Foo* %x, i32 0, i32 0, !dbg !57
  store i32* %0, i32** %ptr, align 8, !dbg !57
  call void @llvm.dbg.declare(metadata i32** %ptr, metadata !51, metadata !DIExpression()), !dbg !58
  %1 = load i32*, i32** %ptr, align 8, !dbg !59
  %2 = load i32, i32* %1, align 1, !dbg !60
  %3 = lshr i32 %2, 24, !dbg !60
  %4 = trunc i32 %3 to i8, !dbg !60
  %5 = bitcast %Bar* %y to i8*, !dbg !60
  store i8 %4, i8* %5, !dbg !60
  call void @llvm.dbg.declare(metadata %Bar* %y, metadata !54, metadata !DIExpression()), !dbg !61
  ret void, !dbg !62
}
```
2019-06-10 19:11:34 -04:00
Andrew Kelley
65f6ea66f4
result loc semantics for @sliceToBytes and @bytesToSlice 2019-06-10 18:34:27 -04:00
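A rough sketch of the two builtins involved (hypothetical usage, not from the commit):

```zig
export fn entry() void {
    var words: [4]u16 = undefined;
    var bytes = @sliceToBytes(words[0..]); // []u16 -> []u8

    var raw: [8]u8 align(@alignOf(u16)) = undefined;
    var halves = @bytesToSlice(u16, raw[0..]); // []u8 -> []u16
}
```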
Andrew Kelley
ee3f7e20f6
result location semantics for cmpxchg 2019-06-10 17:49:36 -04:00
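A minimal sketch of a cmpxchg whose optional result feeds a variable init (era syntax assumed; `AtomicOrder` taken from `@import("builtin")`):

```zig
const builtin = @import("builtin");

export fn entry() void {
    var value: i32 = 1;
    // the ?i32 result of @cmpxchgStrong initializes `x` directly
    var x = @cmpxchgStrong(i32, &value, 1, 2, builtin.AtomicOrder.SeqCst, builtin.AtomicOrder.SeqCst);
}
```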
Andrew Kelley
b9c033ae1a
result location semantics for error union wrapping an error 2019-06-10 17:28:25 -04:00
Andrew Kelley
4f085b8d2c
result location semantics for error union wrapping a payload 2019-06-10 16:55:07 -04:00
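A tiny hypothetical sketch covering this commit and the previous one (wrapping a payload and wrapping an error into an error-union return location):

```zig
fn failing() anyerror!i32 {
    // the error is wrapped into the error-union result location
    return error.Oops;
}

fn succeeding() anyerror!i32 {
    // the payload is wrapped into the error-union result location
    return 1234;
}
```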
Andrew Kelley
eaa9d8bdac
result location semantics for optional wrap 2019-06-10 16:20:13 -04:00
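Hypothetical example of an optional wrap feeding a result location:

```zig
export fn entry() void {
    // the payload is written into the optional's result location
    var x: ?i32 = 1234;
}
```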
Andrew Kelley
c362895116
result location semantics for slices
```zig
export fn entry() void {
    var buf: [10]u8 = undefined;
    const slice1: []const u8 = &buf;
    const slice2 = buf[0..];
}
```

```llvm
define void @entry() #2 !dbg !35 {
Entry:
  %buf = alloca [10 x i8], align 1
  %slice1 = alloca %"[]u8", align 8
  %slice2 = alloca %"[]u8", align 8
  %0 = bitcast [10 x i8]* %buf to i8*, !dbg !46
  call void @llvm.memset.p0i8.i64(i8* align 1 %0, i8 -86, i64 10, i1 false), !dbg !46
  call void @llvm.dbg.declare(metadata [10 x i8]* %buf, metadata !39, metadata !DIExpression()), !dbg !46
  %1 = getelementptr inbounds %"[]u8", %"[]u8"* %slice1, i32 0, i32 0, !dbg !47
  %2 = getelementptr inbounds [10 x i8], [10 x i8]* %buf, i64 0, i64 0, !dbg !47
  store i8* %2, i8** %1, align 8, !dbg !47
  %3 = getelementptr inbounds %"[]u8", %"[]u8"* %slice1, i32 0, i32 1, !dbg !47
  store i64 10, i64* %3, align 8, !dbg !47
  call void @llvm.dbg.declare(metadata %"[]u8"* %slice1, metadata !44, metadata !DIExpression()), !dbg !48
  %4 = getelementptr inbounds %"[]u8", %"[]u8"* %slice2, i32 0, i32 0, !dbg !49
  %5 = getelementptr inbounds [10 x i8], [10 x i8]* %buf, i64 0, i64 0, !dbg !49
  store i8* %5, i8** %4, align 8, !dbg !49
  %6 = getelementptr inbounds %"[]u8", %"[]u8"* %slice2, i32 0, i32 1, !dbg !49
  store i64 10, i64* %6, align 8, !dbg !49
  call void @llvm.dbg.declare(metadata %"[]u8"* %slice2, metadata !45, metadata !DIExpression()), !dbg !50
  ret void, !dbg !51
}
```
2019-06-10 15:49:45 -04:00
Andrew Kelley
17b1ac5d03
result location semantics for @bitCast
```zig
export fn entry() void {
    var x = @bitCast(f32, foo());
}
```

```llvm
define void @entry() #2 !dbg !35 {
Entry:
  %x = alloca float, align 4
  %0 = bitcast float* %x to %Foo*, !dbg !42
  call fastcc void @foo(%Foo* sret %0), !dbg !42
  call void @llvm.dbg.declare(metadata float* %x, metadata !39, metadata !DIExpression()), !dbg !43
  ret void, !dbg !44
}
```
2019-06-10 12:24:19 -04:00
Andrew Kelley
1a51bf6304
hook up result locations for union initializations
```zig
export fn entry() void {
    var x = Foo{ .bar = bar() };
}
```

```llvm
define void @entry() #2 !dbg !44 {
Entry:
  %x = alloca %Foo, align 4
  %0 = getelementptr inbounds %Foo, %Foo* %x, i32 0, i32 1, !dbg !68
  store i1 true, i1* %0, align 1, !dbg !68
  %1 = getelementptr inbounds %Foo, %Foo* %x, i32 0, i32 0, !dbg !68
  %2 = bitcast { i32, [4 x i8] }* %1 to %Bar*, !dbg !68
  call fastcc void @bar(%Bar* sret %2), !dbg !68
  call void @llvm.dbg.declare(metadata %Foo* %x, metadata !48, metadata !DIExpression()), !dbg !69
  ret void, !dbg !70
}
```
2019-06-10 11:15:32 -04:00
Andrew Kelley
3ec766abe3
remove ResultLocField dead code 2019-06-09 10:46:13 -04:00
Andrew Kelley
771e88951a
result location mechanism for struct initialization
```zig
export fn entry() void {
    const static = Foo{
        .x = 9,
        .bar = Bar{ .y = 10 },
    };
    const runtime = foo(true);
}

fn foo(c: bool) Foo {
    return Foo{
        .x = 12,
        .bar = if (c) bar1() else bar2(),
    };
}

fn bar1() Bar {
    return Bar{ .y = 34 };
}

fn bar2() Bar {
    return Bar{ .y = 56 };
}
```

```llvm
@0 = internal unnamed_addr constant %Foo { i32 9, %Bar { i32 10 } }, align 4
@1 = internal unnamed_addr constant %Bar { i32 34 }, align 4
@2 = internal unnamed_addr constant %Bar { i32 56 }, align 4

define void @entry() #2 !dbg !35 {
Entry:
  %runtime = alloca %Foo, align 4
  call void @llvm.dbg.declare(metadata %Foo* @0, metadata !39, metadata !DIExpression()), !dbg !50
  call fastcc void @foo(%Foo* sret %runtime, i1 true), !dbg !51
  call void @llvm.dbg.declare(metadata %Foo* %runtime, metadata !49, metadata !DIExpression()), !dbg !52
  ret void, !dbg !53
}

define internal fastcc void @foo(%Foo* nonnull sret, i1) unnamed_addr #2 !dbg !54 {
Entry:
  %c = alloca i1, align 1
  store i1 %1, i1* %c, align 1
  call void @llvm.dbg.declare(metadata i1* %c, metadata !60, metadata !DIExpression()), !dbg !61
  %2 = getelementptr inbounds %Foo, %Foo* %0, i32 0, i32 0, !dbg !62
  store i32 12, i32* %2, align 4, !dbg !62
  %3 = getelementptr inbounds %Foo, %Foo* %0, i32 0, i32 1, !dbg !64
  %4 = load i1, i1* %c, align 1, !dbg !65
  br i1 %4, label %Then, label %Else, !dbg !65

Then:                                             ; preds = %Entry
  call fastcc void @bar1(%Bar* sret %3), !dbg !66
  br label %EndIf, !dbg !64

Else:                                             ; preds = %Entry
  call fastcc void @bar2(%Bar* sret %3), !dbg !67
  br label %EndIf, !dbg !64

EndIf:                                            ; preds = %Else, %Then
  ret void, !dbg !68
}

define internal fastcc void @bar1(%Bar* nonnull sret) unnamed_addr #2 !dbg !69 {
Entry:
  %1 = bitcast %Bar* %0 to i8*, !dbg !73
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %1, i8* align 4 bitcast (%Bar* @1 to i8*), i64 4, i1 false), !dbg !73
  ret void, !dbg !73
}

define internal fastcc void @bar2(%Bar* nonnull sret) unnamed_addr #2 !dbg !75 {
Entry:
  %1 = bitcast %Bar* %0 to i8*, !dbg !76
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %1, i8* align 4 bitcast (%Bar* @2 to i8*), i64 4, i1 false), !dbg !76
  ret void, !dbg !76
}

!39 = !DILocalVariable(name: "static", scope: !40, file: !5, line: 2, type: !41)
!49 = !DILocalVariable(name: "runtime", scope: !40, file: !5, line: 6, type: !41)
```
2019-06-08 18:51:31 -04:00
Andrew Kelley
76a3938d69
no-copy semantics for peer result function calls
```zig
export fn entry() void {
    var c = true;
    var x = if (c) foo() else bar();
}
```

```llvm
define void @entry() #2 !dbg !35 {
Entry:
  %c = alloca i1, align 1
  %x = alloca %Foo, align 4
  store i1 true, i1* %c, align 1, !dbg !47
  call void @llvm.dbg.declare(metadata i1* %c, metadata !39, metadata !DIExpression()), !dbg !48
  %0 = load i1, i1* %c, align 1, !dbg !49
  br i1 %0, label %Then, label %Else, !dbg !49

Then:                                             ; preds = %Entry
  call fastcc void @foo(%Foo* sret %x), !dbg !50
  br label %EndIf, !dbg !51

Else:                                             ; preds = %Entry
  call fastcc void @bar(%Foo* sret %x), !dbg !52
  br label %EndIf, !dbg !51

EndIf:                                            ; preds = %Else, %Then
  call void @llvm.dbg.declare(metadata %Foo* %x, metadata !42, metadata !DIExpression()), !dbg !53
  ret void, !dbg !54
}
```
2019-06-07 11:34:47 -04:00
Andrew Kelley
143d6ada8f
no-copy semantics for for loops
Note that only the index variable requires a stack allocation, and the
memcpy for the element is gone.

```zig
export fn entry() void {
    var buf: [10]i32 = undefined;
    for (buf) |x| {}
}
```

```llvm
define void @entry() #2 !dbg !35 {
Entry:
  %buf = alloca [10 x i32], align 4
  %i = alloca i64, align 8
  %0 = bitcast [10 x i32]* %buf to i8*, !dbg !47
  call void @llvm.memset.p0i8.i64(i8* align 4 %0, i8 -86, i64 40, i1 false), !dbg !47
  call void @llvm.dbg.declare(metadata [10 x i32]* %buf, metadata !39, metadata !DIExpression()), !dbg !47
  store i64 0, i64* %i, align 8, !dbg !48
  call void @llvm.dbg.declare(metadata i64* %i, metadata !45, metadata !DIExpression()), !dbg !48
  br label %ForCond, !dbg !48

ForCond:                                          ; preds = %ForBody, %Entry
  %1 = load i64, i64* %i, align 8, !dbg !48
  %2 = icmp ult i64 %1, 10, !dbg !48
  br i1 %2, label %ForBody, label %ForEnd, !dbg !48

ForBody:                                          ; preds = %ForCond
  %3 = getelementptr inbounds [10 x i32], [10 x i32]* %buf, i64 0, i64 %1, !dbg !48
  call void @llvm.dbg.declare(metadata i32* %3, metadata !46, metadata !DIExpression()), !dbg !49
  %4 = add nuw i64 %1, 1, !dbg !48
  store i64 %4, i64* %i, align 8, !dbg !48
  br label %ForCond, !dbg !48

ForEnd:                                           ; preds = %ForCond
  ret void, !dbg !50
}
```
2019-06-03 21:40:56 -04:00
Andrew Kelley
d4054e35fe
while loops
Note that neither the payload capture variable nor the error capture
variable requires a stack allocation.

```zig
export fn entry() void {
    var c: anyerror!i32 = 1234;
    while (c) |hi| {} else |e| {}
}
```

```llvm
define void @entry() #2 !dbg !39 {
Entry:
  %c = alloca { i16, i32 }, align 4
  %0 = bitcast { i16, i32 }* %c to i8*, !dbg !52
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %0, i8* align 4 bitcast ({ i16, i32 }* @0 to i8*), i64 8, i1 false), !dbg !52
  call void @llvm.dbg.declare(metadata { i16, i32 }* %c, metadata !43, metadata !DIExpression()), !dbg !52
  br label %WhileCond, !dbg !53

WhileCond:                                        ; preds = %WhileBody, %Entry
  %1 = getelementptr inbounds { i16, i32 }, { i16, i32 }* %c, i32 0, i32 0, !dbg !54
  %2 = load i16, i16* %1, align 2, !dbg !54
  %3 = icmp ne i16 %2, 0, !dbg !54
  br i1 %3, label %WhileElse, label %WhileBody, !dbg !54

WhileBody:                                        ; preds = %WhileCond
  %4 = getelementptr inbounds { i16, i32 }, { i16, i32 }* %c, i32 0, i32 1, !dbg !53
  call void @llvm.dbg.declare(metadata i32* %4, metadata !50, metadata !DIExpression()), !dbg !53
  br label %WhileCond, !dbg !53

WhileElse:                                        ; preds = %WhileCond
  %5 = getelementptr inbounds { i16, i32 }, { i16, i32 }* %c, i32 0, i32 0, !dbg !55
  call void @llvm.dbg.declare(metadata i16* %5, metadata !51, metadata !DIExpression()), !dbg !55
  ret void, !dbg !56
}
```
2019-06-03 20:56:22 -04:00
Andrew Kelley
eb8a132d23
var types, alignment, and comptime 2019-06-03 17:46:58 -04:00
Andrew Kelley
a4aca78722
no-copy semantics for if expr
```zig
export fn entry() void {
    var c = true;
    var x = if (c) u8(4) else u32(10);
}
```

```llvm
define void @entry() #2 !dbg !35 {
Entry:
  %c = alloca i1, align 1
  %x = alloca i32, align 4
  store i1 true, i1* %c, align 1, !dbg !44
  call void @llvm.dbg.declare(metadata i1* %c, metadata !39, metadata !DIExpression()), !dbg !45
  %0 = load i1, i1* %c, align 1, !dbg !46
  br i1 %0, label %Then, label %Else, !dbg !46

Then:                                             ; preds = %Entry
  br label %EndIf, !dbg !47

Else:                                             ; preds = %Entry
  br label %EndIf, !dbg !47

EndIf:                                            ; preds = %Else, %Then
  %1 = phi i32 [ 4, %Then ], [ 10, %Else ], !dbg !47
  store i32 %1, i32* %x, align 4, !dbg !47
  call void @llvm.dbg.declare(metadata i32* %x, metadata !42, metadata !DIExpression()), !dbg !48
  ret void, !dbg !49
}
```
2019-05-30 23:16:11 -04:00
Andrew Kelley
5e1003bc81
no-copy semantics for basic runtime function call variable init
```zig
export fn entry() void {
    var x: Foo = foo();
}
```

```llvm
define void @entry() #2 !dbg !37 {
Entry:
  %x = alloca %Foo, align 4
  call fastcc void @foo(%Foo* sret %x), !dbg !48
  call void @llvm.dbg.declare(metadata %Foo* %x, metadata !41, metadata !DIExpression()), !dbg !49
  ret void, !dbg !50
}
```
2019-05-30 17:11:14 -04:00
Andrew Kelley
b66438eb80
no "use of undeclared identifier" in dead comptime branches 2019-05-28 18:19:27 -04:00
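A rough sketch of the kind of code this allows, assuming dead-branch analysis applies to any comptime-known condition (identifier invented):

```zig
export fn entry() void {
    if (false) {
        // this branch is comptime-known to be dead, so the undeclared
        // identifier below no longer triggers a compile error
        entirelyMadeUpFunction();
    }
}
```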
Andrew Kelley
269a53b6af
introduce @hasDecl builtin function
closes #1439
2019-05-26 16:21:03 -04:00
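A minimal sketch of the new builtin (hypothetical container and decl names):

```zig
const Thing = struct {
    pub fn init() Thing {
        return Thing{};
    }
};

comptime {
    if (!@hasDecl(Thing, "init")) @compileError("expected Thing.init to exist");
    if (@hasDecl(Thing, "deinit")) @compileError("did not expect Thing.deinit");
}
```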
LemonBoy
6672ee9eb3 Fix too eager comptime evaluation of error ptr 2019-05-19 15:53:32 -04:00
Shawn Landden
1fdb24827f
breaking changes to all bit manipulation intrinsics
* `@clz`, `@ctz`, `@popCount`, `@bswap`, `@bitreverse` now
   have a type parameter
 * rename @bitreverse to @bitReverse
 * rename @bswap to @byteSwap

Closes #2119
Closes #2120
2019-05-16 16:37:58 -04:00
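A sketch of the renamed intrinsics and the new type parameter (hypothetical usage):

```zig
export fn entry(x: u16) u16 {
    _ = @popCount(u16, x); // now takes an explicit type parameter
    // renamed from @bitreverse and @bswap respectively
    return @byteSwap(u16, @bitReverse(u16, x));
}
```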
Andrew Kelley
e93a05b6e4
switching on error sets makes new error set for capture values
closes #769
2019-05-14 19:11:37 -04:00
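Hypothetical illustration of the narrowed capture type (helper name invented):

```zig
fn handle(err: anyerror) void {
    switch (err) {
        error.FileNotFound, error.AccessDenied => |e| {
            // after this commit `e` has the narrowed error set
            // error{FileNotFound, AccessDenied} rather than anyerror
            logMissing(e);
        },
        else => {},
    }
}

fn logMissing(e: anyerror) void {}
```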
Andrew Kelley
df4f77024e
else value when switching on an error set has an
optional capture value which is a subset.

see #769
2019-05-14 18:06:57 -04:00
Andrew Kelley
0099583bd3
C pointers support .? operator
see #1967
2019-05-08 17:39:00 -04:00
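A minimal hypothetical sketch:

```zig
export fn entry(ptr: [*c]u8) void {
    if (ptr != null) {
        // .? now works on C pointers, asserting the pointer is not null
        var non_null = ptr.?;
    }
}
```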
Jimmi Holst Christensen
3109c89850 fixed #1726 2019-04-06 10:56:07 +02:00
Andrew Kelley
15c316b0d8
add docs for assembly and fix global assembly parsing
Previously, global assembly was parsed expecting it to have
the template syntax. However, global assembly has no inputs,
outputs, or clobbers, and thus does not have template syntax.
This is now fixed.

This commit also adds a compile error for using volatile
on global assembly, since it is meaningless.

closes #1515
2019-03-20 19:00:23 -04:00
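A sketch of global assembly with no template syntax (x86-64 assumed; symbol name invented):

```zig
comptime {
    // no inputs, outputs, or clobbers: the string is emitted verbatim
    asm (
        \\.globl my_nop
        \\my_nop:
        \\  ret
    );
}
```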
Andrew Kelley
9c13e9b7ed
breaking changes to std.mem.Allocator interface API
Before, allocator implementations had to provide `allocFn`,
`reallocFn`, and `freeFn`.

Now, they must provide only `reallocFn` and `shrinkFn`.
Reallocating from a zero length slice is allocation, and
shrinking to a zero length slice is freeing.

When the new memory size is less than or equal to the
previous allocation size, `reallocFn` now has the option
to return `error.OutOfMemory` to indicate that the allocator
would not be able to take advantage of the new size.

For more details see #1306. This commit closes #1306.

This commit paves the way to solving #2009.

This commit also introduces a memory leak to all coroutines.
There is an issue where a coroutine calls the shrink function and
thereby frees its own stack frame, but the return value of `shrinkFn`
is a slice, which is implemented as an sret struct. Writing to
the return pointer then causes an invalid memory write. We could work
around it by having a global helper function which has a void
return type and calling that instead. But instead this hack will
suffice until I rework coroutines to be non-allocating. Basically
coroutines are not supported right now until they are reworked as
in #1194.
2019-03-15 17:57:21 -04:00
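A minimal sketch of the two-function interface described above: an allocator that always fails. The `reallocFn`/`shrinkFn` field names come from the commit message; the parameter lists are assumed from the std.mem.Allocator of that era.

```zig
const std = @import("std");
const Allocator = std.mem.Allocator;

// hypothetical always-failing allocator (field signatures assumed)
var failing_allocator = Allocator{
    .reallocFn = realloc,
    .shrinkFn = shrink,
};

fn realloc(self: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) Allocator.Error![]u8 {
    // reallocating from a zero-length slice is allocation; it always fails here
    return error.OutOfMemory;
}

fn shrink(self: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
    // shrinking must always succeed; shrinking to a zero-length slice is freeing
    return old_mem[0..new_size];
}
```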
Andrew Kelley
0c5f897904
fix @bitCast when src/dest types have mismatched handle_is_ptr
* separate BitCast and BitCastGen instructions
 * closes #991
 * closes #1934
 * unrelated: fix typo in docs (thanks gamester for pointing it out)
2019-02-22 08:49:27 -05:00
Andrew Kelley
1066004b79
better handling of arrays in packed structs
* Separate LoadPtr IR instructions into pass1 and pass2 variants.
 * Define `type_size_bits` for extern structs to be the same as
   their `@sizeOf(T) * 8` and allow them in packed structs.
 * More helpful error messages when trying to use types in
   packed structs that are not allowed.
 * Support arrays in packed structs even when they are not
   byte-aligned.
 * Add compile error for using arrays in packed structs when the
   padding bits would be problematic. This is necessary since
   we do not have packed arrays.

closes #677
2019-02-21 14:44:14 -05:00
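A simple hypothetical packed struct exercising the new array support (byte-aligned case):

```zig
const Header = packed struct {
    tag: u8,
    payload: [4]u8, // arrays are now allowed as packed struct fields
    flags: u8,
};

export fn entry() void {
    var h: Header = undefined;
    h.payload[0] = 1;
}
```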
Andrew Kelley
2bb795dc45
@sliceToBytes works at comptime
closes #262
2019-02-21 10:07:11 -05:00
Andrew Kelley
f330eebe4b fix using the result of @intCast to u0
closes #1817
2019-02-07 16:02:45 -05:00
Andrew Kelley
8c6fa982cd
SIMD: array to vector, vector to array, wrapping int add
also vectors and arrays now use the same ConstExprVal representation

See #903
2019-02-04 20:30:00 -05:00
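A hypothetical sketch of the three features named above (array to vector, vector to array, wrapping add):

```zig
export fn entry() void {
    const a = [4]i32{ 1, 2, 3, 4 };
    var v: @Vector(4, i32) = a; // array to vector
    var w = v +% v;             // wrapping integer add on vectors
    var b: [4]i32 = w;          // vector back to array
}
```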
Andrew Kelley
545064c1d9
introduce vector type for SIMD
See #903

 * create with `@Vector(len, ElemType)`
 * only wrapping addition is implemented

This feature is far from complete; this is only the beginning.
2019-01-30 23:39:25 -05:00
Andrew Kelley
581edd643f
backport copy elision changes
This commit contains everything from the copy-elision-2
branch that does not have to do with copy elision directly,
but is generally useful for master branch.

 * All const values know their parents, when applicable, not
   just structs and unions.
 * Null pointers in const values are represented explicitly,
   rather than as a HardCodedAddr value of 0.
 * Rename "maybe" to "optional" in various code locations.
 * Separate DeclVarSrc and DeclVarGen
 * Separate PtrCastSrc and PtrCastGen
 * Separate CmpxchgSrc and CmpxchgGen
 * Represent optional error set as an integer, using the 0 value.
   In a const value, it uses nullptr.
 * Introduce type_has_one_possible_value and use it where applicable.
 * Fix debug builds not setting memory to 0xaa when storing
   undefined.
 * Separate the type of a variable from the const value of a variable.
 * Use copy_const_val where appropriate.
 * Rearrange structs to pack data more efficiently.
 * Move test/cases/* to test/behavior/*
 * Use `std.debug.assertOrPanic` in behavior tests instead of
   `std.debug.assert`.
 * Fix outdated slice syntax in docs.
2019-01-29 22:30:30 -05:00
vegecode
f6cd68386d @bitreverse intrinsic, part of #767 (#1865)
* bitreverse - give bswap behavior

* bitreverse, comptime_ints, negative values still not working?

* bitreverse working for negative comptime ints

* Finished bitreverse test cases

* Undo exporting a bigint function. @bitreverse test name includes ampersand

* added docs entry for @bitreverse
2019-01-02 16:47:47 -05:00
Andrew Kelley
b883bc873d
breaking API changes to all readInt/writeInt functions & more
* add `@bswap` builtin function. See #767
 * comptime evaluation facilities are improved to be able to
   handle a `@ptrCast` with a backing array.
 * `@truncate` allows "truncating" a u0 value to any integer
   type, and the result is always comptime known to be `0`.
 * when specifying pointer alignment in a type expression,
   the alignment value of pointers which do not have addresses
   at runtime is ignored; such pointers always have the default/ABI alignment
 * threw in a fix to freebsd/x86_64.zig to update syntax from
   language changes
 * some improvements are pending #863

closes #638
closes #1733

std lib API changes
 * io.InStream().readIntNe renamed to readIntNative
 * io.InStream().readIntLe renamed to readIntLittle
 * io.InStream().readIntBe renamed to readIntBig
 * introduced io.InStream().readIntForeign
 * io.InStream().readInt has parameter order changed
 * io.InStream().readVarInt has parameter order changed
 * io.InStream().writeIntNe renamed to writeIntNative
 * introduced io.InStream().writeIntForeign
 * io.InStream().writeIntLe renamed to writeIntLittle
 * io.InStream().writeIntBe renamed to writeIntBig
 * io.InStream().writeInt has parameter order changed
 * mem.readInt has different parameters and semantics
 * introduced mem.readIntNative
 * introduced mem.readIntForeign
 * mem.readIntBE renamed to mem.readIntBig and different API
 * mem.readIntLE renamed to mem.readIntLittle and different API
 * introduced mem.readIntSliceNative
 * introduced mem.readIntSliceForeign
 * introduced mem.readIntSliceLittle
 * introduced mem.readIntSliceBig
 * introduced mem.readIntSlice
 * mem.writeInt has different parameters and semantics
 * introduced mem.writeIntNative
 * introduced mem.writeIntForeign
 * mem.writeIntBE renamed to mem.writeIntBig and different semantics
 * mem.writeIntLE renamed to mem.writeIntLittle and different semantics
 * introduced mem.writeIntSliceForeign
 * introduced mem.writeIntSliceNative
 * introduced mem.writeIntSliceBig
 * introduced mem.writeIntSliceLittle
 * introduced mem.writeIntSlice
 * removed mem.endianSwapIfLe
 * removed mem.endianSwapIfBe
 * removed mem.endianSwapIf
 * added mem.littleToNative
 * added mem.bigToNative
 * added mem.toNative
 * added mem.nativeTo
 * added mem.nativeToLittle
 * added mem.nativeToBig
2018-12-12 20:35:04 -05:00
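A round-trip sketch using two of the newly introduced slice variants (exact signatures assumed from that era):

```zig
const std = @import("std");
const mem = std.mem;

test "writeIntSliceLittle/readIntSliceLittle round trip (hypothetical)" {
    var buf: [4]u8 = undefined;
    mem.writeIntSliceLittle(u32, buf[0..], 0x12345678);
    std.debug.assert(mem.readIntSliceLittle(u32, buf[0..]) == 0x12345678);
}
```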
Jimmi Holst Christensen
be9bb0a857 Fixed #1663 and removed IrInstructionArrayLen 2018-11-16 10:15:13 -05:00
Andrew Kelley
2b395d4ede
remove @minValue,@maxValue; add std.math.minInt,maxInt
closes #1466
closes #1476
2018-10-26 15:01:51 -04:00
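The std replacements in use (hypothetical check):

```zig
const std = @import("std");

comptime {
    if (std.math.maxInt(u8) != 255) @compileError("unexpected maxInt");
    if (std.math.minInt(i8) != -128) @compileError("unexpected minInt");
}
```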
Andrew Kelley
dcfd15a7f0
the last number in a packed ptr is host int bytes
See #1121
2018-09-26 14:54:52 -04:00
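A hypothetical reading of the syntax, assuming the pointer attribute order is alignment, bit offset, host integer bytes (the last number, per this commit):

```zig
// load bits 4..8 of a single host byte through a bit-aligned pointer
fn upperNibble(p: *align(1:4:1) u4) u4 {
    return p.*;
}
```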
raulgrell
09a1162af5 builtin functions: @byteOffsetOf and @bitOffsetOf 2018-09-07 22:49:19 +01:00
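A minimal sketch of the two new builtins (hypothetical packed struct):

```zig
const Packed = packed struct {
    a: u4,
    b: u4,
    c: u8,
};

comptime {
    if (@bitOffsetOf(Packed, "b") != 4) @compileError("unexpected bit offset");
    if (@byteOffsetOf(Packed, "c") != 1) @compileError("unexpected byte offset");
}
```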
Andrew Kelley
68db9d5074
add compile error for comptime control flow inside runtime block
closes #834
2018-09-04 15:28:35 -04:00
kristopher tate
da5f3d5c4c src/ir_print.cpp: support @handle();
Tracking Issue #1296 ;
2018-08-02 16:50:08 +09:00