From 2183c4bb444d80921783fc5a26d217c0e4a68d31 Mon Sep 17 00:00:00 2001 From: Vexu Date: Sat, 4 Jan 2020 13:16:37 +0200 Subject: [PATCH] std-c tokenizer string concatenation --- lib/std/c/tokenizer.zig | 24 +++++++++++++++++------- 1 file changed, 17 insertions(+), 7 deletions(-) diff --git a/lib/std/c/tokenizer.zig b/lib/std/c/tokenizer.zig index a27a39e6db..1d06c6a523 100644 --- a/lib/std/c/tokenizer.zig +++ b/lib/std/c/tokenizer.zig @@ -272,6 +272,7 @@ pub const Tokenizer = struct { U, L, StringLiteral, + AfterStringLiteral, CharLiteralStart, CharLiteral, EscapeSequence, @@ -565,8 +566,7 @@ pub const Tokenizer = struct { state = .EscapeSequence; }, '"' => { - self.index += 1; - break; + state = .AfterStringLiteral; }, '\n', '\r' => { result.id = .Invalid; @@ -574,6 +574,15 @@ pub const Tokenizer = struct { }, else => {}, }, + .AfterStringLiteral => switch (c) { + '"' => { + state = .StringLiteral; + }, + '\n'...'\r', ' ' => {}, + else => { + break; + }, + }, .CharLiteralStart => switch (c) { '\\' => { string = false; @@ -1109,6 +1118,7 @@ pub const Tokenizer = struct { } } else if (self.index == self.source.buffer.len) { switch (state) { + .AfterStringLiteral, .Start => {}, .u, .u8, .U, .L, .Identifier => { result.id = Token.getKeyword(self.source.buffer[result.start..self.index], self.prev_tok_id == .Hash and !self.pp_directive) orelse .Identifier; @@ -1351,11 +1361,11 @@ test "line continuation" { test "string prefix" { expectTokens( - \\"foo" - \\u"foo" - \\u8"foo" - \\U"foo" - \\L"foo" + \\"foo" "bar" + \\u"foo" "bar" + \\u8"foo" "bar" + \\U"foo" "bar" + \\L"foo" "bar" \\'foo' \\u'foo' \\U'foo'
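
For reference, the merging behaviour introduced by the new .AfterStringLiteral state can be sketched in isolation. The helper below is illustrative only and is not part of the patch: it ignores the escape-sequence, prefix (u, u8, U, L) and error handling of the real tokenizer, and only shows how a closing quote followed by whitespace and another opening quote extends the same token, mirroring the hunks above.

const std = @import("std");

// Illustrative sketch only, not part of the patch: a stand-alone scanner
// that mirrors the new .StringLiteral / .AfterStringLiteral states.
// Escape sequences, prefixes (u, u8, U, L) and error reporting are omitted.
fn stringLiteralEnd(source: []const u8, start: usize) usize {
    const State = enum { StringLiteral, AfterStringLiteral };
    var state: State = .StringLiteral;
    var i = start + 1; // skip the opening quote
    while (i < source.len) : (i += 1) {
        switch (state) {
            .StringLiteral => switch (source[i]) {
                // Closing quote: instead of ending the token, look ahead
                // for another adjacent literal to merge into it.
                '"' => state = .AfterStringLiteral,
                else => {},
            },
            .AfterStringLiteral => switch (source[i]) {
                // Another literal follows: keep extending the same token.
                '"' => state = .StringLiteral,
                // Whitespace between adjacent literals is skipped,
                // matching the '\n'...'\r', ' ' prong in the patch.
                '\n'...'\r', ' ' => {},
                // Anything else ends the token here.
                else => return i,
            },
        }
    }
    // Reaching end of input while in .AfterStringLiteral is a complete
    // token, like the new case added to the end-of-buffer switch above.
    return i;
}

test "adjacent string literals are merged" {
    const src = "\"foo\" \"bar\";";
    const end = stringLiteralEnd(src, 0);
    std.debug.assert(std.mem.eql(u8, src[0..end], "\"foo\" \"bar\""));
}

With this rule, each input line such as \\"foo" "bar" in the updated "string prefix" test still scans as a single string-literal token, which is presumably why the last hunk only changes the test input and leaves the expected token list untouched.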