Merge pull request #3697 from Vexu/container-docs

Implement container level doc comments
commit 57b8614a5a by Andrew Kelley, 2019-11-17 22:31:12 +00:00 (committed by GitHub)
14 changed files with 190 additions and 25 deletions
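
For readers landing here without context: a container-level doc comment starts with `//!` and documents the container it appears *inside* (the file itself, which is Zig's root struct, or a `struct`/`enum`/`union` body), while the existing `///` form documents the declaration that *follows* it. A minimal sketch of the syntax this PR implements, adapted from the zig fmt tests added below:

```zig
//! Documents this file (the root container).

/// Documents the declaration that follows.
pub const Widget = struct {
    //! Documents Widget itself, from inside its body.

    /// Documents the field `count`.
    count: u32,
};
```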

doc/docgen.zig

@@ -856,6 +856,7 @@ fn tokenizeAndPrintRaw(docgen_tokenizer: *Tokenizer, out: var, source_token: Tok
             .LineComment,
             .DocComment,
+            .ContainerDocComment,
             .ShebangLine,
             => {
                 try out.write("<span class=\"tok-comment\">");

lib/std/special/docs/index.html

@@ -484,7 +484,7 @@
         doc comments.
       </p>
     </div>
-    <div id="fnDocs" class="hidden"></div>
+    <div id="tldDocs" class="hidden"></div>
     <div id="sectFnErrors" class="hidden">
       <h2>Errors</h2>
       <div id="fnErrorsAnyError">

lib/std/special/docs/main.js

@@ -20,7 +20,7 @@
 var domListValues = document.getElementById("listValues");
 var domFnProto = document.getElementById("fnProto");
 var domFnProtoCode = document.getElementById("fnProtoCode");
-var domFnDocs = document.getElementById("fnDocs");
+var domTldDocs = document.getElementById("tldDocs");
 var domSectFnErrors = document.getElementById("sectFnErrors");
 var domListFnErrors = document.getElementById("listFnErrors");
 var domTableFnErrors = document.getElementById("tableFnErrors");
@@ -34,7 +34,6 @@
 var domListSearchResults = document.getElementById("listSearchResults");
 var domSectSearchNoResults = document.getElementById("sectSearchNoResults");
 var domSectInfo = document.getElementById("sectInfo");
-var domListInfo = document.getElementById("listInfo");
 var domTdTarget = document.getElementById("tdTarget");
 var domTdZigVer = document.getElementById("tdZigVer");
 var domHdrName = document.getElementById("hdrName");
@@ -102,7 +101,7 @@
 function render() {
     domStatus.classList.add("hidden");
     domFnProto.classList.add("hidden");
-    domFnDocs.classList.add("hidden");
+    domTldDocs.classList.add("hidden");
     domSectPkgs.classList.add("hidden");
     domSectTypes.classList.add("hidden");
     domSectNamespaces.classList.add("hidden");
@@ -190,11 +189,11 @@
     var docs = zigAnalysis.astNodes[decl.src].docs;
     if (docs != null) {
-        domFnDocs.innerHTML = markdown(docs);
+        domTldDocs.innerHTML = markdown(docs);
     } else {
-        domFnDocs.innerHTML = '<p>There are no doc comments for this declaration.</p>';
+        domTldDocs.innerHTML = '<p>There are no doc comments for this declaration.</p>';
     }
-    domFnDocs.classList.remove("hidden");
+    domTldDocs.classList.remove("hidden");
 }

 function typeIsErrSet(typeIndex) {
@@ -274,8 +273,8 @@
         docsSource = protoSrcNode.docs;
     }
     if (docsSource != null) {
-        domFnDocs.innerHTML = markdown(docsSource);
-        domFnDocs.classList.remove("hidden");
+        domTldDocs.innerHTML = markdown(docsSource);
+        domTldDocs.classList.remove("hidden");
     }

     domFnProto.classList.remove("hidden");
 }
@@ -893,8 +892,8 @@
     var docs = zigAnalysis.astNodes[decl.src].docs;
     if (docs != null) {
-        domFnDocs.innerHTML = markdown(docs);
-        domFnDocs.classList.remove("hidden");
+        domTldDocs.innerHTML = markdown(docs);
+        domTldDocs.classList.remove("hidden");
     }

     domFnProto.classList.remove("hidden");
@@ -906,8 +905,8 @@
     var docs = zigAnalysis.astNodes[decl.src].docs;
     if (docs != null) {
-        domFnDocs.innerHTML = markdown(docs);
-        domFnDocs.classList.remove("hidden");
+        domTldDocs.innerHTML = markdown(docs);
+        domTldDocs.classList.remove("hidden");
     }

     domFnProto.classList.remove("hidden");
@@ -957,6 +956,14 @@
     varsList.sort(byNameProperty);
     valsList.sort(byNameProperty);

+    if (container.src != null) {
+        var docs = zigAnalysis.astNodes[container.src].docs;
+        if (docs != null) {
+            domTldDocs.innerHTML = markdown(docs);
+            domTldDocs.classList.remove("hidden");
+        }
+    }
+
     if (typesList.length !== 0) {
         resizeDomList(domListTypes, typesList.length, '<li><a href="#"></a></li>');
         for (var i = 0; i < typesList.length; i += 1) {

lib/std/zig/ast.zig

@@ -576,7 +576,6 @@ pub const Node = struct {
     pub const Root = struct {
         base: Node,
-        doc_comments: ?*DocComment,
         decls: DeclList,
         eof_token: TokenIndex,
@@ -2254,7 +2253,6 @@
 test "iterate" {
     var root = Node.Root{
         .base = Node{ .id = Node.Id.Root },
-        .doc_comments = null,
         .decls = Node.Root.DeclList.init(std.debug.global_allocator),
         .eof_token = 0,
     };
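
With the `doc_comments` field gone from `Node.Root`, container doc comments now travel through the tree as ordinary `DocComment` nodes interleaved with the other top-level declarations (see the `parseContainerDocComments` and `renderTopLevelDecl` changes below). A hedged sketch of what consuming code could look like against this era's `std.zig.ast` API; the iterator and `cast` details are assumptions based on the `SegmentedList`-backed `DeclList`:

```zig
const std = @import("std");
const ast = std.zig.ast;

// Walk the root decls; each run of `//!` lines shows up as one DocComment node.
fn listContainerDocs(tree: *ast.Tree) void {
    var it = tree.root_node.decls.iterator(0);
    while (it.next()) |decl_ptr| {
        const decl = decl_ptr.*;
        if (decl.id == .DocComment) {
            const comment = decl.cast(ast.Node.DocComment).?;
            // `lines` holds one token index per `//!` line.
            std.debug.warn("container doc comment with {} line(s)\n", comment.lines.len);
        }
    }
}
```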

lib/std/zig/parse.zig

@@ -58,13 +58,6 @@ fn parseRoot(arena: *Allocator, it: *TokenIterator, tree: *Tree) Allocator.Error
     node.* = Node.Root{
         .base = Node{ .id = .Root },
         .decls = undefined,
-        // TODO: Because zig fmt collapses consecutive comments separated by blank lines into
-        // a single multi-line comment, it is currently impossible to have a container-level
-        // doc comment and NO doc comment on the first decl. For now, simply
-        // ignore the problem and assume that there will be no container-level
-        // doc comments.
-        // See: https://github.com/ziglang/zig/issues/2288
-        .doc_comments = null,
         .eof_token = undefined,
     };
     node.decls = parseContainerMembers(arena, it, tree) catch |err| {
@@ -94,6 +87,11 @@ fn parseContainerMembers(arena: *Allocator, it: *TokenIterator, tree: *Tree) !No
     var list = Node.Root.DeclList.init(arena);

     while (true) {
+        if (try parseContainerDocComments(arena, it, tree)) |node| {
+            try list.push(node);
+            continue;
+        }
+
         const doc_comments = try parseDocComment(arena, it, tree);

         if (try parseTestDecl(arena, it, tree)) |node| {
@@ -155,12 +153,35 @@ fn parseContainerMembers(arena: *Allocator, it: *TokenIterator, tree: *Tree) !No
             continue;
         }

         // Dangling doc comment
         if (doc_comments != null) {
             try tree.errors.push(AstError{
                 .UnattachedDocComment = AstError.UnattachedDocComment{ .token = doc_comments.?.firstToken() },
             });
         }
         break;
     }

     return list;
 }

+/// Eat a multiline container doc comment
+fn parseContainerDocComments(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
+    var lines = Node.DocComment.LineList.init(arena);
+    while (eatToken(it, .ContainerDocComment)) |line| {
+        try lines.push(line);
+    }
+
+    if (lines.len == 0) return null;
+
+    const node = try arena.create(Node.DocComment);
+    node.* = Node.DocComment{
+        .base = Node{ .id = .DocComment },
+        .lines = lines,
+    };
+    return &node.base;
+}
+
 /// TestDecl <- KEYWORD_test STRINGLITERAL Block
 fn parseTestDecl(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
     const test_token = eatToken(it, .Keyword_test) orelse return null;
lib/std/zig/parser_test.zig

@@ -2566,6 +2566,62 @@ test "zig fmt: comments at several places in struct init" {
     );
 }

+test "zig fmt: top level doc comments" {
+    try testCanonical(
+        \\//! tld 1
+        \\//! tld 2
+        \\//! tld 3
+        \\
+        \\// comment
+        \\
+        \\/// A doc
+        \\const A = struct {
+        \\    //! A tld 1
+        \\    //! A tld 2
+        \\    //! A tld 3
+        \\};
+        \\
+        \\/// B doc
+        \\const B = struct {
+        \\    //! B tld 1
+        \\    //! B tld 2
+        \\    //! B tld 3
+        \\
+        \\    /// b doc
+        \\    b: u32,
+        \\};
+        \\
+        \\/// C doc
+        \\const C = struct {
+        \\    //! C tld 1
+        \\    //! C tld 2
+        \\    //! C tld 3
+        \\
+        \\    /// c1 doc
+        \\    c1: u32,
+        \\
+        \\    //! C tld 4
+        \\    //! C tld 5
+        \\    //! C tld 6
+        \\
+        \\    /// c2 doc
+        \\    c2: u32,
+        \\};
+        \\
+    );
+    try testCanonical(
+        \\//! Top-level documentation.
+        \\
+        \\/// This is A
+        \\pub const A = usize;
+        \\
+    );
+    try testCanonical(
+        \\//! Nothing here
+        \\
+    );
+}
+
 const std = @import("std");
 const mem = std.mem;
 const warn = std.debug.warn;

lib/std/zig/render.zig

@@ -299,6 +299,17 @@ fn renderTopLevelDecl(allocator: *mem.Allocator, stream: var, tree: *ast.Tree, i
             assert(!decl.requireSemiColon());
             try renderExpression(allocator, stream, tree, indent, start_col, decl, Space.Newline);
         },

+        ast.Node.Id.DocComment => {
+            const comment = @fieldParentPtr(ast.Node.DocComment, "base", decl);
+            var it = comment.lines.iterator(0);
+            while (it.next()) |line_token_index| {
+                try renderToken(tree, stream, line_token_index.*, indent, start_col, Space.Newline);
+                if (it.peek()) |_| {
+                    try stream.writeByteNTimes(' ', indent);
+                }
+            }
+        },
         else => unreachable,
     }
 }

lib/std/zig/tokenizer.zig

@@ -142,6 +142,7 @@ pub const Token = struct {
         FloatLiteral,
         LineComment,
         DocComment,
+        ContainerDocComment,
         BracketStarBracket,
         BracketStarCBracket,
         ShebangLine,
@@ -211,6 +212,7 @@
             .FloatLiteral => "FloatLiteral",
             .LineComment => "LineComment",
             .DocComment => "DocComment",
+            .ContainerDocComment => "ContainerDocComment",
             .ShebangLine => "ShebangLine",

             .Bang => "!",
@@ -387,6 +389,7 @@ pub const Tokenizer = struct {
         LineComment,
         DocCommentStart,
         DocComment,
+        ContainerDocComment,
         Zero,
         IntegerLiteral,
         IntegerLiteralWithRadix,
@@ -1076,6 +1079,10 @@
                 '/' => {
                     state = State.DocCommentStart;
                 },
+                '!' => {
+                    result.id = Token.Id.ContainerDocComment;
+                    state = State.ContainerDocComment;
+                },
                 '\n' => break,
                 else => {
                     state = State.LineComment;
@@ -1096,7 +1103,7 @@
                     self.checkLiteralCharacter();
                 },
             },
-            State.LineComment, State.DocComment => switch (c) {
+            State.LineComment, State.DocComment, State.ContainerDocComment => switch (c) {
                 '\n' => break,
                 else => self.checkLiteralCharacter(),
             },
@@ -1234,6 +1241,9 @@
             State.DocComment, State.DocCommentStart => {
                 result.id = Token.Id.DocComment;
             },
+            State.ContainerDocComment => {
+                result.id = Token.Id.ContainerDocComment;
+            },

             State.NumberDot,
             State.NumberDotHex,
@@ -1601,6 +1611,8 @@ test "tokenizer - line comment and doc comment" {
     testTokenize("/// a", [_]Token.Id{Token.Id.DocComment});
     testTokenize("///", [_]Token.Id{Token.Id.DocComment});
     testTokenize("////", [_]Token.Id{Token.Id.LineComment});
+    testTokenize("//!", [_]Token.Id{Token.Id.ContainerDocComment});
+    testTokenize("//!!", [_]Token.Id{Token.Id.ContainerDocComment});
 }

 test "tokenizer - line comment followed by identifier" {

src-self-hosted/translate_c.zig

@@ -174,7 +174,6 @@ pub fn translate(
     tree.root_node.* = ast.Node.Root{
         .base = ast.Node{ .id = ast.Node.Id.Root },
         .decls = ast.Node.Root.DeclList.init(arena),
-        .doc_comments = null,
         // initialized with the eof token at the end
         .eof_token = undefined,
     };

src/all_types.hpp

@@ -968,6 +968,7 @@ struct AstNodeContainerDecl {
    AstNode *init_arg_expr; // enum(T), struct(endianness), or union(T), or union(enum(T))
    ZigList<AstNode *> fields;
    ZigList<AstNode *> decls;
+   Buf doc_comments;
    ContainerKind kind;
    ContainerLayout layout;

src/dump_analysis.cpp

@@ -1088,6 +1088,7 @@ static void anal_dump_node(AnalDumpCtx *ctx, const AstNode *node) {
             break;
         case NodeTypeContainerDecl:
             field_nodes = &node->data.container_decl.fields;
+            doc_comments_buf = &node->data.container_decl.doc_comments;
             break;
         default:
             break;

src/parser.cpp

@@ -493,6 +493,9 @@ static AstNode *ast_parse_root(ParseContext *pc) {
     node->data.container_decl.layout = ContainerLayoutAuto;
     node->data.container_decl.kind = ContainerKindStruct;
     node->data.container_decl.is_root = true;
+    if (buf_len(&members.doc_comments) != 0) {
+        node->data.container_decl.doc_comments = members.doc_comments;
+    }

     return node;
 }
@@ -514,6 +517,21 @@ static Token *ast_parse_doc_comments(ParseContext *pc, Buf *buf) {
     return first_doc_token;
 }

+static void ast_parse_container_doc_comments(ParseContext *pc, Buf *buf) {
+    if (buf_len(buf) != 0 && peek_token(pc)->id == TokenIdContainerDocComment) {
+        buf_append_char(buf, '\n');
+    }
+    Token *doc_token = nullptr;
+    while ((doc_token = eat_token_if(pc, TokenIdContainerDocComment))) {
+        if (buf->list.length == 0) {
+            buf_resize(buf, 0);
+        }
+        // chops off '//!' but leaves '\n'
+        buf_append_mem(buf, buf_ptr(pc->buf) + doc_token->start_pos + 3,
+                doc_token->end_pos - doc_token->start_pos - 3);
+    }
+}
+
 // ContainerMembers
 //     <- TestDecl ContainerMembers
 //      / TopLevelComptime ContainerMembers
@@ -523,7 +541,11 @@
 //      /
 static AstNodeContainerDecl ast_parse_container_members(ParseContext *pc) {
     AstNodeContainerDecl res = {};
+    Buf tld_doc_comment_buf = BUF_INIT;
+    buf_resize(&tld_doc_comment_buf, 0);
     for (;;) {
+        ast_parse_container_doc_comments(pc, &tld_doc_comment_buf);
+
         AstNode *test_decl = ast_parse_test_decl(pc);
         if (test_decl != nullptr) {
             res.decls.append(test_decl);
@@ -566,7 +588,7 @@ static AstNodeContainerDecl ast_parse_container_members(ParseContext *pc) {
         break;
     }

+    res.doc_comments = tld_doc_comment_buf;
     return res;
 }
@@ -2802,6 +2824,9 @@ static AstNode *ast_parse_container_decl_auto(ParseContext *pc) {
     res->data.container_decl.fields = members.fields;
     res->data.container_decl.decls = members.decls;
+    if (buf_len(&members.doc_comments) != 0) {
+        res->data.container_decl.doc_comments = members.doc_comments;
+    }

     return res;
 }

src/tokenizer.cpp

@@ -198,6 +198,7 @@ enum TokenizeState {
     TokenizeStateSawSlash,
     TokenizeStateSawSlash2,
     TokenizeStateSawSlash3,
+    TokenizeStateSawSlashBang,
     TokenizeStateSawBackslash,
     TokenizeStateSawPercent,
     TokenizeStateSawPlus,
@@ -209,6 +210,7 @@
     TokenizeStateSawBar,
     TokenizeStateSawBarBar,
     TokenizeStateDocComment,
+    TokenizeStateContainerDocComment,
     TokenizeStateLineComment,
     TokenizeStateLineString,
     TokenizeStateLineStringEnd,
@@ -938,6 +940,9 @@ void tokenize(Buf *buf, Tokenization *out) {
                 case '/':
                     t.state = TokenizeStateSawSlash3;
                     break;
+                case '!':
+                    t.state = TokenizeStateSawSlashBang;
+                    break;
                 case '\n':
                     cancel_token(&t);
                     t.state = TokenizeStateStart;
@@ -965,6 +970,19 @@
                         break;
                 }
                 break;
+            case TokenizeStateSawSlashBang:
+                switch (c) {
+                    case '\n':
+                        set_token_id(&t, t.cur_tok, TokenIdContainerDocComment);
+                        end_token(&t);
+                        t.state = TokenizeStateStart;
+                        break;
+                    default:
+                        set_token_id(&t, t.cur_tok, TokenIdContainerDocComment);
+                        t.state = TokenizeStateContainerDocComment;
+                        break;
+                }
+                break;
             case TokenizeStateSawBackslash:
                 switch (c) {
                     case '\\':
@@ -1055,6 +1073,17 @@
                         break;
                 }
                 break;
+            case TokenizeStateContainerDocComment:
+                switch (c) {
+                    case '\n':
+                        end_token(&t);
+                        t.state = TokenizeStateStart;
+                        break;
+                    default:
+                        // do nothing
+                        break;
+                }
+                break;
             case TokenizeStateSymbolFirstC:
                 switch (c) {
                     case '"':
@@ -1545,6 +1574,7 @@
         case TokenizeStateSawBarBar:
         case TokenizeStateLBracket:
         case TokenizeStateDocComment:
+        case TokenizeStateContainerDocComment:
             end_token(&t);
             break;
         case TokenizeStateSawDotDot:
@@ -1559,6 +1589,7 @@
         case TokenizeStateLineComment:
         case TokenizeStateSawSlash2:
         case TokenizeStateSawSlash3:
+        case TokenizeStateSawSlashBang:
             break;
     }
     if (t.state != TokenizeStateError) {
@@ -1606,6 +1637,7 @@ const char * token_name(TokenId id) {
         case TokenIdDash: return "-";
         case TokenIdDivEq: return "/=";
         case TokenIdDocComment: return "DocComment";
+        case TokenIdContainerDocComment: return "ContainerDocComment";
         case TokenIdDot: return ".";
         case TokenIdDotStar: return ".*";
         case TokenIdEllipsis2: return "..";

src/tokenizer.hpp

@@ -43,6 +43,7 @@ enum TokenId {
     TokenIdDash,
     TokenIdDivEq,
     TokenIdDocComment,
+    TokenIdContainerDocComment,
     TokenIdDot,
     TokenIdDotStar,
     TokenIdEllipsis2,