friend class ASTDeclWriter;
Stmt *Statement = nullptr;
+ bool IsSemiMissing = false;
TopLevelStmtDecl(DeclContext *DC, SourceLocation L, Stmt *S)
: Decl(TopLevelStmt, DC, L), Statement(S) {}
SourceRange getSourceRange() const override LLVM_READONLY;
Stmt *getStmt() { return Statement; }
const Stmt *getStmt() const { return Statement; }
+ // Replace the statement. Only valid while the semicolon is recorded as
+ // missing, i.e. when clang-repl rewrites the expression for value printing.
+ void setStmt(Stmt *S) {
+ assert(IsSemiMissing && "Operation supported for printing values only!");
+ Statement = S;
+ }
+ // True when this top-level statement was not terminated by ';' in the REPL
+ // input, which requests printing of the expression's value.
+ bool isSemiMissing() const { return IsSemiMissing; }
+ void setSemiMissing(bool Missing = true) { IsSemiMissing = Missing; }
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classofKind(Kind K) { return K == TopLevelStmt; }
// into the name of a header unit.
ANNOTATION(header_unit)
+// Annotation for end of input in clang-repl.
+ANNOTATION(repl_input_end)
+
#undef PRAGMA_ANNOTATION
#undef ANNOTATION
#undef TESTING_KEYWORD
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/OperatorPrecedence.h"
#include "clang/Basic/Specifiers.h"
+#include "clang/Basic/TokenKinds.h"
#include "clang/Lex/CodeCompletionHandler.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Sema/DeclSpec.h"
bool isEofOrEom() {
tok::TokenKind Kind = Tok.getKind();
return Kind == tok::eof || Kind == tok::annot_module_begin ||
- Kind == tok::annot_module_end || Kind == tok::annot_module_include;
+ Kind == tok::annot_module_end || Kind == tok::annot_module_include ||
+ Kind == tok::annot_repl_input_end;
}
/// Checks if the \p Level is valid for use in a fold expression.
// them.
if (Tok.is(tok::eof) ||
(Tok.isAnnotation() && !Tok.is(tok::annot_header_unit) &&
- !Tok.is(tok::annot_module_begin) && !Tok.is(tok::annot_module_end)))
+ !Tok.is(tok::annot_module_begin) && !Tok.is(tok::annot_module_end) &&
+ !Tok.is(tok::annot_repl_input_end)))
return;
// EmittedDirectiveOnThisLine takes priority over RequireSameLine.
// -traditional-cpp the lexer keeps /all/ whitespace, including comments.
PP.Lex(Tok);
continue;
+ } else if (Tok.is(tok::annot_repl_input_end)) {
+ // The REPL input-end annotation is a parser-internal marker; consume it
+ // without emitting anything, mirroring the eod handling below.
+ PP.Lex(Tok);
+ continue;
} else if (Tok.is(tok::eod)) {
// Don't print end of directive tokens, since they are typically newlines
// that mess up our line tracking. These come from unknown pre-processor
LastPTU.TUPart = C.getTranslationUnitDecl();
// Skip the end-of-input annotation left over from the last incremental input.
- if (P->getCurToken().is(tok::eof)) {
- P->ConsumeToken();
+ if (P->getCurToken().is(tok::annot_repl_input_end)) {
+ P->ConsumeAnyToken();
// FIXME: Clang does not call ExitScope on finalizing the regular TU, we
// might want to do that around HandleEndOfTranslationUnit.
P->ExitScope();
Token Tok;
do {
PP.Lex(Tok);
- } while (Tok.isNot(tok::eof));
+ } while (Tok.isNot(tok::annot_repl_input_end));
+ } else {
+ Token AssertTok;
+ PP.Lex(AssertTok);
+ assert(AssertTok.is(tok::annot_repl_input_end) &&
+ "Lexer must be EOF when starting incremental parse!");
}
- Token AssertTok;
- PP.Lex(AssertTok);
- assert(AssertTok.is(tok::eof) &&
- "Lexer must be EOF when starting incremental parse!");
-
if (CodeGenerator *CG = getCodeGen(Act.get())) {
std::unique_ptr<llvm::Module> M(CG->ReleaseModule());
CG->StartModule("incr_module_" + std::to_string(PTUs.size()),
return LeavingSubmodule;
}
}
-
// If this is the end of the main file, form an EOF token.
assert(CurLexer && "Got EOF but no current lexer set!");
const char *EndPos = getCurLexerEndPos();
Result.startToken();
CurLexer->BufferPtr = EndPos;
- CurLexer->FormTokenWithChars(Result, EndPos, tok::eof);
+
+ // In incremental processing mode (clang-repl), mark the end of this input
+ // with a dedicated annotation token rather than eof, so the parser can
+ // resume when more input arrives. The annotation value starts out null;
+ // it is later repurposed to carry an expression result for value printing.
+ if (isIncrementalProcessingEnabled()) {
+ CurLexer->FormTokenWithChars(Result, EndPos, tok::annot_repl_input_end);
+ Result.setAnnotationEndLoc(Result.getLocation());
+ Result.setAnnotationValue(nullptr);
+ } else {
+ CurLexer->FormTokenWithChars(Result, EndPos, tok::eof);
+ }
if (isCodeCompletionEnabled()) {
// Inserting the code-completion point increases the source buffer by 1,
case tok::annot_module_begin:
case tok::annot_module_end:
case tok::annot_module_include:
+ case tok::annot_repl_input_end:
// Ran out of tokens.
return false;
case tok::annot_module_begin:
case tok::annot_module_end:
case tok::annot_module_include:
+ case tok::annot_repl_input_end:
// Ran out of tokens.
return false;
case tok::annot_module_begin:
case tok::annot_module_end:
case tok::annot_module_include:
+ case tok::annot_repl_input_end:
return;
default:
SmallVector<Decl *, 2> DeclsInGroup;
DeclsInGroup.push_back(Actions.ActOnTopLevelStmtDecl(R.get()));
+
+ // A non-null annotation value on the input-end token means the expression
+ // statement's result was stashed there by the expression-statement parsing
+ // path for value printing; consume the token and mark the decl so the
+ // missing-semicolon state is preserved.
+ if (Tok.is(tok::annot_repl_input_end) &&
+ Tok.getAnnotationValue() != nullptr) {
+ ConsumeAnnotationToken();
+ cast<TopLevelStmtDecl>(DeclsInGroup.back())->setSemiMissing();
+ }
+
// Currently happens for things like -fms-extensions and use `__if_exists`.
for (Stmt *S : Stmts)
DeclsInGroup.push_back(Actions.ActOnTopLevelStmtDecl(S));
return ParseCaseStatement(StmtCtx, /*MissingCase=*/true, Expr);
}
- // Otherwise, eat the semicolon.
- ExpectAndConsumeSemi(diag::err_expected_semi_after_expr);
- return handleExprStmt(Expr, StmtCtx);
+ Token *CurTok = nullptr;
+ // If the semicolon is missing at the end of REPL input, consider if
+ // we want to do value printing. Note this is only enabled in C++ mode
+ // since part of the implementation requires C++ language features.
+ // Note we shouldn't eat the token since the callback needs it.
+ if (Tok.is(tok::annot_repl_input_end) && Actions.getLangOpts().CPlusPlus)
+ CurTok = &Tok;
+ else
+ // Otherwise, eat the semicolon.
+ ExpectAndConsumeSemi(diag::err_expected_semi_after_expr);
+
+ StmtResult R = handleExprStmt(Expr, StmtCtx);
+ if (CurTok && !R.isInvalid())
+ CurTok->setAnnotationValue(R.get());
+
+ return R;
}
/// ParseSEHTryBlockCommon
case tok::annot_module_begin:
case tok::annot_module_end:
case tok::annot_module_include:
+ case tok::annot_repl_input_end:
// Stop before we change submodules. They generally indicate a "good"
// place to pick up parsing again (except in the special case where
// we're trying to skip to EOF).
Sema::ModuleImportState &ImportState) {
DestroyTemplateIdAnnotationsRAIIObj CleanupRAII(*this);
- // Skip over the EOF token, flagging end of previous input for incremental
- // processing
- if (PP.isIncrementalProcessingEnabled() && Tok.is(tok::eof))
- ConsumeToken();
-
Result = nullptr;
switch (Tok.getKind()) {
case tok::annot_pragma_unused:
return false;
case tok::eof:
+ case tok::annot_repl_input_end:
// Check whether -fmax-tokens= was reached.
if (PP.getMaxTokens() != 0 && PP.getTokenCount() > PP.getMaxTokens()) {
PP.Diag(Tok.getLocation(), diag::warn_max_tokens_total)