cb.Run(node);
}
+// Reads the file |name| from disk (resolving it against |build_settings|'s
+// primary source root, falling back to the secondary tree if configured),
+// then tokenizes and parses it. On success returns true and fills in |file|,
+// |tokens|, and |root|; on failure returns false and sets |err|. Per the
+// comment below, all work here happens outside the lock, and the |file|
+// pointer must not be handed out until the read is complete. |origin| is the
+// location that referenced this file, used for logging and error messages.
+bool DoLoadFile(const LocationRange& origin,
+                const BuildSettings* build_settings,
+                const SourceFile& name,
+                InputFile* file,
+                std::vector<Token>* tokens,
+                scoped_ptr<ParseNode>* root,
+                Err* err) {
+  // Do all of this stuff outside the lock. We should not give out file
+  // pointers until the read is complete.
+  if (g_scheduler->verbose_logging()) {
+    std::string logmsg = name.value();
+    if (origin.begin().file())
+      logmsg += " (referenced from " + origin.begin().Describe(false) + ")";
+    g_scheduler->Log("Loading", logmsg);
+  }
+
+  // Read.
+  base::FilePath primary_path = build_settings->GetFullPath(name);
+  ScopedTrace load_trace(TraceItem::TRACE_FILE_LOAD, name.value());
+  if (!file->Load(primary_path)) {
+    if (!build_settings->secondary_source_path().empty()) {
+      // Fall back to secondary source tree.
+      base::FilePath secondary_path =
+          build_settings->GetFullPathSecondary(name);
+      if (!file->Load(secondary_path)) {
+        // Neither location had the file; report both paths we tried.
+        *err = Err(origin, "Can't load input file.",
+                   "Unable to load:\n  " +
+                   FilePathToUTF8(primary_path) + "\n"
+                   "I also checked in the secondary tree for:\n  " +
+                   FilePathToUTF8(secondary_path));
+        return false;
+      }
+    } else {
+      // No secondary tree configured; only the primary path was tried.
+      *err = Err(origin,
+                 "Unable to load \"" + FilePathToUTF8(primary_path) + "\".");
+      return false;
+    }
+  }
+  load_trace.Done();
+
+  ScopedTrace exec_trace(TraceItem::TRACE_FILE_PARSE, name.value());
+
+  // Tokenize.
+  *tokens = Tokenizer::Tokenize(file, err);
+  if (err->has_error())
+    return false;
+
+  // Parse.
+  *root = Parser::Parse(*tokens, err);
+  if (err->has_error())
+    return false;
+
+  exec_trace.Done();
+  return true;
+}
+
} // namespace
InputFileManager::InputFileData::InputFileData(const SourceFile& file_name)
// Should be single-threaded by now.
STLDeleteContainerPairSecondPointers(input_files_.begin(),
input_files_.end());
+ STLDeleteContainerPointers(dynamic_inputs_.begin(), dynamic_inputs_.end());
}
bool InputFileManager::AsyncLoadFile(const LocationRange& origin,
base::AutoUnlock unlock(lock_);
data->completion_event->Wait();
}
+ // If there were multiple waiters on the same event, we now need to wake
+ // up the next one.
+ data->completion_event->Signal();
}
}
- // The other load could have failed. In this case that error will be printed
- // to the console, but we need to return something here, so make up a
+ // The other load could have failed. In this case that error was probably
+ // printed to the console, but we need to return something here, so make up a
// dummy error.
- if (!data->parsed_root)
- *err = Err(origin, "File parse failed");
+ //
+ // There is a race condition. The other load could have failed, but if the
+ // other thread is delayed for some reason, this thread could end up
+ // reporting the error to the scheduler first (since first error report
+ // wins). The user will see this one and the "real" one will be discarded.
+ if (!data->parsed_root) {
+ *err = Err(origin, "File parse failed.",
+ "If you see this, I'm really sorry, but a race condition has caused\n"
+ "me to eat your error message. It was crunchy. If the parse error\n"
+ "in your imported file isn't obvious, try re-running GN.");
+ }
return data->parsed_root.get();
}
+// Registers a new dynamically-created input named |name| and returns, via
+// the out-params, pointers to the storage the caller should fill in. The
+// InputFileData is heap-allocated and appended to |dynamic_inputs_| under
+// |lock_|, so the returned pointers stay valid after the lock is released
+// and the data is freed with the rest of |dynamic_inputs_| in this class's
+// destructor.
+void InputFileManager::AddDynamicInput(const SourceFile& name,
+                                       InputFile** file,
+                                       std::vector<Token>** tokens,
+                                       scoped_ptr<ParseNode>** parse_root) {
+  InputFileData* data = new InputFileData(name);
+  {
+    base::AutoLock lock(lock_);
+    dynamic_inputs_.push_back(data);
+  }
+  // Safe to dereference |data| outside the lock: only this caller uses it.
+  *file = &data->file;
+  *tokens = &data->tokens;
+  *parse_root = &data->parsed_root;
+}
+
int InputFileManager::GetInputFileCount() const {
base::AutoLock lock(lock_);
return static_cast<int>(input_files_.size());
const SourceFile& name,
InputFile* file,
Err* err) {
- // Do all of this stuff outside the lock. We should not give out file
- // pointers until the read is complete.
- if (g_scheduler->verbose_logging()) {
- std::string logmsg = name.value();
- if (origin.begin().file())
- logmsg += " (referenced from " + origin.begin().Describe(false) + ")";
- g_scheduler->Log("Loading", logmsg);
- }
-
- // Read.
- base::FilePath primary_path = build_settings->GetFullPath(name);
- ScopedTrace load_trace(TraceItem::TRACE_FILE_LOAD, name.value());
- if (!file->Load(primary_path)) {
- if (!build_settings->secondary_source_path().empty()) {
- // Fall back to secondary source tree.
- base::FilePath secondary_path =
- build_settings->GetFullPathSecondary(name);
- if (!file->Load(secondary_path)) {
- *err = Err(origin, "Can't load input file.",
- "Unable to load either \n" +
- FilePathToUTF8(primary_path) + " or \n" +
- FilePathToUTF8(secondary_path));
- return false;
- }
- } else {
- *err = Err(origin,
- "Unable to load \"" + FilePathToUTF8(primary_path) + "\".");
- return false;
- }
- }
- load_trace.Done();
-
- ScopedTrace exec_trace(TraceItem::TRACE_FILE_PARSE, name.value());
-
- // Tokenize.
- std::vector<Token> tokens = Tokenizer::Tokenize(file, err);
- if (err->has_error())
- return false;
-
- // Parse.
- scoped_ptr<ParseNode> root = Parser::Parse(tokens, err);
- if (err->has_error())
- return false;
+ std::vector<Token> tokens;
+ scoped_ptr<ParseNode> root;
+ bool success = DoLoadFile(origin, build_settings, name, file,
+ &tokens, &root, err);
+ // Can't return early. We have to ensure that the completion event is
+  // signaled in all cases because another thread could be blocked on this one.
+
+ // Save this pointer for running the callbacks below, which happens after the
+ // scoped ptr ownership is taken away inside the lock.
ParseNode* unowned_root = root.get();
- exec_trace.Done();
-
std::vector<FileLoadCallback> callbacks;
{
base::AutoLock lock(lock_);
InputFileData* data = input_files_[name];
data->loaded = true;
- data->tokens.swap(tokens);
- data->parsed_root = root.Pass();
+ if (success) {
+ data->tokens.swap(tokens);
+ data->parsed_root = root.Pass();
+ }
// Unblock waiters on this event.
//
// Run pending invocations. Theoretically we could schedule each of these
// separately to get some parallelism. But normally there will only be one
// item in the list, so that's extra overhead and complexity for no gain.
- for (size_t i = 0; i < callbacks.size(); i++)
- callbacks[i].Run(unowned_root);
- return true;
+ if (success) {
+ for (size_t i = 0; i < callbacks.size(); i++)
+ callbacks[i].Run(unowned_root);
+ }
+ return success;
}