#include "tensorflow/core/grappler/costs/utils.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/utils.h"
+#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/strings/numbers.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/logging.h"
}
if (ready_nodes_->Empty()) {
- return Status(error::UNAVAILABLE, "No ready nodes in the graph.");
+ return errors::InvalidArgument("No ready nodes in the graph.");
}
-  if (!feed_nodes.empty())
-    LOG(ERROR) << "Some feed nodes were not found in the graph: "
-               << str_util::Join(feed_nodes, ",");
-
+  if (!feed_nodes.empty()) {
+    // The errors:: helpers StrCat their arguments, so no explicit
+    // strings::StrCat wrapper (or strcat.h include) is needed here.
+    return errors::InvalidArgument(
+        "Some feed nodes were not found in the graph: ",
+        str_util::Join(feed_nodes, ","));
+  }
initialized_ = true;
return Status::OK();
}
}
}
+ if (addn_list.empty()) {
+ return false;
+ }
+
GraphMemory memory(*item);
const std::unordered_map<string, DeviceProperties>& devices =
cluster->GetDevices();
VLOG(1) << "Missing properties for " << node->name();
continue;
}
+ const TensorShapeProto& shape =
+ properties.GetOutputProperties(node->name())[0].shape();
+ PartialTensorShape shp(shape);
+ if (!shp.IsFullyDefined()) {
+ VLOG(1) << "Shape not fully known for " << node->name();
+ continue;
+ }
// Compute a topological ordering for the node fanin.
std::unordered_map<NodeDef*, int> topo_order;
}
}
- const TensorShapeProto& shape =
- properties.GetOutputProperties(node->name())[0].shape();
DataType dtype = node->attr().at("T").type();
const string& device = node->device();
bool updated_graph = true;
for (int i = 0; i < 25 && updated_graph; ++i) {
updated_graph = false;
- if ((optimization_level_ == RewriterConfig::SCHEDULING_HEURISTICS ||
+ if ((optimization_level_ == RewriterConfig::DEFAULT_MEM_OPT ||
+ optimization_level_ == RewriterConfig::SCHEDULING_HEURISTICS ||
optimization_level_ == RewriterConfig::HEURISTICS) &&
cluster != nullptr) {
updated_graph |= SchedulingPass(cluster, &optimized_item);
optimizers.push_back(
std::unique_ptr<GraphOptimizer>(new LayoutOptimizer()));
}
- if (cfg_.memory_optimization() > 1) {
+ if (cfg_.memory_optimization() != RewriterConfig::NO_MEM_OPT) {
if (cfg_.memory_optimizer_target_node_name_prefix().empty()) {
optimizers.push_back(std::unique_ptr<GraphOptimizer>(
// Use the default target node name prefix "gradients/"
bool already_optimized = false;
for (const auto& optimizer : optimizers) {
if (!already_optimized) {
- auto status = optimizer->Optimize(cluster, item, optimized_graph);
+ Status status = optimizer->Optimize(cluster, item, optimized_graph);
string result;
if (!status.ok()) {
VLOG(1) << "Not able to apply optimizer " << optimizer->name()
<< " return status: " << result;
} else {
GrapplerItem optimized_item(item, std::move(*optimized_graph));
- auto status =
+ Status status =
optimizer->Optimize(cluster, optimized_item, optimized_graph);
string result;
if (!status.ok()) {
cfg.constant_folding() != RewriterConfig::OFF ||
cfg.dependency_optimization() != RewriterConfig::OFF ||
cfg.arithmetic_optimization() != RewriterConfig::OFF ||
- cfg.auto_parallel().enable() || cfg.memory_optimization() > 1 ||
+ cfg.auto_parallel().enable() ||
+ cfg.memory_optimization() != RewriterConfig::NO_MEM_OPT ||
!cfg.optimizers().empty();
}
// let's be conservative and preserve the graph as is.
return errors::InvalidArgument("Invalid input graph.");
}
- // Try to keep the nodes ordored somewhat topologically since this helps
+ // Try to keep the nodes ordered somewhat topologically since this helps
// further optimizations perform better.
for (int i = keep.size() - 1; i >= 0; --i) {
*runnable_item.graph.add_node() = *keep[i];
bool disable_model_pruning = 2;
enum MemOptType {
- // The default setting (currently disabled)
+ // The default setting (SCHEDULING_HEURISTICS only)
DEFAULT_MEM_OPT = 0;
// Disabled in the meta-optimizer.
NO_MEM_OPT = 1;