hdrs = ["service_interface.h"],
visibility = [":friends"],
deps = [
+ ":status",
":xla_data_proto",
":xla_proto",
- "//tensorflow/core:lib",
],
)
visibility = ["//visibility:public"],
deps = [
":protobuf_util",
+ ":status",
":status_macros",
":statusor",
":types",
ExecuteParallelResponse response;
VLOG(1) << "making execute-parallel request: " << request.ShortDebugString();
- tensorflow::Status s = stub_->ExecuteParallel(&request, &response);
+ Status s = stub_->ExecuteParallel(&request, &response);
VLOG(1) << "done with request";
if (!s.ok()) {
ExecuteParallelResponse response;
VLOG(1) << "making execute-graph-parallel request: "
<< request.ShortDebugString();
- tensorflow::Status s = stub_->ExecuteGraphParallel(&request, &response);
+ Status s = stub_->ExecuteGraphParallel(&request, &response);
VLOG(1) << "done with request";
if (!s.ok()) {
GetDeviceHandlesResponse response;
VLOG(1) << "making get device request: " << request.ShortDebugString();
- tensorflow::Status s = stub_->GetDeviceHandles(&request, &response);
+ Status s = stub_->GetDeviceHandles(&request, &response);
VLOG(1) << "done with request";
if (!s.ok()) {
*request.mutable_data() = handle_;
UnregisterResponse response;
VLOG(1) << "requesting to unregister " << handle_.ShortDebugString();
- tensorflow::Status s = parent_->Unregister(&request, &response);
+ Status s = parent_->Unregister(&request, &response);
VLOG(1) << "done with request";
if (!s.ok()) {
<< "Must have a valid device ordinal that the executable was built for.";
}
-tensorflow::Status LocalExecutable::ValidateExecutionOptions(
+Status LocalExecutable::ValidateExecutionOptions(
const tensorflow::gtl::ArraySlice<const ShapedBuffer*> arguments,
const ExecutableRunOptions& run_options, const Backend& backend) {
const ComputationLayout& host_computation_layout =
return std::move(result);
}
-tensorflow::Status LocalExecutable::RecordArguments(
+Status LocalExecutable::RecordArguments(
const tensorflow::gtl::ArraySlice<const ShapedBuffer*> arguments,
SessionModule* session_module) {
session_module->clear_arguments();
return Status::OK();
}
-tensorflow::Status LocalExecutable::RecordResult(
- const ShapedBuffer* result, SessionModule* session_module) {
+Status LocalExecutable::RecordResult(const ShapedBuffer* result,
+ SessionModule* session_module) {
session_module->clear_result();
TF_ASSIGN_OR_RETURN(std::unique_ptr<Literal> literal,
LiteralFromShapedBuffer(*result));
// Validates that the given arguments and options satisfy various constraints
// of the computation.
- tensorflow::Status ValidateExecutionOptions(
+ Status ValidateExecutionOptions(
const tensorflow::gtl::ArraySlice<const ShapedBuffer*> arguments,
const ExecutableRunOptions& run_options, const Backend& backend);
// Records the arguments used to invoke the computation in a SessionModule
// proto.
- tensorflow::Status RecordArguments(
+ Status RecordArguments(
const tensorflow::gtl::ArraySlice<const ShapedBuffer*> arguments,
SessionModule* session_module);
// Records the result of the computation in a SessionModule proto.
- tensorflow::Status RecordResult(const ShapedBuffer* result,
- SessionModule* session_module);
+ Status RecordResult(const ShapedBuffer* result,
+ SessionModule* session_module);
// Returns a literal containing the contents of the given ShapedBuffer.
StatusOr<std::unique_ptr<Literal>> LiteralFromShapedBuffer(
LayoutUtil::SetToDefaultLayout(program_shape->mutable_result());
}
-/* static */ tensorflow::Status LayoutUtil::ValidateLayoutInShape(
- const Shape& shape) {
+/* static */ Status LayoutUtil::ValidateLayoutInShape(const Shape& shape) {
if (ShapeUtil::IsTuple(shape)) {
// Tuple shape.
if (shape.has_layout()) {
for (auto& element_shape : shape.tuple_shapes()) {
TF_RETURN_IF_ERROR(ValidateLayoutInShape(element_shape));
}
- return tensorflow::Status::OK();
+ return Status::OK();
} else if (ShapeUtil::IsOpaque(shape)) {
if (shape.has_layout()) {
return InvalidArgument("opaque should not have a layout field");
}
- return tensorflow::Status::OK();
+ return Status::OK();
} else {
// Array shape.
if (!shape.has_layout()) {
}
}
-/* static */ tensorflow::Status LayoutUtil::ValidateLayoutForShape(
- const Layout& layout, const Shape& shape) {
+/* static */ Status LayoutUtil::ValidateLayoutForShape(const Layout& layout,
+ const Shape& shape) {
if (ShapeUtil::IsTuple(shape)) {
return InvalidArgument("a single Layout is not valid for tuple shapes");
}
if (ShapeUtil::IsOpaque(shape)) {
- return tensorflow::Status::OK();
+ return Status::OK();
}
if (layout.format() == INVALID_FORMAT) {
}
}
- return tensorflow::Status::OK();
+ return Status::OK();
}
/* static */ void LayoutUtil::ClearLayout(Shape* shape) {
namespace {
// Internal helper for recursively copying layouts.
-tensorflow::Status CopyLayoutInternal(const Shape& src, Shape* dst) {
+Status CopyLayoutInternal(const Shape& src, Shape* dst) {
if (ShapeUtil::IsTuple(src) != ShapeUtil::IsTuple(*dst)) {
return InvalidArgument(
"cannot copy layout from shape: shape structure differs");
dst->clear_layout();
}
}
- return tensorflow::Status::OK();
+ return Status::OK();
}
} // namespace
/* static */
-tensorflow::Status LayoutUtil::CopyLayoutBetweenShapes(const Shape& src,
- Shape* dst) {
+Status LayoutUtil::CopyLayoutBetweenShapes(const Shape& src, Shape* dst) {
return CopyLayoutInternal(src, dst);
}
#include <string>
+#include "tensorflow/compiler/xla/status.h"
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/compiler/xla/xla_data.pb.h"
-#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/gtl/array_slice.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/types.h"
static void SetToDefaultLayout(ProgramShape* program_shape);
// Validates that the layout within the given shape is correct.
- static tensorflow::Status ValidateLayoutInShape(const Shape& shape);
+ static Status ValidateLayoutInShape(const Shape& shape);
// Validates that the provided layout satisfies invariants for the given
// shape.
- static tensorflow::Status ValidateLayoutForShape(const Layout& layout,
- const Shape& shape);
+ static Status ValidateLayoutForShape(const Layout& layout,
+ const Shape& shape);
// Clears the layout in the given Shape. After this function is called,
// HasLayout will return false for the shape.
// tuples. 'src' and 'dst' need not be compatible but the two shapes must
// have the same tuple structure (if any) and arrays must have the same
// rank. within the shapes must have the same number of dimensions.
- static tensorflow::Status CopyLayoutBetweenShapes(const Shape& src,
- Shape* dst);
+ static Status CopyLayoutBetweenShapes(const Shape& src, Shape* dst);
// Returns true if the layouts of lhs and rhs are equal, false
// otherwise. Recursively compares layouts of tuples.
return std::move(grpc_service);
}
-::grpc::Status DelegateRPC(std::function<tensorflow::Status()> op) {
- tensorflow::Status s = op();
+::grpc::Status DelegateRPC(std::function<Status()> op) {
+ Status s = op();
return tensorflow::ToGrpcStatus(s);
}
GRPCStub::~GRPCStub() = default;
-tensorflow::Status MakeRPC(
+Status MakeRPC(
const std::function<::grpc::Status(::grpc::ClientContext*)>& rpc_method) {
::grpc::ClientContext context;
::grpc::Status s = rpc_method(&context);
return tensorflow::FromGrpcStatus(s);
}
-tensorflow::Status GRPCStub::TransferToClient(
- const TransferToClientRequest* request,
- TransferToClientResponse* response) {
+Status GRPCStub::TransferToClient(const TransferToClientRequest* request,
+ TransferToClientResponse* response) {
return MakeRPC([this, request, response](::grpc::ClientContext* context) {
return grpc_stub_->TransferToClient(context, *request, response);
});
}
-tensorflow::Status GRPCStub::TransferToServer(
- const TransferToServerRequest* request,
- TransferToServerResponse* response) {
+Status GRPCStub::TransferToServer(const TransferToServerRequest* request,
+ TransferToServerResponse* response) {
return MakeRPC([this, request, response](::grpc::ClientContext* context) {
return grpc_stub_->TransferToServer(context, *request, response);
});
}
-tensorflow::Status GRPCStub::TransferToInfeed(
- const TransferToInfeedRequest* request,
- TransferToInfeedResponse* response) {
+Status GRPCStub::TransferToInfeed(const TransferToInfeedRequest* request,
+ TransferToInfeedResponse* response) {
return MakeRPC([this, request, response](::grpc::ClientContext* context) {
return grpc_stub_->TransferToInfeed(context, *request, response);
});
}
-tensorflow::Status GRPCStub::TransferFromOutfeed(
- const TransferFromOutfeedRequest* request,
- TransferFromOutfeedResponse* response) {
+Status GRPCStub::TransferFromOutfeed(const TransferFromOutfeedRequest* request,
+ TransferFromOutfeedResponse* response) {
return MakeRPC([this, request, response](::grpc::ClientContext* context) {
return grpc_stub_->TransferFromOutfeed(context, *request, response);
});
}
-tensorflow::Status GRPCStub::ResetDevice(const ResetDeviceRequest* request,
- ResetDeviceResponse* response) {
+Status GRPCStub::ResetDevice(const ResetDeviceRequest* request,
+ ResetDeviceResponse* response) {
return MakeRPC([this, request, response](::grpc::ClientContext* context) {
return grpc_stub_->ResetDevice(context, *request, response);
});
}
-tensorflow::Status GRPCStub::LoadComputationSnapshot(
+Status GRPCStub::LoadComputationSnapshot(
const LoadComputationSnapshotRequest* request,
LoadComputationSnapshotResponse* response) {
return MakeRPC([this, request, response](::grpc::ClientContext* context) {
});
}
-tensorflow::Status GRPCStub::Execute(const ExecuteRequest* request,
- ExecuteResponse* response) {
+Status GRPCStub::Execute(const ExecuteRequest* request,
+ ExecuteResponse* response) {
return MakeRPC([this, request, response](::grpc::ClientContext* context) {
return grpc_stub_->Execute(context, *request, response);
});
}
-tensorflow::Status GRPCStub::ExecuteGraph(const ExecuteGraphRequest* request,
- ExecuteResponse* response) {
+Status GRPCStub::ExecuteGraph(const ExecuteGraphRequest* request,
+ ExecuteResponse* response) {
return MakeRPC([this, request, response](::grpc::ClientContext* context) {
return grpc_stub_->ExecuteGraph(context, *request, response);
});
}
-tensorflow::Status GRPCStub::ExecuteParallel(
- const ExecuteParallelRequest* request, ExecuteParallelResponse* response) {
+Status GRPCStub::ExecuteParallel(const ExecuteParallelRequest* request,
+ ExecuteParallelResponse* response) {
return MakeRPC([this, request, response](::grpc::ClientContext* context) {
return grpc_stub_->ExecuteParallel(context, *request, response);
});
}
-tensorflow::Status GRPCStub::ExecuteGraphParallel(
+Status GRPCStub::ExecuteGraphParallel(
const ExecuteGraphParallelRequest* request,
ExecuteParallelResponse* response) {
return MakeRPC([this, request, response](::grpc::ClientContext* context) {
});
}
-tensorflow::Status GRPCStub::ExecuteAsync(const ExecuteAsyncRequest* request,
- ExecuteAsyncResponse* response) {
+Status GRPCStub::ExecuteAsync(const ExecuteAsyncRequest* request,
+ ExecuteAsyncResponse* response) {
return MakeRPC([this, request, response](::grpc::ClientContext* context) {
return grpc_stub_->ExecuteAsync(context, *request, response);
});
}
-tensorflow::Status GRPCStub::WaitForExecution(
- const WaitForExecutionRequest* request,
- WaitForExecutionResponse* response) {
+Status GRPCStub::WaitForExecution(const WaitForExecutionRequest* request,
+ WaitForExecutionResponse* response) {
return MakeRPC([this, request, response](::grpc::ClientContext* context) {
return grpc_stub_->WaitForExecution(context, *request, response);
});
}
-tensorflow::Status GRPCStub::DeconstructTuple(
- const DeconstructTupleRequest* request,
- DeconstructTupleResponse* response) {
+Status GRPCStub::DeconstructTuple(const DeconstructTupleRequest* request,
+ DeconstructTupleResponse* response) {
return MakeRPC([this, request, response](::grpc::ClientContext* context) {
return grpc_stub_->DeconstructTuple(context, *request, response);
});
}
-tensorflow::Status GRPCStub::GetComputationStats(
- const ComputationStatsRequest* request,
- ComputationStatsResponse* response) {
+Status GRPCStub::GetComputationStats(const ComputationStatsRequest* request,
+ ComputationStatsResponse* response) {
return MakeRPC([this, request, response](::grpc::ClientContext* context) {
return grpc_stub_->GetComputationStats(context, *request, response);
});
}
-tensorflow::Status GRPCStub::GetComputationGraphStats(
+Status GRPCStub::GetComputationGraphStats(
const ComputationGraphStatsRequest* request,
ComputationStatsResponse* response) {
return MakeRPC([this, request, response](::grpc::ClientContext* context) {
});
}
-tensorflow::Status GRPCStub::GetComputationShape(
- const GetComputationShapeRequest* request,
- GetComputationShapeResponse* response) {
+Status GRPCStub::GetComputationShape(const GetComputationShapeRequest* request,
+ GetComputationShapeResponse* response) {
return MakeRPC([this, request, response](::grpc::ClientContext* context) {
return grpc_stub_->GetComputationShape(context, *request, response);
});
}
-tensorflow::Status GRPCStub::GetShape(const GetShapeRequest* request,
- GetShapeResponse* response) {
+Status GRPCStub::GetShape(const GetShapeRequest* request,
+ GetShapeResponse* response) {
return MakeRPC([this, request, response](::grpc::ClientContext* context) {
return grpc_stub_->GetShape(context, *request, response);
});
}
-tensorflow::Status GRPCStub::GetDeviceHandles(
- const GetDeviceHandlesRequest* request,
- GetDeviceHandlesResponse* response) {
+Status GRPCStub::GetDeviceHandles(const GetDeviceHandlesRequest* request,
+ GetDeviceHandlesResponse* response) {
return MakeRPC([this, request, response](::grpc::ClientContext* context) {
return grpc_stub_->GetDeviceHandles(context, *request, response);
});
}
-tensorflow::Status GRPCStub::CreateChannelHandle(
- const CreateChannelHandleRequest* request,
- CreateChannelHandleResponse* response) {
+Status GRPCStub::CreateChannelHandle(const CreateChannelHandleRequest* request,
+ CreateChannelHandleResponse* response) {
return MakeRPC([this, request, response](::grpc::ClientContext* context) {
return grpc_stub_->CreateChannelHandle(context, *request, response);
});
}
// Methods used by ComputationBuilder.
-tensorflow::Status GRPCStub::Computation(const ComputationRequest* request,
- ComputationResponse* response) {
+Status GRPCStub::Computation(const ComputationRequest* request,
+ ComputationResponse* response) {
return MakeRPC([this, request, response](::grpc::ClientContext* context) {
return grpc_stub_->Computation(context, *request, response);
});
}
-tensorflow::Status GRPCStub::Op(const OpRequest* request,
- OpResponse* response) {
+Status GRPCStub::Op(const OpRequest* request, OpResponse* response) {
return MakeRPC([this, request, response](::grpc::ClientContext* context) {
return grpc_stub_->CreateOp(context, *request, response);
});
}
-tensorflow::Status GRPCStub::GetLocalShape(const GetLocalShapeRequest* request,
- GetLocalShapeResponse* response) {
+Status GRPCStub::GetLocalShape(const GetLocalShapeRequest* request,
+ GetLocalShapeResponse* response) {
return MakeRPC([this, request, response](::grpc::ClientContext* context) {
return grpc_stub_->GetLocalShape(context, *request, response);
});
}
-tensorflow::Status GRPCStub::SetReturnValue(
- const SetReturnValueRequest* request, SetReturnValueResponse* responses) {
+Status GRPCStub::SetReturnValue(const SetReturnValueRequest* request,
+ SetReturnValueResponse* responses) {
return MakeRPC([this, request, responses](::grpc::ClientContext* context) {
return grpc_stub_->SetReturnValue(context, *request, responses);
});
}
-tensorflow::Status GRPCStub::IsConstant(const IsConstantRequest* request,
- IsConstantResponse* response) {
+Status GRPCStub::IsConstant(const IsConstantRequest* request,
+ IsConstantResponse* response) {
return MakeRPC([this, request, response](::grpc::ClientContext* context) {
return grpc_stub_->IsConstant(context, *request, response);
});
}
-tensorflow::Status GRPCStub::ComputeConstant(
- const ComputeConstantRequest* request, ComputeConstantResponse* response) {
+Status GRPCStub::ComputeConstant(const ComputeConstantRequest* request,
+ ComputeConstantResponse* response) {
return MakeRPC([this, request, response](::grpc::ClientContext* context) {
return grpc_stub_->ComputeConstant(context, *request, response);
});
}
-tensorflow::Status GRPCStub::ComputeConstantGraph(
+Status GRPCStub::ComputeConstantGraph(
const ComputeConstantGraphRequest* request,
ComputeConstantResponse* response) {
return MakeRPC([this, request, response](::grpc::ClientContext* context) {
}
// Methods used by Computation.
-tensorflow::Status GRPCStub::SnapshotComputation(
- const SnapshotComputationRequest* request,
- SnapshotComputationResponse* response) {
+Status GRPCStub::SnapshotComputation(const SnapshotComputationRequest* request,
+ SnapshotComputationResponse* response) {
return MakeRPC([this, request, response](::grpc::ClientContext* context) {
return grpc_stub_->SnapshotComputation(context, *request, response);
});
}
// Methods used by GlobalData.
-tensorflow::Status GRPCStub::Unregister(const UnregisterRequest* request,
- UnregisterResponse* response) {
+Status GRPCStub::Unregister(const UnregisterRequest* request,
+ UnregisterResponse* response) {
return MakeRPC([this, request, response](::grpc::ClientContext* context) {
return grpc_stub_->Unregister(context, *request, response);
});
explicit GRPCStub(grpc::XlaService::Stub* stub) : grpc_stub_(stub) {}
~GRPCStub() override;
- tensorflow::Status TransferToClient(
- const TransferToClientRequest* arg,
- TransferToClientResponse* result) override;
+ Status TransferToClient(const TransferToClientRequest* arg,
+ TransferToClientResponse* result) override;
- tensorflow::Status TransferToServer(
- const TransferToServerRequest* arg,
- TransferToServerResponse* result) override;
+ Status TransferToServer(const TransferToServerRequest* arg,
+ TransferToServerResponse* result) override;
- tensorflow::Status TransferToInfeed(
- const TransferToInfeedRequest* arg,
- TransferToInfeedResponse* result) override;
+ Status TransferToInfeed(const TransferToInfeedRequest* arg,
+ TransferToInfeedResponse* result) override;
- tensorflow::Status TransferFromOutfeed(
- const TransferFromOutfeedRequest* arg,
- TransferFromOutfeedResponse* result) override;
+ Status TransferFromOutfeed(const TransferFromOutfeedRequest* arg,
+ TransferFromOutfeedResponse* result) override;
- tensorflow::Status ResetDevice(const ResetDeviceRequest* arg,
- ResetDeviceResponse* result) override;
+ Status ResetDevice(const ResetDeviceRequest* arg,
+ ResetDeviceResponse* result) override;
- tensorflow::Status LoadComputationSnapshot(
+ Status LoadComputationSnapshot(
const LoadComputationSnapshotRequest* request,
LoadComputationSnapshotResponse* result) override;
- tensorflow::Status Execute(const ExecuteRequest* arg,
- ExecuteResponse* result) override;
+ Status Execute(const ExecuteRequest* arg, ExecuteResponse* result) override;
- tensorflow::Status ExecuteGraph(const ExecuteGraphRequest* request,
- ExecuteResponse* response) override;
+ Status ExecuteGraph(const ExecuteGraphRequest* request,
+ ExecuteResponse* response) override;
- tensorflow::Status ExecuteParallel(const ExecuteParallelRequest* arg,
- ExecuteParallelResponse* result) override;
+ Status ExecuteParallel(const ExecuteParallelRequest* arg,
+ ExecuteParallelResponse* result) override;
- tensorflow::Status ExecuteGraphParallel(
- const ExecuteGraphParallelRequest* request,
- ExecuteParallelResponse* response) override;
+ Status ExecuteGraphParallel(const ExecuteGraphParallelRequest* request,
+ ExecuteParallelResponse* response) override;
- tensorflow::Status ExecuteAsync(const ExecuteAsyncRequest* arg,
- ExecuteAsyncResponse* result) override;
+ Status ExecuteAsync(const ExecuteAsyncRequest* arg,
+ ExecuteAsyncResponse* result) override;
- tensorflow::Status WaitForExecution(
- const WaitForExecutionRequest* arg,
- WaitForExecutionResponse* result) override;
+ Status WaitForExecution(const WaitForExecutionRequest* arg,
+ WaitForExecutionResponse* result) override;
- tensorflow::Status DeconstructTuple(
- const DeconstructTupleRequest* arg,
- DeconstructTupleResponse* result) override;
+ Status DeconstructTuple(const DeconstructTupleRequest* arg,
+ DeconstructTupleResponse* result) override;
- tensorflow::Status GetComputationStats(
- const ComputationStatsRequest* arg,
- ComputationStatsResponse* result) override;
+ Status GetComputationStats(const ComputationStatsRequest* arg,
+ ComputationStatsResponse* result) override;
- tensorflow::Status GetComputationGraphStats(
- const ComputationGraphStatsRequest* request,
- ComputationStatsResponse* response) override;
+ Status GetComputationGraphStats(const ComputationGraphStatsRequest* request,
+ ComputationStatsResponse* response) override;
- tensorflow::Status GetComputationShape(
- const GetComputationShapeRequest* arg,
- GetComputationShapeResponse* result) override;
+ Status GetComputationShape(const GetComputationShapeRequest* arg,
+ GetComputationShapeResponse* result) override;
- tensorflow::Status GetShape(const GetShapeRequest* arg,
- GetShapeResponse* result) override;
+ Status GetShape(const GetShapeRequest* arg,
+ GetShapeResponse* result) override;
- tensorflow::Status GetDeviceHandles(
- const GetDeviceHandlesRequest* arg,
- GetDeviceHandlesResponse* result) override;
+ Status GetDeviceHandles(const GetDeviceHandlesRequest* arg,
+ GetDeviceHandlesResponse* result) override;
- tensorflow::Status CreateChannelHandle(
- const CreateChannelHandleRequest* arg,
- CreateChannelHandleResponse* result) override;
+ Status CreateChannelHandle(const CreateChannelHandleRequest* arg,
+ CreateChannelHandleResponse* result) override;
// Methods used by ComputationBuilder.
- tensorflow::Status Computation(const ComputationRequest* arg,
- ComputationResponse* result) override;
+ Status Computation(const ComputationRequest* arg,
+ ComputationResponse* result) override;
- tensorflow::Status Op(const OpRequest* arg, OpResponse* result) override;
- tensorflow::Status GetLocalShape(const GetLocalShapeRequest* arg,
- GetLocalShapeResponse* result) override;
+ Status Op(const OpRequest* arg, OpResponse* result) override;
+ Status GetLocalShape(const GetLocalShapeRequest* arg,
+ GetLocalShapeResponse* result) override;
- tensorflow::Status SetReturnValue(const SetReturnValueRequest* arg,
- SetReturnValueResponse* results) override;
+ Status SetReturnValue(const SetReturnValueRequest* arg,
+ SetReturnValueResponse* results) override;
- tensorflow::Status IsConstant(const IsConstantRequest* arg,
- IsConstantResponse* result) override;
+ Status IsConstant(const IsConstantRequest* arg,
+ IsConstantResponse* result) override;
- tensorflow::Status ComputeConstant(const ComputeConstantRequest* arg,
- ComputeConstantResponse* result) override;
+ Status ComputeConstant(const ComputeConstantRequest* arg,
+ ComputeConstantResponse* result) override;
- tensorflow::Status ComputeConstantGraph(
- const ComputeConstantGraphRequest* arg,
- ComputeConstantResponse* result) override;
+ Status ComputeConstantGraph(const ComputeConstantGraphRequest* arg,
+ ComputeConstantResponse* result) override;
// Methods used by Computation.
- tensorflow::Status SnapshotComputation(
- const SnapshotComputationRequest* ag,
- SnapshotComputationResponse* result) override;
+ Status SnapshotComputation(const SnapshotComputationRequest* ag,
+ SnapshotComputationResponse* result) override;
// Methods used by GlobalData.
- tensorflow::Status Unregister(const UnregisterRequest* arg,
- UnregisterResponse* result) override;
+ Status Unregister(const UnregisterRequest* arg,
+ UnregisterResponse* result) override;
grpc::XlaService::Stub* service() { return grpc_stub_; }
return result;
}
-tensorflow::Status AllocationTracker::Unregister(const GlobalDataHandle& data) {
+Status AllocationTracker::Unregister(const GlobalDataHandle& data) {
tensorflow::mutex_lock lock(mutex_);
VLOG(2) << "Unregister("
<< "handle: " << data.handle() << ")";
for (auto& shaped_buffer : it->second) {
shaped_buffer.reset();
}
- return tensorflow::Status::OK();
+ return Status::OK();
}
StatusOr<std::vector<GlobalDataHandle>> AllocationTracker::DeconstructTuple(
} else {
allocation.ref_count--;
}
- return tensorflow::Status::OK();
+ return Status::OK();
}
} // namespace xla
return std::move(liveness);
}
-tensorflow::Status BufferLiveness::Analyze() {
+Status BufferLiveness::Analyze() {
TF_ASSIGN_OR_RETURN(points_to_analysis_, TuplePointsToAnalysis::Run(module_));
for (auto* computation : module_->computations()) {
if (computation->IsFusionComputation()) {
}
XLA_VLOG_LINES(3, ToString());
- return tensorflow::Status::OK();
+ return Status::OK();
}
string BufferLiveness::ToString() const {
// Perform buffer liveness analysis. This method must be called prior to
// MayInterfere or MaybeLiveOut.
- tensorflow::Status Analyze();
+ Status Analyze();
// Returns true if the live range of the buffer of 'a' is strictly before the
// live range of the buffer of 'b' (they do not overlap).
// Override Service methods that require or imply the existence of an
// execute backend. Note that this does not include TransferToClient, as
// computing constants produces global data that we may wish to transfer.
- tensorflow::Status Execute(const ExecuteRequest* arg,
- ExecuteResponse* result) override {
+ Status Execute(const ExecuteRequest* arg, ExecuteResponse* result) override {
return Unimplemented("CompileOnlyService does not support execution.");
}
- tensorflow::Status ExecuteParallel(const ExecuteParallelRequest* arg,
- ExecuteParallelResponse* result) override {
+ Status ExecuteParallel(const ExecuteParallelRequest* arg,
+ ExecuteParallelResponse* result) override {
return Unimplemented("CompileOnlyService does not support execution.");
}
- tensorflow::Status GetDeviceHandles(
- const GetDeviceHandlesRequest* arg,
- GetDeviceHandlesResponse* result) override {
+ Status GetDeviceHandles(const GetDeviceHandlesRequest* arg,
+ GetDeviceHandlesResponse* result) override {
return Unimplemented("CompileOnlyService does not support devices.");
}
- tensorflow::Status ExecuteAsync(const ExecuteAsyncRequest* arg,
- ExecuteAsyncResponse* result) override {
+ Status ExecuteAsync(const ExecuteAsyncRequest* arg,
+ ExecuteAsyncResponse* result) override {
return Unimplemented("CompileOnlyService does not support execution.");
}
- tensorflow::Status WaitForExecution(
- const WaitForExecutionRequest* arg,
- WaitForExecutionResponse* result) override {
+ Status WaitForExecution(const WaitForExecutionRequest* arg,
+ WaitForExecutionResponse* result) override {
return Unimplemented("CompileOnlyService does not support execution.");
}
- tensorflow::Status TransferToServer(
- const TransferToServerRequest* arg,
- TransferToServerResponse* result) override {
+ Status TransferToServer(const TransferToServerRequest* arg,
+ TransferToServerResponse* result) override {
return Unimplemented(
"CompileOnlyService does not support device data transfers.");
}
- tensorflow::Status TransferToInfeed(
- const TransferToInfeedRequest* arg,
- TransferToInfeedResponse* result) override {
+ Status TransferToInfeed(const TransferToInfeedRequest* arg,
+ TransferToInfeedResponse* result) override {
return Unimplemented(
"CompileOnlyService does not support device data transfers.");
}
- tensorflow::Status TransferFromOutfeed(
- const TransferFromOutfeedRequest* arg,
- TransferFromOutfeedResponse* result) override {
+ Status TransferFromOutfeed(const TransferFromOutfeedRequest* arg,
+ TransferFromOutfeedResponse* result) override {
return Unimplemented(
"CompileOnlyService does not support device data transfers.");
}
- tensorflow::Status ResetDevice(const ResetDeviceRequest* arg,
- ResetDeviceResponse* result) override {
+ Status ResetDevice(const ResetDeviceRequest* arg,
+ ResetDeviceResponse* result) override {
return Unimplemented("CompileOnlyService does not support devices.");
}
}
}
}
- return tensorflow::Status::OK();
+ return Status::OK();
}
} // namespace cpu
} // namespace xla
hlo_module_config_(hlo_module_config),
target_machine_features_(target_machine_features) {}
-/* static */ tensorflow::Status DotOpEmitter::EmitDotOperation(
+/* static */ Status DotOpEmitter::EmitDotOperation(
const HloInstruction& dot, const llvm_ir::IrArray& target_array,
const llvm_ir::IrArray& lhs_array, const llvm_ir::IrArray& rhs_array,
const llvm_ir::IrArray* addend_array,
return true;
}
-tensorflow::Status DotOpEmitter::Emit() {
+Status DotOpEmitter::Emit() {
// The dot operation performs a sum of products over dimension 0 of the left
// hand side operand and dimension 1 of the right hand side operand.
//
// loop.
ir_builder_->SetInsertPoint(loop_nest.GetOuterLoopExitBasicBlock());
- return tensorflow::Status::OK();
+ return Status::OK();
}
-tensorflow::Status DotOpEmitter::EmitScalarDot() {
+Status DotOpEmitter::EmitScalarDot() {
// A scalar dot is just a scalar multiply.
llvm::Value* result;
llvm::Value* lhs_value =
result = ir_builder_->CreateFMul(lhs_value, rhs_value);
}
target_array_.EmitWriteArrayElement(/*index=*/{}, result, ir_builder_);
- return tensorflow::Status::OK();
+ return Status::OK();
}
-tensorflow::Status DotOpEmitter::EmitCallToRuntime() {
+Status DotOpEmitter::EmitCallToRuntime() {
// The signature of the Eigen runtime matmul function is:
//
// (void)(void* run_options, float* out, float* lhs, float* rhs,
ir_builder_->getInt64(mat_mult_dims.k),
ir_builder_->getInt32(transpose_lhs),
ir_builder_->getInt32(transpose_rhs)});
- return tensorflow::Status::OK();
+ return Status::OK();
}
DotOpEmitter::MatMultDims DotOpEmitter::GetMatMultDims() const {
// dimensions as the result, and the result is computed as `addend_array` +
// dot(`lhs_array`, `rhs_array`). A non-null `addend_array` is only supported
// for Matrix-vector products.
- static tensorflow::Status EmitDotOperation(
+ static Status EmitDotOperation(
const HloInstruction& dot, const llvm_ir::IrArray& target_array,
const llvm_ir::IrArray& lhs_array, const llvm_ir::IrArray& rhs_array,
const llvm_ir::IrArray* addend_array,
const TargetMachineFeatures& target_machine_features);
// Emits the IR to perform the dot operation.
- tensorflow::Status Emit();
+ Status Emit();
// Emits instructions to perform a scalar dot product (a multiply of the
// LHS and RHS) and store the results in the target.
- tensorflow::Status EmitScalarDot();
+ Status EmitScalarDot();
// Emit an LLVM IR implementation of the dot operation if we can. Returns
// true if an LLVM IR implementation was emitted.
bool EmitLlvmIrDotIfProfitable();
// Emits a call to the CPU runtime to perform the matrix multiply.
- tensorflow::Status EmitCallToRuntime();
+ Status EmitCallToRuntime();
// Emits a series of nested loops for iterating over an operand array in the
// dot operation. Loops are constructed in major to minor dimension layout
}
// Must be a nop for null pointers.
- virtual tensorflow::Status Deallocate(int device_ordinal,
- se::DeviceMemoryBase mem) = 0;
+ virtual Status Deallocate(int device_ordinal, se::DeviceMemoryBase mem) = 0;
// Return the platform that the allocator allocates memory on.
const se::Platform* platform() const { return platform_; }
// Pull in two-arg overload that sets retry_on_failure to true.
using DeviceMemoryAllocator::Allocate;
- tensorflow::Status Deallocate(int device_ordinal,
- se::DeviceMemoryBase mem) override;
+ Status Deallocate(int device_ordinal, se::DeviceMemoryBase mem) override;
bool AllowsAsynchronousDeallocation() const override;
}
}
-tensorflow::Status AsyncExecution::BlockUntilDone() const {
+Status AsyncExecution::BlockUntilDone() const {
for (auto& stream : streams_) {
TF_RETURN_IF_ERROR(stream->BlockHostUntilDone());
}
- return tensorflow::Status::OK();
+ return Status::OK();
}
ExecutionTracker::ExecutionTracker() : next_handle_(1) {}
return execution_handle;
}
-tensorflow::Status ExecutionTracker::Unregister(const ExecutionHandle& handle) {
+Status ExecutionTracker::Unregister(const ExecutionHandle& handle) {
tensorflow::mutex_lock lock(execution_mutex_);
auto it = handle_to_execution_.find(handle.handle());
if (it == handle_to_execution_.end()) {
handle.handle());
}
handle_to_execution_.erase(handle.handle());
- return tensorflow::Status::OK();
+ return Status::OK();
}
StatusOr<const AsyncExecution*> ExecutionTracker::Resolve(
AsyncExecution(Backend* backend, std::vector<Backend::StreamPtr> streams,
const ExecutionProfile& profile, GlobalDataHandle result);
- tensorflow::Status BlockUntilDone() const;
+ Status BlockUntilDone() const;
const GlobalDataHandle& result() const { return result_; }
GlobalDataHandle data);
// Unregisters the execution for the given handle.
- tensorflow::Status Unregister(const ExecutionHandle& handle);
+ Status Unregister(const ExecutionHandle& handle);
// Resolves the given ExecutionHandle to an AsyncExecution. Returns an
// error status if the given handle is not found, which means that the
}
}
-tensorflow::Status BufferAllocations::TearDown(
+Status BufferAllocations::TearDown(
const std::set<se::DeviceMemoryBase>& live_addresses) {
// Deallocate temporary buffers, taking care to try to deallocate all of them
// even if one of the deallocations fails.
// Tears down all buffers allocated by this object that are not in
// `live_addresses`.
- tensorflow::Status TearDown(
- const std::set<se::DeviceMemoryBase>& live_addresses);
+ Status TearDown(const std::set<se::DeviceMemoryBase>& live_addresses);
private:
BufferAllocations(BufferAllocation::Index buffer_count, int device_ordinal,
destination_buffer_(destination_buffer),
mem_size_(mem_size) {}
-tensorflow::Status HostToDeviceCopyThunk::ExecuteOnStream(
+Status HostToDeviceCopyThunk::ExecuteOnStream(
const BufferAllocations& buffer_allocations, se::Stream* stream) {
se::DeviceMemoryBase destination_data =
buffer_allocations.GetDeviceAddress(destination_buffer_);
stream->ThenMemcpy(&destination_data, source_address_, mem_size_);
- return tensorflow::Status::OK();
+ return Status::OK();
}
DeviceToDeviceCopyThunk::DeviceToDeviceCopyThunk(
destination_buffer_(destination_buffer),
mem_size_(mem_size) {}
-tensorflow::Status DeviceToDeviceCopyThunk::ExecuteOnStream(
+Status DeviceToDeviceCopyThunk::ExecuteOnStream(
const BufferAllocations& buffer_allocations, se::Stream* stream) {
se::DeviceMemoryBase destination_data =
buffer_allocations.GetDeviceAddress(destination_buffer_);
se::DeviceMemoryBase source_data =
buffer_allocations.GetDeviceAddress(source_buffer_);
stream->ThenMemcpy(&destination_data, source_data, mem_size_);
- return tensorflow::Status::OK();
+ return Status::OK();
}
} // namespace gpu
} // namespace xla
HostToDeviceCopyThunk(const HostToDeviceCopyThunk&) = delete;
HostToDeviceCopyThunk& operator=(const HostToDeviceCopyThunk&) = delete;
- tensorflow::Status ExecuteOnStream(
- const BufferAllocations& buffer_allocations, se::Stream* stream) override;
+ Status ExecuteOnStream(const BufferAllocations& buffer_allocations,
+ se::Stream* stream) override;
private:
const void* source_address_;
DeviceToDeviceCopyThunk(const DeviceToDeviceCopyThunk&) = delete;
DeviceToDeviceCopyThunk& operator=(const DeviceToDeviceCopyThunk&) = delete;
- tensorflow::Status ExecuteOnStream(
- const BufferAllocations& buffer_allocations, se::Stream* stream) override;
+ Status ExecuteOnStream(const BufferAllocations& buffer_allocations,
+ se::Stream* stream) override;
private:
const BufferAllocation::Slice source_buffer_;
input_shape_(input_shape),
output_shape_(output_shape) {}
-tensorflow::Status FftThunk::ExecuteOnStream(
- const BufferAllocations& buffer_allocations, se::Stream* stream) {
+Status FftThunk::ExecuteOnStream(const BufferAllocations& buffer_allocations,
+ se::Stream* stream) {
VLOG(3) << "FFT type: " << FftTypeToString(fft_type_);
VLOG(3) << "Input shape: " << ShapeUtil::HumanStringWithLayout(input_shape_);
VLOG(3) << "Output shape: "
LOG(FATAL) << "unsupported fft type";
}
if (launch_ok) {
- return tensorflow::Status::OK();
+ return Status::OK();
}
return InternalError("Unable to launch fft for thunk %p with type %s", this,
FftTypeToString(fft_type_).c_str());
FftThunk& operator=(const FftThunk&) = delete; // Cannot share fft_plan_
// Does the FFT for the thunk on "stream".
- tensorflow::Status ExecuteOnStream(
- const BufferAllocations& buffer_allocations, se::Stream* stream) override;
+ Status ExecuteOnStream(const BufferAllocations& buffer_allocations,
+ se::Stream* stream) override;
private:
const se::fft::Type fft_type_;
body_thunk_sequence_(
MakeUnique<SequentialThunk>(std::move(*body_thunk_sequence), hlo)) {}
-tensorflow::Status ForThunk::Initialize(const GpuExecutable& executable,
- se::StreamExecutor* executor) {
+Status ForThunk::Initialize(const GpuExecutable& executable,
+ se::StreamExecutor* executor) {
TF_RETURN_IF_ERROR(body_thunk_sequence_->Initialize(executable, executor));
- return tensorflow::Status::OK();
+ return Status::OK();
}
-tensorflow::Status ForThunk::ExecuteOnStream(
- const BufferAllocations& buffer_allocations, se::Stream* stream) {
+Status ForThunk::ExecuteOnStream(const BufferAllocations& buffer_allocations,
+ se::Stream* stream) {
for (int64 i = 0; i < loop_limit_; ++i) {
// Invoke loop body thunk sequence.
TF_RETURN_IF_ERROR(
body_thunk_sequence_->ExecuteOnStream(buffer_allocations, stream));
}
- return tensorflow::Status::OK();
+ return Status::OK();
}
} // namespace gpu
ForThunk(const ForThunk&) = delete;
ForThunk& operator=(const ForThunk&) = delete;
- tensorflow::Status Initialize(const GpuExecutable& executable,
- se::StreamExecutor* executor) override;
- tensorflow::Status ExecuteOnStream(
- const BufferAllocations& buffer_allocations, se::Stream* stream) override;
+ Status Initialize(const GpuExecutable& executable,
+ se::StreamExecutor* executor) override;
+ Status ExecuteOnStream(const BufferAllocations& buffer_allocations,
+ se::Stream* stream) override;
private:
const int64 loop_limit_;
output_shape_(output_shape),
alpha_(alpha) {}
-tensorflow::Status GemmThunk::ExecuteOnStream(
- const BufferAllocations& buffer_allocations, se::Stream* stream) {
+Status GemmThunk::ExecuteOnStream(const BufferAllocations& buffer_allocations,
+ se::Stream* stream) {
VLOG(2) << "Executing a GemmThunk";
se::DeviceMemoryBase lhs_data =
if (!launch_ok) {
return InternalError("Unable to launch cuBLAS gemm on stream %p", stream);
}
- return tensorflow::Status::OK();
+ return Status::OK();
}
} // namespace gpu
GemmThunk& operator=(const GemmThunk&) = delete;
// Does the gemm operation for the thunk on "stream", which must be non-null.
- tensorflow::Status ExecuteOnStream(
- const BufferAllocations& buffer_allocations, se::Stream* stream) override;
+ Status ExecuteOnStream(const BufferAllocations& buffer_allocations,
+ se::Stream* stream) override;
// Returns true if we'll perform autotuning if run on the given stream. If
// so, we want the GPU to be quiescent during autotuning, so as not to
}
// Runs optimization passes on the given HLO module.
-tensorflow::Status OptimizeHloModule(HloModule* hlo_module,
- se::StreamExecutor* stream_exec,
- DeviceMemoryAllocator* device_allocator) {
+Status OptimizeHloModule(HloModule* hlo_module, se::StreamExecutor* stream_exec,
+ DeviceMemoryAllocator* device_allocator) {
{
HloPassPipeline pipeline("optimization");
pipeline.AddInvariantChecker<HloVerifier>();
TF_RETURN_IF_ERROR(fusion.Run(hlo_module).status());
}
}
- return tensorflow::Status::OK();
+ return Status::OK();
}
// Modifies the given HLO module so that it will be accepted by IrEmitter.
// Unlike optimization passes, the passes are necessary for correctness.
-tensorflow::Status PrepareHloModuleForIrEmitting(HloModule* hlo_module) {
+Status PrepareHloModuleForIrEmitting(HloModule* hlo_module) {
// In some cases, we have to place the result of an instruction in a temporary
// buffer. For instance, the buffer that holds an external parameter is
// assumed immutable at this point, and should not be reused for output
kernel_name_(kernel_name),
unroll_factor_(unroll_factor) {}
-tensorflow::Status KernelThunk::Initialize(const GpuExecutable& executable,
- se::StreamExecutor* executor) {
+Status KernelThunk::Initialize(const GpuExecutable& executable,
+ se::StreamExecutor* executor) {
tensorflow::mutex_lock lock(mutex_);
if (!loader_spec_) {
loader_spec_.reset(new se::MultiKernelLoaderSpec(args_.size()));
}
}
- return tensorflow::Status::OK();
+ return Status::OK();
}
void KernelThunk::SetLaunchDimensions(const LaunchDimensions& launch_dims) {
launch_dimensions_ = launch_dims;
}
-tensorflow::Status KernelThunk::ExecuteOnStream(
- const BufferAllocations& buffer_allocations, se::Stream* stream) {
+Status KernelThunk::ExecuteOnStream(const BufferAllocations& buffer_allocations,
+ se::Stream* stream) {
// Load the kernel.
se::StreamExecutor* executor = stream->parent();
LaunchDimensions launch_dimensions;
*kernel_args)) {
return InternalError("Unable to launch kernel %s", kernel_name_.c_str());
}
- return tensorflow::Status::OK();
+ return Status::OK();
}
} // namespace gpu
int unroll_factor() const { return unroll_factor_; }
void SetLaunchDimensions(const LaunchDimensions& launch_dims);
- tensorflow::Status Initialize(const GpuExecutable& executable,
- se::StreamExecutor* executor) override;
+ Status Initialize(const GpuExecutable& executable,
+ se::StreamExecutor* executor) override;
// Executes the kernel for the thunk on "stream", which must be non-null.
- tensorflow::Status ExecuteOnStream(
- const BufferAllocations& buffer_allocations, se::Stream* stream) override;
+ Status ExecuteOnStream(const BufferAllocations& buffer_allocations,
+ se::Stream* stream) override;
private:
// Buffers passed to the kernel as arguments.
// Since CUDA 9.0, all GPU versions are included in a single file
const char* unified_libdevice_filename = "libdevice.10.bc";
std::vector<string> unified_libdevice_files;
- const tensorflow::Status status =
- tensorflow::Env::Default()->GetMatchingPaths(
+ const Status status = tensorflow::Env::Default()->GetMatchingPaths(
tensorflow::io::JoinPath(libdevice_dir_path, unified_libdevice_filename),
&unified_libdevice_files);
if (status.ok() && unified_libdevice_files.size() == 1) {
}
// Links libdevice into the given module if the module needs libdevice.
-tensorflow::Status LinkLibdeviceIfNecessary(
- llvm::Module* module, std::pair<int, int> compute_capability,
- const string& libdevice_dir_path) {
+Status LinkLibdeviceIfNecessary(llvm::Module* module,
+ std::pair<int, int> compute_capability,
+ const string& libdevice_dir_path) {
if (!CouldNeedLibdevice(*module)) {
- return tensorflow::Status::OK();
+ return Status::OK();
}
llvm::Linker linker(*module);
return tensorflow::errors::Internal(tensorflow::strings::StrCat(
"Error linking libdevice from ", libdevice_path));
}
- return tensorflow::Status::OK();
+ return Status::OK();
}
StatusOr<string> CompileModuleToPtx(llvm::Module* module,
const HloInstruction* hlo)
: Thunk(Kind::kSequential, hlo), thunks_(std::move(thunks)) {}
-tensorflow::Status SequentialThunk::Initialize(const GpuExecutable& executable,
- se::StreamExecutor* executor) {
+Status SequentialThunk::Initialize(const GpuExecutable& executable,
+ se::StreamExecutor* executor) {
for (auto& thunk : thunks_) {
TF_RETURN_IF_ERROR(thunk->Initialize(executable, executor));
}
- return tensorflow::Status::OK();
+ return Status::OK();
}
-tensorflow::Status SequentialThunk::ExecuteOnStream(
+Status SequentialThunk::ExecuteOnStream(
const BufferAllocations& buffer_allocations, se::Stream* stream) {
for (const auto& thunk : thunks_) {
TF_RETURN_IF_ERROR(thunk->ExecuteOnStream(buffer_allocations, stream));
}
- return tensorflow::Status::OK();
+ return Status::OK();
}
} // namespace gpu
const std::vector<std::unique_ptr<Thunk>>& thunks() const { return thunks_; }
- tensorflow::Status Initialize(const GpuExecutable& executable,
- se::StreamExecutor* executor) override;
- tensorflow::Status ExecuteOnStream(
- const BufferAllocations& buffer_allocations, se::Stream* stream) override;
+ Status Initialize(const GpuExecutable& executable,
+ se::StreamExecutor* executor) override;
+ Status ExecuteOnStream(const BufferAllocations& buffer_allocations,
+ se::Stream* stream) override;
private:
// The list of sub-thunks.
// This may be called multiple times. Its main purpose is to give us a chance
// to do initialization outside of ExecuteOnStream() so that the
// time spent initializing doesn't count towards our execution profile.
- virtual tensorflow::Status Initialize(const GpuExecutable& /*executable*/,
- se::StreamExecutor* /*executor*/) {
- return tensorflow::Status::OK();
+ virtual Status Initialize(const GpuExecutable& /*executable*/,
+ se::StreamExecutor* /*executor*/) {
+ return Status::OK();
}
// Users of Thunk should call ShouldHaltAllActivityBeforeRunning(stream)
// lifetime. Stream argument must be non-null.
//
// Precondition: Initialize(stream->parent()) has been called.
- virtual tensorflow::Status ExecuteOnStream(
- const BufferAllocations& buffer_allocations, se::Stream* stream) = 0;
+ virtual Status ExecuteOnStream(const BufferAllocations& buffer_allocations,
+ se::Stream* stream) = 0;
private:
Kind kind_;
namespace xla {
namespace gpu {
-tensorflow::Status TupleThunk::ExecuteOnStream(
- const BufferAllocations& buffer_allocations, se::Stream* stream) {
+Status TupleThunk::ExecuteOnStream(const BufferAllocations& buffer_allocations,
+ se::Stream* stream) {
std::vector<void*> tuple_element_buffer_addresses;
for (BufferAllocation::Slice tuple_element_buffer : tuple_element_buffers_) {
tuple_element_buffer_addresses.push_back(
tuple_element_buffer_addresses.data(), dest_buffer_address.opaque(),
sizeof(void*) * tuple_element_buffer_addresses.size());
}
- return tensorflow::Status::OK();
+ return Status::OK();
}
} // namespace gpu
TupleThunk(const TupleThunk&) = delete;
TupleThunk& operator=(const TupleThunk&) = delete;
- tensorflow::Status ExecuteOnStream(
- const BufferAllocations& buffer_allocations, se::Stream* stream) override;
+ Status ExecuteOnStream(const BufferAllocations& buffer_allocations,
+ se::Stream* stream) override;
private:
const std::vector<BufferAllocation::Slice> tuple_element_buffers_;
TF_RETURN_IF_ERROR(pair.second->Match(instruction->operand(pair.first),
tagged_instructions));
}
- return tensorflow::Status::OK();
+ return Status::OK();
}
private:
// Attempts to match each ExprTree in 'expr_trees_'.
// Returns OK on the first successful match, error status otherwise.
- virtual tensorflow::Status Run() {
+ virtual Status Run() {
Status status;
for (const ExprTree& expr_tree : expr_trees_) {
status = MatchExprTree(expr_tree);
} else if (type == S64) {
*const_value = literal.GetFirstElement<int64>();
}
- return tensorflow::Status::OK();
+ return Status::OK();
}
StatusOr<const HloInstruction*> GetTaggedInstruction(
gte_fusion_param0->name().c_str());
}
- return tensorflow::Status::OK();
+ return Status::OK();
}
const HloComputation* computation_;
GetTaggedInstruction("loop_start", tagged_instructions));
TF_RETURN_IF_ERROR(ParseConstInteger(const_hlo, &loop_start_));
- return tensorflow::Status::OK();
+ return Status::OK();
}
const HloInstruction* while_hlo_;
}
}
}
- return tensorflow::Status::OK();
+ return Status::OK();
}
const HloComputation* computation_;
reduce_precision->mantissa_bits()));
}
-Status ShapeVerifier::HandleInfeed(HloInstruction*) {
- return tensorflow::Status::OK();
-}
+Status ShapeVerifier::HandleInfeed(HloInstruction*) { return Status::OK(); }
Status ShapeVerifier::HandleOutfeed(HloInstruction* outfeed) {
// Outfeed has a separate shape field for the value which is outfed to the
}
Status ShapeVerifier::HandleHostCompute(HloInstruction*) {
- return tensorflow::Status::OK();
+ return Status::OK();
}
-Status ShapeVerifier::HandleRng(HloInstruction*) {
- return tensorflow::Status::OK();
-}
+Status ShapeVerifier::HandleRng(HloInstruction*) { return Status::OK(); }
Status ShapeVerifier::HandleReverse(HloInstruction* reverse) {
return CheckShape(
}
Status ShapeVerifier::HandleBitcast(HloInstruction* bitcast) {
- return tensorflow::Status::OK();
+ return Status::OK();
}
Status ShapeVerifier::HandleBroadcast(HloInstruction* broadcast) {
operand_shape.dimensions(operand_dimension))
<< broadcast->ToString() << " operand shape " << operand_shape;
}
- return tensorflow::Status::OK();
+ return Status::OK();
}
Status ShapeVerifier::HandleReshape(HloInstruction* reshape) {
TF_RETURN_IF_ERROR(CheckShape(reshape, reshape->shape()));
TF_RET_CHECK(ShapeUtil::ElementsIn(reshape->shape()) ==
ShapeUtil::ElementsIn(reshape->operand(0)->shape()));
- return tensorflow::Status::OK();
+ return Status::OK();
}
Status ShapeVerifier::HandleTranspose(HloInstruction* transpose) {
}
Status ShapeVerifier::HandleParameter(HloInstruction* hlo) {
- return tensorflow::Status::OK();
+ return Status::OK();
}
-Status ShapeVerifier::HandleFusion(HloInstruction*) {
- return tensorflow::Status::OK();
-}
+Status ShapeVerifier::HandleFusion(HloInstruction*) { return Status::OK(); }
Status ShapeVerifier::HandleCall(HloInstruction* call) {
// The shape of kCall should match the shape of the computation it calls.
return CheckShape(call, call->to_apply()->ComputeProgramShape().result());
}
-Status ShapeVerifier::HandleCustomCall(HloInstruction*) {
- return tensorflow::Status::OK();
-}
+Status ShapeVerifier::HandleCustomCall(HloInstruction*) { return Status::OK(); }
Status ShapeVerifier::HandleSlice(HloInstruction* slice) {
return CheckShape(slice,
ShapeUtil::HumanString(instruction->shape()).c_str(),
instruction->ToString().c_str());
}
- return tensorflow::Status::OK();
+ return Status::OK();
}
Status ShapeVerifier::CheckShape(const HloInstruction* instruction,
instr1->ToString().c_str(), instr1->channel_id(),
instr2->ToString().c_str(), instr2->channel_id());
}
- return tensorflow::Status::OK();
+ return Status::OK();
}
string ComputationsToString(
}
}
}
- return tensorflow::Status::OK();
+ return Status::OK();
}
Status HloVerifier::CheckFusionInstruction(HloInstruction* fusion) const {
// TODO(b/65423525): We'd like to check that all operands are distinct.
// This is currently disabled due to the invariant being violated by
// multi-output fusion.
- return tensorflow::Status::OK();
+ return Status::OK();
}
Status HloVerifier::CheckWhileInstruction(HloInstruction* instruction) {
"init: %s, body: %s",
init->ToString().c_str(), body_root->ToString().c_str());
}
- return tensorflow::Status::OK();
+ return Status::OK();
}
Status HloVerifier::CheckElementwiseInstruction(HloInstruction* instruction) {
ShapeUtil::HumanString(operand_shape).c_str());
}
}
- return tensorflow::Status::OK();
+ return Status::OK();
}
StatusOr<bool> HloVerifier::Run(HloModule* module) {
Status HandleBatchNormGrad(HloInstruction* batch_norm_grad) override;
Status HandleGather(HloInstruction* gather) override;
- Status FinishVisit(HloInstruction*) override {
- return tensorflow::Status::OK();
- }
+ Status FinishVisit(HloInstruction*) override { return Status::OK(); }
protected:
// Check the instruction's shape against the shape given by ShapeInference
/*device_allocator=*/nullptr)
.ConsumeValueOrDie();
- EXPECT_EQ(
- ::tensorflow::Status::OK(),
- backend()
- .compiler()
- ->RunBackend(std::move(module), backend().default_stream_executor(),
- /*device_allocator=*/nullptr)
- .status());
+ EXPECT_EQ(Status::OK(), backend()
+ .compiler()
+ ->RunBackend(std::move(module),
+ backend().default_stream_executor(),
+ /*device_allocator=*/nullptr)
+ .status());
}
// A GTE inside of a fusion node inherits the layout of its operand (which
Status FusedIrEmitter::FinishVisit(HloInstruction* root) {
fused_root_ = root;
- return tensorflow::Status::OK();
+ return Status::OK();
}
FusedIrEmitter::Generator FusedIrEmitter::GetRootGenerator() const {
LoopEmitter::LoopEmitter(const ElementGenerator& target_element_generator,
const IrArray& target_array,
llvm::IRBuilder<>* ir_builder)
- : body_emitter_([=](const llvm_ir::IrArray::Index array_index)
- -> ::tensorflow::Status {
+ : body_emitter_([=](const llvm_ir::IrArray::Index array_index) -> Status {
// Convert target_element_generator to a BodyEmitter.
TF_ASSIGN_OR_RETURN(llvm::Value * target_element,
target_element_generator(array_index));
target_array.EmitWriteArrayElement(array_index, target_element,
ir_builder);
- return tensorflow::Status::OK();
+ return Status::OK();
}),
shape_(target_array.GetShape()),
ir_builder_(ir_builder) {}
return {array_index};
}
-tensorflow::Status LoopEmitter::EmitLoop(tensorflow::StringPiece loop_name) {
+Status LoopEmitter::EmitLoop(tensorflow::StringPiece loop_name) {
for (const IrArray::Index& array_index :
EmitIndexAndSetExitBasicBlock(loop_name)) {
TF_RETURN_IF_ERROR(body_emitter_(array_index));
if (exit_bb_ != nullptr) {
ir_builder_->SetInsertPoint(exit_bb_);
}
- return tensorflow::Status::OK();
+ return Status::OK();
}
} // namespace llvm_ir
// Emits a loop for every element in the given shape.
class LoopEmitter {
public:
- using BodyEmitter =
- std::function<tensorflow::Status(const IrArray::Index& index)>;
+ using BodyEmitter = std::function<Status(const IrArray::Index& index)>;
LoopEmitter(const BodyEmitter& body_emitter, const Shape& shape,
llvm::IRBuilder<>* ir_builder);
tensorflow::StringPiece loop_name);
// Emits a complete loop nest for every element in the given shape.
- tensorflow::Status EmitLoop(tensorflow::StringPiece loop_name = "");
+ Status EmitLoop(tensorflow::StringPiece loop_name = "");
protected:
// An IR emitter that generates the loop body.
// Records the arguments used to invoke a computation in a SessionModule
// proto.
-tensorflow::Status RecordArguments(
+Status RecordArguments(
const tensorflow::gtl::ArraySlice<const ShapedBuffer*> arguments,
se::StreamExecutor* executor, TransferManager* transfer_manager,
SessionModule* module) {
transfer_manager->TransferLiteralFromDevice(executor, *argument));
*module->add_arguments() = literal->ToProto();
}
- return tensorflow::Status::OK();
+ return Status::OK();
}
// Records the result of a computation in a SessionModule proto.
-tensorflow::Status RecordResult(const ShapedBuffer& result,
- se::StreamExecutor* executor,
- TransferManager* transfer_manager,
- SessionModule* module) {
+Status RecordResult(const ShapedBuffer& result, se::StreamExecutor* executor,
+ TransferManager* transfer_manager, SessionModule* module) {
module->clear_result();
TF_ASSIGN_OR_RETURN(
std::unique_ptr<Literal> literal,
transfer_manager->TransferLiteralFromDevice(executor, result));
*module->mutable_result() = literal->ToProto();
- return tensorflow::Status::OK();
+ return Status::OK();
}
// Records the arguments used to invoke a computation in an HloSnapshot proto.
-tensorflow::Status RecordArguments(
+Status RecordArguments(
const tensorflow::gtl::ArraySlice<const ShapedBuffer*> arguments,
se::StreamExecutor* executor, TransferManager* transfer_manager,
HloSnapshot* module) {
transfer_manager->TransferLiteralFromDevice(executor, *argument));
*module->add_arguments() = literal->ToProto();
}
- return tensorflow::Status::OK();
+ return Status::OK();
}
// Records the result of a computation in a HloSnapshot proto.
-tensorflow::Status RecordResult(const ShapedBuffer& result,
- se::StreamExecutor* executor,
- TransferManager* transfer_manager,
- HloSnapshot* module) {
+Status RecordResult(const ShapedBuffer& result, se::StreamExecutor* executor,
+ TransferManager* transfer_manager, HloSnapshot* module) {
module->clear_result();
TF_ASSIGN_OR_RETURN(
std::unique_ptr<Literal> literal,
transfer_manager->TransferLiteralFromDevice(executor, result));
*module->mutable_result() = literal->ToProto();
- return tensorflow::Status::OK();
+ return Status::OK();
}
} // namespace
}
}
-tensorflow::Status Service::Computation(const ComputationRequest* arg,
- ComputationResponse* result) {
+Status Service::Computation(const ComputationRequest* arg,
+ ComputationResponse* result) {
if (arg->name().empty()) {
return InvalidArgument("computation request needs a name");
}
VLOG(1) << Printf("Created new computation %s on service %p, name %s",
result->computation().ShortDebugString().c_str(), this,
arg->name().c_str());
- return tensorflow::Status::OK();
+ return Status::OK();
}
-tensorflow::Status Service::CreateChannelHandle(
- const CreateChannelHandleRequest* arg,
- CreateChannelHandleResponse* result) {
+Status Service::CreateChannelHandle(const CreateChannelHandleRequest* arg,
+ CreateChannelHandleResponse* result) {
*result->mutable_channel() = channel_tracker_.NewChannel();
- return tensorflow::Status::OK();
+ return Status::OK();
}
-tensorflow::Status Service::Unregister(const UnregisterRequest* arg,
- UnregisterResponse* result) {
+Status Service::Unregister(const UnregisterRequest* arg,
+ UnregisterResponse* result) {
return allocation_tracker_.Unregister(arg->data());
}
// Deconstructs a previously-allocated global handle.
-tensorflow::Status Service::DeconstructTuple(const DeconstructTupleRequest* arg,
- DeconstructTupleResponse* result) {
+Status Service::DeconstructTuple(const DeconstructTupleRequest* arg,
+ DeconstructTupleResponse* result) {
TF_ASSIGN_OR_RETURN(
std::vector<GlobalDataHandle> elements,
allocation_tracker_.DeconstructTuple(arg->tuple_handle()));
for (auto& element : elements) {
*result->add_element_handles() = element;
}
- return tensorflow::Status::OK();
+ return Status::OK();
}
-tensorflow::Status Service::ValidateResultShapeWithLayout(
- const Shape& shape_with_layout, const Shape& result_shape) const {
+Status Service::ValidateResultShapeWithLayout(const Shape& shape_with_layout,
+ const Shape& result_shape) const {
if (!ShapeUtil::Compatible(shape_with_layout, result_shape)) {
return InvalidArgument(
"Shape used to set computation result layout %s is not compatible "
module->device_entry_computation_layout().result_shape(),
execute_backend_->transfer_manager()->HostShapeToDeviceShape(
module->host_entry_computation_layout().result_shape())));
- return tensorflow::Status::OK();
+ return Status::OK();
}
StatusOr<std::unique_ptr<Executable>> Service::BuildExecutable(
result_tag);
}
-tensorflow::Status Service::SetReturnValue(const SetReturnValueRequest* arg,
- SetReturnValueResponse* results) {
+Status Service::SetReturnValue(const SetReturnValueRequest* arg,
+ SetReturnValueResponse* results) {
TF_ASSIGN_OR_RETURN(UserComputation * computation,
computation_tracker_.Resolve(arg->computation()));
return computation->SetReturnValue(arg->operand());
return replicated_arguments;
}
-tensorflow::Status Service::ExecuteParallel(const ExecuteParallelRequest* arg,
- ExecuteParallelResponse* result) {
+Status Service::ExecuteParallel(const ExecuteParallelRequest* arg,
+ ExecuteParallelResponse* result) {
VLOG(1) << "running execute-parallel request: " << arg->ShortDebugString();
std::vector<std::vector<std::vector<const ShapedBuffer*>>> all_arguments;
}
VLOG(1) << "successfully completed 'execute-parallel' request";
- return tensorflow::Status::OK();
+ return Status::OK();
}
-tensorflow::Status Service::ExecuteGraphParallel(
- const ExecuteGraphParallelRequest* arg, ExecuteParallelResponse* result) {
+Status Service::ExecuteGraphParallel(const ExecuteGraphParallelRequest* arg,
+ ExecuteParallelResponse* result) {
VLOG(1) << "running execute-graph-parallel request";
std::vector<std::vector<std::vector<const ShapedBuffer*>>> all_arguments;
}
VLOG(1) << "successfully completed 'execute-graph-parallel' request";
- return tensorflow::Status::OK();
+ return Status::OK();
}
-tensorflow::Status Service::GetDeviceHandles(const GetDeviceHandlesRequest* arg,
- GetDeviceHandlesResponse* result) {
+Status Service::GetDeviceHandles(const GetDeviceHandlesRequest* arg,
+ GetDeviceHandlesResponse* result) {
const int64 available_device_count = execute_backend_->device_count();
const int64 replica_count = options_.number_of_replicas();
if (replica_count <= 0) {
*result->add_device_handles() = device_handle;
}
- return tensorflow::Status::OK();
+ return Status::OK();
}
-tensorflow::Status Service::ExecuteOneToN(const ExecuteRequest* arg,
- ExecuteResponse* result) {
+Status Service::ExecuteOneToN(const ExecuteRequest* arg,
+ ExecuteResponse* result) {
ExecuteParallelRequest parallel_arg;
*parallel_arg.add_requests() = *arg;
ExecuteParallelResponse parallel_result;
return PickParallelResponse(parallel_result, result);
}
-tensorflow::Status Service::ExecuteOneToN(const ExecuteGraphRequest* arg,
- ExecuteResponse* result) {
+Status Service::ExecuteOneToN(const ExecuteGraphRequest* arg,
+ ExecuteResponse* result) {
ExecuteGraphParallelRequest parallel_arg;
*parallel_arg.add_requests() = *arg;
ExecuteParallelResponse parallel_result;
return PickParallelResponse(parallel_result, result);
}
-tensorflow::Status Service::PickParallelResponse(
+Status Service::PickParallelResponse(
const ExecuteParallelResponse& parallel_result, ExecuteResponse* result) {
// The "result device" selection is a bit hacky, but better than assuming it
// is device 0. We have b/76035356 for restructuring the client API to clean
return Status::OK();
}
-tensorflow::Status Service::Execute(const ExecuteRequest* arg,
- ExecuteResponse* result) {
+Status Service::Execute(const ExecuteRequest* arg, ExecuteResponse* result) {
VLOG(1) << "running execute request: " << arg->ShortDebugString();
TF_ASSIGN_OR_RETURN(UserComputation * user_computation,
}
VLOG(1) << "successfully completed 'execute' request";
- return tensorflow::Status::OK();
+ return Status::OK();
}
StatusOr<std::unique_ptr<Executable>> Service::BuildExecutable(
return std::move(executable);
}
-tensorflow::Status Service::ExecuteGraph(const ExecuteGraphRequest* arg,
- ExecuteResponse* result) {
+Status Service::ExecuteGraph(const ExecuteGraphRequest* arg,
+ ExecuteResponse* result) {
VLOG(1) << "running execute-graph request";
if (!arg->has_computation()) {
}
VLOG(1) << "successfully completed 'execute-graph' request";
- return tensorflow::Status::OK();
+ return Status::OK();
}
-tensorflow::Status Service::ExecuteAsync(const ExecuteAsyncRequest* arg,
- ExecuteAsyncResponse* result) {
+Status Service::ExecuteAsync(const ExecuteAsyncRequest* arg,
+ ExecuteAsyncResponse* result) {
VLOG(1) << "running execute-async request: " << arg->ShortDebugString();
TF_ASSIGN_OR_RETURN(UserComputation * user_computation,
streams.clear();
VLOG(1) << "successfully completed 'execute-async' request";
- return tensorflow::Status::OK();
+ return Status::OK();
}
-tensorflow::Status Service::WaitForExecution(const WaitForExecutionRequest* arg,
- WaitForExecutionResponse* result) {
+Status Service::WaitForExecution(const WaitForExecutionRequest* arg,
+ WaitForExecutionResponse* result) {
TF_ASSIGN_OR_RETURN(const auto execution,
execution_tracker_.Resolve(arg->execution()));
TF_RETURN_IF_ERROR(execution_tracker_.Unregister(arg->execution()));
VLOG(1) << "successfully completed 'wait-for-execution' request";
- return tensorflow::Status::OK();
+ return Status::OK();
}
-tensorflow::Status Service::TransferToClient(const TransferToClientRequest* arg,
- TransferToClientResponse* result) {
+Status Service::TransferToClient(const TransferToClientRequest* arg,
+ TransferToClientResponse* result) {
TF_ASSIGN_OR_RETURN(const ShapedBuffer* shaped_buffer,
allocation_tracker_.ResolveForReplica(arg->data(), 0));
*result->mutable_literal() =
result_literal->Relayout(*return_shape)->ToProto();
}
- return tensorflow::Status::OK();
+ return Status::OK();
}
namespace {
} // namespace
-tensorflow::Status Service::TransferToServer(const TransferToServerRequest* arg,
- TransferToServerResponse* result) {
+Status Service::TransferToServer(const TransferToServerRequest* arg,
+ TransferToServerResponse* result) {
TF_ASSIGN_OR_RETURN(std::unique_ptr<Literal> literal,
Literal::CreateFromProto(arg->literal()));
const Shape& shape = literal->shape();
StrCat("TransferToServer literal of shape ",
ShapeUtil::HumanString(shape))));
- return tensorflow::Status::OK();
+ return Status::OK();
}
-tensorflow::Status Service::TransferToInfeed(const TransferToInfeedRequest* arg,
- TransferToInfeedResponse* result) {
+Status Service::TransferToInfeed(const TransferToInfeedRequest* arg,
+ TransferToInfeedResponse* result) {
const int64 replica_count = options_.number_of_replicas();
if (arg->replica_id() < 0 || arg->replica_id() >= replica_count) {
return FailedPrecondition(
executor, *literal);
}
-tensorflow::Status Service::TransferFromOutfeed(
- const TransferFromOutfeedRequest* arg,
- TransferFromOutfeedResponse* result) {
+Status Service::TransferFromOutfeed(const TransferFromOutfeedRequest* arg,
+ TransferFromOutfeedResponse* result) {
const int64 replica_count = options_.number_of_replicas();
if (arg->replica_id() < 0 || arg->replica_id() >= replica_count) {
return FailedPrecondition(
execute_backend_->transfer_manager()->TransferLiteralFromOutfeed(
executor, arg->shape_with_layout(), &literal));
*result->mutable_literal() = literal.ToProto();
- return tensorflow::Status::OK();
+ return Status::OK();
}
-tensorflow::Status Service::ResetDevice(const ResetDeviceRequest* arg,
- ResetDeviceResponse* result) {
+Status Service::ResetDevice(const ResetDeviceRequest* arg,
+ ResetDeviceResponse* result) {
return execute_backend_->ResetDevices();
}
-tensorflow::Status Service::IsConstant(const IsConstantRequest* arg,
- IsConstantResponse* result) {
+Status Service::IsConstant(const IsConstantRequest* arg,
+ IsConstantResponse* result) {
TF_ASSIGN_OR_RETURN(UserComputation * user_computation,
computation_tracker_.Resolve(arg->computation()));
user_computation->IsConstant(arg->operand(), arg->num_parameters()));
result->set_is_constant(is_constant);
- return tensorflow::Status::OK();
+ return Status::OK();
}
-tensorflow::Status Service::ComputeConstant(const ComputeConstantRequest* arg,
- ComputeConstantResponse* result) {
+Status Service::ComputeConstant(const ComputeConstantRequest* arg,
+ ComputeConstantResponse* result) {
TF_ASSIGN_OR_RETURN(UserComputation * user_computation,
computation_tracker_.Resolve(arg->computation()));
}
*result->mutable_literal() = result_literal->ToProto();
- return tensorflow::Status::OK();
+ return Status::OK();
}
-tensorflow::Status Service::ComputeConstantGraph(
- const ComputeConstantGraphRequest* arg, ComputeConstantResponse* result) {
+Status Service::ComputeConstantGraph(const ComputeConstantGraphRequest* arg,
+ ComputeConstantResponse* result) {
if (!arg->has_computation()) {
return InvalidArgument("computations may not be empty");
}
}
*result->mutable_literal() = result_literal->ToProto();
- return tensorflow::Status::OK();
+ return Status::OK();
}
-tensorflow::Status Service::GetShape(const GetShapeRequest* arg,
- GetShapeResponse* result) {
+Status Service::GetShape(const GetShapeRequest* arg, GetShapeResponse* result) {
TF_ASSIGN_OR_RETURN(const ShapedBuffer* buffer,
allocation_tracker_.ResolveForReplica(arg->data(), 0));
*result->mutable_shape() = buffer->on_host_shape();
- return tensorflow::Status::OK();
+ return Status::OK();
}
-tensorflow::Status Service::GetComputationShape(
- const GetComputationShapeRequest* arg,
- GetComputationShapeResponse* result) {
+Status Service::GetComputationShape(const GetComputationShapeRequest* arg,
+ GetComputationShapeResponse* result) {
TF_ASSIGN_OR_RETURN(UserComputation * computation,
computation_tracker_.Resolve(arg->computation()));
TF_ASSIGN_OR_RETURN(auto program_shape, computation->ComputeProgramShape(
versioned_handle.version));
*result->mutable_program_shape() = *program_shape;
- return tensorflow::Status::OK();
+ return Status::OK();
}
-tensorflow::Status Service::GetLocalShape(const GetLocalShapeRequest* arg,
- GetLocalShapeResponse* result) {
+Status Service::GetLocalShape(const GetLocalShapeRequest* arg,
+ GetLocalShapeResponse* result) {
TF_ASSIGN_OR_RETURN(UserComputation * computation,
computation_tracker_.Resolve(arg->computation()));
TF_ASSIGN_OR_RETURN(*result->mutable_shape(),
computation->GetShape(arg->operand()));
- return tensorflow::Status::OK();
+ return Status::OK();
}
-tensorflow::Status Service::GetComputationStats(
- const ComputationStatsRequest* arg, ComputationStatsResponse* result) {
+Status Service::GetComputationStats(const ComputationStatsRequest* arg,
+ ComputationStatsResponse* result) {
TF_ASSIGN_OR_RETURN(UserComputation * user_computation,
computation_tracker_.Resolve(arg->computation()));
stats.set_flop_count(analysis.flop_count());
stats.set_transcendental_count(analysis.transcendental_count());
*result->mutable_stats() = stats;
- return tensorflow::Status::OK();
+ return Status::OK();
}
-tensorflow::Status Service::GetComputationGraphStats(
+Status Service::GetComputationGraphStats(
const ComputationGraphStatsRequest* arg, ComputationStatsResponse* result) {
if (!arg->has_computation()) {
return InvalidArgument("Computations may not be empty.");
stats.set_flop_count(analysis.flop_count());
stats.set_transcendental_count(analysis.transcendental_count());
*result->mutable_stats() = stats;
- return tensorflow::Status::OK();
+ return Status::OK();
}
template <typename RequestT, typename ResponseT>
-tensorflow::Status Service::AddInstruction(
+Status Service::AddInstruction(
const RequestT* arg, ResponseT* result,
const std::function<StatusOr<ComputationDataHandle>(UserComputation*)>&
adder) {
computation_tracker_.Resolve(arg->computation()));
TF_ASSIGN_OR_RETURN(*result->mutable_output(), adder(computation));
- return tensorflow::Status::OK();
+ return Status::OK();
}
-tensorflow::Status Service::Op(const OpRequest* arg, OpResponse* result) {
+Status Service::Op(const OpRequest* arg, OpResponse* result) {
TF_ASSIGN_OR_RETURN(UserComputation * computation,
computation_tracker_.Resolve(arg->computation()));
StatusOr<ComputationDataHandle> handle_status;
if (arg->has_sharding()) {
TF_RETURN_IF_ERROR(computation->SetOpSharding(handle, arg->sharding()));
}
- return tensorflow::Status::OK();
+ return Status::OK();
}
-tensorflow::Status Service::SnapshotComputation(
- const SnapshotComputationRequest* arg,
- SnapshotComputationResponse* result) {
+Status Service::SnapshotComputation(const SnapshotComputationRequest* arg,
+ SnapshotComputationResponse* result) {
TF_ASSIGN_OR_RETURN(
std::unique_ptr<SessionModule> module,
computation_tracker_.SnapshotComputation(arg->computation()));
result->set_allocated_module(module.release());
- return tensorflow::Status::OK();
+ return Status::OK();
}
-tensorflow::Status Service::LoadComputationSnapshot(
+Status Service::LoadComputationSnapshot(
const LoadComputationSnapshotRequest* arg,
LoadComputationSnapshotResponse* result) {
TF_ASSIGN_OR_RETURN(*result->mutable_computation(),
computation_tracker_.LoadSessionModule(arg->module()));
- return tensorflow::Status::OK();
+ return Status::OK();
}
DeviceHandle Service::SingleComputationDeviceHandle() const {
// Creates a new computation with the given name.
// A unique ComputationHandle is returned.
- tensorflow::Status Computation(const ComputationRequest* arg,
- ComputationResponse* result) override;
+ Status Computation(const ComputationRequest* arg,
+ ComputationResponse* result) override;
// Unregisters a previously-allocated global handle.
//
// If the handle given is not currently allocated, a NOT_FOUND status is
// returned.
- tensorflow::Status Unregister(const UnregisterRequest* arg,
- UnregisterResponse* result) override;
+ Status Unregister(const UnregisterRequest* arg,
+ UnregisterResponse* result) override;
// Deconstructs a tuple. Returns a newly created GlobalDataHandle for each
// element in the tuple.
- tensorflow::Status DeconstructTuple(
- const DeconstructTupleRequest* arg,
- DeconstructTupleResponse* result) override;
+ Status DeconstructTuple(const DeconstructTupleRequest* arg,
+ DeconstructTupleResponse* result) override;
// Modifies the provided computation so that subsequent executions
// will compute the provided ComputationDataHandle, rather than the
// last expression enqueued on that Computation.
- tensorflow::Status SetReturnValue(const SetReturnValueRequest* arg,
- SetReturnValueResponse* results) override;
+ Status SetReturnValue(const SetReturnValueRequest* arg,
+ SetReturnValueResponse* results) override;
// Executes a computation with the provided global data passed as
// immutable arguments. Returns global data output and execution timing.
- tensorflow::Status Execute(const ExecuteRequest* arg,
- ExecuteResponse* result) override;
+ Status Execute(const ExecuteRequest* arg, ExecuteResponse* result) override;
// Executes a computation with the provided global data passed as
// immutable arguments. The request contains the whole computation graph.
// Returns global data output and execution timing.
//
// TODO(b/74197823): This is a part of a NOT YET ready refactor.
- tensorflow::Status ExecuteGraph(const ExecuteGraphRequest* arg,
- ExecuteResponse* result) override;
+ Status ExecuteGraph(const ExecuteGraphRequest* arg,
+ ExecuteResponse* result) override;
// Executes one or more computations in parallel with the provided global data
// passed as immutable arguments. Returns global data output for each
// computation.
- tensorflow::Status ExecuteParallel(const ExecuteParallelRequest* arg,
- ExecuteParallelResponse* result) override;
+ Status ExecuteParallel(const ExecuteParallelRequest* arg,
+ ExecuteParallelResponse* result) override;
// Executes one or more computations in parallel with the provided global data
// passed as immutable arguments. Returns global data output for each
// computation.
//
// TODO(b/74197823): This is a part of a NOT YET ready refactor.
- tensorflow::Status ExecuteGraphParallel(
- const ExecuteGraphParallelRequest* arg,
- ExecuteParallelResponse* result) override;
+ Status ExecuteGraphParallel(const ExecuteGraphParallelRequest* arg,
+ ExecuteParallelResponse* result) override;
// Requests one or more device handles from the target.
//
// the first set of replicas, and the next R devices to the second set of
// replicas, etc. Each returned device handle represents the device with the
// replica id 0.
- tensorflow::Status GetDeviceHandles(
- const GetDeviceHandlesRequest* arg,
- GetDeviceHandlesResponse* result) override;
+ Status GetDeviceHandles(const GetDeviceHandlesRequest* arg,
+ GetDeviceHandlesResponse* result) override;
// Asynchronously executes a computation with provided arguments. Invokes
// the provided computation with the provided global data passed as
// (Note: The corresponding function in xla::Client was removed as part of
// b/64116060, in an attempt to simplify our API. We're keeping this around
// for now in case we want to expose this to clients in a different way.)
- tensorflow::Status ExecuteAsync(const ExecuteAsyncRequest* arg,
- ExecuteAsyncResponse* result) override;
+ Status ExecuteAsync(const ExecuteAsyncRequest* arg,
+ ExecuteAsyncResponse* result) override;
// Waits until the specified execution is complete and returns the result.
// Calling this API multiple times with the same execution handle causes the
// method to return an error, since the execution handle is destroyed after
// the first call.
- tensorflow::Status WaitForExecution(
- const WaitForExecutionRequest* arg,
- WaitForExecutionResponse* result) override;
+ Status WaitForExecution(const WaitForExecutionRequest* arg,
+ WaitForExecutionResponse* result) override;
// Requests that global data be transferred to the client in literal form.
- tensorflow::Status TransferToClient(
- const TransferToClientRequest* arg,
- TransferToClientResponse* result) override;
+ Status TransferToClient(const TransferToClientRequest* arg,
+ TransferToClientResponse* result) override;
// Transfers data from a literal provided by the client, into device memory.
- tensorflow::Status TransferToServer(
- const TransferToServerRequest* arg,
- TransferToServerResponse* result) override;
+ Status TransferToServer(const TransferToServerRequest* arg,
+ TransferToServerResponse* result) override;
// Transfers data from a literal provided by the client, into the Infeed
// buffer of the device.
- tensorflow::Status TransferToInfeed(
- const TransferToInfeedRequest* arg,
- TransferToInfeedResponse* result) override;
+ Status TransferToInfeed(const TransferToInfeedRequest* arg,
+ TransferToInfeedResponse* result) override;
// Transfers data from the Outfeed of the device to the literal provided by
// the client.
- tensorflow::Status TransferFromOutfeed(
- const TransferFromOutfeedRequest* arg,
- TransferFromOutfeedResponse* result) override;
+ Status TransferFromOutfeed(const TransferFromOutfeedRequest* arg,
+ TransferFromOutfeedResponse* result) override;
// Resets devices, clearing all existing state on all the devices associated
// with this service (including memory allocated on the devices).
// ResetDevice should be called before an Execution that expects the device to
// be in the reset state. For example, if the prior Execution modifies device
// state (e.g., architectural state) that the next Execution depends on.
- tensorflow::Status ResetDevice(const ResetDeviceRequest* arg,
- ResetDeviceResponse* result) override;
+ Status ResetDevice(const ResetDeviceRequest* arg,
+ ResetDeviceResponse* result) override;
// Tests if an expression is a compile-time constant.
- tensorflow::Status IsConstant(const IsConstantRequest* arg,
- IsConstantResponse* result) override;
+ Status IsConstant(const IsConstantRequest* arg,
+ IsConstantResponse* result) override;
// Computes the value of a constant expression.
- tensorflow::Status ComputeConstant(const ComputeConstantRequest* arg,
- ComputeConstantResponse* result) override;
- tensorflow::Status ComputeConstantGraph(
- const ComputeConstantGraphRequest* arg,
- ComputeConstantResponse* result) override;
+ Status ComputeConstant(const ComputeConstantRequest* arg,
+ ComputeConstantResponse* result) override;
+ Status ComputeConstantGraph(const ComputeConstantGraphRequest* arg,
+ ComputeConstantResponse* result) override;
// Returns the shape (with layout) of an array associated with a given data
// handle.
- tensorflow::Status GetShape(const GetShapeRequest* arg,
- GetShapeResponse* result) override;
+ Status GetShape(const GetShapeRequest* arg,
+ GetShapeResponse* result) override;
// Returns the program shape of the computation associated with the given
// handle.
- tensorflow::Status GetComputationShape(
- const GetComputationShapeRequest* arg,
- GetComputationShapeResponse* result) override;
+ Status GetComputationShape(const GetComputationShapeRequest* arg,
+ GetComputationShapeResponse* result) override;
/////
// Computation-oriented methods.
// Enqueues an Op on the computation.
- tensorflow::Status Op(const OpRequest* arg, OpResponse* result) override;
+ Status Op(const OpRequest* arg, OpResponse* result) override;
// Retrieves the inferred shape for a value within a computation.
- tensorflow::Status GetLocalShape(const GetLocalShapeRequest* arg,
- GetLocalShapeResponse* result) override;
+ Status GetLocalShape(const GetLocalShapeRequest* arg,
+ GetLocalShapeResponse* result) override;
// Retrieves the statistics of a computation.
- tensorflow::Status GetComputationStats(
- const ComputationStatsRequest* arg,
- ComputationStatsResponse* result) override;
+ Status GetComputationStats(const ComputationStatsRequest* arg,
+ ComputationStatsResponse* result) override;
// Retrieves the statistics of a computation.
//
// TODO(b/74197823): This is a part of a NOT YET ready refactor.
- tensorflow::Status GetComputationGraphStats(
- const ComputationGraphStatsRequest* arg,
- ComputationStatsResponse* result) override;
+ Status GetComputationGraphStats(const ComputationGraphStatsRequest* arg,
+ ComputationStatsResponse* result) override;
// Snapshots the current state of a computation handle into a serializable
// protocol buffer form, so it can be loaded via
// LoadComputationSnapshot.
- tensorflow::Status SnapshotComputation(
- const SnapshotComputationRequest* arg,
- SnapshotComputationResponse* result) override;
+ Status SnapshotComputation(const SnapshotComputationRequest* arg,
+ SnapshotComputationResponse* result) override;
// Loads a computation from a serialized protocol buffer created via
// SnapshotComputation.
- tensorflow::Status LoadComputationSnapshot(
+ Status LoadComputationSnapshot(
const LoadComputationSnapshotRequest* arg,
LoadComputationSnapshotResponse* result) override;
// Creates a unique channel handle that can be used for Send/Recv
// instructions.
- tensorflow::Status CreateChannelHandle(
- const CreateChannelHandleRequest* arg,
- CreateChannelHandleResponse* result) override;
+ Status CreateChannelHandle(const CreateChannelHandleRequest* arg,
+ CreateChannelHandleResponse* result) override;
// Returns the ComputationTracker of the current service instance.
// Only used in unit tests to access user computations from client.
// Convenience function for adding a function to a user computation.
template <typename RequestT, typename ResponseT>
- tensorflow::Status AddInstruction(
+ Status AddInstruction(
const RequestT* arg, ResponseT* result,
const std::function<StatusOr<ComputationDataHandle>(UserComputation*)>&
adder);
// Executes a single computation which has more than one target device.
// The N devices are expected to all return an empty tuple, but one, which
// will be the result of this computation.
- tensorflow::Status ExecuteOneToN(const ExecuteRequest* arg,
- ExecuteResponse* result);
- tensorflow::Status ExecuteOneToN(const ExecuteGraphRequest* arg,
- ExecuteResponse* result);
+ Status ExecuteOneToN(const ExecuteRequest* arg, ExecuteResponse* result);
+ Status ExecuteOneToN(const ExecuteGraphRequest* arg, ExecuteResponse* result);
// Convenience function which checks whether the given shape_with_layout
// (presumably passed by the client to set the result layout) is valid for the
// given computation result shape.
- tensorflow::Status ValidateResultShapeWithLayout(
- const Shape& shape_with_layout, const Shape& result_shape) const;
+ Status ValidateResultShapeWithLayout(const Shape& shape_with_layout,
+ const Shape& result_shape) const;
// Returns the stream executors assigned to the replicas represented by the
// given device handle. Each device_handle is a virtual replicated device that
return std::set<int64>(slice.begin(), slice.end()).size() == slice.size();
}
-tensorflow::Status ExpectNotTupleOrOpaque(const Shape& shape,
- tensorflow::StringPiece op_type) {
+Status ExpectNotTupleOrOpaque(const Shape& shape,
+ tensorflow::StringPiece op_type) {
if (ShapeUtil::IsTuple(shape)) {
return InvalidArgument("Expected non-tuple argument for %s, but got %s.",
std::string(op_type).c_str(),
std::string(op_type).c_str(),
ShapeUtil::HumanString(shape).c_str());
} else {
- return tensorflow::Status::OK();
+ return Status::OK();
}
}
-tensorflow::Status VerifyReducerShape(const ProgramShape& reducer_shape,
- const Shape& init_value_shape,
- const PrimitiveType& input_element_type) {
+Status VerifyReducerShape(const ProgramShape& reducer_shape,
+ const Shape& init_value_shape,
+ const PrimitiveType& input_element_type) {
if (reducer_shape.parameters_size() != 2) {
return InvalidArgument(
"Reduction function must take 2 parameters, but "
ShapeUtil::HumanString(accumulator_shape).c_str());
}
- return tensorflow::Status::OK();
+ return Status::OK();
}
StatusOr<Shape> InferWindowOutputShape(const Shape& base_shape,
scale_shape, "scale input of batch norm training"));
TF_RET_CHECK(ShapeUtil::ValidateShapeWithOptionalLayout(operand_shape) ==
- tensorflow::Status::OK());
+ Status::OK());
TF_RET_CHECK(ShapeUtil::ValidateShapeWithOptionalLayout(offset_shape) ==
- tensorflow::Status::OK());
+ Status::OK());
TF_RET_CHECK(ShapeUtil::ValidateShapeWithOptionalLayout(scale_shape) ==
- tensorflow::Status::OK());
+ Status::OK());
if (feature_index >= ShapeUtil::Rank(operand_shape)) {
return InvalidArgument(
scale_shape, "scale input of batch norm inference"));
TF_RET_CHECK(ShapeUtil::ValidateShapeWithOptionalLayout(operand_shape) ==
- tensorflow::Status::OK());
+ Status::OK());
TF_RET_CHECK(ShapeUtil::ValidateShapeWithOptionalLayout(offset_shape) ==
- tensorflow::Status::OK());
+ Status::OK());
TF_RET_CHECK(ShapeUtil::ValidateShapeWithOptionalLayout(scale_shape) ==
- tensorflow::Status::OK());
+ Status::OK());
TF_RET_CHECK(ShapeUtil::ValidateShapeWithOptionalLayout(mean_shape) ==
- tensorflow::Status::OK());
+ Status::OK());
TF_RET_CHECK(ShapeUtil::ValidateShapeWithOptionalLayout(variance_shape) ==
- tensorflow::Status::OK());
+ Status::OK());
if (feature_index >= ShapeUtil::Rank(operand_shape)) {
return InvalidArgument(
std::make_pair(instruction, operand_indices));
}
}
- return tensorflow::Status::OK();
+ return Status::OK();
};
for (auto* comp : module->MakeNonfusionComputations()) {
#ifndef TENSORFLOW_COMPILER_XLA_SERVICE_INTERFACE_H_
#define TENSORFLOW_COMPILER_XLA_SERVICE_INTERFACE_H_
+#include "tensorflow/compiler/xla/status.h"
#include "tensorflow/compiler/xla/xla.pb.h"
#include "tensorflow/compiler/xla/xla_data.pb.h"
-#include "tensorflow/core/lib/core/status.h"
namespace xla {
virtual ~ServiceInterface() = default;
// TODO(b/31824348): Convert to use StatusOr.
- virtual tensorflow::Status TransferToClient(
- const TransferToClientRequest* arg, TransferToClientResponse* result) = 0;
+ virtual Status TransferToClient(const TransferToClientRequest* arg,
+ TransferToClientResponse* result) = 0;
- virtual tensorflow::Status TransferToServer(
- const TransferToServerRequest* arg, TransferToServerResponse* result) = 0;
+ virtual Status TransferToServer(const TransferToServerRequest* arg,
+ TransferToServerResponse* result) = 0;
- virtual tensorflow::Status TransferToInfeed(
- const TransferToInfeedRequest* arg, TransferToInfeedResponse* result) = 0;
+ virtual Status TransferToInfeed(const TransferToInfeedRequest* arg,
+ TransferToInfeedResponse* result) = 0;
- virtual tensorflow::Status TransferFromOutfeed(
- const TransferFromOutfeedRequest* arg,
- TransferFromOutfeedResponse* result) = 0;
+ virtual Status TransferFromOutfeed(const TransferFromOutfeedRequest* arg,
+ TransferFromOutfeedResponse* result) = 0;
- virtual tensorflow::Status ResetDevice(const ResetDeviceRequest* arg,
- ResetDeviceResponse* result) = 0;
+ virtual Status ResetDevice(const ResetDeviceRequest* arg,
+ ResetDeviceResponse* result) = 0;
- virtual tensorflow::Status LoadComputationSnapshot(
+ virtual Status LoadComputationSnapshot(
const LoadComputationSnapshotRequest* request,
LoadComputationSnapshotResponse* result) = 0;
- virtual tensorflow::Status Execute(const ExecuteRequest* arg,
- ExecuteResponse* result) = 0;
+ virtual Status Execute(const ExecuteRequest* arg,
+ ExecuteResponse* result) = 0;
- virtual tensorflow::Status ExecuteGraph(const ExecuteGraphRequest* arg,
- ExecuteResponse* result) = 0;
+ virtual Status ExecuteGraph(const ExecuteGraphRequest* arg,
+ ExecuteResponse* result) = 0;
- virtual tensorflow::Status ExecuteParallel(
- const ExecuteParallelRequest* arg, ExecuteParallelResponse* result) = 0;
+ virtual Status ExecuteParallel(const ExecuteParallelRequest* arg,
+ ExecuteParallelResponse* result) = 0;
- virtual tensorflow::Status ExecuteGraphParallel(
- const ExecuteGraphParallelRequest* arg,
- ExecuteParallelResponse* result) = 0;
+ virtual Status ExecuteGraphParallel(const ExecuteGraphParallelRequest* arg,
+ ExecuteParallelResponse* result) = 0;
- virtual tensorflow::Status ExecuteAsync(const ExecuteAsyncRequest* arg,
- ExecuteAsyncResponse* result) = 0;
+ virtual Status ExecuteAsync(const ExecuteAsyncRequest* arg,
+ ExecuteAsyncResponse* result) = 0;
- virtual tensorflow::Status WaitForExecution(
- const WaitForExecutionRequest* arg, WaitForExecutionResponse* result) = 0;
+ virtual Status WaitForExecution(const WaitForExecutionRequest* arg,
+ WaitForExecutionResponse* result) = 0;
- virtual tensorflow::Status DeconstructTuple(
- const DeconstructTupleRequest* arg, DeconstructTupleResponse* result) = 0;
+ virtual Status DeconstructTuple(const DeconstructTupleRequest* arg,
+ DeconstructTupleResponse* result) = 0;
- virtual tensorflow::Status GetComputationStats(
- const ComputationStatsRequest* arg, ComputationStatsResponse* result) = 0;
+ virtual Status GetComputationStats(const ComputationStatsRequest* arg,
+ ComputationStatsResponse* result) = 0;
- virtual tensorflow::Status GetComputationGraphStats(
+ virtual Status GetComputationGraphStats(
const ComputationGraphStatsRequest* arg,
ComputationStatsResponse* result) = 0;
- virtual tensorflow::Status GetComputationShape(
- const GetComputationShapeRequest* arg,
- GetComputationShapeResponse* result) = 0;
+ virtual Status GetComputationShape(const GetComputationShapeRequest* arg,
+ GetComputationShapeResponse* result) = 0;
- virtual tensorflow::Status GetShape(const GetShapeRequest* arg,
- GetShapeResponse* result) = 0;
+ virtual Status GetShape(const GetShapeRequest* arg,
+ GetShapeResponse* result) = 0;
- virtual tensorflow::Status CreateChannelHandle(
- const CreateChannelHandleRequest* arg,
- CreateChannelHandleResponse* result) = 0;
+ virtual Status CreateChannelHandle(const CreateChannelHandleRequest* arg,
+ CreateChannelHandleResponse* result) = 0;
- virtual tensorflow::Status GetDeviceHandles(
- const GetDeviceHandlesRequest* arg, GetDeviceHandlesResponse* result) = 0;
+ virtual Status GetDeviceHandles(const GetDeviceHandlesRequest* arg,
+ GetDeviceHandlesResponse* result) = 0;
// Methods used by ComputationBuilder.
- virtual tensorflow::Status Computation(const ComputationRequest* arg,
- ComputationResponse* result) = 0;
+ virtual Status Computation(const ComputationRequest* arg,
+ ComputationResponse* result) = 0;
- virtual tensorflow::Status Op(const OpRequest* arg, OpResponse* result) = 0;
+ virtual Status Op(const OpRequest* arg, OpResponse* result) = 0;
- virtual tensorflow::Status GetLocalShape(const GetLocalShapeRequest* arg,
- GetLocalShapeResponse* result) = 0;
+ virtual Status GetLocalShape(const GetLocalShapeRequest* arg,
+ GetLocalShapeResponse* result) = 0;
- virtual tensorflow::Status SetReturnValue(
- const SetReturnValueRequest* arg, SetReturnValueResponse* results) = 0;
+ virtual Status SetReturnValue(const SetReturnValueRequest* arg,
+ SetReturnValueResponse* results) = 0;
- virtual tensorflow::Status IsConstant(const IsConstantRequest* arg,
- IsConstantResponse* result) = 0;
+ virtual Status IsConstant(const IsConstantRequest* arg,
+ IsConstantResponse* result) = 0;
- virtual tensorflow::Status ComputeConstant(
- const ComputeConstantRequest* arg, ComputeConstantResponse* result) = 0;
+ virtual Status ComputeConstant(const ComputeConstantRequest* arg,
+ ComputeConstantResponse* result) = 0;
- virtual tensorflow::Status ComputeConstantGraph(
- const ComputeConstantGraphRequest* arg,
- ComputeConstantResponse* result) = 0;
+ virtual Status ComputeConstantGraph(const ComputeConstantGraphRequest* arg,
+ ComputeConstantResponse* result) = 0;
// Methods used by Computation.
- virtual tensorflow::Status SnapshotComputation(
- const SnapshotComputationRequest* ag,
- SnapshotComputationResponse* result) = 0;
+ virtual Status SnapshotComputation(const SnapshotComputationRequest* ag,
+ SnapshotComputationResponse* result) = 0;
// Methods used by GlobalData.
- virtual tensorflow::Status Unregister(const UnregisterRequest* arg,
- UnregisterResponse* result) = 0;
+ virtual Status Unregister(const UnregisterRequest* arg,
+ UnregisterResponse* result) = 0;
};
} // namespace xla
namespace xla {
-tensorflow::Status ShapeLayout::CopyLayoutFromShape(const Shape& other_shape) {
+Status ShapeLayout::CopyLayoutFromShape(const Shape& other_shape) {
if (!ShapeUtil::Compatible(other_shape, shape_)) {
return InvalidArgument("Shape %s is not compatible with shape %s",
ShapeUtil::HumanString(other_shape).c_str(),
ShapeUtil::HumanString(shape()).c_str());
}
shape_ = other_shape;
- return tensorflow::Status::OK();
+ return Status::OK();
}
-tensorflow::Status ShapeLayout::AssignLayoutToShape(Shape* to_shape) const {
+Status ShapeLayout::AssignLayoutToShape(Shape* to_shape) const {
if (!ShapeUtil::Compatible(*to_shape, shape_)) {
return InvalidArgument("Shape %s is not compatible with shape %s",
ShapeUtil::HumanString(*to_shape).c_str(),
ShapeUtil::HumanString(shape()).c_str());
}
*to_shape = shape_;
- return tensorflow::Status::OK();
+ return Status::OK();
}
void ShapeLayout::SetToDefaultLayout() {
// Assigns the layouts in this ShapeLayout to the Layout fields of the given
// shape. 'to_shape' and the shape of the ShapeLayout object must be
// compatible.
- tensorflow::Status AssignLayoutToShape(Shape* to_shape) const;
+ Status AssignLayoutToShape(Shape* to_shape) const;
// Returns true if the Layouts in this ShapeLayout match the layouts in the
// given shape. Returns false otherwise. If the given shape is not compatible
// Copies the layout from the given shape into this ShapeLayout. 'other_shape'
// must be compatible with the ShapeLayout's shape.
- tensorflow::Status CopyLayoutFromShape(const Shape& other_shape);
+ Status CopyLayoutFromShape(const Shape& other_shape);
// Clears (Layout::Clear) all the Layouts stored in this object.
void Clear();
namespace xla {
-using tensorflow::Status;
+using tensorflow::Status; // TENSORFLOW_STATUS_OK
} // namespace xla
EXPECT_EQ(&kI, thing.ValueOrDie());
}
-// NOTE(tucker): tensorflow::StatusOr does not support this kind
+// NOTE(tucker): StatusOr does not support this kind
// of resize op.
// TEST(StatusOr, StatusOrVectorOfUniquePointerCanResize) {
// using EvilType = std::vector<std::unique_ptr<int>>;
namespace testing {
namespace internal_status {
-inline const ::tensorflow::Status& GetStatus(
- const ::tensorflow::Status& status) {
- return status;
-}
+inline const Status& GetStatus(const Status& status) { return status; }
template <typename T>
-inline const ::tensorflow::Status& GetStatus(const StatusOr<T>& status) {
+inline const Status& GetStatus(const StatusOr<T>& status) {
return status.status();
}
} // namespace internal_status
// The following macros are similar to macros in gmock, but deliberately named
// differently in order to avoid conflicts in files which include both.
-// Macros for testing the results of functions that return tensorflow::Status or
+// Macros for testing the results of functions that return Status or
// StatusOr<T> (for any type T).
-#define EXPECT_IS_OK(expression) \
- EXPECT_EQ(tensorflow::Status::OK(), \
- xla::testing::internal_status::GetStatus(expression))
-#define EXPECT_IS_NOT_OK(expression) \
- EXPECT_NE(tensorflow::Status::OK(), \
- xla::testing::internal_status::GetStatus(expression))
+#define EXPECT_IS_OK(expression) \
+ EXPECT_EQ(Status::OK(), xla::testing::internal_status::GetStatus(expression))
+#define EXPECT_IS_NOT_OK(expression) \
+ EXPECT_NE(Status::OK(), xla::testing::internal_status::GetStatus(expression))
#undef ASSERT_IS_OK
-#define ASSERT_IS_OK(expression) \
- ASSERT_EQ(tensorflow::Status::OK(), \
- xla::testing::internal_status::GetStatus(expression))
+#define ASSERT_IS_OK(expression) \
+ ASSERT_EQ(Status::OK(), xla::testing::internal_status::GetStatus(expression))
#undef ASSERT_IS_NOT_OK
-#define ASSERT_IS_NOT_OK(expression) \
- ASSERT_NE(tensorflow::Status::OK(), \
- xla::testing::internal_status::GetStatus(expression))
+#define ASSERT_IS_NOT_OK(expression) \
+ ASSERT_NE(Status::OK(), xla::testing::internal_status::GetStatus(expression))
#endif // TENSORFLOW_COMPILER_XLA_TEST_HELPERS_H_
error, shape_with_layout));
}
-tensorflow::Status
-ClientLibraryTestBase::ComputeAndCompareLiteralWithAllOutputLayouts(
+Status ClientLibraryTestBase::ComputeAndCompareLiteralWithAllOutputLayouts(
const xla::XlaComputation& computation, const Literal& expected,
tensorflow::gtl::ArraySlice<GlobalData*> arguments,
const std::function<void(const Literal& actual,
"Test with output layout: ",
ShapeUtil::HumanStringWithLayout(layout)));
} while (std::next_permutation(minor_to_major.begin(), minor_to_major.end()));
- return tensorflow::Status::OK();
+ return Status::OK();
}
-tensorflow::Status
-ClientLibraryTestBase::ComputeAndCompareLiteralWithAllInputLayouts(
+Status ClientLibraryTestBase::ComputeAndCompareLiteralWithAllInputLayouts(
const xla::XlaComputation& computation, const Literal& /*expected*/,
tensorflow::gtl::ArraySlice<GlobalData*> arguments,
const std::function<void(const Literal& actual,
// This is a recursive function. It's an std::function instead of a lambda
// because it needs to capture itself. The index is the index of the argument
// to try all layouts for.
- std::function<tensorflow::Status(int64)> choose;
- choose = [&, this](int64 index) -> tensorflow::Status {
+ std::function<Status(int64)> choose;
+ choose = [&, this](int64 index) -> Status {
if (index < arguments.size()) {
// Try out all layouts for the operand.
TF_ASSIGN_OR_RETURN(auto literal,
TF_RETURN_IF_ERROR(choose(index + 1));
arguments_with_layout.pop_back();
layout_strings.pop_back();
- return tensorflow::Status::OK();
+ return Status::OK();
}
std::vector<int64> minor_to_major(ShapeUtil::Rank(literal->shape()));
layout_strings.pop_back();
} while (
std::next_permutation(minor_to_major.begin(), minor_to_major.end()));
- return tensorflow::Status::OK();
+ return Status::OK();
}
// Every argument has an assigned layout.
tensorflow::strings::StrAppend(&error_message, str, " ");
}
verify_output(*actual, error_message);
- return tensorflow::Status::OK();
+ return Status::OK();
};
return choose(0);
}
-tensorflow::Status ClientLibraryTestBase::ComputeAndCompareLiteralWithStatus(
+Status ClientLibraryTestBase::ComputeAndCompareLiteralWithStatus(
XlaBuilder* builder, const Literal& expected,
tensorflow::gtl::ArraySlice<GlobalData*> arguments_passed_in,
const Shape* shape_with_layout) {
TF_ASSIGN_OR_RETURN(auto actual, ExecuteAndTransfer(computation, arguments,
shape_with_layout));
EXPECT_TRUE(LiteralTestUtil::Equal(*expected_ptr, *actual));
- return tensorflow::Status::OK();
+ return Status::OK();
}
-tensorflow::Status ClientLibraryTestBase::ComputeAndCompareLiteralWithStatus(
+Status ClientLibraryTestBase::ComputeAndCompareLiteralWithStatus(
XlaBuilder* builder, const Literal& expected,
tensorflow::gtl::ArraySlice<GlobalData*> arguments_passed_in,
ErrorSpec error, const Shape* shape_with_layout) {
TF_ASSIGN_OR_RETURN(auto actual, ExecuteAndTransfer(computation, arguments,
shape_with_layout));
EXPECT_TRUE(LiteralTestUtil::Near(*expected_ptr, *actual, error));
- return tensorflow::Status::OK();
+ return Status::OK();
}
void ClientLibraryTestBase::ComputeAndCompareR1U8(
const Shape* shape_with_layout = nullptr);
// ComputeAndCompare variant which returns an error status.
- tensorflow::Status ComputeAndCompareLiteralWithStatus(
+ Status ComputeAndCompareLiteralWithStatus(
XlaBuilder* builder, const Literal& expected,
tensorflow::gtl::ArraySlice<GlobalData*> arguments,
const Shape* shape_with_layout = nullptr);
- tensorflow::Status ComputeAndCompareLiteralWithStatus(
+ Status ComputeAndCompareLiteralWithStatus(
XlaBuilder* builder, const Literal& expected,
tensorflow::gtl::ArraySlice<GlobalData*> arguments, ErrorSpec error,
const Shape* shape_with_layout = nullptr);
ExecutionOptions execution_options_;
private:
- tensorflow::Status ComputeAndCompareLiteralWithAllOutputLayouts(
+ Status ComputeAndCompareLiteralWithAllOutputLayouts(
const xla::XlaComputation& computation, const Literal& expected,
tensorflow::gtl::ArraySlice<GlobalData*> arguments,
const std::function<void(const Literal& actual,
const string& error_message)>& verify_output);
- tensorflow::Status ComputeAndCompareLiteralWithAllInputLayouts(
+ Status ComputeAndCompareLiteralWithAllInputLayouts(
const xla::XlaComputation& computation, const Literal& expected,
tensorflow::gtl::ArraySlice<GlobalData*> arguments,
const std::function<void(const Literal& actual,
retry_on_failure);
}
-tensorflow::Status TestAllocator::Deallocate(int device_ordinal,
- se::DeviceMemoryBase mem) {
+Status TestAllocator::Deallocate(int device_ordinal, se::DeviceMemoryBase mem) {
VLOG(2) << "Deallocate(" << device_ordinal << ")";
{
tensorflow::mutex_lock lock(count_mutex_);
StatusOr<OwningDeviceMemory> Allocate(int device_ordinal, uint64 size,
bool retry_on_failure) override;
- tensorflow::Status Deallocate(int device_ordinal,
- se::DeviceMemoryBase mem) override;
+ Status Deallocate(int device_ordinal, se::DeviceMemoryBase mem) override;
// Return the number of allocations that have been performed.
int64 allocation_count() const;
auto p = builder.Parameter(2, ShapeUtil::MakeShape(F32, {}), "param2");
auto computation_status = builder.Build();
- ASSERT_NE(computation_status.status(), tensorflow::Status::OK());
+ ASSERT_NE(computation_status.status(), Status::OK());
}
XLA_TEST_F(ParamsTest, UnusedParameter) {
namespace xla {
-/* static */ tensorflow::Status TextLiteralWriter::WriteToPath(
+/* static */ Status TextLiteralWriter::WriteToPath(
const Literal& literal, tensorflow::StringPiece path) {
std::unique_ptr<tensorflow::WritableFile> f;
auto s = tensorflow::Env::Default()->NewWritableFile(std::string(path), &f);
return s;
}
- tensorflow::Status status;
+ Status status;
tensorflow::WritableFile* f_ptr = f.get();
literal.EachCellAsString(
[f_ptr, &status](tensorflow::gtl::ArraySlice<int64> indices,
// This should be readable by xla::TextLiteralReader.
class TextLiteralWriter {
public:
- static tensorflow::Status WriteToPath(const Literal& literal,
- tensorflow::StringPiece path);
+ static Status WriteToPath(const Literal& literal,
+ tensorflow::StringPiece path);
private:
TF_DISALLOW_COPY_AND_ASSIGN(TextLiteralWriter);
TEST_F(HloParserTest, Empty) {
const string original = "";
auto result = Parse(original);
- EXPECT_NE(tensorflow::Status::OK(), result.status());
+ EXPECT_NE(Status::OK(), result.status());
}
TEST_F(HloParserTest, Garbage) {
const string original = "HloModule thi$ str1ng makes# N0 sen$e @all!*&^%$";
auto result = Parse(original);
- EXPECT_NE(tensorflow::Status::OK(), result.status());
+ EXPECT_NE(Status::OK(), result.status());
}
TEST_F(HloParserTest, WrongOpcode) {
)";
auto result = Parse(original);
- EXPECT_NE(tensorflow::Status::OK(), result.status());
+ EXPECT_NE(Status::OK(), result.status());
}
TEST_F(HloParserTest, WrongShape) {
)";
auto result = Parse(original);
- EXPECT_NE(tensorflow::Status::OK(), result.status());
+ EXPECT_NE(Status::OK(), result.status());
}
TEST_F(HloParserTest, WrongOperandsSize) {
)";
auto result = Parse(original);
- EXPECT_NE(tensorflow::Status::OK(), result.status());
+ EXPECT_NE(Status::OK(), result.status());
}
TEST_F(HloParserTest, OperandNotFound) {
}
)";
auto result = Parse(original);
- EXPECT_NE(tensorflow::Status::OK(), result.status());
+ EXPECT_NE(Status::OK(), result.status());
}
TEST_F(HloParserTest, MoreConstants) {
)";
auto result = Parse(original);
- EXPECT_NE(tensorflow::Status::OK(), result.status());
+ EXPECT_NE(Status::OK(), result.status());
ExpectHasSubstr(result.status().error_message(),
"expects nested array in rank 1, but sees larger");
}
)";
auto result = Parse(original);
- EXPECT_NE(tensorflow::Status::OK(), result.status());
+ EXPECT_NE(Status::OK(), result.status());
ExpectHasSubstr(result.status().error_message(),
"expects nested array in rank 2, but sees 1");
}
)";
auto result = Parse(original);
- EXPECT_NE(tensorflow::Status::OK(), result.status());
+ EXPECT_NE(Status::OK(), result.status());
ExpectHasSubstr(result.status().error_message(),
"expects 3 elements in the [0]th element");
}
)";
auto result = Parse(original);
- EXPECT_NE(tensorflow::Status::OK(), result.status());
+ EXPECT_NE(Status::OK(), result.status());
ExpectHasSubstr(result.status().error_message(),
"is out of range for literal's primitive type F16");
}