interpreter_platform,
stream_executor::interpreter::InitializeXlaInterpreterPlatform());
-DECLARE_MODULE_INITIALIZER(multi_platform_manager);
-
// Note that module initialization sequencing is not supported in the
// open-source project, so this will be a no-op there.
REGISTER_MODULE_INITIALIZER_SEQUENCE(interpreter_platform, multi_platform_manager);
REGISTER_MODULE_INITIALIZER(cuda_platform,
stream_executor::InitializeCudaPlatform());
-DECLARE_MODULE_INITIALIZER(multi_platform_manager);
// Note that module initialization sequencing is not supported in the
// open-source project, so this will be a no-op there.
REGISTER_MODULE_INITIALIZER_SEQUENCE(cuda_platform, multi_platform_manager);
REGISTER_MODULE_INITIALIZER(host_platform,
stream_executor::host::InitializeHostPlatform());
-DECLARE_MODULE_INITIALIZER(multi_platform_manager);
// Note that module initialization sequencing is not supported in the
// open-source project, so this will be a no-op there.
REGISTER_MODULE_INITIALIZER_SEQUENCE(host_platform, multi_platform_manager);
#include <map>
#include <memory>
+#include "tensorflow/stream_executor/lib/initialize.h"
#include "tensorflow/stream_executor/lib/status.h"
#include "tensorflow/stream_executor/lib/statusor.h"
#include "tensorflow/stream_executor/platform.h"
} // namespace stream_executor
+// multi_platform_manager.cc will define this instance. Includers of this header
+// should use
+// REGISTER_MODULE_INITIALIZER_SEQUENCE(my_platform, multi_platform_manager);
+DECLARE_MODULE_INITIALIZER(multi_platform_manager);
+
#endif // TENSORFLOW_STREAM_EXECUTOR_MULTI_PLATFORM_MANAGER_H_