DOCKER_IMAGE_VERSION = 248
-# TODO Sort the config.yml upstream so the ubuntu configs are contiguous
CONFIG_HIERARCHY = [
+ (Ver("ubuntu", "14.04"), [
+ (Ver("gcc", "4.8"), ["py2"]),
+ (Ver("gcc", "4.9"), ["py2"]),
+ ]),
(Ver("ubuntu", "16.04"), [
+ (Ver("cuda", "8.0"), ["py2"]),
(Ver("cuda", "9.0"), [
+ # TODO make explicit that this is a "secret TensorRT build"
+ # (see https://github.com/pytorch/pytorch/pull/17323#discussion_r259446749)
"py2",
"cmake",
]),
(Ver("cuda", "9.1"), ["py2"]),
(Ver("mkl"), ["py2"]),
- ]),
- (Ver("ubuntu", "14.04"), [
- (Ver("gcc", "4.8"), ["py2"]),
- ]),
- (Ver("ubuntu", "16.04"), [
(Ver("gcc", "5"), ["onnx_py2"]),
- (Ver("cuda", "8.0"), ["py2"]),
- ]),
- (Ver("ubuntu", "14.04"), [
- (Ver("gcc", "4.9"), ["py2"]),
- ]),
- (Ver("ubuntu", "16.04"), [
(Ver("clang", "3.8"), ["py2"]),
(Ver("clang", "3.9"), ["py2"]),
(Ver("clang", "7"), ["py2"]),
(Ver("cuda", "9.0"), ["py2"]),
]),
(Ver("macos", "10.13"), [
+ # TODO ios and system aren't related. system qualifies where the python comes
+ # from (use the system python instead of homebrew or anaconda)
(Ver("ios"), ["py2"]),
(Ver("system"), ["py2"]),
]),
"android",
] or self.get_platform() == "macos"
+ # TODO: Eventually we can probably just remove the cudnn7 everywhere.
def get_cudnn_insertion(self):
omit = self.language == "onnx_py2" \
tuples.append(("BUILD_IOS", miniutils.quote("1")))
if self.phase == "test":
- use_cuda_docker = str(self.compiler) not in ["mkl", "gcc4.8", "gcc5"]
+ use_cuda_docker = self.compiler.name == "cuda"
if use_cuda_docker:
tuples.append(("USE_CUDA_DOCKER_RUNTIME", miniutils.quote("1")))
if not self.distro.name == "macos":
tuples.append(("BUILD_ONLY", miniutils.quote("1")))
+ # TODO: not sure we need the distinction between system and homebrew anymore. Our python handling in cmake
+ # and setuptools is more robust now than when we first had these.
if self.distro.name == "macos":
tuples.append(("PYTHON_INSTALLATION", miniutils.quote("system")))
tuples.append(("PYTHON_VERSION", miniutils.quote("2")))
])
if self.phase == "test":
- is_large = str(self.compiler) in ["mkl", "gcc4.8"] or self.language == "onnx_py2"
+ is_large = self.compiler.name != "cuda"
resource_class = "large" if is_large else "gpu.medium"
d["resource_class"] = resource_class
configs = gen_build_list()
# TODO Why don't we build this config?
+ # See https://github.com/pytorch/pytorch/pull/17323#discussion_r259450540
filtered_configs = filter(lambda x: not (str(x.distro) == "ubuntu14.04" and str(x.compiler) == "gcc4.9"), configs)
x = []
from collections import OrderedDict
-import cimodel.miniutils as miniutils
-import cimodel.dimensions as dimensions
import cimodel.conf_tree as conf_tree
-from cimodel.conf_tree import ConfigNode
+import cimodel.dimensions as dimensions
+import cimodel.miniutils as miniutils
import cimodel.visualization as visualization
+from cimodel.conf_tree import ConfigNode
DOCKER_IMAGE_PATH_BASE = "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/"
DOCKER_IMAGE_VERSION = 282
-class DockerHide(object):
- """
- Used for hiding name elements for construction of the Docker image path.
- Name elements that are wrapped in this object may be part of the build configuration name, but
- shall be excluded from the Docker path.
- """
- def __init__(self, val):
- self.val = val
-
- def __str__(self):
- return self.val
-
-
class Conf(object):
def __init__(self,
distro,
self.pyver = pyver
self.parms = parms
self.cuda_version = cuda_version
+
+ # TODO expand this to cover all the USE_* that we want to test for
+ # tensorrt, leveldb, lmdb, redis, opencv, mkldnn, ideep, etc.
+ # (from https://github.com/pytorch/pytorch/pull/17323#discussion_r259453608)
self.is_xla = is_xla
+
self.restrict_phases = restrict_phases
self.gpu_resource = gpu_resource
self.dependent_tests = dependent_tests or []
self.parent_build = parent_build
- def get_parms(self):
+ # TODO: Eliminate the special casing for docker paths
+ # In the short term, we *will* need to support special casing as docker images are merged for caffe2 and pytorch
+ def get_parms(self, for_docker):
leading = ["pytorch"]
- if self.is_xla:
- leading.append(DockerHide("xla"))
+ if self.is_xla and not for_docker:
+ leading.append("xla")
cuda_parms = []
if self.cuda_version:
cuda_parms.extend(["cuda" + self.cuda_version, "cudnn7"])
return leading + ["linux", self.distro] + cuda_parms + self.parms
- # TODO: Eliminate this special casing in docker paths
def gen_docker_image_path(self):
- build_env_pieces = list(map(str, filter(lambda x: type(x) is not DockerHide, self.get_parms())))
- base_build_env_name = "-".join(build_env_pieces)
+ parms_source = self.parent_build or self
+ base_build_env_name = "-".join(parms_source.get_parms(True))
return miniutils.quote(DOCKER_IMAGE_PATH_BASE + base_build_env_name + ":" + str(DOCKER_IMAGE_VERSION))
def get_build_job_name_pieces(self, build_or_test):
- return self.get_parms() + [build_or_test]
+ return self.get_parms(False) + [build_or_test]
def gen_build_name(self, build_or_test):
return ("_".join(map(str, self.get_build_job_name_pieces(build_or_test)))).replace(".", "_")
def get_dependents(self):
- return self.dependent_tests
+ return self.dependent_tests or []
def gen_yaml_tree(self, build_or_test):
if self.is_xla or phase == "test":
val = OrderedDict()
if self.is_xla:
+ # this makes the job run on merges rather than new PRs
+ # TODO Many of the binary build jobs on PRs could be moved to this mode as well
val["filters"] = {"branches": {"only": ["master"]}}
+ # TODO When merging the caffe2 and pytorch jobs, it might be convenient for a while to make a
+ # caffe2 test job dependent on a pytorch build job. This way we could quickly dedup the repeated
+ # build of pytorch in the caffe2 build job, and just run the caffe2 tests off of a completed
+ # pytorch build job (from https://github.com/pytorch/pytorch/pull/17323#discussion_r259452641)
if phase == "test":
dependency_build = self.parent_build or self
val["requires"] = [dependency_build.gen_build_name("build")]
return self.name
-xenial_parent_config = Conf(
- "xenial",
- ["py3"],
- pyver="3.6",
- cuda_version="8",
- gpu_resource="medium",
-)
-
-
-# TODO This is a short-term hack until it is converted to recursive tree traversal
-xenial_dependent_configs = [
- Conf("xenial",
- ["py3", DockerHide("multigpu")],
- pyver="3.6",
- cuda_version="8",
- restrict_phases=["test"],
- gpu_resource="large",
- parent_build=xenial_parent_config,
- ),
- Conf("xenial",
- ["py3", DockerHide("NO_AVX2")],
- pyver="3.6",
- cuda_version="8",
- restrict_phases=["test"],
- gpu_resource="medium",
- parent_build=xenial_parent_config,
- ),
- Conf("xenial",
- ["py3", DockerHide("NO_AVX"), DockerHide("NO_AVX2")],
- pyver="3.6",
- cuda_version="8",
- restrict_phases=["test"],
- gpu_resource="medium",
- parent_build=xenial_parent_config,
- ),
-
- HiddenConf("pytorch_short_perf_test_gpu", parent_build=xenial_parent_config),
- HiddenConf("pytorch_doc_push", parent_build=xenial_parent_config),
-]
+# TODO Convert these to graph nodes
+def gen_dependent_configs(xenial_parent_config):
+
+ extra_parms = [
+ (["multigpu"], "large"),
+ (["NO_AVX2"], "medium"),
+ (["NO_AVX", "NO_AVX2"], "medium"),
+ ]
+ configs = []
+ for parms, gpu in extra_parms:
-xenial_parent_config.dependent_tests = xenial_dependent_configs
+ c = Conf(
+ "xenial",
+ ["py3"] + parms,
+ pyver="3.6",
+ cuda_version="8",
+ restrict_phases=["test"],
+ gpu_resource=gpu,
+ parent_build=xenial_parent_config,
+ )
+ configs.append(c)
-# TODO this hierarchy is a work in progress
+ for x in ["pytorch_short_perf_test_gpu", "pytorch_doc_push"]:
+ configs.append(HiddenConf(x, parent_build=xenial_parent_config))
+
+ return configs
+
+
+# TODO make the schema consistent between "trusty" and "xenial"
CONFIG_TREE_DATA = [
("trusty", [
- ("py2.7.9", []),
- ("py2.7", []),
- ("py3.5", []),
- ("py3.6", [
+ ("2.7.9", []),
+ ("2.7", []),
+ ("3.5", []),
+ ("3.6", [
("gcc4.8", []),
("gcc5.4", [False, True]),
("gcc7", []),
]),
- ("pynightly", []),
+ ("nightly", []),
]),
("xenial", [
("clang", [
- ("X", [("py3", [])]),
+ ("5", [("3.6", [])]),
]),
("cuda", [
("8", [("3.6", [])]),
("9", [
+ # Note there are magic strings here
+ # https://github.com/pytorch/pytorch/blob/master/.jenkins/pytorch/build.sh#L21
+ # and
+ # https://github.com/pytorch/pytorch/blob/master/.jenkins/pytorch/build.sh#L143
+ # and
+ # https://github.com/pytorch/pytorch/blob/master/.jenkins/pytorch/build.sh#L153
+ # (from https://github.com/pytorch/pytorch/pull/17323#discussion_r259453144)
("2.7", []),
("3.6", []),
]),
]
+def get_major_pyver(dotted_version):
+ parts = dotted_version.split(".")
+ return "py" + parts[0]
+
+
def get_root():
- return TopLevelNode("Pytorch Builds", CONFIG_TREE_DATA)
+ return TopLevelNode("PyTorch Builds", CONFIG_TREE_DATA)
def gen_tree():
class DistroConfigNode(ConfigNode):
- def __init__(self, parent, distro_name, py_tree):
+ def __init__(self, parent, distro_name, subtree):
super(DistroConfigNode, self).__init__(parent, distro_name)
- self.py_tree = py_tree
+ self.subtree = subtree
self.props["distro_name"] = distro_name
def get_children(self):
- return [PyVerConfigNode(self, k, v) for k, v in self.py_tree]
+
+ if self.find_prop("distro_name") == "trusty":
+ return [PyVerConfigNode(self, k, v) for k, v in self.subtree]
+ else:
+ return [XenialCompilerConfigNode(self, v, subtree) for (v, subtree) in self.subtree]
class PyVerConfigNode(ConfigNode):
- def __init__(self, parent, pyver, compiler_tree):
+ def __init__(self, parent, pyver, subtree):
super(PyVerConfigNode, self).__init__(parent, pyver)
- self.compiler_tree = compiler_tree
+ self.subtree = subtree
self.props["pyver"] = pyver
- def get_children(self):
+ self.props["abbreviated_pyver"] = get_major_pyver(pyver)
- if self.find_prop("distro_name") == "trusty":
- return [CompilerConfigNode(self, v, xla_options) for (v, xla_options) in self.compiler_tree]
- else:
- return []
+ def get_children(self):
+ return [CompilerConfigNode(self, v, xla_options) for (v, xla_options) in self.subtree]
class CompilerConfigNode(ConfigNode):
- def __init__(self, parent, compiler_name, xla_options):
+ def __init__(self, parent, compiler_name, subtree):
super(CompilerConfigNode, self).__init__(parent, compiler_name)
- self.xla_options = xla_options
+ self.props["compiler_name"] = compiler_name
+
+ self.subtree = subtree
def get_children(self):
- return [XlaConfigNode(self, v) for v in self.xla_options]
+ return [XlaConfigNode(self, v) for v in self.subtree]
+
+
+class XenialCompilerConfigNode(ConfigNode):
+ def __init__(self, parent, compiler_name, subtree):
+ super(XenialCompilerConfigNode, self).__init__(parent, compiler_name)
+
+ self.props["compiler_name"] = compiler_name
+
+ self.subtree = subtree
+
+ def get_children(self):
+ return [XenialCompilerVersionConfigNode(self, k, v) for (k, v) in self.subtree]
+
+
+class XenialCompilerVersionConfigNode(ConfigNode):
+ def __init__(self, parent, compiler_version, subtree):
+ super(XenialCompilerVersionConfigNode, self).__init__(parent, compiler_version)
+
+ self.subtree = subtree
+
+ self.props["compiler_version"] = compiler_version
+
+ def get_children(self):
+ return [XenialPythonVersionConfigNode(self, v) for (v, _) in self.subtree]
+
+
+class XenialPythonVersionConfigNode(ConfigNode):
+ def __init__(self, parent, python_version):
+ super(XenialPythonVersionConfigNode, self).__init__(parent, python_version)
+
+ self.props["pyver"] = python_version
+ self.props["abbreviated_pyver"] = get_major_pyver(python_version)
+
+ def get_children(self):
+ return []
class XlaConfigNode(ConfigNode):
def __init__(self, parent, xla_enabled):
super(XlaConfigNode, self).__init__(parent, "XLA=" + str(xla_enabled))
- self.xla_enabled = xla_enabled
+ self.props["is_xla"] = xla_enabled
def get_children(self):
return []
-BUILD_ENV_LIST = [
- Conf("trusty", ["py2.7.9"]),
- Conf("trusty", ["py2.7"]),
- Conf("trusty", ["py3.5"]),
- Conf("trusty", ["py3.6", "gcc4.8"]),
- Conf("trusty", ["py3.6", "gcc5.4"]),
- Conf("trusty", ["py3.6", "gcc5.4"], is_xla=True),
- Conf("trusty", ["py3.6", "gcc7"]),
- Conf("trusty", ["pynightly"]),
- Conf("xenial", ["py3", "clang5", "asan"], pyver="3.6"),
- xenial_parent_config,
- Conf("xenial",
- ["py2"],
- pyver="2.7",
- cuda_version="9",
- gpu_resource="medium"),
- Conf("xenial",
- ["py3"],
- pyver="3.6",
- cuda_version="9",
- gpu_resource="medium"),
- Conf("xenial",
- ["py3", "gcc7"],
- pyver="3.6",
- cuda_version="9.2",
- gpu_resource="medium"),
- Conf("xenial",
- ["py3", "gcc7"],
- pyver="3.6",
- cuda_version="10",
- restrict_phases=["build"]), # TODO why does this not have a test?
-]
+def instantiate_configs():
+
+ config_list = []
+
+ root = get_root()
+ found_configs = conf_tree.dfs(root)
+ for fc in found_configs:
+
+ distro_name = fc.find_prop("distro_name")
+
+ python_version = None
+ if distro_name == "xenial":
+ python_version = fc.find_prop("pyver")
+
+ if distro_name == "xenial":
+ parms_list = [fc.find_prop("abbreviated_pyver")]
+ else:
+ parms_list = ["py" + fc.find_prop("pyver")]
+
+ cuda_version = None
+ if fc.find_prop("compiler_name") == "cuda":
+ cuda_version = fc.find_prop("compiler_version")
+
+ compiler_name = fc.find_prop("compiler_name")
+ if compiler_name and compiler_name != "cuda":
+ gcc_version = compiler_name + (fc.find_prop("compiler_version") or "")
+ parms_list.append(gcc_version)
+
+ if compiler_name == "clang":
+ parms_list.append("asan")
+
+ if cuda_version in ["9.2", "10"]:
+ # TODO The gcc version is orthogonal to CUDA version?
+ parms_list.append("gcc7")
+
+ is_xla = fc.find_prop("is_xla") or False
+
+ gpu_resource = None
+ if cuda_version and cuda_version != "10":
+ gpu_resource = "medium"
+
+ c = Conf(
+ distro_name,
+ parms_list,
+ python_version,
+ cuda_version,
+ is_xla,
+ None,
+ gpu_resource,
+ )
+
+ if cuda_version == "8":
+ c.dependent_tests = gen_dependent_configs(c)
+
+ config_list.append(c)
+
+ return config_list
def add_build_env_defs(jobs_dict):
mydict = OrderedDict()
- def append_steps(build_list):
- for conf_options in filter(lambda x: type(x) is not HiddenConf, build_list):
+ config_list = instantiate_configs()
+
+ for c in config_list:
- def append_environment_dict(build_or_test):
- d = conf_options.gen_yaml_tree(build_or_test)
- mydict[conf_options.gen_build_name(build_or_test)] = d
+ for phase in dimensions.PHASES:
- phases = dimensions.PHASES
- if conf_options.restrict_phases:
- phases = conf_options.restrict_phases
+ # TODO why does this not have a test?
+ if phase == "test" and c.cuda_version == "10":
+ continue
- for phase in phases:
- append_environment_dict(phase)
+ d = c.gen_yaml_tree(phase)
+ mydict[c.gen_build_name(phase)] = d
- # Recurse
- dependents = conf_options.get_dependents()
- if dependents:
- append_steps(dependents)
+ if phase == "test":
+ for x in filter(lambda x: type(x) is not HiddenConf, c.get_dependents()):
- append_steps(BUILD_ENV_LIST)
+ d = x.gen_yaml_tree(phase)
+ mydict[x.gen_build_name(phase)] = d
+ # This is the CircleCI API version, which probably never changes.
jobs_dict["version"] = 2
jobs_dict["jobs"] = mydict
graph = visualization.generate_graph(get_root())
- graph.draw("aaa-config-dimensions.png", prog="twopi")
+ graph.draw("pytorch-config-dimensions.png", prog="twopi")
def get_workflow_list():
+ config_list = instantiate_configs()
+
x = []
- for conf_options in BUILD_ENV_LIST:
+ for conf_options in config_list:
phases = dimensions.PHASES
if conf_options.restrict_phases:
phases = conf_options.restrict_phases
for phase in phases:
+
+ # TODO why does this not have a test?
+ if phase == "test" and conf_options.cuda_version == "10":
+ continue
+
x.append(conf_options.gen_workflow_yaml_item(phase))
# TODO convert to recursion
chmod a+x .jenkins/pytorch/macos-build.sh
unbuffer .jenkins/pytorch/macos-build.sh 2>&1 | ts
+ caffe2_py2_gcc4_8_ubuntu14_04_build:
+ environment:
+ BUILD_ENVIRONMENT: "caffe2-py2-gcc4.8-ubuntu14.04-build"
+ DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/caffe2/py2-gcc4.8-ubuntu14.04:248"
+ <<: *caffe2_linux_build_defaults
+
+ caffe2_py2_gcc4_8_ubuntu14_04_test:
+ environment:
+ BUILD_ENVIRONMENT: "caffe2-py2-gcc4.8-ubuntu14.04-test"
+ DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/caffe2/py2-gcc4.8-ubuntu14.04:248"
+ resource_class: large
+ <<: *caffe2_linux_test_defaults
+
+ caffe2_py2_gcc4_9_ubuntu14_04_build:
+ environment:
+ BUILD_ENVIRONMENT: "caffe2-py2-gcc4.9-ubuntu14.04-build"
+ DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/caffe2/py2-gcc4.9-ubuntu14.04:248"
+ BUILD_ONLY: "1"
+ <<: *caffe2_linux_build_defaults
+
+ caffe2_py2_cuda8_0_cudnn7_ubuntu16_04_build:
+ environment:
+ BUILD_ENVIRONMENT: "caffe2-py2-cuda8.0-cudnn7-ubuntu16.04-build"
+ DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/caffe2/py2-cuda8.0-cudnn7-ubuntu16.04:248"
+ <<: *caffe2_linux_build_defaults
+
+ caffe2_py2_cuda8_0_cudnn7_ubuntu16_04_test:
+ environment:
+ BUILD_ENVIRONMENT: "caffe2-py2-cuda8.0-cudnn7-ubuntu16.04-test"
+ USE_CUDA_DOCKER_RUNTIME: "1"
+ DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/caffe2/py2-cuda8.0-cudnn7-ubuntu16.04:248"
+ resource_class: gpu.medium
+ <<: *caffe2_linux_test_defaults
+
caffe2_py2_cuda9_0_cudnn7_ubuntu16_04_build:
environment:
BUILD_ENVIRONMENT: "caffe2-py2-cuda9.0-cudnn7-ubuntu16.04-build"
resource_class: large
<<: *caffe2_linux_test_defaults
- caffe2_py2_gcc4_8_ubuntu14_04_build:
- environment:
- BUILD_ENVIRONMENT: "caffe2-py2-gcc4.8-ubuntu14.04-build"
- DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/caffe2/py2-gcc4.8-ubuntu14.04:248"
- <<: *caffe2_linux_build_defaults
-
- caffe2_py2_gcc4_8_ubuntu14_04_test:
- environment:
- BUILD_ENVIRONMENT: "caffe2-py2-gcc4.8-ubuntu14.04-test"
- DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/caffe2/py2-gcc4.8-ubuntu14.04:248"
- resource_class: large
- <<: *caffe2_linux_test_defaults
-
caffe2_onnx_py2_gcc5_ubuntu16_04_build:
environment:
BUILD_ENVIRONMENT: "caffe2-onnx-py2-gcc5-ubuntu16.04-build"
resource_class: large
<<: *caffe2_linux_test_defaults
- caffe2_py2_cuda8_0_cudnn7_ubuntu16_04_build:
- environment:
- BUILD_ENVIRONMENT: "caffe2-py2-cuda8.0-cudnn7-ubuntu16.04-build"
- DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/caffe2/py2-cuda8.0-cudnn7-ubuntu16.04:248"
- <<: *caffe2_linux_build_defaults
-
- caffe2_py2_cuda8_0_cudnn7_ubuntu16_04_test:
- environment:
- BUILD_ENVIRONMENT: "caffe2-py2-cuda8.0-cudnn7-ubuntu16.04-test"
- USE_CUDA_DOCKER_RUNTIME: "1"
- DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/caffe2/py2-cuda8.0-cudnn7-ubuntu16.04:248"
- resource_class: gpu.medium
- <<: *caffe2_linux_test_defaults
-
- caffe2_py2_gcc4_9_ubuntu14_04_build:
- environment:
- BUILD_ENVIRONMENT: "caffe2-py2-gcc4.9-ubuntu14.04-build"
- DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/caffe2/py2-gcc4.9-ubuntu14.04:248"
- BUILD_ONLY: "1"
- <<: *caffe2_linux_build_defaults
-
caffe2_py2_clang3_8_ubuntu16_04_build:
environment:
BUILD_ENVIRONMENT: "caffe2-py2-clang3.8-ubuntu16.04-build"
- pytorch_macos_10_13_py3_build
- pytorch_macos_10_13_cuda9_2_cudnn7_py3_build
+ - caffe2_py2_gcc4_8_ubuntu14_04_build
+ - caffe2_py2_gcc4_8_ubuntu14_04_test:
+ requires:
+ - caffe2_py2_gcc4_8_ubuntu14_04_build
+ - caffe2_py2_cuda8_0_cudnn7_ubuntu16_04_build
+ - caffe2_py2_cuda8_0_cudnn7_ubuntu16_04_test:
+ requires:
+ - caffe2_py2_cuda8_0_cudnn7_ubuntu16_04_build
- caffe2_py2_cuda9_0_cudnn7_ubuntu16_04_build
- caffe2_py2_cuda9_0_cudnn7_ubuntu16_04_test:
requires:
- caffe2_py2_mkl_ubuntu16_04_test:
requires:
- caffe2_py2_mkl_ubuntu16_04_build
- - caffe2_py2_gcc4_8_ubuntu14_04_build
- - caffe2_py2_gcc4_8_ubuntu14_04_test:
- requires:
- - caffe2_py2_gcc4_8_ubuntu14_04_build
- caffe2_onnx_py2_gcc5_ubuntu16_04_build
- caffe2_onnx_py2_gcc5_ubuntu16_04_test:
requires:
- caffe2_onnx_py2_gcc5_ubuntu16_04_build
- - caffe2_py2_cuda8_0_cudnn7_ubuntu16_04_build
- - caffe2_py2_cuda8_0_cudnn7_ubuntu16_04_test:
- requires:
- - caffe2_py2_cuda8_0_cudnn7_ubuntu16_04_build
- caffe2_py2_clang3_8_ubuntu16_04_build
- caffe2_py2_clang3_9_ubuntu16_04_build
- caffe2_py2_clang7_ubuntu16_04_build