order caffe2 ubuntu configs contiguously (#17427)
author: Karl Ostmo <kostmo@gmail.com>
Sat, 23 Feb 2019 04:10:22 +0000 (20:10 -0800)
committer: Facebook Github Bot <facebook-github-bot@users.noreply.github.com>
Sat, 23 Feb 2019 04:18:29 +0000 (20:18 -0800)
Summary:
This makes another purely cosmetic (ordering) change to `config.yml` to facilitate simpler generation logic.

Other changes:
* add some review feedback as comments
* exit with a nonzero status on config.yml mismatch (sketched below)
* produce a diagram for the PyTorch builds
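
A minimal sketch of the nonzero-exit check, simplified from the .circleci/ensure-consistency.py change below (the regeneration step is stubbed out and the error message is illustrative):

    import os
    import subprocess
    import sys
    import tempfile

    def check_consistency(checked_in_file="config.yml"):
        fd, temp_filename = tempfile.mkstemp()
        os.close(fd)
        try:
            # regenerate the YAML into temp_filename here (stubbed out in this sketch)
            subprocess.check_call(["cmp", temp_filename, checked_in_file])
        except subprocess.CalledProcessError:
            # sys.exit() with a string writes it to stderr and exits with status 1
            sys.exit("%s is out of date; please regenerate it." % checked_in_file)
        finally:
            os.remove(temp_filename)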
Pull Request resolved: https://github.com/pytorch/pytorch/pull/17427

Differential Revision: D14197618

Pulled By: kostmo

fbshipit-source-id: 267439d3aa4c0a80801adcde2fa714268865900e

.circleci/README.md
.circleci/cimodel/binary_build_definitions.py
.circleci/cimodel/caffe2_build_definitions.py
.circleci/cimodel/conf_tree.py
.circleci/cimodel/pytorch_build_definitions.py
.circleci/cimodel/visualization.py
.circleci/config.yml
.circleci/ensure-consistency.py
.circleci/regenerate.sh

index 3bf384c..74ac300 100644 (file)
@@ -24,3 +24,13 @@ Furthermore, consistency is enforced within the YAML config itself, by using a s
 multiple parts of the file.
 
 See https://github.com/pytorch/pytorch/issues/17038
+
+
+Future direction
+----------------
+
+### Declaring sparse config subsets
+See comment [here](https://github.com/pytorch/pytorch/pull/17323#pullrequestreview-206945747):
+
+In contrast with a full recursive tree traversal of configuration dimensions,
+> in the future I think we actually want to decrease our matrix somewhat and have only a few mostly-orthogonal builds that test as many different features as possible on PRs, plus a more complete suite on every PR and maybe an almost full suite nightly/weekly (we don't have this yet). Specifying PR jobs in the future might be easier to read with an explicit list when we come to this.
\ No newline at end of file
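
A hypothetical illustration of that contrast (the names and schema below are made up, not the real cimodel structures): a full matrix expands every combination of dimensions, while a sparse PR subset is just an explicit list drawn from that matrix.

    # Full matrix: every combination of (distro, compiler, python version).
    FULL_TREE = [
        ("xenial", [
            ("cuda8",  ["py2", "py3"]),
            ("cuda9",  ["py2", "py3"]),
            ("clang5", ["py3"]),
        ]),
    ]

    def expand(tree):
        return [
            (distro, compiler, pyver)
            for distro, compilers in tree
            for compiler, pyvers in compilers
            for pyver in pyvers
        ]

    # Sparse subset: an explicit, hand-curated list of mostly-orthogonal builds
    # to run on PRs, with the full expansion reserved for nightly/weekly runs.
    PR_JOBS = [
        ("xenial", "cuda9",  "py3"),
        ("xenial", "clang5", "py3"),
    ]

    assert set(PR_JOBS) <= set(expand(FULL_TREE))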
index 3eac714..547041a 100644 (file)
@@ -91,10 +91,10 @@ class Conf(object):
         return d
 
 
-def get_root(smoke):
+def get_root(smoke, name):
 
     return make_build_configs.TopLevelNode(
-        "Builds",
+        name,
         make_build_configs.CONFIG_TREE_DATA,
         smoke,
     )
@@ -102,7 +102,7 @@ def get_root(smoke):
 
 def gen_build_env_list(smoke):
 
-    root = get_root(smoke)
+    root = get_root(smoke, "N/A")
     config_list = conf_tree.dfs(root)
 
     newlist = []
@@ -214,7 +214,7 @@ def add_jobs_and_render(jobs_dict, toplevel_key, smoke, cron_schedule):
 
     jobs_dict[toplevel_key] = d
 
-    graph = visualization.generate_graph(get_root(smoke))
+    graph = visualization.generate_graph(get_root(smoke, toplevel_key))
     graph.draw(toplevel_key + "-config-dimensions.png", prog="twopi")
 
 
index 5205cb6..fbf15b6 100644 (file)
@@ -12,27 +12,22 @@ DOCKER_IMAGE_PATH_BASE = "308535385114.dkr.ecr.us-east-1.amazonaws.com/caffe2/"
 DOCKER_IMAGE_VERSION = 248
 
 
-# TODO Sort the config.yml upstream so the ubuntu configs are contiguous
 CONFIG_HIERARCHY = [
+    (Ver("ubuntu", "14.04"), [
+        (Ver("gcc", "4.8"), ["py2"]),
+        (Ver("gcc", "4.9"), ["py2"]),
+    ]),
     (Ver("ubuntu", "16.04"), [
+        (Ver("cuda", "8.0"), ["py2"]),
         (Ver("cuda", "9.0"), [
+            # TODO make explicit that this is a "secret TensorRT build"
+            #  (see https://github.com/pytorch/pytorch/pull/17323#discussion_r259446749)
             "py2",
             "cmake",
         ]),
         (Ver("cuda", "9.1"), ["py2"]),
         (Ver("mkl"), ["py2"]),
-    ]),
-    (Ver("ubuntu", "14.04"), [
-        (Ver("gcc", "4.8"), ["py2"]),
-    ]),
-    (Ver("ubuntu", "16.04"), [
         (Ver("gcc", "5"), ["onnx_py2"]),
-        (Ver("cuda", "8.0"), ["py2"]),
-    ]),
-    (Ver("ubuntu", "14.04"), [
-        (Ver("gcc", "4.9"), ["py2"]),
-    ]),
-    (Ver("ubuntu", "16.04"), [
         (Ver("clang", "3.8"), ["py2"]),
         (Ver("clang", "3.9"), ["py2"]),
         (Ver("clang", "7"), ["py2"]),
@@ -42,6 +37,8 @@ CONFIG_HIERARCHY = [
         (Ver("cuda", "9.0"), ["py2"]),
     ]),
     (Ver("macos", "10.13"), [
+        # TODO ios and system aren't related. system qualifies where the python comes
+        #  from (use the system python instead of homebrew or anaconda)
         (Ver("ios"), ["py2"]),
         (Ver("system"), ["py2"]),
     ]),
@@ -65,6 +62,7 @@ class Conf(object):
             "android",
         ] or self.get_platform() == "macos"
 
+    # TODO: Eventually we can probably just remove the cudnn7 everywhere.
     def get_cudnn_insertion(self):
 
         omit = self.language == "onnx_py2" \
@@ -128,7 +126,7 @@ class Conf(object):
             tuples.append(("BUILD_IOS", miniutils.quote("1")))
 
         if self.phase == "test":
-            use_cuda_docker = str(self.compiler) not in ["mkl", "gcc4.8", "gcc5"]
+            use_cuda_docker = self.compiler.name == "cuda"
             if use_cuda_docker:
                 tuples.append(("USE_CUDA_DOCKER_RUNTIME", miniutils.quote("1")))
 
@@ -139,6 +137,8 @@ class Conf(object):
             if not self.distro.name == "macos":
                 tuples.append(("BUILD_ONLY", miniutils.quote("1")))
 
+        # TODO: not sure we need the distinction between system and homebrew anymore. Our python handling in cmake
+        #  and setuptools is more robust now than when we first had these.
         if self.distro.name == "macos":
             tuples.append(("PYTHON_INSTALLATION", miniutils.quote("system")))
             tuples.append(("PYTHON_VERSION", miniutils.quote("2")))
@@ -150,7 +150,7 @@ class Conf(object):
         ])
 
         if self.phase == "test":
-            is_large = str(self.compiler) in ["mkl", "gcc4.8"] or self.language == "onnx_py2"
+            is_large = self.compiler.name != "cuda"
 
             resource_class = "large" if is_large else "gpu.medium"
             d["resource_class"] = resource_class
@@ -187,6 +187,7 @@ def get_caffe2_workflows():
     configs = gen_build_list()
 
     # TODO Why don't we build this config?
+    # See https://github.com/pytorch/pytorch/pull/17323#discussion_r259450540
     filtered_configs = filter(lambda x: not (str(x.distro) == "ubuntu14.04" and str(x.compiler) == "gcc4.9"), configs)
 
     x = []
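
The contiguity matters because CONFIG_HIERARCHY is flattened in order into the generated config.yml jobs. A rough sketch of that flattening, ignoring the cudnn tag and the build/test suffix that the real generator adds:

    def flatten(hierarchy):
        # Yields build-environment names such as "caffe2-py2-gcc4.8-ubuntu14.04",
        # in the same order the entries appear in CONFIG_HIERARCHY, so grouping
        # all ubuntu14.04 entries together keeps the emitted jobs contiguous too.
        for distro, compilers in hierarchy:
            for compiler, languages in compilers:
                for language in languages:
                    yield "caffe2-{}-{}-{}".format(language, compiler, distro)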
index 7e3e27c..9548ec7 100644 (file)
@@ -20,11 +20,7 @@ class ConfigNode(object):
         self.props = {}
 
     def get_label(self):
-        label = self.node_name
-        if not label:
-            # FIXME this shouldn't be necessary
-            label = "<None>"
-        return label
+        return self.node_name
 
     def get_children(self):
         return []
index 64c734c..fb8da24 100644 (file)
@@ -2,11 +2,11 @@
 
 from collections import OrderedDict
 
-import cimodel.miniutils as miniutils
-import cimodel.dimensions as dimensions
 import cimodel.conf_tree as conf_tree
-from cimodel.conf_tree import ConfigNode
+import cimodel.dimensions as dimensions
+import cimodel.miniutils as miniutils
 import cimodel.visualization as visualization
+from cimodel.conf_tree import ConfigNode
 
 
 DOCKER_IMAGE_PATH_BASE = "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/"
@@ -14,19 +14,6 @@ DOCKER_IMAGE_PATH_BASE = "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/"
 DOCKER_IMAGE_VERSION = 282
 
 
-class DockerHide(object):
-    """
-    Used for hiding name elements for construction of the Docker image path.
-    Name elements that are wrapped in this object may be part of the build configuration name, but
-    shall be excluded from the Docker path.
-    """
-    def __init__(self, val):
-        self.val = val
-
-    def __str__(self):
-        return self.val
-
-
 class Conf(object):
     def __init__(self,
                  distro,
@@ -43,38 +30,44 @@ class Conf(object):
         self.pyver = pyver
         self.parms = parms
         self.cuda_version = cuda_version
+
+        # TODO expand this to cover all the USE_* that we want to test for
+        #  tensorrt, leveldb, lmdb, redis, opencv, mkldnn, ideep, etc.
+        # (from https://github.com/pytorch/pytorch/pull/17323#discussion_r259453608)
         self.is_xla = is_xla
+
         self.restrict_phases = restrict_phases
         self.gpu_resource = gpu_resource
         self.dependent_tests = dependent_tests or []
         self.parent_build = parent_build
 
-    def get_parms(self):
+    # TODO: Eliminate the special casing for docker paths
+    # In the short term, we *will* need to support special casing as docker images are merged for caffe2 and pytorch
+    def get_parms(self, for_docker):
         leading = ["pytorch"]
-        if self.is_xla:
-            leading.append(DockerHide("xla"))
+        if self.is_xla and not for_docker:
+            leading.append("xla")
 
         cuda_parms = []
         if self.cuda_version:
             cuda_parms.extend(["cuda" + self.cuda_version, "cudnn7"])
         return leading + ["linux", self.distro] + cuda_parms + self.parms
 
-    # TODO: Eliminate this special casing in docker paths
     def gen_docker_image_path(self):
 
-        build_env_pieces = list(map(str, filter(lambda x: type(x) is not DockerHide, self.get_parms())))
-        base_build_env_name = "-".join(build_env_pieces)
+        parms_source = self.parent_build or self
+        base_build_env_name = "-".join(parms_source.get_parms(True))
 
         return miniutils.quote(DOCKER_IMAGE_PATH_BASE + base_build_env_name + ":" + str(DOCKER_IMAGE_VERSION))
 
     def get_build_job_name_pieces(self, build_or_test):
-        return self.get_parms() + [build_or_test]
+        return self.get_parms(False) + [build_or_test]
 
     def gen_build_name(self, build_or_test):
         return ("_".join(map(str, self.get_build_job_name_pieces(build_or_test)))).replace(".", "_")
 
     def get_dependents(self):
-        return self.dependent_tests
+        return self.dependent_tests or []
 
     def gen_yaml_tree(self, build_or_test):
 
@@ -115,8 +108,14 @@ class Conf(object):
         if self.is_xla or phase == "test":
             val = OrderedDict()
             if self.is_xla:
+                # this makes the job run on merges rather than new PRs
+                # TODO Many of the binary build jobs on PRs could be moved to this mode as well
                 val["filters"] = {"branches": {"only": ["master"]}}
 
+            # TODO When merging the caffe2 and pytorch jobs, it might be convenient for a while to make a
+            #  caffe2 test job dependent on a pytorch build job. This way we could quickly dedup the repeated
+            #  build of pytorch in the caffe2 build job, and just run the caffe2 tests off of a completed
+            #  pytorch build job (from https://github.com/pytorch/pytorch/pull/17323#discussion_r259452641)
             if phase == "test":
                 dependency_build = self.parent_build or self
                 val["requires"] = [dependency_build.gen_build_name("build")]
@@ -144,70 +143,63 @@ class HiddenConf(object):
         return self.name
 
 
-xenial_parent_config = Conf(
-    "xenial",
-    ["py3"],
-    pyver="3.6",
-    cuda_version="8",
-    gpu_resource="medium",
-)
-
-
-# TODO This is a short-term hack until it is converted to recursive tree traversal
-xenial_dependent_configs = [
-    Conf("xenial",
-         ["py3", DockerHide("multigpu")],
-         pyver="3.6",
-         cuda_version="8",
-         restrict_phases=["test"],
-         gpu_resource="large",
-         parent_build=xenial_parent_config,
-         ),
-    Conf("xenial",
-         ["py3", DockerHide("NO_AVX2")],
-         pyver="3.6",
-         cuda_version="8",
-         restrict_phases=["test"],
-         gpu_resource="medium",
-         parent_build=xenial_parent_config,
-         ),
-    Conf("xenial",
-         ["py3", DockerHide("NO_AVX"), DockerHide("NO_AVX2")],
-         pyver="3.6",
-         cuda_version="8",
-         restrict_phases=["test"],
-         gpu_resource="medium",
-         parent_build=xenial_parent_config,
-         ),
-
-    HiddenConf("pytorch_short_perf_test_gpu", parent_build=xenial_parent_config),
-    HiddenConf("pytorch_doc_push", parent_build=xenial_parent_config),
-]
+# TODO Convert these to graph nodes
+def gen_dependent_configs(xenial_parent_config):
+
+    extra_parms = [
+        (["multigpu"], "large"),
+        (["NO_AVX2"], "medium"),
+        (["NO_AVX", "NO_AVX2"], "medium"),
+    ]
 
+    configs = []
+    for parms, gpu in extra_parms:
 
-xenial_parent_config.dependent_tests = xenial_dependent_configs
+        c = Conf(
+            "xenial",
+            ["py3"] + parms,
+            pyver="3.6",
+            cuda_version="8",
+            restrict_phases=["test"],
+            gpu_resource=gpu,
+            parent_build=xenial_parent_config,
+        )
 
+        configs.append(c)
 
-# TODO this hierarchy is a work in progress
+    for x in ["pytorch_short_perf_test_gpu", "pytorch_doc_push"]:
+        configs.append(HiddenConf(x, parent_build=xenial_parent_config))
+
+    return configs
+
+
+# TODO make the schema consistent between "trusty" and "xenial"
 CONFIG_TREE_DATA = [
     ("trusty", [
-        ("py2.7.9", []),
-        ("py2.7", []),
-        ("py3.5", []),
-        ("py3.6", [
+        ("2.7.9", []),
+        ("2.7", []),
+        ("3.5", []),
+        ("3.6", [
             ("gcc4.8", []),
             ("gcc5.4", [False, True]),
             ("gcc7", []),
         ]),
-        ("pynightly", []),
+        ("nightly", []),
     ]),
     ("xenial", [
         ("clang", [
-            ("X", [("py3", [])]),
+            ("5", [("3.6", [])]),
         ]),
         ("cuda", [
             ("8", [("3.6", [])]),
             ("9", [
+                # Note there are magic strings here
+                # https://github.com/pytorch/pytorch/blob/master/.jenkins/pytorch/build.sh#L21
+                # and
+                # https://github.com/pytorch/pytorch/blob/master/.jenkins/pytorch/build.sh#L143
+                # and
+                # https://github.com/pytorch/pytorch/blob/master/.jenkins/pytorch/build.sh#L153
+                # (from https://github.com/pytorch/pytorch/pull/17323#discussion_r259453144)
                 ("2.7", []),
                 ("3.6", []),
             ]),
@@ -218,8 +210,13 @@ CONFIG_TREE_DATA = [
 ]
 
 
+def get_major_pyver(dotted_version):
+    parts = dotted_version.split(".")
+    return "py" + parts[0]
+
+
 def get_root():
-    return TopLevelNode("Pytorch Builds", CONFIG_TREE_DATA)
+    return TopLevelNode("PyTorch Builds", CONFIG_TREE_DATA)
 
 
 def gen_tree():
@@ -239,127 +236,197 @@ class TopLevelNode(ConfigNode):
 
 
 class DistroConfigNode(ConfigNode):
-    def __init__(self, parent, distro_name, py_tree):
+    def __init__(self, parent, distro_name, subtree):
         super(DistroConfigNode, self).__init__(parent, distro_name)
 
-        self.py_tree = py_tree
+        self.subtree = subtree
         self.props["distro_name"] = distro_name
 
     def get_children(self):
-        return [PyVerConfigNode(self, k, v) for k, v in self.py_tree]
+
+        if self.find_prop("distro_name") == "trusty":
+            return [PyVerConfigNode(self, k, v) for k, v in self.subtree]
+        else:
+            return [XenialCompilerConfigNode(self, v, subtree) for (v, subtree) in self.subtree]
 
 
 class PyVerConfigNode(ConfigNode):
-    def __init__(self, parent, pyver, compiler_tree):
+    def __init__(self, parent, pyver, subtree):
         super(PyVerConfigNode, self).__init__(parent, pyver)
 
-        self.compiler_tree = compiler_tree
+        self.subtree = subtree
         self.props["pyver"] = pyver
 
-    def get_children(self):
+        self.props["abbreviated_pyver"] = get_major_pyver(pyver)
 
-        if self.find_prop("distro_name") == "trusty":
-            return [CompilerConfigNode(self, v, xla_options) for (v, xla_options) in self.compiler_tree]
-        else:
-            return []
+    def get_children(self):
+        return [CompilerConfigNode(self, v, xla_options) for (v, xla_options) in self.subtree]
 
 
 class CompilerConfigNode(ConfigNode):
-    def __init__(self, parent, compiler_name, xla_options):
+    def __init__(self, parent, compiler_name, subtree):
         super(CompilerConfigNode, self).__init__(parent, compiler_name)
 
-        self.xla_options = xla_options
+        self.props["compiler_name"] = compiler_name
+
+        self.subtree = subtree
 
     def get_children(self):
-        return [XlaConfigNode(self, v) for v in self.xla_options]
+        return [XlaConfigNode(self, v) for v in self.subtree]
+
+
+class XenialCompilerConfigNode(ConfigNode):
+    def __init__(self, parent, compiler_name, subtree):
+        super(XenialCompilerConfigNode, self).__init__(parent, compiler_name)
+
+        self.props["compiler_name"] = compiler_name
+
+        self.subtree = subtree
+
+    def get_children(self):
+        return [XenialCompilerVersionConfigNode(self, k, v) for (k, v) in self.subtree]
+
+
+class XenialCompilerVersionConfigNode(ConfigNode):
+    def __init__(self, parent, compiler_version, subtree):
+        super(XenialCompilerVersionConfigNode, self).__init__(parent, compiler_version)
+
+        self.subtree = subtree
+
+        self.props["compiler_version"] = compiler_version
+
+    def get_children(self):
+        return [XenialPythonVersionConfigNode(self, v) for (v, _) in self.subtree]
+
+
+class XenialPythonVersionConfigNode(ConfigNode):
+    def __init__(self, parent, python_version):
+        super(XenialPythonVersionConfigNode, self).__init__(parent, python_version)
+
+        self.props["pyver"] = python_version
+        self.props["abbreviated_pyver"] = get_major_pyver(python_version)
+
+    def get_children(self):
+        return []
 
 
 class XlaConfigNode(ConfigNode):
     def __init__(self, parent, xla_enabled):
         super(XlaConfigNode, self).__init__(parent, "XLA=" + str(xla_enabled))
 
-        self.xla_enabled = xla_enabled
+        self.props["is_xla"] = xla_enabled
 
     def get_children(self):
         return []
 
 
-BUILD_ENV_LIST = [
-    Conf("trusty", ["py2.7.9"]),
-    Conf("trusty", ["py2.7"]),
-    Conf("trusty", ["py3.5"]),
-    Conf("trusty", ["py3.6", "gcc4.8"]),
-    Conf("trusty", ["py3.6", "gcc5.4"]),
-    Conf("trusty", ["py3.6", "gcc5.4"], is_xla=True),
-    Conf("trusty", ["py3.6", "gcc7"]),
-    Conf("trusty", ["pynightly"]),
-    Conf("xenial", ["py3", "clang5", "asan"], pyver="3.6"),
-    xenial_parent_config,
-    Conf("xenial",
-         ["py2"],
-         pyver="2.7",
-         cuda_version="9",
-         gpu_resource="medium"),
-    Conf("xenial",
-         ["py3"],
-         pyver="3.6",
-         cuda_version="9",
-         gpu_resource="medium"),
-    Conf("xenial",
-         ["py3", "gcc7"],
-         pyver="3.6",
-         cuda_version="9.2",
-         gpu_resource="medium"),
-    Conf("xenial",
-         ["py3", "gcc7"],
-         pyver="3.6",
-         cuda_version="10",
-         restrict_phases=["build"]),  # TODO why does this not have a test?
-]
+def instantiate_configs():
+
+    config_list = []
+
+    root = get_root()
+    found_configs = conf_tree.dfs(root)
+    for fc in found_configs:
+
+        distro_name = fc.find_prop("distro_name")
+
+        python_version = None
+        if distro_name == "xenial":
+            python_version = fc.find_prop("pyver")
+
+        if distro_name == "xenial":
+            parms_list = [fc.find_prop("abbreviated_pyver")]
+        else:
+            parms_list = ["py" + fc.find_prop("pyver")]
+
+        cuda_version = None
+        if fc.find_prop("compiler_name") == "cuda":
+            cuda_version = fc.find_prop("compiler_version")
+
+        compiler_name = fc.find_prop("compiler_name")
+        if compiler_name and compiler_name != "cuda":
+            gcc_version = compiler_name + (fc.find_prop("compiler_version") or "")
+            parms_list.append(gcc_version)
+
+            if compiler_name == "clang":
+                parms_list.append("asan")
+
+        if cuda_version in ["9.2", "10"]:
+            # TODO The gcc version is orthogonal to CUDA version?
+            parms_list.append("gcc7")
+
+        is_xla = fc.find_prop("is_xla") or False
+
+        gpu_resource = None
+        if cuda_version and cuda_version != "10":
+            gpu_resource = "medium"
+
+        c = Conf(
+            distro_name,
+            parms_list,
+            python_version,
+            cuda_version,
+            is_xla,
+            None,
+            gpu_resource,
+        )
+
+        if cuda_version == "8":
+            c.dependent_tests = gen_dependent_configs(c)
+
+        config_list.append(c)
+
+    return config_list
 
 
 def add_build_env_defs(jobs_dict):
 
     mydict = OrderedDict()
 
-    def append_steps(build_list):
-        for conf_options in filter(lambda x: type(x) is not HiddenConf, build_list):
+    config_list = instantiate_configs()
+
+    for c in config_list:
 
-            def append_environment_dict(build_or_test):
-                d = conf_options.gen_yaml_tree(build_or_test)
-                mydict[conf_options.gen_build_name(build_or_test)] = d
+        for phase in dimensions.PHASES:
 
-            phases = dimensions.PHASES
-            if conf_options.restrict_phases:
-                phases = conf_options.restrict_phases
+            # TODO why does this not have a test?
+            if phase == "test" and c.cuda_version == "10":
+                continue
 
-            for phase in phases:
-                append_environment_dict(phase)
+            d = c.gen_yaml_tree(phase)
+            mydict[c.gen_build_name(phase)] = d
 
-            # Recurse
-            dependents = conf_options.get_dependents()
-            if dependents:
-                append_steps(dependents)
+            if phase == "test":
+                for x in filter(lambda x: type(x) is not HiddenConf, c.get_dependents()):
 
-    append_steps(BUILD_ENV_LIST)
+                    d = x.gen_yaml_tree(phase)
+                    mydict[x.gen_build_name(phase)] = d
 
+    # this is the circleci api version and probably never changes
     jobs_dict["version"] = 2
     jobs_dict["jobs"] = mydict
 
     graph = visualization.generate_graph(get_root())
-    graph.draw("aaa-config-dimensions.png", prog="twopi")
+    graph.draw("pytorch-config-dimensions.png", prog="twopi")
 
 
 def get_workflow_list():
 
+    config_list = instantiate_configs()
+
     x = []
-    for conf_options in BUILD_ENV_LIST:
+    for conf_options in config_list:
 
         phases = dimensions.PHASES
         if conf_options.restrict_phases:
             phases = conf_options.restrict_phases
 
         for phase in phases:
+
+            # TODO why does this not have a test?
+            if phase == "test" and conf_options.cuda_version == "10":
+                continue
+
             x.append(conf_options.gen_workflow_yaml_item(phase))
 
         # TODO convert to recursion
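
For reference, a hypothetical sketch of the kind of traversal conf_tree.dfs performs (the real implementation may differ): walk the ConfigNode tree depth-first and collect the leaves, whose accumulated props, looked up via find_prop, each describe one concrete build configuration.

    def dfs(root):
        """Depth-first traversal returning the leaf ConfigNodes."""
        leaves = []
        stack = [root]
        while stack:
            node = stack.pop()
            children = node.get_children()
            if children:
                # reversed so children are visited in their declared order
                stack.extend(reversed(children))
            else:
                leaves.append(node)
        return leaves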
index 9227646..2f472c2 100644 (file)
@@ -1,3 +1,5 @@
+#!/usr/bin/env python3
+
 """
 This module encapsulates dependencies on pygraphviz
 """
index 0eae5a4..7820821 100644 (file)
@@ -1471,6 +1471,40 @@ jobs:
             chmod a+x .jenkins/pytorch/macos-build.sh
             unbuffer .jenkins/pytorch/macos-build.sh 2>&1 | ts
 
+  caffe2_py2_gcc4_8_ubuntu14_04_build:
+    environment:
+      BUILD_ENVIRONMENT: "caffe2-py2-gcc4.8-ubuntu14.04-build"
+      DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/caffe2/py2-gcc4.8-ubuntu14.04:248"
+    <<: *caffe2_linux_build_defaults
+
+  caffe2_py2_gcc4_8_ubuntu14_04_test:
+    environment:
+      BUILD_ENVIRONMENT: "caffe2-py2-gcc4.8-ubuntu14.04-test"
+      DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/caffe2/py2-gcc4.8-ubuntu14.04:248"
+    resource_class: large
+    <<: *caffe2_linux_test_defaults
+
+  caffe2_py2_gcc4_9_ubuntu14_04_build:
+    environment:
+      BUILD_ENVIRONMENT: "caffe2-py2-gcc4.9-ubuntu14.04-build"
+      DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/caffe2/py2-gcc4.9-ubuntu14.04:248"
+      BUILD_ONLY: "1"
+    <<: *caffe2_linux_build_defaults
+
+  caffe2_py2_cuda8_0_cudnn7_ubuntu16_04_build:
+    environment:
+      BUILD_ENVIRONMENT: "caffe2-py2-cuda8.0-cudnn7-ubuntu16.04-build"
+      DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/caffe2/py2-cuda8.0-cudnn7-ubuntu16.04:248"
+    <<: *caffe2_linux_build_defaults
+
+  caffe2_py2_cuda8_0_cudnn7_ubuntu16_04_test:
+    environment:
+      BUILD_ENVIRONMENT: "caffe2-py2-cuda8.0-cudnn7-ubuntu16.04-test"
+      USE_CUDA_DOCKER_RUNTIME: "1"
+      DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/caffe2/py2-cuda8.0-cudnn7-ubuntu16.04:248"
+    resource_class: gpu.medium
+    <<: *caffe2_linux_test_defaults
+
   caffe2_py2_cuda9_0_cudnn7_ubuntu16_04_build:
     environment:
       BUILD_ENVIRONMENT: "caffe2-py2-cuda9.0-cudnn7-ubuntu16.04-build"
@@ -1526,19 +1560,6 @@ jobs:
     resource_class: large
     <<: *caffe2_linux_test_defaults
 
-  caffe2_py2_gcc4_8_ubuntu14_04_build:
-    environment:
-      BUILD_ENVIRONMENT: "caffe2-py2-gcc4.8-ubuntu14.04-build"
-      DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/caffe2/py2-gcc4.8-ubuntu14.04:248"
-    <<: *caffe2_linux_build_defaults
-
-  caffe2_py2_gcc4_8_ubuntu14_04_test:
-    environment:
-      BUILD_ENVIRONMENT: "caffe2-py2-gcc4.8-ubuntu14.04-test"
-      DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/caffe2/py2-gcc4.8-ubuntu14.04:248"
-    resource_class: large
-    <<: *caffe2_linux_test_defaults
-
   caffe2_onnx_py2_gcc5_ubuntu16_04_build:
     environment:
       BUILD_ENVIRONMENT: "caffe2-onnx-py2-gcc5-ubuntu16.04-build"
@@ -1552,27 +1573,6 @@ jobs:
     resource_class: large
     <<: *caffe2_linux_test_defaults
 
-  caffe2_py2_cuda8_0_cudnn7_ubuntu16_04_build:
-    environment:
-      BUILD_ENVIRONMENT: "caffe2-py2-cuda8.0-cudnn7-ubuntu16.04-build"
-      DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/caffe2/py2-cuda8.0-cudnn7-ubuntu16.04:248"
-    <<: *caffe2_linux_build_defaults
-
-  caffe2_py2_cuda8_0_cudnn7_ubuntu16_04_test:
-    environment:
-      BUILD_ENVIRONMENT: "caffe2-py2-cuda8.0-cudnn7-ubuntu16.04-test"
-      USE_CUDA_DOCKER_RUNTIME: "1"
-      DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/caffe2/py2-cuda8.0-cudnn7-ubuntu16.04:248"
-    resource_class: gpu.medium
-    <<: *caffe2_linux_test_defaults
-
-  caffe2_py2_gcc4_9_ubuntu14_04_build:
-    environment:
-      BUILD_ENVIRONMENT: "caffe2-py2-gcc4.9-ubuntu14.04-build"
-      DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/caffe2/py2-gcc4.9-ubuntu14.04:248"
-      BUILD_ONLY: "1"
-    <<: *caffe2_linux_build_defaults
-
   caffe2_py2_clang3_8_ubuntu16_04_build:
     environment:
       BUILD_ENVIRONMENT: "caffe2-py2-clang3.8-ubuntu16.04-build"
@@ -3079,6 +3079,14 @@ workflows:
             - pytorch_macos_10_13_py3_build
       - pytorch_macos_10_13_cuda9_2_cudnn7_py3_build
 
+      - caffe2_py2_gcc4_8_ubuntu14_04_build
+      - caffe2_py2_gcc4_8_ubuntu14_04_test:
+          requires:
+            - caffe2_py2_gcc4_8_ubuntu14_04_build
+      - caffe2_py2_cuda8_0_cudnn7_ubuntu16_04_build
+      - caffe2_py2_cuda8_0_cudnn7_ubuntu16_04_test:
+          requires:
+            - caffe2_py2_cuda8_0_cudnn7_ubuntu16_04_build
       - caffe2_py2_cuda9_0_cudnn7_ubuntu16_04_build
       - caffe2_py2_cuda9_0_cudnn7_ubuntu16_04_test:
           requires:
@@ -3095,18 +3103,10 @@ workflows:
       - caffe2_py2_mkl_ubuntu16_04_test:
           requires:
             - caffe2_py2_mkl_ubuntu16_04_build
-      - caffe2_py2_gcc4_8_ubuntu14_04_build
-      - caffe2_py2_gcc4_8_ubuntu14_04_test:
-          requires:
-            - caffe2_py2_gcc4_8_ubuntu14_04_build
       - caffe2_onnx_py2_gcc5_ubuntu16_04_build
       - caffe2_onnx_py2_gcc5_ubuntu16_04_test:
           requires:
             - caffe2_onnx_py2_gcc5_ubuntu16_04_build
-      - caffe2_py2_cuda8_0_cudnn7_ubuntu16_04_build
-      - caffe2_py2_cuda8_0_cudnn7_ubuntu16_04_test:
-          requires:
-            - caffe2_py2_cuda8_0_cudnn7_ubuntu16_04_build
       - caffe2_py2_clang3_8_ubuntu16_04_build
       - caffe2_py2_clang3_9_ubuntu16_04_build
       - caffe2_py2_clang7_ubuntu16_04_build
index 53b275d..972eded 100755 (executable)
@@ -28,9 +28,9 @@ def check_consistency():
         generate_config_yml.stitch_sources(fh)
 
     try:
-        subprocess.check_call('cmp "%s" "%s"' % (temp_filename, CHECKED_IN_FILE), shell=True)
+        subprocess.check_call(["cmp", temp_filename, CHECKED_IN_FILE])
     except subprocess.CalledProcessError:
-        sys.stderr.write(ERROR_MESSAGE_TEMPLATE % (CHECKED_IN_FILE, REGENERATION_SCRIPT, PARENT_DIR, README_PATH))
+        sys.exit(ERROR_MESSAGE_TEMPLATE % (CHECKED_IN_FILE, REGENERATION_SCRIPT, PARENT_DIR, README_PATH))
     finally:
         os.remove(temp_filename)
 
index abf68ee..5afaadf 100755 (executable)
@@ -1,6 +1,6 @@
 #!/bin/bash -xe
 
 # Allows this script to be invoked from any directory:
-cd `dirname "$0"`
+cd $(dirname "$0")
 
 ./generate_config_yml.py > config.yml