#!/usr/bin/env python

# Licensed to the .NET Foundation under one or more agreements.
# The .NET Foundation licenses this file to you under the MIT license.
# See the LICENSE file in the project root for more information.

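"""
Helix continuation runner: executes a first-stage script from the work item
payload and, if it succeeds, zips the secondary payload and queues a follow-up
work item (running RunContinuation via scriptrunner.py) on the next queue.
"""
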
import os.path
import json
import platform
import re
import uuid

import helix.azure_storage
import helix.depcheck
import helix.logs
import helix.proc
import helix.saferequests

from helix.cmdline import command_main
from helix.io import fix_path, zip_directory, add_file_to_zip
from helix.platformutil import is_windows
from helix_test_execution import HelixTestExecution
from helix.settings import settings_from_env
from helix.servicebusrepository import ServiceBusRepository
from helix.workitem import HelixWorkItem

log = helix.logs.get_logger()


def main(args=None):
    def _main(settings, optlist, args):
        """
        Usage::
            continuationrunner
                [--config config.json]
                [--setting name=value]
                --script
                --next_queue
                --next_payload_dir
                [--args arg1 arg2...]
        """
        optdict = dict(optlist)
        log.info("BuildTools Helix Continuation Runner starting")
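
        # '--script' names the first-stage script inside the work item payload;
        # '--next_queue' and '--next_payload_dir' describe where and with what
        # payload the follow-up work item should run.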
        if '--args' in optdict:
            script_arguments = optdict['--args']
            log.info("Script Arguments: " + script_arguments)

        if '--script' in optdict:
            script_to_execute = optdict['--script']
        else:
            log.error("Value for parameter '--script' is required")
            return -1

        if '--next_queue' in optdict:
            next_queue = optdict['--next_queue']
        else:
            log.error("Need a secondary queue id to continue execution.")
            return -1
        if '--next_payload_dir' in optdict:
            next_payload_dir = optdict['--next_payload_dir']
        else:
            log.error("Need a secondary payload to continue execution.")
            return -1
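
        # Run the first-stage script from the unpacked work item payload,
        # forwarding any positional arguments passed to this runner.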
        unpack_dir = fix_path(settings.workitem_payload_dir)

        execution_args = [os.path.join(unpack_dir, script_to_execute)] + args

        return_code = helix.proc.run_and_log_output(
            execution_args,
            cwd=unpack_dir,
            env=None
        )

        if return_code == 0:
            # There is currently no use for it, but this is where we would choose to send out XUnit
            # results if desired at some point.
            log.info("First stage of execution succeeded. Sending a new work item to " + next_queue)
            log.info("Will include contents of " + next_payload_dir)

            settings = settings_from_env()
            # Load client-specific settings
            config_path = os.path.join(settings.config_root, "ClientSettings.json")
            with open(config_path) as config_file:
                settings.__dict__.update(json.load(config_file))
            service_bus_repository = ServiceBusRepository(settings.ServiceBusRoot,
                                                          settings.QueueId,
                                                          settings.LongPollTimeout,
                                                          settings.SAS,
                                                          settings.servicebus_retry_count,
                                                          settings.servicebus_retry_delay
                                                          )
            # For now, we'll use ScriptRunner for this step. Eventually we'll want to either combine the
            # functionality of the two into scriptrunner.py, OR parameterize which script is used (for the
            # 2+ re-queue scenario).
            call_runcontinuation = "/RunnerScripts/scriptrunner/scriptrunner.py --script RunContinuation"
            if is_windows():
                continuation_command = "%HELIX_PYTHONPATH% %HELIX_CORRELATION_PAYLOAD%" + call_runcontinuation + ".cmd"
            else:
                continuation_command = "$HELIX_PYTHONPATH $HELIX_CORRELATION_PAYLOAD" + call_runcontinuation + ".sh"

            # Prep the follow-up work item ...
            new_work_item = HelixWorkItem(
                correlation_id=settings.correlation_id,
                work_item_friendly_name=settings.workitem_friendly_name + ".Execution",
                command=continuation_command,
                results_output_uri=settings.output_uri + "/continuation",
                results_output_write_token=settings.output_write_token,
                results_output_read_token=settings.output_read_token)

            # This may eventually cause trouble if zips with identical names are somehow included inside
            # other payload zips. Chained continuation will be OK, as there will be a new results
            # directory to upload to for each leg.
            new_workitem_payload_name = settings.workitem_friendly_name + ".continuation.zip"
            secondary_zip_path = os.path.join(settings.workitem_working_dir, new_workitem_payload_name)

            zip_directory(secondary_zip_path, next_payload_dir)
            log.info("Zipped into " + secondary_zip_path)

            # Upload the payloads for the job
            upload_client = helix.azure_storage.BlobUploadClient(settings.output_uri,
                                                                 settings.output_write_token,
                                                                 settings.output_read_token)
            new_payload_uri = upload_client.upload(secondary_zip_path, new_workitem_payload_name)
            new_work_item.WorkItemPayloadUris.append(new_payload_uri)

            # Current assumption: there is no need to reuse the correlation payload, but bring the
            # supplemental payload (for scripts).
            # NOTE: We don't currently have a way to access the existing URI, so reusing the payload from
            #       storage would involve plumbing that through or re-uploading it (which can be huge).
            supplemental_payload_path = os.path.join(settings.work_root,
                                                     settings.correlation_id,
                                                     "work", "SupplementalPayload.zip")

            supplemental_payload_uri = upload_client.upload(supplemental_payload_path, "SupplementalPayload.zip")
            log.info("Uploaded " + secondary_zip_path + " to " + new_payload_uri)
            log.info("Uploaded SupplementalPayload.zip to " + supplemental_payload_uri)
            new_work_item.CorrelationPayloadUris.append(supplemental_payload_uri)
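
            # Queue the continuation work item on the secondary Helix queue via Service Bus.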
            if service_bus_repository.post_new_workitem(queue_id=next_queue,
                                                        work_item=new_work_item):
                log.info("Successfully queued new work item.")
            else:
                log.error("Failed to send the work item to Service Bus.")
                return -1

        else:
            log.error("Got non-zero exit code for first stage of execution. Skipping further processing.")

        return return_code

    return command_main(_main, ['script=', 'args=', 'next_queue=', 'next_payload_dir='], args)

if __name__ == '__main__':
    import sys
    sys.exit(main())

helix.depcheck.check_dependencies(__name__)