3 # Licensed to the .NET Foundation under one or more agreements.
4 # The .NET Foundation licenses this file to you under the MIT license.
5 # See the LICENSE file in the project root for more information.
import json
import os.path
import sys

import helix.azure_storage
import helix.depcheck
import helix.logs
import helix.proc
import helix.saferequests

from helix.cmdline import command_main
from helix.io import fix_path, zip_directory, add_file_to_zip
from helix.platformutil import is_windows
from helix.servicebusrepository import ServiceBusRepository
from helix.settings import settings_from_env
from helix.workitem import HelixWorkItem
from helix_test_execution import HelixTestExecution
# Module-level logger shared by every function in this runner script.
log = helix.logs.get_logger()
def _main(settings, optlist, args):
    """Run the first-stage script, then queue a continuation work item.

    Executes the script named by ``--script`` from the unpacked work-item
    payload, passing ``args`` through to it.  On success, zips
    ``--next_payload_dir``, uploads it (plus the SupplementalPayload.zip
    produced by the first stage) to blob storage, and posts a follow-up
    work item to ``--next_queue`` via Service Bus.

    Usage::

        continuationrunner.py
            [--config config.json]
            [--setting name=value]
            --script <script>
            [--args <script arguments>]
            --next_queue <queue id>
            --next_payload_dir <directory>

    :param settings: helix settings object for the current work item.
    :param optlist: getopt-style list of (option, value) pairs.
    :param args: positional arguments forwarded to the executed script.
    :returns: 0 on success, the first stage's non-zero exit code if it
        failed, or -1 for missing arguments / Service Bus send failure.
    """
    optdict = dict(optlist)
    log.info("BuildTools Helix Continuation Runner starting")

    if '--args' in optdict:
        script_arguments = optdict['--args']
        log.info("Script Arguments: " + script_arguments)

    # --script, --next_queue and --next_payload_dir are all mandatory;
    # bail out early with a distinct error for each missing one.
    if '--script' in optdict:
        script_to_execute = optdict['--script']
    else:
        log.error("Value for parameter '--script' is required")
        return -1

    if '--next_queue' in optdict:
        next_queue = optdict['--next_queue']
    else:
        log.error("Need a secondary queue id to continue execution.")
        return -1
    if '--next_payload_dir' in optdict:
        next_payload_dir = optdict['--next_payload_dir']
    else:
        log.error("Need a secondary payload to continue execution.")
        return -1

    unpack_dir = fix_path(settings.workitem_payload_dir)

    execution_args = [os.path.join(unpack_dir, script_to_execute)] + args

    # NOTE(review): keyword arguments reconstructed — confirm against
    # helix.proc.run_and_log_output's signature.
    return_code = helix.proc.run_and_log_output(
        execution_args,
        cwd=unpack_dir,
        env=None)

    if return_code == 0:
        # currently there's no use for it, but here's where we'd choose to send out XUnit results
        # if desired at some point.
        log.info("First stage of execution succeded. Sending a new work item to " + next_queue)
        log.info("Will include contents of " + next_payload_dir)

        settings = settings_from_env()
        # load Client-specific settings
        config_path = os.path.join(settings.config_root, "ClientSettings.json")
        # Fix: close the config file deterministically instead of leaking the
        # handle returned by a bare open() inside json.load().
        with open(config_path) as config_file:
            settings.__dict__.update(json.load(config_file))
        # NOTE(review): the QueueId / SAS arguments were reconstructed —
        # confirm against helix.servicebusrepository.ServiceBusRepository.
        service_bus_repository = ServiceBusRepository(settings.ServiceBusRoot,
                                                      settings.QueueId,
                                                      settings.LongPollTimeout,
                                                      settings.SAS,
                                                      settings.servicebus_retry_count,
                                                      settings.servicebus_retry_delay)
        # For now, we'll use ScriptRunner for this step. Eventually we'll want to either combine functionality
        # of the two into scriptrunner.py, OR parameterize which script is used (for the 2+ re-queue scenario)
        call_runcontinuation = "/RunnerScripts/scriptrunner/scriptrunner.py --script RunContinuation"
        if is_windows():
            continuation_command = "%HELIX_PYTHONPATH% %HELIX_CORRELATION_PAYLOAD%" + call_runcontinuation + ".cmd"
        else:
            # Fix: the POSIX branch had a stray '%' after $HELIX_PYTHONPATH
            # (cmd.exe-style terminator); POSIX shells use plain $VAR.
            continuation_command = "$HELIX_PYTHONPATH $HELIX_CORRELATION_PAYLOAD" + call_runcontinuation + ".sh"

        # Prep the follow-up work item ...
        new_work_item = HelixWorkItem(
            correlation_id=settings.correlation_id,
            work_item_friendly_name=settings.workitem_friendly_name + ".Execution",
            command=continuation_command,
            results_output_uri=settings.output_uri + "/continuation",
            results_output_write_token=settings.output_write_token,
            results_output_read_token=settings.output_read_token)

        # This may eventually cause trouble if zips with identical names are somehow included inside
        # other payload zips. Chained continuation will be OK as there will be a new results
        # directory to upload to for each leg.
        new_workitem_payload_name = settings.workitem_friendly_name + ".continuation.zip"
        secondary_zip_path = os.path.join(settings.workitem_working_dir, new_workitem_payload_name)

        zip_directory(secondary_zip_path, next_payload_dir)
        log.info("Zipped into " + secondary_zip_path)

        # Upload the payloads for the job
        upload_client = helix.azure_storage.BlobUploadClient(settings.output_uri,
                                                             settings.output_write_token,
                                                             settings.output_read_token)
        new_payload_uri = upload_client.upload(secondary_zip_path, new_workitem_payload_name)
        new_work_item.WorkItemPayloadUris.append(new_payload_uri)

        # Current assumption: No need to reuse correlation payload, but bring supplemental (for scripts)
        # NOTE: We don't currently have a way to access the existing Uri, so reusing the payload from
        # storage will involve plumbing that through or re-uploading it (can be huge)
        supplemental_payload_path = os.path.join(settings.work_root,
                                                 settings.correlation_id,
                                                 "work", "SupplementalPayload.zip")

        supplemental_payload_uri = upload_client.upload(supplemental_payload_path, "SupplementalPayload.zip")
        log.info("Uploaded " + secondary_zip_path + " to " + new_payload_uri)
        log.info("Uploaded SupplementalPayload.zip to " + supplemental_payload_uri)
        new_work_item.CorrelationPayloadUris.append(supplemental_payload_uri)

        if service_bus_repository.post_new_workitem(queue_id=next_queue,
                                                    work_item=new_work_item):
            log.info("Successfully queued new work item.")
        else:
            log.error("Failure to send to Service bus.")
            return -1
    else:
        log.error("Got non-zero exit code for first stage of execution. Skipping further processing.")

    return return_code
def main(args=None):
    """Command-line entry point: parse options, then delegate to _main.

    The extraction this file came from dropped the ``def`` header; this
    restores the conventional helix runner entry point around the visible
    ``return command_main(...)`` statement.
    """
    return command_main(_main, ['script=', 'args=', 'next_queue=', 'next_payload_dir='], args)
if __name__ == '__main__':
    # Propagate the runner's exit code to the calling process.
    # NOTE(review): the guard body was dropped by extraction; sys.exit(main())
    # is the conventional helix runner pattern — confirm against upstream.
    sys.exit(main())

# Runs on import as well as direct execution: verify the helix package's
# dependencies are available for this module.
helix.depcheck.check_dependencies(__name__)