From: 최종헌/MDE Lab(SR)/삼성전자 Date: Wed, 29 May 2024 08:18:48 +0000 (+0900) Subject: Add launcher-action.yml (#550) X-Git-Tag: accepted/tizen/unified/20240621.010434~5 X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=56e62c404bbccb259bae6cba718e0fd6e6d25f98;p=platform%2Fcore%2Fdotnet%2Flauncher.git Add launcher-action.yml (#550) --- diff --git a/.github/workflows/launcher-action.yml b/.github/workflows/launcher-action.yml new file mode 100644 index 0000000..52bbc69 --- /dev/null +++ b/.github/workflows/launcher-action.yml @@ -0,0 +1,275 @@ +name: "Launcher Action" + +on: + pull_request_target: + branches: + - tizen + - action + +jobs: + Tizen_Unified_GBS_Build: + runs-on: [ code-linux, code-large ] + container: + image: actions-docker.bart.sec.samsung.net/docker-gbs-base:ubuntu-20.04 + options: --user root --privileged + steps: + - name: proxy setting + uses: code-actions/setup-proxy@main + with: + location: seoulrnd + + - name: Setup .NET + uses: code-actions/setup-dotnet@v1 + with: + dotnet-version: '6.0.310' + + - name: Checkout + uses: code-actions/checkout@v3 + + - name: Build + run: | + gbs -c .github/gbs.conf build -A armv7l --include-all --clean -P Tizen-Unified + if: success() + + - name: Upload artifacts + uses: code-actions/upload-artifact@v3 + with: + name: launcher-rpm-Tizen-Unified-armv7l + path: .GBS-ROOT/launcher-Unified/local/repos/Tizen_Unified/armv7l/RPMS/*.rpm + if: success() + + - name: Leave comment + if: failure() + uses: code-actions/github-script@v5 + with: + script: | + github.rest.issues.createComment({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: context.issue.number, + body: 'Please check the build status.' + }) + + Tizen_Unified_launcher_performance: + runs-on: [ self-hosted ] + needs: Tizen_Unified_GBS_Build + steps: + - name: Checkout + uses: code-actions/checkout@v3 + + - name: Connect RPI4 + shell: bash + run: | + sdb devices + sdb -d root on + sdb -d shell mount -o remount,rw / + sdb -d shell osu --resize + echo "device 10000000d15e1aff is ready." 
+ if: success() + + - name: Download artifacts + uses: code-actions/download-artifact@v3 + with: + name: launcher-rpm-Tizen-Unified-armv7l + path: ./ + if: success() + + - name: Install App + shell: bash + run: | + sdb -d install ./tests/Performance/org.tizen.example.AppCommon.Tizen.Mobile-1.0.0.tpk + sdb -d install ./tests/Performance/org.tizen.example.ApplicationControl.Tizen.Mobile-1.0.0.tpk + sdb -d install ./tests/Performance/org.tizen.example.Puzzle.Tizen.Mobile-1.0.0.tpk + sdb -d install ./tests/Performance/org.tizen.example.Settings.Tizen.Mobile-1.0.0.tpk + sdb -d install ./tests/Performance/org.tizen.example.System_info.Tizen.Mobile-1.0.0.tpk + sdb -d install ./tests/Performance/org.tizen.example.Xamarin.Hello.F_HUB.Tizen-1.0.0.tpk + if: success() + + - name: Run Performance Before PR + shell: bash + run: | + ./tests/Performance/measure -o ./tests/Performance/result_before.log + if: success() + + - name: Install RPM + shell: bash + run: | + ls ./ + sdb -d push ./dotnet-launcher-6.0.9-1.armv7l.rpm /root + sdb -d shell rpm -Uvh --force /root/dotnet-launcher-6.0.9-1.armv7l.rpm + sdb -d shell "setcap cap_setgid,cap_sys_admin+ei /usr/bin/dotnet && setcap cap_setgid,cap_sys_admin+ei /usr/bin/dotnet-loader && setcap cap_setgid,cap_sys_admin+ei /usr/bin/dotnet-launcher && setcap cap_setgid,cap_sys_admin+ei /usr/bin/dotnet-hydra-loader" + sdb -d shell "killall -9 dotnet-loader" + sleep 30 + if: success() + + - name: Install App + shell: bash + run: | + sdb -d install ./tests/Performance/org.tizen.example.AppCommon.Tizen.Mobile-1.0.0.tpk + sdb -d install ./tests/Performance/org.tizen.example.ApplicationControl.Tizen.Mobile-1.0.0.tpk + sdb -d install ./tests/Performance/org.tizen.example.Puzzle.Tizen.Mobile-1.0.0.tpk + sdb -d install ./tests/Performance/org.tizen.example.Settings.Tizen.Mobile-1.0.0.tpk + sdb -d install ./tests/Performance/org.tizen.example.System_info.Tizen.Mobile-1.0.0.tpk + sdb -d install ./tests/Performance/org.tizen.example.Xamarin.Hello.F_HUB.Tizen-1.0.0.tpk + if: success() + + - name: Run Performance After PR + shell: bash + run: | + ./tests/Performance/measure -o ./tests/Performance/result_after.log + if: success() + + - name: Compare Performance + shell: bash + run: | + echo "### Before Performance Result ###" > ./tests/Performance/result.log + ./tests/Performance/measure -f ./tests/Performance/result_before.log >> ./tests/Performance/result.log + echo -e "\n### After Performance Result ###" >> ./tests/Performance/result.log + ./tests/Performance/measure -f ./tests/Performance/result_after.log >> ./tests/Performance/result.log + echo -e "\n### Compare Performance Result ###" >> ./tests/Performance/result.log + ./tests/Performance/measure --compare ./tests/Performance/result_before.log ./tests/Performance/result_after.log >> ./tests/Performance/result.log + if: success() + + - name: Result Performance + shell: bash + run: | + cat ./tests/Performance/result.log + sed -i '1s/^/\`\`\`\n/' ./tests/Performance/result.log + echo -e "\n" >> ./tests/Performance/result.log + sdb -d shell "cat /etc/info.ini" >> ./tests/Performance/result.log + echo "\`\`\`" >> ./tests/Performance/result.log + if: success() + + - name: Comment Result + run: | + ./scripts/github-comment.sh ${{github.event.pull_request.number}} ./tests/Performance/result.log + if: success() + + Tizen_Unified_launcher_tc: + runs-on: [ self-hosted ] + needs: Tizen_Unified_launcher_performance + steps: + - name: Checkout + uses: code-actions/checkout@v3 + + - name: Build TC + shell: bash + run: | + 
./tests/Apps/BuildTPK.py + if: success() + + - name: Connect RPI4 + shell: bash + run: | + sdb devices + sdb -d root on + sdb -d shell mount -o remount,rw / + sdb -d shell osu --resize + echo "device 10000000d15e1aff is ready." + if: success() + + - name: Download artifacts + uses: code-actions/download-artifact@v3 + with: + name: launcher-rpm-Tizen-Unified-armv7l + path: ./ + if: success() + + - name: Install RPM + shell: bash + run: | + ls ./ + sdb -d push ./dotnet-launcher-6.0.9-1.armv7l.rpm /root + sdb -d shell rpm -Uvh --force /root/dotnet-launcher-6.0.9-1.armv7l.rpm + sdb -d shell "setcap cap_setgid,cap_sys_admin+ei /usr/bin/dotnet && setcap cap_setgid,cap_sys_admin+ei /usr/bin/dotnet-loader && setcap cap_setgid,cap_sys_admin+ei /usr/bin/dotnet-launcher && setcap cap_setgid,cap_sys_admin+ei /usr/bin/dotnet-hydra-loader" + sdb -d shell "killall -9 dotnet-loader" + sdb -d shell "cat /etc/info.ini" >> ./tests/TCs/device.info + sleep 30 + if: success() + + - name: Run TC + shell: bash + run: | + ./tests/TCs/ALL.py + if: success() + + - name: Result TC + shell: bash + run: | + cat ./tests/TCs/result.log + sed -i '1s/^/\`\`\`\n/' ./tests/TCs/result.log + echo -e "\n" >> ./tests/TCs/result.log + cat ./tests/TCs/device.info >> ./tests/TCs/result.log + echo "\`\`\`" >> ./tests/TCs/result.log + if: success() + + - name: Comment Result + run: | + ./scripts/github-comment.sh ${{github.event.pull_request.number}} ./tests/TCs/result.log + if: success() + + Tizen_Dotnet_Benchmark: + runs-on: [ self-hosted ] + needs: Tizen_Unified_launcher_tc + steps: + - name: Checkout + uses: code-actions/checkout@v3 + + - name: Connect RPI4 + shell: bash + run: | + sleep 60 + sdb devices + sdb -d root on + sdb -d shell mount -o remount,rw / + echo "device 10000000d15e1aff is ready." + if: success() + + - name: Install App + shell: bash + run: | + sdb -d install ./tests/Benchmark/org.tizen.dotnet.TizenBenchmark-1.0.0.tpk + if: success() + + - name: Run Benchmark + shell: bash + run: | + sdb -d shell "rm /tmp/BenchmarkDotnet/*.log" + sdb -d shell "app_launcher -k org.tizen.dotnet.TizenBenchmark" + sdb -d shell "app_launcher -s org.tizen.dotnet.TizenBenchmark" + if: success() + + - name: Print&Pull Log + shell: bash + run: | + sdb shell "dlogutil -c" + sdb shell "dlogutil -v time STDOUT" > stdout.log & + DLOG_STREAMER_PID=$! 
+ echo $DLOG_STREAMER_PID + while inotifywait -qqre modify stdout.log; + do + GET_LOG="$(tail -1 stdout.log)" + echo $GET_LOG + if [[ $GET_LOG =~ "##### Tizen.NET Benchmark END #####" ]]; then + sdb -d pull /tmp/BenchmarkDotnet ./tests/Benchmark/ + kill -9 $DLOG_STREAMER_PID > /dev/null 2>&1 + break + fi + done + if: success() + + - name: Result Benchmark + run: | + echo "\`\`\`" > ./tests/Benchmark/benchmark.result + for file in ./tests/Benchmark/*.log; do if [[ $file =~ "BenchmarkRun-" ]]; then continue; fi; echo $file; sed -n '/ Summary /,/ Legends /p' $file | grep -v '*'; done >> ./tests/Benchmark/benchmark.result + echo -e "\n" >> ./tests/Benchmark/benchmark.result + sdb -d shell "cat /etc/info.ini" >> ./tests/Benchmark/benchmark.result + echo "\`\`\`" >> ./tests/Benchmark/benchmark.result + cat ./tests/Benchmark/benchmark.result + if: success() + + - name: Comment Result + run: | + ./scripts/github-comment.sh ${{github.event.pull_request.number}} ./tests/Benchmark/benchmark.result + if: success() + diff --git a/script/github-comment.sh b/script/github-comment.sh new file mode 100644 index 0000000..fb4fba5 --- /dev/null +++ b/script/github-comment.sh @@ -0,0 +1,14 @@ +#!/bin/bash +html_content="$(cat $2)" + +json_payload=$(jq -n --arg body "$html_content" '{"body": $body}') +echo "[GITHUB COMMENT] json_payload --> ${json_payload}" + +curl -L \ + -X POST \ + -H "Accept: application/vnd.github+json" \ + -H "Authorization: Bearer $JH_GITHUB_TOKEN" \ + https://github.sec.samsung.net/api/v3/repos/dotnet/launcher/issues/$1/comments \ + -d "$json_payload" + +exit 0 diff --git a/tests/Performance/measure b/tests/Performance/measure new file mode 100755 index 0000000..5da7d68 --- /dev/null +++ b/tests/Performance/measure @@ -0,0 +1,418 @@ +#!/usr/bin/env python3 +import argparse, ast, math, os, re, statistics, subprocess, sys, threading +from time import sleep +from datetime import datetime + +# Global values. +predefined_list = ["org.tizen.example.AppCommon.Tizen.Mobile", + "org.tizen.example.ApplicationControl.Tizen.Mobile", + "org.tizen.example.Puzzle.Tizen.Mobile", + "org.tizen.example.Settings.Tizen.Mobile", + "org.tizen.example.System_info.Tizen.Mobile", + "org.tizen.example.Xamarin.Hello.F_HUB.Tizen"] +logfile = None +dlogs = list() + +# Print text to a file. +def log(text): + if not logfile: + return + + with open(logfile, 'a') as f: + d = f"\n[{datetime.now().strftime('%Y-%m-%d %H:%M:%S')}] " + f.write(d + d.join(str(text).splitlines())) + +# Execute a command and return the output as a string. +def cmd(command): + return subprocess.run(command.split(), encoding='utf-8', stdout=subprocess.PIPE).stdout + +# Launch an application and return its pid. +def launch(app_id, serial, standalone=False, pause=10): + raw = cmd(f"sdb -s {serial} shell app_launcher {'-e' if standalone else '-s'} {app_id}") + if 'successfully launched pid' not in raw: + raise Exception(f'Unable to launch {app_id}. Make sure the application is installed on the target device.') + sleep(pause) + return raw[raw.index('pid') + 6:raw.index('with') - 1] + +# Terminate apps if running. +def terminate(app_ids, serial, pause=0): + for app_id in app_ids: + cmd(f'sdb -s {serial} shell app_launcher -t {app_id}') + # Check if we are failing due to an unhandled exception. + if sys.exc_info()[0] is None: + sleep(pause) + +# Colorize the text. +def color(text): + return f'\033[93m{text}\033[0m' if os.name == 'posix' else text + +# Entry point for the dlog thread. 
+def dlogger(serial): + cmd(f'sdb -s {serial} dlog -c') + + command = f'sdb -s {serial} dlog -v time AUL LAUNCH' + with subprocess.Popen(command.split(), encoding='utf-8', stdout=subprocess.PIPE) as p: + for line in p.stdout: + dlogs.append(line.strip()) + +# Filter dlogs with conditions. +def read_dlogs(target=None, pid=0, grep:str=None): + ret = list() + + for line in dlogs: + split = line.split(')')[0] + if target and target not in split: + continue + elif pid != 0 and str(pid) not in split.split('(')[1]: + continue + elif grep is None or re.search(grep, line): + ret.append(line) + + return ret + +# Return a UNIX time in milliseconds from a formatted dlog text. +def time_in_millis(text): + t = text.split()[1] + h = int(t[0:2]) + m = int(t[3:5]) + s = int(t[6:8]) + millis = int(t[9:12]) + offset = int(t[12:15]) + return ((h - offset) * 3600 + m * 60 + s) % 86400 * 1000 + millis + +# Parse time values. +def parse_stime(ret, app_id): + # Detect the launch request. + lines = read_dlogs('AUL', grep=f'[Rr]equest cmd\(0:APP_START\)( )?: appid\({app_id}\)') + if len(lines) == 0: + log(f'Error: parse_stime for app_id={app_id} has failed. No launch request.') + return + queued = time_in_millis(lines[-1]) + + # Get the Launching:done notification. + lines = read_dlogs('LAUNCH', grep=f'{app_id}:Application:Launching:done') + if len(lines) == 0: + log(f'Error: parse_stime for app_id={app_id} has failed. Unable to detect the app launch completion.') + return + done = time_in_millis(lines[-1]) + + ret[app_id]['stime'][-1] = done - queued + log(f"{app_id:40.40} {done - queued:8} ms (Launch)") + +# Parse memps to get PSS and RSS. +def parse_memps(ret, serial, pid_map:map): + # pid_map is a map of {pid: app_id}. + raw = '' + app_ids = [ai for ai in pid_map.values() if len(ai) > 0] + retry_cnt = 3 + while not all(ai in raw for ai in app_ids) and retry_cnt > 0: + raw = cmd(f'sdb -s {serial} shell memps -v') + retry_cnt -= 1 + sleep(1) + + # len(lines) == len(app_ids) if successful. + lines = [l for l in raw.splitlines() if any(ai in l for ai in app_ids)] + if len(lines) < len(app_ids): + log(f'Warning: Failed to get one or more app ids from memps.\n{lines}') + + for line in lines: + if any(ai in line for ai in app_ids): + split = [x for x in line.split() if x] + ai = pid_map[split[0]] # from pid + shar = float(split[1]) + float(split[2]) + prvt = float(split[3]) + float(split[4]) + swap = float(split[10]) + rss = shar + prvt + swap + ret[ai]['pss'][-1] = float(split[6]) + ret[ai]['rss'][-1] = rss + ret[ai]['shar'][-1] = shar + ret[ai]['prvt'][-1] = prvt + ret[ai]['swap'][-1] = swap + ret[ai]['file'][-1] = float(split[1]) + float(split[3]) + ret[ai]['dirt'][-1] = float(split[2]) + float(split[4]) + log(f'{ai:40.40} {float(split[6]):8.0f} KB (PSS), {rss:8.0f} KB (RSS)') + +# Reduce the dimensionality of the given data. +def trim(data, recent=False): + # If recent is True, only the latest (intermediate) values are taken. + # Otherwise, we first remove outlying values and then compute averages. + ret = dict((ai, dict()) for ai in data.keys()) # Create a new data structure. + for ai, d in data.items(): # d is a dictionary. + # Initialize the metadata. + ret[ai]['n'] = len(data[ai]['stime']) # N (original length) + ret[ai]['na'] = dict((k, 0) for k in d.keys()) # Zero-initialized N/A counts + ret[ai]['stdev'] = dict((k, 0) for k in d.keys()) # Zero-initialized standard deviations + for key, val in d.items(): + if recent: + ret[ai][key] = val[-1] + else: + # Only swaps can be zero by nature. 
Otherwise, use only non-zero values. + v = val if key == 'swap' else [x for x in val if x > 0] + mean = statistics.mean(v) if len(v) > 0 else 0 + stdev = statistics.stdev(v) if len(v) > 1 else 0 + # Use sqrt(mean) as a correction threshold to discard outlying values. + cv = v if stdev <= math.sqrt(mean) else [x for x in v if (x >= mean - stdev) and (x <= mean + stdev)] + ret[ai][key] = statistics.mean(cv) if len(cv) > 0 else 0 + ret[ai]['na'][key] = len(val) - len(cv) + ret[ai]['stdev'][key] = statistics.stdev(cv) if len(cv) > 1 else 0 + + # Compute averages. + keys = data[list(data.keys())[0]].keys() + mean = dict() + for key in keys: + val = [ret[ai][key] for ai in data.keys() if ret[ai][key] > 0 or key == 'swap'] # Only non-zeros are valid. + mean[key] = statistics.mean(val) if len(val) > 0 else 0 + ret['Average'] = mean + + return ret + +# Display the statistical metadata (N/A and STDEV) of the dataset. +def report(data:map): + print(f"{'':40}{'STIME':>14}{'PSS':>14}{'RSS':>14}") + print(f"{'Application':40}{' N/A STDEV' * 3}") + for app_id in [ai for ai in data.keys() if ai != 'Average']: + empty = dict((k, 0) for k in data[app_id].keys()) # for backward compatibility (0 if no data) + na = data[app_id].get('na', empty) + stdev = data[app_id].get('stdev', empty) + b = f"{app_id:40.40}" + b += f"{na['stime']:>6}{stdev['stime']:>8.1f}" + b += f"{na['pss']:>6}{stdev['pss']:>8.1f}" + b += f"{na['rss']:>6}{stdev['rss']:>8.1f}" + print(b) + +# Display the dataset. +def display(data:map, verbose=False): + b = f"{'Application':40}{'STIME':>6}{'PSS':>8}" + if verbose: + b += f"{'nSHARED (%)':>15}{'MULT':>6}{'PRIVATE':>9}{'CODE':>8}{'DATA':>8}" + b += f"{'RSS':>8}" + print(b) + + for app_id in data.keys(): + d = data[app_id] + b = f"{app_id:40.40}{d['stime']:6.0f}{d['pss']:8.0f}" + if verbose: + nshared = d['pss'] - d['prvt'] + b += f"{nshared:9.0f}{nshared / d['pss'] * 100 if d['pss'] > 0 else 0:6.1f}" # nShared (net shared amount) + b += f"{(d['rss'] - d['prvt'] - d['swap']) / nshared if nshared > 0 else 0:6.1f}" # Mult + b += f"{d['prvt']:9.0f}{d['file']:8.0f}{d['dirt']:8.0f}" + b += f"{d['rss']:8.0f}" + print(b) + + if verbose: + report(data) + +# Compare and display the two datasets. +def compare(data1:map, data2:map, verbose=False): + if set(data1.keys()) != set(data2.keys()): + raise Exception('Unable to process data because the target app ids do not match.') + + b = f"{'Application':40}{'STIME (ms)':^20}{'PSS (KB)':^24}" + if verbose: + b += f"{'RSS (KB)':^24}" + print(b) + + for app_id in data1.keys(): + d1 = data1[app_id] + d2 = data2[app_id] + stt_gain=f"{(d2['stime'] - d1['stime']) / d1['stime'] * 100 if d1['stime'] > 0 else 0:+5.1f}%" + pss_gain=f"{(d2['pss'] - d1['pss']) / d1['pss'] * 100 if d1['pss'] > 0 else 0:+5.1f}%" + rss_gain=f"{(d2['rss'] - d1['rss']) / d1['rss'] * 100 if d1['rss'] > 0 else 0:+5.1f}%" + b = f"{app_id:40.40}{d1['stime']:6.0f}{d2['stime']:6.0f}{stt_gain:>8}" + b += f"{d1['pss']:8.0f}{d2['pss']:8.0f}{pss_gain:>8}" + if verbose: + b += f"{d1['rss']:8.0f}{d2['rss']:8.0f}{rss_gain:>8}" + print(b) + +# The main data collection logic. +def run(serial, app_ids=predefined_list, count=5, pause=10, standalone=False, individual=False, post:str=None): + # A permission is required to run 'memps -v'. + cmd(f'sdb -s {serial} root on') + + # A dictionary of dictionaries containing output values as lists. 
+ ret = dict((app_id, { + 'stime':[], # Traditional startup time in ms + 'pss':[], # Proportional set size in KB + 'rss':[], # Resident set size + 'shar':[], # Shared set size + 'prvt':[], # Private set size + 'swap':[], # Swap size + 'file':[], # File (clean) set size + 'dirt':[], # Dirty set size + }) for app_id in app_ids) + + # Flush dlogs and terminate any running processes. + print('Preparing for the measurement...') + if not standalone: + # Restart the candidate process if running. + cmd(f'sdb -s {serial} shell killall dotnet-launcher dotnet-loader') + terminate(app_ids, serial, pause / 2) + + # Start up the dlog thread. + # We need to create a dedicated thread because 'dlog -d' is unstable on FH4.0 targets. + dlog_thread = threading.Thread(target=dlogger, args=[serial], daemon=True) + dlog_thread.start() + + for num_iter in range(count): + print(f'Iter {num_iter + 1}/{count}') + + # Zero-initialize values. + for app_id in app_ids: + for v in ret[app_id].values(): + v.append(0) + + if individual: + for i in range(len(app_ids)): + app_id = app_ids[i] + print(f'({i + 1}/{len(app_ids)}) Launching {app_id:60.60}', end='\r') + + try: + pid = launch(app_id, serial, standalone, pause) + parse_stime(ret, app_id) + parse_memps(ret, serial, {pid: app_id}) + dlogs.clear() + finally: + terminate([app_id], serial, pause / 4) + else: + pid_map = dict() # {pid: app_id} + try: + for i in range(len(app_ids)): + app_id = app_ids[i] + print(f'({i + 1}/{len(app_ids)}) Launching {app_id:60.60}', end='\r') + + pid = launch(app_id, serial, standalone, pause) + parse_stime(ret, app_id) + pid_map[pid] = app_id + dlogs.clear() + + # The post-script is only supported in non-individual mode. + if post: + res = cmd(post) + + parse_memps(ret, serial, pid_map) + finally: + terminate(app_ids, serial, pause / 2) + + # Display the intermediate data. + print(f"{'':80}",end='\r') + display(trim(ret, True), False) + + if post: + print(res) + + return ret + +# Save the serialized data to a file. +def export(data, outfile=None): + if not outfile: + return + + with open(outfile, 'w') as f: + f.write(str(data)) + print(f'\nSuccessfully exported as {outfile}.') + +# Check the sdb connection status and get a device serial number. +def read_serial(): + raw = subprocess.run('sdb devices'.split(), encoding='utf-8', stdout=subprocess.PIPE).stdout + lines = [l for l in raw.splitlines() if not l.startswith('* ')] + if len(lines) <= 1: + # No target found. + return None + elif len(lines) == 2: + return lines[1].split(' ')[0].split(':')[0].strip() + else: + # More than one target found. + serials = [] + for idx in range(1, len(lines)): + serial = lines[idx].split(' ')[0].split(':')[0].replace('device', '').strip() + serials.append(serial) + print(f"[{idx}] {serial} - {lines[idx].split(' ')[-1].strip()}") + choice = input(f'Select a device [1-{len(lines) - 1}]: ') + return serials[int(choice) - 1].strip() if choice.isdigit() else None + +# Main entry point for command line usage. 
+def main(): + epilog = \ + '''output columns: + STIME Startup time in ms + PSS Proportional set size in KB + nSHARED Net shared set size = PSS - PRIVATE + (%) The portion of nSHARED in PSS + MULT Net multiplier = (RSS - PRIVATE - SWAP) / nSHARED + PRIVATE Private set size + CODE File (clean) set size + DATA Dirty (data) set size + RSS Resident set size + N/A The number of omitted samples + STDEV Dispersion of the samples (standard deviation)''' + + parser = argparse.ArgumentParser(description='Tizen .NET performance analysis utility', epilog=epilog, formatter_class=argparse.RawDescriptionHelpFormatter) + parser.add_argument('APP_ID', type=str, nargs='*', help='target app ids (default: predefined 6 applications)', default=predefined_list) + parser.add_argument('-s', metavar='SERIAL', type=str, help='specify a device serial number', default=None) + parser.add_argument('-n', metavar='NUM', type=int, help='number of iterations (default: 5)', default=5) + parser.add_argument('-t', metavar='SECONDS', type=int, help='minimum time interval between launches (default: 10)', default=10) + parser.add_argument('-v', help='print verbose debug information', action='store_true') + parser.add_argument('-e', '--standalone', help='launch apps in standalone mode', action='store_true') + parser.add_argument('-i', '--individual', help='measure each application individually', action='store_true') + parser.add_argument('-p', metavar='CMD', type=str, help='execute a post-script after launching all apps', default=None) + parser.add_argument('-o', metavar='OUTFILE', help='export output data to a file', default=None) + parser.add_argument('-f', metavar='FILE', help='display data from a file (do not measure)', default=None) + parser.add_argument('--last', help='display the most recent measurement data', action='store_true') + parser.add_argument('--compare', nargs=2, metavar=('A', 'B'), help='compare two data files', default=None) + args = parser.parse_args() + + # Read args.file and exit (if not None). + if args.f: + with open(args.f, 'r') as f: + # Deserialize data from the file. + data = ast.literal_eval(f.read()) + display(data, args.v) + exit(0) + + # Read the last measurement data and exit (if --last). + if args.last: + with open('measure.log', 'r') as f: + for line in reversed(f.read().splitlines()): + raw = line.split(']')[1].strip() + if raw.startswith('{'): + data = ast.literal_eval(raw) + display(data, args.v) + export(data, args.o) # Allow exporting data to OUTFILE. + exit(0) + print('The requested data could not be found in measure.log.') + exit(1) + + # Compare two input files and exit (if --compare). + if args.compare: + with open(args.compare[0], 'r') as f1, open(args.compare[1], 'r') as f2: + data1 = ast.literal_eval(f1.read()) + data2 = ast.literal_eval(f2.read()) + compare(data1, data2, args.v) + exit(0) + + # Get a device serial number. + serial = args.s if args.s else read_serial() + if serial is None: + print('No connected device(s).') + exit(1) + + # Set up a log file. + global logfile + logfile = 'measure.log' + log(' '.join(sys.argv)) + + # Record data. + raw = run(serial, args.APP_ID, args.n, args.t, args.standalone, args.individual, args.p) + data = trim(raw) + + print('Final Result') + display(data, args.v) + + # Create a backup. + log(data) + + # Export data. 
+ export(data, args.o) + +if __name__ == '__main__': + main() diff --git a/tests/Performance/org.tizen.example.AppCommon.Tizen.Mobile-1.0.0.tpk b/tests/Performance/org.tizen.example.AppCommon.Tizen.Mobile-1.0.0.tpk new file mode 100755 index 0000000..37fbd1d Binary files /dev/null and b/tests/Performance/org.tizen.example.AppCommon.Tizen.Mobile-1.0.0.tpk differ diff --git a/tests/Performance/org.tizen.example.ApplicationControl.Tizen.Mobile-1.0.0.tpk b/tests/Performance/org.tizen.example.ApplicationControl.Tizen.Mobile-1.0.0.tpk new file mode 100755 index 0000000..8fc9b38 Binary files /dev/null and b/tests/Performance/org.tizen.example.ApplicationControl.Tizen.Mobile-1.0.0.tpk differ diff --git a/tests/Performance/org.tizen.example.Puzzle.Tizen.Mobile-1.0.0.tpk b/tests/Performance/org.tizen.example.Puzzle.Tizen.Mobile-1.0.0.tpk new file mode 100755 index 0000000..8be0139 Binary files /dev/null and b/tests/Performance/org.tizen.example.Puzzle.Tizen.Mobile-1.0.0.tpk differ diff --git a/tests/Performance/org.tizen.example.Settings.Tizen.Mobile-1.0.0.tpk b/tests/Performance/org.tizen.example.Settings.Tizen.Mobile-1.0.0.tpk new file mode 100755 index 0000000..83b5c9c Binary files /dev/null and b/tests/Performance/org.tizen.example.Settings.Tizen.Mobile-1.0.0.tpk differ diff --git a/tests/Performance/org.tizen.example.System_info.Tizen.Mobile-1.0.0.tpk b/tests/Performance/org.tizen.example.System_info.Tizen.Mobile-1.0.0.tpk new file mode 100755 index 0000000..2ba1a80 Binary files /dev/null and b/tests/Performance/org.tizen.example.System_info.Tizen.Mobile-1.0.0.tpk differ diff --git a/tests/Performance/org.tizen.example.Xamarin.Hello.F_HUB.Tizen-1.0.0.tpk b/tests/Performance/org.tizen.example.Xamarin.Hello.F_HUB.Tizen-1.0.0.tpk new file mode 100755 index 0000000..21973fe Binary files /dev/null and b/tests/Performance/org.tizen.example.Xamarin.Hello.F_HUB.Tizen-1.0.0.tpk differ
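
Editor's note: the "Print&Pull Log" step in the Tizen_Dotnet_Benchmark job waits for the benchmark to finish by tailing dlogutil output until an end marker appears, then pulls the BenchmarkDotNet logs from the device. Below is a minimal sketch of that flow in Python rather than bash/inotifywait; it assumes a single connected target (no -d/-s selector) and reuses the end marker and /tmp/BenchmarkDotnet path from the workflow. It is illustrative only, not part of the commit.

#!/usr/bin/env python3
# Sketch: wait for the benchmark END marker in dlog, then pull the results.
import subprocess

END_MARKER = '##### Tizen.NET Benchmark END #####'

def wait_for_benchmark_and_pull(dest='./tests/Benchmark/'):
    # Clear the log buffer, then stream STDOUT logs line by line.
    subprocess.run(['sdb', 'shell', 'dlogutil', '-c'], check=False)
    proc = subprocess.Popen(['sdb', 'shell', 'dlogutil', '-v', 'time', 'STDOUT'],
                            encoding='utf-8', stdout=subprocess.PIPE)
    try:
        for line in proc.stdout:
            print(line.rstrip())
            if END_MARKER in line:
                # Benchmark finished: fetch the BenchmarkDotNet log directory.
                subprocess.run(['sdb', 'pull', '/tmp/BenchmarkDotnet', dest],
                               check=False)
                break
    finally:
        proc.kill()

if __name__ == '__main__':
    wait_for_benchmark_and_pull()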
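
Editor's note: script/github-comment.sh builds a JSON payload with jq and POSTs it to the GitHub Enterprise REST API to leave the result file as a PR comment. The following is a minimal Python sketch of the same call, assuming the same inputs (argv[1] = issue/PR number, argv[2] = file with the comment body), the same JH_GITHUB_TOKEN environment variable, and the enterprise endpoint hard-coded in the shell script. Illustrative only.

#!/usr/bin/env python3
# Sketch: post a PR comment via the GitHub issues comments API.
import json, os, sys, urllib.request

def post_pr_comment(issue_number: str, body_file: str) -> None:
    with open(body_file, 'r') as f:
        body = f.read()

    # Same payload shape the shell script builds with jq: {"body": <text>}.
    payload = json.dumps({'body': body}).encode('utf-8')

    url = ('https://github.sec.samsung.net/api/v3/repos/dotnet/launcher'
           f'/issues/{issue_number}/comments')
    req = urllib.request.Request(
        url,
        data=payload,
        method='POST',
        headers={
            'Accept': 'application/vnd.github+json',
            'Authorization': f"Bearer {os.environ['JH_GITHUB_TOKEN']}",
            'Content-Type': 'application/json',
        })
    with urllib.request.urlopen(req) as resp:
        print(f'Comment posted, HTTP status {resp.status}')

if __name__ == '__main__':
    post_pr_comment(sys.argv[1], sys.argv[2])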
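
Editor's note: in tests/Performance/measure, trim() averages each metric only after discarding outliers: zero-valued samples are dropped (except for swap, which can legitimately be zero), and when the sample standard deviation exceeds sqrt(mean), values outside mean ± stdev are excluded before the final average. Below is a minimal, self-contained sketch of that rule for a single list of samples; robust_mean is a hypothetical helper name, and the real script applies the rule per app id and per metric (stime, pss, rss, ...). Illustrative only.

#!/usr/bin/env python3
# Sketch: the outlier-rejection rule from trim(), isolated for one sample list.
import math, statistics

def robust_mean(samples):
    values = [x for x in samples if x > 0]          # drop zero (missing) samples
    if not values:
        return 0
    mean = statistics.mean(values)
    stdev = statistics.stdev(values) if len(values) > 1 else 0
    # Only correct when the spread is large relative to the mean.
    if stdev > math.sqrt(mean):
        values = [x for x in values if mean - stdev <= x <= mean + stdev]
    return statistics.mean(values) if values else 0

# Example: one slow launch (910 ms) is discarded before averaging.
print(robust_mean([402, 398, 405, 910, 400]))       # -> 401.25 ms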