From f159f12fee7aa374ec6bfeaa547630a5b943eb08 Mon Sep 17 00:00:00 2001
From: Tao Xu
Date: Mon, 13 Sep 2021 18:14:32 -0700
Subject: [PATCH] [iOS][OSS][BE] Add Simulator tests for full JIT (#64851)

Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/64851

ghstack-source-id: 137970229

Test Plan: CircleCI

Reviewed By: hanton, cccclai

Differential Revision: D30877963

fbshipit-source-id: 7bb8ade1959b85c3902ba9dc0660cdac8f558d64
---
 .circleci/config.yml                            | 25 ++++++-----
 .../job-specs/job-specs-custom.yml              | 25 ++++++-----
 ios/TestApp/TestApp.xcodeproj/project.pbxproj   |  6 +--
 ios/TestApp/TestAppTests/TestAppTests.mm        | 45 -------------------
 ios/TestApp/TestAppTests/TestFullJIT.mm         | 22 ++++++++++
 ios/TestApp/TestAppTests/TestLiteInterpreter.mm | 24 +++++++++++
 ios/TestApp/benchmark/setup.rb                  | 50 ++++++++++++++++----
 ios/TestApp/benchmark/trace_model.py            |  5 ++-
 8 files changed, 120 insertions(+), 82 deletions(-)
 delete mode 100644 ios/TestApp/TestAppTests/TestAppTests.mm
 create mode 100644 ios/TestApp/TestAppTests/TestFullJIT.mm
 create mode 100644 ios/TestApp/TestAppTests/TestLiteInterpreter.mm

diff --git a/.circleci/config.yml b/.circleci/config.yml
index 8415b23..82f5ce1 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -1797,10 +1797,6 @@ jobs:
           no_output_timeout: "30m"
           command: |
             set -e
-            if [ ${BUILD_LITE_INTERPRETER} == 0 ]; then
-              echo "Run Build Test is not for full jit, skipping."
-              exit 0
-            fi
             PROJ_ROOT=/Users/distiller/project
             PROFILE=PyTorch_CI_2021
             # run the ruby build script
@@ -1826,21 +1822,28 @@
             if [ ${IOS_PLATFORM} != "SIMULATOR" ]; then
               echo "not SIMULATOR build, skip it."
               exit 0
-            elif [ ${BUILD_LITE_INTERPRETER} == 0 ]; then
-              echo "Run Simulator Tests is not for full jit, skipping."
-              exit 0
             fi
             WORKSPACE=/Users/distiller/workspace
             PROJ_ROOT=/Users/distiller/project
             source ~/anaconda/bin/activate
-            pip install torch torchvision --progress-bar off
-            #run unit test
+            # use the pytorch nightly build to generate models
+            conda install pytorch torchvision -c pytorch-nightly --yes
+            # generate models for different backends
             cd ${PROJ_ROOT}/ios/TestApp/benchmark
+            mkdir -p ../models
             python trace_model.py
-            ruby setup.rb
+            if [ ${BUILD_LITE_INTERPRETER} == 1 ]; then
+              ruby setup.rb --lite 1
+            else
+              ruby setup.rb
+            fi
             cd ${PROJ_ROOT}/ios/TestApp
             instruments -s -devices
-            fastlane scan
+            if [ ${BUILD_LITE_INTERPRETER} == 1 ]; then
+              fastlane scan --only_testing TestAppTests/TestAppTests/testLiteInterpreter
+            else
+              fastlane scan --only_testing TestAppTests/TestAppTests/testFullJIT
+            fi
   pytorch_linux_bazel_build:
     <<: *pytorch_params
     machine:
diff --git a/.circleci/verbatim-sources/job-specs/job-specs-custom.yml b/.circleci/verbatim-sources/job-specs/job-specs-custom.yml
index 49df2f5..765c1d8 100644
--- a/.circleci/verbatim-sources/job-specs/job-specs-custom.yml
+++ b/.circleci/verbatim-sources/job-specs/job-specs-custom.yml
@@ -534,10 +534,6 @@
           no_output_timeout: "30m"
           command: |
             set -e
-            if [ ${BUILD_LITE_INTERPRETER} == 0 ]; then
-              echo "Run Build Test is not for full jit, skipping."
-              exit 0
-            fi
             PROJ_ROOT=/Users/distiller/project
             PROFILE=PyTorch_CI_2021
             # run the ruby build script
@@ -563,21 +559,28 @@
             if [ ${IOS_PLATFORM} != "SIMULATOR" ]; then
               echo "not SIMULATOR build, skip it."
               exit 0
-            elif [ ${BUILD_LITE_INTERPRETER} == 0 ]; then
-              echo "Run Simulator Tests is not for full jit, skipping."
-              exit 0
             fi
             WORKSPACE=/Users/distiller/workspace
             PROJ_ROOT=/Users/distiller/project
             source ~/anaconda/bin/activate
-            pip install torch torchvision --progress-bar off
-            #run unit test
+            # use the pytorch nightly build to generate models
+            conda install pytorch torchvision -c pytorch-nightly --yes
+            # generate models for different backends
             cd ${PROJ_ROOT}/ios/TestApp/benchmark
+            mkdir -p ../models
             python trace_model.py
-            ruby setup.rb
+            if [ ${BUILD_LITE_INTERPRETER} == 1 ]; then
+              ruby setup.rb --lite 1
+            else
+              ruby setup.rb
+            fi
             cd ${PROJ_ROOT}/ios/TestApp
             instruments -s -devices
-            fastlane scan
+            if [ ${BUILD_LITE_INTERPRETER} == 1 ]; then
+              fastlane scan --only_testing TestAppTests/TestAppTests/testLiteInterpreter
+            else
+              fastlane scan --only_testing TestAppTests/TestAppTests/testFullJIT
+            fi
   pytorch_linux_bazel_build:
     <<: *pytorch_params
     machine:
diff --git a/ios/TestApp/TestApp.xcodeproj/project.pbxproj b/ios/TestApp/TestApp.xcodeproj/project.pbxproj
index 091f9e7..c516d91 100644
--- a/ios/TestApp/TestApp.xcodeproj/project.pbxproj
+++ b/ios/TestApp/TestApp.xcodeproj/project.pbxproj
@@ -3,7 +3,7 @@
     archiveVersion = 1;
     classes = {
     };
-    objectVersion = 52;
+    objectVersion = 50;
     objects = {
 
 /* Begin PBXBuildFile section */
@@ -13,7 +13,6 @@
         A06D4CBD232F0DB200763E16 /* Assets.xcassets in Resources */ = {isa = PBXBuildFile; fileRef = A06D4CBC232F0DB200763E16 /* Assets.xcassets */; };
         A06D4CC0232F0DB200763E16 /* LaunchScreen.storyboard in Resources */ = {isa = PBXBuildFile; fileRef = A06D4CBE232F0DB200763E16 /* LaunchScreen.storyboard */; };
         A06D4CC3232F0DB200763E16 /* main.m in Sources */ = {isa = PBXBuildFile; fileRef = A06D4CC2232F0DB200763E16 /* main.m */; };
-        A0EA3B0A237FCB72007CEA34 /* TestAppTests.mm in Sources */ = {isa = PBXBuildFile; fileRef = A0EA3B09237FCB72007CEA34 /* TestAppTests.mm */; platformFilter = ios; };
 /* End PBXBuildFile section */
 
 /* Begin PBXContainerItemProxy section */
@@ -39,7 +38,6 @@
         A06D4CC2232F0DB200763E16 /* main.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = main.m; sourceTree = "<group>"; };
         A0EA3AFF237FCB08007CEA34 /* TestAppTests.xctest */ = {isa = PBXFileReference; explicitFileType = wrapper.cfbundle; includeInIndex = 0; path = TestAppTests.xctest; sourceTree = BUILT_PRODUCTS_DIR; };
         A0EA3B03237FCB08007CEA34 /* Info.plist */ = {isa = PBXFileReference; lastKnownFileType = text.plist.xml; path = Info.plist; sourceTree = "<group>"; };
-        A0EA3B09237FCB72007CEA34 /* TestAppTests.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; path = TestAppTests.mm; sourceTree = "<group>"; };
 /* End PBXFileReference section */
 
 /* Begin PBXFrameworksBuildPhase section */
@@ -97,7 +95,6 @@
         A0EA3B00237FCB08007CEA34 /* TestAppTests */ = {
             isa = PBXGroup;
             children = (
-                A0EA3B09237FCB72007CEA34 /* TestAppTests.mm */,
                 A0EA3B03237FCB08007CEA34 /* Info.plist */,
             );
             path = TestAppTests;
@@ -211,7 +208,6 @@
             isa = PBXSourcesBuildPhase;
             buildActionMask = 2147483647;
             files = (
-                A0EA3B0A237FCB72007CEA34 /* TestAppTests.mm in Sources */,
             );
             runOnlyForDeploymentPostprocessing = 0;
         };
diff --git a/ios/TestApp/TestAppTests/TestAppTests.mm b/ios/TestApp/TestAppTests/TestAppTests.mm
deleted file mode 100644
index 63fbaaa..0000000
--- a/ios/TestApp/TestAppTests/TestAppTests.mm
+++ /dev/null
@@ -1,45 +0,0 @@
-#import <XCTest/XCTest.h>
-
-#include <torch/csrc/jit/mobile/function.h>
-#include <torch/csrc/jit/mobile/import.h>
-#include <torch/csrc/jit/mobile/interpreter.h>
-#include <torch/csrc/jit/mobile/module.h>
-#include <torch/csrc/jit/mobile/observer.h>
-#include <torch/script.h>
-#include "ATen/ATen.h"
-#include "caffe2/core/timer.h"
-#include "caffe2/utils/string_utils.h"
-#include "torch/csrc/autograd/grad_mode.h"
-
-@interface TestAppTests : XCTestCase
-
-@end
-
-@implementation TestAppTests {
-  torch::jit::mobile::Module _module;
-}
-
-+ (void)setUp {
-  [super setUp];
-}
-
-- (void)setUp {
-  [super setUp];
-  NSString* modelPath = [[NSBundle bundleForClass:[self class]] pathForResource:@"model"
-                                                                         ofType:@"ptl"];
-  XCTAssertTrue([NSFileManager.defaultManager fileExistsAtPath:modelPath],
-                @"model.ptl doesn't exist!");
-  _module = torch::jit::_load_for_mobile(modelPath.UTF8String);
-}
-
-- (void)testForward {
-//  _module.eval();
-  c10::InferenceMode mode;
-  std::vector<c10::IValue> inputs;
-  inputs.push_back(torch::ones({1, 3, 224, 224}, at::ScalarType::Float));
-  auto outputTensor = _module.forward(inputs).toTensor();
-  float* outputBuffer = outputTensor.data_ptr<float>();
-  XCTAssertTrue(outputBuffer != nullptr, @"");
-}
-
-@end
diff --git a/ios/TestApp/TestAppTests/TestFullJIT.mm b/ios/TestApp/TestAppTests/TestFullJIT.mm
new file mode 100644
index 0000000..cc08c63
--- /dev/null
+++ b/ios/TestApp/TestAppTests/TestFullJIT.mm
@@ -0,0 +1,22 @@
+#import <XCTest/XCTest.h>
+
+#include <torch/script.h>
+
+@interface TestAppTests : XCTestCase
+
+@end
+
+@implementation TestAppTests {
+}
+
+- (void)testFullJIT {
+  NSString* modelPath = [[NSBundle bundleForClass:[self class]] pathForResource:@"model"
+                                                                         ofType:@"pt"];
+  auto module = torch::jit::load(modelPath.UTF8String);
+  c10::InferenceMode mode;
+  auto input = torch::ones({1, 3, 224, 224}, at::kFloat);
+  auto outputTensor = module.forward({input}).toTensor();
+  XCTAssertTrue(outputTensor.numel() == 1000);
+}
+
+@end
diff --git a/ios/TestApp/TestAppTests/TestLiteInterpreter.mm b/ios/TestApp/TestAppTests/TestLiteInterpreter.mm
new file mode 100644
index 0000000..722ed45
--- /dev/null
+++ b/ios/TestApp/TestAppTests/TestLiteInterpreter.mm
@@ -0,0 +1,24 @@
+#import <XCTest/XCTest.h>
+
+#include <torch/csrc/jit/mobile/import.h>
+#include <torch/csrc/jit/mobile/module.h>
+#include <torch/script.h>
+
+@interface TestAppTests : XCTestCase
+
+@end
+
+@implementation TestAppTests {
+}
+
+- (void)testLiteInterpreter {
+  NSString* modelPath = [[NSBundle bundleForClass:[self class]] pathForResource:@"model_lite"
+                                                                         ofType:@"ptl"];
+  auto module = torch::jit::_load_for_mobile(modelPath.UTF8String);
+  c10::InferenceMode mode;
+  auto input = torch::ones({1, 3, 224, 224}, at::kFloat);
+  auto outputTensor = module.forward({input}).toTensor();
+  XCTAssertTrue(outputTensor.numel() == 1000);
+}
+
+@end
diff --git a/ios/TestApp/benchmark/setup.rb b/ios/TestApp/benchmark/setup.rb
index 7edb474..6ff96ab 100644
--- a/ios/TestApp/benchmark/setup.rb
+++ b/ios/TestApp/benchmark/setup.rb
@@ -8,7 +8,12 @@ option_parser = OptionParser.new do |opts|
   opts.on('-t', '--team_id ', 'development team ID') { |value|
     options[:team_id] = value
   }
+  opts.on('-l', '--lite ', 'use lite interpreter') { |value|
+    options[:lite] = value
+  }
 end.parse!
+puts options.inspect
+
 puts "Current directory: #{Dir.pwd}"
 install_path = File.expand_path("../../../build_ios/install")
 if not Dir.exist? (install_path)
@@ -37,26 +42,55 @@ targets.each do |target|
     end
   end
 end
-puts "Installing the testing model..."
-model_path = File.expand_path("./model.ptl")
-if not File.exist?(model_path)
-  raise "model.pt can't be found!"
-end
 group = project.main_group.find_subpath(File.join('TestApp'),true)
 group.set_source_tree('SOURCE_ROOT')
 group.files.each do |file|
-  if (file.name.to_s.end_with?(".pt"))
+  if (file.name.to_s.end_with?(".pt") ||
+      file.name.to_s.end_with?(".ptl"))
     group.remove_reference(file)
     targets.each do |target|
       target.resources_build_phase.remove_file_reference(file)
     end
   end
 end
-model_file_ref = group.new_reference(model_path)
+
+file_refs = []
+# collect models
+puts "Installing models..."
+models_dir = File.expand_path("../models")
+Dir.foreach(models_dir) do |model|
+  if(model.end_with?(".pt") || model.end_with?(".ptl"))
+    model_path = models_dir + "/" + model
+    file_refs.push(group.new_reference(model_path))
+  end
+end
+
 targets.each do |target|
-  target.resources_build_phase.add_file_reference(model_file_ref, true)
+  file_refs.each do |ref|
+    target.resources_build_phase.add_file_reference(ref, true)
+  end
 end
+
+# add test files
+puts "Adding test files..."
+testTarget = targets[1]
+testFilePath = File.expand_path('../TestAppTests/')
+group = project.main_group.find_subpath(File.join('TestAppTests'),true)
+group.files.each do |file|
+  if (file.path.end_with?(".mm"))
+    file.remove_from_project
+  end
+end
+
+if(options[:lite])
+  file = group.new_file("TestLiteInterpreter.mm")
+  testTarget.add_file_references([file])
+else
+  file = group.new_file("TestFullJIT.mm")
+  testTarget.add_file_references([file])
+end
+
 puts "Linking static libraries..."
 libs = ['libc10.a', 'libclog.a', 'libpthreadpool.a', 'libXNNPACK.a', 'libeigen_blas.a', 'libcpuinfo.a', 'libpytorch_qnnpack.a', 'libtorch_cpu.a', 'libtorch.a']
 targets.each do |target|
diff --git a/ios/TestApp/benchmark/trace_model.py b/ios/TestApp/benchmark/trace_model.py
index fc6fe82..ef82d92 100644
--- a/ios/TestApp/benchmark/trace_model.py
+++ b/ios/TestApp/benchmark/trace_model.py
@@ -5,6 +5,7 @@ from torch.utils.mobile_optimizer import optimize_for_mobile
 model = torchvision.models.mobilenet_v2(pretrained=True)
 model.eval()
 example = torch.rand(1, 3, 224, 224)
-traced_script_module = torch.jit.script(model, example)
+traced_script_module = torch.jit.trace(model, example)
 optimized_scripted_module = optimize_for_mobile(traced_script_module)
-exported_optimized_scripted_module = optimized_scripted_module._save_for_lite_interpreter("model.ptl")
+torch.jit.save(optimized_scripted_module, '../models/model.pt')
+exported_optimized_scripted_module = optimized_scripted_module._save_for_lite_interpreter("../models/model_lite.ptl")
--
2.7.4