From 042b262b3633b6c0f577aed6cb4b980ad0c1dcf3 Mon Sep 17 00:00:00 2001
From: Chunseok Lee
Date: Fri, 14 Aug 2020 15:19:19 +0900
Subject: [PATCH] Imported Upstream version 1.8.0

---
 .ahub/tcchecker-tca/config.yaml | 43 +
 .ctags | 1 +
 .gitignore | 2 +-
 Makefile.template | 15 +-
 compiler/.ahub/tcchecker-tca/config.yaml | 54 +
 compiler/bcq-tools/CMakeLists.txt | 27 +
 compiler/bcq-tools/README.md | 78 ++
 compiler/bcq-tools/generate_bcq_output_arrays | 90 ++
 compiler/bcq-tools/preserve_bcq_info | 116 +++
 compiler/circle-quantizer/CMakeLists.txt | 1 +
 compiler/circle-quantizer/requires.cmake | 1 +
 compiler/circle-quantizer/src/CircleQuantizer.cpp | 18 +-
 compiler/circle-tensordump/driver/Driver.cpp | 9 +-
 compiler/circle-tensordump/src/Dump.cpp | 48 +-
 compiler/circle-verify/src/Driver.cpp | 2 +-
 .../circle2circle-dredd-recipe-test/CMakeLists.txt | 93 +-
 .../circle2circle-dredd-recipe-test/requires.cmake | 4 +-
 compiler/circle2circle-dredd-recipe-test/test.lst | 3 +-
 .../circle2circle-dredd-recipe-test/testall.sh | 13 +-
 compiler/circle2circle/CMakeLists.txt | 2 +
 compiler/circle2circle/requires.cmake | 1 +
 compiler/circle2circle/src/Circle2Circle.cpp | 14 +
 compiler/circlechef/CMakeLists.txt | 4 +-
 compiler/circlechef/circle/src/RecipeChef.cpp | 2 +
 compiler/circlechef/core/src/ModelChef.cpp | 1 +
 compiler/circlechef/proto/circlechef.proto | 1 +
 compiler/circlechef/tools/file/Driver.cpp | 2 +-
 compiler/circlechef/tools/reverse/Driver.cpp | 2 +-
 compiler/circledump/driver/Driver.cpp | 2 +-
 compiler/circledump/src/OpPrinter.cpp | 17 +
 compiler/common-artifacts/CMakeLists.txt | 42 +-
 compiler/common-artifacts/exclude.lst | 35 +-
 compiler/common-artifacts/requires.cmake | 1 +
 .../common-artifacts/src/TestDataGenerator.cpp | 32 +-
 compiler/hermes/src/hermes.test.cpp | 25 +-
 compiler/locomotiv/src/Node/BiasEncode.test.cpp | 14 +-
 compiler/locomotiv/src/Node/MatMul.test.cpp | 4 +
 compiler/locop/src/FormattedGraph.test.cpp | 2 +
 compiler/locop/src/FormattedTensorShape.test.cpp | 2 +
 .../include/luci_interpreter/core/Tensor.h | 9 +-
 compiler/luci-interpreter/src/core/KernelParams.h | 5 +
 compiler/luci-interpreter/src/kernels/Add.cpp | 5 +-
 compiler/luci-interpreter/src/kernels/Add.test.cpp | 27 +
 .../luci-interpreter/src/kernels/CMakeLists.txt | 9 +
 .../luci-interpreter/src/kernels/DepthToSpace.cpp | 90 ++
 .../luci-interpreter/src/kernels/DepthToSpace.h | 45 +
 .../src/kernels/DepthToSpace.test.cpp | 60 ++
 .../src/kernels/L2Normalize.test.cpp | 9 +-
 .../src/kernels/LeakyRelu.test.cpp | 11 +-
 .../luci-interpreter/src/kernels/Logistic.test.cpp | 6 +-
 compiler/luci-interpreter/src/kernels/Reverse.cpp | 81 ++
 compiler/luci-interpreter/src/kernels/Reverse.h | 43 +
 .../luci-interpreter/src/kernels/Reverse.test.cpp | 66 ++
 compiler/luci-interpreter/src/kernels/Slice.cpp | 149 +++
 compiler/luci-interpreter/src/kernels/Slice.h | 44 +
 .../luci-interpreter/src/kernels/Slice.test.cpp | 64 ++
 .../src/kernels/TransposeConv.test.cpp | 23 +-
 .../luci-interpreter/src/loader/CMakeLists.txt | 7 +
 .../luci-interpreter/src/loader/GraphLoader.cpp | 25 +-
 compiler/luci-interpreter/src/loader/GraphLoader.h | 18 +-
 .../luci-interpreter/src/loader/KernelBuilder.cpp | 113 +-
 .../luci-interpreter/src/loader/KernelBuilder.h | 17 +-
 .../src/loader/KernelBuilder.test.cpp | 743 +++++++++++++
 .../luci-interpreter/src/loader/ModuleLoader.cpp | 7 +-
 .../luci-interpreter/src/loader/ModuleLoader.h | 5 -
 compiler/luci-value-test/CMakeLists.txt | 2 +-
 compiler/luci-value-test/evalverify.sh | 6 +-
 compiler/luci-value-test/luci_eval_verifier.py
| 78 +- compiler/luci-value-test/test.lst | 140 ++- compiler/luci-value-test/tester/src/EvalTester.cpp | 47 +- .../luci/export/src/CircleOperationExporter.cpp | 90 +- compiler/luci/export/src/CircleTensorExporter.cpp | 5 +- compiler/luci/import/include/luci/Import/Nodes.h | 2 + .../luci/Import/Nodes/CircleNonMaxSuppressionV4.h | 35 + .../include/luci/Import/Nodes/CircleUnique.h | 35 + compiler/luci/import/src/CircleReader.cpp | 2 + compiler/luci/import/src/GraphBuilderRegistry.cpp | 4 +- compiler/luci/import/src/Importer.test.cpp | 7 +- compiler/luci/import/src/Nodes/CircleAbs.cpp | 2 +- compiler/luci/import/src/Nodes/CircleAdd.cpp | 4 +- compiler/luci/import/src/Nodes/CircleArgMax.cpp | 4 +- compiler/luci/import/src/Nodes/CircleArgMin.cpp | 4 +- .../luci/import/src/Nodes/CircleAveragePool2D.cpp | 2 +- .../import/src/Nodes/CircleBCQFullyConnected.cpp | 10 +- compiler/luci/import/src/Nodes/CircleBCQGather.cpp | 8 +- .../luci/import/src/Nodes/CircleBatchMatMul.cpp | 4 +- .../luci/import/src/Nodes/CircleBatchToSpaceND.cpp | 12 +- compiler/luci/import/src/Nodes/CircleCast.cpp | 6 +- compiler/luci/import/src/Nodes/CircleCeil.cpp | 2 +- compiler/luci/import/src/Nodes/CircleConv2D.cpp | 6 +- compiler/luci/import/src/Nodes/CircleCos.cpp | 2 +- .../luci/import/src/Nodes/CircleDepthToSpace.cpp | 4 +- .../import/src/Nodes/CircleDepthwiseConv2D.cpp | 6 +- compiler/luci/import/src/Nodes/CircleDiv.cpp | 4 +- compiler/luci/import/src/Nodes/CircleElu.cpp | 4 +- compiler/luci/import/src/Nodes/CircleEqual.cpp | 6 +- compiler/luci/import/src/Nodes/CircleExp.cpp | 4 +- .../luci/import/src/Nodes/CircleExpandDims.cpp | 6 +- compiler/luci/import/src/Nodes/CircleFill.cpp | 4 +- compiler/luci/import/src/Nodes/CircleFloor.cpp | 2 +- compiler/luci/import/src/Nodes/CircleFloorDiv.cpp | 8 +- compiler/luci/import/src/Nodes/CircleFloorMod.cpp | 8 +- .../luci/import/src/Nodes/CircleFullyConnected.cpp | 6 +- compiler/luci/import/src/Nodes/CircleGather.cpp | 4 +- compiler/luci/import/src/Nodes/CircleGatherNd.cpp | 6 +- compiler/luci/import/src/Nodes/CircleGreater.cpp | 6 +- .../luci/import/src/Nodes/CircleGreaterEqual.cpp | 6 +- compiler/luci/import/src/Nodes/CircleIf.cpp | 2 +- .../luci/import/src/Nodes/CircleInstanceNorm.cpp | 6 +- .../luci/import/src/Nodes/CircleL2Normalize.cpp | 2 +- compiler/luci/import/src/Nodes/CircleL2Pool2D.cpp | 2 +- compiler/luci/import/src/Nodes/CircleLeakyRelu.cpp | 2 +- compiler/luci/import/src/Nodes/CircleLess.cpp | 8 +- compiler/luci/import/src/Nodes/CircleLessEqual.cpp | 6 +- .../src/Nodes/CircleLocalResponseNormalization.cpp | 2 +- compiler/luci/import/src/Nodes/CircleLog.cpp | 4 +- .../luci/import/src/Nodes/CircleLogSoftmax.cpp | 2 +- .../luci/import/src/Nodes/CircleLogicalAnd.cpp | 4 +- .../luci/import/src/Nodes/CircleLogicalNot.cpp | 4 +- compiler/luci/import/src/Nodes/CircleLogicalOr.cpp | 4 +- compiler/luci/import/src/Nodes/CircleLogistic.cpp | 18 +- .../luci/import/src/Nodes/CircleMatrixDiag.cpp | 4 +- .../luci/import/src/Nodes/CircleMatrixSetDiag.cpp | 6 +- compiler/luci/import/src/Nodes/CircleMaxPool2D.cpp | 2 +- compiler/luci/import/src/Nodes/CircleMaximum.cpp | 8 +- compiler/luci/import/src/Nodes/CircleMean.cpp | 4 +- compiler/luci/import/src/Nodes/CircleMinimum.cpp | 8 +- compiler/luci/import/src/Nodes/CircleMirrorPad.cpp | 4 +- compiler/luci/import/src/Nodes/CircleMul.cpp | 4 +- compiler/luci/import/src/Nodes/CircleNeg.cpp | 2 +- .../import/src/Nodes/CircleNonMaxSuppressionV4.cpp | 123 +++ compiler/luci/import/src/Nodes/CircleNotEqual.cpp | 6 +- 
compiler/luci/import/src/Nodes/CircleOneHot.cpp | 16 +- compiler/luci/import/src/Nodes/CirclePRelu.cpp | 4 +- compiler/luci/import/src/Nodes/CirclePad.cpp | 4 +- compiler/luci/import/src/Nodes/CirclePow.cpp | 4 +- compiler/luci/import/src/Nodes/CircleRange.cpp | 6 +- compiler/luci/import/src/Nodes/CircleRank.cpp | 2 +- compiler/luci/import/src/Nodes/CircleReduceAny.cpp | 8 +- compiler/luci/import/src/Nodes/CircleReduceMax.cpp | 6 +- compiler/luci/import/src/Nodes/CircleReduceMin.cpp | 6 +- .../luci/import/src/Nodes/CircleReduceProd.cpp | 6 +- compiler/luci/import/src/Nodes/CircleRelu.cpp | 2 +- compiler/luci/import/src/Nodes/CircleRelu6.cpp | 2 +- compiler/luci/import/src/Nodes/CircleReluN1To1.cpp | 2 +- compiler/luci/import/src/Nodes/CircleReshape.cpp | 4 +- .../luci/import/src/Nodes/CircleResizeBilinear.cpp | 4 +- .../src/Nodes/CircleResizeNearestNeighbor.cpp | 4 +- .../import/src/Nodes/CircleReverseSequence.cpp | 8 +- compiler/luci/import/src/Nodes/CircleReverseV2.cpp | 8 +- compiler/luci/import/src/Nodes/CircleRound.cpp | 4 +- compiler/luci/import/src/Nodes/CircleRsqrt.cpp | 4 +- compiler/luci/import/src/Nodes/CircleScatterNd.cpp | 12 +- .../luci/import/src/Nodes/CircleSegmentSum.cpp | 8 +- compiler/luci/import/src/Nodes/CircleSelect.cpp | 8 +- compiler/luci/import/src/Nodes/CircleSelectV2.cpp | 12 +- compiler/luci/import/src/Nodes/CircleShape.cpp | 2 +- compiler/luci/import/src/Nodes/CircleSin.cpp | 4 +- compiler/luci/import/src/Nodes/CircleSlice.cpp | 6 +- compiler/luci/import/src/Nodes/CircleSoftmax.cpp | 2 +- .../luci/import/src/Nodes/CircleSpaceToBatchND.cpp | 12 +- .../luci/import/src/Nodes/CircleSpaceToDepth.cpp | 2 +- .../luci/import/src/Nodes/CircleSparseToDense.cpp | 8 +- compiler/luci/import/src/Nodes/CircleSqrt.cpp | 2 +- compiler/luci/import/src/Nodes/CircleSquare.cpp | 4 +- .../import/src/Nodes/CircleSquaredDifference.cpp | 10 +- compiler/luci/import/src/Nodes/CircleSqueeze.cpp | 2 +- .../luci/import/src/Nodes/CircleStridedSlice.cpp | 8 +- compiler/luci/import/src/Nodes/CircleSub.cpp | 4 +- compiler/luci/import/src/Nodes/CircleSum.cpp | 4 +- compiler/luci/import/src/Nodes/CircleTanh.cpp | 20 +- compiler/luci/import/src/Nodes/CircleTile.cpp | 10 +- compiler/luci/import/src/Nodes/CircleTopKV2.cpp | 2 +- compiler/luci/import/src/Nodes/CircleTranspose.cpp | 4 +- .../luci/import/src/Nodes/CircleTransposeConv.cpp | 24 +- compiler/luci/import/src/Nodes/CircleUnique.cpp | 89 ++ compiler/luci/import/src/Nodes/CircleUnpack.cpp | 2 +- compiler/luci/import/src/Nodes/CircleWhere.cpp | 4 +- compiler/luci/import/src/Nodes/CircleZerosLike.cpp | 2 +- compiler/luci/lang/include/luci/IR/CircleNodes.h | 5 + compiler/luci/lang/include/luci/IR/CircleNodes.lst | 7 +- .../luci/lang/include/luci/IR/CircleQuantParam.h | 1 + .../luci/lang/include/luci/IR/Nodes/CircleConst.h | 2 +- .../luci/IR/Nodes/CircleNonMaxSuppressionV4.h | 53 + .../luci/IR/Nodes/CircleNonMaxSuppressionV4Out.h | 51 + .../luci/lang/include/luci/IR/Nodes/CirclePadV2.h | 49 + .../luci/lang/include/luci/IR/Nodes/CircleUnique.h | 47 + .../lang/include/luci/IR/Nodes/CircleUniqueOut.h | 51 + compiler/luci/lang/src/Module.test.cpp | 2 +- compiler/luci/lang/src/Nodes/CircleCustom.test.cpp | 7 +- compiler/luci/lang/src/Nodes/CircleIf.test.cpp | 4 + .../src/Nodes/CircleNonMaxSuppressionV4.test.cpp | 96 ++ .../Nodes/CircleNonMaxSuppressionV4Out.test.cpp | 32 + compiler/luci/lang/src/Nodes/CirclePadV2.test.cpp | 86 ++ compiler/luci/lang/src/Nodes/CircleUnique.test.cpp | 76 ++ compiler/luci/lang/src/Nodes/CircleWhile.test.cpp | 4 + 
compiler/luci/logex/src/FormattedGraph.cpp | 41 + compiler/luci/pass/src/CircleOptimizer.cpp | 4 +- compiler/luci/pass/src/FuseBCQPass.cpp | 435 ++++---- compiler/luci/pass/src/QuantizationUtils.cpp | 20 +- compiler/luci/pass/src/QuantizeWithMinMaxPass.cpp | 25 +- .../luci/service/src/CircleShapeInferenceRule.cpp | 59 ++ .../luci/service/src/CircleTypeInferenceRule.cpp | 33 +- compiler/luci/tests/test.lst | 15 + compiler/mio-tflite/CMakeLists.txt | 6 +- compiler/one-cmds/CMakeLists.txt | 3 +- compiler/one-cmds/how-to-prepare-virtualenv.txt | 4 +- compiler/one-cmds/how-to-use-one-commands.txt | 25 +- compiler/one-cmds/one-codegen | 25 +- compiler/one-cmds/one-import | 25 +- compiler/one-cmds/one-import-bcq | 150 +++ compiler/one-cmds/one-import-tf | 30 +- compiler/one-cmds/one-import-tflite | 20 +- compiler/one-cmds/one-optimize | 20 +- compiler/one-cmds/one-pack | 32 +- compiler/one-cmds/one-prepare-venv | 16 +- compiler/one-cmds/one-quantize | 23 +- compiler/one-cmds/requires.cmake | 2 + .../pota-quantization-value-test/CMakeLists.txt | 32 +- .../compare_tensors.py | 4 +- .../layer/uint8/fake_quantization/ker.json | 44 +- .../Conv2D_004/layer/uint8/quantization/bias.json | 14 +- .../Conv2D_004/layer/uint8/quantization/ifm.json | 2 +- .../Conv2D_004/layer/uint8/quantization/ker.json | 56 +- .../Conv2D_004/layer/uint8/quantization/ofm.json | 2 +- .../Conv2D_004/layer/uint8/record_minmax/ifm.json | 4 +- .../Conv2D_004/layer/uint8/record_minmax/ofm.json | 4 +- .../layer/uint8/fake_quantization/ker.json | 34 +- .../layer/uint8/quantization/bias.json | 12 +- .../layer/uint8/quantization/ifm.json | 2 +- .../layer/uint8/quantization/ker.json | 40 +- .../layer/uint8/quantization/ofm.json | 2 +- .../layer/uint8/record_minmax/ifm.json | 4 +- .../layer/uint8/record_minmax/ofm.json | 4 +- .../layer/uint8/fake_quantization/weight.json | 76 ++ .../layer/uint8/quantization/bias.json | 9 + .../layer/uint8/quantization/in.json | 4 + .../layer/uint8/quantization/out.json | 4 + .../layer/uint8/quantization/weight.json | 80 ++ .../layer/uint8/record_minmax/in.json | 4 + .../layer/uint8/record_minmax/out.json | 4 + .../layer/uint8/fake_quantization/ker.json | 48 + .../layer/uint8/quantization/ifm.json | 4 + .../layer/uint8/quantization/ker.json | 52 + .../layer/uint8/quantization/ofm.json | 4 + .../layer/uint8/record_minmax/ifm.json | 4 + .../layer/uint8/record_minmax/ofm.json | 4 + compiler/pota-quantization-value-test/test.lst | 2 + .../test_inputs/Conv2D_004/layer/uint8/0.txt | 2 +- .../test_inputs/Conv2D_004/layer/uint8/1.txt | 1 + .../test_inputs/Conv2D_004/layer/uint8/2.txt | 1 + .../test_inputs/Conv2D_004/layer/uint8/3.txt | 1 + .../test_inputs/Conv2D_004/layer/uint8/4.txt | 1 + .../DepthwiseConv2D_002/layer/uint8/0.txt | 2 +- .../DepthwiseConv2D_002/layer/uint8/1.txt | 1 + .../DepthwiseConv2D_002/layer/uint8/2.txt | 1 + .../DepthwiseConv2D_002/layer/uint8/3.txt | 1 + .../DepthwiseConv2D_002/layer/uint8/4.txt | 1 + .../FullyConnected_003/layer/uint8/0.txt | 1 + .../FullyConnected_003/layer/uint8/1.txt | 1 + .../FullyConnected_003/layer/uint8/2.txt | 1 + .../FullyConnected_003/layer/uint8/3.txt | 1 + .../FullyConnected_003/layer/uint8/4.txt | 1 + .../TransposeConv_001/layer/uint8/0.txt | 1 + .../TransposeConv_001/layer/uint8/1.txt | 1 + .../TransposeConv_001/layer/uint8/2.txt | 1 + .../TransposeConv_001/layer/uint8/3.txt | 1 + .../TransposeConv_001/layer/uint8/4.txt | 1 + .../test_record_minmax.sh | 6 +- compiler/record-minmax/CMakeLists.txt | 5 + compiler/record-minmax/driver/Driver.cpp | 16 +- 
compiler/record-minmax/requires.cmake | 1 + compiler/record-minmax/src/HDF5Importer.cpp | 1 + compiler/record-minmax/src/MinMaxObserver.cpp | 2 +- compiler/record-minmax/src/RecordMinMax.cpp | 2 +- .../record-minmax/tests/RecordFunction.test.cpp | 14 + .../CMakeLists.txt | 1 - .../tf2circle-value-pbtxt-remote-test/README.md | 10 +- .../tf2circle-value-pbtxt-remote-test/testall.sh | 13 +- .../tf2nnpackage-value-remote-test/CMakeLists.txt | 9 +- compiler/tf2nnpackage-value-remote-test/README.md | 4 +- compiler/tf2nnpackage-value-remote-test/testall.sh | 13 +- compiler/tf2tfliteV2/README.md | 6 +- compiler/tf2tfliteV2/tf2tfliteV2.py | 102 +- compiler/tfl-verify/CMakeLists.txt | 1 + compiler/tfl-verify/requires.cmake | 1 + compiler/tfl-verify/src/Driver.cpp | 19 +- compiler/tflchef/core/src/ModelChef.cpp | 1 + .../tflchef/core/src/Op/NonMaxSuppressionV4.cpp | 30 + compiler/tflchef/core/src/Op/NonMaxSuppressionV4.h | 52 + compiler/tflchef/core/src/Op/PadV2.cpp | 28 + compiler/tflchef/core/src/Op/PadV2.h | 46 + compiler/tflchef/core/src/OpChef.def | 2 + compiler/tflchef/core/src/OpChefs.h | 2 + compiler/tflchef/proto/tflchef.proto | 13 +- .../tflchef/tflite/src/Op/NonMaxSuppressionV4.cpp | 56 + .../tflchef/tflite/src/Op/NonMaxSuppressionV4.h | 39 + compiler/tflchef/tflite/src/Op/PadV2.cpp | 42 + compiler/tflchef/tflite/src/Op/PadV2.h | 39 + compiler/tflchef/tflite/src/Op/TransposeConv.cpp | 4 + compiler/tflchef/tflite/src/RecipeChef.cpp | 2 + compiler/tflchef/tflite/src/TFliteOpChefs.h | 2 + compiler/tflchef/tflite/src/TFliteOpRegistry.h | 2 + compiler/tflchef/tools/file/Driver.cpp | 2 +- compiler/tflchef/tools/reverse/Driver.cpp | 2 +- compiler/tfldump/driver/Driver.cpp | 2 +- compiler/tfldump/src/OpPrinter.cpp | 1 + compiler/tflite2circle/CMakeLists.txt | 1 + compiler/tflite2circle/driver/Driver.cpp | 17 +- compiler/tflite2circle/requires.cmake | 1 + compiler/tflite2circle/src/BuildBuiltinOptions.h | 2 + .../NonMaxSuppressionV4Options.cpp | 30 + .../NonMaxSuppressionV4Options.h | 32 + .../src/BuildBuiltinOptions/PadV2Options.cpp | 29 + .../src/BuildBuiltinOptions/PadV2Options.h | 31 + compiler/tflite2circle/src/TFLBuiltinOptions.lst | 3 +- compiler/vconone/CMakeLists.txt | 31 + compiler/vconone/README.md | 14 + compiler/vconone/driver/driver.cpp | 36 + compiler/vconone/include/vconone/vconone.h | 61 ++ compiler/vconone/src/version.cpp | 63 ++ compiler/vconone/src/version.test.cpp | 49 + compiler/vconone/version_cfg.h.in | 22 + .../core/CL/kernels/CLArgOperationKernel.h | 124 --- .../arm_compute/core/CL/kernels/CLCastKernel.h | 121 --- .../core/CL/kernels/CLDepthToSpaceKernel.h | 82 -- .../CL/kernels/CLGEMMLowpMatrixMultiplyKernelEx.h | 117 --- .../arm_compute/core/CL/kernels/CLPReLUKernel.h | 83 -- .../core/CL/kernels/CLSpaceToDepthKernel.h | 82 -- .../kernels/CLTransposeConvLayerUpsampleKernel.h | 109 -- .../core/CPP/kernels/CPPUpsampleKernelEx.h | 88 -- .../arm_compute/core/NEON/kernels/NECastKernel.h | 96 -- .../NEON/kernels/NEDepthToSpaceLayerKernelEx.h | 96 -- .../core/NEON/kernels/NEElementwiseUnaryKernelEx.h | 118 --- .../arm_compute/core/NEON/kernels/NEPReLUKernel.h | 100 -- .../NEON/kernels/NESpaceToDepthLayerKernelEx.h | 97 -- .../arm_compute/runtime/CL/CLFunctionsEx.h | 11 - .../runtime/CL/functions/CLArgOperation.h | 129 --- .../runtime/CL/functions/CLBatchToSpaceND.h | 69 -- .../arm_compute/runtime/CL/functions/CLCast.h | 75 -- .../runtime/CL/functions/CLDepthToSpace.h | 68 -- .../CL/functions/CLDirectTransposeConvLayer.h | 201 ++++ .../CL/functions/CLFullyConnectedHybridLayer.h | 
4 +- .../CL/functions/CLGEMMLowpMatrixMultiplyCoreEx.h | 142 --- .../runtime/CL/functions/CLLogicalNot.h | 62 -- .../arm_compute/runtime/CL/functions/CLPReLU.h | 64 -- .../runtime/CL/functions/CLPixelWiseDivision.h | 103 -- .../runtime/CL/functions/CLRNNLayerEx.h | 120 --- .../runtime/CL/functions/CLSpaceToDepth.h | 68 -- .../runtime/CL/functions/CLStridedSliceEx.h | 81 -- .../runtime/CL/functions/CLTransposeConvLayer.h | 176 ++-- .../CL/functions/CLTransposeConvLayerUpsample.h | 102 -- .../runtime/CPP/functions/CPPUpsampleEx.h | 65 -- .../arm_compute/runtime/NEON/NEFunctionsEx.h | 7 - .../arm_compute/runtime/NEON/functions/NECast.h | 79 -- .../runtime/NEON/functions/NEDepthToSpaceLayerEx.h | 78 -- .../NEON/functions/NEElementwiseUnaryLayerEx.h | 70 -- .../NEON/functions/NEFullyConnectedHybridLayer.h | 4 +- .../functions/NEGEMMLowpMatrixMultiplyCoreEx.h | 170 --- .../arm_compute/runtime/NEON/functions/NEPReLU.h | 63 -- .../runtime/NEON/functions/NERNNLayerEx.h | 130 --- .../runtime/NEON/functions/NEReduceMeanEx.h | 99 -- .../runtime/NEON/functions/NESpaceToBatchLayerEx.h | 136 --- .../runtime/NEON/functions/NESpaceToDepthLayerEx.h | 79 -- .../runtime/NEON/functions/NETransposeConvLayer.h | 68 +- .../ARMComputeEx/src/core/CL/CLKernelLibrary.cpp | 39 - .../src/core/CL/cl_kernels/arg_operation.cl | 137 --- .../core/CL/cl_kernels/arithmetic_op_quantized.cl | 191 ---- .../ARMComputeEx/src/core/CL/cl_kernels/cast.cl | 233 ----- .../src/core/CL/cl_kernels/depth_to_space.cl | 185 ---- .../ARMComputeEx/src/core/CL/cl_kernels/helpers.h | 206 +++- .../src/core/CL/cl_kernels/helpers_asymm.h | 185 +++- .../ARMComputeEx/src/core/CL/cl_kernels/prelu.cl | 120 --- .../src/core/CL/cl_kernels/prelu_quantized.cl | 138 --- .../src/core/CL/cl_kernels/space_to_depth.cl | 185 ---- .../src/core/CL/kernels/CLArgOperationKernel.cpp | 181 ---- .../core/CL/kernels/CLBinaryLogicalOpKernel.cpp | 1 + .../src/core/CL/kernels/CLCastKernel.cpp | 132 --- .../src/core/CL/kernels/CLDepthToSpaceKernel.cpp | 140 --- .../core/CL/kernels/CLEmbeddingLookupKernel.cpp | 1 + .../kernels/CLGEMMLowpMatrixMultiplyKernelEx.cpp | 372 ------- .../src/core/CL/kernels/CLGatherExKernel.cpp | 1 + .../core/CL/kernels/CLHashtableLookupKernel.cpp | 3 +- .../CLInstanceNormalizationLayerKernelEx.cpp | 2 +- .../CL/kernels/CLMultiplyScaleFactorKernel.cpp | 1 + .../src/core/CL/kernels/CLNegKernel.cpp | 1 + .../src/core/CL/kernels/CLPReLUKernel.cpp | 210 ---- .../CL/kernels/CLQuantizationSymmetricKernel.cpp | 3 +- .../core/CL/kernels/CLReduceOperationKernel.cpp | 1 + .../core/CL/kernels/CLScaleFactorSymm8Kernel.cpp | 1 + .../src/core/CL/kernels/CLSpaceToDepthKernel.cpp | 148 --- .../kernels/CLTransposeConvLayerUpsampleKernel.cpp | 188 ---- .../src/core/CPP/kernels/CPPUpsampleKernelEx.cpp | 118 --- .../src/core/NEON/kernels/NECastKernel.cpp | 671 ------------ .../NEON/kernels/NEDepthToSpaceLayerKernelEx.cpp | 181 ---- .../NEON/kernels/NEElementwiseUnaryKernelEx.cpp | 221 ---- .../src/core/NEON/kernels/NEPReLUKernel.cpp | 291 ------ .../NEON/kernels/NEQuantizationSymmetricKernel.cpp | 2 +- .../NEON/kernels/NESpaceToDepthLayerKernelEx.cpp | 181 ---- .../src/runtime/CL/functions/CLArgOperation.cpp | 144 --- .../src/runtime/CL/functions/CLBinaryLogicalOp.cpp | 2 +- .../src/runtime/CL/functions/CLCast.cpp | 52 - .../src/runtime/CL/functions/CLDepthToSpace.cpp | 52 - .../CL/functions/CLDirectTransposeConvLayer.cpp | 267 +++++ .../src/runtime/CL/functions/CLEmbeddingLookup.cpp | 2 +- .../CL/functions/CLFullyConnectedHybridLayer.cpp | 16 +- 
.../CL/functions/CLFullyConnectedLayerEx.cpp | 4 +- .../functions/CLFullyConnectedReshapingLayer.cpp | 16 +- .../functions/CLGEMMLowpMatrixMultiplyCoreEx.cpp | 180 ---- .../src/runtime/CL/functions/CLGatherEx.cpp | 2 +- .../src/runtime/CL/functions/CLHashtableLookup.cpp | 2 +- .../functions/CLInstanceNormalizationLayerEx.cpp | 2 +- .../src/runtime/CL/functions/CLPReLU.cpp | 63 -- .../src/runtime/CL/functions/CLRNNLayerEx.cpp | 163 --- .../src/runtime/CL/functions/CLReduceOperation.cpp | 8 +- .../src/runtime/CL/functions/CLSpaceToDepth.cpp | 52 - .../runtime/CL/functions/CLTransposeConvLayer.cpp | 250 ++--- .../CL/functions/CLTransposeConvLayerUpsample.cpp | 92 -- .../src/runtime/CPP/functions/CPPOneHotEx.cpp | 4 +- .../src/runtime/CPP/functions/CPPUpsampleEx.cpp | 53 - .../runtime/NEON/functions/NEActivationLayerEx.cpp | 4 +- .../NEON/functions/NEBinaryLogicalOperation.cpp | 6 +- .../src/runtime/NEON/functions/NECast.cpp | 60 -- .../NEON/functions/NEDepthToSpaceLayerEx.cpp | 63 -- .../runtime/NEON/functions/NEEmbeddingLookup.cpp | 4 +- .../NEON/functions/NEFullyConnectedHybridLayer.cpp | 14 +- .../functions/NEFullyConnectedReshapingLayer.cpp | 7 +- .../functions/NEGEMMLowpMatrixMultiplyCoreEx.cpp | 513 --------- .../src/runtime/NEON/functions/NEGatherEx.cpp | 4 +- .../runtime/NEON/functions/NEHashtableLookup.cpp | 4 +- .../src/runtime/NEON/functions/NEPReLU.cpp | 55 - .../src/runtime/NEON/functions/NERNNLayerEx.cpp | 161 --- .../src/runtime/NEON/functions/NEReduceMeanEx.cpp | 180 ---- .../NEON/functions/NESpaceToBatchLayerEx.cpp | 114 -- .../NEON/functions/NESpaceToDepthLayerEx.cpp | 64 -- .../NEON/functions/NETransposeConvLayer.cpp | 231 ++--- compute/cker/CMakeLists.txt | 3 + compute/cker/include/cker/NeonTensorUtils.h | 8 +- compute/cker/include/cker/PortableTensorUtils.h | 3 +- compute/cker/include/cker/TensorUtils.h | 4 +- compute/cker/include/cker/Types.h | 25 + compute/cker/include/cker/Utils.h | 62 ++ .../cker/include/cker/operation/BatchToSpaceND.h | 133 +++ .../cker/include/cker/operation/FullyConnected.h | 67 +- .../include/cker/operation/Helper/PhiloxRandom.h | 276 +++++ .../cker/operation/Helper/RandomDistributions.h | 778 ++++++++++++++ .../cker/include/cker/operation/Helper/RandomOp.h | 52 + .../include/cker/operation/Helper/RandomOpCpu.h | 163 +++ compute/cker/include/cker/operation/L2Normalize.h | 94 ++ compute/cker/include/cker/operation/Logistic.h | 9 - .../cker/include/cker/operation/MatrixBandPart.h | 6 +- compute/cker/include/cker/operation/Pad.h | 15 +- compute/cker/include/cker/operation/Quantize.h | 47 + compute/cker/include/cker/operation/ReLU6.h | 56 + compute/cker/include/cker/operation/Reduce.h | 86 ++ .../cker/include/cker/operation/ResizeBilinear.h | 270 +++++ compute/cker/include/cker/operation/SpaceToDepth.h | 71 ++ compute/cker/include/cker/operation/SplitV.h | 81 ++ .../cker/operation/StatelessRandomUniform.h | 103 ++ compute/cker/include/cker/ruy/RuySupport.h | 41 - docs/conf.py | 2 +- docs/howto/how-to-build-runtime.md | 8 +- docs/howto/how-to-use-nnfw-api.md | 4 +- docs/nnfw/howto/CrossBuildForAndroid.md | 4 +- docs/overview/supported-operations.md | 2 +- docs/release/1.7/release-note-1.7.0.md | 46 - docs/release/1.8/release-note-1.8.0.md | 42 + docs/runtime/api-layered-arch.png | Bin 0 -> 138968 bytes docs/runtime/api.md | 34 + docs/runtime/core.md | 4 +- docs/runtime/heterogeneous-execution.md | 4 +- infra/cmake/packages/ARMComputeSourceConfig.cmake | 2 +- infra/cmake/packages/FarmhashSourceConfig.cmake | 2 +- 
infra/cmake/packages/FlatBuffersConfig.cmake | 3 +- infra/cmake/packages/HDF5Config.cmake | 1 + infra/cmake/packages/Pybind11Config.cmake | 22 + infra/cmake/packages/Pybind11SourceConfig.cmake | 18 + .../TensorFlowEigenSourceConfig.cmake | 21 + .../TensorFlowEigenSourceConfigVersion.cmake | 10 + .../TensorFlowSourceConfig.cmake | 18 + .../TensorFlowSourceConfigVersion.cmake | 10 + infra/docker/Dockerfile | 3 +- infra/docker/Dockerfile.1804 | 7 +- infra/nncc/CMakeLists.txt | 1 + infra/nncc/command/utcount | 2 +- infra/nnfw/cmake/CfgOptionFlags.cmake | 4 +- infra/nnfw/cmake/packages/EigenConfig.cmake | 2 +- .../packages/TensorFlowLite-2.2.0Config.cmake | 92 -- .../CMakeLists.txt | 83 +- .../packages/TensorFlowLite-2.3.0Config.cmake | 100 ++ infra/nnfw/config/gbs.conf | 6 +- infra/packaging/build | 3 +- infra/packaging/preset/20200630 | 15 +- infra/packaging/preset/20200731_windows | 65 ++ infra/packaging/res/tf2nnpkg.20200630 | 19 +- infra/scripts/build-tcm.sh | 24 + infra/scripts/common.sh | 47 +- infra/scripts/compiler_modules.sh | 2 +- .../scripts/docker_build_cross_aarch64_runtime.sh | 2 +- infra/scripts/docker_build_cross_arm_runtime.sh | 2 +- .../docker_build_cross_arm_runtime_release.sh | 2 +- infra/scripts/docker_build_cross_coverage.sh | 2 +- infra/scripts/docker_build_nncc.sh | 13 +- infra/scripts/docker_build_tizen_cross.sh | 2 +- infra/scripts/docker_collect_nnpkg_resources.sh | 2 +- infra/scripts/test_arm_nnpkg.sh | 4 +- infra/scripts/test_coverage.sh | 2 +- infra/scripts/test_ubuntu_runtime.sh | 11 +- infra/scripts/test_ubuntu_runtime_mixed.sh | 34 +- infra/scripts/tizen_xu4_test.sh | 12 +- nnpackage/spec/30_custom_op.md | 2 +- packaging/nnapi_test_generated.tar.gz | Bin 802276 -> 819008 bytes packaging/nnfw.spec | 26 +- .../AveragePool2D_U8_000/test.recipe | 26 + .../AveragePool2D_U8_000/test.reverse | 0 .../DepthwiseConv2D_003/test.recipe | 44 + .../DepthwiseConv2D_003/test.reverse | 0 .../DepthwiseConv2D_003/test.rule | 3 + .../DepthwiseConv2D_U8_001/test.recipe | 61 ++ .../DepthwiseConv2D_U8_001/test.reverse | 0 .../FullyConnected_003/test.recipe | 55 + .../FullyConnected_003/test.reverse | 0 .../L2Normalize_U8_000/test.recipe | 22 + .../L2Normalize_U8_000/test.reverse | 0 .../Logistic_U8_000/test.recipe | 19 + .../Logistic_U8_000/test.reverse | 0 .../Net_TConv_BN_000/test.recipe | 149 +++ .../ResizeBilinear_U8_000/test.recipe | 32 + .../ResizeBilinear_U8_000/test.reverse | 0 .../SpaceToDepth_U8_000/test.recipe | 22 + .../SpaceToDepth_U8_000/test.reverse | 0 .../TransposeConv_000/test.recipe | 2 +- .../TransposeConv_001/test.recipe | 45 + .../TransposeConv_001/test.reverse | 0 res/TensorFlowLiteRecipes/Unique_000/test.recipe | 27 + res/TensorFlowLiteRecipes/Unique_000/test.reverse | 0 res/TensorFlowLiteRecipes/Unique_001/test.recipe | 27 + res/TensorFlowLiteRecipes/Unique_001/test.reverse | 0 res/TensorFlowLiteRecipes/Unique_002/test.recipe | 27 + res/TensorFlowLiteRecipes/Unique_002/test.reverse | 0 res/TensorFlowLiteRecipes/Unique_003/test.recipe | 27 + res/TensorFlowLiteRecipes/Unique_003/test.reverse | 0 .../Unique_U8_000/test.recipe | 28 + .../Unique_U8_000/test.reverse | 0 .../Unique_U8_001/test.recipe | 28 + .../Unique_U8_001/test.reverse | 0 res/TensorFlowLiteSchema/2.3.0/schema.fbs | 1094 ++++++++++++++++++++ res/TensorFlowLiteSchema/SCHEMA.lst | 1 + .../examples/while_2/__init__.py | 32 + .../examples/while_3/__init__.py | 33 + .../examples/tconv-bn/__init__.py | 27 + runtime/contrib/android/api/build.gradle | 2 +- runtime/contrib/android_benchmark_app/README.md | 
2 +- runtime/libs/benchmark/CMakeLists.txt | 3 +- runtime/libs/benchmark/src/Result.cpp | 2 +- .../libs/misc/include/misc/polymorphic_downcast.h | 2 + runtime/nnapi-header/include/NeuralNetworksEx.h | 21 +- runtime/onert/api/CMakeLists.txt | 6 +- runtime/onert/api/include/nnfw.h | 20 +- .../include/{nnfw_dev.h => nnfw_experimental.h} | 6 +- .../api/include/{nnfw_debug.h => nnfw_internal.h} | 18 +- runtime/onert/api/include/nnfw_version.h | 2 +- runtime/onert/api/src/CustomKernel.h | 2 +- runtime/onert/api/src/nnfw_api.cc | 13 +- runtime/onert/api/src/nnfw_api_internal.cc | 77 +- runtime/onert/api/src/nnfw_api_internal.h | 8 +- runtime/onert/backend/acl_cl/KernelGenerator.cc | 880 ++++++---------- runtime/onert/backend/acl_common/AclKernelGen.h | 315 ++++++ runtime/onert/backend/acl_neon/KernelGenerator.cc | 856 ++++++--------- runtime/onert/backend/cpu/Backend.h | 10 +- runtime/onert/backend/cpu/BackendContext.h | 58 ++ runtime/onert/backend/cpu/CMakeLists.txt | 4 + runtime/onert/backend/cpu/ConstantInitializer.cc | 35 +- runtime/onert/backend/cpu/ConstantInitializer.h | 9 + runtime/onert/backend/cpu/ExternalContext.h | 64 ++ runtime/onert/backend/cpu/KernelGenerator.cc | 622 +++++++---- runtime/onert/backend/cpu/KernelGenerator.h | 13 +- runtime/onert/backend/cpu/StaticTensorManager.cc | 106 ++ runtime/onert/backend/cpu/StaticTensorManager.h | 64 ++ runtime/onert/backend/cpu/Tensor.h | 18 +- runtime/onert/backend/cpu/TensorBuilder.cc | 20 +- runtime/onert/backend/cpu/TensorBuilder.h | 13 +- .../onert/backend/cpu/ops/BatchToSpaceNDLayer.cc | 83 ++ .../onert/backend/cpu/ops/BatchToSpaceNDLayer.h | 59 ++ runtime/onert/backend/cpu/ops/CompareLayer.cc | 238 ++--- .../onert/backend/cpu/ops/FullyConnectedLayer.cc | 85 +- .../onert/backend/cpu/ops/FullyConnectedLayer.h | 9 +- runtime/onert/backend/cpu/ops/L2NormLayer.cc | 71 ++ runtime/onert/backend/cpu/ops/L2NormLayer.h | 55 + runtime/onert/backend/cpu/ops/LogSoftMaxLayer.cc | 4 +- runtime/onert/backend/cpu/ops/LogSoftMaxLayer.h | 7 +- runtime/onert/backend/cpu/ops/OperationUtils.h | 11 + runtime/onert/backend/cpu/ops/PadLayer.cc | 25 +- runtime/onert/backend/cpu/ops/PadLayer.h | 8 +- runtime/onert/backend/cpu/ops/QuantizeLayer.cc | 63 ++ runtime/onert/backend/cpu/ops/QuantizeLayer.h | 56 + runtime/onert/backend/cpu/ops/ReLU6Layer.cc | 74 ++ runtime/onert/backend/cpu/ops/ReLU6Layer.h | 57 + runtime/onert/backend/cpu/ops/ReduceLayer.cc | 38 + .../onert/backend/cpu/ops/ResizeBilinearLayer.cc | 87 ++ .../onert/backend/cpu/ops/ResizeBilinearLayer.h | 58 ++ runtime/onert/backend/cpu/ops/SliceLayer.cc | 16 +- runtime/onert/backend/cpu/ops/SliceLayer.h | 3 +- runtime/onert/backend/cpu/ops/SpaceToDepthLayer.cc | 74 ++ runtime/onert/backend/cpu/ops/SpaceToDepthLayer.h | 54 + runtime/onert/backend/cpu/ops/SplitVLayer.cc | 99 ++ runtime/onert/backend/cpu/ops/SplitVLayer.h | 60 ++ .../backend/cpu/ops/StatelessRandomUniformLayer.cc | 68 ++ .../backend/cpu/ops/StatelessRandomUniformLayer.h | 59 ++ .../onert/core/include/backend/BackendContext.h | 2 + .../onert/core/include/backend/IExternalContext.h | 34 + .../onert/core/include/backend/IPortableTensor.h | 3 + runtime/onert/core/include/backend/ITensor.h | 11 + .../onert/core/include/backend/ITensorBuilder.h | 4 +- .../onert/core/include/backend/ITensorRegistry.h | 68 +- .../backend/cpu_common/StaticTensorManager.h | 4 +- .../onert/core/include/backend/cpu_common/Tensor.h | 50 +- .../core/include/compiler/StaticShapeInference.h | 2 + .../core/include/exec/DynamicShapeInference.h | 3 + 
runtime/onert/core/include/ir/Operand.h | 8 +- runtime/onert/core/include/ir/Operations.Include.h | 3 + runtime/onert/core/include/ir/Operations.lst | 3 + runtime/onert/core/include/ir/TypeInfo.h | 17 +- .../core/include/ir/operation/BatchToSpaceND.h | 3 +- .../onert/core/include/ir/operation/LogSoftmax.h | 2 +- runtime/onert/core/include/ir/operation/Pad.h | 2 +- runtime/onert/core/include/ir/operation/Quantize.h | 49 + .../core/include/ir/operation/ResizeBilinear.h | 4 +- runtime/onert/core/include/ir/operation/SplitV.h | 59 ++ .../include/ir/operation/StatelessRandomUniform.h | 52 + runtime/onert/core/include/util/ShapeInference.h | 3 + .../backend/controlflow/DynamicTensorManager.cc | 17 +- .../src/backend/controlflow/DynamicTensorManager.h | 1 + .../src/backend/controlflow/KernelGenerator.cc | 22 +- .../core/src/backend/controlflow/TensorBuilder.cc | 6 +- .../core/src/backend/controlflow/UserTensor.h | 13 +- .../src/backend/cpu_common/DynamicTensorManager.cc | 12 +- .../src/backend/cpu_common/StaticTensorManager.cc | 31 +- runtime/onert/core/src/compiler/ExecutorFactory.cc | 40 +- runtime/onert/core/src/compiler/ExecutorFactory.h | 3 + .../onert/core/src/compiler/Fp32ToFp16Converter.cc | 14 +- runtime/onert/core/src/compiler/HEScheduler.cc | 5 +- runtime/onert/core/src/compiler/HEScheduler.h | 10 +- runtime/onert/core/src/compiler/Linear.cc | 2 +- runtime/onert/core/src/compiler/OperandContext.h | 55 - .../onert/core/src/compiler/OperationValidator.cc | 171 ++- .../onert/core/src/compiler/OperationValidator.h | 5 +- .../core/src/compiler/StaticShapeInference.cc | 34 + runtime/onert/core/src/compiler/TensorBuilders.h | 12 + .../onert/core/src/exec/DynamicShapeInference.cc | 95 +- runtime/onert/core/src/exec/ExecutorBase.cc | 4 +- runtime/onert/core/src/exec/ExecutorBase.h | 1 - runtime/onert/core/src/interp/Tensor.h | 1 + runtime/onert/core/src/interp/operations/Pad.cc | 4 +- runtime/onert/core/src/ir/Graph.cc | 2 +- runtime/onert/core/src/ir/LoweredGraph.cc | 19 +- runtime/onert/core/src/ir/Operand.cc | 15 +- runtime/onert/core/src/ir/OperationDumper.cc | 9 + runtime/onert/core/src/ir/OperationDumper.h | 1 + .../onert/core/src/ir/operation/BatchToSpaceND.cc | 2 +- runtime/onert/core/src/ir/operation/Quantize.cc | 37 + runtime/onert/core/src/ir/operation/SplitV.cc | 33 + .../operation/StatelessRandomUniform.cc} | 30 +- .../core/src/ir/pass/ConstantInsertionPass.cc | 4 +- .../core/src/ir/pass/PermutationEliminationPass.cc | 248 ++--- .../core/src/ir/pass/PermutationEliminationPass.h | 69 +- .../core/src/ir/pass/PermutationInsertionPass.cc | 30 +- .../core/src/ir/pass/PermutationInsertionPass.h | 3 +- .../core/src/ir/pass/PermutationOperationPass.cc | 10 +- runtime/onert/core/src/ir/verifier/Verifier.cc | 52 +- runtime/onert/core/src/ir/verifier/Verifier.h | 6 +- runtime/onert/core/src/util/EventCollector.cc | 9 +- runtime/onert/core/src/util/EventRecorder.h | 3 +- runtime/onert/core/src/util/ShapeInference.cc | 14 + .../frontend/base_loader/include/base_loader.h | 239 +++-- runtime/onert/frontend/circle/CMakeLists.txt | 1 + .../onert/frontend/circle/include/circle_loader.h | 1 + runtime/onert/frontend/circle/src/circle_loader.cc | 66 ++ .../onert/frontend/circle_schema/CMakeLists.txt | 7 + .../include}/circle_schema_generated.h | 0 runtime/onert/frontend/nnapi/model.cc | 2 +- .../frontend/nnapi/wrapper/OperationFactory.cc | 646 ++++-------- runtime/onert/frontend/tflite/src/tflite_loader.cc | 2 + runtime/onert/sample/minimal/CMakeLists.txt | 6 +- 
runtime/onert/sample/minimal/src/minimal.cc | 2 + runtime/onert/test/core/exec/ExecInstance.cc | 94 +- runtime/onert/test/graph/operand/UseDef.cc | 10 +- tests/custom_op/FillFrom/CMakeLists.txt | 6 +- tests/custom_op/FillFrom/FillFrom_runner.cc | 2 +- tests/custom_op/FillFrom/kernels/FillFromKernel.cc | 2 +- tests/nnapi/nnapi_gtest.skip.aarch64-linux.acl_cl | 30 +- .../nnapi/nnapi_gtest.skip.aarch64-linux.acl_neon | 31 +- tests/nnapi/nnapi_gtest.skip.aarch64-linux.cpu | 22 +- tests/nnapi/nnapi_gtest.skip.armv7l-linux.acl_cl | 30 +- tests/nnapi/nnapi_gtest.skip.armv7l-linux.acl_neon | 31 +- tests/nnapi/nnapi_gtest.skip.armv7l-linux.cpu | 22 +- tests/nnapi/nnapi_gtest.skip.noarch.interp | 29 + tests/nnapi/nnapi_gtest.skip.x86_64-linux.cpu | 22 +- .../specs/Ex/split_v_ex_1D_float_1_nnfw.mod.py | 47 + .../specs/Ex/split_v_ex_1D_float_2_nnfw.mod.py | 25 + .../nnapi/specs/Ex/split_v_ex_1D_int32_nnfw.mod.py | 47 + .../specs/Ex/split_v_ex_4D_float_1_nnfw.mod.py | 28 + .../specs/Ex/split_v_ex_4D_float_2_nnfw.mod.py | 27 + .../specs/Ex/split_v_ex_4D_float_3_nnfw.mod.py | 28 + .../specs/Ex/split_v_ex_4D_float_4_nnfw.mod.py | 32 + .../specs/Ex/split_v_ex_4D_int32_1_nnfw.mod.py | 27 + .../specs/Ex/split_v_ex_4D_int32_2_nnfw.mod.py | 28 + .../specs/Ex/split_v_ex_4D_int32_3_nnfw.mod.py | 28 + .../specs/Ex/split_v_ex_4D_int32_4_nnfw.mod.py | 28 + .../Ex/stateless_random_uniform_ex_nnfw.mod.py | 40 + .../specs/V1_0/l2_normalization_quant8_nnfw.mod.py | 30 + .../specs/V1_0/resize_bilinear_quant8_nnfw.mod.py | 18 + .../specs/{skip => }/V1_2/pad_v2_1_float.mod.py | 0 .../specs/{skip => }/V1_2/pad_v2_1_quant8.mod.py | 0 .../specs/{skip => }/V1_2/pad_v2_all_dims.mod.py | 0 .../{skip => }/V1_2/pad_v2_all_dims_quant8.mod.py | 0 .../specs/{skip => }/V1_2/pad_v2_low_rank.mod.py | 0 .../{skip => }/V1_2/pad_v2_low_rank_quant8.mod.py | 0 tests/nnapi/specs/{skip => }/V1_2/quantize.mod.py | 0 tests/nnfw_api/CMakeLists.txt | 1 + tests/nnfw_api/README.md | 2 + tests/nnfw_api/src/CircleGen.h | 201 ++++ tests/nnfw_api/src/GenModelTests.cc | 152 +++ tests/nnfw_api/src/ModelTestDynamicTensor.cc | 86 +- tests/nnfw_api/src/ModelTestInputReshaping.cc | 16 +- tests/nnfw_api/src/RegressionTests.cc | 20 +- tests/nnfw_api/src/ValidationTestAddModelLoaded.cc | 38 +- .../src/ValidationTestAddSessionPrepared.cc | 36 +- .../src/ValidationTestFourAddModelsSetInput.cc | 10 +- tests/nnfw_api/src/ValidationTestSessionCreated.cc | 34 +- tests/nnfw_api/src/ValidationTestSingleSession.cc | 6 +- tests/nnfw_api/src/fixtures.h | 2 + tests/scripts/CMakeLists.txt | 31 +- tests/scripts/benchmark_nnapi.sh | 23 +- .../scripts/{nnpkg_test.sh => command/nnpkg-test} | 16 +- tests/scripts/command/prepare-model | 64 ++ tests/scripts/{unittest.sh => command/unittest} | 43 +- tests/scripts/command/verify-tflite | 106 ++ tests/scripts/common.sh | 11 +- .../list/frameworktest_list.aarch64.acl_cl.txt | 2 +- .../list/frameworktest_list.armv7l.acl_cl.txt | 2 +- tests/scripts/list/tflite_loader_list.aarch64.txt | 2 +- tests/scripts/list/tflite_loader_list.armv7l.txt | 2 +- .../config}/MODELS/inception_module/config.sh | 0 .../config}/MODELS/inception_nonslim/config.sh | 0 .../config}/MODELS/inception_slim/config.sh | 0 .../config}/MODELS/mobilenet/config.sh | 0 .../config}/MODELS/mobilenet_quant8/config.sh | 0 .../tests => models/config}/abs/config.sh | 0 .../tests => models/config}/add/1D/config.sh | 0 .../tests => models/config}/add/4D/config.sh | 0 .../config}/average_pool_2d/aligned/config.sh | 0 .../config}/average_pool_2d/avgpool1/config.sh | 0 
.../config}/average_pool_2d/avgpool2/config.sh | 0 .../config}/batch_to_space_nd2/config.sh | 0 .../tests => models/config}/cast/config.sh | 0 .../tests => models/config}/concat/2D/config.sh | 0 .../config}/concat/concat1/config.sh | 0 .../config}/concat/concat2/config.sh | 0 .../config}/conv_2d/convolution1/config.sh | 0 .../config}/conv_2d/convolution2/config.sh | 0 .../config}/custom/squared_difference/config.sh | 0 .../config}/depthwise_conv_2d/depthconv1/config.sh | 0 .../config}/depthwise_conv_2d/depthconv2/config.sh | 0 .../config}/depthwise_conv_2d_no_fuse/config.sh | 0 .../config}/div/broadcast/config.sh | 0 .../config}/embedding_lookup/config.sh | 0 .../tests => models/config}/equal/config.sh | 0 .../tests => models/config}/exp/config.sh | 0 .../tests => models/config}/floor/floor1/config.sh | 0 .../tests => models/config}/floor/floor2/config.sh | 0 .../config}/fullyconnected/fc1/config.sh | 0 .../config}/fullyconnected/hybrid/config.sh | 0 .../config}/fullyconnected/matmul2x2/config.sh | 0 .../fullyconnected/weights_as_input/config.sh | 0 .../tests => models/config}/gather/config.sh | 0 .../tests => models/config}/greater/config.sh | 0 .../config}/greater_equal/config.sh | 0 .../config}/hashtable_lookup/config.sh | 0 .../config}/l2_normalization/config.sh | 0 .../tests => models/config}/l2_pool_2d/config.sh | 0 .../tests => models/config}/less/config.sh | 0 .../tests => models/config}/less_equal/config.sh | 0 .../tests => models/config}/logistic/config.sh | 0 .../tests => models/config}/max/config.sh | 0 .../config}/max_pool_2d/maxpool1/config.sh | 0 .../config}/max_pool_2d/maxpool2/config.sh | 0 .../tests => models/config}/mean/config.sh | 0 .../tests => models/config}/min/config.sh | 0 .../config}/mul/broadcast/config.sh | 0 .../tests => models/config}/neg/config.sh | 0 .../tests => models/config}/not_equal/config.sh | 0 .../tests => models/config}/one_hot/config.sh | 0 .../tests => models/config}/pack/config.sh | 0 .../tests => models/config}/pad/4D_2D/config.sh | 0 .../tests => models/config}/pad/pad1/config.sh | 0 .../tests => models/config}/pad/pad2/config.sh | 0 .../tests => models/config}/reduce_max/config.sh | 0 .../config}/reduce_mean/test1/config.sh | 0 .../config}/reduce_mean/test2/config.sh | 0 .../config/reduce_sum/float}/config.sh | 0 .../models/config/reduce_sum/uint8/config.sh | 1 + .../tests => models/config}/relu/config.sh | 0 .../tests => models/config}/relu6/config.sh | 0 .../tests => models/config}/reshape/3D/config.sh | 0 .../config}/reshape/reshape1/config.sh | 0 .../config}/reshape/reshape2/config.sh | 0 .../config}/resize_bilinear/config.sh | 0 .../tests => models/config}/rnn/config.sh | 0 .../tests => models/config}/rsqrt/config.sh | 0 .../tests => models/config}/select/config.sh | 0 .../tests => models/config}/shape/config.sh | 0 .../tests => models/config}/sin/config.sh | 0 .../tests => models/config}/slice/config.sh | 0 .../tests => models/config}/softmax/config.sh | 0 .../config}/space_to_batch_nd2/config.sh | 0 .../config}/space_to_depth/config.sh | 0 .../tests => models/config}/sqrt/config.sh | 0 .../tests => models/config}/squeeze/config.sh | 0 .../config}/strided_slice/config.sh | 0 .../config}/sub/broadcast/config.sh | 0 .../tests => models/config}/tanh/config.sh | 0 .../tests => models/config}/tile/config.sh | 0 .../tests => models/config}/topk_v2/config.sh | 0 .../tests => models/config}/transpose/config.sh | 0 .../config}/transpose_conv/same/config.sh | 0 .../config}/transpose_conv/valid/config.sh | 0 .../tests => 
models/config}/zeros_like/config.sh | 0 tests/scripts/{framework => models}/run_test.sh | 79 +- tests/scripts/onert-test | 49 + tests/scripts/test-driver.sh | 37 +- tests/scripts/test_framework.sh | 10 +- tests/scripts/test_scheduler_with_profiling.sh | 2 +- tests/tools/nnpackage_run/CMakeLists.txt | 2 +- tests/tools/nnpackage_run/src/args.cc | 246 ++--- tests/tools/nnpackage_run/src/h5formatter.cc | 8 +- tests/tools/nnpackage_run/src/nnpackage_run.cc | 2 +- tests/tools/tflite_loader/CMakeLists.txt | 2 +- tests/tools/tflite_run/CMakeLists.txt | 2 +- tests/tools/tflite_run/src/args.cc | 147 +-- tests/tools/tflite_run_2_2_0/CMakeLists.txt | 23 - tests/tools/tflite_vanilla_run/CMakeLists.txt | 23 + .../src/args.cc | 4 +- .../src/args.h | 10 +- .../src/tensor_view.h | 10 +- .../src/tflite_vanilla_run.cc} | 8 +- .../nncc-tc-to-nnpkg-tc/nncc-tc-to-nnpkg-tc.sh | 5 +- tools/nnpackage_tool/sth2nnpkgtc/pb2nnpkgtc.md | 2 +- tools/nnpackage_tool/sth2nnpkgtc/tflite2nnpkgtc.md | 2 +- tools/tflitefile_tool/select_operator.py | 21 +- tools/tflkit/README.md | 12 +- tools/update_version/update-version | 11 +- 852 files changed, 17904 insertions(+), 15233 deletions(-) create mode 100644 .ahub/tcchecker-tca/config.yaml create mode 100644 compiler/.ahub/tcchecker-tca/config.yaml create mode 100644 compiler/bcq-tools/CMakeLists.txt create mode 100644 compiler/bcq-tools/README.md create mode 100644 compiler/bcq-tools/generate_bcq_output_arrays create mode 100644 compiler/bcq-tools/preserve_bcq_info create mode 100644 compiler/luci-interpreter/src/kernels/DepthToSpace.cpp create mode 100644 compiler/luci-interpreter/src/kernels/DepthToSpace.h create mode 100644 compiler/luci-interpreter/src/kernels/DepthToSpace.test.cpp create mode 100644 compiler/luci-interpreter/src/kernels/Reverse.cpp create mode 100644 compiler/luci-interpreter/src/kernels/Reverse.h create mode 100644 compiler/luci-interpreter/src/kernels/Reverse.test.cpp create mode 100644 compiler/luci-interpreter/src/kernels/Slice.cpp create mode 100644 compiler/luci-interpreter/src/kernels/Slice.h create mode 100644 compiler/luci-interpreter/src/kernels/Slice.test.cpp create mode 100644 compiler/luci-interpreter/src/loader/KernelBuilder.test.cpp create mode 100644 compiler/luci/import/include/luci/Import/Nodes/CircleNonMaxSuppressionV4.h create mode 100644 compiler/luci/import/include/luci/Import/Nodes/CircleUnique.h create mode 100644 compiler/luci/import/src/Nodes/CircleNonMaxSuppressionV4.cpp create mode 100644 compiler/luci/import/src/Nodes/CircleUnique.cpp create mode 100644 compiler/luci/lang/include/luci/IR/Nodes/CircleNonMaxSuppressionV4.h create mode 100644 compiler/luci/lang/include/luci/IR/Nodes/CircleNonMaxSuppressionV4Out.h create mode 100644 compiler/luci/lang/include/luci/IR/Nodes/CirclePadV2.h create mode 100644 compiler/luci/lang/include/luci/IR/Nodes/CircleUnique.h create mode 100644 compiler/luci/lang/include/luci/IR/Nodes/CircleUniqueOut.h create mode 100644 compiler/luci/lang/src/Nodes/CircleNonMaxSuppressionV4.test.cpp create mode 100644 compiler/luci/lang/src/Nodes/CircleNonMaxSuppressionV4Out.test.cpp create mode 100644 compiler/luci/lang/src/Nodes/CirclePadV2.test.cpp create mode 100644 compiler/luci/lang/src/Nodes/CircleUnique.test.cpp create mode 100644 compiler/one-cmds/one-import-bcq create mode 100644 compiler/pota-quantization-value-test/expected_outputs/FullyConnected_003/layer/uint8/fake_quantization/weight.json create mode 100644 
compiler/pota-quantization-value-test/expected_outputs/FullyConnected_003/layer/uint8/quantization/bias.json create mode 100644 compiler/pota-quantization-value-test/expected_outputs/FullyConnected_003/layer/uint8/quantization/in.json create mode 100644 compiler/pota-quantization-value-test/expected_outputs/FullyConnected_003/layer/uint8/quantization/out.json create mode 100644 compiler/pota-quantization-value-test/expected_outputs/FullyConnected_003/layer/uint8/quantization/weight.json create mode 100644 compiler/pota-quantization-value-test/expected_outputs/FullyConnected_003/layer/uint8/record_minmax/in.json create mode 100644 compiler/pota-quantization-value-test/expected_outputs/FullyConnected_003/layer/uint8/record_minmax/out.json create mode 100644 compiler/pota-quantization-value-test/expected_outputs/TransposeConv_001/layer/uint8/fake_quantization/ker.json create mode 100644 compiler/pota-quantization-value-test/expected_outputs/TransposeConv_001/layer/uint8/quantization/ifm.json create mode 100644 compiler/pota-quantization-value-test/expected_outputs/TransposeConv_001/layer/uint8/quantization/ker.json create mode 100644 compiler/pota-quantization-value-test/expected_outputs/TransposeConv_001/layer/uint8/quantization/ofm.json create mode 100644 compiler/pota-quantization-value-test/expected_outputs/TransposeConv_001/layer/uint8/record_minmax/ifm.json create mode 100644 compiler/pota-quantization-value-test/expected_outputs/TransposeConv_001/layer/uint8/record_minmax/ofm.json create mode 100644 compiler/pota-quantization-value-test/test_inputs/Conv2D_004/layer/uint8/1.txt create mode 100644 compiler/pota-quantization-value-test/test_inputs/Conv2D_004/layer/uint8/2.txt create mode 100644 compiler/pota-quantization-value-test/test_inputs/Conv2D_004/layer/uint8/3.txt create mode 100644 compiler/pota-quantization-value-test/test_inputs/Conv2D_004/layer/uint8/4.txt create mode 100644 compiler/pota-quantization-value-test/test_inputs/DepthwiseConv2D_002/layer/uint8/1.txt create mode 100644 compiler/pota-quantization-value-test/test_inputs/DepthwiseConv2D_002/layer/uint8/2.txt create mode 100644 compiler/pota-quantization-value-test/test_inputs/DepthwiseConv2D_002/layer/uint8/3.txt create mode 100644 compiler/pota-quantization-value-test/test_inputs/DepthwiseConv2D_002/layer/uint8/4.txt create mode 100644 compiler/pota-quantization-value-test/test_inputs/FullyConnected_003/layer/uint8/0.txt create mode 100644 compiler/pota-quantization-value-test/test_inputs/FullyConnected_003/layer/uint8/1.txt create mode 100644 compiler/pota-quantization-value-test/test_inputs/FullyConnected_003/layer/uint8/2.txt create mode 100644 compiler/pota-quantization-value-test/test_inputs/FullyConnected_003/layer/uint8/3.txt create mode 100644 compiler/pota-quantization-value-test/test_inputs/FullyConnected_003/layer/uint8/4.txt create mode 100644 compiler/pota-quantization-value-test/test_inputs/TransposeConv_001/layer/uint8/0.txt create mode 100644 compiler/pota-quantization-value-test/test_inputs/TransposeConv_001/layer/uint8/1.txt create mode 100644 compiler/pota-quantization-value-test/test_inputs/TransposeConv_001/layer/uint8/2.txt create mode 100644 compiler/pota-quantization-value-test/test_inputs/TransposeConv_001/layer/uint8/3.txt create mode 100644 compiler/pota-quantization-value-test/test_inputs/TransposeConv_001/layer/uint8/4.txt create mode 100644 compiler/tflchef/core/src/Op/NonMaxSuppressionV4.cpp create mode 100644 compiler/tflchef/core/src/Op/NonMaxSuppressionV4.h create mode 100644 
compiler/tflchef/core/src/Op/PadV2.cpp create mode 100644 compiler/tflchef/core/src/Op/PadV2.h create mode 100644 compiler/tflchef/tflite/src/Op/NonMaxSuppressionV4.cpp create mode 100644 compiler/tflchef/tflite/src/Op/NonMaxSuppressionV4.h create mode 100644 compiler/tflchef/tflite/src/Op/PadV2.cpp create mode 100644 compiler/tflchef/tflite/src/Op/PadV2.h create mode 100644 compiler/tflite2circle/src/BuildBuiltinOptions/NonMaxSuppressionV4Options.cpp create mode 100644 compiler/tflite2circle/src/BuildBuiltinOptions/NonMaxSuppressionV4Options.h create mode 100644 compiler/tflite2circle/src/BuildBuiltinOptions/PadV2Options.cpp create mode 100644 compiler/tflite2circle/src/BuildBuiltinOptions/PadV2Options.h create mode 100644 compiler/vconone/CMakeLists.txt create mode 100644 compiler/vconone/README.md create mode 100644 compiler/vconone/driver/driver.cpp create mode 100644 compiler/vconone/include/vconone/vconone.h create mode 100644 compiler/vconone/src/version.cpp create mode 100644 compiler/vconone/src/version.test.cpp create mode 100644 compiler/vconone/version_cfg.h.in delete mode 100644 compute/ARMComputeEx/arm_compute/core/CL/kernels/CLArgOperationKernel.h delete mode 100644 compute/ARMComputeEx/arm_compute/core/CL/kernels/CLCastKernel.h delete mode 100644 compute/ARMComputeEx/arm_compute/core/CL/kernels/CLDepthToSpaceKernel.h delete mode 100644 compute/ARMComputeEx/arm_compute/core/CL/kernels/CLGEMMLowpMatrixMultiplyKernelEx.h delete mode 100644 compute/ARMComputeEx/arm_compute/core/CL/kernels/CLPReLUKernel.h delete mode 100644 compute/ARMComputeEx/arm_compute/core/CL/kernels/CLSpaceToDepthKernel.h delete mode 100644 compute/ARMComputeEx/arm_compute/core/CL/kernels/CLTransposeConvLayerUpsampleKernel.h delete mode 100644 compute/ARMComputeEx/arm_compute/core/CPP/kernels/CPPUpsampleKernelEx.h delete mode 100644 compute/ARMComputeEx/arm_compute/core/NEON/kernels/NECastKernel.h delete mode 100644 compute/ARMComputeEx/arm_compute/core/NEON/kernels/NEDepthToSpaceLayerKernelEx.h delete mode 100644 compute/ARMComputeEx/arm_compute/core/NEON/kernels/NEElementwiseUnaryKernelEx.h delete mode 100644 compute/ARMComputeEx/arm_compute/core/NEON/kernels/NEPReLUKernel.h delete mode 100644 compute/ARMComputeEx/arm_compute/core/NEON/kernels/NESpaceToDepthLayerKernelEx.h delete mode 100644 compute/ARMComputeEx/arm_compute/runtime/CL/functions/CLArgOperation.h delete mode 100644 compute/ARMComputeEx/arm_compute/runtime/CL/functions/CLBatchToSpaceND.h delete mode 100644 compute/ARMComputeEx/arm_compute/runtime/CL/functions/CLCast.h delete mode 100644 compute/ARMComputeEx/arm_compute/runtime/CL/functions/CLDepthToSpace.h create mode 100644 compute/ARMComputeEx/arm_compute/runtime/CL/functions/CLDirectTransposeConvLayer.h delete mode 100644 compute/ARMComputeEx/arm_compute/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCoreEx.h delete mode 100644 compute/ARMComputeEx/arm_compute/runtime/CL/functions/CLLogicalNot.h delete mode 100644 compute/ARMComputeEx/arm_compute/runtime/CL/functions/CLPReLU.h delete mode 100644 compute/ARMComputeEx/arm_compute/runtime/CL/functions/CLPixelWiseDivision.h delete mode 100644 compute/ARMComputeEx/arm_compute/runtime/CL/functions/CLRNNLayerEx.h delete mode 100644 compute/ARMComputeEx/arm_compute/runtime/CL/functions/CLSpaceToDepth.h delete mode 100644 compute/ARMComputeEx/arm_compute/runtime/CL/functions/CLStridedSliceEx.h delete mode 100644 compute/ARMComputeEx/arm_compute/runtime/CL/functions/CLTransposeConvLayerUpsample.h delete mode 100644 
compute/ARMComputeEx/arm_compute/runtime/CPP/functions/CPPUpsampleEx.h delete mode 100644 compute/ARMComputeEx/arm_compute/runtime/NEON/functions/NECast.h delete mode 100644 compute/ARMComputeEx/arm_compute/runtime/NEON/functions/NEDepthToSpaceLayerEx.h delete mode 100644 compute/ARMComputeEx/arm_compute/runtime/NEON/functions/NEElementwiseUnaryLayerEx.h delete mode 100644 compute/ARMComputeEx/arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCoreEx.h delete mode 100644 compute/ARMComputeEx/arm_compute/runtime/NEON/functions/NEPReLU.h delete mode 100644 compute/ARMComputeEx/arm_compute/runtime/NEON/functions/NERNNLayerEx.h delete mode 100644 compute/ARMComputeEx/arm_compute/runtime/NEON/functions/NEReduceMeanEx.h delete mode 100644 compute/ARMComputeEx/arm_compute/runtime/NEON/functions/NESpaceToBatchLayerEx.h delete mode 100644 compute/ARMComputeEx/arm_compute/runtime/NEON/functions/NESpaceToDepthLayerEx.h delete mode 100644 compute/ARMComputeEx/src/core/CL/cl_kernels/arg_operation.cl delete mode 100644 compute/ARMComputeEx/src/core/CL/cl_kernels/arithmetic_op_quantized.cl delete mode 100644 compute/ARMComputeEx/src/core/CL/cl_kernels/cast.cl delete mode 100644 compute/ARMComputeEx/src/core/CL/cl_kernels/depth_to_space.cl delete mode 100644 compute/ARMComputeEx/src/core/CL/cl_kernels/prelu.cl delete mode 100644 compute/ARMComputeEx/src/core/CL/cl_kernels/prelu_quantized.cl delete mode 100644 compute/ARMComputeEx/src/core/CL/cl_kernels/space_to_depth.cl delete mode 100644 compute/ARMComputeEx/src/core/CL/kernels/CLArgOperationKernel.cpp delete mode 100644 compute/ARMComputeEx/src/core/CL/kernels/CLCastKernel.cpp delete mode 100644 compute/ARMComputeEx/src/core/CL/kernels/CLDepthToSpaceKernel.cpp delete mode 100644 compute/ARMComputeEx/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyKernelEx.cpp delete mode 100644 compute/ARMComputeEx/src/core/CL/kernels/CLPReLUKernel.cpp delete mode 100644 compute/ARMComputeEx/src/core/CL/kernels/CLSpaceToDepthKernel.cpp delete mode 100644 compute/ARMComputeEx/src/core/CL/kernels/CLTransposeConvLayerUpsampleKernel.cpp delete mode 100644 compute/ARMComputeEx/src/core/CPP/kernels/CPPUpsampleKernelEx.cpp delete mode 100644 compute/ARMComputeEx/src/core/NEON/kernels/NECastKernel.cpp delete mode 100644 compute/ARMComputeEx/src/core/NEON/kernels/NEDepthToSpaceLayerKernelEx.cpp delete mode 100644 compute/ARMComputeEx/src/core/NEON/kernels/NEElementwiseUnaryKernelEx.cpp delete mode 100644 compute/ARMComputeEx/src/core/NEON/kernels/NEPReLUKernel.cpp delete mode 100644 compute/ARMComputeEx/src/core/NEON/kernels/NESpaceToDepthLayerKernelEx.cpp delete mode 100644 compute/ARMComputeEx/src/runtime/CL/functions/CLArgOperation.cpp delete mode 100644 compute/ARMComputeEx/src/runtime/CL/functions/CLCast.cpp delete mode 100644 compute/ARMComputeEx/src/runtime/CL/functions/CLDepthToSpace.cpp create mode 100644 compute/ARMComputeEx/src/runtime/CL/functions/CLDirectTransposeConvLayer.cpp delete mode 100644 compute/ARMComputeEx/src/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCoreEx.cpp delete mode 100644 compute/ARMComputeEx/src/runtime/CL/functions/CLPReLU.cpp delete mode 100644 compute/ARMComputeEx/src/runtime/CL/functions/CLRNNLayerEx.cpp delete mode 100644 compute/ARMComputeEx/src/runtime/CL/functions/CLSpaceToDepth.cpp delete mode 100644 compute/ARMComputeEx/src/runtime/CL/functions/CLTransposeConvLayerUpsample.cpp delete mode 100644 compute/ARMComputeEx/src/runtime/CPP/functions/CPPUpsampleEx.cpp delete mode 100644 
compute/ARMComputeEx/src/runtime/NEON/functions/NECast.cpp delete mode 100644 compute/ARMComputeEx/src/runtime/NEON/functions/NEDepthToSpaceLayerEx.cpp delete mode 100644 compute/ARMComputeEx/src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCoreEx.cpp delete mode 100644 compute/ARMComputeEx/src/runtime/NEON/functions/NEPReLU.cpp delete mode 100644 compute/ARMComputeEx/src/runtime/NEON/functions/NERNNLayerEx.cpp delete mode 100644 compute/ARMComputeEx/src/runtime/NEON/functions/NEReduceMeanEx.cpp delete mode 100644 compute/ARMComputeEx/src/runtime/NEON/functions/NESpaceToBatchLayerEx.cpp delete mode 100644 compute/ARMComputeEx/src/runtime/NEON/functions/NESpaceToDepthLayerEx.cpp create mode 100644 compute/cker/include/cker/operation/BatchToSpaceND.h create mode 100644 compute/cker/include/cker/operation/Helper/PhiloxRandom.h create mode 100644 compute/cker/include/cker/operation/Helper/RandomDistributions.h create mode 100644 compute/cker/include/cker/operation/Helper/RandomOp.h create mode 100644 compute/cker/include/cker/operation/Helper/RandomOpCpu.h create mode 100644 compute/cker/include/cker/operation/L2Normalize.h create mode 100644 compute/cker/include/cker/operation/Quantize.h create mode 100644 compute/cker/include/cker/operation/ReLU6.h create mode 100644 compute/cker/include/cker/operation/ResizeBilinear.h create mode 100644 compute/cker/include/cker/operation/SpaceToDepth.h create mode 100644 compute/cker/include/cker/operation/SplitV.h create mode 100644 compute/cker/include/cker/operation/StatelessRandomUniform.h delete mode 100644 docs/release/1.7/release-note-1.7.0.md create mode 100644 docs/release/1.8/release-note-1.8.0.md create mode 100644 docs/runtime/api-layered-arch.png create mode 100644 infra/cmake/packages/Pybind11Config.cmake create mode 100644 infra/cmake/packages/Pybind11SourceConfig.cmake create mode 100644 infra/cmake/packages/TensorFlowEigenSource-2.3.0/TensorFlowEigenSourceConfig.cmake create mode 100644 infra/cmake/packages/TensorFlowEigenSource-2.3.0/TensorFlowEigenSourceConfigVersion.cmake create mode 100644 infra/cmake/packages/TensorFlowSource-2.3.0/TensorFlowSourceConfig.cmake create mode 100644 infra/cmake/packages/TensorFlowSource-2.3.0/TensorFlowSourceConfigVersion.cmake delete mode 100644 infra/nnfw/cmake/packages/TensorFlowLite-2.2.0Config.cmake rename infra/nnfw/cmake/packages/{TensorFlowLite-2.2.0 => TensorFlowLite-2.3.0}/CMakeLists.txt (51%) create mode 100644 infra/nnfw/cmake/packages/TensorFlowLite-2.3.0Config.cmake create mode 100644 infra/packaging/preset/20200731_windows create mode 100755 infra/scripts/build-tcm.sh create mode 100644 res/TensorFlowLiteRecipes/AveragePool2D_U8_000/test.recipe create mode 100644 res/TensorFlowLiteRecipes/AveragePool2D_U8_000/test.reverse create mode 100644 res/TensorFlowLiteRecipes/DepthwiseConv2D_003/test.recipe create mode 100644 res/TensorFlowLiteRecipes/DepthwiseConv2D_003/test.reverse create mode 100644 res/TensorFlowLiteRecipes/DepthwiseConv2D_003/test.rule create mode 100644 res/TensorFlowLiteRecipes/DepthwiseConv2D_U8_001/test.recipe create mode 100644 res/TensorFlowLiteRecipes/DepthwiseConv2D_U8_001/test.reverse create mode 100644 res/TensorFlowLiteRecipes/FullyConnected_003/test.recipe create mode 100644 res/TensorFlowLiteRecipes/FullyConnected_003/test.reverse create mode 100644 res/TensorFlowLiteRecipes/L2Normalize_U8_000/test.recipe create mode 100644 res/TensorFlowLiteRecipes/L2Normalize_U8_000/test.reverse create mode 100644 res/TensorFlowLiteRecipes/Logistic_U8_000/test.recipe create mode 
100644 res/TensorFlowLiteRecipes/Logistic_U8_000/test.reverse create mode 100644 res/TensorFlowLiteRecipes/Net_TConv_BN_000/test.recipe create mode 100644 res/TensorFlowLiteRecipes/ResizeBilinear_U8_000/test.recipe create mode 100644 res/TensorFlowLiteRecipes/ResizeBilinear_U8_000/test.reverse create mode 100644 res/TensorFlowLiteRecipes/SpaceToDepth_U8_000/test.recipe create mode 100644 res/TensorFlowLiteRecipes/SpaceToDepth_U8_000/test.reverse create mode 100644 res/TensorFlowLiteRecipes/TransposeConv_001/test.recipe create mode 100644 res/TensorFlowLiteRecipes/TransposeConv_001/test.reverse create mode 100644 res/TensorFlowLiteRecipes/Unique_000/test.recipe create mode 100644 res/TensorFlowLiteRecipes/Unique_000/test.reverse create mode 100644 res/TensorFlowLiteRecipes/Unique_001/test.recipe create mode 100644 res/TensorFlowLiteRecipes/Unique_001/test.reverse create mode 100644 res/TensorFlowLiteRecipes/Unique_002/test.recipe create mode 100644 res/TensorFlowLiteRecipes/Unique_002/test.reverse create mode 100644 res/TensorFlowLiteRecipes/Unique_003/test.recipe create mode 100644 res/TensorFlowLiteRecipes/Unique_003/test.reverse create mode 100644 res/TensorFlowLiteRecipes/Unique_U8_000/test.recipe create mode 100644 res/TensorFlowLiteRecipes/Unique_U8_000/test.reverse create mode 100644 res/TensorFlowLiteRecipes/Unique_U8_001/test.recipe create mode 100644 res/TensorFlowLiteRecipes/Unique_U8_001/test.reverse create mode 100644 res/TensorFlowLiteSchema/2.3.0/schema.fbs create mode 100644 res/TensorFlowPythonExamples/examples/while_2/__init__.py create mode 100644 res/TensorFlowPythonExamples/examples/while_3/__init__.py create mode 100644 res/TensorFlowPythonModels/examples/tconv-bn/__init__.py rename runtime/onert/api/include/{nnfw_dev.h => nnfw_experimental.h} (94%) rename runtime/onert/api/include/{nnfw_debug.h => nnfw_internal.h} (67%) create mode 100644 runtime/onert/backend/acl_common/AclKernelGen.h create mode 100644 runtime/onert/backend/cpu/BackendContext.h create mode 100644 runtime/onert/backend/cpu/ExternalContext.h create mode 100644 runtime/onert/backend/cpu/StaticTensorManager.cc create mode 100644 runtime/onert/backend/cpu/StaticTensorManager.h create mode 100644 runtime/onert/backend/cpu/ops/BatchToSpaceNDLayer.cc create mode 100644 runtime/onert/backend/cpu/ops/BatchToSpaceNDLayer.h create mode 100644 runtime/onert/backend/cpu/ops/L2NormLayer.cc create mode 100644 runtime/onert/backend/cpu/ops/L2NormLayer.h create mode 100644 runtime/onert/backend/cpu/ops/QuantizeLayer.cc create mode 100644 runtime/onert/backend/cpu/ops/QuantizeLayer.h create mode 100644 runtime/onert/backend/cpu/ops/ReLU6Layer.cc create mode 100644 runtime/onert/backend/cpu/ops/ReLU6Layer.h create mode 100644 runtime/onert/backend/cpu/ops/ResizeBilinearLayer.cc create mode 100644 runtime/onert/backend/cpu/ops/ResizeBilinearLayer.h create mode 100644 runtime/onert/backend/cpu/ops/SpaceToDepthLayer.cc create mode 100644 runtime/onert/backend/cpu/ops/SpaceToDepthLayer.h create mode 100644 runtime/onert/backend/cpu/ops/SplitVLayer.cc create mode 100644 runtime/onert/backend/cpu/ops/SplitVLayer.h create mode 100644 runtime/onert/backend/cpu/ops/StatelessRandomUniformLayer.cc create mode 100644 runtime/onert/backend/cpu/ops/StatelessRandomUniformLayer.h create mode 100644 runtime/onert/core/include/backend/IExternalContext.h create mode 100644 runtime/onert/core/include/ir/operation/Quantize.h create mode 100644 runtime/onert/core/include/ir/operation/SplitV.h create mode 100644 
runtime/onert/core/include/ir/operation/StatelessRandomUniform.h delete mode 100644 runtime/onert/core/src/compiler/OperandContext.h create mode 100644 runtime/onert/core/src/ir/operation/Quantize.cc create mode 100644 runtime/onert/core/src/ir/operation/SplitV.cc rename runtime/onert/core/src/{compiler/OperandContext.cc => ir/operation/StatelessRandomUniform.cc} (52%) create mode 100644 runtime/onert/frontend/circle_schema/CMakeLists.txt rename runtime/onert/frontend/{circle/src => circle_schema/include}/circle_schema_generated.h (100%) create mode 100644 tests/nnapi/specs/Ex/split_v_ex_1D_float_1_nnfw.mod.py create mode 100644 tests/nnapi/specs/Ex/split_v_ex_1D_float_2_nnfw.mod.py create mode 100644 tests/nnapi/specs/Ex/split_v_ex_1D_int32_nnfw.mod.py create mode 100644 tests/nnapi/specs/Ex/split_v_ex_4D_float_1_nnfw.mod.py create mode 100644 tests/nnapi/specs/Ex/split_v_ex_4D_float_2_nnfw.mod.py create mode 100644 tests/nnapi/specs/Ex/split_v_ex_4D_float_3_nnfw.mod.py create mode 100644 tests/nnapi/specs/Ex/split_v_ex_4D_float_4_nnfw.mod.py create mode 100644 tests/nnapi/specs/Ex/split_v_ex_4D_int32_1_nnfw.mod.py create mode 100644 tests/nnapi/specs/Ex/split_v_ex_4D_int32_2_nnfw.mod.py create mode 100644 tests/nnapi/specs/Ex/split_v_ex_4D_int32_3_nnfw.mod.py create mode 100644 tests/nnapi/specs/Ex/split_v_ex_4D_int32_4_nnfw.mod.py create mode 100644 tests/nnapi/specs/Ex/stateless_random_uniform_ex_nnfw.mod.py create mode 100644 tests/nnapi/specs/V1_0/l2_normalization_quant8_nnfw.mod.py create mode 100644 tests/nnapi/specs/V1_0/resize_bilinear_quant8_nnfw.mod.py rename tests/nnapi/specs/{skip => }/V1_2/pad_v2_1_float.mod.py (100%) rename tests/nnapi/specs/{skip => }/V1_2/pad_v2_1_quant8.mod.py (100%) rename tests/nnapi/specs/{skip => }/V1_2/pad_v2_all_dims.mod.py (100%) rename tests/nnapi/specs/{skip => }/V1_2/pad_v2_all_dims_quant8.mod.py (100%) rename tests/nnapi/specs/{skip => }/V1_2/pad_v2_low_rank.mod.py (100%) rename tests/nnapi/specs/{skip => }/V1_2/pad_v2_low_rank_quant8.mod.py (100%) rename tests/nnapi/specs/{skip => }/V1_2/quantize.mod.py (100%) create mode 100644 tests/nnfw_api/src/CircleGen.h create mode 100644 tests/nnfw_api/src/GenModelTests.cc rename tests/scripts/{nnpkg_test.sh => command/nnpkg-test} (84%) mode change 100755 => 100644 create mode 100644 tests/scripts/command/prepare-model rename tests/scripts/{unittest.sh => command/unittest} (72%) mode change 100755 => 100644 create mode 100644 tests/scripts/command/verify-tflite rename tests/scripts/{framework/tests => models/config}/MODELS/inception_module/config.sh (100%) rename tests/scripts/{framework/tests => models/config}/MODELS/inception_nonslim/config.sh (100%) rename tests/scripts/{framework/tests => models/config}/MODELS/inception_slim/config.sh (100%) rename tests/scripts/{framework/tests => models/config}/MODELS/mobilenet/config.sh (100%) rename tests/scripts/{framework/tests => models/config}/MODELS/mobilenet_quant8/config.sh (100%) rename tests/scripts/{framework/tests => models/config}/abs/config.sh (100%) rename tests/scripts/{framework/tests => models/config}/add/1D/config.sh (100%) rename tests/scripts/{framework/tests => models/config}/add/4D/config.sh (100%) rename tests/scripts/{framework/tests => models/config}/average_pool_2d/aligned/config.sh (100%) rename tests/scripts/{framework/tests => models/config}/average_pool_2d/avgpool1/config.sh (100%) rename tests/scripts/{framework/tests => models/config}/average_pool_2d/avgpool2/config.sh (100%) rename tests/scripts/{framework/tests => 
models/config}/batch_to_space_nd2/config.sh (100%) rename tests/scripts/{framework/tests => models/config}/cast/config.sh (100%) rename tests/scripts/{framework/tests => models/config}/concat/2D/config.sh (100%) rename tests/scripts/{framework/tests => models/config}/concat/concat1/config.sh (100%) rename tests/scripts/{framework/tests => models/config}/concat/concat2/config.sh (100%) rename tests/scripts/{framework/tests => models/config}/conv_2d/convolution1/config.sh (100%) rename tests/scripts/{framework/tests => models/config}/conv_2d/convolution2/config.sh (100%) rename tests/scripts/{framework/tests => models/config}/custom/squared_difference/config.sh (100%) rename tests/scripts/{framework/tests => models/config}/depthwise_conv_2d/depthconv1/config.sh (100%) rename tests/scripts/{framework/tests => models/config}/depthwise_conv_2d/depthconv2/config.sh (100%) rename tests/scripts/{framework/tests => models/config}/depthwise_conv_2d_no_fuse/config.sh (100%) rename tests/scripts/{framework/tests => models/config}/div/broadcast/config.sh (100%) rename tests/scripts/{framework/tests => models/config}/embedding_lookup/config.sh (100%) rename tests/scripts/{framework/tests => models/config}/equal/config.sh (100%) rename tests/scripts/{framework/tests => models/config}/exp/config.sh (100%) rename tests/scripts/{framework/tests => models/config}/floor/floor1/config.sh (100%) rename tests/scripts/{framework/tests => models/config}/floor/floor2/config.sh (100%) rename tests/scripts/{framework/tests => models/config}/fullyconnected/fc1/config.sh (100%) rename tests/scripts/{framework/tests => models/config}/fullyconnected/hybrid/config.sh (100%) rename tests/scripts/{framework/tests => models/config}/fullyconnected/matmul2x2/config.sh (100%) rename tests/scripts/{framework/tests => models/config}/fullyconnected/weights_as_input/config.sh (100%) rename tests/scripts/{framework/tests => models/config}/gather/config.sh (100%) rename tests/scripts/{framework/tests => models/config}/greater/config.sh (100%) rename tests/scripts/{framework/tests => models/config}/greater_equal/config.sh (100%) rename tests/scripts/{framework/tests => models/config}/hashtable_lookup/config.sh (100%) rename tests/scripts/{framework/tests => models/config}/l2_normalization/config.sh (100%) rename tests/scripts/{framework/tests => models/config}/l2_pool_2d/config.sh (100%) rename tests/scripts/{framework/tests => models/config}/less/config.sh (100%) rename tests/scripts/{framework/tests => models/config}/less_equal/config.sh (100%) rename tests/scripts/{framework/tests => models/config}/logistic/config.sh (100%) rename tests/scripts/{framework/tests => models/config}/max/config.sh (100%) rename tests/scripts/{framework/tests => models/config}/max_pool_2d/maxpool1/config.sh (100%) rename tests/scripts/{framework/tests => models/config}/max_pool_2d/maxpool2/config.sh (100%) rename tests/scripts/{framework/tests => models/config}/mean/config.sh (100%) rename tests/scripts/{framework/tests => models/config}/min/config.sh (100%) rename tests/scripts/{framework/tests => models/config}/mul/broadcast/config.sh (100%) rename tests/scripts/{framework/tests => models/config}/neg/config.sh (100%) rename tests/scripts/{framework/tests => models/config}/not_equal/config.sh (100%) rename tests/scripts/{framework/tests => models/config}/one_hot/config.sh (100%) rename tests/scripts/{framework/tests => models/config}/pack/config.sh (100%) rename tests/scripts/{framework/tests => models/config}/pad/4D_2D/config.sh (100%) rename 
tests/scripts/{framework/tests => models/config}/pad/pad1/config.sh (100%) rename tests/scripts/{framework/tests => models/config}/pad/pad2/config.sh (100%) rename tests/scripts/{framework/tests => models/config}/reduce_max/config.sh (100%) rename tests/scripts/{framework/tests => models/config}/reduce_mean/test1/config.sh (100%) rename tests/scripts/{framework/tests => models/config}/reduce_mean/test2/config.sh (100%) rename tests/scripts/{framework/tests/reduce_sum => models/config/reduce_sum/float}/config.sh (100%) create mode 100755 tests/scripts/models/config/reduce_sum/uint8/config.sh rename tests/scripts/{framework/tests => models/config}/relu/config.sh (100%) rename tests/scripts/{framework/tests => models/config}/relu6/config.sh (100%) rename tests/scripts/{framework/tests => models/config}/reshape/3D/config.sh (100%) rename tests/scripts/{framework/tests => models/config}/reshape/reshape1/config.sh (100%) rename tests/scripts/{framework/tests => models/config}/reshape/reshape2/config.sh (100%) rename tests/scripts/{framework/tests => models/config}/resize_bilinear/config.sh (100%) rename tests/scripts/{framework/tests => models/config}/rnn/config.sh (100%) rename tests/scripts/{framework/tests => models/config}/rsqrt/config.sh (100%) rename tests/scripts/{framework/tests => models/config}/select/config.sh (100%) rename tests/scripts/{framework/tests => models/config}/shape/config.sh (100%) rename tests/scripts/{framework/tests => models/config}/sin/config.sh (100%) rename tests/scripts/{framework/tests => models/config}/slice/config.sh (100%) rename tests/scripts/{framework/tests => models/config}/softmax/config.sh (100%) rename tests/scripts/{framework/tests => models/config}/space_to_batch_nd2/config.sh (100%) rename tests/scripts/{framework/tests => models/config}/space_to_depth/config.sh (100%) rename tests/scripts/{framework/tests => models/config}/sqrt/config.sh (100%) rename tests/scripts/{framework/tests => models/config}/squeeze/config.sh (100%) rename tests/scripts/{framework/tests => models/config}/strided_slice/config.sh (100%) rename tests/scripts/{framework/tests => models/config}/sub/broadcast/config.sh (100%) rename tests/scripts/{framework/tests => models/config}/tanh/config.sh (100%) rename tests/scripts/{framework/tests => models/config}/tile/config.sh (100%) rename tests/scripts/{framework/tests => models/config}/topk_v2/config.sh (100%) rename tests/scripts/{framework/tests => models/config}/transpose/config.sh (100%) rename tests/scripts/{framework/tests => models/config}/transpose_conv/same/config.sh (100%) rename tests/scripts/{framework/tests => models/config}/transpose_conv/valid/config.sh (100%) rename tests/scripts/{framework/tests => models/config}/zeros_like/config.sh (100%) rename tests/scripts/{framework => models}/run_test.sh (81%) create mode 100644 tests/scripts/onert-test delete mode 100644 tests/tools/tflite_run_2_2_0/CMakeLists.txt create mode 100644 tests/tools/tflite_vanilla_run/CMakeLists.txt rename tests/tools/{tflite_run_2_2_0 => tflite_vanilla_run}/src/args.cc (98%) rename tests/tools/{tflite_run_2_2_0 => tflite_vanilla_run}/src/args.h (92%) rename tests/tools/{tflite_run_2_2_0 => tflite_vanilla_run}/src/tensor_view.h (94%) rename tests/tools/{tflite_run_2_2_0/src/tflite_run_2_2_0.cc => tflite_vanilla_run/src/tflite_vanilla_run.cc} (96%) diff --git a/.ahub/tcchecker-tca/config.yaml b/.ahub/tcchecker-tca/config.yaml new file mode 100644 index 0000000..cd34d79 --- /dev/null +++ b/.ahub/tcchecker-tca/config.yaml @@ -0,0 +1,43 @@ +version: 
2 +test: + - name: NN Runtime + testCaseLanguage: CPP + testFW: GTEST + testCaseFolder: + - ./compute/test/cker + - ./runtime/onert/core/src/backend/cpu_common + - ./runtime/onert/frontend/nnapi + - ./runtime/onert/test/core/compiler + - ./runtime/onert/test/core/exec + - ./runtime/onert/test/core/interp + - ./runtime/onert/test/graph + - ./runtime/onert/test/graph/operand + - ./runtime/onert/test/graph/operation + - ./runtime/onert/test/graph/verifier + - ./runtime/onert/test/ir + - ./runtime/onert/test/util + - ./tests/nnapi/src + - ./tests/nnfw_api/src + - ./tests/tools/tflite_run/src + + testFile: + - extension: cpp + any: true + - extension: cc + any: true + + testCase: + - condition: + - functionName: + starts: + - TEST + + negativeTestCase: + - condition: + - testName: + starts: + - neg_ + + positiveTestCase: + - condition: + - inverse: negativeTestCase diff --git a/.ctags b/.ctags index 13c27ab..8815f1f 100644 --- a/.ctags +++ b/.ctags @@ -3,5 +3,6 @@ --exclude=build --exclude=tags --exclude=tests/scripts/framework/cache +--exclude=tests/scripts/models/cache --exclude=tools/cross/rootfs --exclude=doxygen diff --git a/.gitignore b/.gitignore index d093191..263856b 100644 --- a/.gitignore +++ b/.gitignore @@ -8,7 +8,7 @@ *.pyc # Test cache for model download -/tests/scripts/framework/cache +/tests/scripts/**/cache # Test report /report diff --git a/Makefile.template b/Makefile.template index 6c919f3..a21937d 100644 --- a/Makefile.template +++ b/Makefile.template @@ -108,7 +108,9 @@ build: build_internal install: $(TIMESTAMP_INSTALL) -create_tar: runtime_tar_internal +create_package: runtime_tar_internal + +create_acl_tar: acl_tar_internal clean: rm -rf $(WORKSPACE) @@ -149,10 +151,13 @@ install_internal: touch $(TIMESTAMP_INSTALL) runtime_tar_internal: $(TIMESTAMP_BUILD) install_internal - tar -zcf nnfw-package.tar.gz -C $(INSTALL_PATH) lib - tar -zcf nnfw-dev-package.tar.gz -C $(INSTALL_PATH) include/nnfw - tar -zcf nnfw-internal-dev-package.tar.gz -C $(INSTALL_PATH) include/onert - mv nnfw-*package.tar.gz $(INSTALL_PATH)/. 
+ tar -zcf $(WORKSPACE)/nnfw-package.tar.gz -C $(INSTALL_PATH) lib + tar -zcf $(WORKSPACE)/nnfw-devel-package.tar.gz -C $(INSTALL_PATH) include/nnfw + tar -zcf $(WORKSPACE)/nnfw-plugin-devel-package.tar.gz -C $(INSTALL_PATH) include/onert + tar -zcf $(WORKSPACE)/nnfw-test-package.tar.gz -C ${INSTALL_PATH} bin test unittest unittest_standalone + +acl_tar_internal: $(BUILD_FOLDER) + tar -zcf $(WORKSPACE)/nnfw-acl.tar.gz -C ${OVERLAY_FOLDER} lib install_internal_acl: # Workaround to install acl for test (ignore error when there is no file to copy) diff --git a/compiler/.ahub/tcchecker-tca/config.yaml b/compiler/.ahub/tcchecker-tca/config.yaml new file mode 100644 index 0000000..ef681de --- /dev/null +++ b/compiler/.ahub/tcchecker-tca/config.yaml @@ -0,0 +1,54 @@ +version: 2 +test: + - name: NN Compiler + testCaseLanguage: CPP + testFW: GTEST + testCaseFolder: + - ./angkor + - ./arser + - ./circle2circle + - ./circle-quantizer + - ./cwrap + - ./foder + - ./hermes + - ./hermes-std + - ./loco + - ./locomotiv + - ./locop + - ./logo + - ./logo-core + - ./luci + - ./luci-interpreter + - ./luci-value-test + - ./mio-circle + - ./mio-tflite + - ./oops + - ./pepper-assert + - ./pepper-str + - ./pepper-strcast + - ./pp + - ./record-minmax + - ./safemain + - ./souschef + - ./stdex + - ./tflite2circle + + testFile: + - extension: .test.cpp + any: true + + testCase: + - condition: + - functionName: + starts: + - TEST + + negativeTestCase: + - condition: + - testName: + ends: + - _NEG + + positiveTestCase: + - condition: + - inverse: negativeTestCase diff --git a/compiler/bcq-tools/CMakeLists.txt b/compiler/bcq-tools/CMakeLists.txt new file mode 100644 index 0000000..fcf01de --- /dev/null +++ b/compiler/bcq-tools/CMakeLists.txt @@ -0,0 +1,27 @@ +set(BCQ_TOOLS_FILES + generate_bcq_output_arrays + preserve_bcq_info +) + +foreach(BCQ_TOOLS IN ITEMS ${BCQ_TOOLS_FILES}) + + set(BCQ_TOOLS_FILE ${BCQ_TOOLS}) + set(BCQ_TOOLS_SRC "${CMAKE_CURRENT_SOURCE_DIR}/${BCQ_TOOLS_FILE}") + set(BCQ_TOOLS_BIN "${CMAKE_CURRENT_BINARY_DIR}/${BCQ_TOOLS_FILE}") + set(BCQ_TOOLS_TARGET "${BCQ_TOOLS}_target") + + add_custom_command(OUTPUT ${BCQ_TOOLS_BIN} + COMMAND ${CMAKE_COMMAND} -E copy "${BCQ_TOOLS_SRC}" "${BCQ_TOOLS_BIN}" + DEPENDS ${BCQ_TOOLS_SRC} + COMMENT "Generate ${BCQ_TOOLS_BIN}" + ) + + add_custom_target(${BCQ_TOOLS_TARGET} ALL DEPENDS ${BCQ_TOOLS_BIN}) + + install(FILES ${BCQ_TOOLS_BIN} + PERMISSIONS OWNER_WRITE OWNER_READ OWNER_EXECUTE + GROUP_READ GROUP_EXECUTE + WORLD_READ WORLD_EXECUTE + DESTINATION bin) + +endforeach(BCQ_TOOLS) diff --git a/compiler/bcq-tools/README.md b/compiler/bcq-tools/README.md new file mode 100644 index 0000000..18b0f48 --- /dev/null +++ b/compiler/bcq-tools/README.md @@ -0,0 +1,78 @@ +# BCQ Tools + +This directory includes some tools related to BCQ. + +## preserve_bcq_info + +### Purpose + +`preserve_bcq_info` is for preserving constant nodes which include BCQ information. +When a `.pb` file is converted to a `.tflite` file by the TFLite converter, constant nodes whose values are exactly the same are removed and replaced with links to a single representative node. +This makes it impossible to know which constant node should be linked to a node to which we want to apply BCQ. +One solution is to make all of the identical constant nodes different by inserting unique values, and then to ignore the newly generated unique values when BCQ fusing is applied. +`preserve_bcq_info` generates and inserts unique dummy values into constant nodes that share the same values so that they are not removed by the TensorFlow Lite converter. +As a result, BCQ information will be preserved.
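The core idea can be sketched in a few lines of plain Python (illustrative only, not part of this patch; the helper name and the dict layout are made up for illustration, while the actual tool added below rewrites `Const` nodes of a frozen TensorFlow `GraphDef`):

```python
import numpy as np

def make_constants_unique(constants):
    """Append a unique negative dummy value to each constant so that
    constants with identical values are no longer byte-identical.

    `constants` maps node name -> 1-D numpy array (hypothetical helper,
    names and values are for illustration only).
    """
    next_dummy = -1
    preserved = {}
    for name, value in constants.items():
        preserved[name] = np.append(value, next_dummy)
        next_dummy -= 1  # dummy values start at -1 and decrease
    return preserved

# Three identical constants remain distinct after preservation.
constants = {
    "const1": np.array([1, 2, 3]),
    "const2": np.array([1, 2, 3]),
    "const3": np.array([1, 2, 3]),
}
print(make_constants_unique(constants))
# {'const1': array([ 1,  2,  3, -1]), 'const2': array([ 1,  2,  3, -2]), 'const3': array([ 1,  2,  3, -3])}
```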
+ +### How to use + +```bash +preserve_bcq_info \ +--input_path /path/to/original_model.pb \ +--output_path /path/to/preserved_model.pb +``` + +### How it works + +If we add a unique dummy value at the end of each constant node, all of the constant nodes become different. The following is an example. + +``` +[Original Constant Nodes] +const(value=[1, 2, 3], name='const1') +const(value=[1, 2, 3], name='const2') +const(value=[1, 2, 3], name='const3') + +[After BCQ information preserved] +const(value=[1, 2, 3, -1], name='const1') +const(value=[1, 2, 3, -2], name='const2') +const(value=[1, 2, 3, -3], name='const3') +``` + +For dummy values, negative values are used instead of positive values. +This is because positive values may be confused with original constant node values. +The unique dummy values start from -1 and decrease to -2, -3, ..., -N, where N is the number of preserved constant nodes. + +### Caution + +- Newly generated dummy values should be ignored when the constant nodes are used. + +## generate_bcq_output_arrays + +### Purpose + +To apply BCQ, BCQ information nodes should be designated as model outputs so that they remain alive even after TFLite conversion is finished. +However, there are often too many nodes to designate by hand, and the resulting string can be too long to copy and paste. +`generate_bcq_output_arrays` generates the output_arrays string, which includes the BCQ information nodes. + +### How to use + +```bash +generate_bcq_output_arrays \ +--input_path /path/to/original_model.pb \ +--output_path /path/to/output_arrays.txt +``` + +### How it works + +``` +[Original BCQ information nodes] +const(value=[1, 2, 3, -1], name='const1') +const(value=[1, 2, 3, -2], name='const2') +const(value=[1, 2, 3, -3], name='const3') + +[Generated output_arrays] +,const1,const2,const3 +``` + +### Caution + +- The generated output_arrays will start with a comma. diff --git a/compiler/bcq-tools/generate_bcq_output_arrays b/compiler/bcq-tools/generate_bcq_output_arrays new file mode 100644 index 0000000..48e8a93 --- /dev/null +++ b/compiler/bcq-tools/generate_bcq_output_arrays @@ -0,0 +1,90 @@ +#!/usr/bin/env python3 + +import tensorflow as tf + +import argparse +import sys + + +def _get_parser(): + """ + Returns an ArgumentParser for generating output_arrays. + """ + parser = argparse.ArgumentParser( + description=("Command line tool to generate output_arrays of BCQ nodes")) + + # Input and output path.
+ parser.add_argument( + "-i", + "--input_path", + type=str, + help="Full filepath of the input file.", + required=True) + parser.add_argument( + "-o", + "--output_path", + type=str, + help="Full filepath of the output file.", + required=True) + + return parser + + +def load_graph(frozen_graph_filename): + """ + Load graph from frozen pb file + """ + with tf.compat.v1.gfile.GFile(frozen_graph_filename, "rb") as f: + graph_def = tf.compat.v1.GraphDef() + graph_def.ParseFromString(f.read()) + with tf.Graph().as_default() as graph: + tf.import_graph_def(graph_def, name='') + return graph + + +def dtype2str(dtype): + if dtype == "int32": + return "TF_INT32" + elif dtype == "int64": + return "TF_INT64" + elif dtype == "float32": + return "TF_FLOAT" + elif dtype == "bool": + return "TF_BOOL" + else: + raise Exception("Not supported dtype") + + +def print_output_arrays(flags): + graph_model = load_graph(flags.input_path) + graph_model_def = graph_model.as_graph_def() + ops = graph_model.get_operations() + + output_names = [op.outputs[0].name for op in ops + if op.type == "Const" and "bcqinfo_" in op.outputs[0].name] + + output_arrays = "" + for output_name in output_names: + output_arrays += "," + + colon_index = output_name.find(":") + if colon_index == -1: + output_arrays += output_name + else: + output_arrays += output_name[:colon_index] + + f = open(flags.output_path, 'w') + f.write(output_arrays) + f.close() + + +def main(): + # Parse argument. + parser = _get_parser() + flags = parser.parse_known_args(args=sys.argv[1:]) + + print_output_arrays(flags[0]) + + +if __name__ == "__main__": + main() diff --git a/compiler/bcq-tools/preserve_bcq_info b/compiler/bcq-tools/preserve_bcq_info new file mode 100644 index 0000000..2ede8d4 --- /dev/null +++ b/compiler/bcq-tools/preserve_bcq_info @@ -0,0 +1,116 @@ +#!/usr/bin/env python3 + +import tensorflow as tf +import numpy as np + +import argparse +import sys + + +def _get_parser(): + """ + Returns an ArgumentParser for preserving BCQ information. + """ + parser = argparse.ArgumentParser( + description=("Command line tool to preserve BCQ information")) + + # Input and output path. + parser.add_argument( + "-i", + "--input_path", + type=str, + help="Full filepath of the input file.", + required=True) + parser.add_argument( + "-o", + "--output_path", + type=str, + help="Full filepath of the output file.", + required=True) + + return parser + + +def load_graph(frozen_graph_filename): + """ + Load graph from frozen pb file + """ + with tf.compat.v1.gfile.GFile(frozen_graph_filename, "rb") as f: + graph_def = tf.compat.v1.GraphDef() + graph_def.ParseFromString(f.read()) + with tf.Graph().as_default() as graph: + tf.import_graph_def(graph_def, name='') + return graph + + +def preserve_bcq_info(flags): + """ + Generate unique dummy value from -1 to -N. + + We use negative values to preserve BCQ information because + positive values may cause some confusion with real BCQ information values. 
+ """ + + class UniqueValueGen: + def __init__(self): + self.unique_value = -1 + + def gen(self): + val = self.unique_value + self.unique_value = val - 1 + return val + + unique_value = UniqueValueGen() + + original_graph_model = load_graph(flags.input_path) + original_graph_model_def = original_graph_model.as_graph_def() + + new_graph = tf.compat.v1.GraphDef() + substitution_dict = {} + + DT_INT32 = None # Just for copying DT_INT32 attribute value + + for node in original_graph_model_def.node: + if node.op == "Const": + # Because bcqinfo_do_w_x is BOOL type, we cannot add dummy value at the end. + # Therefore we should convert the type to INT32 type. + if "/bcqinfo_do_w_x" in node.name: + original_tensor = tf.make_ndarray(node.attr["value"].tensor) + substitution_dict[node.name] = tf.make_tensor_proto( + [int(original_tensor[0]), unique_value.gen()], tf.int32) + + preserved_bcqinfo_list = ["/bcqinfo_number_of_clusters", "/bcqinfo_size_of_clusters", + "/bcqinfo_qbits_of_clusters"] + + if any(name in node.name for name in preserved_bcqinfo_list): + original_tensor = tf.make_ndarray( + node.attr["value"].tensor) # variable name change + substitution_dict[node.name] = tf.make_tensor_proto( + np.append(original_tensor, unique_value.gen()), tf.int32) + DT_INT32 = node.attr["dtype"] + + for node in original_graph_model_def.node: + if node.name in substitution_dict: + new_node = new_graph.node.add() + new_node.op = "Const" + new_node.name = node.name + new_node.attr["dtype"].CopyFrom(DT_INT32) + new_node.attr["value"].tensor.CopyFrom(substitution_dict[node.name]) + else: + new_node = new_graph.node.add() + new_node.CopyFrom(node) + + tf.io.write_graph(new_graph, '.', flags.output_path, False) + + +def main(): + # Parse argument. + parser = _get_parser() + flags = parser.parse_known_args(args=sys.argv[1:]) + + # Generate a new pb file, which BCQ information is preserved. 
+ preserve_bcq_info(flags[0]) + + +if __name__ == "__main__": + main() diff --git a/compiler/circle-quantizer/CMakeLists.txt b/compiler/circle-quantizer/CMakeLists.txt index 1335057..009bfab 100644 --- a/compiler/circle-quantizer/CMakeLists.txt +++ b/compiler/circle-quantizer/CMakeLists.txt @@ -13,5 +13,6 @@ target_link_libraries(circle-quantizer luci_service) target_link_libraries(circle-quantizer luci_pass) target_link_libraries(circle-quantizer luci_export) target_link_libraries(circle-quantizer arser) +target_link_libraries(circle-quantizer vconone) install(TARGETS circle-quantizer DESTINATION bin) diff --git a/compiler/circle-quantizer/requires.cmake b/compiler/circle-quantizer/requires.cmake index 2293e53..c21e28e 100644 --- a/compiler/circle-quantizer/requires.cmake +++ b/compiler/circle-quantizer/requires.cmake @@ -5,3 +5,4 @@ require("safemain") require("luci") require("oops") require("arser") +require("vconone") diff --git a/compiler/circle-quantizer/src/CircleQuantizer.cpp b/compiler/circle-quantizer/src/CircleQuantizer.cpp index b56b547..8d3a80c 100644 --- a/compiler/circle-quantizer/src/CircleQuantizer.cpp +++ b/compiler/circle-quantizer/src/CircleQuantizer.cpp @@ -25,6 +25,7 @@ #include #include +#include #include #include @@ -36,6 +37,12 @@ using OptionHook = std::function; using Algorithms = luci::CircleOptimizer::Options::Algorithm; using AlgorithmParameters = luci::CircleOptimizer::Options::AlgorithmParameters; +void print_version(void) +{ + std::cout << "circle-quantizer version " << vconone::get_string() << std::endl; + std::cout << vconone::get_copyright() << std::endl; +} + int entry(int argc, char **argv) { // Simple argument parser (based on map) @@ -49,13 +56,20 @@ int entry(int argc, char **argv) arser::Arser arser("circle-quantizer provides circle model quantization"); + arser.add_argument("--version") + .nargs(0) + .required(false) + .default_value(false) + .help("Show version information and exit") + .exit_with(print_version); + arser.add_argument(qdqw) .nargs(3) .type(arser::DataType::STR_VEC) .required(false) .help("Quantize-dequantize weight values required action before quantization. " "Three arguments required: input_dtype(float32) " - "output_dtype(uint8) granularity(layer)"); + "output_dtype(uint8) granularity(layer, channel)"); arser.add_argument(qwmm) .nargs(3) @@ -63,7 +77,7 @@ int entry(int argc, char **argv) .required(false) .help("Quantize with min/max values. " "Three arguments required: input_dtype(float32) " - "output_dtype(uint8) granularity(layer)"); + "output_dtype(uint8) granularity(layer, channel)"); arser.add_argument("input").nargs(1).type(arser::DataType::STR).help("Input circle model"); arser.add_argument("output").nargs(1).type(arser::DataType::STR).help("Output circle model"); diff --git a/compiler/circle-tensordump/driver/Driver.cpp b/compiler/circle-tensordump/driver/Driver.cpp index a55cd45..5bab9f5 100644 --- a/compiler/circle-tensordump/driver/Driver.cpp +++ b/compiler/circle-tensordump/driver/Driver.cpp @@ -46,7 +46,14 @@ int entry(int argc, char **argv) { std::cout << err.what() << std::endl; std::cout << arser; - return 0; + return 255; + } + + if (arser["--tensors_to_hdf5"] == arser["--tensors"]) + { + std::cout << "[Error] You must specify one option for how to print." 
<< std::endl; + std::cout << arser; + return 255; } std::unique_ptr dump; diff --git a/compiler/circle-tensordump/src/Dump.cpp b/compiler/circle-tensordump/src/Dump.cpp index dfa78f0..a8d3256 100644 --- a/compiler/circle-tensordump/src/Dump.cpp +++ b/compiler/circle-tensordump/src/Dump.cpp @@ -136,6 +136,7 @@ void DumpTensors::run(std::ostream &os, const circle::Model *model, const std::s auto max = quant_param->max(); auto scale = quant_param->scale(); auto zero_point = quant_param->zero_point(); + auto quantized_dimension = quant_param->quantized_dimension(); os << " " + print_format2 + "   ├── min : "; ::print_comma_sepearted(os, min); @@ -146,9 +147,11 @@ void DumpTensors::run(std::ostream &os, const circle::Model *model, const std::s os << " " + print_format2 + "   ├── scale : "; ::print_comma_sepearted(os, scale); os << std::endl; - os << " " + print_format2 + "   └── zero_point : "; + os << " " + print_format2 + "   ├── zero_point : "; ::print_comma_sepearted(os, zero_point); os << std::endl; + os << " " + print_format2 + "   └── quantized_dimension : " << quantized_dimension; + os << std::endl; } // buffer @@ -229,7 +232,7 @@ std::vector hdf5_dims_cast(const flatbuffers::Vector *data, } /** - * This function writes data to given hdf5 file like below. + * This function writes vector data to given hdf5 file like below. * * GROUP "group_name" * ㄴDATATYPE "type" @@ -238,9 +241,9 @@ std::vector hdf5_dims_cast(const flatbuffers::Vector *data, * ㄴDATA "data" */ template -void write_data_to_hdf5(H5::H5File &file, std::string &group_name, std::string dataset_name, - const H5::PredType &type, const flatbuffers::Vector *data, - std::vector dims) +void write_vector_data_to_hdf5(H5::H5File &file, std::string &group_name, std::string dataset_name, + const H5::PredType &type, const flatbuffers::Vector *data, + std::vector dims) { if (data == nullptr) return; @@ -250,6 +253,17 @@ void write_data_to_hdf5(H5::H5File &file, std::string &group_name, std::string d dataset->write(data->data(), type); } +/// @brief This function writes scalar data to given hdf5 file +template +void write_scalar_data_to_hdf5(H5::H5File &file, std::string &group_name, std::string dataset_name, + const H5::PredType &type, T data) +{ + auto dataspace = std::make_unique(H5S_SCALAR); + auto dataset = std::make_unique( + file.createDataSet(group_name + "/" + dataset_name, type, *dataspace)); + dataset->write(&data, type); +} + } // namespace namespace circletensordump @@ -297,8 +311,9 @@ void DumpTensorsToHdf5::run(std::ostream &os, const circle::Model *model, auto buff_data_ptr = reader.buffers()->Get(buff_idx)->data(); if (buff_data_ptr) { - ::write_data_to_hdf5(file, group_name, "weights", ::hdf5_dtype_cast(tensor->type()), - buff_data_ptr, ::hdf5_dims_cast(buff_data_ptr, tensor->shape())); + ::write_vector_data_to_hdf5(file, group_name, "weights", ::hdf5_dtype_cast(tensor->type()), + buff_data_ptr, + ::hdf5_dims_cast(buff_data_ptr, tensor->shape())); } // write quantization parameters @@ -306,17 +321,20 @@ void DumpTensorsToHdf5::run(std::ostream &os, const circle::Model *model, if (quant_param) { auto min = quant_param->min(); - ::write_data_to_hdf5(file, group_name, "min", H5::PredType::NATIVE_FLOAT, min, - ::hdf5_dims_cast(min)); + ::write_vector_data_to_hdf5(file, group_name, "min", H5::PredType::NATIVE_FLOAT, min, + ::hdf5_dims_cast(min)); auto max = quant_param->max(); - ::write_data_to_hdf5(file, group_name, "max", H5::PredType::NATIVE_FLOAT, max, - ::hdf5_dims_cast(max)); + ::write_vector_data_to_hdf5(file, 
group_name, "max", H5::PredType::NATIVE_FLOAT, max, + ::hdf5_dims_cast(max)); auto scale = quant_param->scale(); - ::write_data_to_hdf5(file, group_name, "scale", H5::PredType::NATIVE_FLOAT, scale, - ::hdf5_dims_cast(scale)); + ::write_vector_data_to_hdf5(file, group_name, "scale", H5::PredType::NATIVE_FLOAT, scale, + ::hdf5_dims_cast(scale)); auto zero_point = quant_param->zero_point(); - ::write_data_to_hdf5(file, group_name, "zero_point", H5::PredType::NATIVE_INT64, zero_point, - ::hdf5_dims_cast(zero_point)); + ::write_vector_data_to_hdf5(file, group_name, "zero_point", H5::PredType::NATIVE_INT64, + zero_point, ::hdf5_dims_cast(zero_point)); + auto quantized_dimension = quant_param->quantized_dimension(); + ::write_scalar_data_to_hdf5(file, group_name, "quantized_dimension", + H5::PredType::NATIVE_INT32, quantized_dimension); } } } diff --git a/compiler/circle-verify/src/Driver.cpp b/compiler/circle-verify/src/Driver.cpp index 1af31d9..7a44c65 100644 --- a/compiler/circle-verify/src/Driver.cpp +++ b/compiler/circle-verify/src/Driver.cpp @@ -35,7 +35,7 @@ int entry(int argc, char **argv) { std::cout << err.what() << std::endl; std::cout << arser; - return 0; + return 255; } auto verifier = std::make_unique(); diff --git a/compiler/circle2circle-dredd-recipe-test/CMakeLists.txt b/compiler/circle2circle-dredd-recipe-test/CMakeLists.txt index 6663cb9..4bcaae3 100644 --- a/compiler/circle2circle-dredd-recipe-test/CMakeLists.txt +++ b/compiler/circle2circle-dredd-recipe-test/CMakeLists.txt @@ -1,25 +1,12 @@ nnas_include(TargetRequire) unset(REQUIRED_TARGETS) -list(APPEND REQUIRED_TARGETS circlechef) list(APPEND REQUIRED_TARGETS circle-inspect) list(APPEND REQUIRED_TARGETS circle-verify) list(APPEND REQUIRED_TARGETS circle2circle) list(APPEND REQUIRED_TARGETS dredd_rule_lib) -list(APPEND REQUIRED_TARGETS tflchef) -list(APPEND REQUIRED_TARGETS tflite2circle) TargetRequire_Return(${REQUIRED_TARGETS}) -nncc_find_resource(TensorFlowLiteRecipes) -nncc_find_resource(CircleRecipes) - -set(TFLITE_RECIPE_REPO "${TensorFlowLiteRecipes_DIR}") -set(CIRCLE_RECIPE_REPO "${CircleRecipes_DIR}") -unset(RECIPE_REPO) - -set(TEST_RECIPE_FILENAME "test.recipe") -set(TEST_RULE_FILENAME "test.rule") - unset(TEST_DEPS) unset(TEST_NAMES) @@ -27,21 +14,9 @@ set(options "") set(oneValueArgs "") set(multiValueArgs PASS) -macro(Add RECIPE) - if(NOT EXISTS "${TFLITE_RECIPE_REPO}/${RECIPE}/test.recipe") - if(NOT EXISTS "${CIRCLE_RECIPE_REPO}/${RECIPE}/test.recipe") - message(FATAL_ERROR "Missing recipe of '${RECIPE}' test") - else() - set(RECIPE_REPO ${CIRCLE_RECIPE_REPO}) - endif() - else() - set(RECIPE_REPO ${TFLITE_RECIPE_REPO}) - endif() - - if(NOT EXISTS "${RECIPE_REPO}/${RECIPE}/test.rule") - message(FATAL_ERROR "Missing rule of '${RECIPE}' test") - endif() +get_target_property(ARTIFACTS_BIN_PATH testDataGenerator BINARY_DIR) +macro(Add RECIPE) cmake_parse_arguments(ARG "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) unset(OPT_OPTIONS) foreach(src ${ARG_PASS}) @@ -49,71 +24,20 @@ macro(Add RECIPE) list(APPEND OPT_OPTIONS "--${src}") endforeach(src ${ARG_PASS}) - set(RECIPE_FILE "${RECIPE}.recipe") - set(RECIPE_SOURCE_PATH "${RECIPE_REPO}/${RECIPE}/${TEST_RECIPE_FILENAME}") - set(RECIPE_BINARY_PATH "${CMAKE_CURRENT_BINARY_DIR}/${RECIPE_FILE}") - - set(RULE_FILE "${RECIPE}.rule") - set(RULE_SOURCE_PATH "${RECIPE_REPO}/${RECIPE}/${TEST_RULE_FILENAME}") - set(RULE_BINARY_PATH "${CMAKE_CURRENT_BINARY_DIR}/${RULE_FILE}") - - set(TFLITE_FILE "${RECIPE}.tflite") - set(TFLITE_OUTPUT_PATH 
"${CMAKE_CURRENT_BINARY_DIR}/${TFLITE_FILE}") - set(CIRCLE_FILE "${RECIPE}.circle") - set(CIRCLE_OUTPUT_PATH "${CMAKE_CURRENT_BINARY_DIR}/${CIRCLE_FILE}") + set(CIRCLE_PATH "${ARTIFACTS_BIN_PATH}/${CIRCLE_FILE}") set(OPT_CIRCLE_FILE "${RECIPE}.opt.circle") set(OPT_CIRCLE_OUTPUT_PATH "${CMAKE_CURRENT_BINARY_DIR}/${OPT_CIRCLE_FILE}") - # Copy .recipe - add_custom_command(OUTPUT ${RECIPE_BINARY_PATH} - COMMAND ${CMAKE_COMMAND} -E copy "${RECIPE_SOURCE_PATH}" "${RECIPE_BINARY_PATH}" - DEPENDS ${RECIPE_SOURCE_PATH} - COMMENT "Generate ${RECIPE_FILE}" - ) - - # Copy .rule - add_custom_command(OUTPUT ${RULE_BINARY_PATH} - COMMAND ${CMAKE_COMMAND} -E copy "${RULE_SOURCE_PATH}" "${RULE_BINARY_PATH}" - DEPENDS ${RULE_SOURCE_PATH} - COMMENT "Generate ${RULE_FILE}" - ) - - if(${RECIPE_REPO} STREQUAL ${TFLITE_RECIPE_REPO}) - # Generate .tflite - add_custom_command(OUTPUT ${TFLITE_OUTPUT_PATH} - COMMAND $ ${RECIPE_BINARY_PATH} ${TFLITE_OUTPUT_PATH} - DEPENDS $ ${RECIPE_BINARY_PATH} - COMMENT "Generate ${TFLITE_FILE}" - ) - - # Generate .circle - add_custom_command(OUTPUT ${CIRCLE_OUTPUT_PATH} - COMMAND $ ${TFLITE_OUTPUT_PATH} ${CIRCLE_OUTPUT_PATH} - DEPENDS $ ${TFLITE_OUTPUT_PATH} - COMMENT "Generate ${CIRCLE_FILE}" - ) - - list(APPEND TEST_DEPS ${TFLITE_OUTPUT_PATH}) - else() - # Generate .circle - add_custom_command(OUTPUT ${CIRCLE_OUTPUT_PATH} - COMMAND $ ${RECIPE_BINARY_PATH} ${CIRCLE_OUTPUT_PATH} - DEPENDS $ ${RECIPE_BINARY_PATH} - COMMENT "Generate ${CIRCLE_FILE}" - ) - endif() - # Generate optimized .circle add_custom_command(OUTPUT ${OPT_CIRCLE_OUTPUT_PATH} - COMMAND $ ${OPT_OPTIONS} ${CIRCLE_OUTPUT_PATH} ${OPT_CIRCLE_OUTPUT_PATH} - DEPENDS $ ${CIRCLE_OUTPUT_PATH} + COMMAND $ ${OPT_OPTIONS} ${CIRCLE_PATH} ${OPT_CIRCLE_OUTPUT_PATH} + DEPENDS $ ${CIRCLE_PATH} COMMENT "Generate ${OPT_CIRCLE_FILE}" ) - list(APPEND TEST_DEPS ${RECIPE_BINARY_PATH} ${RULE_BINARY_PATH} - ${CIRCLE_OUTPUT_PATH} ${OPT_CIRCLE_OUTPUT_PATH}) + list(APPEND TEST_DEPS ${OPT_CIRCLE_OUTPUT_PATH}) list(APPEND TEST_NAMES ${RECIPE}) endmacro(Add) @@ -174,12 +98,15 @@ list(APPEND TEST_DEPS "${RULE_LIB_BINARY_PATH}") # Generate dependencies add_custom_target(circle2circle_dredd_recipe_test ALL DEPENDS ${TEST_DEPS}) +add_dependencies(circle2circle_dredd_recipe_test common_artifacts_deps) + +get_target_property(ARTIFACTS_BIN_PATH testDataGenerator BINARY_DIR) # Run tests add_test( NAME circle2circle_dredd_recipe_test COMMAND "${TEST_RUNNER}" "${TEST_CONFIG}" - "${CMAKE_CURRENT_BINARY_DIR}" + "${ARTIFACTS_BIN_PATH}" ${TEST_NAMES} ) diff --git a/compiler/circle2circle-dredd-recipe-test/requires.cmake b/compiler/circle2circle-dredd-recipe-test/requires.cmake index e4a5b71..70e7c52 100644 --- a/compiler/circle2circle-dredd-recipe-test/requires.cmake +++ b/compiler/circle2circle-dredd-recipe-test/requires.cmake @@ -1,7 +1,5 @@ -require("circlechef") require("circle2circle") require("circle-inspect") require("circle-verify") +require("common-artifacts") require("dredd-rule-lib") -require("tflchef") -require("tflite2circle") diff --git a/compiler/circle2circle-dredd-recipe-test/test.lst b/compiler/circle2circle-dredd-recipe-test/test.lst index 202f669..6328a64 100644 --- a/compiler/circle2circle-dredd-recipe-test/test.lst +++ b/compiler/circle2circle-dredd-recipe-test/test.lst @@ -11,9 +11,10 @@ ## TFLITE RECIPE Add(Net_InstanceNorm_001 PASS fuse_instnorm) -# Add(Net_InstanceNorm_002 PASS fuse_instnorm) +Add(Net_InstanceNorm_002 PASS fuse_instnorm) Add(BatchMatMulV2_000 PASS resolve_customop_batchmatmul) Add(MatMul_000 PASS 
resolve_customop_matmul) +Add(DepthwiseConv2D_003 PASS) ## CIRCLE RECIPE diff --git a/compiler/circle2circle-dredd-recipe-test/testall.sh b/compiler/circle2circle-dredd-recipe-test/testall.sh index 33a2036..2899587 100755 --- a/compiler/circle2circle-dredd-recipe-test/testall.sh +++ b/compiler/circle2circle-dredd-recipe-test/testall.sh @@ -13,21 +13,22 @@ if [[ $# -lt 2 ]]; then exit 255 fi +WORKDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" CONFIG_PATH="$1"; shift -WORKDIR="$1"; shift +RESOURCE_DIR="$1"; shift source "${CONFIG_PATH}" echo "-- Found circle-inspect: ${CIRCLE_INSPECT_PATH}" echo "-- Found circle-verify: ${CIRCLE_VERIFY_PATH}" echo "-- Found circle2circle: ${CIRCLE2CIRCLE_PATH}" -echo "-- Found workdir: ${WORKDIR}" +echo "-- Found common-artifacts: ${RESOURCE_DIR}" TESTED=() PASSED=() FAILED=() -pushd "${WORKDIR}" +pushd ${WORKDIR} while [[ $# -ne 0 ]]; do PREFIX="$1"; shift @@ -40,7 +41,7 @@ while [[ $# -ne 0 ]]; do cat > "${PREFIX}.log" <( exec 2>&1 - echo "-- Found tflite: ${PREFIX}.tflite" + echo "-- Found circle: ${PREFIX}.opt.circle" # Exit immediately if any command fails set -e @@ -55,7 +56,7 @@ while [[ $# -ne 0 ]]; do set +x # (COMPILED_FILE, INSPECT_PROG_PATH, VERIFY_PROG_PATH, ERROR_LOG) must be set for rule-lib.sh - COMPILED_FILE="${WORKDIR}/${PREFIX}.opt.circle" + COMPILED_FILE="${PREFIX}.opt.circle" INSPECT_PROG_PATH=${CIRCLE_INSPECT_PATH} VERIFY_PROG_PATH=${CIRCLE_VERIFY_PATH} ERROR_LOG="${PREFIX}.error" @@ -66,7 +67,7 @@ while [[ $# -ne 0 ]]; do trap 'echo "** ERROR **" ; cat "${ERROR_LOG}"' ERR source rule-lib.sh - source "${PREFIX}.rule" + source "${RESOURCE_DIR}/${PREFIX}.rule" # unset trap - ERR diff --git a/compiler/circle2circle/CMakeLists.txt b/compiler/circle2circle/CMakeLists.txt index 7b2bf9b..f60c896 100644 --- a/compiler/circle2circle/CMakeLists.txt +++ b/compiler/circle2circle/CMakeLists.txt @@ -19,6 +19,7 @@ target_link_libraries(circle2circle luci_service) target_link_libraries(circle2circle luci_pass) target_link_libraries(circle2circle luci_export) target_link_libraries(circle2circle arser) +target_link_libraries(circle2circle vconone) install(TARGETS circle2circle DESTINATION bin) @@ -44,3 +45,4 @@ target_link_libraries(circle2circle_test luci_service) target_link_libraries(circle2circle_test luci_pass) target_link_libraries(circle2circle_test luci_export) target_link_libraries(circle2circle_test arser) +target_link_libraries(circle2circle_test vconone) diff --git a/compiler/circle2circle/requires.cmake b/compiler/circle2circle/requires.cmake index 8cbb90d..36a9efd 100644 --- a/compiler/circle2circle/requires.cmake +++ b/compiler/circle2circle/requires.cmake @@ -9,3 +9,4 @@ require("hermes") require("hermes-std") require("luci") require("arser") +require("vconone") diff --git a/compiler/circle2circle/src/Circle2Circle.cpp b/compiler/circle2circle/src/Circle2Circle.cpp index 6888d26..849597b 100644 --- a/compiler/circle2circle/src/Circle2Circle.cpp +++ b/compiler/circle2circle/src/Circle2Circle.cpp @@ -26,6 +26,7 @@ #include #include +#include #include #include @@ -34,6 +35,12 @@ using Algorithms = luci::CircleOptimizer::Options::Algorithm; using AlgorithmParameters = luci::CircleOptimizer::Options::AlgorithmParameters; +void print_version(void) +{ + std::cout << "circle2circle version " << vconone::get_string() << std::endl; + std::cout << vconone::get_copyright() << std::endl; +} + int entry(int argc, char **argv) { // Simple argument parser (based on map) @@ -44,6 +51,13 @@ int entry(int argc, char **argv) arser::Arser 
arser("circle2circle provides circle model optimization and transformations"); + arser.add_argument("--version") + .nargs(0) + .required(false) + .default_value(false) + .help("Show version information and exit") + .exit_with(print_version); + arser.add_argument("--all").nargs(0).required(false).default_value(false).help( "Enable all optimize options"); diff --git a/compiler/circlechef/CMakeLists.txt b/compiler/circlechef/CMakeLists.txt index cba7d0a..3e2ddcb 100644 --- a/compiler/circlechef/CMakeLists.txt +++ b/compiler/circlechef/CMakeLists.txt @@ -18,4 +18,6 @@ add_subdirectory(core) add_subdirectory(circle) # Tools add_subdirectory(tools) -add_subdirectory(tests) +if(ENABLE_TEST) + add_subdirectory(tests) +endif(ENABLE_TEST) diff --git a/compiler/circlechef/circle/src/RecipeChef.cpp b/compiler/circlechef/circle/src/RecipeChef.cpp index 17ef1be..51326c7 100644 --- a/compiler/circlechef/circle/src/RecipeChef.cpp +++ b/compiler/circlechef/circle/src/RecipeChef.cpp @@ -181,6 +181,8 @@ std::unique_ptr generate_recipe(const circle::Model *model) for (uint32_t idx = 0; idx < quant->zero_point()->size(); ++idx) chef_quant->add_zero_point(quant->zero_point()->Get(idx)); } + circlechef::TensorQuantization *chef_quant = operand->mutable_quant(); + chef_quant->set_quantized_dimension(quant->quantized_dimension()); } } diff --git a/compiler/circlechef/core/src/ModelChef.cpp b/compiler/circlechef/core/src/ModelChef.cpp index 76aeacd..d81467d 100644 --- a/compiler/circlechef/core/src/ModelChef.cpp +++ b/compiler/circlechef/core/src/ModelChef.cpp @@ -413,6 +413,7 @@ template void cook_graph(const T &graph, CookParams &cp) quant_builder.add_min(quant_min); quant_builder.add_scale(quant_scale); quant_builder.add_zero_point(quant_zero_point); + quant_builder.add_quantized_dimension(quant.quantized_dimension()); // Update QuantizationParameters Index quant_index = quant_builder.Finish(); diff --git a/compiler/circlechef/proto/circlechef.proto b/compiler/circlechef/proto/circlechef.proto index b8c009b..3e5e6b1 100644 --- a/compiler/circlechef/proto/circlechef.proto +++ b/compiler/circlechef/proto/circlechef.proto @@ -35,6 +35,7 @@ message TensorQuantization { repeated float max = 2; repeated float scale = 3; repeated int64 zero_point = 4; + optional int32 quantized_dimension = 5 [default = 0]; } message Operand { diff --git a/compiler/circlechef/tools/file/Driver.cpp b/compiler/circlechef/tools/file/Driver.cpp index a15da40..bcc0c7a 100644 --- a/compiler/circlechef/tools/file/Driver.cpp +++ b/compiler/circlechef/tools/file/Driver.cpp @@ -41,7 +41,7 @@ int entry(int argc, char **argv) { std::cout << err.what() << std::endl; std::cout << arser; - return 0; + return 255; } int32_t model_version = 1; diff --git a/compiler/circlechef/tools/reverse/Driver.cpp b/compiler/circlechef/tools/reverse/Driver.cpp index 9c0b9ea..8a2b85f 100644 --- a/compiler/circlechef/tools/reverse/Driver.cpp +++ b/compiler/circlechef/tools/reverse/Driver.cpp @@ -38,7 +38,7 @@ int entry(int argc, char **argv) { std::cout << err.what() << std::endl; std::cout << arser; - return 0; + return 255; } std::string circle_path = arser.get("circle"); diff --git a/compiler/circledump/driver/Driver.cpp b/compiler/circledump/driver/Driver.cpp index b8f561f..657f24f 100644 --- a/compiler/circledump/driver/Driver.cpp +++ b/compiler/circledump/driver/Driver.cpp @@ -33,7 +33,7 @@ int entry(int argc, char **argv) { std::cout << err.what() << '\n'; std::cout << arser; - return 0; + return 255; } std::string circle_path = arser.get("circle"); diff --git 
a/compiler/circledump/src/OpPrinter.cpp b/compiler/circledump/src/OpPrinter.cpp index 2c03203..3294bb2 100644 --- a/compiler/circledump/src/OpPrinter.cpp +++ b/compiler/circledump/src/OpPrinter.cpp @@ -593,6 +593,20 @@ public: } }; +class UniquePrinter : public OpPrinter +{ +public: + void options(const circle::Operator *op, std::ostream &os) const override + { + if (auto *params = op->builtin_options_as_UniqueOptions()) + { + os << " "; + os << "idx_out_type(" << EnumNameTensorType(params->idx_out_type()) << ") "; + os << std::endl; + } + } +}; + class WhilePrinter : public OpPrinter { public: @@ -710,9 +724,11 @@ OpPrinterRegistry::OpPrinterRegistry() _op_map[circle::BuiltinOperator_MAX_POOL_2D] = make_unique(); _op_map[circle::BuiltinOperator_MIRROR_PAD] = make_unique(); _op_map[circle::BuiltinOperator_MUL] = make_unique(); + // There is no Option for NON_MAX_SUPPRESSION_V4 _op_map[circle::BuiltinOperator_ONE_HOT] = make_unique(); _op_map[circle::BuiltinOperator_PACK] = make_unique(); // There is no Option for PAD + // There is no Option for PADV2 // There is no Option for PRELU // There is no Option for RELU // There is no Option for RELU6 @@ -744,6 +760,7 @@ OpPrinterRegistry::OpPrinterRegistry() _op_map[circle::BuiltinOperator_SUM] = make_unique(); _op_map[circle::BuiltinOperator_TRANSPOSE_CONV] = make_unique(); // There is no Option for TOPK_V2 + _op_map[circle::BuiltinOperator_UNIQUE] = make_unique(); _op_map[circle::BuiltinOperator_WHILE] = make_unique(); _op_map[circle::BuiltinOperator_CUSTOM] = make_unique(); diff --git a/compiler/common-artifacts/CMakeLists.txt b/compiler/common-artifacts/CMakeLists.txt index ee4191d..ef50e8d 100644 --- a/compiler/common-artifacts/CMakeLists.txt +++ b/compiler/common-artifacts/CMakeLists.txt @@ -13,30 +13,47 @@ if(${PYTHON_VERSION_MINOR} LESS 3) endif() # Create python virtual environment with tensorflow 1.13.2 -set(VIRTUALENV_OVERLAY "${NNCC_OVERLAY_DIR}/venv_1_13_2") +set(VIRTUALENV_OVERLAY_TF_1_13_2 "${NNCC_OVERLAY_DIR}/venv_1_13_2") + +# Create python virtual environment with tensorflow 2.3.0 +set(VIRTUALENV_OVERLAY_TF_2_3_0 "${NNCC_OVERLAY_DIR}/venv_2_3_0") + +add_custom_command( + OUTPUT ${VIRTUALENV_OVERLAY_TF_1_13_2} + COMMAND ${PYTHON_EXECUTABLE} -m venv ${VIRTUALENV_OVERLAY_TF_1_13_2} +) add_custom_command( - OUTPUT ${VIRTUALENV_OVERLAY} - COMMAND ${PYTHON_EXECUTABLE} -m venv ${VIRTUALENV_OVERLAY} + OUTPUT ${VIRTUALENV_OVERLAY_TF_2_3_0} + COMMAND ${PYTHON_EXECUTABLE} -m venv ${VIRTUALENV_OVERLAY_TF_2_3_0} ) # Create requirements.txt and install required pip packages set(REQUIREMENTS_FILE "requirements.txt") -set(REQUIREMENTS_OVERLAY_PATH "${NNCC_OVERLAY_DIR}/${REQUIREMENTS_FILE}") +set(REQUIREMENTS_OVERLAY_PATH_TF_1_13_2 "${VIRTUALENV_OVERLAY_TF_1_13_2}/${REQUIREMENTS_FILE}") +set(REQUIREMENTS_OVERLAY_PATH_TF_2_3_0 "${VIRTUALENV_OVERLAY_TF_2_3_0}/${REQUIREMENTS_FILE}") add_custom_command( - OUTPUT ${REQUIREMENTS_OVERLAY_PATH} - COMMAND ${CMAKE_COMMAND} -E echo "tensorflow==1.13.2" > ${REQUIREMENTS_OVERLAY_PATH} - COMMAND ${VIRTUALENV_OVERLAY}/bin/python -m pip --default-timeout=1000 install --upgrade pip setuptools - COMMAND ${VIRTUALENV_OVERLAY}/bin/python -m pip --default-timeout=1000 install -r ${REQUIREMENTS_OVERLAY_PATH} --upgrade - DEPENDS ${VIRTUALENV_OVERLAY} ${REQUIREMENTS_OVERLAY_PATH} + OUTPUT ${REQUIREMENTS_OVERLAY_PATH_TF_1_13_2} + COMMAND ${CMAKE_COMMAND} -E echo "tensorflow==1.13.2" > ${REQUIREMENTS_OVERLAY_PATH_TF_1_13_2} + COMMAND ${VIRTUALENV_OVERLAY_TF_1_13_2}/bin/python -m pip --default-timeout=1000 install 
--upgrade pip setuptools + COMMAND ${VIRTUALENV_OVERLAY_TF_1_13_2}/bin/python -m pip --default-timeout=1000 install -r ${REQUIREMENTS_OVERLAY_PATH_TF_1_13_2} --upgrade + DEPENDS ${VIRTUALENV_OVERLAY_TF_1_13_2} ) -add_custom_target(common_artifacts_python_deps ALL - DEPENDS ${VIRTUALENV_OVERLAY} ${REQUIREMENTS_OVERLAY_PATH} +add_custom_command( + OUTPUT ${REQUIREMENTS_OVERLAY_PATH_TF_2_3_0} + COMMAND ${CMAKE_COMMAND} -E remove -f ${REQUIREMENTS_OVERLAY_PATH_TF_2_3_0} + COMMAND ${CMAKE_COMMAND} -E echo "tensorflow-cpu==2.3.0" >> ${REQUIREMENTS_OVERLAY_PATH_TF_2_3_0} + COMMAND ${CMAKE_COMMAND} -E echo "flatbuffers==1.12" >> ${REQUIREMENTS_OVERLAY_PATH_TF_2_3_0} + COMMAND ${VIRTUALENV_OVERLAY_TF_2_3_0}/bin/python -m pip --default-timeout=1000 install --upgrade pip setuptools + COMMAND ${VIRTUALENV_OVERLAY_TF_2_3_0}/bin/python -m pip --default-timeout=1000 install -r ${REQUIREMENTS_OVERLAY_PATH_TF_2_3_0} --upgrade + DEPENDS ${VIRTUALENV_OVERLAY_TF_2_3_0} ) -# TODO Create python virtual environment with tensorflow 2.3.0-rc0 +add_custom_target(common_artifacts_python_deps ALL + DEPENDS ${VIRTUALENV_OVERLAY_TF_1_13_2} ${VIRTUALENV_OVERLAY_TF_2_3_0} ${REQUIREMENTS_OVERLAY_PATH_TF_1_13_2} ${REQUIREMENTS_OVERLAY_PATH_TF_2_3_0} +) #[[ Generate common resources ]] # TODO add pbtxt @@ -52,6 +69,7 @@ set(SOURCES src/TestDataGenerator.cpp) add_executable(testDataGenerator ${SOURCES}) target_include_directories(testDataGenerator PRIVATE ${HDF5_INCLUDE_DIRS}) target_link_libraries(testDataGenerator PRIVATE ${HDF5_CXX_LIBRARIES}) +target_link_libraries(testDataGenerator PRIVATE arser) target_link_libraries(testDataGenerator PRIVATE foder) target_link_libraries(testDataGenerator PRIVATE luci_import) target_link_libraries(testDataGenerator PRIVATE luci_interpreter) diff --git a/compiler/common-artifacts/exclude.lst b/compiler/common-artifacts/exclude.lst index b614b71..fe9933a 100644 --- a/compiler/common-artifacts/exclude.lst +++ b/compiler/common-artifacts/exclude.lst @@ -5,9 +5,12 @@ #[[ optimize : Exclude from circle optimization(circle2circle) ]] ## TensorFlowLiteRecipes -optimize(ReLU6_000) -optimize(Where_000) -optimize(Where_001) +optimize(Unique_000) +optimize(Unique_001) +optimize(Unique_002) +optimize(Unique_003) +optimize(Unique_U8_000) +optimize(Unique_U8_001) ## CircleRecipes @@ -46,6 +49,7 @@ tcgenerate(DepthToSpace_000) tcgenerate(DepthwiseConv2D_001) # runtime doesn't support dilation tcgenerate(DepthwiseConv2D_003) # runtime doesn't support dilation tcgenerate(DepthwiseConv2D_U8_000) +tcgenerate(DepthwiseConv2D_U8_001) # luci-interpreter doesn't support channel-wise quantization yet tcgenerate(Div_000) tcgenerate(ELU_000) tcgenerate(Equal_000) @@ -90,13 +94,15 @@ tcgenerate(Maximum_000) tcgenerate(MaxPool2D_U8_000) tcgenerate(Mean_U8_000) tcgenerate(Minimum_000) +tcgenerate(NonMaxSuppressionV4_000) +tcgenerate(NonMaxSuppressionV4_001) tcgenerate(MirrorPad_000) tcgenerate(Mul_U8_000) tcgenerate(Neg_000) tcgenerate(Net_Dangle_001) tcgenerate(Net_InstanceNorm_001) tcgenerate(Net_InstanceNorm_002) -tcgenerate(Net_ZeroDim_001) # fix luci +tcgenerate(Net_ZeroDim_001) # luci-interpreter doesn't support zero dim tcgenerate(NotEqual_000) tcgenerate(OneHot_000) tcgenerate(OneHot_001) @@ -105,6 +111,7 @@ tcgenerate(OneHot_003) tcgenerate(Pack_000) tcgenerate(Pack_U8_000) tcgenerate(Pad_U8_000) +tcgenerate(PadV2_000) tcgenerate(Pow_000) tcgenerate(PRelu_000) tcgenerate(Range_000) @@ -120,11 +127,12 @@ tcgenerate(ReduceProd_001) tcgenerate(ReduceProd_002) tcgenerate(ReduceProd_003) tcgenerate(ReLU_000) 
-tcgenerate(ReLU6_000) # luci NYI +tcgenerate(ReLU6_000) tcgenerate(ReLUN1To1_000) -tcgenerate(Reshape_003) # fix luci +tcgenerate(Reshape_003) # luci-interpreter doesn't support reshape without built-in option tcgenerate(Reshape_U8_000) tcgenerate(ResizeBilinear_000) +tcgenerate(ResizeBilinear_U8_000) # luci-interpreter tcgenerate(ResizeNearestNeighbor_000) tcgenerate(ReverseSequence_000) tcgenerate(ReverseV2_000) @@ -148,7 +156,7 @@ tcgenerate(SpaceToBatchND_002) tcgenerate(SpaceToBatchND_003) tcgenerate(SpaceToDepth_000) tcgenerate(SparseToDense_000) -tcgenerate(SplitV_000) # fix luci +tcgenerate(SplitV_000) tcgenerate(Sqrt_000) tcgenerate(Square_000) tcgenerate(SquaredDifference_000) @@ -164,22 +172,21 @@ tcgenerate(Sum_001) tcgenerate(Tanh_000) tcgenerate(Tile_000) tcgenerate(Tile_U8_000) -tcgenerate(TopKV2_000) # fix luci -tcgenerate(TopKV2_001) # fix luci -tcgenerate(TransposeConv_000) # fix interpreter +tcgenerate(TopKV2_000) +tcgenerate(TopKV2_001) tcgenerate(Unique_000) tcgenerate(Unique_001) tcgenerate(Unique_002) tcgenerate(Unique_003) tcgenerate(Unique_U8_000) tcgenerate(Unique_U8_001) -tcgenerate(Where_000) # luci NYI -tcgenerate(Where_001) # luci NYI -tcgenerate(While_000) # fix luci +tcgenerate(Where_000) +tcgenerate(Where_001) +tcgenerate(While_000) tcgenerate(While_001) tcgenerate(While_002) tcgenerate(While_003) -tcgenerate(YUV_TO_RGB_000) # fix luci +tcgenerate(YUV_TO_RGB_000) tcgenerate(YUV_TO_RGB_U8_000) tcgenerate(ZerosLike_000) diff --git a/compiler/common-artifacts/requires.cmake b/compiler/common-artifacts/requires.cmake index 8c27565..d7bed21 100644 --- a/compiler/common-artifacts/requires.cmake +++ b/compiler/common-artifacts/requires.cmake @@ -1,3 +1,4 @@ +require("arser") require("circle2circle") require("circlechef") require("foder") diff --git a/compiler/common-artifacts/src/TestDataGenerator.cpp b/compiler/common-artifacts/src/TestDataGenerator.cpp index 739300d..7a07dd8 100644 --- a/compiler/common-artifacts/src/TestDataGenerator.cpp +++ b/compiler/common-artifacts/src/TestDataGenerator.cpp @@ -14,6 +14,7 @@ * limitations under the License. 
*/ +#include #include #include #include @@ -62,10 +63,9 @@ template void geneate_random_data(std::mt19937 &gen, void *data, ui } } -void fill_random_data(void *data, uint32_t size, loco::DataType dtype) +void fill_random_data(void *data, uint32_t size, loco::DataType dtype, uint32_t seed) { - std::random_device rd; // used to obtain a seed for the random number engine - std::mt19937 gen(rd()); // standard mersenne_twister_engine seeded with rd() + std::mt19937 gen(seed); // standard mersenne_twister_engine seeded with rd() switch (dtype) { @@ -90,7 +90,25 @@ void fill_random_data(void *data, uint32_t size, loco::DataType dtype) int entry(int argc, char **argv) { - std::string circle_file{argv[1]}; + arser::Arser arser; + arser.add_argument("circle").type(arser::DataType::STR).help("Circle file you want to test"); + arser.add_argument("--fixed_seed") + .required(false) + .nargs(0) + .help("Put a fixed seed into the random number generator"); + + try + { + arser.parse(argc, argv); + } + catch (const std::runtime_error &err) + { + std::cout << err.what() << std::endl; + std::cout << arser; + return 255; + } + + std::string circle_file = arser.get("circle"); size_t last_dot_index = circle_file.find_last_of("."); std::string prefix = circle_file.substr(0, last_dot_index); @@ -136,6 +154,7 @@ int entry(int argc, char **argv) std::unique_ptr output_value_group = std::make_unique(output_file.createGroup("value")); + std::random_device rd; // used to obtain a seed for the random number engine uint32_t input_index = 0; for (uint32_t g = 0; g < circle_model->subgraphs()->size(); g++) { @@ -174,7 +193,10 @@ int entry(int argc, char **argv) std::vector data(byte_size); // generate random data - fill_random_data(data.data(), data_size, input_node->dtype()); + if (arser["--fixed_seed"]) + fill_random_data(data.data(), data_size, input_node->dtype(), 0); + else + fill_random_data(data.data(), data_size, input_node->dtype(), rd()); dataset->write(data.data(), dtype); diff --git a/compiler/hermes/src/hermes.test.cpp b/compiler/hermes/src/hermes.test.cpp index 2cbc093..ea7ef65 100644 --- a/compiler/hermes/src/hermes.test.cpp +++ b/compiler/hermes/src/hermes.test.cpp @@ -18,7 +18,28 @@ #include -TEST(HermesTest, simple_usecase) +namespace { - // TO BE FILLED + +class Logger final : public hermes::Source +{ +public: + Logger(hermes::Context *ctx); + ~Logger(); +}; + +Logger::Logger(hermes::Context *ctx) { activate(ctx->sources(), ctx->bus()); } +Logger::~Logger() { deactivate(); } + +} // namespace + +TEST(HermesTest, logger_constructor_NEG) +{ + hermes::Context context; + // we expect segmentfault from nullptr->sources() + ASSERT_DEATH(Logger logger(&context), ""); + + SUCCEED(); } + +// TODO add HermesTest simple_usecase diff --git a/compiler/locomotiv/src/Node/BiasEncode.test.cpp b/compiler/locomotiv/src/Node/BiasEncode.test.cpp index cdb255c..4680f5c 100644 --- a/compiler/locomotiv/src/Node/BiasEncode.test.cpp +++ b/compiler/locomotiv/src/Node/BiasEncode.test.cpp @@ -90,6 +90,16 @@ template void test() } } // namespace -TEST(NodeExecution_BiasEncode, s32) { test(); } +TEST(NodeExecution_BiasEncode, s32) +{ + test(); + + SUCCEED(); +} -TEST(NodeExecution_BiasEncode, f32) { test(); } +TEST(NodeExecution_BiasEncode, f32) +{ + test(); + + SUCCEED(); +} diff --git a/compiler/locomotiv/src/Node/MatMul.test.cpp b/compiler/locomotiv/src/Node/MatMul.test.cpp index f1f3a52..7d942e1 100644 --- a/compiler/locomotiv/src/Node/MatMul.test.cpp +++ b/compiler/locomotiv/src/Node/MatMul.test.cpp @@ -142,6 +142,8 @@ 
TEST(NodeExecution_MatMul, f32_2x3_3x3) }; run_test(lhs, rhs, out, Shape{2, 3}, Shape{3, 3}, Shape{2, 3}, loco::DataType::FLOAT32); + + SUCCEED(); } /* from the code below: @@ -183,6 +185,8 @@ TEST(NodeExecution_MatMul, s32_4x2_2x6) }; run_test(lhs, rhs, out, Shape{4, 2}, Shape{2, 6}, Shape{4, 6}, loco::DataType::S32); + + SUCCEED(); } // clang-format on diff --git a/compiler/locop/src/FormattedGraph.test.cpp b/compiler/locop/src/FormattedGraph.test.cpp index c9808d3..aff9ebe 100644 --- a/compiler/locop/src/FormattedGraph.test.cpp +++ b/compiler/locop/src/FormattedGraph.test.cpp @@ -28,6 +28,8 @@ TEST(LinearV1FormatterTest, simple) // TODO Validate the output (when the implementation becomes stable) std::cout << locop::fmt(g) << std::endl; + + SUCCEED(); } TEST(LinearV1FormatterTest, user_defined_node_summary_builder) diff --git a/compiler/locop/src/FormattedTensorShape.test.cpp b/compiler/locop/src/FormattedTensorShape.test.cpp index 0f0017a..fc85df3 100644 --- a/compiler/locop/src/FormattedTensorShape.test.cpp +++ b/compiler/locop/src/FormattedTensorShape.test.cpp @@ -30,4 +30,6 @@ TEST(FormattedTensorShapeTest, BracketFormat) tensor_shape->dim(0) = 4; std::cout << fmt(tensor_shape.get()) << std::endl; + + SUCCEED(); } diff --git a/compiler/luci-interpreter/include/luci_interpreter/core/Tensor.h b/compiler/luci-interpreter/include/luci_interpreter/core/Tensor.h index 9987898..4ac3d86 100644 --- a/compiler/luci-interpreter/include/luci_interpreter/core/Tensor.h +++ b/compiler/luci-interpreter/include/luci_interpreter/core/Tensor.h @@ -79,12 +79,11 @@ private: // // Note that due to historical and performance reasons, per-tensor quantization uses unsigned // integer types, while per-channel uses signed types assuming 'zero_point' == 0. -// -// TODO Add 'quantized_dimension' field for per-channel case when IR provides it. 
struct AffineQuantization { std::vector<float> scale; std::vector<int32_t> zero_point; + int32_t quantized_dimension; }; class Tensor @@ -108,6 +107,12 @@ public: return _quantization.zero_point[0]; } + const std::vector<float> &scales() const { return _quantization.scale; } + + const std::vector<int32_t> &zero_points() const { return _quantization.zero_point; } + + int32_t quantized_dimension() const { return _quantization.quantized_dimension; } + template <typename T> const T *data() const { return reinterpret_cast<const T *>(_data.get()); } template <typename T> T *data() { return reinterpret_cast<T *>(_data.get()); } diff --git a/compiler/luci-interpreter/src/core/KernelParams.h b/compiler/luci-interpreter/src/core/KernelParams.h index a32e0d4..65d1197 100644 --- a/compiler/luci-interpreter/src/core/KernelParams.h +++ b/compiler/luci-interpreter/src/core/KernelParams.h @@ -56,6 +56,11 @@ struct Conv2DParams Activation activation; }; +struct DepthToSpaceParams +{ + int block_size; +}; + struct DepthwiseConv2DParams { Padding padding; diff --git a/compiler/luci-interpreter/src/kernels/Add.cpp b/compiler/luci-interpreter/src/kernels/Add.cpp index 9b93347..9ed155e 100644 --- a/compiler/luci-interpreter/src/kernels/Add.cpp +++ b/compiler/luci-interpreter/src/kernels/Add.cpp @@ -36,7 +36,10 @@ Add::Add(const Tensor *input1, const Tensor *input2, Tensor *output, const AddPa void Add::configure() { - assert(input1()->element_type() == input2()->element_type()); + if (input1()->element_type() != input2()->element_type()) + { + throw std::runtime_error("Input Tensor Data Type Mismatch."); + } output()->resize(calculateShapeForBroadcast(input1()->shape(), input2()->shape())); } diff --git a/compiler/luci-interpreter/src/kernels/Add.test.cpp b/compiler/luci-interpreter/src/kernels/Add.test.cpp index 54e1cc6..705b648 100644 --- a/compiler/luci-interpreter/src/kernels/Add.test.cpp +++ b/compiler/luci-interpreter/src/kernels/Add.test.cpp @@ -169,6 +169,33 @@ TEST(AddTest, Float) } } +TEST(AddTest, Input_Output_Type_NEG) +{ + Tensor input1_tensor = makeInputTensor<DataType::FLOAT32>({1}, {1.f}); + Tensor input2_tensor = makeInputTensor<DataType::S32>({1}, {2}); + Tensor output_tensor = makeOutputTensor(DataType::FLOAT32); + + AddParams params{}; + params.activation = Activation::RELU; + + Add kernel(&input1_tensor, &input2_tensor, &output_tensor, params); + EXPECT_ANY_THROW(kernel.configure()); +} + +TEST(AddTest, Invalid_Input_Type_NEG) +{ + Tensor input1_tensor = makeInputTensor<DataType::S64>({1}, {1}); + Tensor input2_tensor = makeInputTensor<DataType::S64>({1}, {2}); + Tensor output_tensor = makeOutputTensor(DataType::S64); + + AddParams params{}; + params.activation = Activation::RELU; + + Add kernel(&input1_tensor, &input2_tensor, &output_tensor, params); + kernel.configure(); + EXPECT_ANY_THROW(kernel.execute()); +} + } // namespace } // namespace kernels } // namespace luci_interpreter diff --git a/compiler/luci-interpreter/src/kernels/CMakeLists.txt b/compiler/luci-interpreter/src/kernels/CMakeLists.txt index fe36231..a1fd1de 100644 --- a/compiler/luci-interpreter/src/kernels/CMakeLists.txt +++ b/compiler/luci-interpreter/src/kernels/CMakeLists.txt @@ -12,6 +12,8 @@ set(SOURCES Concatenation.cpp Conv2D.h Conv2D.cpp + DepthToSpace.h + DepthToSpace.cpp DepthwiseConv2D.h DepthwiseConv2D.cpp Elu.h @@ -40,6 +42,10 @@ set(SOURCES Pad.cpp Reshape.h Reshape.cpp + Reverse.h + Reverse.cpp + Slice.h + Slice.cpp Softmax.h Softmax.cpp SpaceToDepth.h @@ -77,6 +83,7 @@ set(TEST_SOURCES AveragePool2D.test.cpp Concatenation.test.cpp Conv2D.test.cpp + DepthToSpace.test.cpp DepthwiseConv2D.test.cpp Elu.test.cpp FullyConnected.test.cpp @@ -91,6
+98,8 @@ set(TEST_SOURCES Mul.test.cpp Pad.test.cpp Reshape.test.cpp + Reverse.test.cpp + Slice.test.cpp Softmax.test.cpp SpaceToDepth.test.cpp Split.test.cpp diff --git a/compiler/luci-interpreter/src/kernels/DepthToSpace.cpp b/compiler/luci-interpreter/src/kernels/DepthToSpace.cpp new file mode 100644 index 0000000..cab63e2 --- /dev/null +++ b/compiler/luci-interpreter/src/kernels/DepthToSpace.cpp @@ -0,0 +1,90 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "DepthToSpace.h" +#include "Utils.h" +#include + +namespace luci_interpreter +{ +namespace kernels +{ + +DepthToSpace::DepthToSpace(const Tensor *input, Tensor *output, const DepthToSpaceParams ¶ms) + : KernelWithParams({input}, {output}, params) +{ +} + +void DepthToSpace::configure() +{ + if (input()->shape().num_dims() != 4) + { + throw std::runtime_error("Invalid input num_dims."); + } + if (output()->element_type() != DataType::FLOAT32 && output()->element_type() != DataType::U8 && + output()->element_type() != DataType::S8 && output()->element_type() != DataType::S32 && + output()->element_type() != DataType::S64) + { + throw std::runtime_error("Invalid output type"); + } + if (input()->element_type() != output()->element_type()) + { + throw std::runtime_error("Type mismatch on input and output."); + } + const int block_size = params().block_size; + const int32_t input_height = input()->shape().dim(1); + const int32_t input_width = input()->shape().dim(2); + const int32_t input_channels = input()->shape().dim(3); + int32_t output_height = input_height * block_size; + int32_t output_width = input_width * block_size; + int32_t output_channels = input_channels / block_size / block_size; + + assert(input_height == output_height / block_size); + assert(input_width == output_width / block_size); + assert(input_channels == output_channels * block_size * block_size); + + Shape output_shape(4); + output_shape.dim(0) = input()->shape().dim(0); + output_shape.dim(1) = output_height; + output_shape.dim(2) = output_width; + output_shape.dim(3) = output_channels; + + output()->resize(output_shape); +} + +void DepthToSpace::execute() const +{ + tflite::DepthToSpaceParams op_params; + op_params.block_size = params().block_size; + switch (input()->element_type()) + { + case DataType::FLOAT32: + tflite::optimized_ops::DepthToSpace(op_params, getTensorShape(input()), + getTensorData(input()), getTensorShape(output()), + getTensorData(output())); + break; + case DataType::U8: + tflite::optimized_ops::DepthToSpace(op_params, getTensorShape(input()), + getTensorData(input()), getTensorShape(output()), + getTensorData(output())); + break; + default: + throw std::runtime_error("Unsupported Type."); + } +} + +} // namespace kernels +} // namespace luci_interpreter diff --git a/compiler/luci-interpreter/src/kernels/DepthToSpace.h b/compiler/luci-interpreter/src/kernels/DepthToSpace.h new file mode 100644 index 0000000..63ce376 --- /dev/null +++ 
b/compiler/luci-interpreter/src/kernels/DepthToSpace.h @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef LUCI_INTERPRETER_KERNELS_DEPTHTOSPACE_H +#define LUCI_INTERPRETER_KERNELS_DEPTHTOSPACE_H + +#include "core/Kernel.h" +#include "core/KernelParams.h" + +#include + +namespace luci_interpreter +{ +namespace kernels +{ + +class DepthToSpace : public KernelWithParams<DepthToSpaceParams> +{ +public: + DepthToSpace(const Tensor *input, Tensor *output, const DepthToSpaceParams &params); + + const Tensor *input() const { return _inputs[0]; } + Tensor *output() const { return _outputs[0]; } + + void configure() override; + void execute() const override; +}; + +} // namespace kernels +} // namespace luci_interpreter + +#endif // LUCI_INTERPRETER_KERNELS_DEPTHTOSPACE_H diff --git a/compiler/luci-interpreter/src/kernels/DepthToSpace.test.cpp b/compiler/luci-interpreter/src/kernels/DepthToSpace.test.cpp new file mode 100644 index 0000000..1b80570 --- /dev/null +++ b/compiler/luci-interpreter/src/kernels/DepthToSpace.test.cpp @@ -0,0 +1,60 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +#include "kernels/DepthToSpace.h" +#include "kernels/TestUtils.h" + +namespace luci_interpreter +{ +namespace kernels +{ +namespace +{ + +using namespace testing; + +template class DepthToSpaceTest : public ::testing::Test +{ +}; + +using DataTypes = ::testing::Types; +TYPED_TEST_CASE(DepthToSpaceTest, DataTypes); + +TYPED_TEST(DepthToSpaceTest, SimpleCase) +{ + std::vector input_data{1, 2, 3, 4, 5, 6, 7, 8}; + Shape input_shape{1, 1, 2, 4}; + std::vector output_data{1, 2, 5, 6, 3, 4, 7, 8}; + std::vector output_shape{1, 2, 4, 1}; + + Tensor input_tensor = makeInputTensor()>(input_shape, input_data); + Tensor output_tensor = makeOutputTensor(getElementType()); + + DepthToSpaceParams params{}; + params.block_size = 2; + + DepthToSpace kernel = DepthToSpace(&input_tensor, &output_tensor, params); + kernel.configure(); + kernel.execute(); + + EXPECT_THAT(extractTensorData(output_tensor), + ::testing::ElementsAreArray(output_data)); + EXPECT_THAT(extractTensorShape(output_tensor), ::testing::ElementsAreArray(output_shape)); +} + +} // namespace +} // namespace kernels +} // namespace luci_interpreter diff --git a/compiler/luci-interpreter/src/kernels/L2Normalize.test.cpp b/compiler/luci-interpreter/src/kernels/L2Normalize.test.cpp index fad450d..f53eaca 100644 --- a/compiler/luci-interpreter/src/kernels/L2Normalize.test.cpp +++ b/compiler/luci-interpreter/src/kernels/L2Normalize.test.cpp @@ -45,12 +45,9 @@ TEST(L2NormalizeTest, Float) ElementsAreArray(ArrayFloatNear(ref_output_data))); } -TEST(L2NormalizeTest, Uint8Quantized) -{ - // TODO - // Implement GetDequantizedOutput Function. - // Create Test for Uint8 Case -} +// TODO Uint8Quantized +// Implement GetDequantizedOutput Function. +// Create Test for Uint8 Case } // namespace } // namespace kernels diff --git a/compiler/luci-interpreter/src/kernels/LeakyRelu.test.cpp b/compiler/luci-interpreter/src/kernels/LeakyRelu.test.cpp index b0c06e7..c79d3d6 100644 --- a/compiler/luci-interpreter/src/kernels/LeakyRelu.test.cpp +++ b/compiler/luci-interpreter/src/kernels/LeakyRelu.test.cpp @@ -61,15 +61,14 @@ TEST(LeakReluTest, FloatSimple) 1.0f, -0.5f, -1.0f, // Row 2 }, /*alpha=*/0.5f, getElementType()); -} -TEST(LeakReluTest, Uint8Simple) -{ - // TODO - // Implement GetDequantizedOutput Function. - // Create Test for Uint8 Case + SUCCEED(); } +// TODO Uint8Simple +// Implement GetDequantizedOutput Function. +// Create Test for Uint8 Case + } // namespace } // namespace kernels } // namespace luci_interpreter diff --git a/compiler/luci-interpreter/src/kernels/Logistic.test.cpp b/compiler/luci-interpreter/src/kernels/Logistic.test.cpp index 17456a4..00feddf 100644 --- a/compiler/luci-interpreter/src/kernels/Logistic.test.cpp +++ b/compiler/luci-interpreter/src/kernels/Logistic.test.cpp @@ -49,10 +49,8 @@ TEST(LogisticTest, Float) // TODO make a Shape checking of output_tensor. } -TEST(LogisticTest, Uint8) -{ - // Need to Implement GetDequantizedOutput Function. -} +// TODO Uint8 +// Need to Implement GetDequantizedOutput Function. } // namespace } // namespace kernels diff --git a/compiler/luci-interpreter/src/kernels/Reverse.cpp b/compiler/luci-interpreter/src/kernels/Reverse.cpp new file mode 100644 index 0000000..a463084 --- /dev/null +++ b/compiler/luci-interpreter/src/kernels/Reverse.cpp @@ -0,0 +1,81 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "kernels/Reverse.h" +#include "kernels/Utils.h" +#include + +namespace luci_interpreter +{ + +namespace kernels +{ + +Reverse::Reverse(const Tensor *input, const Tensor *axes, Tensor *output) + : Kernel({input, axes}, {output}) +{ +} + +void Reverse::configure() +{ + assert(axes()->shape().num_dims() == 1); + assert(input()->shape().num_dims() >= axes()->shape().num_elements()); + if (input()->element_type() != DataType::S32 && input()->element_type() != DataType::FLOAT32 && + input()->element_type() != DataType::U8 && input()->element_type() != DataType::S16 && + input()->element_type() != DataType::S64) + { + throw std::runtime_error("Unsupported input type."); + } + if (axes()->element_type() != DataType::S32) + { + throw std::runtime_error("Unsupported axes type."); + } + if (axes()->shape().num_elements() > 1) + { + throw std::runtime_error("Current implementation does not support more than 1 axis."); + } + int axis_value = getTensorData(axes())[0]; + if (axis_value < 0 || axis_value >= input()->shape().num_dims()) + { + throw std::runtime_error("Invalid axes value"); + } + assert(input()->element_type() == output()->element_type()); + + output()->resize(input()->shape()); +} + +void Reverse::execute() const +{ + int axis_value = getTensorData(axes())[0]; + switch (output()->element_type()) + { + case DataType::FLOAT32: + tflite::reference_ops::Reverse(axis_value, getTensorShape(input()), + getTensorData(input()), getTensorShape(output()), + getTensorData(output())); + break; + case DataType::U8: + tflite::reference_ops::Reverse( + axis_value, getTensorShape(input()), getTensorData(input()), + getTensorShape(output()), getTensorData(output())); + break; + default: + throw std::runtime_error("Unsupported output type"); + } +} + +} // namespace kernels +} // namespace luci_interpreter diff --git a/compiler/luci-interpreter/src/kernels/Reverse.h b/compiler/luci-interpreter/src/kernels/Reverse.h new file mode 100644 index 0000000..3489dae --- /dev/null +++ b/compiler/luci-interpreter/src/kernels/Reverse.h @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef LUCI_INTERPRETER_KERNELS_REVERSE_H +#define LUCI_INTERPRETER_KERNELS_REVERSE_H + +#include "core/Kernel.h" + +namespace luci_interpreter +{ +namespace kernels +{ + +class Reverse : public Kernel +{ +public: + Reverse(const Tensor *input, const Tensor *axes, Tensor *output); + + const Tensor *input() const { return _inputs[0]; } + const Tensor *axes() const { return _inputs[1]; } + Tensor *output() const { return _outputs[0]; } + + void configure() override; + void execute() const override; +}; + +} // namespace kernels +} // namespace luci_interpreter + +#endif // LUCI_INTERPRETER_KERNELS_REVERSE_H diff --git a/compiler/luci-interpreter/src/kernels/Reverse.test.cpp b/compiler/luci-interpreter/src/kernels/Reverse.test.cpp new file mode 100644 index 0000000..5475a8b --- /dev/null +++ b/compiler/luci-interpreter/src/kernels/Reverse.test.cpp @@ -0,0 +1,66 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved + * Copyright 2017 The TensorFlow Authors. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "kernels/Reverse.h" +#include "kernels/TestUtils.h" + +namespace luci_interpreter +{ +namespace kernels +{ +namespace +{ + +using namespace testing; + +template class ReverseTest : public ::testing::Test +{ +}; + +using DataTypes = ::testing::Types; +TYPED_TEST_CASE(ReverseTest, DataTypes); + +TYPED_TEST(ReverseTest, MultiDimensions) +{ + // TypeParam + std::vector input_data{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, + 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24}; + Shape input_shape{4, 3, 2}; + std::vector axis_data{1}; + Shape axis_shape{1}; + + std::vector output_data{5, 6, 3, 4, 1, 2, 11, 12, 9, 10, 7, 8, + 17, 18, 15, 16, 13, 14, 23, 24, 21, 22, 19, 20}; + std::vector output_shape{4, 3, 2}; + + Tensor input_tensor = makeInputTensor()>(input_shape, input_data); + Tensor axis_tensor = makeInputTensor(axis_shape, axis_data); + + Tensor output_tensor = makeOutputTensor(getElementType()); + + Reverse kernel = Reverse(&input_tensor, &axis_tensor, &output_tensor); + kernel.configure(); + kernel.execute(); + + EXPECT_THAT(extractTensorData(output_tensor), + ::testing::ElementsAreArray(output_data)); + EXPECT_THAT(extractTensorShape(output_tensor), ::testing::ElementsAreArray(output_shape)); +} + +} // namespace +} // namespace kernels +} // namespace luci_interpreter diff --git a/compiler/luci-interpreter/src/kernels/Slice.cpp b/compiler/luci-interpreter/src/kernels/Slice.cpp new file mode 100644 index 0000000..c4bc3c5 --- /dev/null +++ b/compiler/luci-interpreter/src/kernels/Slice.cpp @@ -0,0 +1,149 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "kernels/Slice.h" +#include "Utils.h" +#include + +#include +#include + +namespace luci_interpreter +{ + +namespace kernels +{ +const int max_dim = 4; + +Slice::Slice(const Tensor *input, const Tensor *begin, const Tensor *size, Tensor *output) + : Kernel({input, begin, size}, {output}) +{ +} + +template +Shape calculateOutputShape(const Tensor *input, const Tensor *begin, const Tensor *size) +{ + Shape output_shape = Shape(input->shape().num_dims()); + for (int idx = 0; idx < input->shape().num_dims(); idx++) + { + T size_value = getTensorData(size)[idx]; + if (size_value < 0) + { + if (size_value != -1) + { + throw std::runtime_error("Invalid size."); + } + size_value = input->shape().dim(idx) - getTensorData(begin)[idx]; + } + else + { + if (input->shape().dim(idx) < getTensorData(begin)[idx] + size_value) + { + throw std::runtime_error("Invalid begin and size."); + } + } + output_shape.dim(idx) = static_cast(size_value); + } + return output_shape; +} + +template +void getBeginAndSizeVectors(int dimensions, const Tensor *begin, const Tensor *size, + std::vector *begins, std::vector *sizes) +{ + for (int idx = dimensions - 1; idx >= 0; --idx) + { + begins->push_back(getTensorData(begin)[idx]); + sizes->push_back(getTensorData(size)[idx]); + } +} + +void Slice::configure() +{ + assert(input()->element_type() == output()->element_type()); + assert(begin()->element_type() == DataType::S32 || begin()->element_type() == DataType::S64); + assert(size()->element_type() == DataType::S32 || size()->element_type() == DataType::S64); + assert(begin()->shape().num_dims() == 1); + assert(size()->shape().num_dims() == 1); + assert(input()->shape().num_dims() <= max_dim); + + if (begin()->element_type() == DataType::S32) + { + output()->resize(calculateOutputShape(input(), begin(), size())); + } + else if (begin()->element_type() == DataType::S64) + { + output()->resize(calculateOutputShape(input(), begin(), size())); + } + else + { + throw std::runtime_error("Unsupported type."); + } +} + +void Slice::execute() const +{ + std::vector begins; + begins.reserve(max_dim); + std::vector sizes; + sizes.reserve(max_dim); + if (begin()->element_type() == DataType::S32) + { + getBeginAndSizeVectors(input()->shape().num_dims(), begin(), size(), &begins, &sizes); + } + else if (begin()->element_type() == DataType::S64) + { + getBeginAndSizeVectors(input()->shape().num_dims(), begin(), size(), &begins, &sizes); + } + else + { + throw std::runtime_error("Unsupported begin type."); + } + for (int i = input()->shape().num_dims(); i < max_dim; ++i) + { + begins.push_back(0); + sizes.push_back(1); + } + + assert(begins.size() == 4); + assert(sizes.size() == 4); + tflite::SliceParams op_params{}; + op_params.begin_count = 4; + op_params.size_count = 4; + for (int i = 0; i < 4; i++) + { + op_params.begin[i] = begins[3 - i]; + op_params.size[i] = sizes[3 - i]; + } + switch (input()->element_type()) + { + case DataType::FLOAT32: + tflite::optimized_ops::Slice(op_params, getTensorShape(input()), + getTensorData(input()), getTensorShape(output()), + getTensorData(output())); + break; + case 
DataType::U8: + tflite::optimized_ops::Slice(op_params, getTensorShape(input()), + getTensorData(input()), getTensorShape(output()), + getTensorData(output())); + break; + default: + throw std::runtime_error("Unsupported input type."); + } +} + +} // namespace kernels +} // namespace luci_interpreter diff --git a/compiler/luci-interpreter/src/kernels/Slice.h b/compiler/luci-interpreter/src/kernels/Slice.h new file mode 100644 index 0000000..23c3596 --- /dev/null +++ b/compiler/luci-interpreter/src/kernels/Slice.h @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef LUCI_INTERPRETER_KERNELS_SLICE_H +#define LUCI_INTERPRETER_KERNELS_SLICE_H + +#include "core/Kernel.h" + +namespace luci_interpreter +{ +namespace kernels +{ + +class Slice : public Kernel +{ +public: + Slice(const Tensor *input, const Tensor *begin, const Tensor *size, Tensor *output); + + const Tensor *input() const { return _inputs[0]; } + const Tensor *begin() const { return _inputs[1]; } + const Tensor *size() const { return _inputs[2]; } + Tensor *output() const { return _outputs[0]; } + + void configure() override; + void execute() const override; +}; + +} // namespace kernels +} // namespace luci_interpreter + +#endif // LUCI_INTERPRETER_KERNELS_SLICE_H diff --git a/compiler/luci-interpreter/src/kernels/Slice.test.cpp b/compiler/luci-interpreter/src/kernels/Slice.test.cpp new file mode 100644 index 0000000..a360a29 --- /dev/null +++ b/compiler/luci-interpreter/src/kernels/Slice.test.cpp @@ -0,0 +1,64 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "kernels/Slice.h" +#include "kernels/TestUtils.h" + +namespace luci_interpreter +{ +namespace kernels +{ +namespace +{ + +using namespace testing; + +template class SliceTest : public ::testing::Test +{ +}; + +using DataTypes = ::testing::Types; +TYPED_TEST_CASE(SliceTest, DataTypes); + +TYPED_TEST(SliceTest, SimpleTest) +{ + std::vector input_data{1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6}; + Shape input_shape{3, 2, 3, 1}; + std::vector begin_data{1, 0, 0, 0}; + Shape begin_shape{4}; + std::vector size_data{2, 1, -1, 1}; + Shape size_shape{4}; + std::vector output_data{3, 3, 3, 5, 5, 5}; + std::vector output_shape{2, 1, 3, 1}; + + Tensor input_tensor = makeInputTensor()>(input_shape, input_data); + Tensor begin_tensor = makeInputTensor(begin_shape, begin_data); + Tensor size_tensor = makeInputTensor(size_shape, size_data); + + Tensor output_tensor = makeOutputTensor(getElementType()); + + Slice kernel(&input_tensor, &begin_tensor, &size_tensor, &output_tensor); + kernel.configure(); + kernel.execute(); + + EXPECT_THAT(extractTensorData(output_tensor), + ::testing::ElementsAreArray(output_data)); + EXPECT_THAT(extractTensorShape(output_tensor), ::testing::ElementsAreArray(output_shape)); +} + +} // namespace +} // namespace kernels +} // namespace luci_interpreter diff --git a/compiler/luci-interpreter/src/kernels/TransposeConv.test.cpp b/compiler/luci-interpreter/src/kernels/TransposeConv.test.cpp index 3386d36..b8c0ac4 100644 --- a/compiler/luci-interpreter/src/kernels/TransposeConv.test.cpp +++ b/compiler/luci-interpreter/src/kernels/TransposeConv.test.cpp @@ -68,6 +68,8 @@ TEST(TransposeConvTest, FloatSimple) /*output_data=*/{29, 62, 83, 75, 99, 192, 237, 198, 207, 372, 417, 330, 263, 446, 485, 365}, /*params.padding=*/luci::Padding::SAME, /*stride_height=*/1, /*stride_width=*/1, getElementType()); + + SUCCEED(); } TEST(TransposeConvTest, FloatTwoFiltersTest) @@ -82,21 +84,18 @@ TEST(TransposeConvTest, FloatTwoFiltersTest) 3352, 3652, 2760}, /*params.padding=*/luci::Padding::SAME, /*stride_height=*/1, /*stride_width=*/1, getElementType()); -} -TEST(TransposeConvTest, Uint8Simple) -{ - // TODO - // Implement GetDequantizedOutput Function. - // Create Test for Uint8 Case -} -TEST(TransposeConvTest, Uint8FiltersTest) -{ - // TODO - // Implement GetDequantizedOutput Function. - // Create Test for Uint8 Case + SUCCEED(); } +// TODO Uint8Simple +// Implement GetDequantizedOutput Function. +// Create Test for Uint8 Case + +// TODO Uint8FiltersTest +// Implement GetDequantizedOutput Function. 
+// Create Test for Uint8 Case + } // namespace } // namespace kernels } // namespace luci_interpreter diff --git a/compiler/luci-interpreter/src/loader/CMakeLists.txt b/compiler/luci-interpreter/src/loader/CMakeLists.txt index fb36c4a..d99485d 100644 --- a/compiler/luci-interpreter/src/loader/CMakeLists.txt +++ b/compiler/luci-interpreter/src/loader/CMakeLists.txt @@ -1,3 +1,5 @@ +nnas_find_package(GTest REQUIRED) + set(SOURCES GraphLoader.h GraphLoader.cpp @@ -13,3 +15,8 @@ target_include_directories(luci_interpreter_loader PUBLIC "${LUCI_INTERPRETER_SO target_link_libraries(luci_interpreter_loader PUBLIC luci_lang luci_interpreter_core PRIVATE luci_interpreter_kernels nncc_common) + +set(TEST_SOURCES KernelBuilder.test.cpp) + +GTest_AddTest(luci_interpreter_loader_test ${TEST_SOURCES}) +target_link_libraries(luci_interpreter_loader_test luci_interpreter_loader) diff --git a/compiler/luci-interpreter/src/loader/GraphLoader.cpp b/compiler/luci-interpreter/src/loader/GraphLoader.cpp index 779fa06..95c6547 100644 --- a/compiler/luci-interpreter/src/loader/GraphLoader.cpp +++ b/compiler/luci-interpreter/src/loader/GraphLoader.cpp @@ -16,7 +16,6 @@ #include "loader/GraphLoader.h" -#include "loader/ModuleLoader.h" #include "loader/KernelBuilder.h" #include @@ -70,9 +69,10 @@ bool isExecutableNode(const luci::CircleNode *node) switch (node->opcode()) { // These nodes denote inputs / outputs of a graph. - case luci::CircleOpcode::CONST: + case luci::CircleOpcode::CIRCLECONST: case luci::CircleOpcode::CIRCLEINPUT: case luci::CircleOpcode::CIRCLEOUTPUT: + case luci::CircleOpcode::CIRCLEOUTPUTEXCLUDE: // The following nodes denote outputs of multiple-output nodes. case luci::CircleOpcode::CIRCLEIFOUT: case luci::CircleOpcode::CIRCLESPLITOUT: @@ -102,11 +102,12 @@ bool isTensorProducingNode(const luci::CircleNode *node) } // namespace -GraphLoader::GraphLoader(const ModuleLoader &module_loader, const loco::Graph *graph, - RuntimeGraph *runtime_graph, RuntimeToIR &runtime_to_ir, - std::unordered_map &node_to_tensor) - : _module_loader(module_loader), _graph(graph), _runtime_graph(runtime_graph), - _runtime_to_ir(runtime_to_ir), _node_to_tensor(node_to_tensor) +GraphLoader::GraphLoader( + const loco::Graph *graph, RuntimeGraph *runtime_graph, RuntimeToIR &runtime_to_ir, + const std::unordered_map &graph_to_runtime_graph, + std::unordered_map &node_to_tensor) + : _graph(graph), _runtime_graph(runtime_graph), _runtime_to_ir(runtime_to_ir), + _graph_to_runtime_graph(graph_to_runtime_graph), _node_to_tensor(node_to_tensor) { } @@ -136,6 +137,7 @@ void GraphLoader::loadTensors() const luci::CircleQuantParam *params = node->quantparam(); quantization.scale.assign(params->scale.cbegin(), params->scale.cend()); quantization.zero_point.assign(params->zerop.cbegin(), params->zerop.cend()); + quantization.quantized_dimension = params->quantized_dimension; } auto tensor = std::make_unique(node->dtype(), std::move(shape), std::move(quantization), @@ -178,7 +180,7 @@ void GraphLoader::initInputOutputTensors() const void GraphLoader::loadOperators() { - KernelBuilder kernel_builder(_module_loader, *this); + KernelBuilder kernel_builder(_graph_to_runtime_graph, _node_to_tensor); // Create kernels for executable nodes. This has to be done in execution order. 
for (const loco::Node *loco_node : @@ -195,11 +197,4 @@ void GraphLoader::loadOperators() } } -void GraphLoader::load() -{ - loadTensors(); - initInputOutputTensors(); - loadOperators(); -} - } // namespace luci_interpreter diff --git a/compiler/luci-interpreter/src/loader/GraphLoader.h b/compiler/luci-interpreter/src/loader/GraphLoader.h index e0adc0f..89c5bca 100644 --- a/compiler/luci-interpreter/src/loader/GraphLoader.h +++ b/compiler/luci-interpreter/src/loader/GraphLoader.h @@ -27,29 +27,23 @@ namespace luci_interpreter { -class ModuleLoader; - class GraphLoader { public: - GraphLoader(const ModuleLoader &module_loader, const loco::Graph *graph, - RuntimeGraph *runtime_graph, RuntimeToIR &runtime_to_ir, + GraphLoader(const loco::Graph *graph, RuntimeGraph *runtime_graph, RuntimeToIR &runtime_to_ir, + const std::unordered_map &graph_to_runtime_graph, std::unordered_map &node_to_tensor); - void load(); - - Tensor *getTensorForNode(const loco::Node *node) const { return _node_to_tensor.at(node); } - -private: - void loadOperators(); - void initInputOutputTensors() const; void loadTensors(); + void initInputOutputTensors() const; + void loadOperators(); - const ModuleLoader &_module_loader; +private: const loco::Graph *_graph; RuntimeGraph *_runtime_graph; RuntimeToIR &_runtime_to_ir; + const std::unordered_map &_graph_to_runtime_graph; std::unordered_map &_node_to_tensor; }; diff --git a/compiler/luci-interpreter/src/loader/KernelBuilder.cpp b/compiler/luci-interpreter/src/loader/KernelBuilder.cpp index 56da961..12c7f45 100644 --- a/compiler/luci-interpreter/src/loader/KernelBuilder.cpp +++ b/compiler/luci-interpreter/src/loader/KernelBuilder.cpp @@ -21,6 +21,7 @@ #include "kernels/AveragePool2D.h" #include "kernels/Concatenation.h" #include "kernels/Conv2D.h" +#include "kernels/DepthToSpace.h" #include "kernels/DepthwiseConv2D.h" #include "kernels/Elu.h" #include "kernels/FullyConnected.h" @@ -35,6 +36,8 @@ #include "kernels/Mul.h" #include "kernels/Pad.h" #include "kernels/Reshape.h" +#include "kernels/Reverse.h" +#include "kernels/Slice.h" #include "kernels/Softmax.h" #include "kernels/SpaceToDepth.h" #include "kernels/Split.h" @@ -43,8 +46,6 @@ #include "kernels/Unpack.h" #include "kernels/Transpose.h" #include "kernels/TransposeConv.h" -#include "loader/GraphLoader.h" -#include "loader/ModuleLoader.h" #include @@ -68,20 +69,23 @@ static std::vector collectOutputNodes(const luci::CircleNode const Tensor *KernelBuilder::getInputTensor(const loco::Node *node) const { - const Tensor *tensor = _graph_loader.getTensorForNode(node); + const Tensor *tensor = _node_to_tensor.at(node); assert(tensor != nullptr); return tensor; } const Tensor *KernelBuilder::getOptionalInputTensor(const loco::Node *node) const { - // TODO Revise this when optional inputs are implemented in the IR. 
+ if (dynamic_cast(node)) + { + return nullptr; + } return getInputTensor(node); } Tensor *KernelBuilder::getOutputTensor(const loco::Node *node) const { - Tensor *tensor = _graph_loader.getTensorForNode(node); + Tensor *tensor = _node_to_tensor.at(node); assert(tensor != nullptr); return tensor; } @@ -98,7 +102,7 @@ KernelBuilder::getOutputTensors(const std::vector &nodes) co RuntimeGraph *KernelBuilder::getRuntimeGraph(const loco::Graph *graph) const { - RuntimeGraph *runtime_graph = _module_loader.getRuntimeGraph(graph); + RuntimeGraph *runtime_graph = _graph_to_runtime_graph.at(graph); assert(runtime_graph != nullptr); return runtime_graph; } @@ -120,14 +124,14 @@ std::unique_ptr KernelBuilder::visit(const luci::CircleAdd *node) std::unique_ptr KernelBuilder::visit(const luci::CircleArgMax *node) { assert(node->arity() == 2); - const Tensor *input1 = getInputTensor(node->input()); - const Tensor *input2 = getInputTensor(node->dimension()); + const Tensor *input = getInputTensor(node->input()); + const Tensor *axis = getInputTensor(node->dimension()); Tensor *output = getOutputTensor(node); ArgMaxParams params{}; params.output_type = node->output_type(); - return std::make_unique(input1, input2, output, params); + return std::make_unique(input, axis, output, params); } std::unique_ptr KernelBuilder::visit(const luci::CircleAveragePool2D *node) @@ -188,6 +192,19 @@ std::unique_ptr KernelBuilder::visit(const luci::CircleConv2D *node) return std::make_unique(input, filter, bias, output, params); } +std::unique_ptr KernelBuilder::visit(const luci::CircleDepthToSpace *node) +{ + assert(node->arity() == 1); + + const Tensor *input = getInputTensor(node->input()); + Tensor *output = getOutputTensor(node); + + DepthToSpaceParams params{}; + params.block_size = node->block_size(); + + return std::make_unique(input, output, params); +} + std::unique_ptr KernelBuilder::visit(const luci::CircleDepthwiseConv2D *node) { assert(node->arity() == 3); @@ -224,14 +241,14 @@ std::unique_ptr KernelBuilder::visit(const luci::CircleFullyConnected *n assert(node->arity() == 3); const Tensor *input = getInputTensor(node->input()); - const Tensor *filter = getInputTensor(node->weights()); + const Tensor *weights = getInputTensor(node->weights()); const Tensor *bias = getOptionalInputTensor(node->bias()); Tensor *output = getOutputTensor(node); FullyConnectedParams params{}; params.activation = node->fusedActivationFunction(); - return std::make_unique(input, filter, bias, output, params); + return std::make_unique(input, weights, bias, output, params); } std::unique_ptr KernelBuilder::visit(const luci::CircleIf *node) @@ -255,6 +272,11 @@ std::unique_ptr KernelBuilder::visit(const luci::CircleIf *node) else_graph); } +std::unique_ptr KernelBuilder::visit(const luci::CircleInput *) +{ + throw std::runtime_error("Input node cannot be executed."); +} + std::unique_ptr KernelBuilder::visit(const luci::CircleL2Normalize *node) { assert(node->arity() == 1); @@ -323,11 +345,6 @@ std::unique_ptr KernelBuilder::visit(const luci::CircleLogistic *node) return std::make_unique(input, output); } -std::unique_ptr KernelBuilder::visit(const luci::CircleInput *) -{ - throw std::runtime_error("Input node cannot be executed."); -} - std::unique_ptr KernelBuilder::visit(const luci::CircleMaxPool2D *node) { assert(node->arity() == 1); @@ -402,6 +419,30 @@ std::unique_ptr KernelBuilder::visit(const luci::CircleReshape *node) return std::make_unique(input, shape, output); } +std::unique_ptr KernelBuilder::visit(const 
luci::CircleReverseV2 *node) +{ + assert(node->arity() == 2); + + const Tensor *input = getInputTensor(node->tensor()); + const Tensor *axes = getInputTensor(node->axis()); + Tensor *output = getOutputTensor(node); + + return std::make_unique(input, axes, output); +} + +std::unique_ptr KernelBuilder::visit(const luci::CircleSlice *node) +{ + assert(node->arity() == 3); + + const Tensor *input = getInputTensor(node->input()); + const Tensor *begin = getInputTensor(node->begin()); + const Tensor *size = getInputTensor(node->size()); + + Tensor *output = getOutputTensor(node); + + return std::make_unique(input, begin, size, output); +} + std::unique_ptr KernelBuilder::visit(const luci::CircleSoftmax *node) { assert(node->arity() == 1); @@ -442,6 +483,19 @@ std::unique_ptr KernelBuilder::visit(const luci::CircleSplit *node) return std::make_unique(axis, input, std::move(outputs)); } +std::unique_ptr KernelBuilder::visit(const luci::CircleSqueeze *node) +{ + assert(node->arity() == 1); + + const Tensor *input = getInputTensor(node->input()); + Tensor *output = getOutputTensor(node); + + SqueezeParams params{}; + params.squeeze_dims = node->squeeze_dims(); + + return std::make_unique(input, output, params); +} + std::unique_ptr KernelBuilder::visit(const luci::CircleStridedSlice *node) { assert(node->arity() == 4); @@ -463,21 +517,15 @@ std::unique_ptr KernelBuilder::visit(const luci::CircleStridedSlice *nod return std::make_unique(input, begin, end, strides, output, params); } -std::unique_ptr KernelBuilder::visit(const luci::CircleSqueeze *node) +std::unique_ptr KernelBuilder::visit(const luci::CircleTranspose *node) { - assert(node->arity() == 1); + assert(node->arity() == 2); - const Tensor *input = getInputTensor(node->input()); + const Tensor *input = getInputTensor(node->a()); + const Tensor *perm = getInputTensor(node->perm()); Tensor *output = getOutputTensor(node); - SqueezeParams params{}; - assert(node->squeeze_dims().size() <= 4); - for (size_t i = 0; i < node->squeeze_dims().size(); i++) - { - params.squeeze_dims.push_back(node->squeeze_dims().at(i)); - } - - return std::make_unique(input, output, params); + return std::make_unique(input, perm, output); } std::unique_ptr KernelBuilder::visit(const luci::CircleTransposeConv *node) @@ -515,15 +563,4 @@ std::unique_ptr KernelBuilder::visit(const luci::CircleUnpack *node) return std::make_unique(input, std::move(outputs), params); } -std::unique_ptr KernelBuilder::visit(const luci::CircleTranspose *node) -{ - assert(node->arity() == 2); - - const Tensor *input = getInputTensor(node->a()); - const Tensor *perm = getInputTensor(node->perm()); - Tensor *output = getOutputTensor(node); - - return std::make_unique(input, perm, output); -} - } // namespace luci_interpreter diff --git a/compiler/luci-interpreter/src/loader/KernelBuilder.h b/compiler/luci-interpreter/src/loader/KernelBuilder.h index 7e30d39..d5c5a4b 100644 --- a/compiler/luci-interpreter/src/loader/KernelBuilder.h +++ b/compiler/luci-interpreter/src/loader/KernelBuilder.h @@ -24,18 +24,18 @@ #include #include +#include namespace luci_interpreter { -class GraphLoader; -class ModuleLoader; - class KernelBuilder : public luci::CircleNodeVisitor> { public: - KernelBuilder(const ModuleLoader &module_loader, const GraphLoader &graph_loader) - : _module_loader(module_loader), _graph_loader(graph_loader) + KernelBuilder( + const std::unordered_map &graph_to_runtime_graph, + const std::unordered_map &node_to_tensor) + : _graph_to_runtime_graph(graph_to_runtime_graph), 
_node_to_tensor(node_to_tensor) { } @@ -45,6 +45,7 @@ public: std::unique_ptr visit(const luci::CircleConcatenation *node) override; std::unique_ptr visit(const luci::CircleConv2D *node) override; std::unique_ptr visit(const luci::CircleConst *node) override; + std::unique_ptr visit(const luci::CircleDepthToSpace *node) override; std::unique_ptr visit(const luci::CircleDepthwiseConv2D *node) override; std::unique_ptr visit(const luci::CircleElu *node) override; std::unique_ptr visit(const luci::CircleFullyConnected *node) override; @@ -61,6 +62,8 @@ public: std::unique_ptr visit(const luci::CircleOutput *node) override; std::unique_ptr visit(const luci::CirclePad *node) override; std::unique_ptr visit(const luci::CircleReshape *node) override; + std::unique_ptr visit(const luci::CircleReverseV2 *node) override; + std::unique_ptr visit(const luci::CircleSlice *node) override; std::unique_ptr visit(const luci::CircleSoftmax *node) override; std::unique_ptr visit(const luci::CircleSpaceToDepth *node) override; std::unique_ptr visit(const luci::CircleSplit *node) override; @@ -82,8 +85,8 @@ private: RuntimeGraph *getRuntimeGraph(const loco::Graph *graph) const; private: - const ModuleLoader &_module_loader; - const GraphLoader &_graph_loader; + const std::unordered_map &_graph_to_runtime_graph; + const std::unordered_map &_node_to_tensor; }; } // namespace luci_interpreter diff --git a/compiler/luci-interpreter/src/loader/KernelBuilder.test.cpp b/compiler/luci-interpreter/src/loader/KernelBuilder.test.cpp new file mode 100644 index 0000000..33bc8ec --- /dev/null +++ b/compiler/luci-interpreter/src/loader/KernelBuilder.test.cpp @@ -0,0 +1,743 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "loader/GraphLoader.h" +#include "loader/KernelBuilder.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +namespace luci_interpreter +{ +namespace +{ + +using namespace testing; + +class KernelBuilderTest : public Test +{ +protected: + luci::CircleInput *createInputNode() { return createNode(); } + + template NodeT *createNode(Args &&... args) + { + auto *node = _graph.nodes()->create(std::forward(args)...); + // The actual type does not matter for the purpose of the tests. + // NOTE The type is meaningless for nodes with multiple outputs (corresponding *Out nodes carry + // actual output types). 
+    node->dtype(loco::DataType::FLOAT32);
+    return node;
+  }
+
+  template <typename NodeOutT> NodeOutT *createNodeOut(loco::Node *node, int index)
+  {
+    auto *node_out = createNode<NodeOutT>();
+    node_out->input(node);
+    node_out->index(index);
+    return node_out;
+  }
+
+  template <typename KernelT> std::unique_ptr<KernelT> buildKernel(const luci::CircleNode *op)
+  {
+    std::unordered_map<const loco::Graph *, RuntimeGraph *> graph_to_runtime_graph;
+
+    RuntimeGraph runtime_graph(nullptr);
+    RuntimeToIR runtime_to_ir;
+    GraphLoader graph_loader(&_graph, &runtime_graph, runtime_to_ir, graph_to_runtime_graph,
+                             _node_to_tensor);
+    graph_loader.loadTensors();
+
+    KernelBuilder kernel_builder(graph_to_runtime_graph, _node_to_tensor);
+
+    auto kernel = op->accept(&kernel_builder);
+    return std::unique_ptr<KernelT>(dynamic_cast<KernelT *>(kernel.release()));
+  }
+
+  void checkTensor(const Tensor *tensor, const loco::Node *node)
+  {
+    EXPECT_THAT(tensor, Eq(_node_to_tensor.at(node)));
+  }
+
+private:
+  loco::Graph _graph;
+  std::unordered_map<const loco::Node *, Tensor *> _node_to_tensor;
+};
+
+TEST_F(KernelBuilderTest, Add)
+{
+  auto *input1 = createInputNode();
+  auto *input2 = createInputNode();
+
+  auto *op = createNode<luci::CircleAdd>();
+  op->x(input1);
+  op->y(input2);
+
+  op->fusedActivationFunction(luci::FusedActFunc::RELU);
+
+  auto kernel = buildKernel<kernels::Add>(op);
+  ASSERT_THAT(kernel, NotNull());
+
+  checkTensor(kernel->input1(), input1);
+  checkTensor(kernel->input2(), input2);
+  checkTensor(kernel->output(), op);
+  EXPECT_THAT(kernel->params().activation, Eq(op->fusedActivationFunction()));
+}
+
+TEST_F(KernelBuilderTest, ArgMax)
+{
+  auto *input = createInputNode();
+  auto *axis = createInputNode();
+
+  auto *op = createNode<luci::CircleArgMax>();
+  op->input(input);
+  op->dimension(axis);
+
+  op->output_type(loco::DataType::FLOAT32);
+
+  auto kernel = buildKernel<kernels::ArgMax>(op);
+  ASSERT_THAT(kernel, NotNull());
+
+  checkTensor(kernel->input(), input);
+  checkTensor(kernel->axis(), axis);
+  checkTensor(kernel->output(), op);
+  EXPECT_THAT(kernel->params().output_type, Eq(op->output_type()));
+}
+
+TEST_F(KernelBuilderTest, AveragePool2D)
+{
+  auto *input = createInputNode();
+
+  auto *op = createNode<luci::CircleAveragePool2D>();
+  op->value(input);
+
+  op->padding(luci::Padding::SAME);
+  op->filter()->h(11);
+  op->filter()->w(13);
+  op->stride()->h(17);
+  op->stride()->w(19);
+  op->fusedActivationFunction(luci::FusedActFunc::RELU);
+
+  auto kernel = buildKernel<kernels::AveragePool2D>(op);
+  ASSERT_THAT(kernel, NotNull());
+
+  checkTensor(kernel->input(), input);
+  checkTensor(kernel->output(), op);
+  EXPECT_THAT(kernel->params().padding, Eq(op->padding()));
+  EXPECT_THAT(kernel->params().filter_height, Eq(op->filter()->h()));
+  EXPECT_THAT(kernel->params().filter_width, Eq(op->filter()->w()));
+  EXPECT_THAT(kernel->params().stride_height, Eq(op->stride()->h()));
+  EXPECT_THAT(kernel->params().stride_width, Eq(op->stride()->w()));
+  EXPECT_THAT(kernel->params().activation, Eq(op->fusedActivationFunction()));
+}
+
+TEST_F(KernelBuilderTest, Concatenation)
+{
+  auto *input1 = createInputNode();
+  auto *input2 = createInputNode();
+
+  auto *op = createNode<luci::CircleConcatenation>(2);
+  op->values(0, input1);
+  op->values(1, input2);
+  op->axis(11);
+
+  auto kernel = buildKernel<kernels::Concatenation>(op);
+  ASSERT_THAT(kernel, NotNull());
+
+  checkTensor(kernel->input(0), input1);
+  checkTensor(kernel->input(1), input2);
+  checkTensor(kernel->output(), op);
+  EXPECT_THAT(kernel->params().axis, Eq(op->axis()));
+}
+
+TEST_F(KernelBuilderTest, Conv2D)
+{
+  auto *input = createInputNode();
+  auto *filter = createInputNode();
+  auto *bias = createInputNode();
+
+  auto *op = createNode<luci::CircleConv2D>();
+  op->input(input);
+  op->filter(filter);
+  op->bias(bias);
+
+  op->padding(luci::Padding::SAME);
+  op->stride()->h(11);
+  op->stride()->w(13);
+  op->dilation()->h(17);
+  op->dilation()->w(19);
+  op->fusedActivationFunction(luci::FusedActFunc::RELU);
+
+  auto kernel = buildKernel<kernels::Conv2D>(op);
+  ASSERT_THAT(kernel, NotNull());
+
+  checkTensor(kernel->input(), input);
+  checkTensor(kernel->filter(), filter);
+  checkTensor(kernel->bias(), bias);
+  checkTensor(kernel->output(), op);
+  EXPECT_THAT(kernel->params().padding, Eq(op->padding()));
+  EXPECT_THAT(kernel->params().stride_height, Eq(op->stride()->h()));
+  EXPECT_THAT(kernel->params().stride_width, Eq(op->stride()->w()));
+  EXPECT_THAT(kernel->params().dilation_height_factor, Eq(op->dilation()->h()));
+  EXPECT_THAT(kernel->params().dilation_width_factor, Eq(op->dilation()->w()));
+  EXPECT_THAT(kernel->params().activation, Eq(op->fusedActivationFunction()));
+}
+
+TEST_F(KernelBuilderTest, DepthToSpace)
+{
+  auto *input = createInputNode();
+
+  auto *op = createNode<luci::CircleDepthToSpace>();
+  op->input(input);
+
+  op->block_size(11);
+
+  auto kernel = buildKernel<kernels::DepthToSpace>(op);
+  ASSERT_THAT(kernel, NotNull());
+
+  checkTensor(kernel->input(), input);
+  checkTensor(kernel->output(), op);
+  EXPECT_THAT(kernel->params().block_size, Eq(op->block_size()));
+}
+
+TEST_F(KernelBuilderTest, DepthwiseConv2D)
+{
+  auto *input = createInputNode();
+  auto *filter = createInputNode();
+  auto *bias = createInputNode();
+
+  auto *op = createNode<luci::CircleDepthwiseConv2D>();
+  op->input(input);
+  op->filter(filter);
+  op->bias(bias);
+
+  op->padding(luci::Padding::SAME);
+  op->depthMultiplier(11);
+  op->stride()->h(13);
+  op->stride()->w(17);
+  op->dilation()->h(19);
+  op->dilation()->w(23);
+  op->fusedActivationFunction(luci::FusedActFunc::RELU);
+
+  auto kernel = buildKernel<kernels::DepthwiseConv2D>(op);
+  ASSERT_THAT(kernel, NotNull());
+
+  checkTensor(kernel->input(), input);
+  checkTensor(kernel->filter(), filter);
+  checkTensor(kernel->bias(), bias);
+  checkTensor(kernel->output(), op);
+  EXPECT_THAT(kernel->params().padding, Eq(op->padding()));
+  EXPECT_THAT(kernel->params().depth_multiplier, Eq(op->depthMultiplier()));
+  EXPECT_THAT(kernel->params().stride_height, Eq(op->stride()->h()));
+  EXPECT_THAT(kernel->params().stride_width, Eq(op->stride()->w()));
+  EXPECT_THAT(kernel->params().dilation_height_factor, Eq(op->dilation()->h()));
+  EXPECT_THAT(kernel->params().dilation_width_factor, Eq(op->dilation()->w()));
+  EXPECT_THAT(kernel->params().activation, Eq(op->fusedActivationFunction()));
+}
+
+TEST_F(KernelBuilderTest, Elu)
+{
+  auto *input = createInputNode();
+
+  auto *op = createNode<luci::CircleElu>();
+  op->features(input);
+
+  auto kernel = buildKernel<kernels::Elu>(op);
+  ASSERT_THAT(kernel, NotNull());
+
+  checkTensor(kernel->input(), input);
+  checkTensor(kernel->output(), op);
+}
+
+TEST_F(KernelBuilderTest, FullyConnected)
+{
+  auto *input = createInputNode();
+  auto *weights = createInputNode();
+  auto *bias = createInputNode();
+
+  auto *op = createNode<luci::CircleFullyConnected>();
+  op->input(input);
+  op->weights(weights);
+  op->bias(bias);
+
+  op->fusedActivationFunction(luci::FusedActFunc::RELU);
+
+  auto kernel = buildKernel<kernels::FullyConnected>(op);
+  ASSERT_THAT(kernel, NotNull());
+
+  checkTensor(kernel->input(), input);
+  checkTensor(kernel->weights(), weights);
+  checkTensor(kernel->bias(), bias);
+  checkTensor(kernel->output(), op);
+  EXPECT_THAT(kernel->params().activation, Eq(op->fusedActivationFunction()));
+}
+
+TEST_F(KernelBuilderTest, L2Normalize)
+{
+  auto *input = createInputNode();
+
+  auto *op = createNode<luci::CircleL2Normalize>();
+  op->x(input);
+
+  op->fusedActivationFunction(luci::FusedActFunc::RELU);
+
+  auto kernel = buildKernel<kernels::L2Normalize>(op);
+  ASSERT_THAT(kernel, NotNull());
+
+  checkTensor(kernel->input(), input);
+  checkTensor(kernel->output(), op);
+  EXPECT_THAT(kernel->params().activation, Eq(op->fusedActivationFunction()));
+}
+
+TEST_F(KernelBuilderTest, L2Pool2D)
+{
+  auto *input = createInputNode();
+
+  auto *op = createNode<luci::CircleL2Pool2D>();
+  op->value(input);
+
+  op->padding(luci::Padding::SAME);
+  op->filter()->h(11);
+  op->filter()->w(13);
+  op->stride()->h(17);
+  op->stride()->w(19);
+  op->fusedActivationFunction(luci::FusedActFunc::RELU);
+
+  auto kernel = buildKernel<kernels::L2Pool2D>(op);
+  ASSERT_THAT(kernel, NotNull());
+
+  checkTensor(kernel->input(), input);
+  checkTensor(kernel->output(), op);
+  EXPECT_THAT(kernel->params().padding, Eq(op->padding()));
+  EXPECT_THAT(kernel->params().filter_height, Eq(op->filter()->h()));
+  EXPECT_THAT(kernel->params().filter_width, Eq(op->filter()->w()));
+  EXPECT_THAT(kernel->params().stride_height, Eq(op->stride()->h()));
+  EXPECT_THAT(kernel->params().stride_width, Eq(op->stride()->w()));
+  EXPECT_THAT(kernel->params().activation, Eq(op->fusedActivationFunction()));
+}
+
+TEST_F(KernelBuilderTest, LeakyRelu)
+{
+  auto *input = createInputNode();
+
+  auto *op = createNode<luci::CircleLeakyRelu>();
+  op->features(input);
+
+  op->alpha(11.0f);
+
+  auto kernel = buildKernel<kernels::LeakyRelu>(op);
+  ASSERT_THAT(kernel, NotNull());
+
+  checkTensor(kernel->input(), input);
+  checkTensor(kernel->output(), op);
+  EXPECT_THAT(kernel->params().alpha, Eq(op->alpha()));
+}
+
+TEST_F(KernelBuilderTest, LocalResponseNormalization)
+{
+  auto *input = createInputNode();
+
+  auto *op = createNode<luci::CircleLocalResponseNormalization>();
+  op->input(input);
+
+  op->radius(11);
+  op->bias(13.0f);
+  op->alpha(15.0f);
+  op->beta(17.0f);
+
+  auto kernel = buildKernel<kernels::LocalResponseNormalization>(op);
+  ASSERT_THAT(kernel, NotNull());
+
+  checkTensor(kernel->input(), input);
+  checkTensor(kernel->output(), op);
+  EXPECT_THAT(kernel->params().radius, Eq(op->radius()));
+  EXPECT_THAT(kernel->params().bias, Eq(op->bias()));
+  EXPECT_THAT(kernel->params().alpha, Eq(op->alpha()));
+  EXPECT_THAT(kernel->params().beta, Eq(op->beta()));
+}
+
+TEST_F(KernelBuilderTest, Logistic)
+{
+  auto *input = createInputNode();
+
+  auto *op = createNode<luci::CircleLogistic>();
+  op->x(input);
+
+  auto kernel = buildKernel<kernels::Logistic>(op);
+  ASSERT_THAT(kernel, NotNull());
+
+  checkTensor(kernel->input(), input);
+  checkTensor(kernel->output(), op);
+}
+
+TEST_F(KernelBuilderTest, MaxPool2D)
+{
+  auto *input = createInputNode();
+
+  auto *op = createNode<luci::CircleMaxPool2D>();
+  op->value(input);
+
+  op->padding(luci::Padding::SAME);
+  op->filter()->h(11);
+  op->filter()->w(13);
+  op->stride()->h(17);
+  op->stride()->w(19);
+  op->fusedActivationFunction(luci::FusedActFunc::RELU);
+
+  auto kernel = buildKernel<kernels::MaxPool2D>(op);
+  ASSERT_THAT(kernel, NotNull());
+
+  checkTensor(kernel->input(), input);
+  checkTensor(kernel->output(), op);
+  EXPECT_THAT(kernel->params().padding, Eq(op->padding()));
+  EXPECT_THAT(kernel->params().filter_height, Eq(op->filter()->h()));
+  EXPECT_THAT(kernel->params().filter_width, Eq(op->filter()->w()));
+  EXPECT_THAT(kernel->params().stride_height, Eq(op->stride()->h()));
+  EXPECT_THAT(kernel->params().stride_width, Eq(op->stride()->w()));
+  EXPECT_THAT(kernel->params().activation, Eq(op->fusedActivationFunction()));
+}
+
+TEST_F(KernelBuilderTest, Mean)
+{
+  auto *input = createInputNode();
+  auto *axes = createInputNode();
+
+  auto *op = createNode<luci::CircleMean>();
+  op->input(input);
+  op->reduction_indices(axes);
+
+  op->keep_dims(true);
+
+  auto kernel = buildKernel<kernels::Mean>(op);
+  ASSERT_THAT(kernel, NotNull());
+
+  checkTensor(kernel->input(), input);
+  checkTensor(kernel->axes(), axes);
+  checkTensor(kernel->output(), op);
+  EXPECT_THAT(kernel->params().keep_dims, Eq(op->keep_dims()));
+}
+
+TEST_F(KernelBuilderTest, Mul)
+{
+  auto *input1 = createInputNode();
+  auto *input2 = createInputNode();
+
+  auto *op = createNode<luci::CircleMul>();
+  op->x(input1);
+  op->y(input2);
+
+  op->fusedActivationFunction(luci::FusedActFunc::RELU);
+
+  auto kernel = buildKernel<kernels::Mul>(op);
+  ASSERT_THAT(kernel, NotNull());
+
+  checkTensor(kernel->input1(), input1);
+  checkTensor(kernel->input2(), input2);
+  checkTensor(kernel->output(), op);
+  EXPECT_THAT(kernel->params().activation, Eq(op->fusedActivationFunction()));
+}
+
+TEST_F(KernelBuilderTest, Pad)
+{
+  auto *input = createInputNode();
+  auto *paddings = createInputNode();
+
+  auto *op = createNode<luci::CirclePad>();
+  op->input(input);
+  op->paddings(paddings);
+
+  auto kernel = buildKernel<kernels::Pad>(op);
+  ASSERT_THAT(kernel, NotNull());
+
+  checkTensor(kernel->input(), input);
+  checkTensor(kernel->paddings(), paddings);
+  checkTensor(kernel->output(), op);
+}
+
+TEST_F(KernelBuilderTest, Reshape)
+{
+  auto *input = createInputNode();
+  auto *shape = createInputNode();
+
+  auto *op = createNode<luci::CircleReshape>();
+  op->tensor(input);
+  op->shape(shape);
+
+  auto kernel = buildKernel<kernels::Reshape>(op);
+  ASSERT_THAT(kernel, NotNull());
+
+  checkTensor(kernel->input(), input);
+  checkTensor(kernel->shape(), shape);
+  checkTensor(kernel->output(), op);
+}
+
+TEST_F(KernelBuilderTest, ReverseV2)
+{
+  auto *input = createInputNode();
+  auto *axes = createInputNode();
+
+  auto *op = createNode<luci::CircleReverseV2>();
+  op->tensor(input);
+  op->axis(axes);
+
+  auto kernel = buildKernel<kernels::Reverse>(op);
+  ASSERT_THAT(kernel, NotNull());
+
+  checkTensor(kernel->input(), input);
+  checkTensor(kernel->axes(), axes);
+  checkTensor(kernel->output(), op);
+}
+
+TEST_F(KernelBuilderTest, Slice)
+{
+  auto *input = createInputNode();
+  auto *begin = createInputNode();
+  auto *size = createInputNode();
+
+  auto *op = createNode<luci::CircleSlice>();
+  op->input(input);
+  op->begin(begin);
+  op->size(size);
+
+  auto kernel = buildKernel<kernels::Slice>(op);
+  ASSERT_THAT(kernel, NotNull());
+
+  checkTensor(kernel->input(), input);
+  checkTensor(kernel->begin(), begin);
+  checkTensor(kernel->size(), size);
+  checkTensor(kernel->output(), op);
+}
+
+TEST_F(KernelBuilderTest, Softmax)
+{
+  auto *input = createInputNode();
+
+  auto *op = createNode<luci::CircleSoftmax>();
+  op->logits(input);
+
+  op->beta(11.0f);
+
+  auto kernel = buildKernel<kernels::Softmax>(op);
+  ASSERT_THAT(kernel, NotNull());
+
+  checkTensor(kernel->input(), input);
+  checkTensor(kernel->output(), op);
+  EXPECT_THAT(kernel->params().beta, Eq(op->beta()));
+}
+
+TEST_F(KernelBuilderTest, SpaceToDepth)
+{
+  auto *input = createInputNode();
+
+  auto *op = createNode<luci::CircleSpaceToDepth>();
+  op->input(input);
+
+  op->block_size(11);
+
+  auto kernel = buildKernel<kernels::SpaceToDepth>(op);
+  ASSERT_THAT(kernel, NotNull());
+
+  checkTensor(kernel->input(), input);
+  checkTensor(kernel->output(), op);
+  EXPECT_THAT(kernel->params().block_size, op->block_size());
+}
+
+TEST_F(KernelBuilderTest, Split)
+{
+  auto *axis = createInputNode();
+  auto *input = createInputNode();
+  auto *op = createNode<luci::CircleSplit>();
+  auto *output1 = createNodeOut<luci::CircleSplitOut>(op, 0);
+  auto *output2 = createNodeOut<luci::CircleSplitOut>(op, 1);
+
+  op->split_dim(axis);
+  op->input(input);
+
+  op->num_split(2);
+
+  auto kernel = buildKernel<kernels::Split>(op);
+  ASSERT_THAT(kernel, NotNull());
+
+  checkTensor(kernel->axis(), axis);
+  checkTensor(kernel->input(), input);
+  checkTensor(kernel->output(0), output1);
+  checkTensor(kernel->output(1), output2);
+}
+
+TEST_F(KernelBuilderTest, Squeeze)
+{
+  auto *input = createInputNode();
+
+  auto *op = createNode<luci::CircleSqueeze>();
+  op->input(input);
+
+  op->squeeze_dims({11, 13});
+
+  auto kernel = buildKernel<kernels::Squeeze>(op);
+  ASSERT_THAT(kernel, NotNull());
+
+  checkTensor(kernel->input(), input);
+  checkTensor(kernel->output(), op);
+  EXPECT_THAT(kernel->params().squeeze_dims, ElementsAreArray(op->squeeze_dims()));
+}
+
+TEST_F(KernelBuilderTest, StridedSlice)
+{
+  auto *input = createInputNode();
+  auto *begin = createInputNode();
+  auto *end = createInputNode();
+  auto *strides = createInputNode();
+
+  auto *op = createNode<luci::CircleStridedSlice>();
+  op->input(input);
+  op->begin(begin);
+  op->end(end);
+  op->strides(strides);
+
+  op->begin_mask(11);
+  op->ellipsis_mask(13);
+  op->end_mask(17);
+  op->new_axis_mask(19);
+  op->shrink_axis_mask(23);
+
+  auto kernel = buildKernel<kernels::StridedSlice>(op);
+  ASSERT_THAT(kernel, NotNull());
+
+  checkTensor(kernel->input(), input);
+  checkTensor(kernel->begin(), begin);
+  checkTensor(kernel->end(), end);
+  checkTensor(kernel->strides(), strides);
+  checkTensor(kernel->output(), op);
+  EXPECT_THAT(kernel->params().begin_mask, Eq(op->begin_mask()));
+  EXPECT_THAT(kernel->params().ellipsis_mask, Eq(op->ellipsis_mask()));
+  EXPECT_THAT(kernel->params().end_mask, Eq(op->end_mask()));
+  EXPECT_THAT(kernel->params().new_axis_mask, Eq(op->new_axis_mask()));
+  EXPECT_THAT(kernel->params().shrink_axis_mask, Eq(op->shrink_axis_mask()));
+}
+
+TEST_F(KernelBuilderTest, Transpose)
+{
+  auto *input = createInputNode();
+  auto *perm = createInputNode();
+
+  auto *op = createNode<luci::CircleTranspose>();
+  op->a(input);
+  op->perm(perm);
+
+  auto kernel = buildKernel<kernels::Transpose>(op);
+  ASSERT_THAT(kernel, NotNull());
+
+  checkTensor(kernel->input(), input);
+  checkTensor(kernel->perm(), perm);
+  checkTensor(kernel->output(), op);
+}
+
+TEST_F(KernelBuilderTest, TransposeConv)
+{
+  auto *output_shape = createInputNode();
+  auto *filter = createInputNode();
+  auto *input = createInputNode();
+
+  auto *op = createNode<luci::CircleTransposeConv>();
+  op->inputSizes(output_shape);
+  op->filter(filter);
+  op->outBackprop(input);
+
+  op->padding(luci::Padding::SAME);
+  op->stride()->h(11);
+  op->stride()->w(13);
+
+  auto kernel = buildKernel<kernels::TransposeConv>(op);
+  ASSERT_THAT(kernel, NotNull());
+
+  checkTensor(kernel->output_shape(), output_shape);
+  checkTensor(kernel->filter(), filter);
+  checkTensor(kernel->input(), input);
+  checkTensor(kernel->output(), op);
+  EXPECT_THAT(kernel->params().padding, Eq(op->padding()));
+  EXPECT_THAT(kernel->params().stride_height, Eq(op->stride()->h()));
+  EXPECT_THAT(kernel->params().stride_width, Eq(op->stride()->w()));
+}
+
+TEST_F(KernelBuilderTest, Unpack)
+{
+  auto *input = createInputNode();
+  auto *op = createNode<luci::CircleUnpack>();
+  auto *output1 = createNodeOut<luci::CircleUnpackOut>(op, 0);
+  auto *output2 = createNodeOut<luci::CircleUnpackOut>(op, 1);
+
+  op->value(input);
+
+  op->num(2);
+  op->axis(11);
+
+  auto kernel = buildKernel<kernels::Unpack>(op);
+  ASSERT_THAT(kernel, NotNull());
+
+  checkTensor(kernel->input(), input);
+  checkTensor(kernel->output(0), output1);
+  checkTensor(kernel->output(1), output2);
+  EXPECT_THAT(kernel->params().axis, Eq(op->axis()));
+}
+
+TEST_F(KernelBuilderTest, NonExisting1_NEG)
+{
+  auto *op = createNode();
+  ASSERT_ANY_THROW(buildKernel(op));
+}
+
+TEST_F(KernelBuilderTest, NonExisting2_NEG)
+{
+  auto *op = createNode();
+  ASSERT_ANY_THROW(buildKernel(op));
+}
+
+TEST_F(KernelBuilderTest, NonExisting3_NEG)
+{
+  auto *op = createNode();
+  ASSERT_ANY_THROW(buildKernel(op));
+}
+
+} // namespace
+} // namespace luci_interpreter
diff --git a/compiler/luci-interpreter/src/loader/ModuleLoader.cpp b/compiler/luci-interpreter/src/loader/ModuleLoader.cpp
index 7780a61..b9a2ae0 100644
---
a/compiler/luci-interpreter/src/loader/ModuleLoader.cpp +++ b/compiler/luci-interpreter/src/loader/ModuleLoader.cpp @@ -41,8 +41,11 @@ void ModuleLoader::load() { const loco::Graph *graph = _module->graph(i); RuntimeGraph *runtime_graph = _graph_to_runtime_graph.at(graph); - GraphLoader loader(*this, graph, runtime_graph, _runtime_to_ir, _node_to_tensor); - loader.load(); + GraphLoader loader(graph, runtime_graph, _runtime_to_ir, _graph_to_runtime_graph, + _node_to_tensor); + loader.loadTensors(); + loader.initInputOutputTensors(); + loader.loadOperators(); } } diff --git a/compiler/luci-interpreter/src/loader/ModuleLoader.h b/compiler/luci-interpreter/src/loader/ModuleLoader.h index 954dbfb..1af0ed7 100644 --- a/compiler/luci-interpreter/src/loader/ModuleLoader.h +++ b/compiler/luci-interpreter/src/loader/ModuleLoader.h @@ -36,11 +36,6 @@ public: void load(); - RuntimeGraph *getRuntimeGraph(const loco::Graph *graph) const - { - return _graph_to_runtime_graph.at(graph); - } - private: const luci::Module *_module; RuntimeModule *_runtime_module; diff --git a/compiler/luci-value-test/CMakeLists.txt b/compiler/luci-value-test/CMakeLists.txt index 3a5c42b..ec74634 100644 --- a/compiler/luci-value-test/CMakeLists.txt +++ b/compiler/luci-value-test/CMakeLists.txt @@ -20,6 +20,6 @@ add_test(NAME luci_value_test COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/evalverify.sh" "${CMAKE_CURRENT_BINARY_DIR}" "${ARTIFACTS_BIN_PATH}" - "${NNCC_OVERLAY_DIR}/venv_1_13_2" + "${NNCC_OVERLAY_DIR}/venv_2_3_0" ${LUCI_VALUE_TESTS} ) diff --git a/compiler/luci-value-test/evalverify.sh b/compiler/luci-value-test/evalverify.sh index dfd55a6..12c9a45 100755 --- a/compiler/luci-value-test/evalverify.sh +++ b/compiler/luci-value-test/evalverify.sh @@ -4,8 +4,10 @@ # # HOW TO USE # -# ./evalverify.sh ... -# work_dir : build directory of luci-value-test (ex: build/compiler/luci-value-test) +# ./evalverify.sh ... +# bin_dir : build directory of luci-value-test (ex: build/compiler/luci-value-test) +# work_dir : artifacts directoy where test materials exist +# venv_dir : python virtual environment home directory VERIFY_SOURCE_PATH="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" VERIFY_SCRIPT_PATH="${VERIFY_SOURCE_PATH}/luci_eval_verifier.py" diff --git a/compiler/luci-value-test/luci_eval_verifier.py b/compiler/luci-value-test/luci_eval_verifier.py index 6999110..7a2cebb 100755 --- a/compiler/luci-value-test/luci_eval_verifier.py +++ b/compiler/luci-value-test/luci_eval_verifier.py @@ -35,6 +35,10 @@ for i in range(num_inputs): input_data = np.array( np.random.randint(0, 256, size=input_details["shape"]), input_details["dtype"]) + elif input_details["dtype"] == np.bool_: + input_data = np.array( + np.random.choice(a=[True, False], size=input_details["shape"]), + input_details["dtype"]) else: raise SystemExit("Unsupported input dtype") @@ -44,11 +48,6 @@ for i in range(num_inputs): # Do inference interpreter.invoke() -# Get reference output data. -assert len(interpreter.get_output_details()) == 1 # TODO: Support multiple outputs -output_details = interpreter.get_output_details()[0] -ref_output_data = interpreter.get_tensor(output_details["index"]) - # Execute luci interpreter. 
subprocess.run( [ @@ -56,27 +55,56 @@ subprocess.run( str(num_inputs), circle_model + ".input", circle_model + ".output" ], check=True) -output_data = np.fromfile(circle_model + ".output", output_details["dtype"]) -shape_file = open(circle_model + ".output.shape", 'r') -output_shape = [int(i) for i in shape_file.read().split(',')] -shape_file.close() -luci_output_data = np.reshape(output_data, output_shape) # Compare the results. -try: - if output_details["dtype"] == np.uint8: - if np.allclose(luci_output_data, ref_output_data, rtol=0, atol=0) == False: - raise SystemExit("Execution result of " + tflite_model + - " does not match with " + circle_model) - elif output_details["dtype"] == np.float32: - if np.allclose( - luci_output_data, ref_output_data, rtol=1.e-5, atol=1.e-5) == False: - raise SystemExit("Execution result of " + tflite_model + - " does not match with " + circle_model) - else: - raise SystemExit("Unsupported data type: ", output_details["dtype"]) -except: - print(traceback.format_exc()) - quit(255) +for idx in range(len(interpreter.get_output_details())): + output_details = interpreter.get_output_details()[idx] + output_data = np.fromfile(circle_model + ".output" + str(idx), + output_details["dtype"]) + shape_file = open(circle_model + ".output" + str(idx) + ".shape", 'r') + output_shape = [int(i) for i in shape_file.read().split(',')] + luci_output_data = np.reshape(output_data, output_shape) + try: + if output_details["dtype"] == np.uint8: + if np.allclose( + luci_output_data, + interpreter.get_tensor( + interpreter.get_output_details()[idx]["index"]), + rtol=0, + atol=0) == False: + raise SystemExit("Execution result of " + tflite_model + + " does not match with " + circle_model) + elif output_details["dtype"] == np.float32: + if np.allclose( + luci_output_data, + interpreter.get_tensor( + interpreter.get_output_details()[idx]["index"]), + rtol=1.e-5, + atol=1.e-5) == False: + raise SystemExit("Execution result of " + tflite_model + + " does not match with " + circle_model) + elif output_details["dtype"] == np.int64: + if np.allclose( + luci_output_data, + interpreter.get_tensor( + interpreter.get_output_details()[idx]["index"]), + rtol=0, + atol=0) == False: + raise SystemExit("Execution result of " + tflite_model + + " does not match with " + circle_model) + elif output_details["dtype"] == np.int32: + if np.allclose( + luci_output_data, + interpreter.get_tensor( + interpreter.get_output_details()[idx]["index"]), + rtol=0, + atol=0) == False: + raise SystemExit("Execution result of " + tflite_model + + " does not match with " + circle_model) + else: + raise SystemExit("Unsupported data type: ", output_details["dtype"]) + except: + print(traceback.format_exc()) + quit(255) quit(0) diff --git a/compiler/luci-value-test/test.lst b/compiler/luci-value-test/test.lst index 6a332f9..0e5231e 100644 --- a/compiler/luci-value-test/test.lst +++ b/compiler/luci-value-test/test.lst @@ -1,81 +1,183 @@ #addeval(Abs_000) addeval(Add_000) +#addeval(Add_001) addeval(Add_U8_000) -#addeval(ArgMax_000) -#addeval(ArgMax_001) -#addeval(ArgMax_002) -#addeval(ArgMax_003) -#addeval(ArgMax_U8_000) -#addeval(ArgMax_U8_001) -#addeval(ArgMax_U8_002) -#addeval(ArgMax_U8_003) +#addeval(AddN_000) +addeval(ArgMax_000) +addeval(ArgMax_001) +addeval(ArgMax_002) +addeval(ArgMax_003) +addeval(ArgMax_U8_000) +addeval(ArgMax_U8_001) +addeval(ArgMax_U8_002) +addeval(ArgMax_U8_003) +#addeval(ArgMin_000) +#addeval(ArgMin_001) +#addeval(ArgMin_002) +#addeval(ArgMin_003) +#addeval(ArgMin_U8_000) 
+#addeval(ArgMin_U8_001) +#addeval(ArgMin_U8_002) +#addeval(ArgMin_U8_003) addeval(AveragePool2D_000) +#addeval(BatchMatMul_000) #addeval(BatchMatMulV2_000) #addeval(BatchMatMulV2_001) #addeval(BatchToSpaceND_000) #addeval(Cast_000) +#addeval(Cast_001) +#addeval(Ceil_000) addeval(Concatenation_000) addeval(Concatenation_U8_000) addeval(Conv2D_000) addeval(Conv2D_001) addeval(Conv2D_002) +#addeval(Conv2D_003) addeval(Conv2D_U8_000) addeval(Conv2D_U8_001) #addeval(Cos_000) +#addeval(DepthToSpace_000) addeval(DepthwiseConv2D_000) addeval(DepthwiseConv2D_U8_000) +#addeval(DepthwiseConv2D_U8_001) +addeval(DepthwiseConv2D_001) #addeval(Div_000) +addeval(ELU_000) #addeval(Equal_000) #addeval(Exp_000) +#addeval(ExpandDims_000) +#addeval(ExpandDims_001) +#addeval(ExpandDims_002) +#addeval(ExpandDims_003) +#addeval(Fill_000) +#addeval(Fill_001) +#addeval(Floor_000) +#addeval(FloorDiv_000) +#addeval(FloorDiv_001) +#addeval(FloorMod_000) +#addeval(FloorMod_001) addeval(FullyConnected_000) addeval(FullyConnected_001) -#addeval(FullyConnected_002) +addeval(FullyConnected_002) #addeval(FullyConnected_U8_000) #addeval(Gather_000) -#addeval(If_000) -#addeval(If_001) +#addeval(GatherNd_000) +#addeval(Greater_000) +#addeval(GreaterEqual_000) +addeval(If_000) +addeval(If_001) +addeval(L2Normalize_000) +addeval(L2Pool2D_000) +#addeval(L2Pool2D_U8_000) +addeval(LeakyRelu_000) +#addeval(Less_000) +#addeval(LessEqual_000) +addeval(LocalResponseNormalization_000) +#addeval(Log_000) +#addeval(LogicalAnd_000) #addeval(LogicalNot_000) #addeval(LogicalOr_000) -#addeval(Logistic_000) +addeval(Logistic_000) +#addeval(LogSoftmax_000) +#addeval(MatMul_000) +#addeval(MatrixDiag_000) +#addeval(MatrixSetDiag_000) +#addeval(Maximum_000) addeval(MaxPool2D_000) addeval(MaxPool2D_U8_000) addeval(Mean_000) addeval(Mean_001) -addeval(Mean_U8_000) +#addeval(Mean_U8_000) +#addeval(Minimum_000) +#addeval(MirrorPad_000) addeval(Mul_000) #addeval(Mul_U8_000) +#addeval(Neg_000) +#addeval(NotEqual_000) +#addeval(OneHot_000) +#addeval(OneHot_001) +#addeval(OneHot_002) +#addeval(OneHot_003) #addeval(Pack_000) #addeval(Pack_U8_000) addeval(Pad_000) addeval(Pad_U8_000) +#addeval(Pow_000) +#addeval(PRelu_000) +#addeval(Range_000) +#addeval(Rank_000) +#addeval(ReduceAny_000) +#addeval(ReduceAny_001) +#addeval(ReduceAny_002) +#addeval(ReduceAny_003) +#addeval(ReduceMax_000) +#addeval(ReduceMin_000) #addeval(ReduceProd_000) #addeval(ReduceProd_001) #addeval(ReduceProd_002) #addeval(ReduceProd_003) #addeval(ReLU_000) +#addeval(ReLU6_000) +#addeval(ReLUN1To1_000) addeval(Reshape_000) addeval(Reshape_001) addeval(Reshape_002) #addeval(Reshape_003) addeval(Reshape_U8_000) +#addeval(ResizeBilinear_000) +#addeval(ResizeNearestNeighbor_000) +#addeval(ReverseSequence_000) +#addeval(ReverseV2_000) +#addeval(Round_000) #addeval(Rsqrt_000) +#addeval(ScatterNd_000) +#addeval(SegmentSum_000) +#addeval(Select_000) +#addeval(Select_001) +#addeval(Select_002) +#addeval(SelectV2_000) +#addeval(SelectV2_001) +#addeval(SelectV2_002) +#addeval(Shape_000) #addeval(Sin_000) +addeval(Slice_000) addeval(Softmax_000) #addeval(Softmax_U8_000) #addeval(SpaceToBatchND_000) #addeval(SpaceToBatchND_001) #addeval(SpaceToBatchND_002) #addeval(SpaceToBatchND_003) -#addeval(StridedSlice_000) -#addeval(StridedSlice_001) +addeval(SpaceToDepth_000) +#addeval(SparseToDense_000) +addeval(Split_000) +#addeval(SplitV_000) +#addeval(Sqrt_000) +#addeval(Square_000) +#addeval(SquaredDifference_000) +addeval(Squeeze_000) +addeval(StridedSlice_000) +addeval(StridedSlice_001) 
+addeval(StridedSlice_002) #addeval(Sub_000) #addeval(Sub_U8_000) +#addeval(Sum_000) +#addeval(Sum_001) #addeval(Tanh_000) #addeval(Tile_000) #addeval(Tile_U8_000) -#addeval(Transpose_000) -#addeval(Unpack_000) -#addeval(Unpack_001) -#addeval(Unpack_002) +#addeval(TopKV2_000) +#addeval(TopKV2_001) +addeval(Transpose_000) +#addeval(TransposeConv_000) +addeval(Unpack_000) +addeval(Unpack_001) +addeval(Unpack_002) +addeval(Unpack_003) +#addeval(Where_000) +#addeval(Where_001) #addeval(While_000) #addeval(While_001) +#addeval(While_002) +#addeval(While_003) +#addeval(YUV_TO_RGB_U8_000) +#addeval(ZerosLike_000) diff --git a/compiler/luci-value-test/tester/src/EvalTester.cpp b/compiler/luci-value-test/tester/src/EvalTester.cpp index 58f62f5..09eef22 100644 --- a/compiler/luci-value-test/tester/src/EvalTester.cpp +++ b/compiler/luci-value-test/tester/src/EvalTester.cpp @@ -129,7 +129,7 @@ int entry(int argc, char **argv) assert(num_inputs == input_nodes.size()); for (int32_t i = 0; i < num_inputs; i++) { - const auto *input_node = dynamic_cast(input_nodes[i]); + const auto *input_node = loco::must_cast(input_nodes[i]); std::vector input_data(getTensorSize(input_node)); readDataFromFile(std::string(input_prefix) + std::to_string(i), input_data.data(), input_data.size()); @@ -141,24 +141,35 @@ int entry(int argc, char **argv) // Get output. const auto output_nodes = loco::output_nodes(module->graph()); - // TODO: Support multiple outputs - assert(output_nodes.size() == 1); - const auto *output_node = dynamic_cast(output_nodes[0]); - std::vector output_data(getTensorSize(output_node)); - interpreter.readOutputTensor(output_node, output_data.data(), output_data.size()); - - // Output data is written in ${output_file} - // (ex: Add.circle.output) - // Output shape is written in ${output_file}.shape - // (ex: Add.circle.output.shape) - // TODO: Use HDF5 file format - writeDataToFile(output_file, output_data.data(), output_data.size()); - auto shape_str = std::to_string(output_node->dim(0).value()); - for (int i = 1; i < output_node->rank(); i++) + for (int i = 0; i < module->graph()->outputs()->size(); i++) { - shape_str += ","; - shape_str += std::to_string(output_node->dim(i).value()); + const auto *output_node = loco::must_cast(output_nodes[i]); + std::vector output_data(getTensorSize(output_node)); + interpreter.readOutputTensor(output_node, output_data.data(), output_data.size()); + + // Output data is written in ${output_file} + // (ex: Add.circle.output0) + // Output shape is written in ${output_file}.shape + // (ex: Add.circle.output0.shape) + writeDataToFile(std::string(output_file) + std::to_string(i), output_data.data(), + output_data.size()); + // In case of Tensor output is Scalar value. 
+ // The output tensor with rank 0 is treated as a scalar with shape (1) + if (output_node->rank() == 0) + { + writeDataToFile(std::string(output_file) + std::to_string(i) + ".shape", "1", 1); + } + else + { + auto shape_str = std::to_string(output_node->dim(0).value()); + for (int j = 1; j < output_node->rank(); j++) + { + shape_str += ","; + shape_str += std::to_string(output_node->dim(j).value()); + } + writeDataToFile(std::string(output_file) + std::to_string(i) + ".shape", shape_str.c_str(), + shape_str.size()); + } } - writeDataToFile(std::string(output_file) + ".shape", shape_str.c_str(), shape_str.size()); return EXIT_SUCCESS; } diff --git a/compiler/luci/export/src/CircleOperationExporter.cpp b/compiler/luci/export/src/CircleOperationExporter.cpp index 3c01b67..bca1220 100644 --- a/compiler/luci/export/src/CircleOperationExporter.cpp +++ b/compiler/luci/export/src/CircleOperationExporter.cpp @@ -102,6 +102,7 @@ public: void visit(luci::CircleMirrorPad *) final; void visit(luci::CircleMul *) final; void visit(luci::CircleNeg *) final; + void visit(luci::CircleNonMaxSuppressionV4 *) final; void visit(luci::CircleNotEqual *) final; void visit(luci::CircleOneHot *) final; void visit(luci::CirclePack *) final; @@ -149,6 +150,7 @@ public: void visit(luci::CircleTopKV2 *) final; void visit(luci::CircleTranspose *) final; void visit(luci::CircleTransposeConv *) final; + void visit(luci::CircleUnique *) final; void visit(luci::CircleUnpack *) final; void visit(luci::CircleWhere *) final; void visit(luci::CircleWhile *) final; @@ -165,9 +167,11 @@ public: // Virtual for multiple-outputs void visit(luci::CircleCustomOut *) final {} void visit(luci::CircleIfOut *) final {} + void visit(luci::CircleNonMaxSuppressionV4Out *) final {} void visit(luci::CircleSplitOut *) final {} void visit(luci::CircleSplitVOut *) final {} void visit(luci::CircleTopKV2Out *) final {} + void visit(luci::CircleUniqueOut *) final {} void visit(luci::CircleUnpackOut *) final {} void visit(luci::CircleWhileOut *) final {} @@ -599,7 +603,9 @@ void OperationExporter::visit(luci::CircleLocalResponseNormalization *node) { export_simple(node, circle::BuiltinOperator_LOCAL_RESPONSE_NORMALIZATION, circle::BuiltinOptions_LocalResponseNormalizationOptions, - CreateLocalResponseNormalizationOptions(builder).Union()); + CreateLocalResponseNormalizationOptions(builder, node->radius(), node->bias(), + node->alpha(), node->beta()) + .Union()); } void OperationExporter::visit(luci::CircleLog *node) @@ -691,6 +697,49 @@ void OperationExporter::visit(luci::CircleNeg *node) CreateNegOptions(builder).Union()); } +void OperationExporter::visit(luci::CircleNonMaxSuppressionV4 *node) +{ + auto nms_outs = loco::succs(node); + assert(nms_outs.size() == 2); + + uint32_t op_idx = + md.registerBuiltinOpcode(circle::BuiltinOperator_NON_MAX_SUPPRESSION_V4, node->op_version()); + std::vector inputs_vec{ + get_tensor_index(node->boxes()), get_tensor_index(node->scores()), + get_tensor_index(node->max_output_size()), get_tensor_index(node->iou_threshold()), + get_tensor_index(node->score_threshold()), + }; + std::vector outputs_vec; + + for (uint32_t idx = 0; idx < nms_outs.size(); ++idx) + { + // store in order of index + bool found = false; + for (auto out : nms_outs) + { + auto nms_out = loco::must_cast(out); + if (nms_out->index() == static_cast(idx)) + { + outputs_vec.push_back(get_tensor_index(nms_out)); + found = true; + break; + } + } + if (!found) + { + INTERNAL_EXN("Invalid NonMaxSuppressionV4 output"); + } + } + + auto inputs = 
builder.CreateVector(inputs_vec); + auto outputs = builder.CreateVector(outputs_vec); + auto options = CreateNonMaxSuppressionV4Options(builder); + auto op_offset = + CreateOperator(builder, op_idx, inputs, outputs, + circle::BuiltinOptions_NonMaxSuppressionV4Options, options.Union()); + gd._operators.push_back(op_offset); +} + void OperationExporter::visit(luci::CircleNotEqual *node) { export_simple(node, circle::BuiltinOperator_NOT_EQUAL, circle::BuiltinOptions_NotEqualOptions, @@ -890,7 +939,7 @@ void OperationExporter::visit(luci::CircleSpaceToDepth *node) { export_simple(node, circle::BuiltinOperator_SPACE_TO_DEPTH, circle::BuiltinOptions_SpaceToDepthOptions, - CreateSpaceToDepthOptions(builder).Union()); + CreateSpaceToDepthOptions(builder, node->block_size()).Union()); } void OperationExporter::visit(luci::CircleSparseToDense *node) @@ -1090,6 +1139,43 @@ void OperationExporter::visit(luci::CircleTransposeConv *node) .Union()); } +void OperationExporter::visit(luci::CircleUnique *node) +{ + auto unique_outs = loco::succs(node); + assert(int32_t(unique_outs.size()) == 2); + uint32_t op_idx = md.registerBuiltinOpcode(circle::BuiltinOperator_UNIQUE, node->op_version()); + + std::vector inputs_vec{get_tensor_index(node->input())}; + std::vector outputs_vec; + + for (int32_t index = 0; index < 2; index++) + { + // store in order of index + bool found = false; + for (auto out : unique_outs) + { + auto unique_out = loco::must_cast(out); + if (unique_out->index() == index) + { + outputs_vec.push_back(get_tensor_index(unique_out)); + found = true; + break; + } + } + if (!found) + { + INTERNAL_EXN("Invalid Unique output"); + } + } + + auto inputs = builder.CreateVector(inputs_vec); + auto outputs = builder.CreateVector(outputs_vec); + auto options = CreateUniqueOptions(builder, to_circle_tensortype(node->idx_out_type())); + auto op_offset = CreateOperator(builder, op_idx, inputs, outputs, + circle::BuiltinOptions_UniqueOptions, options.Union()); + gd._operators.push_back(op_offset); +} + void OperationExporter::visit(luci::CircleUnpack *node) { LOGGER(l); diff --git a/compiler/luci/export/src/CircleTensorExporter.cpp b/compiler/luci/export/src/CircleTensorExporter.cpp index 5cad392..dc8c2fb 100644 --- a/compiler/luci/export/src/CircleTensorExporter.cpp +++ b/compiler/luci/export/src/CircleTensorExporter.cpp @@ -302,7 +302,10 @@ encodeQuantizationParameters(FlatBufferBuilder &builder, luci::CircleQuantParam scale = builder.CreateVector(quantparam->scale); zero_point = builder.CreateVector(quantparam->zerop); } - return circle::CreateQuantizationParameters(builder, min, max, scale, zero_point); + // Note: QuantizationDetails is not supported + return circle::CreateQuantizationParameters(builder, min, max, scale, zero_point, + circle::QuantizationDetails::QuantizationDetails_NONE, + 0, quantparam->quantized_dimension); } void exportOpDefinedTensor(const CircleTensoInfo &info, FlatBufferBuilder &builder, diff --git a/compiler/luci/import/include/luci/Import/Nodes.h b/compiler/luci/import/include/luci/Import/Nodes.h index 2719a5a..825c214 100644 --- a/compiler/luci/import/include/luci/Import/Nodes.h +++ b/compiler/luci/import/include/luci/Import/Nodes.h @@ -73,6 +73,7 @@ #include "Nodes/CircleMirrorPad.h" #include "Nodes/CircleMul.h" #include "Nodes/CircleNeg.h" +#include "Nodes/CircleNonMaxSuppressionV4.h" #include "Nodes/CircleNotEqual.h" #include "Nodes/CircleOneHot.h" #include "Nodes/CirclePack.h" @@ -120,6 +121,7 @@ #include "Nodes/CircleTopKV2.h" #include "Nodes/CircleTranspose.h" #include 
"Nodes/CircleTransposeConv.h" +#include "Nodes/CircleUnique.h" #include "Nodes/CircleUnpack.h" #include "Nodes/CircleWhere.h" #include "Nodes/CircleWhile.h" diff --git a/compiler/luci/import/include/luci/Import/Nodes/CircleNonMaxSuppressionV4.h b/compiler/luci/import/include/luci/Import/Nodes/CircleNonMaxSuppressionV4.h new file mode 100644 index 0000000..f193aae --- /dev/null +++ b/compiler/luci/import/include/luci/Import/Nodes/CircleNonMaxSuppressionV4.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __LUCI_IMPORT_OP_CIRCLE_NON_MAX_SUPPRESSION_V4_H__ +#define __LUCI_IMPORT_OP_CIRCLE_NON_MAX_SUPPRESSION_V4_H__ + +#include "luci/Import/GraphBuilderBase.h" + +namespace luci +{ + +class CircleNonMaxSuppressionV4GraphBuilder : public GraphBuilderBase +{ +public: + bool validate(const ValidateArgs &args) const final; + + void build(const circle::OperatorT &op, GraphBuilderContext *context) const final; +}; + +} // namespace luci + +#endif // __LUCI_IMPORT_OP_CIRCLE_NON_MAX_SUPPRESSION_V4_H__ diff --git a/compiler/luci/import/include/luci/Import/Nodes/CircleUnique.h b/compiler/luci/import/include/luci/Import/Nodes/CircleUnique.h new file mode 100644 index 0000000..ed5b503 --- /dev/null +++ b/compiler/luci/import/include/luci/Import/Nodes/CircleUnique.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef __LUCI_IMPORT_OP_CIRCLE_UNIQUE_H__ +#define __LUCI_IMPORT_OP_CIRCLE_UNIQUE_H__ + +#include "luci/Import/GraphBuilderBase.h" + +namespace luci +{ + +class CircleUniqueGraphBuilder : public GraphBuilderBase +{ +public: + bool validate(const ValidateArgs &args) const final; + + void build(const circle::OperatorT &op, GraphBuilderContext *context) const final; +}; + +} // namespace luci + +#endif // __LUCI_IMPORT_OP_CIRCLE_UNIQUE_H__ diff --git a/compiler/luci/import/src/CircleReader.cpp b/compiler/luci/import/src/CircleReader.cpp index 81e945d..bc7f397 100644 --- a/compiler/luci/import/src/CircleReader.cpp +++ b/compiler/luci/import/src/CircleReader.cpp @@ -156,6 +156,7 @@ luci_quantparam(const circle::QuantizationParametersT *quantization) const auto &max = quantization->max; const auto &scale = quantization->scale; const auto &zero_point = quantization->zero_point; + const auto &quantized_dimension = quantization->quantized_dimension; if ((!min.empty() && !max.empty()) || (!scale.empty() && !zero_point.empty())) { @@ -165,6 +166,7 @@ luci_quantparam(const circle::QuantizationParametersT *quantization) quantparam->max = max; quantparam->scale = scale; quantparam->zerop = zero_point; + quantparam->quantized_dimension = quantized_dimension; return quantparam; } diff --git a/compiler/luci/import/src/GraphBuilderRegistry.cpp b/compiler/luci/import/src/GraphBuilderRegistry.cpp index d29557f..cc328cc 100644 --- a/compiler/luci/import/src/GraphBuilderRegistry.cpp +++ b/compiler/luci/import/src/GraphBuilderRegistry.cpp @@ -82,6 +82,7 @@ GraphBuilderRegistry::GraphBuilderRegistry() CIRCLE_NODE(MIRROR_PAD, CircleMirrorPadGraphBuilder); // 100 CIRCLE_NODE(MUL, CircleMulGraphBuilder); // 18 CIRCLE_NODE(NEG, CircleNegGraphBuilder); // 59 + CIRCLE_NODE(NON_MAX_SUPPRESSION_V4, CircleNonMaxSuppressionV4GraphBuilder); // 120, CIRCLE_NODE(NOT_EQUAL, CircleNotEqualGraphBuilder); // 72 CIRCLE_NODE(ONE_HOT, CircleOneHotGraphBuilder); // 85 CIRCLE_NODE(PACK, CirclePackGraphBuilder); // 83 @@ -129,6 +130,7 @@ GraphBuilderRegistry::GraphBuilderRegistry() CIRCLE_NODE(TOPK_V2, CircleTopKV2GraphBuilder); // 48 CIRCLE_NODE(TRANSPOSE, CircleTransposeGraphBuilder); // 39 CIRCLE_NODE(TRANSPOSE_CONV, CircleTransposeConvGraphBuilder); // 67 + CIRCLE_NODE(UNIQUE, CircleUniqueGraphBuilder); // 103 CIRCLE_NODE(UNPACK, CircleUnpackGraphBuilder); // 88 CIRCLE_NODE(WHERE, CircleWhereGraphBuilder); // 109 CIRCLE_NODE(WHILE, CircleWhileGraphBuilder); // 119 @@ -155,10 +157,8 @@ GraphBuilderRegistry::GraphBuilderRegistry() // BuiltinOperator_ARG_MAX = 56, // BuiltinOperator_PADV2 = 60, // BuiltinOperator_FAKE_QUANT = 80, - // BuiltinOperator_UNIQUE = 103, // BuiltinOperator_QUANTIZE = 114, // BuiltinOperator_HARD_SWISH = 117, - // BuiltinOperator_NON_MAX_SUPPRESSION_V4 = 120, // BuiltinOperator_NON_MAX_SUPPRESSION_V5 = 121, // BuiltinOperator_DENSIFY = 124, } diff --git a/compiler/luci/import/src/Importer.test.cpp b/compiler/luci/import/src/Importer.test.cpp index 4426e15..8366546 100644 --- a/compiler/luci/import/src/Importer.test.cpp +++ b/compiler/luci/import/src/Importer.test.cpp @@ -20,4 +20,9 @@ #include -TEST(TensorFlowLiteImport, Dummy) { luci::Importer import; } +TEST(TensorFlowLiteImport, Dummy) +{ + luci::Importer import; + + SUCCEED(); +} diff --git a/compiler/luci/import/src/Nodes/CircleAbs.cpp b/compiler/luci/import/src/Nodes/CircleAbs.cpp index 9054986..3556dc7 100644 --- a/compiler/luci/import/src/Nodes/CircleAbs.cpp +++ b/compiler/luci/import/src/Nodes/CircleAbs.cpp @@ -36,7 +36,7 @@ CircleNode 
*CircleAbsGraphBuilder::build_node(const circle::OperatorT &, loco::Graph *graph) const { auto *node = graph->nodes()->create(); - node->x(inputs[0]); + node->x(inputs.at(0)); return node; } diff --git a/compiler/luci/import/src/Nodes/CircleAdd.cpp b/compiler/luci/import/src/Nodes/CircleAdd.cpp index 3b1bb73..b767d4a 100644 --- a/compiler/luci/import/src/Nodes/CircleAdd.cpp +++ b/compiler/luci/import/src/Nodes/CircleAdd.cpp @@ -36,8 +36,8 @@ CircleNode *CircleAddGraphBuilder::build_node(const circle::OperatorT &op, loco::Graph *graph) const { auto *node = graph->nodes()->create(); - node->x(inputs[0]); - node->y(inputs[1]); + node->x(inputs.at(0)); + node->y(inputs.at(1)); const auto *options = op.builtin_options.AsAddOptions(); node->fusedActivationFunction(luci_actfunc(options->fused_activation_function)); diff --git a/compiler/luci/import/src/Nodes/CircleArgMax.cpp b/compiler/luci/import/src/Nodes/CircleArgMax.cpp index 2679827..10e8516 100644 --- a/compiler/luci/import/src/Nodes/CircleArgMax.cpp +++ b/compiler/luci/import/src/Nodes/CircleArgMax.cpp @@ -36,8 +36,8 @@ CircleNode *CircleArgMaxGraphBuilder::build_node(const circle::OperatorT &op, loco::Graph *graph) const { auto *node = graph->nodes()->create(); - node->input(inputs[0]); - node->dimension(inputs[1]); + node->input(inputs.at(0)); + node->dimension(inputs.at(1)); const auto *options = op.builtin_options.AsArgMaxOptions(); node->output_type(luci_datatype(options->output_type)); diff --git a/compiler/luci/import/src/Nodes/CircleArgMin.cpp b/compiler/luci/import/src/Nodes/CircleArgMin.cpp index 4d85bbf..5ff534d 100644 --- a/compiler/luci/import/src/Nodes/CircleArgMin.cpp +++ b/compiler/luci/import/src/Nodes/CircleArgMin.cpp @@ -36,8 +36,8 @@ CircleNode *CircleArgMinGraphBuilder::build_node(const circle::OperatorT &op, loco::Graph *graph) const { auto *node = graph->nodes()->create(); - node->input(inputs[0]); - node->dimension(inputs[1]); + node->input(inputs.at(0)); + node->dimension(inputs.at(1)); const auto *options = op.builtin_options.AsArgMinOptions(); node->output_type(luci_datatype(options->output_type)); diff --git a/compiler/luci/import/src/Nodes/CircleAveragePool2D.cpp b/compiler/luci/import/src/Nodes/CircleAveragePool2D.cpp index cfc3cf1..ad011f7 100644 --- a/compiler/luci/import/src/Nodes/CircleAveragePool2D.cpp +++ b/compiler/luci/import/src/Nodes/CircleAveragePool2D.cpp @@ -34,7 +34,7 @@ CircleNode *CircleAveragePool2DGraphBuilder::build_node(const circle::OperatorT loco::Graph *graph) const { auto *node = graph->nodes()->create(); - node->value(inputs[0]); + node->value(inputs.at(0)); const auto *options = op.builtin_options.AsPool2DOptions(); node->padding(luci_padding(options->padding)); diff --git a/compiler/luci/import/src/Nodes/CircleBCQFullyConnected.cpp b/compiler/luci/import/src/Nodes/CircleBCQFullyConnected.cpp index 7cc077e..16ecebd 100644 --- a/compiler/luci/import/src/Nodes/CircleBCQFullyConnected.cpp +++ b/compiler/luci/import/src/Nodes/CircleBCQFullyConnected.cpp @@ -37,11 +37,11 @@ CircleNode *CircleBCQFullyConnectedGraphBuilder::build_node(const circle::Operat { auto *node = graph->nodes()->create(); - node->input(inputs[0]); - node->weights_scales(inputs[1]); - node->weights_binary(inputs[2]); - node->bias(inputs[3]); - node->weights_clusters(inputs[4]); + node->input(inputs.at(0)); + node->weights_scales(inputs.at(1)); + node->weights_binary(inputs.at(2)); + node->bias(inputs.at(3)); + node->weights_clusters(inputs.at(4)); // TODO Find and move to appropriate place for setting optional input if 
(auto bias = dynamic_cast(node->bias())) diff --git a/compiler/luci/import/src/Nodes/CircleBCQGather.cpp b/compiler/luci/import/src/Nodes/CircleBCQGather.cpp index c6d2ab5..464f1ac 100644 --- a/compiler/luci/import/src/Nodes/CircleBCQGather.cpp +++ b/compiler/luci/import/src/Nodes/CircleBCQGather.cpp @@ -37,10 +37,10 @@ CircleNode *CircleBCQGatherGraphBuilder::build_node(const circle::OperatorT &op, { auto *node = graph->nodes()->create(); - node->input_scales(inputs[0]); - node->input_binary(inputs[1]); - node->indices(inputs[2]); - node->input_clusters(inputs[3]); + node->input_scales(inputs.at(0)); + node->input_binary(inputs.at(1)); + node->indices(inputs.at(2)); + node->input_clusters(inputs.at(3)); const auto *options = op.builtin_options.AsBCQGatherOptions(); node->input_hidden_size(options->input_hidden_size); diff --git a/compiler/luci/import/src/Nodes/CircleBatchMatMul.cpp b/compiler/luci/import/src/Nodes/CircleBatchMatMul.cpp index 6026b2a..3307756 100644 --- a/compiler/luci/import/src/Nodes/CircleBatchMatMul.cpp +++ b/compiler/luci/import/src/Nodes/CircleBatchMatMul.cpp @@ -34,8 +34,8 @@ CircleNode *CircleBatchMatMulGraphBuilder::build_node(const circle::OperatorT &o loco::Graph *graph) const { auto *node = graph->nodes()->create(); - node->x(inputs[0]); - node->y(inputs[1]); + node->x(inputs.at(0)); + node->y(inputs.at(1)); const auto *options = op.builtin_options.AsBatchMatMulOptions(); node->adj_x(options->adjoint_lhs); diff --git a/compiler/luci/import/src/Nodes/CircleBatchToSpaceND.cpp b/compiler/luci/import/src/Nodes/CircleBatchToSpaceND.cpp index 4bbfadf..8c2039f 100644 --- a/compiler/luci/import/src/Nodes/CircleBatchToSpaceND.cpp +++ b/compiler/luci/import/src/Nodes/CircleBatchToSpaceND.cpp @@ -33,7 +33,7 @@ bool CircleBatchToSpaceNDGraphBuilder::validate(const ValidateArgs &args) const // input 1 and 2 should have INT32/INT64 type const auto &tensors = args.reader.tensors(); - const auto &tensor_1 = tensors.at(inputs[1]); + const auto &tensor_1 = tensors.at(inputs.at(1)); switch (tensor_1->type) { case circle::TensorType_INT32: @@ -42,7 +42,7 @@ bool CircleBatchToSpaceNDGraphBuilder::validate(const ValidateArgs &args) const default: return false; } - const auto &tensor_2 = tensors.at(inputs[2]); + const auto &tensor_2 = tensors.at(inputs.at(2)); switch (tensor_2->type) { case circle::TensorType_INT32: @@ -53,7 +53,7 @@ bool CircleBatchToSpaceNDGraphBuilder::validate(const ValidateArgs &args) const } // Only support input shape dimension 3 and 4 only - const auto &tensor_0 = tensors.at(inputs[0]); + const auto &tensor_0 = tensors.at(inputs.at(0)); const auto t_0_s = tensor_0->shape.size(); if (t_0_s != 3 && t_0_s != 4) return false; @@ -68,9 +68,9 @@ CircleNode *CircleBatchToSpaceNDGraphBuilder::build_node(const circle::OperatorT loco::Graph *graph) const { auto *node = graph->nodes()->create(); - node->input(inputs[0]); - node->block_shape(inputs[1]); - node->crops(inputs[2]); + node->input(inputs.at(0)); + node->block_shape(inputs.at(1)); + node->crops(inputs.at(2)); // No options for BatchToSpaceND diff --git a/compiler/luci/import/src/Nodes/CircleCast.cpp b/compiler/luci/import/src/Nodes/CircleCast.cpp index a4d09b5..7bdb630 100644 --- a/compiler/luci/import/src/Nodes/CircleCast.cpp +++ b/compiler/luci/import/src/Nodes/CircleCast.cpp @@ -47,7 +47,7 @@ bool CircleCastGraphBuilder::validate(const ValidateArgs &args) const const circle::TensorT &output_tensor = *tensors[outputs[0]]; auto name = tensor_name(output_tensor); - const auto &tensor_in = 
tensors.at(inputs[0]); + const auto &tensor_in = tensors.at(inputs.at(0)); if (tensor_in->type != options->in_data_type) { if (settings->get(luci::UserSettings::Key::DisableValidation)) @@ -77,7 +77,7 @@ CircleNode *CircleCastGraphBuilder::build_node(const circle::OperatorT &op, loco::Graph *graph) const { auto *node = graph->nodes()->create(); - node->x(inputs[0]); + node->x(inputs.at(0)); const auto *options = op.builtin_options.AsCastOptions(); if (options != nullptr) @@ -87,7 +87,7 @@ CircleNode *CircleCastGraphBuilder::build_node(const circle::OperatorT &op, } else { - node->in_data_type(inputs[0]->dtype()); + node->in_data_type(inputs.at(0)->dtype()); node->out_data_type(loco::DataType::Unknown); // type inference should use node->dtype() for Unknown // export should use BuiltinOptions_NONE for Unknown diff --git a/compiler/luci/import/src/Nodes/CircleCeil.cpp b/compiler/luci/import/src/Nodes/CircleCeil.cpp index d3d6cd9..2e1aaa2 100644 --- a/compiler/luci/import/src/Nodes/CircleCeil.cpp +++ b/compiler/luci/import/src/Nodes/CircleCeil.cpp @@ -42,7 +42,7 @@ CircleNode *CircleCeilGraphBuilder::build_node(const circle::OperatorT &, loco::Graph *graph) const { auto *node = graph->nodes()->create(); - node->x(inputs[0]); + node->x(inputs.at(0)); return node; } diff --git a/compiler/luci/import/src/Nodes/CircleConv2D.cpp b/compiler/luci/import/src/Nodes/CircleConv2D.cpp index 42c5c26..9516ef1 100644 --- a/compiler/luci/import/src/Nodes/CircleConv2D.cpp +++ b/compiler/luci/import/src/Nodes/CircleConv2D.cpp @@ -39,11 +39,11 @@ CircleNode *CircleConv2DGraphBuilder::build_node(const circle::OperatorT &op, loco::Graph *graph) const { auto *node = graph->nodes()->create(); - node->input(inputs[0]); - node->filter(inputs[1]); + node->input(inputs.at(0)); + node->filter(inputs.at(1)); // For now, bias is required (checked in `verify` method). 
assert(inputs.size() == 3); - node->bias(inputs[2]); + node->bias(inputs.at(2)); const auto *options = op.builtin_options.AsConv2DOptions(); node->padding(luci_padding(options->padding)); diff --git a/compiler/luci/import/src/Nodes/CircleCos.cpp b/compiler/luci/import/src/Nodes/CircleCos.cpp index 5f61cc7..27d60c6 100644 --- a/compiler/luci/import/src/Nodes/CircleCos.cpp +++ b/compiler/luci/import/src/Nodes/CircleCos.cpp @@ -36,7 +36,7 @@ CircleNode *CircleCosGraphBuilder::build_node(const circle::OperatorT &, loco::Graph *graph) const { auto *node = graph->nodes()->create(); - node->x(inputs[0]); + node->x(inputs.at(0)); // No options for Cos diff --git a/compiler/luci/import/src/Nodes/CircleDepthToSpace.cpp b/compiler/luci/import/src/Nodes/CircleDepthToSpace.cpp index 827b634..49d31bb 100644 --- a/compiler/luci/import/src/Nodes/CircleDepthToSpace.cpp +++ b/compiler/luci/import/src/Nodes/CircleDepthToSpace.cpp @@ -40,7 +40,7 @@ bool CircleDepthToSpaceGraphBuilder::validate(const ValidateArgs &args) const const auto &tensors = args.reader.tensors(); - if (tensors[outputs[0]]->type != tensors[inputs[0]]->type) + if (tensors[outputs[0]]->type != tensors[inputs.at(0)]->type) { return false; } @@ -56,7 +56,7 @@ CircleNode *CircleDepthToSpaceGraphBuilder::build_node(const circle::OperatorT & loco::Graph *graph) const { auto *node = graph->nodes()->create(); - node->input(inputs[0]); + node->input(inputs.at(0)); const auto *options = op.builtin_options.AsDepthToSpaceOptions(); node->block_size(options->block_size); diff --git a/compiler/luci/import/src/Nodes/CircleDepthwiseConv2D.cpp b/compiler/luci/import/src/Nodes/CircleDepthwiseConv2D.cpp index 2b13f9e..53f85f2 100644 --- a/compiler/luci/import/src/Nodes/CircleDepthwiseConv2D.cpp +++ b/compiler/luci/import/src/Nodes/CircleDepthwiseConv2D.cpp @@ -40,11 +40,11 @@ CircleNode *CircleDepthwiseConv2DGraphBuilder::build_node(const circle::Operator loco::Graph *graph) const { auto *node = graph->nodes()->create(); - node->input(inputs[0]); - node->filter(inputs[1]); + node->input(inputs.at(0)); + node->filter(inputs.at(1)); if (inputs.size() != 3) throw oops::UserExn("DepthwiseConv2d without bias is unsupported"); - node->bias(inputs[2]); + node->bias(inputs.at(2)); const auto *options = op.builtin_options.AsDepthwiseConv2DOptions(); node->padding(luci_padding(options->padding)); diff --git a/compiler/luci/import/src/Nodes/CircleDiv.cpp b/compiler/luci/import/src/Nodes/CircleDiv.cpp index d09cfb8..615c224 100644 --- a/compiler/luci/import/src/Nodes/CircleDiv.cpp +++ b/compiler/luci/import/src/Nodes/CircleDiv.cpp @@ -37,8 +37,8 @@ CircleNode *CircleDivGraphBuilder::build_node(const circle::OperatorT &op, loco::Graph *graph) const { auto node = graph->nodes()->create(); - node->x(inputs[0]); - node->y(inputs[1]); + node->x(inputs.at(0)); + node->y(inputs.at(1)); const auto *options = op.builtin_options.AsDivOptions(); node->fusedActivationFunction(luci_actfunc(options->fused_activation_function)); diff --git a/compiler/luci/import/src/Nodes/CircleElu.cpp b/compiler/luci/import/src/Nodes/CircleElu.cpp index 37a290c..919e95e 100644 --- a/compiler/luci/import/src/Nodes/CircleElu.cpp +++ b/compiler/luci/import/src/Nodes/CircleElu.cpp @@ -35,7 +35,7 @@ bool CircleEluGraphBuilder::validate(const ValidateArgs &args) const return false; const auto &tensors = args.reader.tensors(); - const auto &tensor = tensors.at(inputs[0]); + const auto &tensor = tensors.at(inputs.at(0)); switch (tensor->type) { @@ -56,7 +56,7 @@ CircleNode 
*CircleEluGraphBuilder::build_node(const circle::OperatorT &, loco::Graph *graph) const { auto *node = graph->nodes()->create(); - node->features(inputs[0]); + node->features(inputs.at(0)); return node; } diff --git a/compiler/luci/import/src/Nodes/CircleEqual.cpp b/compiler/luci/import/src/Nodes/CircleEqual.cpp index a53f6e9..1db33b8 100644 --- a/compiler/luci/import/src/Nodes/CircleEqual.cpp +++ b/compiler/luci/import/src/Nodes/CircleEqual.cpp @@ -34,7 +34,7 @@ bool CircleEqualGraphBuilder::validate(const ValidateArgs &args) const const auto &tensors = args.reader.tensors(); - return tensors[inputs[0]]->type == tensors[inputs[1]]->type; + return tensors[inputs.at(0)]->type == tensors[inputs.at(1)]->type; } CircleNode *CircleEqualGraphBuilder::build_node(const circle::OperatorT &, @@ -42,8 +42,8 @@ CircleNode *CircleEqualGraphBuilder::build_node(const circle::OperatorT &, loco::Graph *graph) const { auto *node = graph->nodes()->create(); - node->x(inputs[0]); - node->y(inputs[1]); + node->x(inputs.at(0)); + node->y(inputs.at(1)); return node; } diff --git a/compiler/luci/import/src/Nodes/CircleExp.cpp b/compiler/luci/import/src/Nodes/CircleExp.cpp index a328514..2c031d6 100644 --- a/compiler/luci/import/src/Nodes/CircleExp.cpp +++ b/compiler/luci/import/src/Nodes/CircleExp.cpp @@ -31,7 +31,7 @@ bool CircleExpGraphBuilder::validate(const ValidateArgs &args) const // input type check const auto &tensors = args.reader.tensors(); - const auto &tensor = tensors.at(inputs[0]); + const auto &tensor = tensors.at(inputs.at(0)); switch (tensor->type) { case circle::TensorType_FLOAT16: @@ -51,7 +51,7 @@ CircleNode *CircleExpGraphBuilder::build_node(const circle::OperatorT &, loco::Graph *graph) const { auto *node = graph->nodes()->create(); - node->x(inputs[0]); + node->x(inputs.at(0)); return node; } diff --git a/compiler/luci/import/src/Nodes/CircleExpandDims.cpp b/compiler/luci/import/src/Nodes/CircleExpandDims.cpp index 1cef67a..ab537c7 100644 --- a/compiler/luci/import/src/Nodes/CircleExpandDims.cpp +++ b/compiler/luci/import/src/Nodes/CircleExpandDims.cpp @@ -34,7 +34,7 @@ bool CircleExpandDimsGraphBuilder::validate(const ValidateArgs &args) const const auto &tensors = args.reader.tensors(); - return tensors[inputs[1]]->type == circle::TensorType_INT32; + return tensors[inputs.at(1)]->type == circle::TensorType_INT32; } CircleNode *CircleExpandDimsGraphBuilder::build_node(const circle::OperatorT &, @@ -42,8 +42,8 @@ CircleNode *CircleExpandDimsGraphBuilder::build_node(const circle::OperatorT &, loco::Graph *graph) const { auto *node = graph->nodes()->create(); - node->input(inputs[0]); - node->axis(inputs[1]); + node->input(inputs.at(0)); + node->axis(inputs.at(1)); return node; } diff --git a/compiler/luci/import/src/Nodes/CircleFill.cpp b/compiler/luci/import/src/Nodes/CircleFill.cpp index 6c3d3a2..95d5b87 100644 --- a/compiler/luci/import/src/Nodes/CircleFill.cpp +++ b/compiler/luci/import/src/Nodes/CircleFill.cpp @@ -37,8 +37,8 @@ CircleNode *CircleFillGraphBuilder::build_node(const circle::OperatorT &op, loco::Graph *graph) const { auto *node = graph->nodes()->create(); - node->dims(inputs[0]); - node->value(inputs[1]); + node->dims(inputs.at(0)); + node->value(inputs.at(1)); const auto *options = op.builtin_options.AsFillOptions(); (void)options; diff --git a/compiler/luci/import/src/Nodes/CircleFloor.cpp b/compiler/luci/import/src/Nodes/CircleFloor.cpp index 302a9ea..ce756b3 100644 --- a/compiler/luci/import/src/Nodes/CircleFloor.cpp +++ b/compiler/luci/import/src/Nodes/CircleFloor.cpp 
@@ -42,7 +42,7 @@ CircleNode *CircleFloorGraphBuilder::build_node(const circle::OperatorT &, loco::Graph *graph) const { auto *node = graph->nodes()->create(); - node->x(inputs[0]); + node->x(inputs.at(0)); return node; } diff --git a/compiler/luci/import/src/Nodes/CircleFloorDiv.cpp b/compiler/luci/import/src/Nodes/CircleFloorDiv.cpp index 8751978..55f385d 100644 --- a/compiler/luci/import/src/Nodes/CircleFloorDiv.cpp +++ b/compiler/luci/import/src/Nodes/CircleFloorDiv.cpp @@ -39,8 +39,8 @@ bool CircleFloorDivGraphBuilder::validate(const ValidateArgs &args) const } const auto &tensors = args.reader.tensors(); - const auto &tensor_in_0 = tensors.at(inputs[0]); - const auto &tensor_in_1 = tensors.at(inputs[1]); + const auto &tensor_in_0 = tensors.at(inputs.at(0)); + const auto &tensor_in_1 = tensors.at(inputs.at(1)); const auto &tensor_out = tensors.at(outputs[0]); if (tensor_in_0->type != tensor_in_1->type) @@ -59,8 +59,8 @@ CircleNode *CircleFloorDivGraphBuilder::build_node(const circle::OperatorT &, loco::Graph *graph) const { auto *node = graph->nodes()->create(); - node->x(inputs[0]); - node->y(inputs[1]); + node->x(inputs.at(0)); + node->y(inputs.at(1)); return node; } diff --git a/compiler/luci/import/src/Nodes/CircleFloorMod.cpp b/compiler/luci/import/src/Nodes/CircleFloorMod.cpp index 3ccdce0..2101e41 100644 --- a/compiler/luci/import/src/Nodes/CircleFloorMod.cpp +++ b/compiler/luci/import/src/Nodes/CircleFloorMod.cpp @@ -33,8 +33,8 @@ bool CircleFloorModGraphBuilder::validate(const ValidateArgs &args) const return false; const auto &tensors = args.reader.tensors(); - const auto &tensor_in_0 = tensors.at(inputs[0]); - const auto &tensor_in_1 = tensors.at(inputs[1]); + const auto &tensor_in_0 = tensors.at(inputs.at(0)); + const auto &tensor_in_1 = tensors.at(inputs.at(1)); if (tensor_in_0->type != tensor_in_1->type) return false; @@ -48,8 +48,8 @@ CircleNode *CircleFloorModGraphBuilder::build_node(const circle::OperatorT &, loco::Graph *graph) const { auto *node = graph->nodes()->create(); - node->x(inputs[0]); - node->y(inputs[1]); + node->x(inputs.at(0)); + node->y(inputs.at(1)); return node; } diff --git a/compiler/luci/import/src/Nodes/CircleFullyConnected.cpp b/compiler/luci/import/src/Nodes/CircleFullyConnected.cpp index 8937e78..65a863b 100644 --- a/compiler/luci/import/src/Nodes/CircleFullyConnected.cpp +++ b/compiler/luci/import/src/Nodes/CircleFullyConnected.cpp @@ -38,9 +38,9 @@ CircleNode *CircleFullyConnectedGraphBuilder::build_node(const circle::OperatorT loco::Graph *graph) const { auto *node = graph->nodes()->create(); - node->input(inputs[0]); - node->weights(inputs[1]); - node->bias(inputs[2]); // bias is optional + node->input(inputs.at(0)); + node->weights(inputs.at(1)); + node->bias(inputs.at(2)); // bias is optional // TODO Find and move to appropriate place for setting optional input if (auto bias = dynamic_cast(node->bias())) diff --git a/compiler/luci/import/src/Nodes/CircleGather.cpp b/compiler/luci/import/src/Nodes/CircleGather.cpp index 1caa05e..75447a3 100644 --- a/compiler/luci/import/src/Nodes/CircleGather.cpp +++ b/compiler/luci/import/src/Nodes/CircleGather.cpp @@ -56,8 +56,8 @@ CircleNode *CircleGatherGraphBuilder::build_node(const circle::OperatorT &op, { auto *node = graph->nodes()->create(); - node->params(inputs[0]); - node->indices(inputs[1]); + node->params(inputs.at(0)); + node->indices(inputs.at(1)); const auto *options = op.builtin_options.AsGatherOptions(); node->axis(options->axis); diff --git 
a/compiler/luci/import/src/Nodes/CircleGatherNd.cpp b/compiler/luci/import/src/Nodes/CircleGatherNd.cpp index 621d4ae..981adbf 100644 --- a/compiler/luci/import/src/Nodes/CircleGatherNd.cpp +++ b/compiler/luci/import/src/Nodes/CircleGatherNd.cpp @@ -36,7 +36,7 @@ bool CircleGatherNdGraphBuilder::validate(const ValidateArgs &args) const if (outputs.size() != 1) return false; - auto &indices_tensor = args.reader.tensors()[inputs[1]]; + auto &indices_tensor = args.reader.tensors()[inputs.at(1)]; if (!(indices_tensor->type == circle::TensorType::TensorType_INT32 || indices_tensor->type == circle::TensorType::TensorType_INT64)) @@ -53,8 +53,8 @@ CircleNode *CircleGatherNdGraphBuilder::build_node(const circle::OperatorT &, { auto *node = graph->nodes()->create(); - node->params(inputs[0]); - node->indices(inputs[1]); + node->params(inputs.at(0)); + node->indices(inputs.at(1)); // GatherNd options empty diff --git a/compiler/luci/import/src/Nodes/CircleGreater.cpp b/compiler/luci/import/src/Nodes/CircleGreater.cpp index 8810758..1ad0467 100644 --- a/compiler/luci/import/src/Nodes/CircleGreater.cpp +++ b/compiler/luci/import/src/Nodes/CircleGreater.cpp @@ -43,7 +43,7 @@ bool CircleGreaterGraphBuilder::validate(const ValidateArgs &args) const const auto &tensors = args.reader.tensors(); - if (tensors[inputs[0]]->type != tensors[inputs[1]]->type) + if (tensors[inputs.at(0)]->type != tensors[inputs.at(1)]->type) return false; // NOTE: real models do have output dtype NOT BOOL @@ -67,8 +67,8 @@ CircleNode *CircleGreaterGraphBuilder::build_node(const circle::OperatorT &, loco::Graph *graph) const { auto *node = graph->nodes()->create(); - node->x(inputs[0]); - node->y(inputs[1]); + node->x(inputs.at(0)); + node->y(inputs.at(1)); return node; } diff --git a/compiler/luci/import/src/Nodes/CircleGreaterEqual.cpp b/compiler/luci/import/src/Nodes/CircleGreaterEqual.cpp index dff1510..0ac63b0 100644 --- a/compiler/luci/import/src/Nodes/CircleGreaterEqual.cpp +++ b/compiler/luci/import/src/Nodes/CircleGreaterEqual.cpp @@ -40,7 +40,7 @@ bool CircleGreaterEqualGraphBuilder::validate(const ValidateArgs &args) const const auto &tensors = args.reader.tensors(); - if (tensors[inputs[0]]->type != tensors[inputs[1]]->type) + if (tensors[inputs.at(0)]->type != tensors[inputs.at(1)]->type) { return false; } @@ -53,8 +53,8 @@ CircleNode *CircleGreaterEqualGraphBuilder::build_node(const circle::OperatorT & loco::Graph *graph) const { auto *node = graph->nodes()->create(); - node->x(inputs[0]); - node->y(inputs[1]); + node->x(inputs.at(0)); + node->y(inputs.at(1)); return node; } diff --git a/compiler/luci/import/src/Nodes/CircleIf.cpp b/compiler/luci/import/src/Nodes/CircleIf.cpp index d609064..db9ffe1 100644 --- a/compiler/luci/import/src/Nodes/CircleIf.cpp +++ b/compiler/luci/import/src/Nodes/CircleIf.cpp @@ -43,7 +43,7 @@ bool CircleIfGraphBuilder::validate(const ValidateArgs &args) const // input 0 should be BOOL type const auto &tensors = args.reader.tensors(); - const auto &tensor = tensors.at(inputs[0]); + const auto &tensor = tensors.at(inputs.at(0)); if (tensor->type != circle::TensorType_BOOL) return false; diff --git a/compiler/luci/import/src/Nodes/CircleInstanceNorm.cpp b/compiler/luci/import/src/Nodes/CircleInstanceNorm.cpp index b95c54c..6349fd3 100644 --- a/compiler/luci/import/src/Nodes/CircleInstanceNorm.cpp +++ b/compiler/luci/import/src/Nodes/CircleInstanceNorm.cpp @@ -38,9 +38,9 @@ CircleNode *CircleInstanceNormGraphBuilder::build_node(const circle::OperatorT & loco::Graph *graph) const { auto *node 
= graph->nodes()->create(); - node->input(inputs[0]); - node->gamma(inputs[1]); - node->beta(inputs[2]); + node->input(inputs.at(0)); + node->gamma(inputs.at(1)); + node->beta(inputs.at(2)); const auto *options = op.builtin_options.AsInstanceNormOptions(); node->epsilon(options->epsilon); diff --git a/compiler/luci/import/src/Nodes/CircleL2Normalize.cpp b/compiler/luci/import/src/Nodes/CircleL2Normalize.cpp index fe10a85..e4fdc20 100644 --- a/compiler/luci/import/src/Nodes/CircleL2Normalize.cpp +++ b/compiler/luci/import/src/Nodes/CircleL2Normalize.cpp @@ -46,7 +46,7 @@ CircleNode *CircleL2NormalizeGraphBuilder::build_node(const circle::OperatorT &o loco::Graph *graph) const { auto *node = graph->nodes()->create(); - node->x(inputs[0]); + node->x(inputs.at(0)); const auto *options = op.builtin_options.AsL2NormOptions(); node->fusedActivationFunction(luci_actfunc(options->fused_activation_function)); diff --git a/compiler/luci/import/src/Nodes/CircleL2Pool2D.cpp b/compiler/luci/import/src/Nodes/CircleL2Pool2D.cpp index 0232066..202d9d6 100644 --- a/compiler/luci/import/src/Nodes/CircleL2Pool2D.cpp +++ b/compiler/luci/import/src/Nodes/CircleL2Pool2D.cpp @@ -38,7 +38,7 @@ CircleNode *CircleL2Pool2DGraphBuilder::build_node(const circle::OperatorT &op, loco::Graph *graph) const { auto *node = graph->nodes()->create(); - node->value(inputs[0]); + node->value(inputs.at(0)); const auto *options = op.builtin_options.AsPool2DOptions(); node->padding(luci_padding(options->padding)); diff --git a/compiler/luci/import/src/Nodes/CircleLeakyRelu.cpp b/compiler/luci/import/src/Nodes/CircleLeakyRelu.cpp index 4957cea..ad4979f 100644 --- a/compiler/luci/import/src/Nodes/CircleLeakyRelu.cpp +++ b/compiler/luci/import/src/Nodes/CircleLeakyRelu.cpp @@ -39,7 +39,7 @@ CircleNode *CircleLeakyReluGraphBuilder::build_node(const circle::OperatorT &op, loco::Graph *graph) const { auto *node = graph->nodes()->create(); - node->features(inputs[0]); + node->features(inputs.at(0)); const auto *options = op.builtin_options.AsLeakyReluOptions(); node->alpha(options->alpha); diff --git a/compiler/luci/import/src/Nodes/CircleLess.cpp b/compiler/luci/import/src/Nodes/CircleLess.cpp index 40ad28c..5060369 100644 --- a/compiler/luci/import/src/Nodes/CircleLess.cpp +++ b/compiler/luci/import/src/Nodes/CircleLess.cpp @@ -39,7 +39,7 @@ bool CircleLessGraphBuilder::validate(const ValidateArgs &args) const } const auto &tensors = args.reader.tensors(); - const auto &tensor = tensors.at(inputs[0]); + const auto &tensor = tensors.at(inputs.at(0)); switch (tensor->type) { @@ -56,7 +56,7 @@ bool CircleLessGraphBuilder::validate(const ValidateArgs &args) const return false; } - if (tensors[inputs[1]]->type != tensor->type) + if (tensors[inputs.at(1)]->type != tensor->type) { return false; } @@ -69,8 +69,8 @@ CircleNode *CircleLessGraphBuilder::build_node(const circle::OperatorT &, loco::Graph *graph) const { auto *node = graph->nodes()->create(); - node->x(inputs[0]); - node->y(inputs[1]); + node->x(inputs.at(0)); + node->y(inputs.at(1)); return node; } diff --git a/compiler/luci/import/src/Nodes/CircleLessEqual.cpp b/compiler/luci/import/src/Nodes/CircleLessEqual.cpp index 13e9950..9b4f934 100644 --- a/compiler/luci/import/src/Nodes/CircleLessEqual.cpp +++ b/compiler/luci/import/src/Nodes/CircleLessEqual.cpp @@ -40,7 +40,7 @@ bool CircleLessEqualGraphBuilder::validate(const ValidateArgs &args) const const auto &tensors = args.reader.tensors(); - if (tensors[inputs[0]]->type != tensors[inputs[1]]->type) + if (tensors[inputs.at(0)]->type 
!= tensors[inputs.at(1)]->type) { return false; } @@ -53,8 +53,8 @@ CircleNode *CircleLessEqualGraphBuilder::build_node(const circle::OperatorT &, loco::Graph *graph) const { auto *node = graph->nodes()->create(); - node->x(inputs[0]); - node->y(inputs[1]); + node->x(inputs.at(0)); + node->y(inputs.at(1)); return node; } diff --git a/compiler/luci/import/src/Nodes/CircleLocalResponseNormalization.cpp b/compiler/luci/import/src/Nodes/CircleLocalResponseNormalization.cpp index 7b1f0db..0e32f62 100644 --- a/compiler/luci/import/src/Nodes/CircleLocalResponseNormalization.cpp +++ b/compiler/luci/import/src/Nodes/CircleLocalResponseNormalization.cpp @@ -37,7 +37,7 @@ CircleNode *CircleLocalResponseNormalizationGraphBuilder::build_node( const circle::OperatorT &op, const std::vector &inputs, loco::Graph *graph) const { auto *node = graph->nodes()->create(); - node->input(inputs[0]); + node->input(inputs.at(0)); const auto *options = op.builtin_options.AsLocalResponseNormalizationOptions(); node->radius(options->radius); diff --git a/compiler/luci/import/src/Nodes/CircleLog.cpp b/compiler/luci/import/src/Nodes/CircleLog.cpp index 2140832..346fc43 100644 --- a/compiler/luci/import/src/Nodes/CircleLog.cpp +++ b/compiler/luci/import/src/Nodes/CircleLog.cpp @@ -35,7 +35,7 @@ bool CircleLogGraphBuilder::validate(const ValidateArgs &args) const // Must be one of bfloat16, half, float32, float64, complex64, complex128. // Currently circle supports half(float16), float32, float64, complex64. const auto &tensors = args.reader.tensors(); - const auto &tensor = tensors.at(inputs[0]); + const auto &tensor = tensors.at(inputs.at(0)); switch (tensor->type) { case circle::TensorType_FLOAT16: @@ -55,7 +55,7 @@ CircleNode *CircleLogGraphBuilder::build_node(const circle::OperatorT &, loco::Graph *graph) const { auto *node = graph->nodes()->create(); - node->x(inputs[0]); + node->x(inputs.at(0)); // No options for Log diff --git a/compiler/luci/import/src/Nodes/CircleLogSoftmax.cpp b/compiler/luci/import/src/Nodes/CircleLogSoftmax.cpp index e738c4a..ef69e86 100644 --- a/compiler/luci/import/src/Nodes/CircleLogSoftmax.cpp +++ b/compiler/luci/import/src/Nodes/CircleLogSoftmax.cpp @@ -38,7 +38,7 @@ CircleNode *CircleLogSoftmaxGraphBuilder::build_node(const circle::OperatorT &, loco::Graph *graph) const { auto *node = graph->nodes()->create(); - node->logits(inputs[0]); + node->logits(inputs.at(0)); return node; } diff --git a/compiler/luci/import/src/Nodes/CircleLogicalAnd.cpp b/compiler/luci/import/src/Nodes/CircleLogicalAnd.cpp index 8509dba..7844da0 100644 --- a/compiler/luci/import/src/Nodes/CircleLogicalAnd.cpp +++ b/compiler/luci/import/src/Nodes/CircleLogicalAnd.cpp @@ -46,8 +46,8 @@ CircleNode *CircleLogicalAndGraphBuilder::build_node(const circle::OperatorT &, loco::Graph *graph) const { auto *node = graph->nodes()->create(); - node->x(inputs[0]); - node->y(inputs[1]); + node->x(inputs.at(0)); + node->y(inputs.at(1)); return node; } diff --git a/compiler/luci/import/src/Nodes/CircleLogicalNot.cpp b/compiler/luci/import/src/Nodes/CircleLogicalNot.cpp index b1ed3ea..3758642 100644 --- a/compiler/luci/import/src/Nodes/CircleLogicalNot.cpp +++ b/compiler/luci/import/src/Nodes/CircleLogicalNot.cpp @@ -31,7 +31,7 @@ bool CircleLogicalNotGraphBuilder::validate(const ValidateArgs &args) const // Only BOOL type is allowed for the input const auto &inputs = args.op.inputs; const auto &tensors = args.reader.tensors(); - const auto &tensor = tensors.at(inputs[0]); + const auto &tensor = tensors.at(inputs.at(0)); if 
(tensor->type != circle::TensorType::TensorType_BOOL) return false; @@ -43,7 +43,7 @@ CircleNode *CircleLogicalNotGraphBuilder::build_node(const circle::OperatorT &, loco::Graph *graph) const { auto *node = graph->nodes()->create(); - node->x(inputs[0]); + node->x(inputs.at(0)); return node; } diff --git a/compiler/luci/import/src/Nodes/CircleLogicalOr.cpp b/compiler/luci/import/src/Nodes/CircleLogicalOr.cpp index 00eb9c5..1b87e6f 100644 --- a/compiler/luci/import/src/Nodes/CircleLogicalOr.cpp +++ b/compiler/luci/import/src/Nodes/CircleLogicalOr.cpp @@ -46,8 +46,8 @@ CircleNode *CircleLogicalOrGraphBuilder::build_node(const circle::OperatorT &, loco::Graph *graph) const { auto *node = graph->nodes()->create(); - node->x(inputs[0]); - node->y(inputs[1]); + node->x(inputs.at(0)); + node->y(inputs.at(1)); return node; } diff --git a/compiler/luci/import/src/Nodes/CircleLogistic.cpp b/compiler/luci/import/src/Nodes/CircleLogistic.cpp index 85e7e55..9606e19 100644 --- a/compiler/luci/import/src/Nodes/CircleLogistic.cpp +++ b/compiler/luci/import/src/Nodes/CircleLogistic.cpp @@ -32,22 +32,8 @@ bool CircleLogisticGraphBuilder::validate(const ValidateArgs &args) const if (outputs.size() != 1) return false; - // Must be one of the following types - // float16, float32, float64, complex64, or complex128 const auto &tensors = args.reader.tensors(); - const auto &tensor = tensors.at(inputs[0]); - switch (tensor->type) - { - case circle::TensorType_FLOAT16: - case circle::TensorType_FLOAT32: - case circle::TensorType_FLOAT64: - case circle::TensorType_COMPLEX64: - break; - default: - return false; - } - - if (tensors.at(inputs[0])->type != tensors.at(outputs[0])->type) + if (tensors.at(inputs.at(0))->type != tensors.at(outputs[0])->type) return false; return true; @@ -58,7 +44,7 @@ CircleNode *CircleLogisticGraphBuilder::build_node(const circle::OperatorT &, loco::Graph *graph) const { auto *node = graph->nodes()->create(); - node->x(inputs[0]); + node->x(inputs.at(0)); return node; } diff --git a/compiler/luci/import/src/Nodes/CircleMatrixDiag.cpp b/compiler/luci/import/src/Nodes/CircleMatrixDiag.cpp index f4ae03c..a4a21a8 100644 --- a/compiler/luci/import/src/Nodes/CircleMatrixDiag.cpp +++ b/compiler/luci/import/src/Nodes/CircleMatrixDiag.cpp @@ -35,7 +35,7 @@ bool CircleMatrixDiagGraphBuilder::validate(const ValidateArgs &args) const return false; const auto &tensors = args.reader.tensors(); - const auto &tensor = tensors.at(inputs[0]); + const auto &tensor = tensors.at(inputs.at(0)); if (tensors[outputs[0]]->type != tensor->type) return false; @@ -48,7 +48,7 @@ CircleNode *CircleMatrixDiagGraphBuilder::build_node(const circle::OperatorT &, loco::Graph *graph) const { auto *node = graph->nodes()->create(); - node->diagonal(inputs[0]); + node->diagonal(inputs.at(0)); return node; } diff --git a/compiler/luci/import/src/Nodes/CircleMatrixSetDiag.cpp b/compiler/luci/import/src/Nodes/CircleMatrixSetDiag.cpp index d6f6aee..cf03131 100644 --- a/compiler/luci/import/src/Nodes/CircleMatrixSetDiag.cpp +++ b/compiler/luci/import/src/Nodes/CircleMatrixSetDiag.cpp @@ -35,7 +35,7 @@ bool CircleMatrixSetDiagGraphBuilder::validate(const ValidateArgs &args) const return false; const auto &tensors = args.reader.tensors(); - const auto &tensor = tensors.at(inputs[0]); + const auto &tensor = tensors.at(inputs.at(0)); if (tensors[outputs[0]]->type != tensor->type) return false; @@ -48,8 +48,8 @@ CircleNode *CircleMatrixSetDiagGraphBuilder::build_node(const circle::OperatorT loco::Graph *graph) const { auto *node = 
graph->nodes()->create(); - node->input(inputs[0]); - node->diagonal(inputs[1]); + node->input(inputs.at(0)); + node->diagonal(inputs.at(1)); return node; } diff --git a/compiler/luci/import/src/Nodes/CircleMaxPool2D.cpp b/compiler/luci/import/src/Nodes/CircleMaxPool2D.cpp index 1798819..4bca0f4 100644 --- a/compiler/luci/import/src/Nodes/CircleMaxPool2D.cpp +++ b/compiler/luci/import/src/Nodes/CircleMaxPool2D.cpp @@ -36,7 +36,7 @@ CircleNode *CircleMaxPool2DGraphBuilder::build_node(const circle::OperatorT &op, loco::Graph *graph) const { auto *node = graph->nodes()->create(); - node->value(inputs[0]); + node->value(inputs.at(0)); const auto *options = op.builtin_options.AsPool2DOptions(); node->padding(luci_padding(options->padding)); diff --git a/compiler/luci/import/src/Nodes/CircleMaximum.cpp b/compiler/luci/import/src/Nodes/CircleMaximum.cpp index 6ca7e40..4d1468f 100644 --- a/compiler/luci/import/src/Nodes/CircleMaximum.cpp +++ b/compiler/luci/import/src/Nodes/CircleMaximum.cpp @@ -35,7 +35,7 @@ bool CircleMaximumGraphBuilder::validate(const ValidateArgs &args) const return false; const auto &tensors = args.reader.tensors(); - const auto &tensor = tensors.at(inputs[0]); + const auto &tensor = tensors.at(inputs.at(0)); switch (tensor->type) { @@ -49,7 +49,7 @@ bool CircleMaximumGraphBuilder::validate(const ValidateArgs &args) const return false; } - if (tensors[inputs[1]]->type != tensor->type) + if (tensors[inputs.at(1)]->type != tensor->type) return false; if (tensors[outputs[0]]->type != tensor->type) @@ -63,8 +63,8 @@ CircleNode *CircleMaximumGraphBuilder::build_node(const circle::OperatorT &, loco::Graph *graph) const { auto *node = graph->nodes()->create(); - node->x(inputs[0]); - node->y(inputs[1]); + node->x(inputs.at(0)); + node->y(inputs.at(1)); return node; } diff --git a/compiler/luci/import/src/Nodes/CircleMean.cpp b/compiler/luci/import/src/Nodes/CircleMean.cpp index 8261c7b..d8fa9a5 100644 --- a/compiler/luci/import/src/Nodes/CircleMean.cpp +++ b/compiler/luci/import/src/Nodes/CircleMean.cpp @@ -34,8 +34,8 @@ CircleNode *CircleMeanGraphBuilder::build_node(const circle::OperatorT &op, loco::Graph *graph) const { auto *node = graph->nodes()->create(); - node->input(inputs[0]); - node->reduction_indices(inputs[1]); + node->input(inputs.at(0)); + node->reduction_indices(inputs.at(1)); const auto *options = op.builtin_options.AsReducerOptions(); node->keep_dims(options->keep_dims); diff --git a/compiler/luci/import/src/Nodes/CircleMinimum.cpp b/compiler/luci/import/src/Nodes/CircleMinimum.cpp index b770f36..8b4daf1 100644 --- a/compiler/luci/import/src/Nodes/CircleMinimum.cpp +++ b/compiler/luci/import/src/Nodes/CircleMinimum.cpp @@ -35,7 +35,7 @@ bool CircleMinimumGraphBuilder::validate(const ValidateArgs &args) const return false; const auto &tensors = args.reader.tensors(); - const auto &tensor = tensors.at(inputs[0]); + const auto &tensor = tensors.at(inputs.at(0)); switch (tensor->type) { @@ -49,7 +49,7 @@ bool CircleMinimumGraphBuilder::validate(const ValidateArgs &args) const return false; } - if (tensors[inputs[1]]->type != tensor->type) + if (tensors[inputs.at(1)]->type != tensor->type) return false; if (tensors[outputs[0]]->type != tensor->type) @@ -63,8 +63,8 @@ CircleNode *CircleMinimumGraphBuilder::build_node(const circle::OperatorT &, loco::Graph *graph) const { auto *node = graph->nodes()->create(); - node->x(inputs[0]); - node->y(inputs[1]); + node->x(inputs.at(0)); + node->y(inputs.at(1)); return node; } diff --git 
a/compiler/luci/import/src/Nodes/CircleMirrorPad.cpp b/compiler/luci/import/src/Nodes/CircleMirrorPad.cpp index 41b5e5d..e0ddd4c 100644 --- a/compiler/luci/import/src/Nodes/CircleMirrorPad.cpp +++ b/compiler/luci/import/src/Nodes/CircleMirrorPad.cpp @@ -38,8 +38,8 @@ CircleNode *CircleMirrorPadGraphBuilder::build_node(const circle::OperatorT &op, loco::Graph *graph) const { auto *node = graph->nodes()->create(); - node->input(inputs[0]); - node->paddings(inputs[1]); + node->input(inputs.at(0)); + node->paddings(inputs.at(1)); const auto *options = op.builtin_options.AsMirrorPadOptions(); node->mode(luci_mirrorpad_mode(options->mode)); diff --git a/compiler/luci/import/src/Nodes/CircleMul.cpp b/compiler/luci/import/src/Nodes/CircleMul.cpp index d4412b9..e3c4a7e 100644 --- a/compiler/luci/import/src/Nodes/CircleMul.cpp +++ b/compiler/luci/import/src/Nodes/CircleMul.cpp @@ -37,8 +37,8 @@ CircleNode *CircleMulGraphBuilder::build_node(const circle::OperatorT &op, loco::Graph *graph) const { auto *node = graph->nodes()->create(); - node->x(inputs[0]); - node->y(inputs[1]); + node->x(inputs.at(0)); + node->y(inputs.at(1)); const auto *options = op.builtin_options.AsMulOptions(); node->fusedActivationFunction(luci_actfunc(options->fused_activation_function)); diff --git a/compiler/luci/import/src/Nodes/CircleNeg.cpp b/compiler/luci/import/src/Nodes/CircleNeg.cpp index 3d3079c..a64a695 100644 --- a/compiler/luci/import/src/Nodes/CircleNeg.cpp +++ b/compiler/luci/import/src/Nodes/CircleNeg.cpp @@ -36,7 +36,7 @@ CircleNode *CircleNegGraphBuilder::build_node(const circle::OperatorT &, loco::Graph *graph) const { auto *node = graph->nodes()->create(); - node->x(inputs[0]); + node->x(inputs.at(0)); return node; } diff --git a/compiler/luci/import/src/Nodes/CircleNonMaxSuppressionV4.cpp b/compiler/luci/import/src/Nodes/CircleNonMaxSuppressionV4.cpp new file mode 100644 index 0000000..a4ad4a5 --- /dev/null +++ b/compiler/luci/import/src/Nodes/CircleNonMaxSuppressionV4.cpp @@ -0,0 +1,123 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "luci/Import/Nodes/CircleNonMaxSuppressionV4.h" + +#include +#include + +#include +#include + +namespace luci +{ + +bool CircleNonMaxSuppressionV4GraphBuilder::validate(const ValidateArgs &args) const +{ + const auto &inputs = args.op.inputs; + const auto &outputs = args.op.outputs; + + if (inputs.size() != 5) + return false; + if (outputs.size() != 2) + return false; + + const auto &tensors = args.reader.tensors(); + const auto &boxes_tensor = tensors.at(inputs[0]); + if (boxes_tensor->shape.size() != 2) + return false; + if (boxes_tensor->shape.at(1) != 4) + return false; + if (boxes_tensor->shape.at(0) != tensors.at(inputs[1])->shape.at(0)) + return false; + + if (tensors.at(inputs[2])->type != circle::TensorType_INT32) + return false; + if (tensors.at(inputs[3])->type != circle::TensorType_FLOAT32) + return false; + if (tensors.at(inputs[4])->type != circle::TensorType_FLOAT32) + return false; + + return true; +} + +/** + * @brief NonMaxSuppressionV4 Node builder + * + * @note Current loco does not provide multiple outputs + * We will create multiple NonMasSuppressionV4Oout nodes to emulate this + */ + +void CircleNonMaxSuppressionV4GraphBuilder::build(const circle::OperatorT &op, + GraphBuilderContext *context) const +{ + assert(context != nullptr); + + auto graph = context->graph(); + + const std::vector &inputs = op.inputs; + const std::vector &outputs = op.outputs; + const auto &tensors = context->reader()->tensors(); + const auto &opcodes = context->reader()->opcodes(); + auto tensors_ptr = context->reader()->tensors_ptr(); + assert(tensors_ptr != nullptr); + + std::vector input_nodes; + for (const int32_t input_tensor_index : inputs) + { + input_nodes.push_back(context->nodefinder()->node(input_tensor_index)); + } + + // Create CircleNonMaxSuppressionV4 + auto node = graph->nodes()->create(); + node->boxes(input_nodes[0]); + node->scores(input_nodes[1]); + node->max_output_size(input_nodes[2]); + node->iou_threshold(input_nodes[3]); + node->score_threshold(input_nodes[4]); + + assert(outputs.size() == 2); + { + // Let's use name of output 0 as NonMaxSuppressionV4 name + const circle::TensorT &output_tensor = *tensors[outputs[0]]; + node->name(tensor_name(output_tensor)); + node->op_version(opcodes[op.opcode_index].get()->version); + + // NOTE We don't set quantization for NonMaxSuppressionV4 itself but to virtual outputs + } + + // Create virtual outputs of NonMaxSuppressionV4 + for (size_t n = 0; n < outputs.size(); ++n) + { + const circle::TensorT &output_tensor = *tensors[outputs[n]]; + + auto *nodeout = graph->nodes()->create(); + copy_tensor_attributes(output_tensor, nodeout); + + // mark shape_status + if (tensors_ptr->Get(outputs[n])->shape() == nullptr) + nodeout->shape_status(ShapeStatus::NOSHAPE); + else + nodeout->shape_status(ShapeStatus::VALID); + + nodeout->input(node); + nodeout->index(n); + + context->nodefinder()->enroll(outputs[n], nodeout); + } +} + +} // namespace luci diff --git a/compiler/luci/import/src/Nodes/CircleNotEqual.cpp b/compiler/luci/import/src/Nodes/CircleNotEqual.cpp index 5b04856..77e986d 100644 --- a/compiler/luci/import/src/Nodes/CircleNotEqual.cpp +++ b/compiler/luci/import/src/Nodes/CircleNotEqual.cpp @@ -40,7 +40,7 @@ bool CircleNotEqualGraphBuilder::validate(const ValidateArgs &args) const const auto &tensors = args.reader.tensors(); - if (tensors[inputs[0]]->type != tensors[inputs[1]]->type) + if (tensors[inputs.at(0)]->type != tensors[inputs.at(1)]->type) { return false; } @@ -53,8 +53,8 @@ CircleNode 
*CircleNotEqualGraphBuilder::build_node(const circle::OperatorT &, loco::Graph *graph) const { auto *node = graph->nodes()->create(); - node->x(inputs[0]); - node->y(inputs[1]); + node->x(inputs.at(0)); + node->y(inputs.at(1)); return node; } diff --git a/compiler/luci/import/src/Nodes/CircleOneHot.cpp b/compiler/luci/import/src/Nodes/CircleOneHot.cpp index 9fdbfa8..69294e1 100644 --- a/compiler/luci/import/src/Nodes/CircleOneHot.cpp +++ b/compiler/luci/import/src/Nodes/CircleOneHot.cpp @@ -38,10 +38,10 @@ bool CircleOneHotGraphBuilder::validate(const ValidateArgs &args) const return false; const auto &tensors = args.reader.tensors(); - const auto &indices = tensors.at(inputs[0]); - const auto &depth = tensors.at(inputs[1]); - const auto &on_value = tensors.at(inputs[2]); - const auto &off_value = tensors.at(inputs[3]); + const auto &indices = tensors.at(inputs.at(0)); + const auto &depth = tensors.at(inputs.at(1)); + const auto &on_value = tensors.at(inputs.at(2)); + const auto &off_value = tensors.at(inputs.at(3)); if (options->axis < -1 || options->axis > static_cast(indices->shape.size())) return false; @@ -63,10 +63,10 @@ CircleNode *CircleOneHotGraphBuilder::build_node(const circle::OperatorT &op, { auto *node = graph->nodes()->create(); - node->indices(inputs[0]); - node->depth(inputs[1]); - node->on_value(inputs[2]); - node->off_value(inputs[3]); + node->indices(inputs.at(0)); + node->depth(inputs.at(1)); + node->on_value(inputs.at(2)); + node->off_value(inputs.at(3)); const auto *options = op.builtin_options.AsOneHotOptions(); node->axis(options->axis); diff --git a/compiler/luci/import/src/Nodes/CirclePRelu.cpp b/compiler/luci/import/src/Nodes/CirclePRelu.cpp index 0d87cd4..c07920f 100644 --- a/compiler/luci/import/src/Nodes/CirclePRelu.cpp +++ b/compiler/luci/import/src/Nodes/CirclePRelu.cpp @@ -39,8 +39,8 @@ CircleNode *CirclePReluGraphBuilder::build_node(const circle::OperatorT &, loco::Graph *graph) const { auto *node = graph->nodes()->create(); - node->input(inputs[0]); - node->alpha(inputs[1]); + node->input(inputs.at(0)); + node->alpha(inputs.at(1)); // PRelu options are empty diff --git a/compiler/luci/import/src/Nodes/CirclePad.cpp b/compiler/luci/import/src/Nodes/CirclePad.cpp index 6abcf2d..999173b 100644 --- a/compiler/luci/import/src/Nodes/CirclePad.cpp +++ b/compiler/luci/import/src/Nodes/CirclePad.cpp @@ -38,8 +38,8 @@ CircleNode *CirclePadGraphBuilder::build_node(const circle::OperatorT &op, loco::Graph *graph) const { auto *node = graph->nodes()->create(); - node->input(inputs[0]); - node->paddings(inputs[1]); + node->input(inputs.at(0)); + node->paddings(inputs.at(1)); const auto *options = op.builtin_options.AsPadOptions(); (void)options; // There are no options. 
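The hunks above, like the rest of this import patch, replace unchecked operator[] access on the flatbuffer-provided inputs vector with the bounds-checked std::vector::at(). The following is not part of the patch; it is a minimal, self-contained sketch (hypothetical values, no luci dependencies) of why the checked accessor is preferable when the index count comes from an untrusted .circle model file:

    // Standalone example (not from the luci sources): reading an operator input
    // index that may be missing, as can happen with a malformed model.
    #include <cstdint>
    #include <iostream>
    #include <stdexcept>
    #include <vector>

    int main()
    {
      // Pretend this came from the flatbuffer: the operator claims to have a
      // second input, but the vector only holds one index.
      std::vector<int32_t> inputs{7};

      // inputs[1] would be undefined behaviour: no diagnostic, possibly garbage.
      // inputs.at(1) throws std::out_of_range, which an importer can surface as
      // a clean "invalid model" error instead of crashing unpredictably.
      try
      {
        int32_t second_input_index = inputs.at(1);
        std::cout << "second input tensor index: " << second_input_index << '\n';
      }
      catch (const std::out_of_range &e)
      {
        std::cout << "malformed operator inputs: " << e.what() << '\n';
      }
      return 0;
    }

The trade-off is a checked access on a tiny vector per operator, which is negligible next to the cost of parsing the model, so the mechanical inputs[i] -> inputs.at(i) substitution in these builders is essentially free. The CirclePow diff continues below.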
diff --git a/compiler/luci/import/src/Nodes/CirclePow.cpp b/compiler/luci/import/src/Nodes/CirclePow.cpp index ff98331..def0126 100644 --- a/compiler/luci/import/src/Nodes/CirclePow.cpp +++ b/compiler/luci/import/src/Nodes/CirclePow.cpp @@ -39,8 +39,8 @@ CircleNode *CirclePowGraphBuilder::build_node(const circle::OperatorT &, loco::Graph *graph) const { auto *node = graph->nodes()->create(); - node->x(inputs[0]); - node->y(inputs[1]); + node->x(inputs.at(0)); + node->y(inputs.at(1)); // Pow options are empty diff --git a/compiler/luci/import/src/Nodes/CircleRange.cpp b/compiler/luci/import/src/Nodes/CircleRange.cpp index c211916..38dc44e 100644 --- a/compiler/luci/import/src/Nodes/CircleRange.cpp +++ b/compiler/luci/import/src/Nodes/CircleRange.cpp @@ -36,9 +36,9 @@ CircleNode *CircleRangeGraphBuilder::build_node(const circle::OperatorT &, loco::Graph *graph) const { auto *node = graph->nodes()->create(); - node->start(inputs[0]); - node->limit(inputs[1]); - node->delta(inputs[2]); + node->start(inputs.at(0)); + node->limit(inputs.at(1)); + node->delta(inputs.at(2)); return node; } diff --git a/compiler/luci/import/src/Nodes/CircleRank.cpp b/compiler/luci/import/src/Nodes/CircleRank.cpp index 705ae01..12658b1 100644 --- a/compiler/luci/import/src/Nodes/CircleRank.cpp +++ b/compiler/luci/import/src/Nodes/CircleRank.cpp @@ -38,7 +38,7 @@ CircleNode *CircleRankGraphBuilder::build_node(const circle::OperatorT &, loco::Graph *graph) const { auto *node = graph->nodes()->create(); - node->input(inputs[0]); + node->input(inputs.at(0)); return node; } diff --git a/compiler/luci/import/src/Nodes/CircleReduceAny.cpp b/compiler/luci/import/src/Nodes/CircleReduceAny.cpp index 030c530..21a8219 100644 --- a/compiler/luci/import/src/Nodes/CircleReduceAny.cpp +++ b/compiler/luci/import/src/Nodes/CircleReduceAny.cpp @@ -31,8 +31,8 @@ bool CircleReduceAnyGraphBuilder::validate(const ValidateArgs &args) const return false; const auto &tensors = args.reader.tensors(); - const auto &tensor_0 = tensors.at(inputs[0]); - const auto &tensor_1 = tensors.at(inputs[1]); + const auto &tensor_0 = tensors.at(inputs.at(0)); + const auto &tensor_1 = tensors.at(inputs.at(1)); const auto &tensor_o = tensors.at(outputs[0]); if (tensor_0->type != circle::TensorType_BOOL) @@ -57,8 +57,8 @@ CircleNode *CircleReduceAnyGraphBuilder::build_node(const circle::OperatorT &op, loco::Graph *graph) const { auto *node = graph->nodes()->create(); - node->input(inputs[0]); - node->reduction_indices(inputs[1]); + node->input(inputs.at(0)); + node->reduction_indices(inputs.at(1)); const auto *options = op.builtin_options.AsReducerOptions(); node->keep_dims(options->keep_dims); diff --git a/compiler/luci/import/src/Nodes/CircleReduceMax.cpp b/compiler/luci/import/src/Nodes/CircleReduceMax.cpp index 8ca8e2e..05492db 100644 --- a/compiler/luci/import/src/Nodes/CircleReduceMax.cpp +++ b/compiler/luci/import/src/Nodes/CircleReduceMax.cpp @@ -33,7 +33,7 @@ bool CircleReduceMaxGraphBuilder::validate(const ValidateArgs &args) const return false; const auto &tensors = args.reader.tensors(); - const auto &tensor_axis = tensors.at(inputs[1]); + const auto &tensor_axis = tensors.at(inputs.at(1)); switch (tensor_axis->type) { @@ -52,8 +52,8 @@ CircleNode *CircleReduceMaxGraphBuilder::build_node(const circle::OperatorT &op, loco::Graph *graph) const { auto *node = graph->nodes()->create(); - node->input(inputs[0]); - node->reduction_indices(inputs[1]); + node->input(inputs.at(0)); + node->reduction_indices(inputs.at(1)); const auto *options = 
op.builtin_options.AsReducerOptions(); node->keep_dims(options->keep_dims); diff --git a/compiler/luci/import/src/Nodes/CircleReduceMin.cpp b/compiler/luci/import/src/Nodes/CircleReduceMin.cpp index 3020c37..117d529 100644 --- a/compiler/luci/import/src/Nodes/CircleReduceMin.cpp +++ b/compiler/luci/import/src/Nodes/CircleReduceMin.cpp @@ -33,7 +33,7 @@ bool CircleReduceMinGraphBuilder::validate(const ValidateArgs &args) const return false; const auto &tensors = args.reader.tensors(); - const auto &tensor_axis = tensors.at(inputs[1]); + const auto &tensor_axis = tensors.at(inputs.at(1)); switch (tensor_axis->type) { @@ -52,8 +52,8 @@ CircleNode *CircleReduceMinGraphBuilder::build_node(const circle::OperatorT &op, loco::Graph *graph) const { auto *node = graph->nodes()->create(); - node->input(inputs[0]); - node->reduction_indices(inputs[1]); + node->input(inputs.at(0)); + node->reduction_indices(inputs.at(1)); const auto *options = op.builtin_options.AsReducerOptions(); node->keep_dims(options->keep_dims); diff --git a/compiler/luci/import/src/Nodes/CircleReduceProd.cpp b/compiler/luci/import/src/Nodes/CircleReduceProd.cpp index 2bb43f6..5f05458 100644 --- a/compiler/luci/import/src/Nodes/CircleReduceProd.cpp +++ b/compiler/luci/import/src/Nodes/CircleReduceProd.cpp @@ -30,7 +30,7 @@ bool CircleReduceProdGraphBuilder::validate(const ValidateArgs &args) const return false; const auto &tensors = args.reader.tensors(); - const auto &tensor_1 = tensors.at(inputs[1]); + const auto &tensor_1 = tensors.at(inputs.at(1)); // TODO check input types @@ -52,8 +52,8 @@ CircleNode *CircleReduceProdGraphBuilder::build_node(const circle::OperatorT &op loco::Graph *graph) const { auto *node = graph->nodes()->create(); - node->input(inputs[0]); - node->reduction_indices(inputs[1]); + node->input(inputs.at(0)); + node->reduction_indices(inputs.at(1)); const auto *options = op.builtin_options.AsReducerOptions(); node->keep_dims(options->keep_dims); diff --git a/compiler/luci/import/src/Nodes/CircleRelu.cpp b/compiler/luci/import/src/Nodes/CircleRelu.cpp index 056268a..8e1c32a 100644 --- a/compiler/luci/import/src/Nodes/CircleRelu.cpp +++ b/compiler/luci/import/src/Nodes/CircleRelu.cpp @@ -39,7 +39,7 @@ CircleNode *CircleReluGraphBuilder::build_node(const circle::OperatorT &, loco::Graph *graph) const { auto *node = graph->nodes()->create(); - node->features(inputs[0]); + node->features(inputs.at(0)); return node; } diff --git a/compiler/luci/import/src/Nodes/CircleRelu6.cpp b/compiler/luci/import/src/Nodes/CircleRelu6.cpp index 5b44399..0283d73 100644 --- a/compiler/luci/import/src/Nodes/CircleRelu6.cpp +++ b/compiler/luci/import/src/Nodes/CircleRelu6.cpp @@ -39,7 +39,7 @@ CircleNode *CircleRelu6GraphBuilder::build_node(const circle::OperatorT &, loco::Graph *graph) const { auto *node = graph->nodes()->create(); - node->features(inputs[0]); + node->features(inputs.at(0)); return node; } diff --git a/compiler/luci/import/src/Nodes/CircleReluN1To1.cpp b/compiler/luci/import/src/Nodes/CircleReluN1To1.cpp index edf662f..7f517bc 100644 --- a/compiler/luci/import/src/Nodes/CircleReluN1To1.cpp +++ b/compiler/luci/import/src/Nodes/CircleReluN1To1.cpp @@ -41,7 +41,7 @@ CircleNode *CircleReluN1To1GraphBuilder::build_node(const circle::OperatorT &, loco::Graph *graph) const { auto *node = graph->nodes()->create(); - node->features(inputs[0]); + node->features(inputs.at(0)); return node; } diff --git a/compiler/luci/import/src/Nodes/CircleReshape.cpp b/compiler/luci/import/src/Nodes/CircleReshape.cpp index 
f72c152..996ae9d 100644 --- a/compiler/luci/import/src/Nodes/CircleReshape.cpp +++ b/compiler/luci/import/src/Nodes/CircleReshape.cpp @@ -62,7 +62,7 @@ CircleNode *CircleReshapeGraphBuilder::build_node(const circle::OperatorT &op, { // If the second input is not provided, generate it based on the value of the attribute. // TODO Presence of the second input is the current requirement of the IR. - auto *shape_node = (inputs.size() == 2) ? inputs[1] : nullptr; + auto *shape_node = (inputs.size() == 2) ? inputs.at(1) : nullptr; if (shape_node == nullptr) { const auto *options = op.builtin_options.AsReshapeOptions(); @@ -77,7 +77,7 @@ CircleNode *CircleReshapeGraphBuilder::build_node(const circle::OperatorT &op, } auto *node = graph->nodes()->create(); - node->tensor(inputs[0]); + node->tensor(inputs.at(0)); node->shape(shape_node); const auto *options = op.builtin_options.AsReshapeOptions(); diff --git a/compiler/luci/import/src/Nodes/CircleResizeBilinear.cpp b/compiler/luci/import/src/Nodes/CircleResizeBilinear.cpp index 6128f1b..0fccb7b 100644 --- a/compiler/luci/import/src/Nodes/CircleResizeBilinear.cpp +++ b/compiler/luci/import/src/Nodes/CircleResizeBilinear.cpp @@ -38,8 +38,8 @@ CircleNode *CircleResizeBilinearGraphBuilder::build_node(const circle::OperatorT loco::Graph *graph) const { auto *node = graph->nodes()->create(); - node->input(inputs[0]); - node->size(inputs[1]); + node->input(inputs.at(0)); + node->size(inputs.at(1)); const auto *options = op.builtin_options.AsResizeBilinearOptions(); node->align_corners(options->align_corners); diff --git a/compiler/luci/import/src/Nodes/CircleResizeNearestNeighbor.cpp b/compiler/luci/import/src/Nodes/CircleResizeNearestNeighbor.cpp index a1f1ef0..324323f 100644 --- a/compiler/luci/import/src/Nodes/CircleResizeNearestNeighbor.cpp +++ b/compiler/luci/import/src/Nodes/CircleResizeNearestNeighbor.cpp @@ -37,8 +37,8 @@ CircleNode *CircleResizeNearestNeighborGraphBuilder::build_node( const circle::OperatorT &op, const std::vector &inputs, loco::Graph *graph) const { auto *node = graph->nodes()->create(); - node->input(inputs[0]); - node->size(inputs[1]); + node->input(inputs.at(0)); + node->size(inputs.at(1)); const auto *options = op.builtin_options.AsResizeNearestNeighborOptions(); node->align_corners(options->align_corners); diff --git a/compiler/luci/import/src/Nodes/CircleReverseSequence.cpp b/compiler/luci/import/src/Nodes/CircleReverseSequence.cpp index 72d3b15..ad11d4c 100644 --- a/compiler/luci/import/src/Nodes/CircleReverseSequence.cpp +++ b/compiler/luci/import/src/Nodes/CircleReverseSequence.cpp @@ -34,8 +34,8 @@ bool CircleReverseSequenceGraphBuilder::validate(const ValidateArgs &args) const return false; const auto &tensors = args.reader.tensors(); - const auto &tensor_in = tensors.at(inputs[0]); - const auto &tensor_lengths = tensors.at(inputs[1]); + const auto &tensor_in = tensors.at(inputs.at(0)); + const auto &tensor_lengths = tensors.at(inputs.at(1)); const auto &tensor_out = tensors.at(outputs[0]); switch (tensor_lengths->type) @@ -58,8 +58,8 @@ CircleNode *CircleReverseSequenceGraphBuilder::build_node(const circle::Operator loco::Graph *graph) const { auto *node = graph->nodes()->create(); - node->input(inputs[0]); - node->seq_lengths(inputs[1]); + node->input(inputs.at(0)); + node->seq_lengths(inputs.at(1)); const auto *options = op.builtin_options.AsReverseSequenceOptions(); node->seq_axis(options->seq_dim); diff --git a/compiler/luci/import/src/Nodes/CircleReverseV2.cpp b/compiler/luci/import/src/Nodes/CircleReverseV2.cpp 
index cd18128..e2e53bb 100644 --- a/compiler/luci/import/src/Nodes/CircleReverseV2.cpp +++ b/compiler/luci/import/src/Nodes/CircleReverseV2.cpp @@ -34,8 +34,8 @@ bool CircleReverseV2GraphBuilder::validate(const ValidateArgs &args) const return false; const auto &tensors = args.reader.tensors(); - const auto &tensor_in = tensors.at(inputs[0]); - const auto &tensor_axis = tensors.at(inputs[1]); + const auto &tensor_in = tensors.at(inputs.at(0)); + const auto &tensor_axis = tensors.at(inputs.at(1)); const auto &tensor_out = tensors.at(outputs[0]); switch (tensor_axis->type) @@ -58,8 +58,8 @@ CircleNode *CircleReverseV2GraphBuilder::build_node(const circle::OperatorT &, loco::Graph *graph) const { auto *node = graph->nodes()->create(); - node->tensor(inputs[0]); - node->axis(inputs[1]); + node->tensor(inputs.at(0)); + node->axis(inputs.at(1)); return node; } diff --git a/compiler/luci/import/src/Nodes/CircleRound.cpp b/compiler/luci/import/src/Nodes/CircleRound.cpp index 8964895..ad77f9f 100644 --- a/compiler/luci/import/src/Nodes/CircleRound.cpp +++ b/compiler/luci/import/src/Nodes/CircleRound.cpp @@ -37,7 +37,7 @@ bool CircleRoundGraphBuilder::validate(const ValidateArgs &args) const // bfloat16, half (float16), float32, float64, complex64, complex128 // Currently, circle supports float16, float32, complex64 const auto &tensors = args.reader.tensors(); - const auto &tensor_in = tensors.at(inputs[0]); + const auto &tensor_in = tensors.at(inputs.at(0)); const auto &tensor_out = tensors.at(outputs[0]); switch (tensor_in->type) @@ -63,7 +63,7 @@ CircleNode *CircleRoundGraphBuilder::build_node(const circle::OperatorT &, loco::Graph *graph) const { auto *node = graph->nodes()->create(); - node->x(inputs[0]); + node->x(inputs.at(0)); return node; } diff --git a/compiler/luci/import/src/Nodes/CircleRsqrt.cpp b/compiler/luci/import/src/Nodes/CircleRsqrt.cpp index b5de0b5..ae05fbb 100644 --- a/compiler/luci/import/src/Nodes/CircleRsqrt.cpp +++ b/compiler/luci/import/src/Nodes/CircleRsqrt.cpp @@ -33,7 +33,7 @@ bool CircleRsqrtGraphBuilder::validate(const ValidateArgs &args) const // bfloat16, half (float16), float32, float64, complex64, complex128 // Currently, circle supports float16, float32, complex64 const auto &tensors = args.reader.tensors(); - const auto &tensor = tensors.at(inputs[0]); + const auto &tensor = tensors.at(inputs.at(0)); switch (tensor->type) { case circle::TensorType_FLOAT16: @@ -52,7 +52,7 @@ CircleNode *CircleRsqrtGraphBuilder::build_node(const circle::OperatorT &, loco::Graph *graph) const { auto *node = graph->nodes()->create(); - node->x(inputs[0]); + node->x(inputs.at(0)); return node; } diff --git a/compiler/luci/import/src/Nodes/CircleScatterNd.cpp b/compiler/luci/import/src/Nodes/CircleScatterNd.cpp index adcaa00..7f86aeb 100644 --- a/compiler/luci/import/src/Nodes/CircleScatterNd.cpp +++ b/compiler/luci/import/src/Nodes/CircleScatterNd.cpp @@ -32,12 +32,12 @@ bool CircleScatterNdGraphBuilder::validate(const ValidateArgs &args) const // indices must have the same type as shape const auto &tensors = args.reader.tensors(); - if (tensors[inputs[0]]->type != tensors[inputs[2]]->type) + if (tensors[inputs.at(0)]->type != tensors[inputs.at(2)]->type) return false; // indices must be either int32 or int64 - if (tensors[inputs[0]]->type != circle::TensorType_INT32 && - tensors[inputs[0]]->type != circle::TensorType_INT64) + if (tensors[inputs.at(0)]->type != circle::TensorType_INT32 && + tensors[inputs.at(0)]->type != circle::TensorType_INT64) return false; return true; @@ -48,9 
+48,9 @@ CircleNode *CircleScatterNdGraphBuilder::build_node(const circle::OperatorT &, loco::Graph *graph) const { auto *node = graph->nodes()->create(); - node->indices(inputs[0]); - node->updates(inputs[1]); - node->shape(inputs[2]); + node->indices(inputs.at(0)); + node->updates(inputs.at(1)); + node->shape(inputs.at(2)); return node; } diff --git a/compiler/luci/import/src/Nodes/CircleSegmentSum.cpp b/compiler/luci/import/src/Nodes/CircleSegmentSum.cpp index 1122bdc..fb84e5d 100644 --- a/compiler/luci/import/src/Nodes/CircleSegmentSum.cpp +++ b/compiler/luci/import/src/Nodes/CircleSegmentSum.cpp @@ -33,9 +33,9 @@ bool CircleSegmentSumGraphBuilder::validate(const ValidateArgs &args) const return false; const auto &tensors = args.reader.tensors(); - const auto &tensor_in = tensors.at(inputs[0]); + const auto &tensor_in = tensors.at(inputs.at(0)); const auto &tensor_out = tensors.at(outputs[0]); - const auto &tensor_ids = tensors.at(inputs[1]); + const auto &tensor_ids = tensors.at(inputs.at(1)); switch (tensor_ids->type) { @@ -59,8 +59,8 @@ CircleNode *CircleSegmentSumGraphBuilder::build_node(const circle::OperatorT &, loco::Graph *graph) const { auto *node = graph->nodes()->create(); - node->input(inputs[0]); - node->segment_ids(inputs[1]); + node->input(inputs.at(0)); + node->segment_ids(inputs.at(1)); return node; } diff --git a/compiler/luci/import/src/Nodes/CircleSelect.cpp b/compiler/luci/import/src/Nodes/CircleSelect.cpp index ff94212..1e649f1 100644 --- a/compiler/luci/import/src/Nodes/CircleSelect.cpp +++ b/compiler/luci/import/src/Nodes/CircleSelect.cpp @@ -33,7 +33,7 @@ bool CircleSelectGraphBuilder::validate(const ValidateArgs &args) const return false; const auto &tensors = args.reader.tensors(); - const auto &tensor = tensors.at(inputs[0]); + const auto &tensor = tensors.at(inputs.at(0)); if (tensor->type != circle::TensorType_BOOL) return false; // TODO check dtypes for input 1, 2 @@ -46,9 +46,9 @@ CircleNode *CircleSelectGraphBuilder::build_node(const circle::OperatorT &, loco::Graph *graph) const { auto *node = graph->nodes()->create(); - node->condition(inputs[0]); - node->t(inputs[1]); - node->e(inputs[2]); + node->condition(inputs.at(0)); + node->t(inputs.at(1)); + node->e(inputs.at(2)); return node; } diff --git a/compiler/luci/import/src/Nodes/CircleSelectV2.cpp b/compiler/luci/import/src/Nodes/CircleSelectV2.cpp index 78b2e64..e6dd04d 100644 --- a/compiler/luci/import/src/Nodes/CircleSelectV2.cpp +++ b/compiler/luci/import/src/Nodes/CircleSelectV2.cpp @@ -33,12 +33,12 @@ bool CircleSelectV2GraphBuilder::validate(const ValidateArgs &args) const return false; const auto &tensors = args.reader.tensors(); - const auto &condition = tensors.at(inputs[0]); + const auto &condition = tensors.at(inputs.at(0)); if (condition->type != circle::TensorType_BOOL) return false; - const auto &t = tensors.at(inputs[1]); - const auto &e = tensors.at(inputs[2]); + const auto &t = tensors.at(inputs.at(1)); + const auto &e = tensors.at(inputs.at(2)); if (t->type != e->type) return false; @@ -50,9 +50,9 @@ CircleNode *CircleSelectV2GraphBuilder::build_node(const circle::OperatorT &, loco::Graph *graph) const { auto *node = graph->nodes()->create(); - node->condition(inputs[0]); - node->t(inputs[1]); - node->e(inputs[2]); + node->condition(inputs.at(0)); + node->t(inputs.at(1)); + node->e(inputs.at(2)); return node; } diff --git a/compiler/luci/import/src/Nodes/CircleShape.cpp b/compiler/luci/import/src/Nodes/CircleShape.cpp index 864b5eb..bd7dfc9 100644 --- 
a/compiler/luci/import/src/Nodes/CircleShape.cpp +++ b/compiler/luci/import/src/Nodes/CircleShape.cpp @@ -42,7 +42,7 @@ CircleNode *CircleShapeGraphBuilder::build_node(const circle::OperatorT &op, loco::Graph *graph) const { auto *node = graph->nodes()->create(); - node->input(inputs[0]); + node->input(inputs.at(0)); const auto *options = op.builtin_options.AsShapeOptions(); node->out_type(luci_datatype(options->out_type)); diff --git a/compiler/luci/import/src/Nodes/CircleSin.cpp b/compiler/luci/import/src/Nodes/CircleSin.cpp index 61d60c7..4b245ef 100644 --- a/compiler/luci/import/src/Nodes/CircleSin.cpp +++ b/compiler/luci/import/src/Nodes/CircleSin.cpp @@ -33,7 +33,7 @@ bool CircleSinGraphBuilder::validate(const ValidateArgs &args) const // input type check const auto &tensors = args.reader.tensors(); - const auto &tensor = tensors.at(inputs[0]); + const auto &tensor = tensors.at(inputs.at(0)); switch (tensor->type) { case circle::TensorType_FLOAT16: @@ -53,7 +53,7 @@ CircleNode *CircleSinGraphBuilder::build_node(const circle::OperatorT &, loco::Graph *graph) const { auto *node = graph->nodes()->create(); - node->x(inputs[0]); + node->x(inputs.at(0)); // No options for Sin diff --git a/compiler/luci/import/src/Nodes/CircleSlice.cpp b/compiler/luci/import/src/Nodes/CircleSlice.cpp index 313c355..8601fbf 100644 --- a/compiler/luci/import/src/Nodes/CircleSlice.cpp +++ b/compiler/luci/import/src/Nodes/CircleSlice.cpp @@ -42,9 +42,9 @@ CircleNode *CircleSliceGraphBuilder::build_node(const circle::OperatorT &, loco::Graph *graph) const { auto *node = graph->nodes()->create(); - node->input(inputs[0]); - node->begin(inputs[1]); - node->size(inputs[2]); + node->input(inputs.at(0)); + node->begin(inputs.at(1)); + node->size(inputs.at(2)); return node; } diff --git a/compiler/luci/import/src/Nodes/CircleSoftmax.cpp b/compiler/luci/import/src/Nodes/CircleSoftmax.cpp index 0d316e1..0ef0b54 100644 --- a/compiler/luci/import/src/Nodes/CircleSoftmax.cpp +++ b/compiler/luci/import/src/Nodes/CircleSoftmax.cpp @@ -38,7 +38,7 @@ CircleNode *CircleSoftmaxGraphBuilder::build_node(const circle::OperatorT &op, loco::Graph *graph) const { auto *node = graph->nodes()->create(); - node->logits(inputs[0]); + node->logits(inputs.at(0)); const auto *options = op.builtin_options.AsSoftmaxOptions(); node->beta(options->beta); diff --git a/compiler/luci/import/src/Nodes/CircleSpaceToBatchND.cpp b/compiler/luci/import/src/Nodes/CircleSpaceToBatchND.cpp index f1361fb..c1d508e 100644 --- a/compiler/luci/import/src/Nodes/CircleSpaceToBatchND.cpp +++ b/compiler/luci/import/src/Nodes/CircleSpaceToBatchND.cpp @@ -33,7 +33,7 @@ bool CircleSpaceToBatchNDGraphBuilder::validate(const ValidateArgs &args) const // input 1 and 2 should have INT32/INT64 type const auto &tensors = args.reader.tensors(); - const auto &tensor_1 = tensors.at(inputs[1]); + const auto &tensor_1 = tensors.at(inputs.at(1)); switch (tensor_1->type) { case circle::TensorType_INT32: @@ -42,7 +42,7 @@ bool CircleSpaceToBatchNDGraphBuilder::validate(const ValidateArgs &args) const default: return false; } - const auto &tensor_2 = tensors.at(inputs[2]); + const auto &tensor_2 = tensors.at(inputs.at(2)); switch (tensor_2->type) { case circle::TensorType_INT32: @@ -53,7 +53,7 @@ bool CircleSpaceToBatchNDGraphBuilder::validate(const ValidateArgs &args) const } // Only support input shape dimension 3 and 4 only - const auto &tensor_0 = tensors.at(inputs[0]); + const auto &tensor_0 = tensors.at(inputs.at(0)); const auto t_0_s = tensor_0->shape.size(); if (t_0_s != 3 
&& t_0_s != 4) return false; @@ -68,9 +68,9 @@ CircleNode *CircleSpaceToBatchNDGraphBuilder::build_node(const circle::OperatorT loco::Graph *graph) const { auto *node = graph->nodes()->create(); - node->input(inputs[0]); - node->block_shape(inputs[1]); - node->paddings(inputs[2]); + node->input(inputs.at(0)); + node->block_shape(inputs.at(1)); + node->paddings(inputs.at(2)); // No options for SpaceToBatchND diff --git a/compiler/luci/import/src/Nodes/CircleSpaceToDepth.cpp b/compiler/luci/import/src/Nodes/CircleSpaceToDepth.cpp index b612c9a..8ccd55d 100644 --- a/compiler/luci/import/src/Nodes/CircleSpaceToDepth.cpp +++ b/compiler/luci/import/src/Nodes/CircleSpaceToDepth.cpp @@ -41,7 +41,7 @@ CircleNode *CircleSpaceToDepthGraphBuilder::build_node(const circle::OperatorT & loco::Graph *graph) const { auto *node = graph->nodes()->create(); - node->input(inputs[0]); + node->input(inputs.at(0)); const auto *options = op.builtin_options.AsSpaceToDepthOptions(); node->block_size(options->block_size); diff --git a/compiler/luci/import/src/Nodes/CircleSparseToDense.cpp b/compiler/luci/import/src/Nodes/CircleSparseToDense.cpp index bfe790f..26d575e 100644 --- a/compiler/luci/import/src/Nodes/CircleSparseToDense.cpp +++ b/compiler/luci/import/src/Nodes/CircleSparseToDense.cpp @@ -36,10 +36,10 @@ CircleNode *CircleSparseToDenseGraphBuilder::build_node(const circle::OperatorT loco::Graph *graph) const { auto *node = graph->nodes()->create(); - node->indices(inputs[0]); - node->output_shape(inputs[1]); - node->values(inputs[2]); - node->default_value(inputs[3]); + node->indices(inputs.at(0)); + node->output_shape(inputs.at(1)); + node->values(inputs.at(2)); + node->default_value(inputs.at(3)); const auto *options = op.builtin_options.AsSparseToDenseOptions(); node->validate_indices(options->validate_indices); diff --git a/compiler/luci/import/src/Nodes/CircleSqrt.cpp b/compiler/luci/import/src/Nodes/CircleSqrt.cpp index 8a90f66..c8beaee 100644 --- a/compiler/luci/import/src/Nodes/CircleSqrt.cpp +++ b/compiler/luci/import/src/Nodes/CircleSqrt.cpp @@ -36,7 +36,7 @@ CircleNode *CircleSqrtGraphBuilder::build_node(const circle::OperatorT &, loco::Graph *graph) const { auto *node = graph->nodes()->create(); - node->x(inputs[0]); + node->x(inputs.at(0)); return node; } diff --git a/compiler/luci/import/src/Nodes/CircleSquare.cpp b/compiler/luci/import/src/Nodes/CircleSquare.cpp index 8398548..b5ba048 100644 --- a/compiler/luci/import/src/Nodes/CircleSquare.cpp +++ b/compiler/luci/import/src/Nodes/CircleSquare.cpp @@ -33,7 +33,7 @@ bool CircleSquareGraphBuilder::validate(const ValidateArgs &args) const // bfloat16, half (float16), float32, float64, complex64, complex128 // Currently, circle supports float16, float32, complex64 const auto &tensors = args.reader.tensors(); - const auto &tensor = tensors.at(inputs[0]); + const auto &tensor = tensors.at(inputs.at(0)); switch (tensor->type) { case circle::TensorType_INT32: @@ -55,7 +55,7 @@ CircleNode *CircleSquareGraphBuilder::build_node(const circle::OperatorT &, loco::Graph *graph) const { auto *node = graph->nodes()->create(); - node->x(inputs[0]); + node->x(inputs.at(0)); return node; } diff --git a/compiler/luci/import/src/Nodes/CircleSquaredDifference.cpp b/compiler/luci/import/src/Nodes/CircleSquaredDifference.cpp index 93ce959..6deae94 100644 --- a/compiler/luci/import/src/Nodes/CircleSquaredDifference.cpp +++ b/compiler/luci/import/src/Nodes/CircleSquaredDifference.cpp @@ -37,7 +37,7 @@ bool CircleSquaredDifferenceGraphBuilder::validate(const ValidateArgs 
&args) con // Inputs must be one of the following types // bfloat16, half(float16), float32, float64, int32, int64, complex64, complex128 const auto &tensors = args.reader.tensors(); - const auto &tensor = tensors.at(inputs[0]); + const auto &tensor = tensors.at(inputs.at(0)); switch (tensor->type) { case circle::TensorType_FLOAT16: @@ -53,11 +53,11 @@ bool CircleSquaredDifferenceGraphBuilder::validate(const ValidateArgs &args) con } // Input types must match - if (tensors.at(inputs[0])->type != tensors.at(inputs[1])->type) + if (tensors.at(inputs.at(0))->type != tensors.at(inputs.at(1))->type) return false; // Input and output types must match - if (tensors.at(inputs[0])->type != tensors.at(outputs[0])->type) + if (tensors.at(inputs.at(0))->type != tensors.at(outputs[0])->type) return false; return true; @@ -68,8 +68,8 @@ CircleNode *CircleSquaredDifferenceGraphBuilder::build_node(const circle::Operat loco::Graph *graph) const { auto *node = graph->nodes()->create(); - node->x(inputs[0]); - node->y(inputs[1]); + node->x(inputs.at(0)); + node->y(inputs.at(1)); return node; } diff --git a/compiler/luci/import/src/Nodes/CircleSqueeze.cpp b/compiler/luci/import/src/Nodes/CircleSqueeze.cpp index a5252d0..32792c2 100644 --- a/compiler/luci/import/src/Nodes/CircleSqueeze.cpp +++ b/compiler/luci/import/src/Nodes/CircleSqueeze.cpp @@ -38,7 +38,7 @@ CircleNode *CircleSqueezeGraphBuilder::build_node(const circle::OperatorT &op, loco::Graph *graph) const { auto *node = graph->nodes()->create(); - node->input(inputs[0]); + node->input(inputs.at(0)); const auto *options = op.builtin_options.AsSqueezeOptions(); assert(options); diff --git a/compiler/luci/import/src/Nodes/CircleStridedSlice.cpp b/compiler/luci/import/src/Nodes/CircleStridedSlice.cpp index 95e4467..8f943a6 100644 --- a/compiler/luci/import/src/Nodes/CircleStridedSlice.cpp +++ b/compiler/luci/import/src/Nodes/CircleStridedSlice.cpp @@ -42,10 +42,10 @@ CircleNode *CircleStridedSliceGraphBuilder::build_node(const circle::OperatorT & loco::Graph *graph) const { auto *node = graph->nodes()->create(); - node->input(inputs[0]); - node->begin(inputs[1]); - node->end(inputs[2]); - node->strides(inputs[3]); + node->input(inputs.at(0)); + node->begin(inputs.at(1)); + node->end(inputs.at(2)); + node->strides(inputs.at(3)); const auto *options = op.builtin_options.AsStridedSliceOptions(); node->begin_mask(options->begin_mask); diff --git a/compiler/luci/import/src/Nodes/CircleSub.cpp b/compiler/luci/import/src/Nodes/CircleSub.cpp index 968e9f5..9acf83d 100644 --- a/compiler/luci/import/src/Nodes/CircleSub.cpp +++ b/compiler/luci/import/src/Nodes/CircleSub.cpp @@ -39,8 +39,8 @@ CircleNode *CircleSubGraphBuilder::build_node(const circle::OperatorT &op, loco::Graph *graph) const { auto *node = graph->nodes()->create(); - node->x(inputs[0]); - node->y(inputs[1]); + node->x(inputs.at(0)); + node->y(inputs.at(1)); const auto *options = op.builtin_options.AsSubOptions(); node->fusedActivationFunction(luci_actfunc(options->fused_activation_function)); diff --git a/compiler/luci/import/src/Nodes/CircleSum.cpp b/compiler/luci/import/src/Nodes/CircleSum.cpp index b4865de..bd3cb62 100644 --- a/compiler/luci/import/src/Nodes/CircleSum.cpp +++ b/compiler/luci/import/src/Nodes/CircleSum.cpp @@ -34,8 +34,8 @@ CircleNode *CircleSumGraphBuilder::build_node(const circle::OperatorT &op, loco::Graph *graph) const { auto *node = graph->nodes()->create(); - node->input(inputs[0]); - node->reduction_indices(inputs[1]); + node->input(inputs.at(0)); + 
node->reduction_indices(inputs.at(1)); const auto *options = op.builtin_options.AsReducerOptions(); node->keep_dims(options->keep_dims); diff --git a/compiler/luci/import/src/Nodes/CircleTanh.cpp b/compiler/luci/import/src/Nodes/CircleTanh.cpp index 8986378..018f570 100644 --- a/compiler/luci/import/src/Nodes/CircleTanh.cpp +++ b/compiler/luci/import/src/Nodes/CircleTanh.cpp @@ -28,21 +28,13 @@ bool CircleTanhGraphBuilder::validate(const ValidateArgs &args) const const auto &inputs = args.op.inputs; if (inputs.size() != 1) return false; + const auto &outputs = args.op.outputs; + if (outputs.size() != 1) + return false; - // Must be one of the following types - // bfloat16, half (float16), float32, float64, complex64, complex128 - // Currently, circle supports float16, float32, complex64 const auto &tensors = args.reader.tensors(); - const auto &tensor = tensors.at(inputs[0]); - switch (tensor->type) - { - case circle::TensorType_FLOAT16: - case circle::TensorType_FLOAT32: - case circle::TensorType_COMPLEX64: - break; - default: - return false; - } + if (tensors.at(inputs.at(0))->type != tensors.at(outputs[0])->type) + return false; return true; } @@ -52,7 +44,7 @@ CircleNode *CircleTanhGraphBuilder::build_node(const circle::OperatorT &, loco::Graph *graph) const { auto *node = graph->nodes()->create(); - node->x(inputs[0]); + node->x(inputs.at(0)); return node; } diff --git a/compiler/luci/import/src/Nodes/CircleTile.cpp b/compiler/luci/import/src/Nodes/CircleTile.cpp index 91054ce..bc6f320 100644 --- a/compiler/luci/import/src/Nodes/CircleTile.cpp +++ b/compiler/luci/import/src/Nodes/CircleTile.cpp @@ -34,10 +34,10 @@ bool CircleTileGraphBuilder::validate(const ValidateArgs &args) const if (outputs.size() != 1) return false; - // Multiples (inputs[1]) must be one of the following types + // Multiples (inputs.at(1)) must be one of the following types // int32, int64 const auto &tensors = args.reader.tensors(); - const auto &tensor = tensors.at(inputs[1]); + const auto &tensor = tensors.at(inputs.at(1)); switch (tensor->type) { case circle::TensorType_INT32: @@ -48,7 +48,7 @@ bool CircleTileGraphBuilder::validate(const ValidateArgs &args) const } // Type of input and output must be the same - if (tensors.at(inputs[0])->type != tensors.at(outputs[0])->type) + if (tensors.at(inputs.at(0))->type != tensors.at(outputs[0])->type) return false; return true; @@ -59,8 +59,8 @@ CircleNode *CircleTileGraphBuilder::build_node(const circle::OperatorT &, loco::Graph *graph) const { auto *node = graph->nodes()->create(); - node->input(inputs[0]); - node->multiples(inputs[1]); + node->input(inputs.at(0)); + node->multiples(inputs.at(1)); return node; } diff --git a/compiler/luci/import/src/Nodes/CircleTopKV2.cpp b/compiler/luci/import/src/Nodes/CircleTopKV2.cpp index 5c1051c..f0677de 100644 --- a/compiler/luci/import/src/Nodes/CircleTopKV2.cpp +++ b/compiler/luci/import/src/Nodes/CircleTopKV2.cpp @@ -36,7 +36,7 @@ bool CircleTopKV2GraphBuilder::validate(const ValidateArgs &args) const return false; const auto &tensors = args.reader.tensors(); - const auto &tensor = tensors.at(inputs[1]); + const auto &tensor = tensors.at(inputs.at(1)); if (tensor->type != circle::TensorType_INT32) return false; diff --git a/compiler/luci/import/src/Nodes/CircleTranspose.cpp b/compiler/luci/import/src/Nodes/CircleTranspose.cpp index 8622c8b..cc31530 100644 --- a/compiler/luci/import/src/Nodes/CircleTranspose.cpp +++ b/compiler/luci/import/src/Nodes/CircleTranspose.cpp @@ -39,8 +39,8 @@ CircleNode 
*CircleTransposeGraphBuilder::build_node(const circle::OperatorT &op, loco::Graph *graph) const { auto *node = graph->nodes()->create(); - node->a(inputs[0]); - node->perm(inputs[1]); + node->a(inputs.at(0)); + node->perm(inputs.at(1)); const auto *options = op.builtin_options.AsTransposeOptions(); (void)options; diff --git a/compiler/luci/import/src/Nodes/CircleTransposeConv.cpp b/compiler/luci/import/src/Nodes/CircleTransposeConv.cpp index 7bdf46d..ddb1966 100644 --- a/compiler/luci/import/src/Nodes/CircleTransposeConv.cpp +++ b/compiler/luci/import/src/Nodes/CircleTransposeConv.cpp @@ -30,6 +30,24 @@ bool CircleTransposeConvGraphBuilder::validate(const ValidateArgs &args) const if (args.op.inputs.size() != 3) return false; + const auto &inputs = args.op.inputs; + const auto &tensors = args.reader.tensors(); + const auto &filter_tensor = tensors.at(inputs.at(1)); + const auto &filter_shape = filter_tensor.get()->shape; + const auto &ifm_tensor = tensors.at(inputs.at(2)); + const auto &ifm_shape = ifm_tensor.get()->shape; + + // ifm and filters must be 4-D tensor + if (ifm_shape.size() != 4) + return false; + if (filter_shape.size() != 4) + return false; + + // input shape : [batch, height, width, in_channels] + // filters shape : [output_channels, height, weight, in_channels] + if (ifm_tensor.get()->shape.at(3) != filter_tensor.get()->shape.at(3)) + return false; + return true; } @@ -39,9 +57,9 @@ CircleNode *CircleTransposeConvGraphBuilder::build_node(const circle::OperatorT { auto *node = graph->nodes()->create(); - node->inputSizes(inputs[0]); - node->filter(inputs[1]); - node->outBackprop(inputs[2]); + node->inputSizes(inputs.at(0)); + node->filter(inputs.at(1)); + node->outBackprop(inputs.at(2)); const auto *options = op.builtin_options.AsTransposeConvOptions(); node->padding(luci_padding(options->padding)); diff --git a/compiler/luci/import/src/Nodes/CircleUnique.cpp b/compiler/luci/import/src/Nodes/CircleUnique.cpp new file mode 100644 index 0000000..5e79a29 --- /dev/null +++ b/compiler/luci/import/src/Nodes/CircleUnique.cpp @@ -0,0 +1,89 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "luci/Import/Nodes/CircleUnique.h" + +#include +#include + +#include + +namespace luci +{ + +bool CircleUniqueGraphBuilder::validate(const ValidateArgs &args) const +{ + if (args.op.inputs.size() != 1) + return false; + + if (args.op.outputs.size() != 2) + return false; + + return true; +} + +void CircleUniqueGraphBuilder::build(const circle::OperatorT &op, + GraphBuilderContext *context) const +{ + assert(context != nullptr); + + auto graph = context->graph(); + + const std::vector &inputs = op.inputs; + const std::vector &outputs = op.outputs; + const auto &tensors = context->reader()->tensors(); + auto tensors_ptr = context->reader()->tensors_ptr(); + assert(tensors_ptr != nullptr); + + std::vector input_nodes; + for (const int32_t input_tensor_index : inputs) + { + input_nodes.push_back(context->nodefinder()->node(input_tensor_index)); + } + + // Create CircleUnique + auto node = graph->nodes()->create(); + node->input(input_nodes[0]); + + const auto *options = op.builtin_options.AsUniqueOptions(); + node->output_type(luci_datatype(options->idx_out_type)); + + assert(int32_t(outputs.size()) == 2); + // Let's use name of output 0 as Unique name + const circle::TensorT &output_tensor = *tensors[outputs[0]]; + node->name(tensor_name(output_tensor)); + + // Create virtual outputs of Unique + for (int32_t n = 0; n < 2; ++n) + { + const circle::TensorT &output_tensor = *tensors[outputs[n]]; + + auto *nodeout = graph->nodes()->create(); + copy_tensor_attributes(output_tensor, nodeout); + // mark shape_status + if (tensors_ptr->Get(outputs[n])->shape() == nullptr) + nodeout->shape_status(ShapeStatus::NOSHAPE); + else + nodeout->shape_status(ShapeStatus::VALID); + + nodeout->input(node); + nodeout->index(n); + + context->nodefinder()->enroll(outputs[n], nodeout); + } +} + +} // namespace luci diff --git a/compiler/luci/import/src/Nodes/CircleUnpack.cpp b/compiler/luci/import/src/Nodes/CircleUnpack.cpp index c4282e2..9e7f3d3 100644 --- a/compiler/luci/import/src/Nodes/CircleUnpack.cpp +++ b/compiler/luci/import/src/Nodes/CircleUnpack.cpp @@ -59,7 +59,7 @@ bool CircleUnpackGraphBuilder::validate(const ValidateArgs &args) const return false; const auto &tensors = args.reader.tensors(); - const auto &tensor = tensors.at(inputs[0]); + const auto &tensor = tensors.at(inputs.at(0)); const auto &shape = tensor->shape; auto shape_size = static_cast(shape.size()); if (shape_size > 0) diff --git a/compiler/luci/import/src/Nodes/CircleWhere.cpp b/compiler/luci/import/src/Nodes/CircleWhere.cpp index a13c4d6..f4c5f0c 100644 --- a/compiler/luci/import/src/Nodes/CircleWhere.cpp +++ b/compiler/luci/import/src/Nodes/CircleWhere.cpp @@ -35,7 +35,7 @@ bool CircleWhereGraphBuilder::validate(const ValidateArgs &args) const return false; const auto &tensors = args.reader.tensors(); - const auto &tensor_condition = tensors.at(inputs[0]); + const auto &tensor_condition = tensors.at(inputs.at(0)); const auto &tensor_out = tensors.at(outputs[0]); if (tensor_condition->type != circle::TensorType_BOOL) @@ -52,7 +52,7 @@ CircleNode *CircleWhereGraphBuilder::build_node(const circle::OperatorT &, loco::Graph *graph) const { auto *node = graph->nodes()->create(); - node->condition(inputs[0]); + node->condition(inputs.at(0)); return node; } diff --git a/compiler/luci/import/src/Nodes/CircleZerosLike.cpp b/compiler/luci/import/src/Nodes/CircleZerosLike.cpp index 4362925..e60424d 100644 --- a/compiler/luci/import/src/Nodes/CircleZerosLike.cpp +++ b/compiler/luci/import/src/Nodes/CircleZerosLike.cpp @@ -39,7 +39,7 
@@ CircleNode *CircleZerosLikeGraphBuilder::build_node(const circle::OperatorT &, loco::Graph *graph) const { auto *node = graph->nodes()->create(); - node->input(inputs[0]); + node->input(inputs.at(0)); // ZerosLikeOptinos are empty diff --git a/compiler/luci/lang/include/luci/IR/CircleNodes.h b/compiler/luci/lang/include/luci/IR/CircleNodes.h index 3b31149..e57f5bb 100644 --- a/compiler/luci/lang/include/luci/IR/CircleNodes.h +++ b/compiler/luci/lang/include/luci/IR/CircleNodes.h @@ -70,10 +70,12 @@ #include "Nodes/CircleMirrorPad.h" #include "Nodes/CircleMul.h" #include "Nodes/CircleNeg.h" +#include "Nodes/CircleNonMaxSuppressionV4.h" #include "Nodes/CircleNotEqual.h" #include "Nodes/CircleOneHot.h" #include "Nodes/CirclePack.h" #include "Nodes/CirclePad.h" +#include "Nodes/CirclePadV2.h" #include "Nodes/CirclePow.h" #include "Nodes/CirclePRelu.h" #include "Nodes/CircleRange.h" @@ -117,6 +119,7 @@ #include "Nodes/CircleTopKV2.h" #include "Nodes/CircleTranspose.h" #include "Nodes/CircleTransposeConv.h" +#include "Nodes/CircleUnique.h" #include "Nodes/CircleUnpack.h" #include "Nodes/CircleWhere.h" #include "Nodes/CircleWhile.h" @@ -130,7 +133,9 @@ #include "Nodes/CircleOutput.h" #include "Nodes/CircleCustomOut.h" #include "Nodes/CircleIfOut.h" +#include "Nodes/CircleNonMaxSuppressionV4Out.h" #include "Nodes/CircleUnpackOut.h" +#include "Nodes/CircleUniqueOut.h" #include "Nodes/CircleSplitOut.h" #include "Nodes/CircleSplitVOut.h" #include "Nodes/CircleTopKV2Out.h" diff --git a/compiler/luci/lang/include/luci/IR/CircleNodes.lst b/compiler/luci/lang/include/luci/IR/CircleNodes.lst index 488dcfb..8010518 100644 --- a/compiler/luci/lang/include/luci/IR/CircleNodes.lst +++ b/compiler/luci/lang/include/luci/IR/CircleNodes.lst @@ -22,7 +22,6 @@ CIRCLE_NODE(BATCHMATMUL, luci::CircleBatchMatMul) CIRCLE_NODE(CAST, luci::CircleCast) CIRCLE_NODE(CEIL, luci::CircleCeil) CIRCLE_NODE(CONCATENATION, luci::CircleConcatenation) -CIRCLE_NODE(CONST, luci::CircleConst) CIRCLE_NODE(CONV_2D, luci::CircleConv2D) CIRCLE_NODE(COS, luci::CircleCos) CIRCLE_NODE(CUSTOM, luci::CircleCustom) @@ -64,10 +63,12 @@ CIRCLE_NODE(MINIMUM, luci::CircleMinimum) CIRCLE_NODE(MIRROR_PAD, luci::CircleMirrorPad) CIRCLE_NODE(MUL, luci::CircleMul) CIRCLE_NODE(NEG, luci::CircleNeg) +CIRCLE_NODE(NON_MAX_SUPPRESSION_V4, luci::CircleNonMaxSuppressionV4) CIRCLE_NODE(NOT_EQUAL, luci::CircleNotEqual) CIRCLE_NODE(ONE_HOT, luci::CircleOneHot) CIRCLE_NODE(PACK, luci::CirclePack) CIRCLE_NODE(PAD, luci::CirclePad) +CIRCLE_NODE(PADV2, luci::CirclePadV2) CIRCLE_NODE(POW, luci::CirclePow) CIRCLE_NODE(PRELU, luci::CirclePRelu) CIRCLE_NODE(RANGE, luci::CircleRange) @@ -111,6 +112,7 @@ CIRCLE_NODE(TILE, luci::CircleTile) CIRCLE_NODE(TOPK_V2, luci::CircleTopKV2) CIRCLE_NODE(TRANSPOSE, luci::CircleTranspose) CIRCLE_NODE(TRANSPOSE_CONV, luci::CircleTransposeConv) +CIRCLE_NODE(UNIQUE, luci::CircleUnique) CIRCLE_NODE(UNPACK, luci::CircleUnpack) CIRCLE_NODE(WHERE, luci::CircleWhere) CIRCLE_NODE(WHILE, luci::CircleWhile) @@ -120,14 +122,17 @@ CIRCLE_NODE(BCQ_FULLY_CONNECTED, luci::CircleBCQFullyConnected) CIRCLE_NODE(BCQ_GATHER, luci::CircleBCQGather) CIRCLE_NODE(INSTANCE_NORM, luci::CircleInstanceNorm) // Virtual node(s) +CIRCLE_NODE(CIRCLECONST, luci::CircleConst) CIRCLE_NODE(CIRCLEINPUT, luci::CircleInput) CIRCLE_NODE(CIRCLEOUTPUT, luci::CircleOutput) CIRCLE_NODE(CIRCLEOUTPUTDUMMY, luci::CircleOutputDummy) CIRCLE_NODE(CIRCLEOUTPUTEXCLUDE, luci::CircleOutputExclude) CIRCLE_NODE(CIRCLECUSTOMOUT, luci::CircleCustomOut) CIRCLE_NODE(CIRCLEIFOUT, 
luci::CircleIfOut) +CIRCLE_NODE(CIRCLENONMAXSUPPRESSIONV4OUT, luci::CircleNonMaxSuppressionV4Out) CIRCLE_NODE(CIRCLESPLITOUT, luci::CircleSplitOut) CIRCLE_NODE(CIRCLESPLITVOUT, luci::CircleSplitVOut) CIRCLE_NODE(CIRCLETOPKV2OUT, luci::CircleTopKV2Out) +CIRCLE_NODE(CIRCLEUNIQUEOUT, luci::CircleUniqueOut) CIRCLE_NODE(CIRCLEUNPACKOUT, luci::CircleUnpackOut) CIRCLE_NODE(CIRCLEWHILEOUT, luci::CircleWhileOut) diff --git a/compiler/luci/lang/include/luci/IR/CircleQuantParam.h b/compiler/luci/lang/include/luci/IR/CircleQuantParam.h index 7253e65..6944373 100644 --- a/compiler/luci/lang/include/luci/IR/CircleQuantParam.h +++ b/compiler/luci/lang/include/luci/IR/CircleQuantParam.h @@ -29,6 +29,7 @@ struct CircleQuantParam std::vector max; std::vector scale; std::vector zerop; + int32_t quantized_dimension{0}; }; } // namespace luci diff --git a/compiler/luci/lang/include/luci/IR/Nodes/CircleConst.h b/compiler/luci/lang/include/luci/IR/Nodes/CircleConst.h index fc67174..2502820 100644 --- a/compiler/luci/lang/include/luci/IR/Nodes/CircleConst.h +++ b/compiler/luci/lang/include/luci/IR/Nodes/CircleConst.h @@ -31,7 +31,7 @@ namespace luci * @brief Class to build tensor data * @note This will not be exported as a specific op */ -class CircleConst final : public FixedArityNode<0, CircleNodeImpl> +class CircleConst final : public FixedArityNode<0, CircleNodeImpl> { public: CircleConst() = default; diff --git a/compiler/luci/lang/include/luci/IR/Nodes/CircleNonMaxSuppressionV4.h b/compiler/luci/lang/include/luci/IR/Nodes/CircleNonMaxSuppressionV4.h new file mode 100644 index 0000000..69f3368 --- /dev/null +++ b/compiler/luci/lang/include/luci/IR/Nodes/CircleNonMaxSuppressionV4.h @@ -0,0 +1,53 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef __LUCI_IR_CIRCLE_NON_MAX_SUPPRESSION_V4_H__ +#define __LUCI_IR_CIRCLE_NON_MAX_SUPPRESSION_V4_H__ + +#include "luci/IR/CircleNodeDecl.h" +#include "luci/IR/CircleOpcode.h" + +#include "luci/IR/LuciNodeMixins.h" + +namespace luci +{ + +/** + * @brief NON_MAX_SUPPRESSION_V4 in Circle + */ +class CircleNonMaxSuppressionV4 final + : public FixedArityNode<5, CircleNodeImpl> +{ +public: + loco::Node *boxes(void) const { return at(0)->node(); } + void boxes(loco::Node *node) { at(0)->node(node); } + + loco::Node *scores(void) const { return at(1)->node(); } + void scores(loco::Node *node) { at(1)->node(node); } + + loco::Node *max_output_size(void) const { return at(2)->node(); } + void max_output_size(loco::Node *node) { at(2)->node(node); } + + loco::Node *iou_threshold(void) const { return at(3)->node(); } + void iou_threshold(loco::Node *node) { at(3)->node(node); } + + loco::Node *score_threshold(void) const { return at(4)->node(); } + void score_threshold(loco::Node *node) { at(4)->node(node); } +}; + +} // namespace luci + +#endif // __LUCI_IR_CIRCLE_NON_MAX_SUPPRESSION_V4_H__ diff --git a/compiler/luci/lang/include/luci/IR/Nodes/CircleNonMaxSuppressionV4Out.h b/compiler/luci/lang/include/luci/IR/Nodes/CircleNonMaxSuppressionV4Out.h new file mode 100644 index 0000000..a24dc3e --- /dev/null +++ b/compiler/luci/lang/include/luci/IR/Nodes/CircleNonMaxSuppressionV4Out.h @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __LUCI_IR_CIRCLE_NONMAXSUPPRESSIONV4OUT_H__ +#define __LUCI_IR_CIRCLE_NONMAXSUPPRESSIONV4OUT_H__ + +#include "luci/IR/CircleNodeDecl.h" +#include "luci/IR/CircleOpcode.h" + +#include "luci/IR/LuciNodeMixins.h" + +namespace luci +{ + +/** + * @brief Virtual NONMAXSUPPRESSIONV4OUT in Circle + */ +class CircleNonMaxSuppressionV4Out final + : public FixedArityNode<1, CircleNodeImpl> +{ +public: + CircleNonMaxSuppressionV4Out() = default; + +public: + loco::Node *input(void) const { return at(0)->node(); } + void input(loco::Node *node) { at(0)->node(node); } + +public: + int32_t index(void) const { return _index; } + void index(int32_t index) { _index = index; } + +private: + int32_t _index{-1}; +}; + +} // namespace luci + +#endif // __LUCI_IR_CIRCLE_NONMAXSUPPRESSIONV4OUT_H__ diff --git a/compiler/luci/lang/include/luci/IR/Nodes/CirclePadV2.h b/compiler/luci/lang/include/luci/IR/Nodes/CirclePadV2.h new file mode 100644 index 0000000..563cfd9 --- /dev/null +++ b/compiler/luci/lang/include/luci/IR/Nodes/CirclePadV2.h @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __LUCI_IR_CIRCLEPADV2_H__ +#define __LUCI_IR_CIRCLEPADV2_H__ + +#include "luci/IR/CircleNodeDecl.h" +#include "luci/IR/CircleOpcode.h" + +#include "luci/IR/LuciNodeMixins.h" + +namespace luci +{ + +/** + * @brief PADV2 in Circle + */ +class CirclePadV2 final : public FixedArityNode<3, CircleNodeImpl> +{ +public: + CirclePadV2() = default; + +public: + loco::Node *input(void) const { return at(0)->node(); } + void input(loco::Node *node) { at(0)->node(node); } + + loco::Node *paddings(void) const { return at(1)->node(); } + void paddings(loco::Node *node) { at(1)->node(node); } + + loco::Node *constant_values(void) const { return at(2)->node(); } + void constant_values(loco::Node *node) { at(2)->node(node); } +}; + +} // namespace luci + +#endif // __LUCI_IR_CIRCLEPADV2_H__ diff --git a/compiler/luci/lang/include/luci/IR/Nodes/CircleUnique.h b/compiler/luci/lang/include/luci/IR/Nodes/CircleUnique.h new file mode 100644 index 0000000..719a723 --- /dev/null +++ b/compiler/luci/lang/include/luci/IR/Nodes/CircleUnique.h @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __LUCI_IR_CIRCELUNIQUE_H__ +#define __LUCI_IR_CIRCELUNIQUE_H__ + +#include "luci/IR/CircleNodeDecl.h" +#include "luci/IR/CircleOpcode.h" + +#include "luci/IR/LuciNodeMixins.h" + +namespace luci +{ + +/** + * @brief Unique in Circle + */ +class CircleUnique final : public FixedArityNode<1, CircleNodeImpl> +{ +public: + loco::Node *input(void) const { return at(0)->node(); } + void input(loco::Node *node) { at(0)->node(node); } + +public: + loco::DataType idx_out_type(void) const { return _idx_out_type; } + void output_type(loco::DataType ot) { _idx_out_type = ot; } + +private: + loco::DataType _idx_out_type{loco::DataType::S32}; +}; + +} // namespace luci + +#endif // __LUCI_IR_CIRCELUNIQUE_H__ diff --git a/compiler/luci/lang/include/luci/IR/Nodes/CircleUniqueOut.h b/compiler/luci/lang/include/luci/IR/Nodes/CircleUniqueOut.h new file mode 100644 index 0000000..f846403 --- /dev/null +++ b/compiler/luci/lang/include/luci/IR/Nodes/CircleUniqueOut.h @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __LUCI_IR_CIRCLE_UNIQUEOUT_H__ +#define __LUCI_IR_CIRCLE_UNIQUEOUT_H__ + +#include "luci/IR/CircleNodeDecl.h" +#include "luci/IR/CircleOpcode.h" + +#include "luci/IR/LuciNodeMixins.h" + +namespace luci +{ + +/** + * @brief Virtual CIRCLEUNIQUEOUT in Circle + */ +class CircleUniqueOut final + : public FixedArityNode<1, CircleNodeImpl> +{ +public: + CircleUniqueOut() = default; + +public: + loco::Node *input(void) const { return at(0)->node(); } + void input(loco::Node *node) { at(0)->node(node); } + +public: + int32_t index(void) const { return _index; } + void index(int32_t index) { _index = index; } + +private: + int32_t _index{-1}; +}; + +} // namespace luci + +#endif // __LUCI_IR_CIRCLE_UNIQUEOUT_H__ diff --git a/compiler/luci/lang/src/Module.test.cpp b/compiler/luci/lang/src/Module.test.cpp index 26bf073..a5973e5 100644 --- a/compiler/luci/lang/src/Module.test.cpp +++ b/compiler/luci/lang/src/Module.test.cpp @@ -22,7 +22,7 @@ TEST(ModuleTest, consturctor) { auto gs = luci::make_module(); - GTEST_SUCCEED(); + SUCCEED(); } TEST(ModuleTest, add) diff --git a/compiler/luci/lang/src/Nodes/CircleCustom.test.cpp b/compiler/luci/lang/src/Nodes/CircleCustom.test.cpp index 74ea82c..c07268c 100644 --- a/compiler/luci/lang/src/Nodes/CircleCustom.test.cpp +++ b/compiler/luci/lang/src/Nodes/CircleCustom.test.cpp @@ -35,7 +35,12 @@ TEST(CircleCustomTest, constructor) ASSERT_EQ(0, custom_node.custom_code().size()); } -TEST(CircleCustomTest, constructor_NEG) { ASSERT_DEBUG_DEATH(luci::CircleCustom{0}, ""); } +TEST(CircleCustomTest, constructor_NEG) +{ + ASSERT_DEBUG_DEATH(luci::CircleCustom{0}, ""); + + SUCCEED(); +} TEST(CircleCustomTest, invalidIndex_NEG) { diff --git a/compiler/luci/lang/src/Nodes/CircleIf.test.cpp b/compiler/luci/lang/src/Nodes/CircleIf.test.cpp index e3c8c9f..35f28e9 100644 --- a/compiler/luci/lang/src/Nodes/CircleIf.test.cpp +++ b/compiler/luci/lang/src/Nodes/CircleIf.test.cpp @@ -41,11 +41,15 @@ TEST(CircleIfTest, constructor) TEST(CircleIfTestDeath, invalid_arity_NEG) { ASSERT_DEBUG_DEATH(luci::CircleIf very_long_name_if_node(0, 1), ""); + + SUCCEED(); } TEST(CircleIfTestDeath, invalid_output_count_NEG) { ASSERT_DEBUG_DEATH(luci::CircleIf if_node(2, 0), ""); + + SUCCEED(); } TEST(CircleIfTestDeath, invalid_input_get_index_NEG) diff --git a/compiler/luci/lang/src/Nodes/CircleNonMaxSuppressionV4.test.cpp b/compiler/luci/lang/src/Nodes/CircleNonMaxSuppressionV4.test.cpp new file mode 100644 index 0000000..b25ce4d --- /dev/null +++ b/compiler/luci/lang/src/Nodes/CircleNonMaxSuppressionV4.test.cpp @@ -0,0 +1,96 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "luci/IR/Nodes/CircleNonMaxSuppressionV4.h" + +#include "luci/IR/CircleDialect.h" +#include "luci/IR/CircleNodeVisitor.h" + +#include + +TEST(CircleNonMaxSuppressionV4Test, constructor) +{ + luci::CircleNonMaxSuppressionV4 nmsv4_node; + + ASSERT_EQ(luci::CircleDialect::get(), nmsv4_node.dialect()); + ASSERT_EQ(luci::CircleOpcode::NON_MAX_SUPPRESSION_V4, nmsv4_node.opcode()); + + ASSERT_EQ(nullptr, nmsv4_node.boxes()); + ASSERT_EQ(nullptr, nmsv4_node.scores()); + ASSERT_EQ(nullptr, nmsv4_node.max_output_size()); + ASSERT_EQ(nullptr, nmsv4_node.iou_threshold()); + ASSERT_EQ(nullptr, nmsv4_node.score_threshold()); +} + +TEST(CircleNonMaxSuppressionV4Test, input_NEG) +{ + luci::CircleNonMaxSuppressionV4 nmsv4_node; + luci::CircleNonMaxSuppressionV4 node; + + nmsv4_node.boxes(&node); + nmsv4_node.scores(&node); + nmsv4_node.max_output_size(&node); + nmsv4_node.iou_threshold(&node); + nmsv4_node.score_threshold(&node); + ASSERT_NE(nullptr, nmsv4_node.boxes()); + ASSERT_NE(nullptr, nmsv4_node.scores()); + ASSERT_NE(nullptr, nmsv4_node.max_output_size()); + ASSERT_NE(nullptr, nmsv4_node.iou_threshold()); + ASSERT_NE(nullptr, nmsv4_node.score_threshold()); + + nmsv4_node.boxes(nullptr); + nmsv4_node.scores(nullptr); + nmsv4_node.max_output_size(nullptr); + nmsv4_node.iou_threshold(nullptr); + nmsv4_node.score_threshold(nullptr); + ASSERT_EQ(nullptr, nmsv4_node.boxes()); + ASSERT_EQ(nullptr, nmsv4_node.scores()); + ASSERT_EQ(nullptr, nmsv4_node.max_output_size()); + ASSERT_EQ(nullptr, nmsv4_node.iou_threshold()); + ASSERT_EQ(nullptr, nmsv4_node.score_threshold()); +} + +TEST(CircleNonMaxSuppressionV4Test, arity_NEG) +{ + luci::CircleNonMaxSuppressionV4 nmsv4_node; + + ASSERT_NO_THROW(nmsv4_node.arg(4)); + ASSERT_THROW(nmsv4_node.arg(5), std::out_of_range); +} + +TEST(CircleNonMaxSuppressionV4Test, visit_mutable_NEG) +{ + struct TestVisitor final : public luci::CircleNodeMutableVisitor + { + }; + + luci::CircleNonMaxSuppressionV4 nmsv4_node; + + TestVisitor tv; + ASSERT_THROW(nmsv4_node.accept(&tv), std::exception); +} + +TEST(CircleNonMaxSuppressionV4Test, visit_NEG) +{ + struct TestVisitor final : public luci::CircleNodeVisitor + { + }; + + luci::CircleNonMaxSuppressionV4 nmsv4_node; + + TestVisitor tv; + ASSERT_THROW(nmsv4_node.accept(&tv), std::exception); +} diff --git a/compiler/luci/lang/src/Nodes/CircleNonMaxSuppressionV4Out.test.cpp b/compiler/luci/lang/src/Nodes/CircleNonMaxSuppressionV4Out.test.cpp new file mode 100644 index 0000000..c6cef4e --- /dev/null +++ b/compiler/luci/lang/src/Nodes/CircleNonMaxSuppressionV4Out.test.cpp @@ -0,0 +1,32 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "luci/IR/Nodes/CircleNonMaxSuppressionV4Out.h" + +#include "luci/IR/CircleDialect.h" + +#include + +TEST(CircleNonMaxSuppressionV4OutTest, constructor) +{ + luci::CircleNonMaxSuppressionV4Out vout_node; + + ASSERT_EQ(luci::CircleDialect::get(), vout_node.dialect()); + ASSERT_EQ(luci::CircleOpcode::CIRCLENONMAXSUPPRESSIONV4OUT, vout_node.opcode()); + + ASSERT_EQ(nullptr, vout_node.input()); + ASSERT_EQ(-1, vout_node.index()); +} diff --git a/compiler/luci/lang/src/Nodes/CirclePadV2.test.cpp b/compiler/luci/lang/src/Nodes/CirclePadV2.test.cpp new file mode 100644 index 0000000..e09d517 --- /dev/null +++ b/compiler/luci/lang/src/Nodes/CirclePadV2.test.cpp @@ -0,0 +1,86 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "luci/IR/Nodes/CirclePadV2.h" + +#include "luci/IR/CircleDialect.h" +#include "luci/IR/CircleNodeVisitor.h" + +#include + +TEST(CirclePadV2Test, constructor_P) +{ + luci::CirclePadV2 node; + + ASSERT_EQ(luci::CircleDialect::get(), node.dialect()); + ASSERT_EQ(luci::CircleOpcode::PADV2, node.opcode()); + + ASSERT_EQ(nullptr, node.input()); + ASSERT_EQ(nullptr, node.paddings()); + ASSERT_EQ(nullptr, node.constant_values()); +} + +TEST(CirclePadV2Test, input_NEG) +{ + luci::CirclePadV2 pad_node; + luci::CirclePadV2 node; + + pad_node.input(&node); + pad_node.paddings(&node); + pad_node.constant_values(&node); + ASSERT_NE(nullptr, pad_node.input()); + ASSERT_NE(nullptr, pad_node.paddings()); + ASSERT_NE(nullptr, pad_node.constant_values()); + + pad_node.input(nullptr); + pad_node.paddings(nullptr); + pad_node.constant_values(nullptr); + ASSERT_EQ(nullptr, pad_node.input()); + ASSERT_EQ(nullptr, pad_node.paddings()); + ASSERT_EQ(nullptr, pad_node.constant_values()); +} + +TEST(CirclePadV2Test, arity_NEG) +{ + luci::CirclePadV2 pad_node; + + ASSERT_NO_THROW(pad_node.arg(2)); + ASSERT_THROW(pad_node.arg(3), std::out_of_range); +} + +TEST(CirclePadV2Test, visit_mutable_NEG) +{ + struct TestVisitor final : public luci::CircleNodeMutableVisitor + { + }; + + luci::CirclePadV2 pad_node; + + TestVisitor tv; + ASSERT_THROW(pad_node.accept(&tv), std::exception); +} + +TEST(CirclePadV2Test, visit_NEG) +{ + struct TestVisitor final : public luci::CircleNodeVisitor + { + }; + + luci::CirclePadV2 pad_node; + + TestVisitor tv; + ASSERT_THROW(pad_node.accept(&tv), std::exception); +} diff --git a/compiler/luci/lang/src/Nodes/CircleUnique.test.cpp b/compiler/luci/lang/src/Nodes/CircleUnique.test.cpp new file mode 100644 index 0000000..517ee97 --- /dev/null +++ b/compiler/luci/lang/src/Nodes/CircleUnique.test.cpp @@ -0,0 +1,76 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "luci/IR/Nodes/CircleUnique.h" + +#include "luci/IR/CircleDialect.h" +#include "luci/IR/CircleNodeVisitor.h" + +#include + +TEST(CircleUniqueTest, constructor) +{ + luci::CircleUnique unique_node; + + ASSERT_EQ(luci::CircleDialect::get(), unique_node.dialect()); + ASSERT_EQ(luci::CircleOpcode::UNIQUE, unique_node.opcode()); + + ASSERT_EQ(nullptr, unique_node.input()); +} + +TEST(CircleUniqueTest, input_NEG) +{ + luci::CircleUnique unique_node; + luci::CircleUnique node; + + unique_node.input(&node); + ASSERT_NE(nullptr, unique_node.input()); + + unique_node.input(nullptr); + ASSERT_EQ(nullptr, unique_node.input()); +} + +TEST(CircleUniqueTest, arity_NEG) +{ + luci::CircleUnique unique_node; + + ASSERT_NO_THROW(unique_node.arg(0)); + ASSERT_THROW(unique_node.arg(1), std::out_of_range); +} + +TEST(CircleUniqueTest, visit_mutable_NEG) +{ + struct TestVisitor final : public luci::CircleNodeMutableVisitor + { + }; + + luci::CircleUnique unique_node; + + TestVisitor tv; + ASSERT_THROW(unique_node.accept(&tv), std::exception); +} + +TEST(CircleUniqueTest, visit_NEG) +{ + struct TestVisitor final : public luci::CircleNodeVisitor + { + }; + + luci::CircleUnique unique_node; + + TestVisitor tv; + ASSERT_THROW(unique_node.accept(&tv), std::exception); +} diff --git a/compiler/luci/lang/src/Nodes/CircleWhile.test.cpp b/compiler/luci/lang/src/Nodes/CircleWhile.test.cpp index 19290c0..913686f 100644 --- a/compiler/luci/lang/src/Nodes/CircleWhile.test.cpp +++ b/compiler/luci/lang/src/Nodes/CircleWhile.test.cpp @@ -41,11 +41,15 @@ TEST(CircleWhileTest, constructor) TEST(CircleWhileTestDeath, invalid_arity_NEG) { ASSERT_DEBUG_DEATH(luci::CircleWhile very_long_name_while_node(0, 1), ""); + + SUCCEED(); } TEST(CircleWhileTestDeath, invalid_output_count_NEG) { ASSERT_DEBUG_DEATH(luci::CircleWhile while_node(2, 0), ""); + + SUCCEED(); } TEST(CircleWhileTestDeath, invalid_input_get_index_NEG) diff --git a/compiler/luci/logex/src/FormattedGraph.cpp b/compiler/luci/logex/src/FormattedGraph.cpp index 4725ee3..f04a418 100644 --- a/compiler/luci/logex/src/FormattedGraph.cpp +++ b/compiler/luci/logex/src/FormattedGraph.cpp @@ -244,6 +244,7 @@ private: IMPLEMENT(luci::CircleMirrorPad) IMPLEMENT(luci::CircleMul) IMPLEMENT(luci::CircleNeg) + IMPLEMENT(luci::CircleNonMaxSuppressionV4) IMPLEMENT(luci::CircleNotEqual) IMPLEMENT(luci::CircleOneHot) IMPLEMENT(luci::CirclePack) @@ -291,6 +292,7 @@ private: IMPLEMENT(luci::CircleTopKV2) IMPLEMENT(luci::CircleTranspose) IMPLEMENT(luci::CircleTransposeConv) + IMPLEMENT(luci::CircleUnique) IMPLEMENT(luci::CircleUnpack) IMPLEMENT(luci::CircleWhere) IMPLEMENT(luci::CircleWhile) @@ -303,9 +305,11 @@ private: IMPLEMENT(luci::CircleInput) IMPLEMENT(luci::CircleOutput) IMPLEMENT(luci::CircleIfOut) + IMPLEMENT(luci::CircleNonMaxSuppressionV4Out) IMPLEMENT(luci::CircleSplitOut) IMPLEMENT(luci::CircleSplitVOut) IMPLEMENT(luci::CircleTopKV2Out) + IMPLEMENT(luci::CircleUniqueOut) IMPLEMENT(luci::CircleUnpackOut) IMPLEMENT(luci::CircleWhileOut) #undef IMPLEMENT @@ -823,6 +827,19 @@ bool CircleNodeSummaryBuilder::summary(const luci::CircleNeg *node, 
locop::NodeS return use_x(tbl(), node, s); } +bool CircleNodeSummaryBuilder::summary(const luci::CircleNonMaxSuppressionV4 *node, + locop::NodeSummary &s) const +{ + s.args().append("boxes", pepper::str(node->boxes())); + s.args().append("scores", pepper::str(node->scores())); + s.args().append("max_output_size", pepper::str(node->max_output_size())); + s.args().append("iou_threshold", pepper::str(node->iou_threshold())); + s.args().append("score_threshold", pepper::str(node->score_threshold())); + + s.state(locop::NodeSummary::State::Complete); + return true; +} + bool CircleNodeSummaryBuilder::summary(const luci::CircleNotEqual *node, locop::NodeSummary &s) const { @@ -1227,6 +1244,14 @@ bool CircleNodeSummaryBuilder::summary(const luci::CircleTransposeConv *node, return true; } +bool CircleNodeSummaryBuilder::summary(const luci::CircleUnique *node, locop::NodeSummary &s) const +{ + s.args().append("input", tbl()->lookup(node->input())); + s.args().append("idx_out_type", to_str(node->idx_out_type())); + s.state(locop::NodeSummary::State::Complete); + return true; +} + bool CircleNodeSummaryBuilder::summary(const luci::CircleUnpack *node, locop::NodeSummary &s) const { s.args().append("value", tbl()->lookup(node->value())); @@ -1293,6 +1318,16 @@ bool CircleNodeSummaryBuilder::summary(const luci::CircleTopKV2Out *node, return true; } +bool CircleNodeSummaryBuilder::summary(const luci::CircleUniqueOut *node, + locop::NodeSummary &s) const +{ + s.args().append("unique", tbl()->lookup(node->input())); + + s.state(locop::NodeSummary::State::Complete); + + return true; +} + bool CircleNodeSummaryBuilder::summary(const luci::CircleUnpackOut *node, locop::NodeSummary &s) const { @@ -1308,6 +1343,12 @@ bool CircleNodeSummaryBuilder::summary(const luci::CircleIfOut *node, locop::Nod return use_input(tbl(), node, s); } +bool CircleNodeSummaryBuilder::summary(const luci::CircleNonMaxSuppressionV4Out *node, + locop::NodeSummary &s) const +{ + return use_input(tbl(), node, s); +} + bool CircleNodeSummaryBuilder::summary(const luci::CircleWhileOut *node, locop::NodeSummary &s) const { diff --git a/compiler/luci/pass/src/CircleOptimizer.cpp b/compiler/luci/pass/src/CircleOptimizer.cpp index 90fbe90..2edf7a9 100644 --- a/compiler/luci/pass/src/CircleOptimizer.cpp +++ b/compiler/luci/pass/src/CircleOptimizer.cpp @@ -145,7 +145,7 @@ void CircleOptimizer::quantize(loco::Graph *g) const { static const std::vector fakeq_supported_input_dtype{"float32"}; static const std::vector fakeq_supported_output_dtype{"uint8"}; - static const std::vector fakeq_supported_granularity{"layer"}; + static const std::vector fakeq_supported_granularity{"layer", "channel"}; auto input_dtype = _options->param(Options::AlgorithmParameters::Quantize_input_dtype); auto output_dtype = _options->param(Options::AlgorithmParameters::Quantize_output_dtype); @@ -173,7 +173,7 @@ void CircleOptimizer::quantize(loco::Graph *g) const { static const std::vector qwmm_supported_input_dtype{"float32"}; static const std::vector qwmm_supported_output_dtype{"uint8"}; - static const std::vector qwmm_supported_granularity{"layer"}; + static const std::vector qwmm_supported_granularity{"layer", "channel"}; auto input_dtype = _options->param(Options::AlgorithmParameters::Quantize_input_dtype); auto output_dtype = _options->param(Options::AlgorithmParameters::Quantize_output_dtype); diff --git a/compiler/luci/pass/src/FuseBCQPass.cpp b/compiler/luci/pass/src/FuseBCQPass.cpp index b81db88..260de5b 100644 --- a/compiler/luci/pass/src/FuseBCQPass.cpp +++ 
b/compiler/luci/pass/src/FuseBCQPass.cpp @@ -53,6 +53,11 @@ const std::string node_name_prefix(luci::NodeName node_name) const auto index = prefix.find("Tensordot/"); prefix = prefix.substr(0, index - 1); } + else if (prefix.find("/MatMul") != std::string::npos) + { + const auto index = prefix.find("/MatMul"); + prefix = prefix.substr(0, index); + } else if (prefix.find("kernel/") != std::string::npos) { const auto index = prefix.find("kernel/"); @@ -67,14 +72,190 @@ const std::string node_name_prefix(luci::NodeName node_name) return prefix; } +/** + * @brief Create CircleOutputExclude operation, which has same shape and dtype with + * original circle_node. + */ +luci::CircleOutputExclude *createNoOp(luci::CircleNode *circle_node) +{ + auto graph = circle_node->graph(); + auto noOp = graph->nodes()->create(); + + if (circle_node->shape_status() == luci::ShapeStatus::VALID) + { + noOp->dtype(circle_node->dtype()); + noOp->rank(circle_node->rank()); + for (uint32_t i = 0; i < circle_node->rank(); ++i) + noOp->dim(i) = circle_node->dim(i); + } + else + { + // For type inference + noOp->dtype(loco::DataType::FLOAT32); + } + + return noOp; +}; + } // namespace namespace { -class BCQConverter final +// V means the version of BCQ. +template class BCQFuser; + +template <> class BCQFuser<1> { public: + bool fuseBCQ(loco::Graph *g) + { + bool changed = false; + + for (auto node : loco::all_nodes(g)) + { + if (auto circle_const = dynamic_cast(node)) + { + add_BCQ_info_node(circle_const); + } + } + + if (!is_bcqinfo_valid()) + return false; + + for (auto node : loco::active_nodes(loco::output_nodes(g))) + { + if (auto gather = dynamic_cast(node)) + { + auto params = dynamic_cast(gather->params()); + if (params != nullptr && has_BCQ_info(params)) + { + auto bcq_gather = g->nodes()->create(); + + bcq_gather->op_version(1); + bcq_gather->input_scales(get_alpha(params)); + bcq_gather->input_binary(get_packed_binary_code(params)); + bcq_gather->indices(gather->indices()); + bcq_gather->input_clusters(packed_clusters(params)); + + // input_binary shape : [output_size, hidden_size] + const auto binary_hidden_size = + loco::must_cast(bcq_gather->input_binary())->dim(1).value() * 32; + bcq_gather->input_hidden_size(binary_hidden_size); + + if (do_w_x(params)) + { + bcq_gather->axis(gather->axis()); + } + else + { + const auto axis_transpose = (gather->axis() == 0) ? 
1 : 0; + bcq_gather->axis(axis_transpose); + } + + loco::replace(gather).with(bcq_gather); + + changed = true; + } + } + else if (auto fully_connected = dynamic_cast(node)) + { + auto weights = dynamic_cast(fully_connected->weights()); + if (weights != nullptr && has_BCQ_info(weights)) + { + auto bcq_fc = g->nodes()->create(); + + bcq_fc->op_version(1); + bcq_fc->weights_scales(get_alpha(weights)); + bcq_fc->weights_binary(get_packed_binary_code(weights)); + bcq_fc->bias(fully_connected->bias()); + bcq_fc->weights_clusters(packed_clusters(weights)); + bcq_fc->fusedActivationFunction(fully_connected->fusedActivationFunction()); + + loco::Node *bcq_input = fully_connected->input(); + int32_t batch_rank = 0; + + // If input of BCQFullyConnected has more than rank 2, we should reshape it as rank 2 + const auto original_input = loco::must_cast(fully_connected->input()); + if (original_input->shape_status() == luci::ShapeStatus::VALID && + original_input->rank() > 2) + { + auto new_shape = g->nodes()->create(); + new_shape->dtype(loco::DataType::S32); + new_shape->size(2); + new_shape->rank(1); + new_shape->dim(0) = 2; + + auto batch_size = 1; + for (uint32_t i = 0; i < original_input->rank() - 1; ++i) + batch_size *= original_input->dim(i).value(); + + new_shape->at(0) = batch_size; + new_shape->at(1) = + original_input->dim(original_input->rank() - 1).value(); + new_shape->shape_status(luci::ShapeStatus::VALID); + + auto reshape = g->nodes()->create(); + reshape->tensor(original_input); + reshape->shape(new_shape); + + bcq_input = reshape; + batch_rank = original_input->rank() - 2; + } + + // If x_w formation, we should insert Transpose in front and back of BCQFullyConnected + if (do_w_x(weights)) + { + const auto binary_hidden_size = + loco::must_cast(fully_connected->input()) + ->dim(batch_rank) + .value(); + bcq_fc->weights_hidden_size(binary_hidden_size); + bcq_fc->input(bcq_input); + loco::replace(fully_connected).with(bcq_fc); + } + else + { + const auto binary_hidden_size = + loco::must_cast(fully_connected->input()) + ->dim(1 + batch_rank) + .value(); + bcq_fc->weights_hidden_size(binary_hidden_size); + + auto perm = g->nodes()->create(); + perm->dtype(loco::DataType::S32); + perm->size(2); + perm->rank(1); + perm->dim(0) = 2; + perm->at(0) = 1; + perm->at(1) = 0; + perm->shape_status(luci::ShapeStatus::VALID); + + auto input_transpose = g->nodes()->create(); + input_transpose->a(bcq_input); + input_transpose->perm(perm); + + bcq_fc->input(input_transpose); + + auto output_transpose = g->nodes()->create(); + output_transpose->a(bcq_fc); + output_transpose->perm(perm); + + loco::replace(fully_connected).with(output_transpose); + } + + changed = true; + } + } + } + + if (changed) + clear_BCQ_nodes(); + + return changed; + } + +private: void add_BCQ_info_node(luci::CircleConst *node) { const auto node_name = node->name(); @@ -119,16 +300,65 @@ public: return has_info; } + /** + * @brief Exclude BCQ information nodes which are used for fusing BCQ operations + * from graph output by using CircleOutputExclude + */ + void clear_BCQ_nodes() + { + auto clear_nodes = [](std::map &nodes) { + for (auto &n : nodes) + { + auto node = n.second; + + for (auto s : loco::succs(node)) + { + if (auto outnode = dynamic_cast(s)) + { + outnode->from(createNoOp(node)); + } + else if (auto reshape_node = dynamic_cast(s)) + { + for (auto o : loco::succs(reshape_node)) + { + auto circle_output = loco::must_cast(o); + circle_output->from(createNoOp(reshape_node)); + } + } + } + } + }; + + clear_nodes(_do_w_x); + 
clear_nodes(_alpha); + clear_nodes(_packed_binary_code); + clear_nodes(_number_of_clusters); + clear_nodes(_size_of_clusters); + clear_nodes(_qbits_of_clusters); + clear_nodes(_dequant_weight); + } + + bool is_bcqinfo_valid() + { + // do_w_x should be int32 or bool type + for (auto n : _do_w_x) + { + if (n.second->dtype() != loco::DataType::BOOL && n.second->dtype() != loco::DataType::S32) + return false; + } + + return true; + } + +private: bool do_w_x(luci::CircleConst *node) { const auto prefix = node_name_prefix(node->name()); if (_do_w_x[prefix]->dtype() == loco::DataType::S32) return _do_w_x[prefix]->at(0) == 1; - else if (_do_w_x[prefix]->dtype() == loco::DataType::BOOL) - return _do_w_x[prefix]->at(0); else - throw std::runtime_error("do_w_x should be int or bool"); + return _do_w_x[prefix]->at(0); } luci::CircleConst *get_alpha(luci::CircleConst *node) @@ -187,64 +417,6 @@ public: return packed_clusters; } - /** - * @brief Exclude BCQ information nodes which are used for fusing BCQ operations - * from graph output by using CircleOutputExclude - */ - void clear_BCQ_nodes() - { - auto createNoOp = [](luci::CircleNode *circle_node) { - auto graph = circle_node->graph(); - auto noOp = graph->nodes()->create(); - - if (circle_node->shape_status() == luci::ShapeStatus::VALID) - { - noOp->dtype(circle_node->dtype()); - noOp->rank(circle_node->rank()); - for (uint32_t i = 0; i < circle_node->rank(); ++i) - noOp->dim(i) = circle_node->dim(i); - } - else - { - // For type inference - noOp->dtype(loco::DataType::FLOAT32); - } - - return noOp; - }; - - auto clear_nodes = [createNoOp](std::map &nodes) { - for (auto &n : nodes) - { - auto node = n.second; - - for (auto s : loco::succs(node)) - { - if (auto outnode = dynamic_cast(s)) - { - outnode->from(createNoOp(node)); - } - else if (auto reshape_node = dynamic_cast(s)) - { - for (auto o : loco::succs(reshape_node)) - { - auto circle_output = loco::must_cast(o); - circle_output->from(createNoOp(reshape_node)); - } - } - } - } - }; - - clear_nodes(_do_w_x); - clear_nodes(_alpha); - clear_nodes(_packed_binary_code); - clear_nodes(_number_of_clusters); - clear_nodes(_size_of_clusters); - clear_nodes(_qbits_of_clusters); - clear_nodes(_dequant_weight); - } - private: std::map _do_w_x; std::map _alpha; @@ -262,143 +434,42 @@ namespace luci bool FuseBCQPass::run(loco::Graph *g) { - BCQConverter converter; - bool changed = false; + // Find BCQ version information and check validity. 
+ luci::CircleConst *version_node = nullptr; for (auto node : loco::all_nodes(g)) { if (auto circle_const = dynamic_cast(node)) { - converter.add_BCQ_info_node(circle_const); - } - } - - for (auto node : loco::active_nodes(loco::output_nodes(g))) - { - if (auto gather = dynamic_cast(node)) - { - auto params = dynamic_cast(gather->params()); - if (params != nullptr && converter.has_BCQ_info(params)) + if (circle_const->name().find("/bcqinfo_version") != std::string::npos) { - auto bcq_gather = g->nodes()->create(); - - bcq_gather->input_scales(converter.get_alpha(params)); - bcq_gather->input_binary(converter.get_packed_binary_code(params)); - bcq_gather->indices(gather->indices()); - bcq_gather->input_clusters(converter.packed_clusters(params)); - - const auto binary_hidden_size = - loco::must_cast(bcq_gather->input_binary())->dim(1).value() * 32; - bcq_gather->input_hidden_size(binary_hidden_size); - - if (converter.do_w_x(params)) - { - bcq_gather->axis(gather->axis()); - } - else + // There should be only one bcqinfo_version in the model + if (version_node != nullptr) { - const auto axis_transpose = (gather->axis() == 0) ? 1 : 0; - bcq_gather->axis(axis_transpose); + assert(false && "Multiple version information found"); + return false; } - loco::replace(gather).with(bcq_gather); - - changed = true; + version_node = circle_const; } } - else if (auto fully_connected = dynamic_cast(node)) - { - auto weights = dynamic_cast(fully_connected->weights()); - if (weights != nullptr && converter.has_BCQ_info(weights)) - { - auto bcq_fc = g->nodes()->create(); - - bcq_fc->weights_scales(converter.get_alpha(weights)); - bcq_fc->weights_binary(converter.get_packed_binary_code(weights)); - bcq_fc->bias(fully_connected->bias()); - bcq_fc->weights_clusters(converter.packed_clusters(weights)); - bcq_fc->fusedActivationFunction(fully_connected->fusedActivationFunction()); - - loco::Node *bcq_input = fully_connected->input(); - int32_t batch_rank = 0; + } - // If input of BCQFullyConnected has more than rank 2, we should reshape it as rank 2 - const auto original_input = loco::must_cast(fully_connected->input()); - if (original_input->shape_status() == ShapeStatus::VALID && original_input->rank() > 2) - { - auto new_shape = g->nodes()->create(); - new_shape->dtype(loco::DataType::S32); - new_shape->size(2); - new_shape->rank(1); - new_shape->dim(0) = 2; - - auto batch_size = 1; - for (uint32_t i = 0; i < original_input->rank() - 1; ++i) - batch_size *= original_input->dim(i).value(); - - new_shape->at(0) = batch_size; - new_shape->at(1) = - original_input->dim(original_input->rank() - 1).value(); - new_shape->shape_status(ShapeStatus::VALID); - - auto reshape = g->nodes()->create(); - reshape->tensor(original_input); - reshape->shape(new_shape); - - bcq_input = reshape; - batch_rank = original_input->rank() - 2; - } + // If version node is not found, regard it as version 1. + int32_t bcq_version = (version_node != nullptr) ? 
version_node->at(0) : 1; - // If x_w formation, we should insert Transpose in front and back of BCQFullyConnected - if (converter.do_w_x(weights)) - { - const auto binary_hidden_size = - loco::must_cast(fully_connected->input()) - ->dim(batch_rank) - .value(); - bcq_fc->weights_hidden_size(binary_hidden_size); - bcq_fc->input(bcq_input); - loco::replace(fully_connected).with(bcq_fc); - } - else - { - const auto binary_hidden_size = - loco::must_cast(fully_connected->input()) - ->dim(1 + batch_rank) - .value(); - bcq_fc->weights_hidden_size(binary_hidden_size); - - auto perm = g->nodes()->create(); - perm->dtype(loco::DataType::S32); - perm->size(2); - perm->rank(1); - perm->dim(0) = 2; - perm->at(0) = 1; - perm->at(1) = 0; - perm->shape_status(ShapeStatus::VALID); - - auto input_transpose = g->nodes()->create(); - input_transpose->a(bcq_input); - input_transpose->perm(perm); - - bcq_fc->input(input_transpose); - - auto output_transpose = g->nodes()->create(); - output_transpose->a(bcq_fc); - output_transpose->perm(perm); - - loco::replace(fully_connected).with(output_transpose); - } + if (bcq_version == 1) + changed = BCQFuser<1>().fuseBCQ(g); + else + assert(false && "Not supported BCQ version"); - changed = true; - } - } + if (changed && version_node != nullptr) + { + // If BCQ is applied and version node was found, remove the node. + loco::replace(version_node).with(createNoOp(version_node)); } - if (changed) - converter.clear_BCQ_nodes(); - return changed; } diff --git a/compiler/luci/pass/src/QuantizationUtils.cpp b/compiler/luci/pass/src/QuantizationUtils.cpp index 6726ce7..e186906 100644 --- a/compiler/luci/pass/src/QuantizationUtils.cpp +++ b/compiler/luci/pass/src/QuantizationUtils.cpp @@ -24,6 +24,13 @@ namespace luci { +uint8_t fp32_to_uint8_cast(float f) +{ + assert(std::numeric_limits::min() <= f); + assert(f <= std::numeric_limits::max()); + return static_cast(f); +} + void compute_sym_scale_zp(float min, float max, float &scaling_factor, int64_t &zp, float &nudged_min, float &nudged_max) { @@ -78,7 +85,7 @@ void compute_asym_scale_zp(float min, float max, float &scaling_factor, int64_t } else zero_point_double = qmin_double - rmin / scale; - if (zero_point_double <= qmin_double) + if (min >= 0) { assert(min >= 0 && max >= 0); nudged_zero_point = kMinScale; @@ -86,7 +93,7 @@ void compute_asym_scale_zp(float min, float max, float &scaling_factor, int64_t if (min > 0 && max > 0) WARN(l) << "The minimum and maximum values are all positive." 
<< std::endl; } - else if (zero_point_double >= qmax_double) + else if (max < 0) { assert(min < 0 && max < 0); nudged_zero_point = kMaxScale; @@ -96,7 +103,14 @@ void compute_asym_scale_zp(float min, float max, float &scaling_factor, int64_t else { assert(min < 0 && max >= 0); - nudged_zero_point = static_cast(std::round(zero_point_double)); + nudged_zero_point = fp32_to_uint8_cast(std::round(zero_point_double)); + } + + // protect scale from being very low due to overflow + if (scale < 1e-5) + { + scale = 1e-5; + nudged_zero_point = fp32_to_uint8_cast(std::round(qmin_double - rmin / scale)); } nudged_min = static_cast((qmin_double - nudged_zero_point) * scale); diff --git a/compiler/luci/pass/src/QuantizeWithMinMaxPass.cpp b/compiler/luci/pass/src/QuantizeWithMinMaxPass.cpp index f8abee7..b335a53 100644 --- a/compiler/luci/pass/src/QuantizeWithMinMaxPass.cpp +++ b/compiler/luci/pass/src/QuantizeWithMinMaxPass.cpp @@ -138,7 +138,8 @@ bool is_quantized(const CircleNode *node) node->dtype() == loco::DataType::S32; // bias } -void sym_wquant_per_channel(CircleConst *node, std::vector &scaling_factor) +void sym_wquant_per_channel(CircleConst *node, std::vector &scaling_factor, + int32_t &channel_dim_index) { assert(node->dtype() == loco::DataType::FLOAT32); @@ -153,7 +154,6 @@ void sym_wquant_per_channel(CircleConst *node, std::vector &scaling_facto uint32_t indices[4] = { 0, }; - int channel_dim_index{0}; if (!get_channel_dim_index(node, dimension, channel_dim_index)) { @@ -189,7 +189,7 @@ void sym_wquant_per_channel(CircleConst *node, std::vector &scaling_facto } void asym_wquant_per_channel(CircleConst *node, std::vector &min, - std::vector &scaling_factor) + std::vector &scaling_factor, int32_t &channel_dim_index) { assert(node->dtype() == loco::DataType::FLOAT32); @@ -204,7 +204,6 @@ void asym_wquant_per_channel(CircleConst *node, std::vector &min, uint32_t indices[4] = { 0, }; - int channel_dim_index{0}; if (!get_channel_dim_index(node, dimension, channel_dim_index)) { @@ -282,6 +281,10 @@ bool is_weights(CircleNode *node) if (dw_conv != nullptr && dw_conv->filter() == circle_const) return true; + auto t_conv = dynamic_cast(out); + if (t_conv != nullptr && t_conv->filter() == circle_const && circle_const->rank() == 4) + return true; + auto fc = dynamic_cast(out); if (fc != nullptr && fc->weights() == circle_const) return true; @@ -350,8 +353,8 @@ struct QuantizeActivation final : public luci::CircleNodeMutableVisitor circle_node->dtype(loco::DataType::S16); } - circle_node->quantparam()->max[0] = nudged_max; - circle_node->quantparam()->min[0] = nudged_min; + circle_node->quantparam()->min.clear(); + circle_node->quantparam()->max.clear(); circle_node->quantparam()->scale.push_back(scaling_factor); circle_node->quantparam()->zerop.push_back(zp); } @@ -472,15 +475,19 @@ struct QuantizeWeights final : public luci::CircleNodeMutableVisitor assert(quantparam != nullptr); auto min = quantparam->min; auto scaling_factor = quantparam->scale; + int32_t channel_dim_index = 0; if (output_type == loco::DataType::U8) { - asym_wquant_per_channel(circle_const, min, scaling_factor); + asym_wquant_per_channel(circle_const, min, scaling_factor, channel_dim_index); } else { - sym_wquant_per_channel(circle_const, scaling_factor); + sym_wquant_per_channel(circle_const, scaling_factor, channel_dim_index); } + quantparam->min.clear(); + quantparam->max.clear(); + quantparam->quantized_dimension = channel_dim_index; } // Find min/max per layer-wise else @@ -493,6 +500,8 @@ struct QuantizeWeights final : public 
luci::CircleNodeMutableVisitor auto min = quantparam->min[0]; auto scaling_factor = quantparam->scale[0]; asym_wquant_per_layer(circle_const, min, scaling_factor); + quantparam->min.clear(); + quantparam->max.clear(); } } } diff --git a/compiler/luci/service/src/CircleShapeInferenceRule.cpp b/compiler/luci/service/src/CircleShapeInferenceRule.cpp index a291cfe..6355ec5 100644 --- a/compiler/luci/service/src/CircleShapeInferenceRule.cpp +++ b/compiler/luci/service/src/CircleShapeInferenceRule.cpp @@ -1010,6 +1010,12 @@ public: loco::NodeShape visit(const luci::CircleNeg *node) final { return use_x(node); } + loco::NodeShape visit(const luci::CircleNonMaxSuppressionV4 *node) final + { + const auto boxes_shape = loco::shape_get(node->boxes()).as(); + return loco::NodeShape{boxes_shape}; + } + loco::NodeShape visit(const luci::CircleNotEqual *node) final { return broadcast_xy(node); } loco::NodeShape visit(const luci::CircleOneHot *node) final @@ -1818,6 +1824,18 @@ public: return output_shape; } + loco::NodeShape visit(const luci::CircleUnique *node) final + { + auto input_shape = loco::shape_get(node->input()).as(); + + assert(input_shape.rank() == 1); + + loco::TensorShape shape_output; + shape_output = own_shape(node); + + return loco::NodeShape{shape_output}; + } + loco::NodeShape visit(const luci::CircleTransposeConv *node) final { // TransposeConv's output shape is written in its 'inputSizes' argument @@ -2019,6 +2037,34 @@ public: return loco::NodeShape{*then_graph_output->shape()}; } + loco::NodeShape visit(const luci::CircleNonMaxSuppressionV4Out *node) final + { + const loco::DataType S32 = loco::DataType::S32; + + auto nmsv4 = dynamic_cast(node->input()); + if (nmsv4 == nullptr) + INTERNAL_EXN("CircleNonMaxSuppressionV4 IR is not configured correctly"); + + auto index = node->index(); + if (index == 1) + return loco::TensorShape({0}); + + assert(index == 0); + + auto unknown = loco::TensorShape{loco::Dimension()}; + auto max_output_size = dynamic_cast(nmsv4->max_output_size()); + if (max_output_size == nullptr) + return unknown; // we need CircleConst for max output size + + LUCI_ASSERT(max_output_size->dtype() == S32, "Only support int32 for max_output_size"); + + if (max_output_size->size() < 1) + return unknown; + + auto max_output_size_value = uint32_t(max_output_size->at(0)); + return loco::TensorShape{max_output_size_value}; + } + loco::NodeShape visit(const luci::CircleSplitOut *node) final { const loco::DataType S32 = loco::DataType::S32; @@ -2142,6 +2188,19 @@ public: return loco::NodeShape{output_shape}; } + loco::NodeShape visit(const luci::CircleUniqueOut *node) final + { + auto unique = dynamic_cast(node->input()); + if (unique == nullptr) + { + INTERNAL_EXN("CircleUnique IR is not configured correctly"); + } + + auto unique_shape = loco::shape_get(unique).as(); + + return loco::NodeShape{unique_shape}; + } + loco::NodeShape visit(const luci::CircleUnpackOut *node) final { auto unpack = dynamic_cast(node->input()); diff --git a/compiler/luci/service/src/CircleTypeInferenceRule.cpp b/compiler/luci/service/src/CircleTypeInferenceRule.cpp index de2ba3e..e7910bf 100644 --- a/compiler/luci/service/src/CircleTypeInferenceRule.cpp +++ b/compiler/luci/service/src/CircleTypeInferenceRule.cpp @@ -252,6 +252,11 @@ struct TypeInferenceAlgorithm final : public luci::CircleNodeVisitorx()); } + loco::DataType visit(const luci::CircleNonMaxSuppressionV4 *node) final + { + return loco::dtype_get(node->boxes()); + } + loco::DataType visit(const luci::CircleNotEqual *) final { return 
loco::DataType::BOOL; } loco::DataType visit(const luci::CirclePack *node) final @@ -345,7 +350,10 @@ struct TypeInferenceAlgorithm final : public luci::CircleNodeVisitortensor()); } - loco::DataType visit(const luci::CircleResizeBilinear *) final { return loco::DataType::FLOAT32; } + loco::DataType visit(const luci::CircleResizeBilinear *node) final + { + return loco::dtype_get(node->input()); + } loco::DataType visit(const luci::CircleResizeNearestNeighbor *node) final { @@ -472,6 +480,11 @@ struct TypeInferenceAlgorithm final : public luci::CircleNodeVisitoroutBackprop()); } + loco::DataType visit(const luci::CircleUnique *node) final + { + return loco::dtype_get(node->input()); + } + loco::DataType visit(const luci::CircleUnpack *node) final { return loco::dtype_get(node->value()); @@ -569,6 +582,13 @@ struct TypeInferenceAlgorithm final : public luci::CircleNodeVisitordtype(); } + loco::DataType visit(const luci::CircleNonMaxSuppressionV4Out *node) final + { + (void)node; + assert(node->index() == 0 || node->index() == 1); + return loco::DataType::S32; + } + loco::DataType visit(const luci::CircleSplitOut *node) final { return loco::dtype_get(node->input()); @@ -589,6 +609,17 @@ struct TypeInferenceAlgorithm final : public luci::CircleNodeVisitorindex() == 0) + { + return loco::dtype_get(node->input()); + } + assert(node->index() == 1); + auto unique = loco::must_cast(node->input()); + return unique->idx_out_type(); + } + loco::DataType visit(const luci::CircleUnpackOut *node) final { return loco::dtype_get(node->input()); diff --git a/compiler/luci/tests/test.lst b/compiler/luci/tests/test.lst index 188e298..9fd42ed 100644 --- a/compiler/luci/tests/test.lst +++ b/compiler/luci/tests/test.lst @@ -20,6 +20,7 @@ addread(ArgMin_U8_001) addread(ArgMin_U8_002) addread(ArgMin_U8_003) addread(AveragePool2D_000) +addread(AveragePool2D_U8_000) addread(BatchMatMul_000) addread(BatchMatMulV2_000) addread(BatchMatMulV2_001) @@ -30,13 +31,16 @@ addread(Ceil_000) addread(Concatenation_000) addread(Concatenation_U8_000) addread(Conv2D_000) +addread(Conv2D_001) addread(Conv2D_002) addread(Conv2D_003) addread(Conv2D_U8_000) +addread(Conv2D_U8_001) addread(Cos_000) addread(DepthToSpace_000) addread(DepthwiseConv2D_000) addread(DepthwiseConv2D_U8_000) +addread(DepthwiseConv2D_U8_001) addread(DepthwiseConv2D_001) addread(Div_000) addread(ELU_000) @@ -64,6 +68,7 @@ addread(GreaterEqual_000) addread(If_000) addread(If_001) addread(L2Normalize_000) +addread(L2Normalize_U8_000) addread(L2Pool2D_000) addread(L2Pool2D_U8_000) addread(LeakyRelu_000) @@ -75,6 +80,7 @@ addread(LogicalAnd_000) addread(LogicalNot_000) addread(LogicalOr_000) addread(Logistic_000) +addread(Logistic_U8_000) addread(LogSoftmax_000) addread(MatMul_000) addread(MatrixDiag_000) @@ -84,6 +90,7 @@ addread(MaxPool2D_000) addread(MaxPool2D_U8_000) addread(Mean_000) addread(Mean_001) +addread(Mean_U8_000) addread(Minimum_000) addread(MirrorPad_000) addread(Mul_000) @@ -97,6 +104,7 @@ addread(OneHot_003) addread(Pack_000) addread(Pack_U8_000) addread(Pad_000) +addread(Pad_U8_000) addread(Pow_000) addread(PRelu_000) addread(Range_000) @@ -212,6 +220,7 @@ addwrite(ArgMin_U8_001) addwrite(ArgMin_U8_002) addwrite(ArgMin_U8_003) addwrite(AveragePool2D_000) +addwrite(AveragePool2D_U8_000) addwrite(BatchMatMul_000) addwrite(BatchMatMulV2_000) addwrite(BatchMatMulV2_001) @@ -222,13 +231,16 @@ addwrite(Ceil_000) addwrite(Concatenation_000) addwrite(Concatenation_U8_000) addwrite(Conv2D_000) +addwrite(Conv2D_001) addwrite(Conv2D_002) addwrite(Conv2D_003) 
addwrite(Conv2D_U8_000) +addwrite(Conv2D_U8_001) addwrite(Cos_000) addwrite(DepthToSpace_000) addwrite(DepthwiseConv2D_000) addwrite(DepthwiseConv2D_U8_000) +addwrite(DepthwiseConv2D_U8_001) addwrite(DepthwiseConv2D_001) addwrite(Div_000) addwrite(ELU_000) @@ -256,6 +268,7 @@ addwrite(GreaterEqual_000) addwrite(If_000) addwrite(If_001) addwrite(L2Normalize_000) +addwrite(L2Normalize_U8_000) addwrite(L2Pool2D_000) addwrite(L2Pool2D_U8_000) addwrite(LeakyRelu_000) @@ -267,6 +280,7 @@ addwrite(LogicalAnd_000) addwrite(LogicalNot_000) addwrite(LogicalOr_000) addwrite(Logistic_000) +addwrite(Logistic_U8_000) addwrite(LogSoftmax_000) addwrite(MatMul_000) addwrite(MatrixDiag_000) @@ -276,6 +290,7 @@ addwrite(MaxPool2D_000) addwrite(MaxPool2D_U8_000) addwrite(Mean_000) addwrite(Mean_001) +addwrite(Mean_U8_000) addwrite(Minimum_000) addwrite(MirrorPad_000) addwrite(Mul_000) diff --git a/compiler/mio-tflite/CMakeLists.txt b/compiler/mio-tflite/CMakeLists.txt index 2cfed14..9ef2859 100644 --- a/compiler/mio-tflite/CMakeLists.txt +++ b/compiler/mio-tflite/CMakeLists.txt @@ -5,11 +5,7 @@ if(NOT FlatBuffers_FOUND) return() endif(NOT FlatBuffers_FOUND) -# TODO recover official release version -# NOTE we cannot use version number like "2.3.0-rc0" for find_package() -# use TensorFlowSource-2.3.0-rc0 as config itself -# nnas_find_package(TensorFlowSource EXACT 2.3.0 QUIET) -nnas_find_package(TensorFlowSource-2.3.0-rc0 QUIET) +nnas_find_package(TensorFlowSource EXACT 2.3.0 QUIET) if(NOT TensorFlowSource_FOUND) return() diff --git a/compiler/one-cmds/CMakeLists.txt b/compiler/one-cmds/CMakeLists.txt index 7d73d9b..173b8b4 100644 --- a/compiler/one-cmds/CMakeLists.txt +++ b/compiler/one-cmds/CMakeLists.txt @@ -1,5 +1,6 @@ set(ONE_COMMAND_FILES one-import + one-import-bcq one-import-tf one-import-tflite one-optimize @@ -26,7 +27,7 @@ foreach(ONE_COMMAND IN ITEMS ${ONE_COMMAND_FILES}) install(FILES ${ONE_COMMAND} PERMISSIONS OWNER_WRITE OWNER_READ OWNER_EXECUTE - GROUP_READ GROUP_WRITE GROUP_EXECUTE + GROUP_READ GROUP_EXECUTE WORLD_READ WORLD_EXECUTE DESTINATION bin) diff --git a/compiler/one-cmds/how-to-prepare-virtualenv.txt b/compiler/one-cmds/how-to-prepare-virtualenv.txt index 41fff3a..62a9496 100644 --- a/compiler/one-cmds/how-to-prepare-virtualenv.txt +++ b/compiler/one-cmds/how-to-prepare-virtualenv.txt @@ -1,12 +1,12 @@ About ----- -Last update: 2020-07-14 +Last update: 2020-08-03 This document explains about 'one-prepare-venv' command. 'one-prepare-venv' will prepare python3 virtual environment with tensorflow-cpu -version 2.3.0rc0, recommanded 2.x version as of now, so that 'one-import-tf' +version 2.3.0, recommanded 2.x version as of now, so that 'one-import-tf' command can execute properly. diff --git a/compiler/one-cmds/how-to-use-one-commands.txt b/compiler/one-cmds/how-to-use-one-commands.txt index 6c2176a..0ee69e0 100644 --- a/compiler/one-cmds/how-to-use-one-commands.txt +++ b/compiler/one-cmds/how-to-use-one-commands.txt @@ -1,7 +1,7 @@ About ----- -Last update: 2020-07-14 +Last update: 2020-07-31 This document briefly explains how to use one-* commands. Detailed options are not explained here. Run the command to see options. @@ -30,6 +30,27 @@ Syntax: one-import [framework] [options] Currently supported frameworks are 'tf', 'tflite' for TensorFlow and TensorFlow lite. +one-import-bcq +------------- + +This will convert Tensorflow model file (.pb) to our circle model file with applying BCQ. +To execute this command, original Tensorflow model file must include BCQ information. 
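For illustration only, an invocation of one-import-bcq might look like the sketch below. The option names match the usage text added later in this patch, but the model file, array names and shape values are placeholders, not taken from the patch:

  # Convert a hypothetical BCQ-annotated TensorFlow frozen graph to a circle model.
  # bcq_model.pb, Placeholder, MatMul and "1,16" are example values only.
  one-import-bcq \
    --input_path ./bcq_model.pb \
    --input_arrays Placeholder \
    --input_shapes "1,16" \
    --output_arrays MatMul \
    --output_path ./bcq_model.circle
  # Pass --v2 to use the TensorFlow 2.x converter interface instead of the 1.x default.

On success the converted model is written to the given --output_path, and a log file with the same prefix (here bcq_model.circle.log) records the intermediate tool invocations described below.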
+ +This command invokes following scripts internally. +- preserve_bcq_info : Prevent BCQ information vanishing problem +- generate_bcq_info : Designate BCQ information nodes as model output automatically +- tf2tfliteV2 : Convert Tensorflow model to tflite model +- tflite2circle : Convert Tensorflow Lite model to circle model +When this command is finished, BCQ information nodes will be removed if BCQ information +was valid and applying BCQ is done correctly without any errors. + +As tf2tfliteV2.py runs TensorFlow lite converter, you need to have TensorFlow +installed in your system. We recommand to use 2.3.0 for now. + +We provide python virtual environment and one-import-bcq will enter and leave +this environment so that you don't need to explictly 'activate' virtual +environment. + one-import-tf ------------- @@ -40,7 +61,7 @@ will internally use TensorFlow lite converter and then invoke tflite2circle converter to convert tflite model to circle model. As tf2tfliteV2.py runs TensorFlow lite converter, you need to have TensorFlow -installed in your system. We recommand to use 2.3.0rc0 for now. +installed in your system. We recommand to use 2.3.0 for now. We provide python virtual environment and one-import-tf will enter and leave this environment so that you don't need to explictly 'activate' virtual diff --git a/compiler/one-cmds/one-codegen b/compiler/one-cmds/one-codegen index 2c80664..820b6d8 100644 --- a/compiler/one-cmds/one-codegen +++ b/compiler/one-cmds/one-codegen @@ -18,7 +18,7 @@ DRIVER_PATH="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" function Usage() { - echo "Usage: $0 [BACKEND] ..." + echo "Usage: one-codegen [BACKEND] ..." echo "Available BACKEND drivers:" backend_exist=0 for file in `find $DRIVER_PATH -name *-compile -type f`; @@ -33,23 +33,34 @@ function Usage() if [ $backend_exist == 0 ]; then echo " (There is no available backend drivers)" fi + + exit 255 } -# Get command from command-line -BACKEND=$1; shift -BACKEND_DRIVER="$BACKEND-compile" +function version() +{ + $DRIVER_PATH/one-version one-codegen + exit 255 +} -if [[ -z "${BACKEND_DRIVER}" ]]; then +# Get command from command-line +BACKEND=$1 +if [[ -z ${BACKEND} ]]; then Usage - exit 255 fi +shift + +if [[ "${BACKEND}" == "--version" ]]; then + version +fi + +BACKEND_DRIVER="${BACKEND}-compile" BACKEND_DRIVER_CMD="${DRIVER_PATH}/${BACKEND_DRIVER}" if [[ ! -f "${BACKEND_DRIVER_CMD}" ]]; then echo "ERROR: '${BACKEND_DRIVER}' is not supported" Usage - exit 255 fi "${BACKEND_DRIVER_CMD}" "$@" diff --git a/compiler/one-cmds/one-import b/compiler/one-cmds/one-import index dbf4af5..b1dd8f4 100644 --- a/compiler/one-cmds/one-import +++ b/compiler/one-cmds/one-import @@ -18,7 +18,7 @@ DRIVER_PATH="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" function Usage() { - echo "Usage: $0 [FRAMEWORK] ..." + echo "Usage: one-import [FRAMEWORK] ..." 
echo "Available FRAMEWORK drivers:" framework_exist=0 for file in "$DRIVER_PATH"/one-import-*; @@ -31,23 +31,34 @@ function Usage() if [ $framework_exist == 0 ]; then echo " (There is no available import drivers)" fi + + exit 255 } -# Get command from command-line -FRAMEWORK=$1; shift -FRAMEWORK_DRIVER="one-import-$FRAMEWORK" +function version() +{ + $DRIVER_PATH/one-version one-import-tf + exit 255 +} -if [[ -z "${FRAMEWORK_DRIVER}" ]]; then +# Get command from command-line +FRAMEWORK=$1 +if [[ -z ${FRAMEWORK} ]]; then Usage - exit 255 +fi +shift + +if [ ${FRAMEWORK} = "--version" ]; then + version fi +FRAMEWORK_DRIVER="one-import-$FRAMEWORK" + FRAMEWORK_DRIVER_CMD="${DRIVER_PATH}/${FRAMEWORK_DRIVER}" if [[ ! -f "${FRAMEWORK_DRIVER_CMD}" ]]; then echo "ERROR: '${FRAMEWORK_DRIVER}' is not supported" Usage - exit 255 fi "${FRAMEWORK_DRIVER_CMD}" "$@" diff --git a/compiler/one-cmds/one-import-bcq b/compiler/one-cmds/one-import-bcq new file mode 100644 index 0000000..98dd1ef --- /dev/null +++ b/compiler/one-cmds/one-import-bcq @@ -0,0 +1,150 @@ +#!/bin/bash + +# Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -e + +DRIVER_PATH="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" + +usage() +{ + echo "Convert TensorFlow model with BCQ to circle." + echo "Usage: one-import-bcq" + echo " --version Show version information and exit" + echo " --input_path " + echo " --output_path " + echo " --input_arrays " + echo " --input_shapes " + echo " --output_arrays " + echo " --v2 Use TensorFlow 2.x interface (default is 1.x interface)" + exit 255 +} + +version() +{ + $DRIVER_PATH/one-version one-import-bcq + exit 255 +} + +TF_INTERFACE="--v1" + +# Parse command-line arguments +# +while [ "$#" -ne 0 ]; do + CUR="$1" + + case $CUR in + '--help') + usage + ;; + '--version') + version + ;; + '--input_path') + export INPUT_PATH="$2" + shift 2 + ;; + '--output_path') + export OUTPUT_PATH="$2" + shift 2 + ;; + '--input_arrays') + export INPUT_ARRAYS="$2" + shift 2 + ;; + '--input_shapes') + export INPUT_SHAPES="$2" + shift 2 + ;; + '--output_arrays') + export OUTPUT_ARRAYS="$2" + shift 2 + ;; + '--v2') + TF_INTERFACE="--v2" + shift + ;; + *) + echo "Unknown parameter: ${CUR}" + shift + ;; + esac +done + +if [ -z ${INPUT_PATH} ] || [ ! 
-e ${INPUT_PATH} ]; then + echo "Error: input model not found" + echo "" + usage + exit 2 +fi + +FILE_BASE=$(basename ${OUTPUT_PATH}) +MODEL_NAME="${FILE_BASE%.*}" + +TMPDIR=$(mktemp -d) +trap "{ rm -rf $TMPDIR; }" EXIT + +# activate python virtual environment +VIRTUALENV_LINUX="${DRIVER_PATH}/venv/bin/activate" +VIRTUALENV_WINDOWS="${DRIVER_PATH}/venv/Scripts/activate" + +if [ -e ${VIRTUALENV_LINUX} ]; then + source ${VIRTUALENV_LINUX} +elif [ -e ${VIRTUALENV_WINDOWS} ]; then + source ${VIRTUALENV_WINDOWS} +fi + +# remove previous log +rm -rf "${OUTPUT_PATH}.log" + +# generate temporary preserved pb file +echo "${DRIVER_PATH}/preserve_bcq_info" --input_path ${INPUT_PATH} \ +--output_path "${TMPDIR}/${MODEL_NAME}_preserved.pb" > "${OUTPUT_PATH}.log" +echo " " >> "${OUTPUT_PATH}.log" + +"${DRIVER_PATH}/preserve_bcq_info" --input_path ${INPUT_PATH} \ +--output_path "${TMPDIR}/${MODEL_NAME}_preserved.pb" >> "${OUTPUT_PATH}.log" 2>&1 + +# generate output_arrays automatically +echo "${DRIVER_PATH}/generate_bcq_output_arrays" \ +--input_path "${TMPDIR}/${MODEL_NAME}_preserved.pb" \ +--output_path "${TMPDIR}/${MODEL_NAME}_output_arrays.txt" > "${OUTPUT_PATH}.log" +echo " " >> "${OUTPUT_PATH}.log" + +"${DRIVER_PATH}/generate_bcq_output_arrays" \ +--input_path "${TMPDIR}/${MODEL_NAME}_preserved.pb" \ +--output_path "${TMPDIR}/${MODEL_NAME}_output_arrays.txt" >> "${OUTPUT_PATH}.log" 2>&1 + +# generate temporary tflite file +CONVERT_SCRIPT="python ${DRIVER_PATH}/tf2tfliteV2.py ${TF_INTERFACE} " +CONVERT_SCRIPT+="--input_path ${TMPDIR}/${MODEL_NAME}_preserved.pb " +CONVERT_SCRIPT+="--input_arrays ${INPUT_ARRAYS} " +CONVERT_SCRIPT+="--output_path ${TMPDIR}/${MODEL_NAME}.tflite " +CONVERT_SCRIPT+="--output_arrays ${OUTPUT_ARRAYS}$(cat ${TMPDIR}/${MODEL_NAME}_output_arrays.txt) " +if [ ! -z ${INPUT_SHAPES} ]; then + CONVERT_SCRIPT+="--input_shapes ${INPUT_SHAPES} " +fi + +echo ${CONVERT_SCRIPT} > "${OUTPUT_PATH}.log" +$CONVERT_SCRIPT >> "${OUTPUT_PATH}.log" 2>&1 + +# convert .tflite to .circle +echo " " >> "${OUTPUT_PATH}.log" +echo "${DRIVER_PATH}/tflite2circle" "${TMPDIR}/${MODEL_NAME}.tflite" \ +"${OUTPUT_PATH}" >> "${OUTPUT_PATH}.log" +echo " " >> "${OUTPUT_PATH}.log" + +"${DRIVER_PATH}/tflite2circle" "${TMPDIR}/${MODEL_NAME}.tflite" \ +"${OUTPUT_PATH}" >> "${OUTPUT_PATH}.log" 2>&1 diff --git a/compiler/one-cmds/one-import-tf b/compiler/one-cmds/one-import-tf index c048a4e..d59e1c5 100644 --- a/compiler/one-cmds/one-import-tf +++ b/compiler/one-cmds/one-import-tf @@ -22,14 +22,24 @@ usage() { echo "Convert TensorFlow model to circle." 
echo "Usage: one-import-tf" + echo " --version Show version information and exit" echo " --input_path " echo " --output_path " echo " --input_arrays " echo " --input_shapes " echo " --output_arrays " - exit 0 + echo " --v2 Use TensorFlow 2.x interface (default is 1.x interface)" + exit 255 } +version() +{ + $DRIVER_PATH/one-version one-import-tf + exit 255 +} + +TF_INTERFACE="--v1" + # Parse command-line arguments # while [ "$#" -ne 0 ]; do @@ -39,6 +49,9 @@ while [ "$#" -ne 0 ]; do '--help') usage ;; + '--version') + version + ;; '--input_path') export INPUT_PATH="$2" shift 2 @@ -59,6 +72,10 @@ while [ "$#" -ne 0 ]; do export OUTPUT_ARRAYS="$2" shift 2 ;; + '--v2') + TF_INTERFACE="--v2" + shift + ;; *) echo "Unknown parameter: ${CUR}" shift @@ -92,14 +109,21 @@ fi # remove previous log rm -rf "${OUTPUT_PATH}.log" +show_err_onexit() +{ + cat "${OUTPUT_PATH}.log" +} + +trap show_err_onexit ERR + # generate temporary tflite file -echo "python" "${DRIVER_PATH}/tf2tfliteV2.py" --v2 --input_path ${INPUT_PATH} \ +echo "python" "${DRIVER_PATH}/tf2tfliteV2.py" ${TF_INTERFACE} --input_path ${INPUT_PATH} \ --input_arrays ${INPUT_ARRAYS} --input_shapes ${INPUT_SHAPES} \ --output_path "${TMPDIR}/${MODEL_NAME}.tflite" \ --output_arrays ${OUTPUT_ARRAYS} > "${OUTPUT_PATH}.log" echo " " >> "${OUTPUT_PATH}.log" -python "${DRIVER_PATH}/tf2tfliteV2.py" --v2 --input_path ${INPUT_PATH} \ +python "${DRIVER_PATH}/tf2tfliteV2.py" ${TF_INTERFACE} --input_path ${INPUT_PATH} \ --input_arrays ${INPUT_ARRAYS} --input_shapes ${INPUT_SHAPES} \ --output_path "${TMPDIR}/${MODEL_NAME}.tflite" \ --output_arrays ${OUTPUT_ARRAYS} >> "${OUTPUT_PATH}.log" 2>&1 diff --git a/compiler/one-cmds/one-import-tflite b/compiler/one-cmds/one-import-tflite index 31ed5af..053489c 100644 --- a/compiler/one-cmds/one-import-tflite +++ b/compiler/one-cmds/one-import-tflite @@ -22,9 +22,16 @@ usage() { echo "Convert TensorFlow lite model to circle." echo "Usage: one-import-tflite" + echo " --version Show version information and exit" echo " --input_path " echo " --output_path " - exit 0 + exit 255 +} + +version() +{ + $DRIVER_PATH/one-version one-import-tflite + exit 255 } # Parse command-line arguments @@ -36,6 +43,9 @@ while [ "$#" -ne 0 ]; do '--help') usage ;; + '--version') + version + ;; '--input_path') export INPUT_PATH="$2" shift 2 @@ -55,12 +65,18 @@ if [ -z ${INPUT_PATH} ] || [ ! -e ${INPUT_PATH} ]; then echo "Error: input model not found" echo "" usage - exit 2 fi # remove previous log rm -rf "${OUTPUT_PATH}.log" +show_err_onexit() +{ + cat "${OUTPUT_PATH}.log" +} + +trap show_err_onexit ERR + # convert .tflite to .circle echo "${DRIVER_PATH}/tflite2circle" "${INPUT_PATH}" "${OUTPUT_PATH}" > "${OUTPUT_PATH}.log" diff --git a/compiler/one-cmds/one-optimize b/compiler/one-cmds/one-optimize index 95384c1..17b6b98 100644 --- a/compiler/one-cmds/one-optimize +++ b/compiler/one-cmds/one-optimize @@ -22,6 +22,7 @@ usage() { echo "Optimize circle model." 
echo "Usage: one-optimize" + echo " --version Show version information and exit" echo " --all Enable all optimization algorithms" echo " --fuse_bcq Enable FuseBCQ Pass" echo " --fuse_instnorm Enable FuseInstanceNormalization Pass" @@ -33,7 +34,13 @@ usage() echo " Enable ResolveCustomOpMatMulPass Pass" echo " --input_path " echo " --output_path " - exit 0 + exit 255 +} + +version() +{ + $DRIVER_PATH/one-version one-optimize + exit 255 } OPTIMIZE_all=0 @@ -52,6 +59,9 @@ while [ "$#" -ne 0 ]; do '--help') usage ;; + '--version') + version + ;; '--all') OPTIMIZE_all=1 shift @@ -96,7 +106,6 @@ if [ -z ${INPUT_PATH} ] || [ ! -e ${INPUT_PATH} ]; then echo "Error: input model not found" echo "" usage - exit 2 fi OPTIMIZE_OPTIONS="" @@ -123,6 +132,13 @@ fi # remove previous log rm -rf "${OUTPUT_PATH}.log" +show_err_onexit() +{ + cat "${OUTPUT_PATH}.log" +} + +trap show_err_onexit ERR + # NOTE do not wrap ${OPTIMIZE_OPTIONS} with "" # optimize circle echo "${DRIVER_PATH}/circle2circle" ${OPTIMIZE_OPTIONS} \ diff --git a/compiler/one-cmds/one-pack b/compiler/one-cmds/one-pack index 2bc4c60..023b0a8 100644 --- a/compiler/one-cmds/one-pack +++ b/compiler/one-cmds/one-pack @@ -22,9 +22,16 @@ usage() { echo "Package circle to nnpkg" echo "Usage: one-pack" + echo " -v, --version Show version information and exit" echo " -i " echo " -o " - exit 0 + exit 255 +} + +version() +{ + $DRIVER_PATH/one-version one-pack + exit 255 } # Parse command-line arguments @@ -36,6 +43,12 @@ while [ "$#" -ne 0 ]; do '--help') usage ;; + '-v') + version + ;; + '--version') + version + ;; '-i') export INPUT_PATH="$2" shift 2 @@ -55,13 +68,22 @@ if [ -z ${INPUT_PATH} ] || [ ! -e ${INPUT_PATH} ]; then echo "Error: input model not found" echo "" usage - exit 2 fi +INPUT_FILE=$(basename "${INPUT_PATH}") +LOG_FILE="${INPUT_FILE%.*}.pack.log" + # remove previous log -rm -rf "${OUTPUT_PATH}.log" +rm -rf "${LOG_FILE}" + +show_err_onexit() +{ + cat "${LOG_FILE}" +} + +trap show_err_onexit ERR # Package circle model file to nnpkg -echo "${DRIVER_PATH}/model2nnpkg.sh" -o "${OUTPUT_PATH}" "${INPUT_PATH}" > "${OUTPUT_PATH}.log" +echo "${DRIVER_PATH}/model2nnpkg.sh" -o "${OUTPUT_PATH}" "${INPUT_PATH}" > "${LOG_FILE}" -"${DRIVER_PATH}/model2nnpkg.sh" -o "${OUTPUT_PATH}" "${INPUT_PATH}" >> "${OUTPUT_PATH}.log" 2>&1 +"${DRIVER_PATH}/model2nnpkg.sh" -o "${OUTPUT_PATH}" "${INPUT_PATH}" >> "${LOG_FILE}" 2>&1 diff --git a/compiler/one-cmds/one-prepare-venv b/compiler/one-cmds/one-prepare-venv index fce838d..0a53bd3 100644 --- a/compiler/one-cmds/one-prepare-venv +++ b/compiler/one-cmds/one-prepare-venv @@ -26,7 +26,19 @@ if [ -f ${VENV_ACTIVATE} ]; then fi # Install prerequisites -python3 -m pip install -U virtualenv +python3 -m pip install --user -U virtualenv + +function error_no_ensurepip () +{ + echo "ERROR: python3 'ensurepip' module is not found." + echo " On ubuntu, try following command:" + echo + echo " apt install python$(python3 --version | awk '{print $2}' | awk -F. '{print $1"."$2}')-venv" + echo + echo " You may need root privilege for this." 
+ exit 1 +} +python3 -m ensurepip --version > /dev/null 2>&1 || error_no_ensurepip # Create python virtual enviornment python3 -m venv "${DRIVER_PATH}/venv" @@ -37,4 +49,4 @@ source "${VENV_ACTIVATE}" python -m pip --default-timeout=1000 --trusted-host pypi.org --trusted-host files.pythonhost.org \ install -U pip setuptools python -m pip --default-timeout=1000 --trusted-host pypi.org --trusted-host files.pythonhost.org \ - install tensorflow-cpu==2.3.0rc0 + install tensorflow-cpu==2.3.0 diff --git a/compiler/one-cmds/one-quantize b/compiler/one-cmds/one-quantize index ff9e266..c74b2c2 100644 --- a/compiler/one-cmds/one-quantize +++ b/compiler/one-cmds/one-quantize @@ -22,16 +22,23 @@ usage() { echo "Quantize circle model." echo "Usage: one-quantize" + echo " --version Show version information and exit" echo " --input_dtype Input data type (supported: float32, default=float32)" echo " --quantized_dtype Output quantized data type (supported: uint8, default=uint8)" - echo " --granularity Quantize granularity (supported: layer, default=layer)" + echo " --granularity Quantize granularity (supported: layer, channel, default=layer)" echo " --min_percentile Minimum percentile (0.0~100.0, default=1.0)" echo " --max_percentile Maximum percentile (0.0~100.0, default=99.0)" echo " --mode Record mode (supported: percentile/moving_average, default=percentile)" echo " --input_path " echo " --input_data " echo " --output_path " - exit 0 + exit 255 +} + +version() +{ + $DRIVER_PATH/one-version one-quantize + exit 255 } INPUT_DTYPE=float32 @@ -50,6 +57,9 @@ while [ "$#" -ne 0 ]; do '--help') usage ;; + '--version') + version + ;; '--input_dtype') INPUT_DTYPE="$2" @@ -100,13 +110,11 @@ if [ -z ${INPUT_PATH} ] || [ ! -e ${INPUT_PATH} ]; then echo "Error: input model not found" echo "" usage - exit 2 fi if [ -z ${INPUT_DATA} ] || [ ! 
-e ${INPUT_DATA} ]; then echo "Error: input data not found" echo "" usage - exit 2 fi FILE_BASE=$(basename ${OUTPUT_PATH}) @@ -118,6 +126,13 @@ trap "{ rm -rf $TMPDIR; }" EXIT # remove previous log rm -rf "${OUTPUT_PATH}.log" +show_err_onexit() +{ + cat "${OUTPUT_PATH}.log" +} + +trap show_err_onexit ERR + # quantize circle echo "${DRIVER_PATH}/circle-quantizer" \ --quantize_dequantize_weights ${INPUT_DTYPE} ${QUANTIZED_DTYPE} ${GRANULARITY} \ diff --git a/compiler/one-cmds/requires.cmake b/compiler/one-cmds/requires.cmake index 9b858ad..50c2457 100644 --- a/compiler/one-cmds/requires.cmake +++ b/compiler/one-cmds/requires.cmake @@ -3,3 +3,5 @@ require("tflite2circle") require("circle2circle") require("circle-quantizer") require("record-minmax") +require("vconone") +require("bcq-tools") diff --git a/compiler/pota-quantization-value-test/CMakeLists.txt b/compiler/pota-quantization-value-test/CMakeLists.txt index d97ffc1..73b9ead 100644 --- a/compiler/pota-quantization-value-test/CMakeLists.txt +++ b/compiler/pota-quantization-value-test/CMakeLists.txt @@ -49,21 +49,21 @@ add_test( ${QUANTIZATION_VALUE_TEST_WITH_PARAM} ) -#add_test( -# NAME pota_record_minmax_test -# COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/test_record_minmax.sh" -# "${TEST_CONFIG}" -# "${ARTIFACTS_BIN_PATH}" -# ${QUANTIZATION_VALUE_TEST_WITH_PARAM} -#) +add_test( + NAME pota_record_minmax_test + COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/test_record_minmax.sh" + "${TEST_CONFIG}" + "${ARTIFACTS_BIN_PATH}" + ${QUANTIZATION_VALUE_TEST_WITH_PARAM} +) -#add_test( -# NAME pota_quantization_test -# COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/test_quantization.sh" -# "${TEST_CONFIG}" -# "${ARTIFACTS_BIN_PATH}" -# ${QUANTIZATION_VALUE_TEST_WITH_PARAM} -#) +add_test( + NAME pota_quantization_test + COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/test_quantization.sh" + "${TEST_CONFIG}" + "${ARTIFACTS_BIN_PATH}" + ${QUANTIZATION_VALUE_TEST_WITH_PARAM} +) -#set_tests_properties(pota_record_minmax_test PROPERTIES DEPENDS pota_fake_wquant_test) -#set_tests_properties(pota_quantization_test PROPERTIES DEPENDS pota_record_minmax_test) +set_tests_properties(pota_record_minmax_test PROPERTIES DEPENDS pota_fake_wquant_test) +set_tests_properties(pota_quantization_test PROPERTIES DEPENDS pota_record_minmax_test) diff --git a/compiler/pota-quantization-value-test/compare_tensors.py b/compiler/pota-quantization-value-test/compare_tensors.py index 258d46d..7d95d18 100755 --- a/compiler/pota-quantization-value-test/compare_tensors.py +++ b/compiler/pota-quantization-value-test/compare_tensors.py @@ -69,7 +69,7 @@ def compare_quantization(tensor, tensor_name, expect_dir): if key == "weights": expected_weights = np.array(json_load["weights"]) input_weights = tensor["weights"][:] - if np.allclose(input_weights, expected_weights, rtol=0, atol=0) == False: + if np.allclose(input_weights, expected_weights, rtol=0, atol=1) == False: print("Quantized weights of " + tensor_name + " (" + str(input_weights) + ") do not match with expected value (" + str(expected_weights) + ").") @@ -87,7 +87,7 @@ def compare_quantization(tensor, tensor_name, expect_dir): expected_zero_point = np.array(json_load["zero_point"]) input_zero_point = tensor["zero_point"][:] if np.allclose( - input_zero_point, expected_zero_point, rtol=0, atol=0) == False: + input_zero_point, expected_zero_point, rtol=0, atol=1) == False: print("Quantized zero_point of " + tensor_name + " (" + str(input_zero_point) + ") do not match with expected value (" + str(expected_zero_point) + ").") diff --git 
a/compiler/pota-quantization-value-test/expected_outputs/Conv2D_004/layer/uint8/fake_quantization/ker.json b/compiler/pota-quantization-value-test/expected_outputs/Conv2D_004/layer/uint8/fake_quantization/ker.json index 21b8eca..2558bb2 100644 --- a/compiler/pota-quantization-value-test/expected_outputs/Conv2D_004/layer/uint8/fake_quantization/ker.json +++ b/compiler/pota-quantization-value-test/expected_outputs/Conv2D_004/layer/uint8/fake_quantization/ker.json @@ -3,44 +3,44 @@ [ [ [ - 1.003921627998352, - 2.007843255996704 - ], + 1.0039215087890625, + 2.007843017578125 + ], [ - -3.0117647647857666, + -3.0117650032043457, -4.015686511993408 ] - ], + ], [ [ - -5.019608020782471, - 6.023529529571533 - ], + -5.019608497619629, + 6.023530006408691 + ], [ - -7.027451038360596, - 7.968627452850342 + -7.027451515197754, + 7.9686279296875 ] ] - ], + ], [ [ [ - 4.015686511993408, - -2.007843255996704 - ], + 4.01568603515625, + -2.007843494415283 + ], [ - 3.0117647647857666, - -1.003921627998352 + 3.0117645263671875, + -1.0039215087890625 ] - ], + ], [ [ - -7.968627452850342, - -6.023529529571533 - ], + -7.9686279296875, + -6.023530006408691 + ], [ - 7.027451038360596, - 5.019608020782471 + 7.027451515197754, + 5.019608497619629 ] ] ] diff --git a/compiler/pota-quantization-value-test/expected_outputs/Conv2D_004/layer/uint8/quantization/bias.json b/compiler/pota-quantization-value-test/expected_outputs/Conv2D_004/layer/uint8/quantization/bias.json index 462d0d3..50d44ec 100644 --- a/compiler/pota-quantization-value-test/expected_outputs/Conv2D_004/layer/uint8/quantization/bias.json +++ b/compiler/pota-quantization-value-test/expected_outputs/Conv2D_004/layer/uint8/quantization/bias.json @@ -1,7 +1,7 @@ - { - "scale": 0.0059054209919261825, - "weights": [ - 169.0, - 339.0 - ] - } +{ + "weights": [ + 4069, + 8138 + ], + "scale": 0.0002457468386200985 +} diff --git a/compiler/pota-quantization-value-test/expected_outputs/Conv2D_004/layer/uint8/quantization/ifm.json b/compiler/pota-quantization-value-test/expected_outputs/Conv2D_004/layer/uint8/quantization/ifm.json index 107117b..2450886 100644 --- a/compiler/pota-quantization-value-test/expected_outputs/Conv2D_004/layer/uint8/quantization/ifm.json +++ b/compiler/pota-quantization-value-test/expected_outputs/Conv2D_004/layer/uint8/quantization/ifm.json @@ -1,4 +1,4 @@ { - "scale": 0.09411764705882353, + "scale": 0.003916590008884668, "zero_point": 0.0 } diff --git a/compiler/pota-quantization-value-test/expected_outputs/Conv2D_004/layer/uint8/quantization/ker.json b/compiler/pota-quantization-value-test/expected_outputs/Conv2D_004/layer/uint8/quantization/ker.json index 3a6e171..b249a0c 100644 --- a/compiler/pota-quantization-value-test/expected_outputs/Conv2D_004/layer/uint8/quantization/ker.json +++ b/compiler/pota-quantization-value-test/expected_outputs/Conv2D_004/layer/uint8/quantization/ker.json @@ -1,52 +1,52 @@ { - "max": 7.968627450980392, - "scale": 0.06274509803921569, "weights": [ [ [ [ - 144, - 160 - ], + 143, + 159 + ], [ - 80, - 64 + 79, + 63 ] - ], + ], [ [ - 48, - 224 - ], + 47, + 223 + ], [ - 16, - 255 + 15, + 254 ] ] - ], + ], [ [ [ - 192, - 96 - ], + 191, + 95 + ], [ - 176, - 112 + 175, + 111 ] - ], + ], [ [ - 1, - 32 - ], + 0, + 31 + ], [ - 240, - 208 + 239, + 207 ] ] ] - ], - "min": -8.031372549019608, - "zero_point": 128.0 + ], + "scale": 0.062745101749897, + "zero_point": 127.0, + "min": -7.9686279296875, + "max": 8.031373023986816 } diff --git 
a/compiler/pota-quantization-value-test/expected_outputs/Conv2D_004/layer/uint8/quantization/ofm.json b/compiler/pota-quantization-value-test/expected_outputs/Conv2D_004/layer/uint8/quantization/ofm.json index 2374639..a2dd668 100644 --- a/compiler/pota-quantization-value-test/expected_outputs/Conv2D_004/layer/uint8/quantization/ofm.json +++ b/compiler/pota-quantization-value-test/expected_outputs/Conv2D_004/layer/uint8/quantization/ofm.json @@ -1,4 +1,4 @@ { - "scale": 0.17836222929113052, + "scale": 0.037479765713214874, "zero_point": 0.0 } diff --git a/compiler/pota-quantization-value-test/expected_outputs/Conv2D_004/layer/uint8/record_minmax/ifm.json b/compiler/pota-quantization-value-test/expected_outputs/Conv2D_004/layer/uint8/record_minmax/ifm.json index 563c042..42f8b56 100644 --- a/compiler/pota-quantization-value-test/expected_outputs/Conv2D_004/layer/uint8/record_minmax/ifm.json +++ b/compiler/pota-quantization-value-test/expected_outputs/Conv2D_004/layer/uint8/record_minmax/ifm.json @@ -1,4 +1,4 @@ { - "max": 24.0, - "min": 1.0 + "min": 0.005472412034869194, + "max": 0.9987304735183716 } diff --git a/compiler/pota-quantization-value-test/expected_outputs/Conv2D_004/layer/uint8/record_minmax/ofm.json b/compiler/pota-quantization-value-test/expected_outputs/Conv2D_004/layer/uint8/record_minmax/ofm.json index fd0c6dc..1862e8c 100644 --- a/compiler/pota-quantization-value-test/expected_outputs/Conv2D_004/layer/uint8/record_minmax/ofm.json +++ b/compiler/pota-quantization-value-test/expected_outputs/Conv2D_004/layer/uint8/record_minmax/ofm.json @@ -1,4 +1,4 @@ { - "max": 45.48236846923828, - "min": 0.0 + "min": 0.0, + "max": 9.557340850830078 } diff --git a/compiler/pota-quantization-value-test/expected_outputs/DepthwiseConv2D_002/layer/uint8/fake_quantization/ker.json b/compiler/pota-quantization-value-test/expected_outputs/DepthwiseConv2D_002/layer/uint8/fake_quantization/ker.json index 11e91ca..cd34797 100644 --- a/compiler/pota-quantization-value-test/expected_outputs/DepthwiseConv2D_002/layer/uint8/fake_quantization/ker.json +++ b/compiler/pota-quantization-value-test/expected_outputs/DepthwiseConv2D_002/layer/uint8/fake_quantization/ker.json @@ -3,29 +3,29 @@ [ [ [ - 0.9725490212440491, - 1.9450980424880981, - 3.0392158031463623, + 0.9725494384765625, + 1.945098876953125, + 3.039216995239258, 4.0117645263671875 - ], + ], [ - -8.996078491210938, - 9.968626976013184, - -10.941176414489746, - 12.035294532775879 + -8.996077537536621, + 9.9686279296875, + -10.94117546081543, + 12.035295486450195 ] - ], + ], [ [ - 4.984313488006592, - 5.956862926483154, - 7.050980567932129, - 8.023529052734375 - ], + 4.98431396484375, + 5.9568634033203125, + 7.050981521606445, + 8.023530960083008 + ], [ - 13.007843017578125, - -13.980392456054688, - 14.952940940856934, + 13.007843017578125, + -13.980391502380371, + 14.95294189453125, -16.04705810546875 ] ] diff --git a/compiler/pota-quantization-value-test/expected_outputs/DepthwiseConv2D_002/layer/uint8/quantization/bias.json b/compiler/pota-quantization-value-test/expected_outputs/DepthwiseConv2D_002/layer/uint8/quantization/bias.json index df7cb14..e60ff31 100644 --- a/compiler/pota-quantization-value-test/expected_outputs/DepthwiseConv2D_002/layer/uint8/quantization/bias.json +++ b/compiler/pota-quantization-value-test/expected_outputs/DepthwiseConv2D_002/layer/uint8/quantization/bias.json @@ -1,9 +1,9 @@ { - "scale": 0.007627835447904652, "weights": [ - 131.0, - 262.0, - 393.0, - 524.0 - ] + 2156, + 4312, + 6468, + 8624 + ], + "scale": 
0.0004638272181067826 } diff --git a/compiler/pota-quantization-value-test/expected_outputs/DepthwiseConv2D_002/layer/uint8/quantization/ifm.json b/compiler/pota-quantization-value-test/expected_outputs/DepthwiseConv2D_002/layer/uint8/quantization/ifm.json index 254ce89..4ec4ef2 100644 --- a/compiler/pota-quantization-value-test/expected_outputs/DepthwiseConv2D_002/layer/uint8/quantization/ifm.json +++ b/compiler/pota-quantization-value-test/expected_outputs/DepthwiseConv2D_002/layer/uint8/quantization/ifm.json @@ -1,4 +1,4 @@ { - "scale": 0.06274509803921569, + "scale": 0.0038153529167175293, "zero_point": 0.0 } diff --git a/compiler/pota-quantization-value-test/expected_outputs/DepthwiseConv2D_002/layer/uint8/quantization/ker.json b/compiler/pota-quantization-value-test/expected_outputs/DepthwiseConv2D_002/layer/uint8/quantization/ker.json index 3d14da1..01835fb 100644 --- a/compiler/pota-quantization-value-test/expected_outputs/DepthwiseConv2D_002/layer/uint8/quantization/ker.json +++ b/compiler/pota-quantization-value-test/expected_outputs/DepthwiseConv2D_002/layer/uint8/quantization/ker.json @@ -1,38 +1,38 @@ { - "max": 14.952941176470588, - "scale": 0.12156862745098039, "weights": [ [ [ [ - 140, - 148, - 157, + 140, + 148, + 157, 165 - ], + ], [ - 58, - 214, - 42, + 58, + 214, + 42, 231 ] - ], + ], [ [ - 173, - 181, - 190, + 173, + 181, + 190, 198 - ], + ], [ - 239, - 17, - 255, + 239, + 17, + 255, 0 ] ] ] - ], - "min": -16.04705882352941, - "zero_point": 132.0 + ], + "scale": 0.12156862765550613, + "zero_point": 132.0, + "min": -16.04705810546875, + "max": 14.952940940856934 } diff --git a/compiler/pota-quantization-value-test/expected_outputs/DepthwiseConv2D_002/layer/uint8/quantization/ofm.json b/compiler/pota-quantization-value-test/expected_outputs/DepthwiseConv2D_002/layer/uint8/quantization/ofm.json index 85dd4d9..39c64f3 100644 --- a/compiler/pota-quantization-value-test/expected_outputs/DepthwiseConv2D_002/layer/uint8/quantization/ofm.json +++ b/compiler/pota-quantization-value-test/expected_outputs/DepthwiseConv2D_002/layer/uint8/quantization/ofm.json @@ -1,4 +1,4 @@ { - "scale": 0.893733185412837, + "scale": 0.07362665981054306, "zero_point": 0.0 } diff --git a/compiler/pota-quantization-value-test/expected_outputs/DepthwiseConv2D_002/layer/uint8/record_minmax/ifm.json b/compiler/pota-quantization-value-test/expected_outputs/DepthwiseConv2D_002/layer/uint8/record_minmax/ifm.json index 9aee7bc..bb4292e 100644 --- a/compiler/pota-quantization-value-test/expected_outputs/DepthwiseConv2D_002/layer/uint8/record_minmax/ifm.json +++ b/compiler/pota-quantization-value-test/expected_outputs/DepthwiseConv2D_002/layer/uint8/record_minmax/ifm.json @@ -1,4 +1,4 @@ { - "max": 16.0, - "min": 1.0 + "min": 0.02638142943382263, + "max": 0.9729149651527405 } diff --git a/compiler/pota-quantization-value-test/expected_outputs/DepthwiseConv2D_002/layer/uint8/record_minmax/ofm.json b/compiler/pota-quantization-value-test/expected_outputs/DepthwiseConv2D_002/layer/uint8/record_minmax/ofm.json index aa42a66..1c118e1 100644 --- a/compiler/pota-quantization-value-test/expected_outputs/DepthwiseConv2D_002/layer/uint8/record_minmax/ofm.json +++ b/compiler/pota-quantization-value-test/expected_outputs/DepthwiseConv2D_002/layer/uint8/record_minmax/ofm.json @@ -1,4 +1,4 @@ { - "max": 227.90196228027344, - "min": 0.0 + "min": 0.0, + "max": 18.77479721069336 } diff --git a/compiler/pota-quantization-value-test/expected_outputs/FullyConnected_003/layer/uint8/fake_quantization/weight.json 
b/compiler/pota-quantization-value-test/expected_outputs/FullyConnected_003/layer/uint8/fake_quantization/weight.json new file mode 100644 index 0000000..e1da53a --- /dev/null +++ b/compiler/pota-quantization-value-test/expected_outputs/FullyConnected_003/layer/uint8/fake_quantization/weight.json @@ -0,0 +1,76 @@ +{ + "weights": [ + [ + 1.0039215087890625, + 2.007843017578125, + -3.0117650032043457, + -4.015686511993408, + -5.019608497619629, + 6.023530006408691, + -7.027451515197754, + 7.9686279296875, + 4.01568603515625, + -2.007843494415283, + 3.0117645263671875, + -1.0039215087890625, + -7.9686279296875, + -6.023530006408691, + 7.027451515197754, + 5.019608497619629 + ], + [ + 1.0039215087890625, + 2.007843017578125, + -3.0117650032043457, + -4.015686511993408, + -5.019608497619629, + 6.023530006408691, + -7.027451515197754, + 7.9686279296875, + 4.01568603515625, + -2.007843494415283, + 3.0117645263671875, + -1.0039215087890625, + -7.9686279296875, + -6.023530006408691, + 7.027451515197754, + 5.019608497619629 + ], + [ + 1.0039215087890625, + 2.007843017578125, + -3.0117650032043457, + -4.015686511993408, + -5.019608497619629, + 6.023530006408691, + -7.027451515197754, + 7.9686279296875, + 4.01568603515625, + -2.007843494415283, + 3.0117645263671875, + -1.0039215087890625, + -7.9686279296875, + -6.023530006408691, + 7.027451515197754, + 5.019608497619629 + ], + [ + 1.0039215087890625, + 2.007843017578125, + -3.0117650032043457, + -4.015686511993408, + -5.019608497619629, + 6.023530006408691, + -7.027451515197754, + 7.9686279296875, + 4.01568603515625, + -2.007843494415283, + 3.0117645263671875, + -1.0039215087890625, + -7.9686279296875, + -6.023530006408691, + 7.027451515197754, + 5.019608497619629 + ] + ] +} diff --git a/compiler/pota-quantization-value-test/expected_outputs/FullyConnected_003/layer/uint8/quantization/bias.json b/compiler/pota-quantization-value-test/expected_outputs/FullyConnected_003/layer/uint8/quantization/bias.json new file mode 100644 index 0000000..ecb49bb --- /dev/null +++ b/compiler/pota-quantization-value-test/expected_outputs/FullyConnected_003/layer/uint8/quantization/bias.json @@ -0,0 +1,9 @@ +{ + "weights": [ + 415, + -829, + -1244, + 1658 + ], + "scale": 0.00241205753304663 +} diff --git a/compiler/pota-quantization-value-test/expected_outputs/FullyConnected_003/layer/uint8/quantization/in.json b/compiler/pota-quantization-value-test/expected_outputs/FullyConnected_003/layer/uint8/quantization/in.json new file mode 100644 index 0000000..654824b --- /dev/null +++ b/compiler/pota-quantization-value-test/expected_outputs/FullyConnected_003/layer/uint8/quantization/in.json @@ -0,0 +1,4 @@ +{ + "scale": 0.03844216465950012, + "zero_point": 126.0 +} diff --git a/compiler/pota-quantization-value-test/expected_outputs/FullyConnected_003/layer/uint8/quantization/out.json b/compiler/pota-quantization-value-test/expected_outputs/FullyConnected_003/layer/uint8/quantization/out.json new file mode 100644 index 0000000..3baa421 --- /dev/null +++ b/compiler/pota-quantization-value-test/expected_outputs/FullyConnected_003/layer/uint8/quantization/out.json @@ -0,0 +1,4 @@ +{ + "scale": 0.741962730884552, + "zero_point": 156.0 +} diff --git a/compiler/pota-quantization-value-test/expected_outputs/FullyConnected_003/layer/uint8/quantization/weight.json b/compiler/pota-quantization-value-test/expected_outputs/FullyConnected_003/layer/uint8/quantization/weight.json new file mode 100644 index 0000000..9402240 --- /dev/null +++ 
b/compiler/pota-quantization-value-test/expected_outputs/FullyConnected_003/layer/uint8/quantization/weight.json @@ -0,0 +1,80 @@ +{ + "weights": [ + [ + 143, + 159, + 79, + 63, + 47, + 223, + 15, + 254, + 191, + 95, + 175, + 111, + 0, + 31, + 239, + 207 + ], + [ + 143, + 159, + 79, + 63, + 47, + 223, + 15, + 254, + 191, + 95, + 175, + 111, + 0, + 31, + 239, + 207 + ], + [ + 143, + 159, + 79, + 63, + 47, + 223, + 15, + 254, + 191, + 95, + 175, + 111, + 0, + 31, + 239, + 207 + ], + [ + 143, + 159, + 79, + 63, + 47, + 223, + 15, + 254, + 191, + 95, + 175, + 111, + 0, + 31, + 239, + 207 + ] + ], + "scale": 0.062745101749897, + "zero_point": 127.0, + "min": -7.9686279296875, + "max": 8.031373023986816 +} diff --git a/compiler/pota-quantization-value-test/expected_outputs/FullyConnected_003/layer/uint8/record_minmax/in.json b/compiler/pota-quantization-value-test/expected_outputs/FullyConnected_003/layer/uint8/record_minmax/in.json new file mode 100644 index 0000000..a8ec5b2 --- /dev/null +++ b/compiler/pota-quantization-value-test/expected_outputs/FullyConnected_003/layer/uint8/record_minmax/in.json @@ -0,0 +1,4 @@ +{ + "min": -4.832756385803223, + "max": 4.969995346069336 +} diff --git a/compiler/pota-quantization-value-test/expected_outputs/FullyConnected_003/layer/uint8/record_minmax/out.json b/compiler/pota-quantization-value-test/expected_outputs/FullyConnected_003/layer/uint8/record_minmax/out.json new file mode 100644 index 0000000..de3b415 --- /dev/null +++ b/compiler/pota-quantization-value-test/expected_outputs/FullyConnected_003/layer/uint8/record_minmax/out.json @@ -0,0 +1,4 @@ +{ + "min": -115.99438369750976, + "max": 73.20612327575684 +} diff --git a/compiler/pota-quantization-value-test/expected_outputs/TransposeConv_001/layer/uint8/fake_quantization/ker.json b/compiler/pota-quantization-value-test/expected_outputs/TransposeConv_001/layer/uint8/fake_quantization/ker.json new file mode 100644 index 0000000..76a0440 --- /dev/null +++ b/compiler/pota-quantization-value-test/expected_outputs/TransposeConv_001/layer/uint8/fake_quantization/ker.json @@ -0,0 +1,48 @@ +{ + "weights": [ + [ + [ + [ + 0.960784912109375, + 2.0588245391845703 + ], + [ + -3.0196075439453125, + -3.980391502380371 + ], + [ + 4.9411773681640625, + -6.039215087890625 + ] + ], + [ + [ + 7.0, + 7.960784912109375 + ], + [ + -9.058823585510254, + -10.019607543945312 + ], + [ + 10.980392456054688, + -11.941176414489746 + ] + ], + [ + [ + 13.039216995239258, + 14.000001907348633 + ], + [ + -14.960784912109375, + -16.05882453918457 + ], + [ + 17.019607543945312, + -17.980392456054688 + ] + ] + ] + ] +} diff --git a/compiler/pota-quantization-value-test/expected_outputs/TransposeConv_001/layer/uint8/quantization/ifm.json b/compiler/pota-quantization-value-test/expected_outputs/TransposeConv_001/layer/uint8/quantization/ifm.json new file mode 100644 index 0000000..dc5ca8d --- /dev/null +++ b/compiler/pota-quantization-value-test/expected_outputs/TransposeConv_001/layer/uint8/quantization/ifm.json @@ -0,0 +1,4 @@ +{ + "scale": 0.03869570419192314, + "zero_point": 126.0 +} diff --git a/compiler/pota-quantization-value-test/expected_outputs/TransposeConv_001/layer/uint8/quantization/ker.json b/compiler/pota-quantization-value-test/expected_outputs/TransposeConv_001/layer/uint8/quantization/ker.json new file mode 100644 index 0000000..bc150bb --- /dev/null +++ b/compiler/pota-quantization-value-test/expected_outputs/TransposeConv_001/layer/uint8/quantization/ker.json @@ -0,0 +1,52 @@ +{ + "weights": [ + [ + [ + [ + 138, + 
146 + ], + [ + 109, + 102 + ], + [ + 167, + 87 + ] + ], + [ + [ + 182, + 189 + ], + [ + 65, + 58 + ], + [ + 211, + 44 + ] + ], + [ + [ + 226, + 233 + ], + [ + 22, + 14 + ], + [ + 255, + 0 + ] + ] + ] + ], + "scale": 0.13725490868091583, + "zero_point": 131.0, + "min": -17.980392456054688, + "max": 17.019609451293945 +} diff --git a/compiler/pota-quantization-value-test/expected_outputs/TransposeConv_001/layer/uint8/quantization/ofm.json b/compiler/pota-quantization-value-test/expected_outputs/TransposeConv_001/layer/uint8/quantization/ofm.json new file mode 100644 index 0000000..bfd8621 --- /dev/null +++ b/compiler/pota-quantization-value-test/expected_outputs/TransposeConv_001/layer/uint8/quantization/ofm.json @@ -0,0 +1,4 @@ +{ + "scale": 1.6333034038543701, + "zero_point": 127.0 +} diff --git a/compiler/pota-quantization-value-test/expected_outputs/TransposeConv_001/layer/uint8/record_minmax/ifm.json b/compiler/pota-quantization-value-test/expected_outputs/TransposeConv_001/layer/uint8/record_minmax/ifm.json new file mode 100644 index 0000000..2d2af08 --- /dev/null +++ b/compiler/pota-quantization-value-test/expected_outputs/TransposeConv_001/layer/uint8/record_minmax/ifm.json @@ -0,0 +1,4 @@ +{ + "min": -4.890846576690674, + "max": 4.976558513641357 +} diff --git a/compiler/pota-quantization-value-test/expected_outputs/TransposeConv_001/layer/uint8/record_minmax/ofm.json b/compiler/pota-quantization-value-test/expected_outputs/TransposeConv_001/layer/uint8/record_minmax/ofm.json new file mode 100644 index 0000000..24598f0 --- /dev/null +++ b/compiler/pota-quantization-value-test/expected_outputs/TransposeConv_001/layer/uint8/record_minmax/ofm.json @@ -0,0 +1,4 @@ +{ + "min": -207.54233032226563, + "max": 208.95002136230468 +} diff --git a/compiler/pota-quantization-value-test/test.lst b/compiler/pota-quantization-value-test/test.lst index 65613ff..9eb3489 100644 --- a/compiler/pota-quantization-value-test/test.lst +++ b/compiler/pota-quantization-value-test/test.lst @@ -1,2 +1,4 @@ addTest(Conv2D_004 layer uint8) addTest(DepthwiseConv2D_002 layer uint8) +addTest(FullyConnected_003 layer uint8) +addTest(TransposeConv_001 layer uint8) diff --git a/compiler/pota-quantization-value-test/test_inputs/Conv2D_004/layer/uint8/0.txt b/compiler/pota-quantization-value-test/test_inputs/Conv2D_004/layer/uint8/0.txt index 8803cb1..0614b5e 100644 --- a/compiler/pota-quantization-value-test/test_inputs/Conv2D_004/layer/uint8/0.txt +++ b/compiler/pota-quantization-value-test/test_inputs/Conv2D_004/layer/uint8/0.txt @@ -1 +1 @@ -1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 +0.01090685,0.0581577 ,0.637094 ,0.64067715,0.26264507,0.13692169,0.9649414 ,0.5117181 ,0.18012471,0.07855253,0.6358017 ,0.62257963,0.41469443,0.93169045,0.20763828,0.7634293 ,0.75929826,0.72708374,0.23463063,0.58222896,0.6351517 ,0.68781173,0.5558012 ,0.7652179 diff --git a/compiler/pota-quantization-value-test/test_inputs/Conv2D_004/layer/uint8/1.txt b/compiler/pota-quantization-value-test/test_inputs/Conv2D_004/layer/uint8/1.txt new file mode 100644 index 0000000..b1c3938 --- /dev/null +++ b/compiler/pota-quantization-value-test/test_inputs/Conv2D_004/layer/uint8/1.txt @@ -0,0 +1 @@ +0.57017624,0.08235867,0.03672464,0.40372616,0.7353964 ,0.59611887,0.7675548 ,0.21004233,0.09803218,0.20009473,0.8821493 ,0.17015271,0.14840214,0.99910176,0.37003204,0.22893582,0.43173164,0.3105084 ,0.41997132,0.43714985,0.08115962,0.71896386,0.7810953 ,0.00524598 diff --git 
a/compiler/pota-quantization-value-test/test_inputs/Conv2D_004/layer/uint8/2.txt b/compiler/pota-quantization-value-test/test_inputs/Conv2D_004/layer/uint8/2.txt new file mode 100644 index 0000000..7e562de --- /dev/null +++ b/compiler/pota-quantization-value-test/test_inputs/Conv2D_004/layer/uint8/2.txt @@ -0,0 +1 @@ +0.65292275,0.79842275,0.97853714,0.6711518 ,0.607567 ,0.40971732,0.74838483,0.95853555,0.32158023,0.911524 ,0.66938365,0.8573132 ,0.3047727 ,0.5561248 ,0.914098 ,0.07650814,0.37868017,0.29269257,0.19652605,0.63025194,0.61496884,0.32011527,0.8204132 ,0.21866946 diff --git a/compiler/pota-quantization-value-test/test_inputs/Conv2D_004/layer/uint8/3.txt b/compiler/pota-quantization-value-test/test_inputs/Conv2D_004/layer/uint8/3.txt new file mode 100644 index 0000000..2958a7f --- /dev/null +++ b/compiler/pota-quantization-value-test/test_inputs/Conv2D_004/layer/uint8/3.txt @@ -0,0 +1 @@ +0.4548901 ,0.56957537,0.0252368 ,0.4884317 ,0.7516498 ,0.02631272,0.22107519,0.95249426,0.34902394,0.11520014,0.808911 ,0.4148615 ,0.63615656,0.84020686,0.3633697 ,0.23993976,0.54176176,0.86938345,0.81628686,0.6380988 ,0.91891205,0.0406627 ,0.90289026,0.9429013 diff --git a/compiler/pota-quantization-value-test/test_inputs/Conv2D_004/layer/uint8/4.txt b/compiler/pota-quantization-value-test/test_inputs/Conv2D_004/layer/uint8/4.txt new file mode 100644 index 0000000..fc96930 --- /dev/null +++ b/compiler/pota-quantization-value-test/test_inputs/Conv2D_004/layer/uint8/4.txt @@ -0,0 +1 @@ +0.9309136 ,0.02123719,0.64467335,0.6910113 ,0.47402772,0.54622203,0.31527275,0.81530565,0.98981965,0.36102158,0.03114039,0.1902339 ,0.45183742,0.60178596,0.4683102 ,0.59810966,0.40558222,0.5420302 ,0.72699505,0.9575108 ,0.46746576,0.08518691,0.40302262,0.69213694 diff --git a/compiler/pota-quantization-value-test/test_inputs/DepthwiseConv2D_002/layer/uint8/0.txt b/compiler/pota-quantization-value-test/test_inputs/DepthwiseConv2D_002/layer/uint8/0.txt index c210774..44f0ff1 100644 --- a/compiler/pota-quantization-value-test/test_inputs/DepthwiseConv2D_002/layer/uint8/0.txt +++ b/compiler/pota-quantization-value-test/test_inputs/DepthwiseConv2D_002/layer/uint8/0.txt @@ -1 +1 @@ -1, 2, 7, 8, 3, 4, 9, 10, 5, 6, 11, 12, 13, 14, 15, 16 +0.31365377,0.6127105 ,0.7047126 ,0.2511918 ,0.16652136,0.36075932,0.44332707,0.77615815,0.60456425,0.26207635,0.28714025,0.11579613,0.89698446,0.67223394,0.3757766 ,0.11787009 diff --git a/compiler/pota-quantization-value-test/test_inputs/DepthwiseConv2D_002/layer/uint8/1.txt b/compiler/pota-quantization-value-test/test_inputs/DepthwiseConv2D_002/layer/uint8/1.txt new file mode 100644 index 0000000..98e8104 --- /dev/null +++ b/compiler/pota-quantization-value-test/test_inputs/DepthwiseConv2D_002/layer/uint8/1.txt @@ -0,0 +1 @@ +0.9409595 ,0.3991174 ,0.43546647,0.221152 ,0.7794665 ,0.8619514 ,0.5903087 ,0.24476172,0.5932698 ,0.2727837 ,0.3980262 ,0.13329633,0.4319272 ,0.37872055,0.1721639 ,0.92437047 diff --git a/compiler/pota-quantization-value-test/test_inputs/DepthwiseConv2D_002/layer/uint8/2.txt b/compiler/pota-quantization-value-test/test_inputs/DepthwiseConv2D_002/layer/uint8/2.txt new file mode 100644 index 0000000..e986752 --- /dev/null +++ b/compiler/pota-quantization-value-test/test_inputs/DepthwiseConv2D_002/layer/uint8/2.txt @@ -0,0 +1 @@ +0.6484028 ,0.09222967,0.76285905,0.02265582,0.2564394 ,0.11219095,0.22529566,0.09101159,0.15937322,0.3540595 ,0.25971088,0.4681136 ,0.4279646 ,0.5386553 ,0.11397707,0.7413688 diff --git 
a/compiler/pota-quantization-value-test/test_inputs/DepthwiseConv2D_002/layer/uint8/3.txt b/compiler/pota-quantization-value-test/test_inputs/DepthwiseConv2D_002/layer/uint8/3.txt new file mode 100644 index 0000000..9b36fb5 --- /dev/null +++ b/compiler/pota-quantization-value-test/test_inputs/DepthwiseConv2D_002/layer/uint8/3.txt @@ -0,0 +1 @@ +0.9182678 ,0.8253187 ,0.6572848 ,0.46436486,0.45208713,0.42112917,0.24383743,0.16039051,0.24649048,0.63431305,0.31141657,0.25664324,0.721266 ,0.18996912,0.35422477,0.8826148 diff --git a/compiler/pota-quantization-value-test/test_inputs/DepthwiseConv2D_002/layer/uint8/4.txt b/compiler/pota-quantization-value-test/test_inputs/DepthwiseConv2D_002/layer/uint8/4.txt new file mode 100644 index 0000000..6b8957d --- /dev/null +++ b/compiler/pota-quantization-value-test/test_inputs/DepthwiseConv2D_002/layer/uint8/4.txt @@ -0,0 +1 @@ +0.97424644,0.9360494 ,0.6849295 ,0.21313633,0.23943195,0.32497332,0.5091704 ,0.67543274,0.49667478,0.73460567,0.5866559 ,0.5312464 ,0.8252662 ,0.36093768,0.7143621 ,0.7234413 diff --git a/compiler/pota-quantization-value-test/test_inputs/FullyConnected_003/layer/uint8/0.txt b/compiler/pota-quantization-value-test/test_inputs/FullyConnected_003/layer/uint8/0.txt new file mode 100644 index 0000000..233e5ea --- /dev/null +++ b/compiler/pota-quantization-value-test/test_inputs/FullyConnected_003/layer/uint8/0.txt @@ -0,0 +1 @@ + 2.7731526 , 2.451602 , 3.7535272 ,-1.2774152 , 1.5482912 , 1.3402948 , 4.4792123 ,-4.4954367 , 3.354679 ,-3.3615496 ,-4.619757 ,-3.3659618 , 4.7626247 ,-1.3596478 ,-4.835548 , 0.78964525 diff --git a/compiler/pota-quantization-value-test/test_inputs/FullyConnected_003/layer/uint8/1.txt b/compiler/pota-quantization-value-test/test_inputs/FullyConnected_003/layer/uint8/1.txt new file mode 100644 index 0000000..6a12608 --- /dev/null +++ b/compiler/pota-quantization-value-test/test_inputs/FullyConnected_003/layer/uint8/1.txt @@ -0,0 +1 @@ + 0.5400839 ,-3.2621996 ,-3.4817135 , 3.8183312 , 0.48498327, 2.9812584 , 4.111276 , 0.11223658, 4.7201405 , 2.4256718 , 1.4895477 , 4.7596602 ,-0.32709372, 1.3507305 ,-0.30043927,-1.8077502 diff --git a/compiler/pota-quantization-value-test/test_inputs/FullyConnected_003/layer/uint8/2.txt b/compiler/pota-quantization-value-test/test_inputs/FullyConnected_003/layer/uint8/2.txt new file mode 100644 index 0000000..eccd2c6 --- /dev/null +++ b/compiler/pota-quantization-value-test/test_inputs/FullyConnected_003/layer/uint8/2.txt @@ -0,0 +1 @@ + 3.8758078 , 4.978636 ,-0.22925885,-2.6760504 ,-1.9160627 ,-4.609644 ,-0.9515802 , 3.558274 , 2.9096057 , 0.3340422 , 0.38608226,-0.32168412, 4.688853 ,-4.583811 ,-2.5113506 ,-4.6688786 diff --git a/compiler/pota-quantization-value-test/test_inputs/FullyConnected_003/layer/uint8/3.txt b/compiler/pota-quantization-value-test/test_inputs/FullyConnected_003/layer/uint8/3.txt new file mode 100644 index 0000000..0da0527 --- /dev/null +++ b/compiler/pota-quantization-value-test/test_inputs/FullyConnected_003/layer/uint8/3.txt @@ -0,0 +1 @@ +-2.9868221 , 2.4237797 , 1.0833962 ,-0.9231426 ,-2.1091506 ,-2.6163697 ,-0.23101932,-1.9252896 , 4.7034135 , 3.1088963 ,-2.345823 ,-2.7866168 ,-3.186763 ,-4.431844 , 3.3113294 , 0.9501982 diff --git a/compiler/pota-quantization-value-test/test_inputs/FullyConnected_003/layer/uint8/4.txt b/compiler/pota-quantization-value-test/test_inputs/FullyConnected_003/layer/uint8/4.txt new file mode 100644 index 0000000..ace24f7 --- /dev/null +++ 
b/compiler/pota-quantization-value-test/test_inputs/FullyConnected_003/layer/uint8/4.txt @@ -0,0 +1 @@ + 3.9716747 ,-2.254871 , 1.1943274 ,-2.212602 , 3.4311683 , 1.114989 , 4.0739036 , 0.47244295,-3.5793104 ,-3.359908 ,-4.7657595 , 2.0369127 ,-2.5619278 ,-3.4452975 ,-4.5852203 ,-1.137643 diff --git a/compiler/pota-quantization-value-test/test_inputs/TransposeConv_001/layer/uint8/0.txt b/compiler/pota-quantization-value-test/test_inputs/TransposeConv_001/layer/uint8/0.txt new file mode 100644 index 0000000..e9db48f --- /dev/null +++ b/compiler/pota-quantization-value-test/test_inputs/TransposeConv_001/layer/uint8/0.txt @@ -0,0 +1 @@ +-1.4124781 , 0.42694193, 1.1734594 ,-3.5111153 ,-2.9756174 , 1.3682148 ,-2.318465 , 2.198896 ,-4.5043235 , 3.1775594 ,-0.42802384,-1.4872279 , 1.3821319 ,-4.771963 ,-0.12837897, 4.132799 , 3.697655 , 2.0807178 ,-3.621293 , 2.121878 ,-0.25654107, 0.42100102,-1.4009671 ,-2.9733627 ,-0.7058871 ,-2.831215 , 3.5669627 , 2.1420689 ,-1.8789555 , 0.8104939 ,-2.0503597 , 1.7788508 diff --git a/compiler/pota-quantization-value-test/test_inputs/TransposeConv_001/layer/uint8/1.txt b/compiler/pota-quantization-value-test/test_inputs/TransposeConv_001/layer/uint8/1.txt new file mode 100644 index 0000000..479d062 --- /dev/null +++ b/compiler/pota-quantization-value-test/test_inputs/TransposeConv_001/layer/uint8/1.txt @@ -0,0 +1 @@ + 3.4726453 , 3.0497985 ,-4.234619 ,-1.0526706 , 1.7278554 ,-3.341614 , 4.54768 , 3.0954597 ,-3.735109 , 2.8810751 ,-2.5381427 ,-3.2360535 ,-1.5378917 , 2.3052745 ,-3.170938 ,-3.327242 , 2.0654576 ,-2.2294598 ,-1.881382 , 0.13216451,-4.2825613 , 0.26616526, 4.6196365 ,-0.88623226, 1.7103885 ,-1.5865034 ,-3.9114466 ,-3.2227128 , 4.909618 , 2.3318915 , 0.84300846, 0.760918 diff --git a/compiler/pota-quantization-value-test/test_inputs/TransposeConv_001/layer/uint8/2.txt b/compiler/pota-quantization-value-test/test_inputs/TransposeConv_001/layer/uint8/2.txt new file mode 100644 index 0000000..ae28234 --- /dev/null +++ b/compiler/pota-quantization-value-test/test_inputs/TransposeConv_001/layer/uint8/2.txt @@ -0,0 +1 @@ +-4.6097918,-4.21991 ,-3.9955974, 3.6492047, 2.9191775, 2.8082933, 1.6189331, 0.2730309,-1.5029653,-1.9471445, 4.8758197, 3.3177438, 3.1338058,-2.1281245,-1.7526287,-2.5518703,-1.7746793, 4.0455256,-0.5839861,-4.408046 ,-4.0034447, 1.5858272,-4.5896654, 4.7211285,-4.677515 ,-2.6027086,-4.7896166,-3.5512326,-1.9068764,-2.9705904,-4.854087 ,-4.892111 diff --git a/compiler/pota-quantization-value-test/test_inputs/TransposeConv_001/layer/uint8/3.txt b/compiler/pota-quantization-value-test/test_inputs/TransposeConv_001/layer/uint8/3.txt new file mode 100644 index 0000000..fd40f84 --- /dev/null +++ b/compiler/pota-quantization-value-test/test_inputs/TransposeConv_001/layer/uint8/3.txt @@ -0,0 +1 @@ + 2.1514777e-02, 2.6526773e+00,-3.0477784e+00, 1.3287724e+00,-4.1414630e-01,-1.7295350e-01, 7.6649576e-01,-1.8028022e+00,-7.0781744e-01,-2.5262204e-01,-3.0970418e+00,-1.3165286e+00,-4.6649928e+00, 2.0809033e+00,-1.5739973e+00,-4.0531826e-01,-2.1718202e+00, 2.0146034e+00, 2.5044403e+00,-1.1256610e+00, 1.3536702e+00, 1.0283234e-03,-1.8823910e+00, 4.7122188e+00, 9.4781297e-01, 3.2012525e+00,-5.5164534e-01,-2.6158772e+00,-1.8771547e+00,-3.1689723e+00, 4.9054880e+00,-3.4560370e+00 diff --git a/compiler/pota-quantization-value-test/test_inputs/TransposeConv_001/layer/uint8/4.txt b/compiler/pota-quantization-value-test/test_inputs/TransposeConv_001/layer/uint8/4.txt new file mode 100644 index 0000000..e81c3b8 --- /dev/null +++ 
b/compiler/pota-quantization-value-test/test_inputs/TransposeConv_001/layer/uint8/4.txt @@ -0,0 +1 @@ +-2.0927553 ,-2.107511 ,-1.6963564 , 1.7006218 , 1.4575784 , 0.06095728, 1.2659966 , 4.1905265 , 1.3035946 , 4.9793477 ,-4.3388166 ,-0.23496658, 1.9831208 , 2.6154642 ,-0.2790228 ,-3.1774354 ,-3.178935 ,-1.1564373 ,-0.8199472 ,-2.245698 ,-4.8605046 ,-3.569018 ,-1.4226891 ,-4.1067843 , 2.6078918 ,-3.5830674 , 1.9065963 , 2.435578 ,-3.3216476 , 4.5930347 , 2.9191844 , 1.7885648 diff --git a/compiler/pota-quantization-value-test/test_record_minmax.sh b/compiler/pota-quantization-value-test/test_record_minmax.sh index eaa462d..acb7574 100755 --- a/compiler/pota-quantization-value-test/test_record_minmax.sh +++ b/compiler/pota-quantization-value-test/test_record_minmax.sh @@ -59,9 +59,9 @@ while [ "$1" != "" ]; do # Run record-minmax "${RECORD_MINMAX_PATH}" \ - "${TEST_RESULT_FILE}.fake_quantized.circle" \ - "${TEST_RESULT_FILE}.input.h5" \ - "${TEST_RESULT_FILE}.minmax_recorded.circle" + --input_model "${TEST_RESULT_FILE}.fake_quantized.circle" \ + --input_data "${TESTCASE_FILE}.input.h5" \ + --output_model "${TEST_RESULT_FILE}.minmax_recorded.circle" # Dump min/max values (circle-tensordump) "${CIRCLE_TENSORDUMP_PATH}" \ diff --git a/compiler/record-minmax/CMakeLists.txt b/compiler/record-minmax/CMakeLists.txt index 862660e..f8a165b 100644 --- a/compiler/record-minmax/CMakeLists.txt +++ b/compiler/record-minmax/CMakeLists.txt @@ -19,9 +19,14 @@ target_link_libraries(record-minmax safemain) target_link_libraries(record-minmax luci_import) target_link_libraries(record-minmax luci_export) target_link_libraries(record-minmax luci_interpreter) +target_link_libraries(record-minmax vconone) install(TARGETS record-minmax DESTINATION bin) +if(NOT ENABLE_TEST) + return() +endif(NOT ENABLE_TEST) + nnas_find_package(GTest REQUIRED) GTest_AddTest(record_minmax_function_test "${CMAKE_CURRENT_SOURCE_DIR}/tests/RecordFunction.test.cpp") target_include_directories(record_minmax_function_test PRIVATE include) diff --git a/compiler/record-minmax/driver/Driver.cpp b/compiler/record-minmax/driver/Driver.cpp index ae4fcb7..8b09498 100644 --- a/compiler/record-minmax/driver/Driver.cpp +++ b/compiler/record-minmax/driver/Driver.cpp @@ -17,6 +17,13 @@ #include "RecordMinMax.h" #include +#include + +void print_version(void) +{ + std::cout << "record-minmax version " << vconone::get_string() << std::endl; + std::cout << vconone::get_copyright() << std::endl; +} int entry(const int argc, char **argv) { @@ -25,6 +32,13 @@ int entry(const int argc, char **argv) arser::Arser arser( "Embedding min/max values of activations to the circle model for post-training quantization"); + arser.add_argument("--version") + .nargs(0) + .required(false) + .default_value(false) + .help("Show version information and exit") + .exit_with(print_version); + arser.add_argument("--input_model") .nargs(1) .type(arser::DataType::STR) @@ -66,7 +80,7 @@ int entry(const int argc, char **argv) { std::cout << err.what() << std::endl; std::cout << arser; - return 0; + return 255; } auto input_model_path = arser.get("--input_model"); diff --git a/compiler/record-minmax/requires.cmake b/compiler/record-minmax/requires.cmake index 0545035..f6804ce 100644 --- a/compiler/record-minmax/requires.cmake +++ b/compiler/record-minmax/requires.cmake @@ -1,3 +1,4 @@ require("luci") require("safemain") require("arser") +require("vconone") diff --git a/compiler/record-minmax/src/HDF5Importer.cpp b/compiler/record-minmax/src/HDF5Importer.cpp index cf30cd8..a0e65ee 
100644 --- a/compiler/record-minmax/src/HDF5Importer.cpp +++ b/compiler/record-minmax/src/HDF5Importer.cpp @@ -20,6 +20,7 @@ #include #include +#include using Shape = luci_interpreter::Shape; using DataType = luci_interpreter::DataType; diff --git a/compiler/record-minmax/src/MinMaxObserver.cpp b/compiler/record-minmax/src/MinMaxObserver.cpp index 45f0197..c22cb41 100644 --- a/compiler/record-minmax/src/MinMaxObserver.cpp +++ b/compiler/record-minmax/src/MinMaxObserver.cpp @@ -38,7 +38,7 @@ void MinMaxObserver::postTensorWrite(const luci::CircleNode *node, assert(node->opcode() != luci::CircleOpcode::UNPACK); assert(node->opcode() != luci::CircleOpcode::WHILE); - if (node->opcode() == luci::CircleOpcode::CONST) + if (node->opcode() == luci::CircleOpcode::CIRCLECONST) { // node is not activation. Do nothing. return; diff --git a/compiler/record-minmax/src/RecordMinMax.cpp b/compiler/record-minmax/src/RecordMinMax.cpp index d12a0d3..17c6aa6 100644 --- a/compiler/record-minmax/src/RecordMinMax.cpp +++ b/compiler/record-minmax/src/RecordMinMax.cpp @@ -158,7 +158,7 @@ void RecordMinMax::profileData(const std::string &mode, const std::string &input auto node = iter->first; auto minmax = iter->second; - float min, max; + float min{0.0f}, max{0.0f}; if (mode == "percentile") { min = getNthPercentile(minmax.min_vector, min_percentile); diff --git a/compiler/record-minmax/tests/RecordFunction.test.cpp b/compiler/record-minmax/tests/RecordFunction.test.cpp index 13b464d..e2f135a 100644 --- a/compiler/record-minmax/tests/RecordFunction.test.cpp +++ b/compiler/record-minmax/tests/RecordFunction.test.cpp @@ -32,6 +32,8 @@ TEST(GetNthPercentileTest, Edge) EXPECT_FLOAT_NEAR(0, getNthPercentile(input, 0)); EXPECT_FLOAT_NEAR(9, getNthPercentile(input, 100)); + + SUCCEED(); } TEST(GetNthPercentileTest, Simple) @@ -47,6 +49,8 @@ TEST(GetNthPercentileTest, Simple) { EXPECT_FLOAT_NEAR(0.09 * std::floor(i) + 0.045, getNthPercentile(input, i)); } + + SUCCEED(); } TEST(GetNthPercentileTest, Float) @@ -61,6 +65,8 @@ TEST(GetNthPercentileTest, Float) EXPECT_FLOAT_NEAR(2.799942346802177, getNthPercentile(input, 1)); EXPECT_FLOAT_NEAR(7.768503955476342, getNthPercentile(input, 3.14)); EXPECT_FLOAT_NEAR(99.40456084968194, getNthPercentile(input, 99)); + + SUCCEED(); } TEST(GetNthPercentileTest, FloatWithNegative) @@ -75,6 +81,8 @@ TEST(GetNthPercentileTest, FloatWithNegative) EXPECT_FLOAT_NEAR(-47.20005765319782, getNthPercentile(input, 1)); EXPECT_FLOAT_NEAR(-42.23149604452366, getNthPercentile(input, 3.14)); EXPECT_FLOAT_NEAR(49.40456084968194, getNthPercentile(input, 99)); + + SUCCEED(); } TEST(GetNthPercentileTest, SigleElement) @@ -84,6 +92,8 @@ TEST(GetNthPercentileTest, SigleElement) EXPECT_FLOAT_NEAR(33, getNthPercentile(input, 0)); EXPECT_FLOAT_NEAR(33, getNthPercentile(input, 50)); EXPECT_FLOAT_NEAR(33, getNthPercentile(input, 100)); + + SUCCEED(); } TEST(GetNthPercentileTest, OutOfBoundary_NEG) @@ -92,6 +102,8 @@ TEST(GetNthPercentileTest, OutOfBoundary_NEG) EXPECT_THROW(getNthPercentile(input, -1), std::runtime_error); EXPECT_THROW(getNthPercentile(input, 101), std::runtime_error); + + SUCCEED(); } TEST(GetNthPercentileTest, EmptyVector_NEG) @@ -99,6 +111,8 @@ TEST(GetNthPercentileTest, EmptyVector_NEG) std::vector input; EXPECT_THROW(getNthPercentile(input, 10), std::runtime_error); + + SUCCEED(); } } // namespace record_minmax diff --git a/compiler/tf2circle-value-pbtxt-remote-test/CMakeLists.txt b/compiler/tf2circle-value-pbtxt-remote-test/CMakeLists.txt index 64dcc28..852018e 100644 --- 
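[Editor's note on the getNthPercentile hunks above] The RecordFunction.test.cpp changes only show the expected values, not the percentile routine itself. The sketch below is a minimal linear-interpolation percentile that satisfies the edge cases those tests exercise (0th percentile = minimum, 100th = maximum, a single-element vector returns that element, and empty input or a percentile outside [0, 100] throws std::runtime_error). The real record-minmax implementation may use a different estimator; the function and variable names here are illustrative only and are not taken from the patch.

```cpp
// Sketch only: a linear-interpolation percentile consistent with the
// contract implied by RecordFunction.test.cpp (throws on empty input or
// on a percentile outside [0, 100]).
#include <algorithm>
#include <cmath>
#include <stdexcept>
#include <vector>

float getNthPercentileSketch(std::vector<float> v, float q)
{
  if (v.empty())
    throw std::runtime_error("percentile of an empty vector");
  if (q < 0.0f || q > 100.0f)
    throw std::runtime_error("percentile must be in [0, 100]");

  std::sort(v.begin(), v.end());

  // Fractional position of the q-th percentile between index 0 and size()-1.
  const double pos = q / 100.0 * (v.size() - 1);
  const std::size_t lo = static_cast<std::size_t>(std::floor(pos));
  const std::size_t hi = static_cast<std::size_t>(std::ceil(pos));
  const double frac = pos - lo;

  // Interpolate between the two neighboring sorted samples.
  return static_cast<float>(v[lo] * (1.0 - frac) + v[hi] * frac);
}
```

Under this estimator, q = 0 returns the minimum, q = 100 the maximum, and a one-element vector returns its only value for any q, matching the Edge, SigleElement, OutOfBoundary_NEG and EmptyVector_NEG expectations shown above.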
a/compiler/tf2circle-value-pbtxt-remote-test/CMakeLists.txt +++ b/compiler/tf2circle-value-pbtxt-remote-test/CMakeLists.txt @@ -141,7 +141,6 @@ add_custom_command( COMMAND ${CMAKE_COMMAND} -E echo 'HDF5_EXPORT_ACTION_PATH=\"$\"' >> ${TEST_CONFIG} COMMAND ${CMAKE_COMMAND} -E echo 'HDF5_IMPORT_ACTION_PATH=\"$\"' >> ${TEST_CONFIG} COMMAND ${CMAKE_COMMAND} -E echo 'MODEL2NNPKG_PATH=\"${NNAS_PROJECT_SOURCE_DIR}/tools/nnpackage_tool/model2nnpkg/model2nnpkg.sh\"' >> ${TEST_CONFIG} - COMMAND ${CMAKE_COMMAND} -E echo 'NNPKG_TEST_PATH=\"${NNAS_PROJECT_SOURCE_DIR}/tests/scripts/nnpkg_test.sh\"' >> ${TEST_CONFIG} COMMAND ${CMAKE_COMMAND} -E echo 'RUNTIME_LIBRARY_PATH=\"${NNAS_PROJECT_SOURCE_DIR}/Product/out/\"' >> ${TEST_CONFIG} DEPENDS nnkit-run diff --git a/compiler/tf2circle-value-pbtxt-remote-test/README.md b/compiler/tf2circle-value-pbtxt-remote-test/README.md index 5546cc8..0d41b0a 100644 --- a/compiler/tf2circle-value-pbtxt-remote-test/README.md +++ b/compiler/tf2circle-value-pbtxt-remote-test/README.md @@ -36,13 +36,13 @@ #--------------- Remote Machine Setting ---------------# set(REMOTE_IP "xxx.xxx.xxx.xxx") set(REMOTE_USER "remote_username") - + #--------------------- Tests list ---------------------# add(UNIT_Add_000) add(UNIT_Add_001) ... ``` - - If any Tensorflow model is added, or if `REMOTE_IP` and `REMOTE_USER` is not given, `tf2circle-value-pbtxt-remote-test` will not be created. + - If any Tensorflow model is added, or if `REMOTE_IP` and `REMOTE_USER` is not given, `tf2circle-value-pbtxt-remote-test` will not be created. 1. (Optional) ssh authentication - This test uses `ssh` and `scp` commands, and those commands require a password of remote machine whenever they are called. This means that you should enter the password everytime when `ssh` and `scp` require. - This test resolves the problem by using `ssh-copy-id`, which copies the public key of host machine to `authorized_keys` of remote machine. Because of that, this test will ask the password of remote machine only once, at the first time. This is the only user interaction while running this test. @@ -71,7 +71,7 @@ ├ Result_latest -> Result_YYMMDD_hhmmss.csv ├ Result_YYMMDD_hhmmss.csv ├ ... - | + | ├ UNIT_Add_000 | ├ metadata | | ├ MANIFEST @@ -91,16 +91,16 @@ | ├ ... ``` -- `nnpkg_test.sh`, runtime products and each nnpackage are sent to `REMOTE_WORKDIR` in remote machine. +- Runtime products and each nnpackage are sent to `REMOTE_WORKDIR` in remote machine. - (TBD) Modify script not to remove obtained h5 file. ``` REMOTE_WORKDIR - ├ nnpkg_test.sh | ├ Product | └ out | ├ bin | ├ lib + | ├ test | ├ ... | ├ UNIT_Add_000 diff --git a/compiler/tf2circle-value-pbtxt-remote-test/testall.sh b/compiler/tf2circle-value-pbtxt-remote-test/testall.sh index ca6fb49..c80b00a 100755 --- a/compiler/tf2circle-value-pbtxt-remote-test/testall.sh +++ b/compiler/tf2circle-value-pbtxt-remote-test/testall.sh @@ -30,7 +30,6 @@ echo "-- Found nnkit-run: ${NNKIT_RUN_PATH}" echo "-- Found TF backend: ${TF_BACKEND_PATH}" echo "-- Found TF2CIRCLE: ${TF2CIRCLE_PATH}" echo "-- Found MODEL2NNPKG: ${MODEL2NNPKG_PATH}" -echo "-- Found nnpkg_test: ${NNPKG_TEST_PATH}" echo "-- Found Runtime library: ${RUNTIME_LIBRARY_PATH}" echo "-- Found randomize action: ${RANDOMIZE_ACTION_PATH}" echo "-- Found HDF5 export action: ${HDF5_EXPORT_ACTION_PATH}" @@ -42,11 +41,6 @@ if [ -z ${MODEL2NNPKG_PATH} ] || [ ! -f ${MODEL2NNPKG_PATH} ]; then exit 3 fi -if [ -z ${NNPKG_TEST_PATH} ] || [ ! 
-f ${NNPKG_TEST_PATH} ]; then - echo "nnpkg_test is not found" - exit 4 -fi - # Register remote machine ssh information cat /dev/zero | ssh-keygen -q -N "" ssh-copy-id -o ConnectTimeout=5 "${REMOTE_USER}@${REMOTE_IP}" @@ -61,9 +55,6 @@ fi ssh "${REMOTE_USER}@${REMOTE_IP}" "mkdir -p ${REMOTE_WORKDIR}/Product/" scp -r "${RUNTIME_LIBRARY_PATH}" "${REMOTE_USER}@${REMOTE_IP}:${REMOTE_WORKDIR}/Product/" -# Send nnpkg_test.sh -scp "${NNPKG_TEST_PATH}" "${REMOTE_USER}@${REMOTE_IP}:${REMOTE_WORKDIR}/" - TESTED=() PASSED=() FAILED=() @@ -120,8 +111,8 @@ while [[ $# -ne 0 ]]; do # Run test_arm_nnpkg in remote machine scp -r "${WORKDIR}/${PREFIX}/" "${REMOTE_USER}@${REMOTE_IP}:${REMOTE_WORKDIR}/${PREFIX}/" - ssh "${REMOTE_USER}@${REMOTE_IP}" "cd ${REMOTE_WORKDIR}; ./nnpkg_test.sh -i . -o ${PREFIX}/metadata/tc ${PREFIX}" - + ssh "${REMOTE_USER}@${REMOTE_IP}" "cd ${REMOTE_WORKDIR}; ./Product/out/test/onert-test nnpkg-test -i . -o ${PREFIX}/metadata/tc ${PREFIX}" + if [[ $? -eq 0 ]]; then touch "${PASSED_TAG}" fi diff --git a/compiler/tf2nnpackage-value-remote-test/CMakeLists.txt b/compiler/tf2nnpackage-value-remote-test/CMakeLists.txt index 4a59e88..255806c 100644 --- a/compiler/tf2nnpackage-value-remote-test/CMakeLists.txt +++ b/compiler/tf2nnpackage-value-remote-test/CMakeLists.txt @@ -33,12 +33,12 @@ endforeach() get_target_property(ARTIFACTS_SRC_PATH testDataGenerator SOURCE_DIR) -# In this test, only the runtime test is performed because the test from tf to -# nnpackage is done in common-artifacts, and for this runtime test, generation of +# In this test, only the runtime test is performed because the test from tf to +# nnpackage is done in common-artifacts, and for this runtime test, generation of # test data is required. And, tcgenerate in ${ARTIFACTS_SRC_PATH}/exclude.lst # means it won't generate test data, which is why below "tcgenerate" macro excludes -# specific opearators from runtime test. -# Also, since circlize and optimize macro included in `exclude.lst` file is only +# specific opearators from runtime test. +# Also, since circlize and optimize macro included in `exclude.lst` file is only # needed in common-artifacts, it has no function here. macro(circlize) endmacro() @@ -72,7 +72,6 @@ set(TEST_CONFIG "${CMAKE_CURRENT_BINARY_DIR}/test.config") add_custom_command( OUTPUT ${TEST_CONFIG} COMMAND ${CMAKE_COMMAND} -E remove -f ${TEST_CONFIG} - COMMAND ${CMAKE_COMMAND} -E echo 'NNPKG_TEST_PATH=\"${NNAS_PROJECT_SOURCE_DIR}/tests/scripts/nnpkg_test.sh\"' >> ${TEST_CONFIG} COMMAND ${CMAKE_COMMAND} -E echo 'RUNTIME_LIBRARY_PATH=\"${NNAS_PROJECT_SOURCE_DIR}/Product/out/\"' >> ${TEST_CONFIG} COMMENT "Generate test configuration" ) diff --git a/compiler/tf2nnpackage-value-remote-test/README.md b/compiler/tf2nnpackage-value-remote-test/README.md index 36436fc..65f307b 100644 --- a/compiler/tf2nnpackage-value-remote-test/README.md +++ b/compiler/tf2nnpackage-value-remote-test/README.md @@ -15,7 +15,7 @@ set(REMOTE_IP "xxx.xxx.xxx.xxx") set(REMOTE_USER "remote_username") ``` - - If any recipe is added, or if `REMOTE_IP` and `REMOTE_USER` is not given, `tf2nnpackage-value-remote-test` will not be created. + - If any recipe is added, or if `REMOTE_IP` and `REMOTE_USER` is not given, `tf2nnpackage-value-remote-test` will not be created. 1. (Optional) ssh authentication - This test uses `ssh` and `scp` commands, and those commands require a password of remote machine whenever they are called. This means that you should enter the password everytime when `ssh` and `scp` require. 
- This test resolves the problem by using `ssh-copy-id`, which copies the public key of host machine to `authorized_keys` of remote machine. Because of that, this test will ask the password of remote machine only once, at the first time. This is the only user interaction while running this test. @@ -39,7 +39,7 @@ ### Generated Files While Running - All related files(`pb`, `circle`, `h5` ... etc.) are taken from `build/compiler/common-artifacts` folder. -- `nnpkg_test.sh`, runtime products and each nnpackage are sent to `REMOTE_WORKDIR` in remote machine. +- Runtime products and each nnpackage are sent to `REMOTE_WORKDIR` in remote machine. - Each test result is generated in `build/compiler/common-artifacts` with the name `${RECIPE}.log` ### Check Test Result diff --git a/compiler/tf2nnpackage-value-remote-test/testall.sh b/compiler/tf2nnpackage-value-remote-test/testall.sh index f1c9789..ca672a3 100755 --- a/compiler/tf2nnpackage-value-remote-test/testall.sh +++ b/compiler/tf2nnpackage-value-remote-test/testall.sh @@ -27,15 +27,9 @@ RESULT_CSV="${BINDIR}/Result_${CURRENT_DATETIME}.csv" source "${CONFIG_PATH}" -echo "-- Found nnpkg_test: ${NNPKG_TEST_PATH}" echo "-- Found Runtime library: ${RUNTIME_LIBRARY_PATH}" echo "-- Found workdir: ${WORKDIR}" -if [ -z ${NNPKG_TEST_PATH} ] || [ ! -f ${NNPKG_TEST_PATH} ]; then - echo "nnpkg_test is not found" - exit 4 -fi - # Register remote machine ssh information cat /dev/zero | ssh-keygen -q -N "" ssh-copy-id -o ConnectTimeout=5 "${REMOTE_USER}@${REMOTE_IP}" @@ -50,9 +44,6 @@ fi ssh "${REMOTE_USER}@${REMOTE_IP}" "mkdir -p ${REMOTE_WORKDIR}/Product/" scp -r "${RUNTIME_LIBRARY_PATH}" "${REMOTE_USER}@${REMOTE_IP}:${REMOTE_WORKDIR}/Product/" -# Send nnpkg_test.sh -scp "${NNPKG_TEST_PATH}" "${REMOTE_USER}@${REMOTE_IP}:${REMOTE_WORKDIR}/" - TESTED=() PASSED=() FAILED=() @@ -84,8 +75,8 @@ while [[ $# -ne 0 ]]; do PREFIX=${PREFIX}.opt ; fi scp -r "${PREFIX}/" "${REMOTE_USER}@${REMOTE_IP}:${REMOTE_WORKDIR}/${PREFIX}/" - ssh "${REMOTE_USER}@${REMOTE_IP}" "cd ${REMOTE_WORKDIR}; ./nnpkg_test.sh ${PREFIX}" - + ssh "${REMOTE_USER}@${REMOTE_IP}" "cd ${REMOTE_WORKDIR}; ./Product/out/test/onert-test nnpkg-test ${PREFIX}" + if [[ $? -eq 0 ]]; then touch "${BINDIR}/${PASSED_TAG}" fi diff --git a/compiler/tf2tfliteV2/README.md b/compiler/tf2tfliteV2/README.md index 13359aa..0a90735 100644 --- a/compiler/tf2tfliteV2/README.md +++ b/compiler/tf2tfliteV2/README.md @@ -47,6 +47,9 @@ python tf2tfliteV2.py \ -h, --help show this help message and exit --v1 Use TensorFlow Lite Converter 1.x --v2 Use TensorFlow Lite Converter 2.x + --graph_def Use graph def file(default) + --saved_model Use saved model + --keras_model Use keras model -i INPUT_PATH, --input_path INPUT_PATH Full filepath of the input file. -o OUTPUT_PATH, --output_path OUTPUT_PATH @@ -55,7 +58,8 @@ python tf2tfliteV2.py \ Names of the input arrays, comma-separated. -s INPUT_SHAPES, --input_shapes INPUT_SHAPES Shapes corresponding to --input_arrays, colon- - separated. + separated.(ex:"1,4,4,3:1,20,20,3") -O OUTPUT_ARRAYS, --output_arrays OUTPUT_ARRAYS Names of the output arrays, comma-separated. + ``` diff --git a/compiler/tf2tfliteV2/tf2tfliteV2.py b/compiler/tf2tfliteV2/tf2tfliteV2.py index 82d6ee2..c51dabd 100755 --- a/compiler/tf2tfliteV2/tf2tfliteV2.py +++ b/compiler/tf2tfliteV2/tf2tfliteV2.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 # Copyright (c) 2020 Samsung Electronics Co., Ltd. 
All Rights Reserved # Copyright (C) 2018 The TensorFlow Authors @@ -48,6 +48,27 @@ def _get_parser(): converter_version.add_argument( "--v2", action="store_true", help="Use TensorFlow Lite Converter 2.x") + # Input model format + model_format_arg = parser.add_mutually_exclusive_group() + model_format_arg.add_argument( + "--graph_def", + action="store_const", + dest="model_format", + const="graph_def", + help="Use graph def file(default)") + model_format_arg.add_argument( + "--saved_model", + action="store_const", + dest="model_format", + const="saved_model", + help="Use saved model") + model_format_arg.add_argument( + "--keras_model", + action="store_const", + dest="model_format", + const="keras_model", + help="Use keras model") + # Input and output path. parser.add_argument( "-i", @@ -83,6 +104,8 @@ def _get_parser(): help="Names of the output arrays, comma-separated.", required=True) + # Set default value + parser.set_defaults(model_format="graph_def") return parser @@ -122,17 +145,26 @@ def _parse_array(arrays, type_fn=str): def _v1_convert(flags): - input_shapes = None - if flags.input_shapes: - input_arrays = _parse_array(flags.input_arrays) - input_shapes_list = [ - _parse_array(shape, type_fn=int) for shape in flags.input_shapes.split(":") - ] - input_shapes = dict(list(zip(input_arrays, input_shapes_list))) - - converter = tf.compat.v1.lite.TFLiteConverter.from_frozen_graph( - flags.input_path, _parse_array(flags.input_arrays), - _parse_array(flags.output_arrays), input_shapes) + if flags.model_format == "graph_def": + input_shapes = None + if flags.input_shapes: + input_arrays = _parse_array(flags.input_arrays) + input_shapes_list = [ + _parse_array(shape, type_fn=int) + for shape in flags.input_shapes.split(":") + ] + input_shapes = dict(list(zip(input_arrays, input_shapes_list))) + + converter = tf.compat.v1.lite.TFLiteConverter.from_frozen_graph( + flags.input_path, _parse_array(flags.input_arrays), + _parse_array(flags.output_arrays), input_shapes) + + if flags.model_format == "saved_model": + converter = tf.compat.v1.lite.TFLiteConverter.from_saved_model(flags.input_path) + + if flags.model_format == "keras_model": + converter = tf.compat.v1.lite.TFLiteConverter.from_keras_model_file( + flags.input_path) converter.allow_custom_ops = True @@ -141,27 +173,35 @@ def _v1_convert(flags): def _v2_convert(flags): - file_content = open(flags.input_path, 'rb').read() - try: - graph_def = tf.compat.v1.GraphDef() - graph_def.ParseFromString(file_content) - except (_text_format.ParseError, DecodeError): + if flags.model_format == "graph_def": + file_content = open(flags.input_path, 'rb').read() try: - _text_format.Merge(file_content, graph_def) + graph_def = tf.compat.v1.GraphDef() + graph_def.ParseFromString(file_content) except (_text_format.ParseError, DecodeError): - raise IOError("Unable to parse input file '{}'.".format(flags.input_path)) - - wrap_func = wrap_frozen_graph( - graph_def, - inputs=[ - _str + ":0" if len(_str.split(":")) == 1 else _str - for _str in _parse_array(flags.input_arrays) - ], - outputs=[ - _str + ":0" if len(_str.split(":")) == 1 else _str - for _str in _parse_array(flags.output_arrays) - ]) - converter = tf.lite.TFLiteConverter.from_concrete_functions([wrap_func]) + try: + _text_format.Merge(file_content, graph_def) + except (_text_format.ParseError, DecodeError): + raise IOError("Unable to parse input file '{}'.".format(flags.input_path)) + + wrap_func = wrap_frozen_graph( + graph_def, + inputs=[ + _str + ":0" if len(_str.split(":")) == 1 else _str + for 
_str in _parse_array(flags.input_arrays) + ], + outputs=[ + _str + ":0" if len(_str.split(":")) == 1 else _str + for _str in _parse_array(flags.output_arrays) + ]) + converter = tf.lite.TFLiteConverter.from_concrete_functions([wrap_func]) + + if flags.model_format == "saved_model": + converter = tf.lite.TFLiteConverter.from_saved_model(flags.input_path) + + if flags.model_format == "keras_model": + keras_model = tf.keras.models.load_model(flags.input_path) + converter = tf.lite.TFLiteConverter.from_keras_model(keras_model) converter.allow_custom_ops = True converter.experimental_new_converter = True diff --git a/compiler/tfl-verify/CMakeLists.txt b/compiler/tfl-verify/CMakeLists.txt index d33059f..4421a46 100644 --- a/compiler/tfl-verify/CMakeLists.txt +++ b/compiler/tfl-verify/CMakeLists.txt @@ -6,6 +6,7 @@ file(GLOB_RECURSE SOURCES "src/*.cpp") add_executable(tfl-verify ${SOURCES}) target_include_directories(tfl-verify PRIVATE src) +target_link_libraries(tfl-verify arser) target_link_libraries(tfl-verify foder) target_link_libraries(tfl-verify mio_tflite) target_link_libraries(tfl-verify safemain) diff --git a/compiler/tfl-verify/requires.cmake b/compiler/tfl-verify/requires.cmake index ed6b84d..79503f3 100644 --- a/compiler/tfl-verify/requires.cmake +++ b/compiler/tfl-verify/requires.cmake @@ -1,3 +1,4 @@ +require("arser") require("foder") require("mio-tflite") require("safemain") diff --git a/compiler/tfl-verify/src/Driver.cpp b/compiler/tfl-verify/src/Driver.cpp index 81f6d54..6d18976 100644 --- a/compiler/tfl-verify/src/Driver.cpp +++ b/compiler/tfl-verify/src/Driver.cpp @@ -16,22 +16,31 @@ #include "VerifyFlatBuffers.h" +#include + #include #include #include int entry(int argc, char **argv) { - if (argc != 2) + arser::Arser arser; + arser.add_argument("tflite").type(arser::DataType::STR).help("TFLite file path to verify"); + + try { - std::cerr << "ERROR: Failed to parse arguments" << std::endl; - std::cerr << std::endl; - std::cerr << "USAGE: " << argv[0] << " [tflite]" << std::endl; + arser.parse(argc, argv); + } + catch (const std::runtime_error &err) + { + std::cout << err.what() << std::endl; + std::cout << arser; return 255; } + auto verifier = std::make_unique(); - std::string model_file = argv[argc - 1]; + std::string model_file = arser.get("tflite"); std::cout << "[ RUN ] Check " << model_file << std::endl; diff --git a/compiler/tflchef/core/src/ModelChef.cpp b/compiler/tflchef/core/src/ModelChef.cpp index 932a649..692ce48 100644 --- a/compiler/tflchef/core/src/ModelChef.cpp +++ b/compiler/tflchef/core/src/ModelChef.cpp @@ -413,6 +413,7 @@ template void cook_graph(const T &graph, CookParams &cp) quant_builder.add_min(quant_min); quant_builder.add_scale(quant_scale); quant_builder.add_zero_point(quant_zero_point); + quant_builder.add_quantized_dimension(quant.quantized_dimension()); // Update QuantizationParameters Index quant_index = quant_builder.Finish(); diff --git a/compiler/tflchef/core/src/Op/NonMaxSuppressionV4.cpp b/compiler/tflchef/core/src/Op/NonMaxSuppressionV4.cpp new file mode 100644 index 0000000..eadd62c --- /dev/null +++ b/compiler/tflchef/core/src/Op/NonMaxSuppressionV4.cpp @@ -0,0 +1,30 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
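[Editor's note on the quantized_dimension plumbing] The `add_quantized_dimension` line added to ModelChef.cpp above, together with the new `quantized_dimension` proto field and the RecipeChef read-back later in this patch, carries the per-channel quantization axis through tflchef. As a reminder of what the field means, the sketch below shows per-channel dequantization: each slice along `quantized_dimension` has its own scale and zero point. This is plain illustrative C++ under that assumption, not code from the patch.

```cpp
// Sketch only: per-channel dequantization of a flat row-major int8 buffer
// shaped according to `dims`, where `quantized_dimension` selects the axis
// that the per-channel `scales` / `zero_points` apply to.
#include <cstdint>
#include <vector>

std::vector<float> dequantizePerChannel(const std::vector<int8_t> &data,
                                        const std::vector<int32_t> &dims,
                                        int32_t quantized_dimension,
                                        const std::vector<float> &scales,
                                        const std::vector<int64_t> &zero_points)
{
  // Stride of one step along the quantized dimension (row-major layout),
  // and the number of channels along that dimension.
  int64_t inner = 1;
  for (std::size_t d = quantized_dimension + 1; d < dims.size(); ++d)
    inner *= dims[d];
  const int64_t channels = dims[quantized_dimension];

  std::vector<float> out(data.size());
  for (std::size_t i = 0; i < data.size(); ++i)
  {
    // Channel index of this flat element along the quantized dimension.
    const int64_t c = (static_cast<int64_t>(i) / inner) % channels;
    out[i] = scales[c] * static_cast<float>(static_cast<int64_t>(data[i]) - zero_points[c]);
  }
  return out;
}
```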
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "NonMaxSuppressionV4.h" + +flatbuffers::Offset NonMaxSuppressionV4Chef::value(flatbuffers::FlatBufferBuilder &fbb) const +{ + tflite::NonMaxSuppressionV4OptionsBuilder options_builder{fbb}; + + return options_builder.Finish().Union(); +} + +std::unique_ptr +NonMaxSuppressionV4ChefFactory::create(const tflchef::Operation *operation) const +{ + return std::unique_ptr{new NonMaxSuppressionV4Chef{operation}}; +} diff --git a/compiler/tflchef/core/src/Op/NonMaxSuppressionV4.h b/compiler/tflchef/core/src/Op/NonMaxSuppressionV4.h new file mode 100644 index 0000000..a8e783d --- /dev/null +++ b/compiler/tflchef/core/src/Op/NonMaxSuppressionV4.h @@ -0,0 +1,52 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __OP_NON_MAX_SUPPRESSION_V4_H__ +#define __OP_NON_MAX_SUPPRESSION_V4_H__ + +#include "OpChef.h" + +class NonMaxSuppressionV4Chef final : public OpChef +{ +public: + explicit NonMaxSuppressionV4Chef(const tflchef::Operation *operation) : _operation{operation} + { + // DO NOTHING + } + +public: + tflite::BuiltinOperator code(void) const override + { + return tflite::BuiltinOperator_NON_MAX_SUPPRESSION_V4; + } + + tflite::BuiltinOptions type(void) const override + { + return tflite::BuiltinOptions_NonMaxSuppressionV4Options; + } + + flatbuffers::Offset value(flatbuffers::FlatBufferBuilder &fbb) const override; + +private: + const tflchef::Operation *_operation; +}; + +struct NonMaxSuppressionV4ChefFactory final : public OpChefFactory +{ + std::unique_ptr create(const tflchef::Operation *operation) const override; +}; + +#endif // __OP_NON_MAX_SUPPRESSION_V4_H__ diff --git a/compiler/tflchef/core/src/Op/PadV2.cpp b/compiler/tflchef/core/src/Op/PadV2.cpp new file mode 100644 index 0000000..bfa2289 --- /dev/null +++ b/compiler/tflchef/core/src/Op/PadV2.cpp @@ -0,0 +1,28 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "PadV2.h" + +flatbuffers::Offset PadV2Chef::value(flatbuffers::FlatBufferBuilder &fbb) const +{ + tflite::PadV2OptionsBuilder padv2_options_builder{fbb}; + return padv2_options_builder.Finish().Union(); +} + +std::unique_ptr PadV2ChefFactory::create(const tflchef::Operation *operation) const +{ + return std::unique_ptr{new PadV2Chef{operation}}; +} diff --git a/compiler/tflchef/core/src/Op/PadV2.h b/compiler/tflchef/core/src/Op/PadV2.h new file mode 100644 index 0000000..d155323 --- /dev/null +++ b/compiler/tflchef/core/src/Op/PadV2.h @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __OP_PADV2_H__ +#define __OP_PADV2_H__ + +#include "OpChef.h" + +class PadV2Chef final : public OpChef +{ +public: + explicit PadV2Chef(const tflchef::Operation *operation) : _operation{operation} + { + // DO NOTHING + } + +public: + tflite::BuiltinOperator code(void) const override { return tflite::BuiltinOperator_PADV2; } + + tflite::BuiltinOptions type(void) const override { return tflite::BuiltinOptions_PadV2Options; } + + flatbuffers::Offset value(flatbuffers::FlatBufferBuilder &fbb) const override; + +private: + const tflchef::Operation *_operation; +}; + +struct PadV2ChefFactory final : public OpChefFactory +{ + std::unique_ptr create(const tflchef::Operation *operation) const override; +}; + +#endif // __OP_PADV2_H__ diff --git a/compiler/tflchef/core/src/OpChef.def b/compiler/tflchef/core/src/OpChef.def index 263725a..2441862 100644 --- a/compiler/tflchef/core/src/OpChef.def +++ b/compiler/tflchef/core/src/OpChef.def @@ -55,10 +55,12 @@ OP_CHEF(Minimum, MinimumChefFactory) OP_CHEF(MirrorPad, MirrorPadChefFactory) OP_CHEF(Mul, MulChefFactory) OP_CHEF(Neg, NegChefFactory) +OP_CHEF(NonMaxSuppressionV4, NonMaxSuppressionV4ChefFactory) OP_CHEF(NotEqual, NotEqualChefFactory) OP_CHEF(OneHot, OneHotChefFactory) OP_CHEF(Pack, PackChefFactory) OP_CHEF(Pad, PadChefFactory) +OP_CHEF(PadV2, PadV2ChefFactory) OP_CHEF(Pow, PowChefFactory) OP_CHEF(PRelu, PReluChefFactory) OP_CHEF(Range, RangeChefFactory) diff --git a/compiler/tflchef/core/src/OpChefs.h b/compiler/tflchef/core/src/OpChefs.h index 55c37eb..5b2e89b 100644 --- a/compiler/tflchef/core/src/OpChefs.h +++ b/compiler/tflchef/core/src/OpChefs.h @@ -68,10 +68,12 @@ #include "Op/MirrorPad.h" #include "Op/Mul.h" #include "Op/Neg.h" +#include "Op/NonMaxSuppressionV4.h" #include "Op/NotEqual.h" #include "Op/OneHot.h" #include "Op/Pack.h" #include "Op/Pad.h" +#include "Op/PadV2.h" #include "Op/Pow.h" #include "Op/PRelu.h" #include "Op/Range.h" diff --git a/compiler/tflchef/proto/tflchef.proto b/compiler/tflchef/proto/tflchef.proto index 792503b..70b966e 100644 --- a/compiler/tflchef/proto/tflchef.proto +++ b/compiler/tflchef/proto/tflchef.proto @@ -35,6 +35,7 @@ message TensorQuantization { repeated float max = 2; repeated float scale = 3; repeated int64 zero_point = 4; + optional int32 quantized_dimension = 5 [default = 0]; } message 
Operand { @@ -153,6 +154,10 @@ message PadOptions { // None } +message PadV2Options { + // None +} + message MirrorPadOptions { optional MirrorPadMode mode = 1 [default = REFLECT]; } @@ -362,6 +367,10 @@ message GatherNdOptions { // None } +message NonMaxSuppressionV4Options { + // None +} + message NotEqualOptions { // None } @@ -507,7 +516,7 @@ message Operation { optional LogSoftmaxOptions log_softmax_options = 168; // DequantizeOptions 169 optional NegOptions neg_options = 170; - // PadV2Options 171 + optional PadV2Options padv2_options = 171; optional LessEqualOptions lessequal_options = 172; optional SliceOptions slice_options = 173; optional TransposeConvOptions transpose_conv_options = 174; @@ -534,7 +543,7 @@ message Operation { optional MatrixSetDiagOptions matrix_set_diag_options = 195; // HardSwishOptions 196 optional DepthToSpaceOptions depth_to_space_options = 197; - // NonMaxSuppressionV4Options 198 + optional NonMaxSuppressionV4Options non_max_suppression_v4_options = 198; // NonMaxSuppressionV5Options 199 optional ScatterNdOptions scatter_nd_options = 200; optional NotEqualOptions notequal_options = 201; diff --git a/compiler/tflchef/tflite/src/Op/NonMaxSuppressionV4.cpp b/compiler/tflchef/tflite/src/Op/NonMaxSuppressionV4.cpp new file mode 100644 index 0000000..ad99219 --- /dev/null +++ b/compiler/tflchef/tflite/src/Op/NonMaxSuppressionV4.cpp @@ -0,0 +1,56 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "NonMaxSuppressionV4.h" + +#include "Convert.h" +#include "FillerHelper.h" + +namespace tflchef +{ + +void TFliteOpNonMaxSuppressionV4::filler(const tflite::Operator *op, TFliteImport *import, + tflchef::ModelRecipe *model_recipe) const +{ + const auto &inputs = *op->inputs(); + + const tflite::Tensor *max_output_size_tensor = import->tensors()->Get(inputs[2]); + assert(max_output_size_tensor->type() == tflite::TensorType::TensorType_INT32); + + const tflite::Tensor *iou_threshold_tensor = import->tensors()->Get(inputs[3]); + assert(iou_threshold_tensor->type() == tflite::TensorType::TensorType_FLOAT32); + + const tflite::Tensor *score_threshold_tensor = import->tensors()->Get(inputs[4]); + assert(score_threshold_tensor->type() == tflite::TensorType::TensorType_FLOAT32); + + for (int32_t index = 2; index < 5; ++index) + { + fill_tensor_to_import(index, import); + } +} + +tflchef::Operation *TFliteOpNonMaxSuppressionV4::build(const tflite::Operator *op, + TFliteImport *import, + tflchef::ModelRecipe *model_recipe) const +{ + auto operation = model_recipe->add_operation(); + + operation->set_type("NonMaxSuppressionV4"); + + return operation; +} + +} // namespace tflchef diff --git a/compiler/tflchef/tflite/src/Op/NonMaxSuppressionV4.h b/compiler/tflchef/tflite/src/Op/NonMaxSuppressionV4.h new file mode 100644 index 0000000..114a2ad --- /dev/null +++ b/compiler/tflchef/tflite/src/Op/NonMaxSuppressionV4.h @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __TFLITE_OP_NON_MAX_SUPPRESSION_V4_H__ +#define __TFLITE_OP_NON_MAX_SUPPRESSION_V4_H__ + +#include "TFliteOpChef.h" + +namespace tflchef +{ + +/** + * @brief tflchef operator builder for NON_MAX_SUPPRESSION_V4 + */ +class TFliteOpNonMaxSuppressionV4 : public TFliteOpChef +{ +public: + void filler(const tflite::Operator *op, TFliteImport *import, + tflchef::ModelRecipe *model_recipe) const override; + tflchef::Operation *build(const tflite::Operator *op, TFliteImport *import, + tflchef::ModelRecipe *model_recipe) const override; +}; + +} // namespace tflchef + +#endif // __TFLITE_OP_NON_MAX_SUPPRESSION_V4_H__ diff --git a/compiler/tflchef/tflite/src/Op/PadV2.cpp b/compiler/tflchef/tflite/src/Op/PadV2.cpp new file mode 100644 index 0000000..0b1c9f3 --- /dev/null +++ b/compiler/tflchef/tflite/src/Op/PadV2.cpp @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "PadV2.h" + +#include "FillerHelper.h" + +namespace tflchef +{ + +void TFliteOpPadV2::filler(const tflite::Operator *op, TFliteImport *import, + tflchef::ModelRecipe *model_recipe) const +{ + // Filler for paddings and constant_values + fill_tensor_to_import(1, import); + fill_tensor_to_import(2, import); +} + +tflchef::Operation *TFliteOpPadV2::build(const tflite::Operator *op, TFliteImport *import, + tflchef::ModelRecipe *model_recipe) const +{ + auto operation = model_recipe->add_operation(); + + operation->set_type("PadV2"); + + return operation; +} + +} // namespace tflchef diff --git a/compiler/tflchef/tflite/src/Op/PadV2.h b/compiler/tflchef/tflite/src/Op/PadV2.h new file mode 100644 index 0000000..3aa474b --- /dev/null +++ b/compiler/tflchef/tflite/src/Op/PadV2.h @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __TFLITE_OP_PADV2_H__ +#define __TFLITE_OP_PADV2_H__ + +#include "TFliteOpChef.h" + +namespace tflchef +{ + +/** + * @brief tflchef operator builder for PADV2 + */ +class TFliteOpPadV2 : public TFliteOpChef +{ +public: + void filler(const tflite::Operator *op, TFliteImport *import, + tflchef::ModelRecipe *model_recipe) const override; + tflchef::Operation *build(const tflite::Operator *op, TFliteImport *import, + tflchef::ModelRecipe *model_recipe) const override; +}; + +} // namespace tflchef + +#endif // __TFLITE_OP_PADV2_H__ diff --git a/compiler/tflchef/tflite/src/Op/TransposeConv.cpp b/compiler/tflchef/tflite/src/Op/TransposeConv.cpp index 7e772b9..4e7adf6 100644 --- a/compiler/tflchef/tflite/src/Op/TransposeConv.cpp +++ b/compiler/tflchef/tflite/src/Op/TransposeConv.cpp @@ -35,6 +35,10 @@ void TFliteOpTransposeConv::filler(const tflite::Operator *op, TFliteImport *imp auto vec = extract_buffer(buffer); import->set_tensor_filler(inputs[0], vec); } + + // filter + const tflite::Tensor *filter_tensor = import->tensors()->Get(inputs[1]); + import->set_tensor_filler(inputs[1]); } tflchef::Operation *TFliteOpTransposeConv::build(const tflite::Operator *op, TFliteImport *import, diff --git a/compiler/tflchef/tflite/src/RecipeChef.cpp b/compiler/tflchef/tflite/src/RecipeChef.cpp index db62d0e..088961c 100644 --- a/compiler/tflchef/tflite/src/RecipeChef.cpp +++ b/compiler/tflchef/tflite/src/RecipeChef.cpp @@ -184,6 +184,8 @@ std::unique_ptr generate_recipe(const tflite::Model *model) for (uint32_t idx = 0; idx < quant->zero_point()->size(); ++idx) chef_quant->add_zero_point(quant->zero_point()->Get(idx)); } + tflchef::TensorQuantization *chef_quant = operand->mutable_quant(); + chef_quant->set_quantized_dimension(quant->quantized_dimension()); } } diff --git a/compiler/tflchef/tflite/src/TFliteOpChefs.h b/compiler/tflchef/tflite/src/TFliteOpChefs.h index ad52af1..de14e37 100644 --- a/compiler/tflchef/tflite/src/TFliteOpChefs.h +++ 
b/compiler/tflchef/tflite/src/TFliteOpChefs.h @@ -68,10 +68,12 @@ #include "Op/MirrorPad.h" #include "Op/Mul.h" #include "Op/Neg.h" +#include "Op/NonMaxSuppressionV4.h" #include "Op/NotEqual.h" #include "Op/OneHot.h" #include "Op/Pack.h" #include "Op/Pad.h" +#include "Op/PadV2.h" #include "Op/Pow.h" #include "Op/PRelu.h" #include "Op/Range.h" diff --git a/compiler/tflchef/tflite/src/TFliteOpRegistry.h b/compiler/tflchef/tflite/src/TFliteOpRegistry.h index 0a44b3f..8d33007 100644 --- a/compiler/tflchef/tflite/src/TFliteOpRegistry.h +++ b/compiler/tflchef/tflite/src/TFliteOpRegistry.h @@ -105,10 +105,12 @@ private: REG_TFL_OP(MIRROR_PAD, TFliteOpMirrorPad); REG_TFL_OP(MUL, TFliteOpMul); REG_TFL_OP(NEG, TFliteOpNeg); + REG_TFL_OP(NON_MAX_SUPPRESSION_V4, TFliteOpNonMaxSuppressionV4); REG_TFL_OP(NOT_EQUAL, TFliteOpNotEqual); REG_TFL_OP(ONE_HOT, TFliteOpOneHot); REG_TFL_OP(PACK, TFliteOpPack); REG_TFL_OP(PAD, TFliteOpPad); + REG_TFL_OP(PADV2, TFliteOpPadV2); REG_TFL_OP(POW, TFliteOpPow); REG_TFL_OP(PRELU, TFliteOpPRelu); REG_TFL_OP(RANGE, TFliteOpRange); diff --git a/compiler/tflchef/tools/file/Driver.cpp b/compiler/tflchef/tools/file/Driver.cpp index cecfeeb..46e5b55 100644 --- a/compiler/tflchef/tools/file/Driver.cpp +++ b/compiler/tflchef/tools/file/Driver.cpp @@ -41,7 +41,7 @@ int entry(int argc, char **argv) { std::cout << err.what() << std::endl; std::cout << arser; - return 0; + return 255; } int32_t model_version = 1; diff --git a/compiler/tflchef/tools/reverse/Driver.cpp b/compiler/tflchef/tools/reverse/Driver.cpp index 1116dec..4d795a3 100644 --- a/compiler/tflchef/tools/reverse/Driver.cpp +++ b/compiler/tflchef/tools/reverse/Driver.cpp @@ -38,7 +38,7 @@ int entry(int argc, char **argv) { std::cout << err.what() << std::endl; std::cout << arser; - return 0; + return 255; } std::string tflite_path = arser.get("tflite"); diff --git a/compiler/tfldump/driver/Driver.cpp b/compiler/tfldump/driver/Driver.cpp index 3961d2f..38c9c06 100644 --- a/compiler/tfldump/driver/Driver.cpp +++ b/compiler/tfldump/driver/Driver.cpp @@ -33,7 +33,7 @@ int entry(int argc, char **argv) { std::cout << err.what() << '\n'; std::cout << arser; - return 0; + return 255; } std::string tflite_path = arser.get("tflite"); diff --git a/compiler/tfldump/src/OpPrinter.cpp b/compiler/tfldump/src/OpPrinter.cpp index 9fc1a64..df027c3 100644 --- a/compiler/tfldump/src/OpPrinter.cpp +++ b/compiler/tfldump/src/OpPrinter.cpp @@ -676,6 +676,7 @@ OpPrinterRegistry::OpPrinterRegistry() _op_map[tflite::BuiltinOperator_MAX_POOL_2D] = make_unique(); _op_map[tflite::BuiltinOperator_MIRROR_PAD] = make_unique(); _op_map[tflite::BuiltinOperator_MUL] = make_unique(); + // There is no Option for NON_MAX_SUPPRESSION_V4 _op_map[tflite::BuiltinOperator_ONE_HOT] = make_unique(); _op_map[tflite::BuiltinOperator_PACK] = make_unique(); // There is no Option for PAD diff --git a/compiler/tflite2circle/CMakeLists.txt b/compiler/tflite2circle/CMakeLists.txt index a0a2e02..b1d1f61 100644 --- a/compiler/tflite2circle/CMakeLists.txt +++ b/compiler/tflite2circle/CMakeLists.txt @@ -14,5 +14,6 @@ target_link_libraries(tflite2circle arser) target_link_libraries(tflite2circle safemain) target_link_libraries(tflite2circle mio_tflite) target_link_libraries(tflite2circle mio_circle) +target_link_libraries(tflite2circle vconone) install(TARGETS tflite2circle DESTINATION bin) diff --git a/compiler/tflite2circle/driver/Driver.cpp b/compiler/tflite2circle/driver/Driver.cpp index 67b8e33..2f11e0a 100644 --- a/compiler/tflite2circle/driver/Driver.cpp +++ 
b/compiler/tflite2circle/driver/Driver.cpp @@ -24,10 +24,25 @@ #include "CircleModel.h" #include "TFLModel.h" +#include + +void print_version(void) +{ + std::cout << "tflite2circle version " << vconone::get_string() << std::endl; + std::cout << vconone::get_copyright() << std::endl; +} + int entry(int argc, char **argv) { arser::Arser arser{"tflite2circle is a Tensorflow lite to circle model converter"}; + arser.add_argument("--version") + .nargs(0) + .required(false) + .default_value(false) + .help("Show version information and exit") + .exit_with(print_version); + arser.add_argument("tflite") .nargs(1) .type(arser::DataType::STR) @@ -42,7 +57,7 @@ int entry(int argc, char **argv) { std::cout << err.what() << std::endl; std::cout << arser; - return 0; + return 255; } std::string tfl_path = arser.get("tflite"); diff --git a/compiler/tflite2circle/requires.cmake b/compiler/tflite2circle/requires.cmake index ff19b74..837c287 100644 --- a/compiler/tflite2circle/requires.cmake +++ b/compiler/tflite2circle/requires.cmake @@ -2,3 +2,4 @@ require("arser") require("mio-tflite") require("mio-circle") require("safemain") +require("vconone") diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions.h b/compiler/tflite2circle/src/BuildBuiltinOptions.h index 159a8af..00b3de9 100644 --- a/compiler/tflite2circle/src/BuildBuiltinOptions.h +++ b/compiler/tflite2circle/src/BuildBuiltinOptions.h @@ -62,10 +62,12 @@ #include "BuildBuiltinOptions/MirrorPadOptions.h" #include "BuildBuiltinOptions/MulOptions.h" #include "BuildBuiltinOptions/NegOptions.h" +#include "BuildBuiltinOptions/NonMaxSuppressionV4Options.h" #include "BuildBuiltinOptions/NotEqualOptions.h" #include "BuildBuiltinOptions/OneHotOptions.h" #include "BuildBuiltinOptions/PackOptions.h" #include "BuildBuiltinOptions/PadOptions.h" +#include "BuildBuiltinOptions/PadV2Options.h" #include "BuildBuiltinOptions/RangeOptions.h" #include "BuildBuiltinOptions/Pool2DOptions.h" #include "BuildBuiltinOptions/PowOptions.h" diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/NonMaxSuppressionV4Options.cpp b/compiler/tflite2circle/src/BuildBuiltinOptions/NonMaxSuppressionV4Options.cpp new file mode 100644 index 0000000..1a39f50 --- /dev/null +++ b/compiler/tflite2circle/src/BuildBuiltinOptions/NonMaxSuppressionV4Options.cpp @@ -0,0 +1,30 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "NonMaxSuppressionV4Options.h" + +namespace tflite2circle +{ + +flatbuffers::Offset +build_circle_NonMaxSuppressionV4Options(flatbuffers::FlatBufferBuilder &fb, + const tflite::Operator *) +{ + circle::NonMaxSuppressionV4OptionsBuilder builtin_options_builder{fb}; + return builtin_options_builder.Finish(); +} + +} // namespace tflite2circle diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/NonMaxSuppressionV4Options.h b/compiler/tflite2circle/src/BuildBuiltinOptions/NonMaxSuppressionV4Options.h new file mode 100644 index 0000000..6073142 --- /dev/null +++ b/compiler/tflite2circle/src/BuildBuiltinOptions/NonMaxSuppressionV4Options.h @@ -0,0 +1,32 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __BBO_NON_MAX_SUPPRESSION_V4_OPTIONS_H__ +#define __BBO_NON_MAX_SUPPRESSION_V4_OPTIONS_H__ + +#include +#include + +namespace tflite2circle +{ + +flatbuffers::Offset +build_circle_NonMaxSuppressionV4Options(flatbuffers::FlatBufferBuilder &fb, + const tflite::Operator *op); + +} // namespace tflite2circle + +#endif // __BBO_NON_MAX_SUPPRESSION_V4_OPTIONS_H__ diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/PadV2Options.cpp b/compiler/tflite2circle/src/BuildBuiltinOptions/PadV2Options.cpp new file mode 100644 index 0000000..6636634 --- /dev/null +++ b/compiler/tflite2circle/src/BuildBuiltinOptions/PadV2Options.cpp @@ -0,0 +1,29 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "PadV2Options.h" + +namespace tflite2circle +{ + +flatbuffers::Offset +build_circle_PadV2Options(flatbuffers::FlatBufferBuilder &fb, const tflite::Operator *op) +{ + circle::PadV2OptionsBuilder builtin_options_builder{fb}; + return builtin_options_builder.Finish(); +} + +} // namespace tflite2circle diff --git a/compiler/tflite2circle/src/BuildBuiltinOptions/PadV2Options.h b/compiler/tflite2circle/src/BuildBuiltinOptions/PadV2Options.h new file mode 100644 index 0000000..36a2c82 --- /dev/null +++ b/compiler/tflite2circle/src/BuildBuiltinOptions/PadV2Options.h @@ -0,0 +1,31 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __BBO_PADV2_OPTIONS_H__ +#define __BBO_PADV2_OPTIONS_H__ + +#include +#include + +namespace tflite2circle +{ + +flatbuffers::Offset +build_circle_PadV2Options(flatbuffers::FlatBufferBuilder &fb, const tflite::Operator *op); + +} // namespace tflite2circle + +#endif // __BBO_PADV2_OPTIONS_H__ diff --git a/compiler/tflite2circle/src/TFLBuiltinOptions.lst b/compiler/tflite2circle/src/TFLBuiltinOptions.lst index 3ef9f15..a2a1453 100644 --- a/compiler/tflite2circle/src/TFLBuiltinOptions.lst +++ b/compiler/tflite2circle/src/TFLBuiltinOptions.lst @@ -26,6 +26,7 @@ TFL_BUILTIN_OPTIONS(SpaceToDepthOptions) //TFL_BUILTIN_OPTIONS(EmbeddingLookupSparseOptions) TFL_BUILTIN_OPTIONS(MulOptions) TFL_BUILTIN_OPTIONS(PadOptions) +TFL_BUILTIN_OPTIONS(PadV2Options) TFL_BUILTIN_OPTIONS(GatherOptions) TFL_BUILTIN_OPTIONS(BatchToSpaceNDOptions) TFL_BUILTIN_OPTIONS(SpaceToBatchNDOptions) @@ -99,7 +100,7 @@ TFL_BUILTIN_OPTIONS(MatrixSetDiagOptions) TFL_BUILTIN_OPTIONS(IfOptions) TFL_BUILTIN_OPTIONS(WhileOptions) TFL_BUILTIN_OPTIONS(DepthToSpaceOptions) -//TFL_BUILTIN_OPTIONS(NonMaxSuppressionV4Options) +TFL_BUILTIN_OPTIONS(NonMaxSuppressionV4Options) //TFL_BUILTIN_OPTIONS(NonMaxSuppressionV5Options) TFL_BUILTIN_OPTIONS(RankOptions) TFL_BUILTIN_OPTIONS(ScatterNdOptions) diff --git a/compiler/vconone/CMakeLists.txt b/compiler/vconone/CMakeLists.txt new file mode 100644 index 0000000..b8cb793 --- /dev/null +++ b/compiler/vconone/CMakeLists.txt @@ -0,0 +1,31 @@ +if (NOT VCONONE_VERSION) + set(VCONONE_VERSION 0x0000000000080001) + # NOTE order is [build patch minor major] + # if VCONONE_VERSION is set with -D option, it will be cached + # you may have to remove cache file if you remove -D option +endif() + +configure_file(version_cfg.h.in version_cfg.h @ONLY) + +set(DRIVER "driver/driver.cpp") + +file(GLOB_RECURSE SOURCES "src/*.cpp") +file(GLOB_RECURSE TESTS "src/*.test.cpp") +list(REMOVE_ITEM SOURCES ${TESTS}) + +add_library(vconone STATIC ${SOURCES}) +target_include_directories(vconone PUBLIC include) +target_include_directories(vconone PUBLIC ${CMAKE_CURRENT_BINARY_DIR}) + +add_executable(one-version ${DRIVER}) +target_link_libraries(one-version vconone) +install(TARGETS one-version DESTINATION bin) + +if(NOT ENABLE_TEST) + return() +endif(NOT ENABLE_TEST) + +nnas_find_package(GTest REQUIRED) + +GTest_AddTest(vconone_test ${TESTS}) +target_link_libraries(vconone_test vconone) diff --git a/compiler/vconone/README.md b/compiler/vconone/README.md new file mode 100644 index 0000000..c08dd63 --- /dev/null +++ b/compiler/vconone/README.md @@ -0,0 +1,14 @@ +# vconone + +_vconone_ provides version number and strings for one-* commands and command +line tools + +# Revise version number + +To revise version number, update `VCONONE_VERSION` in `CmakeLists.txt` +or give `-DVCONONE_VERSION=0x0000000100080001` at cmake configure step. + +Number given is four numbers `build`, `patch`, `minor` and `major` in order for +each 16bit integers. `build` is not used for now. 
+ +`0x0000000100080001` version is interpretered as `1.8.1` diff --git a/compiler/vconone/driver/driver.cpp b/compiler/vconone/driver/driver.cpp new file mode 100644 index 0000000..12bd0ee --- /dev/null +++ b/compiler/vconone/driver/driver.cpp @@ -0,0 +1,36 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include + +#include +#include + +int main(int argc, char *argv[]) +{ + auto str = vconone::get_string(); + if (argc >= 2) + { + for (int c = 1; c < argc; ++c) + std::cout << argv[c] << " "; + std::cout << "version " << str << std::endl; + std::cout << vconone::get_copyright() << std::endl; + } + else + std::cout << str; + + return 0; +} diff --git a/compiler/vconone/include/vconone/vconone.h b/compiler/vconone/include/vconone/vconone.h new file mode 100644 index 0000000..a6a1998 --- /dev/null +++ b/compiler/vconone/include/vconone/vconone.h @@ -0,0 +1,61 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __VCON_ONE_H__ +#define __VCON_ONE_H__ + +#include +#include + +namespace vconone +{ + +struct four +{ + uint16_t major; + uint16_t minor; + uint16_t patch; + uint16_t build; // build is not used for now +}; + +union version { + uint64_t v; + four f; +}; + +/** + * @brief get_number will return version union structure + */ +version get_number(void); + +/** + * @brief get_string will return string of major.minor.patch (without build) + */ +std::string get_string(void); + +/** + * @brief get_string4 will return string of major.minor.patch.build + */ +std::string get_string4(void); + +/** + * @brief get_copyright will return copyright string + */ +std::string get_copyright(void); + +} // namespace vconone + +#endif // __VCON_ONE_H__ diff --git a/compiler/vconone/src/version.cpp b/compiler/vconone/src/version.cpp new file mode 100644 index 0000000..9b693c6 --- /dev/null +++ b/compiler/vconone/src/version.cpp @@ -0,0 +1,63 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "vconone/vconone.h" + +#include "version_cfg.h" + +#include + +namespace vconone +{ + +version get_number(void) +{ + version v; + v.v = VCONONE_VERSION; + return v; +} + +std::string get_string4(void) +{ + std::ostringstream ss; + + auto v = get_number(); + ss << unsigned(v.f.major) << "." << unsigned(v.f.minor) << "." << unsigned(v.f.patch) << "." + << unsigned(v.f.build); + + return ss.str(); +} + +std::string get_string(void) +{ + std::ostringstream ss; + + auto v = get_number(); + ss << unsigned(v.f.major) << "." << unsigned(v.f.minor) << "." << unsigned(v.f.patch); + + return ss.str(); +} + +std::string get_copyright(void) +{ + std::string str; + str = "Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved\r\n"; + str += "Licensed under the Apache License, Version 2.0\r\n"; + str += "https://github.com/Samsung/ONE"; + return str; +} + +} // namespace vconone diff --git a/compiler/vconone/src/version.test.cpp b/compiler/vconone/src/version.test.cpp new file mode 100644 index 0000000..35a0647 --- /dev/null +++ b/compiler/vconone/src/version.test.cpp @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include + +#include + +TEST(vconone, version_number) +{ + auto v = vconone::get_number(); + + ASSERT_NE(0x0000000000000000ULL, v.v); +} + +TEST(vconone, version_string) +{ + auto str = vconone::get_string(); + + ASSERT_NE("..", str); + ASSERT_NE("", str); +} + +TEST(vconone, version_string4) +{ + auto str = vconone::get_string4(); + + ASSERT_NE("...", str); + ASSERT_NE("", str); +} + +TEST(vconone, copyright) +{ + auto str = vconone::get_copyright(); + + ASSERT_NE("", str); +} diff --git a/compiler/vconone/version_cfg.h.in b/compiler/vconone/version_cfg.h.in new file mode 100644 index 0000000..aa3ad9e --- /dev/null +++ b/compiler/vconone/version_cfg.h.in @@ -0,0 +1,22 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
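[Editor's note on the VCONONE_VERSION layout] To make the version encoding shown above concrete, the sketch below packs the same four 16-bit fields by shifting, which matches the union-based decoding in vconone on a little-endian build (major in the lowest 16 bits, then minor, patch, and the currently unused build). Under that layout the default `0x0000000000080001` decodes to `1.8.0` and the README's `0x0000000100080001` to `1.8.1`. This is an illustrative standalone program, not code from the patch.

```cpp
// Sketch only: decoding VCONONE_VERSION by bit shifting, equivalent to the
// vconone::version union on little-endian targets.
#include <cstdint>
#include <iostream>
#include <sstream>
#include <string>

std::string decode(uint64_t v)
{
  const uint16_t major = static_cast<uint16_t>(v >> 0);
  const uint16_t minor = static_cast<uint16_t>(v >> 16);
  const uint16_t patch = static_cast<uint16_t>(v >> 32);
  // The highest 16 bits hold the build number, which is not used for now.

  std::ostringstream ss;
  ss << major << "." << minor << "." << patch;
  return ss.str();
}

int main()
{
  std::cout << decode(0x0000000000080001ULL) << std::endl; // prints 1.8.0 (default in CMakeLists.txt)
  std::cout << decode(0x0000000100080001ULL) << std::endl; // prints 1.8.1 (example from the README)
  return 0;
}
```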
+ */ + +#ifndef __VCON_ONE_VERSION_CFG_H__ +#define __VCON_ONE_VERSION_CFG_H__ + +#define VCONONE_VERSION @VCONONE_VERSION@ULL + +#endif // __VCON_ONE_VERSION_CFG_H__ diff --git a/compute/ARMComputeEx/arm_compute/core/CL/kernels/CLArgOperationKernel.h b/compute/ARMComputeEx/arm_compute/core/CL/kernels/CLArgOperationKernel.h deleted file mode 100644 index 9699b5c..0000000 --- a/compute/ARMComputeEx/arm_compute/core/CL/kernels/CLArgOperationKernel.h +++ /dev/null @@ -1,124 +0,0 @@ -/* - * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * Copyright (c) 2016-2018 ARM Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -/** - * @file CLArgOperationKernel.h - * @brief This file defines CLArgOperationKernel - * @ingroup COM_AI_RUNTIME - */ - -#ifndef __ARM_COMPUTE_CLARGOPERATIONKERNEL_H__ -#define __ARM_COMPUTE_CLARGOPERATIONKERNEL_H__ - -#include "arm_compute/core/CL/ICLKernel.h" -#include "arm_compute/core/TypesEx.h" - -namespace arm_compute -{ -class ICLTensor; - -/** - * @brief Class to define interface for the argop kernel. - */ -class CLArgOperationKernel : public ICLKernel -{ -public: - /** - * @brief Default constructor. - */ - CLArgOperationKernel(); - /** - * @brief Prevent instances of this class from being copied (As this class contains pointers). - * @param [in] copiedInstance Const reference of CLArgOperationKernel to be copied - */ - CLArgOperationKernel(const CLArgOperationKernel &) = delete; - /** - * @brief Prevent instances of this class from being copied (As this class contains pointers). 
- * @param [in] copiedInstance Const reference of CLArgOperationKernel to be copied - * @return Reference of this instance - */ - CLArgOperationKernel &operator=(const CLArgOperationKernel &) = delete; - /** - * @brief Allow instances of this class to be moved - * @param [in] movedInstance Rvalue reference of CLArgOperationKernel to be moved - */ - CLArgOperationKernel(CLArgOperationKernel &&) = default; - /** - * @brief Allow instances of this class to be moved - * @param [in] movedInstance Rvalue reference of CLArgOperationKernel to be moved - * @return Reference of this instance - */ - CLArgOperationKernel &operator=(CLArgOperationKernel &&) = default; - /** - * @brief Initialise the kernel's input, output and border mode. - * @param[in] input An input tensor. Data types supported: U8/QASYMM8/S32/F32. - * @param[out] output The output tensor, Data types supported: S32. - * @param[in] axis Axis along which to reduce. It must be sorted and no duplicates. - * @param[in] op Arg operation to perform. - * return N/A - */ - void configure(const ICLTensor *input, ICLTensor *output, const uint32_t axis, ArgOperation op); - /** - * @brief Static function to check if given info will lead to a valid configuration of @ref - * CLArgOperationKernel - * @param[in] input An input tensor info. Data types supported: U8/QASYMM8/S32/F32. - * @param[in] output The output tensor info, Data types supported: S32. - * @param[in] axis Axis along which to reduce. It must be sorted and no duplicates. - * @param[in] op Arg operation to perform. - * @return a status - */ - static Status validate(const ITensorInfo *input, const ITensorInfo *output, const uint32_t axis, - ArgOperation op); - - /* - * @brief Run CLArgOperationKernel op - * @param[in] window Window to be used for in_slice - * @param[in] queue cl::CommandQueue - * @return N/A - */ - void run(const Window &window, cl::CommandQueue &queue) override; - -private: - const ICLTensor *_input; - ICLTensor *_output; - uint32_t _axis; -}; -} // namespace arm_compute -#endif /*__ARM_COMPUTE_CLARGOPERATIONKERNEL_H__ */ diff --git a/compute/ARMComputeEx/arm_compute/core/CL/kernels/CLCastKernel.h b/compute/ARMComputeEx/arm_compute/core/CL/kernels/CLCastKernel.h deleted file mode 100644 index b0357fe..0000000 --- a/compute/ARMComputeEx/arm_compute/core/CL/kernels/CLCastKernel.h +++ /dev/null @@ -1,121 +0,0 @@ -/* - * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * Copyright (c) 2016-2018 ARM Limited. 
- * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -/** - * @file CLCastKernel.h - * @ingroup COM_AI_RUNTIME - * @brief This file defines CLCastKernel class - */ - -#ifndef __ARM_COMPUTE_CLCASTKERNEL_H__ -#define __ARM_COMPUTE_CLCASTKERNEL_H__ - -#include "arm_compute/core/CL/ICLKernel.h" -#include "arm_compute/core/TypesEx.h" - -namespace arm_compute -{ -class ICLTensor; - -/** - * @brief Class to define OpenCL kernel for cast operation - */ -class CLCastKernel : public ICLKernel -{ -public: - /** - * @brief Construct CLCastKernel object - */ - CLCastKernel(); - - /** - * @brief Prevent instances of this class from being copied (As this class contains pointers) - */ - CLCastKernel(const CLCastKernel &) = delete; - - /** - * @brief Prevent instances of this class from being copied (As this class contains pointers) - */ - CLCastKernel &operator=(const CLCastKernel &) = delete; - - /** - * @brief Construct CLCastKernel object using default move constructor - * @param[in] CLCastKernel object to move - */ - CLCastKernel(CLCastKernel &&) = default; - - /** - * @brief Allow instances of this class to be moved - * @param[in] CLCastKernel object to move - */ - CLCastKernel &operator=(CLCastKernel &&) = default; - - /** - * @brief Destruct this CLCastKernel object - */ - ~CLCastKernel() = default; - - /** - * @brief Initialise the kernel's input and output. - * @param[in] input Input tensor. Data types supported: U8/QASYMM8/S16/S32/F16/F32. - * @param[in] output Output tensor. Data types supported: U8/QASYMM8/S16/S32/F16/F32. - * @param[in] input_subtype Sub data type of input. - * @return N/A - */ - void configure(const ICLTensor *input, ICLTensor *output, SubDataType input_subtype); - - /** - * @brief Enqueue the OpenCL kernel to process the given window on the passed OpenCL command - * queue. - * @note The queue is *not* flushed by this method, and therefore the kernel will not have - * been executed by the time this method returns. - * @param[in] window Region on which to execute the kernel. (Must be a valid region of - * the window returned by window()). 
- * @param[in,out] queue Command queue on which to enqueue the kernel.@return N/A - * @return N/A - */ - void run(const Window &window, cl::CommandQueue &queue) override; - -private: - const ICLTensor *_input; /**< Source tensor */ - ICLTensor *_output; /**< Destination tensor */ -}; -} // namespace arm_compute -#endif /* __ARM_COMPUTE_CLCASTKERNEL_H__ */ diff --git a/compute/ARMComputeEx/arm_compute/core/CL/kernels/CLDepthToSpaceKernel.h b/compute/ARMComputeEx/arm_compute/core/CL/kernels/CLDepthToSpaceKernel.h deleted file mode 100644 index 8615cf1..0000000 --- a/compute/ARMComputeEx/arm_compute/core/CL/kernels/CLDepthToSpaceKernel.h +++ /dev/null @@ -1,82 +0,0 @@ -/* - * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * Copyright (c) 2016-2018 ARM Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#ifndef __ARM_COMPUTE_CLDEPTHTOSPACEKERNEL_H__ -#define __ARM_COMPUTE_CLDEPTHTOSPACEKERNEL_H__ - -#include "arm_compute/core/CL/ICLKernel.h" - -namespace arm_compute -{ -class ICLTensor; - -/** OpenCL kernel to perform depthTospace operation */ -class CLDepthToSpaceKernel : public ICLKernel -{ -public: - /** Default constructor */ - CLDepthToSpaceKernel(); - /** Prevent instances of this class from being copied (As this class contains pointers) */ - CLDepthToSpaceKernel(const CLDepthToSpaceKernel &) = delete; - /** Prevent instances of this class from being copied (As this class contains pointers) */ - CLDepthToSpaceKernel &operator=(const CLDepthToSpaceKernel &) = delete; - /** Allow instances of this class to be moved */ - CLDepthToSpaceKernel(CLDepthToSpaceKernel &&) = default; - /** Allow instances of this class to be moved */ - CLDepthToSpaceKernel &operator=(CLDepthToSpaceKernel &&) = default; - /** Default destructor */ - ~CLDepthToSpaceKernel() = default; - /** Initialise the kernel's input and output. 
- * - * @param[in] input Input tensor. Data types supported: U8/QASYMM8/S16/S32/F16/F32. - * @param[in] output Output tensor. Data types supported: U8/QASYMM8/S16/S32/F16/F32. - */ - void configure(const ICLTensor *input, ICLTensor *output, const int32_t block_size); - - // Inherited methods overridden: - void run(const Window &window, cl::CommandQueue &queue) override; - -private: - const ICLTensor *_input; /**< Source tensor */ - ICLTensor *_output; /**< Destination tensor */ -}; - -} // namespace arm_compute -#endif /* __ARM_COMPUTE_CLDEPTHTOSPACEKERNEL_H__ */ diff --git a/compute/ARMComputeEx/arm_compute/core/CL/kernels/CLGEMMLowpMatrixMultiplyKernelEx.h b/compute/ARMComputeEx/arm_compute/core/CL/kernels/CLGEMMLowpMatrixMultiplyKernelEx.h deleted file mode 100644 index 9321c36..0000000 --- a/compute/ARMComputeEx/arm_compute/core/CL/kernels/CLGEMMLowpMatrixMultiplyKernelEx.h +++ /dev/null @@ -1,117 +0,0 @@ -/* - * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * Copyright (c) 2017-2019 ARM Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - -#ifndef __ARM_COMPUTE_CLGEMMLOWPMATRIXMULTIPLYKERNELEX_H__ -#define __ARM_COMPUTE_CLGEMMLOWPMATRIXMULTIPLYKERNELEX_H__ - -#include "arm_compute/core/CL/ICLKernel.h" - -namespace arm_compute -{ -class ICLTensor; - -/** OpenCL kernel to multiply matrices - * - * @note This kernel should be used ONLY for Midgard architectures - * - * This kernel performs the following computation: - * - * -# Convert a values from int8 to int32 - * -# Convert b values from int8 to int32 - * -# Compute the int32 matrix product of the resulting a * b and store the result as int32 - * - */ -class CLGEMMLowpMatrixMultiplyKernelEx : public ICLKernel -{ -public: - /** Default Constructor */ - CLGEMMLowpMatrixMultiplyKernelEx(); - /** Prevent instances of this class from being copied (As this class contains pointers) */ - CLGEMMLowpMatrixMultiplyKernelEx(const CLGEMMLowpMatrixMultiplyKernelEx &) = delete; - /** Prevent instances of this class from being copied (As this class contains pointers) */ - CLGEMMLowpMatrixMultiplyKernelEx &operator=(const CLGEMMLowpMatrixMultiplyKernelEx &) = delete; - /** Allow instances of this class to be moved */ - CLGEMMLowpMatrixMultiplyKernelEx(CLGEMMLowpMatrixMultiplyKernelEx &&) = default; - /** Allow instances of this class to be moved */ - CLGEMMLowpMatrixMultiplyKernelEx &operator=(CLGEMMLowpMatrixMultiplyKernelEx &&) = default; - /** Initialise the kernel's input and output. - * - * @note This kernel should be used ONLY for Midgard architectures - * - * @param[in] input0 Input tensor containing the LHS matrix. Data type supported: QASYMM8 - * @param[in] input1 Input tensor containing the RHS matrix. Data type supported: same as @p - * input0 - * @param[out] output Output tensor to store the result of matrix multiplication. Data type - * supported: S32 - * @param[in] gemm_info (Optional) GEMM information used to retrieve the original dimensions of - * the input matrices - */ - void configure(const ICLTensor *input0, const ICLTensor *input1, ICLTensor *output, - const GEMMReshapeInfo &gemm_info = GEMMReshapeInfo()); - /** Static function to check if given info will lead to a valid configuration of @ref - * CLGEMMLowpMatrixMultiplyKernelEx - * - * @param[in] input0 Input tensor containing the LHS matrix. Data type supported: QASYMM8 - * @param[in] input1 Input tensor containing the RHS matrix. Data type supported: same as @p - * input0 - * @param[in] output Output tensor to store the result of matrix multiplication. Data type - * supported: S32 - * @param[in] gemm_info (Optional) GEMM information used to retrieve the original dimensions of - * the input matrices - * - * @return a status - */ - static Status validate(const ITensorInfo *input0, const ITensorInfo *input1, - const ITensorInfo *output, - const GEMMReshapeInfo &gemm_info = GEMMReshapeInfo()); - - // Inherited methods overridden: - void run(const Window &window, cl::CommandQueue &queue) override; - -private: - const ICLTensor *_input0; - const ICLTensor *_input1; - ICLTensor *_output; - bool _slide_matrix_b; - bool _reinterpret_input_as_3d; - bool _reinterpret_output_as_3d; -}; -} // namespace arm_compute -#endif /*__ARM_COMPUTE_CLGEMMLOWPMATRIXMULTIPLYKERNELEX_H__*/ diff --git a/compute/ARMComputeEx/arm_compute/core/CL/kernels/CLPReLUKernel.h b/compute/ARMComputeEx/arm_compute/core/CL/kernels/CLPReLUKernel.h deleted file mode 100644 index dd2dbf6..0000000 --- a/compute/ARMComputeEx/arm_compute/core/CL/kernels/CLPReLUKernel.h +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Copyright (c) 2018 Samsung Electronics Co., Ltd. 
All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * Copyright (c) 2016-2018 ARM Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#ifndef __ARM_COMPUTE_CLPRELU_KERNEL_H__ -#define __ARM_COMPUTE_CLPRELU_KERNEL_H__ - -#include "arm_compute/core/CL/ICLKernel.h" - -namespace arm_compute -{ -class ICLTensor; - -/** OpenCL kernel to calculate PReLU*/ -class CLPReLUKernel : public ICLKernel -{ -public: - /** Default constructor */ - CLPReLUKernel(); - /** Prevent instances of this class from being copied (As this class contains pointers). */ - CLPReLUKernel(const CLPReLUKernel &) = delete; - /** Prevent instances of this class from being copied (As this class contains pointers). */ - CLPReLUKernel &operator=(const CLPReLUKernel &) = delete; - /** Allow instances of this class to be moved */ - CLPReLUKernel(CLPReLUKernel &&) = default; - /** Allow instances of this class to be moved */ - CLPReLUKernel &operator=(CLPReLUKernel &&) = default; - /** Initialize the kernel's input, output. - * - * @param[in] input Source tensor1. - * @param[in] alpha Source tensor2. - * @param[out] output Output tensor. - */ - void configure(const ICLTensor *input, const ICLTensor *alpha, ICLTensor *output); - - // Inherited methods overridden: - void run(const Window &window, cl::CommandQueue &queue) override; - - BorderSize border_size() const override; - -private: - const ICLTensor *_input; - const ICLTensor *_alpha; - ICLTensor *_output; -}; -} // namespace arm_compute -#endif /*__ARM_COMPUTE_CLPRELU_KERNEL_H__ */ diff --git a/compute/ARMComputeEx/arm_compute/core/CL/kernels/CLSpaceToDepthKernel.h b/compute/ARMComputeEx/arm_compute/core/CL/kernels/CLSpaceToDepthKernel.h deleted file mode 100644 index 4c0a82c..0000000 --- a/compute/ARMComputeEx/arm_compute/core/CL/kernels/CLSpaceToDepthKernel.h +++ /dev/null @@ -1,82 +0,0 @@ -/* - * Copyright (c) 2018 Samsung Electronics Co., Ltd. 
All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * Copyright (c) 2016-2018 ARM Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#ifndef __ARM_COMPUTE_CLSPACETODEPTHKERNEL_H__ -#define __ARM_COMPUTE_CLSPACETODEPTHKERNEL_H__ - -#include "arm_compute/core/CL/ICLKernel.h" - -namespace arm_compute -{ -class ICLTensor; - -/** OpenCL kernel to perform spaceTodepth operation */ -class CLSpaceToDepthKernel : public ICLKernel -{ -public: - /** Default constructor */ - CLSpaceToDepthKernel(); - /** Prevent instances of this class from being copied (As this class contains pointers) */ - CLSpaceToDepthKernel(const CLSpaceToDepthKernel &) = delete; - /** Prevent instances of this class from being copied (As this class contains pointers) */ - CLSpaceToDepthKernel &operator=(const CLSpaceToDepthKernel &) = delete; - /** Allow instances of this class to be moved */ - CLSpaceToDepthKernel(CLSpaceToDepthKernel &&) = default; - /** Allow instances of this class to be moved */ - CLSpaceToDepthKernel &operator=(CLSpaceToDepthKernel &&) = default; - /** Default destructor */ - ~CLSpaceToDepthKernel() = default; - /** Initialise the kernel's input and output. - * - * @param[in] input Input tensor. Data types supported: U8/QASYMM8/S16/S32/F16/F32. - * @param[in] output Output tensor. Data types supported: U8/QASYMM8/S16/S32/F16/F32. 
- */ - void configure(const ICLTensor *input, ICLTensor *output, const int32_t block_size); - - // Inherited methods overridden: - void run(const Window &window, cl::CommandQueue &queue) override; - -private: - const ICLTensor *_input; /**< Source tensor */ - ICLTensor *_output; /**< Destination tensor */ -}; - -} // namespace arm_compute -#endif /* __ARM_COMPUTE_CLSPACETODEPTHKERNEL_H__ */ diff --git a/compute/ARMComputeEx/arm_compute/core/CL/kernels/CLTransposeConvLayerUpsampleKernel.h b/compute/ARMComputeEx/arm_compute/core/CL/kernels/CLTransposeConvLayerUpsampleKernel.h deleted file mode 100644 index 9d174de..0000000 --- a/compute/ARMComputeEx/arm_compute/core/CL/kernels/CLTransposeConvLayerUpsampleKernel.h +++ /dev/null @@ -1,109 +0,0 @@ -/* - * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * Copyright (c) 2016-2018 ARM Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#ifndef __ARM_COMPUTE_CLTRANSPOSECONVLAYERUPSAMPLEKERNEL_H__ -#define __ARM_COMPUTE_CLTRANSPOSECONVLAYERUPSAMPLEKERNEL_H__ - -#include "arm_compute/core/CL/ICLKernel.h" - -namespace arm_compute -{ -class ICLTensor; - -/** Interface for the Upsampling layer kernel for transpose convolution on OpenCL. - */ -class CLTransposeConvLayerUpsampleKernel : public ICLKernel -{ -public: - /** Constructor */ - CLTransposeConvLayerUpsampleKernel(); - /** Prevent instances of this class from being copied (As this class contains pointers) */ - CLTransposeConvLayerUpsampleKernel(const CLTransposeConvLayerUpsampleKernel &) = delete; - /** Prevent instances of this class from being copied (As this class contains pointers) */ - CLTransposeConvLayerUpsampleKernel & - operator=(const CLTransposeConvLayerUpsampleKernel &) = delete; - /** Default Move Constructor. 
*/ - CLTransposeConvLayerUpsampleKernel(CLTransposeConvLayerUpsampleKernel &&) = default; - /** Default move assignment operator */ - CLTransposeConvLayerUpsampleKernel &operator=(CLTransposeConvLayerUpsampleKernel &&) = default; - /** Default destructor */ - ~CLTransposeConvLayerUpsampleKernel() = default; - - /** Initialise the kernel's input and output. - * - * @param[in] input Source tensor. Data types supported: QASYMM8/F16/F32. - * @param[out] output Destination tensor. Data types supported: same as @p input. All but - * the lowest two dimensions must be the same size as in the input tensor, i.e. scaling is only - * performed within the XY-plane. - * @param[in] inner_border Top and right inner border sizes. These rows and columns will be - * filled with zero. - * @param[in] info Contains padding and stride information described in @ref - * PadStrideInfo. - */ - void configure(const ICLTensor *input, ICLTensor *output, const BorderSize &inner_border, - const PadStrideInfo &info); - /** Static function to check if given info will lead to a valid configuration of @ref - * CLTransposeConvLayerUpsample - * - * @param[in] input Source tensor info. Data types supported: QASYMM8/F16/F32. - * @param[in] output Destination tensor info. Data types supported: same as @p input. All - * but the lowest two dimensions must be the same size as in the input tensor, i.e. scaling is - * only performed within the XY-plane. - * @param[in] inner_border Top and right inner border sizes. These rows and columns will be filled - * with zero. - * @param[in] info Contains padding and stride information described in @ref - * PadStrideInfo. - * - * @return a status - */ - static Status validate(const ITensorInfo *input, const ITensorInfo *output, - const BorderSize &inner_border, const PadStrideInfo &info); - - // Inherited methods overridden: - void run(const Window &window, cl::CommandQueue &queue) override; - -private: - const ICLTensor *_input; - ICLTensor *_output; - BorderSize _inner_border; - PadStrideInfo _info; -}; -} // namespace arm_compute -#endif /*__ARM_COMPUTE_CLTRANSPOSECONVLAYERUPSAMPLEKERNEL_H__ */ diff --git a/compute/ARMComputeEx/arm_compute/core/CPP/kernels/CPPUpsampleKernelEx.h b/compute/ARMComputeEx/arm_compute/core/CPP/kernels/CPPUpsampleKernelEx.h deleted file mode 100644 index d4c9c61..0000000 --- a/compute/ARMComputeEx/arm_compute/core/CPP/kernels/CPPUpsampleKernelEx.h +++ /dev/null @@ -1,88 +0,0 @@ -/* - * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * Copyright (c) 2017-2019 ARM Limited. 
- * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#ifndef __ARM_COMPUTE_CPPUPSAMPLEKERNEL_EX_H__ -#define __ARM_COMPUTE_CPPUPSAMPLEKERNEL_EX_H__ - -#include "arm_compute/core/CPP/ICPPKernel.h" - -namespace arm_compute -{ -class ITensor; - -/** CPP kernel to perform tensor upsample. - * - */ -class CPPUpsampleKernelEx : public ICPPKernel -{ -public: - const char *name() const override { return "CPPUpsampleKernelEx"; } - /** Default constructor */ - CPPUpsampleKernelEx(); - /** Prevent instances of this class from being copied (As this class contains pointers) */ - CPPUpsampleKernelEx(const CPPUpsampleKernelEx &) = delete; - /** Prevent instances of this class from being copied (As this class contains pointers) */ - CPPUpsampleKernelEx &operator=(const CPPUpsampleKernelEx &) = delete; - /** Allow instances of this class to be moved */ - CPPUpsampleKernelEx(CPPUpsampleKernelEx &&) = default; - /** Allow instances of this class to be moved */ - CPPUpsampleKernelEx &operator=(CPPUpsampleKernelEx &&) = default; - /** Default destructor */ - ~CPPUpsampleKernelEx() = default; - - /** Set the input and output of the kernel. - * - * @param[in] input The input tensor to upsample. Data types supported: F32/F16/QASYMM8 - * @param[out] output The output tensor. Data types supported: Same as @p input - * @param[in] info Padding info. - */ - void configure(const ITensor *input, ITensor *output, const PadStrideInfo &info); - - // Inherited methods overridden: - void run(const Window &window, const ThreadInfo &info) override; - bool is_parallelisable() const override; - -private: - const ITensor *_input; - ITensor *_output; - PadStrideInfo _info; -}; -} // namespace arm_compute -#endif /*__ARM_COMPUTE_CPPUPSAMPLEKERNEL_EX_H__ */ diff --git a/compute/ARMComputeEx/arm_compute/core/NEON/kernels/NECastKernel.h b/compute/ARMComputeEx/arm_compute/core/NEON/kernels/NECastKernel.h deleted file mode 100644 index 4e9f097..0000000 --- a/compute/ARMComputeEx/arm_compute/core/NEON/kernels/NECastKernel.h +++ /dev/null @@ -1,96 +0,0 @@ -/* - * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * Copyright (c) 2017-2019 ARM Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#ifndef __ARM_COMPUTE_NECASTKERNEL_H__ -#define __ARM_COMPUTE_NECASTKERNEL_H__ - -#include "arm_compute/core/NEON/INEKernel.h" -#include "arm_compute/core/TypesEx.h" - -namespace arm_compute -{ -class ITensor; - -/** Interface for the cast layer kernel. */ -class NECastKernel : public INEKernel -{ -public: - const char *name() const override { return "NECastKernel"; } - /** Default constructor */ - NECastKernel(); - /** Prevent instances of this class from being copied (As this class contains pointers) */ - NECastKernel(const NECastKernel &) = delete; - /** Prevent instances of this class from being copied (As this class contains pointers) */ - NECastKernel &operator=(const NECastKernel &) = delete; - /** Default Move Constructor. */ - NECastKernel(NECastKernel &&) = default; - /** Default move assignment operator */ - NECastKernel &operator=(NECastKernel &&) = default; - /** Default destructor */ - ~NECastKernel() = default; - /** Set input, output tensors. - * - * @param[in] input Source tensor. Data type supported: U8/S8/QASYMM8/U32/S32/F32. - * @param[out] output Destination tensor with the same dimensions of input. Data type supported: - * U8/S8/QASYMM8/U32/S32/F32. - * @param[in] input_subtype Sub data type of input. - */ - void configure(const ITensor *input, ITensor *output, SubDataType input_subtype); - /** Static function to check if given info will lead to a valid configuration of @ref NECastKernel - * - * @param[in] input Input tensor info. Data types supported: U8/S8/QASYMM8/U32/S32/F32. - * @param[in] output Output tensor info. Data types supported: U8/S8/QASYMM8/U32/S32/F32. - * @param[in] input_subtype Sub data type of input. 
- * - * @return a status - */ - static Status validate(const ITensorInfo *input, const ITensorInfo *output, - SubDataType input_subtype); - - // Inherited methods overridden: - void run(const Window &window, const ThreadInfo &info) override; - -private: - const ITensor *_input; - ITensor *_output; - SubDataType _input_subtype; -}; -} // namespace arm_compute -#endif /*__ARM_COMPUTE_NECASTKERNEL_H__ */ diff --git a/compute/ARMComputeEx/arm_compute/core/NEON/kernels/NEDepthToSpaceLayerKernelEx.h b/compute/ARMComputeEx/arm_compute/core/NEON/kernels/NEDepthToSpaceLayerKernelEx.h deleted file mode 100644 index b62897e..0000000 --- a/compute/ARMComputeEx/arm_compute/core/NEON/kernels/NEDepthToSpaceLayerKernelEx.h +++ /dev/null @@ -1,96 +0,0 @@ -/* - * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * Copyright (c) 2019 ARM Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - -#ifndef __ARM_COMPUTE_NEDEPTHTOSPACELAYERKERNELEX_H__ -#define __ARM_COMPUTE_NEDEPTHTOSPACELAYERKERNELEX_H__ - -#include "arm_compute/core/NEON/INEKernel.h" - -namespace arm_compute -{ -class ITensor; - -/** Interface for the depth to space kernel */ -class NEDepthToSpaceLayerKernelEx : public INEKernel -{ -public: - const char *name() const override { return "NEDepthToSpaceLayerKernelEx"; } - /** Default constructor */ - NEDepthToSpaceLayerKernelEx(); - /** Prevent instances of this class from being copied (As this class contains pointers) */ - NEDepthToSpaceLayerKernelEx(const NEDepthToSpaceLayerKernelEx &) = delete; - /** Prevent instances of this class from being copied (As this class contains pointers) */ - NEDepthToSpaceLayerKernelEx &operator=(const NEDepthToSpaceLayerKernelEx &) = delete; - /** Allow instances of this class to be moved */ - NEDepthToSpaceLayerKernelEx(NEDepthToSpaceLayerKernelEx &&) = default; - /** Allow instances of this class to be moved */ - NEDepthToSpaceLayerKernelEx &operator=(NEDepthToSpaceLayerKernelEx &&) = default; - /** Default destructor */ - ~NEDepthToSpaceLayerKernelEx() = default; - /** Initialise the kernel's inputs and output. - * - * @param[in] input Tensor input. Supported tensor rank: 4. Data types supported: - * U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32. - * @param[out] output Tensor output. Data types supported: same as @p input - * @param[in] block_shape Block shape x value. - */ - void configure(const ITensor *input, ITensor *output, int32_t block_shape); - /** Static function to check if given info will lead to a valid configuration of @ref - * NEDepthToSpaceLayerKernelEx. - * - * @param[in] input Tensor input info. Supported tensor rank: 4. Data types supported: - * U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32. - * @param[in] output Tensor output info. Data types supported: same as @p input - * @param[in] block_shape Block shape value. - * - * @return a status - */ - static Status validate(const ITensorInfo *input, const ITensorInfo *output, int32_t block_shape); - - // Inherited methods overridden: - void run(const Window &window, const ThreadInfo &info) override; - -private: - const ITensor *_input; /**< Source tensor */ - ITensor *_output; /**< Destination tensor */ - int32_t _block_shape; /**< Block shape */ -}; -} // namespace arm_compute -#endif /* __ARM_COMPUTE_NEDEPTHTOSPACELAYERKERNELEX_H__ */ diff --git a/compute/ARMComputeEx/arm_compute/core/NEON/kernels/NEElementwiseUnaryKernelEx.h b/compute/ARMComputeEx/arm_compute/core/NEON/kernels/NEElementwiseUnaryKernelEx.h deleted file mode 100644 index 57de78d..0000000 --- a/compute/ARMComputeEx/arm_compute/core/NEON/kernels/NEElementwiseUnaryKernelEx.h +++ /dev/null @@ -1,118 +0,0 @@ -/* - * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * Copyright (c) 2018-2019 ARM Limited. 
- * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#ifndef __ARM_COMPUTE_NEELEMENTWISEUNARYKERNELEX_H__ -#define __ARM_COMPUTE_NEELEMENTWISEUNARYKERNELEX_H__ - -#include "arm_compute/core/NEON/INEKernel.h" -#include "arm_compute/core/TypesEx.h" - -namespace arm_compute -{ -class ITensor; - -/** Interface for an element-wise unary operation kernel - * - * Element-wise operation is computed by: - * @f[ output(x) = OP(input(x))@f] - * - */ -class NEElementwiseUnaryKernelEx : public INEKernel -{ -public: - const char *name() const override { return "NEElementwiseUnaryKernelEx"; } - /** Default constructor */ - NEElementwiseUnaryKernelEx(); - /** Prevent instances of this class from being copied (As this class contains pointers) */ - NEElementwiseUnaryKernelEx(const NEElementwiseUnaryKernelEx &) = delete; - /** Prevent instances of this class from being copied (As this class contains pointers) */ - NEElementwiseUnaryKernelEx &operator=(const NEElementwiseUnaryKernelEx &) = delete; - /** Allow instances of this class to be moved */ - NEElementwiseUnaryKernelEx(NEElementwiseUnaryKernelEx &&) = default; - /** Allow instances of this class to be moved */ - NEElementwiseUnaryKernelEx &operator=(NEElementwiseUnaryKernelEx &&) = default; - /** Default destructor */ - ~NEElementwiseUnaryKernelEx() = default; - - /** Static function to check if given info will lead to a valid configuration of @ref - * NEElementwiseUnaryKernelEx - * - * @param[in] op Arithmetic operation to be executed. - * @param[in] input First tensor input. Data types supported: F16/F32/S32. - * @param[in] output Output tensor. Data types supported: Same as @p input. - */ - void configure(ElementWiseUnaryEx op, const ITensor *input, ITensor *output); - - /** Static function to check if given info will lead to a valid configuration of @ref - * NEElementwiseUnaryKernelEx - * - * @param[in] op Arithmetic operation to be executed. - * @param[in] input First tensor input info. Data types supported: F16/F32/S32. - * @param[in] output Output tensor info. Data types supported: Same as @p input. - * - * @return a Status - */ - static Status validate(ElementWiseUnaryEx op, const ITensorInfo *input, - const ITensorInfo *output); - - // Inherited methods overridden: - void run(const Window &window, const ThreadInfo &info) override; - - /** Common signature for all the specialised arithmetic functions - * - * @param[in] input An input tensor. Data types supported: F16/F32/S32. 
- * @param[out] output The output tensor. Data types supported: Same as @p input. - * @param[in] window Region on which to execute the kernel. - */ - using ElementwiseUnaryFunction = void(const ITensor *input, ITensor *output, - const Window &window); - -protected: - // Inherited methods overridden: - static Status validate_arguments(const ITensorInfo &input, const ITensorInfo &output); - - /** Function to use for the particular tensor types passed to configure() */ - std::function _function; - - const ITensor *_input; - ITensor *_output; -}; -} // namespace arm_compute -#endif /* __ARM_COMPUTE_NEELEMENTWISEUNARYKERNELEX_H__ */ diff --git a/compute/ARMComputeEx/arm_compute/core/NEON/kernels/NEPReLUKernel.h b/compute/ARMComputeEx/arm_compute/core/NEON/kernels/NEPReLUKernel.h deleted file mode 100644 index 722efd3..0000000 --- a/compute/ARMComputeEx/arm_compute/core/NEON/kernels/NEPReLUKernel.h +++ /dev/null @@ -1,100 +0,0 @@ -/* - * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * Copyright (c) 2016-2018 ARM Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - -#ifndef __ARM_COMPUTE_NEPRELUKERNEL_H__ -#define __ARM_COMPUTE_NEPRELUKERNEL_H__ - -#include "arm_compute/core/NEON/INEKernel.h" - -namespace arm_compute -{ -class ITensor; - -/** Interface for the kernel to perform Parametric Rectified Linear Unit - * - * Result is computed by: - * @f[ output(x) = alpha * x for x < 0, output(x) = x for x >= 0 @f] - */ -class NEPReLUKernel : public INEKernel -{ -public: - const char *name() const override { return "NEPReLUKernel"; } - /** Default constructor */ - NEPReLUKernel(); - /** Prevent instances of this class from being copied (As this class contains pointers) */ - NEPReLUKernel(const NEPReLUKernel &) = delete; - /** Prevent instances of this class from being copied (As this class contains pointers) */ - NEPReLUKernel &operator=(const NEPReLUKernel &) = delete; - /** Allow instances of this class to be moved */ - NEPReLUKernel(NEPReLUKernel &&) = default; - /** Allow instances of this class to be moved */ - NEPReLUKernel &operator=(NEPReLUKernel &&) = default; - /** Initialise the kernel's inputs and output - * - * @param[in] input Input tensor. Data type supported: QASYMM8/F32 - * @param[in] alpha Alpha tensor. Data types supported: Same as @p input - * @param[out] output Output tensor. Data types supported: Same as @p input - */ - void configure(const ITensor *input, const ITensor *alpha, ITensor *output); - - // Inherited methods overridden: - void run(const Window &window, const ThreadInfo &info) override; - - /** Static function to check if given info will lead to a valid configuration of @ref - * NEPReLUKernel.h - * - * @param[in] input Input tensor input info. Data types supported: QASYMM8/F32. - * @param[in] alpha Alpha tensor input info. Data types supported: Same as @p input. - * @param[in] output Output tensor info. Data types supported: Same as @p input. - * - * @return a Status - */ - static Status validate(const ITensorInfo *input, const ITensorInfo *alpha, - const ITensorInfo *output); - static Status validate_arguments(const ITensorInfo &input, const ITensorInfo &alpha, - const ITensorInfo &output); - -private: - const ITensor *_input; /**< Source tensor */ - const ITensor *_alpha; /**< Alpha tensor */ - ITensor *_output; /**< Destination tensor */ -}; -} // namespace arm_compute -#endif /* __ARM_COMPUTE_NEPRELUKERNEL_H__ */ diff --git a/compute/ARMComputeEx/arm_compute/core/NEON/kernels/NESpaceToDepthLayerKernelEx.h b/compute/ARMComputeEx/arm_compute/core/NEON/kernels/NESpaceToDepthLayerKernelEx.h deleted file mode 100644 index 0ffcf6b..0000000 --- a/compute/ARMComputeEx/arm_compute/core/NEON/kernels/NESpaceToDepthLayerKernelEx.h +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * Copyright (c) 2019 ARM Limited. 
- * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#ifndef __ARM_COMPUTE_NESPACETODEPTHLAYERKERNELEX_H__ -#define __ARM_COMPUTE_NESPACETODEPTHLAYERKERNELEX_H__ - -#include "arm_compute/core/NEON/INEKernel.h" -#include "arm_compute/core/Types.h" - -namespace arm_compute -{ -class ITensor; - -/** Interface for the space to depth kernel */ -class NESpaceToDepthLayerKernelEx : public INEKernel -{ -public: - const char *name() const override { return "NESpaceToDepthLayerKernelEx"; } - /** Default constructor */ - NESpaceToDepthLayerKernelEx(); - /** Prevent instances of this class from being copied (As this class contains pointers) */ - NESpaceToDepthLayerKernelEx(const NESpaceToDepthLayerKernelEx &) = delete; - /** Prevent instances of this class from being copied (As this class contains pointers) */ - NESpaceToDepthLayerKernelEx &operator=(const NESpaceToDepthLayerKernelEx &) = delete; - /** Allow instances of this class to be moved */ - NESpaceToDepthLayerKernelEx(NESpaceToDepthLayerKernelEx &&) = default; - /** Allow instances of this class to be moved */ - NESpaceToDepthLayerKernelEx &operator=(NESpaceToDepthLayerKernelEx &&) = default; - /** Default destructor */ - ~NESpaceToDepthLayerKernelEx() = default; - /** Initialise the kernel's inputs and output. - * - * @param[in] input Tensor input. Supported tensor rank: 4. Data types supported: - * U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32. - * @param[out] output Tensor output. Data types supported: same as @p input - * @param[in] block_shape Block shape value - */ - void configure(const ITensor *input, ITensor *output, int32_t block_shape); - /** Static function to check if given info will lead to a valid configuration of @ref - * NESpaceToDepthLayerKernelEx - * - * @param[in] input Tensor input info. Supported tensor rank: 4. Data types supported: - * U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32. - * @param[in] output Tensor output info. 
Data types supported: same as @p input - * @param[in] block_shape Block shape value - * - * @return a status - */ - static Status validate(const ITensorInfo *input, const ITensorInfo *output, int32_t block_shape); - - // Inherited methods overridden: - void run(const Window &window, const ThreadInfo &info) override; - -private: - const ITensor *_input; /**< Source tensor */ - ITensor *_output; /**< Destination tensor */ - int32_t _block_shape; /**< Block shape */ -}; -} // namespace arm_compute -#endif /* __ARM_COMPUTE_NESPACETODEPTHLAYERKERNELEX_H__ */ diff --git a/compute/ARMComputeEx/arm_compute/runtime/CL/CLFunctionsEx.h b/compute/ARMComputeEx/arm_compute/runtime/CL/CLFunctionsEx.h index 97bc4ce..cfbd134 100644 --- a/compute/ARMComputeEx/arm_compute/runtime/CL/CLFunctionsEx.h +++ b/compute/ARMComputeEx/arm_compute/runtime/CL/CLFunctionsEx.h @@ -16,25 +16,14 @@ #ifndef __ARM_COMPUTE_CLFUNCTIONSEX_H__ #define __ARM_COMPUTE_CLFUNCTIONSEX_H__ -#include -#include #include -#include -#include #include #include #include #include #include -#include #include -#include -#include #include -#include -#include -#include -#include #include #include diff --git a/compute/ARMComputeEx/arm_compute/runtime/CL/functions/CLArgOperation.h b/compute/ARMComputeEx/arm_compute/runtime/CL/functions/CLArgOperation.h deleted file mode 100644 index c37096f..0000000 --- a/compute/ARMComputeEx/arm_compute/runtime/CL/functions/CLArgOperation.h +++ /dev/null @@ -1,129 +0,0 @@ -/* - * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * Copyright (c) 2017 ARM Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
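Note: as a rough reference for the space-to-depth rearrangement performed by NESpaceToDepthLayerKernelEx above, the following scalar sketch maps an input into an output whose spatial dimensions shrink by block_shape while the channel dimension grows by block_shape * block_shape. The NHWC layout and the channel ordering used here are assumptions for illustration only, not a statement about the kernel's layout handling.

// Assumed-NHWC scalar sketch of space-to-depth with a square block_shape (bs).
// Output shape: [N, H / bs, W / bs, C * bs * bs].
inline void space_to_depth_nhwc(const float *in, float *out, int n, int h, int w, int c, int bs)
{
  const int out_h = h / bs;
  const int out_w = w / bs;
  const int out_c = c * bs * bs;
  for (int b = 0; b < n; ++b)
    for (int y = 0; y < h; ++y)
      for (int x = 0; x < w; ++x)
        for (int ch = 0; ch < c; ++ch)
        {
          const int oy = y / bs;
          const int ox = x / bs;
          const int oc = ((y % bs) * bs + (x % bs)) * c + ch; // assumed ordering
          out[((b * out_h + oy) * out_w + ox) * out_c + oc] =
              in[((b * h + y) * w + x) * c + ch];
        }
}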
- */ - -/** - * @file CLArgOperation.h - * @ingroup COM_AI_RUNTIME - * @brief This file contains arm_compute::CLArgOperation class - */ - -#ifndef __ARM_COMPUTE_CLARGOPERATION_H__ -#define __ARM_COMPUTE_CLARGOPERATION_H__ - -#include "arm_compute/core/CL/kernels/CLArgOperationKernel.h" -#include "arm_compute/runtime/CL/CLTensor.h" -#include "arm_compute/runtime/IFunction.h" -#include "arm_compute/core/TypesEx.h" - -namespace arm_compute -{ -class ICLTensor; - -/** - * @brief Class to execute CLArgOperation operation - */ -class CLArgOperation : public IFunction -{ -public: - /** - * @brief Construct a new CLArgOperation object - */ - CLArgOperation(); - - /** - * @brief Prevent instances of this class from being copied (As this class contains pointers) - */ - CLArgOperation(const CLArgOperation &) = delete; - - /** - * @brief Prevent instances of this class from being copied (As this class contains pointers) - */ - CLArgOperation &operator=(const CLArgOperation &) = delete; - - /** - * @brief Construct a new CLArgOperation object by using copy constructor - * @param[in] CLArgOperation object to move - */ - CLArgOperation(CLArgOperation &&) = default; - - /** - * @brief Assign a CLArgOperation object. - * @param[in] CLArgOperation object to assign. This object will be moved. - */ - CLArgOperation &operator=(CLArgOperation &&) = default; - - /** - * @brief Initialise the kernel's inputs and outputs. - * @param[in] input Input tensor. Data types supported: U8/QASYMM8/S32/F32. - * @param[out] output The result of arg operation. Data types supported: S32. - * @param[in] axis Axis along which to reduce. It must be sorted and no duplicates. - * @param[in] op Arg operation to perform. - * @return N/A - */ - void configure(ICLTensor *input, ICLTensor *output, std::vector axis, ArgOperation op); - - /** - * @brief Static function to check if given info will lead to a valid configuration - * @param[in] input Input tensor. Data types supported: U8/QASYMM8/S32/F32. - * @param[in] axis Axis along which to reduce. It must be sorted and no duplicates. - * @param[out] output The result of arg operation. Data types supported: S32. - * @param[in] op Arg operation to perform. - * @return a status - */ - static Status validate(const ITensorInfo *input, const std::vector &axis, - const ITensorInfo *output, ArgOperation op); - /** - * @brief Run the OpenCL kernel for this operation - * @return N/A - */ - void run() override; - -private: - ICLTensor *_input{nullptr}; - ICLTensor *_output{nullptr}; - std::vector _axis{}; - ArgOperation _arg_op{ArgOperation::MAX}; - - std::unique_ptr _interm_tensors{nullptr}; - std::unique_ptr _argop_kernels{nullptr}; - size_t _num_of_kernels{0}; -}; -} -#endif /*__ARM_COMPUTE_CLARGOPERATION_H__ */ diff --git a/compute/ARMComputeEx/arm_compute/runtime/CL/functions/CLBatchToSpaceND.h b/compute/ARMComputeEx/arm_compute/runtime/CL/functions/CLBatchToSpaceND.h deleted file mode 100644 index eed5cb8..0000000 --- a/compute/ARMComputeEx/arm_compute/runtime/CL/functions/CLBatchToSpaceND.h +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
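Note: to make the arg-operation semantics documented for CLArgOperation above concrete, here is a minimal scalar sketch of an arg-max reduction over the innermost axis, producing S32 indices as the documentation states. Reduction over an arbitrary sorted axis list and the other ArgOperation variants follow the same pattern; none of this reflects the OpenCL kernel structure.

#include <cstdint>

// Scalar sketch: arg-max over the innermost axis of a 2-D [rows, cols] view.
// One int32 index is produced per row (output data type: S32, as documented).
inline void argmax_innermost(const float *input, int32_t *output, int rows, int cols)
{
  for (int r = 0; r < rows; ++r)
  {
    int32_t best_idx = 0;
    float best_val = input[r * cols];
    for (int col = 1; col < cols; ++col)
    {
      const float v = input[r * cols + col];
      if (v > best_val)
      {
        best_val = v;
        best_idx = col;
      }
    }
    output[r] = best_idx;
  }
}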
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * Copyright (c) 2016-2018 ARM Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#ifndef __ARM_COMPUTE_CLBATCH_TO_SPACE_ND_H__ -#define __ARM_COMPUTE_CLBATCH_TO_SPACE_ND_H__ - -#include "arm_compute/runtime/CL/ICLSimpleFunction.h" - -namespace arm_compute -{ -class ICLTensor; - -/** Basic function to run @ref CLBatchToSpaceNDKernel - * - * @note The tensor data type for the inputs must be U8/QASYMM8/S16/S32/F16/F32. - * @note The function converts the input tensor to the tensor of the output tensor's type. - */ -class CLBatchToSpaceND : public ICLSimpleFunction -{ -public: - /** Initialise the kernel's input and output. - * - * @param[in] input Input tensor. Data types supported: U8/QASYMM8/S16/S32/F16/F32. - * @param[out] output Output tensor. Data types supported: U8/QASYMM8/S16/S32/F16/F32. - * @param[in] block_size A pointer to an array of integer values specifying block sizes - * for spatial dimension. - */ - void configure(ICLTensor *input, ICLTensor *output, const int32_t *block_size); -}; - -} // namespace arm_compute -#endif /* __ARM_COMPUTE_CLBATCH_TO_SPACE_ND_H__ */ diff --git a/compute/ARMComputeEx/arm_compute/runtime/CL/functions/CLCast.h b/compute/ARMComputeEx/arm_compute/runtime/CL/functions/CLCast.h deleted file mode 100644 index ebe0d8a..0000000 --- a/compute/ARMComputeEx/arm_compute/runtime/CL/functions/CLCast.h +++ /dev/null @@ -1,75 +0,0 @@ -/* - * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
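Note: the CLBatchToSpaceND documentation above only fixes the supported data types, but the shape relationship of the standard batch-to-space operation is worth spelling out: each spatial dimension is multiplied by its block size and the batch dimension is divided by the product of the block sizes. The helper below is an illustration of that relationship under an assumed [N, spatial..., C] layout, with cropping ignored; it is not part of the CL function's API.

#include <cstdint>
#include <vector>

// Shape helper for batch-to-space: spatial dims grow by block_size[i],
// batch shrinks by the product of all block sizes (cropping not modelled).
inline std::vector<int32_t> batch_to_space_shape(const std::vector<int32_t> &in_shape, // [N, spatial..., C]
                                                 const std::vector<int32_t> &block_size)
{
  std::vector<int32_t> out = in_shape;
  int32_t block_prod = 1;
  for (size_t i = 0; i < block_size.size(); ++i)
  {
    out[1 + i] = in_shape[1 + i] * block_size[i];
    block_prod *= block_size[i];
  }
  out[0] = in_shape[0] / block_prod;
  return out;
}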
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * Copyright (c) 2016-2018 ARM Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -/** - * @file CLCast.h - * @ingroup COM_AI_RUNTIME - * @brief This file contains arm_compute::CLCast class - */ - -#ifndef __ARM_COMPUTE_CLCAST_H__ -#define __ARM_COMPUTE_CLCAST_H__ - -#include "arm_compute/core/TypesEx.h" -#include "arm_compute/runtime/CL/ICLSimpleFunction.h" - -namespace arm_compute -{ -class ICLTensor; - -/** - * @brief Class to run @ref CLCastKernel. - * This converts the input tensor to the tensor of the output tensor's type. - */ -class CLCast : public ICLSimpleFunction -{ -public: - /** - * @brief Initialise the kernel's input and output - * @param[in, out] input Input tensor. Data types supported: U8/QASYMM8/S16/S32/F16/F32. - * The input tensor is [in, out] because its TensorInfo might be - * modified inside the kernel. - * @param[out] output Output tensor. Data types supported: U8/QASYMM8/S16/S32/F16/F32. - * @param[in] input_subtype Sub data type of input. - */ - void configure(ICLTensor *input, ICLTensor *output, SubDataType input_subtype); -}; -} -#endif /* __ARM_COMPUTE_CLCAST_H__ */ diff --git a/compute/ARMComputeEx/arm_compute/runtime/CL/functions/CLDepthToSpace.h b/compute/ARMComputeEx/arm_compute/runtime/CL/functions/CLDepthToSpace.h deleted file mode 100644 index d52a538..0000000 --- a/compute/ARMComputeEx/arm_compute/runtime/CL/functions/CLDepthToSpace.h +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * Copyright (c) 2016-2018 ARM Limited. 
- * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#ifndef __ARM_COMPUTE_CLDEPTHTOSPACE_H__ -#define __ARM_COMPUTE_CLDEPTHTOSPACE_H__ - -#include "arm_compute/runtime/CL/ICLSimpleFunction.h" - -namespace arm_compute -{ -class ICLTensor; - -/** Basic function to run @ref CLDepthToSpaceKernel - * - * @note The tensor data type for the inputs must be U8/QASYMM8/S16/S32/F16/F32. - * @note The function converts the input tensor to the tensor of the output tensor's type. - */ -class CLDepthToSpace : public ICLSimpleFunction -{ -public: - /** Initialise the kernel's input and output. - * - * @param[in] input Input tensor. Data types supported: U8/QASYMM8/S16/S32/F16/F32. - * @param[out] output Output tensor. Data types supported: U8/QASYMM8/S16/S32/F16/F32. - * @param[block_size] block size integer only - */ - void configure(ICLTensor *input, ICLTensor *output, const int32_t block_size); -}; -} // namesace arm_compute - -#endif /* __ARM_COMPUTE_CLDEPTHTOSPACE_H__ */ diff --git a/compute/ARMComputeEx/arm_compute/runtime/CL/functions/CLDirectTransposeConvLayer.h b/compute/ARMComputeEx/arm_compute/runtime/CL/functions/CLDirectTransposeConvLayer.h new file mode 100644 index 0000000..409eaf5 --- /dev/null +++ b/compute/ARMComputeEx/arm_compute/runtime/CL/functions/CLDirectTransposeConvLayer.h @@ -0,0 +1,201 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * Copyright (c) 2019-2020 ARM Limited. 
+ * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef __ARM_COMPUTE_CLDIRECTTRANSPOSECONVLAYER_H__ +#define __ARM_COMPUTE_CLDIRECTTRANSPOSECONVLAYER_H__ + +#include "arm_compute/runtime/CL/functions/CLConvolutionLayer.h" +#include "arm_compute/runtime/CL/functions/CLDeconvolutionLayerUpsample.h" +#include "arm_compute/runtime/CL/functions/CLReverse.h" +#include "arm_compute/runtime/CL/functions/CLTranspose.h" + +#include "arm_compute/runtime/CL/CLTensor.h" +#include "arm_compute/runtime/IFunction.h" +#include "arm_compute/runtime/IMemoryManager.h" +#include "arm_compute/runtime/MemoryGroup.h" + +#include + +namespace arm_compute +{ +class ICLTensor; +/** Function to run the deconvolution layer. + * + * Deconvolution Layer is the backward pass of Convolution Layer. First we transform the input + * depending on the stride and pad info and then perform a 1x1 + * convolution pass. Input stride defines how many zeroes we should put between each element of the + * input and pad is the amount of padding. + * + * The relation between input to output is as follows: + * \f[ + * width\_output = (width\_input - 1) \cdot stride\_x - 2 \cdot padding\_x + kernel\_x + * \f] + * \f[ + * height\_output = (height\_input - 1) \cdot stride\_y - 2 \cdot padding\_y + kernel\_y + * \f] + * + * where: + * width_input is the size of the first input dimension. + * height_input is the size of the second input dimension. + * width_output is the size of the first output dimension. + * height_output is the size of the second output dimension. + * kernel_x and kernel_y are the convolution sizes in x and y. + * stride_x and stride_y is the input stride of the first and second dimension. + * + * The weights used by Deconvolution are supposed to be the same as the ones used for Convolution. + * Therefore, it will be necessary to use the weights in the + * reverse order to perform an actual convolution. This is achieved by using @ref CLReverse. 
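Note: a quick worked instance of the output-size relation quoted above may help: with width_input = 7, stride_x = 2, padding_x = 1 and kernel_x = 3, width_output = (7 - 1) * 2 - 2 * 1 + 3 = 13. The same arithmetic as a tiny helper, for illustration only:

// Output extent of the transpose convolution along one axis, per the relation above:
//   out = (in - 1) * stride - 2 * padding + kernel
inline unsigned int transpose_conv_out_dim(unsigned int in, unsigned int kernel,
                                           unsigned int stride, unsigned int padding)
{
  return (in - 1) * stride - 2 * padding + kernel;
}
// e.g. transpose_conv_out_dim(7, 3, 2, 1) == 13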
+ * + * This function calls the following OpenCL kernels/functions: + * + * -# @ref CLDeconvolutionLayerUpsample + * -# @ref CLConvolutionLayer + * + * And the following CPP kernels: + * -# @ref CLReverse + * + */ +class CLDirectTransposeConvLayer : public IFunction +{ +public: + /** Constructor */ + CLDirectTransposeConvLayer(std::shared_ptr memory_manager = nullptr); + /** Prevent instances of this class from being copied (As this class contains pointers) */ + CLDirectTransposeConvLayer(const CLDirectTransposeConvLayer &) = delete; + /** Default move constructor */ + CLDirectTransposeConvLayer(CLDirectTransposeConvLayer &&) = default; + /** Prevent instances of this class from being copied (As this class contains pointers) */ + CLDirectTransposeConvLayer &operator=(const CLDirectTransposeConvLayer &) = delete; + /** Default move assignment operator */ + CLDirectTransposeConvLayer &operator=(CLDirectTransposeConvLayer &&) = default; + /** Set the input, weights, biases and output tensors. + * + * @param[in,out] input Input tensor. 3 lower dimensions represent a single input, and an + * optional 4th dimension for batch of inputs. + * Data types supported: QASYMM8_SIGNED/QASYMM8/F16/F32. + * @param[in] weights The 4d weights with dimensions [width, height, IFM, OFM]. Data type + * supported: Same as @p input. + * @param[in] bias (Optional) The biases have one dimension. + * Data type supported: Should match @p input data type, except for + * input of QASYMM8 and QASYMM8_SIGNED type where biases should be of S32 type + * @param[out] output Output tensor. The output has the same number of dimensions as the + * @p input. + * @param[in] info Contains padding and policies to be used in the deconvolution, this + * is decribed in @ref PadStrideInfo. + * @param[in] invalid_right The number of zeros added to right edge of the output. + * @param[in] invalid_bottom The number of zeros added to bottom edge of the output. + * @param[in] weights_info (Optional) Weights information needed for @ref CLConvolutionLayer, + * specifies if the weights tensor has been reshaped with @ref CLWeightsReshapeKernel. + * + */ + void configure(ICLTensor *input, ICLTensor *weights, const ICLTensor *bias, ICLTensor *output, + const PadStrideInfo &info, unsigned int invalid_right, unsigned int invalid_bottom, + const WeightsInfo &weights_info = WeightsInfo()); + /** Set the input, weights, biases and output tensors. + * + * @param[in] compile_context The compile context to be used. + * @param[in,out] input Input tensor. 3 lower dimensions represent a single input, and + * an optional 4th dimension for batch of inputs. + * Data types supported: QASYMM8_SIGNED/QASYMM8/F16/F32. + * @param[in] weights The 4d weights with dimensions [width, height, IFM, OFM]. Data + * type supported: Same as @p input. + * @param[in] bias (Optional) The biases have one dimension. + * Data type supported: Should match @p input data type, except for + * input of QASYMM8 and QASYMM8_SIGNED type where biases should be of S32 type + * @param[out] output Output tensor. The output has the same number of dimensions as + * the @p input. + * @param[in] info Contains padding and policies to be used in the deconvolution, + * this is decribed in @ref PadStrideInfo. + * @param[in] invalid_right The number of zeros added to right edge of the output. + * @param[in] invalid_bottom The number of zeros added to bottom edge of the output. 
+ * @param[in] weights_info (Optional) Weights information needed for @ref + * CLConvolutionLayer, specifies if the weights tensor has been reshaped with @ref + * CLWeightsReshapeKernel. + * + */ + void configure(const CLCompileContext &compile_context, ICLTensor *input, ICLTensor *weights, + const ICLTensor *bias, ICLTensor *output, const PadStrideInfo &info, + unsigned int invalid_right, unsigned int invalid_bottom, + const WeightsInfo &weights_info = WeightsInfo()); + /** Static function to check if given info will lead to a valid configuration of @ref + * CLDirectTransposeConvLayer + * + * @param[in] input Input tensor info. 3 lower dimensions represent a single input, and an + * optional 4th dimension for batch of inputs. + * Data types supported: QASYMM8_SIGNED/QASYMM8/F16/F32. + * @param[in] weights The 4d weights info with dimensions [width, height, IFM, OFM]. Data + * type supported: Same as @p input. + * @param[in] bias (Optional) The biases have one dimension. + * Data type supported: Should match @p input data type, except for input + * of QASYMM8 and QASYMM8_SIGNED type where biases should be of S32 type + * @param[in] output Output tensor info. The output has the same number of dimensions as the + * @p input. + * @param[in] info Contains padding and policies to be used in the deconvolution, this is + * decribed in @ref PadStrideInfo. + * @param[in] invalid_right The number of zeros added to right edge of the output. + * @param[in] invalid_bottom The number of zeros added to bottom edge of the output. + * @param[in] weights_info (Optional) Weights information needed for @ref CLConvolutionLayer, + * specifies if the weights tensor has been reshaped with @ref CLWeightsReshapeKernel. + * + * @return a status + */ + static Status validate(const ITensorInfo *input, const ITensorInfo *weights, + const ITensorInfo *bias, ITensorInfo *output, const PadStrideInfo &info, + unsigned int invalid_right, unsigned int invalid_bottom, + const WeightsInfo &weights_info = WeightsInfo()); + + // Inherited methods overridden: + void run() override; + void prepare() override; + +private: + MemoryGroup _memory_group; + CLDeconvolutionLayerUpsample _scale_f; + CLConvolutionLayer _conv_f; + CLReverse _flip_weights; + + CLTensor _scaled_output; + ICLTensor *_original_weights; + CLTensor _weights_flipped; + CLTensor _flip_axis; + + bool _is_prepared; +}; +} // namespace arm_compute +#endif /* __ARM_COMPUTE_CLDIRECTTRANSPOSECONVLAYER_H__ */ diff --git a/compute/ARMComputeEx/arm_compute/runtime/CL/functions/CLFullyConnectedHybridLayer.h b/compute/ARMComputeEx/arm_compute/runtime/CL/functions/CLFullyConnectedHybridLayer.h index 1a0284a..f3266f6 100644 --- a/compute/ARMComputeEx/arm_compute/runtime/CL/functions/CLFullyConnectedHybridLayer.h +++ b/compute/ARMComputeEx/arm_compute/runtime/CL/functions/CLFullyConnectedHybridLayer.h @@ -50,7 +50,7 @@ #include "arm_compute/core/CL/kernels/CLTransposeKernel.h" #include "arm_compute/runtime/MemoryGroup.h" #include "arm_compute/runtime/CL/CLTensor.h" -#include "arm_compute/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCoreEx.h" +#include "arm_compute/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.h" namespace arm_compute { @@ -168,7 +168,7 @@ private: CLFullyConnectedHybridLayerReshapeWeights _reshape_weights_kernel; CLScaleFactorSymm8Kernel _scale_factor_kernel; CLQuantizationSymmetricKernel _quant_input_kernel; - CLGEMMLowpMatrixMultiplyCoreEx _mm_gemmlowp; + CLGEMMLowpMatrixMultiplyCore _mm_gemmlowp; CLMultiplyScaleFactorKernel _multiply_scale_kernel; 
CLGEMMMatrixAccumulateBiasesKernel _accumulate_biases_kernel; // TODO(COMPMID-1889): Use CLGEMM to // add bias in diff --git a/compute/ARMComputeEx/arm_compute/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCoreEx.h b/compute/ARMComputeEx/arm_compute/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCoreEx.h deleted file mode 100644 index 68aba74..0000000 --- a/compute/ARMComputeEx/arm_compute/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCoreEx.h +++ /dev/null @@ -1,142 +0,0 @@ -/* - * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * Copyright (c) 2017-2019 ARM Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#ifndef __ARM_COMPUTE_CLGEMMLOWPMATRIXMULTIPLYCOREEX_H__ -#define __ARM_COMPUTE_CLGEMMLOWPMATRIXMULTIPLYCOREEX_H__ - -#include "arm_compute/core/CL/kernels/CLDepthConvertLayerKernel.h" -#include "arm_compute/core/CL/kernels/CLGEMMLowpMatrixMultiplyKernelEx.h" -#include "arm_compute/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel.h" -#include "arm_compute/core/CL/kernels/CLGEMMLowpReductionKernel.h" -#include "arm_compute/core/CL/kernels/CLGEMMReshapeRHSMatrixKernel.h" -#include "arm_compute/runtime/CL/CLTensor.h" -#include "arm_compute/runtime/IFunction.h" -#include "arm_compute/runtime/MemoryGroup.h" - -namespace arm_compute -{ -class IMemoryManager; -class ICLTensor; - -/** Basic function to execute GEMMLowpMatrixMultiplyCore on OpenCL. 
This function calls the - * following OpenCL kernels: - * - * -# @ref CLGEMMLowpMatrixMultiplyKernel (if the parameter "reshape_b_only_on_first_run" of - * GEMMInfo is FALSE) - * -# @ref CLGEMMLowpMatrixAReductionKernel (if the offset of matrix B is not 0) - * -# @ref CLGEMMLowpMatrixBReductionKernel (if the offset of matrix A is not 0) - * -*/ -class CLGEMMLowpMatrixMultiplyCoreEx : public IFunction -{ -public: - /** Constructor */ - CLGEMMLowpMatrixMultiplyCoreEx(std::shared_ptr memory_manager = nullptr); - /** Prevent instances of this class from being copied (As this class contains pointers) */ - CLGEMMLowpMatrixMultiplyCoreEx(const CLGEMMLowpMatrixMultiplyCoreEx &) = delete; - /** Default move constructor */ - CLGEMMLowpMatrixMultiplyCoreEx(CLGEMMLowpMatrixMultiplyCoreEx &&) = default; - /** Prevent instances of this class from being copied (As this class contains pointers) */ - CLGEMMLowpMatrixMultiplyCoreEx &operator=(const CLGEMMLowpMatrixMultiplyCoreEx &) = delete; - /** Default move assignment operator */ - CLGEMMLowpMatrixMultiplyCoreEx &operator=(CLGEMMLowpMatrixMultiplyCoreEx &&) = default; - /** Initialise the kernel's inputs, output - * - * @note GEMMLowp: low precision GEMM kernel. [A * B + C] - * This kernel performs the following computations: - * - * -# Convert a values from QASYMM8 to int32 and add a_offset to each of them. - * -# Convert b values from QASYMM8 to int32 and add b_offset to each of them. - * -# Compute the matrix product of the resulting a * b in int32. - * -# Quantize to uint8 if gemm_info.gemmlowp_output_stage != NONE - * - * @param[in] a First input tensor (Matrix A). Data type supported: QASYMM8. - * @param[in] b Second input tensor (Matrix B). Data type supported: same as @p a - * @param[in] c Third input tensor (Matrix C). It can be a nullptr. Data type supported: - * S32 - * @param[out] output Output tensor. Data type supported: S32 or QASYMM8 if - * gemm_info.gemmlowp_output_stage != NONE - * @param[in] gemm_info (Optional) Specifies if the matrix A and/or matrix B have been reshaped - * and - * if the reshape of matrix B should be executed only for the first run - */ - void configure(const ICLTensor *a, const ICLTensor *b, const ICLTensor *c, ICLTensor *output, - const GEMMInfo &gemm_info = GEMMInfo()); - /** Static function to check if given info will lead to a valid configuration of @ref - * CLGEMMLowpMatrixMultiplyCoreEx - * - * @param[in] a First input tensor info (Matrix A). Data type supported: QASYMM8. - * @param[in] b Second input tensor info (Matrix B). Data type supported: same as @p a - * @param[in] c Third input tensor info (Matrix C). It can be a nullptr. Data type - * supported: S32 - * @param[in] output Output tensor info. 
Data type supported: S32 or QASYMM8 if - * gemm_info.gemmlowp_output_stage != NONE - * @param[in] gemm_info (Optional) Specifies if the matrix A and/or matrix B have been reshaped - * and - * if the reshape of matrix B should be executed only for the first run - * - * @return a status - */ - static Status validate(const ITensorInfo *a, const ITensorInfo *b, const ITensorInfo *c, - const ITensorInfo *output, const GEMMInfo &gemm_info = GEMMInfo()); - - // Inherited methods overridden: - void run() override; - void prepare() override; - -private: - MemoryGroup _memory_group; - - // Kernels used - CLGEMMLowpMatrixMultiplyKernelEx _mm_midgard_kernel; - CLGEMMLowpMatrixAReductionKernel _mtx_a_reduction_kernel; - CLGEMMLowpMatrixBReductionKernel _mtx_b_reduction_kernel; - - // Temporary tensors - CLTensor _vector_sum_col; - CLTensor _vector_sum_row; - - int32_t _a_offset; - int32_t _b_offset; - bool _reshape_b_only_on_first_run; - bool _is_prepared; -}; -} // namespace arm_compute -#endif /*__ARM_COMPUTE_CLGEMMLOWPMATRIXMULTIPLYCOREEX_H__ */ diff --git a/compute/ARMComputeEx/arm_compute/runtime/CL/functions/CLLogicalNot.h b/compute/ARMComputeEx/arm_compute/runtime/CL/functions/CLLogicalNot.h deleted file mode 100644 index 5121671..0000000 --- a/compute/ARMComputeEx/arm_compute/runtime/CL/functions/CLLogicalNot.h +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * Copyright (c) 2016-2018 ARM Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#ifndef __ARM_COMPUTE_CLLOGICALNOT_H__ -#define __ARM_COMPUTE_CLLOGICALNOT_H__ - -#include "arm_compute/runtime/CL/ICLSimpleFunction.h" - -namespace arm_compute -{ -class ICLTensor; - -class CLLogicalNot : public ICLSimpleFunction -{ -public: - /** Initialise the function's source and destination. - * - * @param[in] input Source tensor. 
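Note: the computation listed in the CLGEMMLowpMatrixMultiplyCoreEx documentation above (convert QASYMM8 values to int32, add the respective offsets, multiply in int32) corresponds to the scalar reference below. It ignores the optional output stage and any reshaping, and is only meant to pin down the integer arithmetic, not the OpenCL kernel structure.

#include <cstdint>

// Scalar reference of the documented GEMMLowp core:
//   C[m][n] = sum_k (A[m][k] + a_offset) * (B[k][n] + b_offset), accumulated in int32.
// A and B are QASYMM8 (uint8_t) matrices stored row-major.
inline void gemmlowp_reference(const uint8_t *a, const uint8_t *b, int32_t *dst, int M, int N, int K,
                               int32_t a_offset, int32_t b_offset)
{
  for (int m = 0; m < M; ++m)
    for (int n = 0; n < N; ++n)
    {
      int32_t acc = 0;
      for (int k = 0; k < K; ++k)
        acc += (static_cast<int32_t>(a[m * K + k]) + a_offset) *
               (static_cast<int32_t>(b[k * N + n]) + b_offset);
      dst[m * N + n] = acc;
    }
}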
Data types supported: QASYMM8. - * @param[out] output Output tensor. Data types supported: QASYMM8. - */ - void configure(ICLTensor *input, ICLTensor *output); -}; - -} // namespace arm_compute -#endif /*__ARM_COMPUTE_CLLOGICALNOT_H__ */ diff --git a/compute/ARMComputeEx/arm_compute/runtime/CL/functions/CLPReLU.h b/compute/ARMComputeEx/arm_compute/runtime/CL/functions/CLPReLU.h deleted file mode 100644 index 7fbe558..0000000 --- a/compute/ARMComputeEx/arm_compute/runtime/CL/functions/CLPReLU.h +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * Copyright (c) 2016-2018 ARM Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#ifndef __ARM_COMPUTE_CLPRELU_H__ -#define __ARM_COMPUTE_CLPRELU_H__ - -#include "arm_compute/runtime/CL/ICLSimpleFunction.h" - -namespace arm_compute -{ -class ICLTensor; - -class CLPReLU : public ICLSimpleFunction -{ -public: - /** Initialise the function's source and destination. - * - * @param[in] input. Data types supported: - * QASYMM8/F16/F32. - * @param[in] alpha. Data types supported: - * QASYMM8/F16/F32. - * @param[out] output Output tensor. Data types supported: Same as @p input. - */ - void configure(ICLTensor *input, ICLTensor *alpha, ICLTensor *output); -}; -} // namespace arm_compute -#endif /*__ARM_COMPUTE_CLPRELU_H__*/ diff --git a/compute/ARMComputeEx/arm_compute/runtime/CL/functions/CLPixelWiseDivision.h b/compute/ARMComputeEx/arm_compute/runtime/CL/functions/CLPixelWiseDivision.h deleted file mode 100644 index e83fb01..0000000 --- a/compute/ARMComputeEx/arm_compute/runtime/CL/functions/CLPixelWiseDivision.h +++ /dev/null @@ -1,103 +0,0 @@ -/* - * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * Copyright (c) 2016-2018 ARM Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -/** - * @file CLPixelWiseDivision.h - * @ingroup COM_AI_RUNTIME - * @brief This file contains arm_compute::CLPixelWiseDivision class - */ -#ifndef __ARM_COMPUTE_CLPIXELWISEDIVISION_H__ -#define __ARM_COMPUTE_CLPIXELWISEDIVISION_H__ - -#include "arm_compute/runtime/CL/ICLSimpleFunction.h" - -namespace arm_compute -{ -class ICLTensor; - -/** - * @brief Class to run @ref CLPixelWiseDivisionKernel. - */ -class CLPixelWiseDivision : public ICLSimpleFunction -{ -public: - /** - * @brief Initialise the kernel's inputs, output and convertion policy. - * @param[in, out] input1 An input tensor. Data types supported: U8/S16/F16/F32 - * The input tensor is [in, out] because its TensorInfo might be - * modified inside the kernel in case of broadcasting of dimension 0. - * @param[in, out] input2 An input tensor. Data types supported: same as @p input1. - * The input tensor is [in, out] because its TensorInfo might be - * modified inside the kernel in case of broadcasting of dimension 0. - * @param[out] output The output tensor, Data types supported: same as @p input1. - * Note: U8 requires both inputs to be U8. - * @param[in] scale Scale to apply after multiplication. - * Scale must be positive and its value must be either 1/255 or - * 1/2^n where n is between 0 and 15. - * @param[in] overflow_policy Overflow policy. Supported overflow policies: Wrap, Saturate - * @param[in] rounding_policy Rounding policy. Supported rounding modes: to zero, to nearest - * even. - * @return N/A - */ - void configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output, float scale = 1.f, - ConvertPolicy overflow_policy = ConvertPolicy::WRAP, - RoundingPolicy rounding_policy = RoundingPolicy::TO_ZERO); - - /** - * @brief Static function to check if given info will lead to a valid configuration of @ref - * CLPixelWiseDivision - * @param[in] input1 An input tensor info. 
Data types supported: U8/S16/F16/F32 - * @param[in] input2 An input tensor info. Data types supported: same as @p input1. - * @param[in] output The output tensor info, Data types supported: same as @p input1. - * Note: U8 requires both inputs to be U8. - * @param[in] scale Scale to apply after multiplication. - * Scale must be positive and its value must be either 1/255 or 1/2^n - * where n is between 0 and 15. - * @param[in] overflow_policy Overflow policy. Supported overflow policies: Wrap, Saturate - * @param[in] rounding_policy Rounding policy. Supported rounding modes: to zero, to nearest even. - * @return a status - */ - static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, - const ITensorInfo *output, float scale = 1.f, - ConvertPolicy overflow_policy = ConvertPolicy::WRAP, - RoundingPolicy rounding_policy = RoundingPolicy::TO_ZERO); -}; -} -#endif /*__ARM_COMPUTE_CLPIXELWISEDIVISION_H__ */ diff --git a/compute/ARMComputeEx/arm_compute/runtime/CL/functions/CLRNNLayerEx.h b/compute/ARMComputeEx/arm_compute/runtime/CL/functions/CLRNNLayerEx.h deleted file mode 100644 index b49cbd8..0000000 --- a/compute/ARMComputeEx/arm_compute/runtime/CL/functions/CLRNNLayerEx.h +++ /dev/null @@ -1,120 +0,0 @@ -/* - * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * Copyright (c) 2018 ARM Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
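Note: the scale constraint repeated in the CLPixelWiseDivision documentation above (scale must be positive and equal to either 1/255 or 1/2^n with n between 0 and 15) can be expressed as a small check. This is only an illustration of the documented constraint, not code taken from the validate() implementation.

#include <cmath>

// True if 'scale' satisfies the documented constraint: positive and equal to
// 1/255 or 1/2^n for some n in [0, 15] (compared with a small tolerance).
inline bool is_valid_pixelwise_scale(float scale)
{
  if (scale <= 0.f)
    return false;
  const float eps = 1e-6f;
  if (std::fabs(scale - 1.f / 255.f) < eps)
    return true;
  for (int n = 0; n <= 15; ++n)
    if (std::fabs(scale - 1.f / static_cast<float>(1 << n)) < eps)
      return true;
  return false;
}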
- */ - -#ifndef __ARM_COMPUTE_CLRNN_LAYER_EX_H__ -#define __ARM_COMPUTE_CLRNN_LAYER_EX_H__ - -#include "arm_compute/core/CL/kernels/CLActivationLayerKernel.h" -#include "arm_compute/core/CL/kernels/CLCopyKernel.h" -#include "arm_compute/core/CL/kernels/CLElementwiseOperationKernel.h" -#include "arm_compute/runtime/CL/ICLSimpleFunction.h" -#include "arm_compute/runtime/CL/functions/CLFullyConnectedLayer.h" -#include "arm_compute/runtime/CL/functions/CLGEMM.h" - -namespace arm_compute -{ -class ICLTensor; - -/** Basic function to run @ref CLRNNLayerEx */ -class CLRNNLayerEx : public IFunction -{ -public: - /** Default constructor */ - CLRNNLayerEx(std::shared_ptr memory_manager = nullptr); - /** Initialize the function - * - * @param[in] input Input is a 2-D tensor of shape [input_size, batch_size]. Data - * types supported: F16/F32 - * @param[in] weights Weights tensor of shape [input_size, num_units] that - * multiplies the input. Data types supported: Same as @p input - * @param[in] recurrent_weights Weights tensor of shape [num_units, num_units] that multiplies - * the current 'state'. Data types supported: Same as @p input - * @param[in] bias Bias vector of shape [num_units]. Data types supported: Same - * as @p input - * @param[out] output Output tensor of shape [num_units, batch_size]. Data types - * supported: Same as @p input - * @param[in,out] hidden_state Output tensor of shape [num_units, batch_size]. Data types - * supported: Same as @p input - * @param[in] info Activation layer parameter. - */ - void configure(const ICLTensor *input, const ICLTensor *weights, - const ICLTensor *recurrent_weights, const ICLTensor *bias, ICLTensor *hidden_state, - ICLTensor *output, ActivationLayerInfo &info); - /** Initialize the function - * - * @param[in] input Input is a 2-D tensor of shape [input_size, batch_size]. Data - * types supported: F16/F32 - * @param[in] weights Weights tensor of shape [input_size, num_units] that multiplies - * the input. Data types supported: Same as @p input - * @param[in] recurrent_weights Weights tensor of shape [num_units, num_units] that multiplies the - * current 'state'. Data types supported: Same as @p input - * @param[in] bias Bias vector of shape [num_units]. Data types supported: Same as @p - * input - * @param[in] output Output tensor of shape [num_units, batch_size]. Data types - * supported: Same as @p input - * @param[in] hidden_state Output tensor of shape [num_units, batch_size]. Data types - * supported: Same as @p input - * @param[in] info Activation layer parameter. 
- * - * @return a status - */ - static Status validate(const ITensorInfo *input, const ITensorInfo *weights, - const ITensorInfo *recurrent_weights, const ITensorInfo *bias, - const ITensorInfo *hidden_state, const ITensorInfo *output, - const ActivationLayerInfo &info); - - // Inherited methods overridden: - void run() override; - void prepare() override; - -private: - MemoryGroup _memory_group; - CLGEMM _gemm_state_f; - CLSaturatedArithmeticOperationKernel _add_kernel; - CLActivationLayerKernel _activation_kernel; - CLFullyConnectedLayer _fully_connected_kernel; - CLCopyKernel _copy_kernel; - CLTensor _fully_connected_out; - CLTensor _gemm_output; - CLTensor _add_output; - bool _is_prepared; -}; -} -#endif /* __ARM_COMPUTE_CLRNN_LAYER_EX_H__ */ diff --git a/compute/ARMComputeEx/arm_compute/runtime/CL/functions/CLSpaceToDepth.h b/compute/ARMComputeEx/arm_compute/runtime/CL/functions/CLSpaceToDepth.h deleted file mode 100644 index 2090b46..0000000 --- a/compute/ARMComputeEx/arm_compute/runtime/CL/functions/CLSpaceToDepth.h +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * Copyright (c) 2016-2018 ARM Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#ifndef __ARM_COMPUTE_CLSPACETODEPTH_H__ -#define __ARM_COMPUTE_CLSPACETODEPTH_H__ - -#include "arm_compute/runtime/CL/ICLSimpleFunction.h" - -namespace arm_compute -{ -class ICLTensor; - -/** Basic function to run @ref CLSpaceToDepthKernel - * - * @note The tensor data type for the inputs must be U8/QASYMM8/S16/S32/F16/F32. - * @note The function converts the input tensor to the tensor of the output tensor's type. - */ -class CLSpaceToDepth : public ICLSimpleFunction -{ -public: - /** Initialise the kernel's input and output. - * - * @param[in] input Input tensor. Data types supported: U8/QASYMM8/S16/S32/F16/F32. 
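Note: the shapes in the CLRNNLayerEx documentation above match a vanilla RNN update. Assuming the usual semantics of the stock CLRNNLayer (fully connected input transform, recurrent GEMM, bias add, then the activation selected by 'info', with the result written to both hidden_state and output), a scalar sketch for one batch element looks like this; the exact fusion and activation choice are assumptions, not taken from the implementation.

#include <cmath>
#include <vector>

// Assumed vanilla RNN step: h' = act(W * x + R * h + b), scalar form for one batch element.
// tanh is used here purely as an example activation.
inline void rnn_step_reference(const std::vector<float> &x,                // [input_size]
                               const std::vector<std::vector<float>> &W,   // [num_units][input_size]
                               const std::vector<std::vector<float>> &R,   // [num_units][num_units]
                               const std::vector<float> &b,                // [num_units]
                               std::vector<float> &h)                      // [num_units], updated in place
{
  std::vector<float> next(h.size(), 0.f);
  for (size_t u = 0; u < h.size(); ++u)
  {
    float acc = b[u];
    for (size_t i = 0; i < x.size(); ++i)
      acc += W[u][i] * x[i];
    for (size_t j = 0; j < h.size(); ++j)
      acc += R[u][j] * h[j];
    next[u] = std::tanh(acc); // activation chosen by 'info' in the real function
  }
  h = next;
}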
- * @param[out] output Output tensor. Data types supported: U8/QASYMM8/S16/S32/F16/F32. - * @param[block_size] block size integer only - */ - void configure(ICLTensor *input, ICLTensor *output, const int32_t block_size); -}; - -} // namespace arm_compute -#endif /* __ARM_COMPUTE_CLSPACETODEPTH_H__ */ diff --git a/compute/ARMComputeEx/arm_compute/runtime/CL/functions/CLStridedSliceEx.h b/compute/ARMComputeEx/arm_compute/runtime/CL/functions/CLStridedSliceEx.h deleted file mode 100644 index 03edd15..0000000 --- a/compute/ARMComputeEx/arm_compute/runtime/CL/functions/CLStridedSliceEx.h +++ /dev/null @@ -1,81 +0,0 @@ -/* - * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * Copyright (c) 2017 ARM Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -/** - * @file CLStridedSlice.h - * @ingroup COM_AI_RUNTIME - * @brief This file contains arm_compute::CLStridedSlice and arm_compute::CLStridedSliceCPU class - */ - -#ifndef __ARM_COMPUTE_CLSTRIDEDSLICEEX_H__ -#define __ARM_COMPUTE_CLSTRIDEDSLICEEX_H__ - -#include "arm_compute/runtime/CL/ICLSimpleFunction.h" - -namespace arm_compute -{ -class ICLTensor; - -/** - * @brief Class to run @ref CLStridedSliceKernel - */ -class CLStridedSliceEx : public ICLSimpleFunction -{ -public: - /** - * @brief Initialise the kernel's inputs and outputs - * @param[in] input Tensor input. Data type supported: - * U8/S8/QASYMM8/U16/S16/U32/S32/F16/F32 - * @param[out] output Output tensor. 
Data type supported: Same as @p input - * @param[in] beginData 'begin' vector of strided slice operation - * @param[in] endData 'end' vector of strided slice operation - * @param[in] stridesData 'strides' vector of strided slice operation - * @param[in] beginMask If the ith bit is set, begin[i] is ignored - * @param[in] endMask If the ith bit is set, end[i] is ignored - * @param[in] shrinkAxisMask If the ith bit is set, the ith specification shrinks the - * dimensionality by 1, taking on the value at index begin[i] - * @return N/A - */ - void configure(const ICLTensor *input, ICLTensor *output, ICLTensor *beginData, - ICLTensor *endData, ICLTensor *stridesData, int32_t beginMask, int32_t endMask, - int32_t shrinkAxisMask); -}; -} -#endif /*__ARM_COMPUTE_CLSTRIDEDSLICEEX_H__ */ diff --git a/compute/ARMComputeEx/arm_compute/runtime/CL/functions/CLTransposeConvLayer.h b/compute/ARMComputeEx/arm_compute/runtime/CL/functions/CLTransposeConvLayer.h index 54a697e..5fb102e 100644 --- a/compute/ARMComputeEx/arm_compute/runtime/CL/functions/CLTransposeConvLayer.h +++ b/compute/ARMComputeEx/arm_compute/runtime/CL/functions/CLTransposeConvLayer.h @@ -15,7 +15,7 @@ */ /* - * Copyright (c) 2017-2018 ARM Limited. + * Copyright (c) 2017-2020 ARM Limited. * * SPDX-License-Identifier: MIT * @@ -37,16 +37,11 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ - #ifndef __ARM_COMPUTE_CLTRANSPOSECONVLAYER_H__ #define __ARM_COMPUTE_CLTRANSPOSECONVLAYER_H__ -#include "arm_compute/runtime/CL/functions/CLConvolutionLayer.h" -#include "arm_compute/runtime/CL/functions/CLTransposeConvLayerUpsample.h" - -#include "arm_compute/core/CPP/kernels/CPPFlipWeightsKernel.h" - -#include "arm_compute/runtime/CL/CLTensor.h" +#include "arm_compute/runtime/CL/functions/CLDirectTransposeConvLayer.h" +#include "arm_compute/runtime/CL/functions/CLGEMMDeconvolutionLayer.h" #include "arm_compute/runtime/IFunction.h" #include "arm_compute/runtime/IMemoryManager.h" @@ -54,119 +49,102 @@ namespace arm_compute { -class ICLTensor; -/** Function to run the transpose convolution layer. - * - * @note This layer was copied in order to fix a bug computing to wrong output dimensions. - * - * TransposeConv Layer is the backward pass of Convolution Layer. First we transform the input - * depending on the stride and pad info and then perform a 1x1 - * convolution pass. Input stride defines how many zeroes we should put between each element of the - * input, pad is the amount of padding and finally a is a user - * specified value where a < stride - 1, that increases the padding top and right of the input - * image. - * - * The relation between input to output is as follows: - * \f[ - * width\_output = (width\_input - 1) \cdot stride\_x - \cdot padding\_x + kernel\_x - * \f] - * \f[ - * height\_output = (height\_input - 1) \cdot stride\_y - \cdot padding\_y + kernel\_y - * \f] - * - * where: - * width_input is the size of the first input dimension. - * height_input is the size of the second input dimension. - * width_output is the size of the first output dimension. - * height_output is the size of the second output dimension. - * kernel_x and kernel_y are the convolution sizes in x and y. - * stride_x and stride_y is the input stride of the first and second dimension. - * - * The weights used by Deconvolution are supposed to be the same as the ones used for Convolution. - * Therefore, it will be necessary to use the weights in the - * reverse order to perform an actual convolution. 
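Note: the mask parameters documented for CLStridedSliceEx above follow the usual strided-slice convention. The sketch below shows how begin/end are resolved for a positive stride when a mask bit is set (the ignored value is replaced by the start or end of the dimension) and how negative indices wrap; shrink-axis handling and negative strides are omitted, and this does not mirror the kernel's actual implementation.

#include <cstdint>

// Resolve the effective [begin, end) range for dimension 'dim' of extent 'dim_size',
// assuming a positive stride. A set mask bit means the given value is ignored and
// replaced by the start (0) or the end (dim_size) of the dimension, as documented.
inline void resolve_strided_slice_range(int32_t begin, int32_t end, int32_t dim, int32_t dim_size,
                                        int32_t begin_mask, int32_t end_mask,
                                        int32_t &out_begin, int32_t &out_end)
{
  out_begin = (begin_mask & (1 << dim)) ? 0 : begin;
  out_end = (end_mask & (1 << dim)) ? dim_size : end;
  // Negative indices count from the end of the dimension.
  if (out_begin < 0)
    out_begin += dim_size;
  if (out_end < 0)
    out_end += dim_size;
}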
This is achieved by using the @ref - * CPPFlipWeightsKernel. - * - * This function calls the following OpenCL kernels/functions: - * - * -# @ref CLTransposeConvLayerUpsample - * -# @ref CLConvolutionLayer +/** Basic function to compute the deconvolution layer. This function calls the following OpenCL + * kernels/functions: * + * -# @ref CLGEMMDeconvolutionLayer + * -# @ref CLDirectTransposeConvLayer */ class CLTransposeConvLayer : public IFunction { public: - /** Constructor */ + /** Default constructor */ CLTransposeConvLayer(std::shared_ptr memory_manager = nullptr); - /** Prevent instances of this class from being copied (As this class contains pointers) */ - CLTransposeConvLayer(const CLTransposeConvLayer &) = delete; - /** Default move constructor */ - CLTransposeConvLayer(CLTransposeConvLayer &&) = default; - /** Prevent instances of this class from being copied (As this class contains pointers) */ - CLTransposeConvLayer &operator=(const CLTransposeConvLayer &) = delete; - /** Default move assignment operator */ - CLTransposeConvLayer &operator=(CLTransposeConvLayer &&) = default; + /** Set the input, weights, biases and output tensors. * - * @param[in,out] input Input tensor. 3 lower dimensions represent a single input, - * and an optional 4th dimension for batch of inputs. - * Data types supported: QASYMM8/F16/F32. - * @param[in] weights The 4d weights with dimensions [width, height, IFM, OFM]. - * Data type supported: Same as @p input. - * @param[in] bias (Optional) The biases have one dimension. Data type supported: - * Same as @p input. - * @param[out] output Output tensor. The output has the same number of dimensions - * as the @p input. - * @param[in] info Contains padding and policies to be used in the - * transpose convolution, this is decribed in @ref PadStrideInfo. - * @param[in] invalid_right The number of zeros added to right edge of the output. - * @param[in] invalid_bottom The number of zeros added to top edge of the output. - * @param[in] weights_info (Optional) Weights information needed for @ref - * CLConvolutionLayer, specifies if the weights tensor has been - * reshaped with @ref CLWeightsReshapeKernel. + * @param[in,out] input Input tensor. 3 lower dimensions represent a single input, and an + * optional 4th dimension for batch of inputs. Data types supported: QASYMM8_SIGNED/QASYMM8/F16/F32. + * @param[in] weights The 4d weights with dimensions [width, height, IFM, OFM]. Data type + * supported: Same as @p input. + * @param[in] bias (Optional) The biases have one dimension. Data type supported: Same + * as @p input. + * @param[out] output Output tensor. The output has the same number of dimensions as the + * @p input. + * @param[in] deconv_info Contains padding and policies to be used in the deconvolution, this + * is described in @ref PadStrideInfo. + * @param[in] invalid_right The number of zeros added to right edge of the output. + * @param[in] invalid_bottom The number of zeros added to bottom edge of the output. + * @param[in] weights_info (Optional) Weights information needed for @ref CLConvolutionLayer, + * specifies if the weights tensor has been reshaped with @ref CLWeightsReshapeKernel. 
+ * */ void configure(ICLTensor *input, ICLTensor *weights, const ICLTensor *bias, ICLTensor *output, - const PadStrideInfo &info, unsigned int invalid_right, unsigned int invalid_bottom, + const PadStrideInfo &deconv_info, unsigned int invalid_right, + unsigned int invalid_bottom, const WeightsInfo &weights_info = WeightsInfo()); + /** Set the input, weights, biases and output tensors. + * + * @param[in] compile_context The compile context to be used. + * @param[in,out] input Input tensor. 3 lower dimensions represent a single input, and + * an optional 4th dimension for batch of inputs. Data types supported: + * QASYMM8_SIGNED/QASYMM8/F16/F32. + * @param[in] weights The 4d weights with dimensions [width, height, IFM, OFM]. Data + * type supported: Same as @p input. + * @param[in] bias (Optional) The biases have one dimension. Data type supported: + * Same as @p input. + * @param[out] output Output tensor. The output has the same number of dimensions as + * the @p input. + * @param[in] deconv_info Contains padding and policies to be used in the deconvolution, + * this is described in @ref PadStrideInfo. + * @param[in] invalid_right The number of zeros added to right edge of the output. + * @param[in] invalid_bottom The number of zeros added to bottom edge of the output. + * @param[in] weights_info (Optional) Weights information needed for @ref + * CLConvolutionLayer, specifies if the weights tensor has been reshaped with @ref + * CLWeightsReshapeKernel. + * + */ + void configure(const CLCompileContext &compile_context, ICLTensor *input, ICLTensor *weights, + const ICLTensor *bias, ICLTensor *output, const PadStrideInfo &deconv_info, + unsigned int invalid_right, unsigned int invalid_bottom, const WeightsInfo &weights_info = WeightsInfo()); /** Static function to check if given info will lead to a valid configuration of @ref - * CLTransposeConvLayer + * CLTransposeConvLayer + * + * @param[in] input Input tensor info. 3 lower dimensions represent a single input, and an + * optional 4th dimension for batch of inputs. Data types supported: QASYMM8_SIGNED/QASYMM8/F16/F32. + * @param[in] weights The 4d weights info with dimensions [width, height, IFM, OFM]. Data + * type supported: Same as @p input. + * @param[in] bias (Optional) The biases have one dimension. Data type supported: Same as + * @p input. + * @param[in] output Output tensor info. The output has the same number of dimensions as the + * @p input. + * @param[in] deconv_info Contains padding and policies to be used in the deconvolution, this is + * described in @ref PadStrideInfo. + * @param[in] invalid_right The number of zeros added to right edge of the output. + * @param[in] invalid_bottom The number of zeros added to bottom edge of the output. + * @param[in] weights_info (Optional) Weights information needed for @ref CLConvolutionLayer, + * specifies if the weights tensor has been reshaped with @ref CLWeightsReshapeKernel. * - * @param[in] input Input tensor info. 3 lower dimensions represent a single input, - * and an optional 4th dimension for batch of inputs. - * Data types supported: QASYMM8/F16/F32. - * @param[in] weights The 4d weights info with dimensions [width, height, IFM, OFM]. - * Data type supported: Same as @p input. - * @param[in] bias (Optional) The biases have one dimension. Data type supported: - * Same as @p input. - * @param[in] output Output tensor info. The output has the same number of dimensions - * as the @p input. 
- * @param[in] info Contains padding and policies to be used in the - * transpose convolution, this is decribed in @ref PadStrideInfo. - * @param[in] innvalid_right The number of zeros added to right edge of the output. - * @param[in] invalid_bottom The number of zeros added to top edge of the output. - * @param[in] weights_info (Optional) Weights information needed for @ref CLConvolutionLayer, - * specifies if the weights tensor has been reshaped with @ref - * CLWeightsReshapeKernel. * @return a status */ static Status validate(const ITensorInfo *input, const ITensorInfo *weights, - const ITensorInfo *bias, ITensorInfo *output, const PadStrideInfo &info, - unsigned int innvalid_right, unsigned int invalid_bottom, + const ITensorInfo *bias, ITensorInfo *output, + const PadStrideInfo &deconv_info, unsigned int invalid_right, + unsigned int invalid_bottom, const WeightsInfo &weights_info = WeightsInfo()); + static DeconvolutionMethod + get_deconvolution_method(const ITensorInfo *input, const ITensorInfo *weights, + const ITensorInfo *bias, ITensorInfo *output, + const PadStrideInfo &deconv_info, unsigned int invalid_right, + unsigned int invalid_bottom, const WeightsInfo &weights_info); // Inherited methods overridden: void run() override; void prepare() override; private: - MemoryGroup _memory_group; - CLTransposeConvLayerUpsample _scale_f; - CLConvolutionLayer _conv_f; - CPPFlipWeightsKernel _flip_weights; - CLTensor _scaled_output; - ICLTensor *_original_weights; - CLTensor _weights_flipped; - bool _is_prepared; + std::shared_ptr _memory_manager; + std::unique_ptr _function; }; -} +} // namespace arm_compute #endif /* __ARM_COMPUTE_CLTRANSPOSECONVLAYER_H__ */ diff --git a/compute/ARMComputeEx/arm_compute/runtime/CL/functions/CLTransposeConvLayerUpsample.h b/compute/ARMComputeEx/arm_compute/runtime/CL/functions/CLTransposeConvLayerUpsample.h deleted file mode 100644 index 7570fe7..0000000 --- a/compute/ARMComputeEx/arm_compute/runtime/CL/functions/CLTransposeConvLayerUpsample.h +++ /dev/null @@ -1,102 +0,0 @@ -/* - * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * Copyright (c) 2017-2018 ARM Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#ifndef __ARM_COMPUTE_CLTRANSPOSECONVLAYERUPSAMPLE_H__ -#define __ARM_COMPUTE_CLTRANSPOSECONVLAYERUPSAMPLE_H__ - -#include "arm_compute/runtime/IFunction.h" - -#include "arm_compute/core/CL/kernels/CLTransposeConvLayerUpsampleKernel.h" -#include "arm_compute/core/Types.h" -#include "arm_compute/runtime/IFunction.h" -#include "arm_compute/runtime/IMemoryManager.h" - -namespace arm_compute -{ -class ICLTensor; - -/** Basic function to run @ref CLTransposeConvLayerUpsampleKernel */ -class CLTransposeConvLayerUpsample : public IFunction -{ -public: - /** Default constructor */ - CLTransposeConvLayerUpsample(); - /** Prevent instances of this class from being copied (As this class contains pointers) */ - CLTransposeConvLayerUpsample(const CLTransposeConvLayerUpsample &) = delete; - /** Prevent instances of this class from being copied (As this class contains pointers) */ - CLTransposeConvLayerUpsample &operator=(const CLTransposeConvLayerUpsample &) = delete; - /** Allow instances of this class to be moved */ - CLTransposeConvLayerUpsample(CLTransposeConvLayerUpsample &&) = default; - /** Allow instances of this class to be moved */ - CLTransposeConvLayerUpsample &operator=(CLTransposeConvLayerUpsample &&) = default; - /** Default destructor */ - virtual ~CLTransposeConvLayerUpsample() = default; - - /** Initialize the function's source, destination, interpolation type and border_mode. - * - * @param[in, out] input Source tensor. Data type supported: QASYMM8/F16/F32. - * @param[out] output Destination tensor. Data type supported: same as @p input. - * @param[in] inner_border The number of zeros added to right and top edges of the input. - * @param[in] info Contains padding and policies to be used in the deconvolution. - */ - void configure(ICLTensor *input, ICLTensor *output, const BorderSize &inner_border, - const PadStrideInfo &info); - /** Static function to check if given info will lead to a valid configuration of @ref - * CLTransposeConvLayerUpsample - * - * @param[in] input Source tensor info. Data type supported: QASYMM8/F16/F32. - * @param[in] output Destination tensor info. Data type supported: same as @p input. - * @param[in] inner_border The number of zeros added to right and top edges of the input. - * @param[in] info Contains padding and policies to be used in the deconvolution. 
- * - * @return a status - */ - static Status validate(const ITensorInfo *input, const ITensorInfo *output, - const BorderSize &inner_border, const PadStrideInfo &info); - - // Inherited methods overridden: - void run() override; - -private: - CLTransposeConvLayerUpsampleKernel _upsample; - ICLTensor *_output; -}; -} -#endif /* __ARM_COMPUTE_CLTRANSPOSECONVLAYERUPSAMPLE_H__ */ diff --git a/compute/ARMComputeEx/arm_compute/runtime/CPP/functions/CPPUpsampleEx.h b/compute/ARMComputeEx/arm_compute/runtime/CPP/functions/CPPUpsampleEx.h deleted file mode 100644 index 666afef..0000000 --- a/compute/ARMComputeEx/arm_compute/runtime/CPP/functions/CPPUpsampleEx.h +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * Copyright (c) 2017-2019 ARM Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#ifndef __ARM_COMPUTE_CPPUPSAMPLE_EX_H__ -#define __ARM_COMPUTE_CPPUPSAMPLE_EX_H__ - -#include "arm_compute/runtime/CPP/ICPPSimpleFunction.h" - -#include "arm_compute/core/Types.h" - -namespace arm_compute -{ -class ITensor; - -/** Basic function to run @ref CPPUpsample */ -class CPPUpsampleEx : public ICPPSimpleFunction -{ -public: - /** Configure the upsample CPP kernel - * - * @param[in] input The input tensor to upsample. Data types supported: F32/F16/QASYMM8 - * @param[out] output The output tensor. 
Data types supported: Same as @p input - * @param[in] info Padding information - */ - void configure(const ITensor *input, ITensor *output, const PadStrideInfo &info); -}; -} -#endif /* __ARM_COMPUTE_CPPUPSAMPLE_EX_H__ */ diff --git a/compute/ARMComputeEx/arm_compute/runtime/NEON/NEFunctionsEx.h b/compute/ARMComputeEx/arm_compute/runtime/NEON/NEFunctionsEx.h index 49504fd..3fad230 100644 --- a/compute/ARMComputeEx/arm_compute/runtime/NEON/NEFunctionsEx.h +++ b/compute/ARMComputeEx/arm_compute/runtime/NEON/NEFunctionsEx.h @@ -18,20 +18,13 @@ #include #include -#include -#include #include #include #include #include #include -#include -#include #include -#include #include -#include -#include #include #endif // __ARM_COMPUTE_NEFUNCTIONSEX_H__ diff --git a/compute/ARMComputeEx/arm_compute/runtime/NEON/functions/NECast.h b/compute/ARMComputeEx/arm_compute/runtime/NEON/functions/NECast.h deleted file mode 100644 index f0f0d81..0000000 --- a/compute/ARMComputeEx/arm_compute/runtime/NEON/functions/NECast.h +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * Copyright (c) 2017-2019 ARM Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#ifndef __ARM_COMPUTE_NECAST_H__ -#define __ARM_COMPUTE_NECAST_H__ - -#include "arm_compute/runtime/NEON/INESimpleFunctionNoBorder.h" - -#include "arm_compute/core/Types.h" -#include "arm_compute/core/TypesEx.h" - -namespace arm_compute -{ -// Forward declarations -class ITensor; - -/** Basic function to run @ref NECastKernel that converts an input tensor to the other types */ -class NECast : public INESimpleFunctionNoBorder -{ -public: - /** Configure the kernel. - * - * @param[in] input Source tensor. Data types supported: U8/S8/QASYMM8/U32/S32/F32. - * @param[out] output Destination tensor with the same dimensions of input. 
Data type supported: - * U8/S8/QASYMM8/U32/S32/F32. - * @param[in] input_subtype Sub data type of input. - */ - void configure(const ITensor *input, ITensor *output, - SubDataType input_subtype = SubDataType::NONE); - /** Static function to check if given info will lead to a valid configuration of @ref NECast - * - * @param[in] input Input tensor info. Data types supported: U8/S8/QASYMM8/U32/S32/F32. - * @param[in] output Output tensor info. Data type supported: U8/S8/QASYMM8/U32/S32/F32. - * @param[in] input_subtype Sub data type of input. - * - * @return a status - */ - static Status validate(const ITensorInfo *input, const ITensorInfo *output, - SubDataType input_subtype = SubDataType::NONE); -}; -} // namespace arm_compute -#endif /* __ARM_COMPUTE_NECAST_H__ */ diff --git a/compute/ARMComputeEx/arm_compute/runtime/NEON/functions/NEDepthToSpaceLayerEx.h b/compute/ARMComputeEx/arm_compute/runtime/NEON/functions/NEDepthToSpaceLayerEx.h deleted file mode 100644 index 005d85a..0000000 --- a/compute/ARMComputeEx/arm_compute/runtime/NEON/functions/NEDepthToSpaceLayerEx.h +++ /dev/null @@ -1,78 +0,0 @@ -/* - * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * Copyright (c) 2019 ARM Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#ifndef __ARM_COMPUTE_NEDEPTHTOSPACELAYEREX_H__ -#define __ARM_COMPUTE_NEDEPTHTOSPACELAYEREX_H__ - -#include "arm_compute/runtime/IFunction.h" - -#include "arm_compute/core/Types.h" -#include "arm_compute/runtime/NEON/INESimpleFunctionNoBorder.h" - -namespace arm_compute -{ -class ITensor; - -/** Basic function to run @ref NEDepthToSpaceLayerKernelEx. */ -class NEDepthToSpaceLayerEx : public INESimpleFunctionNoBorder -{ -public: - /** Set the input and output tensors. - * - * @param[in] input Tensor input. Supported tensor rank: 4. Data types supported: - * U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32. 
- * @param[out] output Tensor output. Data types supported: same as @p input - * @param[in] block_shape Block shape value. - */ - void configure(const ITensor *input, ITensor *output, int32_t block_shape); - /** Static function to check if given info will lead to a valid configuration of @ref - * NEDepthToSpaceLayerEx. - * - * @param[in] input Tensor input info. Supported tensor rank: 4. Data types supported: - * U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32. - * @param[in] output Tensor output info. Data types supported: same as @p input - * @param[in] block_shape Block shape x value. - * - * @return a status - */ - static Status validate(const ITensorInfo *input, const ITensorInfo *output, int32_t block_shape); -}; -} // namespace arm_compute -#endif /* __ARM_COMPUTE_NEDEPTHTOSPACELAYEREX_H__ */ diff --git a/compute/ARMComputeEx/arm_compute/runtime/NEON/functions/NEElementwiseUnaryLayerEx.h b/compute/ARMComputeEx/arm_compute/runtime/NEON/functions/NEElementwiseUnaryLayerEx.h deleted file mode 100644 index 27a38e9..0000000 --- a/compute/ARMComputeEx/arm_compute/runtime/NEON/functions/NEElementwiseUnaryLayerEx.h +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * Copyright (c) 2018-2019 ARM Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#ifndef __ARM_COMPUTE_NEELEMENTWISEUNARYLAYEREX_H__ -#define __ARM_COMPUTE_NEELEMENTWISEUNARYLAYEREX_H__ - -#include "arm_compute/runtime/NEON/INESimpleFunction.h" - -namespace arm_compute -{ -class ITensor; - -/** Basic function to perform negative on an input tensor. */ -class NENegLayer : public INESimpleFunction -{ -public: - /** Initialize the function - * - * @param[in] input Input tensor. Data types supported: F16/F32/S32. - * @param[out] output Output tensor. Data types supported: same as @p input. 
- */ - void configure(const ITensor *input, ITensor *output); - /** Static function to check if given info will lead to a valid configuration of @ref NERsqrtLayer - * - * @param[in] input First tensor input info. Data types supported: F16/F32/S32. - * @param[in] output Output tensor info. Data types supported: Same as @p input. - * - * @return a status - */ - static Status validate(const ITensorInfo *input, const ITensorInfo *output); -}; -} // namespace arm_compute -#endif /* __ARM_COMPUTE_NEELEMENTWISEUNARYLAYEREX_H__ */ diff --git a/compute/ARMComputeEx/arm_compute/runtime/NEON/functions/NEFullyConnectedHybridLayer.h b/compute/ARMComputeEx/arm_compute/runtime/NEON/functions/NEFullyConnectedHybridLayer.h index 39c57eb..56548a4 100644 --- a/compute/ARMComputeEx/arm_compute/runtime/NEON/functions/NEFullyConnectedHybridLayer.h +++ b/compute/ARMComputeEx/arm_compute/runtime/NEON/functions/NEFullyConnectedHybridLayer.h @@ -46,7 +46,7 @@ #include "arm_compute/core/NEON/kernels/NEMuliplyScaleFactorKernel.h" #include "arm_compute/core/NEON/kernels/NETransposeKernel.h" #include "arm_compute/runtime/MemoryGroup.h" -#include "arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCoreEx.h" +#include "arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h" #include "arm_compute/runtime/NEON/INESimpleFunctionNoBorder.h" #include "arm_compute/runtime/Tensor.h" @@ -164,7 +164,7 @@ private: MemoryGroup _memory_group; NEFullyConnectedHybridLayerReshapeWeights _reshape_weights_function; NEQuantizationSymmetricKernel _quant_input_kernel; - NEGEMMLowpMatrixMultiplyCoreEx _mm_gemmlowp; + NEGEMMLowpMatrixMultiplyCore _mm_gemmlowp; NEMultiplyScaleFactorKernel _multiply_scale_kernel; NEGEMMMatrixAccumulateBiasesKernel _accumulate_biases_kernel; Tensor _reshape_weights_output; diff --git a/compute/ARMComputeEx/arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCoreEx.h b/compute/ARMComputeEx/arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCoreEx.h deleted file mode 100644 index d844513..0000000 --- a/compute/ARMComputeEx/arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCoreEx.h +++ /dev/null @@ -1,170 +0,0 @@ -/* - * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * Copyright (c) 2017-2019 ARM Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#ifndef __ARM_COMPUTE_NEGEMMLOWPMATRIXMULTIPLYCOREEX_H__ -#define __ARM_COMPUTE_NEGEMMLOWPMATRIXMULTIPLYCOREEX_H__ - -#include "arm_compute/core/NEON/INEKernel.h" -#include "arm_compute/core/NEON/kernels/NEGEMMLowpOffsetContributionKernel.h" -#include "arm_compute/core/NEON/kernels/NEGEMMLowpOffsetContributionOutputStageKernel.h" -#include "arm_compute/core/NEON/kernels/NEGEMMLowpReductionKernel.h" -#include "arm_compute/runtime/IFunction.h" -#include "arm_compute/runtime/IMemoryManager.h" -#include "arm_compute/runtime/MemoryGroup.h" -// #include "arm_compute/runtime/NEON/functions/NEActivationLayer.h" -#include "arm_compute/runtime/NEON/functions/NEGEMMAssemblyDispatch.h" -#include "arm_compute/runtime/Tensor.h" - -#include - -namespace arm_compute -{ -class ITensor; - -/** Basic function to execute GEMMLowpMatrixMultiplyCore on NEON. This function calls the following - * NEON kernels if the DOT product instruction is not available: - * - * -# @ref NEGEMMInterleave4x4Kernel - * -# @ref NEGEMMTranspose1xWKernel - * -# @ref NEGEMMLowpMatrixMultiplyKernel - * -# @ref NEGEMMLowpOffsetContributionKernel - * -# @ref NEActivationLayer - * - * otherwise if the DOT product instruction is available: - * - * -# @ref NEGEMMLowpOffsetContributionKernel - * -*/ -class NEGEMMLowpMatrixMultiplyCoreEx : public IFunction -{ -public: - /** Constructor */ - NEGEMMLowpMatrixMultiplyCoreEx(std::shared_ptr memory_manager = nullptr); - /** Prevent instances of this class from being copied (As this class contains pointers) */ - NEGEMMLowpMatrixMultiplyCoreEx(const NEGEMMLowpMatrixMultiplyCoreEx &) = delete; - /** Default move constructor */ - NEGEMMLowpMatrixMultiplyCoreEx(NEGEMMLowpMatrixMultiplyCoreEx &&) = default; - /** Prevent instances of this class from being copied (As this class contains pointers) */ - NEGEMMLowpMatrixMultiplyCoreEx &operator=(const NEGEMMLowpMatrixMultiplyCoreEx &) = delete; - /** Default move assignment operator */ - NEGEMMLowpMatrixMultiplyCoreEx &operator=(NEGEMMLowpMatrixMultiplyCoreEx &&) = default; - /** Initialise the kernel's inputs, output - * - * @note GEMM_LOWP: low precision GEMM kernel - * This kernel performs the following computations: - * - * -# Convert a values from QASYMM8 to int32 and add a_offset to each of them. - * -# Convert b values from QASYMM8 to int32 add b_offset to each of them. - * -# Compute the matrix product of the resulting a * b in int32. - * - * @note The @p output type is S32 if @p gemm_info.type == GEMMLowpOutputStageType::NONE. It is - * QASYMM8/QASYMM8_SIGNED otherwise - * - * @param[in] a First input tensor (Matrix A). Data type supported: - * QASYMM8/QASYMM8_SIGNED. - * @param[in] b Second input tensor (Matrix B). Data type supported: same as @p a - * @param[in] c Third input tensor (Matrix C). It can be a nullptr. Data type supported: - * S32 - * @param[out] output Output tensor. 
Data type supported: Data type supported: - * S32/QASYMM8/QASYMM8_SIGNED - * @param[in] gemm_info (Optional) Specifies if the matrix A and/or matrix B have been reshaped - * and - * if the reshape of matrix B should be executed only for the first run - */ - void configure(const ITensor *a, const ITensor *b, const ITensor *c, ITensor *output, - const GEMMInfo &gemm_info = GEMMInfo()); - /** Static function to check if given info will lead to a valid configuration of @ref - * NEGEMMLowpMatrixMultiplyCoreEx - * - * @note The @p output type is S32 if @p gemm_info.type == GEMMLowpOutputStageType::NONE. It is - * QASYMM8/QASYMM8_SIGNED otherwise - * - * @param[in] a First input tensor info (Matrix A). Data type supported: - * QASYMM8/QASYMM8_SIGNED. - * @param[in] b Second input tensor info (Matrix B). Data type supported: same as @p a - * @param[in] c Third input tensor info (Matrix C). It can be a nullptr. Data type - * supported: S32 - * @param[in] output Output tensor info. Data type supported: Data type supported: - * S32/QASYMM8/QASYMM8_SIGNED - * @param[in] gemm_info (Optional) Specifies if the matrix A and/or matrix B have been reshaped - * and - * if the reshape of matrix B should be executed only for the first run - * - * @return a status - */ - static Status validate(const ITensorInfo *a, const ITensorInfo *b, const ITensorInfo *c, - const ITensorInfo *output, const GEMMInfo &gemm_info = GEMMInfo()); - - // Inherited methods overridden - void run() override; - void prepare() override; - -private: - MemoryGroup _memory_group; - NEGEMMAssemblyDispatch _asm_glue; - std::unique_ptr _mm_kernel; - std::unique_ptr _mtx_a_reshape_kernel; - std::unique_ptr _mtx_b_reshape_kernel; - NEGEMMLowpMatrixAReductionKernel _mtx_a_reduction_kernel; - NEGEMMLowpMatrixBReductionKernel _mtx_b_reduction_kernel; - NEGEMMLowpOffsetContributionKernel _offset_contribution_kernel; - NEGEMMLowpOffsetContributionOutputStageKernel _offset_contribution_output_stage_kernel; - - Tensor _vector_sum_col; - Tensor _vector_sum_row; - Tensor _tmp_a; - Tensor _tmp_b; - Tensor _mm_result_s32; - Tensor _signed_a; - Tensor _signed_output; - const ITensor *_original_b; - int32_t _a_offset; - int32_t _b_offset; - - bool _run_vector_matrix_multiplication; - bool _assembly_path; - bool _fused_assembly_path; - bool _reshape_b_only_on_first_run; - bool _is_prepared; - bool _fuse_output_stage; - bool _flip_signedness; -}; -} // namespace arm_compute -#endif /*__ARM_COMPUTE_NEGEMMLOWPMATRIXMULTIPLYCOREEX_H__ */ diff --git a/compute/ARMComputeEx/arm_compute/runtime/NEON/functions/NEPReLU.h b/compute/ARMComputeEx/arm_compute/runtime/NEON/functions/NEPReLU.h deleted file mode 100644 index ca84133..0000000 --- a/compute/ARMComputeEx/arm_compute/runtime/NEON/functions/NEPReLU.h +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * Copyright (c) 2018-2019 ARM Limited. 
- * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#ifndef __ARM_COMPUTE_NEPRELU_H__ -#define __ARM_COMPUTE_NEPRELU_H__ - -#include "arm_compute/runtime/NEON/INESimpleFunctionNoBorder.h" - -namespace arm_compute -{ -class ITensor; - -/** Basic function to run @ref NEPReLUKernel */ -class NEPReLU : public INESimpleFunctionNoBorder -{ -public: - /** Initialise the kernel's inputs and output - * - * @param[in] input. Data types supported: QASYMM8/F32. - * @param[in] alpha. Data types supported: Same as @p input. - * @param[out] output Output tensor. Data types supported: Same as @p input. - */ - void configure(const ITensor *input, const ITensor *alpha, ITensor *output); -}; -} // namespace arm_compute -#endif /* __ARM_COMPUTE_NEPRELU_H__ */ diff --git a/compute/ARMComputeEx/arm_compute/runtime/NEON/functions/NERNNLayerEx.h b/compute/ARMComputeEx/arm_compute/runtime/NEON/functions/NERNNLayerEx.h deleted file mode 100644 index 8a7b179..0000000 --- a/compute/ARMComputeEx/arm_compute/runtime/NEON/functions/NERNNLayerEx.h +++ /dev/null @@ -1,130 +0,0 @@ -/* - * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * Copyright (c) 2018 ARM Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#ifndef __ARM_COMPUTE_NERNNLAYER_EX_H__ -#define __ARM_COMPUTE_NERNNLAYER_EX_H__ - -#include "arm_compute/core/NEON/kernels/NEActivationLayerKernel.h" -#include "arm_compute/core/NEON/kernels/NEArithmeticAdditionKernel.h" -#include "arm_compute/core/NEON/kernels/NECopyKernel.h" - -#include "arm_compute/core/Types.h" -#include "arm_compute/runtime/NEON/functions/NEFullyConnectedLayer.h" -#include "arm_compute/runtime/NEON/functions/NEGEMM.h" - -namespace arm_compute -{ -// Forward declarations -class ITensor; - -/** Basic function to run @ref NERNNLayerEx */ -class NERNNLayerEx : public IFunction -{ -public: - /** Default constructor */ - NERNNLayerEx(std::shared_ptr memory_manager = nullptr); - /** Prevent instances of this class from being copied (As this class contains pointers) */ - NERNNLayerEx(const NERNNLayerEx &) = delete; - /** Default move constructor */ - NERNNLayerEx(NERNNLayerEx &&) = default; - /** Prevent instances of this class from being copied (As this class contains pointers) */ - NERNNLayerEx &operator=(const NERNNLayerEx &) = delete; - /** Default move assignment operator */ - NERNNLayerEx &operator=(NERNNLayerEx &&) = default; - /** Initialize the function - * - * @param[in] input Input is a 2-D tensor of shape [input_size, batch_size]. Data - * types supported: F16/F32 - * @param[in] weights Weights tensor of shape [input_size, num_units] that - * multiplies the input. Data types supported: Same as @p input - * @param[in] recurrent_weights Weights tensor of shape [num_units, num_units] that multiplies - * the current 'state'. Data types supported: Same as @p input - * @param[in] bias Bias vector of shape [num_units]. Data types supported: Same - * as @p input - * @param[out] output Output tensor of shape [num_units, batch_size]. Data types - * supported: Same as @p input - * @param[in,out] hidden_state Output tensor of shape [num_units, batch_size]. Data types - * supported: Same as @p input - * @param[in] info Activation layer parameter. - */ - void configure(const ITensor *input, const ITensor *weights, const ITensor *recurrent_weights, - const ITensor *bias, ITensor *hidden_state, ITensor *output, - ActivationLayerInfo &info); - /** Initialize the function - * - * @param[in] input Input is a 2-D tensor of shape [input_size, batch_size]. Data - * types supported: F16/F32 - * @param[in] weights Weights tensor of shape [input_size, num_units] that multiplies - * the input. Data types supported: Same as @p input - * @param[in] recurrent_weights Weights tensor of shape [num_units, num_units] that multiplies the - * current 'state'. Data types supported: Same as @p input - * @param[in] bias Bias vector of shape [num_units]. Data types supported: Same as @p - * input - * @param[in] output Output tensor of shape [num_units, batch_size]. Data types - * supported: Same as @p input - * @param[in] hidden_state Output tensor of shape [num_units, batch_size]. Data types - * supported: Same as @p input - * @param[in] info Activation layer parameter. 
- * - * @return a status - */ - static Status validate(const ITensorInfo *input, const ITensorInfo *weights, - const ITensorInfo *recurrent_weights, const ITensorInfo *bias, - const ITensorInfo *hidden_state, const ITensorInfo *output, - const ActivationLayerInfo &info); - - // Inherited methods overridden: - void run() override; - void prepare() override; - -private: - MemoryGroup _memory_group; - NEGEMM _gemm_state_f; - NEArithmeticAdditionKernel _add_kernel; - NEActivationLayerKernel _activation_kernel; - NEFullyConnectedLayer _fully_connected_kernel; - NECopyKernel _copy_kernel; - Tensor _fully_connected_out; - Tensor _gemm_output; - Tensor _add_output; - bool _is_prepared; -}; -} // namespace arm_compute -#endif /* __ARM_COMPUTE_NERNNLAYER_EX_H__ */ diff --git a/compute/ARMComputeEx/arm_compute/runtime/NEON/functions/NEReduceMeanEx.h b/compute/ARMComputeEx/arm_compute/runtime/NEON/functions/NEReduceMeanEx.h deleted file mode 100644 index 03ac457..0000000 --- a/compute/ARMComputeEx/arm_compute/runtime/NEON/functions/NEReduceMeanEx.h +++ /dev/null @@ -1,99 +0,0 @@ -/* - * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * Copyright (c) 2018 ARM Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - -#ifndef __ARM_COMPUTE_NEON_REDUCE_MEAN_EX_H__ -#define __ARM_COMPUTE_NEON_REDUCE_MEAN_EX_H__ - -#include "arm_compute/runtime/IFunction.h" - -#include "arm_compute/core/NEON/kernels/NEFillBorderKernel.h" -#include "arm_compute/core/Types.h" -#include "arm_compute/runtime/MemoryGroup.h" -#include "arm_compute/runtime/NEON/functions/NEReductionOperation.h" -#include "arm_compute/runtime/NEON/functions/NEReshapeLayer.h" - -namespace arm_compute -{ -class ITensor; - -/** Basic function to perform reduce operation */ -class NEReduceMeanEx : public IFunction -{ -public: - /** Constructor */ - NEReduceMeanEx(std::shared_ptr memory_manager = nullptr); - /** Configure kernel - * - * @note Supported tensor rank: up to 4 - * - * @param[in] input Source tensor. Data type supported: QASYMM8/F16/F32 - * @param[in] reduction_axis Reduction axis vector. - * @param[in] keep_dims If positive, retains reduced dimensions with length 1. - * @param[out] output Destination tensor. Data type supported: Same as @p input - */ - void configure(ITensor *input, const Coordinates &reduction_axis, bool keep_dims, - ITensor *output); - - /** Static function to check if given info will lead to a valid configuration of @ref - * NEReduceMeanEx - * - * @param[in] input Source tensor. Data type supported: QASYMM8/F16/F32 - * @param[in] reduction_axis Reduction axis vector. - * @param[in] keep_dims If positive, retains reduced dimensions with length 1. - * @param[in] output Destination tensor. Data type supported: Same as @p input - * - * @return A status - */ - static Status validate(const ITensorInfo *input, const Coordinates &reduction_axis, - bool keep_dims, const ITensorInfo *output); - - // Inherited methods overridden: - void run() override; - -private: - MemoryGroup _memory_group; - std::unique_ptr _reduction_kernels{nullptr}; - std::unique_ptr _reduced_outs{nullptr}; - NEReshapeLayer _reshape; - unsigned int _reduction_ops; - bool _keep_dims; -}; -} // namespace arm_compute -#endif /* __ARM_COMPUTE_NEON_REDUCE_MEAN_EX_H__ */ diff --git a/compute/ARMComputeEx/arm_compute/runtime/NEON/functions/NESpaceToBatchLayerEx.h b/compute/ARMComputeEx/arm_compute/runtime/NEON/functions/NESpaceToBatchLayerEx.h deleted file mode 100644 index 3b695fb..0000000 --- a/compute/ARMComputeEx/arm_compute/runtime/NEON/functions/NESpaceToBatchLayerEx.h +++ /dev/null @@ -1,136 +0,0 @@ -/* - * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * Copyright (c) 2019 ARM Limited. 
- * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#ifndef __ARM_COMPUTE_NESPACETOBATCHLAYEREX_H__ -#define __ARM_COMPUTE_NESPACETOBATCHLAYEREX_H__ - -#include "arm_compute/runtime/IFunction.h" - -#include "arm_compute/core/NEON/kernels/NEMemsetKernel.h" -#include "arm_compute/core/NEON/kernels/NESpaceToBatchLayerKernel.h" -#include "arm_compute/core/Types.h" - -namespace arm_compute -{ -class ITensor; - -/** Basic function to spatial divide a tensor. This function calls the following NEON - * kernels/functions: - * - * -# @ref NEMemsetKernel - * -# @ref NESpaceToBatchLayerKernel - */ -class NESpaceToBatchLayerEx : public IFunction -{ -public: - /** Default constructor */ - NESpaceToBatchLayerEx(); - /** Prevent instances of this class from being copied (As this class contains pointers) */ - NESpaceToBatchLayerEx(const NESpaceToBatchLayerEx &) = delete; - /** Prevent instances of this class from being copied (As this class contains pointers) */ - NESpaceToBatchLayerEx &operator=(const NESpaceToBatchLayerEx &) = delete; - /** Allow instances of this class to be moved */ - NESpaceToBatchLayerEx(NESpaceToBatchLayerEx &&) = default; - /** Allow instances of this class to be moved */ - NESpaceToBatchLayerEx &operator=(NESpaceToBatchLayerEx &&) = default; - /** Default destructor */ - virtual ~NESpaceToBatchLayerEx() = default; - /** Set the input and output tensors. - * - * @param[in] input Tensor input. Supported tensor rank: 4. Data types supported: - * U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32. - * @param[in] block_shape 1-D tensor with shape [M]. Data types supported: S32 - * @param[in] paddings 2-D tensor with shape [2, M]. Data types supported: S32 - * @param[out] output Tensor output. Data types supported: same as @p input - */ - void configure(const ITensor *input, const ITensor *block_shape, const ITensor *paddings, - ITensor *output); - /** Set the input and output tensors. (Static block shape and paddings) - * - * @param[in] input Tensor input. Supported tensor rank: 4. Data types supported: - * U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32. - * @param[in] block_shape_x Block shape x value. - * @param[in] block_shape_y Block shape y value. - * @param[in] padding_left The left padding of the output tensor. - * @param[in] padding_right The right padding of the output tensor. - * @param[out] output Tensor output. 
Data types supported: same as @p input - */ - void configure(const ITensor *input, const int block_shape_x, const int block_shape_y, - const Size2D &padding_left, const Size2D &padding_right, ITensor *output); - /** Static function to check if given info will lead to a valid configuration of @ref - * NESpaceToBatchLayerEx - * - * @param[in] input Tensor input info. Supported tensor rank: 4. Data types supported: - * U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32. - * @param[in] block_shape block shape tensor info with shape [M]. Data types supported: S32 - * @param[in] paddings paddings tensor info with shape [2, M]. Data types supported: S32 - * @param[in] output Tensor output info. Data types supported: same as @p input - * - * @return a status - */ - static Status validate(const ITensorInfo *input, const ITensorInfo *block_shape, - const ITensorInfo *paddings, const ITensorInfo *output); - /** Static function to check if given info will lead to a valid configuration of @ref - * NESpaceToBatchLayerEx (Static block shape and paddings) - * - * @param[in] input Tensor input info. Supported tensor rank: 4. Data types supported: - * U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32. - * @param[in] block_shape_x Block shape x value. - * @param[in] block_shape_y Block shape y value. - * @param[in] padding_left The left padding of the output tensor. - * @param[in] padding_right The right padding of the output tensor. - * @param[in] output Tensor output info. Data types supported: same as @p input - * - * @return a status - */ - static Status validate(const ITensorInfo *input, const int block_shape_x, const int block_shape_y, - const Size2D &padding_left, const Size2D &padding_right, - const ITensorInfo *output); - - // Inherited methods overridden: - void run() override; - -private: - NESpaceToBatchLayerKernel _space_to_batch_kernel; /**< SpaceToBatch kernel to run */ - NEMemsetKernel _memset_kernel; /**< Memset kernel to run */ - bool _has_padding; /**< Flag to check if the output has padding */ -}; -} // namespace arm_compute -#endif /* __ARM_COMPUTE_NESPACETOBATCHLAYEREX_H__ */ diff --git a/compute/ARMComputeEx/arm_compute/runtime/NEON/functions/NESpaceToDepthLayerEx.h b/compute/ARMComputeEx/arm_compute/runtime/NEON/functions/NESpaceToDepthLayerEx.h deleted file mode 100644 index 9f32616..0000000 --- a/compute/ARMComputeEx/arm_compute/runtime/NEON/functions/NESpaceToDepthLayerEx.h +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * Copyright (c) 2019 ARM Limited. 
- * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#ifndef __ARM_COMPUTE_NESPACETODEPTHLAYEREX_H__ -#define __ARM_COMPUTE_NESPACETODEPTHLAYEREX_H__ - -#include "arm_compute/core/Types.h" -#include "arm_compute/runtime/NEON/INESimpleFunctionNoBorder.h" - -namespace arm_compute -{ -class ITensor; - -/** This function calls the following NEON kernels/functions: - * - * -# @ref NESpaceToDepthLayerKernelEx - */ -class NESpaceToDepthLayerEx : public INESimpleFunctionNoBorder -{ -public: - /** Set the input and output tensors. - * - * @param[in] input Tensor input. Supported tensor rank: 4. Data types supported: - * U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32. - * @param[out] output Tensor output. Data types supported: same as @p input - * @param[in] block_shape Block shape value - */ - void configure(const ITensor *input, ITensor *output, int32_t block_shape); - /** Static function to check if given info will lead to a valid configuration of @ref - * NESpaceToDepthLayerEx (Static block shape and paddings) - * - * @param[in] input Tensor input info. Supported tensor rank: 4. Data types supported: - * U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32. - * @param[in] output Tensor output info. Data types supported: same as @p input - * @param[in] block_shape Block shape value - * - * @return a status - */ - static Status validate(const ITensorInfo *input, const ITensorInfo *output, int32_t block_shape); -}; -} // namespace arm_compute -#endif /* __ARM_COMPUTE_NESPACETODEPTHLAYEREX_H__ */ diff --git a/compute/ARMComputeEx/arm_compute/runtime/NEON/functions/NETransposeConvLayer.h b/compute/ARMComputeEx/arm_compute/runtime/NEON/functions/NETransposeConvLayer.h index 408d150..24ff5da 100644 --- a/compute/ARMComputeEx/arm_compute/runtime/NEON/functions/NETransposeConvLayer.h +++ b/compute/ARMComputeEx/arm_compute/runtime/NEON/functions/NETransposeConvLayer.h @@ -15,7 +15,7 @@ */ /* - * Copyright (c) 2017-2019 ARM Limited. + * Copyright (c) 2017-2020 ARM Limited. * * SPDX-License-Identifier: MIT * @@ -37,16 +37,14 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ - #ifndef __ARM_COMPUTE_NETRANSPOSECONVLAYER_H__ #define __ARM_COMPUTE_NETRANSPOSECONVLAYER_H__ -#include "arm_compute/runtime/CPP/functions/CPPUpsampleEx.h" +#include "arm_compute/runtime/CPP/functions/CPPUpsample.h" #include "arm_compute/runtime/NEON/functions/NEConvolutionLayer.h" #include "arm_compute/runtime/NEON/functions/NEDirectConvolutionLayer.h" -#include "arm_compute/runtime/NEON/functions/NEPermute.h" +#include "arm_compute/runtime/NEON/functions/NEReverse.h" -#include "arm_compute/core/CPP/kernels/CPPFlipWeightsKernel.h" #include "arm_compute/core/Types.h" #include "arm_compute/runtime/IFunction.h" #include "arm_compute/runtime/IMemoryManager.h" @@ -59,8 +57,8 @@ namespace arm_compute { /** Function to run the deconvolution layer. * - * Transpose convolution Layer is the backward pass of Convolution Layer. First we transform the - * input depending on the stride and pad info and then perfrom a 1x1 + * Deconvolution Layer is the backward pass of Convolution Layer. First we transform the input + * depending on the stride and pad info and then perform a 1x1 * convolution pass. Input stride defines how many zeroes we should put between each element of the * input, pad is the amount of padding and finally a is a user * specified value where a < stride - 1 that increases the padding top and right of the input image. @@ -81,21 +79,22 @@ namespace arm_compute * kernel_x and kernel_y are the convolution sizes in x and y. * stride_x and stride_y is the input stride of the first and second dimension. * - * The weights used by Transpose convolution are supposed to be the same as the ones used for - * Convolution. Therefore, it will be necessary to use the weights in the - * reverse order to perform an actual convolution. This is achieved by using the @ref - * CPPFlipWeightsKernel. + * The weights used by Deconvolution are supposed to be the same as the ones used for Convolution. + * Therefore, it will be necessary to use the weights in the + * reverse order to perform an actual convolution. This is achieved by using @ref NEReverse. * * This function calls the following NEON kernels/functions: * - * -# @ref CPPUpsample + * -# @ref CPPUpsampleEx * -# @ref NEConvolutionLayer + * -# @ref NEPermute + * -# @ref NEReverse * */ class NETransposeConvLayer : public IFunction { public: - /** Default constructor */ + /** Constructor */ NETransposeConvLayer(std::shared_ptr<IMemoryManager> memory_manager = nullptr); /** Prevent instances of this class from being copied (As this class contains pointers) */ @@ -112,37 +111,38 @@ public: /** Set the input, weights, biases and output tensors. * * @param[in,out] input Input tensor. 3 lower dimensions represent a single input, and an - * optional 4th dimension for batch of inputs. Data types supported: F32/F16/QASYMM8. + * optional 4th dimension for batch of inputs. Data types supported: F32/F16/QASYMM8/QASYMM8_SIGNED. * @param[in] weights The 4d weights with dimensions [width, height, IFM, OFM]. Data type - * supported: Same as @p input. + * supported: Same as @p input. * @param[in] bias Optional, ignored if NULL. The biases have one dimension. Data type - * supported: Data types supported: S32 for QASYMM8 input, F32 for F32 input, F16 for F16 input. + * supported: Data types supported: S32 for QASYMM8 and QASYMM8_SIGNED input, F32 for F32 input, F16 + * for F16 input. * @param[out] output Output tensor. The output has the same number of dimensions as the @p - * input. + * input. 
* @param[in] info Contains padding and policies to be used in the deconvolution, this is - * decribed in @ref PadStrideInfo. - * @param[in] invalid_right The number of zeros added to right edge of the output. - * @param[in] invalid_bottom The number of zeros added to top edge of the output. + * described in @ref PadStrideInfo. + * @param[in] invalid_right The number of zeros added to right edge of the output. + * @param[in] invalid_bottom The number of zeros added to bottom edge of the output. * */ void configure(ITensor *input, const ITensor *weights, const ITensor *bias, ITensor *output, const PadStrideInfo &info, unsigned int invalid_right, unsigned int invalid_bottom); /** Static function to check if given info will lead to a valid configuration of @ref - * NETransposeConvLayer + * NETransposeConvLayer * * @param[in] input Input tensor info. 3 lower dimensions represent a single input, and an - * optional 4th dimension for batch of inputs. Data types supported: F32/F16/QASYMM8. + * optional 4th dimension for batch of inputs. Data types supported: F32/F16/QASYMM8/QASYMM8_SIGNED. * @param[in] weights The 4d weights info with dimensions [width, height, IFM, OFM]. Data type - * supported: Same as @p input. + * supported: Same as @p input. * @param[in] bias (Optional) The biases have one dimension. Data type supported: Data types - * supported: S32 for QASYMM8 input, F32 for F32 input, F16 for F16 input. + * supported: S32 for QASYMM8 and QASYMM8_SIGNED input, F32 for F32 input, F16 for F16 input. * @param[in] output Output tensor info. The output has the same number of dimensions as the @p - * input. + * input. * @param[in] info Contains padding and policies to be used in the deconvolution, this is - * decribed in @ref PadStrideInfo. - * @param[in] innvalid_right The number of zeros added to right edge of the output. - * @param[in] invalid_bottom The number of zeros added to top edge of the output. + * described in @ref PadStrideInfo. + * @param[in] invalid_right The number of zeros added to right edge of the output. + * @param[in] invalid_bottom The number of zeros added to bottom edge of the output. 
* * @return a status */ @@ -158,17 +158,11 @@ public: private: MemoryGroup _memory_group; NEConvolutionLayer _conv_f; - CPPUpsampleEx _upsample_f; - CPPFlipWeightsKernel _flip_weights; - NEPermute _permute_input; - NEPermute _permute_weights; - NEPermute _permute_output; + CPPUpsample _upsample_f; + NEReverse _flip_weights; Tensor _scaled_output; Tensor _weights_flipped; - Tensor _permuted_input; - Tensor _permuted_weights; - Tensor _permuted_output; - bool _is_nchw; + Tensor _flip_axis; const ITensor *_original_weights; ITensor *_input; PadStrideInfo _info; diff --git a/compute/ARMComputeEx/src/core/CL/CLKernelLibrary.cpp b/compute/ARMComputeEx/src/core/CL/CLKernelLibrary.cpp index 7b6b974..ba42a24 100644 --- a/compute/ARMComputeEx/src/core/CL/CLKernelLibrary.cpp +++ b/compute/ARMComputeEx/src/core/CL/CLKernelLibrary.cpp @@ -55,16 +55,7 @@ using namespace arm_compute; const std::map CLKernelLibraryEx::_kernel_program_map = { // ARMComputeEx kernels - {"arg_op", "arg_operation.cl"}, - {"arithmetic_add_qasymm8", "arithmetic_op_quantized.cl"}, {"binary_logical_op", "binary_logical_op.cl"}, - {"cast", "cast.cl"}, - {"cast_qasymm_in", "cast.cl"}, - {"cast_qasymm_out", "cast.cl"}, - {"comparison_op", "comparison_op.cl"}, - {"comparison_op_qasymm8", "comparison_op_quantized.cl"}, - {"depth_to_space_nchw", "depth_to_space.cl"}, - {"depth_to_space_nhwc", "depth_to_space.cl"}, {"embedding_lookup", "embedding_lookup.cl"}, {"gather_ex", "gather_ex.cl"}, {"gather_ex_1d", "gather_ex.cl"}, @@ -74,10 +65,6 @@ const std::map CLKernelLibraryEx::_kernel_program_map {"instance_normalization_ex", "instance_normalization_ex.cl"}, {"multiply_scale_factor", "multiply_scale_factor.cl"}, {"neg_tensor", "neg_tensor.cl"}, - {"permute_generic", "permute_ex.cl"}, - {"pixelwise_mul_qasymm8", "pixelwise_mul_quantized.cl"}, - {"prelu", "prelu.cl"}, - {"prelu_qasymm8", "prelu_quantized.cl"}, {"quantization_symm8", "quantization_symm8.cl"}, {"reduce_min_max", "reduce_operation.cl"}, {"reduce_sum_mean", "reduce_operation.cl"}, @@ -91,29 +78,15 @@ const std::map CLKernelLibraryEx::_kernel_program_map {"radixsort_reorder", "topkv2_radixsort.cl"}, {"topkv2_quicksort", "topkv2_quicksort.cl"}, {"scale_factor_symm8", "scale_factor.cl"}, - {"space_to_depth_nchw", "space_to_depth.cl"}, - {"space_to_depth_nhwc", "space_to_depth.cl"}, }; const std::map CLKernelLibraryEx::_program_source_map = { #ifdef EMBEDDED_KERNELS { - "arg_operation.cl", -#include "./cl_kernels/arg_operation.clembed" - }, - { - "cast.cl", -#include "./cl_kernels/cast.clembed" - }, - { "embedding_lookup.cl", #include "./cl_kernels/embedding_lookup.clembed" }, { - "depth_to_space.cl", -#include "./cl_kernels/depth_to_space.clembed" - }, - { "gather_ex.cl", #include "./cl_kernels/gather_ex.clembed" }, @@ -150,14 +123,6 @@ const std::map CLKernelLibraryEx::_program_source_map #include "./cl_kernels/neg_tensor.clembed" }, { - "prelu.cl", -#include "./cl_kernels/prelu.clembed" - }, - { - "prelu_quantized.cl", -#include "./cl_kernels/prelu_quantized.clembed" - }, - { "quantization_symm8.cl", #include "./cl_kernels/quantization_symm8.clembed" }, @@ -170,10 +135,6 @@ const std::map CLKernelLibraryEx::_program_source_map #include "./cl_kernels/scale_factor.clembed" }, { - "space_to_depth.cl", -#include "./cl_kernels/space_to_depth.clembed" - }, - { "topkv2.cl", #include "./cl_kernels/topkv2.clembed" }, diff --git a/compute/ARMComputeEx/src/core/CL/cl_kernels/arg_operation.cl b/compute/ARMComputeEx/src/core/CL/cl_kernels/arg_operation.cl deleted file mode 100644 index 
03717cf..0000000 --- a/compute/ARMComputeEx/src/core/CL/cl_kernels/arg_operation.cl +++ /dev/null @@ -1,137 +0,0 @@ -/* - * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * Copyright (c) 2017 ARM Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#include "helpers.h" - -#if defined(DATA_TYPE) && defined(DEPTH_OUT) && defined(OP_CODE) -/** Perform arg_max/arg_min - * - * @note Datatype should be given as a preprocessor argument using -DDATA_TYPE=type. - * e.g. -DDATA_TYPE=short - * @attention Output tensor depth should be given as a preprocessor argument using -DDEPTH_OUT=size. - * e.g. -DDEPTH_OUT=16 - * @attention Operation type(code) specifying which operation to perform should be passed as - * preprocessor argument using -DOP_CODE = number. e.g. -DOP_CODE=1 - * - * @param[in] input_ptr Pointer to the source image. Supported data - * types: - * U8/QASYMM8/S8/U16/S16/F16/U32/S32/F32 - * @param[in] input_stride_x Stride of the source image in X dimension - * (in bytes) - * @param[in] input_step_x input_stride_x * number of elements along X - * processed per workitem(in bytes) - * @param[in] input_stride_y Stride of the source image in Y dimension - * (in bytes) - * @param[in] input_step_y input_stride_y * number of elements along Y - * processed per workitem(in bytes) - * @param[in] input_stride_z Stride of the source tensor in Z dimension - * (in bytes) - * @param[in] input_step_z input_stride_z * number of elements along Z - * processed per workitem(in bytes) - * @param[in] input_offset_first_element_in_bytes The offset of the first element - * in the source image - * @param[in] input_stride_w Stride of the source tensor in W dimension - * (in bytes) - * @param[in] input_step_w output_stride_w * number of elements along W - * processed per workitem(in bytes) - * @param[out] output_ptr Pointer to the destination image. 
- * Supported data types: U32 - * @param[in] output_stride_x Stride of the destination image in X dimension - * (in bytes) - * @param[in] output_step_x output_stride_x * number of elements along X - * processed per workitem(in bytes) - * @param[in] output_stride_y Stride of the destination image in Y dimension - * (in bytes) - * @param[in] output_step_y output_stride_y * number of elements along Y - * processed per workitem(in bytes) - * @param[in] output_stride_z Stride of the source tensor in Z dimension - * (in bytes) - * @param[in] output_step_z output_stride_z * number of elements along Z - * processed per workitem(in bytes) - * @param[in] output_stride_w Stride of the source tensor in W dimension - * (in bytes) - * @param[in] output_step_w output_stride_w * number of elements along W - * processed per workitem(in bytes) - * @param[in] output_offset_first_element_in_bytes The offset of the first element in the - * destination image - * @param[in] axis Axis through which reduction occurs - * @param[in] dim Dimension across the axis to be reduced. - */ - -__kernel void arg_op(TENSOR4D_DECLARATION(input), TENSOR4D_DECLARATION(output), const int axis, - const int dim) -{ - Tensor4D in = CONVERT_TO_TENSOR4D_STRUCT(input, 0); - Tensor4D out = CONVERT_TO_TENSOR4D_STRUCT(output, DEPTH_OUT); - - int indices[4] = { - get_global_id(0), get_global_id(1), get_global_id(2) % DEPTH_OUT, - get_global_id(2) / DEPTH_OUT, - }; - - DATA_TYPE value = - *((__global DATA_TYPE *)tensor4D_offset(&in, indices[0], indices[1], indices[2], indices[3])); - DATA_TYPE tval = value; - int idx = 0; - for (int i = 1; i < dim; ++i) - { - indices[axis] = i; - -#if OP_CODE == 1 // ArgMax - value = max(value, *((__global DATA_TYPE *)tensor4D_offset(&in, indices[0], indices[1], - indices[2], indices[3]))); -#elif OP_CODE == 2 // ArgMin - value = min(value, *((__global DATA_TYPE *)tensor4D_offset(&in, indices[0], indices[1], - indices[2], indices[3]))); -#else - return; - -#endif - - if (tval != value) - { - idx = indices[axis]; - tval = value; - } - } - - *((__global uint *)out.ptr) = idx; -} -#endif // defined(DATA_TYPE) && defined(DEPTH_OUT) && defined(OP_CODE) diff --git a/compute/ARMComputeEx/src/core/CL/cl_kernels/arithmetic_op_quantized.cl b/compute/ARMComputeEx/src/core/CL/cl_kernels/arithmetic_op_quantized.cl deleted file mode 100644 index f74c1c1..0000000 --- a/compute/ARMComputeEx/src/core/CL/cl_kernels/arithmetic_op_quantized.cl +++ /dev/null @@ -1,191 +0,0 @@ -/* - * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * Copyright (c) 2016, 2017 ARM Limited. 
- * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#include "helpers_asymm.h" - -#ifdef SATURATE -#define ADD(x, y) add_sat((x), (y)) -#define SUB(x, y) sub_sat((x), (y)) -#else /* SATURATE */ -#define ADD(x, y) (x) + (y) -#define SUB(x, y) (x) - (y) -#endif /* SATURATE */ - -/** Performs a pixelwise addition used to quantize down the int32 accumulator values of GEMMLowp to - * QASYMM8 - * - * The following computations will be performed: - * - * -# Add offset terms to inputs - -# Get scaled value of two inputs - * -# Add inputs - * -# Add offset terms to final result - * -# Multiply each entry of result by result_mult_int - * -# Shift the int32 accumulator by result_shift - * -# Clamp the resulting int32 values to the [0..255] range and cast to QASYMM8. - * - * @attention The inputs and output data types need to be passed at compile time using - * -DDATA_TYPE_IN1, -DDATA_TYPE_IN2 and -DDATA_TYPE_OUT: - * e.g. -DDATA_TYPE_IN1=uchar -DDATA_TYPE_IN2=uchar -DDATA_TYPE_OUT=uchar - * @attention The number of bits to shift left of input tensors must be passed at compile time using - * -DLEFT_SHIFT - * @attention The offset, scalar scale factor and number of bits to shift right of input tensors - * must be passed at compile time using -DIN1_OFFSET, -RIN1_MULT_INT, -DIN1_SHIFT, - -DIN2_OFFSET, - * -RIN2_MULT_INT and -DIN2_SHIFT - * @attention The offset, scalar scale factor and number of bits to shift right of output tensor - * must be passed at compile time using -DRESULT_OFFSET, -RESULT_MULT_INT and - -DRESULT_SHIFT - * - * @attention The input and output data_types need to be passed at compile time using - * -DDATA_TYPE_IN1, -DDATA_TYPE_IN2 and -DDATA_TYPE_OUT: - * e.g. -DDATA_TYPE_IN1=uchar -DDATA_TYPE_IN2=uchar -DDATA_TYPE_OUT=uchar - * @attention The inputs and output scale information of qasymm8 need to be passed at compile time - * using -DSCALE_IN1, -DSCALE_IN2 and -DSCALE_OUT: - * e.g. -DSCALE_IN1=1.f -DSCALE_IN2=1.f -DSCALE_OUT=2.f - * @attention The inputs and output scale offset need to be passed at compile time using - * -DOFFSET_IN1, -DOFFSET_IN2 and -DOFFSET_OUT: - * e.g. -DOFFSET_IN1=0 -DOFFSET_IN2=0 -DOFFSET_OUT=0 - * @attention Vector size should be given as a preprocessor argument using -DVEC_SIZE=size. e.g. - * -DVEC_SIZE=16 - * @attention To perform saturating operation -DSATURATE has to be passed to the compiler otherwise - * wrapping policy will be used. 
- * - * @param[in] in1_ptr Pointer to the source tensor. - * Supported data types: QASYMM8 - * @param[in] in1_stride_x Stride of the source tensor in X dimension - * (in bytes) - * @param[in] in1_step_x in1_stride_x * number of elements along X processed - * per workitem(in bytes) - * @param[in] in1_stride_y Stride of the source tensor in Y dimension - * (in bytes) - * @param[in] in1_step_y in1_stride_y * number of elements along Y processed - * per workitem(in bytes) - * @param[in] in1_stride_z Stride of the source tensor in Z dimension - * (in bytes) - * @param[in] in1_step_z in1_stride_z * number of elements along Z processed - * per workitem(in bytes) - * @param[in] in1_offset_first_element_in_bytes The offset of the first element in the source - * tensor - * @param[in] in2_ptr Pointer to the source tensor. Supported data types: - * QASYMM8 - * @param[in] in2_stride_x Stride of the source tensor in X dimension - * (in bytes) - * @param[in] in2_step_x in2_stride_x * number of elements along X processed - * per workitem(in bytes) - * @param[in] in2_stride_y Stride of the source tensor in Y dimension - * (in bytes) - * @param[in] in2_step_y in2_stride_y * number of elements along Y processed - * per workitem(in bytes) - * @param[in] in2_stride_z Stride of the source tensor in Z dimension - * (in bytes) - * @param[in] in2_step_z in2_stride_z * number of elements along Z processed - * per workitem(in bytes) - * @param[in] in2_offset_first_element_in_bytes The offset of the first element in the source - * tensor - * @param[out] out_ptr Pointer to the destination tensor. - * Supported data types: QASYMM8 - * @param[in] out_stride_x Stride of the destination tensor in X dimension - * (in bytes) - * @param[in] out_step_x out_stride_x * number of elements along X processed - * per workitem(in bytes) - * @param[in] out_stride_y Stride of the destination tensor in Y dimension - * (in bytes) - * @param[in] out_step_y out_stride_y * number of elements along Y processed - * per workitem(in bytes) - * @param[in] out_stride_z Stride of the source tensor in Z dimension - * (in bytes) - * @param[in] out_step_z out_stride_z * number of elements along Z processed - * per workitem(in bytes) - * @param[in] out_offset_first_element_in_bytes The offset of the first element in the destination - * tensor - */ -__kernel void arithmetic_add_qasymm8(TENSOR3D_DECLARATION(in1), TENSOR3D_DECLARATION(in2), - TENSOR3D_DECLARATION(out)) -{ - // Get pixels pointer - Tensor3D in1 = CONVERT_TO_TENSOR3D_STRUCT(in1); - Tensor3D in2 = CONVERT_TO_TENSOR3D_STRUCT(in2); - Tensor3D out = CONVERT_TO_TENSOR3D_STRUCT(out); - - // Load data - VEC_DATA_TYPE(int, 16) - in1_data = CONVERT(vload16(0, (__global DATA_TYPE_IN1 *)in1.ptr), VEC_DATA_TYPE(int, 16)); - VEC_DATA_TYPE(int, 16) - in2_data = CONVERT(vload16(0, (__global DATA_TYPE_IN2 *)in2.ptr), VEC_DATA_TYPE(int, 16)); - - // Get scaled value of two inputs - VEC_DATA_TYPE(int, 16) in1_val = in1_data + (VEC_DATA_TYPE(int, 16))(IN1_OFFSET); - VEC_DATA_TYPE(int, 16) in2_val = in2_data + (VEC_DATA_TYPE(int, 16))(IN2_OFFSET); - - VEC_DATA_TYPE(int, 16) - left_shift = (VEC_DATA_TYPE(int, 16))1 << (VEC_DATA_TYPE(int, 16))(LEFT_SHIFT); - VEC_DATA_TYPE(int, 16) shifted_in1_val = in1_val * left_shift; - VEC_DATA_TYPE(int, 16) shifted_in2_val = in2_val * left_shift; - - VEC_DATA_TYPE(int, 16) - scaled_in1_val = - ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(shifted_in1_val, IN1_MULT_INT, IN1_SHIFT, 16); - VEC_DATA_TYPE(int, 16) - scaled_in2_val = - 
ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(shifted_in2_val, IN2_MULT_INT, IN2_SHIFT, 16); - - // Add inputs and multiply with a multiplier smaller than 1 - VEC_DATA_TYPE(int, 16) sum_val = scaled_in1_val + scaled_in2_val; - VEC_DATA_TYPE(int, 16) - out_val = - ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(sum_val, RESULT_MULT_INT, RESULT_SHIFT, 16); - out_val += (VEC_DATA_TYPE(int, 16))(RESULT_OFFSET); - - VEC_DATA_TYPE(uchar, 16) res = CONVERT(out_val, VEC_DATA_TYPE(uchar, 16)); - - // TODO: Apply min-max BOUND to support fuse with relu. - /* - #if defined(MIN_BOUND) - res = max(res, (uchar16)MIN_BOUND); - #endif // defined(MIN_BOUND) - #if defined(MAX_BOUND) - res = min(res, (uchar16)MAX_BOUND); - #endif // defined(MAX_BOUND) - */ - - // Store result - VSTORE(16)(CONVERT(res, VEC_DATA_TYPE(DATA_TYPE_OUT, 16)), 0, (__global DATA_TYPE_OUT *)out.ptr); -} diff --git a/compute/ARMComputeEx/src/core/CL/cl_kernels/cast.cl b/compute/ARMComputeEx/src/core/CL/cl_kernels/cast.cl deleted file mode 100644 index 4147a00..0000000 --- a/compute/ARMComputeEx/src/core/CL/cl_kernels/cast.cl +++ /dev/null @@ -1,233 +0,0 @@ -/* - * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * Copyright (c) 2017 ARM Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#include "helpers.h" - -#ifndef SCALE -#define SCALE 1.0f -#endif -#ifndef OFFSET -#define OFFSET 0 -#endif -#ifndef VEC_SIZE -#define VEC_SIZE 1 -#endif - -#if defined(DATA_TYPE_IN) && defined(DATA_TYPE_OUT) -/** Perform a cast operation on an input tensor. - * - * @attention Data types of both input and output can be passed using the -DDATA_TYPE_IN and - * -DDATA_TYPE_OUT compile flag, e.g. -DDATA_TYPE_IN=float, -DDATA_TYPE_OUT=int - * @attention Vector size should be given as a preprocessor argument using -DVEC_SIZE=size. e.g. 
- * -DVEC_SIZE=16 - * @attention -DBOOL_INPUT : Whether type of input is bool. - * - * @param[in] input_ptr Pointer to the source image. Supported data - * types: F16/F32 - * @param[in] input_stride_x Stride of the source image in X dimension (in - * bytes) - * @param[in] input_step_x input_stride_x * number of elements along X - * processed per workitem(in bytes) - * @param[in] input_stride_y Stride of the source image in Y dimension (in - * bytes) - * @param[in] input_step_y input_stride_y * number of elements along Y - * processed per workitem(in bytes) - * @param[in] input_stride_z Stride of the source tensor in Z dimension (in - * bytes) - * @param[in] input_step_z input_stride_z * number of elements along Z - * processed per workitem(in bytes) - * @param[in] input_offset_first_element_in_bytes The offset of the first element in the source - * image - * @param[out] output_ptr Pointer to the destination image. Supported data - * types: same as @p input_ptr - * @param[in] output_stride_x Stride of the destination image in X dimension - * (in bytes) - * @param[in] output_step_x output_stride_x * number of elements along X - * processed per workitem(in bytes) - * @param[in] output_stride_y Stride of the destination image in Y dimension - * (in bytes) - * @param[in] output_step_y output_stride_y * number of elements along Y - * processed per workitem(in bytes) - * @param[in] output_stride_z Stride of the source tensor in Z dimension (in - * bytes) - * @param[in] output_step_z output_stride_z * number of elements along Z - * processed per workitem(in bytes) - * @param[in] output_offset_first_element_in_bytes The offset of the first element in the - * destination image - */ -__kernel void cast(TENSOR3D_DECLARATION(input), TENSOR3D_DECLARATION(output)) -{ - Tensor3D input = CONVERT_TO_TENSOR3D_STRUCT(input); - Tensor3D output = CONVERT_TO_TENSOR3D_STRUCT(output); - - VSTORE(VEC_SIZE) - (CONVERT(VLOAD(VEC_SIZE)(0, (__global DATA_TYPE_IN *)input.ptr), - VEC_DATA_TYPE(DATA_TYPE_OUT, VEC_SIZE)), - 0, (__global DATA_TYPE_OUT *)output.ptr); - VEC_DATA_TYPE(DATA_TYPE_OUT, VEC_SIZE) - res = CONVERT(VLOAD(VEC_SIZE)(0, (__global DATA_TYPE_IN *)input.ptr), - VEC_DATA_TYPE(DATA_TYPE_OUT, VEC_SIZE)); -#if defined(BOOL_INPUT) - VEC_DATA_TYPE(char, VEC_SIZE) tmp = CONVERT(res, VEC_DATA_TYPE(char, VEC_SIZE)); - VEC_DATA_TYPE(char, VEC_SIZE) mask = (VEC_DATA_TYPE(char, VEC_SIZE))(1); - res = CONVERT(tmp & mask, VEC_DATA_TYPE(DATA_TYPE_OUT, VEC_SIZE)); -#endif // defined(BOOL_INPUT) - - VSTORE(VEC_SIZE)(res, 0, (__global DATA_TYPE_OUT *)output.ptr); -} - -/** Perform a cast operation on an QASYMM8 input tensor. - * @attention Data types of both input and output can be passed using the -DDATA_TYPE_IN and - * -DDATA_TYPE_OUT compile flag, e.g. -DDATA_TYPE_IN=float, -DDATA_TYPE_OUT=int - * @attention Offset and Scale of input should be given as a preprocessor argument using - * -DOFFSET=int, -DSCALE=float. e.g. -DOFFSET=1, -DSCALE=0.5 - * @attention Vector size should be given as a preprocessor argument using -DVEC_SIZE=size. e.g. - * -DVEC_SIZE=16 - * - * @param[in] input_ptr Pointer to the source image. 
Supported data - * types: F16/F32 - * @param[in] input_stride_x Stride of the source image in X dimension (in - * bytes) - * @param[in] input_step_x input_stride_x * number of elements along X - * processed per workitem(in bytes) - * @param[in] input_stride_y Stride of the source image in Y dimension (in - * bytes) - * @param[in] input_step_y input_stride_y * number of elements along Y - * processed per workitem(in bytes) - * @param[in] input_stride_z Stride of the source tensor in Z dimension (in - * bytes) - * @param[in] input_step_z input_stride_z * number of elements along Z - * processed per workitem(in bytes) - * @param[in] input_offset_first_element_in_bytes The offset of the first element in the source - * image - * @param[out] output_ptr Pointer to the destination image. Supported data - * types: same as @p input_ptr - * @param[in] output_stride_x Stride of the destination image in X dimension - * (in bytes) - * @param[in] output_step_x output_stride_x * number of elements along X - * processed per workitem(in bytes) - * @param[in] output_stride_y Stride of the destination image in Y dimension - * (in bytes) - * @param[in] output_step_y output_stride_y * number of elements along Y - * processed per workitem(in bytes) - * @param[in] output_stride_z Stride of the source tensor in Z dimension (in - * bytes) - * @param[in] output_step_z output_stride_z * number of elements along Z - * processed per workitem(in bytes) - * @param[in] output_offset_first_element_in_bytes The offset of the first element in the - * destination image - */ -__kernel void cast_qasymm_in(TENSOR3D_DECLARATION(input), TENSOR3D_DECLARATION(output)) -{ - Tensor3D input = CONVERT_TO_TENSOR3D_STRUCT(input); - Tensor3D output = CONVERT_TO_TENSOR3D_STRUCT(output); - - VEC_DATA_TYPE(DATA_TYPE_IN, VEC_SIZE) - in_data = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE_IN *)input.ptr); - VEC_DATA_TYPE(int, VEC_SIZE) offset = (VEC_DATA_TYPE(int, VEC_SIZE))(OFFSET); - VEC_DATA_TYPE(float, VEC_SIZE) scale = (VEC_DATA_TYPE(float, VEC_SIZE))(SCALE); - - VEC_DATA_TYPE(int, VEC_SIZE) tmp = CONVERT(in_data, VEC_DATA_TYPE(int, VEC_SIZE)) - offset; - VEC_DATA_TYPE(float, VEC_SIZE) out_data = CONVERT(tmp, VEC_DATA_TYPE(float, VEC_SIZE)) * scale; - - VSTORE(VEC_SIZE) - (CONVERT(out_data, VEC_DATA_TYPE(DATA_TYPE_OUT, VEC_SIZE)), 0, - (__global DATA_TYPE_OUT *)output.ptr); -} - -/** Perform a cast operation on an QASYMM8 output tensor. - * @attention Data types of both input and output can be passed using the -DDATA_TYPE_IN and - * -DDATA_TYPE_OUT compile flag, e.g. -DDATA_TYPE_IN=float, -DDATA_TYPE_OUT=int - * @attention Offset and Scale of output should be given as a preprocessor argument using - * -DOFFSET=int, -DSCALE=float. e.g. -DOFFSET=1, -DSCALE=0.5 - * @attention Vector size should be given as a preprocessor argument using -DVEC_SIZE=size. e.g. - * -DVEC_SIZE=16 - * - * @param[in] input_ptr Pointer to the source image. 
Supported data - * types: F16/F32 - * @param[in] input_stride_x Stride of the source image in X dimension (in - * bytes) - * @param[in] input_step_x input_stride_x * number of elements along X - * processed per workitem(in bytes) - * @param[in] input_stride_y Stride of the source image in Y dimension (in - * bytes) - * @param[in] input_step_y input_stride_y * number of elements along Y - * processed per workitem(in bytes) - * @param[in] input_stride_z Stride of the source tensor in Z dimension (in - * bytes) - * @param[in] input_step_z input_stride_z * number of elements along Z - * processed per workitem(in bytes) - * @param[in] input_offset_first_element_in_bytes The offset of the first element in the source - * image - * @param[out] output_ptr Pointer to the destination image. Supported data - * types: U8 - * @param[in] output_stride_x Stride of the destination image in X dimension - * (in bytes) - * @param[in] output_step_x output_stride_x * number of elements along X - * processed per workitem(in bytes) - * @param[in] output_stride_y Stride of the destination image in Y dimension - * (in bytes) - * @param[in] output_step_y output_stride_y * number of elements along Y - * processed per workitem(in bytes) - * @param[in] output_stride_z Stride of the source tensor in Z dimension (in - * bytes) - * @param[in] output_step_z output_stride_z * number of elements along Z - * processed per workitem(in bytes) - * @param[in] output_offset_first_element_in_bytes The offset of the first element in the - * destination image - */ -__kernel void cast_qasymm_out(TENSOR3D_DECLARATION(input), TENSOR3D_DECLARATION(output)) -{ - Tensor3D input = CONVERT_TO_TENSOR3D_STRUCT(input); - Tensor3D output = CONVERT_TO_TENSOR3D_STRUCT(output); - - VEC_DATA_TYPE(DATA_TYPE_IN, VEC_SIZE) - in_data = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE_IN *)input.ptr); - VEC_DATA_TYPE(int, VEC_SIZE) offset = (VEC_DATA_TYPE(int, VEC_SIZE))(OFFSET); - VEC_DATA_TYPE(float, VEC_SIZE) scale = (VEC_DATA_TYPE(float, VEC_SIZE))(SCALE); - - VEC_DATA_TYPE(float, VEC_SIZE) tmp = CONVERT(in_data, VEC_DATA_TYPE(float, VEC_SIZE)) / scale; - VEC_DATA_TYPE(float, VEC_SIZE) out_data = tmp + CONVERT(offset, VEC_DATA_TYPE(float, VEC_SIZE)); - - VSTORE(VEC_SIZE) - (CONVERT(out_data, VEC_DATA_TYPE(DATA_TYPE_OUT, VEC_SIZE)), 0, - (__global DATA_TYPE_OUT *)output.ptr); -} -#endif // defined(DATA_TYPE_IN) && defined(DATA_TYPE_OUT) diff --git a/compute/ARMComputeEx/src/core/CL/cl_kernels/depth_to_space.cl b/compute/ARMComputeEx/src/core/CL/cl_kernels/depth_to_space.cl deleted file mode 100644 index 0285c95..0000000 --- a/compute/ARMComputeEx/src/core/CL/cl_kernels/depth_to_space.cl +++ /dev/null @@ -1,185 +0,0 @@ -/* - * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * Copyright (c) 2016, 2017 ARM Limited. 
- * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#include "helpers.h" - -#if defined(DATA_TYPE) && defined(DEPTH_OUT) && defined(BLOCK_SIZE) && defined(Z_OUT) -/** Perform space to depth rearrangement of tensor - * - * @attention Data type can be passed using the -DDATA_TYPE compile flag, e.g. -DDATA_TYPE=float - * @attention Input tensor depth should be given as a preprocessor argument using -DDEPTH_OUT=size. - * e.g. -DDEPTH_OUT=16 - * @attention The value of the z-axis of output tensor should be given as a preprocessor argument - * using -DZ_OUT=size. e.g. -DZ_OUT=16 - * @attention block size should be given as a preprocessor argument using -DBLOCK_SIZE=size. e.g. - * -DBLOCK_SIZE=1 - * - * @param[in] input_ptr Pointer to the source image. Supported data - * types: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32 - * @param[in] input_stride_x Stride of the source image in X dimension (in - * bytes) - * @param[in] input_step_x input_stride_x * number of elements along X - * processed per workitem(in bytes) - * @param[in] input_stride_y Stride of the source image in Y dimension (in - * bytes) - * @param[in] input_step_y input_stride_y * number of elements along Y - * processed per workitem(in bytes) - * @param[in] input_stride_z Stride of the source tensor in Z dimension (in - * bytes) - * @param[in] input_step_z input_stride_z * number of elements along Z - * processed per workitem(in bytes) - * @param[in] input_offset_first_element_in_bytes The offset of the first element in the source - * image - * @param[out] output_ptr Pointer to the destination image. 
Supported data - * types: same as @p input_ptr - * @param[in] output_stride_x Stride of the destination image in X dimension - * (in bytes) - * @param[in] output_step_x output_stride_x * number of elements along X - * processed per workitem(in bytes) - * @param[in] output_stride_y Stride of the destination image in Y dimension - * (in bytes) - * @param[in] output_step_y output_stride_y * number of elements along Y - * processed per workitem(in bytes) - * @param[in] output_stride_z Stride of the source tensor in Z dimension (in - * bytes) - * @param[in] output_step_z output_stride_z * number of elements along Z - * processed per workitem(in bytes) - * @param[in] output_stride_w Stride of the source tensor in W dimension (in - * bytes) - * @param[in] output_step_w output_stride_w * number of elements along W - * processed per workitem(in bytes) - * @param[in] output_offset_first_element_in_bytes The offset of the first element in the - * destination image - */ -__kernel void depth_to_space_nchw(TENSOR4D_DECLARATION(input), TENSOR4D_DECLARATION(output)) -{ - Tensor4D in = CONVERT_TO_TENSOR4D_STRUCT_NO_STEP(input, 0); - Tensor4D out = CONVERT_TO_TENSOR4D_STRUCT(output, Z_OUT); - - int out_index[4] = {0}; - int in_index[4] = {0}; - - out_index[0] = get_global_id(0); // W - out_index[1] = get_global_id(1); // H - out_index[2] = get_global_id(2) % Z_OUT; // C - out_index[3] = get_global_id(2) / Z_OUT; // B - - in_index[0] = out_index[0] / BLOCK_SIZE; - in_index[1] = out_index[1] / BLOCK_SIZE; - in_index[2] = out_index[2] + - ((out_index[1] % BLOCK_SIZE) * BLOCK_SIZE + out_index[0] % BLOCK_SIZE) * DEPTH_OUT; - in_index[3] = out_index[3]; - - *((__global DATA_TYPE *)out.ptr) = *((__global DATA_TYPE *)tensor4D_offset( - &in, in_index[0], in_index[1], in_index[2], in_index[3])); -} -#endif // defined(DATA_TYPE) && defined(DEPTH_OUT) && defined(BLOCK_SIZE) && defined(Z_OUT) - -#if defined(DATA_TYPE) && defined(DEPTH_OUT) && defined(BLOCK_SIZE) && defined(Z_OUT) -/** Perform space to depth rearrangement of tensor (NHWC) - * - * @attention Data type can be passed using the -DDATA_TYPE compile flag, e.g. -DDATA_TYPE=float - * @attention Output tensor depth should be given as a preprocessor argument using -DDEPTH_OUT=size. - * e.g. -DDEPTH_OUT=16 - * @attention The value of the z-axis of output tensor should be given as a preprocessor argument - * using -DZ_OUT=size. e.g. -DZ_OUT=16 - * @attention block size should be given as a preprocessor argument using -DBLOCK_SIZE=size. e.g. - * -DBLOCK_SIZE=1 - * - * @param[in] input_ptr Pointer to the source image. Supported data - * types: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32 - * @param[in] input_stride_x Stride of the source image in X dimension (in - * bytes) - * @param[in] input_step_x input_stride_x * number of elements along X - * processed per workitem(in bytes) - * @param[in] input_stride_y Stride of the source image in Y dimension (in - * bytes) - * @param[in] input_step_y input_stride_y * number of elements along Y - * processed per workitem(in bytes) - * @param[in] input_stride_z Stride of the source tensor in Z dimension (in - * bytes) - * @param[in] input_step_z input_stride_z * number of elements along Z - * processed per workitem(in bytes) - * @param[in] input_offset_first_element_in_bytes The offset of the first element in the source - * image - * @param[out] output_ptr Pointer to the destination image. 
Supported data - * types: same as @p input_ptr - * @param[in] output_stride_x Stride of the destination image in X dimension - * (in bytes) - * @param[in] output_step_x output_stride_x * number of elements along X - * processed per workitem(in bytes) - * @param[in] output_stride_y Stride of the destination image in Y dimension - * (in bytes) - * @param[in] output_step_y output_stride_y * number of elements along Y - * processed per workitem(in bytes) - * @param[in] output_stride_z Stride of the source tensor in Z dimension (in - * bytes) - * @param[in] output_step_z output_stride_z * number of elements along Z - * processed per workitem(in bytes) - * @param[in] output_stride_w Stride of the source tensor in W dimension (in - * bytes) - * @param[in] output_step_w output_stride_w * number of elements along W - * processed per workitem(in bytes) - * @param[in] output_offset_first_element_in_bytes The offset of the first element in the - * destination image - */ -__kernel void depth_to_space_nhwc(TENSOR4D_DECLARATION(input), TENSOR4D_DECLARATION(output)) -{ - Tensor4D in = CONVERT_TO_TENSOR4D_STRUCT_NO_STEP(input, 0); - Tensor4D out = CONVERT_TO_TENSOR4D_STRUCT(output, Z_OUT); - - int out_index[4] = {0}; - int in_index[4] = {0}; - - out_index[0] = get_global_id(0); // C - out_index[1] = get_global_id(1); // W - out_index[2] = get_global_id(2) % Z_OUT; // H - out_index[3] = get_global_id(2) / Z_OUT; // B - - in_index[0] = out_index[0] + - ((out_index[2] % BLOCK_SIZE) * BLOCK_SIZE + out_index[1] % BLOCK_SIZE) * DEPTH_OUT; - in_index[1] = out_index[1] / BLOCK_SIZE; - in_index[2] = out_index[2] / BLOCK_SIZE; - in_index[3] = out_index[3]; - - *((__global DATA_TYPE *)out.ptr) = *((__global DATA_TYPE *)tensor4D_offset( - &in, in_index[0], in_index[1], in_index[2], in_index[3])); -} -#endif // defined(DATA_TYPE) && defined(DEPTH_OUT) && defined(BLOCK_SIZE) && defined(Z_OUT) diff --git a/compute/ARMComputeEx/src/core/CL/cl_kernels/helpers.h b/compute/ARMComputeEx/src/core/CL/cl_kernels/helpers.h index 2d0b6a2..e07a25e 100644 --- a/compute/ARMComputeEx/src/core/CL/cl_kernels/helpers.h +++ b/compute/ARMComputeEx/src/core/CL/cl_kernels/helpers.h @@ -15,7 +15,7 @@ */ /* - * Copyright (c) 2016-2018 ARM Limited. + * Copyright (c) 2016-2020 ARM Limited. * * SPDX-License-Identifier: MIT * @@ -37,7 +37,6 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ - #ifndef ARM_COMPUTE_HELPER_H #define ARM_COMPUTE_HELPER_H @@ -59,16 +58,219 @@ #pragma OPENCL EXTENSION cl_arm_printf : enable #endif // defined(ARM_COMPUTE_DEBUG_ENABLED) && defined(cl_arm_printf) +#define GPU_ARCH_MIDGARD 0x100 +#define GPU_ARCH_BIFROST 0x200 + +/** Concatenate two inputs. + * + * @param[in] a The first input to be concatenated + * @param[in] b The second input to be concatenated + * + * @return The concatenated output + */ +#define CONCAT(a, b) a##b + +/** Expand the given vector + * + * @param[in] x The vector to be expanded + * + * @return The expanded output + */ #define EXPAND(x) x +/** Clamp the given value between an upper and lower bound. + * + * @param[in] x The value to be clamped + * @param[in] min_val The lower bound + * @param[in] max_val The upper bound + * + * @return The clamped value. + */ #define CLAMP(x, min_val, max_val) min(max(x, min_val), max_val) +/** REVn reverses the given vector whose size is n. 
+ * @name REVn + * + * @param[in] x The vector to be reversed + * + * @return The reversed vector + * @{ + */ +#define REV1(x) ((x)) +#define REV2(x) ((x).s10) +#define REV3(x) ((x).s210) +#define REV4(x) ((x).s3210) +#define REV8(x) ((x).s76543210) +#define REV16(x) ((x).sFEDCBA9876543210) +/** @} */ // end of group REVn + +/** Reverse the given vector. + * @name REVERSE + * + * @param[in] x The vector to be reversed + * @param[in] s The size of the vector + * + * @return The reversed vector + * @{ + */ +#define REVERSE_STR(x, s) REV##s((x)) +#define REVERSE(x, s) REVERSE_STR(x, s) +/** @} */ // end of group REVERSE + +/** Circular-right-shift (rotate-right) the vector of size s by the amount of n. + * @name ROTs_n + * + * @param[in] x The vector to be shifted + * + * @return The shifted vector + * @{ + */ +#define ROT1_0(x) ((x)) + +#define ROT2_0(x) ((x)) +#define ROT2_1(x) ((x).s10) + +#define ROT3_0(x) ((x)) +#define ROT3_1(x) ((x).s201) +#define ROT3_2(x) ((x).s120) + +#define ROT4_0(x) ((x)) +#define ROT4_1(x) ((x).s3012) +#define ROT4_2(x) ((x).s2301) +#define ROT4_3(x) ((x).s1230) + +#define ROT8_0(x) ((x)) +#define ROT8_1(x) ((x).s70123456) +#define ROT8_2(x) ((x).s67012345) +#define ROT8_3(x) ((x).s56701234) +#define ROT8_4(x) ((x).s45670123) +#define ROT8_5(x) ((x).s34567012) +#define ROT8_6(x) ((x).s23456701) +#define ROT8_7(x) ((x).s12345670) + +#define ROT16_0(x) ((x)) +#define ROT16_1(x) ((x).sF0123456789ABCDE) +#define ROT16_2(x) ((x).sEF0123456789ABCD) +#define ROT16_3(x) ((x).sDEF0123456789ABC) +#define ROT16_4(x) ((x).sCDEF0123456789AB) +#define ROT16_5(x) ((x).sBCDEF0123456789A) +#define ROT16_6(x) ((x).sABCDEF0123456789) +#define ROT16_7(x) ((x).s9ABCDEF012345678) +#define ROT16_8(x) ((x).s89ABCDEF01234567) +#define ROT16_9(x) ((x).s789ABCDEF0123456) +#define ROT16_10(x) ((x).s6789ABCDEF012345) +#define ROT16_11(x) ((x).s56789ABCDEF01234) +#define ROT16_12(x) ((x).s456789ABCDEF0123) +#define ROT16_13(x) ((x).s3456789ABCDEF012) +#define ROT16_14(x) ((x).s23456789ABCDEF01) +#define ROT16_15(x) ((x).s123456789ABCDEF0) +/** @} */ // end of group ROTs_n + +/** Circular-right-shift (rotate-right) the given vector by the given amount. + * @name ROTATE + * + * @param[in] x The vector to be shifted + * @param[in] s The size of the vector + * @param[in] n The amount to be shifted + * + * @return The shifted vector + * @{ + */ +#define ROTATE_STR(x, s, n) ROT##s##_##n(x) +#define ROTATE(x, s, n) ROTATE_STR(x, s, n) +/** @} */ // end of group ROTATE + +/** Creates a vector of size n filled with offset values corresponding to the location of each + * element. + * @name V_OFFSn + * + * @param[in] dt The data type of the output vector + * + * @return The vector filled with offset values + * @{ + */ +#define V_OFFS1(dt) (dt)(0) +#define V_OFFS2(dt) (dt)(0, 1) +#define V_OFFS3(dt) (dt)(0, 1, 3) +#define V_OFFS4(dt) (dt)(0, 1, 2, 3) +#define V_OFFS8(dt) (dt)(0, 1, 2, 3, 4, 5, 6, 7) +#define V_OFFS16(dt) (dt)(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15) +/** @} */ // end of group V_OFFSn + +/** Create a vector filled with offset values corresponding to the location of each element. 
+ * @name VEC_OFFS + * + * @param[in] dt The data type of the output vector + * @param[in] s The size of the output vector + * + * @return The vector filled with offset values + * @{ + */ +#define VEC_OFFS_STR(dt, s) V_OFFS##s(dt) +#define VEC_OFFS(dt, s) VEC_OFFS_STR(dt, s) +/** @} */ // end of group VEC_OFFS + #define VLOAD_STR(size) vload##size #define VLOAD(size) VLOAD_STR(size) #define VSTORE_STR(size) vstore##size #define VSTORE(size) VSTORE_STR(size) +#define float1 float +#define half1 half +#define char1 char +#define uchar1 uchar +#define short1 short +#define ushort1 ushort +#define int1 int +#define uint1 uint +#define long1 long +#define ulong1 ulong +#define double1 double + +#define vload1(OFFSET, PTR) *(OFFSET + PTR) +#define vstore1(DATA, OFFSET, PTR) *(OFFSET + PTR) = DATA + +// Convert built-in functions with _sat modifier are not supported in floating point so we create +// defines +// without _sat to overcome this issue +#define convert_float_sat convert_float +#define convert_float1_sat convert_float +#define convert_float2_sat convert_float2 +#define convert_float3_sat convert_float3 +#define convert_float4_sat convert_float4 +#define convert_float8_sat convert_float8 +#define convert_float16_sat convert_float16 +#define convert_half_sat convert_float +#define convert_half1_sat convert_half +#define convert_half2_sat convert_half2 +#define convert_half3_sat convert_half3 +#define convert_half4_sat convert_half4 +#define convert_half8_sat convert_half8 +#define convert_half16_sat convert_half16 + +#define convert_float1 convert_float +#define convert_half1 convert_half +#define convert_char1 convert_char +#define convert_uchar1 convert_uchar +#define convert_short1 convert_short +#define convert_ushort1 convert_ushort +#define convert_int1 convert_int +#define convert_uint1 convert_uint +#define convert_long1 convert_long +#define convert_ulong1 convert_ulong +#define convert_double1 convert_double + +#define convert_char1_sat convert_char_sat +#define convert_uchar1_sat convert_uchar_sat +#define convert_short1_sat convert_short_sat +#define convert_ushort1_sat convert_ushort_sat +#define convert_int1_sat convert_int_sat +#define convert_uint1_sat convert_uint_sat +#define convert_long1_sat convert_long_sat +#define convert_ulong1_sat convert_ulong_sat +#define convert_double1_sat convert_double_sat + #define VEC_DATA_TYPE_STR(type, size) type##size #define VEC_DATA_TYPE(type, size) VEC_DATA_TYPE_STR(type, size) diff --git a/compute/ARMComputeEx/src/core/CL/cl_kernels/helpers_asymm.h b/compute/ARMComputeEx/src/core/CL/cl_kernels/helpers_asymm.h index a83b1a8..5f1b3f9 100644 --- a/compute/ARMComputeEx/src/core/CL/cl_kernels/helpers_asymm.h +++ b/compute/ARMComputeEx/src/core/CL/cl_kernels/helpers_asymm.h @@ -15,7 +15,7 @@ */ /* - * Copyright (c) 2017-2018 ARM Limited. + * Copyright (c) 2017-2020 ARM Limited. * * SPDX-License-Identifier: MIT * @@ -37,29 +37,112 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ - #ifndef ARM_COMPUTE_HELPERS_ASYMM_H #define ARM_COMPUTE_HELPERS_ASYMM_H #include "helpers.h" +/** Convert the given vector with round to nearest even rounding mode + * + * @param[in] x The target to be converted + * @param[in] type The target type + * + * @return The converted vector + */ +#define CONVERT_DOWN_RTE_STR(x, type) (convert_##type##_rte((x))) +#define CONVERT_DOWN_RTE(x, type) CONVERT_DOWN_RTE_STR(x, type) + +/** Quantize a floating-point scalar value to 8-bit asymmetric + * + * @param[in] input Input value to quantize + * @param[in] offset Quantization offset + * @param[in] scale Quantization scale + * + * @return quantized value + */ +inline uchar quantize_qasymm8(float input, float offset, float scale) +{ + float out_f32 = input / scale + offset; + uchar res_u8 = CONVERT_SAT(CONVERT_DOWN_RTE(out_f32, int), uchar); + return res_u8; +} + +/** Dequantize a scalar value from 8-bit asymmetric to floating-point + * + * @param[in] input Input value to quantize + * @param[in] offset Quantization offset + * @param[in] scale Quantization scale + * + * @return quantized value + */ +inline float dequantize_qasymm8(uchar input, float offset, float scale) +{ + return ((float)input - offset) * scale; +} + +/** Dequantize a scalar value from signed 8-bit asymmetric to floating-point + * + * @param[in] input Input value to quantize + * @param[in] offset Quantization offset + * @param[in] scale Quantization scale + * + * @return quantized value + */ +inline float dequantize_qasymm8_signed(char input, float offset, float scale) +{ + return ((float)input - offset) * scale; +} + +/** Quantize a vector of values from floating-point + * + * @param[in] type Output data type. + * @param[in] size Size of vector. + * + * @return quantized values + */ +#define QUANTIZE_IMPL(type, size) \ + inline VEC_DATA_TYPE(type, size) \ + quantize_##type##size(VEC_DATA_TYPE(float, size) input, float offset, float scale) \ + { \ + VEC_DATA_TYPE(float, size) \ + out_f32 = input / (VEC_DATA_TYPE(float, size))(scale) + (VEC_DATA_TYPE(float, size))(offset); \ + VEC_DATA_TYPE(type, size) \ + res = CONVERT_SAT(CONVERT_DOWN_RTE(out_f32, VEC_DATA_TYPE(int, size)), \ + VEC_DATA_TYPE(type, size)); \ + return res; \ + } + +/** Dequantize a vector of values to floating-point + * + * @param[in] type Input data type. + * @param[in] size Size of vector. + * + * @return dequantized values in floating point + */ +#define DEQUANTIZE_IMPL(type, size) \ + inline VEC_DATA_TYPE(float, size) \ + dequantize_##type##size(VEC_DATA_TYPE(type, size) input, float offset, float scale) \ + { \ + return (CONVERT(input, VEC_DATA_TYPE(float, size)) - offset) * scale; \ + } + /** Correctly-rounded-to-nearest division by a power-of-two. * * @param[in] size Size of vector. * * @return Correctly-rounded-to-nearest division by a power-of-two. 
*/ -#define ASYMM_ROUNDING_DIVIDE_BY_POW2_IMPL(size) \ - inline VEC_DATA_TYPE(int, size) \ - asymm_rounding_divide_by_POW2_##size(VEC_DATA_TYPE(int, size) x, int exponent) \ - { \ - VEC_DATA_TYPE(int, size) \ - mask = (1 << exponent) - 1; \ - const VEC_DATA_TYPE(int, size) zero = 0; \ - const VEC_DATA_TYPE(int, size) one = 1; \ - VEC_DATA_TYPE(int, size) \ - threshold = (mask >> 1) + select(zero, one, x < 0); \ - return (x >> exponent) + select(zero, one, (x & mask) > threshold); \ +#define ASYMM_ROUNDING_DIVIDE_BY_POW2_IMPL(size) \ + inline VEC_DATA_TYPE(int, size) asymm_rounding_divide_by_POW2_##size( \ + VEC_DATA_TYPE(int, size) x, VEC_DATA_TYPE(int, size) exponent) \ + { \ + const VEC_DATA_TYPE(int, size) zero = (VEC_DATA_TYPE(int, size))0; \ + const VEC_DATA_TYPE(int, size) one = (VEC_DATA_TYPE(int, size))1; \ + VEC_DATA_TYPE(int, size) \ + mask = (one << exponent) - one; \ + VEC_DATA_TYPE(int, size) \ + threshold = (mask >> 1) + select(zero, one, x < 0); \ + return (x >> exponent) + select(zero, one, (x & mask) > threshold); \ } /** Product of two numbers, interpreting them as fixed-point values in the interval [-1, 1), @@ -81,9 +164,19 @@ b_64 = convert_long##size(b); \ VEC_DATA_TYPE(long, size) \ ab_64 = a_64 * b_64; \ - /* COMPMID-907 */ \ + /* Revert COMPMID-907 */ \ + VEC_DATA_TYPE(long, size) \ + mask1 = 1 << 30; \ + VEC_DATA_TYPE(long, size) \ + mask2 = 1 - (1 << 30); \ + VEC_DATA_TYPE(long, size) \ + is_positive_or_zero = ab_64 >= 0; \ + VEC_DATA_TYPE(long, size) \ + nudge = select(mask2, mask1, is_positive_or_zero); \ + VEC_DATA_TYPE(long, size) \ + mask = 1ll << 31; \ VEC_DATA_TYPE(int, size) \ - ab_x2_high32 = convert_int##size(((ab_64 + (1 << 30)) >> 31)); \ + ab_x2_high32 = convert_int##size((ab_64 + nudge) / mask); \ return select(ab_x2_high32, INT_MAX, overflow); \ } @@ -335,9 +428,18 @@ return ASYMM_SATURATING_ROUNDING_MULT_BY_POW2(value, exponent, size); \ } +#define QUANTIZE_STR(input, offset, scale, type, size) quantize_##type##size(input, offset, scale) +#define QUANTIZE(input, offset, scale, type, size) QUANTIZE_STR(input, offset, scale, type, size) +#define DEQUANTIZE_STR(input, offset, scale, type, size) \ + dequantize_##type##size(input, offset, scale) +#define DEQUANTIZE(input, offset, scale, type, size) \ + DEQUANTIZE_STR(input, offset, scale, type, size) + #define ASYMM_ROUNDING_DIVIDE_BY_POW2(x, exponent, size) \ asymm_rounding_divide_by_POW2_##size(x, exponent) #define ASYMM_MULT(a, b, size) asymm_mult##size(a, b) +#define ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(x, quantized_multiplier, left_shift, size) \ + ASYMM_MULT(x *((VEC_DATA_TYPE(int, size))(1) << (-left_shift)), quantized_multiplier, size) #define ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(x, quantized_multiplier, right_shift, size) \ ASYMM_ROUNDING_DIVIDE_BY_POW2(ASYMM_MULT(x, quantized_multiplier, size), right_shift, size) #define ASYMM_EXP_ON_INTERVAL_BETWEEN_NEGATIVE_ONE_QUARTER_AND_0_EXCL(a, size) \ @@ -360,11 +462,53 @@ #define ASYMM_RESCALE(value, src_integer_bits, dst_integer_bits, size) \ asymm_rescale##size(value, src_integer_bits, dst_integer_bits) +#define MULTIPLY_BY_QUANTIZED_MULTIPLIER_IMPL(size) \ + inline VEC_DATA_TYPE(int, size) \ + multiply_by_quantized_multiplier##size(VEC_DATA_TYPE(int, size) input, int qmul, int shift) \ + { \ + const int left_shift = shift > 0 ? shift : 0; \ + const int right_shift = shift > 0 ? 
0 : -shift; \ + return ASYMM_ROUNDING_DIVIDE_BY_POW2(ASYMM_MULT(input * (1 << left_shift), qmul, size), \ + right_shift, size); \ + } +#define MULTIPLY_BY_QUANTIZED_MULTIPLIER(input, qmul, shift, size) \ + multiply_by_quantized_multiplier##size(input, qmul, shift) + +QUANTIZE_IMPL(uchar, 1) +QUANTIZE_IMPL(char, 1) +QUANTIZE_IMPL(uint, 1) +QUANTIZE_IMPL(int, 1) +QUANTIZE_IMPL(uchar, 4) +QUANTIZE_IMPL(ushort, 4) +QUANTIZE_IMPL(short, 4) +QUANTIZE_IMPL(uchar, 16) +QUANTIZE_IMPL(char, 16) +QUANTIZE_IMPL(ushort, 16) +QUANTIZE_IMPL(short, 16) +QUANTIZE_IMPL(uint, 16) +QUANTIZE_IMPL(int, 16) + +DEQUANTIZE_IMPL(uchar, 1) +DEQUANTIZE_IMPL(char, 1) +DEQUANTIZE_IMPL(uint, 1) +DEQUANTIZE_IMPL(int, 1) +DEQUANTIZE_IMPL(uchar, 4) +DEQUANTIZE_IMPL(ushort, 4) +DEQUANTIZE_IMPL(short, 4) +DEQUANTIZE_IMPL(uchar, 16) +DEQUANTIZE_IMPL(char, 16) +DEQUANTIZE_IMPL(ushort, 16) +DEQUANTIZE_IMPL(short, 16) +DEQUANTIZE_IMPL(uint, 16) +DEQUANTIZE_IMPL(int, 16) + +ASYMM_ROUNDING_DIVIDE_BY_POW2_IMPL(1) ASYMM_ROUNDING_DIVIDE_BY_POW2_IMPL(2) ASYMM_ROUNDING_DIVIDE_BY_POW2_IMPL(4) ASYMM_ROUNDING_DIVIDE_BY_POW2_IMPL(8) ASYMM_ROUNDING_DIVIDE_BY_POW2_IMPL(16) +ASYMM_MULT_IMPL(1) ASYMM_MULT_IMPL(2) ASYMM_MULT_IMPL(4) ASYMM_MULT_IMPL(8) @@ -375,16 +519,19 @@ ASYMM_EXP_ON_INTERVAL_BETWEEN_NEGATIVE_ONE_QUARTER_AND_0_EXCL_IMPL(4) ASYMM_EXP_ON_INTERVAL_BETWEEN_NEGATIVE_ONE_QUARTER_AND_0_EXCL_IMPL(8) ASYMM_EXP_ON_INTERVAL_BETWEEN_NEGATIVE_ONE_QUARTER_AND_0_EXCL_IMPL(16) +ASYMM_SELECT_USING_MASK_IMPL(1) ASYMM_SELECT_USING_MASK_IMPL(2) ASYMM_SELECT_USING_MASK_IMPL(4) ASYMM_SELECT_USING_MASK_IMPL(8) ASYMM_SELECT_USING_MASK_IMPL(16) +ASYMM_MASK_IF_ZERO_IMPL(1) ASYMM_MASK_IF_ZERO_IMPL(2) ASYMM_MASK_IF_ZERO_IMPL(4) ASYMM_MASK_IF_ZERO_IMPL(8) ASYMM_MASK_IF_ZERO_IMPL(16) +ASYMM_MASK_IF_NON_ZERO_IMPL(1) ASYMM_MASK_IF_NON_ZERO_IMPL(2) ASYMM_MASK_IF_NON_ZERO_IMPL(4) ASYMM_MASK_IF_NON_ZERO_IMPL(8) @@ -400,6 +547,7 @@ ASYMM_EXP_ON_NEGATIVE_VALUES_IMPL(4) ASYMM_EXP_ON_NEGATIVE_VALUES_IMPL(8) ASYMM_EXP_ON_NEGATIVE_VALUES_IMPL(16) +ASYMM_SATURATING_ROUNDING_MULT_BY_POW2_IMPL(1) ASYMM_SATURATING_ROUNDING_MULT_BY_POW2_IMPL(2) ASYMM_SATURATING_ROUNDING_MULT_BY_POW2_IMPL(4) ASYMM_SATURATING_ROUNDING_MULT_BY_POW2_IMPL(8) @@ -415,9 +563,16 @@ ASYMM_ONE_OVER_ONE_PLUS_X_FOR_X_IN_0_1_IMPL(4) ASYMM_ONE_OVER_ONE_PLUS_X_FOR_X_IN_0_1_IMPL(8) ASYMM_ONE_OVER_ONE_PLUS_X_FOR_X_IN_0_1_IMPL(16) +ASYMM_RESCALE_IMPL(1) ASYMM_RESCALE_IMPL(2) ASYMM_RESCALE_IMPL(4) ASYMM_RESCALE_IMPL(8) ASYMM_RESCALE_IMPL(16) +MULTIPLY_BY_QUANTIZED_MULTIPLIER_IMPL(1) +MULTIPLY_BY_QUANTIZED_MULTIPLIER_IMPL(2) +MULTIPLY_BY_QUANTIZED_MULTIPLIER_IMPL(4) +MULTIPLY_BY_QUANTIZED_MULTIPLIER_IMPL(8) +MULTIPLY_BY_QUANTIZED_MULTIPLIER_IMPL(16) + #endif // ARM_COMPUTE_HELPERS_ASYMM_H diff --git a/compute/ARMComputeEx/src/core/CL/cl_kernels/prelu.cl b/compute/ARMComputeEx/src/core/CL/cl_kernels/prelu.cl deleted file mode 100644 index 12c8eeb..0000000 --- a/compute/ARMComputeEx/src/core/CL/cl_kernels/prelu.cl +++ /dev/null @@ -1,120 +0,0 @@ -/* - * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
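For reference only (not part of the patch): a minimal host-side C++ sketch of what the QASYMM8 helpers added above compute, assuming the usual affine mapping real_value = (quantized - offset) * scale. The _ref names are illustrative, not library API.

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <iostream>

// Mirrors quantize_qasymm8(): divide by scale, add offset, round to nearest, saturate to [0, 255].
inline uint8_t quantize_qasymm8_ref(float input, float offset, float scale)
{
  const float out_f32 = input / scale + offset;
  const int rounded = static_cast<int>(std::lrintf(out_f32)); // round-to-nearest-even by default
  return static_cast<uint8_t>(std::min(255, std::max(0, rounded)));
}

// Mirrors dequantize_qasymm8(): subtract offset, multiply by scale.
inline float dequantize_qasymm8_ref(uint8_t input, float offset, float scale)
{
  return (static_cast<float>(input) - offset) * scale;
}

// Mirrors asymm_rounding_divide_by_POW2(): divide by 2^exponent, rounding ties away from zero.
inline int32_t rounding_divide_by_pow2_ref(int32_t x, int exponent)
{
  const int32_t mask = (1 << exponent) - 1;
  const int32_t threshold = (mask >> 1) + (x < 0 ? 1 : 0);
  return (x >> exponent) + (((x & mask) > threshold) ? 1 : 0);
}

int main()
{
  const float scale = 0.5f, offset = 128.0f;
  const uint8_t q = quantize_qasymm8_ref(-3.25f, offset, scale);                      // 121.5 rounds to 122
  std::cout << int(q) << " -> " << dequantize_qasymm8_ref(q, offset, scale) << "\n";  // 122 -> -3
  std::cout << rounding_divide_by_pow2_ref(7, 1) << "\n";                             // 7 / 2 rounds to 4
}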
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * Copyright (c) 2016-2018 ARM Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#include "helpers.h" - -#ifndef VEC_SIZE -#define VEC_SIZE 1 -#endif - -#if defined(DATA_TYPE) -/** Returns result of prelu function implemented as below: - * f(input) = alpha * input for input < 0, f(input) = input for input >= 0. - * - * @attention Data type can be passed using the -DDATA_TYPE compile flag, e.g. -DDATA_TYPE=float - * @attention Vector size should be given as a preprocessor argument using -DVEC_SIZE=size. e.g. - * -DVEC_SIZE=16 - * @note Can only take floating point data types. - * - * @param[in] input1_ptr Pointer to the source image. Supported Data - * types : F16/F32 - * @param[in] input1_stride_x Stride of the source image in X dimension (in - * bytes) - * @param[in] input1_step_x input1_stride_x * number of elements along X - * processed per workitem(in bytes) - * @param[in] input1_stride_y Stride of the source image in Y dimension (in - * bytes) - * @param[in] input1_step_y input1_stride_y * number of elements along Y - * processed per workitem(in bytes) - * @param[in] input1_stride_z Stride of the source tensor in Z dimension (in - * bytes) - * @param[in] input1_step_z input1_stride_z * number of elements along Z - * processed per workitem(in bytes) - * @param[in] input1_offset_first_element_in_bytes The offset of the first element in the source - * image - * @param[in] alpha_ptr Pointer to the source image. Supported Data - * types : F16/F32 - * @param[in] alpha_stride_x Stride of the source image in X dimension (in - * bytes) - * @param[in] alpha_step_x input2_stride_x * number of elements along X - * processed per workitem(in bytes) - * @param[in] alpha_stride_y Stride of the source image in Y dimension (in - * bytes) - * @param[in] alpha_step_y input2_stride_y * number of elements along Y - * processed per workitem(in bytes) - * @param[in] alpha_stride_z Stride of the source tensor in Z dimension (in - * bytes) - * @param[in] alpha_step_z input2_stride_z * number of elements along Z - * processed per workitem(in bytes) - * @param[in] alpha_offset_first_element_in_bytes The offset of the first element in the source - * image - * - * @param[out] output_ptr Pointer to the destination image. 
Supported - * data types: same as @p input_ptr - * @param[in] output_stride_x Stride of the destination image in X dimension - * (in bytes) - * @param[in] output_step_x output_stride_x * number of elements along X - * processed per workitem(in bytes) - * @param[in] output_stride_y Stride of the destination image in Y dimension - * (in bytes) - * @param[in] output_step_y output_stride_y * number of elements along Y - * processed per workitem(in bytes) - * @param[in] output_stride_z Stride of the source tensor in Z dimension (in - * bytes) - * @param[in] output_step_z output_stride_z * number of elements along Z - * processed per workitem(in bytes) - * @param[in] output_offset_first_element_in_bytes The offset of the first element in the - * destination image - */ -__kernel void prelu(TENSOR3D_DECLARATION(input), TENSOR3D_DECLARATION(alpha), - TENSOR3D_DECLARATION(output)) -{ - Tensor3D input = CONVERT_TO_TENSOR3D_STRUCT(input); - Tensor3D alpha = CONVERT_TO_TENSOR3D_STRUCT(alpha); - Tensor3D output = CONVERT_TO_TENSOR3D_STRUCT(output); - - VSTORE(VEC_SIZE) - (VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)input.ptr) < 0 - ? VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)input.ptr) * - VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)alpha.ptr) - : VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)input.ptr), - 0, (__global DATA_TYPE *)output.ptr); -} -#endif // defined(DATA_TYPE) diff --git a/compute/ARMComputeEx/src/core/CL/cl_kernels/prelu_quantized.cl b/compute/ARMComputeEx/src/core/CL/cl_kernels/prelu_quantized.cl deleted file mode 100644 index a66e107..0000000 --- a/compute/ARMComputeEx/src/core/CL/cl_kernels/prelu_quantized.cl +++ /dev/null @@ -1,138 +0,0 @@ -/* - * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * Copyright (c) 2016-2018 ARM Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
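For reference (not part of the patch): the deleted prelu.cl computes f(x) = alpha * x for x < 0 and f(x) = x otherwise, element by element over equally shaped input and alpha tensors. A minimal C++ sketch of that mapping, with illustrative names:

#include <cstddef>
#include <vector>

// Elementwise PReLU over equally sized buffers, matching the select in the kernel above.
void prelu_ref(const std::vector<float> &input, const std::vector<float> &alpha,
               std::vector<float> &output)
{
  output.resize(input.size());
  for (std::size_t i = 0; i < input.size(); ++i)
    output[i] = input[i] < 0.0f ? input[i] * alpha[i] : input[i];
}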
- */ - -#include "helpers.h" -#define SUB(x, y) (x) - (y) - -#if defined(OFF_IN) && defined(OFF_ALPHA) && defined(OFF_OUT) && defined(SCALE_IN) && \ - defined(SCALE_ALPHA) && defined(SCALE_OUT) && defined(VEC_SIZE) - -#define VEC_FLOAT VEC_DATA_TYPE(float, VEC_SIZE) -#define VEC_INT VEC_DATA_TYPE(int, VEC_SIZE) -#define VEC_UCHAR VEC_DATA_TYPE(uchar, VEC_SIZE) -#define CONVERT_RTE(x, type) (convert_##type##_rte((x))) -#define CONVERT_DOWN(x, type) CONVERT_RTE(x, type) -#define SELECT_TYPE VEC_INT - -/** Returns result of prelu function implemented as below: - * f(input) = alpha * input for input < 0, f(input) = input for input >= 0. - * - * @attention Data type can be passed using the -DDATA_TYPE_IN compile flag, e.g. - * -DDATA_TYPE_IN=uchar - * @attention Vector size should be given as a preprocessor argument using -DVEC_SIZE=size. e.g. - * -DVEC_SIZE=16 - * @note Can only take uchar data types. - * - * @param[in] input1_ptr Pointer to the source image. Supported Data - * types : QASYMM8 - * @param[in] input1_stride_x Stride of the source image in X dimension (in - * bytes) - * @param[in] input1_step_x input1_stride_x * number of elements along X - * processed per workitem(in bytes) - * @param[in] input1_stride_y Stride of the source image in Y dimension (in - * bytes) - * @param[in] input1_step_y input1_stride_y * number of elements along Y - * processed per workitem(in bytes) - * @param[in] input1_stride_z Stride of the source tensor in Z dimension (in - * bytes) - * @param[in] input1_step_z input1_stride_z * number of elements along Z - * processed per workitem(in bytes) - * @param[in] input1_offset_first_element_in_bytes The offset of the first element in the source - * image - * @param[in] alpha_ptr Pointer to the source image. Supported Data - * types : QASYMM8 - * @param[in] alpha_stride_x Stride of the source image in X dimension (in - * bytes) - * @param[in] alpha_step_x input2_stride_x * number of elements along X - * processed per workitem(in bytes) - * @param[in] alpha_stride_y Stride of the source image in Y dimension (in - * bytes) - * @param[in] alpha_step_y input2_stride_y * number of elements along Y - * processed per workitem(in bytes) - * @param[in] alpha_stride_z Stride of the source tensor in Z dimension (in - * bytes) - * @param[in] alpha_step_z input2_stride_z * number of elements along Z - * processed per workitem(in bytes) - * @param[in] alpha_offset_first_element_in_bytes The offset of the first element in the source - * image - * @param[out] output_ptr Pointer to the destination image. 
Supported - * data types: same as @p input_ptr - * @param[in] output_stride_x Stride of the destination image in X dimension - * (in bytes) - * @param[in] output_step_x output_stride_x * number of elements along X - * processed per workitem(in bytes) - * @param[in] output_stride_y Stride of the destination image in Y dimension - * (in bytes) - * @param[in] output_step_y output_stride_y * number of elements along Y - * processed per workitem(in bytes) - * @param[in] output_stride_z Stride of the source tensor in Z dimension (in - * bytes) - * @param[in] output_step_z output_stride_z * number of elements along Z - * processed per workitem(in bytes) - * @param[in] output_offset_first_element_in_bytes The offset of the first element in the - * destination image - */ -__kernel void prelu_qasymm8(TENSOR3D_DECLARATION(input), TENSOR3D_DECLARATION(alpha), - TENSOR3D_DECLARATION(output)) -{ - // Get pixels pointer - Tensor3D input = CONVERT_TO_TENSOR3D_STRUCT(input); - Tensor3D alpha = CONVERT_TO_TENSOR3D_STRUCT(alpha); - Tensor3D output = CONVERT_TO_TENSOR3D_STRUCT(output); - - VEC_INT in_vec = CONVERT(VLOAD(VEC_SIZE)(0, (__global uchar *)input.ptr), VEC_INT); - VEC_INT alpha_vec = CONVERT(VLOAD(VEC_SIZE)(0, (__global uchar *)alpha.ptr), VEC_INT); - - in_vec = SUB(in_vec, (VEC_INT)((int)OFF_IN)); - alpha_vec = SUB(alpha_vec, (VEC_INT)((int)OFF_ALPHA)); - - const VEC_FLOAT inf32 = CONVERT(in_vec, VEC_FLOAT) * (VEC_FLOAT)((float)SCALE_IN); - const VEC_FLOAT alphaf32 = CONVERT(alpha_vec, VEC_FLOAT) * (VEC_FLOAT)((float)SCALE_ALPHA); - const VEC_FLOAT outf32 = - select(inf32, inf32 * alphaf32, CONVERT(inf32 < (VEC_FLOAT)0, SELECT_TYPE)); - const VEC_FLOAT qresf32 = outf32 / ((VEC_FLOAT)(float)SCALE_OUT) + ((VEC_FLOAT)((float)OFF_OUT)); - const VEC_UCHAR res = CONVERT_SAT(CONVERT_DOWN(qresf32, VEC_INT), VEC_UCHAR); - - VSTORE(VEC_SIZE) - (res, 0, (__global uchar *)output.ptr); -} - -#endif // defined(OFF_IN) && defined(OFF_ALPHA) && defined(OFF_OUT) && defined(SCALE_IN) && - // defined(SCALE_ALPHA) && defined(SCALE_OUT) && defined(VEC_SIZE) diff --git a/compute/ARMComputeEx/src/core/CL/cl_kernels/space_to_depth.cl b/compute/ARMComputeEx/src/core/CL/cl_kernels/space_to_depth.cl deleted file mode 100644 index eb612f8..0000000 --- a/compute/ARMComputeEx/src/core/CL/cl_kernels/space_to_depth.cl +++ /dev/null @@ -1,185 +0,0 @@ -/* - * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * Copyright (c) 2016, 2017 ARM Limited. 
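For reference (not part of the patch): the deleted prelu_qasymm8 kernel dequantizes the input and alpha with their own scale/offset pairs, applies PReLU in float, and requantizes with the output scale/offset. A hedged single-element C++ sketch of that sequence (the _ref name is illustrative):

#include <algorithm>
#include <cmath>
#include <cstdint>

// One QASYMM8 element: dequantize input and alpha, apply PReLU, requantize with the output params.
inline uint8_t prelu_qasymm8_ref(uint8_t in, uint8_t alpha, float scale_in, int off_in,
                                 float scale_alpha, int off_alpha, float scale_out, int off_out)
{
  const float x = (static_cast<int>(in) - off_in) * scale_in;
  const float a = (static_cast<int>(alpha) - off_alpha) * scale_alpha;
  const float y = x < 0.0f ? x * a : x;
  const int q = static_cast<int>(std::lrintf(y / scale_out)) + off_out; // round, then add integer offset
  return static_cast<uint8_t>(std::min(255, std::max(0, q)));
}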
- * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#include "helpers.h" - -#if defined(DATA_TYPE) && defined(DEPTH_IN) && defined(BLOCK_SIZE) && defined(Z_IN) -/** Perform space to depth rearrangement of tensor - * - * @attention Data type can be passed using the -DDATA_TYPE compile flag, e.g. -DDATA_TYPE=float - * @attention Input tensor depth should be given as a preprocessor argument using -DDEPTH_IN=size. - * e.g. -DDEPTH_IN=16 - * @attention The value of the z-axis of input tensor depth should be given as a preprocessor - * argument using -DZ_IN=size. e.g. -DZ_IN=16 - * @attention block size should be given as a preprocessor argument using -DBLOCK_SIZE=size. e.g. - * -DBLOCK_SIZE=1 - * - * @param[in] input_ptr Pointer to the source image. Supported data - * types: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32 - * @param[in] input_stride_x Stride of the source image in X dimension (in - * bytes) - * @param[in] input_step_x input_stride_x * number of elements along X - * processed per workitem(in bytes) - * @param[in] input_stride_y Stride of the source image in Y dimension (in - * bytes) - * @param[in] input_step_y input_stride_y * number of elements along Y - * processed per workitem(in bytes) - * @param[in] input_stride_z Stride of the source tensor in Z dimension (in - * bytes) - * @param[in] input_step_z input_stride_z * number of elements along Z - * processed per workitem(in bytes) - * @param[in] input_offset_first_element_in_bytes The offset of the first element in the source - * image - * @param[out] output_ptr Pointer to the destination image. 
Supported data - * types: same as @p input_ptr - * @param[in] output_stride_x Stride of the destination image in X dimension - * (in bytes) - * @param[in] output_step_x output_stride_x * number of elements along X - * processed per workitem(in bytes) - * @param[in] output_stride_y Stride of the destination image in Y dimension - * (in bytes) - * @param[in] output_step_y output_stride_y * number of elements along Y - * processed per workitem(in bytes) - * @param[in] output_stride_z Stride of the source tensor in Z dimension (in - * bytes) - * @param[in] output_step_z output_stride_z * number of elements along Z - * processed per workitem(in bytes) - * @param[in] output_stride_w Stride of the source tensor in W dimension (in - * bytes) - * @param[in] output_step_w output_stride_w * number of elements along W - * processed per workitem(in bytes) - * @param[in] output_offset_first_element_in_bytes The offset of the first element in the - * destination image - */ -__kernel void space_to_depth_nchw(TENSOR4D_DECLARATION(input), TENSOR4D_DECLARATION(output)) -{ - Tensor4D in = CONVERT_TO_TENSOR4D_STRUCT(input, Z_IN); - Tensor4D out = CONVERT_TO_TENSOR4D_STRUCT_NO_STEP(output, 0); - - int out_index[4] = {0}; - int in_index[4] = {0}; - - in_index[0] = get_global_id(0); // W - in_index[1] = get_global_id(1); // H - in_index[2] = get_global_id(2) % Z_IN; // C - in_index[3] = get_global_id(2) / Z_IN; // B - - out_index[0] = in_index[0] / BLOCK_SIZE; - out_index[1] = in_index[1] / BLOCK_SIZE; - out_index[2] = - in_index[2] + ((in_index[1] % BLOCK_SIZE) * BLOCK_SIZE + in_index[0] % BLOCK_SIZE) * DEPTH_IN; - out_index[3] = in_index[3]; - - *((__global DATA_TYPE *)tensor4D_offset(&out, out_index[0], out_index[1], out_index[2], - out_index[3])) = *((__global DATA_TYPE *)in.ptr); -} -#endif // defined(DATA_TYPE) && defined(Z_IN) && defined(BLOCK_SIZE) && defined(Z_IN) - -#if defined(DATA_TYPE) && defined(Z_IN) && defined(BLOCK_SIZE) && defined(Z_IN) -/** Perform space to depth rearrangement of tensor - * - * @attention Data type can be passed using the -DDATA_TYPE compile flag, e.g. -DDATA_TYPE=float - * @attention Input tensor depth should be given as a preprocessor argument using -DDEPTH_IN=size. - * e.g. -DDEPTH_IN=16 - * @attention The value of the z-axis of input tensor depth should be given as a preprocessor - * argument using -DZ_IN=size. e.g. -DZ_IN=16 - * @attention block size should be given as a preprocessor argument using -DBLOCK_SIZE=size. e.g. - * -DBLOCK_SIZE=1 - * - * @param[in] input_ptr Pointer to the source image. Supported data - * types: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32 - * @param[in] input_stride_x Stride of the source image in X dimension (in - * bytes) - * @param[in] input_step_x input_stride_x * number of elements along X - * processed per workitem(in bytes) - * @param[in] input_stride_y Stride of the source image in Y dimension (in - * bytes) - * @param[in] input_step_y input_stride_y * number of elements along Y - * processed per workitem(in bytes) - * @param[in] input_stride_z Stride of the source tensor in Z dimension (in - * bytes) - * @param[in] input_step_z input_stride_z * number of elements along Z - * processed per workitem(in bytes) - * @param[in] input_offset_first_element_in_bytes The offset of the first element in the source - * image - * @param[out] output_ptr Pointer to the destination image. 
Supported data - * types: same as @p input_ptr - * @param[in] output_stride_x Stride of the destination image in X dimension - * (in bytes) - * @param[in] output_step_x output_stride_x * number of elements along X - * processed per workitem(in bytes) - * @param[in] output_stride_y Stride of the destination image in Y dimension - * (in bytes) - * @param[in] output_step_y output_stride_y * number of elements along Y - * processed per workitem(in bytes) - * @param[in] output_stride_z Stride of the source tensor in Z dimension (in - * bytes) - * @param[in] output_step_z output_stride_z * number of elements along Z - * processed per workitem(in bytes) - * @param[in] output_stride_w Stride of the source tensor in W dimension (in - * bytes) - * @param[in] output_step_w output_stride_w * number of elements along W - * processed per workitem(in bytes) - * @param[in] output_offset_first_element_in_bytes The offset of the first element in the - * destination image - */ -__kernel void space_to_depth_nhwc(TENSOR4D_DECLARATION(input), TENSOR4D_DECLARATION(output)) -{ - Tensor4D in = CONVERT_TO_TENSOR4D_STRUCT(input, Z_IN); - Tensor4D out = CONVERT_TO_TENSOR4D_STRUCT_NO_STEP(output, 0); - - int out_index[4] = {0}; - int in_index[4] = {0}; - - in_index[0] = get_global_id(0); // C - in_index[1] = get_global_id(1); // W - in_index[2] = get_global_id(2) % Z_IN; // H - in_index[3] = get_global_id(2) / Z_IN; // B - - out_index[0] = - in_index[0] + ((in_index[2] % BLOCK_SIZE) * BLOCK_SIZE + in_index[1] % BLOCK_SIZE) * DEPTH_IN; - out_index[1] = in_index[1] / BLOCK_SIZE; - out_index[2] = in_index[2] / BLOCK_SIZE; - out_index[3] = in_index[3]; - - *((__global DATA_TYPE *)tensor4D_offset(&out, out_index[0], out_index[1], out_index[2], - out_index[3])) = *((__global DATA_TYPE *)in.ptr); -} -#endif // defined(DATA_TYPE) && defined(DEPTH_IN) && defined(BLOCK_SIZE) && defined(Z_IN) diff --git a/compute/ARMComputeEx/src/core/CL/kernels/CLArgOperationKernel.cpp b/compute/ARMComputeEx/src/core/CL/kernels/CLArgOperationKernel.cpp deleted file mode 100644 index 06eeb5b..0000000 --- a/compute/ARMComputeEx/src/core/CL/kernels/CLArgOperationKernel.cpp +++ /dev/null @@ -1,181 +0,0 @@ -/* - * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * Copyright (c) 2016-2018 ARM Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. 
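For reference (not part of the patch): the deleted space_to_depth_nchw kernel writes each input element to the output coordinate given by the index arithmetic above. A small C++ sketch of that NCHW mapping for a single coordinate (names are illustrative):

#include <array>

// Map an NCHW input coordinate (b, c, h, w) to the space_to_depth output coordinate,
// following the out_index computation in the deleted kernel; depth_in is the input
// channel count (DEPTH_IN).
std::array<int, 4> space_to_depth_nchw_index(int b, int c, int h, int w, int block_size,
                                             int depth_in)
{
  const int out_w = w / block_size;
  const int out_h = h / block_size;
  const int out_c = c + ((h % block_size) * block_size + w % block_size) * depth_in;
  return {b, out_c, out_h, out_w};
}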
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#include "arm_compute/core/CL/kernels/CLArgOperationKernel.h" - -#include "arm_compute/core/CL/CLHelpers.h" -#include "arm_compute/core/CL/CLKernelLibraryEx.h" -#include "arm_compute/core/CL/ICLTensor.h" - -using namespace arm_compute; - -namespace -{ -const TensorShape inferOutputShape(const TensorShape &input_shape, const uint32_t axis) -{ - TensorShape out_shape{input_shape}; - - out_shape.set(axis, 1); - - return out_shape; -} -} // namespace - -namespace -{ -Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, const uint32_t axis, - ArgOperation /*op*/) -{ - ARM_COMPUTE_ERROR_ON_DATA_TYPE_NOT_IN(input, DataType::S32, DataType::F32, DataType::U8, - DataType::QASYMM8); - ARM_COMPUTE_ERROR_ON_DATA_TYPE_NOT_IN(output, DataType::S32); - - ARM_COMPUTE_RETURN_ERROR_ON_MSG((input->tensor_shape().num_dimensions() - 1) != - output->tensor_shape().num_dimensions(), - "Input's rank is not same with output"); - - ARM_COMPUTE_RETURN_ERROR_ON_MSG(output->tensor_shape().total_size() == 0, - "Inputs are not broadcast compatible"); - - const TensorShape output_shape = inferOutputShape(input->tensor_shape(), axis); - ARM_COMPUTE_RETURN_ERROR_ON_MSG(output_shape.total_size() != output->tensor_shape().total_size(), - "output shape's size does not match axis"); - - const auto num_dimensions = input->tensor_shape().num_dimensions(); - ARM_COMPUTE_RETURN_ERROR_ON_MSG(axis >= num_dimensions, "axis must be less than (input's rank)."); - return Status{}; -} - -} // namespace - -CLArgOperationKernel::CLArgOperationKernel() : _input(nullptr), _output(nullptr), _axis() {} - -void CLArgOperationKernel::configure(const ICLTensor *input, ICLTensor *output, const uint32_t axis, - ArgOperation op) -{ - ARM_COMPUTE_ERROR_ON_NULLPTR(input, output); - ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), axis, op)); - - _input = input; - _output = output; - _axis = axis; - - std::unique_ptr output_info = output->info()->clone(); - output_info->set_tensor_shape(inferOutputShape(input->info()->tensor_shape(), axis)); - - // Construct kernel and set op_code based on type of ArgOperation as specified by object op - std::string kernel_name = "arg_op"; - int op_code = 0; - if (op == ArgOperation::MAX) - { - op_code = 1; - } - else if (op == ArgOperation::MIN) - { - op_code = 2; - } - else - throw std::runtime_error("Operation not supported, yet"); - - // Set kernel build options - std::set build_opts; - build_opts.emplace("-DDATA_TYPE=" + get_cl_type_from_data_type(input->info()->data_type())); - build_opts.emplace("-DDEPTH_OUT=" + support::cpp11::to_string(output_info->dimension(2))); - build_opts.emplace("-DOP_CODE=" + support::cpp11::to_string(op_code)); - - // Create kernel - _kernel = - static_cast(CLKernelLibraryEx::get().create_kernel(kernel_name, build_opts)); - - // Configure kernel window - Window win = calculate_max_window(*output_info, Steps()); - - Coordinates coord; - coord.set_num_dimensions(output_info->num_dimensions()); - 
output->info()->set_valid_region(ValidRegion(coord, output_info->tensor_shape())); - - ICLKernel::configure_internal(win); -} - -Status CLArgOperationKernel::validate(const ITensorInfo *input, const ITensorInfo *output, - const uint32_t axis, ArgOperation op) -{ - ARM_COMPUTE_ERROR_ON_NULLPTR(input, output); - ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, axis, op)); - - return Status{}; -} - -void CLArgOperationKernel::run(const Window &window, cl::CommandQueue &queue) -{ - ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this); - ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window); - - const TensorShape &shape_in = _input->info()->tensor_shape(); - - unsigned int idx = 2 * num_arguments_per_4D_tensor(); // Skip the input and output parameters - - _kernel.setArg(idx++, _axis); - _kernel.setArg(idx++, shape_in[_axis]); - - Window slice_out = window.first_slice_window_4D().collapse(ICLKernel::window(), 2, 4); - - // Setup input slice - Window slice_in(slice_out); - slice_in.set(Window::DimX, Window::Dimension(0, 0, 0)); - slice_in.set(Window::DimY, Window::Dimension(0, 0, 0)); - slice_in.set(Window::DimZ, Window::Dimension(0, 0, 0)); - slice_in.set(3, Window::Dimension(0, 0, 0)); - - // Copy output's shape in order to use for recovering at end of this method - const TensorShape shape_out = _output->info()->tensor_shape(); - _output->info()->set_tensor_shape(inferOutputShape(shape_in, _axis)); - - do - { - unsigned int idx = 0; - add_4D_tensor_argument(idx, _input, slice_in); - add_4D_tensor_argument(idx, _output, slice_out); - enqueue(queue, *this, slice_out); - } while (window.slide_window_slice_4D(slice_in) && window.slide_window_slice_4D(slice_out)); - - // Recover output's shape of output tensor - _output->info()->set_tensor_shape(shape_out); -} diff --git a/compute/ARMComputeEx/src/core/CL/kernels/CLBinaryLogicalOpKernel.cpp b/compute/ARMComputeEx/src/core/CL/kernels/CLBinaryLogicalOpKernel.cpp index bb55568..fbc76f5 100644 --- a/compute/ARMComputeEx/src/core/CL/kernels/CLBinaryLogicalOpKernel.cpp +++ b/compute/ARMComputeEx/src/core/CL/kernels/CLBinaryLogicalOpKernel.cpp @@ -43,6 +43,7 @@ #include "arm_compute/core/CL/CLHelpers.h" #include "arm_compute/core/CL/CLKernelLibraryEx.h" #include "arm_compute/core/CL/ICLTensor.h" +#include "support/StringSupport.h" using namespace arm_compute; diff --git a/compute/ARMComputeEx/src/core/CL/kernels/CLCastKernel.cpp b/compute/ARMComputeEx/src/core/CL/kernels/CLCastKernel.cpp deleted file mode 100644 index 01ea655..0000000 --- a/compute/ARMComputeEx/src/core/CL/kernels/CLCastKernel.cpp +++ /dev/null @@ -1,132 +0,0 @@ -/* - * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * Copyright (c) 2016-2018 ARM Limited. 
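For reference (not part of the patch): the deleted CLArgOperationKernel reduces one axis of the input and writes the S32 index of the maximum (op_code 1) or minimum (op_code 2) element. A minimal C++ sketch of that per-slice reduction, ignoring the 4-D windowing (names are illustrative):

#include <cstdint>
#include <vector>

// Index of the max (or min) element of one non-empty 1-D slice taken along the reduced axis.
int32_t arg_op_ref(const std::vector<float> &slice, bool take_max)
{
  int32_t best = 0;
  for (int32_t i = 1; i < static_cast<int32_t>(slice.size()); ++i)
  {
    const bool better = take_max ? slice[i] > slice[best] : slice[i] < slice[best];
    if (better)
      best = i;
  }
  return best;
}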
- * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#include "arm_compute/core/CL/kernels/CLCastKernel.h" - -#include "arm_compute/core/CL/CLHelpers.h" -#include "arm_compute/core/CL/CLKernelLibraryEx.h" -#include "arm_compute/core/CL/ICLTensor.h" - -using namespace arm_compute; - -CLCastKernel::CLCastKernel() : _input(nullptr), _output(nullptr) {} - -void CLCastKernel::configure(const ICLTensor *input, ICLTensor *output, SubDataType input_subtype) -{ - ARM_COMPUTE_ERROR_ON_NULLPTR(input, output); - - ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8, DataType::QASYMM8, - DataType::S16, DataType::S32, DataType::F16, - DataType::F32); - ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U8, DataType::QASYMM8, - DataType::S16, DataType::S32, DataType::F16, - DataType::F32); - ARM_COMPUTE_ERROR_ON_MISMATCHING_SHAPES(input, output); - - _input = input; - _output = output; - - constexpr unsigned int num_elems_processed_per_iteration = 16; - - // Set kernel build options - CLBuildOptions build_opts; - build_opts.add_option("-DDATA_TYPE_IN=" + get_cl_type_from_data_type(input->info()->data_type())); - build_opts.add_option("-DDATA_TYPE_OUT=" + - get_cl_type_from_data_type(output->info()->data_type())); - build_opts.add_option( - ("-DVEC_SIZE=" + support::cpp11::to_string(num_elems_processed_per_iteration))); - - // Create kernel - if (is_data_type_quantized_asymmetric(input->info()->data_type())) - { - UniformQuantizationInfo qinfo = input->info()->quantization_info().uniform(); - const float scale_in = qinfo.scale; - const int offset_in = qinfo.offset; - build_opts.add_option("-DSCALE=" + float_to_string_with_full_precision(scale_in)); - build_opts.add_option("-DOFFSET=" + support::cpp11::to_string(offset_in)); - - _kernel = static_cast( - CLKernelLibraryEx::get().create_kernel("cast_qasymm_in", build_opts.options())); - } - else if (is_data_type_quantized_asymmetric(output->info()->data_type())) - { - UniformQuantizationInfo qinfo = output->info()->quantization_info().uniform(); - const float scale_in = qinfo.scale; - const float offset_in = qinfo.offset; - - build_opts.add_option("-DSCALE=" + float_to_string_with_full_precision(scale_in)); - build_opts.add_option("-DOFFSET=" + support::cpp11::to_string(offset_in)); - - _kernel = static_cast( - CLKernelLibraryEx::get().create_kernel("cast_qasymm_out", build_opts.options())); - } - else - { - build_opts.add_option_if(input_subtype == 
SubDataType::BOOL, "-DBOOL_INPUT"); - _kernel = static_cast( - CLKernelLibraryEx::get().create_kernel("cast", build_opts.options())); - } - - // Configure kernel window - Window win = calculate_max_window(*input->info(), Steps(num_elems_processed_per_iteration)); - AccessWindowHorizontal input_access(input->info(), 0, num_elems_processed_per_iteration); - AccessWindowHorizontal output_access(output->info(), 0, num_elems_processed_per_iteration); - update_window_and_padding(win, input_access, output_access); - output_access.set_valid_region(win, input->info()->valid_region()); - - ICLKernel::configure_internal(win); -} - -void CLCastKernel::run(const Window &window, cl::CommandQueue &queue) -{ - ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this); - ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window); - - Window collapsed = window.collapse_if_possible(ICLKernel::window(), Window::DimZ); - Window slice = collapsed.first_slice_window_3D(); - - do - { - unsigned int idx = 0; - add_3D_tensor_argument(idx, _input, slice); - add_3D_tensor_argument(idx, _output, slice); - enqueue(queue, *this, slice, lws_hint()); - } while (collapsed.slide_window_slice_3D(slice)); -} diff --git a/compute/ARMComputeEx/src/core/CL/kernels/CLDepthToSpaceKernel.cpp b/compute/ARMComputeEx/src/core/CL/kernels/CLDepthToSpaceKernel.cpp deleted file mode 100644 index 3891368..0000000 --- a/compute/ARMComputeEx/src/core/CL/kernels/CLDepthToSpaceKernel.cpp +++ /dev/null @@ -1,140 +0,0 @@ -/* - * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * Copyright (c) 2016-2018 ARM Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - -#include "arm_compute/core/CL/kernels/CLDepthToSpaceKernel.h" - -#include "arm_compute/core/CL/CLHelpers.h" -#include "arm_compute/core/CL/CLKernelLibraryEx.h" -#include "arm_compute/core/CL/ICLTensor.h" - -using namespace arm_compute; - -namespace -{ -// TODO Use this validation function -#if 0 -Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, - const int32_t block_size) -{ - ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8, DataType::QASYMM8, - DataType::S16, DataType::S32, DataType::F16, - DataType::F32); - ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U8, DataType::QASYMM8, - DataType::S16, DataType::S32, DataType::F16, - DataType::F32); - ARM_COMPUTE_RETURN_ERROR_ON_MSG(block_size < 1, - "Block size should be greater than or equal to 1."); - - ARM_COMPUTE_RETURN_ERROR_ON_MSG(output->dimension(0) != input->dimension(0) * block_size, - "Output width should be equal to (Input width * block size)"); - - ARM_COMPUTE_RETURN_ERROR_ON_MSG(output->dimension(1) != input->dimension(1) * block_size, - "Output height should be equal to (Input height * block size)"); - - ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->dimension(2) % (block_size * block_size) != 0, - "Input depth should be divisible by (block size * block size)"); - - ARM_COMPUTE_RETURN_ERROR_ON_MSG( - output->dimension(2) != input->dimension(2) / (block_size * block_size), - "Output depth should be equal to (Input depth / (block size * block size))"); - - return Status{}; -} -#endif -} // namespace - -CLDepthToSpaceKernel::CLDepthToSpaceKernel() : _input(nullptr), _output(nullptr) -{ - // DO NOTHING -} - -void CLDepthToSpaceKernel::configure(const ICLTensor *input, ICLTensor *output, - const int32_t block_size) -{ - // TODO Add validation of data_layout - _input = input; - _output = output; - - // Set kernel build options - auto layout_out = output->info()->data_layout(); - std::set build_opts; - build_opts.emplace("-DDATA_TYPE=" + get_cl_type_from_data_type(input->info()->data_type())); - build_opts.emplace("-DBLOCK_SIZE=" + support::cpp11::to_string(block_size)); - auto index_depth = get_data_layout_dimension_index(layout_out, DataLayoutDimension::CHANNEL); - auto depth = output->info()->dimension(index_depth); - build_opts.emplace("-DDEPTH_OUT=" + support::cpp11::to_string(depth)); - build_opts.emplace("-DZ_OUT=" + support::cpp11::to_string(output->info()->tensor_shape().z())); - - // Create kernel - _kernel = static_cast(CLKernelLibraryEx::get().create_kernel( - "depth_to_space_" + lower_string(string_from_data_layout(layout_out)), build_opts)); - - // Configure kernel window - Window win = calculate_max_window(*output->info(), Steps()); - - Coordinates coord; - coord.set_num_dimensions(output->info()->num_dimensions()); - output->info()->set_valid_region(ValidRegion(coord, output->info()->tensor_shape())); - - ICLKernel::configure_internal(win); -} - -void CLDepthToSpaceKernel::run(const Window &window, cl::CommandQueue &queue) -{ - ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this); - ARM_COMPUTE_ERROR_ON_MISMATCHING_WINDOWS(ICLKernel::window(), window); - - Window slice_out = window.first_slice_window_4D().collapse(ICLKernel::window(), 2, 4); - - // Setup input slice - Window slice_in(slice_out); - slice_in.set(Window::DimX, Window::Dimension(0, 0, 0)); - slice_in.set(Window::DimY, Window::Dimension(0, 0, 0)); - slice_in.set(Window::DimZ, Window::Dimension(0, 0, 0)); - slice_in.set(3, Window::Dimension(0, 0, 0)); - - do - { - unsigned int idx = 0; - 
add_4D_tensor_argument(idx, _input, slice_in); - add_4D_tensor_argument(idx, _output, slice_out); - enqueue(queue, *this, slice_out); - } while (window.slide_window_slice_4D(slice_in) && window.slide_window_slice_4D(slice_out)); -} diff --git a/compute/ARMComputeEx/src/core/CL/kernels/CLEmbeddingLookupKernel.cpp b/compute/ARMComputeEx/src/core/CL/kernels/CLEmbeddingLookupKernel.cpp index 79f5ce0..67aaf2d 100644 --- a/compute/ARMComputeEx/src/core/CL/kernels/CLEmbeddingLookupKernel.cpp +++ b/compute/ARMComputeEx/src/core/CL/kernels/CLEmbeddingLookupKernel.cpp @@ -43,6 +43,7 @@ #include "arm_compute/core/CL/CLHelpers.h" #include "arm_compute/core/CL/CLKernelLibraryEx.h" #include "arm_compute/core/CL/ICLTensor.h" +#include "support/StringSupport.h" using namespace arm_compute; diff --git a/compute/ARMComputeEx/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyKernelEx.cpp b/compute/ARMComputeEx/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyKernelEx.cpp deleted file mode 100644 index 235e897..0000000 --- a/compute/ARMComputeEx/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyKernelEx.cpp +++ /dev/null @@ -1,372 +0,0 @@ -/* - * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * Copyright (c) 2017-2019 ARM Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
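For reference (not part of the patch): the deleted CLDepthToSpaceKernel is the spatial inverse of the space_to_depth kernel shown earlier; its .cl source is not in this hunk, so the mapping below is an assumption obtained by inverting that index arithmetic (names are illustrative):

#include <array>

// For an NCHW depth_to_space output coordinate (b, c, h, w), return the input coordinate
// it is read from; depth_out is the output channel count (DEPTH_OUT).
std::array<int, 4> depth_to_space_nchw_src_index(int b, int c, int h, int w, int block_size,
                                                 int depth_out)
{
  const int in_w = w / block_size;
  const int in_h = h / block_size;
  const int in_c = c + ((h % block_size) * block_size + w % block_size) * depth_out;
  return {b, in_c, in_h, in_w};
}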
- */ - -#include "arm_compute/core/CL/kernels/CLGEMMLowpMatrixMultiplyKernelEx.h" - -#include "arm_compute/core/AccessWindowStatic.h" -#include "arm_compute/core/AccessWindowTranspose.h" -#include "arm_compute/core/CL/CLHelpers.h" -#include "arm_compute/core/CL/CLKernelLibraryEx.h" -#include "arm_compute/core/CL/ICLTensor.h" -#include "arm_compute/core/CL/OpenCL.h" -#include "arm_compute/core/Error.h" -#include "arm_compute/core/Helpers.h" -#include "arm_compute/core/TensorInfo.h" -#include "arm_compute/core/Types.h" -#include "arm_compute/core/Utils.h" -#include "arm_compute/core/Validate.h" -#include "arm_compute/core/Window.h" -#include "arm_compute/core/utils/misc/ShapeCalculator.h" -#include "support/ToolchainSupport.h" - -#include -#include -#include - -using namespace arm_compute; -using namespace arm_compute::misc::shape_calculator; - -namespace arm_compute -{ -class Coordinates; -} // namespace arm_compute - -namespace -{ -using ElementsProcessed = Steps; - -Status validate_arguments(const ITensorInfo *input0, const ITensorInfo *input1, - const ITensorInfo *output, const GEMMReshapeInfo &gemm_info) -{ - ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input0, input1, output); - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input0, 1, DataType::S8); - ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input0, input1); - ARM_COMPUTE_RETURN_ERROR_ON_MSG(input0->num_dimensions() > 4, - "The number of dimensions for the matrix A must be <= 4"); - ARM_COMPUTE_RETURN_ERROR_ON_MSG(input1->num_dimensions() > 3, - "The number of dimensions for the matrix B must be <= 3"); - ARM_COMPUTE_RETURN_ERROR_ON_MSG(input1->num_dimensions() > 2 && - gemm_info.reinterpret_input_as_3d(), - "The input1 tensor cannot have more than 2 dimensions if input0 " - "has to be reinterpreted as 3D"); - - const int m = gemm_info.m(); - const int n = gemm_info.n(); - const int k = gemm_info.k(); - - ARM_COMPUTE_UNUSED(m); - ARM_COMPUTE_UNUSED(n); - ARM_COMPUTE_UNUSED(k); - - ARM_COMPUTE_RETURN_ERROR_ON(input0->dimension(0) != static_cast(k)); - ARM_COMPUTE_RETURN_ERROR_ON(input1->dimension(0) != static_cast(n)); - ARM_COMPUTE_RETURN_ERROR_ON(input1->dimension(1) != static_cast(k)); - if (gemm_info.reinterpret_input_as_3d()) - { - ARM_COMPUTE_RETURN_ERROR_ON(input0->dimension(1) * input0->dimension(2) != - static_cast(m)); - } - else - { - ARM_COMPUTE_RETURN_ERROR_ON(input0->dimension(1) != static_cast(m)); - } - - if (output->total_size() != 0) - { - const TensorInfo tensor_info_output = - output->clone()->set_tensor_shape(compute_mm_shape(*input0, *input1, false, gemm_info)); - ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(output, &tensor_info_output); - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::S32); - } - - return Status{}; -} - -std::pair validate_and_configure_window(ITensorInfo *input0, ITensorInfo *input1, - ITensorInfo *output, - const GEMMReshapeInfo &gemm_info, - ElementsProcessed &num_elements_processed) -{ - unsigned int &num_elems_processed_per_iteration_x = num_elements_processed[0]; - unsigned int &num_elems_processed_per_iteration_y = num_elements_processed[1]; - bool reinterpret_input_as_3d = gemm_info.reinterpret_input_as_3d(); - bool reinterpret_output_as_3d = (gemm_info.depth_output_gemm3d() != 0); - - Window win{}; - Window win_out{}; - bool window_changed = false; - - // In case both input and output have to be reinterpreted as 3D tensors, - // force reinterpret_input_as_3d and reinterpret_output_as_3d to be false. 
- if (reinterpret_input_as_3d == reinterpret_output_as_3d) - { - reinterpret_input_as_3d = false; - reinterpret_output_as_3d = false; - } - - // Output tensor auto inizialitation if not yet initialized - auto_init_if_empty(*output, - input0->clone() - ->set_tensor_shape(compute_mm_shape(*input0, *input1, false, gemm_info)) - .set_data_type(DataType::S32)); - - TensorInfo tmp_info(*output); - - if (reinterpret_output_as_3d) - { - // Since the output tensor has to be reinterpreted as 3D and the execute window is based on a 2D - // GEMM, - // the window needs to be constructed on the 2D collapsed version of the tensor - TensorShape tmp_shape(output->tensor_shape()); - tmp_shape.collapse(2U, 1U); - tmp_info.set_tensor_shape(tmp_shape); - } - - // Special case for 1xN, 2xN, 3xN and 4xN input0 tensor. num_elems_processed_per_iteration_x - // Note: if the dot product instruction is available, the 8x2 tile has to be used - num_elems_processed_per_iteration_x = 4; - num_elems_processed_per_iteration_y = std::min(static_cast(output->dimension(1)), 4); - - // Note: bottom paddings are calculated manually as the output can be reinterpreted as 3D tensor - // The only way to set properly the paddings, it is to set those explicitly through the - // AccessWindowStatic - const int m = reinterpret_input_as_3d ? input0->tensor_shape()[1] * input0->tensor_shape()[2] - : input0->tensor_shape()[1]; - const int bottom_pad = - (num_elems_processed_per_iteration_y - (m % num_elems_processed_per_iteration_y)) % - num_elems_processed_per_iteration_y; - - // Configure window - win = calculate_max_window( - tmp_info, Steps(num_elems_processed_per_iteration_x, num_elems_processed_per_iteration_y)); - win_out = calculate_max_window( - *output, Steps(num_elems_processed_per_iteration_x, num_elems_processed_per_iteration_y)); - - AccessWindowStatic input0_access(input0, 0, 0, input0->dimension(0), - input0->dimension(1) + bottom_pad); - AccessWindowStatic input1_access( - input1, 0, 0, ceil_to_multiple(input1->dimension(0), num_elems_processed_per_iteration_x), - input1->dimension(1)); - AccessWindowStatic output_access( - output, 0, 0, ceil_to_multiple(output->dimension(0), num_elems_processed_per_iteration_x), - output->dimension(1) + bottom_pad); - - window_changed = - update_window_and_padding(win, input0_access, - input1_access) || // window used by the execute_window_loop - update_window_and_padding( - win_out, - output_access); // window used to update the padding requirements of output tensor - - Coordinates coord; - coord.set_num_dimensions(output->num_dimensions()); - output_access.set_valid_region(win_out, ValidRegion(coord, output->tensor_shape())); - - // Collapse along the Z direction - // This collapse needs to be here in order to tune the Z dimension of LWS - Window collapsed = win; - const unsigned int dimension_to_collapse = - std::min(static_cast(output->num_dimensions()), 2u); - collapsed = win.collapse(win, dimension_to_collapse); - - Status err = (window_changed) - ? 
ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") - : Status{}; - return std::make_pair(err, collapsed); -} -} // namespace - -CLGEMMLowpMatrixMultiplyKernelEx::CLGEMMLowpMatrixMultiplyKernelEx() - : _input0(nullptr), _input1(nullptr), _output(nullptr), _slide_matrix_b(true), - _reinterpret_input_as_3d(false), _reinterpret_output_as_3d(false) -{ -} - -void CLGEMMLowpMatrixMultiplyKernelEx::configure(const ICLTensor *input0, const ICLTensor *input1, - ICLTensor *output, - const GEMMReshapeInfo &gemm_info) -{ - ARM_COMPUTE_ERROR_ON_NULLPTR(input0, input1, output); - - ARM_COMPUTE_ERROR_THROW_ON( - validate_arguments(input0->info(), input1->info(), output->info(), gemm_info)); - - _input0 = input0; - _input1 = input1; - _output = output; - _reinterpret_input_as_3d = gemm_info.reinterpret_input_as_3d(); - _reinterpret_output_as_3d = (gemm_info.depth_output_gemm3d() != 0); - - // In case both input and output have to be reinterpreted as 3D tensors, - // force reinterpret_input_as_3d and reinterpret_output_as_3d to be false. - if (_reinterpret_input_as_3d == _reinterpret_output_as_3d) - { - _reinterpret_input_as_3d = false; - _reinterpret_output_as_3d = false; - } - - // Check if we need to slide the matrix B - const unsigned int num_dimensions_input0 = _reinterpret_input_as_3d - ? _input0->info()->num_dimensions() - 1 - : _input0->info()->num_dimensions(); - _slide_matrix_b = (_input1->info()->num_dimensions() >= num_dimensions_input0); - - ElementsProcessed num_elements_processed{}; - - // Configure kernel window - auto win_config = validate_and_configure_window(input0->info(), input1->info(), output->info(), - gemm_info, num_elements_processed); - ARM_COMPUTE_ERROR_THROW_ON(win_config.first); - ICLKernel::configure_internal(win_config.second); - - // Create build options - std::string kernel_name(" "); - CLBuildOptions build_opts; - build_opts.add_option_if(_reinterpret_input_as_3d, "-DREINTERPRET_INPUT_AS_3D"); - build_opts.add_option_if(_reinterpret_output_as_3d, "-DREINTERPRET_OUTPUT_AS_3D"); - build_opts.add_option_if(_reinterpret_input_as_3d || _reinterpret_output_as_3d, - "-DHEIGHT_GEMM3D=" + - support::cpp11::to_string(output->info()->dimension(1))); - build_opts.add_option_if(_reinterpret_input_as_3d || _reinterpret_output_as_3d, - "-DDEPTH_GEMM3D=" + - support::cpp11::to_string(output->info()->dimension(2))); - build_opts.add_option_if(!_slide_matrix_b, - "-DMATRIX_B_DEPTH=" + - support::cpp11::to_string(input1->info()->dimension(2))); - build_opts.add_option("-DCOLS_A=" + support::cpp11::to_string(input0->info()->dimension(0))); - build_opts.add_option("-DNUM_ELEMS_PROCESSED_PER_THREAD_X=" + - support::cpp11::to_string(num_elements_processed.x())); - build_opts.add_option("-DNUM_ELEMS_PROCESSED_PER_THREAD_Y=" + - support::cpp11::to_string(num_elements_processed.y())); - - kernel_name = "gemmlowp_mm_midgard_ex"; - - // Create kernel - _kernel = static_cast( - CLKernelLibraryEx::get().create_kernel(kernel_name, build_opts.options())); - - // Set config_id for enabling LWS tuning - _config_id = kernel_name; - _config_id += "_"; - _config_id += (_reinterpret_input_as_3d ? "3di_" : ""); - _config_id += (_reinterpret_output_as_3d ? 
"3do_" : ""); - _config_id += lower_string(string_from_data_type(input0->info()->data_type())); - _config_id += "_"; - _config_id += support::cpp11::to_string(output->info()->dimension(1)); - _config_id += "_"; - _config_id += support::cpp11::to_string(output->info()->dimension(0)); -} - -Status CLGEMMLowpMatrixMultiplyKernelEx::validate(const ITensorInfo *input0, - const ITensorInfo *input1, - const ITensorInfo *output, - const GEMMReshapeInfo &gemm_info) -{ - ElementsProcessed num_elements_processed{}; - ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input0, input1, output, gemm_info)); - ARM_COMPUTE_RETURN_ON_ERROR( - validate_and_configure_window(input0->clone().get(), input1->clone().get(), - output->clone().get(), gemm_info, num_elements_processed) - .first); - - return Status{}; -} - -void CLGEMMLowpMatrixMultiplyKernelEx::run(const Window &window, cl::CommandQueue &queue) -{ - ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this); - ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window); - - if (_input1->info()->num_dimensions() < 3) - { - // The stride_z for matrix B must be zero if we do not slice - ARM_COMPUTE_ERROR_ON(_input1->info()->strides_in_bytes()[3] != 0); - } - - Window slice = window.first_slice_window_3D(); - Window slice_matrix_b = slice; - - slice_matrix_b.set(Window::DimX, Window::Dimension(0, 1, 1)); - slice_matrix_b.set(Window::DimY, Window::Dimension(0, 1, 1)); - - if (_reinterpret_input_as_3d) - { - // Pass bottom paddings to the kernel if the input has to be reinterpreted as 3D tensor - const unsigned int idx0 = 3 * num_arguments_per_2D_tensor() + 3; - const unsigned int total_cross_plane_pad = - _input0->info()->padding().top + _input0->info()->padding().bottom; - _kernel.setArg(idx0, static_cast(total_cross_plane_pad)); - } - - if (_reinterpret_output_as_3d) - { - // Pass bottom paddings to the kernel if the output has to be reinterpreted as 3D tensor - const unsigned int idx0 = - 3 * num_arguments_per_2D_tensor() + 3 + (_reinterpret_input_as_3d ? 
1 : 0); - const unsigned int total_cross_plane_pad = - _output->info()->padding().top + _output->info()->padding().bottom; - _kernel.setArg(idx0, static_cast(total_cross_plane_pad)); - } - - do - { - Window slice_b = slice; - // Don't slice matrix B along the z dimension if matrix B has just 2 dimensions and matrix A - // more than 2 - // This scenario can happen when the matrix multiplication is used to perform a convolution - // operation - if (!_slide_matrix_b) - { - slice_b = slice_matrix_b; - } - - unsigned int idx = 0; - add_2D_tensor_argument(idx, _input0, slice); - add_2D_tensor_argument(idx, _input1, slice_b); - add_2D_tensor_argument(idx, _output, slice); - _kernel.setArg(idx++, - static_cast(_input0->info()->strides_in_bytes()[2])); - _kernel.setArg(idx++, - static_cast(_input1->info()->strides_in_bytes()[2])); - _kernel.setArg(idx++, - static_cast(_output->info()->strides_in_bytes()[2])); - enqueue(queue, *this, slice, lws_hint()); - } while (window.slide_window_slice_3D(slice)); -} diff --git a/compute/ARMComputeEx/src/core/CL/kernels/CLGatherExKernel.cpp b/compute/ARMComputeEx/src/core/CL/kernels/CLGatherExKernel.cpp index 3a25987..3bfe3e4 100644 --- a/compute/ARMComputeEx/src/core/CL/kernels/CLGatherExKernel.cpp +++ b/compute/ARMComputeEx/src/core/CL/kernels/CLGatherExKernel.cpp @@ -45,6 +45,7 @@ #include "arm_compute/core/CL/ICLTensor.h" #include "arm_compute/core/utils/misc/ShapeCalculatorEx.h" #include "arm_compute/core/UtilsEx.h" +#include "support/StringSupport.h" using namespace arm_compute; diff --git a/compute/ARMComputeEx/src/core/CL/kernels/CLHashtableLookupKernel.cpp b/compute/ARMComputeEx/src/core/CL/kernels/CLHashtableLookupKernel.cpp index 7fbdcda..930e7c9 100644 --- a/compute/ARMComputeEx/src/core/CL/kernels/CLHashtableLookupKernel.cpp +++ b/compute/ARMComputeEx/src/core/CL/kernels/CLHashtableLookupKernel.cpp @@ -43,6 +43,7 @@ #include "arm_compute/core/CL/CLHelpers.h" #include "arm_compute/core/CL/CLKernelLibraryEx.h" #include "arm_compute/core/CL/ICLTensor.h" +#include "support/StringSupport.h" using namespace arm_compute; @@ -110,7 +111,7 @@ void CLHashtableLookupKernel::configure(const ICLTensor *lookups, const ICLTenso _hits = hits; // Make _lookup_indices tensor - _lookup_indices = arm_compute::support::cpp14::make_unique(); + _lookup_indices = support::cpp14::make_unique(); _lookup_indices->allocator()->init( TensorInfo(lookups->info()->tensor_shape(), lookups->info()->num_channels(), DataType::S32)); _lookup_indices->allocator()->allocate(); diff --git a/compute/ARMComputeEx/src/core/CL/kernels/CLInstanceNormalizationLayerKernelEx.cpp b/compute/ARMComputeEx/src/core/CL/kernels/CLInstanceNormalizationLayerKernelEx.cpp index b45f6bb..61c14d2 100644 --- a/compute/ARMComputeEx/src/core/CL/kernels/CLInstanceNormalizationLayerKernelEx.cpp +++ b/compute/ARMComputeEx/src/core/CL/kernels/CLInstanceNormalizationLayerKernelEx.cpp @@ -48,7 +48,7 @@ #include "arm_compute/core/TensorInfo.h" #include "arm_compute/core/Utils.h" #include "arm_compute/core/Window.h" - +#include "support/StringSupport.h" #include "support/ToolchainSupport.h" namespace arm_compute diff --git a/compute/ARMComputeEx/src/core/CL/kernels/CLMultiplyScaleFactorKernel.cpp b/compute/ARMComputeEx/src/core/CL/kernels/CLMultiplyScaleFactorKernel.cpp index d305896..6b27c99 100644 --- a/compute/ARMComputeEx/src/core/CL/kernels/CLMultiplyScaleFactorKernel.cpp +++ b/compute/ARMComputeEx/src/core/CL/kernels/CLMultiplyScaleFactorKernel.cpp @@ -49,6 +49,7 @@ #include "arm_compute/core/Utils.h" #include 
"arm_compute/core/Validate.h" #include "arm_compute/core/Window.h" +#include "support/StringSupport.h" using namespace arm_compute; diff --git a/compute/ARMComputeEx/src/core/CL/kernels/CLNegKernel.cpp b/compute/ARMComputeEx/src/core/CL/kernels/CLNegKernel.cpp index 74f7b41..643c8b1 100644 --- a/compute/ARMComputeEx/src/core/CL/kernels/CLNegKernel.cpp +++ b/compute/ARMComputeEx/src/core/CL/kernels/CLNegKernel.cpp @@ -43,6 +43,7 @@ #include "arm_compute/core/CL/CLHelpers.h" #include "arm_compute/core/CL/CLKernelLibraryEx.h" #include "arm_compute/core/CL/ICLTensor.h" +#include "support/StringSupport.h" using namespace arm_compute; diff --git a/compute/ARMComputeEx/src/core/CL/kernels/CLPReLUKernel.cpp b/compute/ARMComputeEx/src/core/CL/kernels/CLPReLUKernel.cpp deleted file mode 100644 index 8910a7b..0000000 --- a/compute/ARMComputeEx/src/core/CL/kernels/CLPReLUKernel.cpp +++ /dev/null @@ -1,210 +0,0 @@ -/* - * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * Copyright (c) 2016-2018 ARM Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - -#include "arm_compute/core/CL/kernels/CLPReLUKernel.h" - -#include "arm_compute/core/CL/CLHelpers.h" -#include "arm_compute/core/CL/CLKernelLibraryEx.h" -#include "arm_compute/core/CL/ICLTensor.h" - -using namespace arm_compute; - -namespace -{ -constexpr unsigned int num_elems_processed_per_iteration = 16; - -Status validate_info(const ITensorInfo *input, const ITensorInfo *alpha, const ITensorInfo *output) -{ - const TensorShape &out_shape = - TensorShape::broadcast_shape(input->tensor_shape(), alpha->tensor_shape()); - - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F16, DataType::F32, - DataType::QASYMM8); - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(alpha, 1, DataType::F16, DataType::F32, - DataType::QASYMM8); - - ARM_COMPUTE_RETURN_ERROR_ON_MSG(out_shape.total_size() == 0, - "Inputs are not broadcast compatible"); - // Validate in case of configured output - if (output->total_size() > 0) - { - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::F16, DataType::F32, - DataType::QASYMM8); - ARM_COMPUTE_RETURN_ERROR_ON_MSG( - detail::have_different_dimensions(out_shape, output->tensor_shape(), 0), - "Wrong shape for output"); - } - return Status{}; -} -} // namespace - -CLPReLUKernel::CLPReLUKernel() : _input(nullptr), _alpha(nullptr), _output(nullptr) {} - -void CLPReLUKernel::configure(const ICLTensor *input, const ICLTensor *alpha, ICLTensor *output) -{ - ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, alpha); - ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, output); - ARM_COMPUTE_ERROR_THROW_ON(validate_info(input->info(), alpha->info(), output->info())); - - _input = input; - _alpha = alpha; - _output = output; - - // Create kernel - std::string kernel_name = "prelu"; - std::set build_opts; - build_opts.emplace(("-DDATA_TYPE=" + get_cl_type_from_data_type(input->info()->data_type()))); - build_opts.emplace( - ("-DVEC_SIZE=" + support::cpp11::to_string(num_elems_processed_per_iteration))); - - if (is_data_type_quantized_asymmetric(input->info()->data_type())) - { - build_opts.emplace("-DOFF_IN=" + support::cpp11::to_string( - input->info()->quantization_info().uniform().offset)); - build_opts.emplace("-DOFF_ALPHA=" + support::cpp11::to_string( - alpha->info()->quantization_info().uniform().offset)); - build_opts.emplace("-DOFF_OUT=" + support::cpp11::to_string( - output->info()->quantization_info().uniform().offset)); - build_opts.emplace("-DSCALE_IN=" + support::cpp11::to_string( - input->info()->quantization_info().uniform().scale)); - build_opts.emplace("-DSCALE_ALPHA=" + support::cpp11::to_string( - alpha->info()->quantization_info().uniform().scale)); - build_opts.emplace("-DSCALE_OUT=" + support::cpp11::to_string( - output->info()->quantization_info().uniform().scale)); - kernel_name += "_qasymm8"; - } - _kernel = - static_cast(CLKernelLibraryEx::get().create_kernel(kernel_name, build_opts)); - - const std::pair broadcast_pair = - ITensorInfo::broadcast_shape_and_valid_region(*input->info(), *alpha->info()); - - const TensorShape &out_shape = broadcast_pair.first; - const ValidRegion &valid_region = broadcast_pair.second; - - // Auto initialize output if not initialized - { - set_shape_if_empty(*output->info(), out_shape); - - if (input->info()->data_type() == DataType::F16 && alpha->info()->data_type() == DataType::F16) - { - set_format_if_unknown(*output->info(), Format::F16); - } - else if (input->info()->data_type() == DataType::F32 || - alpha->info()->data_type() == DataType::F32) - { - 
set_format_if_unknown(*output->info(), Format::F32); - } - } - - Window win = calculate_max_window(valid_region, Steps(num_elems_processed_per_iteration)); - Window win_input1 = win.broadcast_if_dimension_le_one(*input->info()); - Window win_input2 = win.broadcast_if_dimension_le_one(*alpha->info()); - - AccessWindowHorizontal input1_access(input->info(), 0, num_elems_processed_per_iteration); - AccessWindowHorizontal input2_access(alpha->info(), 0, num_elems_processed_per_iteration); - AccessWindowHorizontal output_access(output->info(), 0, num_elems_processed_per_iteration); - - update_window_and_padding(win_input1, input1_access) || - update_window_and_padding(win_input2, input2_access) || - update_window_and_padding(win, output_access); - - output_access.set_valid_region(win, valid_region); - - ICLKernel::configure_internal(win); -} - -void CLPReLUKernel::run(const Window &window, cl::CommandQueue &queue) -{ - ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this); - ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window); - - const TensorShape &in_shape1 = _input->info()->tensor_shape(); - const TensorShape &in_shape2 = _alpha->info()->tensor_shape(); - const TensorShape &out_shape = _output->info()->tensor_shape(); - - bool can_collapse = true; - if (std::min(in_shape1.total_size(), in_shape2.total_size()) > 1) - { - can_collapse = - (std::min(in_shape1.num_dimensions(), in_shape2.num_dimensions()) > Window::DimZ); - for (size_t d = Window::DimZ; can_collapse && (d < out_shape.num_dimensions()); d++) - { - can_collapse = (in_shape1[d] == in_shape2[d]); - } - } - - bool has_collapsed = false; - Window collapsed = - can_collapse ? window.collapse_if_possible(ICLKernel::window(), Window::DimZ, &has_collapsed) - : window; - - const TensorShape &in_shape1_collapsed = - has_collapsed ? in_shape1.collapsed_from(Window::DimZ) : in_shape1; - const TensorShape &in_shape2_collapsed = - has_collapsed ? 
in_shape2.collapsed_from(Window::DimZ) : in_shape2; - - Window slice = collapsed.first_slice_window_3D(); - Window slice_input1 = slice.broadcast_if_dimension_le_one(in_shape1_collapsed); - Window slice_input2 = slice.broadcast_if_dimension_le_one(in_shape2_collapsed); - - do - { - unsigned int idx = 0; - add_3D_tensor_argument(idx, _input, slice_input1); - add_3D_tensor_argument(idx, _alpha, slice_input2); - add_3D_tensor_argument(idx, _output, slice); - - enqueue(queue, *this, slice); - - collapsed.slide_window_slice_3D(slice_input1); - collapsed.slide_window_slice_3D(slice_input2); - } while (collapsed.slide_window_slice_3D(slice)); -} - -BorderSize CLPReLUKernel::border_size() const -{ - const unsigned int replicateSize = - _output->info()->dimension(0) - - std::min(_input->info()->dimension(0), _alpha->info()->dimension(0)); - const unsigned int border = - std::min(num_elems_processed_per_iteration - 1U, replicateSize); - return BorderSize(0, border, 0, 0); -} diff --git a/compute/ARMComputeEx/src/core/CL/kernels/CLQuantizationSymmetricKernel.cpp b/compute/ARMComputeEx/src/core/CL/kernels/CLQuantizationSymmetricKernel.cpp index 2d551f6..1a7a18c 100644 --- a/compute/ARMComputeEx/src/core/CL/kernels/CLQuantizationSymmetricKernel.cpp +++ b/compute/ARMComputeEx/src/core/CL/kernels/CLQuantizationSymmetricKernel.cpp @@ -49,6 +49,7 @@ #include "arm_compute/core/Utils.h" #include "arm_compute/core/Validate.h" #include "arm_compute/core/Window.h" +#include "support/StringSupport.h" namespace arm_compute { @@ -69,7 +70,7 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *scale_fac // Output must always be initialized ARM_COMPUTE_RETURN_ERROR_ON(output->tensor_shape().total_size() == 0); - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::S8); + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::QASYMM8_SIGNED); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output); return Status{}; diff --git a/compute/ARMComputeEx/src/core/CL/kernels/CLReduceOperationKernel.cpp b/compute/ARMComputeEx/src/core/CL/kernels/CLReduceOperationKernel.cpp index a983183..06c2579 100644 --- a/compute/ARMComputeEx/src/core/CL/kernels/CLReduceOperationKernel.cpp +++ b/compute/ARMComputeEx/src/core/CL/kernels/CLReduceOperationKernel.cpp @@ -43,6 +43,7 @@ #include "arm_compute/core/CL/CLHelpers.h" #include "arm_compute/core/CL/CLKernelLibraryEx.h" #include "arm_compute/core/CL/ICLTensor.h" +#include "support/StringSupport.h" using namespace arm_compute; namespace diff --git a/compute/ARMComputeEx/src/core/CL/kernels/CLScaleFactorSymm8Kernel.cpp b/compute/ARMComputeEx/src/core/CL/kernels/CLScaleFactorSymm8Kernel.cpp index ff1904a..8d8853c 100644 --- a/compute/ARMComputeEx/src/core/CL/kernels/CLScaleFactorSymm8Kernel.cpp +++ b/compute/ARMComputeEx/src/core/CL/kernels/CLScaleFactorSymm8Kernel.cpp @@ -48,6 +48,7 @@ #include "arm_compute/core/Validate.h" #include "arm_compute/core/Window.h" #include "arm_compute/core/utils/misc/ShapeCalculator.h" +#include "support/StringSupport.h" #include diff --git a/compute/ARMComputeEx/src/core/CL/kernels/CLSpaceToDepthKernel.cpp b/compute/ARMComputeEx/src/core/CL/kernels/CLSpaceToDepthKernel.cpp deleted file mode 100644 index 64fc038..0000000 --- a/compute/ARMComputeEx/src/core/CL/kernels/CLSpaceToDepthKernel.cpp +++ /dev/null @@ -1,148 +0,0 @@ -/* - * Copyright (c) 2018 Samsung Electronics Co., Ltd. 
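For reference, the operation implemented by the deleted CLPReLUKernel is the parametric ReLU, out = x for x > 0 and alpha * x otherwise, with alpha broadcast against the input shape. A minimal scalar sketch in plain C++ (illustrative only; it assumes one slope per channel with channels innermost, rather than the kernel's general broadcast):

#include <cstddef>
#include <vector>

// Scalar PReLU reference: out[i] = in[i] if in[i] > 0, otherwise alpha_c * in[i],
// where alpha holds one slope per channel (simplified broadcast).
std::vector<float> prelu_reference(const std::vector<float> &in,
                                   const std::vector<float> &alpha, std::size_t channels)
{
  std::vector<float> out(in.size());
  for (std::size_t i = 0; i < in.size(); ++i)
  {
    const float a = alpha[i % channels]; // channel-wise broadcast (channels innermost)
    out[i] = (in[i] > 0.0f) ? in[i] : a * in[i];
  }
  return out;
}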
All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * Copyright (c) 2016-2018 ARM Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#include "arm_compute/core/CL/kernels/CLSpaceToDepthKernel.h" - -#include "arm_compute/core/CL/CLHelpers.h" -#include "arm_compute/core/CL/CLKernelLibraryEx.h" -#include "arm_compute/core/CL/ICLTensor.h" - -using namespace arm_compute; - -namespace -{ -Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, - const int32_t block_size) -{ - ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8, DataType::QASYMM8, - DataType::S16, DataType::S32, DataType::F16, - DataType::F32); - ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U8, DataType::QASYMM8, - DataType::S16, DataType::S32, DataType::F16, - DataType::F32); - ARM_COMPUTE_RETURN_ERROR_ON_MSG(block_size < 1, - "Block size should be greater than or equal to 1."); - - ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->dimension(3) != output->dimension(3), - "Input batch should be equal to Output batch"); - - auto layout_out = input->data_layout(); - ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(input, output); - - auto index_depth = get_data_layout_dimension_index(layout_out, DataLayoutDimension::CHANNEL); - auto index_height = get_data_layout_dimension_index(layout_out, DataLayoutDimension::HEIGHT); - auto index_width = get_data_layout_dimension_index(layout_out, DataLayoutDimension::WIDTH); - ARM_COMPUTE_RETURN_ERROR_ON_MSG( - input->dimension(index_depth) * block_size * block_size != output->dimension(index_depth), - "Output depth should be equal to (input depth * block size *block size)"); - - ARM_COMPUTE_RETURN_ERROR_ON_MSG((input->dimension(index_width) % block_size) || - (input->dimension(index_height) % block_size), - "Input height and width should be divisible by block size"); - - ARM_COMPUTE_RETURN_ERROR_ON_MSG( - 
(output->dimension(index_width) != (input->dimension(index_width) / block_size)) || - (output->dimension(index_height) != (input->dimension(index_height) / block_size)), - "Output height and width should be equal to " - "input_height/blocksize and input_width/blocksize respectively"); - - return Status{}; -} - -} // namespace - -CLSpaceToDepthKernel::CLSpaceToDepthKernel() : _input(nullptr), _output(nullptr) {} - -void CLSpaceToDepthKernel::configure(const ICLTensor *input, ICLTensor *output, - const int32_t block_size) -{ - - ARM_COMPUTE_ERROR_ON_NULLPTR(input, output); - ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), block_size)); - - _input = input; - _output = output; - - // Set kernel build options - auto layout_out = input->info()->data_layout(); - std::set build_opts; - build_opts.emplace("-DDATA_TYPE=" + get_cl_type_from_data_type(input->info()->data_type())); - build_opts.emplace("-DBLOCK_SIZE=" + support::cpp11::to_string(block_size)); - auto index_depth = get_data_layout_dimension_index(layout_out, DataLayoutDimension::CHANNEL); - auto depth = input->info()->dimension(index_depth); - build_opts.emplace("-DDEPTH_IN=" + support::cpp11::to_string(depth)); - build_opts.emplace("-DZ_IN=" + support::cpp11::to_string(input->info()->tensor_shape().z())); - - // Create kernel - _kernel = static_cast(CLKernelLibraryEx::get().create_kernel( - "space_to_depth_" + lower_string(string_from_data_layout(layout_out)), build_opts)); - - // Configure kernel window - Window win = calculate_max_window(*input->info(), Steps()); - - Coordinates coord; - coord.set_num_dimensions(output->info()->num_dimensions()); - output->info()->set_valid_region(ValidRegion(coord, output->info()->tensor_shape())); - - ICLKernel::configure_internal(win); -} - -void CLSpaceToDepthKernel::run(const Window &window, cl::CommandQueue &queue) -{ - ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this); - ARM_COMPUTE_ERROR_ON_MISMATCHING_WINDOWS(ICLKernel::window(), window); - - Window slice_in = window.first_slice_window_4D().collapse(ICLKernel::window(), 2, 4); - - // Setup output slice - Window slice_out(slice_in); - slice_out.set(Window::DimX, Window::Dimension(0, 0, 0)); - slice_out.set(Window::DimY, Window::Dimension(0, 0, 0)); - slice_out.set(Window::DimZ, Window::Dimension(0, 0, 0)); - slice_out.set(3, Window::Dimension(0, 0, 0)); - - do - { - unsigned int idx = 0; - add_4D_tensor_argument(idx, _input, slice_in); - add_4D_tensor_argument(idx, _output, slice_out); - enqueue(queue, *this, slice_in); - } while (window.slide_window_slice_4D(slice_in) && window.slide_window_slice_4D(slice_out)); -} diff --git a/compute/ARMComputeEx/src/core/CL/kernels/CLTransposeConvLayerUpsampleKernel.cpp b/compute/ARMComputeEx/src/core/CL/kernels/CLTransposeConvLayerUpsampleKernel.cpp deleted file mode 100644 index 61999cb..0000000 --- a/compute/ARMComputeEx/src/core/CL/kernels/CLTransposeConvLayerUpsampleKernel.cpp +++ /dev/null @@ -1,188 +0,0 @@ -/* - * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
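The shape checks in the deleted CLSpaceToDepthKernel (output depth = input depth * block_size^2, output width/height = input width/height / block_size) describe the usual space_to_depth rearrangement. A plain-C++ sketch for a single NCHW batch follows; the exact ordering of the block offsets inside the output channel index is defined by the OpenCL kernel source, which is not part of this diff, so the ordering used here is only illustrative.

#include <cstddef>
#include <vector>

// Scalar space_to_depth sketch for one NCHW batch with block size B: each BxB
// spatial tile of an input channel becomes B*B output channels. The (by, bx)
// ordering inside the output channel index below is an illustrative convention.
std::vector<float> space_to_depth_nchw(const std::vector<float> &in, std::size_t C,
                                       std::size_t H, std::size_t W, std::size_t B)
{
  const std::size_t Ho = H / B, Wo = W / B, Co = C * B * B;
  std::vector<float> out(Co * Ho * Wo);
  for (std::size_t c = 0; c < C; ++c)
    for (std::size_t y = 0; y < H; ++y)
      for (std::size_t x = 0; x < W; ++x)
      {
        const std::size_t by = y % B, bx = x % B;
        const std::size_t co = c + C * (bx + B * by); // illustrative ordering
        const std::size_t idx_out = (co * Ho + y / B) * Wo + x / B;
        const std::size_t idx_in = (c * H + y) * W + x;
        out[idx_out] = in[idx_in];
      }
  return out;
}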
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * Copyright (c) 2017-2019 ARM Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#include "arm_compute/core/CL/kernels/CLTransposeConvLayerUpsampleKernel.h" - -#include "arm_compute/core/CL/CLHelpers.h" -#include "arm_compute/core/CL/CLKernelLibrary.h" -#include "arm_compute/core/CL/CLValidate.h" -#include "arm_compute/core/CL/ICLTensor.h" -#include "arm_compute/core/Error.h" -#include "arm_compute/core/Helpers.h" -#include "arm_compute/core/Validate.h" -#include "arm_compute/core/Window.h" - -using namespace arm_compute; - -CLTransposeConvLayerUpsampleKernel::CLTransposeConvLayerUpsampleKernel() - : _input(nullptr), _output(nullptr), _inner_border(), _info() -{ -} - -Status CLTransposeConvLayerUpsampleKernel::validate(const ITensorInfo *input, - const ITensorInfo *output, - const BorderSize &inner_border, - const PadStrideInfo &info) -{ - ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output); - ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input); - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::F16, - DataType::F32); - ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output); - ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(input, output); - - const DataLayout data_layout = input->data_layout(); - - const size_t idx_w = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH); - const size_t idx_h = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT); - const size_t idx_c = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL); - - ARM_COMPUTE_RETURN_ERROR_ON(output->dimension(idx_w) == 0); - ARM_COMPUTE_RETURN_ERROR_ON(output->dimension(idx_h) == 0); - - ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(idx_c) != output->dimension(idx_c)); - for (size_t i = 3; i < Coordinates::num_max_dimensions; ++i) - { - ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(i) != output->dimension(i)); - } - - ARM_COMPUTE_RETURN_ERROR_ON_MSG(inner_border.right > info.stride().first - 1, - "inner_border_right must be smaller that stride_x"); - ARM_COMPUTE_RETURN_ERROR_ON_MSG(inner_border.top > info.stride().second - 1, - "inner_border_top must be smaller that stride_y"); - - return Status{}; -} - -void CLTransposeConvLayerUpsampleKernel::configure(const ICLTensor *input, ICLTensor *output, - const 
BorderSize &inner_border, - const PadStrideInfo &info) -{ - ARM_COMPUTE_ERROR_ON_NULLPTR(input, output); - - _input = input; - _output = output; - _inner_border = inner_border; - _info = info; - - // Perform validation step - ARM_COMPUTE_ERROR_THROW_ON(CLTransposeConvLayerUpsampleKernel::validate( - input->info(), output->info(), inner_border, info)); - - // Create kernel - CLBuildOptions build_opts; - build_opts.add_option(("-DDATA_TYPE=" + get_cl_type_from_data_type(input->info()->data_type()))); - _kernel = static_cast( - CLKernelLibrary::get().create_kernel("deconvolution_upsample", build_opts.options())); - - constexpr unsigned int num_elems_processed_per_iteration = 1; - - // Configure kernel window - Window win = calculate_max_window(*output->info(), Steps(num_elems_processed_per_iteration)); - AccessWindowHorizontal output_access(output->info(), 0, num_elems_processed_per_iteration); - output_access.set_valid_region(win, ValidRegion(Coordinates(), output->info()->tensor_shape())); - - ICLKernel::configure_internal(win); -} - -void CLTransposeConvLayerUpsampleKernel::run(const Window &window, cl::CommandQueue &queue) -{ - ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this); - ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window); - - const DataLayout data_layout = _input->info()->data_layout(); - - const size_t idx_w = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH); - const size_t idx_h = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT); - - const int out_start_x = _info.pad_left(); - const int out_end_x = _output->info()->dimension(idx_w) - _inner_border.right - - _info.pad_right() + _info.stride().first - 1; - const int out_step_x = _info.stride().first; - - const int out_start_y = _inner_border.top + _info.pad_top(); - const int out_end_y = - _output->info()->dimension(idx_h) - _info.pad_bottom() + _info.stride().second - 1; - const int out_step_y = _info.stride().second; - - switch (data_layout) - { - case DataLayout::NCHW: - { - Window collapsed = window.collapse_if_possible(ICLKernel::window(), Window::DimZ); - - Window slice_out = collapsed.first_slice_window_3D(); - slice_out.set(Window::DimX, Window::Dimension(out_start_x, out_end_x, out_step_x)); - slice_out.set(Window::DimY, Window::Dimension(out_start_y, out_end_y, out_step_y)); - - Window slice_in = collapsed.first_slice_window_3D(); - - do - { - unsigned int idx = 0; - add_3D_tensor_argument(idx, _input, slice_in); - add_3D_tensor_argument(idx, _output, slice_out); - enqueue(queue, *this, slice_out); - } while (collapsed.slide_window_slice_3D(slice_in) && - collapsed.slide_window_slice_3D(slice_out)); - break; - } - case DataLayout::NHWC: - { - // NOTE: not collapsing in NHWC - Window slice_out = window.first_slice_window_3D(); - slice_out.set(Window::DimY, Window::Dimension(out_start_x, out_end_x, out_step_x)); - slice_out.set(Window::DimZ, Window::Dimension(out_start_y, out_end_y, out_step_y)); - - Window slice_in = window.first_slice_window_3D(); - - do - { - unsigned int idx = 0; - add_3D_tensor_argument(idx, _input, slice_in); - add_3D_tensor_argument(idx, _output, slice_out); - enqueue(queue, *this, slice_out); - } while (window.slide_window_slice_3D(slice_in) && window.slide_window_slice_3D(slice_out)); - break; - } - default: - ARM_COMPUTE_ERROR("Unsupported data layout"); - } -} diff --git a/compute/ARMComputeEx/src/core/CPP/kernels/CPPUpsampleKernelEx.cpp b/compute/ARMComputeEx/src/core/CPP/kernels/CPPUpsampleKernelEx.cpp deleted file mode 100644 
index 648afb3..0000000 --- a/compute/ARMComputeEx/src/core/CPP/kernels/CPPUpsampleKernelEx.cpp +++ /dev/null @@ -1,118 +0,0 @@ -/* - * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * Copyright (c) 2017-2019 ARM Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - -#include "arm_compute/core/CPP/kernels/CPPUpsampleKernelEx.h" - -#include "arm_compute/core/Error.h" -#include "arm_compute/core/Helpers.h" -#include "arm_compute/core/ITensor.h" -#include "arm_compute/core/TensorInfo.h" -#include "arm_compute/core/Types.h" -#include "arm_compute/core/Validate.h" -#include "arm_compute/core/utils/misc/ShapeCalculator.h" - -#include -#include - -namespace arm_compute -{ -CPPUpsampleKernelEx::CPPUpsampleKernelEx() : _input(nullptr), _output(nullptr), _info() {} - -bool CPPUpsampleKernelEx::is_parallelisable() const { return false; } - -void CPPUpsampleKernelEx::configure(const ITensor *input, ITensor *output, - const PadStrideInfo &info) -{ - ARM_COMPUTE_ERROR_ON_NULLPTR(input, output); - - _input = input; - _output = output; - _info = info; - - // Configure kernel window - Window win = calculate_max_window(*input->info(), Steps()); - - // The CPPUpsampleKernelEx doesn't need padding so update_window_and_padding() can be skipped - Coordinates coord; - coord.set_num_dimensions(output->info()->num_dimensions()); - output->info()->set_valid_region(ValidRegion(coord, output->info()->tensor_shape())); - - ICPPKernel::configure(win); -} - -void CPPUpsampleKernelEx::run(const Window &window, const ThreadInfo &info) -{ - ARM_COMPUTE_UNUSED(info); - ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this); - ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICPPKernel::window(), window); - - // Initialize _scaled_output buffer - const int width_scaled = _output->info()->dimension(0); - const int height_scaled = _output->info()->dimension(1); - const int stride_x = _info.stride().first; - const int stride_y = _info.stride().second; - const int start_x = _info.pad_left(); - const int start_y = _info.pad_top(); - const int end_y = height_scaled - _info.pad_bottom(); - const int end_x = width_scaled - _info.pad_top(); - const size_t element_size = _input->info()->element_size(); - - // The fill value is normally 0, but for QASYMM8 the '0' corresponds to the offset - const uint8_t fill_value = - _output->info()->data_type() == DataType::QASYMM8 - ? utility::clamp(_output->info()->quantization_info().uniform().offset) - : 0; - // Filling a value different than 0 works only for QASYMM8 datatype since we are filling 1byte - // values in a buffer of uint8_ts - std::fill_n(_output->buffer(), _output->info()->total_size(), fill_value); - - // Create window - Window window_out(window); - window_out.set(Window::DimX, Window::Dimension(start_x, end_x, stride_x)); - window_out.set(Window::DimY, Window::Dimension(start_y, end_y, stride_y)); - - // Create iterators - Iterator in(_input, window); - Iterator out(_output, window_out); - - execute_window_loop( - window, [&](const Coordinates &) { memcpy(out.ptr(), in.ptr(), element_size); }, in, out); -} -} // namespace arm_compute diff --git a/compute/ARMComputeEx/src/core/NEON/kernels/NECastKernel.cpp b/compute/ARMComputeEx/src/core/NEON/kernels/NECastKernel.cpp deleted file mode 100644 index fbb9dbc..0000000 --- a/compute/ARMComputeEx/src/core/NEON/kernels/NECastKernel.cpp +++ /dev/null @@ -1,671 +0,0 @@ -/* - * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
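The deleted CPPUpsampleKernelEx performs the zero-insertion ("upsample") step of a transposed convolution: the output is first filled with a constant, which for QASYMM8 data is the quantization offset rather than 0, and each input element is then scattered to a position given by the stride and padding. A simplified single-plane sketch (illustrative names, no ACL types):

#include <cstddef>
#include <cstdint>
#include <vector>

// Pre-fill the output with a constant (0, or the QASYMM8 zero point), then scatter
// the input with the given strides starting at the left/top padding offsets.
void upsample_plane(const std::vector<uint8_t> &in, std::size_t in_w, std::size_t in_h,
                    std::vector<uint8_t> &out, std::size_t out_w, std::size_t out_h,
                    std::size_t stride_x, std::size_t stride_y,
                    std::size_t pad_left, std::size_t pad_top, uint8_t fill_value)
{
  out.assign(out_w * out_h, fill_value); // e.g. the quantization offset instead of 0
  for (std::size_t y = 0; y < in_h; ++y)
    for (std::size_t x = 0; x < in_w; ++x)
    {
      const std::size_t ox = pad_left + x * stride_x;
      const std::size_t oy = pad_top + y * stride_y;
      if (ox < out_w && oy < out_h)
        out[oy * out_w + ox] = in[y * in_w + x];
    }
}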
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * Copyright (c) 2017-2019 ARM Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#include "arm_compute/core/NEON/kernels/NECastKernel.h" - -#include "arm_compute/core/AccessWindowStatic.h" -#include "arm_compute/core/CPP/Validate.h" -#include "arm_compute/core/Error.h" -#include "arm_compute/core/Helpers.h" -#include "arm_compute/core/NEON/NEAsymm.h" -#include "arm_compute/core/NEON/wrapper/wrapper.h" -#include "arm_compute/core/Utils.h" -#include "arm_compute/core/Validate.h" -#include "arm_compute/core/Window.h" - -#include - -namespace arm_compute -{ -namespace -{ -Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, - SubDataType input_subtype) -{ - ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output); - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8, DataType::S8, - DataType::QASYMM8, DataType::U32, - DataType::S32, DataType::F32); - ARM_COMPUTE_RETURN_ERROR_ON(input_subtype == SubDataType::BOOL && - input->data_type() != DataType::U8); - - if (output->tensor_shape().total_size() > 0) - { - ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(output); - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U8, DataType::S8, - DataType::QASYMM8, DataType::U32, - DataType::S32, DataType::F32); - ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output); - } - - return Status{}; -} - -std::tuple validate_and_configure_window(ITensorInfo *input, ITensorInfo *output) -{ - // Configure kernel window - Window win = calculate_max_window(*input, Steps()); - - // Output tensor auto initialization if not yet initialized - auto_init_if_empty(*output, input->tensor_shape(), 1, DataType::F32); - - // NECastKernel doesn't need padding so update_window_and_padding() can be skipped - Coordinates coord; - coord.set_num_dimensions(output->num_dimensions()); - output->set_valid_region(ValidRegion(coord, output->tensor_shape())); - - return std::make_tuple(Status{}, win); -} - -typedef struct bool8x16 -{ - uint8x16_t val; -} 
bool8x16_t; - -static inline uint8x16_t vreinterpretq_u8_b8(bool8x16_t __a) { return (uint8x16_t)__a.val; } - -template inline ToV vcast(const FromV &v) { return v; } -template <> inline uint8x16_t vcast(const bool8x16_t &v) -{ - const uint8x16_t vu8 = vreinterpretq_u8_b8(v); - const uint8x16_t zero_uint8x16 = vdupq_n_u8(0); - uint8x16_t mask = vcgtq_u8(vu8, zero_uint8x16); - return vshrq_n_u8(mask, 7); // true -> 1, false -> 0 -} - -template <> inline uint32x4x4_t vcast(const bool8x16_t &v) -{ - const uint8x16_t vu8 = vreinterpretq_u8_b8(v); - const uint8x16_t zero_uint8x16 = vdupq_n_u8(0); - uint8x16_t mask = vcgtq_u8(vu8, zero_uint8x16); - uint8x16_t vb = vshrq_n_u8(mask, 7); // true -> 1, false -> 0 - - const uint32x4x4_t ret = {{ - vmovl_u16(vget_low_u16(vmovl_u8(vget_low_u8(vb)))), - vmovl_u16(vget_high_u16(vmovl_u8(vget_low_u8(vb)))), - vmovl_u16(vget_low_u16(vmovl_u8(vget_high_u8(vb)))), - vmovl_u16(vget_high_u16(vmovl_u8(vget_high_u8(vb)))), - }}; - - return ret; -} - -template <> inline int32x4x4_t vcast(const bool8x16_t &v) -{ - const uint8x16_t vu8 = vreinterpretq_u8_b8(v); - const uint8x16_t zero_uint8x16 = vdupq_n_u8(0); - uint8x16_t mask = vcgtq_u8(vu8, zero_uint8x16); - uint8x16_t vb = vshrq_n_u8(mask, 7); // true -> 1, false -> 0 - - const int32x4x4_t ret = {{ - vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(vmovl_u8(vget_low_u8(vb))))), - vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(vmovl_u8(vget_low_u8(vb))))), - vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(vmovl_u8(vget_high_u8(vb))))), - vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(vmovl_u8(vget_high_u8(vb))))), - }}; - - return ret; -} - -template <> inline float32x4x4_t vcast(const bool8x16_t &v) -{ - const uint8x16_t vu8 = vreinterpretq_u8_b8(v); - const uint8x16_t zero_uint8x16 = vdupq_n_u8(0); - uint8x16_t mask = vcgtq_u8(vu8, zero_uint8x16); - uint8x16_t vb = vshrq_n_u8(mask, 7); // true -> 1, false -> 0 - - const float32x4x4_t ret = {{ - vcvtq_f32_u32(vmovl_u16(vget_low_u16(vmovl_u8(vget_low_u8(vb))))), - vcvtq_f32_u32(vmovl_u16(vget_high_u16(vmovl_u8(vget_low_u8(vb))))), - vcvtq_f32_u32(vmovl_u16(vget_low_u16(vmovl_u8(vget_high_u8(vb))))), - vcvtq_f32_u32(vmovl_u16(vget_high_u16(vmovl_u8(vget_high_u8(vb))))), - }}; - - return ret; -} - -template <> inline uint32x4x4_t vcast(const uint8x16_t &v) -{ - const uint32x4x4_t ret = {{ - vmovl_u16(vget_low_u16(vmovl_u8(vget_low_u8(v)))), - vmovl_u16(vget_high_u16(vmovl_u8(vget_low_u8(v)))), - vmovl_u16(vget_low_u16(vmovl_u8(vget_high_u8(v)))), - vmovl_u16(vget_high_u16(vmovl_u8(vget_high_u8(v)))), - }}; - - return ret; -} - -template <> inline int32x4x4_t vcast(const uint8x16_t &v) -{ - const int32x4x4_t ret = {{ - vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(vmovl_u8(vget_low_u8(v))))), - vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(vmovl_u8(vget_low_u8(v))))), - vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(vmovl_u8(vget_high_u8(v))))), - vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(vmovl_u8(vget_high_u8(v))))), - }}; - - return ret; -} - -template <> inline float32x4x4_t vcast(const uint8x16_t &v) -{ - const float32x4x4_t ret = {{ - vcvtq_f32_u32(vmovl_u16(vget_low_u16(vmovl_u8(vget_low_u8(v))))), - vcvtq_f32_u32(vmovl_u16(vget_high_u16(vmovl_u8(vget_low_u8(v))))), - vcvtq_f32_u32(vmovl_u16(vget_low_u16(vmovl_u8(vget_high_u8(v))))), - vcvtq_f32_u32(vmovl_u16(vget_high_u16(vmovl_u8(vget_high_u8(v))))), - }}; - - return ret; -} - -template <> inline uint8x16_t vcast(const int32x4x4_t &v) -{ - // Saturate cast - return 
vcombine_u8(vqmovn_u16(vcombine_u16(vqmovun_s32(v.val[0]), vqmovun_s32(v.val[1]))), - vqmovn_u16(vcombine_u16(vqmovun_s32(v.val[2]), vqmovun_s32(v.val[3])))); -} - -template <> inline uint32x4x4_t vcast(const int32x4x4_t &v) -{ - // Saturate cast - const uint32x4x4_t ret = {{ - vcombine_u32(vqmovun_s64(vmovl_s32(vget_low_s32(v.val[0]))), - vqmovun_s64(vmovl_s32(vget_high_s32(v.val[0])))), - vcombine_u32(vqmovun_s64(vmovl_s32(vget_low_s32(v.val[1]))), - vqmovun_s64(vmovl_s32(vget_high_s32(v.val[1])))), - vcombine_u32(vqmovun_s64(vmovl_s32(vget_low_s32(v.val[2]))), - vqmovun_s64(vmovl_s32(vget_high_s32(v.val[2])))), - vcombine_u32(vqmovun_s64(vmovl_s32(vget_low_s32(v.val[3]))), - vqmovun_s64(vmovl_s32(vget_high_s32(v.val[3])))), - }}; - - return ret; -} - -template <> inline float32x4x4_t vcast(const int32x4x4_t &v) -{ - const float32x4x4_t ret = {{ - vcvtq_f32_s32(v.val[0]), vcvtq_f32_s32(v.val[1]), vcvtq_f32_s32(v.val[2]), - vcvtq_f32_s32(v.val[3]), - }}; - - return ret; -} - -template <> inline uint8x16_t vcast(const uint32x4x4_t &v) -{ - return vcombine_u8(vqmovn_u16(vcombine_u16(vqmovn_u32(v.val[0]), vqmovn_u32(v.val[1]))), - vqmovn_u16(vcombine_u16(vqmovn_u32(v.val[2]), vqmovn_u32(v.val[3])))); -} - -template <> inline int32x4x4_t vcast(const uint32x4x4_t &v) -{ - const int32x4x4_t ret = {{ - vcombine_s32(vmovn_s64(vreinterpretq_s64_u64(vmovl_u32(vget_low_u32(v.val[0])))), - vmovn_s64(vreinterpretq_s64_u64(vmovl_u32(vget_high_u32(v.val[0]))))), - vcombine_s32(vmovn_s64(vreinterpretq_s64_u64(vmovl_u32(vget_low_u32(v.val[1])))), - vmovn_s64(vreinterpretq_s64_u64(vmovl_u32(vget_high_u32(v.val[1]))))), - vcombine_s32(vmovn_s64(vreinterpretq_s64_u64(vmovl_u32(vget_low_u32(v.val[2])))), - vmovn_s64(vreinterpretq_s64_u64(vmovl_u32(vget_high_u32(v.val[2]))))), - vcombine_s32(vmovn_s64(vreinterpretq_s64_u64(vmovl_u32(vget_low_u32(v.val[3])))), - vmovn_s64(vreinterpretq_s64_u64(vmovl_u32(vget_high_u32(v.val[3]))))), - }}; - - return ret; -} - -template <> inline float32x4x4_t vcast(const uint32x4x4_t &v) -{ - const float32x4x4_t ret = {{ - vcvtq_f32_u32(v.val[0]), vcvtq_f32_u32(v.val[1]), vcvtq_f32_u32(v.val[2]), - vcvtq_f32_u32(v.val[3]), - }}; - - return ret; -} - -template <> inline uint8x16_t vcast(const float32x4x4_t &v) -{ - // Saturate cast - return vcombine_u8(vqmovn_u16(vcombine_u16(vqmovun_s32(vcvtq_s32_f32(v.val[0])), - vqmovun_s32(vcvtq_s32_f32(v.val[1])))), - vqmovn_u16(vcombine_u16(vqmovun_s32(vcvtq_s32_f32(v.val[2])), - vqmovun_s32(vcvtq_s32_f32(v.val[3]))))); -} - -template <> inline uint32x4x4_t vcast(const float32x4x4_t &v) -{ - const uint32x4x4_t ret = {{ - vcvtq_u32_f32(v.val[0]), vcvtq_u32_f32(v.val[1]), vcvtq_u32_f32(v.val[2]), - vcvtq_u32_f32(v.val[3]), - }}; - - return ret; -} - -template <> inline int32x4x4_t vcast(const float32x4x4_t &v) -{ - const int32x4x4_t ret = {{ - vcvtq_s32_f32(v.val[0]), vcvtq_s32_f32(v.val[1]), vcvtq_s32_f32(v.val[2]), - vcvtq_s32_f32(v.val[3]), - }}; - - return ret; -} - -template struct cast_vector; -template <> struct cast_vector -{ - using type = bool8x16_t; -}; -template <> struct cast_vector -{ - using type = uint8x16_t; -}; -template <> struct cast_vector -{ - using type = uint32x4x4_t; -}; -template <> struct cast_vector -{ - using type = int32x4x4_t; -}; -template <> struct cast_vector -{ - using type = float32x4x4_t; -}; - -template inline void store_result(T *ptr, const typename cast_vector::type &v) -{ - wrapper::vstore(ptr, v.val[0]); - wrapper::vstore(ptr + 4, v.val[1]); - wrapper::vstore(ptr + 8, v.val[2]); - wrapper::vstore(ptr 
+ 12, v.val[3]); -} - -template <> inline void store_result(uint8_t *ptr, const uint8x16_t &v) -{ - wrapper::vstore(ptr, v); -} - -inline bool8x16_t vloadq(const bool *ptr) -{ - bool8x16_t ret; - ret.val = wrapper::vloadq(reinterpret_cast(ptr)); - return ret; -} - -template inline typename cast_vector::type load_input(const T *ptr) -{ - return wrapper::vloadq(ptr); -} - -template <> inline typename cast_vector::type load_input(const bool *ptr) -{ - return vloadq(ptr); -} - -template <> inline typename cast_vector::type load_input(const uint32_t *ptr) -{ - return vld4q_u32(ptr); -} - -template <> inline typename cast_vector::type load_input(const int32_t *ptr) -{ - return vld4q_s32(ptr); -} - -template <> inline typename cast_vector::type load_input(const float *ptr) -{ - return vld4q_f32(ptr); -} - -template inline T get_value(const T *ptr) { return *ptr; } - -template <> inline bool get_value(const bool *ptr) -{ - bool ret = (*ptr != 0); - return ret; -} - -template void run_cast(const ITensor *input, ITensor *output, const Window &window) -{ - const int window_step_x = 16; - const auto window_start_x = static_cast(window.x().start()); - const auto window_end_x = static_cast(window.x().end()); - - // Collapse window and reset first dimension to handle tail calculations manually - Window win_collapsed = window.collapse_if_possible(window, Window::DimZ); - win_collapsed.set(Window::DimX, Window::Dimension(0, 1, 1)); - - // Create iterators - Iterator in(input, win_collapsed); - Iterator out(output, win_collapsed); - -#ifdef __aarch64__ - constexpr RoundingPolicy rounding_policy = RoundingPolicy::TO_NEAREST_EVEN; -#else //__aarch64__ - constexpr RoundingPolicy rounding_policy = RoundingPolicy::TO_ZERO; -#endif //__aarch64__ - - execute_window_loop( - win_collapsed, - [&](const Coordinates &) { - const auto in_ptr = reinterpret_cast(in.ptr()); - - int x = window_start_x; - for (; x <= (window_end_x - window_step_x); x += window_step_x) - { - using from_vector = typename cast_vector::type; - const from_vector vin = load_input(in_ptr + x); - - switch (output->info()->data_type()) - { - case DataType::U8: - { - using to_vector = typename cast_vector::type; - const to_vector vout = vcast(vin); - store_result(reinterpret_cast(out.ptr()) + x, vout); - break; - } - case DataType::QASYMM8: - { - using to_vector = typename cast_vector::type; - const UniformQuantizationInfo &qinfo_out = - output->info()->quantization_info().uniform(); - const auto vf = vcast(vin); - const auto vout = vquantize(vf, qinfo_out); - store_result(reinterpret_cast(out.ptr()) + x, vout); - break; - } - case DataType::U32: - { - using to_vector = typename cast_vector::type; - const to_vector vout = vcast(vin); - store_result(reinterpret_cast(out.ptr()) + x, vout); - break; - } - case DataType::S32: - { - using to_vector = typename cast_vector::type; - const to_vector vout = vcast(vin); - store_result(reinterpret_cast(out.ptr()) + x, vout); - break; - } - case DataType::F32: - { - using to_vector = typename cast_vector::type; - const to_vector vout = vcast(vin); - store_result(reinterpret_cast(out.ptr()) + x, vout); - break; - } - default: - ARM_COMPUTE_ERROR("Unsupported data type."); - } - } - - // Compute left-over elements - for (; x < window_end_x; ++x) - { - FromT val = get_value(in_ptr + x); - switch (output->info()->data_type()) - { - case DataType::U8: - { - *(reinterpret_cast(out.ptr()) + x) = static_cast(val); - break; - } - case DataType::QASYMM8: - { - const QuantizationInfo &qinfo_out = 
output->info()->quantization_info(); - const auto qval = - quantize_qasymm8(static_cast(val), qinfo_out, rounding_policy); - *(reinterpret_cast(out.ptr()) + x) = qval; - break; - } - case DataType::U32: - { - *(reinterpret_cast(out.ptr()) + x) = static_cast(val); - break; - } - case DataType::S32: - { - *(reinterpret_cast(out.ptr()) + x) = static_cast(val); - break; - } - case DataType::F32: - { - *(reinterpret_cast(out.ptr()) + x) = static_cast(val); - break; - } - default: - ARM_COMPUTE_ERROR("Unsupported data type."); - } - } - }, - in, out); -} - -void run_cast_qasymm8(const ITensor *input, ITensor *output, const Window &window) -{ - const int window_step_x = 16; - const auto window_start_x = static_cast(window.x().start()); - const auto window_end_x = static_cast(window.x().end()); - - // Collapse window and reset first dimension to handle tail calculations manually - Window win_collapsed = window.collapse_if_possible(window, Window::DimZ); - win_collapsed.set(Window::DimX, Window::Dimension(0, 1, 1)); - - // Create iterators - Iterator in(input, win_collapsed); - Iterator out(output, win_collapsed); - -#ifdef __aarch64__ - constexpr RoundingPolicy rounding_policy = RoundingPolicy::TO_NEAREST_EVEN; -#else //__aarch64__ - constexpr RoundingPolicy rounding_policy = RoundingPolicy::TO_ZERO; -#endif //__aarch64__ - const auto &qinfo_in = input->info()->quantization_info().uniform(); - const auto &qinfo_out = output->info()->quantization_info().uniform(); - - execute_window_loop( - win_collapsed, - [&](const Coordinates &) { - const auto in_ptr = reinterpret_cast(in.ptr()); - - int x = window_start_x; - for (; x <= (window_end_x - window_step_x); x += window_step_x) - { - using from_vector = typename cast_vector::type; - const auto vf = wrapper::vloadq(in_ptr + x); - const auto vin = vdequantize(vf, qinfo_in); - switch (output->info()->data_type()) - { - case DataType::U8: - { - using to_vector = typename cast_vector::type; - const to_vector vout = vcast(vin); - store_result(reinterpret_cast(out.ptr()) + x, vout); - break; - } - case DataType::QASYMM8: - { - using to_vector = typename cast_vector::type; - const auto vf = vcast(vin); - const auto vout = vquantize(vf, qinfo_out); - store_result(reinterpret_cast(out.ptr()) + x, vout); - break; - } - case DataType::U32: - { - using to_vector = typename cast_vector::type; - const to_vector vout = vcast(vin); - store_result(reinterpret_cast(out.ptr()) + x, vout); - break; - } - case DataType::S32: - { - using to_vector = typename cast_vector::type; - const to_vector vout = vcast(vin); - store_result(reinterpret_cast(out.ptr()) + x, vout); - break; - } - case DataType::F32: - { - using to_vector = typename cast_vector::type; - const to_vector vout = vcast(vin); - store_result(reinterpret_cast(out.ptr()) + x, vout); - break; - } - default: - ARM_COMPUTE_ERROR("Unsupported data type."); - } - } - - // Compute left-over elements - for (; x < window_end_x; ++x) - { - qasymm8_t qval_in = *(in_ptr + x); - const auto val = dequantize_qasymm8(qval_in, qinfo_in); - - switch (output->info()->data_type()) - { - case DataType::U8: - { - *(reinterpret_cast(out.ptr()) + x) = static_cast(val); - break; - } - case DataType::QASYMM8: - { - const auto qval_out = quantize_qasymm8(val, qinfo_out, rounding_policy); - *(reinterpret_cast(out.ptr()) + x) = qval_out; - break; - } - case DataType::U32: - { - *(reinterpret_cast(out.ptr()) + x) = static_cast(val); - break; - } - case DataType::S32: - { - *(reinterpret_cast(out.ptr()) + x) = static_cast(val); - break; - } - 
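The scalar tails of these cast loops go through the usual QASYMM8 affine mapping, real = scale * (q - offset) and q = clamp(round(real / scale) + offset, 0, 255); the kernel picks round-to-nearest-even on AArch64 and round-toward-zero elsewhere. Minimal helpers illustrating the mapping (rounding simplified to round-half-away-from-zero here):

#include <algorithm>
#include <cmath>
#include <cstdint>

// Dequantize: map an 8-bit asymmetric quantized value back to a real value.
inline float dequantize_u8(uint8_t q, float scale, int32_t offset)
{
  return scale * (static_cast<int32_t>(q) - offset);
}

// Quantize: map a real value to the 8-bit asymmetric quantized range [0, 255].
inline uint8_t quantize_u8(float value, float scale, int32_t offset)
{
  const int32_t q = static_cast<int32_t>(std::lround(value / scale)) + offset;
  return static_cast<uint8_t>(std::min<int32_t>(255, std::max<int32_t>(0, q)));
}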
case DataType::F32: - { - *(reinterpret_cast(out.ptr()) + x) = static_cast(val); - break; - } - default: - ARM_COMPUTE_ERROR("Unsupported data type."); - } - } - }, - in, out); -} -} // namespace - -NECastKernel::NECastKernel() : _input(nullptr), _output(nullptr), _input_subtype(SubDataType::NONE) -{ -} - -void NECastKernel::configure(const ITensor *input, ITensor *output, SubDataType input_subtype) -{ - ARM_COMPUTE_ERROR_ON_NULLPTR(input, output); - ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), input_subtype)); - - _input = input; - _output = output; - _input_subtype = input_subtype; - - // Configure kernel window - auto win_config = validate_and_configure_window(input->info(), output->info()); - - ARM_COMPUTE_ERROR_THROW_ON(std::get<0>(win_config)); - - INEKernel::configure(std::get<1>(win_config)); -} - -Status NECastKernel::validate(const ITensorInfo *input, const ITensorInfo *output, - SubDataType input_subtype) -{ - ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, input_subtype)); - ARM_COMPUTE_RETURN_ON_ERROR( - std::get<0>(validate_and_configure_window(input->clone().get(), output->clone().get()))); - return Status{}; -} - -void NECastKernel::run(const Window &window, const ThreadInfo &info) -{ - ARM_COMPUTE_UNUSED(info); - ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this); - ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window); - - switch (_input->info()->data_type()) - { - case DataType::U8: - if (_input_subtype == SubDataType::BOOL) - { - run_cast(_input, _output, window); - } - else - { - run_cast(_input, _output, window); - } - break; - case DataType::QASYMM8: - run_cast_qasymm8(_input, _output, window); - break; - case DataType::U32: - run_cast(_input, _output, window); - break; - case DataType::S32: - run_cast(_input, _output, window); - break; - case DataType::F32: - run_cast(_input, _output, window); - break; - default: - ARM_COMPUTE_ERROR("Unsupported data type."); - } -} -} // namespace arm_compute diff --git a/compute/ARMComputeEx/src/core/NEON/kernels/NEDepthToSpaceLayerKernelEx.cpp b/compute/ARMComputeEx/src/core/NEON/kernels/NEDepthToSpaceLayerKernelEx.cpp deleted file mode 100644 index 95e269d..0000000 --- a/compute/ARMComputeEx/src/core/NEON/kernels/NEDepthToSpaceLayerKernelEx.cpp +++ /dev/null @@ -1,181 +0,0 @@ -/* - * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * Copyright (c) 2019 ARM Limited. 
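When the U8 input actually carries boolean data (SubDataType::BOOL in the deleted NECastKernel), every non-zero byte is first normalised to exactly 1 before the conversion; that is what the vcgtq_u8 comparison against zero followed by vshrq_n_u8(mask, 7) does sixteen lanes at a time. A scalar equivalent, for illustration:

#include <cstdint>

// Scalar counterpart of the vector bool normalisation: 0x00 -> 0, anything else -> 1.
inline uint8_t bool_byte_to_01(uint8_t b) { return b != 0 ? 1u : 0u; }

// The normalised 0/1 value can then be converted to any destination type.
template <typename ToT> inline ToT cast_bool_byte(uint8_t b)
{
  return static_cast<ToT>(bool_byte_to_01(b));
}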
- * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#include "arm_compute/core/NEON/kernels/NEDepthToSpaceLayerKernelEx.h" - -#include "arm_compute/core/Helpers.h" -#include "arm_compute/core/ITensor.h" -#include "arm_compute/core/NEON/wrapper/wrapper.h" -#include "arm_compute/core/Types.h" -#include "arm_compute/core/Validate.h" -#include "arm_compute/core/utils/misc/ShapeCalculatorEx.h" -#include -#include - -using namespace arm_compute::misc::shape_calculator; - -namespace arm_compute -{ -namespace -{ -Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, int32_t block_shape) -{ - ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output); - ARM_COMPUTE_RETURN_ERROR_ON(input->num_dimensions() > 4); - ARM_COMPUTE_RETURN_ERROR_ON(block_shape < 2); - - const DataLayout data_layout = input->data_layout(); - const int idx_channel = - get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL); - ARM_COMPUTE_RETURN_ERROR_ON(input->tensor_shape()[idx_channel] % (block_shape * block_shape) != - 0); - // Validate output if initialized - if (output->total_size() != 0) - { - const int idx_width = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH); - const int idx_height = - get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT); - ARM_COMPUTE_RETURN_ERROR_ON(output->tensor_shape()[idx_width] != - (block_shape * input->tensor_shape()[idx_width])); - ARM_COMPUTE_RETURN_ERROR_ON(output->tensor_shape()[idx_height] != - (block_shape * input->tensor_shape()[idx_height])); - ARM_COMPUTE_RETURN_ERROR_ON(output->num_dimensions() > 4); - ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output); - } - - return Status{}; -} -} // namespace - -NEDepthToSpaceLayerKernelEx::NEDepthToSpaceLayerKernelEx() - : _input(nullptr), _output(nullptr), _block_shape() -{ -} - -void NEDepthToSpaceLayerKernelEx::configure(const ITensor *input, ITensor *output, - int32_t block_shape) -{ - ARM_COMPUTE_ERROR_ON_NULLPTR(input, output); - TensorShape output_shape = compute_depth_to_space_shape_ex(input->info(), block_shape); - // Output auto inizialitation if not yet initialized - auto_init_if_empty(*output->info(), input->info()->clone()->set_tensor_shape(output_shape)); - - // Perform validation step - ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), block_shape)); - - _input = input; - _output = output; - _block_shape = block_shape; - - // Configure 
kernel window - Window win = calculate_max_window(*input->info(), Steps()); - ICPPKernel::configure(win); -} - -Status NEDepthToSpaceLayerKernelEx::validate(const ITensorInfo *input, const ITensorInfo *output, - int32_t block_shape) -{ - ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output); - ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, block_shape)); - return Status{}; -} - -void NEDepthToSpaceLayerKernelEx::run(const Window &window, const ThreadInfo &info) -{ - ARM_COMPUTE_UNUSED(info); - ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this); - ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICPPKernel::window(), window); - - const int idx_channel = - get_data_layout_dimension_index(_input->info()->data_layout(), DataLayoutDimension::CHANNEL); - const int depth_size = _input->info()->dimension(idx_channel); - const int r = (depth_size / (_block_shape * _block_shape)); - const int element_size = _input->info()->element_size(); - - Window slice_out = window.first_slice_window_3D(); - - // The slice_out slice does not move - slice_out.set(Window::DimX, Window::Dimension(0, 0, 0)); - slice_out.set(Window::DimY, Window::Dimension(0, 0, 0)); - slice_out.set(Window::DimZ, Window::Dimension(0, 0, 0)); - - // Main loop for NCHW and NHWC - if (_input->info()->data_layout() == DataLayout::NCHW) - { - Window slice_in = window.first_slice_window_2D(); - do - { - Iterator in(_input, slice_in); - execute_window_loop(slice_in, - [&](const Coordinates &id) { - const int x = id.x(); - const int y = id.y(); - - const int z = id.z() % r; - const int out_x = x * _block_shape + (id.z() / r) % _block_shape; - const int out_y = y * _block_shape + (id.z() / r) / _block_shape; - Coordinates output_coords{out_x, out_y, z, id[3]}; - memcpy(_output->ptr_to_element(output_coords), in.ptr(), element_size); - }, - in); - } while (window.slide_window_slice_2D(slice_in)); - } - else - { - Window slice_in = window.first_slice_window_3D(); - do - { - Iterator in(_input, slice_in); - execute_window_loop(slice_in, - [&](const Coordinates &id) { - const int x = id.y(); - const int y = id.z(); - - const int z = id.x() % r; - const int out_x = x * _block_shape + (id.x() / r) % _block_shape; - const int out_y = y * _block_shape + (id.x() / r) / _block_shape; - Coordinates output_coords{z, out_x, out_y, id[3]}; - memcpy(_output->ptr_to_element(output_coords), in.ptr(), element_size); - }, - in); - } while (window.slide_window_slice_3D(slice_in)); - } -} -} // namespace arm_compute diff --git a/compute/ARMComputeEx/src/core/NEON/kernels/NEElementwiseUnaryKernelEx.cpp b/compute/ARMComputeEx/src/core/NEON/kernels/NEElementwiseUnaryKernelEx.cpp deleted file mode 100644 index 200fc4f..0000000 --- a/compute/ARMComputeEx/src/core/NEON/kernels/NEElementwiseUnaryKernelEx.cpp +++ /dev/null @@ -1,221 +0,0 @@ -/* - * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * Copyright (c) 2018-2019 ARM Limited. 
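The NCHW branch of the removed NEDepthToSpaceLayerKernelEx::run() above scatters each input element to out_x = x * block + (z / r) % block, out_y = y * block + (z / r) / block and output channel z % r, where r = depth / block². A plain single-batch scalar reference of that same mapping, with an assumed row-major NCHW indexing helper (not ACL API), looks like this:

// Scalar NCHW reference of the depth-to-space index mapping used by the
// removed kernel. Precondition (enforced by the kernel's validator): C is a
// multiple of block * block.
#include <cstddef>
#include <vector>

static std::size_t idx_nchw(std::size_t c, std::size_t y, std::size_t x,
                            std::size_t H, std::size_t W)
{
  return (c * H + y) * W + x; // single batch, row-major NCHW
}

std::vector<float> depth_to_space_nchw(const std::vector<float> &in, std::size_t C,
                                       std::size_t H, std::size_t W, std::size_t block)
{
  const std::size_t r = C / (block * block); // output channel count
  std::vector<float> out(r * H * block * W * block);
  for (std::size_t c = 0; c < C; ++c)
    for (std::size_t y = 0; y < H; ++y)
      for (std::size_t x = 0; x < W; ++x)
      {
        const std::size_t out_c = c % r;
        const std::size_t out_x = x * block + (c / r) % block;
        const std::size_t out_y = y * block + (c / r) / block;
        out[idx_nchw(out_c, out_y, out_x, H * block, W * block)] =
            in[idx_nchw(c, y, x, H, W)];
      }
  return out;
}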
- * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#include "arm_compute/core/NEON/kernels/NEElementwiseUnaryKernelEx.h" - -#include "arm_compute/core/CPP/Validate.h" -#include "arm_compute/core/Error.h" -#include "arm_compute/core/Helpers.h" -#include "arm_compute/core/IAccessWindow.h" -#include "arm_compute/core/ITensor.h" -#include "arm_compute/core/NEON/NEAsymm.h" -#include "arm_compute/core/NEON/NEFixedPoint.h" -#include "arm_compute/core/NEON/wrapper/wrapper.h" -#include "arm_compute/core/TensorInfo.h" -#include "arm_compute/core/Validate.h" - -#include -#include -#include -#include -#include - -namespace arm_compute -{ -class Coordinates; - -namespace -{ -template -inline ScalarType elementwise_op_scalar(const ScalarType &a) -{ - switch (op) - { - case ElementWiseUnaryEx::NEG: - return -a; - default: - ARM_COMPUTE_ERROR("NOT_SUPPORTED!"); - } -} - -template -inline VectorType elementwise_op(const VectorType &a) -{ - switch (op) - { - case ElementWiseUnaryEx::NEG: - return wrapper::vneg(a); - default: - ARM_COMPUTE_ERROR("NOT_SUPPORTED!"); - } -} - -template -void elementwise_op(const ITensor *in, ITensor *out, const Window &window) -{ - const int window_step_x = 16 / sizeof(ScalarType); - const auto window_start_x = static_cast(window.x().start()); - const auto window_end_x = static_cast(window.x().end()); - - Window win = window; - win.set(Window::DimX, Window::Dimension(0, 1, 1)); - - Iterator input(in, win); - Iterator output(out, win); - - execute_window_loop(win, - [&](const Coordinates &) { - auto output_ptr = reinterpret_cast(output.ptr()); - const auto input_ptr = reinterpret_cast(input.ptr()); - - int x = window_start_x; - for (; x <= window_end_x - window_step_x; x += window_step_x) - { - wrapper::vstore(output_ptr + x, - elementwise_op(wrapper::vloadq(input_ptr + x))); - } - for (; x < window_end_x; ++x) - { - *(output_ptr + x) = elementwise_op_scalar(*(input_ptr + x)); - } - }, - input, output); -} - -template -std::function -configure_func(const ITensor *input, ITensor *output) -{ - std::string function_to_call("op_"); - function_to_call += string_from_data_type(input->info()->data_type()) + "_"; - function_to_call += string_from_data_type(output->info()->data_type()); - - static std::map - map_function = { - {"op_F32_F32", &elementwise_op}, {"op_S32_S32", &elementwise_op}, - }; -#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC - map_function["op_F16_F16"] = &elementwise_op; -#endif /* 
__ARM_FEATURE_FP16_VECTOR_ARITHMETIC */ - - auto it = map_function.find(function_to_call); - - if (it != map_function.end()) - { - auto func = it->second; - return [func](const ITensor *input, ITensor *output, const Window &window) { - func(input, output, window); - }; - } - return nullptr; -} -} // namespace - -NEElementwiseUnaryKernelEx::NEElementwiseUnaryKernelEx() - : _function(nullptr), _input(nullptr), _output(nullptr) -{ -} - -void NEElementwiseUnaryKernelEx::configure(ElementWiseUnaryEx op, const ITensor *input, - ITensor *output) -{ - ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(*input->info(), *output->info())); - ARM_COMPUTE_ERROR_ON_NULLPTR(input, output); - - // Configure kernel window - const std::pair broadcast_pair = - ITensorInfo::broadcast_shape_and_valid_region(*input->info()); - const TensorShape &out_shape = broadcast_pair.first; - const ValidRegion &valid_region = broadcast_pair.second; - - // Auto initialize output if not initialized - auto_init_if_empty(*output->info(), out_shape, 1, input->info()->data_type()); - - Window win = calculate_max_window(valid_region); - - _input = input; - _output = output; - - INEKernel::configure(win); - - switch (op) - { - case ElementWiseUnaryEx::NEG: - _function = configure_func(input, output); - break; - default: - ARM_COMPUTE_ERROR("NOT_SUPPORTED!"); - } -} - -Status NEElementwiseUnaryKernelEx::validate_arguments(const ITensorInfo &input, - const ITensorInfo &output) -{ - ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(&input); - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&input, 1, DataType::F16, DataType::F32, - DataType::S32); - - // Validate in case of configured output - if (output.total_size() > 0) - { - ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(&input, &output); - } - - return Status{}; -} - -Status NEElementwiseUnaryKernelEx::validate(ElementWiseUnaryEx op, const ITensorInfo *input, - const ITensorInfo *output) -{ - ARM_COMPUTE_UNUSED(op); - ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output); - ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(*input, *output)); - return Status{}; -} - -void NEElementwiseUnaryKernelEx::run(const Window &window, const ThreadInfo &info) -{ - ARM_COMPUTE_UNUSED(info); - ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this); - ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window); - ARM_COMPUTE_ERROR_ON(_function == nullptr); - _function(_input, _output, window); -} -} // namespace arm_compute diff --git a/compute/ARMComputeEx/src/core/NEON/kernels/NEPReLUKernel.cpp b/compute/ARMComputeEx/src/core/NEON/kernels/NEPReLUKernel.cpp deleted file mode 100644 index 641641b..0000000 --- a/compute/ARMComputeEx/src/core/NEON/kernels/NEPReLUKernel.cpp +++ /dev/null @@ -1,291 +0,0 @@ -/* - * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * Copyright (c) 2017-2019 ARM Limited. 
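The removed NEElementwiseUnaryKernelEx processes each row with a vectorised main loop of 16 / sizeof(ScalarType) elements (one 128-bit NEON register) followed by a scalar tail. The sketch below reproduces only that loop structure for the NEG operation, with plain loops standing in for the wrapper::vloadq / vneg / vstore intrinsics; it is an illustration of the structure, not the kernel itself.

// Loop structure of the removed elementwise-unary (NEG) kernel, without NEON.
#include <cstddef>

template <typename ScalarType>
void elementwise_neg(const ScalarType *in, ScalarType *out, int len)
{
  const int step = 16 / static_cast<int>(sizeof(ScalarType)); // elements per 128-bit vector
  int x = 0;
  for (; x <= len - step; x += step) // vectorised main loop (stand-in for intrinsics)
    for (int i = 0; i < step; ++i)
      out[x + i] = -in[x + i];
  for (; x < len; ++x)               // scalar tail for the remainder
    out[x] = -in[x];
}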
- * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#include "arm_compute/core/NEON/kernels/NEPReLUKernel.h" - -#include "arm_compute/core/ITensor.h" -#include "arm_compute/core/NEON/NEAsymm.h" -#include "arm_compute/core/NEON/NEElementwiseOperationFuncs.h" -#include "arm_compute/core/NEON/wrapper/wrapper.h" -#include "arm_compute/core/TensorInfo.h" -#include "arm_compute/core/Window.h" - -#include - -using namespace arm_compute; -namespace -{ - -/** Conditional element-wise operations */ -enum class ConditionalOperation -{ - PRELU, /**< (x * y) for x < 0, x for x >= 0 */ -}; - -template -inline ScalarType elementwise_conditional_op_scalar(const ScalarType &a, const ScalarType &b) -{ - auto res = ScalarType(0); - - switch (op) - { - case ConditionalOperation::PRELU: - res = a < 0 ? a * b : a; - break; - default: - ARM_COMPUTE_ERROR("NOT_SUPPORTED!"); - } - return res; -} - -template -inline uint8_t elementwise_conditional_op_quantized_scalar(const float &a, const float &b, - QuantizationInfo qinfo) -{ - return quantize_qasymm8(elementwise_conditional_op_scalar(a, b), qinfo, - RoundingPolicy::TO_NEAREST_UP); -} - -template -inline VectorType elementwise_conditional_op(const VectorType &a, const VectorType &b) -{ - VectorType res = {0, 0, 0, 0}; - VectorType const_0 = {0, 0, 0, 0}; - - switch (op) - { - case ConditionalOperation::PRELU: - res = wrapper::vbsl(wrapper::vcgt(a, const_0), a, wrapper::vmul(a, b)); - ; - break; - default: - ARM_COMPUTE_ERROR("NOT_SUPPORTED!"); - } - return res; -} - -template -inline float32x4x4_t elementwise_conditional_op(const float32x4x4_t &a, const float32x4x4_t &b) -{ - float32x4x4_t out = {{ - elementwise_conditional_op(a.val[0], b.val[0]), - elementwise_conditional_op(a.val[1], b.val[1]), - elementwise_conditional_op(a.val[2], b.val[2]), - elementwise_conditional_op(a.val[3], b.val[3]), - }}; - return out; -} - -template -inline VectorType elementwise_conditional_op_broadcast(const VectorType &a, - const ScalarType &broadcast_value, - const bool reorder) -{ - VectorType broadcast_vector = wrapper::vdup_n(broadcast_value, wrapper::traits::vector_128_tag()); - return elementwise_conditional_op(reorder ? broadcast_vector : a, - reorder ? 
a : broadcast_vector); -} - -template -inline int elementwise_conditional_op_loop(int window_start_x, int window_end_x, int window_step_x, - const ScalarType *input1_ptr, - const ScalarType *input2_ptr, ScalarType *output_ptr) -{ - int x = window_start_x; - for (; x <= (window_end_x - window_step_x); x += window_step_x) - { - const auto a = wrapper::vloadq(input1_ptr + x); - const auto b = wrapper::vloadq(input2_ptr + x); - wrapper::vstore(output_ptr + x, elementwise_conditional_op(a, b)); - } - return x; -} - -template -inline int elementwise_conditional_op_quantized_loop(int window_start_x, int window_end_x, - int window_step_x, const uint8_t *input1_ptr, - const uint8_t *input2_ptr, uint8_t *output_ptr, - int32x4_t voffset1, int32x4_t voffset2, - float32x4_t vscale1, float32x4_t vscale2, - float32x4_t voffseto, float32x4_t invvscaleo) -{ - int x = window_start_x; - for (; x <= (window_end_x - window_step_x); x += window_step_x) - { - // Get inputs and compute output - const float32x4x4_t af = load_quantized(input1_ptr + x, voffset1, vscale1); - const float32x4x4_t bf = load_quantized(input2_ptr + x, voffset2, vscale2); - const float32x4x4_t rf = elementwise_conditional_op(af, bf); - store_quantized(output_ptr + x, rf, voffseto, invvscaleo); - } - return x; -} - -template -inline int elementwise_conditional_op_broadcast_loop(int window_start_x, int window_end_x, - int window_step_x, - const ScalarType *non_broadcast_input_ptr, - const ScalarType &broadcast_value, - ScalarType *output_ptr, const bool reorder) -{ - int x = window_start_x; - for (; x <= (window_end_x - window_step_x); x += window_step_x) - { - const auto a = wrapper::vloadq((non_broadcast_input_ptr + x)); - wrapper::vstore(output_ptr + x, - elementwise_conditional_op_broadcast(a, broadcast_value, reorder)); - } - return x; -} - -template -inline int elementwise_conditional_op_quantized_broadcast_loop( - int window_start_x, int window_end_x, int window_step_x, const uint8_t *non_broadcast_input_ptr, - float32x4x4_t broadcast_vector, uint8_t *output_ptr, int32x4_t voffset_non_broadcast, - float32x4_t vscale_non_broadcast, float32x4_t voffseto, float32x4_t invvscaleo, bool reorder) -{ - int x = window_start_x; - for (; x <= (window_end_x - window_step_x); x += window_step_x) - { - const float32x4x4_t af = - load_quantized(non_broadcast_input_ptr + x, voffset_non_broadcast, vscale_non_broadcast); - const float32x4x4_t rf = elementwise_conditional_op(reorder ? broadcast_vector : af, - reorder ? 
af : broadcast_vector); - store_quantized(output_ptr + x, rf, voffseto, invvscaleo); - } - return x; -} - -template -void elementwise_conditional_op(const ITensor *in1, const ITensor *in2, ITensor *out, - const Window &window) -{ - elementwise_op(in1, in2, out, window, &elementwise_conditional_op_scalar, - &elementwise_conditional_op_broadcast_loop, - &elementwise_conditional_op_loop); -} - -template -void elementwise_conditional_op_quantized(const ITensor *in1, const ITensor *in2, ITensor *out, - const Window &window) -{ - elementwise_op_quantized(in1, in2, out, window, &elementwise_conditional_op_quantized_scalar, - &elementwise_conditional_op_quantized_broadcast_loop, - &elementwise_conditional_op_quantized_loop); -} -} // namespace - -NEPReLUKernel::NEPReLUKernel() : _input(nullptr), _alpha(nullptr), _output(nullptr) {} - -void NEPReLUKernel::configure(const ITensor *input, const ITensor *alpha, ITensor *output) -{ - ARM_COMPUTE_ERROR_ON_NULLPTR(input, alpha, output); - ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(*input->info(), *alpha->info(), *output->info())); - - // Configure kernel window - const std::pair broadcast_pair = - ITensorInfo::broadcast_shape_and_valid_region(*input->info(), *alpha->info()); - const TensorShape &out_shape = broadcast_pair.first; - const ValidRegion &valid_region = broadcast_pair.second; - - // Auto initialize output if not initialized - auto_init_if_empty(*output->info(), out_shape, 1, input->info()->data_type()); - - Window win = calculate_max_window(valid_region); - - _input = input; - _alpha = alpha; - _output = output; - INEKernel::configure(win); -} - -void NEPReLUKernel::run(const Window &window, const ThreadInfo &info) -{ - ARM_COMPUTE_UNUSED(info); - ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this); - ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(IKernel::window(), window); - - if (_input->info()->data_type() == DataType::F32) - { - elementwise_conditional_op(_input, _alpha, - _output, window); - } - else if (_input->info()->data_type() == DataType::QASYMM8) - { - elementwise_conditional_op_quantized(_input, _alpha, _output, - window); - } - else - { - ARM_COMPUTE_ERROR("Wrong Type"); - } -} - -Status NEPReLUKernel::validate_arguments(const ITensorInfo &input, const ITensorInfo &alpha, - const ITensorInfo &output) -{ - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&input, 1, DataType::QASYMM8, DataType::F32); - ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(&input, &alpha, &output); - - const TensorShape out_shape = - TensorShape::broadcast_shape(input.tensor_shape(), alpha.tensor_shape()); - - ARM_COMPUTE_RETURN_ERROR_ON_MSG(out_shape.total_size() == 0, - "Inputs are not broadcast compatible"); - - // Checks performed when output is configured - if (output.total_size() > 0) - { - ARM_COMPUTE_RETURN_ERROR_ON_MSG( - detail::have_different_dimensions(out_shape, output.tensor_shape(), 0), - "Wrong shape for output"); - } - - return Status{}; -} - -Status NEPReLUKernel::validate(const ITensorInfo *input, const ITensorInfo *alpha, - const ITensorInfo *output) -{ - ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, alpha, output); - ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(*input, *alpha, *output)); - - return Status{}; -} diff --git a/compute/ARMComputeEx/src/core/NEON/kernels/NEQuantizationSymmetricKernel.cpp b/compute/ARMComputeEx/src/core/NEON/kernels/NEQuantizationSymmetricKernel.cpp index 6ba0f1f..5841f1d 100644 --- a/compute/ARMComputeEx/src/core/NEON/kernels/NEQuantizationSymmetricKernel.cpp +++ 
b/compute/ARMComputeEx/src/core/NEON/kernels/NEQuantizationSymmetricKernel.cpp @@ -64,7 +64,7 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, ARM_COMPUTE_RETURN_ERROR_ON(input->num_dimensions() > 2); ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F16, DataType::F32); ARM_COMPUTE_RETURN_ERROR_ON(output->tensor_shape().total_size() == 0); - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::S8); + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::QASYMM8_SIGNED); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output); ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(scale_factor, 1, DataType::F16, DataType::F32); diff --git a/compute/ARMComputeEx/src/core/NEON/kernels/NESpaceToDepthLayerKernelEx.cpp b/compute/ARMComputeEx/src/core/NEON/kernels/NESpaceToDepthLayerKernelEx.cpp deleted file mode 100644 index 44feb20..0000000 --- a/compute/ARMComputeEx/src/core/NEON/kernels/NESpaceToDepthLayerKernelEx.cpp +++ /dev/null @@ -1,181 +0,0 @@ -/* - * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * Copyright (c) 2019 ARM Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
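The conditional op at the heart of the removed NEPReLUKernel is out = a < 0 ? a * alpha : a, applied element-wise over broadcast-compatible input and alpha tensors (the QASYMM8 path dequantizes, applies the same rule, and requantizes). A minimal float-only scalar reference, assuming a single broadcast alpha value for brevity:

// Scalar PReLU reference matching the removed kernel's conditional rule.
#include <cstddef>
#include <vector>

float prelu(float a, float alpha) { return a < 0.0f ? a * alpha : a; }

std::vector<float> prelu_broadcast(const std::vector<float> &input, float alpha)
{
  std::vector<float> out(input.size());
  for (std::size_t i = 0; i < input.size(); ++i)
    out[i] = prelu(input[i], alpha); // negative values scaled by alpha, others passed through
  return out;
}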
- */ - -#include "arm_compute/core/NEON/kernels/NESpaceToDepthLayerKernelEx.h" - -#include "arm_compute/core/Helpers.h" -#include "arm_compute/core/ITensor.h" -#include "arm_compute/core/NEON/wrapper/wrapper.h" -#include "arm_compute/core/Types.h" -#include "arm_compute/core/Validate.h" -#include "arm_compute/core/utils/misc/ShapeCalculatorEx.h" -#include -#include - -using namespace arm_compute::misc::shape_calculator; - -namespace arm_compute -{ -namespace -{ -Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, int32_t block_shape) -{ - ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output); - ARM_COMPUTE_RETURN_ERROR_ON(input->num_dimensions() > 4); - - ARM_COMPUTE_RETURN_ERROR_ON(block_shape < 1); - - // Validate output if initialized - if (output->total_size() != 0) - { - const DataLayout data_layout = input->data_layout(); - const int idx_width = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH); - const int idx_height = - get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT); - const int idx_channel = - get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL); - const int idx_batch = - get_data_layout_dimension_index(data_layout, DataLayoutDimension::BATCHES); - ARM_COMPUTE_RETURN_ERROR_ON(input->tensor_shape()[idx_width] % block_shape != 0); - ARM_COMPUTE_RETURN_ERROR_ON(input->tensor_shape()[idx_height] % block_shape != 0); - ARM_COMPUTE_RETURN_ERROR_ON(input->tensor_shape()[idx_batch] != - output->tensor_shape()[idx_batch]); - ARM_COMPUTE_RETURN_ERROR_ON(output->tensor_shape()[idx_channel] % (block_shape * block_shape) != - 0); - ARM_COMPUTE_RETURN_ERROR_ON(input->tensor_shape().total_size() != - output->tensor_shape().total_size()); - ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output); - } - - return Status{}; -} -} // namespace - -NESpaceToDepthLayerKernelEx::NESpaceToDepthLayerKernelEx() - : _input(nullptr), _output(nullptr), _block_shape() -{ -} - -void NESpaceToDepthLayerKernelEx::configure(const ITensor *input, ITensor *output, - int32_t block_shape) -{ - ARM_COMPUTE_ERROR_ON_NULLPTR(input, output); - - TensorShape output_shape = compute_space_to_depth_shape_ex(input->info(), block_shape); - auto_init_if_empty(*output->info(), output_shape, 1, input->info()->data_type()); - - ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), block_shape)); - - _input = input; - _block_shape = block_shape; - _output = output; - - // Configure kernel window - Window win = calculate_max_window(*output->info(), Steps()); - INEKernel::configure(win); -} - -Status NESpaceToDepthLayerKernelEx::validate(const ITensorInfo *input, const ITensorInfo *output, - int32_t block_shape) -{ - ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, block_shape)); - return Status{}; -} - -void NESpaceToDepthLayerKernelEx::run(const Window &window, const ThreadInfo &info) -{ - ARM_COMPUTE_UNUSED(info); - ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this); - ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICPPKernel::window(), window); - - const DataLayout data_layout = _input->info()->data_layout(); - const int channel_idx = - get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL); - const int element_size = _input->info()->element_size(); - - const size_t channel_size = _input->info()->dimension(channel_idx); - - Window slice_out = window.first_slice_window_3D(); - - int batch_id = 0; - - // Main loop for NCHW and NHWC - if (_output->info()->data_layout() == 
DataLayout::NCHW) - { - do - { - Iterator out(_output, slice_out); - execute_window_loop(slice_out, - [&](const Coordinates &id) { - const size_t channel_id = id.z(); - const size_t in_x = - id.x() * _block_shape + (channel_id / channel_size) % _block_shape; - const size_t in_y = - id.y() * _block_shape + (channel_id / channel_size) / _block_shape; - const int z = channel_id % channel_size; - Coordinates input_coords{in_x, in_y, z, batch_id}; - memcpy(out.ptr(), _input->ptr_to_element(input_coords), element_size); - }, - out); - ++batch_id; - } while (window.slide_window_slice_3D(slice_out)); - } - else - { - do - { - Iterator out(_output, slice_out); - execute_window_loop(slice_out, - [&](const Coordinates &id) { - const size_t channel_id = id.x(); - const size_t in_x = - id.y() * _block_shape + (channel_id / channel_size) % _block_shape; - const size_t in_y = - id.z() * _block_shape + (channel_id / channel_size) / _block_shape; - const int z = channel_id % channel_size; - Coordinates input_coords{z, in_x, in_y, batch_id}; - memcpy(out.ptr(), _input->ptr_to_element(input_coords), element_size); - }, - out); - ++batch_id; - } while (window.slide_window_slice_3D(slice_out)); - } -} -} // namespace arm_compute diff --git a/compute/ARMComputeEx/src/runtime/CL/functions/CLArgOperation.cpp b/compute/ARMComputeEx/src/runtime/CL/functions/CLArgOperation.cpp deleted file mode 100644 index 2d379cf..0000000 --- a/compute/ARMComputeEx/src/runtime/CL/functions/CLArgOperation.cpp +++ /dev/null @@ -1,144 +0,0 @@ -/* - * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * Copyright (c) 2017 ARM Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
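The removed NESpaceToDepthLayerKernelEx::run() above is the inverse of the depth-to-space kernel and is written gather-style: it iterates over the output window and derives the input coordinates from the output channel. A single-batch NHWC scalar reference of that mapping (the indexing helper and function name are assumptions):

// Scalar NHWC reference of the space-to-depth gather used by the removed kernel.
#include <cstddef>
#include <vector>

static std::size_t idx_nhwc(std::size_t y, std::size_t x, std::size_t c,
                            std::size_t W, std::size_t C)
{
  return (y * W + x) * C + c; // single batch, row-major NHWC
}

std::vector<float> space_to_depth_nhwc(const std::vector<float> &in, std::size_t H,
                                       std::size_t W, std::size_t C, std::size_t block)
{
  const std::size_t out_H = H / block, out_W = W / block, out_C = C * block * block;
  std::vector<float> out(out_H * out_W * out_C);
  for (std::size_t y = 0; y < out_H; ++y)
    for (std::size_t x = 0; x < out_W; ++x)
      for (std::size_t c = 0; c < out_C; ++c)
      {
        const std::size_t in_c = c % C;                       // original channel
        const std::size_t in_x = x * block + (c / C) % block; // spatial offset packed into c
        const std::size_t in_y = y * block + (c / C) / block;
        out[idx_nhwc(y, x, c, out_W, out_C)] = in[idx_nhwc(in_y, in_x, in_c, W, C)];
      }
  return out;
}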
- */ - -#include "arm_compute/runtime/CL/functions/CLArgOperation.h" - -#include "arm_compute/core/CL/kernels/CLArgOperationKernel.h" -#include "arm_compute/runtime/CL/CLScheduler.h" - -namespace arm_compute -{ - -CLArgOperation::CLArgOperation() -{ - // DO NOTHING -} - -void CLArgOperation::configure(ICLTensor *input, ICLTensor *output, std::vector axis, - ArgOperation op) -{ - ARM_COMPUTE_ERROR_THROW_ON(validate(input->info(), axis, output->info(), op)); - _input = input; - _output = output; - _axis = axis; - _arg_op = op; - // NOTE The argminmax_axis must have no duplication. - _num_of_kernels = axis.size(); - const size_t num_of_interm_tensors = _num_of_kernels - 1; - - _interm_tensors = arm_compute::support::cpp14::make_unique(num_of_interm_tensors); - _argop_kernels = - arm_compute::support::cpp14::make_unique(_num_of_kernels); - - TensorShape shape{input->info()->tensor_shape()}; - for (size_t i = 0; i < num_of_interm_tensors; i++) - { - shape.set(_axis[i], 1); - _interm_tensors[i].allocator()->init( - TensorInfo(shape, input->info()->num_channels(), input->info()->data_type()) - .set_data_layout(input->info()->data_layout())); - _interm_tensors[i].allocator()->allocate(); - } - - // Set a vector that is ordered ICLTensors sequentially. - std::vector tensors; - tensors.emplace_back(input); - for (size_t i = 0; i < num_of_interm_tensors; i++) - { - tensors.emplace_back(_interm_tensors.get() + i); - } - tensors.emplace_back(output); - - // Apply ArgMinMax on all kernels - for (size_t i = 0; i < _num_of_kernels; i++) - { - _argop_kernels[i].configure(tensors[i], tensors[i + 1], _axis[i], op); - } -} - -Status CLArgOperation::validate(const ITensorInfo *input, const std::vector &axis, - const ITensorInfo *output, ArgOperation op) -{ - const size_t num_of_kernels = axis.size(); - const size_t num_of_interm_tensors = num_of_kernels - 1; - - // Create temporary tensor infos - auto interm_tensors = - arm_compute::support::cpp14::make_unique(num_of_interm_tensors); - - // Create intermediate tensor info - TensorShape shape{input->tensor_shape()}; - - for (size_t i = 0; i < num_of_interm_tensors; i++) - { - shape.set(axis[i], 1); - interm_tensors[i].set_data_type(input->data_type()); - interm_tensors[i].set_tensor_shape(shape); - interm_tensors[i].set_num_channels(input->num_channels()); - } - - // Set a vector that is ordered ITensorInfo sequentially. 
- std::vector tensors; - tensors.emplace_back(input); - for (size_t i = 0; i < num_of_interm_tensors; i++) - { - tensors.emplace_back(interm_tensors.get() + i); - } - tensors.emplace_back(output); - - // Validate argminmax only on all kernels - for (size_t i = 0; i < num_of_kernels; i++) - { - ARM_COMPUTE_RETURN_ON_ERROR( - CLArgOperationKernel::validate(tensors[i], tensors[i + 1], axis[i], op)); - } - - return Status{}; -} - -void CLArgOperation::run() -{ - for (size_t i = 0; i < _num_of_kernels; ++i) - { - CLScheduler::get().enqueue(_argop_kernels[i]); - } -} - -} // namespace arm_compute diff --git a/compute/ARMComputeEx/src/runtime/CL/functions/CLBinaryLogicalOp.cpp b/compute/ARMComputeEx/src/runtime/CL/functions/CLBinaryLogicalOp.cpp index 92ee69a..e5122ab 100644 --- a/compute/ARMComputeEx/src/runtime/CL/functions/CLBinaryLogicalOp.cpp +++ b/compute/ARMComputeEx/src/runtime/CL/functions/CLBinaryLogicalOp.cpp @@ -48,7 +48,7 @@ using namespace arm_compute; void CLBinaryLogicalOp::configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output, BinaryLogicalOperation op) { - auto k = arm_compute::support::cpp14::make_unique(); + auto k = support::cpp14::make_unique(); k->configure(input1, input2, output, op); _kernel = std::move(k); diff --git a/compute/ARMComputeEx/src/runtime/CL/functions/CLCast.cpp b/compute/ARMComputeEx/src/runtime/CL/functions/CLCast.cpp deleted file mode 100644 index b3118f3..0000000 --- a/compute/ARMComputeEx/src/runtime/CL/functions/CLCast.cpp +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * Copyright (c) 2016-2018 ARM Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
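The removed CLArgOperation chains one CLArgOperationKernel per reduction axis, wiring each stage into an intermediate tensor whose shape is the previous shape with that axis set to 1; only the final stage writes the user's output. The sketch below reproduces just that shape bookkeeping, not the reduction itself.

// Per-stage output shapes for a chained multi-axis arg reduction.
#include <cstddef>
#include <vector>

using Shape = std::vector<std::size_t>;

std::vector<Shape> chained_reduction_shapes(Shape shape, const std::vector<std::size_t> &axes)
{
  std::vector<Shape> stages;
  for (std::size_t i = 0; i < axes.size(); ++i)
  {
    shape[axes[i]] = 1;      // axis collapsed by stage i
    stages.push_back(shape); // stage i's output shape (the last one is the final output)
  }
  return stages;
}

// Example: shape {2, 3, 4} with axes {1, 2} yields stage outputs {2, 1, 4}, then {2, 1, 1}.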
- */ - -#include "arm_compute/runtime/CL/functions/CLCast.h" - -#include "arm_compute/core/CL/kernels/CLCastKernel.h" - -using namespace arm_compute; - -void CLCast::configure(ICLTensor *input, ICLTensor *output, SubDataType input_subtype) -{ - auto k = arm_compute::support::cpp14::make_unique(); - k->configure(input, output, input_subtype); - _kernel = std::move(k); -} diff --git a/compute/ARMComputeEx/src/runtime/CL/functions/CLDepthToSpace.cpp b/compute/ARMComputeEx/src/runtime/CL/functions/CLDepthToSpace.cpp deleted file mode 100644 index db66250..0000000 --- a/compute/ARMComputeEx/src/runtime/CL/functions/CLDepthToSpace.cpp +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * Copyright (c) 2016-2018 ARM Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#include "arm_compute/runtime/CL/functions/CLDepthToSpace.h" - -#include "arm_compute/core/CL/kernels/CLDepthToSpaceKernel.h" - -using namespace arm_compute; - -void CLDepthToSpace::configure(ICLTensor *input, ICLTensor *output, const int32_t block_size) -{ - auto k = arm_compute::support::cpp14::make_unique(); - k->configure(input, output, block_size); - _kernel = std::move(k); -} diff --git a/compute/ARMComputeEx/src/runtime/CL/functions/CLDirectTransposeConvLayer.cpp b/compute/ARMComputeEx/src/runtime/CL/functions/CLDirectTransposeConvLayer.cpp new file mode 100644 index 0000000..3dede05 --- /dev/null +++ b/compute/ARMComputeEx/src/runtime/CL/functions/CLDirectTransposeConvLayer.cpp @@ -0,0 +1,267 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * Copyright (c) 2019-2020 ARM Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#include "arm_compute/runtime/CL/functions/CLDirectTransposeConvLayer.h" + +#include "arm_compute/core/Helpers.h" +#include "arm_compute/core/UtilsEx.h" +#include "arm_compute/core/Validate.h" +#include "arm_compute/core/utils/misc/ShapeCalculatorEx.h" +#include "arm_compute/runtime/CL/CLScheduler.h" + +#include +#include + +namespace arm_compute +{ +using namespace arm_compute::misc::shape_calculator; + +CLDirectTransposeConvLayer::CLDirectTransposeConvLayer( + std::shared_ptr memory_manager) // NOLINT + : _memory_group(std::move(memory_manager)), + _scale_f(), + _conv_f(), + _flip_weights(), + _scaled_output(), + _original_weights(nullptr), + _weights_flipped(), + _flip_axis(), + _is_prepared(false) +{ +} + +Status CLDirectTransposeConvLayer::validate(const ITensorInfo *input, const ITensorInfo *weights, + const ITensorInfo *bias, ITensorInfo *output, + const PadStrideInfo &info, unsigned int invalid_right, + unsigned int invalid_bottom, + const WeightsInfo &weights_info) +{ + ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, weights, output); + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN( + input, 1, DataType::QASYMM8_SIGNED, DataType::QASYMM8, DataType::F16, DataType::F32); + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(input, weights); + const DataLayout data_layout = input->data_layout(); + + const size_t idx_w = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH); + const size_t idx_h = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT); + const size_t idx_c = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL); + + ARM_COMPUTE_RETURN_ERROR_ON(weights->dimension(idx_w) != weights->dimension(idx_h)); + ARM_COMPUTE_RETURN_ERROR_ON(weights->dimension(idx_w) < 1); + + auto out_dims = transposeconv_output_dimensions( + input->dimension(idx_w), input->dimension(idx_h), weights->dimension(idx_w), + weights->dimension(idx_h), info, 
invalid_right, invalid_bottom); + + const TensorShape output_shape = compute_transposeconv_output_shape(out_dims, *input, *weights); + + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output, weights); + + if (bias != nullptr) + { + if (is_data_type_quantized_asymmetric(input->data_type())) + { + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(bias, 1, DataType::S32); + } + else + { + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, bias); + } + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(input, bias); + } + + ARM_COMPUTE_RETURN_ERROR_ON_MSG(output->dimension(idx_w) != output_shape[idx_w], + "Output's width is invalid."); + ARM_COMPUTE_RETURN_ERROR_ON_MSG(output->dimension(idx_h) != output_shape[idx_h], + "Output's height is invalid."); + ARM_COMPUTE_RETURN_ERROR_ON_MSG(output->dimension(idx_c) != output_shape[idx_c], + "Output's depth is invalid."); + + unsigned int pad_left = 0; + unsigned int pad_right = 0; + unsigned int pad_top = 0; + unsigned int pad_bottom = 0; + const TensorShape scale_out_shape = compute_transposeconv_upsampled_shape( + *input, *weights, info, out_dims, invalid_right, invalid_bottom, pad_left, pad_right, pad_top, + pad_bottom); + TensorInfo scale_out_info(input->clone() + ->set_is_resizable(true) + .reset_padding() + .set_tensor_shape(scale_out_shape) + .set_data_layout(data_layout)); + const PadStrideInfo conv_info(1, 1, 0, 0, 0, 0, DimensionRoundingType::CEIL); + + ARM_COMPUTE_RETURN_ON_ERROR(CLDeconvolutionLayerUpsample::validate(input, &scale_out_info, info)); + ARM_COMPUTE_RETURN_ON_ERROR(CLConvolutionLayer::validate(&scale_out_info, weights, bias, output, + conv_info, weights_info)); + + return Status{}; +} + +void CLDirectTransposeConvLayer::configure(ICLTensor *input, ICLTensor *weights, + const ICLTensor *bias, ICLTensor *output, + const PadStrideInfo &info, unsigned int invalid_right, + unsigned int invalid_bottom, + const WeightsInfo &weights_info) +{ + configure(CLKernelLibrary::get().get_compile_context(), input, weights, bias, output, info, + invalid_right, invalid_bottom, weights_info); +} + +void CLDirectTransposeConvLayer::configure(const CLCompileContext &compile_context, + ICLTensor *input, ICLTensor *weights, + const ICLTensor *bias, ICLTensor *output, + const PadStrideInfo &info, unsigned int invalid_right, + unsigned int invalid_bottom, + const WeightsInfo &weights_info) +{ + ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output); + + unsigned int pad_left = 0; + unsigned int pad_right = 0; + unsigned int pad_top = 0; + unsigned int pad_bottom = 0; + const unsigned int stride_x = info.stride().first; + const unsigned int stride_y = info.stride().second; + + const DataLayout data_layout = input->info()->data_layout(); + + const size_t idx_w = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH); + const size_t idx_h = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT); + + _original_weights = weights; + _flip_axis.allocator()->init(TensorInfo(TensorShape(2U), 1, DataType::U32)); + _weights_flipped.allocator()->init(weights->info()->clone()->set_data_layout(data_layout)); + _flip_weights.configure(compile_context, weights, &_weights_flipped, &_flip_axis); + + auto out_dims = transposeconv_output_dimensions( + input->info()->dimension(idx_w), input->info()->dimension(idx_h), + weights->info()->dimension(idx_w), weights->info()->dimension(idx_h), info, invalid_right, + invalid_bottom); + + const TensorShape output_shape = + compute_transposeconv_output_shape(out_dims, 
*input->info(), *weights->info()); + + // Output auto initialization if not yet initialized + auto_init_if_empty( + *output->info(), + input->info()->clone()->set_tensor_shape(output_shape).set_data_layout(data_layout)); + + // Perform validation step + ARM_COMPUTE_ERROR_THROW_ON(CLDirectTransposeConvLayer::validate( + input->info(), weights->info(), bias == nullptr ? nullptr : bias->info(), output->info(), + info, invalid_right, invalid_bottom)); + + _is_prepared = weights_info.retain_internal_weights(); + + _memory_group.manage(&_scaled_output); + + // Find the upsampled dimensions and the padding needed for the convolution with stride 1 in order + // to match output shape + const TensorShape scale_out_shape = compute_transposeconv_upsampled_shape( + *input->info(), *weights->info(), info, out_dims, invalid_right, invalid_bottom, pad_left, + pad_right, pad_top, pad_bottom); + + TensorInfo scale_out_info(scale_out_shape, 1, input->info()->data_type(), + input->info()->quantization_info()); + scale_out_info.set_data_layout(data_layout); + _scaled_output.allocator()->init(scale_out_info); + + // configure scale function + const PadStrideInfo upsample_info(stride_x, stride_y, pad_left, pad_right, pad_top, pad_bottom, + DimensionRoundingType::FLOOR); + _scale_f.configure(input, &_scaled_output, upsample_info); + + // Setup the function to convolve the upscaled output + const PadStrideInfo conv_info(1, 1, 0, 0, 0, 0, DimensionRoundingType::CEIL); + _conv_f.configure(compile_context, &_scaled_output, &_weights_flipped, bias, output, conv_info, + weights_info); + _scaled_output.allocator()->allocate(); + + // Setup flip axis data + _flip_axis.allocator()->allocate(); + _flip_axis.map(true); + auto axis_data = reinterpret_cast(_flip_axis.buffer()); + if (weights->info()->data_layout() == DataLayout::NHWC) + { + axis_data[0] = 1; + axis_data[1] = 2; + } + else + { + axis_data[0] = 0; + axis_data[1] = 1; + } + _flip_axis.unmap(); +} + +void CLDirectTransposeConvLayer::run() +{ + prepare(); + + MemoryGroupResourceScope scope_mg(_memory_group); + + _scale_f.run(); + _conv_f.run(); +} + +void CLDirectTransposeConvLayer::prepare() +{ + if (!_is_prepared) + { + ARM_COMPUTE_ERROR_ON(!_original_weights->is_used()); + + // Run weights flipping and mark original weights tensor as unused + _weights_flipped.allocator()->allocate(); + _flip_weights.run(); + _original_weights->mark_as_unused(); + + // Prepare convolution + _conv_f.prepare(); + + // Free flipped weights + if (!_weights_flipped.is_used()) + { + _weights_flipped.allocator()->free(); + } + + _is_prepared = true; + } +} +} // namespace arm_compute diff --git a/compute/ARMComputeEx/src/runtime/CL/functions/CLEmbeddingLookup.cpp b/compute/ARMComputeEx/src/runtime/CL/functions/CLEmbeddingLookup.cpp index 3d9a28a..ae9d8af 100644 --- a/compute/ARMComputeEx/src/runtime/CL/functions/CLEmbeddingLookup.cpp +++ b/compute/ARMComputeEx/src/runtime/CL/functions/CLEmbeddingLookup.cpp @@ -47,7 +47,7 @@ using namespace arm_compute; void CLEmbeddingLookup::configure(const ICLTensor *input, ICLTensor *output, const ICLTensor *lookups) { - auto k = arm_compute::support::cpp14::make_unique(); + auto k = support::cpp14::make_unique(); k->configure(input, output, lookups); _kernel = std::move(k); } diff --git a/compute/ARMComputeEx/src/runtime/CL/functions/CLFullyConnectedHybridLayer.cpp b/compute/ARMComputeEx/src/runtime/CL/functions/CLFullyConnectedHybridLayer.cpp index f098832..0198946 100644 --- 
a/compute/ARMComputeEx/src/runtime/CL/functions/CLFullyConnectedHybridLayer.cpp +++ b/compute/ARMComputeEx/src/runtime/CL/functions/CLFullyConnectedHybridLayer.cpp @@ -45,7 +45,7 @@ #include "arm_compute/core/utils/misc/ShapeCalculator.h" #include "arm_compute/core/utils/quantization/AsymmHelpers.h" #include "arm_compute/runtime/CL/CLScheduler.h" -#include "support/ToolchainSupport.h" +#include "support/MemorySupport.h" #include @@ -60,7 +60,7 @@ Status validate_mm(const ITensorInfo &input, const ITensorInfo &weights, const I ARM_COMPUTE_UNUSED(weights); ARM_COMPUTE_UNUSED(output); ARM_COMPUTE_RETURN_ON_ERROR( - CLGEMMLowpMatrixMultiplyCoreEx::validate(&input, &weights, nullptr, &output)); + CLGEMMLowpMatrixMultiplyCore::validate(&input, &weights, nullptr, &output)); return Status{}; } @@ -68,7 +68,7 @@ Status validate_mm(const ITensorInfo &input, const ITensorInfo &weights, const I void CLFullyConnectedHybridLayerReshapeWeights::configure(const ICLTensor *input, ICLTensor *output) { - auto k = arm_compute::support::cpp14::make_unique(); + auto k = support::cpp14::make_unique(); k->configure(input, output); _kernel = std::move(k); } @@ -172,7 +172,8 @@ void CLFullyConnectedHybridLayer::configure(const ICLTensor *input, const ICLTen // Quantize input _quantized_input.allocator()->init( - input->info()->clone()->set_is_resizable(true).reset_padding().set_data_type(DataType::S8)); + input->info()->clone()->set_is_resizable(true).reset_padding().set_data_type( + DataType::QASYMM8_SIGNED)); _memory_group.manage(&_quantized_input); _quant_input_kernel.configure(input, &_scale_factor, &_quantized_input); @@ -199,7 +200,7 @@ Status CLFullyConnectedHybridLayer::validate(const ITensorInfo *input, const ITe { ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, weights, output); ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F16, DataType::F32); - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(weights, 1, DataType::S8); + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(weights, 1, DataType::QASYMM8_SIGNED); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output); ARM_COMPUTE_RETURN_ERROR_ON(weights->num_dimensions() > 2); @@ -256,8 +257,9 @@ Status CLFullyConnectedHybridLayer::validate(const ITensorInfo *input, const ITe ARM_COMPUTE_RETURN_ON_ERROR(CLScaleFactorSymm8Kernel::validate(input, &scale_factor)); // Validate quantization symm8 kernel - const ITensorInfo &quantized_input = TensorInfo( - input->clone()->set_is_resizable(true).reset_padding().set_data_type(DataType::S8)); + const ITensorInfo &quantized_input = + TensorInfo(input->clone()->set_is_resizable(true).reset_padding().set_data_type( + DataType::QASYMM8_SIGNED)); ARM_COMPUTE_RETURN_ON_ERROR( CLQuantizationSymmetricKernel::validate(input, &scale_factor, &quantized_input)); diff --git a/compute/ARMComputeEx/src/runtime/CL/functions/CLFullyConnectedLayerEx.cpp b/compute/ARMComputeEx/src/runtime/CL/functions/CLFullyConnectedLayerEx.cpp index 63e291b..2ff4b96 100644 --- a/compute/ARMComputeEx/src/runtime/CL/functions/CLFullyConnectedLayerEx.cpp +++ b/compute/ARMComputeEx/src/runtime/CL/functions/CLFullyConnectedLayerEx.cpp @@ -46,7 +46,7 @@ #include "arm_compute/core/utils/misc/ShapeCalculator.h" #include "arm_compute/core/utils/quantization/AsymmHelpers.h" #include "arm_compute/runtime/CL/CLScheduler.h" -#include "support/ToolchainSupport.h" +#include "support/MemorySupport.h" #include @@ -141,7 +141,7 @@ Status validate_mm(const ITensorInfo &input, const ITensorInfo &weights, const I void 
CLFullyConnectedLayerReshapeWeightsEx::configure(const ICLTensor *input, ICLTensor *output) { - auto k = arm_compute::support::cpp14::make_unique(); + auto k = support::cpp14::make_unique(); k->configure(input, output); _kernel = std::move(k); } diff --git a/compute/ARMComputeEx/src/runtime/CL/functions/CLFullyConnectedReshapingLayer.cpp b/compute/ARMComputeEx/src/runtime/CL/functions/CLFullyConnectedReshapingLayer.cpp index 9aebc47..157b4d9 100644 --- a/compute/ARMComputeEx/src/runtime/CL/functions/CLFullyConnectedReshapingLayer.cpp +++ b/compute/ARMComputeEx/src/runtime/CL/functions/CLFullyConnectedReshapingLayer.cpp @@ -53,18 +53,21 @@ void CLFullyConnectedReshapingLayer::configure(const arm_compute::ICLTensor *inp fc->configure(input_to_use, _weights, _biases, _output); return std::unique_ptr(fc); } - else + else if (kernel_type == KernelType::PREPROCESSED_WEIGHTS) { - assert(kernel_type == KernelType::PREPROCESSED_WEIGHTS); - bool is_hybrid = (input->info()->data_type() == DataType::F32 || input->info()->data_type() == DataType::F16) && - weights->info()->data_type() == DataType::S8; + (weights->info()->data_type() == DataType::S8 || + weights->info()->data_type() == DataType::QASYMM8_SIGNED); if (is_hybrid) { auto fc = new arm_compute::CLFullyConnectedHybridLayer{_memory_manager}; + ITensorInfo *weights_info = const_cast(_weights->info()); + const auto orgin_weights_data_type = weights_info->data_type(); + weights_info->set_data_type(DataType::QASYMM8_SIGNED); fc->configure(input_to_use, _weights, _biases, _output); + weights_info->set_data_type(orgin_weights_data_type); return std::unique_ptr(fc); } else @@ -74,6 +77,11 @@ void CLFullyConnectedReshapingLayer::configure(const arm_compute::ICLTensor *inp return std::unique_ptr(fc); } } + else + { + throw std::runtime_error("CLFullyConnectedReshapingLayer: Unsupported kernel type"); + } + }(); if (_needs_reshape) diff --git a/compute/ARMComputeEx/src/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCoreEx.cpp b/compute/ARMComputeEx/src/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCoreEx.cpp deleted file mode 100644 index ca5499d..0000000 --- a/compute/ARMComputeEx/src/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCoreEx.cpp +++ /dev/null @@ -1,180 +0,0 @@ -/* - * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * Copyright (c) 2017-2019 ARM Limited. 
- * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#include "arm_compute/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCoreEx.h" - -#include "arm_compute/core/CL/ICLTensor.h" -#include "arm_compute/core/CL/gemm/reshaped_only_rhs/CLGEMMReshapedOnlyRHSKernelConfiguration.h" -#include "arm_compute/core/Error.h" -#include "arm_compute/core/Helpers.h" -#include "arm_compute/core/TensorInfo.h" -#include "arm_compute/core/Types.h" -#include "arm_compute/core/Validate.h" -#include "arm_compute/core/utils/misc/ShapeCalculator.h" -#include "arm_compute/core/utils/quantization/AsymmHelpers.h" -#include "arm_compute/runtime/CL/CLScheduler.h" -#include "arm_compute/runtime/MemoryGroup.h" - -namespace arm_compute -{ -using namespace arm_compute::misc::shape_calculator; -using namespace arm_compute::cl_gemm; - -namespace -{ -inline bool is_gemm_reshaped(bool reshape_b_only_on_first_run, GPUTarget gpu_target) -{ - return (get_arch_from_target(gpu_target) != GPUTarget::MIDGARD) && (reshape_b_only_on_first_run); -} -} // namespace - -CLGEMMLowpMatrixMultiplyCoreEx::CLGEMMLowpMatrixMultiplyCoreEx( - std::shared_ptr memory_manager) - : _memory_group(std::move(memory_manager)), _mm_midgard_kernel(), _mtx_a_reduction_kernel(), - _mtx_b_reduction_kernel(), _vector_sum_col(), _vector_sum_row(), _a_offset(0), _b_offset(0), - _reshape_b_only_on_first_run(false), _is_prepared(false) -{ -} - -void CLGEMMLowpMatrixMultiplyCoreEx::configure(const ICLTensor *a, const ICLTensor *b, - const ICLTensor *c, ICLTensor *output, - const GEMMInfo &gemm_info) -{ - ARM_COMPUTE_ERROR_ON_NULLPTR(a, b, output); - ARM_COMPUTE_UNUSED(c); - ARM_COMPUTE_ERROR_THROW_ON(CLGEMMLowpMatrixMultiplyCoreEx::validate( - a->info(), b->info(), c != nullptr ? 
c->info() : nullptr, output->info(), gemm_info)); - - _is_prepared = false; - _reshape_b_only_on_first_run = gemm_info.reshape_b_only_on_first_run(); - _a_offset = a->info()->quantization_info().uniform().offset; - _b_offset = b->info()->quantization_info().uniform().offset; - - // Get the GPU target - const GPUTarget gpu_target = CLScheduler::get().target(); - - // Set the target for the kernels - _mm_midgard_kernel.set_target(gpu_target); - - // GEMMRHSMatrixInfo rhs_info; - // GEMMLHSMatrixInfo lhs_info; - - // Arguments used by GEMMReshapeInfo - // If we pass the matrix A and matrix B reshaped to CLGEMMMatrixMultiplyKernel, we need to pass m, - // n, k, mult_transpose1xW_width and mult_interleave4x4_height to CLGEMMReshapeInfo - // in order to know how the matrices have been reshaped - bool reinterpret_input_as_3d = gemm_info.reinterpret_input_as_3d(); - const unsigned int m = reinterpret_input_as_3d - ? (a->info()->dimension(1) * a->info()->dimension(2)) - : a->info()->dimension(1); - const unsigned int n = b->info()->dimension(0); - const unsigned int k = a->info()->dimension(0); - const int depth_output_gemm3d = gemm_info.depth_output_gemm3d(); - - const ICLTensor *matrix_b = b; - // Configure matrix multiply kernel - _mm_midgard_kernel.configure( - a, matrix_b, output, - GEMMReshapeInfo(m, n, k, 1, 1, depth_output_gemm3d, reinterpret_input_as_3d)); -} - -Status CLGEMMLowpMatrixMultiplyCoreEx::validate(const ITensorInfo *a, const ITensorInfo *b, - const ITensorInfo *c, const ITensorInfo *output, - const GEMMInfo &gemm_info) -{ - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(a, 1, DataType::S8); - ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(a, b); - ARM_COMPUTE_UNUSED(c); - - ARM_COMPUTE_RETURN_ERROR_ON_MSG(gemm_info.is_a_reshaped(), - "Matrix A already reshaped is not supported"); - ARM_COMPUTE_RETURN_ERROR_ON_MSG(gemm_info.is_b_reshaped(), - "Matrix B already reshaped is not supported"); - - const ITensorInfo *matrix_a_info = a; - - // Get the GPU target - const GPUTarget gpu_target = CLScheduler::get().target(); - - bool reinterpret_input_as_3d = gemm_info.reinterpret_input_as_3d(); - const unsigned int m = - reinterpret_input_as_3d ? 
(a->dimension(1) * a->dimension(2)) : a->dimension(1); - const unsigned int n = b->dimension(0); - const unsigned int k = a->dimension(0); - const int depth_output_gemm3d = gemm_info.depth_output_gemm3d(); - - bool reshape_matrix_b = is_gemm_reshaped(gemm_info.reshape_b_only_on_first_run(), gpu_target); - - const GEMMReshapeInfo reshape_info = - GEMMReshapeInfo(m, n, k, 1, 1, depth_output_gemm3d, reinterpret_input_as_3d); - - TensorInfo weights_info(*b); - const ITensorInfo *matrix_b_info = &weights_info; - if (reshape_matrix_b) - { - ARM_COMPUTE_RETURN_ERROR_ON_MSG(false, - "CLGEMMLowpMatrixMultiplyCoreEx does not support reshape_b"); - } - - // Validate matrix multiply - ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpMatrixMultiplyKernelEx::validate( - matrix_a_info, matrix_b_info, output, reshape_info)); - - return Status{}; -} - -void CLGEMMLowpMatrixMultiplyCoreEx::run() -{ - prepare(); - - MemoryGroupResourceScope scope_mg(_memory_group); - - // Run matrix multiply - CLScheduler::get().enqueue(_mm_midgard_kernel, false); -} - -void CLGEMMLowpMatrixMultiplyCoreEx::prepare() -{ - if (!_is_prepared) - { - _is_prepared = true; - } -} -} // namespace arm_compute diff --git a/compute/ARMComputeEx/src/runtime/CL/functions/CLGatherEx.cpp b/compute/ARMComputeEx/src/runtime/CL/functions/CLGatherEx.cpp index f594d7a..e0b833b 100644 --- a/compute/ARMComputeEx/src/runtime/CL/functions/CLGatherEx.cpp +++ b/compute/ARMComputeEx/src/runtime/CL/functions/CLGatherEx.cpp @@ -48,7 +48,7 @@ using namespace arm_compute; void CLGatherEx::configure(const ICLTensor *input, const ICLTensor *indices, ICLTensor *output, int axis) { - auto k = arm_compute::support::cpp14::make_unique(); + auto k = support::cpp14::make_unique(); k->configure(input, indices, output, axis); _kernel = std::move(k); } diff --git a/compute/ARMComputeEx/src/runtime/CL/functions/CLHashtableLookup.cpp b/compute/ARMComputeEx/src/runtime/CL/functions/CLHashtableLookup.cpp index 27ed8e8..65b89a3 100644 --- a/compute/ARMComputeEx/src/runtime/CL/functions/CLHashtableLookup.cpp +++ b/compute/ARMComputeEx/src/runtime/CL/functions/CLHashtableLookup.cpp @@ -47,7 +47,7 @@ using namespace arm_compute; void CLHashtableLookup::configure(const ICLTensor *lookups, const ICLTensor *keys, const ICLTensor *input, ICLTensor *output, ICLTensor *hits) { - auto k = arm_compute::support::cpp14::make_unique(); + auto k = support::cpp14::make_unique(); k->configure(lookups, keys, input, output, hits); _kernel = std::move(k); } diff --git a/compute/ARMComputeEx/src/runtime/CL/functions/CLInstanceNormalizationLayerEx.cpp b/compute/ARMComputeEx/src/runtime/CL/functions/CLInstanceNormalizationLayerEx.cpp index 80393e8..5a7e408 100644 --- a/compute/ARMComputeEx/src/runtime/CL/functions/CLInstanceNormalizationLayerEx.cpp +++ b/compute/ARMComputeEx/src/runtime/CL/functions/CLInstanceNormalizationLayerEx.cpp @@ -50,7 +50,7 @@ CLInstanceNormalizationLayerEx::CLInstanceNormalizationLayerEx() {} void CLInstanceNormalizationLayerEx::configure(ICLTensor *input, ICLTensor *output, ICLTensor *gamma, ICLTensor *beta, float epsilon) { - auto k = arm_compute::support::cpp14::make_unique(); + auto k = support::cpp14::make_unique(); k->configure(input, output, gamma, beta, epsilon); _kernel = std::move(k); } diff --git a/compute/ARMComputeEx/src/runtime/CL/functions/CLPReLU.cpp b/compute/ARMComputeEx/src/runtime/CL/functions/CLPReLU.cpp deleted file mode 100644 index fbb15ab..0000000 --- a/compute/ARMComputeEx/src/runtime/CL/functions/CLPReLU.cpp +++ /dev/null @@ -1,63 +0,0 @@ -/* - * 
Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * Copyright (c) 2016-2018 ARM Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#include "arm_compute/runtime/CL/functions/CLPReLU.h" - -#include "arm_compute/core/CL/kernels/CLPReLUKernel.h" -#include "arm_compute/core/CL/ICLTensor.h" - -using namespace arm_compute; - -void CLPReLU::configure(ICLTensor *input, ICLTensor *alpha, ICLTensor *output) -{ - auto k = arm_compute::support::cpp14::make_unique(); - k->configure(input, alpha, output); - _kernel = std::move(k); - - if (output->info()->dimension(0) > 1) - { - ICLTensor *broadcasted_info = (input->info()->dimension(0) == 1) ? input : alpha; - - if (broadcasted_info->info()->dimension(0) == 1) - { - _border_handler.configure(broadcasted_info, _kernel->border_size(), BorderMode::REPLICATE); - } - } -} diff --git a/compute/ARMComputeEx/src/runtime/CL/functions/CLRNNLayerEx.cpp b/compute/ARMComputeEx/src/runtime/CL/functions/CLRNNLayerEx.cpp deleted file mode 100644 index 6049b7e..0000000 --- a/compute/ARMComputeEx/src/runtime/CL/functions/CLRNNLayerEx.cpp +++ /dev/null @@ -1,163 +0,0 @@ -/* - * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * Copyright (c) 2018 ARM Limited. 
- * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#include "arm_compute/runtime/CL/functions/CLRNNLayerEx.h" - -#include "arm_compute/core/Helpers.h" -#include "arm_compute/core/Types.h" -#include "arm_compute/core/Utils.h" -#include "arm_compute/core/utils/misc/ShapeCalculator.h" -#include "arm_compute/runtime/CL/CLScheduler.h" -#include "support/ToolchainSupport.h" - -#include - -using namespace arm_compute; -using namespace arm_compute::misc::shape_calculator; - -CLRNNLayerEx::CLRNNLayerEx(std::shared_ptr memory_manager) - : _memory_group(std::move(memory_manager)), _gemm_state_f(), _add_kernel(), - _activation_kernel(), _fully_connected_kernel(), _copy_kernel(), _fully_connected_out(), - _gemm_output(), _add_output(), _is_prepared(false) -{ -} - -Status CLRNNLayerEx::validate(const ITensorInfo *input, const ITensorInfo *weights, - const ITensorInfo *recurrent_weights, const ITensorInfo *bias, - const ITensorInfo *hidden_state, const ITensorInfo *output, - const ActivationLayerInfo &info) -{ - const int idx_width = 0; - const int idx_height = 1; - ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, weights, recurrent_weights, bias, hidden_state, - output); - ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(idx_width) != weights->dimension(idx_width)); - ARM_COMPUTE_RETURN_ERROR_ON(weights->dimension(idx_height) != - recurrent_weights->dimension(idx_width)); - ARM_COMPUTE_RETURN_ERROR_ON(recurrent_weights->dimension(idx_width) != - recurrent_weights->dimension(1)); - ARM_COMPUTE_RETURN_ERROR_ON(bias->num_dimensions() != 1); - ARM_COMPUTE_RETURN_ERROR_ON(bias->dimension(idx_width) != weights->dimension(idx_height)); - ARM_COMPUTE_RETURN_ERROR_ON(hidden_state->dimension(idx_width) != weights->dimension(idx_height)); - ARM_COMPUTE_RETURN_ERROR_ON(hidden_state->dimension(idx_height) != input->dimension(idx_height)); - ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output->tensor_shape(), - hidden_state->tensor_shape()); - - auto shape_info = - TensorInfo(compute_rnn_shape(recurrent_weights, hidden_state->dimension(idx_height)), 1, - input->data_type()); - - ARM_COMPUTE_RETURN_ON_ERROR(CLFullyConnectedLayer::validate(input, weights, bias, &shape_info)); - ARM_COMPUTE_RETURN_ON_ERROR( - CLGEMM::validate(hidden_state, recurrent_weights, nullptr, &shape_info, 1.f, 0.f)); - ARM_COMPUTE_RETURN_ON_ERROR(CLSaturatedArithmeticOperationKernel::validate( - ArithmeticOperation::ADD, &shape_info, &shape_info, &shape_info, 
ConvertPolicy::SATURATE)); - ARM_COMPUTE_RETURN_ON_ERROR(CLActivationLayerKernel::validate(&shape_info, &shape_info, info)); - - return Status{}; -} - -void CLRNNLayerEx::configure(const ICLTensor *input, const ICLTensor *weights, - const ICLTensor *recurrent_weights, const ICLTensor *bias, - ICLTensor *hidden_state, ICLTensor *output, ActivationLayerInfo &info) -{ - ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, recurrent_weights, bias, hidden_state, output); - ARM_COMPUTE_ERROR_THROW_ON(CLRNNLayerEx::validate(input->info(), weights->info(), - recurrent_weights->info(), bias->info(), - hidden_state->info(), output->info(), info)); - - const int idx_height = 1; - TensorShape shape = - compute_rnn_shape(recurrent_weights->info(), hidden_state->info()->dimension(idx_height)); - - _is_prepared = false; - - _fully_connected_out.allocator()->init(TensorInfo(shape, 1, input->info()->data_type())); - _gemm_output.allocator()->init(TensorInfo(shape, 1, input->info()->data_type())); - - // Manage intermediate buffers and configure - _memory_group.manage(&_fully_connected_out); - _fully_connected_kernel.configure(input, weights, bias, &_fully_connected_out); - - _memory_group.manage(&_gemm_output); - _gemm_state_f.configure(hidden_state, recurrent_weights, nullptr, &_gemm_output, 1.f, 0.f); - - _add_output.allocator()->init(TensorInfo(shape, 1, input->info()->data_type())); - _memory_group.manage(&_add_output); - - _add_kernel.configure(ArithmeticOperation::ADD, &_fully_connected_out, &_gemm_output, - &_add_output, ConvertPolicy::SATURATE); - - _fully_connected_out.allocator()->allocate(); - _gemm_output.allocator()->allocate(); - - _activation_kernel.configure(&_add_output, hidden_state, info); - _add_output.allocator()->allocate(); - - _copy_kernel.configure(hidden_state, output); -} - -void CLRNNLayerEx::run() -{ - prepare(); - - _memory_group.acquire(); - - _fully_connected_kernel.run(); - _gemm_state_f.run(); - CLScheduler::get().enqueue(_add_kernel); - CLScheduler::get().enqueue(_activation_kernel); - - // copy hidden out to output - CLScheduler::get().enqueue(_copy_kernel); - - _memory_group.release(); -} - -void CLRNNLayerEx::prepare() -{ - if (!_is_prepared) - { - _fully_connected_kernel.prepare(); - _gemm_state_f.prepare(); - - _is_prepared = true; - } -} diff --git a/compute/ARMComputeEx/src/runtime/CL/functions/CLReduceOperation.cpp b/compute/ARMComputeEx/src/runtime/CL/functions/CLReduceOperation.cpp index 8ce2d74..a41e6db 100644 --- a/compute/ARMComputeEx/src/runtime/CL/functions/CLReduceOperation.cpp +++ b/compute/ARMComputeEx/src/runtime/CL/functions/CLReduceOperation.cpp @@ -60,8 +60,7 @@ Status CLReduceOperation::validate(const ITensorInfo *input, const ITensorInfo * const size_t num_of_interm_tensors = num_of_kernels - (keep_dims ? 1 : 0); // Create temporary tensor infos - auto interm_tensors = - arm_compute::support::cpp14::make_unique(num_of_interm_tensors); + auto interm_tensors = support::cpp14::make_unique(num_of_interm_tensors); // Create intermediate tensor info TensorShape shape{input->tensor_shape()}; @@ -119,9 +118,8 @@ void CLReduceOperation::configure(ICLTensor *input, ICLTensor *output, const size_t num_of_kernels = axis.size(); const size_t num_of_interm_tensors = num_of_kernels - (keep_dims ? 
1 : 0);
 
-  _interm_tensors = arm_compute::support::cpp14::make_unique(num_of_interm_tensors);
-  _reduce_kernels =
-      arm_compute::support::cpp14::make_unique(num_of_kernels);
+  _interm_tensors = support::cpp14::make_unique(num_of_interm_tensors);
+  _reduce_kernels = support::cpp14::make_unique(num_of_kernels);
 
   // Set a vector that is ordered ICLTensors sequentially.
   std::vector tensors;
diff --git a/compute/ARMComputeEx/src/runtime/CL/functions/CLSpaceToDepth.cpp b/compute/ARMComputeEx/src/runtime/CL/functions/CLSpaceToDepth.cpp
deleted file mode 100644
index 7d7b226..0000000
--- a/compute/ARMComputeEx/src/runtime/CL/functions/CLSpaceToDepth.cpp
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/*
- * Copyright (c) 2016-2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "arm_compute/runtime/CL/functions/CLSpaceToDepth.h"
-
-#include "arm_compute/core/CL/kernels/CLSpaceToDepthKernel.h"
-
-using namespace arm_compute;
-
-void CLSpaceToDepth::configure(ICLTensor *input, ICLTensor *output, const int32_t block_size)
-{
-  auto k = arm_compute::support::cpp14::make_unique();
-  k->configure(input, output, block_size);
-  _kernel = std::move(k);
-}
diff --git a/compute/ARMComputeEx/src/runtime/CL/functions/CLTransposeConvLayer.cpp b/compute/ARMComputeEx/src/runtime/CL/functions/CLTransposeConvLayer.cpp
index e61746e..3215d01 100644
--- a/compute/ARMComputeEx/src/runtime/CL/functions/CLTransposeConvLayer.cpp
+++ b/compute/ARMComputeEx/src/runtime/CL/functions/CLTransposeConvLayer.cpp
@@ -15,7 +15,7 @@
  */
 
 /*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2020 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
@@ -37,218 +37,124 @@
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
*/ - #include "arm_compute/runtime/CL/functions/CLTransposeConvLayer.h" -#include "arm_compute/core/utils/misc/ShapeCalculatorEx.h" -#include "arm_compute/core/Helpers.h" #include "arm_compute/core/Utils.h" -#include "arm_compute/core/UtilsEx.h" #include "arm_compute/core/Validate.h" #include "arm_compute/core/utils/misc/ShapeCalculator.h" +#include "arm_compute/core/utils/quantization/AsymmHelpers.h" #include "arm_compute/runtime/CL/CLScheduler.h" -#include "arm_compute/runtime/CPP/CPPScheduler.h" +#include #include #include using namespace arm_compute; using namespace arm_compute::misc::shape_calculator; -CLTransposeConvLayer::CLTransposeConvLayer(std::shared_ptr memory_manager) // NOLINT - : _memory_group(std::move(memory_manager)), - _scale_f(), - _conv_f(), - _flip_weights(), - _scaled_output(), - _original_weights(nullptr), - _weights_flipped(), - _is_prepared(false) +CLTransposeConvLayer::CLTransposeConvLayer(std::shared_ptr memory_manager) + : _memory_manager(std::move(memory_manager)), _function() +{ +} + +void CLTransposeConvLayer::configure(ICLTensor *input, ICLTensor *weights, const ICLTensor *bias, + ICLTensor *output, const PadStrideInfo &deconv_info, + unsigned int invalid_right, unsigned int invalid_bottom, + const WeightsInfo &weights_info) { + configure(CLKernelLibrary::get().get_compile_context(), input, weights, bias, output, deconv_info, + invalid_right, invalid_bottom, weights_info); +} + +void CLTransposeConvLayer::configure(const CLCompileContext &compile_context, ICLTensor *input, + ICLTensor *weights, const ICLTensor *bias, ICLTensor *output, + const PadStrideInfo &deconv_info, unsigned int invalid_right, + unsigned int invalid_bottom, const WeightsInfo &weights_info) +{ + ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output); + + switch (CLTransposeConvLayer::get_deconvolution_method(input->info(), weights->info(), nullptr, + output->info(), deconv_info, invalid_right, + invalid_bottom, weights_info)) + { + case DeconvolutionMethod::DIRECT: + { + auto f = arm_compute::support::cpp14::make_unique(); + f->configure(compile_context, input, weights, bias, output, deconv_info, invalid_right, + invalid_bottom, weights_info); + _function = std::move(f); + break; + } + case DeconvolutionMethod::GEMM: + { + auto f = arm_compute::support::cpp14::make_unique(_memory_manager); + f->configure(compile_context, input, weights, bias, output, deconv_info); + _function = std::move(f); + break; + } + default: + ARM_COMPUTE_ERROR("Not supported."); + break; + } } Status CLTransposeConvLayer::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *bias, ITensorInfo *output, - const PadStrideInfo &info, unsigned int invalid_right, + const PadStrideInfo &deconv_info, unsigned int invalid_right, unsigned int invalid_bottom, const WeightsInfo &weights_info) { ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, weights, output); - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::F16, - DataType::F32); - ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(input, weights); - - const DataLayout data_layout = input->data_layout(); - - const size_t idx_w = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH); - const size_t idx_h = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT); - const size_t idx_c = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL); - - ARM_COMPUTE_RETURN_ERROR_ON(weights->dimension(idx_w) != weights->dimension(idx_h)); - 
ARM_COMPUTE_RETURN_ERROR_ON(weights->dimension(idx_w) < 1); - - const unsigned int kernel_x = weights->dimension(idx_w); - const unsigned int kernel_y = weights->dimension(idx_h); - - ARM_COMPUTE_RETURN_ERROR_ON_MSG(invalid_right > kernel_x - 1, - "invalid_right must be smaller than kernel_x"); - ARM_COMPUTE_RETURN_ERROR_ON_MSG(invalid_bottom > kernel_y - 1, - "inner_border_top must be smaller than kernel_y"); - - // NOTE From the existing CLDeconvolutionLayer, invalid_right and invalid_bottom were added. - auto out_dims = transposeconv_output_dimensions( - input->dimension(idx_w), input->dimension(idx_h), weights->dimension(idx_w), - weights->dimension(idx_h), info, invalid_right, invalid_bottom); - - const TensorShape output_shape = compute_transposeconv_output_shape(out_dims, *input, *weights); - - ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output, weights); - - if (bias != nullptr) + switch (CLTransposeConvLayer::get_deconvolution_method( + input, weights, bias, output, deconv_info, invalid_right, invalid_bottom, weights_info)) { - if (is_data_type_quantized_asymmetric(input->data_type())) + case DeconvolutionMethod::DIRECT: { - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(bias, 1, DataType::S32); + // Validate direct convolution layer + ARM_COMPUTE_RETURN_ON_ERROR(CLDirectTransposeConvLayer::validate( + input, weights, bias, output, deconv_info, invalid_right, invalid_bottom, weights_info)); + break; } - else + case DeconvolutionMethod::GEMM: { - ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, bias); + // Validate gemm-based convolution layer + ARM_COMPUTE_RETURN_ON_ERROR( + CLGEMMDeconvolutionLayer::validate(input, weights, bias, output, deconv_info)); + break; } - ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(input, bias); + default: + ARM_COMPUTE_ERROR("Not supported."); + break; } - ARM_COMPUTE_RETURN_ERROR_ON_MSG(output->dimension(idx_w) != output_shape[idx_w], - "Output's width is invalid."); - ARM_COMPUTE_RETURN_ERROR_ON_MSG(output->dimension(idx_h) != output_shape[idx_h], - "Output's height is invalid."); - ARM_COMPUTE_RETURN_ERROR_ON_MSG(output->dimension(idx_c) != output_shape[idx_c], - "Output's depth is invalid."); - - unsigned int pad_left = 0; - unsigned int pad_right = 0; - unsigned int pad_top = 0; - unsigned int pad_bottom = 0; - const TensorShape scale_out_shape = compute_transposeconv_upsampled_shape( - *input, *weights, info, out_dims, invalid_right, invalid_bottom, pad_left, pad_right, pad_top, - pad_bottom); - TensorInfo scale_out_info(input->clone() - ->set_is_resizable(true) - .reset_padding() - .set_tensor_shape(scale_out_shape) - .set_data_layout(data_layout)); - const PadStrideInfo conv_info(1, 1, 0, 0, 0, 0, DimensionRoundingType::CEIL); - - ARM_COMPUTE_RETURN_ON_ERROR( - CLTransposeConvLayerUpsample::validate(input, &scale_out_info, BorderSize(0, 0), info)); - ARM_COMPUTE_RETURN_ON_ERROR(CLConvolutionLayer::validate(&scale_out_info, weights, bias, output, - conv_info, weights_info)); - return Status{}; } -void CLTransposeConvLayer::configure(ICLTensor *input, ICLTensor *weights, const ICLTensor *bias, - ICLTensor *output, const PadStrideInfo &info, - unsigned int invalid_right, unsigned int invalid_bottom, - const WeightsInfo &weights_info) +DeconvolutionMethod CLTransposeConvLayer::get_deconvolution_method( + const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *bias, + ITensorInfo *output, const PadStrideInfo &deconv_info, unsigned int invalid_right, + unsigned int invalid_bottom, const 
WeightsInfo &weights_info) { - ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output); - - const unsigned int stride_x = info.stride().first; - const unsigned int stride_y = info.stride().second; + ARM_COMPUTE_UNUSED(output, bias, weights_info); - const DataLayout data_layout = input->info()->data_layout(); + const DataLayout data_layout = input->data_layout(); const size_t idx_w = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH); const size_t idx_h = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT); - _original_weights = weights; - _weights_flipped.allocator()->init(weights->info()->clone()->set_data_layout(data_layout)); - _flip_weights.configure(weights, &_weights_flipped); - - // NOTE From the existing CLDeconvolutionLayer, invalid_right and invalid_bottom were - // added. - auto out_dims = transposeconv_output_dimensions( - input->info()->dimension(idx_w), input->info()->dimension(idx_h), - weights->info()->dimension(idx_w), weights->info()->dimension(idx_h), info, invalid_right, - invalid_bottom); - - const TensorShape output_shape = - compute_transposeconv_output_shape(out_dims, *input->info(), *weights->info()); - - // Output auto initialization if not yet initialized - auto_init_if_empty( - *output->info(), - input->info()->clone()->set_tensor_shape(output_shape).set_data_layout(data_layout)); - - // Perform validation step - ARM_COMPUTE_ERROR_THROW_ON(CLTransposeConvLayer::validate( - input->info(), weights->info(), bias == nullptr ? nullptr : bias->info(), output->info(), - info, invalid_right, invalid_bottom)); - - _is_prepared = weights_info.retain_internal_weights(); - - _memory_group.manage(&_scaled_output); - - // Find the upsampled dimensions and the padding needed for the convolution with stride 1 in order - // to match output shape - unsigned int pad_left = 0; - unsigned int pad_right = 0; - unsigned int pad_top = 0; - unsigned int pad_bottom = 0; - const TensorShape scale_out_shape = compute_transposeconv_upsampled_shape( - *input->info(), *weights->info(), info, out_dims, invalid_right, invalid_bottom, pad_left, - pad_right, pad_top, pad_bottom); - - TensorInfo scale_out_info(scale_out_shape, 1, input->info()->data_type(), - input->info()->quantization_info()); - scale_out_info.set_data_layout(data_layout); - _scaled_output.allocator()->init(scale_out_info); - - // configure scale function - const PadStrideInfo upsample_info(stride_x, stride_y, pad_left, pad_right, pad_top, pad_bottom, - DimensionRoundingType::FLOOR); - _scale_f.configure(input, &_scaled_output, BorderSize(0, 0), upsample_info); - - // setup the function to convolve the upscaled output - const PadStrideInfo conv_info(1, 1, 0, 0, 0, 0, DimensionRoundingType::CEIL); - _conv_f.configure(&_scaled_output, &_weights_flipped, bias, output, conv_info, weights_info); - _scaled_output.allocator()->allocate(); + if (weights->dimension(idx_w) != deconv_info.stride().first || + weights->dimension(idx_h) != deconv_info.stride().second || invalid_right != 0 || + invalid_bottom != 0) + { + return DeconvolutionMethod::DIRECT; + } + + return DeconvolutionMethod::GEMM; } void CLTransposeConvLayer::run() { prepare(); - - _memory_group.acquire(); - - _scale_f.run(); - _conv_f.run(); - - _memory_group.release(); + _function->run(); } -void CLTransposeConvLayer::prepare() -{ - if (!_is_prepared) - { - ARM_COMPUTE_ERROR_ON(!_original_weights->is_used()); - - // Run weights flipping and mark original weights tensor as unused - _weights_flipped.allocator()->allocate(); - 
_weights_flipped.map(true); - _original_weights->map(CLScheduler::get().queue(), true); - CPPScheduler::get().schedule(&_flip_weights, Window::DimZ); - _weights_flipped.unmap(); - _original_weights->unmap(CLScheduler::get().queue()); - _original_weights->mark_as_unused(); - - // Prepare convolution - _conv_f.prepare(); - - if (!_weights_flipped.is_used()) - { - _weights_flipped.allocator()->free(); - } - - _is_prepared = true; - } -} +void CLTransposeConvLayer::prepare() { _function->prepare(); } diff --git a/compute/ARMComputeEx/src/runtime/CL/functions/CLTransposeConvLayerUpsample.cpp b/compute/ARMComputeEx/src/runtime/CL/functions/CLTransposeConvLayerUpsample.cpp deleted file mode 100644 index 07feb5a..0000000 --- a/compute/ARMComputeEx/src/runtime/CL/functions/CLTransposeConvLayerUpsample.cpp +++ /dev/null @@ -1,92 +0,0 @@ -/* - * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * Copyright (c) 2017-2018 ARM Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - -#include "arm_compute/runtime/CL/functions/CLTransposeConvLayerUpsample.h" - -#include "arm_compute/core/CL/OpenCL.h" -#include "arm_compute/core/Utils.h" -#include "arm_compute/runtime/CL/CLScheduler.h" -#include "arm_compute/core/CL/ICLTensor.h" - -#include -#include -#include - -using namespace arm_compute; - -CLTransposeConvLayerUpsample::CLTransposeConvLayerUpsample() // NOLINT - : _upsample(), - _output(nullptr) -{ -} - -Status CLTransposeConvLayerUpsample::validate(const ITensorInfo *input, const ITensorInfo *output, - const BorderSize &inner_border, - const PadStrideInfo &info) -{ - return CLTransposeConvLayerUpsampleKernel::validate(input, output, inner_border, info); -} - -void CLTransposeConvLayerUpsample::configure(ICLTensor *input, ICLTensor *output, - const BorderSize &inner_border, - const PadStrideInfo &info) -{ - ARM_COMPUTE_ERROR_ON_NULLPTR(input, output); - - _output = output; - _upsample.configure(input, _output, inner_border, info); -} - -void CLTransposeConvLayerUpsample::run() -{ - _output->map(CLScheduler::get().queue(), true); - if (is_data_type_quantized_asymmetric(_output->info()->data_type())) - { - const uint8_t quantized_zero = _output->info()->quantization_info().uniform().offset; - std::fill_n(_output->buffer(), _output->info()->total_size(), quantized_zero); - } - else - { - memset(_output->buffer(), 0, _output->info()->total_size()); - } - _output->unmap(CLScheduler::get().queue()); - - CLScheduler::get().enqueue(_upsample, false); -} diff --git a/compute/ARMComputeEx/src/runtime/CPP/functions/CPPOneHotEx.cpp b/compute/ARMComputeEx/src/runtime/CPP/functions/CPPOneHotEx.cpp index 114e1a7..768c15b 100644 --- a/compute/ARMComputeEx/src/runtime/CPP/functions/CPPOneHotEx.cpp +++ b/compute/ARMComputeEx/src/runtime/CPP/functions/CPPOneHotEx.cpp @@ -41,14 +41,14 @@ #include "arm_compute/runtime/CPP/functions/CPPOneHotEx.h" #include "arm_compute/core/CPP/kernels/CPPOneHotKernelEx.h" -#include "support/ToolchainSupport.h" +#include "support/MemorySupport.h" using namespace arm_compute; void CPPOneHotEx::configure(const ITensor *indices, const ITensor *depth, const ITensor *on_value, const ITensor *off_value, ITensor *output, const int axis) { - auto k = arm_compute::support::cpp14::make_unique(); + auto k = support::cpp14::make_unique(); k->configure(indices, depth, on_value, off_value, output, axis); _kernel = std::move(k); } diff --git a/compute/ARMComputeEx/src/runtime/CPP/functions/CPPUpsampleEx.cpp b/compute/ARMComputeEx/src/runtime/CPP/functions/CPPUpsampleEx.cpp deleted file mode 100644 index 6c90ef3..0000000 --- a/compute/ARMComputeEx/src/runtime/CPP/functions/CPPUpsampleEx.cpp +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * Copyright (c) 2017-2019 ARM Limited. 
- * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#include "arm_compute/runtime/CPP/functions/CPPUpsampleEx.h" - -#include "arm_compute/core/CPP/kernels/CPPUpsampleKernelEx.h" -#include "support/ToolchainSupport.h" - -using namespace arm_compute; - -void CPPUpsampleEx::configure(const ITensor *input, ITensor *output, const PadStrideInfo &info) -{ - auto k = arm_compute::support::cpp14::make_unique(); - k->configure(input, output, info); - _kernel = std::move(k); -} diff --git a/compute/ARMComputeEx/src/runtime/NEON/functions/NEActivationLayerEx.cpp b/compute/ARMComputeEx/src/runtime/NEON/functions/NEActivationLayerEx.cpp index ff81ff8..2752eb6 100644 --- a/compute/ARMComputeEx/src/runtime/NEON/functions/NEActivationLayerEx.cpp +++ b/compute/ARMComputeEx/src/runtime/NEON/functions/NEActivationLayerEx.cpp @@ -42,7 +42,7 @@ #include "arm_compute/core/NEON/kernels/NEActivationLayerKernelEx.h" #include "arm_compute/runtime/IRuntimeContext.h" -#include "support/ToolchainSupport.h" +#include "support/MemorySupport.h" namespace arm_compute { @@ -53,7 +53,7 @@ NEActivationLayerEx::NEActivationLayerEx(IRuntimeContext *ctx) // NOLINT void NEActivationLayerEx::configure(ITensor *input, ITensor *output, ActivationLayerInfo activation_info) { - auto k = arm_compute::support::cpp14::make_unique(); + auto k = support::cpp14::make_unique(); k->configure(input, output, activation_info); _kernel = std::move(k); } diff --git a/compute/ARMComputeEx/src/runtime/NEON/functions/NEBinaryLogicalOperation.cpp b/compute/ARMComputeEx/src/runtime/NEON/functions/NEBinaryLogicalOperation.cpp index e42c453..2fc94b2 100644 --- a/compute/ARMComputeEx/src/runtime/NEON/functions/NEBinaryLogicalOperation.cpp +++ b/compute/ARMComputeEx/src/runtime/NEON/functions/NEBinaryLogicalOperation.cpp @@ -42,7 +42,7 @@ #include #include "arm_compute/core/ITensor.h" -#include "support/ToolchainSupport.h" +#include "support/MemorySupport.h" #include @@ -53,7 +53,7 @@ template void NEBinaryLogicalOperationStatic::configure(ITensor *input1, ITensor *input2, ITensor *output) { - auto k = arm_compute::support::cpp14::make_unique(); + auto k = support::cpp14::make_unique(); k->configure(COP, input1, input2, output); _kernel = std::move(k); } @@ -69,7 +69,7 @@ Status NEBinaryLogicalOperationStatic::validate(const ITensorInfo *input1, void NEBinaryLogicalOperation::configure(ITensor *input1, ITensor *input2, ITensor *output, BinaryLogicalOperation op) { - auto k = 
arm_compute::support::cpp14::make_unique(); + auto k = support::cpp14::make_unique(); k->configure(op, input1, input2, output); _kernel = std::move(k); } diff --git a/compute/ARMComputeEx/src/runtime/NEON/functions/NECast.cpp b/compute/ARMComputeEx/src/runtime/NEON/functions/NECast.cpp deleted file mode 100644 index dc5c620..0000000 --- a/compute/ARMComputeEx/src/runtime/NEON/functions/NECast.cpp +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * Copyright (c) 2017-2019 ARM Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#include "arm_compute/runtime/NEON/functions/NECast.h" - -#include "arm_compute/core/NEON/kernels/NECastKernel.h" -#include "support/ToolchainSupport.h" - -namespace arm_compute -{ -void NECast::configure(const ITensor *input, ITensor *output, SubDataType input_subtype) -{ - auto k = arm_compute::support::cpp14::make_unique(); - k->configure(input, output, input_subtype); - _kernel = std::move(k); -} - -Status NECast::validate(const ITensorInfo *input, const ITensorInfo *output, - SubDataType input_subtype) -{ - return NECastKernel::validate(input, output, input_subtype); -} -} // namespace arm_compute diff --git a/compute/ARMComputeEx/src/runtime/NEON/functions/NEDepthToSpaceLayerEx.cpp b/compute/ARMComputeEx/src/runtime/NEON/functions/NEDepthToSpaceLayerEx.cpp deleted file mode 100644 index 5ec0b86..0000000 --- a/compute/ARMComputeEx/src/runtime/NEON/functions/NEDepthToSpaceLayerEx.cpp +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * Copyright (c) 2019 ARM Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#include "arm_compute/runtime/NEON/functions/NEDepthToSpaceLayerEx.h" - -#include "arm_compute/core/Error.h" -#include "arm_compute/core/NEON/kernels/NEDepthToSpaceLayerKernelEx.h" -#include "arm_compute/core/TensorInfo.h" -#include "arm_compute/core/Types.h" -#include "arm_compute/core/Validate.h" - -namespace arm_compute -{ -void NEDepthToSpaceLayerEx::configure(const ITensor *input, ITensor *output, int32_t block_shape) -{ - auto k = arm_compute::support::cpp14::make_unique(); - k->configure(input, output, block_shape); - _kernel = std::move(k); -} - -Status NEDepthToSpaceLayerEx::validate(const ITensorInfo *input, const ITensorInfo *output, - int32_t block_shape) -{ - return NEDepthToSpaceLayerKernelEx::validate(input, output, block_shape); -} -} // namespace arm_compute diff --git a/compute/ARMComputeEx/src/runtime/NEON/functions/NEEmbeddingLookup.cpp b/compute/ARMComputeEx/src/runtime/NEON/functions/NEEmbeddingLookup.cpp index 53fb150..e0ab3e0 100644 --- a/compute/ARMComputeEx/src/runtime/NEON/functions/NEEmbeddingLookup.cpp +++ b/compute/ARMComputeEx/src/runtime/NEON/functions/NEEmbeddingLookup.cpp @@ -41,13 +41,13 @@ #include "arm_compute/runtime/NEON/functions/NEEmbeddingLookup.h" #include "arm_compute/core/NEON/kernels/NEEmbeddingLookupKernel.h" -#include "support/ToolchainSupport.h" +#include "support/MemorySupport.h" using namespace arm_compute; void NEEmbeddingLookup::configure(const ITensor *input, ITensor *output, const ITensor *lookups) { - auto k = arm_compute::support::cpp14::make_unique(); + auto k = support::cpp14::make_unique(); k->configure(input, output, lookups); _kernel = std::move(k); } diff --git a/compute/ARMComputeEx/src/runtime/NEON/functions/NEFullyConnectedHybridLayer.cpp b/compute/ARMComputeEx/src/runtime/NEON/functions/NEFullyConnectedHybridLayer.cpp index f457732..a123439 100644 --- a/compute/ARMComputeEx/src/runtime/NEON/functions/NEFullyConnectedHybridLayer.cpp +++ 
b/compute/ARMComputeEx/src/runtime/NEON/functions/NEFullyConnectedHybridLayer.cpp
@@ -58,7 +58,7 @@ namespace
 Status validate_mm(const ITensorInfo &input, const ITensorInfo &weights, const ITensorInfo &output)
 {
   ARM_COMPUTE_RETURN_ON_ERROR(
-      NEGEMMLowpMatrixMultiplyCoreEx::validate(&input, &weights, nullptr, &output));
+      NEGEMMLowpMatrixMultiplyCore::validate(&input, &weights, nullptr, &output));
 
   return Status{};
 }
@@ -66,7 +66,7 @@ Status validate_mm(const ITensorInfo &input, const ITensorInfo &weights, const I
 
 void NEFullyConnectedHybridLayerReshapeWeights::configure(const ITensor *input, ITensor *output)
 {
-  auto k = arm_compute::support::cpp14::make_unique();
+  auto k = support::cpp14::make_unique();
   k->configure(input, output);
   _kernel = std::move(k);
 }
@@ -158,7 +158,8 @@ void NEFullyConnectedHybridLayer::configure(const ITensor *input, const ITensor
 
   // Quantize input
   _quantized_input.allocator()->init(
-      input->info()->clone()->set_is_resizable(true).reset_padding().set_data_type(DataType::S8));
+      input->info()->clone()->set_is_resizable(true).reset_padding().set_data_type(
+          DataType::QASYMM8_SIGNED));
   _scale_factor.allocator()->init(
       TensorInfo(TensorShape{output->info()->dimension(1)}, 1, DataType::F32));
   _quant_input_kernel.configure(input, &_quantized_input, &_scale_factor);
@@ -186,7 +187,7 @@ Status NEFullyConnectedHybridLayer::validate(const ITensorInfo *input, const ITe
   ARM_COMPUTE_UNUSED(fc_info.retain_internal_weights);
   ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, weights, output);
   ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F16, DataType::F32);
-  ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(weights, 1, DataType::S8);
+  ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(weights, 1, DataType::QASYMM8_SIGNED);
   ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
   ARM_COMPUTE_RETURN_ERROR_ON(weights->num_dimensions() > 2);
   ARM_COMPUTE_RETURN_ERROR_ON(output->num_dimensions() > 2);
@@ -224,8 +225,9 @@ Status NEFullyConnectedHybridLayer::validate(const ITensorInfo *input, const ITe
   ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(0) != weights_to_use->dimension(1));
 
   // Validate quantization kernel
-  const ITensorInfo &quantized_input = TensorInfo(
-      input->clone()->set_is_resizable(true).reset_padding().set_data_type(DataType::S8));
+  const ITensorInfo &quantized_input =
+      TensorInfo(input->clone()->set_is_resizable(true).reset_padding().set_data_type(
+          DataType::QASYMM8_SIGNED));
   const ITensorInfo &scale_factor = TensorInfo(TensorShape{output->dimension(1)}, 1, DataType::F32);
   ARM_COMPUTE_RETURN_ON_ERROR(
       NEQuantizationSymmetricKernel::validate(input, &quantized_input, &scale_factor));
diff --git a/compute/ARMComputeEx/src/runtime/NEON/functions/NEFullyConnectedReshapingLayer.cpp b/compute/ARMComputeEx/src/runtime/NEON/functions/NEFullyConnectedReshapingLayer.cpp
index fcac3c7..dc6c784 100644
--- a/compute/ARMComputeEx/src/runtime/NEON/functions/NEFullyConnectedReshapingLayer.cpp
+++ b/compute/ARMComputeEx/src/runtime/NEON/functions/NEFullyConnectedReshapingLayer.cpp
@@ -56,12 +56,17 @@ void NEFullyConnectedReshapingLayer::configure(const arm_compute::ITensor *input
       assert(kernel_type == KernelType::PREPROCESSED_WEIGHTS);
 
       bool is_hybrid = input->info()->data_type() == DataType::F32 &&
-                       weights->info()->data_type() == DataType::S8;
+                       (weights->info()->data_type() == DataType::S8 ||
+                        weights->info()->data_type() == DataType::QASYMM8_SIGNED);
 
       if (is_hybrid)
       {
         auto fc = new
arm_compute::NEFullyConnectedHybridLayer{_memory_manager}; + ITensorInfo *weights_info = const_cast(_weights->info()); + const auto orgin_weights_data_type = weights_info->data_type(); + weights_info->set_data_type(DataType::QASYMM8_SIGNED); fc->configure(input_to_use, _weights, _biases, _output); + weights_info->set_data_type(orgin_weights_data_type); return std::unique_ptr(fc); } else diff --git a/compute/ARMComputeEx/src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCoreEx.cpp b/compute/ARMComputeEx/src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCoreEx.cpp deleted file mode 100644 index 1290cfd..0000000 --- a/compute/ARMComputeEx/src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCoreEx.cpp +++ /dev/null @@ -1,513 +0,0 @@ -/* - * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * Copyright (c) 2017-2019 ARM Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - -#include "arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCoreEx.h" - -#include "arm_compute/core/Error.h" -#include "arm_compute/core/Helpers.h" -#include "arm_compute/core/ITensor.h" -#include "arm_compute/core/NEON/kernels/NEGEMMInterleave4x4Kernel.h" -#include "arm_compute/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.h" -#include "arm_compute/core/NEON/kernels/NEGEMMTranspose1xWKernel.h" -#include "arm_compute/core/TensorInfo.h" -#include "arm_compute/core/Types.h" -#include "arm_compute/core/Validate.h" -#include "arm_compute/core/utils/misc/ShapeCalculator.h" -#include "arm_compute/runtime/NEON/NEScheduler.h" -#include "arm_compute/runtime/TensorAllocator.h" -#include "support/ToolchainSupport.h" - -using namespace arm_compute; -using namespace arm_compute::misc::shape_calculator; - -NEGEMMLowpMatrixMultiplyCoreEx::NEGEMMLowpMatrixMultiplyCoreEx( - std::shared_ptr memory_manager) - : _memory_group(memory_manager), _asm_glue(memory_manager), _mm_kernel(nullptr), - _mtx_a_reshape_kernel(nullptr), _mtx_b_reshape_kernel(nullptr), _mtx_a_reduction_kernel(), - _mtx_b_reduction_kernel(), _offset_contribution_kernel(), - _offset_contribution_output_stage_kernel(), _vector_sum_col(), _vector_sum_row(), _tmp_a(), - _tmp_b(), _mm_result_s32(), _signed_a(), _signed_output(), _original_b(nullptr), _a_offset(0), - _b_offset(0), _run_vector_matrix_multiplication(false), _assembly_path(false), - _fused_assembly_path(false), _reshape_b_only_on_first_run(false), _is_prepared(false), - _fuse_output_stage(false), _flip_signedness(false) -{ -} - -void NEGEMMLowpMatrixMultiplyCoreEx::configure(const ITensor *a, const ITensor *b, const ITensor *c, - ITensor *output, const GEMMInfo &gemm_info) -{ - ARM_COMPUTE_ERROR_ON_NULLPTR(a, b, output); - ARM_COMPUTE_UNUSED(c); - ARM_COMPUTE_ERROR_THROW_ON(NEGEMMLowpMatrixMultiplyCoreEx::validate( - a->info(), b->info(), c != nullptr ? c->info() : nullptr, output->info(), gemm_info)); - - const ITensor *matrix_a = a; - const ITensor *matrix_b = b; - GEMMInfo info = gemm_info; - - // Clear state - _mtx_a_reshape_kernel = nullptr; - _mtx_b_reshape_kernel = nullptr; - - // Set internal variables - _a_offset = a->info()->quantization_info().uniform().offset; - _b_offset = b->info()->quantization_info().uniform().offset; - _run_vector_matrix_multiplication = a->info()->dimension(1) < 2; - _reshape_b_only_on_first_run = info.reshape_b_only_on_first_run(); - _is_prepared = false; - _fused_assembly_path = false; - _original_b = b; - - const ITensor *a_to_use = a; - - // If GEMMLowpOutputStage != NONE, fuse the offset contribution with the output stage - if (info.gemmlowp_output_stage().type != GEMMLowpOutputStageType::NONE) - { - _fuse_output_stage = true; - _memory_group.manage(&_mm_result_s32); - TensorInfo info_mm_result_s32(output->info()->tensor_shape(), 1, DataType::S32); - _mm_result_s32.allocator()->init(info_mm_result_s32); - } - -#ifdef __aarch64__ - switch (a->info()->data_type()) - { - case DataType::QASYMM8: - case DataType::QASYMM8_SIGNED: - case DataType::U8: - case DataType::S8: - { - if (a_to_use->info()->data_type() == DataType::QASYMM8 && - info.gemmlowp_output_stage().type == GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT) - { - _asm_glue.configure(a_to_use, b, c, output, gemm_info); - _fused_assembly_path = _asm_glue.is_configured(); - } - else - { - _asm_glue.configure(a_to_use, b, nullptr, _fuse_output_stage ? 
&_mm_result_s32 : output, - gemm_info); - } - _assembly_path = _asm_glue.is_configured(); - break; - } - default: - { - ARM_COMPUTE_ERROR("Datatype not supported"); - break; - } - } -#endif /* __aarch64__ */ - if (!(_assembly_path || _run_vector_matrix_multiplication)) - { - matrix_a = &_tmp_a; - matrix_b = &_tmp_b; - - // The interleaved output matrix will have the following shape: [ a_height * 4, ceil(a_width / - // 4.0f) ] - TensorInfo a_info(compute_interleaved_shape(*a_to_use->info()), 1, - a_to_use->info()->data_type(), a_to_use->info()->quantization_info()); - // The transpose1xW output matrix will have the following shape: [ b_height * 16, ceil(b_width / - // 16.0f) ] - TensorInfo b_info(compute_transpose1xW_shape(*b->info()), 1, b->info()->data_type(), - b->info()->quantization_info()); - _tmp_a.allocator()->init(a_info); - _tmp_b.allocator()->init(b_info); - _memory_group.manage(&_tmp_a); - if (!_reshape_b_only_on_first_run) - { - _memory_group.manage(&_tmp_b); - } - - // Configure interleave kernel - { - auto k = arm_compute::support::cpp14::make_unique(); - k->configure(a_to_use, &_tmp_a); - _mtx_a_reshape_kernel = std::move(k); - } - - // Configure transpose kernel - { - auto k = arm_compute::support::cpp14::make_unique(); - k->configure(b, &_tmp_b); - _mtx_b_reshape_kernel = std::move(k); - } - } - - if (!_fused_assembly_path) - { - // Initialize matrix B reduction kernel only if _a_offset is not equal to 0 - if (_a_offset != 0) - { - TensorInfo info_vector_sum_col(compute_reductionA_shape(*b->info()), 1, DataType::S32); - - _vector_sum_col.allocator()->init(info_vector_sum_col); - if (!_reshape_b_only_on_first_run) - { - _memory_group.manage(&_vector_sum_col); - } - - // Configure Matrix B reduction kernel - _mtx_b_reduction_kernel.configure(b, &_vector_sum_col, a_to_use->info()->dimension(0), false); - } - - // Initialize Matrix A reduction kernel only if _b_offset is not equal to 0 - if (_b_offset != 0) - { - TensorInfo info_vector_sum_row(compute_reductionB_shape(*a_to_use->info()), 1, DataType::S32); - - _vector_sum_row.allocator()->init(info_vector_sum_row); - _memory_group.manage(&_vector_sum_row); - - // Configure matrix A reduction kernel - _mtx_a_reduction_kernel.configure(a_to_use, &_vector_sum_row, a_to_use->info()->dimension(0), - false); - } - - if (_fuse_output_stage) - { - // Configure matrix multiply kernel - if (!_assembly_path) - { - auto k = arm_compute::support::cpp14::make_unique(); - k->configure(matrix_a, matrix_b, &_mm_result_s32); - _mm_kernel = std::move(k); - } - - _offset_contribution_output_stage_kernel.configure( - &_mm_result_s32, _a_offset == 0 ? nullptr : &_vector_sum_col, - _b_offset == 0 ? nullptr : &_vector_sum_row, c, - _flip_signedness ? &_signed_output : output, a->info()->dimension(0), _a_offset, - _b_offset, info.gemmlowp_output_stage()); - } - else - { - // Configure matrix multiply kernel - if (!_assembly_path) - { - auto k = arm_compute::support::cpp14::make_unique(); - k->configure(matrix_a, matrix_b, output); - _mm_kernel = std::move(k); - } - // Configure offset contribution kernel - _offset_contribution_kernel.configure(output, _a_offset == 0 ? nullptr : &_vector_sum_col, - _b_offset == 0 ? 
nullptr : &_vector_sum_row, - a_to_use->info()->dimension(0), _a_offset, _b_offset); - } - } - - // Allocate tensors - if (!_assembly_path && !_run_vector_matrix_multiplication) - { - _tmp_a.allocator()->allocate(); - if (!_reshape_b_only_on_first_run) - { - _tmp_b.allocator()->allocate(); - } - } - - if (!_fused_assembly_path) - { - if (_a_offset != 0 && !_reshape_b_only_on_first_run) - { - _vector_sum_col.allocator()->allocate(); - } - - if (_b_offset != 0) - { - _vector_sum_row.allocator()->allocate(); - } - } - - if (_fuse_output_stage) - { - _mm_result_s32.allocator()->allocate(); - } -} - -Status NEGEMMLowpMatrixMultiplyCoreEx::validate(const ITensorInfo *a, const ITensorInfo *b, - const ITensorInfo *c, const ITensorInfo *output, - const GEMMInfo &gemm_info) -{ - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(a, 1, DataType::S8); - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(b, 1, DataType::S8); - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::S32); - ARM_COMPUTE_RETURN_ERROR_ON_MSG( - c != nullptr && gemm_info.gemmlowp_output_stage().type == GEMMLowpOutputStageType::NONE, - "Bias addition not supported in NEGEMMLowpMatrixMultiplyCoreEx for output S32"); - ARM_COMPUTE_RETURN_ERROR_ON_MSG((a)->dimension(0) != (b)->dimension(1), - "The product AB is defined only if the number of columns in A is " - "equal to the number of rows in B"); - ARM_COMPUTE_RETURN_ERROR_ON_MSG(gemm_info.is_a_reshaped(), - "Matrix A already reshaped is not supported"); - ARM_COMPUTE_RETURN_ERROR_ON_MSG(gemm_info.is_b_reshaped(), - "Matrix B already reshaped is not supported"); - - GEMMInfo info = gemm_info; - const ITensorInfo *matrix_a_info = a; - const ITensorInfo *matrix_b_info = b; - - const ITensorInfo *a_to_use = a; - - TensorInfo tmp_a_info{}; - TensorInfo tmp_b_info{}; - TensorInfo mm_result_s32_info{}; - - int32_t a_offset = a->quantization_info().uniform().offset; - int32_t b_offset = b->quantization_info().uniform().offset; - - bool fuse_output_stage = info.gemmlowp_output_stage().type != GEMMLowpOutputStageType::NONE; - if (fuse_output_stage) - { - auto_init_if_empty( - mm_result_s32_info, - a->clone()->set_tensor_shape(output->tensor_shape()).set_data_type(DataType::S32)); - } - - // Check if we need to run the optimized assembly kernel - bool run_optimised = false; - bool run_optimised_requantized = false; - if (a_to_use->data_type() == DataType::QASYMM8 && - info.gemmlowp_output_stage().type == GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT) - { - run_optimised = bool(NEGEMMAssemblyDispatch::validate(a_to_use, b, c, output, gemm_info)); - run_optimised_requantized = run_optimised; - } - else - { - run_optimised = bool(NEGEMMAssemblyDispatch::validate( - a_to_use, b, c, fuse_output_stage ? 
&mm_result_s32_info : output, gemm_info)); - } - - if (run_optimised) - { - ARM_COMPUTE_RETURN_ERROR_ON(b->dimension(0) != output->dimension(0)); - if (info.depth_output_gemm3d() != 0) - { - if (info.reinterpret_input_as_3d()) - { - ARM_COMPUTE_RETURN_ERROR_ON(a->dimension(1) != output->dimension(1)); - ARM_COMPUTE_RETURN_ERROR_ON(a->dimension(2) != output->dimension(2)); - } - else - { - ARM_COMPUTE_RETURN_ERROR_ON(a->dimension(1) != output->dimension(1) * output->dimension(2)); - } - } - else - { - ARM_COMPUTE_RETURN_ERROR_ON(a->dimension(1) != output->dimension(1)); - } - } - else - { - ARM_COMPUTE_RETURN_ERROR_ON_MSG(info.reinterpret_input_as_3d(), - "NEGEMM cannot reinterpret the input tensor as 3D"); - ARM_COMPUTE_RETURN_ERROR_ON_MSG(info.depth_output_gemm3d() != 0, - "NEGEMM cannot reinterpret the output tensor as 3D"); - - const bool run_vector_matrix_multiplication = a->dimension(1) < 2; - if (!run_vector_matrix_multiplication) - { - matrix_a_info = &tmp_a_info; - matrix_b_info = &tmp_b_info; - - // The interleaved output matrix will have the following shape: [ a_height * 4, ceil(a_width / - // 4.0f) ] - TensorShape shape_tmp_a = a->tensor_shape(); - shape_tmp_a.set(0, a->dimension(0) * 4); - shape_tmp_a.set(1, std::ceil(a->dimension(1) / 4.f)); - - // The transpose1xW output matrix will have the following shape: [ b_height * 16, ceil(b_width - // / 16.0f) ] - TensorShape shape_tmp_b = b->tensor_shape(); - shape_tmp_b.set(0, b->dimension(1) * 16); - shape_tmp_b.set(1, std::ceil(b->dimension(0) / 16.f)); - - // Validate interleave kernel - auto_init_if_empty(tmp_a_info, a_to_use->clone()->set_tensor_shape(shape_tmp_a)); - auto_init_if_empty(tmp_b_info, b->clone()->set_tensor_shape(shape_tmp_b)); - - ARM_COMPUTE_RETURN_ON_ERROR(NEGEMMInterleave4x4Kernel::validate(a_to_use, &tmp_a_info)); - ARM_COMPUTE_RETURN_ON_ERROR(NEGEMMTranspose1xWKernel::validate(b, &tmp_b_info)); - } - } - - if (!run_optimised_requantized) - { - TensorInfo info_vector_sum_col{}; - TensorInfo info_vector_sum_row{}; - - // Validate matrix B reduction kernel only if _a_offset is not equal to 0 - if (a_offset != 0) - { - info_vector_sum_col = TensorInfo(compute_reductionA_shape(*b), 1, DataType::S32); - - // Configure Matrix B reduction kernel - ARM_COMPUTE_RETURN_ON_ERROR(NEGEMMLowpMatrixBReductionKernel::validate( - b, &info_vector_sum_col, a->dimension(0), false)); - } - - // Validate Matrix A reduction kernel only if _b_offset is not equal to 0 - if (b_offset != 0) - { - info_vector_sum_row = TensorInfo(compute_reductionB_shape(*a), 1, DataType::S32); - - // Configure matrix A reduction kernel - ARM_COMPUTE_RETURN_ON_ERROR(NEGEMMLowpMatrixAReductionKernel::validate( - a_to_use, &info_vector_sum_row, a->dimension(0), false)); - } - - if (fuse_output_stage) - { - if (!run_optimised) - { - ARM_COMPUTE_RETURN_ON_ERROR(NEGEMMLowpMatrixMultiplyKernel::validate( - matrix_a_info, matrix_b_info, &mm_result_s32_info)); - } - - // Validate offset contribution kernel - ARM_COMPUTE_RETURN_ON_ERROR(NEGEMMLowpOffsetContributionOutputStageKernel::validate( - &mm_result_s32_info, a_offset == 0 ? nullptr : &info_vector_sum_col, - b_offset == 0 ? 
nullptr : &info_vector_sum_row, c, output, a_offset, b_offset, - info.gemmlowp_output_stage())); - } - else - { - if (!run_optimised) - { - ARM_COMPUTE_RETURN_ON_ERROR( - NEGEMMLowpMatrixMultiplyKernel::validate(matrix_a_info, matrix_b_info, output)); - } - // Validate offset contribution kernel - ARM_COMPUTE_RETURN_ON_ERROR(NEGEMMLowpOffsetContributionKernel::validate( - output, a_offset == 0 ? nullptr : &info_vector_sum_col, - b_offset == 0 ? nullptr : &info_vector_sum_row, a_offset, b_offset)); - } - } - return Status{}; -} - -void NEGEMMLowpMatrixMultiplyCoreEx::run() -{ - prepare(); - - MemoryGroupResourceScope scope_mg(_memory_group); - - // Reshape inputs - if (_mtx_a_reshape_kernel) - { - NEScheduler::get().schedule(_mtx_a_reshape_kernel.get(), Window::DimY); - } - if (_mtx_b_reshape_kernel && !_reshape_b_only_on_first_run) - { - NEScheduler::get().schedule(_mtx_b_reshape_kernel.get(), Window::DimY); - } - - // Run GEMM - if (_asm_glue.is_configured()) - { - _asm_glue.run(); - } - else - { - NEScheduler::get().schedule(_mm_kernel.get(), Window::DimY); - } - - if (!_fused_assembly_path) - { - // Run matrix A reduction kernel only if _b_offset is not equal to 0 - if (_b_offset != 0) - { - NEScheduler::get().schedule(&_mtx_a_reduction_kernel, Window::DimX); - } - - // Run matrix B reduction kernel only if _a_offset is not equal to 0 - if (_a_offset != 0 && !_reshape_b_only_on_first_run) - { - NEScheduler::get().schedule(&_mtx_b_reduction_kernel, Window::DimX); - } - - if (_fuse_output_stage) - { - // Run offset contribution kernel - NEScheduler::get().schedule(&_offset_contribution_output_stage_kernel, Window::DimY); - } - else - { - // Run offset contribution kernel - NEScheduler::get().schedule(&_offset_contribution_kernel, Window::DimY); - } - } -} - -void NEGEMMLowpMatrixMultiplyCoreEx::prepare() -{ - if (!_is_prepared) - { - // Run assembly reshape - if (_asm_glue.is_configured() && _reshape_b_only_on_first_run) - { - ARM_COMPUTE_ERROR_ON(!_original_b->is_used()); - - _asm_glue.prepare(); - _original_b->mark_as_unused(); - } - // Run non-assembly reshape - else if (_mtx_b_reshape_kernel && _reshape_b_only_on_first_run) - { - ARM_COMPUTE_ERROR_ON(!_original_b->is_used()); - - // Run reshape kernel and mark original weights tensor as unused - _tmp_b.allocator()->allocate(); - NEScheduler::get().schedule(_mtx_b_reshape_kernel.get(), Window::DimY); - _original_b->mark_as_unused(); - } - - // Run matrix B reduction kernel only if _a_offset is not equal to 0 - if (_a_offset != 0 && _reshape_b_only_on_first_run) - { - _vector_sum_col.allocator()->allocate(); - NEScheduler::get().schedule(&_mtx_b_reduction_kernel, Window::DimX); - } - - _is_prepared = true; - } -} diff --git a/compute/ARMComputeEx/src/runtime/NEON/functions/NEGatherEx.cpp b/compute/ARMComputeEx/src/runtime/NEON/functions/NEGatherEx.cpp index c8bb88a..433c35d 100644 --- a/compute/ARMComputeEx/src/runtime/NEON/functions/NEGatherEx.cpp +++ b/compute/ARMComputeEx/src/runtime/NEON/functions/NEGatherEx.cpp @@ -41,7 +41,7 @@ #include "arm_compute/runtime/NEON/functions/NEGatherEx.h" #include "arm_compute/core/NEON/kernels/NEGatherKernelEx.h" -#include "support/ToolchainSupport.h" +#include "support/MemorySupport.h" #include @@ -49,7 +49,7 @@ namespace arm_compute { void NEGatherEx::configure(const ITensor *input, const ITensor *indices, ITensor *output, int axis) { - auto k = arm_compute::support::cpp14::make_unique(); + auto k = support::cpp14::make_unique(); k->configure(input, indices, output, axis); _kernel = std::move(k); 
} diff --git a/compute/ARMComputeEx/src/runtime/NEON/functions/NEHashtableLookup.cpp b/compute/ARMComputeEx/src/runtime/NEON/functions/NEHashtableLookup.cpp index 078019f..52d58ac 100644 --- a/compute/ARMComputeEx/src/runtime/NEON/functions/NEHashtableLookup.cpp +++ b/compute/ARMComputeEx/src/runtime/NEON/functions/NEHashtableLookup.cpp @@ -41,14 +41,14 @@ #include "arm_compute/runtime/NEON/functions/NEHashtableLookup.h" #include "arm_compute/core/NEON/kernels/NEHashtableLookupKernel.h" -#include "support/ToolchainSupport.h" +#include "support/MemorySupport.h" using namespace arm_compute; void NEHashtableLookup::configure(const ITensor *lookups, const ITensor *keys, const ITensor *input, ITensor *output, ITensor *hits) { - auto k = arm_compute::support::cpp14::make_unique(); + auto k = support::cpp14::make_unique(); k->configure(lookups, keys, input, output, hits); _kernel = std::move(k); } diff --git a/compute/ARMComputeEx/src/runtime/NEON/functions/NEPReLU.cpp b/compute/ARMComputeEx/src/runtime/NEON/functions/NEPReLU.cpp deleted file mode 100644 index dac3b84..0000000 --- a/compute/ARMComputeEx/src/runtime/NEON/functions/NEPReLU.cpp +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * Copyright (c) 2018-2019 ARM Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - -#include "arm_compute/runtime/NEON/functions/NEPReLU.h" - -#include "arm_compute/core/NEON/kernels/NEPReLUKernel.h" -#include "support/ToolchainSupport.h" - -#include - -using namespace arm_compute; - -void NEPReLU::configure(const ITensor *input, const ITensor *alpha, ITensor *output) -{ - auto k = arm_compute::support::cpp14::make_unique(); - k->configure(input, alpha, output); - _kernel = std::move(k); -} diff --git a/compute/ARMComputeEx/src/runtime/NEON/functions/NERNNLayerEx.cpp b/compute/ARMComputeEx/src/runtime/NEON/functions/NERNNLayerEx.cpp deleted file mode 100644 index 0e9a5e9..0000000 --- a/compute/ARMComputeEx/src/runtime/NEON/functions/NERNNLayerEx.cpp +++ /dev/null @@ -1,161 +0,0 @@ -/* - * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * Copyright (c) 2018-2019 ARM Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - -#include "arm_compute/runtime/NEON/functions/NERNNLayerEx.h" - -#include "arm_compute/core/Error.h" -#include "arm_compute/core/TensorInfo.h" -#include "arm_compute/core/Types.h" -#include "arm_compute/core/Validate.h" -#include "arm_compute/core/utils/misc/ShapeCalculator.h" -#include "arm_compute/runtime/NEON/NEScheduler.h" - -namespace arm_compute -{ -NERNNLayerEx::NERNNLayerEx(std::shared_ptr memory_manager) - : _memory_group(std::move(memory_manager)), _gemm_state_f(), _add_kernel(), - _activation_kernel(), _fully_connected_kernel(), _copy_kernel(), _fully_connected_out(), - _gemm_output(), _add_output(), _is_prepared(false) -{ -} - -Status NERNNLayerEx::validate(const ITensorInfo *input, const ITensorInfo *weights, - const ITensorInfo *recurrent_weights, const ITensorInfo *bias, - const ITensorInfo *hidden_state, const ITensorInfo *output, - const ActivationLayerInfo &info) -{ - ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, weights, recurrent_weights, bias, hidden_state, - output); - - const int idx_width = 0; - const int idx_height = 1; - ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(idx_width) != weights->dimension(idx_width)); - ARM_COMPUTE_RETURN_ERROR_ON(weights->dimension(idx_height) != - recurrent_weights->dimension(idx_width)); - ARM_COMPUTE_RETURN_ERROR_ON(recurrent_weights->dimension(idx_width) != - recurrent_weights->dimension(idx_height)); - ARM_COMPUTE_RETURN_ERROR_ON(bias->num_dimensions() != 1); - ARM_COMPUTE_RETURN_ERROR_ON(bias->dimension(idx_width) != weights->dimension(idx_height)); - ARM_COMPUTE_RETURN_ERROR_ON(hidden_state->dimension(idx_width) != weights->dimension(idx_height)); - ARM_COMPUTE_RETURN_ERROR_ON(hidden_state->dimension(idx_height) != input->dimension(idx_height)); - ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output->tensor_shape(), - hidden_state->tensor_shape()); - - auto shape_info = TensorInfo(misc::shape_calculator::compute_rnn_shape( - recurrent_weights, hidden_state->dimension(idx_height)), - 1, input->data_type()); - - ARM_COMPUTE_RETURN_ON_ERROR(NEFullyConnectedLayer::validate(input, weights, bias, &shape_info)); - ARM_COMPUTE_RETURN_ON_ERROR(NEArithmeticAdditionKernel::validate( - &shape_info, &shape_info, &shape_info, ConvertPolicy::SATURATE)); - ARM_COMPUTE_RETURN_ON_ERROR(NEActivationLayerKernel::validate(&shape_info, &shape_info, info)); - - return Status{}; -} - -void NERNNLayerEx::configure(const ITensor *input, const ITensor *weights, - const ITensor *recurrent_weights, const ITensor *bias, - ITensor *hidden_state, ITensor *output, ActivationLayerInfo &info) -{ - ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, recurrent_weights, bias, hidden_state, output); - ARM_COMPUTE_ERROR_THROW_ON(NERNNLayerEx::validate(input->info(), weights->info(), - recurrent_weights->info(), bias->info(), - hidden_state->info(), output->info(), info)); - - const int idx_height = 1; - TensorShape shape = misc::shape_calculator::compute_rnn_shape( - recurrent_weights->info(), hidden_state->info()->dimension(idx_height)); - - _is_prepared = false; - - // Manage intermediate buffers and configure - _fully_connected_out.allocator()->init(TensorInfo(shape, 1, input->info()->data_type())); - _gemm_output.allocator()->init(TensorInfo(shape, 1, input->info()->data_type())); - - // Manage intermediate buffers and configure - _memory_group.manage(&_fully_connected_out); - _fully_connected_kernel.configure(input, weights, bias, &_fully_connected_out); - - _memory_group.manage(&_gemm_output); - _gemm_state_f.configure(hidden_state, recurrent_weights, nullptr, 
&_gemm_output, 1.f, 0.f); - - _add_output.allocator()->init(TensorInfo(shape, 1, input->info()->data_type())); - _memory_group.manage(&_add_output); - - _add_kernel.configure(&_fully_connected_out, &_gemm_output, &_add_output, - ConvertPolicy::SATURATE); - - _fully_connected_out.allocator()->allocate(); - _gemm_output.allocator()->allocate(); - - _activation_kernel.configure(&_add_output, hidden_state, info); - _add_output.allocator()->allocate(); - - _copy_kernel.configure(hidden_state, output); -} - -void NERNNLayerEx::run() -{ - prepare(); - - MemoryGroupResourceScope scope_mg(_memory_group); - - _fully_connected_kernel.run(); - - _gemm_state_f.run(); - - NEScheduler::get().schedule(&_add_kernel, Window::DimY); - NEScheduler::get().schedule(&_activation_kernel, Window::DimY); - - // copy hidden out to output - NEScheduler::get().schedule(&_copy_kernel, Window::DimY); -} - -void NERNNLayerEx::prepare() -{ - if (!_is_prepared) - { - _fully_connected_kernel.prepare(); - _gemm_state_f.prepare(); - - _is_prepared = true; - } -} -} // namespace arm_compute diff --git a/compute/ARMComputeEx/src/runtime/NEON/functions/NEReduceMeanEx.cpp b/compute/ARMComputeEx/src/runtime/NEON/functions/NEReduceMeanEx.cpp deleted file mode 100644 index 116bba3..0000000 --- a/compute/ARMComputeEx/src/runtime/NEON/functions/NEReduceMeanEx.cpp +++ /dev/null @@ -1,180 +0,0 @@ -/* - * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * Copyright (c) 2018-2019 ARM Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - -#include "arm_compute/runtime/NEON/functions/NEReduceMeanEx.h" - -#include "arm_compute/core/Helpers.h" -#include "arm_compute/runtime/NEON/NEScheduler.h" - -using namespace arm_compute; - -NEReduceMeanEx::NEReduceMeanEx(std::shared_ptr memory_manager) - : _memory_group(std::move(memory_manager)), _reduction_kernels(), _reduced_outs(), _reshape(), - _reduction_ops(), _keep_dims() -{ -} - -Status NEReduceMeanEx::validate(const ITensorInfo *input, const Coordinates &reduction_axis, - bool keep_dims, const ITensorInfo *output) -{ - ARM_COMPUTE_UNUSED(keep_dims); - ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input); - ARM_COMPUTE_RETURN_ERROR_ON(reduction_axis.num_dimensions() > input->num_dimensions()); - - TensorShape out_shape = input->tensor_shape(); - const unsigned int reduction_ops = reduction_axis.num_dimensions(); - const int input_dims = input->num_dimensions(); - Coordinates axis_local = reduction_axis; - - // Convert negative axis - for (unsigned int i = 0; i < reduction_ops; ++i) - { - axis_local[i] = wrap_around(axis_local[i], input_dims); - } - - std::sort(axis_local.begin(), axis_local.begin() + reduction_ops); - for (unsigned int i = 0; i < reduction_ops; ++i) - { - ARM_COMPUTE_RETURN_ERROR_ON(axis_local[i] > 3); - ARM_COMPUTE_RETURN_ERROR_ON(static_cast(axis_local[i]) > - input->num_dimensions() - 1); - if (output->total_size() > 0 && keep_dims) - { - ARM_COMPUTE_RETURN_ERROR_ON(output->dimension(axis_local[i]) != 1); - } - if (keep_dims) - { - out_shape.set(axis_local[i], 1); - } - else - { - out_shape.remove_dimension(axis_local[i] - i); - } - } - const TensorInfo out_info = input->clone()->set_tensor_shape(out_shape); - ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(output, &out_info); - - return Status{}; -} - -void NEReduceMeanEx::configure(ITensor *input, const Coordinates &reduction_axis, bool keep_dims, - ITensor *output) -{ - ARM_COMPUTE_ERROR_ON_NULLPTR(input); - - _reduction_ops = reduction_axis.num_dimensions(); - _reduction_kernels = - arm_compute::support::cpp14::make_unique(_reduction_ops); - _reduced_outs = - arm_compute::support::cpp14::make_unique(_reduction_ops - (keep_dims ? 1 : 0)); - _keep_dims = keep_dims; - - Coordinates axis_local = reduction_axis; - const int input_dims = input->info()->num_dimensions(); - const unsigned int reduction_ops = reduction_axis.num_dimensions(); - - // Convert negative axis - for (unsigned int i = 0; i < reduction_ops; ++i) - { - axis_local[i] = wrap_around(axis_local[i], input_dims); - } - - // Perform reduction for every axis - for (unsigned int i = 0; i < _reduction_ops; ++i) - { - TensorShape out_shape = i == 0 ? input->info()->tensor_shape() - : (_reduced_outs.get() + i - 1)->info()->tensor_shape(); - out_shape.set(axis_local[i], 1); - auto in = (i == 0) ? input : (_reduced_outs.get() + i - 1); - - if (i == _reduction_ops - 1 && keep_dims) - { - _reduction_kernels[i].configure(in, output, axis_local[i], ReductionOperation::MEAN_SUM); - } - else - { - _reduced_outs[i].allocator()->init(TensorInfo(out_shape, input->info()->num_channels(), - input->info()->data_type(), - input->info()->quantization_info()) - .set_data_layout(output->info()->data_layout())); - _memory_group.manage(_reduced_outs.get() + i); - _reduction_kernels[i].configure(in, _reduced_outs.get() + i, axis_local[i], - ReductionOperation::MEAN_SUM); - } - } - - // Allocate intermediate tensors - for (unsigned int i = 0; i < _reduction_ops - (keep_dims ? 
1 : 0); ++i) - { - _reduced_outs[i].allocator()->allocate(); - } - - // Configure reshape layer if we want to drop the dimensions - if (!keep_dims) - { - TensorShape out_shape = input->info()->tensor_shape(); - - // We have to sort the reduction axis vectors in order for remove_dimension - // to work properly - std::sort(axis_local.begin(), axis_local.begin() + _reduction_ops); - for (unsigned int i = 0; i < _reduction_ops; ++i) - { - out_shape.remove_dimension(axis_local[i] - i); - } - auto_init_if_empty(*output->info(), input->info()->clone()->set_tensor_shape(out_shape)); - _reshape.configure(_reduced_outs.get() + _reduction_ops - 1, output); - } -} - -void NEReduceMeanEx::run() -{ - _memory_group.acquire(); - - for (unsigned int i = 0; i < _reduction_ops; ++i) - { - _reduction_kernels[i].run(); - } - - if (!_keep_dims) - { - _reshape.run(); - } - _memory_group.release(); -} diff --git a/compute/ARMComputeEx/src/runtime/NEON/functions/NESpaceToBatchLayerEx.cpp b/compute/ARMComputeEx/src/runtime/NEON/functions/NESpaceToBatchLayerEx.cpp deleted file mode 100644 index 198bb76..0000000 --- a/compute/ARMComputeEx/src/runtime/NEON/functions/NESpaceToBatchLayerEx.cpp +++ /dev/null @@ -1,114 +0,0 @@ -/* - * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * Copyright (c) 2019 ARM Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - -#include "arm_compute/runtime/NEON/functions/NESpaceToBatchLayerEx.h" - -#include "arm_compute/core/Error.h" -#include "arm_compute/core/TensorInfo.h" -#include "arm_compute/core/Types.h" -#include "arm_compute/core/Validate.h" -#include "arm_compute/runtime/NEON/NEScheduler.h" - -namespace arm_compute -{ -NESpaceToBatchLayerEx::NESpaceToBatchLayerEx() - : _space_to_batch_kernel(), _memset_kernel(), _has_padding(false) -{ -} - -void NESpaceToBatchLayerEx::configure(const ITensor *input, const ITensor *block_shape, - const ITensor *paddings, ITensor *output) -{ - ARM_COMPUTE_ERROR_ON_NULLPTR(input, block_shape, paddings, output); - - if (input->info()->tensor_shape().total_size() != output->info()->tensor_shape().total_size()) - { - _has_padding = true; - _memset_kernel.configure( - output, PixelValue(0, output->info()->data_type(), output->info()->quantization_info())); - } - _space_to_batch_kernel.configure(input, block_shape, paddings, output); -} - -void NESpaceToBatchLayerEx::configure(const ITensor *input, const int block_shape_x, - const int block_shape_y, const Size2D &padding_left, - const Size2D &padding_right, ITensor *output) -{ - ARM_COMPUTE_ERROR_ON_NULLPTR(input, output); - - if (input->info()->tensor_shape().total_size() != output->info()->tensor_shape().total_size()) - { - _has_padding = true; - _memset_kernel.configure( - output, PixelValue(0, output->info()->data_type(), output->info()->quantization_info())); - } - _space_to_batch_kernel.configure(input, block_shape_x, block_shape_y, padding_left, padding_right, - output); -} - -Status NESpaceToBatchLayerEx::validate(const ITensorInfo *input, const ITensorInfo *block_shape, - const ITensorInfo *paddings, const ITensorInfo *output) -{ - ARM_COMPUTE_RETURN_ON_ERROR( - NESpaceToBatchLayerKernel::validate(input, block_shape, paddings, output)); - - return Status{}; -} - -Status NESpaceToBatchLayerEx::validate(const ITensorInfo *input, const int block_shape_x, - const int block_shape_y, const Size2D &padding_left, - const Size2D &padding_right, const ITensorInfo *output) -{ - ARM_COMPUTE_RETURN_ON_ERROR(NESpaceToBatchLayerKernel::validate( - input, block_shape_x, block_shape_y, padding_left, padding_right, output)); - - return Status{}; -} - -void NESpaceToBatchLayerEx::run() -{ - // Zero out output only if we have paddings - if (_has_padding) - { - NEScheduler::get().schedule(&_memset_kernel, Window::DimY); - } - NEScheduler::get().schedule(&_space_to_batch_kernel, Window::DimY); -} -} // namespace arm_compute diff --git a/compute/ARMComputeEx/src/runtime/NEON/functions/NESpaceToDepthLayerEx.cpp b/compute/ARMComputeEx/src/runtime/NEON/functions/NESpaceToDepthLayerEx.cpp deleted file mode 100644 index 97697e3..0000000 --- a/compute/ARMComputeEx/src/runtime/NEON/functions/NESpaceToDepthLayerEx.cpp +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * Copyright (c) 2019 ARM Limited. 
- * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#include "arm_compute/runtime/NEON/functions/NESpaceToDepthLayerEx.h" - -#include "arm_compute/core/Error.h" -#include "arm_compute/core/NEON/kernels/NESpaceToDepthLayerKernelEx.h" -#include "arm_compute/core/TensorInfo.h" -#include "arm_compute/core/Types.h" -#include "arm_compute/core/Validate.h" - -namespace arm_compute -{ -void NESpaceToDepthLayerEx::configure(const ITensor *input, ITensor *output, int32_t block_shape) -{ - auto k = arm_compute::support::cpp14::make_unique(); - k->configure(input, output, block_shape); - _kernel = std::move(k); -} - -Status NESpaceToDepthLayerEx::validate(const ITensorInfo *input, const ITensorInfo *output, - int32_t block_shape) -{ - ARM_COMPUTE_RETURN_ON_ERROR(NESpaceToDepthLayerKernelEx::validate(input, output, block_shape)); - return Status{}; -} -} // namespace arm_compute diff --git a/compute/ARMComputeEx/src/runtime/NEON/functions/NETransposeConvLayer.cpp b/compute/ARMComputeEx/src/runtime/NEON/functions/NETransposeConvLayer.cpp index df06892..09f1780 100644 --- a/compute/ARMComputeEx/src/runtime/NEON/functions/NETransposeConvLayer.cpp +++ b/compute/ARMComputeEx/src/runtime/NEON/functions/NETransposeConvLayer.cpp @@ -1,21 +1,5 @@ /* - * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * Copyright (c) 2017-2019 ARM Limited. + * Copyright (c) 2017-2020 ARM Limited. * * SPDX-License-Identifier: MIT * @@ -37,14 +21,11 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ - #include "arm_compute/runtime/NEON/functions/NETransposeConvLayer.h" #include "arm_compute/core/Helpers.h" -#include "arm_compute/core/Utils.h" #include "arm_compute/core/UtilsEx.h" #include "arm_compute/core/Validate.h" -#include "arm_compute/core/utils/misc/ShapeCalculator.h" #include "arm_compute/core/utils/misc/ShapeCalculatorEx.h" #include "arm_compute/runtime/NEON/NEScheduler.h" @@ -52,20 +33,15 @@ using namespace arm_compute::misc::shape_calculator; namespace arm_compute { + NETransposeConvLayer::NETransposeConvLayer(std::shared_ptr memory_manager) // NOLINT : _memory_group(std::move(memory_manager)), _conv_f(), _upsample_f(), _flip_weights(), - _permute_input(), - _permute_weights(), - _permute_output(), _scaled_output(), _weights_flipped(), - _permuted_input(), - _permuted_weights(), - _permuted_output(), - _is_nchw(false), + _flip_axis(), _original_weights(nullptr), _input(nullptr), _info(), @@ -80,7 +56,7 @@ Status NETransposeConvLayer::validate(const ITensorInfo *input, const ITensorInf { ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, weights, output); ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F32, DataType::F16, - DataType::QASYMM8); + DataType::QASYMM8, DataType::QASYMM8_SIGNED); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(weights, input); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(weights, input); const unsigned int width_idx = @@ -95,13 +71,16 @@ Status NETransposeConvLayer::validate(const ITensorInfo *input, const ITensorInf weights->dimension(height_idx), info, invalid_right, invalid_bottom); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights); - if (is_data_type_quantized_asymmetric(input->data_type()) && bias) + if (bias != nullptr) { - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(bias, 1, DataType::S32); - } - else if (bias) - { - ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, bias); + if (is_data_type_quantized_asymmetric(input->data_type())) + { + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(bias, 1, DataType::S32); + } + else + { + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, bias); + } } if (output->tensor_shape().total_size() > 0) @@ -110,12 +89,12 @@ Status NETransposeConvLayer::validate(const ITensorInfo *input, const ITensorInf const TensorShape output_shape = compute_transposeconv_output_shape(out_dims, *input, *weights); - ARM_COMPUTE_RETURN_ERROR_ON_MSG(output->dimension(Window::DimX) < output_shape.x(), - "Output's dim 0 is invalid."); - ARM_COMPUTE_RETURN_ERROR_ON_MSG(output->dimension(Window::DimY) < output_shape.y(), - "Output's dim 1 is invalid."); - ARM_COMPUTE_RETURN_ERROR_ON_MSG(output->dimension(Window::DimZ) < output_shape.z(), - "Output's dim 2 is invalid."); + ARM_COMPUTE_RETURN_ERROR_ON_MSG(output->dimension(Window::DimX) != output_shape.x(), + "Output's width is invalid."); + ARM_COMPUTE_RETURN_ERROR_ON_MSG(output->dimension(Window::DimY) != output_shape.y(), + "Output's height is invalid."); + ARM_COMPUTE_RETURN_ERROR_ON_MSG(output->dimension(Window::DimZ) != output_shape.z(), + "Output's depth is invalid."); } unsigned int pad_left = 0; @@ -127,7 +106,6 @@ Status NETransposeConvLayer::validate(const ITensorInfo *input, const ITensorInf pad_bottom); TensorInfo scale_out_info( input->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(scale_out_shape)); - scale_out_info.set_data_layout(input->data_layout()); const PadStrideInfo conv_info(1, 1, 0, 0, 0, 0, DimensionRoundingType::CEIL); const unsigned int batches_idx = @@ -149,19 
+127,13 @@ void NETransposeConvLayer::configure(ITensor *input, const ITensor *weights, con ITensor *output, const PadStrideInfo &info, unsigned int invalid_right, unsigned int invalid_bottom) { + // Perform validation step ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output); + ARM_COMPUTE_ERROR_THROW_ON(NETransposeConvLayer::validate( + input->info(), weights->info(), (bias == nullptr) ? nullptr : bias->info(), output->info(), + info, invalid_right, invalid_bottom)); const DataLayout data_layout = input->info()->data_layout(); - - _input = input; - _original_weights = weights; - _info = info; - _is_prepared = false; - _is_nchw = data_layout == DataLayout::NCHW; - - const unsigned int stride_x = info.stride().first; - const unsigned int stride_y = info.stride().second; - const unsigned int width_idx = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH); const unsigned int height_idx = @@ -173,101 +145,54 @@ void NETransposeConvLayer::configure(ITensor *input, const ITensor *weights, con const TensorShape output_shape = compute_transposeconv_output_shape(out_dims, *input->info(), *weights->info()); + + _input = input; + _original_weights = weights; + _info = info; + _is_prepared = false; + + unsigned int pad_left = 0; + unsigned int pad_right = 0; + unsigned int pad_top = 0; + unsigned int pad_bottom = 0; + const unsigned int stride_x = info.stride().first; + const unsigned int stride_y = info.stride().second; + // Output auto initialization if not yet initialized auto_init_if_empty(*output->info(), output_shape, 1, input->info()->data_type(), input->info()->quantization_info()); - // Perform validation step - ARM_COMPUTE_ERROR_THROW_ON(NETransposeConvLayer::validate( - input->info(), weights->info(), bias == nullptr ? nullptr : bias->info(), output->info(), - info, invalid_right, invalid_bottom)); - + _flip_axis.allocator()->init(TensorInfo(TensorShape(2U), 1, DataType::U32)); _memory_group.manage(&_scaled_output); - if (!_is_nchw) - { - _memory_group.manage(&_permuted_input); - _memory_group.manage(&_permuted_weights); - _memory_group.manage(&_permuted_output); - - // Configure the function to transform the input tensor from NHWC -> NCHW - _permuted_input.info()->set_quantization_info(input->info()->quantization_info()); - _permute_input.configure(input, &_permuted_input, PermutationVector(1U, 2U, 0U)); - _permuted_input.info()->set_data_layout(DataLayout::NCHW); - - // Configure the function to transform the weights tensor from NHWC -> NCHW - _permuted_weights.info()->set_quantization_info(weights->info()->quantization_info()); - _permute_weights.configure(weights, &_permuted_weights, PermutationVector(1U, 2U, 0U)); - _permuted_weights.info()->set_data_layout(DataLayout::NCHW); - - // Find the upsampled dimensions and the padding needed for the convolution with stride 1 in - // order to match output shape - - unsigned int pad_left = 0; - unsigned int pad_right = 0; - unsigned int pad_top = 0; - unsigned int pad_bottom = 0; - const TensorShape scale_out_shape = compute_transposeconv_upsampled_shape( - *_permuted_input.info(), *_permuted_weights.info(), info, out_dims, invalid_right, - invalid_bottom, pad_left, pad_right, pad_top, pad_bottom); - - TensorInfo scale_out_info(scale_out_shape, 1, _permuted_input.info()->data_type(), - _permuted_input.info()->quantization_info()); - scale_out_info.set_data_layout(DataLayout::NCHW); - _scaled_output.allocator()->init(scale_out_info); - - const PadStrideInfo upsample_info(stride_x, stride_y, pad_left, pad_right, pad_top, 
pad_bottom, - DimensionRoundingType::CEIL); - _upsample_f.configure(&_permuted_input, &_scaled_output, upsample_info); - - _weights_flipped.allocator()->init(*_permuted_weights.info()->clone()); - _weights_flipped.info()->set_quantization_info(weights->info()->quantization_info()); - _flip_weights.configure(&_permuted_weights, &_weights_flipped); - - // setup the function to convolve the upscaled output - const PadStrideInfo conv_info(1, 1, 0, 0, 0, 0, DimensionRoundingType::CEIL); - - const auto out_shape = output->info()->tensor_shape(); - TensorShape permuted_out_shape{out_shape[1], out_shape[2], out_shape[0], out_shape[3]}; - TensorInfo permuted_out_info(permuted_out_shape, 1, output->info()->data_type(), - output->info()->quantization_info()); - _permuted_output.allocator()->init(permuted_out_info); - _permuted_output.info()->set_data_layout(DataLayout::NCHW); - _conv_f.configure(&_scaled_output, &_weights_flipped, bias, &_permuted_output, conv_info); - - // Configure the function to transform the convoluted output to NHWC - _permute_output.configure(&_permuted_output, output, PermutationVector(2U, 0U, 1U)); - - _permuted_input.allocator()->allocate(); - _permuted_weights.allocator()->allocate(); - _permuted_output.allocator()->allocate(); - } - else - { - // Find the upsampled dimensions and the padding needed for the convolution with stride 1 in - // order to match output shape - unsigned int pad_left = 0; - unsigned int pad_right = 0; - unsigned int pad_top = 0; - unsigned int pad_bottom = 0; - const TensorShape scale_out_shape = compute_transposeconv_upsampled_shape( - *input->info(), *weights->info(), info, out_dims, invalid_right, invalid_bottom, pad_left, - pad_right, pad_top, pad_bottom); - - TensorInfo scale_out_info(scale_out_shape, 1, input->info()->data_type(), - input->info()->quantization_info()); - _scaled_output.allocator()->init(scale_out_info); - const PadStrideInfo upsample_info(stride_x, stride_y, pad_left, pad_right, pad_top, pad_bottom, - DimensionRoundingType::FLOOR); - _upsample_f.configure(input, &_scaled_output, upsample_info); - - _weights_flipped.allocator()->init(weights->info()->clone()->set_data_layout(data_layout)); - _flip_weights.configure(weights, &_weights_flipped); - - // setup the function to convolve the upscaled output - const PadStrideInfo conv_info(1, 1, 0, 0, 0, 0, DimensionRoundingType::CEIL); - _conv_f.configure(&_scaled_output, &_weights_flipped, bias, output, conv_info); - } + _weights_flipped.allocator()->init(weights->info()->clone()->set_data_layout(data_layout)); + _flip_weights.configure(weights, &_weights_flipped, &_flip_axis); + + // setup the function to convolve the upscaled output + const PadStrideInfo conv_info(1, 1, 0, 0, 0, 0, DimensionRoundingType::CEIL); + + const TensorShape scale_out_shape = compute_transposeconv_upsampled_shape( + *input->info(), *weights->info(), info, out_dims, invalid_right, invalid_bottom, pad_left, + pad_right, pad_top, pad_bottom); + + const PadStrideInfo upsample_info(stride_x, stride_y, pad_left, pad_right, pad_top, pad_bottom, + DimensionRoundingType::FLOOR); + + TensorInfo scale_out_info(scale_out_shape, 1, input->info()->data_type(), + input->info()->quantization_info()); + scale_out_info.set_data_layout(data_layout); + _scaled_output.allocator()->init(scale_out_info); + + _upsample_f.configure(input, &_scaled_output, upsample_info); + + _conv_f.configure(&_scaled_output, &_weights_flipped, bias, output, conv_info); + + // Setup flip axis data + _flip_axis.allocator()->allocate(); + auto 
axis_data = reinterpret_cast<uint32_t *>(_flip_axis.buffer()); + axis_data[0] = static_cast<uint32_t>(width_idx); + axis_data[1] = static_cast<uint32_t>(height_idx); + _scaled_output.allocator()->allocate(); } @@ -275,22 +200,10 @@ void NETransposeConvLayer::run() { prepare(); - // MemoryGroupResourceScope scope_mg(_memory_group); - - // Permute input - if (!_is_nchw) - { - _permute_input.run(); - } + MemoryGroupResourceScope scope_mg(_memory_group); _upsample_f.run(); _conv_f.run(); - - // Permute output - if (!_is_nchw) - { - _permute_output.run(); - } } void NETransposeConvLayer::prepare() @@ -301,22 +214,12 @@ void NETransposeConvLayer::prepare() // Run weights flipping and mark original weights tensor as unused _weights_flipped.allocator()->allocate(); - // Permute weights - if (!_is_nchw) - { - _permute_weights.run(); - } - NEScheduler::get().schedule(&_flip_weights, Window::DimZ); + _flip_weights.run(); _original_weights->mark_as_unused(); // Prepare convolution _conv_f.prepare(); - if (!_weights_flipped.is_used()) - { - _weights_flipped.allocator()->free(); - } - _is_prepared = true; } } diff --git a/compute/cker/CMakeLists.txt b/compute/cker/CMakeLists.txt index 09f6725..609dd45 100644 --- a/compute/cker/CMakeLists.txt +++ b/compute/cker/CMakeLists.txt @@ -8,6 +8,9 @@ target_link_libraries(nnfw_lib_cker INTERFACE gemmlowp) target_link_libraries(nnfw_lib_cker INTERFACE ruy) target_link_libraries(nnfw_lib_cker INTERFACE ruy_instrumentation) target_compile_definitions(nnfw_lib_cker INTERFACE USE_RUY_GEMV) +if(EXPERIMENTAL_RUY_FEATURE) + target_compile_definitions(nnfw_lib_cker INTERFACE EXPERIMENTAL_RUY_FEATURE) +endif(EXPERIMENTAL_RUY_FEATURE) if(PROFILE_RUY) target_link_libraries(nnfw_lib_cker INTERFACE ruy_profiler) endif(PROFILE_RUY) diff --git a/compute/cker/include/cker/NeonTensorUtils.h b/compute/cker/include/cker/NeonTensorUtils.h index 5c38bc6..246fd9a 100644 --- a/compute/cker/include/cker/NeonTensorUtils.h +++ b/compute/cker/include/cker/NeonTensorUtils.h @@ -546,7 +546,7 @@ bool NeonIsZeroVector(const float *vector, int v_size) void NeonCpuBackendGemm(const int8_t *input, const int32_t *bias, const int8_t *input_to_gate_weights, int32_t n_batch, int32_t n_input, - int32_t n_output, int32_t, int32_t *scratch) + int32_t n_output, int32_t, int32_t *scratch, ruy::Context *ruy_context) { MatrixParams<int8_t> lhs_params; lhs_params.order = Order::kRowMajor; @@ -571,8 +571,6 @@ void NeonCpuBackendGemm(const int8_t *input, const int32_t *bias, } // Below code is from tflite::cpu_backend_gemm::detail::GemmImplUsingRuy - ruy::Context *ruy_context = ruy_support::GetRuyContext(); - ruy::Matrix<int8_t> ruy_lhs; ruy::Matrix<int8_t> ruy_rhs; ruy::Matrix<int32_t> ruy_dst; @@ -851,13 +849,13 @@ void NeonMatrixBatchVectorMultiplyAccumulate(const int8_t *__restrict__ matrix, const int m_cols, const int8_t *__restrict__ vectors, const float *scaling_factors, int n_batch, int32_t *scratch, float *__restrict__ result, - int result_stride) + int result_stride, ruy::Context *ruy_context) { if (m_rows % 4 == 0 && result_stride == 1) { const int32_t *bias = static_cast<const int32_t *>(nullptr); NeonCpuBackendGemm(vectors, bias, matrix, n_batch, m_cols, m_rows, - /*output_zp =*/0, scratch); + /*output_zp =*/0, scratch, ruy_context); // Multiply by float scaling factors and write to result const int total_size = n_batch * m_rows; diff --git a/compute/cker/include/cker/PortableTensorUtils.h b/compute/cker/include/cker/PortableTensorUtils.h index 9769d4b..54714e2 100644 --- a/compute/cker/include/cker/PortableTensorUtils.h +++ b/compute/cker/include/cker/PortableTensorUtils.h @@ 
-20,6 +20,7 @@ #include "cker/Types.h" #include "cker/neon/neon_check.h" +#include #include #include @@ -142,7 +143,7 @@ void PortableMatrixBatchVectorMultiplyAccumulate(const int8_t *__restrict__ matr const int8_t *__restrict__ vector, const float *scaling_factors, int n_batch, int32_t *, float *__restrict__ result, - int result_stride) + int result_stride, ruy::Context *) { PortableMatrixBatchVectorMultiplyAccumulate(matrix, m_rows, m_cols, vector, scaling_factors, n_batch, result, result_stride); diff --git a/compute/cker/include/cker/TensorUtils.h b/compute/cker/include/cker/TensorUtils.h index 6b23c0b..e07c912 100644 --- a/compute/cker/include/cker/TensorUtils.h +++ b/compute/cker/include/cker/TensorUtils.h @@ -73,10 +73,10 @@ void MatrixBatchVectorMultiplyAccumulate(const float *matrix, int m_rows, int m_ void MatrixBatchVectorMultiplyAccumulate(const int8_t *matrix, const int m_rows, const int m_cols, const int8_t *vectors, const float *scaling_factors, int n_batch, int32_t *scratch, float *result, - int result_stride) + int result_stride, ruy::Context *ruy_context) { NEON_OR_PORTABLE(MatrixBatchVectorMultiplyAccumulate, matrix, m_rows, m_cols, vectors, - scaling_factors, n_batch, scratch, result, result_stride); + scaling_factors, n_batch, scratch, result, result_stride, ruy_context); } void ZeroVector(float *vector, int v_size) { PortableZeroVector(vector, v_size); } diff --git a/compute/cker/include/cker/Types.h b/compute/cker/include/cker/Types.h index 41b1916..886ce5e 100644 --- a/compute/cker/include/cker/Types.h +++ b/compute/cker/include/cker/Types.h @@ -259,6 +259,12 @@ struct FullyConnectedParams // FullyConnectedWeightsFormat weights_format; }; +struct L2NormParams +{ + // uint8 inference params. + int32_t input_zero_point; +}; + struct GatherParams { int32_t axis; @@ -271,6 +277,14 @@ struct InstanceNormParams float float_activation_max; }; +struct ResizeBilinearParams +{ + int32_t output_height; + int32_t output_width; + bool align_corners; + bool half_pixel_centers; +}; + struct TransposeConvParams { PaddingType padding_type; @@ -325,6 +339,12 @@ struct SplitParams int16_t axis; }; +struct SplitVParams +{ + uint16_t num_split; + int16_t axis; +}; + struct FusedBatchNormParams { bool is_training; @@ -338,6 +358,11 @@ struct SpaceToBatchParams int32_t output_offset; }; +struct SpaceToDepthParams +{ + int32_t block_size; +}; + enum class Order { kColMajor, diff --git a/compute/cker/include/cker/Utils.h b/compute/cker/include/cker/Utils.h index b69d55c..2abb998 100644 --- a/compute/cker/include/cker/Utils.h +++ b/compute/cker/include/cker/Utils.h @@ -123,6 +123,68 @@ inline int CountLeadingZeros(uint32_t integer_input) return leading_zeros; } +inline void GetInvSqrtQuantizedMultiplierExp(int32_t input, int reverse_shift, + int32_t *output_inv_sqrt, int *output_shift) +{ + assert(input >= 0); + if (input <= 1) + { + // Handle the input value 1 separately to avoid overflow in that case + // in the general computation below (b/143972021). Also handle 0 as if it + // were a 1. 0 is an invalid input here (divide by zero) and 1 is a valid + // but rare/unrealistic input value. We can expect both to occur in some + // incompletely trained models, but probably not in fully trained models. 
+ *output_inv_sqrt = std::numeric_limits<std::int32_t>::max(); + *output_shift = 0; + return; + } + assert(input > 1); + *output_shift = 11; + while (input >= (1 << 29)) + { + input /= 4; + ++*output_shift; + } + const unsigned max_left_shift_bits = CountLeadingZeros(static_cast<uint32_t>(input)) - 1; + const unsigned max_left_shift_bit_pairs = max_left_shift_bits / 2; + const unsigned left_shift_bit_pairs = max_left_shift_bit_pairs - 1; + *output_shift -= left_shift_bit_pairs; + input <<= 2 * left_shift_bit_pairs; + assert(input >= (1 << 27)); + assert(input < (1 << 29)); + using gemmlowp::FixedPoint; + using gemmlowp::Rescale; + using gemmlowp::SaturatingRoundingMultiplyByPOT; + // Using 3 integer bits gives us enough room for the internal arithmetic in + // this Newton-Raphson iteration. + using F3 = FixedPoint<int32_t, 3>; + using F0 = FixedPoint<int32_t, 0>; + const F3 fixedpoint_input = F3::FromRaw(input >> 1); + const F3 fixedpoint_half_input = SaturatingRoundingMultiplyByPOT<-1>(fixedpoint_input); + const F3 fixedpoint_half_three = + GEMMLOWP_CHECKED_FIXEDPOINT_CONSTANT(F3, (1 << 28) + (1 << 27), 1.5); + // Newton-Raphson iteration + // Naive unoptimized starting guess: x = 1 + F3 x = F3::One(); + // Naive unoptimized number of iterations: 5 + for (int i = 0; i < 5; i++) + { + const F3 x3 = Rescale<3>(x * x * x); + x = Rescale<3>(fixedpoint_half_three * x - fixedpoint_half_input * x3); + } + const F0 fixedpoint_half_sqrt_2 = + GEMMLOWP_CHECKED_FIXEDPOINT_CONSTANT(F0, 1518500250, std::sqrt(2.) / 2.); + x = x * fixedpoint_half_sqrt_2; + *output_inv_sqrt = x.raw(); + if (*output_shift < 0) + { + *output_inv_sqrt <<= -*output_shift; + *output_shift = 0; + } + // Convert right shift (right is positive) to left shift. + *output_shift *= reverse_shift; +} + // Comment from tensorflow lite: // // DO NOT USE THIS STRUCT FOR NEW FUNCTIONALITY BEYOND IMPLEMENTING diff --git a/compute/cker/include/cker/operation/BatchToSpaceND.h b/compute/cker/include/cker/operation/BatchToSpaceND.h new file mode 100644 index 0000000..e33b2fb --- /dev/null +++ b/compute/cker/include/cker/operation/BatchToSpaceND.h @@ -0,0 +1,133 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved + * Copyright 2017 The TensorFlow Authors. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __NNFW_CKER_BATCH_TO_SPACE_ND_H__ +#define __NNFW_CKER_BATCH_TO_SPACE_ND_H__ + +#include "cker/Shape.h" + +#define UNUSED(x) ((void)(x)) + +namespace nnfw +{ +namespace cker +{ + +// Helper methods for BatchToSpaceND. +// `spatial_index_dim` specifies post-crop offset index in this spatial +// dimension, i.e. spatial offset introduced by flattening batch to spatial +// dimension minus the crop size at beginning. `block_shape_dim` is the block +// size in current dimension. `input_dim` and `output_dim` are input and output +// size of BatchToSpaceND operation in current dimension. +// Output start index is inclusive and end index is exclusive. 
+inline void GetIndexRange(int spatial_index_dim, int block_shape_dim, int input_dim, int output_dim, + int *start_index, int *end_index) +{ + // (*start_index) * block_shape_dim is effectively rounded up to the next + // multiple of block_shape_dim by the integer division. + *start_index = std::max(0, (-spatial_index_dim + block_shape_dim - 1) / block_shape_dim); + // Similarly, (*end_index) * block_shape_dim is rounded up too (note that + // end_index is exclusive). + *end_index = + std::min(input_dim, (output_dim - spatial_index_dim + block_shape_dim - 1) / block_shape_dim); +} + +template +inline void BatchToSpaceND(const Shape &unextended_input1_shape, const T *input1_data, + const int32_t *block_shape_data, const int32_t *crops_data, + const Shape &unextended_output_shape, T *output_data) +{ + auto input_dim = unextended_input1_shape.DimensionsCount(); + auto output_dim = unextended_output_shape.DimensionsCount(); + + assert(input_dim == 3 || input_dim == 4); + assert(input_dim == output_dim); + + UNUSED(input_dim); + UNUSED(output_dim); + + // Extends the input/output shape from 3D to 4D if needed, NHC -> NH1C. + auto extend_shape = [](const Shape &shape) { + if (shape.DimensionsCount() == 4) + { + return shape; + } + Shape new_shape(4, 1); + new_shape.SetDim(0, shape.Dims(0)); + new_shape.SetDim(1, shape.Dims(1)); + new_shape.SetDim(3, shape.Dims(2)); + return new_shape; + }; + const Shape input1_shape = extend_shape(unextended_input1_shape); + const Shape output_shape = extend_shape(unextended_output_shape); + + const int32_t output_width = output_shape.Dims(2); + const int32_t output_height = output_shape.Dims(1); + const int32_t output_batch_size = output_shape.Dims(0); + + const int32_t depth = input1_shape.Dims(3); + const int32_t input_width = input1_shape.Dims(2); + const int32_t input_height = input1_shape.Dims(1); + const int32_t input_batch_size = input1_shape.Dims(0); + + const int32_t block_shape_height = block_shape_data[0]; + const int32_t block_shape_width = block_shape_data[1]; + + const int32_t crops_top = crops_data[0]; + const int32_t crops_left = crops_data[2]; + + for (int in_batch = 0; in_batch < input_batch_size; ++in_batch) + { + const int out_batch = in_batch % output_batch_size; + const int spatial_offset = in_batch / output_batch_size; + + int in_h_start = 0; + int in_h_end = 0; + // GetIndexRange ensures start and end indices are in [0, output_height). + GetIndexRange(spatial_offset / block_shape_width - crops_top, block_shape_height, input_height, + output_height, &in_h_start, &in_h_end); + + for (int in_h = in_h_start; in_h < in_h_end; ++in_h) + { + const int out_h = in_h * block_shape_height + spatial_offset / block_shape_width - crops_top; + assert(out_h >= 0); + assert(out_h < output_height); + + int in_w_start = 0; + int in_w_end = 0; + // GetIndexRange ensures start and end indices are in [0, output_width). 
+ GetIndexRange(spatial_offset % block_shape_width - crops_left, block_shape_width, input_width, + output_width, &in_w_start, &in_w_end); + + for (int in_w = in_w_start; in_w < in_w_end; ++in_w) + { + const int out_w = + in_w * block_shape_width + spatial_offset % block_shape_width - crops_left; + assert(out_w >= 0); + assert(out_w < output_width); + T *out = output_data + Offset(output_shape, out_batch, out_h, out_w, 0); + const T *in = input1_data + Offset(input1_shape, in_batch, in_h, in_w, 0); + memcpy(out, in, depth * sizeof(T)); + } + } + } +} + +} // namespace cker +} // namespace nnfw + +#endif // __NNFW_CKER_BATCH_TO_SPACE_ND_H__ diff --git a/compute/cker/include/cker/operation/FullyConnected.h b/compute/cker/include/cker/operation/FullyConnected.h index 9bcf3fd..4280c9a 100644 --- a/compute/cker/include/cker/operation/FullyConnected.h +++ b/compute/cker/include/cker/operation/FullyConnected.h @@ -18,6 +18,7 @@ #ifndef __NNFW_CKER_FULLY_CONNECTED_H__ #define __NNFW_CKER_FULLY_CONNECTED_H__ +#include #include "cker/Shape.h" #include "cker/Types.h" #include "cker/Utils.h" @@ -78,8 +79,11 @@ inline void FullyConnected(const FullyConnectedParams ¶ms, const Shape &inpu MatrixBatchVectorMultiplyAccumulate(weights_data, num_units, input_size, input_data, batch_size, output_data, /*result_stride=*/1); - // Apply activation function - ApplyActivationToVector(output_data, batch_size * num_units, params.activation, output_data); + if (params.activation != FusedActivationFunctionType::kNone) + { + // Apply activation function + ApplyActivationToVector(output_data, batch_size * num_units, params.activation, output_data); + } } inline void FullyConnected(const FullyConnectedParams ¶ms, const Shape &input_shape, @@ -140,7 +144,7 @@ inline void FullyConnectedHybrid(const FullyConnectedParams ¶ms, const Shape const float *input_data, const Shape &filter_shape, const int8_t *filter_data, const Shape &, const float *bias_data, const Shape &output_shape, float *output_data, - FCTempArena &temp_arena) + FCTempArena &temp_arena, ruy::Context *ruy_context) { int total_input_size = input_shape.FlatSize(); const int input_size = filter_shape.Dims(1); @@ -186,19 +190,72 @@ inline void FullyConnectedHybrid(const FullyConnectedParams ¶ms, const Shape int32_t *scratch = temp_arena.accum_scratch.data(); MatrixBatchVectorMultiplyAccumulate(filter_data, num_units, input_size, quant_data, scaling_factors_ptr, batch_size, scratch, output_data, - /*result_stride=*/1); + /*result_stride=*/1, ruy_context); #else MatrixBatchVectorMultiplyAccumulate(filter_data, num_units, input_size, quant_data, scaling_factors_ptr, batch_size, output_data, /*result_stride=*/1); + UNUSED_RELEASE(ruy_context); UNUSED_RELEASE(output_shape); #endif // Apply activation function to floats. 
- ApplyActivationToVector(output_data, batch_size * num_units, params.activation, output_data); + if (params.activation != FusedActivationFunctionType::kNone) + { + // Apply activation function + ApplyActivationToVector(output_data, batch_size * num_units, params.activation, output_data); + } return; } +inline void FullyConnectedSparseWeight(const FullyConnectedParams ¶ms, const Shape &input_shape, + const float *input_data, const Shape &weights_shape, + const float *weights_data, const Shape &bias_shape, + const float *bias_data, const Shape &output_shape, + float *output_data, int w0_size, const uint16_t *w1_segments, + const uint16_t *w1_indices) +{ + UNUSED_RELEASE(params); + UNUSED_RELEASE(input_shape); + + assert(weights_shape.DimensionsCount() == 2); + assert(output_shape.DimensionsCount() == 2); + + const int output_dims_count = output_shape.DimensionsCount(); + const int weights_dims_count = weights_shape.DimensionsCount(); + const int batches = FlatSizeSkipDim(output_shape, output_dims_count - 1); + const int output_depth = + MatchingDim(weights_shape, weights_dims_count - 2, output_shape, output_dims_count - 1); + const int accum_depth = weights_shape.Dims(weights_dims_count - 1); + + UNUSED_RELEASE(bias_shape); + if (bias_data) + { + VectorBatchVectorAssign(bias_data, output_depth, batches, output_data); + } + else + { + ZeroVector(output_data, batches * output_depth); + } + for (int b = 0; b < batches; ++b) + { + for (int idx_0 = 0; idx_0 < w0_size; ++idx_0) + { + for (int pw1 = w1_segments[idx_0]; pw1 < w1_segments[idx_0 + 1]; ++pw1) + { + int idx_1 = w1_indices[pw1]; + output_data[b * output_depth + idx_0] += + weights_data[pw1] * input_data[b * accum_depth + idx_1]; + } + } + } + if (params.activation != FusedActivationFunctionType::kNone) + { + // Apply activation function + ApplyActivationToVector(output_data, batches * output_depth, params.activation, output_data); + } +} + } // namespace cker } // namespace nnfw diff --git a/compute/cker/include/cker/operation/Helper/PhiloxRandom.h b/compute/cker/include/cker/operation/Helper/PhiloxRandom.h new file mode 100644 index 0000000..8e8879c --- /dev/null +++ b/compute/cker/include/cker/operation/Helper/PhiloxRandom.h @@ -0,0 +1,276 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved + * Copyright 2015 The TensorFlow Authors. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef TENSORFLOW_CORE_LIB_RANDOM_PHILOX_RANDOM_H_ +#define TENSORFLOW_CORE_LIB_RANDOM_PHILOX_RANDOM_H_ + +#include + +#include "cker/Types.h" +#include "cker/Shape.h" +#include "cker/Utils.h" + +// Function qualifiers that need to work on both CPU and GPU. +#if defined(__CUDACC__) || defined(__HIPCC__) +// For nvcc. +#define PHILOX_DEVICE_FUNC __host__ __device__ +#define PHILOX_INLINE __inline__ +#else +// For non-nvcc. 
+#define PHILOX_DEVICE_FUNC +#define PHILOX_INLINE inline +#endif +#define PHILOX_DEVICE_INLINE PHILOX_DEVICE_FUNC PHILOX_INLINE + +#include + +namespace nnfw +{ +namespace cker +{ +namespace random +{ + +// A class that represents an inline array. It can be used on both CPU and GPU, +// and also trivially copyable between CPU and GPU. +// Arguments: +// T: the array element type; +// ElementCount: the fixed size of the array; +template class Array +{ +public: + static constexpr int kElementCount = ElementCount; + PHILOX_DEVICE_INLINE Array() + { + for (int i = 0; i < ElementCount; ++i) + { + data_[i] = T(0); + } + } + + PHILOX_DEVICE_INLINE const T &operator[](int index) const { return data_[index]; } + + PHILOX_DEVICE_INLINE T &operator[](int index) { return data_[index]; } + + size_t size() const { return ElementCount; } + +private: + T data_[ElementCount]; +}; + +// A class that encapsulates all the states for a random number generator using +// the philox_4x32_10 algorithm. Each invocation returns a 128-bit random bits +// in the form of four uint32. +// There are multiple variants of this algorithm, we picked the 4x32_10 version +// that is most suited for our applications. +// Since this class is meant to be copied between CPU to GPU, it maintains a +// value semantics. +// +// For example: To use this class and populate an array of 1024 randoms on CPU +// with two threads, +// +// void Fill(PhiloxRandom rnd, uint32* output, int start, int limit) { +// assert(start % 4 == 0); +// assert(limit % 4 == 0); +// rnd.Skip(start / 4); +// for (int i = start; i < limit; i += 4) { +// auto sample = rnd(); +// ... copy sample[0..3] to output[i..i+3] +// } +// } +// +// PhiloxRandom rng(seed); +// PhiloxRandom rng_copy = rng; +// rng.Skip(1000/4); +// +// ... schedule Fill(rng_copy, output, 0, 512) in thread 1; +// ... schedule Fill(rng_copy, output, 512, 1024) in thread 2; +// ... wait for thread 1 & 2 to finish executing Fill(). +// +// NOTE: +// 1. PhiloxRandom is trivially copyable. +// 2. PhiloxRandom is compilable by gcc and nvcc. +class PhiloxRandom +{ +public: + using ResultType = Array; + using ResultElementType = uint32_t; + // The number of elements that will be returned. + static constexpr int kResultElementCount = 4; + // Cost of generation of a single element (in cycles). + static constexpr int kElementCost = 10; + // The type for the 64-bit key stored in the form of two 32-bit uint + // that are used in the diffusion process. + using Key = Array; + + PHILOX_DEVICE_INLINE + PhiloxRandom() {} + + PHILOX_DEVICE_INLINE + explicit PhiloxRandom(uint64_t seed) + { + key_[0] = static_cast(seed); + key_[1] = static_cast(seed >> 32); + } + + PHILOX_DEVICE_INLINE + explicit PhiloxRandom(uint64_t seed_lo, uint64_t seed_hi) + { + key_[0] = static_cast(seed_lo); + key_[1] = static_cast(seed_lo >> 32); + counter_[2] = static_cast(seed_hi); + counter_[3] = static_cast(seed_hi >> 32); + } + + PHILOX_DEVICE_INLINE + PhiloxRandom(ResultType counter, Key key) : counter_(counter), key_(key) {} + + PHILOX_DEVICE_INLINE + ResultType const &counter() const { return counter_; } + + PHILOX_DEVICE_INLINE + Key const &key() const { return key_; } + + // Skip the specified number of samples of 128-bits in the current stream. 
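+  // The four 32-bit words of counter_ behave as a single little-endian 128-bit integer, so
+  // Skip(count) just adds `count` to that integer with manual carry propagation. For example,
+  // starting from a counter of {0xffffffff, 0, 0, 0}, Skip(count) for any count in [1, 2^32)
+  // leaves the counter at {count - 1, 1, 0, 0}.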
+ PHILOX_DEVICE_INLINE + void Skip(uint64_t count) + { + const uint32_t count_lo = static_cast(count); + uint32_t count_hi = static_cast(count >> 32); + + counter_[0] += count_lo; + if (counter_[0] < count_lo) + { + ++count_hi; + } + + counter_[1] += count_hi; + if (counter_[1] < count_hi) + { + if (++counter_[2] == 0) + { + ++counter_[3]; + } + } + } + + // Returns a group of four random numbers using the underlying Philox + // algorithm. + PHILOX_DEVICE_INLINE ResultType operator()() + { + ResultType counter = counter_; + Key key = key_; + + // Run the single rounds for ten times. Manually unrolling the loop + // for better performance. + counter = ComputeSingleRound(counter, key); + RaiseKey(&key); + counter = ComputeSingleRound(counter, key); + RaiseKey(&key); + counter = ComputeSingleRound(counter, key); + RaiseKey(&key); + counter = ComputeSingleRound(counter, key); + RaiseKey(&key); + counter = ComputeSingleRound(counter, key); + RaiseKey(&key); + counter = ComputeSingleRound(counter, key); + RaiseKey(&key); + counter = ComputeSingleRound(counter, key); + RaiseKey(&key); + counter = ComputeSingleRound(counter, key); + RaiseKey(&key); + counter = ComputeSingleRound(counter, key); + RaiseKey(&key); + counter = ComputeSingleRound(counter, key); + + SkipOne(); + + return counter; + } + +private: + // We use the same constants as recommended by the original paper. + static constexpr uint32_t kPhiloxW32A = 0x9E3779B9; + static constexpr uint32_t kPhiloxW32B = 0xBB67AE85; + static constexpr uint32_t kPhiloxM4x32A = 0xD2511F53; + static constexpr uint32_t kPhiloxM4x32B = 0xCD9E8D57; + + // Helper function to skip the next sample of 128-bits in the current stream. + PHILOX_DEVICE_INLINE void SkipOne() + { + if (++counter_[0] == 0) + { + if (++counter_[1] == 0) + { + if (++counter_[2] == 0) + { + ++counter_[3]; + } + } + } + } + + // Helper function to return the lower and higher 32-bits from two 32-bit + // integer multiplications. + PHILOX_DEVICE_INLINE + static void MultiplyHighLow(uint32_t a, uint32_t b, uint32_t *result_low, uint32_t *result_high) + { +#ifndef __CUDA_ARCH__ + const uint64_t product = static_cast(a) * b; + *result_low = static_cast(product); + *result_high = static_cast(product >> 32); +#else + *result_low = a * b; + *result_high = __umulhi(a, b); +#endif + } + + // Helper function for a single round of the underlying Philox algorithm. + PHILOX_DEVICE_INLINE static ResultType ComputeSingleRound(const ResultType &counter, + const Key &key) + { + uint32_t lo0; + uint32_t hi0; + MultiplyHighLow(kPhiloxM4x32A, counter[0], &lo0, &hi0); + + uint32_t lo1; + uint32_t hi1; + MultiplyHighLow(kPhiloxM4x32B, counter[2], &lo1, &hi1); + + ResultType result; + result[0] = hi1 ^ counter[1] ^ key[0]; + result[1] = lo1; + result[2] = hi0 ^ counter[3] ^ key[1]; + result[3] = lo0; + return result; + } + + PHILOX_DEVICE_INLINE void RaiseKey(Key *key) + { + (*key)[0] += kPhiloxW32A; + (*key)[1] += kPhiloxW32B; + } + +private: + ResultType counter_; + Key key_; +}; + +} // namespace random +} // namespace cker +} // namespace nnfw +#endif // TENSORFLOW_CORE_LIB_RANDOM_PHILOX_RANDOM_H_ diff --git a/compute/cker/include/cker/operation/Helper/RandomDistributions.h b/compute/cker/include/cker/operation/Helper/RandomDistributions.h new file mode 100644 index 0000000..baeafd7 --- /dev/null +++ b/compute/cker/include/cker/operation/Helper/RandomDistributions.h @@ -0,0 +1,778 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved + * Copyright 2015 The TensorFlow Authors. 
All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __NNFW_CKER_HELPER_RANDOM_DISTRIBUTIONS_H__ +#define __NNFW_CKER_HELPER_RANDOM_DISTRIBUTIONS_H__ + +#include + +#include + +#include +#include + +#include "cker/Types.h" +#include "cker/Shape.h" +#include "cker/Utils.h" + +#include "cker/eigen/EigenSupport.h" +#include "cker/operation/Helper/PhiloxRandom.h" + +namespace nnfw +{ +namespace cker +{ +namespace random +{ + +// Helper function to convert a 16-bit integer to a half between [0..1). +PHILOX_DEVICE_INLINE Eigen::half Uint16ToHalf(uint16_t x); +// Helper function to convert a 16-bit integer to a bfloat16 between [0..1). +// PHILOX_DEVICE_INLINE bfloat16 Uint16ToGfloat16(uint16 x); +// Helper function to convert a 32-bit integer to a float between [0..1). +PHILOX_DEVICE_INLINE float Uint32ToFloat(uint32_t x); +// Helper function to convert two 32-bit integers to a double between [0..1). +PHILOX_DEVICE_INLINE double Uint64ToDouble(uint32_t x0, uint32_t x1); + +// Computes a + b. Requires that the result is representable in the destination +// type and that b is not maximal (i.e. b + 1 is not 0). Notably, the addend b +// need *not* be representable in that type. (The condition on b excludes the +// extremal case INT_MIN + UINT_MAX = INT_MAX, which this function cannot +// compute.) +template +PHILOX_DEVICE_INLINE Int SignedAdd(Int a, typename std::make_unsigned::type b) +{ + // Implementation note: both b_div_2 and b - b_div_2 are positive and + // representable as Int. + auto b_div_2 = b >> 1; + return a + static_cast(b_div_2) + static_cast(b - b_div_2); +} + +// A class that generates uniform distribution random numbers from the +// underlying random integer generator. +// Arguments: +// Generator: a generator type that returns a number of uint32 upon each +// invocation. It needs to define kResultElementCount for the +// sample count for each invocation, and ResultType for the +// actual returned sample type. +// RealType: the data type of the real numbers that will be returned by the +// distribution. This could be either float or double for now. +// This class is meant to be implemented through specialization. The default +// is not defined by design. +template class UniformDistribution; + +template class UniformDistribution +{ +public: + // The number of elements that will be returned. + static constexpr int kResultElementCount = Generator::kResultElementCount; + // Cost of generation of a single element (in cycles). + static constexpr int kElementCost = 3; + // Indicate that this distribution may take variable number of samples + // during the runtime. + static constexpr bool kVariableSamplesPerOutput = false; + typedef Array ResultType; + typedef Eigen::half ResultElementType; + + PHILOX_DEVICE_INLINE + ResultType operator()(Generator *gen) + { + typename Generator::ResultType sample = (*gen)(); + ResultType result; + for (int i = 0; i < kResultElementCount; ++i) + { + result[i] = Uint16ToHalf(sample[i]); // Truncate the upper 16 bits. 
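+      // Uint16ToHalf (defined near the end of this header) keeps only the low 10 of these
+      // 16 bits as the half-precision mantissa, so each element is uniform in [0, 1).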
+ } + return result; + } +}; + +template class UniformDistribution +{ +public: + // The number of elements that will be returned. + static constexpr int kResultElementCount = Generator::kResultElementCount; + // Cost of generation of a single element (in cycles). + static constexpr int kElementCost = 3; + // Indicate that this distribution may take variable number of samples + // during the runtime. + static constexpr bool kVariableSamplesPerOutput = false; + typedef Array ResultType; + typedef float ResultElementType; + + PHILOX_DEVICE_INLINE + ResultType operator()(Generator *gen) + { + typename Generator::ResultType sample = (*gen)(); + ResultType result; + for (int i = 0; i < kResultElementCount; ++i) + { + result[i] = Uint32ToFloat(sample[i]); + } + return result; + } +}; + +template class UniformDistribution +{ +public: + // The number of elements that will be returned. + static constexpr int kResultElementCount = Generator::kResultElementCount / 2; + // Cost of generation of a single element (in cycles). + static constexpr int kElementCost = 3; + // Indicate that this distribution may take variable number of samples + // during the runtime. + static constexpr bool kVariableSamplesPerOutput = false; + typedef Array ResultType; + typedef double ResultElementType; + + PHILOX_DEVICE_INLINE + ResultType operator()(Generator *gen) + { + typename Generator::ResultType sample = (*gen)(); + ResultType result; + for (int i = 0; i < kResultElementCount; ++i) + { + result[i] = Uint64ToDouble(sample[2 * i], sample[2 * i + 1]); + } + return result; + } +}; + +template class UniformDistribution +{ +public: + // The number of elements that will be returned. + static constexpr int kResultElementCount = Generator::kResultElementCount; + // Cost of generation of a single element (in cycles). + static constexpr int kElementCost = 3; + // Indicate that this distribution may take variable number of samples + // during the runtime. + static constexpr bool kVariableSamplesPerOutput = false; + typedef Array ResultType; + typedef int32_t ResultElementType; + + // Must have lo < hi + UniformDistribution(int32_t lo, int32_t hi) + : lo_(lo), range_(static_cast(hi) - static_cast(lo)) + { + } + + PHILOX_DEVICE_INLINE + ResultType operator()(Generator *gen) + { + typename Generator::ResultType sample = (*gen)(); + ResultType result; + for (int i = 0; i < kResultElementCount; ++i) + { + result[i] = SignedAdd(lo_, sample[i] % range_); + } + return result; + } + +private: + // Note that lo_ is intentionally signed while range_ is intentionally + // unsigned. This is because hi - lo can overflow signed integers if + // lo < 0 < hi, but always fits in unsigned. + int32_t lo_; + int32_t range_; +}; + +template class UniformDistribution +{ +public: + // The number of elements that will be returned. + static constexpr int kResultElementCount = Generator::kResultElementCount / 2; + // Cost of generation of a single element (in cycles). + static constexpr int kElementCost = 3; + // Indicate that this distribution may take variable number of samples + // during the runtime. 
+ static constexpr bool kVariableSamplesPerOutput = false; + typedef Array ResultType; + typedef int64_t ResultElementType; + + // Must have lo < hi + UniformDistribution(int64_t lo, int64_t hi) + : lo_(lo), range_(static_cast(hi) - static_cast(lo)) + { + } + + PHILOX_DEVICE_INLINE + ResultType operator()(Generator *gen) + { + typename Generator::ResultType sample = (*gen)(); + ResultType result; + for (int i = 0; i < kResultElementCount; ++i) + { + auto bits = sample[2 * i] | static_cast(sample[2 * i + 1]) << 32; + result[i] = SignedAdd(lo_, bits % range_); + } + return result; + } + +private: + // Note that lo_ is intentionally signed while range_ is intentionally + // unsigned. This is because hi - lo can overflow signed integers if + // lo < 0 < hi, but always fits in unsigned. + int64_t lo_; + uint64_t range_; +}; + +// Similar to `UniformDistribution`, except that instead of generating numbers +// in the range [low, high), it generates numbers covering the whole range of +// the integer type. +template class UniformFullIntDistribution; + +template class UniformFullIntDistribution32 +{ +public: + // The number of elements that will be returned. + static constexpr int kResultElementCount = Generator::kResultElementCount; + // Cost of generation of a single element (in cycles). + static constexpr int kElementCost = 3; + // Indicate that this distribution may take variable number of samples + // during the runtime. + static constexpr bool kVariableSamplesPerOutput = false; + typedef Array ResultType; + typedef IntType ResultElementType; + + PHILOX_DEVICE_INLINE + ResultType operator()(Generator *gen) + { + typename Generator::ResultType sample = (*gen)(); + ResultType result; + for (int i = 0; i < kResultElementCount; ++i) + { + result[i] = sample[i]; + } + return result; + } +}; + +template class UniformFullIntDistribution64 +{ +public: + // The number of elements that will be returned. + static constexpr int kResultElementCount = Generator::kResultElementCount / 2; + // Cost of generation of a single element (in cycles). + static constexpr int kElementCost = 3; + // Indicate that this distribution may take variable number of samples + // during the runtime. + static constexpr bool kVariableSamplesPerOutput = false; + typedef Array ResultType; + typedef IntType ResultElementType; + + PHILOX_DEVICE_INLINE + ResultType operator()(Generator *gen) + { + typename Generator::ResultType sample = (*gen)(); + ResultType result; + for (int i = 0; i < kResultElementCount; ++i) + { + result[i] = sample[2 * i] | static_cast(sample[2 * i + 1]) << 32; + } + return result; + } +}; + +template +class UniformFullIntDistribution + : public UniformFullIntDistribution32 +{ +}; +template +class UniformFullIntDistribution + : public UniformFullIntDistribution32 +{ +}; +template +class UniformFullIntDistribution + : public UniformFullIntDistribution64 +{ +}; +template +class UniformFullIntDistribution + : public UniformFullIntDistribution64 +{ +}; + +// A class that adapts the underlying native multiple samples to return a single +// sample at a time. +template class SingleSampleAdapter +{ +public: + // The number of elements that will be returned. + static constexpr int kResultElementCount = 1; + // The number of elements that will be returned by the underlying generator. 
+ static constexpr int kNativeElementCount = Generator::kResultElementCount; + typedef typename Generator::ResultElementType ResultType; + typedef typename Generator::ResultElementType ResultElementType; + + PHILOX_DEVICE_INLINE + explicit SingleSampleAdapter(Generator *gen) + : generator_(gen), used_result_index_(Generator::kResultElementCount) + { + } + + PHILOX_DEVICE_INLINE + ResultType operator()() + { + if (used_result_index_ == Generator::kResultElementCount) + { + unused_results_ = (*generator_)(); + used_result_index_ = 0; + } + + return unused_results_[used_result_index_++]; + } + + PHILOX_DEVICE_INLINE + void Skip(uint64_t num_skips) + { + if (!num_skips) + { + return; + } + int num_unused_results = kNativeElementCount - used_result_index_; + if (num_skips <= num_unused_results) + { + used_result_index_ += num_skips; + return; + } + num_skips -= num_unused_results; + used_result_index_ = kNativeElementCount; + SkipFromGenerator(num_skips / kNativeElementCount); + num_skips = num_skips % kNativeElementCount; + if (num_skips) + { + unused_results_ = (*generator_)(); + used_result_index_ = num_skips; + } + } + +private: + // This implementation iteratively skips over `num_skips` samples + // from `generator_`. There is an O(1) implementation for PhiloxRandom + // in random_distributions.cc. + PHILOX_DEVICE_INLINE + void SkipFromGenerator(uint64_t num_skips) + { + while (num_skips--) + { + (*generator_)(); + } + } + + Generator *generator_; + typename Generator::ResultType unused_results_; + int used_result_index_; +}; + +// A class that generates unit normal distribution random numbers from the +// underlying random integer generator. +// Arguments: +// Generator: a generator type that returns a number of uint32 upon each +// each invocation. It needs to define kResultElementCount for the +// sample count for each invocation, and ResultType for actual +// returned sample type. +// RealType: the data type of the real numbers that will be returned by the +// distribution. This could be either float or double for now. +// This class is meant to be implemented through specialization. The default +// is not defined by design. +template class NormalDistribution; + +PHILOX_DEVICE_INLINE +void BoxMullerFloat(uint32_t x0, uint32_t x1, float *f0, float *f1); + +PHILOX_DEVICE_INLINE +void BoxMullerDouble(uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3, double *d0, double *d1); + +// Exactly like the float version, except that we convert to half afterwards; +// since we don't have half-precision sin/cos even on GPUs, there's nothing to +// gain from working in half internally. +template class NormalDistribution +{ +public: + // The number of elements that will be returned. + static constexpr int kResultElementCount = Generator::kResultElementCount; + // Cost of generation of a single element (in cycles). + static constexpr int kElementCost = 70; + // Indicate that this distribution may take variable number of samples + // during the runtime. 
+ static constexpr bool kVariableSamplesPerOutput = false; + typedef Array ResultType; + typedef Eigen::half ResultElementType; + + PHILOX_DEVICE_INLINE + ResultType operator()(Generator *gen) + { + typename Generator::ResultType sample = (*gen)(); + ResultType result; + for (int i = 0; i < kResultElementCount; i += 2) + { + float f[2]; + BoxMullerFloat(sample[i], sample[i + 1], &f[0], &f[1]); + result[i] = Eigen::half(f[0]); + result[i + 1] = Eigen::half(f[1]); + } + return result; + } +}; + +template class NormalDistribution +{ +public: + // The number of elements that will be returned. + static constexpr int kResultElementCount = Generator::kResultElementCount; + // Cost of generation of a single element (in cycles). + static constexpr int kElementCost = 70; + // Indicate that this distribution may take variable number of samples + // during the runtime. + static constexpr bool kVariableSamplesPerOutput = false; + typedef Array ResultType; + typedef float ResultElementType; + + PHILOX_DEVICE_INLINE + ResultType operator()(Generator *gen) + { + typename Generator::ResultType sample = (*gen)(); + ResultType result; + for (int i = 0; i < kResultElementCount; i += 2) + { + BoxMullerFloat(sample[i], sample[i + 1], &result[i], &result[i + 1]); + } + return result; + } +}; + +template class NormalDistribution +{ +public: + // The number of elements that will be returned. + static constexpr int kResultElementCount = Generator::kResultElementCount / 2; + // Cost of generation of a single element (in cycles). + static constexpr int kElementCost = 70; + // Indicate that this distribution may take variable number of samples + // during the runtime. + static constexpr bool kVariableSamplesPerOutput = false; + typedef Array ResultType; + typedef double ResultElementType; + + PHILOX_DEVICE_INLINE + ResultType operator()(Generator *gen) + { + typename Generator::ResultType sample = (*gen)(); + ResultType result; + for (int i = 0; i < kResultElementCount; i += 2) + { + const int i2 = 2 * i; + BoxMullerDouble(sample[i2], sample[i2 + 1], sample[i2 + 2], sample[i2 + 3], &result[i], + &result[i + 1]); + } + return result; + } +}; + +// A class that returns standard normal distribution between +// [-kTruncateValue, kTruncateValue]. +// Arguments: +// Generator: a generator type that returns a number of uint32 upon each +// each invocation. It needs to define kResultElementCount for the +// sample count for each invocation, and ResultType for actual +// returned sample type. +// RealType: the data type of the real numbers that will be returned by the +// distribution. This could be either float or double for now. +// This class is meant to be implemented through specialization. The default +// is not defined by design. +template class TruncatedNormalDistribution; + +// Exactly like the float version, except that we convert to half afterwards; +// since we don't have half-precision sin/cos even on GPUs, there's nothing to +// gain from working in half internally. +template +class TruncatedNormalDistribution +{ +public: + // The number of elements that will be returned. + static constexpr int kResultElementCount = SingleSampleGenerator::kNativeElementCount; + // Cost of generation of a single element (in cycles). + static constexpr int kElementCost = 90; + // Indicate that this distribution may take variable number of samples + // during the runtime. + static constexpr bool kVariableSamplesPerOutput = true; + // The threshold where the normal distribution is truncated. 
+ const float kTruncateValue = 2.0f; + + typedef Array ResultType; + typedef Eigen::half ResultElementType; + + PHILOX_DEVICE_INLINE + ResultType operator()(SingleSampleGenerator *gen) + { + ResultType results; + int index = 0; + while (true) + { + // Repeatedly take samples from the normal distribution, until we have + // the desired number of elements that fall within the pre-defined cutoff + // threshold. + const uint32_t x0 = (*gen)(); + const uint32_t x1 = (*gen)(); + float f[2]; + BoxMullerFloat(x0, x1, &f[0], &f[1]); + + if (Eigen::numext::abs(f[0]) < kTruncateValue) + { + results[index++] = Eigen::half(f[0]); + if (index >= kResultElementCount) + { + return results; + } + } + if (Eigen::numext::abs(f[1]) < kTruncateValue) + { + results[index++] = Eigen::half(f[1]); + if (index >= kResultElementCount) + { + return results; + } + } + } + } +}; + +// Partial specialization for float. +template +class TruncatedNormalDistribution +{ +public: + // The number of elements that will be returned. + static constexpr int kResultElementCount = SingleSampleGenerator::kNativeElementCount; + // Cost of generation of a single element (in cycles). + static constexpr int kElementCost = 90; + // Indicate that this distribution may take variable number of samples + // during the runtime. + static constexpr bool kVariableSamplesPerOutput = true; + // The threshold where the normal distribution is truncated. + const float kTruncateValue = 2.0f; + + typedef Array ResultType; + typedef float ResultElementType; + + PHILOX_DEVICE_INLINE + ResultType operator()(SingleSampleGenerator *gen) + { + ResultType results; + int index = 0; + while (true) + { + // Repeatedly take samples from the normal distribution, until we have + // the desired number of elements that fall within the pre-defined cutoff + // threshold. + const uint32_t x0 = (*gen)(); + const uint32_t x1 = (*gen)(); + float f[2]; + BoxMullerFloat(x0, x1, &f[0], &f[1]); + + if (Eigen::numext::abs(f[0]) < kTruncateValue) + { + results[index++] = f[0]; + if (index >= kResultElementCount) + { + return results; + } + } + if (Eigen::numext::abs(f[1]) < kTruncateValue) + { + results[index++] = f[1]; + if (index >= kResultElementCount) + { + return results; + } + } + } + } +}; + +// Partial specialization for double. +template +class TruncatedNormalDistribution +{ +public: + // The number of elements that will be returned. + static constexpr int kResultElementCount = (SingleSampleGenerator::kNativeElementCount > 1) + ? SingleSampleGenerator::kNativeElementCount / 2 + : 1; + // Cost of generation of a single element (in cycles). + static constexpr int kElementCost = 90; + // Indicate that this distribution may take variable number of samples + // during the runtime. 
+ static constexpr bool kVariableSamplesPerOutput = true; + typedef Array ResultType; + typedef double ResultElementType; + const double kTruncateValue = 2.0; + + PHILOX_DEVICE_INLINE + ResultType operator()(SingleSampleGenerator *gen) + { + ResultType results; + int index = 0; + while (1) + { + const uint32_t x0 = (*gen)(); + const uint32_t x1 = (*gen)(); + const uint32_t x2 = (*gen)(); + const uint32_t x3 = (*gen)(); + double d[2]; + BoxMullerDouble(x0, x1, x2, x3, &d[0], &d[1]); + + if (Eigen::numext::abs(d[0]) < kTruncateValue) + { + results[index++] = d[0]; + if (index >= kResultElementCount) + { + return results; + } + } + if (Eigen::numext::abs(d[1]) < kTruncateValue) + { + results[index++] = d[1]; + if (index >= kResultElementCount) + { + return results; + } + } + } + } +}; + +// Helper function to convert two 32-bit uniform integers to two floats +// under the unit normal distribution. +PHILOX_DEVICE_INLINE +void BoxMullerFloat(uint32_t x0, uint32_t x1, float *f0, float *f1) +{ + // This function implements the Box-Muller transform: + // http://en.wikipedia.org/wiki/Box%E2%80%93Muller_transform#Basic_form + // Do not send a really small number to log(). + // We cannot mark "epsilon" as "static const" because NVCC would complain + const float epsilon = 1.0e-7f; + float u1 = Uint32ToFloat(x0); + if (u1 < epsilon) + { + u1 = epsilon; + } + const float v1 = 2.0f * M_PI * Uint32ToFloat(x1); + const float u2 = Eigen::numext::sqrt(-2.0f * Eigen::numext::log(u1)); +#if defined(TENSORFLOW_USE_SYCL) || !defined(__linux__) + *f0 = Eigen::numext::sin(v1); + *f1 = Eigen::numext::cos(v1); +#else + sincosf(v1, f0, f1); +#endif + *f0 *= u2; + *f1 *= u2; +} + +// Helper function to convert four 32-bit uniform integers to two doubles +// under the unit normal distribution. +PHILOX_DEVICE_INLINE +void BoxMullerDouble(uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3, double *d0, double *d1) +{ + // This function implements the Box-Muller transform: + // http://en.wikipedia.org/wiki/Box%E2%80%93Muller_transform#Basic_form + // Do not send a really small number to log(). + // We cannot mark "epsilon" as "static const" because NVCC would complain + const double epsilon = 1.0e-7; + double u1 = Uint64ToDouble(x0, x1); + if (u1 < epsilon) + { + u1 = epsilon; + } + const double v1 = 2 * M_PI * Uint64ToDouble(x2, x3); + const double u2 = Eigen::numext::sqrt(-2.0 * Eigen::numext::log(u1)); +#if defined(TENSORFLOW_USE_SYCL) || !defined(__linux__) + *d0 = Eigen::numext::sin(v1); + *d1 = Eigen::numext::cos(v1); +#else + sincos(v1, d0, d1); +#endif + *d0 *= u2; + *d1 *= u2; +} + +// Helper function to convert an 16-bit integer to a half between [0..1). +PHILOX_DEVICE_INLINE Eigen::half Uint16ToHalf(uint16_t x) +{ + // IEEE754 halfs are formatted as follows (MSB first): + // sign(1) exponent(5) mantissa(10) + // Conceptually construct the following: + // sign == 0 + // exponent == 15 -- an excess 15 representation of a zero exponent + // mantissa == 10 random bits + const uint16_t man = x & 0x3ffu; // 10 bit mantissa + const uint16_t exp = static_cast(15); + const uint16_t val = (exp << 10) | man; + + Eigen::half result; + result.x = val; + return result - Eigen::half(1.0); +} + +// Helper function to convert an 32-bit integer to a float between [0..1). 
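+// For example, a zero input produces the bit pattern 0x3f800000 (exactly 1.0f), so the result
+// is 0.0f, while an all-ones input fills all 23 mantissa bits and yields the largest float
+// below 2.0f, i.e. a result just under 1.0f. Uint16ToHalf above and Uint64ToDouble below use
+// the same fixed-exponent trick.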
+PHILOX_DEVICE_INLINE float Uint32ToFloat(uint32_t x) +{ + // IEEE754 floats are formatted as follows (MSB first): + // sign(1) exponent(8) mantissa(23) + // Conceptually construct the following: + // sign == 0 + // exponent == 127 -- an excess 127 representation of a zero exponent + // mantissa == 23 random bits + const uint32_t man = x & 0x7fffffu; // 23 bit mantissa + const uint32_t exp = static_cast(127); + const uint32_t val = (exp << 23) | man; + + // Assumes that endian-ness is same for float and uint32. + float result; + memcpy(&result, &val, sizeof(val)); + return result - 1.0f; +} + +// Helper function to convert two 32-bit integers to a double between [0..1). +PHILOX_DEVICE_INLINE double Uint64ToDouble(uint32_t x0, uint32_t x1) +{ + // IEEE754 doubles are formatted as follows (MSB first): + // sign(1) exponent(11) mantissa(52) + // Conceptually construct the following: + // sign == 0 + // exponent == 1023 -- an excess 1023 representation of a zero exponent + // mantissa == 52 random bits + const uint32_t mhi = x0 & 0xfffffu; // upper 20 bits of mantissa + const uint32_t mlo = x1; // lower 32 bits of mantissa + const uint64_t man = (static_cast(mhi) << 32) | mlo; // mantissa + const uint64_t exp = static_cast(1023); + const uint64_t val = (exp << 52) | man; + // Assumes that endian-ness is same for double and uint64. + double result; + memcpy(&result, &val, sizeof(val)); + return result - 1.0; +} + +} // namespace random +} // namespace tensorflow +} + +#endif // __NNFW_CKER_HELPER_RANDOM_DISTRIBUTIONS_H__ diff --git a/compute/cker/include/cker/operation/Helper/RandomOp.h b/compute/cker/include/cker/operation/Helper/RandomOp.h new file mode 100644 index 0000000..7dc51fe --- /dev/null +++ b/compute/cker/include/cker/operation/Helper/RandomOp.h @@ -0,0 +1,52 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved + * Copyright 2015 The TensorFlow Authors. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __NNFW_CKER_HELPER_RANDOM_OP_H__ +#define __NNFW_CKER_HELPER_RANDOM_OP_H__ + +#include "cker/Types.h" +#include "cker/Shape.h" +#include "cker/Utils.h" + +#include "cker/operation/Helper/RandomDistributions.h" + +namespace nnfw +{ +namespace cker +{ + +namespace functor +{ + +template struct FillPhiloxRandom; + +typedef Eigen::ThreadPoolDevice CPUDevice; +// Declares the partially CPU-specialized functor struct. +// +// NOTE: Due to inlining done by the compiler, you may need to add +// explicit instantiation of the functor in random_op.cc. See example +// functor::FillPhiloxRandom. 
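+// For illustration, a call site is expected to look roughly like the following (the variable
+// names here are hypothetical):
+//
+//   random::PhiloxRandom gen(seed_lo, seed_hi);
+//   random::UniformDistribution<random::PhiloxRandom, float> dist;
+//   functor::FillPhiloxRandom<CPUDevice, decltype(dist)>()(gen, output_ptr, output_size, dist);
+//
+// i.e. the functor is parameterized by the device and the distribution, and the distribution
+// wraps the Philox generator declared in PhiloxRandom.h.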
+template <class Distribution> struct FillPhiloxRandom<CPUDevice, Distribution>
+{
+  void operator()(random::PhiloxRandom gen, typename Distribution::ResultElementType *data,
+                  int64_t size, Distribution dist);
+};
+
+} // namespace functor
+} // namespace tensorflow
+}
+#endif // __NNFW_CKER_HELPER_RANDOM_OP_H__
diff --git a/compute/cker/include/cker/operation/Helper/RandomOpCpu.h b/compute/cker/include/cker/operation/Helper/RandomOpCpu.h
new file mode 100644
index 0000000..85d2677
--- /dev/null
+++ b/compute/cker/include/cker/operation/Helper/RandomOpCpu.h
@@ -0,0 +1,163 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ * Copyright 2019 The TensorFlow Authors. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NNFW_CKER_HELPER_RANDOM_OP_CPU_H__
+#define __NNFW_CKER_HELPER_RANDOM_OP_CPU_H__
+
+#define EIGEN_USE_THREADS
+
+#include
+#include
+#include
+
+#include "cker/Types.h"
+#include "cker/Shape.h"
+#include "cker/Utils.h"
+
+#include "cker/eigen/EigenSupport.h"
+
+#include "cker/operation/Helper/PhiloxRandom.h"
+#include "cker/operation/Helper/RandomOp.h"
+#include "cker/operation/Helper/RandomDistributions.h"
+
+#if EIGEN_COMP_GNUC && __cplusplus > 199711L
+#define DISABLE_FLOAT_EQUALITY_WARNING \
+  _Pragma("GCC diagnostic push") _Pragma("GCC diagnostic ignored \"-Wfloat-equal\"")
+#define ENABLE_FLOAT_EQUALITY_WARNING _Pragma("GCC diagnostic pop")
+#else
+#define DISABLE_FLOAT_EQUALITY_WARNING
+#define ENABLE_FLOAT_EQUALITY_WARNING
+#endif
+
+namespace nnfw
+{
+namespace cker
+{
+
+typedef Eigen::ThreadPoolDevice CPUDevice;
+
+namespace functor
+{
+using random::PhiloxRandom;
+using random::SingleSampleAdapter;
+
+// The default implementation of the functor, which should never be invoked
+// But we still need to provide implementation for now for the linker to work,
+// since we do not support all the distributions yet.
+template <typename Device, class Distribution> struct FillPhiloxRandom
+{
+  typedef typename Distribution::ResultElementType T;
+  void operator()() {}
+};
+
+// A class to fill a specified range of random groups
+template <class Distribution, bool VariableSamplesPerOutput> struct FillPhiloxRandomTask;
+
+// Specialization for distribution that takes a fixed number of samples for
+// each output.
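+// For example, with a distribution whose kResultElementCount is 4, a request for 10 outputs
+// is served as two full groups of 4 copied into `data` plus one extra draw from which only
+// the first 2 elements are used.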
+template struct FillPhiloxRandomTask +{ + typedef typename Distribution::ResultElementType T; + static void Run(random::PhiloxRandom gen, T *data, int64_t size, Distribution dist) + { + const int kGroupSize = Distribution::kResultElementCount; + gen.Skip(0); + int64_t offset = 0; + + // First fill all the full-size groups + int64_t limit_group_full = size / kGroupSize; + for (int64_t index = 0; index < limit_group_full; ++index) + { + auto samples = dist(&gen); + std::copy(&samples[0], &samples[0] + kGroupSize, data + offset); + offset += kGroupSize; + } + + int64_t remaining_size = size - limit_group_full * kGroupSize; + + // If there are any remaining elements that need to be filled, process them + if (remaining_size > 0) + { + auto samples = dist(&gen); + std::copy(&samples[0], &samples[0] + remaining_size, data + offset); + } + } +}; + +// Specialization for distribution that takes a variable number of samples for +// each output. This will be slower due to the generality. +template struct FillPhiloxRandomTask +{ + typedef typename Distribution::ResultElementType T; + static constexpr int64_t kReservedSamplesPerOutput = 256; + + static void Run(random::PhiloxRandom base_gen, T *data, int64_t size, Distribution dist) + { + const int kGroupSize = Distribution::kResultElementCount; + static const int kGeneratorSkipPerOutputGroup = + kGroupSize * kReservedSamplesPerOutput / PhiloxRandom::kResultElementCount; + + int64_t offset = 0; + + // First fill all the full-size groups + int64_t limit_group_full = size / kGroupSize; + int64_t group_index; + for (group_index = 0; group_index < limit_group_full; ++group_index) + { + // Reset the generator to the beginning of the output group region + // This is necessary if we want the results to be independent of order + // of work + PhiloxRandom gen = base_gen; + gen.Skip(group_index * kGeneratorSkipPerOutputGroup); + SingleSampleAdapter single_samples(&gen); + + auto samples = dist(&single_samples); + std::copy(&samples[0], &samples[0] + kGroupSize, data + offset); + offset += kGroupSize; + } + + int64_t remaining_size = size - limit_group_full * kGroupSize; + // If there are any remaining elements that need to be filled, process them + if (remaining_size > 0) + { + PhiloxRandom gen = base_gen; + gen.Skip(group_index * kGeneratorSkipPerOutputGroup); + SingleSampleAdapter single_samples(&gen); + + auto samples = dist(&single_samples); + std::copy(&samples[0], &samples[0] + remaining_size, data + offset); + } + } +}; + +// Partial specialization for CPU to fill the entire region with randoms +// It splits the work into several tasks and run them in parallel +template +void FillPhiloxRandom:: +operator()(random::PhiloxRandom gen, typename Distribution::ResultElementType *data, int64_t size, + Distribution dist) +{ + FillPhiloxRandomTask::Run(gen, data, size, + dist); +} + +} // namespace functor + +} // end namespace tensorflow +} + +#endif // __NNFW_CKER_HELPER_RANDOM_OP_CPU_H__ diff --git a/compute/cker/include/cker/operation/L2Normalize.h b/compute/cker/include/cker/operation/L2Normalize.h new file mode 100644 index 0000000..a0075c3 --- /dev/null +++ b/compute/cker/include/cker/operation/L2Normalize.h @@ -0,0 +1,94 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved + * Copyright 2017 The TensorFlow Authors. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __NNFW_CKER_L2NORMALIZE_H__ +#define __NNFW_CKER_L2NORMALIZE_H__ + +#include "cker/Shape.h" +#include "cker/Utils.h" +#include "cker/Types.h" + +namespace nnfw +{ +namespace cker +{ + +void L2NormalizeFloat32(const Shape &input_shape, const float *input_data, + const Shape &output_shape, float *output_data) +{ + float epsilon = 1e-6; + const int trailing_dim = input_shape.DimensionsCount() - 1; + const int outer_size = MatchingFlatSizeSkipDim(input_shape, trailing_dim, output_shape); + const int depth = MatchingDim(input_shape, trailing_dim, output_shape, trailing_dim); + for (int i = 0; i < outer_size; ++i) + { + float squared_l2_norm = 0; + for (int c = 0; c < depth; ++c) + { + const float val = input_data[c]; + squared_l2_norm += val * val; + } + float l2_norm = std::sqrt(squared_l2_norm); + l2_norm = std::max(l2_norm, epsilon); + for (int c = 0; c < depth; ++c) + { + *output_data = *input_data / l2_norm; + ++output_data; + ++input_data; + } + } +} + +void L2NormalizeQuant8(L2NormParams ¶ms, const Shape &input_shape, const uint8_t *input_data, + const Shape &output_shape, uint8_t *output_data) +{ + const int trailing_dim = input_shape.DimensionsCount() - 1; + const int depth = MatchingDim(input_shape, trailing_dim, output_shape, trailing_dim); + const int outer_size = MatchingFlatSizeSkipDim(input_shape, trailing_dim, output_shape); + const int32_t input_zero_point = params.input_zero_point; + + for (int i = 0; i < outer_size; ++i) + { + int32_t square_l2_norm = 0; + for (int c = 0; c < depth; c++) + { + // Note that input_data advances by depth in the second pass below. 
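+      // First pass: this loop accumulates the sum of squared zero-point-corrected values,
+      // mirroring the squared_l2_norm accumulation in the float kernel above.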
+ int32_t diff = input_data[c] - input_zero_point; + square_l2_norm += diff * diff; + } + int32_t inv_l2norm_multiplier; + int inv_l2norm_shift; + GetInvSqrtQuantizedMultiplierExp(square_l2_norm, -1, &inv_l2norm_multiplier, &inv_l2norm_shift); + for (int c = 0; c < depth; c++) + { + int32_t diff = *input_data - input_zero_point; + int32_t rescaled_diff = MultiplyByQuantizedMultiplierSmallerThanOneExp( + 128 * diff, inv_l2norm_multiplier, inv_l2norm_shift); + int32_t unclamped_output_val = 128 + rescaled_diff; + int32_t output_val = std::min(static_cast(255), + std::max(static_cast(0), unclamped_output_val)); + *output_data = static_cast(output_val); + ++input_data; + ++output_data; + } + } +} + +} // namespace cker +} // namespace nnfw + +#endif // __NNFW_CKER_L2NORMALIZE_H__ diff --git a/compute/cker/include/cker/operation/Logistic.h b/compute/cker/include/cker/operation/Logistic.h index 7477858..3d3e59e 100644 --- a/compute/cker/include/cker/operation/Logistic.h +++ b/compute/cker/include/cker/operation/Logistic.h @@ -32,18 +32,9 @@ namespace cker inline void Logistic(const Shape &input_shape, const float *input_data, const Shape &output_shape, float *output_data) { -#ifdef __aarch64__ auto input_map = MapAsVector(input_data, input_shape); auto output_map = MapAsVector(output_data, output_shape); output_map.array() = input_map.array().unaryExpr(Eigen::internal::scalar_logistic_op()); -#else - // Note, this can be done using TANH: (1/2) + (1/2) * TANH(x/2) - const int size = MatchingFlatSize(input_shape, output_shape); - for (int i = 0; i < size; i++) - { - output_data[i] = 1.f / (1.f + std::exp(-input_data[i])); - } -#endif } } // namespace cker diff --git a/compute/cker/include/cker/operation/MatrixBandPart.h b/compute/cker/include/cker/operation/MatrixBandPart.h index 9f49c8f..5674ff3 100644 --- a/compute/cker/include/cker/operation/MatrixBandPart.h +++ b/compute/cker/include/cker/operation/MatrixBandPart.h @@ -32,10 +32,10 @@ void MatrixBandPart(const T num_lower_diags, const T num_upper_diags, const Shap { auto last_dim = input_shape.DimensionsCount() - 1; - T batch_num = 0; - for (int dim = 0; dim < last_dim - 2; dim++) + T batch_num = 1; + for (int dim = 0; dim < input_shape.DimensionsCount() - 2; dim++) { - batch_num += input_shape.Dims(dim); + batch_num *= input_shape.Dims(dim); } const T row_num = input_shape.Dims(last_dim - 1); diff --git a/compute/cker/include/cker/operation/Pad.h b/compute/cker/include/cker/operation/Pad.h index af432f3..4a2732d 100644 --- a/compute/cker/include/cker/operation/Pad.h +++ b/compute/cker/include/cker/operation/Pad.h @@ -26,9 +26,10 @@ namespace nnfw { namespace cker { +template inline void Pad(const int32_t *padding_data, int32_t pad_rank, const Shape &input_shape, - const float *input_data, const Shape &output_shape, float *output_data, - const float *constant_value_data) + const T *input_data, const Shape &output_shape, T *output_data, + const T *constant_value_data) { // Note, this is pad with mode=`CONSTANT`: it doesn't support `REFLECT` and `SYMMETRIC` // TODO: come up with more subtle solution that uses subtensors like arm compute @@ -38,7 +39,7 @@ inline void Pad(const int32_t *padding_data, int32_t pad_rank, const Shape &inpu /** List of padding information */ using PaddingList = std::vector; - auto constant_value = constant_value_data ? *constant_value_data : 0; + const T constant_value = constant_value_data ? 
*constant_value_data : 0; assert(output_shape.DimensionsCount() == input_shape.DimensionsCount()); PaddingList padding_list(pad_rank); @@ -64,7 +65,7 @@ inline void Pad(const int32_t *padding_data, int32_t pad_rank, const Shape &inpu { const int32_t in_row_len = input_shape.Dims(0); std::fill_n(output_data, padding_list[0].first, constant_value); - std::memcpy(output_data + padding_list[0].first, input_data, in_row_len * sizeof(float)); + std::memcpy(output_data + padding_list[0].first, input_data, in_row_len * sizeof(T)); std::fill_n(output_data + padding_list[0].first + in_row_len, padding_list[0].second, constant_value); break; @@ -89,7 +90,7 @@ inline void Pad(const int32_t *padding_data, int32_t pad_rank, const Shape &inpu out_offset += padding_list[1].first; // copy a row of input data - memcpy(output_data + out_offset, input_data + in_offset, in_row_len * sizeof(float)); + memcpy(output_data + out_offset, input_data + in_offset, in_row_len * sizeof(T)); out_offset += in_row_len; @@ -132,7 +133,7 @@ inline void Pad(const int32_t *padding_data, int32_t pad_rank, const Shape &inpu out_offset += padding_list[2].first; // copy a row of input data - memcpy(output_data + out_offset, input_data + in_offset, in_row_len * sizeof(float)); + memcpy(output_data + out_offset, input_data + in_offset, in_row_len * sizeof(T)); out_offset += in_row_len; @@ -191,7 +192,7 @@ inline void Pad(const int32_t *padding_data, int32_t pad_rank, const Shape &inpu out_c_offset += padding_list[3].first; // copy a row of input data - memcpy(output_data + out_c_offset, input_data + in_offset, in_row_len * sizeof(float)); + memcpy(output_data + out_c_offset, input_data + in_offset, in_row_len * sizeof(T)); out_c_offset += in_row_len; diff --git a/compute/cker/include/cker/operation/Quantize.h b/compute/cker/include/cker/operation/Quantize.h new file mode 100644 index 0000000..5c82d11 --- /dev/null +++ b/compute/cker/include/cker/operation/Quantize.h @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef __NNFW_CKER_QUANTIZE_H__ +#define __NNFW_CKER_QUANTIZE_H__ + +#include "cker/Shape.h" +#include "cker/Types.h" +#include "cker/Utils.h" +#include +#include +namespace nnfw +{ +namespace cker +{ +template +inline void Quantize(const Shape &input_shape, const InputT *input_data, const Shape &output_shape, + OutputT *output_data, const float output_scale, const int32_t output_offset) +{ + const int flat_size = MatchingFlatSize(input_shape, output_shape); + int min_val = std::numeric_limits::min(); + int max_val = std::numeric_limits::max(); + + for (int i = 0; i < flat_size; i++) + { + int32_t unclamped = static_cast(round(input_data[i] / output_scale)) + output_offset; + int32_t clamped = std::min(std::max(unclamped, min_val), max_val); + output_data[i] = clamped; + } +} +} // namespace cker +} // namespace nnfw + +#endif // __NNFW_CKER_QUANTIZE_H__ diff --git a/compute/cker/include/cker/operation/ReLU6.h b/compute/cker/include/cker/operation/ReLU6.h new file mode 100644 index 0000000..20df561 --- /dev/null +++ b/compute/cker/include/cker/operation/ReLU6.h @@ -0,0 +1,56 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved + * Copyright 2018 The TensorFlow Authors. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __NNFW_CKER_RELU6_H__ +#define __NNFW_CKER_RELU6_H__ + +#include "cker/Shape.h" +#include "cker/eigen/Utils.h" + +#include +#include + +namespace nnfw +{ +namespace cker +{ + +inline void ReLU6(const Shape &input_shape, const float *input_data, float *output_data) +{ + int size = input_shape.FlatSize(); + + for (int i = 0; i < size; ++i) + { + if (input_data[i] <= 0) + { + output_data[i] = 0; + } + else if (input_data[i] > 6.0) + { + output_data[i] = 6.0; + } + else + { + output_data[i] = input_data[i]; + } + } +} + +} // namespace cker +} // namespace nnfw + +#endif // __NNFW_CKER_RELU6_H__ diff --git a/compute/cker/include/cker/operation/Reduce.h b/compute/cker/include/cker/operation/Reduce.h index 4ba3652..cf9634a 100644 --- a/compute/cker/include/cker/operation/Reduce.h +++ b/compute/cker/include/cker/operation/Reduce.h @@ -159,6 +159,92 @@ public: num_resolved_axis, temp_index_data(), reducer, output_data); } + // Computes the mean of elements across dimensions given in axis. + // It does so in two stages, first calculates the sum of elements along the axis + // then divides it by the number of element in axis for quantized values. + template + inline bool QuantizedMeanOrSum(const T *input_data, int32_t input_zero_point, float input_scale, + const Shape &input_shape, T *output_data, + int32_t output_zero_point, float output_scale, + const Shape &output_shape, const std::vector &axes, + bool /*keep_dims*/, U *temp_sum, bool compute_sum, + U reducer(const U current, const T in)) + { + // Reset output data. + size_t num_outputs = 1; + for (int idx = 0; idx < output_shape.DimensionsCount(); ++idx) + { + size_t current = static_cast(output_shape.Dims(idx)); + // Overflow prevention. 
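+      // Guard against num_outputs * current wrapping around: the product overflows size_t
+      // exactly when num_outputs exceeds the maximum size_t value divided by current.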
+ if (num_outputs > std::numeric_limits::max() / current) + { + return false; + } + num_outputs *= current; + } + for (size_t idx = 0; idx < num_outputs; ++idx) + { + output_data[idx] = T(); + temp_sum[idx] = U(); + } + + // Resolve axis. + int num_resolved_axis = 0; + if (!ResolveAxis(input_shape.DimensionsCount(), axes, resolved_axis_data(), &num_resolved_axis)) + { + return false; + } + + if (!ReduceImpl(input_data, input_shape, output_shape, resolved_axis_data(), + num_resolved_axis, temp_index_data(), reducer, temp_sum)) + { + return false; + } + + // Calculate mean by dividing output_data by num of aggregated element. + U num_elements_in_axis = 1; + for (int idx = 0; idx < num_resolved_axis; ++idx) + { + size_t current = static_cast(input_shape.Dims(resolved_axis_data()[idx])); + // Overflow prevention. + if (current > static_cast(std::numeric_limits::max() / num_elements_in_axis)) + { + return false; + } + num_elements_in_axis *= current; + } + + if (num_elements_in_axis > 0) + { + const float scale = input_scale / output_scale; + if (compute_sum) + { + // TODO(b/116341117): Eliminate float and do this completely in 8bit. + const float bias = -input_zero_point * scale * num_elements_in_axis + 0.5f; + for (size_t idx = 0; idx < num_outputs; ++idx) + { + const U value = + static_cast(std::round(temp_sum[idx] * scale + bias)) + output_zero_point; + output_data[idx] = static_cast(value); + } + } + else + { + const float bias = -input_zero_point * scale + 0.5f; + for (size_t idx = 0; idx < num_outputs; ++idx) + { + float float_mean = + static_cast(temp_sum[idx]) / static_cast(num_elements_in_axis); + float result = std::min(std::round(float_mean * scale + bias) + output_zero_point, + static_cast(std::numeric_limits::max())); + result = std::max(result, static_cast(std::numeric_limits::min())); + output_data[idx] = static_cast(result); + } + } + } + return true; + } + inline int32_t *resolved_axis_data(void) { return _resolved_axis.size() ? _resolved_axis.data() : _resolved_axis_small; diff --git a/compute/cker/include/cker/operation/ResizeBilinear.h b/compute/cker/include/cker/operation/ResizeBilinear.h new file mode 100644 index 0000000..7fc1e91 --- /dev/null +++ b/compute/cker/include/cker/operation/ResizeBilinear.h @@ -0,0 +1,270 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved + * Copyright 2017 The TensorFlow Authors. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#ifndef __NNFW_CKER_RESIZEBILINEAR_H__
+#define __NNFW_CKER_RESIZEBILINEAR_H__
+
+#include "cker/Shape.h"
+#include "cker/Types.h"
+#include <cmath>
+
+namespace nnfw
+{
+namespace cker
+{
+
+inline void ResizeBilinearKernel2x2(int32_t x0, int32_t x1, int32_t y0, int32_t y1, int32_t x,
+                                    int32_t y, int32_t depth, int32_t batch,
+                                    const Shape &input_shape, const float *input_data,
+                                    const Shape &output_shape, float *output_data)
+{
+  const int32_t input_width = input_shape.Dims(2);
+  const int32_t output_width = output_shape.Dims(2);
+
+  const int32_t input_x_offset = (x1 - x0) * depth;
+  const int32_t input_y_offset = (y1 - y0) * depth * input_width;
+  const int32_t output_x_offset = depth;
+  const int32_t output_y_offset = depth * output_width;
+
+  for (int ch = 0; ch < depth; ch++)
+  {
+    const int32_t input_offset = Offset(input_shape, batch, y0, x0, ch);
+
+    float x0y0 = input_data[input_offset];
+    float x1y0 = input_data[input_offset + input_x_offset];
+    float x0y1 = input_data[input_offset + input_y_offset];
+    float x1y1 = input_data[input_offset + input_x_offset + input_y_offset];
+
+    // Top left corner.
+    const int32_t output_offset = Offset(output_shape, batch, y, x, ch);
+    output_data[output_offset] = x0y0;
+
+    // Top right corner.
+    output_data[output_offset + output_x_offset] = (x0y0 + x1y0) / 2;
+
+    // Bottom left corner.
+    float output = (x0y0 + x0y1) / 2;
+    output_data[output_offset + output_y_offset] = output;
+
+    // Bottom right corner.
+    output_data[output_offset + output_x_offset + output_y_offset] =
+      (output + ((x1y0 + x1y1) / 2)) / 2;
+  }
+}
+
+inline void ResizeBilinear2x2(int32_t batches, int32_t input_height, int32_t input_width,
+                              int32_t depth, int32_t output_height, int32_t output_width,
+                              const Shape &input_shape, const float *input_data,
+                              const Shape &output_shape, float *output_data)
+{
+  for (int b = 0; b < batches; b++)
+  {
+    for (int y0 = 0, y = 0; y <= output_height - 2; y += 2, y0++)
+    {
+      for (int x0 = 0, x = 0; x <= output_width - 2; x += 2, x0++)
+      {
+        int32_t x1 = std::min(x0 + 1, input_width - 1);
+        int32_t y1 = std::min(y0 + 1, input_height - 1);
+        ResizeBilinearKernel2x2(x0, x1, y0, y1, x, y, depth, b, input_shape, input_data,
+                                output_shape, output_data);
+      }
+    }
+  }
+}
+
+inline void ResizeBilinearKernel(const float *input_ptr, int32_t depth, float scale,
+                                 float *output_ptr)
+{
+  for (int32_t i = 0; i < depth; i++)
+  {
+    *output_ptr += *input_ptr * scale;
+    output_ptr++;
+    input_ptr++;
+  }
+}
+
+inline void ComputeInterpolationValues(const float value, const float scale,
+                                       const bool half_pixel_centers, int32_t input_size,
+                                       float *scaled_value, int32_t *lower_bound,
+                                       int32_t *upper_bound)
+{
+  if (half_pixel_centers)
+  {
+    *scaled_value = (value + 0.5f) * scale - 0.5f;
+  }
+  else
+  {
+    *scaled_value = value * scale;
+  }
+  float scaled_value_floor = std::floor(*scaled_value);
+  *lower_bound = std::max(static_cast<int32_t>(scaled_value_floor), static_cast<int32_t>(0));
+  *upper_bound = std::min(static_cast<int32_t>(std::ceil(*scaled_value)), input_size - 1);
+}
+
+inline void ResizeBilinearGeneric(int32_t batches, int32_t input_height, int32_t input_width,
+                                  int32_t depth, int32_t output_height, int32_t output_width,
+                                  float height_scale, float width_scale, const Shape &input_shape,
+                                  const float *input_data, float *output_data,
+                                  const bool half_pixel_centers)
+{
+  memset(output_data, 0, batches * output_height * output_width * depth * sizeof(float));
+
+  int32_t output_offset = 0;
+  for (int b = 0; b < batches; ++b)
+  {
+    for (int y = 0; y < output_height; ++y)
+    {
+      float input_y;
+      int32_t y0, y1;
+      ComputeInterpolationValues(y, height_scale, half_pixel_centers, input_height, &input_y, &y0,
+                                 &y1);
+      for (int x = 0; x < output_width; ++x)
+      {
+        float input_x;
+        int32_t x0, x1;
+        ComputeInterpolationValues(x, width_scale, half_pixel_centers, input_width, &input_x, &x0,
+                                   &x1);
+        float *output_ptr = &output_data[output_offset];
+
+        // Run kernel on the 4 corners of the bilinear resize algorithm.
+        int32_t input_offset = Offset(input_shape, b, y0, x0, 0);
+        float scale = (1 - (input_y - y0)) * (1 - (input_x - x0));
+        const float *input_ptr = &input_data[input_offset];
+        ResizeBilinearKernel(input_ptr, depth, scale, output_ptr);
+
+        input_offset = Offset(input_shape, b, y0, x1, 0);
+        scale = (1 - (input_y - y0)) * (input_x - x0);
+        input_ptr = &input_data[input_offset];
+        ResizeBilinearKernel(input_ptr, depth, scale, output_ptr);
+
+        input_offset = Offset(input_shape, b, y1, x0, 0);
+        scale = (input_y - y0) * (1 - (input_x - x0));
+        input_ptr = &input_data[input_offset];
+        ResizeBilinearKernel(input_ptr, depth, scale, output_ptr);
+
+        input_offset = Offset(input_shape, b, y1, x1, 0);
+        scale = (input_y - y0) * (input_x - x0);
+        input_ptr = &input_data[input_offset];
+        ResizeBilinearKernel(input_ptr, depth, scale, output_ptr);
+
+        output_offset += depth;
+      }
+    }
+  }
+}
+
+template <typename T>
+inline void ResizeBilinearGenericSmallChannel(int32_t batches, int32_t input_height,
+                                              int32_t input_width, int32_t depth,
+                                              int32_t output_height, int32_t output_width,
+                                              float height_scale, float width_scale,
+                                              const Shape &input_shape, const T *input_data,
+                                              T *output_data, const bool half_pixel_centers)
+{
+  T *output_ptr = &output_data[0];
+  for (int b = 0; b < batches; ++b)
+  {
+    for (int y = 0; y < output_height; ++y)
+    {
+      float input_y;
+      int32_t y0, y1;
+      ComputeInterpolationValues(y, height_scale, half_pixel_centers, input_height, &input_y, &y0,
+                                 &y1);
+      for (int x = 0; x < output_width; ++x)
+      {
+        float input_x;
+        int32_t x0, x1;
+        ComputeInterpolationValues(x, width_scale, half_pixel_centers, input_width, &input_x, &x0,
+                                   &x1);
+
+        int32_t input_offset[4] = {
+          Offset(input_shape, b, y0, x0, 0), Offset(input_shape, b, y0, x1, 0),
+          Offset(input_shape, b, y1, x0, 0), Offset(input_shape, b, y1, x1, 0)};
+        float scale[4] = {(1 - (input_y - y0)) * (1 - (input_x - x0)),
+                          (1 - (input_y - y0)) * (input_x - x0),
+                          (input_y - y0) * (1 - (input_x - x0)), (input_y - y0) * (input_x - x0)};
+
+        for (int d = 0; d < depth; d++)
+        {
+          const T *input_ptr = &input_data[d];
+          *output_ptr++ = static_cast<T>(
+            input_ptr[input_offset[0]] * scale[0] + input_ptr[input_offset[1]] * scale[1] +
+            input_ptr[input_offset[2]] * scale[2] + input_ptr[input_offset[3]] * scale[3]);
+        }
+      }
+    }
+  }
+}
+
+void ResizeBilinear(ResizeBilinearParams &params, const Shape &input_shape, const float *input_data,
+                    const Shape &output_shape, float *output_data)
+{
+  int32_t batches = static_cast<int32_t>(MatchingDim(input_shape, 0, output_shape, 0));
+  int32_t input_height = input_shape.Dims(1);
+  int32_t input_width = input_shape.Dims(2);
+  int32_t depth = static_cast<int32_t>(MatchingDim(input_shape, 3, output_shape, 3));
+
+  // Specialize for 2x2 upsample.
+  if (!params.align_corners && !params.half_pixel_centers &&
+      params.output_height == 2 * input_height && params.output_width == 2 * input_width)
+  {
+    ResizeBilinear2x2(batches, input_height, input_width, depth, params.output_height,
+                      params.output_width, input_shape, input_data, output_shape, output_data);
+  }
+  else
+  {
+    float height_scale = static_cast<float>(input_height) / params.output_height;
+    float width_scale = static_cast<float>(input_width) / params.output_width;
+    if (params.align_corners && params.output_height > 1)
+    {
+      height_scale = static_cast<float>(input_height - 1) / (params.output_height - 1);
+    }
+    if (params.align_corners && params.output_width > 1)
+    {
+      width_scale = static_cast<float>(input_width - 1) / (params.output_width - 1);
+    }
+
+    ResizeBilinearGeneric(batches, input_height, input_width, depth, params.output_height,
+                          params.output_width, height_scale, width_scale, input_shape, input_data,
+                          output_data, params.half_pixel_centers);
+  }
+}
+
+void ResizeBilinear(ResizeBilinearParams &params, const Shape &input_shape,
+                    const uint8_t *input_data, const Shape &output_shape, uint8_t *output_data)
+{
+  int32_t batches = MatchingDim(input_shape, 0, output_shape, 0);
+  int32_t input_height = input_shape.Dims(1);
+  int32_t input_width = input_shape.Dims(2);
+  int32_t depth = MatchingDim(input_shape, 3, output_shape, 3);
+
+  float height_scale = (params.align_corners && params.output_height > 1)
+                         ? (static_cast<float>(input_height - 1) / (params.output_height - 1))
+                         : (static_cast<float>(input_height) / params.output_height);
+
+  float width_scale = (params.align_corners && params.output_width > 1)
+                        ? (static_cast<float>(input_width - 1) / (params.output_width - 1))
+                        : (static_cast<float>(input_width) / params.output_width);
+
+  ResizeBilinearGenericSmallChannel(
+    batches, input_height, input_width, depth, params.output_height, params.output_width,
+    height_scale, width_scale, input_shape, input_data, output_data, params.half_pixel_centers);
+}
+} // namespace cker
+} // namespace nnfw
+
+#endif // __NNFW_CKER_RESIZEBILINEAR_H__
diff --git a/compute/cker/include/cker/operation/SpaceToDepth.h b/compute/cker/include/cker/operation/SpaceToDepth.h
new file mode 100644
index 0000000..ef67931
--- /dev/null
+++ b/compute/cker/include/cker/operation/SpaceToDepth.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ * Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NNFW_CKER_SPACE_TO_DEPTH_H__
+#define __NNFW_CKER_SPACE_TO_DEPTH_H__
+
+#include "cker/Shape.h"
+#include "cker/Types.h"
+
+namespace nnfw
+{
+namespace cker
+{
+
+template <typename T>
+inline void SpaceToDepth(const SpaceToDepthParams &params, const Shape &unextended_input_shape,
+                         const T *input_data, const Shape &unextended_output_shape, T *output_data)
+{
+  assert(unextended_input_shape.DimensionsCount() <= 4);
+  assert(unextended_output_shape.DimensionsCount() <= 4);
+  const Shape input_shape = Shape::ExtendedShape(4, unextended_input_shape);
+  const Shape output_shape = Shape::ExtendedShape(4, unextended_output_shape);
+
+  const int output_depth = output_shape.Dims(3);
+  const int output_width = output_shape.Dims(2);
+  const int output_height = output_shape.Dims(1);
+
+  const int input_depth = input_shape.Dims(3);
+  const int batch_size = input_shape.Dims(0);
+
+  // Number of continuous values that we can copy in one iteration.
+  const int stride = params.block_size * input_depth;
+
+  for (int batch = 0; batch < batch_size; ++batch)
+  {
+    for (int out_h = 0; out_h < output_height; ++out_h)
+    {
+      T *output_ptr = output_data + Offset(output_shape, batch, out_h, 0, 0);
+      for (int offset_h = 0; offset_h < params.block_size; ++offset_h)
+      {
+        T *dst = output_ptr;
+        for (int out_w = 0; out_w < output_width; ++out_w)
+        {
+          memcpy(dst, input_data, stride * sizeof(T));
+          input_data += stride;
+          dst += output_depth;
+        }
+        output_ptr += stride;
+      }
+    }
+  }
+}
+
+} // namespace cker
+} // namespace nnfw
+
+#endif // __NNFW_CKER_SPACE_TO_DEPTH_H__
diff --git a/compute/cker/include/cker/operation/SplitV.h b/compute/cker/include/cker/operation/SplitV.h
new file mode 100644
index 0000000..9e46f4b
--- /dev/null
+++ b/compute/cker/include/cker/operation/SplitV.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ * Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NNFW_CKER_SPLIT_V_H__
+#define __NNFW_CKER_SPLIT_V_H__
+
+#include "cker/Shape.h"
+#include "cker/Types.h"
+
+namespace nnfw
+{
+namespace cker
+{
+
+template <typename Scalar>
+void SplitV(const SplitVParams &params, const Shape &input_shape, const Scalar *input_data,
+            std::vector<Shape> &output_shapes, Scalar *const *output_data)
+{
+  const int split_dimensions = input_shape.DimensionsCount();
+  int axis = params.axis < 0 ? params.axis + split_dimensions : params.axis;
+  int outputs_count = params.num_split;
+
+  int64_t split_size = 0;
+
+  for (int i = 0; i < outputs_count; i++)
+  {
+    // TFLITE_DCHECK_EQ(output_shapes[i]->DimensionsCount(), split_dimensions);
+    for (int j = 0; j < split_dimensions; j++)
+    {
+      if (j != axis)
+      {
+        MatchingDim(output_shapes[i], j, input_shape, j);
+      }
+    }
+    split_size += output_shapes[i].Dims(axis);
+  }
+
+  int64_t outer_size = 1;
+  for (int i = 0; i < axis; ++i)
+  {
+    outer_size *= input_shape.Dims(i);
+  }
+  // For all output arrays,
+  // FlatSize() = outer_size * Dims(axis) * base_inner_size;
+  int64_t base_inner_size = 1;
+  for (int i = axis + 1; i < split_dimensions; ++i)
+  {
+    base_inner_size *= input_shape.Dims(i);
+  }
+
+  const Scalar *input_ptr = input_data;
+  int copy_size = 0;
+  for (int k = 0; k < outer_size; k++)
+  {
+    for (int i = 0; i < outputs_count; ++i)
+    {
+      copy_size = output_shapes[i].Dims(axis) * base_inner_size;
+      memcpy(output_data[i] + k * copy_size, input_ptr, copy_size * sizeof(Scalar));
+      input_ptr += copy_size;
+    }
+  }
+}
+
+} // namespace cker
+} // namespace nnfw
+
+#endif // __NNFW_CKER_SPLIT_V_H__
diff --git a/compute/cker/include/cker/operation/StatelessRandomUniform.h b/compute/cker/include/cker/operation/StatelessRandomUniform.h
new file mode 100644
index 0000000..d5952ae
--- /dev/null
+++ b/compute/cker/include/cker/operation/StatelessRandomUniform.h
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ * Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NNFW_CKER_STATELESS_RANDOM_UNIFORM_H__
+#define __NNFW_CKER_STATELESS_RANDOM_UNIFORM_H__
+
+#include "cker/Types.h"
+#include "cker/Shape.h"
+#include "cker/Utils.h"
+
+#include "cker/eigen/EigenSupport.h"
+
+#include "cker/operation/Helper/Tensor.h"
+#include "cker/operation/Helper/PhiloxRandom.h"
+#include "cker/operation/Helper/RandomOpCpu.h"
+#include "cker/operation/Helper/RandomDistributions.h"
+
+namespace nnfw
+{
+namespace cker
+{
+
+void GenerateKey(Tensor seed, random::PhiloxRandom::Key *out_key,
+                 random::PhiloxRandom::ResultType *out_counter)
+{
+  // Grab the two seeds
+  uint32_t seed0;
+  uint32_t seed1;
+
+  const auto seed_vals = seed.flat<int32_t>();
+
+  seed0 = seed_vals(0);
+  seed1 = seed_vals(1);
+  // Scramble the seeds so that the user doesn't need to worry about which
+  // part of the seed needs to be strong.
+  (*out_key)[0] = 0x3ec8f720;
+  (*out_key)[1] = 0x02461e29;
+  (*out_counter)[0] = static_cast<uint32_t>(seed0);
+  (*out_counter)[1] = (*out_counter)[3] = 0;
+  (*out_counter)[2] = static_cast<uint32_t>(seed1);
+  const auto mix = random::PhiloxRandom(*out_counter, *out_key)();
+  (*out_key)[0] = mix[0];
+  (*out_key)[1] = mix[1];
+  (*out_counter)[0] = (*out_counter)[1] = 0;
+  (*out_counter)[2] = mix[2];
+  (*out_counter)[3] = mix[3];
+}
+
+template <typename Device, class Distribution>
+void Fill(random::PhiloxRandom random, Tensor *output)
+{
+  // Build distribution
+  typedef typename Distribution::ResultElementType T;
+
+  auto flat = output->flat<T>();
+  // Reuse the compute kernels from the stateful random ops
+  functor::FillPhiloxRandom<Device, Distribution>()(random, flat.data(), flat.size(),
+                                                    Distribution());
+}
+
+inline void StatelessRandomUniform(const Shape &shape_shape, const int *shape_data,
+                                   const Shape &seed_shape, const int *seed_data,
+                                   const Shape &output_shape, float *output_data)
+{
+  Tensor shape_t;
+  Tensor seed_t;
+
+  shape_t.shape.ReplaceWith(shape_shape.DimensionsCount(), shape_shape.DimsData());
+  shape_t.buffer = (void *)shape_data;
+
+  seed_t.shape.ReplaceWith(seed_shape.DimensionsCount(), seed_shape.DimsData());
+  seed_t.buffer = (void *)seed_data;
+
+  Tensor output_t;
+  output_t.shape.ReplaceWith(output_shape.DimensionsCount(), output_shape.DimsData());
+  output_t.buffer = output_data;
+
+  random::PhiloxRandom::Key key;
+  random::PhiloxRandom::ResultType counter;
+
+  GenerateKey(seed_t, &key, &counter);
+
+  Fill<Eigen::ThreadPoolDevice, random::UniformDistribution<random::PhiloxRandom, float>>(
+    random::PhiloxRandom(counter, key), &output_t);
+}
+} // namespace cker
+} // namespace nnfw
+
+#endif // __NNFW_CKER_STATELESS_RANDOM_UNIFORM_H__
diff --git a/compute/cker/include/cker/ruy/RuySupport.h b/compute/cker/include/cker/ruy/RuySupport.h
index 432b181..9612dd5 100644
--- a/compute/cker/include/cker/ruy/RuySupport.h
+++ b/compute/cker/include/cker/ruy/RuySupport.h
@@ -22,11 +22,6 @@
 #include <ruy/context.h>
 #include "cker/Types.h"
 
-namespace
-{
-const int kDefaultNumThreadpoolThreads = 4;
-}
-
 namespace nnfw
 {
 namespace cker
@@ -34,42 +29,6 @@ namespace cker
 namespace ruy_support
 {
 
-struct RuyContext
-{
-public:
-  RuyContext() : ruy_context_(new ruy::Context)
-  {
-    SetMaxNumThreads(onert::util::getConfigInt(onert::util::config::RUY_THREADS));
-#ifdef USE_RUY_GEMV
-    ruy_context_->cache_policy = ruy::kCacheLHSOnNarrowMul;
-#endif
-  };
-
-  ruy::Context *ruy_context() const { return ruy_context_.get(); }
-
-  static inline RuyContext &GetRuyContext()
-  {
-    static thread_local RuyContext instance;
-    return instance;
-  }
-
-  void SetMaxNumThreads(int max_num_threads)
-  {
-    const int target_num_threads =
-      max_num_threads > -1 ? max_num_threads : kDefaultNumThreadpoolThreads;
-    ruy_context_->max_num_threads = target_num_threads;
-  }
-
-private:
-  const std::unique_ptr<ruy::Context> ruy_context_;
-};
-
-inline ruy::Context *GetRuyContext()
-{
-  auto &ctx = RuyContext::GetRuyContext();
-  return ctx.ruy_context();
-}
-
 template <typename Scalar, typename DataPointer>
 void MakeRuyMatrix(const MatrixParams<Scalar> &params, DataPointer data_ptr,
                    ruy::Matrix<Scalar> *dst)
diff --git a/docs/conf.py b/docs/conf.py
index 3abe4f4..649b677 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -21,7 +21,7 @@ copyright = '2020, Samsung Research & contributors'
 author = 'Samsung Research & contributors'
 
 # The full version, including alpha/beta/rc tags
-release = '1.7.0'
+release = '1.8.0'
 
 # -- General configuration ---------------------------------------------------
 
diff --git a/docs/howto/how-to-build-runtime.md b/docs/howto/how-to-build-runtime.md
index 2bfd14c..f475119 100644
--- a/docs/howto/how-to-build-runtime.md
+++ b/docs/howto/how-to-build-runtime.md
@@ -13,7 +13,7 @@ In the Ubuntu, you can easily install it with the following command.
 
 ```
 $ sudo apt-get install cmake libboost-all-dev
-```
+```
 
 If your linux system does not have the basic development configuration, you will need to install more packages. A list of all packages needed to configure the development environment can be found in the https://github.com/Samsung/ONE/blob/master/infra/docker/Dockerfile.1804 file.
 
@@ -44,7 +44,7 @@ python3-venv \
 scons \
 software-properties-common \
 unzip \
-wget
+wget
 
 $ mkdir /tmp/gtest
 $ cd /tmp/gtest
@@ -63,7 +63,7 @@ In a typical linux development environment, including Ubuntu, you can build the
 ```
 $ git clone https://github.com/Samsung/ONE.git one
 $ cd one
-$ cp -n Makefile.template Makefile; make install
+$ make -f Makefile.template install
 ```
 
 Unfortunately, the debug build on the x86_64 architecture currently has an error. To solve the problem, you must use gcc version 9 or higher. Another workaround is to do a release build rather than a debug build. This is not a suitable method for debugging during development, but it is enough to check the function of the runtime. To release build the runtime, add the environment variable `BUILD_TYPE=release` to the build command as follows.
 
@@ -107,7 +107,7 @@ $ tree -L 3 ./Product/out
 │   │   ├── NeuralNetworksEx.h
 │   │   ├── NeuralNetworksExtensions.h
 │   │   ├── NeuralNetworks.h
-│   │   ├── nnfw_dev.h
+│   │   ├── nnfw_experimental.h
 │   │   └── nnfw.h
 │   └── onert
 │       ├── backend
diff --git a/docs/howto/how-to-use-nnfw-api.md b/docs/howto/how-to-use-nnfw-api.md
index 6c0fb49..1198a31 100644
--- a/docs/howto/how-to-use-nnfw-api.md
+++ b/docs/howto/how-to-use-nnfw-api.md
@@ -23,8 +23,8 @@ nnfw_load_model_from_file(session, nnpackage_path);
 ```
 3) (Optional) Assign a specific backend to operations
 ``` c
-  // Use acl_neon backend for CONV_2D and acl_cl for otherwise.
-  // Note that defalut backend is acl_cl
+  // Use the 'acl_neon' backend for CONV_2D and 'cpu' otherwise.
+  // Note that the default backend is 'cpu'.
   nnfw_set_op_backend(session, "CONV_2D", "acl_neon");
 ```
 
diff --git a/docs/nnfw/howto/CrossBuildForAndroid.md b/docs/nnfw/howto/CrossBuildForAndroid.md
index d7e48c8..08d5fd6 100644
--- a/docs/nnfw/howto/CrossBuildForAndroid.md
+++ b/docs/nnfw/howto/CrossBuildForAndroid.md
@@ -44,11 +44,9 @@ Different from cross build for linux,
 
 Here is an example of using Makefile.
 
 ```bash
-cp -n Makefile.template Makefile
-
 TARGET_OS=android \
 CROSS_BUILD=1 \
 NDK_DIR=/path/android-tools/r20/ndk \
 EXT_ACL_FOLDER=/path/arm_compute-v19.11.1-bin-android/lib/android-arm64-v8a-neon-cl \
-make install
+make -f Makefile.template install
 ```
diff --git a/docs/overview/supported-operations.md b/docs/overview/supported-operations.md
index 6120e24..1d9050a 100644
--- a/docs/overview/supported-operations.md
+++ b/docs/overview/supported-operations.md
@@ -106,7 +106,7 @@ SELECT_V2 | O |   |  
 SHAPE | O | O | O
 SIN | O | O | O
 SKIP_GRAM | O |   |  
-SLICE | O | O |  
+SLICE | O | O | O
 SOFTMAX | O | O | O
 SPACE_TO_BATCH_ND | O | O | O
 SPACE_TO_DEPTH | O | O | O
diff --git a/docs/release/1.7/release-note-1.7.0.md b/docs/release/1.7/release-note-1.7.0.md
deleted file mode 100644
index c1a4f50..0000000
--- a/docs/release/1.7/release-note-1.7.0.md
+++ /dev/null
@@ -1,46 +0,0 @@
-## Feature Highlights
-
-- **ONE** Compiler
-  - Compiler supports more operations
-  - New command line interface for user interface consistancy
-- **ONE** Runtime
-  - Runtime CPU backend supports more operations
-  - Runtime CPU backend supports more quant8 operations
-  - API changes
-  - New optimization
-
-## ONE Compiler
-
-### Compiler supports more operations
-
-- MatrixDiag, MatrixSetDiag, ReverseSequence, ReverseV2, SegmentSum, SelectV2, SparseToDense, Where
-
-### New command line interface for user interface consistancy
-
-- one-import: imports conventional model files to circle
-  - one-import-tf: imports TensorFlow model to circle
-  - one-import-tflite: imports TensorFlow lite model to circle
-- one-optimize: circle optimize command
-- one-quantize: circle quantize command
-  - supports float32 to uint8, layer wise (for Conv series)
-- one-pack: package command
-- one-prepare-venv: prepares python virtual environment for importing TensorFlow model
-- one-codegen: backend(if available) code generator
-
-## ONE Runtime
-
-### Runtime CPU backend supports more operations
-
-- LogSoftmax, SpaceToBatchND
-
-### Runtime CPU backend supports more quant8 operations
-
-- Logistic, Mul, Tanh, SpaceToBatchND, Transpose, Sub, Max, Min, Less, Greater, GreaterEqual, LessEqual, Equal, NotEqual
-
-### API changes
-
-- Introduce basic asynchronous execution API
-
-### New optimization
-
-- Remove dynamic tensor overhead from static models
diff --git a/docs/release/1.8/release-note-1.8.0.md b/docs/release/1.8/release-note-1.8.0.md
new file mode 100644
index 0000000..1cbbd0b
--- /dev/null
+++ b/docs/release/1.8/release-note-1.8.0.md
@@ -0,0 +1,42 @@
+# Release Note 1.8.0
+
+## Feature Highlights
+
+- **ONE** Compiler
+  - Support new command line interface
+
+- **ONE** Runtime
+  - CPU backend supports 7 more operations
+  - CPU backend supports 9 more quant8 operations
+
+## ONE Compiler
+
+### New command line interface for user interface consistency
+
+- `one-import-bcq`: imports a BCQ (binary coding quantized) TensorFlow model
+- Commands now support the `--version` option to show the version number
+
+### Changes
+
+- Experimental support for TensorFlow 2.x has been updated to 2.3.0 (TensorFlow 1.13.2 is our officially supported version)
+- Support more operators in luci-interpreter
+- Enhancements to one-quantize
+
+## ONE Runtime
+
+### Rename headers
+
+- Rename `nnfw_dev.h` to `nnfw_experimental.h`
+
+### Optimization
+
+- Remove copies for model inputs/outputs whenever possible
+
+### Support CPU backend operations
+
+- BatchToSpaceND, L2Normalization, ReLU6, ResizeBilinear, SpaceToDepth, SplitV, StatelessRandomUniform
+
+### Support CPU backend quant8 operations
+
+- BatchToSpaceND, L2Normalization, Pad, PadV2, ResizeBilinear, Slice, Quantize, SpaceToDepth, Sum
+
diff --git a/docs/runtime/api-layered-arch.png b/docs/runtime/api-layered-arch.png
new file mode 100644
index 0000000000000000000000000000000000000000..86eda7513fdac6509a69f966cc3fa7e524630278
GIT binary patch
literal 138968
zIdyc!Bjh9a+GkUQho?m)u8FU7cmCV^Yev@VBPhp~v#DuYso#90wDun6ir8{G9;$Tq z?6g_;_p*RX48!y!TYU(>HWibfydN67G zho455pcJC^zI?cct^lx(Y0d2ZbL-(hHIjZE2>N=I*jNO$xM^o8`S>2V<#c^32CVb( zn$ev<4gO#2I~kM3iBaV?PYYCs zGTker;wAjpUHAh_OF6XuGz03PntUafCn@z;T0{>(DMtRfRI{IFy!!roMihEcLh}JJ(On zRC@U7Ev=g=^fID$k&`;;3rdMhlhol|H)Jtv@$yGIHc&V9r$xMDp&Xh=lKrk9hwAr$ zT$RS{F&|~;3r{z0eHucg2kKjndQ#|xoyeB~VbRsqxo-OEAAfnE^v(f!CQE>#p{wnt z(!&qJ0JI>Fl|w_d3sMjD_a*;*$$wvR>Lr7#p;h8gNPYtSTYx+rjFJN&z#hcl<}cfmTjMig!yK>1qjO3&y-J= z*-S20d$AiTm)b`{f+^j`M%_F@mQ>luA%^?(%FxG^H)r0NBxAI%Ur=hNfa!Tm9dMr)X5hlw% zOjLe0KS8oPKo=wBRV1?T=D|KE4z2*3oNCfyFV5q028yZPfy@1qU*5GK@t8W8oJDq0HcSMa=&8Wuqx=pRmKtk;nUY zZpV!@<_%wY+Kjjm$Mx&qr>aHHK&h&5!ytnqLZ+9hMwZ-s8^5QouF!WMd6vZ6wQN+f zqT8ZPR{qs{q;Rf7rZqR6D-7;(TZIx-s#*uqPMKW2CDVGQs{`TNV{q zNg)D1zRh2om6-i|n+7Hbr;+vcH8EmV2_%Si2B%~*Dgq=Ov$o^M9bnp-K&F~)u2#&d z_CPhvH`5-iP>nVmvgO4a>YW@~2&zL{HUOJ67EHAzt0a>k#1t`ufE(~3j3vJhppBG? znZ`tGzCqQ)u-r6eRD=g$Sk$*g8hNINp{j|ld^x4xqBOXmD_K~mAi^4G1+!V?YQ+5Vc$2(`QvO94c5rE|G29n}U~9lFl1-w)V=wNHr~t>9 zlh;!gQPZTedVtTS0KEX8?(!1A;t!s-M8(dn_V}|kAd_YH+})GXKWxmkG@Hp@385_} zH?t70L>a5r#ubzfMZxD zoz}*hlNodD0=`iWfAUnq!JKCLlO8_VbsU~w802$(4H;dx?mnr$bz7YmV7Mqt_0xSA z>KocMVTfapGc>}x8x9s7Ngat?^}`)?_VXsTb`S4EF|jZ1O7o#!Kpl$jP3~;Dtt{gD zVPF!crEEjXW4O%Q19j@Xj-4KZ6D!QS#uTCRX)fKO5zm)u{WASLvj|#}`3JVD@3%CJ z-1=OSD*zmf4^=u8p-~%cx_8?UP>Z?8QslrPmWCv$Z88xEKE_bP0I_dCa3~MO)~f0+5Ey`{-NS3Tow!%U|QWfkS*4FW^bpO z@64koB}jKk6{+e=hpJEW^}li+3>Iq|M~}25XrRA6fw2bLhqg&{`o3VHw{q)sP}-ds zhN_rWOCx!gI=AUB`QG zB(dX6`!JRuSN3siZCs(|4Lm^2IJ-804pevb`1(NU+ax>69pc3Uy}0Rv(eCo96&HC$ zLpE;k8CYN@xP=>{+ylhgzlvcFE8i9Oa3saWdP4I`WfsTf@+ffSv zzODy4o=W?a**XGljfQJsv4H)nP_Yr`U`^ZN&g0AtUsM$hQF?)cUtM2E`#{7xEAMR{ zX&dU|u4!Xm{7|0NJ>Bun6ZQ_$b*`&WG&SvHjv5zQaoW=QY5{2#L6Yvg99bgmXHxw5 z7PKX$ll!C2Q(QT=mw3?9V?Dd+<#b<_G28E_!$~D&+X|hwJR#<_%1v$`Axk+1pI15_~k*_u?+bEnGgct$CwRXkp=sumEKupwkF9uxXd|e`iKz1c;6H0<%BR z-j#pVjiHMb)fw~c`8(ohAjgqYXWR#Xi3n%TEX)RT{8?6;kSUcrkY`@uV7*KtHcaMW zToJK{+4uR7=%iyHuJj)EON4jn=0 z+sB!^Pyg_E#Z!=b0`D+qBqB>@P4{1cyFIuUNCdz}tL|0MCg2CwAl#r)M)>e5YSCd{ z^8N`9TL}E5oE1?pse)9UMhp=TV_z~xXLZNFZ33}rNH6CxtUy3eyUZgwP!$dqoi^E} zDecRj7)kMA(;Fh(o)S@VK;|9Y3K76o*u*8(csOL+SS()x_6H;i>(s?hZk!*L5 zas^y(Tz1Llhl4JayRIIla0BH?5I9x6h2dkzV)4Ox4dPkOEsU;X684!~b-pV%!!MWq zE9P!?bD=mCD6pS=_qMqd0ju4{a4TiyVAgFz6mkGr z1hD1;Z#Y_EsW*fnG16r;Fy5Km^u4A-e+~iP15b?=Sd)Y<^uURI)4Uk zr+(P|Z2^aw{ViCY!VQh^`Lrz?`EY+nOenrb>xTM{i>vS0q5`7&Fu}t^uX4mbm^Q(l z!)8_2g7}UOnFm-(bhy7nT+-eP&MUWB? 
zn;4y;Q{xG9j|z{2{pwgm5gIaHY>QOhg&OAzXCwt}-0(p5(ZD#1!(hqu!Gw<_0RBCa zK>E(O{2~lg(j!BOVinGnS5g{MZ3x%!-JMlBAsob}2>}6_g(*<4r_K4;3Y{zl8Tkz- zP6G5)m&g~l$_IRl^N-?|p4oEKLsq+(!_v;ww0XgD(*8cEu)QP)wX@FY_YEB!mm^*D z$Ksttfo`4T^j5JMEj3^x#&J$xTkZptw%wV6J3si=YPZX+dtR|4*UoU=lNEJac95(X z-F+3;7C5QGRJONu=zTa~x*lqZ?`z0@#YHbyUVcesg&*vUmaQTI0T*p+H=+_S3)!NS zKK<~bb14w;#Vt1!R|xnfGZ65i=p>!~rlKv&n8Rl{oD(_4`a}r4m8KrL zrQ;`ECI$+ZK$kcF0F@jTq8=cll@T0ugy0cXNYn%Hu2ejTxX#EKP!^KMFk9l0w&Ptk)!;dBv$iQ_Jm|YT*h9$er?&fsq;F6uSB_v_C)_)q^@6 zb{xze6DIV-vGM_;eb+2BYv3OmYKT;M(&GVQmhMu070P|o^ZkX+XmsL$%wlV3{WN54 z$T%&XeO?B5lMOQfqSo|0RC&O4&PTW@StYVQ0dyi%I07x7yrs@^7{PtdtC%_&9uu)J zHORfZ@Xo<(=}WMTivQ!TX95=m`uDSPATkNay1c(YB)^q8ylSxM%-QHw)h~((WQ<|l zd|me?&^9q|ti-4?)sTYKotLR?upJ4$1Ql#u3H9F7LRdk2@Y!AfR8s)lebzgb(jOo2 z6pTnTd#)UjEC&sK(JUu!-<_1br!6h_B}!2>oR#ju*XZ_ikb`OLZ~T7+DS2I%l@b-o zaEzjp`yu>H^vL0`J0JA3=n)o`6t}8#{5VVUA6rx@ZkF4wwN{MOWvz62-7RY(jyfhh458hbaE;^>25oBBDN}-O89HN4cq^_#%e-BK_uSCNVVjQ~Ve@s1o zr7PzhCQ}Bo6U6WQSt?0WH;7Ja8zq>=pBnHJh~w=8yEvX#r+} z1dc7QKAodZeWi!-Uwiz4$%4{g!^86HxocA%Pph}uOYM;CcZD8_#c}J{6dpu)Rj@Dr zFSqTe$Vw!ydspi3Bd0cE{e9%WZ~5O@1nuuz{+qE=Na8nR|1HaZ%ktmq8`(nsSM}}t z_m_;3hn8{1%i8r*-op~CiJNlm5e>L=g7;^4I8~3-VWp^%+Ra-CtNt9&VP8Pv=32(4 zBdRu_)<VsS~nPMAAL)B1>m4w zzmib+*QMv7KNk3iwdC4oxlzLC`&1PVfC$&B1 zaX)%unat22>GeT>$}GK<37g?wp5|7;*K(l3W|r{Sd8r8W*L~c45a&CCG?P)0A_ci$)NR`Ib#qHsq4kfY|Lu<#e11qwQ1edr8wW_& z_3t`O-McOFu(?OU8Oh4*U-rij9}v|pfIXw(oAGB37L8F(uW*h?Rq^f6jwbiuXW?HLPgpzIUp6py4I=i0eYOx-MTF;v5%fDfSaB?h4rnZ#tsN^J6P zGhgG!bf0HC^RF$yzyA`>RB_)Y2DzkN{0yt^(Es?wPaD9~Vk~<@9WCtjUw^c!)9pl> z61Hv$zrE_d{duI-AN?S=5h0$_Y1L1p{_Brce|-(L|`6@!dmRVB;$XsB)%HRiDWVN^W=WK_n&_pWrunE zCTl7Q{3h$aU&r5M{f{g2n_K^bvHa%NRPKS?o!{L0AENBH67qMQ>$ei}50&M&67mnN z_5Z&T@?7B)bV&0KAoMylXC0`D2Nq?i`U&axJ=zA1qAF6epP=VoUA+qW`18ZxZwYxP zj1%0@^KJrl3vrx{gI-_qh2fBNcBGvMRa%!_wK%rmm^=eLsiq3!1}BZT*Ee#|pFynJ&7c_zuE|5%^{!(}!+m-^JmSY#@NFAU z%X8T*56SEMP5fUis28}vrZF_!pInDW9w#ADX}L*g{eEDVvuSZ?ewd;wV6#> z01Yr0VE}u&G<4nrDUn?U!}5e?ME%BP;xL+Y6+Z*5Gy@cb&By04#>-`g(a`r;1&@@& z+KPqO8wo9?et8xL+wz9S2esuJv9bEn86H8<-rzx(~RHbo z=5v*P^M1m*s{!BU)@$K)sv?Ru;d5}n4ysdN=RMH98@IkT61peWqpt+Xr8U=R=R99F zElE7hZD*#TP2n@)iu+cAIkA>zf^LgB*HlkPrP0Qd{o|+JId$yod@7W`*X)$>Vn~mz z&>dY!C9XKux|eD**|53eKIk?aBRBpI%c-2x=L1_gE^@{gTsss$$_(`UPF`wHmDZ;C zE|y;LN*~h<9Wwt6`_h3T>fjwA?j@M+nSlI^zI%yDWEV^~)_lTUbLToAh&$F8QO2A|M$I9w*iQMVdS%P(gEf9Q zx&mp)L(*^d*vJ2ap8QVJAuzc76APGX5=H}>*c644eHId*K z1AIeD(lZY<7E!`Tq|{!q^&%b9;~TOQ_&nDNzI%SsKuabS|4s`9Y zZZ>rscM^4w5xpoQ7V!?gfvwqD1cY%VG_-?CD|+X6m~Z+{&HLtzqdN`V!e<(cNsfGs zyHCE*xGa&STf>lxTMWE#T~-1HYUrNse$c^!+*YOdxe5b`eos$9WOty$l+;v5NM}ev zEC`wSY*fDHe~P^S&g}f98%tF>;^bL6PKaktYK^0&-<7%L1z-4w~zt>dwj2GlDEQlZbb4U3e(LYajngh#N zp&pIYaY$qFqo*ftN_30*2r^ydTbOR~>bGExN7e9P_O70D%`9R>JH6{;;utQmf=d$3 zMj=LzZAu8?l&lb=ilz-RcMl3NMzcAbfw|v9+*K!Q&#hD^AY;xlyN@K?u_$WBJqhQ( zx$T;~?NRxJJ+yRNE>kw41}}nS4;jE)kZHN_Fd&wE&H7Dyd!Mgj0g%a`;i(!Hl)D#5 z&qISGHxn3pWlu2V{=ZdK`+_Ri){4EFOT20f*L**PG?dK}a^RPG3Y1;t_sK)j36O95 zvp7*J-;RJMB`q|Xyhg4QJ8x(ztm|6*I;`X;(0 zj<2P`uv+@~KH@3p$0|8f$=!h5yuK=2@zjVM5?4DvWc5ux`7g_2ZV zO`M;SzkTc$>7NJ1-AOFuK}E_mHEvSr{V=%TlGG236;DIG%$W&hf$_ z`%{kRG>;XM`@Z2Y%D6LFyTBY57N6wgge+lU*I)0kMM7hc=bm8N9ISP~8zu`15!kjm* z#ZGd4A6LF!6ThCkpA3t$?@vJ-5^LL3^){9EG#;go_5O57WgN4Z*HYVl-Ju zWsp#=E%~5K`8Ubog;t)nw*W>3hx;qY1^y-q6KE z89 YM%Iv>ohiwSSAiE`%ua!l(>Bg!k{Yh&`{!XHdHI+y%j9()u_+$9ui2F1kAi% zLoD7KlT{osVdOpiy0mBbR&k%J!6oO(-qI)9Q`O6)^M>Y8H29@%jK@WZei0?}lFBOQ zQ4OnJ=&&>_Is>hu%<+b9eJ9+Ad7FidD)dBspfj|}E-V(09Ui`Y*8?|Q$1Q{zTsYQD zPP8XLhmo>&Xmux@?`;sCj&n0;%Hk?UPRuBCGII`tB%pReFzUD+5c=KTG@aL6`y>-V^NN_p0+ zbq|5$Jt97tIQVMOT*g>|fLR9Bmo^Tv7Sd3!)% 
zkzRq#sRFENXKjdyIaj24b7Xe*hmcN}-3@ReM_oV<(Ue&?8wVVSeD&kck!fGmwao8A z>7I@n9{c#s(EzKt#0;Ew6?{xG&4w{Qoa@8mq~Jk&r{Y4kVzUeQ3Y=}KIO zLHQ(i5936|UDx2kcY|#%Rfl`3yQd?hCbf!!@MbyHmLjY|#QiOKpk+rW=?<(iVcLUa zp5c>sblp@5hv_=*ctJ6>8!h2$KVpK11sRiG>QdBX-Sp_cw3zQgF|3@6>`Nzp`im~@ zXWmk7rlVJ?Q6x`6u`my_RT*>qe!6o^-95XCVk&JAd&~~5fD&NM^yao6r-_F4#06Fk zbw3p|t|gNLhvg0V=RBZDZL(zfcz~-0Y5Xl*89Y!zY=CCyP@taSBU z)JYwQ{=mMxo+w(R5|#N>|1~DmmO#(lqW$+A`dm6+CmJ@K4SO4)bWBKk>HACbWf}Y< z8OVa(wTC3q34s0*EX$BUpUlO0?e^S;1h|bB*R(P=llfd`|K|rAya5GOxnT;@%%5<5 z^+0-9nG**?;qDsh9*{Ox{Bk}vbYFjKl9I|@Xyn=CALuHz@ad*G9_mG<^L@LTONAw} za+!(a%1nWA9Ej0urLa`-Msd=DC3H=THSPOXpu2w^T12qCW%o8elKTh?LkHeoE1(O2d2ItYKf`WW9pv0mr& z>jOvsfcq)W#cB7`iBJk}SYLMAH6qILF861jp+-4+S7~Lxdn|p*aea_PU?V5pvMczE zKW*Xrg0n9I;?r0adO57&VMA4lK(Vg0M&+D+1sT)Dxf9Cm0clYEE4ZE6|I59dN|(r1 z#qK~1Dw#H+TJVif<*WwZ0lKW^r%wK>s9dXs#&Dba$=adU#scW&+RM~Khqc_=hLeTF zeD}nc)6w129B(s88|-{rxTdi;ao*DNc5aCK}$Pxx_xL?HOvm`j**&zTHw5qCs$*i zTw;}pGxK@=ZeXBGu*i#qA8v@mH)iBkWfB`m0mXNdD)!#5Gyj0)m$a!1D$x|gE^%&k zFsvGjzPqofrJ)-;&_c-cp#>Q{=cg1#|+1r{ZBXxgJ zvAMk8n+X#sNRVD*Bs!VVqncHnDc+S6M^PqxE3Hr%cj0D`(O1RizdllnSiCBYeL_yP z5yr4J-ZXTGj6q-BTBKtBp}Dl&zxRCxh|SL)05#C~3Qev2poq_mxM-vRq?-k`H8?KQ zhecuEv`DR#IF!`6MFDSII5ji_qxd|WZ>JQ?F7of|!PQZS80&!?2anOdYpj-}D=K{4r% zop~~%NOs5^6hC|u<9fk1VB-^ziVWJW_bS;A(7bXDs;^?KV0)eKyL&~$N+_w)aXm0| zMlJ{rbk2}5KfKOiO8-aVhwP2Vr=#;4s{F2Q8$eDpykr)s36rlyjeWzGq6mF01|m1! z8^tx}RQ8X6&!Ia>_-X&J80S}+K4#P*MChJRVj~7T%=WmJGkitF)RJCU6N9>U)3I{p zLiP|oD@{dlV}~mxn``5EyrA-C*fkIHCk`^F8s?y!JFeV#;8vz@(}dSk^J3%KQqkso z!w|TGhs7uJCZ;?G)drg9@$UGg033eUibM(-lqM|8-7kb(jI_eQRQY2R!j{{r$d&J{BzNpb=nIF- zTN-Ii0%(d0mr5JZc-1$#R_}^s7XD#^LmtT&_sDRTitRh zdNDO`lo7q~QbQ4K=1XV|^%Y@}`7$)|a`^Kd?jt>Q9GWVx(X=F8*&FvYsc>zwH_1Gc zW89I&&{6(*z?EJn-u-ljd)|q0t#i#0i`^IE*bDmx?z-Bno(mZ4O(lA{%wW6Dn<@HO z2g&QDPy~6ET~FeQ$-$EP4vLLz$7$KXaGyuZZOsm!>_)7uBvVs>j?Zv|LY-kzu95c4|I^6cZwPeUeVnj?YC>p+_^?cu|ge09L|WOPq2a#e> z35PGQ&bwh5^3Ljcf+pSI>OrVNZ;Ho=ZNADreLA^gUGQ$*N(PF=vO5%7%w~v6g9JJ; z<&=IYcMElj;hcE5i!-GQNv@~#jC6V8 zbh!?9;`(d~madm#utuH3Z?iYH8umaV9xuD)l#qN|2{xZ>A8ezTN*Yo>dhm}6VdG@k zExhrG=3AN0MJ~4sZ%Tv8x+8~gN*aHilft!Y9`Ge;+TknMyFHGvnV;E=Y!t^u-6%_I z@9e&6^zwQ1%zVSUdRBfs!S&*jdG%Bx5$}XgJXjlN?F|U_^(lDY*NH@|WNSE2`JoHX zr#xA#BIWn{$I76k<=zPn1}jV}#v>K|ldc|DCXJ19q`@PAxOA2rimB;8HE9&U#4;xa zu_~||Z{Nwb?(A?`CoPtT*`QtJw_R*(Kb`q$+)sv?u}e5OaIc;wk@RMH(xzHrt8!4~ zw$)JWZ8U-V({aKbGeP{Gq{5UTp@{eMJQIpFy3ta~!)IRd>aQhY40`K1rcLRM5gwOk zO6-bGgzV^?fEr8mTI<#)52gUuM1Q|%FfOYZ#q6Krz=ipds#x=iti`L{GZX1U>eocp z zy}f_PDB@IuR9s>Iz3EqmOE z$I$p}x$g5muX8_N*LjVQ zqB%DxntR-MJ8z4{uBw~h9VffEa=UxQa;AJ{#aJ)Rl!!hQthqpY*(qVPF|sixrIjlIbM^v zP3KpFKFb@L2-7+4w)IUgpCaZ>IaJ<+6F9#*eUiA8n&y{R0de`-SfIN-qTEp|LDZ9V z=sxhmYP3H549L?$da)o|hng92-b^oX@-lT1tW!>-#sG2nX}g!l*Y~6cvUBBl{QB$e za^o`70(rTDN{*gW{UK1|;$YYBD$5lO!pBD@%L=8y<+PyXJ`4>RqXKymv$Ha@Rrfez zWE{Lldwm?q;>vcI+DH7!8xJCzo(1 z8am~b|Gv}fIhIN^)S;RX@Uv2@n~DF>ed0_jG0*-kz;-@8gEQb+Zk=!*TApByr0))& zhNtaGmA<0wpKRSi}$EwXf)Ef zl8+0O$t&O?l?JZ}H!}0_XXo`jz`p-%#kr@z<6BR`w6lW`7khs})%9iP+na0~zT5NL z(Z}jp1xB2gByelZ#a_Lpy!y)(q3by8^b?RzeqItCtcI?pY7rCg^$k?nOjES~*45(v zK-vD@$orXOuD-8m`q_W%VjDBQPNWoh*jEk2uWvq|hz5N6rSB#;x87j_rT)|e$mcwz zqT>i_%Lqe@4#j+!4NI(S>A)uYo~CPzf53g^sj+XLq^XL_xgWFK4&Cn11Q&bg@|DI+ zQ7dBYbul?n>6c(6k+fT)1g_WohAF@<_WIrW+hqlT3pBkznBkpxJQ#dR?t)Jnm9gj! 
zpY4-yt8~zG&AEh}dR*kiN^(hM<|EJbjG5>VyofGYR`3UbaW;e$spUjm?WXur%K@xK z{WEV(c2pyjZpO(zcxWIxf6(rF@}n0iB}g^1S;pGf4rBWR=n4||5CR* znAU1;Xp{k)z`iT&m_N}c!d?UJWGowwf2e|o>KF~>_X|XEerRy)9LHJVoLk49o%Bq1 zcx@wH-ZX3KGAg3-L4t!v;U`EpTjO&zG)W~!%D@{)<2et}`rPp8xbnqhk@7d*UN;bVeyZ$oa3n=7%U`P&<`Pl3*;0PA$u~D{FNao>jq(}dn)B_MZi4dt z3n;2Z);pCxa!axfB`;roo@mVN<_DALPX-1l6-$~_3-BJ=Yk?*b=hoNQ`X{CnY^-JU zQLa-u_J&Qj6PZQBkC^jCs&%i)yv2qWe~gba#aDk>**JX%zhcaLQF#*#VuE(?8}lk? zSsFfToal@Vm&;a405?K3=S@jzY&dW#FgJHUdLBKI(SwqHzi2huu&gyc|LMXg!-jzM z-eSvcv}jegyX_(W22ou@PU|DCW*-r>gRGA4^$?3hL`26oo%%a#p3ZRmP?}BUf^`Eq>bw9$q9L0j^>#q1wLO>A(rH7+&U9x zriXy{$TI=&CUv*(?*wQwX87HzW^B6-!|%pDdz=ossy@aI1ywC(BZs_Z*W>Y`tiIWY zB))+$33|_jJk#gSANBB#R)-iDgxE|s0&QZySnvfEu3*euF^rnP2&R1RpQ*D9KHNFotd5?eyWFCEc z(70eew;fEucr-0NzdRt2^HQ;4xN0>b0rciIolYV^KN3pp3Nvp3$J}R{wJ& z{v#L*;pn^3KUM(-W^H_47?oUTNYn71x@zUUnd4{0DfXFiKt5yo&83%H1ABhHpwog= zJKF~x(LsyApDd?MhCCU%-hQ^V{*A@p)?4OU&BOH*^{ZZ+0GE?@GP1QCRGNc~)m{Ug z6Qwdr`*PM-$b&~=seIal>ma+yf-~iTH@tejtf>bmNqQf3B$L7ZFAfEP zi8A${2wA;Kj*^SWKG9iT3KGJgFU5s#8H{{)KIYP#9LganRLK$a)m2u+git{*0kKR_ z;W(}OONT-Bad$8l##IAJA~;mQ+$#fGIrS_BvPy9OCJF z&viSMVPSYfV|z*CQ~Rhy&${?nxWwS`s`v2^*NOX#hDPg`jU|p-Zl#^^=~xBB1r17a z2n?h`uS9C<5hcAUhsg=FV{Ze8dn|fK$CNU$Lq|-+ z2P=K9y4!9Uf9-dBN$UR?^l>I4bLxSr=_R;!(Nj1_jAED%{$6 zVY8!ni)woAufiurH5{PTT3?5bfQcLa@%lsMGaD&VjdWQvEd8#f<HjekU3C03JJ;=!{fUZ!fuIQG;F zbY|A&CfBDuG;3xkKr*k_%C)j}t~+5k*}1 zQtOUa>UNwH>JP?J1&4BQHOQ<03}E#6nx!vlqO&2RjSrev!$&uj!-1ehY%xdE`wrj) zWJ-xbcgt)4)b0M6|EeKN(7CkVVx_XOto>;OViTxtSXCi4{f@DTG6eMTf?Ka@pmWt z9dX`T3{C5sTQ!qjO$zCW#?%a<>#?Ibrt}|1i|yYU#hA{z8#l%_ExK-{X$`gJoOWzv zb$g5Q9dg~037M^Gnn?Do&1=GcEMB-PUFoxsGQC8u^R#q+nyKhsyKR$n$p4`@_kSF&VXu93w4sbL_0oZ=7B_&j0qB!y zo3eVlLjVOvTa`ma1|sEwu62@bPIt#II1CW>T)W@)^Gr4FzI!oAxX}<%|3p?PmJ7z2p&I&`Kg!M?wIKTmz-64g+`>4t?_T|TY=q4N+ znN;Qt@x{jvoeUj0ps>)8QsBEaZM6ob!s;f5%BIJQ-3^LCDPnO>xYzy$WBN4)#mAqB{{*8s)%_TrW2_pyO3&O}v`R!dq50vM?t>iHW)fF?m^^h)pPyI&{$jhAr-FVWKiLr>~$Le zeE)jtqz^4;_x;e-0a~3@0Lyh8V7Wrlv8{ik zsF{Rr;V^r_p6C7G_KW>-Ku-XJ;F{}Ux4M0l;#^U!)3t$5xjOYeFMCrit_c8eUDECD zQ(&oqwG z=hj2AmPe{5G}}tD0IoJbXGCQzGcx|{ALTO=)XRm7)j$Ynn+pks}Qr(Ov)@sRL+drPV zqota1;jyr)Auo%-LoVZrPPZPKEBB2{#wt+chrgya44>ySD!UbaN`R^!Gko!x9ftZ6 zZx@Z*b-rzt?((%T8Rk%PSrK-X2gZ zJI*%BI{XvBcF4TF;k+^_fy-h9UwQxk>z9V=Q40bdHjlKvoc>r_#Yk@ z&8c}k@#!1!SUtw4Qr(aF)4OoTDf|IU+Z~z4t`qQcD;66Zaqx2o|L>n`k#_AWUCLF3 zlP1-YZp|$%4+n~^)OxPH)Xj(!yX$$vky4SCj+s0)VC}N4Oy4gWTRE)+tZa|t#_@Hu?9}RyH z%g^cq<+H;y%U47r%PqZP@AaR)VjTJ3bFF(ofMO~#%dxit7-xH3q87Z}%ToU!ET_E&r)Az45?nZ$~LrXOm!zx_9lHQ_SCG zd^W2&T)pmYe0suXqQrGJ`Ic{N^slReN+ni&sXBw{9P?JyeYMHxYIdFv)zSk_cZpW* zHXOG%XyTJ36l01XCHK!KtK2bq9?xlQyzxz$tK`?}D`xUtQku^@e4k6W{7eo9FE|X0gfZ*XR2lY`H~#NxR@< z5-BD3daE?iv14R~`HAajN9b)zMc0p|ujKIV(hcG0dzGUqx9@Oo#w! 
zYwT?;&?>mfJa>E1lOCa-bWT{WagCh5GBcp2MAKe6yVs?wn)Z^mzQ26-KU504MIeW&PYUH&HCuL(2(FXdi--|2 z4p{`F;%WQi4S!Lsg}kz(%Z}C)ieLA7U?lVw3=bp;NDRMPb&!15+6?$=`;F0taC0zi zoS)eopnurh+}z3vYjOf|(Gq8gx9aO<0|OZZ{v%8g}`tr{_?42bkqKDg=@ttw%>Hw-*c|5k)I%M zG{H}v>cc7WN6I(;E#)WVIPXy^a(6g>k{OIxupUgDlRURw+0KQ`Rm&S7Y&lE1qPpfz z;xEik#F!TRn7$>IghZ$ZO4s~rJMu`|;TC~UrFPlc=VBvb>G0fRkMXh9UkkSV6wl1@ z9QZq8gEf>0aP^y-uhc_w&n7lX;+t_}hBp624IdkJt(BG0E7z&eeDM20U{l z`(gUZrW6%di8q+?n1(=BbpXPoBAC+IT*Y)ro$KQG0=0Zg+g`lGz9628JH(u6%jPU& z`Zm30Ps)-+`>AA=oY9OYAL$-F<+M&W$vQ~?RzIE#$s%ZyaDKNuNx?7hQ|Us16=kIl zh2gX9lI1K8c^;(2-c*Ig7aWD$NERjQyZv`tV_t8mM<}5@78e9OQ)FkV2gTxuT&4@I zvt*@B_JB<5w5(9-RvxXN5JMxzl4{+G#OSL2mtLNTwu9`brDR)jMyE5!2?< zXxCS>F>zdZBg<=B^jfSc>(>C-QRA)3^lUQx`(GMF7| zzA`t!x(u)b0b5CqQ}M%APQ=wl)Il8RPF?Q-^S%S3w;g@(@mfi-vDESF8gy469BTGm z07+H6cIpkWx7A0y%(161wGic6&3HoVgEC2M%aoiLDZRX!IBZbslj-daV3hve`+C^x z!NsjF*Du8b%xJw&bE_Pk{kMwK%R?XmGw;qf(!VHx+gP3zTTWX>ND%(={yh45w3|I5d_xrS^roYJRtGRL(Q|}vV1iE5%|ESe04MtNpLGr=hu)|MY z6j8zT;Zt+a%&mMF*F z)c(XAjXzN(hu!$bWBLLu+tn=XI6q4mPkv|BsOBl!%0V{2)*RkD<4veyUylgM6cM+pByp9l7o5 z_c}f`Y;T;-nW6K`9nU99-0tJmGruZ$?ABOHyhEkH)UAvl#Zj_=r#PL2CY2#S*5Ts9 zpI|nS)-uxxZQ&G}@B%>_EAIU3sm2I@C5toSkTo5;aeOI@nUgbdn|ReaPx zrZd@U3r3$ZD1M=dJJH)&eP%{x5+8*6pm&|xg{R*I*{je+tN14%^_#mK6yVGC`hv>gHZ2$DyrtiI5p?J=Hz$@$AglFmsIe< ztsC6g(SBa_s!>!3l-dcyu-53A*}2dQQeS zyn}KjrIFCh?c!`K=7TLaEU|me3>U>9q$1nt4tg z&T%RnXkPI({N66L`kdC_PSsC_220?c>HT^f$(z&8QvlH#q3$7M&ttIcL@W9fq7grm?m2_-$jZ4!vY?Yt zbVi;_Y^7rkJkRHG?TmZ7Q-|L4gkHCtFA_l@d^qO#mv+n*IC_&#^a_144Jf}Fc#IEu zB2;hag9kwBq}fiz8!v7HOvVLS@AC#jVjU7V;pu#arLcPU)AAzWcbh9e!D^F3J8mCfLdnL#aFg$6Q;YF<1)w3 zuXT83Du+x&NYAxp7J{MQv>U-1Ufg-(iN(xQIcPCLN93VOTcQrK>HLt+*JUWSB0lBP zofR^v&OTv&)oH5!-8}$;R@{P$@mgOco|kq^XnFgrO=u3ZGOd9~(0i;)cY4R08@(1k zJJ%_rb+|y+wvFJJTdRv)hw7TyX=vTj;696+<+P$Dd#{rycG4cd( zbQK4HTq{@6)9%(BD8u3BBlOYOEq;d+mRV|Ca6aVnGYofcB4b)8KP(*~hH|wEXfCcM zcwN93==ybd*EYrB>IKfu+u{1p_;O@nO$lqGtqk9MdnbnaO6}5v7Z;XIzD^{rYp@Fs zM%GU6DO~;od4BblHfcVy;u07*Y5w^Wb5OV~%;D!K&lOk~LJx)l-drS|0BQ`v8 zPWGOG{(du4|r@iv8q!6*buDdoCeA$j*bcsP}cuyhCSDaw7a&>P4cdJwP0W zx5=Ep!~lf)*2ogo?%abK3FRLANOI%-3U!>n!q(Gn1R*_oZ|Gy9%q|`OA=qn!t9_Fh zeZ0h2cgV&EoQt;X_?mB2DwtQ0JuIxhlp;uAu-KUYQ+uo9*ln8W zQJtPkN2SS74$6tbKpm6rw6d?ScGf>yr#`>dUi~O8tMq;wI3LfczNBuPtf$ke$D*2V zmPx$&b;7et+L!3H`BQLubu(wZIEU#g_WJ8X*JQSJWS1{bV+1tv1ikwnT>^B`C_UjIzuZ81gJhXh`PNb|6tLqIM6Jze|O7mHo86BLf+w;%$~w76H41iwiLM7bJAzMaX>X zM5jU@QwI+9&1sXTc7}*}6rllX;6ls#!z*shMt8!!id^SC-zs zn3Dik0IB_Y!CZAhZ%d#wQTpDqjFMp=UxkE_j@K0(XRf?kkJhZyLkNU6j;717kZ}%a zL5@QCQ^eORb&dRI=fO!RL8aBrG#@cxP^34FR9{yvT+YgCJUvtWcrn={6^k~&mzd}r zkE|qW6R!4q*Q(XWN(o=ZXGdxn<3c3P9?Ws?JSc5IzLU4AKT}@ueCZ*lR*0VIDe@Z; zg&(~n^muH~i)fw_+~=v)C!Dwz{94ZEK4h_{;yPqR9G_O5xzW2)#<(nd7B(fM*9%URPBKKIKNA4>fw=-^Z8S2Ve&!f&8=4$P=jz*gr&+&lTA)_;#xNY5K#-E3I#5mm5XUU ztiamZj%L$~{NeuGw1BQI|tdGlkk(i70w zKAl&a!-w*Cvjts|RCMd(V?J~5dfLc$kqm1&8_fken~V3RIt2n)%4V|OY7=R$oM7DC z>(o@>?9^?Q@Q@JiQDl5LvQY&sMlM&I-U2eb@MrA#iGGoh<2l9Xg1mt@52(@ww=;`s zgI_xKfuWYkVIGE$6!EVip&D7sO#o8mb5>(I{?qz69=QIoG_$Kjn&=aZUbSj#>WMKq z=+24lN*^W5Y<9O|%FRtSsBRN~GJI}Dd}i(X*Ex_WUQX&_d=DZnv4 z(ap5)0~MZfY`?2FcJ(FkY23!ujY{2Q9@`0@tE1`bIn#Q$Iqb9^aSIQ2yY!vg6Cgzn z5rPH^e-q-R1US=lmI+rMastcpwg9sePith zT^+Pyq46Tvz;Z*g>YJXl&?AX zGiM{$H%+?>*43Nmf9s!51XMSI^^bMgOUN!z-loDb5bY)DP;%lB>=O0D$~^fdqATS< z>B{QOn|l$ox+Z2>ri~RtovR2*ULA&)`LwJ>Mt}uZ(fB$ehE3e{xG5Tbjwk<$mP}o|L3L=HeIf#AuFIq zMc*|uGvnIs{mZ(yB5P!=j>3103obqjt3<4>`uRm_^itez=Ku8U)JEOMQe1T+>iAHF zlVxZ3rlyTo{xC{mnAq7e@unDEzoFGT+OaIChkc(c@$Js}o|k$B54$wDC*w!uk{yO> zWvA988ef_?2EE)+ua8|cx&{c)OZGLS=Y3o`hvMX)Rjp6o>iKx+cRpJzRykxefL2s$ 
zYO%&$>2zHHYDO>QRjX>;_;a-*)MZVJt-tJcC~9SXs6aj1{)FTBg4&x%boCeW&K}j* z69eCfEVM#b&00Pcs9ha+T54D6H#pi&D8SNfRjz`=paP6V16!wnDM?&s(_&gN5|<;Q zkD-V%8CcV`h1T?9V1)bCfrG3(hrO|PmxFXBImiVuac^Av3f2Db1Jz@gYu?DwTbU*z zlVlN7JV=jInop^%wS|)c**Q_kn`AwO!i%38YEEjfGxxM3R#zjWc7a-QDJ{0RS$ z`=6c;XHKp0V2N)BOeJk(w0-Ci!J4@In7gc2V@o;J!T=ByLFns^Z#UdJ z(IXzFo3c_ESRd1^d+h02Ue2uR_dW&wl2@CM*Q#NaFyPrW!Qr}<%el>l^{CF4EiecXCl$p)V&e_P^sRu`_a-)5q`xtv(IA-6%)xKT^ zml>WAe=R==&ioa5-@JJ&&xgevKi;e*cW>^x{uck6NB;ad?xlrSW*jhnzLXzlK^C6gV!yX$q=zu?@^OV%y;R7W}q! zyJ>`xxhsYM`G$>4oUAK_%0kxltj99Ct=$ak}45c=1LsGySiH1-t>l z#bmy}Tv&Wf^;|8v(v-p)$PbKP@Am{Qw9u&B;gtP~q&7ZfTs>bVm8nT*8{CNVeFYjr zqu*g;q|7PnZh682Ocaq+zn-VpnP7i%i`E)e9h_LtkoZd{6g%1MZcqdVJr>w+XZea`ylQEvZ4UeYE% zxp6wYHbKvJP}4rnv|lJiV{8lDYjz>m$2f0ItNZ)N(P=@k7$+TR1E;PDOj*{*655!s zg}>atb?!mqbaJRI|I5{;24h0e7MGuVHs|Q&XMbwid&p&Zplo?%zlt4j)d-I}-{L3@ zS!}wfPOBEo>;+|+Hz@Q2`HLz@aVkKIm4olOyYCyRfUVW0n*+J;X|;v#^^2aIY&x4~ zU7_&BV*s?KI>}s5`khG)`_dJ9nNqQQ-yz>epJiHPu+ACO;&5=na3oxGA26ceyj`5> zCx31^)9KKQhZLKV^9q*6ow5^YYXi5a2XzJ!{zA4FY09Q5m|3(YWkB$k$Z2uN|8MX& z$pbaCUPoT;i+Y8cJVLl|iS@37VweC%bD(FFEXG8~ZZEs3Q^VG@LLa!Ch^A_j)w1s= z-AIY7)tIR;i#FQRH;0+NNsYJ^{<@Q*PUm`GlE*Lovscz00RhD8)pTk+yoEDb%aOV= zm~@xpx1S#Scu1Igcr<>9w6w$}Ow?T`yU(rn2PpO3nlpXo}5DB(`?6s4tjhov&-1)@tMj zRcg(j4h1>rG~0%_K*+MIm%OzcSkuM3^=@jeNSUBlWu7(lN8THEqEC2rtud!3;>*67A5`)ktm|O%Xh(-nGwwO9rvb3CzAVbPdt#%p5?i?xl4ACN8}+3 z!qMihet@eDG;8*8aU4;cQutigL~F0aRU$UY1)A)2T5qw}Dd8gaSQ7)v83@nRT?MWZ zoZm@;MJlKzDF~|~5eNLrPLr~ocjbC5uDsAPYR6HdoNzK07 zQ-O!w!A!Bs{Iy93*$=`{Y_kC!0G3rEyzmr zfV#!YTl(7#`sNd*_eXpe>X|KLHeV)YWSmXIFNzPj;Z__h@_?DZXDC=6CrQ6p|dysjVO~zehq+LUPeIn5R z8Yucjcnr#kn`{&~B&=UwYL#|!JIKQCO<|ZUF_J9_T}g-dX^q-2&<~Fpx$M;8^FJc) zQ)e|^oauI5oSCuLL4)B2z04SztxY%jx-D>~jotvn6gKNRL6z()7WoO+xCUUl z%Bc0~OJj*rgRzaVjZ_@N2L_;SA;yX5n%+)$g0(#Bm@&68fEz~{w@yi&0bPp;igaw@ zPjuG0S~HGVFVu|BEs&8J(pdv>QjuD#flh&KLTYodsLmNT=!-!`O?81B?=-^Hqd}^IV-PJoh(`?tlxRHfb9< ztX{O*lU6~XdOkar@>qb}F0;04+rtYJxQU{{`z>UG^`68vcat!5)6TGiPQ>`)zY&1KKOI1j^_48ZR*qA^qyaNDGTKr-#<=-bj&S&_*skudJU9f_jru+#GRG!oRvB zX*k-aC>T{@#zhKIC?4G8fhpUJ*n^*&*Zf5VSbQ2{F^D^7{P@^mzU{Qqt2H39A8Lz2 zecO`Q&R`nwx;!KqzI{XWGc3d4B_Nm!uR$9#^b+ES!00SevaT5*-#sBgsdz1ddfsq&%di^uS4f!CcO`JQn0J?zl8ZO)M>KGmqU3jMSJG_Rot^^moZ z0h$c}&5?lSPK)J#0-oZ z*kMpb?G}nt0y``g49Mo8R8*Jh3)=ga>OJJKaLFOI@{RgUsOQuiMtcwGCm%`uBMB5P zrx)6yq@cxAN#b!$^}Z_fEDw^7n??pc>;!^O4>nRxkxOiZOwsu-+2Em%U`=`dJor|7 z8j$8Q4bYG8l0+{?P-7Ef$>a$VON_^^nEd`2(7p5V77q|=1V(c^;?|jmyM&qy7HTSl zVXOrZYNK<Jtmn*WksIj5FH1&qdW(jXA2av5zsr!^MJPk-4Q!pvHOsMwCw@u z7K;eNf0q&2gZT#UT@Jw4|(eMigqAczvW{OdH69X*p-l4<)CV$DB_@#%K<`N3K}+m(J!O-M?(uO zMUfWbI7lr3iG212cvDUDZ)ER>Oo|G~qzE>Jmze%8oPvT0CPvxlWy&D!y4~qd0jnoV zG8vgF50jvEOCFHcb<^Zkg*el}19QgF36NG}BcpM!eK3FVRvLA@!a!Yf8KrOl6UHMENlw>XWP4Iha1^V!7j?~*vby0&Y<^ zW538u(m{;vCpF~kST#nVy0M4=5V>O);(T^d%q5}#+dInhsV`Z;%DNPXuK}ep0W^ZN zy?gr2Q8dIICelhTqsXs8KVFI>?WhUe!HD^r0?Rf7kpd9H0`&VJ1oY;wcHaE(sWc3vY%-+Hj8h2YH=Q+hIP$cvA$Ri5C!Tltij#CIe z4s$SuMDO@xo-@$0%^)@D4|C$8LCABVFxXZ{g#RL^sQa#r)vvA-?&n>li`ugf4ms;y z^g;+eGX!brhZlp7L0t3LF*^nO9^8OfvtPjveGg$H1~$@AA!QoRN}j6A_KB7 z16B`@!E9Lp$dM#6s;q;rR|2mDp|e(0{PsgDg#p+LWuo)BOwKU2!hVr zJ$}f8lw48aBp&x3WMbP@@Rd(F2NOK5q{`dP@Qbx=dSw9$SjZ4BsG)nNekL+4i)`!q zDUqT?-Z~jM<%$Z3Ffdmyy=629sZw*LfR%YGL)pSXS|-#>(j-Ktx-)t-!-1nE^_CTI zwXF0bkmUc~SowXhusjsHb9x_nx}5%z^A{_i&1s_n|0=X3-mVv6;06CQC;vRk&T(BE zfLJ^f%sSCBdvA44FI76Vo7(b@m%V%SA#gO~!#g$Ob0$;c-`V1HmAN`g=#O$hBJf+{ zZ@?%7>q&BEORhTrH4@kjO+U8$6`=-wYk;TlW^>%dDNvN;;cMKKJ|xEi$ve0u$&@%| zs@$(<>g{U7IWKX8gOi#w9NvF*u5OtmnmuI7A)mi3SH9ir{?^%?yuQX8WzryvW4fh* z^_RD~oA;CY4+Y>c3~+Hr<0Mil_ktfYYv~;=$ko(wQ!27iLWK5#MMIgf>HXepNLldS 
zCgiWj7Z@?Mw2a!mCDuIOlS0T{T>ST6)%igEDu?I%^*`%DJj1}A?7m$bk(S+pBY^;&`3A|yknj5QO&a*eidHoL;j!S` zE|59ALMQxjm+UV^!*Q9O&<_wa{rfFQ3m?u@`taxfc76Q! zulq0U!L&sU+w9x*ZkS_m2p!KRU9^i2{|gaOF@RmwG7JCuu`*0HD?P8m|MH8!JHjPG z4|(_du~&9&6PJhq?A>pqr%L<>UW2#Hgy>W+Szf>^&5Cn=)A!vC1=STOQ2mJPI2)PI;`|o$ujD@7+dM`cG&i~4CL8JeV z7JIFY#dt=Y64)*BTyz+y!pBy#IueRz8zH3_4XWUc^cP0h{_Y|OJ&!^$i3a}(kDWIX zgW7RBLB#K!6@wbRHzVoVq20OzxSD{l`a>8b)qb!9V;C=yk=Bk1uM3o0O0}=OL;3d& zzz9lucW_#D{n?o{h}i!_T(^S+ZxaU3TR%H^14@i{#A>%#m|X{~4i7!)vzu8Wu*+U# zJfChy`jPx7qC!51nJt`^n|zm9vO$tzlUc=%_>+UhQ}zJ`J8P!3E1Cx}8_b__KDv<| z&wcs+9%0F23bAs#q^eTUZ+Ze7#)}37dh-CozF+pzwHBD z=@U_XkH91$0sf{J^L4Kbz%bs;g2nm?oGWOYI0!j?h|m&@-&`c4Aon!t8~+2Q_6~3gD9@=}Am6oPdoXL16+TI={->aK?vn=s&Z-=IL>d$= zM67}@RbT-2o^d1aXbP=6yEVf9Vd_OVHv1hY-sRrGkX}v#@6jpx-@Zq-Aae0N1dETq zX>IMT$KTWvNL4(_u4`P!gJco5u7)hy^TUu0MH@P`lj=9}DZ!L3zAt(zIGLzV?l$q` z48!YaIfgPw3T-HWtlU1rXEC#%jJd8;9X@!_X&U4wG)#{;r4;N9zor_*8wbamN(V`@ z`$0o~MP)He{vvN7y!AWk{agcx&)Vf4SO^aQaV|A~8dM}od7bO2$FTd_;@F2ZfH^n6 zH}}oJ><)nF9TzsPEGev+4aoKCOR~N?(hY5WW*EW=qU;0w(3XDI!5smeJ(jY;WA&)g zwA<%juLBOZO;5;0%Do0mxP1%=ZF9QHOg@k1wLK5yCw0P3(cNhD44NkAz_hua8-)tz zbtQ0dq9Y{qx2~!4f~D&lwz4mp{Y(3j_jI@7V+-LdpA~pZ+jqz5Ev*qcObGN_Fp4Tt zo$4SPVx!NlAi%iXAyMzPD_UPJ|B6S*8~AaIR2VF@To_!hkZHg=?f69-c1X1DfYSn) z-R8H>i~KsO)%xvWNPTWYeXRrON@0mdMe_FS!)RI`#3=Kn(YjMAhO->L52-^`IApC7 z@_Bv-pi=T+TU7Ug80Wuwjs1eibub%-Wxw0pw||~7`Aa!pynL1(5~;W;k+yS?D$P&= z?D>#?t9rb6K+V>2|2qe8O3Bkya>s_a1OO?1O84JNu{<~#qj~U?wRypeI(-|o*^Eca zAknqq2U0l8CXoh*1el+P+O{60{l8%Q|Et~T`G@=+jald1NPXxVMe)7kv*kk(#xV5q zi<)qNEEt!Q`VoAVE+Z|?x(s1(t6YA5QLHI`3uYFiHPH%JBfoAK6j@xb$AO{BZ|8c7 zc=EQhWV&i+FnQ};G}Lxq^QdF^HInwnH=Xx7$S%c50I>hph2Ve-&C6|Zhi^xGXdp^$W~onB zxsdCcY*GmS&HZU`35Xq1SIdN;L-2xq3VzINvq$*TcBOeB^1&v@pFXGr#a<~wV8>j8 zwK&OnkWp<}_5lgB%tA`SwIg64Nu#A_FO6vJWA(16(#OrcudD^nwl|%xwNN)0?Kbf| zm78Z&j@2wZ!I>W>GSj3mHSz7Wa{%$`-ux2iFjw|t!%H7t`;G~k2CswW)8*G36|(h> zg3?q&$tUJPkI!wk_Z9z1AUOiCIZG4wZz0h`}wK zxxFmG&tCxHk3^huInIML3_7?0nI#t}uA7TE6$^)d@_6cB_tV!bmHc;E#pa`6Q=A*_ zUxdEjc@${%!BbO;W=iBj$P><4(RQ`?#CfHz;^+rI=HUYpQ2Sh0@cn9&gkg>igho$8JBryiQ)N^jf?7bY-@mc^RCR-*0qpZz?!U zuT%v9rvBG(j@1jwPrFPR_iauHas-F!uG4dFQO5eD-3FlN@A>ute5MK)k3z>lh>Qhb zw3&**^^&wmU5gXbDx;Z9Je`q?6&rI^mQUQXImbo?S%Tkve8N9QL-L3e2%E98v;3?D z|Mu^YS%i_yC4C@)4F1pof>lUt)q7|r+=Ll`po ziwp=NXW=khWCEFgHv(*d9gnKquEj=yUf?BvjzDd)x>aDuSvZYk&YN*yD#?$YYg1L+ zSecJhH>6hCP${iF6RDfl3l2uTDb-DDZ;~y(ruR!gl2!AL90>SKI=_URlNfNhn%WgB zYMquB?MowFAeddFYnWYS5mE5mg}Awf*;=PjytdIEasdJlt=4!DZ1u-{jZZ9OHJAz> zl--zlRpbpoNG7gk=pIQbDNDGh&OyHlM)a}u5*vLtRWkCR2?M9D< z+x6!0OD_&kUf*-|>`h8VW^yL-)Ym+~vvi3D!WFQ^L-JftWB7mjTXpU_+K6M}`-*m~ z4kW)~iNGVOa~>Um)JNnw5dJsUduT94$U7KK+khka$MI7y3zXR+Wta8g!%MQ@_)br3 zY;0l9_74@oP3UIp(A@21$L+(qf-DJAuDQe90J03h0tW$X`W=Uymt-EL;9maoj0ust zQ~&55d4cq*PsrWuB!W?Fk*MwhQ%>KcV^yP@w_GkU?yQ}~#)RC; z>%#Tx#0RL=2gmW8znM5j>HbEmC5YuWmmy9-kv1tLOfiORrd@SV*%)9dK(0%sw5SNn z(<8unSPp@wuzi5cheJ4jFU&)EX~;e#jw*NVTCfAy2fRx~hag0$7ATmrM03p^)qLPb zYo<+p^d1_7O+6*xz;B_Uz%VGSn4E0_qr#}Gyf>B$^$JYb^@U4Mm)?&mh{8i*NT}=d z*Np33eREz*9kwB+_fx^~v#!Nyr*2!Bl*NiwccMUT|# zS?$O4fUv1S!gfpSW5xpDi#7qn10bpxfKz>a{yHpa|TLc77Ot)H+v`b0! zZQjqmU_Efy^?#k#TM7m;=go5aE_hz05(v0sl%HH@(J%uL{6m6v+u+!SN*4e!1a{HL zKtQ_1$@)v6)L2hh+$hLA3rmjhw(i&?AE zSFn+LvQj=t?xU01ZZ>ZZzZ>Qj{VJq_#@pb|de4r z97uqSd4WF(O;^1V&Zl|omjG>b-d1{~9E%XROP*o5wA+NX%3+{#uQBJjyS&SeeI)|+ zT}=KFoG1>0_05}ymrPx=Chmba+$ML{@X{mzQ5JcFQ@4-pm=V{;v_G!7Mk~*%pp&Fr zPgG})bOw~`>PvYp&$wzm-9~?Z8<1V%L6|B8_<&g0cgnL;Z$dY19JLo_cGKRet^oM! 
z+$(>w|2814*z;nJqOo`x;lGdbL^{pAeV&Op*G?ZVF90*J{tSo3pnP5jauMuYa2!d8 zFmnvUdsS8%pTl+F9aFVPKfHd9Q%wf)Fwc2`gLv{n-L!^(m^><%{HW;yU+LYjjcytl zaJS=v!kM=_--ROu9(bM;6kMfV;uvq5een-^lXC zLx5P5B=_IZ2cU0K92k@;+1eXG#r}j7XQTWmh1RJBsLh%bFF-zT-4cK)2x6-wq=zUoCPWM_EsfWYecteHbZYCS8c;GOhK|DIZ ztZQ`@PC-$NRxJS9zr#XtUF%k*pQ;aZ1A~oTL;CQhp*aw+qD$zI)@P@TEH-kly`)hG zU>dfuWa3hmIW&EXZw8EJrzPk-@1#)#rY%?qoHS%~1CKRlf=^gkfz@H?~%%1n45>%RDS`4X!xG1 zt#n)H?BYwZ+g(uJ+~x!LQSQ6{BP4oI+O@@YuQ=%t=Ko>u&Eu(FyZ-S=WvGxe5lV(A zI)_rimO_*vb7qy3S?1ZMNRuIBB{rGKP?@K#0YxI2lA#QlGLIXZ@47zQ$vOAyxnIxo z=kJf-Klk0)KEw67*1FdCUTc|b+D{KiGxodT4Scg-EPJ9XQplz!oVcGmd!N;xmum|S zemQ4oWEXy!M#j{y4W&2Z*f1`%xu&pd?I=osx_lVGZHQ3={tQ~+D*Fd&>i=$k-P}&6 zR*ocU_Sdgp-K4fyhcNvGzSD~(R1j^!CT8I8{jyeDU#-Kk zH7n4aQF2DL4J=)w-IZJL%DP;MvCZQS^^&`zg$#-l4NQ)AL7K)kd^XIq*r(Obd8+S& z%xOw6yuk*`w3NK!c3%r{8GEl3>q z8u$3z554W*TCnd7_;rs}u%~_;P{R~IQ-@ZAH?H?neMWk{TNyWAIfK07U)ALKW44I{ z>~hBdIQN}B1f<;+Sm&B@QtCWjePFEx7FEDBWDv=kmDna*c-Z9f|lTvO&XC*Eh zBW1b$fwjub3NGNLJ?H$bt&ziWQVnq368y8bKtFju5VBNiW~(@GnKWz4Q@TtTQY_X%Tlyy&tP8&Q^oHh*X>b*h2CK%`cx9sxfF^ z6|hXQ&&rXv_1Dg_k%|VRmvwQ)Mx79tE!T`vJfD>L zfO3V%LPT58i)}$r3MCs1!W+`@uP-s|j$nhVK|_ZRM1R_RzlPmlqV+m1*S^W0w#(g~ zI^?fx^w^Ofe2`3!tAxkqxlW)=;@^+mK>Zt?xfWRhC!HZkD#(4w!5ekhxNBM)$r^h3 z)n7{C(EMVzbLBl&6J*M!MhPT>A}O1gkC5wg0ckin00T%8l%N7T# zLPYRtD-51_+vPrzX`&J%u4xwPu_-=W`U1Hd;+~(0*?x|ouiMH2x}Jl0yG z_j{V*SYd9LM>j&mvu_)m;s0D$TBq~@Z)j`Li=T&d*@$)YE~?9#@eY0U?jAQ4Gh3IgO$fzgg$V~2c1)nK4C4;=e#iS!}l8! zi{HF>qGft5{Y^G85=bAfztQ}9PxA1ATUuMiZ+w?NkcffEVnxernzx)%u*V6<@G zfexzvolwN0Ll+QvOhk7t^VXOO?vqVAu;Q>{AHNJeg0qykFVn@L z@Rv6h{u-A5)cX7~3x1{9h^*NL)dXg((7-BIORDjn2-4KffDYc4G1+973;)&T4cwPw zhyyULO7CQ*t(V{l61{?H^*7W)(rmyjTj#~W{BDrIChDZc8m$_Uyf^BXB|d-6TW!fU z%Ek#^wwpa!yP#Po;$)b#c$A^ND6O2~SB+~w2d>f%9PS16H-};d>ZO*fth!OdC9qur znn@0eGei8AT@RrA!tp6In<{%@4mDQonmK9HKg*ZK?z_Jf$Ke(~2UpFlf_uF^V|oVU z$tI%V`ApShx0hBtC+Fq&QulIwAGlS|*gMEVz*zB^IOxj+k#~Ta_?t)wBY+H28~qBJ zwG`2#gAmTR$Sh^R2q^^t_t$~aX3G5j(zLz}OMFob1|Tb=?Vm>L7Ht5rlIbh=h6_}x z_>~+3UWj-^$wzenX9%H)NB?J@f28(&yRhTt!EpQCnjqN-{C0K2&J)KYeYD;+)EuO5 z(vEB^_wx7!obQwFS7zU@sAKd&7RF33SekCjw3>#V?(*v6e0lZ?7D+7RL!VI`awXS3 zkb$r>LD^PXNj$NMk+l_&EVnTeVsv0^B= zlt6}B8vTi|scc2vKa{59KGj1pXFB~ZYIQ!0JP3Az(ZiSv$g}Z94)Z_7?!}n z8RlhD(f09x{DoBP2}8&{-2>Nx)h+c9mF#fJ1JACo4OG4j+8g7&ixnk!jMd$m2z#LZ zDAZvWh_fx2IIBDKoxlwZn@%a8Ln5pKj;;N{@HL81>Q3L0uDh;W$mB6L|;k%td`|y6>HLp^}HHN*GV7O)ZAP z@8fhCgkP6o)?YYJjt(?B2#+~y(f{z0k#Ta*e8dI%*Nm4M7R)v=7rD-Tz1~}YR4?DJ zS@A4tjT-KxpXx0jM3{#HT@o}1a9wOEC})lCf_|zws2#3OVD#|m)uzehNzeWW?PqCP zS)tO(5WhM;tWtfKb4x-NSK`TV6#?)4^@c{HT3Pyg@dAc)#iCvJR*}tb3jst6T{Gyd zV5b;}fBn_xrh|IKrD~OdQHa4CzCXKCc@~j%vT-tgu4lPPT}5`cShuHxPG($WPcrLE zXvdY^@Fw{aTQh#@10TaInxuY?1oQ8%nRr;lHj#b_hLhZkZpguq%Yzg;)8DtjHagA^ z?2sMOzy5(A@B#M|wG^=ac=yp7F?vz>%I;z1_(?F%Ww>Tm?EIz8u0Az{o)w=0!W+Q* ze#*q)+i+4UaN?^A@)5|iTzF7QnMf)9S4_Ro8coNMuy;AzHS8A!l6TUm%>DRtt*nU% zq3}elhK3sUw^?xq@rU(x1+*vmZ7uJ)H!vh?G|=P>=2w1oiHD|hZ&S!`mi zSDEcxRLL4jc?)eIbW(?>4bx-yCAkh4Hr_j$ZscSdqan#@ONx-R?>i6W`$}%7gFV`M zyMt9KAKt@hu0n_6&yFC>?V0DGlFbC%dirmg6O&UC4xCY+l*i3o%mJqiwiejHQDv_a^X=h4W~|CxM!?;pjO-4K`4D5>e_?`jqHpM z98;k7mReXo_CaTmf&xqj>7)_EilN&uO@*}dK8))vcN|cq>`(;_5>M12LkHQYc7F<| zlV_rh5y;`3-IPqTX{0q zBNT4`T3S)(Si1Be)Si4^g}zS=Zc%h9^bx!H6BokmB5Kp!mP)mE;Wdf(64D!zEhI@1 znKd7Jg=$~fNE8hVW~a9V2c%1+FS^)RfTZhO?oY8O)$9#1D%f$*3l-EG=pJ}K;&#gE zX_KUFpIAh8c5a`bPXwKGKPQ8)IW)9*JrH32?i@w)s49a+b&A{K@9oAcRwvA2ZQ(hG z6-dBN`~lI0>ODVt#MwX)()xAb9yvpy;P*c?HC2C18!9rze@b7}B&WZ5m_FO5eS)X( z#*YWp29ei0UG1B-4K*_dE=R~~u6|M))!(2hh9{-%vu!@v*Lsxt0H4fWY7*_jz@2(V zlKUG<)UXdCPyBpJo_`1Nu@xP_R&?7>VGqRs_LT#NT%P;3FP9?i$4{T+0wF#U+4MaB 
z+osJHn)Ckn;Y}+r8ue{Ii2FWqE5$wE1!p9zcSa!EAa#k{USr- zMS3|9{6Q;dc4`iUZjQcF&kN}Z+b2A)cifgRU*jjDNfwArufEjmJ|(CZ5%$kd<7Cuy z)0iFaX!=x7C??ga2pGn6gNJc99TDU!3~a0-JmJeVnD+N}pF|?)S7T{5AGF+y^jaJJTKcQLnoFJJzdGYygkRNn)cxA%)PXj@hl0CP+ zLbH}Vhl&lu8xN=igkGVJBLN!J)MX2;3{KED%2VGbZsTX8j>Xcb9p_TLG>Pd(nkUur zVwY=@X?TU6P8QZqJ!ezNTCB-PpOwg%N*(SlN{iu?cbbkOXq1W*v1%NyMDESs&QY(N z>X|;va+Q}CJB~WJw7yL8R5ZHhDr}!F%jL+XT|BNqw41d}RXSeyhhtZPXSEMg6e60g z0%p#bsv+b%Vz)gkXti`lY+ftoKNpR3jHlWN^r*xd$+E24y*Q2yFkVQ36Ad zaz^sH99_Bpw~>BP9KwqUwImxU$TKK zVl}6CfIYn`e&JeKjMgwSzBvr673*a;FwDU)j30C3UVS;-U2Qb|NNb1UtT0Z8R@B#r zz$k?iJs@`lbLjy7!34ob$kB^N`g9L&^SIU8M#H!BQFq~D;DFzoM>=9&wJukThcK-2 zTTCZyVQEbd{EpC^?DU1NFFr)VPBT% z&Ym>9P4F1h6s|0q=xDgas$SjQW>&(m!9JMl{KU%Ny#NUHBAUIbo-ez$`-Z$XJ`fTN z&Ob?~2f?)}4uNs<>@{ROcNrDm8y3iQpmf$B%ml|zzx?%3+i=k=4s;7?RBegPx%fKsa^$L}Lq&moSwpC@QhQ#4m&ENzLNl6uC{~Ww#_j%Icn{`2@vz;~> zh9}7%zO9rw6D}^)nf){=s9s}v%t)p<*z4ETAC19mk_7w;Y2Yf6Q&`vSXMdDY5LrM_ z%I-3;d@uEZK>6m$3v|0iI9z&uj5>Pa=S#XZBd~r@BhwkfBW3sZHA~tyeag(9>0dG! z{JC`aECi31JkvP)&9QOiomLhN zF+Riih>4a*TFLDdF7>=*!z24g5hd~vEl!H%<$R@u*RiHfUBpqD+O0UZ<;_w)s0=P@r*ti)BlL0;b7U@ds#@YueK}Q4;wG6LiAm)THKLS3&5CsV6Wc0!Qmv%i*v7G5(G7uD=)Zn`%!T)mkPoKZ0Crb12c#oRp7+JAZWq+;_I zJN4t^P4@&>B$}tEUIqzrRu@;NEX@uS>Qb3O`wBy+*Dtk_)jTw_qEp&GEG}s8AGZrO zYCahrVD&ghb=L8zdB^d3amMCskFL}hJ$!iz`5v^rO9O<561bsf$Ld>o`_6AgBD(lI z7z4F(n!7s?`1i)+H?wS##1|i%lo|s$WI?N7Gi8bJvvlA!qilO>4-1k( z3n-z9>pdbl7|p+!{6M%)FOQ`&Rah_o$dmJQ*-6f1UujKZSb9qLgO-g4?0@`Fmmx?H)3$rAy%tsc<&?{>589eES#Rno1jms|8%Xw9D+ zh#v1QC)6iwFD^fmG<^X#DVA;o@Kfqe4EE^4r;fs*h+iHkgB=Jy{8?}66$w?z2aC%# zkf8XXXjJ-;>8#)~ySO9&+{k^o;ez35meniw+XZbT8PkhzhO~QhnAXcXbw3}7ol@sL zqFPq0H9r72qx;Ed(4Nk?QK>CzxP}Lqz-=W-&M+e{6BWTXmdnp3^Hy^k8*VX5?TOC4 zSvprT>!wma`{d=#5G|TUhj$PK-KnNp-n~#IcE#f!Vc*cHsjI<~yQ1Hz+$=&91OrFM z%a?50imUN8#6_H^>x4=f%0i`kE`96Xo-wI;x{}YNVI!jiQwd6i3z(3L>`bov-weh# zPgfI8r)TK*Y9HS|D>yO`7{p2UDLlK}Cu6dOluq`R0;_Xp@V9W|S5}4&xbOqVl)%b= zFf#4*3}l6|yFp@Og_U6{^# zCd|0qu(`NF?j|eLKGJQ^N?CG1cF?k0cq+*uO`}Yonq@Y6$HaLb!1A?7^PG7Qds^Wb zcaYGAJ@X5ifoCT_410#>{CC~Qyx8cU7;C)^N@*u~hAi?v8NLdj zloiauSd#a9u;y=?LoVRbHau1OC?$Ih_(g)=2ai@l>tPiu9NmK5xQvp~fpkLn`{E%K zJzu0{MmPiC*I-a=``K8S36AOQbGzAT{aUt!Kr#}%Dr#|Cy5}7JkiqUkvBP-g-2mu<|rvo!hyX=3xKqm zVs0Scl{mPTA@`l!F)^SuoHIZv@1q<{GZyadPG%OQ;F4*W4Uks>$HX>@kj<6V~f8o{M;b7wEkDi&K zFD7|6A8I8&j)@5zRgofH(>Xj$rsj4QnlMr{izAqw20&tN)>2%3FTZ7WDRI7RTozxW z*PF7l1;taPV8$Hs;d3qHBD!Zs__&$cA)xop4$S2o?~bio|Lm39j~6B`+q<&L@5c=* zR*#_UShN;~KFq|qrY=>Su`7eoa6gQyBgFLp*0}5jjTrnoUtA4vW_=!%(PXEW>S^#g z_}0t0w?8u^9458PTa#XXC#03$mLwdnmxTTcpRQ7grN2n?Pk~hEhzC6jHP_a9(+K?FG$;C5>y7&wP(M2 z5r<8%Lt-pS?QZxIqSiMPit9({0}o5b9iUl0l^l>Xzj5&mXZ`e3rhZ-&6#tNL)(SnM z5za=^SJ5ksfE{7ImA?HpF+}?`ktusAE_=*Bhz8hGl=|)04r*4}AB-HsQa4mV2K~IF zcwU{iVQTt(@57fx_19@+4mVms?5|i6IW}V!U43hTf<3o=y0e?SY>r(dl!RiBZ$#i0pz35$bN{hD|j}rUMCDs>m9DCTQd<&`iu1%!ZbV?~-4#riZ8W zL~&MT*w>`Tl%EPuPZ^amG@ZWF4pag71PYXts8H(Y;LJ7C4T>8ctr-*1V5Fe@5}4t{ zPDQ%MCX;m#{vM~&;vi=|Z=_*%R{0$*6+kU6Q()ljDBJk9`hxHqXl&1x+H~K^u8Co> zm~nB~aL|=(@mW|`W9~|~{|2Gd^mdhIXnTJz(Xp48%D~RHINUuhzW0z*N3z7CooY)L zXKrH8CH$Dtx7z16H`J+*_UrwWUs88#mLxzgDSl+ZKu`&qdjn(^nB=674yDWTHR&Q@(d(*-i=jvfFAqBR{k8$YpN}Ca3 zaL7~KaZuH2wXIremos4dOvpHc8;O1!4UPJXZ&K%lX%YpZB>6S)&@0?Xv}m%5?d`Cb zU&K6WXuX6`Z;}1|Zvu;ZSz?+NRIqG_u0g5m3%QXW83HQgbKf9Cy!R7ar+vd6uweYC z9~A1y|0dFIatos=BfIA&p-EThNBfKzZCifoc~xt#Vx@4oX)nj8>?IY?+)$0nOU0y` z)xrhA2$Shnk|yuM7q{eT6+z<&@}Va*vH-GupxPW9xqMXbgOb}}mAYF`h{p+*MDLe2 z&VDe6DROIJ;Qa#9u#bxEhS?;$z_E3Ha-Ot9#O%H|vAgAFkOs=~x`s{a>r8W9I0FbALFKFb`W_b8RGmGsXtR8BHcvQlD zG_<2QJGsjW8y^`A5=BqTO1Kw-guQ}M}T zwRxNrkwIYqXfOe?ygor?3_c(l)U3tA;Lt{Aodb8Y=*7Jj9j`5--f2hmNbm4!hk4?d 
z$^k?z|!cr(YUSWN zcp&hkrM}B(JL}|VS6PYL)8T?&uBm+wpnHWh9|eqbKJ?HUg&rdcjj8<~DKsY|9E-&Q`Hh=t$BWO;{aA`<1T;5^fNHBcDu&;`O+0&;UN=3q-jESuKFAg= z#|jCT-uOza*`k`XgDdBpGP>q7kh`~j8oiJnBgjST!%adcsXg(zX^WMCZ^&w2eX4n6 zChZ|AU#)T&aNoS}?yCOHn;PuH+Dqz;^p)iMM$1<;pqe85Nz=>2t6@FQjiLAXV$pLy zO_)OJQ5c?+kiPn|Hz8d*)uJrj&P{Ui=>67jB#`ChHXOY_vpsLub^GJerjslrE5lxX zDzWlrJ0U!tVx=$HWwPW1?guC$>?8hB#3{!YH)8Il*m>Oy_wSEDiBf_*p#yF@ffA(? zpTBIe`pRIga#kh+&=jPs_!?S2DUI)u?yCIIqMW|aVs{nD6oE%+y4lV*SMeKTx}E&F zocs?tP45xIPwqRhn*N56KD$$;MtuAw$+fMtlu%q_%`)tr6=M)(32 z1e@`uNGzgBc!dH(={)xAb|KB>WZ#o-Fouw65#c( z5?8e~kE!RyE@kUx?aN-8?&Z=h59>ixAr6xuzTBg5dBETVaa(lMy!weOj_k!|(o6iR zcQ(ZRnFjY_s`mQH^M9*aE-C|+47K5M{iPTG;4P*>5lG4`Ni6n3m^=3|r08x3$4VL3 z8lLyq2de&@94oyUd*AWdxQ36NKhf z;;hhXZ0Nyj(5+rrOl97GquZLIO=sdS8PD#yCH^mVU_k8fXJa3Yg z;n~oz@>|fCtocxvB6M5)cSEt&-MQ=au}2pkO8-Qom95aQk#MYKtd!N6-RuzM_pY35 zN;?%V-5g=;xoJPi#Wy5-`I6Hnp55;fmn}=?MaCjzM6-YjSN*tr{b?+b3uaZH7aGEF zIX&nL0*={%!Omy(*yrPtLS|}jsZ+<01p9H@{q@Wy7po0KP^$La}8DbyUy2P-j__@$2NwC$eisd)K!lE%)ga zeiV4(L-oim1WM-~kPqoB4Nj%HiDM$U|GSc`%zYDn?2Ksv6ez#fzswF!|Ie`Yd(wlm z(yw4bQO=H+gA(VsL|yutSj~o9zijg>JO~vB(3rz95$R_W%vN~2n6%b*C+b{oM^l@g;w6* zUG1IOW(Un5!uKo_sYTA|fW5Dl43BeGl8puYCy3si`Gbf3jR9 zDIVxGJae9`=;{ok(<_salrsRQwhgO)N|BmP%kkDXdy3U^tZ=SO1egHC=~GV6xS)W- z$tf5NEiakbb`T%1eg8cGxXUls_`%Gx$o=v{$JF%^gn1@=G7tbl?=Dow45Yc2?+95I z;Iz{y2ebEnGE{bk+|MbRirI1$ys?lO*;k>6^UOi~>14dNsrF+`@cBMqkAy9ShNq;cGzz3=~IstPTf z!6NNvwFKKKFosE0-a}~ucL32KRmVt!;0&%4C}v@ZsVNT;Zu>P^=#5|}^*e+AaVQ~D zKI`C89SMLkUmHwyPBtsEwDh@Qg8TBXeaJ-+^4&==wh-K#38MJO(?Es6_YFyMS zHwu=E z!D(YZz8!_Nm9Vfjr%|(Zm+swulnMOxBql~P;${Rs@KtcwvIK;xKHH$i$yI5;PBttI z@+GQK$I7U-(%&0th%X|X@PKZj3HP!AE5YG8@w$yrqrP%Ht6pOIqLsM&Oscbg1g?Jx zijG9{gFPw}?IyMDd(ynYsMW0BJNRR@W=q2bTR`vhgYGD+ia_9 z^^Znv+VtX{4=1m@=7Xp&)~rlRtr*Y^G~B1j@9%(N2~`Uyrg7MA#}@1gX-EpXzW(fY ziEA_EL?fr#Hy;4BMMt@1pQ07q0|$@QO*9{3#V^jDHAX)r%RqEWBj?Lu8eQ261WeTY zlV;5++arQ9`FoFz{ljV%mxaYDZ{tI)Bqa|5*4y;fsIXtRYj5CtV}Q*bt@XQsU2VLF znpQ3#UXG1+hEjQ;B5>_*fWXBHqZ?bZDz(;E35JL+|+of^MaPmoH&VTS2 zgBs`gj_GgL#0RcQd5c`PG2!^>HSt;vb#NAlOy^%4{R<^`UN0n1vDH`Te z!z*B%F6~98_f%(2;eI$3tpE1=kJvwax9wo!$~|){O>lkWt_;@}nto3SKAyFw{QC}g z?g=AlpN5~oFJNr$+9g|6(LKu>6dc!wSO0r;TeaXG;{e0z-zzzB8M}vf-k^K^JOBMI z-%C_fG6@{^>uWoKEpA!PQH}4v-joKLu2k7~V4YOt9cUlAv%AcWqHX#2ODMnO-0|5E zFFR9u;rI3$D{ZouR{fN1{_6Pse}BvR{W>?{E}l4n(BBtSq*LxH_8NxK#J_+3*FV}p zOIcjc&|KsA|LB0_%FJwgt*PhMpY-pqGd9>n zLz_AB-fZ{p@Alnvx%Bcd4k~zWyH=8sxCMf+dce{V!WVYn2N-7|3B51LM&tpSCAmdOK|mDZY46?Dw<({f|A*;GP5j z93pO2iro{qrQytf+luepz)PH$pd9V@@}jx1L*?L}&iDJb|M*A6A-F5duVbB-4QGAX znZlN`)+=bz;`Ln4T*WYEc6WxXHWy`|8)%ORz#KQMmNz*W;dY{nESTuQk6Sbb6UQ$aK zpNf!1iIe96DPi=hjj!&=LdUj=gXmj79s=3QE5x`?yTJgAcf4n01hG$-W}{-OwSL+x z1BdR2s^!(M;<2Xv@RtU;xEArQ0q^U<*ZG^wYQL z7-nTR!a9yA7=I2l9a%VJG7tZiqmQ1{pRYWGc8UpDNJd6+ovgR818A$N?c|Rw_sEeD zu}JG1M=qwLH$PxaEgNeQvbE~%M3nH|-(nDk?j-}Io9gYpSYRK6t?DW!)`GphDp5Z0 z7!@1)Mz$NBJkrMf)OoF)?fseT=QqLM$3KjoAwnt7^SWn!Av2< zUt2F$Y_6`@tnWg{q*Ek4>QvzDCb*>$ZT9mbA19Z(E$63UlNt^HdvB~V#r6{0UoY%T zbPtr9pGJ>iN4Izun&%}L>n$FJXApXmo3V|=zN(5I zvoe-mMq|Gj8&jV+x%tHR;tM_%HJdE5OY&;3NBzl=KrAVeh*gn0fy( zq4oV8e1)9|sd=Su80Dm`f!KMn5$NJc3b6xi%SzBYj608SLEobS&mgmlWr9V$wl&AJ zF!kORg|QYD^+uMQ*MH?AEOa zX#&Q0^F_Ap%GjeV7}~z49!*CVKUgES=LoPN{Bb1f6e7AeNZ;XsLi6L;Mj|of`vRkM zb@?w|?6*CSfa`p>6#{K@>?vHQwLOsvK1j!bYGmvQW5`c1=i<(SXx;Llc71;rk6@y; z-s_7qMmbfzip}!Ym->@BovUxH(fX?6-;v z8mk*yttyFl^F(IN9RH`s@4Ka2;bjud(QSjB>v1 zKx~Wq0_fsi3bEO`-GL3cq$`Rox5NL%5#Sk%r)+fpA@-kRlzt&xqJVAgW?ti4SB*Dj z0d1e_Q@Ac-qdB5zi+)9M$ZdjDMCUnvH}c;gP;;0>T(n|9^AzNDvt zF5V+dA@)K`31(Zh7&xF;{r@87{{tc>Cyk9AhE#jUyTdrl3+~{_gc=s!F!7s6j`Ik=73`{JIm{Gwmsw+T7 
zo-;Ud28zOYpbqB!`&q5WfX_|QZ~}140}T?))r?$*a(ekJd0u^QK*mL0z%RVzRCEVi zx9hcUGg&2L0VU#0GXXLIYLn84o34(|geeT-4r71BGh+nADE?2vUFwokp+^w+w zmC$-JWce9uXJJljX(B@1C=k3CheFE%U>Gj(>S|2H&;PEM(i|?269ES>~Lv?WT#d zVH;LB-!GqpSu5C4(GkRstYx!KAZ9~Q3+`&m z-4^8ethPBrdtgUbQ`m){Zyxm~K})Fcf`i|$w%EPr+nD){PKMdX84w9k-iDCkOtdAE zofS%y<8+&nc6yHzvfTfYJi}j8I|N~v+9UueKIo_!ksW36E7mV*7ilcyFnxKI=6u~Z z|Mvd6Z`;R_sNx3=9seNL8>F8(h=dc?=Vk-2V~dtraU5G@2z&vOoWYw;Sl>G9(@=f2 zq-IrDlu6E~Vc|0imuT1Z&hZwcA`8a6+mT|>1YxBuNsYW7=sZL1_tRl|*Sdhn!_*tG zC9w4*COI2)jMYz^>`^(<>C|c zHBBDK@OY5n)d6E^tH4lZJfNZ@a@{F=iFQL2GZedX4yRL$Jxk%7!))x##s{U%^3<~g znRuy{fu}Yr%YMJVA)Zm5H)}&YbI{$#N|&qooSTq2q{D&~)KpmjxVx;_0( zJZVnM-kXnSY(I~&FT^8jHdn($A*1(j>cnJRpx8DPcjJ4-``M>=Sv1F>z}9{QQU=_? z=e(i7pS#sGi{T=D!;{&*s6D|a5C3JL%yiEgCD0uul+#fR?87*8iMyuQvt}jsuApOK}nq8nLq1-l$nP6k6DO<6|gI{EQs-u zS}W+K1`RkM>NXL;l9ZzcQ#dgFswCJ1#y4!&;sLZf)P7p0yDhf7ImZ>WA^vNXrcZ;F zVTOA#W)o=^r6QZi3l8+0X<{z2!4iP^K`<;sL1s1K> ze~x_MoiuDEyszPlx=8LEYDB03%wY=S~l*Ltv_xt4h6jG zV?=0;3yf@tC(lk1K9?6tS&FBT4Mv~j#VXWu;=D^G?4edZw2F`m>8j=YY@u&F+T!Mh zC>Tssn=+tsMkl|BbG(1{O3V23jMyv|$1CM<6lQUCySfqlF@IFvNa(NXtQD1Sg!XKKD+3zTOva{igw6yJJk;F-s4J zI1Nz~XnoR;b2x2j{m3Z~-(?r%T>qp>;k;dsTbVagXEc_P)*&?0_U`K8HCN zaBd*PQEq$VU6rZSs3>Gm-{9cW8b5LR)cv@%pGr>27oFRbK1opN2VRGcBL&=bOc=;Vh!W z(xm2>4ulNI=8A7gt{PXTVX9o*RX;%5J0i3FV;z;~C7b7blO^*oNipJC@cz@gG{Z&l z0|rEoOE$~OKYcoumw7ncXP+N$m2yYo)rGT{nAX7An=y8x_3Bu?5ew7 ztw1Jd7>!QxHV=5?Xr@XFp+oM4x2UPH^%gYO+MW#UiP(z>twqZvOe0kQEtK!Nf6*}N zV8|TQgYXAC#0L5m*`@?zH`G8d;^o42o-pK&X(jJgsIJn1*+xJ4n{wmlcb&jM7etQoNjp zoiw@CcgY*#HR~I^TM5K$hz)sLnn@yE`oG8FHZsAQs-Y`vE&qNgPuFX?Kew?LL2rt~ zd?(Zr8axlGXmeF1`aD$d6*5}x*?l2mX+GHIn%wDRC5H_k65WH@PrbDMY(ORG#{COb zq>n?Ty;I`T<&O7td;gry$KMu6b{EA9hgFqCUf>9jDF3{W<+gX;VIS2QBl56zEp!_h z(c`J(+fwOKv}!PVXTagf67xJ?UD$G0X}3#L!LTUIC0tFMgK(9a*Vz17P;3g_maU5q zTZ{C^O85OSo|0LR5!(^lQSXMwO%=1u%9n5p?@yMFb@^F&YygJoa5dmikb+I*=(-bq z>=H(mchnLT6J+xtVA<_dGT&5MTG$fvs7><6>w@-A+e|n|h>g!Km-Pis>)RZSD!mj_ zEjB#bZWg?Bij^x|w!Lp!Lf7$Xk+Yli&Zd`Ez7oR4ErDk8l*QSU;w<+~d$+rn)YOlD zAU~a-=guyiqXp31bujV((YM@7u0pi$JXG*t-7|=DX>L&NuGgmh#Ufj&{nqU zj$W4zN71RXe}(F`(w!I4_@g<2SL&rdOi3s05#W?c>Bo#^}bU!lxV_j0+#pDFvuJ4qej&UJvL(A0e{W8CcE0dM> zm6z?J$4%9>ZSL6BX%=i>tVShc@aHRoY%E@x-{tCV0B;%86?;bhlEqon;fik&r4~pA z7FY%rP=Sl>vxEbVIkgb`)A~Hu%)@mxS`EO$roV6lM@BQ4Sb^u$7&Q)KfuixKO z?K~qj5^YGBvD8v`!Q&^(`{`$vop%~j3~*EPC(}Ckag92RJ@el` zJ{IY30sp1Qqwb)u>6ly_IwDN{QINY(TJGgW#@=X?*o9WP@+c7`KYfJac zc{X_bA0iBk%xI8bVbl;a3Jm(5OT;JGe%K}pGL^J?|8@`b0fV?xG)UF*z(;exr7F`S zNtSiiLgM+hGj^bAac0!jT(_dMJWx9>D#&=j0$G}Onm)sKT2^GFaTZQ!tRT~#l zMrdwj1vaIQLyhSX*m4>d;EftIqtbq z&YLGlT{-pP>~3MhBmK}%J0&B{(yI(JuJWqB6s93Faa>f{v38SEiMONVe2rjs_`!nw ze8qjlS1-1|b4SBYBbO$Z_H>h=MQrWtg4csEUqg@4yoUn`r6^jWkAo@N3cZAr=0{rM zG0Dl+??PUw^cp$_#=WWG)~g+}p>T{L`#8M1XP9dJ7_(unX7|KR28x|lK^^__H8T}2 zsIV>KbWFOR+d&iZXRFZmwJgzY$Lr^3 z8=dS`!@oG)lzBQMVqFF;D1~HyvOwqP*k>zQqQmukJMzTO98kWw(B4|E_2iL?^Pq)G zIS1b*iN(Q$el6{#X%7RE0$Z=ac>jpqo6kSq2GnMr{Lup-9n$BAo`ncS)4vE4;s3D5 zw_Mc57ys-}40Hc06Q?}p3gb|&d*w;stc^82>!D)c$hcG8c+tKWZ=xtc_XC0Hg7n5U zXYOs-uv1mx+!>8Nx6gSt=ORFKy@-{qxF)stNX>|1iSmZpZjaU_gVqx7f)a0>1tG4r zS*TS^9S#9RX&j!;-rL|?hpczEtLgICK44z5Gm5>+uZOG5!NYpZFKsrX5}DV|6&QFG z@}BsstK@7fY9;H_z$eH1{ux_sbbe6q_xB%t4 zlb3i_{4}+f^fuA&%Zy5-qao;p#0-dg*1bq}GI?EBmY8)k8TECkwmCg|7m{S1`wJSsBeA{-lR&zC9qqHC;M zSepM=;(dm`@e4i;J;{o&rP2&wmmCdmKC>1D0rq}@v3phBrTrAeWa+Ea`JIVNbl>;f z^lgo_T+Cde#arLGLM+nh19)GTuPrSnXZ3!YZ)r9MiNM5lNZPEusJG|?^PoG$q6nT{FXk=gu1wNB= z5b{aTG-ty@e$&hNn!`0wSpm`&@Rfxu)qQ$*0xT|r8Tfvm zOWr?Z0tF|Kq?L68vzQu0&{MbRY-hTwMnmiW?pR})$9!ZE1hnH5vrDHG6*!?UzsZIp zX;Jgtc>G}!;AMFv-Dk`ckl~uR@7pdtL|#L^ZFwk;i-#ELO5o#N;3a=2!WVtqxNerQ 
z@ncbqjU~@7V5iX72EF++9n+I~>n~k9xN@H|vvP?>XE=GD5LST)7+->z9mQ9wus|`A zDuc}R;~Ob}JuNLlkP^q6K^a8Pjl9xkQUyvLBghCbXfN|{QGlWt-9~775Q#$LJhSzV z)JV@TLMQgZ#OTG8Z^y@N2%!rj(t&d+57fCw z5zZRvsuV(~V^4dEo(6YU1ntLk{9bPK*FD0r zkIYgG9E^PO@RQWD2~LcOjGw_C-95*FB#9;^nVIgaWdeU|lLo%qygglrnd=xWeBP}+ zf5rSpT)?nD=(VU_TnbRTP*ly#bs?LQVQV#{wG)!5AE`Z(W*ew3WQLFhd5Uk zszVK-HZ*a!kUswq!o7!fIe)X}DeDb7P1)obb;4rPd8w?`?p3l8=sqT|sW8G3(gI)2 z{Zfj)eSS;2>$^_C3Gt;%_+8Eyl(79I3?&omdivv@ljr6m!LYG$&!jV* zO0R_N#7^N=_<+FIcI)W73RNK8HmAPXXdvU;ojETgKH9Zf0-!(`!|9YSzJM4o&F#ag zmisxAW5MN@jw^$0nfqz+o9%x~t^eU44Hv=g6tQ4kmq-7@MSn{@GlWnoE82&BBSjGY z_p3#bZ~s777E$W&2mIH6w1cf}VVfm~@$~<8bsg-{@dPbqM7;m)*}iiiH`=2!9{#@( zaxTaq2V-EqKD#0mk)Yts6HLZvFe8P@XfPuu#?jP_lrKjM4Wv?Uw9ptWG>}4Iw9r5f qfzd((DFj9f4Wtl2$}+%~#(#bf$%oR#|5zOvfWXt$&t;ucLK6TYI) literal 0 HcmV?d00001 diff --git a/docs/runtime/api.md b/docs/runtime/api.md index 5932792..3ff9ff0 100644 --- a/docs/runtime/api.md +++ b/docs/runtime/api.md @@ -1 +1,35 @@ # API + +## Runtime Layered Architecture + +Here is a figure of runtime layered architecture. + +![Layered Architecture](api-layered-arch.png) + +There are three parts - Frontend, Core and Backend. Core works with Frontend and Backend API. Frontend gets user inputs(neural networks models) and Backend does the actual computation. + +## Frontend API + +Frontend API is about from creation/loading the model and + +Runtime supports two (frontend) APIs - NN API and NNFW API. + +### NN API + +NN API stands for Android Neural Networks API. It is part of Android Open Source Project and we provide a binding between NN API and One Runtime. + +For usage, refer to [Howto : NN API](../howto/how-to-use-nnapi-binding.md). + +### NNFW API + +NNFW API is ONE's own API. It supports loading models from NN Packages. As it is our own API, It can do most of functionalities that One Runtime offers. Representatively, it provides functions for execution with multiple backends. + +For usage, refer to [Howto : NNFW API](../howto/how-to-use-nnfw-api.md). + +## Backend API + +Backend API is defined by One Runtime. + +Backend API is about actual computation of operations and memory management for operands. In order to allow different kinds of computation units or computation libraries, One Runtime defines Backend API to support user defined operation kernels and memory manager. It contains a lot of C++ headers which are subject to change. + +For detailed descriptions, refer to [Backend API](../runtime/backend-api.md). diff --git a/docs/runtime/core.md b/docs/runtime/core.md index 42ba75f..64a6c62 100644 --- a/docs/runtime/core.md +++ b/docs/runtime/core.md @@ -68,7 +68,7 @@ Let's say we have some functions written in a certain programming language. Then With generated tensors and kernels, the compiler creates executor objects. There are 3 types of executors are supported - Linear, Dataflow, and Parallel. Linear executor is the default executor and Dataflow Executor and Parallel Executor are experimental. -For more about executors, please refer to [Executors](./executors.md) document. +For more about executors, please refer to [Executors](executors.md) document. ### Module `exec` @@ -83,4 +83,4 @@ For more about executors, please refer to [Executors](./executors.md) document. Backends are plugins and they are loaded dynamically(via `dlopen`). So this module is a set of interface classes for backend implementation. `compiler` can compile with a variety of backends without knowing specific backend implementation. 
-Backend interface classes are mostly about memory management and kernel generation. For more, please refer to [Backend API](./backend-api.md) document.
+Backend interface classes are mostly about memory management and kernel generation. For more, please refer to [Backend API](backend-api.md) document.
diff --git a/docs/runtime/heterogeneous-execution.md b/docs/runtime/heterogeneous-execution.md
index dc39dae..e7a5e27 100644
--- a/docs/runtime/heterogeneous-execution.md
+++ b/docs/runtime/heterogeneous-execution.md
@@ -12,11 +12,11 @@ Here is another case. Let's say we have a model that is not sequential so there
 
 ![Add-3Conv model](heterogeneous-execution-add-3-conv-model.png)
 
-Say we have 3 backends that are based on CPU, GPU and NPU(Neural Processing Unit) respectively. After executing Add, 3 Conv2D operations are ready to run. We may utilize those backends with [Parallel Executor (experimental)](./executors.md#parallel-executor-experimental). For this case we may get performance gain regardless of kernels' speed as those are run in parallel independently.
+Say we have 3 backends that are based on CPU, GPU and NPU(Neural Processing Unit) respectively. After executing Add, 3 Conv2D operations are ready to run. We may utilize those backends with [Parallel Executor (experimental)](executors.md#parallel-executor-experimental). For this case we may get performance gain regardless of kernels' speed as those are run in parallel independently.
 
 ## Graph Transformation
 
-Unfortunately it is not that simple to get performance gain. As each backend has its own memory management module, a copy must be done between backend boundaries. Plus, it may require layout changes so "Permute" operations are added from `PermutationInsertionPass`. This process is done from [Lowering](./core.md#1-lowering) phase of compilation.
+Unfortunately it is not that simple to get performance gain. As each backend has its own memory management module, a copy must be done between backend boundaries. Plus, it may require layout changes so "Permute" operations are added from `PermutationInsertionPass`. This process is done from [Lowering](core.md#1-lowering) phase of compilation.
 
 Here is an example of that. Let's say we have assigned different backends for Add and Conv2D. So a Permute operation is inserted between them.
 
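As a quick illustration of the NNFW API and backend selection described in the docs above, here is a minimal sketch (not part of this patch) of loading an NN Package and running it while choosing backends explicitly. It assumes the `nnfw.h` C API (`nnfw_create_session`, `nnfw_load_model_from_file`, `nnfw_set_available_backends`, `nnfw_prepare`, `nnfw_set_input`/`nnfw_set_output`, `nnfw_run`); the package path, backend list, and buffer sizes are made-up values for the example.

```c
// Minimal sketch: run an NN Package with the NNFW API, selecting backends.
// The package path, backend names, and tensor sizes below are illustrative only;
// real sizes must match the loaded model.
#include <stdio.h>
#include <nnfw.h>

int main(void)
{
  nnfw_session *session = NULL;
  if (nnfw_create_session(&session) != NNFW_STATUS_NO_ERROR)
    return 1;

  // Load an extracted NN Package directory (assumed path).
  nnfw_load_model_from_file(session, "./nnpackage/model");

  // Semicolon-separated backend list; earlier entries get higher priority.
  nnfw_set_available_backends(session, "acl_cl;cpu");

  // Compile/prepare the model for the selected backends.
  nnfw_prepare(session);

  // Illustrative buffers for a single float input and output.
  static float input[4] = {0.f, 1.f, 2.f, 3.f};
  static float output[4] = {0.f};
  nnfw_set_input(session, 0, NNFW_TYPE_TENSOR_FLOAT32, input, sizeof(input));
  nnfw_set_output(session, 0, NNFW_TYPE_TENSOR_FLOAT32, output, sizeof(output));

  nnfw_run(session);
  printf("output[0] = %f\n", output[0]);

  nnfw_close_session(session);
  return 0;
}
```

In a real application every returned `NNFW_STATUS` would be checked and the input/output tensor shapes queried from the session rather than hard-coded. The backend string is also what enables the heterogeneous execution scenario described in `heterogeneous-execution.md`: the runtime assigns operations to the available backends and inserts Permute operations at backend boundaries where needed.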
diff --git a/infra/cmake/packages/ARMComputeSourceConfig.cmake b/infra/cmake/packages/ARMComputeSourceConfig.cmake index 51a235a..adec1f9 100644 --- a/infra/cmake/packages/ARMComputeSourceConfig.cmake +++ b/infra/cmake/packages/ARMComputeSourceConfig.cmake @@ -8,7 +8,7 @@ function(_ARMComputeSource_import) nnas_include(OptionTools) envoption(EXTERNAL_DOWNLOAD_SERVER "https://github.com") - set(ARMCOMPUTE_URL ${EXTERNAL_DOWNLOAD_SERVER}/ARM-software/ComputeLibrary/archive/v19.11.1.tar.gz) + set(ARMCOMPUTE_URL ${EXTERNAL_DOWNLOAD_SERVER}/ARM-software/ComputeLibrary/archive/v20.05.tar.gz) ExternalSource_Download(ARMCOMPUTE ${ARMCOMPUTE_URL}) set(ARMComputeSource_DIR ${ARMCOMPUTE_SOURCE_DIR} PARENT_SCOPE) diff --git a/infra/cmake/packages/FarmhashSourceConfig.cmake b/infra/cmake/packages/FarmhashSourceConfig.cmake index 8a9a384..a19c8b9 100644 --- a/infra/cmake/packages/FarmhashSourceConfig.cmake +++ b/infra/cmake/packages/FarmhashSourceConfig.cmake @@ -9,7 +9,7 @@ function(_FarmhashSource_import) # NOTE TensorFlow 1.12 downloads farmhash from the following URL # TensorFlow 1.13.1 downloads farmhash from the following URL - # TensorFlow 2.3-rc0 downloads farmhash from the following URL + # TensorFlow 2.3.0 downloads farmhash from the following URL envoption(FARMHASH_1_12_URL https://github.com/google/farmhash/archive/816a4ae622e964763ca0862d9dbd19324a1eaf45.tar.gz) ExternalSource_Download(FARMHASH ${FARMHASH_1_12_URL}) diff --git a/infra/cmake/packages/FlatBuffersConfig.cmake b/infra/cmake/packages/FlatBuffersConfig.cmake index ab0b770..da084e7 100644 --- a/infra/cmake/packages/FlatBuffersConfig.cmake +++ b/infra/cmake/packages/FlatBuffersConfig.cmake @@ -25,7 +25,8 @@ function(_FlatBuffers_build) BUILD_DIR ${CMAKE_BINARY_DIR}/externals/FLATBUFFERS/build INSTALL_DIR ${EXT_OVERLAY_DIR} BUILD_FLAGS ${ADDITIONAL_CXX_FLAGS} - IDENTIFIER "1.10-fix1" + IDENTIFIER "1.10-fix2" + EXTRA_OPTS "-DFLATBUFFERS_BUILD_TESTS:BOOL=OFF" PKG_NAME "FLATBUFFERS") endfunction(_FlatBuffers_build) diff --git a/infra/cmake/packages/HDF5Config.cmake b/infra/cmake/packages/HDF5Config.cmake index e282e0b..19803f1 100644 --- a/infra/cmake/packages/HDF5Config.cmake +++ b/infra/cmake/packages/HDF5Config.cmake @@ -27,6 +27,7 @@ _HDF5_build() find_path(HDF5_CONFIG_DIR "hdf5-config.cmake" PATHS ${EXT_OVERLAY_DIR} PATH_SUFFIXES + cmake share/cmake share/cmake/hdf5 cmake/hdf5 diff --git a/infra/cmake/packages/Pybind11Config.cmake b/infra/cmake/packages/Pybind11Config.cmake new file mode 100644 index 0000000..b6d5004 --- /dev/null +++ b/infra/cmake/packages/Pybind11Config.cmake @@ -0,0 +1,22 @@ +function(_Pybind11_import) + nnas_find_package(Pybind11Source QUIET) + + if(NOT Pybind11Source_FOUND) + set(Pybind11_FOUND FALSE PARENT_SCOPE) + return() + endif(NOT Pybind11Source_FOUND) + + nnas_include(ExternalBuildTools) + ExternalBuild_CMake(CMAKE_DIR ${Pybind11Source_DIR} + BUILD_DIR ${CMAKE_BINARY_DIR}/externals/PYBIND11/build + INSTALL_DIR ${EXT_OVERLAY_DIR} + IDENTIFIER "2.5.0" + PKG_NAME "PYBIND11" + EXTRA_OPTS "-DPYBIND11_TEST:BOOL=OFF") + + find_path(Pybind11_INCLUDE_DIRS NAMES pybind11.h PATHS ${EXT_OVERLAY_DIR} PATH_SUFFIXES include/pybind11) + + set(Pybind11_FOUND TRUE PARENT_SCOPE) +endfunction(_Pybind11_import) + +_Pybind11_import() diff --git a/infra/cmake/packages/Pybind11SourceConfig.cmake b/infra/cmake/packages/Pybind11SourceConfig.cmake new file mode 100644 index 0000000..76f51e4 --- /dev/null +++ b/infra/cmake/packages/Pybind11SourceConfig.cmake @@ -0,0 +1,18 @@ +function(_Pybind11Source_import) + if(NOT 
DOWNLOAD_PYBIND11) + set(Pybind11Source_FOUND FALSE PARENT_SCOPE) + return() + endif(NOT DOWNLOAD_PYBIND11) + + nnas_include(ExternalSourceTools) + nnas_include(OptionTools) + + envoption(PYBIND11_URL https://github.com/pybind/pybind11/archive/v2.5.0.tar.gz) + + ExternalSource_Download(PYBIND11 ${PYBIND11_URL}) + + set(Pybind11Source_DIR ${PYBIND11_SOURCE_DIR} PARENT_SCOPE) + set(Pybind11Source_FOUND TRUE PARENT_SCOPE) +endfunction(_Pybind11Source_import) + +_Pybind11Source_import() diff --git a/infra/cmake/packages/TensorFlowEigenSource-2.3.0/TensorFlowEigenSourceConfig.cmake b/infra/cmake/packages/TensorFlowEigenSource-2.3.0/TensorFlowEigenSourceConfig.cmake new file mode 100644 index 0000000..d50d045 --- /dev/null +++ b/infra/cmake/packages/TensorFlowEigenSource-2.3.0/TensorFlowEigenSourceConfig.cmake @@ -0,0 +1,21 @@ +function(_TensorFlowEigenSource_import) + if(NOT DOWNLOAD_EIGEN) + set(TensorFlowEigenSource_FOUND FALSE PARENT_SCOPE) + return() + endif(NOT DOWNLOAD_EIGEN) + + nnas_include(ExternalSourceTools) + nnas_include(OptionTools) + + # Exact version used by TensorFlow v2.3.0. + # See tensorflow/tensorflow/workspace.bzl. + envoption(EXTERNAL_DOWNLOAD_SERVER "https://gitlab.com") + envoption(TENSORFLOW_2_3_0_EIGEN_URL ${EXTERNAL_DOWNLOAD_SERVER}/libeigen/eigen/-/archive/386d809bde475c65b7940f290efe80e6a05878c4/eigen-386d809bde475c65b7940f290efe80e6a05878c4.tar.gz) + + ExternalSource_Download(EIGEN DIRNAME TENSORFLOW-2.3.0-EIGEN ${TENSORFLOW_2_3_0_EIGEN_URL}) + + set(TensorFlowEigenSource_DIR ${EIGEN_SOURCE_DIR} PARENT_SCOPE) + set(TensorFlowEigenSource_FOUND TRUE PARENT_SCOPE) +endfunction(_TensorFlowEigenSource_import) + +_TensorFlowEigenSource_import() diff --git a/infra/cmake/packages/TensorFlowEigenSource-2.3.0/TensorFlowEigenSourceConfigVersion.cmake b/infra/cmake/packages/TensorFlowEigenSource-2.3.0/TensorFlowEigenSourceConfigVersion.cmake new file mode 100644 index 0000000..04df5eb --- /dev/null +++ b/infra/cmake/packages/TensorFlowEigenSource-2.3.0/TensorFlowEigenSourceConfigVersion.cmake @@ -0,0 +1,10 @@ +set(PACKAGE_VERSION "2.3.0") +set(PACKAGE_VERSION_EXACT FALSE) +set(PACKAGE_VERSION_COMPATIBLE FALSE) +set(PACKAGE_VERSION_UNSUITABLE TRUE) + +if(PACKAGE_FIND_VERSION VERSION_EQUAL PACKAGE_VERSION) + set(PACKAGE_VERSION_EXACT TRUE) + set(PACKAGE_VERSION_COMPATIBLE TRUE) + set(PACKAGE_VERSION_UNSUITABLE FALSE) +endif(PACKAGE_FIND_VERSION VERSION_EQUAL PACKAGE_VERSION) diff --git a/infra/cmake/packages/TensorFlowSource-2.3.0/TensorFlowSourceConfig.cmake b/infra/cmake/packages/TensorFlowSource-2.3.0/TensorFlowSourceConfig.cmake new file mode 100644 index 0000000..5c3a0f8 --- /dev/null +++ b/infra/cmake/packages/TensorFlowSource-2.3.0/TensorFlowSourceConfig.cmake @@ -0,0 +1,18 @@ +function(_TensorFlowSource_import) + if(NOT DOWNLOAD_TENSORFLOW) + set(TensorFlowSource_FOUND FALSE PARENT_SCOPE) + return() + endif(NOT DOWNLOAD_TENSORFLOW) + + nnas_include(ExternalSourceTools) + nnas_include(OptionTools) + + envoption(TENSORFLOW_2_3_0_URL https://github.com/tensorflow/tensorflow/archive/v2.3.0.tar.gz) + + ExternalSource_Download(TENSORFLOW DIRNAME TENSORFLOW-2.3.0 ${TENSORFLOW_2_3_0_URL}) + + set(TensorFlowSource_DIR ${TENSORFLOW_SOURCE_DIR} PARENT_SCOPE) + set(TensorFlowSource_FOUND TRUE PARENT_SCOPE) +endfunction(_TensorFlowSource_import) + +_TensorFlowSource_import() diff --git a/infra/cmake/packages/TensorFlowSource-2.3.0/TensorFlowSourceConfigVersion.cmake b/infra/cmake/packages/TensorFlowSource-2.3.0/TensorFlowSourceConfigVersion.cmake new file mode 100644 index 
0000000..04df5eb --- /dev/null +++ b/infra/cmake/packages/TensorFlowSource-2.3.0/TensorFlowSourceConfigVersion.cmake @@ -0,0 +1,10 @@ +set(PACKAGE_VERSION "2.3.0") +set(PACKAGE_VERSION_EXACT FALSE) +set(PACKAGE_VERSION_COMPATIBLE FALSE) +set(PACKAGE_VERSION_UNSUITABLE TRUE) + +if(PACKAGE_FIND_VERSION VERSION_EQUAL PACKAGE_VERSION) + set(PACKAGE_VERSION_EXACT TRUE) + set(PACKAGE_VERSION_COMPATIBLE TRUE) + set(PACKAGE_VERSION_UNSUITABLE FALSE) +endif(PACKAGE_FIND_VERSION VERSION_EQUAL PACKAGE_VERSION) diff --git a/infra/docker/Dockerfile b/infra/docker/Dockerfile index e675b53..052cc4f 100644 --- a/infra/docker/Dockerfile +++ b/infra/docker/Dockerfile @@ -1,8 +1,6 @@ FROM ubuntu:16.04 ARG UBUNTU_MIRROR -ENV http_proxy $http_proxy -ENV https_proxy $https_proxy RUN if [ -n "$http_proxy" ] ; then echo "Acquire::http::proxy \"${http_proxy}\";" >> /etc/apt/apt.conf ; fi RUN if [ -n "$https_proxy" ] ; then echo "Acquire::https::proxy \"${https_proxy}\";" >> /etc/apt/apt.conf ; fi @@ -22,6 +20,7 @@ RUN apt-get update && apt-get -qqy install libprotobuf-dev protobuf-compiler # Additonal tools RUN apt-get update && apt-get -qqy install doxygen graphviz wget unzip clang-format-3.9 python3 python3-pip python3-venv hdf5-tools pylint +RUN pip3 install --upgrade pip RUN pip3 install yapf==0.22.0 numpy # Install google test (source) diff --git a/infra/docker/Dockerfile.1804 b/infra/docker/Dockerfile.1804 index fc6fc9a..cc31bba 100644 --- a/infra/docker/Dockerfile.1804 +++ b/infra/docker/Dockerfile.1804 @@ -1,12 +1,6 @@ FROM ubuntu:18.04 ARG UBUNTU_MIRROR -ENV http_proxy $http_proxy -ENV https_proxy $https_proxy - -RUN if [ -n "$http_proxy" ] ; then echo "Acquire::http::proxy \"${http_proxy}\";" >> /etc/apt/apt.conf ; fi -RUN if [ -n "$https_proxy" ] ; then echo "Acquire::https::proxy \"${https_proxy}\";" >> /etc/apt/apt.conf ; fi -RUN if [ -n "$UBUNTU_MIRROR" ] ; then sed "s/archive.ubuntu.com/${UBUNTU_MIRROR}/g" -i /etc/apt/sources.list ; fi # Install 'add-apt-repository' RUN apt-get update && apt-get -qqy install software-properties-common @@ -22,6 +16,7 @@ RUN apt-get update && apt-get -qqy install libprotobuf-dev protobuf-compiler # Additonal tools RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get -qqy install doxygen graphviz wget unzip clang-format-3.9 python3 python3-pip python3-venv hdf5-tools pylint +RUN pip3 install --upgrade pip RUN pip3 install yapf==0.22.0 numpy # Install google test (source) diff --git a/infra/nncc/CMakeLists.txt b/infra/nncc/CMakeLists.txt index 3ac6680..0be6885 100644 --- a/infra/nncc/CMakeLists.txt +++ b/infra/nncc/CMakeLists.txt @@ -98,6 +98,7 @@ option(DOWNLOAD_CAFFE "Download Caffe source" ON) option(DOWNLOAD_PYTORCH "Download Pytorch source" ON) option(DOWNLOAD_ONNX "Download ONNX source" ON) option(DOWNLOAD_ABSEIL "Download Abseil-cpp source" ON) +option(DOWNLOAD_PYBIND11 "Download Pybind11 source" ON) option(DOWNLOAD_GTEST "Download Google Test source" ON) option(BUILD_GTEST "Build Google Test from the downloaded source" ON) diff --git a/infra/nncc/command/utcount b/infra/nncc/command/utcount index d4610e3..d06c5c9 100644 --- a/infra/nncc/command/utcount +++ b/infra/nncc/command/utcount @@ -13,7 +13,7 @@ BUILD_ITEMS="angkor cwrap pepper-str pepper-strcast pp stdex \ oops pepper-assert \ hermes hermes-std \ loco locop locomotiv logo-core logo \ -foder souschef arser \ +foder souschef arser vconone \ safemain mio-circle mio-tflite \ tflite2circle \ luci \ diff --git a/infra/nnfw/cmake/CfgOptionFlags.cmake b/infra/nnfw/cmake/CfgOptionFlags.cmake index 
d1395f8..3c6b7d9 100644 --- a/infra/nnfw/cmake/CfgOptionFlags.cmake +++ b/infra/nnfw/cmake/CfgOptionFlags.cmake @@ -23,7 +23,7 @@ CMAKE_DEPENDENT_OPTION(BUILD_RUNTIME_NNAPI_TEST "Build Runtime NN API Generated OFF) option(BUILD_RUNTIME_NNFW_API_TEST "Build Runtime NNFW API Tests" ON) option(BUILD_TFLITE_RUN "Build tflite-run" ON) -option(BUILD_TFLITE_RUN_2_2_0 "Build tflite-run 2.2.0" OFF) +option(BUILD_TFLITE_VANILLA_RUN "Build tflite-vanilla-run" OFF) option(BUILD_TFLITE_BENCHMARK_MODEL "Build tflite benchmark model" OFF) option(BUILD_NNAPI_TEST "Build nnapi_test" ON) option(BUILD_NNPACKAGE_RUN "Build nnpackge_run" ON) @@ -70,7 +70,7 @@ option(DOWNLOAD_BOOST "Download boost source" OFF) option(DOWNLOAD_RUY "Download ruy source" ON) option(BUILD_BOOST "Build boost source" OFF) option(BUILD_TENSORFLOW_LITE "Build TensorFlow Lite from the downloaded source" ON) -option(BUILD_TENSORFLOW_LITE_2_2_0 "Build TensorFlow Lite from the downloaded source" OFF) +option(BUILD_TENSORFLOW_LITE_2_3_0 "Build TensorFlow Lite 2.3.0 from the downloaded source" OFF) option(BUILD_GTEST "Download and build Google Test" ON) option(BUILD_ARMCOMPUTE "Build ARM Compute from the downloaded source" ON) option(BUILD_RUY "Build ruy library from the downloaded source" ON) diff --git a/infra/nnfw/cmake/packages/EigenConfig.cmake b/infra/nnfw/cmake/packages/EigenConfig.cmake index f37d653..e71830a 100644 --- a/infra/nnfw/cmake/packages/EigenConfig.cmake +++ b/infra/nnfw/cmake/packages/EigenConfig.cmake @@ -1,5 +1,5 @@ function(_Eigen_import) - nnas_find_package(TensorFlowEigenSource-2.3.0-rc0 QUIET) + nnas_find_package(TensorFlowEigenSource EXACT 2.3.0 QUIET) if(NOT TensorFlowEigenSource_FOUND) set(Eigen_FOUND FALSE PARENT_SCOPE) diff --git a/infra/nnfw/cmake/packages/TensorFlowLite-2.2.0Config.cmake b/infra/nnfw/cmake/packages/TensorFlowLite-2.2.0Config.cmake deleted file mode 100644 index e698235..0000000 --- a/infra/nnfw/cmake/packages/TensorFlowLite-2.2.0Config.cmake +++ /dev/null @@ -1,92 +0,0 @@ -if(BUILD_TENSORFLOW_LITE_2_2_0) - macro(return_unless VAR) - if(NOT ${VAR}) - message("${VAR} NOT TRUE") - set(TensorFlowLite_2_2_0_FOUND PARENT_SCOPE) - return() - endif(NOT ${VAR}) - endmacro(return_unless) - - nnas_include(ExternalSourceTools) - nnas_include(OptionTools) - - # Below urls come from https://github.com/tensorflow/tensorflow/blob/v2.2.0/tensorflow/lite/tools/make/Makefile - - set(absl_url "https://github.com/abseil/abseil-cpp/archive/43ef2148c0936ebf7cb4be6b19927a9d9d145b8f.tar.gz") - ExternalSource_Download("tflite220_Absl" ${absl_url}) - set(TFLite220AbslSource_DIR "${tflite220_Absl_SOURCE_DIR}") - if (NOT TFLite220AbslSource_DIR STREQUAL "") - set(TFLite220AbslSource_FOUND TRUE) - endif() - return_unless(TFLite220AbslSource_FOUND) - - set(eigen_url "https://gitlab.com/libeigen/eigen/-/archive/52a2fbbb008a47c5e3fb8ac1c65c2feecb0c511c/eigen-52a2fbbb008a47c5e3fb8ac1c65c2feecb0c511c.tar.gz") - ExternalSource_Download("tflite220_Eigen" ${eigen_url}) - set(TFLite220EigenSource_DIR "${tflite220_Eigen_SOURCE_DIR}") - if (NOT TFLite220EigenSource_DIR STREQUAL "") - set(TFLite220EigenSource_FOUND TRUE) - endif() - return_unless(TFLite220EigenSource_FOUND) - - set(farmhash_url "https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/farmhash/archive/816a4ae622e964763ca0862d9dbd19324a1eaf45.tar.gz") - ExternalSource_Download("tflite220_Farmhash" ${farmhash_url}) - set(TFLite220FarmhashSource_DIR "${tflite220_Farmhash_SOURCE_DIR}") - if (NOT TFLite220FarmhashSource_DIR STREQUAL "") - 
set(TFLite220FarmhashSource_FOUND TRUE) - endif() - return_unless(TFLite220FarmhashSource_FOUND) - - set(fft2d_url "https://storage.googleapis.com/mirror.tensorflow.org/www.kurims.kyoto-u.ac.jp/~ooura/fft2d.tgz") - ExternalSource_Download("tflite220_FFT2D" ${fft2d_url}) - set(TFLite220FFT2DSource_DIR "${tflite220_FFT2D_SOURCE_DIR}") - if (NOT TFLite220FFT2DSource_DIR STREQUAL "") - set(TFLite220FFT2DSource_FOUND TRUE) - endif() - return_unless(TFLite220FFT2DSource_FOUND) - - set(flatbuffers_url "https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/flatbuffers/archive/v1.11.0.tar.gz") - ExternalSource_Download("tflite220_FlatBuffers" ${flatbuffers_url}) - set(TFLite220FlatBuffersSource_DIR "${tflite220_FlatBuffers_SOURCE_DIR}") - if (NOT TFLite220FlatBuffersSource_DIR STREQUAL "") - set(TFLite220FlatBuffersSource_FOUND TRUE) - endif() - return_unless(TFLite220FlatBuffersSource_FOUND) - - set(fp16_url "https://github.com/Maratyszcza/FP16/archive/febbb1c163726b5db24bed55cc9dc42529068997.zip") - ExternalSource_Download("tflite220_FP16" ${fp16_url}) - set(TFLite220FP16Source_DIR "${tflite220_FP16_SOURCE_DIR}") - if (NOT TFLite220FP16Source_DIR STREQUAL "") - set(TFLite220FP16Source_FOUND TRUE) - endif() - return_unless(TFLite220FP16Source_FOUND) - - set(gemmlowp_url "https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/gemmlowp/archive/12fed0cd7cfcd9e169bf1925bc3a7a58725fdcc3.zip") - ExternalSource_Download("tflite220_GEMMLowp" ${gemmlowp_url}) - set(TFLite220GEMMLowpSource_DIR "${tflite220_GEMMLowp_SOURCE_DIR}") - if (NOT TFLite220GEMMLowpSource_DIR STREQUAL "") - set(TFLite220GEMMLowpSource_FOUND TRUE) - endif() - return_unless(TFLite220GEMMLowpSource_FOUND) - - set(neon2sse_url "https://github.com/intel/ARM_NEON_2_x86_SSE/archive/master.zip") - ExternalSource_Download("tflite220_NEON2SSE" ${neon2sse_url}) - set(TFLite220NEON2SSESource_DIR "${tflite220_NEON2SSE_SOURCE_DIR}") - if (NOT TFLite220NEON2SSESource_DIR STREQUAL "") - set(TFLite220NEON2SSESource_FOUND TRUE) - endif() - return_unless(TFLite220NEON2SSESource_FOUND) - - set(tensorflow_url "https://github.com/tensorflow/tensorflow/archive/v2.2.0.tar.gz") - ExternalSource_Download("tflite220_TensorFlow" ${tensorflow_url}) - set(TFLite220TensorFlowSource_DIR "${tflite220_TensorFlow_SOURCE_DIR}") - if (NOT TFLite220TensorFlowSource_DIR STREQUAL "") - set(TFLite220TensorFlowSource_FOUND TRUE) - endif() - return_unless(TFLite220TensorFlowSource_FOUND) - - nnas_include(ExternalProjectTools) - add_extdirectory("${CMAKE_CURRENT_LIST_DIR}/TensorFlowLite-2.2.0" tflite-2.2.0) - - set(TensorFlowLite_2_2_0_FOUND TRUE) - return() -endif() diff --git a/infra/nnfw/cmake/packages/TensorFlowLite-2.2.0/CMakeLists.txt b/infra/nnfw/cmake/packages/TensorFlowLite-2.3.0/CMakeLists.txt similarity index 51% rename from infra/nnfw/cmake/packages/TensorFlowLite-2.2.0/CMakeLists.txt rename to infra/nnfw/cmake/packages/TensorFlowLite-2.3.0/CMakeLists.txt index 8e7f78e..20547b9 100644 --- a/infra/nnfw/cmake/packages/TensorFlowLite-2.2.0/CMakeLists.txt +++ b/infra/nnfw/cmake/packages/TensorFlowLite-2.3.0/CMakeLists.txt @@ -1,8 +1,8 @@ -# Reference: https://github.com/tensorflow/tensorflow/blob/v2.2.0/tensorflow/lite/tools/make/Makefile +# Reference: https://github.com/tensorflow/tensorflow/blob/v2.3.0/tensorflow/lite/tools/make/Makefile # -# Tensorflow Lite library 2.2.0 +# Tensorflow Lite library 2.3.0 # -set(TENSORFLOW_LITE_BASE ${TFLite220TensorFlowSource_DIR}/tensorflow/lite) +set(TENSORFLOW_LITE_BASE 
${TFLiteVanillaTensorFlowSource_DIR}/tensorflow/lite) file(GLOB TFLITE_CORE_SRCS "${TENSORFLOW_LITE_BASE}/*.c" "${TENSORFLOW_LITE_BASE}/*.cc" @@ -18,8 +18,7 @@ file(GLOB TFLITE_API_SRCS "${TENSORFLOW_LITE_BASE}/core/api/*.c" list(APPEND TFLITE_PROFILING_SRCS "${TENSORFLOW_LITE_BASE}/profiling/memory_info.cc") list(APPEND TFLITE_PROFILING_SRCS "${TENSORFLOW_LITE_BASE}/profiling/time.cc") -file(GLOB TFLITE_EXPERIMENTAL_SRCS "${TENSORFLOW_LITE_BASE}/experimental/resource/*.cc" - "${TENSORFLOW_LITE_BASE}/experimental/ruy/*.cc") +file(GLOB TFLITE_EXPERIMENTAL_SRCS "${TENSORFLOW_LITE_BASE}/experimental/resource/*.cc") file(GLOB TFLITE_SPARSITY_SRCS "${TENSORFLOW_LITE_BASE}/tools/optimize/sparsity/*.cc") @@ -32,24 +31,37 @@ list(APPEND TFLITE_SRCS ${TFLITE_EXPERIMENTAL_SRCS}) list(APPEND TFLITE_SRCS ${TFLITE_SPARSITY_SRCS}) # externals -list(APPEND TFLITE_SRCS "${TFLite220FarmhashSource_DIR}/src/farmhash.cc") -list(APPEND TFLITE_SRCS "${TFLite220FFT2DSource_DIR}/fftsg.c") -list(APPEND TFLITE_SRCS "${TFLite220FFT2DSource_DIR}/fftsg2d.c") -list(APPEND TFLITE_SRCS "${TFLite220FlatBuffersSource_DIR}/src/util.cpp") +list(APPEND TFLITE_SRCS "${TFLiteVanillaFarmhashSource_DIR}/src/farmhash.cc") +list(APPEND TFLITE_SRCS "${TFLiteVanillaFFT2DSource_DIR}/fftsg.c") +list(APPEND TFLITE_SRCS "${TFLiteVanillaFFT2DSource_DIR}/fftsg2d.c") +list(APPEND TFLITE_SRCS "${TFLiteVanillaFlatBuffersSource_DIR}/src/util.cpp") # externals - absl -file(GLOB_RECURSE ABSL_SRCS "${TFLite220AbslSource_DIR}/absl/*.cc") -file(GLOB_RECURSE ABSL_EXCLS "${TFLite220AbslSource_DIR}/absl/*test*.cc" - "${TFLite220AbslSource_DIR}/absl/*benchmark*.cc" - "${TFLite220AbslSource_DIR}/absl/synchronization/*.cc" - "${TFLite220AbslSource_DIR}/absl/debugging/*.cc" - "${TFLite220AbslSource_DIR}/absl/hash/*.cc" - "${TFLite220AbslSource_DIR}/absl/flags/*.cc") +file(GLOB_RECURSE ABSL_SRCS "${TFLiteVanillaAbslSource_DIR}/absl/*.cc") +file(GLOB_RECURSE ABSL_EXCLS "${TFLiteVanillaAbslSource_DIR}/absl/*test*.cc" + "${TFLiteVanillaAbslSource_DIR}/absl/*benchmark*.cc" + "${TFLiteVanillaAbslSource_DIR}/absl/synchronization/*.cc" + "${TFLiteVanillaAbslSource_DIR}/absl/debugging/*.cc" + "${TFLiteVanillaAbslSource_DIR}/absl/hash/*.cc" + "${TFLiteVanillaAbslSource_DIR}/absl/flags/*.cc" + "${TFLiteVanillaAbslSource_DIR}/absl/random/*.cc") list(REMOVE_ITEM ABSL_SRCS ${ABSL_EXCLS}) list(APPEND TFLITE_SRCS ${ABSL_SRCS}) +# externals - ruy +file(GLOB RUY_SRCS "${TFLiteVanillaRuySource_DIR}/ruy/*.cc") +file(GLOB_RECURSE RUY_EXCLS "${TFLiteVanillaRuySource_DIR}/ruy/*test*.cc" + "${TFLiteVanillaRuySource_DIR}/ruy/*benchmark*.cc" + "${TFLiteVanillaRuySource_DIR}/ruy/*example*.cc") +list(REMOVE_ITEM RUY_SRCS ${RUY_EXCLS}) +# Temporary fix for ruy compilation error. +# TODO(b/158800055): Remove this hack once the ruy version is correctly bumped. +list(REMOVE_ITEM RUY_SRCS "${TFLiteVanillaRuySource_DIR}/ruy/prepare_packed_matrices.cc") +list(APPEND TFLITE_SRCS ${RUY_SRCS}) + + # Build with mmap? true -# caution: v2.2.0's Makefile has wrong code on this part. This is fixed on master branch. +# caution: v2.3.0's Makefile has wrong code on this part. This is fixed on master branch. 
set(BUILD_WITH_MMAP TRUE) if(${BUILD_WITH_MMAP}) list(REMOVE_ITEM TFLITE_SRCS "${TENSORFLOW_LITE_BASE}/mmap_allocation_disabled.cc") @@ -86,25 +98,26 @@ file(GLOB_RECURSE TFLITE_EXCLS "${TENSORFLOW_LITE_BASE}/*test*.cc" list(REMOVE_ITEM TFLITE_SRCS ${TFLITE_EXCLS}) # include headers -list(APPEND TFLITE_INCLUDES "${TFLite220TensorFlowSource_DIR}") -list(APPEND TFLITE_INCLUDES "${TFLite220EigenSource_DIR}") -list(APPEND TFLITE_INCLUDES "${TFLite220AbslSource_DIR}") -list(APPEND TFLITE_INCLUDES "${TFLite220GEMMLowpSource_DIR}") -list(APPEND TFLITE_INCLUDES "${TFLite220NEON2SSESource_DIR}") -list(APPEND TFLITE_INCLUDES "${TFLite220FarmhashSource_DIR}/src") -list(APPEND TFLITE_INCLUDES "${TFLite220FlatBuffersSource_DIR}/include") -list(APPEND TFLITE_INCLUDES "${TFLite220FP16Source_DIR}/include") - -add_library(tensorflow-lite-2.2.0 STATIC ${TFLITE_SRCS}) -target_include_directories(tensorflow-lite-2.2.0 SYSTEM PUBLIC ${TFLITE_INCLUDES}) -target_compile_definitions(tensorflow-lite-2.2.0 PUBLIC "GEMMLOWP_ALLOW_SLOW_SCALAR_FALLBACK -DTFLITE_WITH_RUY -DTFLITE_WITH_RUY_GEMV") -set_property(TARGET tensorflow-lite-2.2.0 PROPERTY POSITION_INDEPENDENT_CODE ON) -target_link_libraries(tensorflow-lite-2.2.0 eigen ${LIB_PTHREAD} dl) -if(${BUILD_WITH_NNAPI}) - target_link_libraries(tensorflow-lite-2.2.0 rt) +list(APPEND TFLITE_INCLUDES "${TFLiteVanillaTensorFlowSource_DIR}") +list(APPEND TFLITE_INCLUDES "${TFLiteVanillaEigenSource_DIR}") +list(APPEND TFLITE_INCLUDES "${TFLiteVanillaAbslSource_DIR}") +list(APPEND TFLITE_INCLUDES "${TFLiteVanillaGEMMLowpSource_DIR}") +list(APPEND TFLITE_INCLUDES "${TFLiteVanillaNEON2SSESource_DIR}") +list(APPEND TFLITE_INCLUDES "${TFLiteVanillaFarmhashSource_DIR}/src") +list(APPEND TFLITE_INCLUDES "${TFLiteVanillaFlatBuffersSource_DIR}/include") +list(APPEND TFLITE_INCLUDES "${TFLiteVanillaFP16Source_DIR}/include") +list(APPEND TFLITE_INCLUDES "${TFLiteVanillaRuySource_DIR}") + +add_library(tensorflow-lite-2.3.0 STATIC ${TFLITE_SRCS}) +target_include_directories(tensorflow-lite-2.3.0 SYSTEM PUBLIC ${TFLITE_INCLUDES}) +target_compile_definitions(tensorflow-lite-2.3.0 PUBLIC "GEMMLOWP_ALLOW_SLOW_SCALAR_FALLBACK -DTFLITE_WITH_RUY -DTFLITE_WITH_RUY_GEMV") +set_property(TARGET tensorflow-lite-2.3.0 PROPERTY POSITION_INDEPENDENT_CODE ON) +target_link_libraries(tensorflow-lite-2.3.0 eigen ${LIB_PTHREAD} dl) +if(NOT ANDROID AND ${BUILD_WITH_NNAPI}) + target_link_libraries(tensorflow-lite-2.3.0 rt) endif() if(ANDROID) - target_link_libraries(tensorflow-lite-2.2.0 log) - target_include_directories(tensorflow-lite-2.2.0 PUBLIC "${NDK_DIR}/..") + target_link_libraries(tensorflow-lite-2.3.0 log) + target_include_directories(tensorflow-lite-2.3.0 PUBLIC "${NDK_DIR}/..") endif() diff --git a/infra/nnfw/cmake/packages/TensorFlowLite-2.3.0Config.cmake b/infra/nnfw/cmake/packages/TensorFlowLite-2.3.0Config.cmake new file mode 100644 index 0000000..d00ca96 --- /dev/null +++ b/infra/nnfw/cmake/packages/TensorFlowLite-2.3.0Config.cmake @@ -0,0 +1,100 @@ +if(BUILD_TENSORFLOW_LITE_2_3_0) + macro(return_unless VAR) + if(NOT ${VAR}) + message("${VAR} NOT TRUE") + set(TensorFlowLite_2_3_0_FOUND PARENT_SCOPE) + return() + endif(NOT ${VAR}) + endmacro(return_unless) + + nnas_include(ExternalSourceTools) + nnas_include(OptionTools) + + # Below urls come from https://github.com/tensorflow/tensorflow/blob/v2.3.0/tensorflow/lite/tools/make/Makefile + + set(absl_url "https://github.com/abseil/abseil-cpp/archive/df3ea785d8c30a9503321a3d35ee7d35808f190d.tar.gz") + ExternalSource_Download("TFLiteVanilla_Absl" 
${absl_url}) + set(TFLiteVanillaAbslSource_DIR "${TFLiteVanilla_Absl_SOURCE_DIR}") + if (NOT TFLiteVanillaAbslSource_DIR STREQUAL "") + set(TFLiteVanillaAbslSource_FOUND TRUE) + endif() + return_unless(TFLiteVanillaAbslSource_FOUND) + + set(eigen_url "https://gitlab.com/libeigen/eigen/-/archive/386d809bde475c65b7940f290efe80e6a05878c4/eigen-386d809bde475c65b7940f290efe80e6a05878c4.tar.gz") + ExternalSource_Download("TFLiteVanilla_Eigen" ${eigen_url}) + set(TFLiteVanillaEigenSource_DIR "${TFLiteVanilla_Eigen_SOURCE_DIR}") + if (NOT TFLiteVanillaEigenSource_DIR STREQUAL "") + set(TFLiteVanillaEigenSource_FOUND TRUE) + endif() + return_unless(TFLiteVanillaEigenSource_FOUND) + + set(farmhash_url "https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/farmhash/archive/816a4ae622e964763ca0862d9dbd19324a1eaf45.tar.gz") + ExternalSource_Download("TFLiteVanilla_Farmhash" ${farmhash_url}) + set(TFLiteVanillaFarmhashSource_DIR "${TFLiteVanilla_Farmhash_SOURCE_DIR}") + if (NOT TFLiteVanillaFarmhashSource_DIR STREQUAL "") + set(TFLiteVanillaFarmhashSource_FOUND TRUE) + endif() + return_unless(TFLiteVanillaFarmhashSource_FOUND) + + set(fft2d_url "https://storage.googleapis.com/mirror.tensorflow.org/github.com/petewarden/OouraFFT/archive/v1.0.tar.gz") + ExternalSource_Download("TFLiteVanilla_FFT2D" ${fft2d_url}) + set(TFLiteVanillaFFT2DSource_DIR "${TFLiteVanilla_FFT2D_SOURCE_DIR}") + if (NOT TFLiteVanillaFFT2DSource_DIR STREQUAL "") + set(TFLiteVanillaFFT2DSource_FOUND TRUE) + endif() + return_unless(TFLiteVanillaFFT2DSource_FOUND) + + set(flatbuffers_url "https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/flatbuffers/archive/v1.12.0.tar.gz") + ExternalSource_Download("TFLiteVanilla_FlatBuffers" ${flatbuffers_url}) + set(TFLiteVanillaFlatBuffersSource_DIR "${TFLiteVanilla_FlatBuffers_SOURCE_DIR}") + if (NOT TFLiteVanillaFlatBuffersSource_DIR STREQUAL "") + set(TFLiteVanillaFlatBuffersSource_FOUND TRUE) + endif() + return_unless(TFLiteVanillaFlatBuffersSource_FOUND) + + set(fp16_url "https://github.com/Maratyszcza/FP16/archive/4dfe081cf6bcd15db339cf2680b9281b8451eeb3.zip") + ExternalSource_Download("TFLiteVanilla_FP16" ${fp16_url}) + set(TFLiteVanillaFP16Source_DIR "${TFLiteVanilla_FP16_SOURCE_DIR}") + if (NOT TFLiteVanillaFP16Source_DIR STREQUAL "") + set(TFLiteVanillaFP16Source_FOUND TRUE) + endif() + return_unless(TFLiteVanillaFP16Source_FOUND) + + set(gemmlowp_url "https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/gemmlowp/archive/fda83bdc38b118cc6b56753bd540caa49e570745.zip") + ExternalSource_Download("TFLiteVanilla_GEMMLowp" ${gemmlowp_url}) + set(TFLiteVanillaGEMMLowpSource_DIR "${TFLiteVanilla_GEMMLowp_SOURCE_DIR}") + if (NOT TFLiteVanillaGEMMLowpSource_DIR STREQUAL "") + set(TFLiteVanillaGEMMLowpSource_FOUND TRUE) + endif() + return_unless(TFLiteVanillaGEMMLowpSource_FOUND) + + set(neon2sse_url "https://github.com/intel/ARM_NEON_2_x86_SSE/archive/1200fe90bb174a6224a525ee60148671a786a71f.tar.gz") + ExternalSource_Download("TFLiteVanilla_NEON2SSE" ${neon2sse_url}) + set(TFLiteVanillaNEON2SSESource_DIR "${TFLiteVanilla_NEON2SSE_SOURCE_DIR}") + if (NOT TFLiteVanillaNEON2SSESource_DIR STREQUAL "") + set(TFLiteVanillaNEON2SSESource_FOUND TRUE) + endif() + return_unless(TFLiteVanillaNEON2SSESource_FOUND) + + set(tensorflow_url "https://github.com/tensorflow/tensorflow/archive/v2.3.0.tar.gz") + ExternalSource_Download("TFLiteVanilla_TensorFlow" ${tensorflow_url}) + set(TFLiteVanillaTensorFlowSource_DIR "${TFLiteVanilla_TensorFlow_SOURCE_DIR}") + 
if (NOT TFLiteVanillaTensorFlowSource_DIR STREQUAL "") + set(TFLiteVanillaTensorFlowSource_FOUND TRUE) + endif() + return_unless(TFLiteVanillaTensorFlowSource_FOUND) + + set(ruy_url "https://github.com/google/ruy/archive/34ea9f4993955fa1ff4eb58e504421806b7f2e8f.zip") + ExternalSource_Download("TFLiteVanilla_Ruy" ${ruy_url}) + set(TFLiteVanillaRuySource_DIR "${TFLiteVanilla_Ruy_SOURCE_DIR}") + if (NOT TFLiteVanillaRuySource_DIR STREQUAL "") + set(TFLiteVanillaRuySource_FOUND TRUE) + endif() + return_unless(TFLiteVanillaRuySource_FOUND) + + nnas_include(ExternalProjectTools) + add_extdirectory("${CMAKE_CURRENT_LIST_DIR}/TensorFlowLite-2.3.0" tflite-2.3.0) + + set(TensorFlowLite_2_3_0_FOUND TRUE) + return() +endif() diff --git a/infra/nnfw/config/gbs.conf b/infra/nnfw/config/gbs.conf index 515cada..bad9eb2 100644 --- a/infra/nnfw/config/gbs.conf +++ b/infra/nnfw/config/gbs.conf @@ -5,7 +5,7 @@ profile = profile.tizen [profile.tizen] user=obs_viewer obs = obs.tizen -repos = repo.tizen_base,repo.tizen_mobile +repos = repo.tizen_one,repo.tizen_base,repo.tizen_mobile buildroot = /home/GBS-ROOT/ [obs.tizen] @@ -15,6 +15,8 @@ url = http://api.tizen.org url = http://download.tizen.org/snapshots/tizen/unified/latest/repos/standard/packages/ [repo.tizen_base] -url = http://download.tizen.org/snapshots/tizen/base/latest/repos/standard/packages/ +url = http://download.tizen.org/snapshots/tizen/base/latest/repos/standard/packages/ +[repo.tizen_one] +url = http://nnfw.mooo.com/archive/tizen/ diff --git a/infra/packaging/build b/infra/packaging/build index 036c2d5..e941a72 100644 --- a/infra/packaging/build +++ b/infra/packaging/build @@ -85,7 +85,8 @@ function join_by # Invoke "preset_configure" function that the preset provides preset_configure -NPROC=$(cat /proc/cpuinfo | grep -c processor) +NPROC=${NPROC:-$(cat /proc/cpuinfo | grep -c processor)} +echo "[BUILD] \"make\" with -j${NPROC} option. You can specify the number of jobs by defining NPROC" cmake --build . -- -j$((NPROC/2)) all cmake --build . 
-- install # Install NN Package tools diff --git a/infra/packaging/preset/20200630 b/infra/packaging/preset/20200630 index e159935..5d16358 100644 --- a/infra/packaging/preset/20200630 +++ b/infra/packaging/preset/20200630 @@ -14,6 +14,7 @@ function preset_configure() REQUIRED_UNITS+=("souschef") REQUIRED_UNITS+=("safemain") REQUIRED_UNITS+=("arser") + REQUIRED_UNITS+=("vconone") # Hermes Logging Framework REQUIRED_UNITS+=("hermes" "hermes-std") # loco IR and related utilities @@ -27,12 +28,16 @@ function preset_configure() REQUIRED_UNITS+=("tf2tfliteV2" "luci-interpreter" "circle-verify") REQUIRED_UNITS+=("record-minmax" "circle-quantizer") REQUIRED_UNITS+=("one-cmds") + REQUIRED_UNITS+=("bcq-tools") + + NPROC=${NPROC:-$(cat /proc/cpuinfo | grep -c processor)} # TODO Use "nncc configure" and "nncc build" cmake \ -DCMAKE_INSTALL_PREFIX="${NNCC_INSTALL_PREFIX}" \ -DCMAKE_BUILD_TYPE=release \ -DBUILD_WHITELIST=$(join_by ";" "${REQUIRED_UNITS[@]}") \ + -DEXTERNALS_BUILD_THREADS=$((NPROC/2)) \ ${EXTRA_OPTIONS[@]} \ "${NNAS_PROJECT_PATH}/infra/nncc" } @@ -44,14 +49,4 @@ function preset_install() # Install tf2nnpkg install -T -m 755 -D "${SCRIPT_PATH}/res/tf2nnpkg.${PRESET}" "${NNAS_INSTALL_PREFIX}/bin/tf2nnpkg" - - # Create python virtual enviornment - python3 -m venv "${NNAS_INSTALL_PREFIX}/bin/venv" - - # Install tensorflow - source "${NNAS_INSTALL_PREFIX}/bin/venv/bin/activate" - python -m pip --default-timeout=1000 --trusted-host pypi.org --trusted-host files.pythonhost.org \ - install -U pip setuptools - python -m pip --default-timeout=1000 --trusted-host pypi.org --trusted-host files.pythonhost.org \ - install tensorflow-cpu==2.3.0rc0 } diff --git a/infra/packaging/preset/20200731_windows b/infra/packaging/preset/20200731_windows new file mode 100644 index 0000000..65d179e --- /dev/null +++ b/infra/packaging/preset/20200731_windows @@ -0,0 +1,65 @@ +#!/bin/bash + +function preset_configure() +{ + REQUIRED_UNITS=() + # Common Libraries + REQUIRED_UNITS+=("angkor" "cwrap" "pepper-str" "pepper-strcast" "pp" "stdex") + REQUIRED_UNITS+=("oops" "pepper-assert" "foder") + REQUIRED_UNITS+=("souschef") + REQUIRED_UNITS+=("safemain") + REQUIRED_UNITS+=("arser") + REQUIRED_UNITS+=("vconone") + # Hermes Logging Framework + REQUIRED_UNITS+=("hermes" "hermes-std") + # loco IR and related utilities + REQUIRED_UNITS+=("loco" "locop" "locomotiv" "logo-core" "logo") + # Flatbuffer I/O + REQUIRED_UNITS+=("mio-tflite" "mio-circle") + # Circle compiler library (.circle -> .circle) + REQUIRED_UNITS+=("luci") + # Tools + REQUIRED_UNITS+=("tflite2circle" "circle2circle" "tflchef" "circlechef") + REQUIRED_UNITS+=("tf2tfliteV2" "luci-interpreter" "circle-verify") + REQUIRED_UNITS+=("record-minmax" "circle-quantizer") + REQUIRED_UNITS+=("one-cmds") + + NPROC=$(cat /proc/cpuinfo | grep -c processor) + + # TODO Use "nncc configure" and "nncc build" + cmake \ + -G "MSYS Makefiles" \ + -DTF2NNPKG_FOR_WINDOWS=ON \ + -DUSE_PROTOBUF_LEGACY_IMPORT=ON \ + -DCMAKE_EXE_LINKER_FLAGS="-Wl,--allow-multiple-definition" \ + -DCMAKE_SHARED_LINKER_FLAGS="-Wl,--allow-multiple-definition" \ + -DENABLE_TEST=OFF \ + -DDOWNLOAD_GTEST=OFF \ + -DBUILD_GTEST=OFF \ + -DCMAKE_C_COMPILER=gcc \ + -DCMAKE_CXX_COMPILER=g++ \ + -DCMAKE_INSTALL_PREFIX="${NNCC_INSTALL_PREFIX}" \ + -DCMAKE_BUILD_TYPE=release \ + -DBUILD_WHITELIST=$(join_by ";" "${REQUIRED_UNITS[@]}") \ + -DEXTERNALS_BUILD_THREADS=$((NPROC/2)) \ + ${EXTRA_OPTIONS[@]} \ + "${NNAS_PROJECT_PATH}/infra/nncc" +} + +function preset_install() +{ + # Install libraries to bin/ for Windows 
release + mv ${NNCC_INSTALL_PREFIX}/lib/*.dll ${NNCC_INSTALL_PREFIX}/bin + rm -rf ${NNCC_INSTALL_PREFIX}/lib + + install -t "${NNPKG_INSTALL_PREFIX}/bin" -D \ + "${NNAS_PROJECT_PATH}/tools/nnpackage_tool/model2nnpkg/model2nnpkg.sh" + + # Install tf2nnpkg + install -T -m 755 -D "${SCRIPT_PATH}/res/tf2nnpkg.20200630" "${NNAS_INSTALL_PREFIX}/bin/tf2nnpkg" + + # Though you have to install tensorflow to run 'tf2tfliteV2', + # tensorflow can't be installed in mingw. First, You can install tensorflow + # from Window native CMD(run as administrator) with python virtual environment. + # And, you must copy it to "${NNAS_INSTALL_PREFIX}/bin/venv" +} diff --git a/infra/packaging/res/tf2nnpkg.20200630 b/infra/packaging/res/tf2nnpkg.20200630 index 9101f82..7846fd3 100644 --- a/infra/packaging/res/tf2nnpkg.20200630 +++ b/infra/packaging/res/tf2nnpkg.20200630 @@ -14,10 +14,16 @@ command_exists() { usage() { echo "Convert TensorFlow model to nnpackage." - echo "Usage: tf2nnpkg --info --graphdef [OPTION] -o " - exit 0 + echo "Usage: tf2nnpkg" + echo " --info " + echo " --graphdef " + echo " -o " + echo " --v2 (optional) Use TF 2.x interface" + exit 255 } +TF_INTERFACE="--v1" + # Parse command-line arguments # while [ "$#" -ne 0 ]; do @@ -39,6 +45,10 @@ while [ "$#" -ne 0 ]; do export OUTPUT_DIR="$2" shift 2 ;; + '--v2') + TF_INTERFACE="--v2" + shift + ;; *) echo "${CUR}" shift @@ -83,10 +93,7 @@ OUTPUT=$(awk -F, '/^output/ { print $2 }' ${INFO_FILE} | cut -d: -f1 | tr -d ' ' INPUT_SHAPES=$(grep ^input ${INFO_FILE} | cut -d "[" -f2 | cut -d "]" -f1 | tr -d ' ' | xargs | tr ' ' ':') # generate tflite file -python "${ROOT}/bin/tf2tfliteV2.py" --v2 --input_path ${GRAPHDEF_FILE} \ ---output_path "${TMPDIR}/${MODEL_NAME}.tflite" \ ---input_arrays ${INPUT} --output_arrays ${OUTPUT} || \ -python "${ROOT}/bin/tf2tfliteV2.py" --v1 --input_path ${GRAPHDEF_FILE} \ +python "${ROOT}/bin/tf2tfliteV2.py" ${TF_INTERFACE} --input_path ${GRAPHDEF_FILE} \ --output_path "${TMPDIR}/${MODEL_NAME}.tflite" \ --input_arrays ${INPUT} --input_shapes ${INPUT_SHAPES} \ --output_arrays ${OUTPUT} diff --git a/infra/scripts/build-tcm.sh b/infra/scripts/build-tcm.sh new file mode 100755 index 0000000..22fb335 --- /dev/null +++ b/infra/scripts/build-tcm.sh @@ -0,0 +1,24 @@ +#!/bin/bash +# +# STEP 1 +# Download latest TCM tool from +# https://github.sec.samsung.net/RS-TCM/tca-standalone/releases/download/v0.0.8/tca-standalone-0.0.8.jar +# +# STEP 2 +# Create symbolic link `./src` for source directory to be analyzed which has `.ahub` configuration. +# +# STEP 3 +# run this `build-tcm.sh` script. +# +# See the following link for additional details. 
+# https://github.sec.samsung.net/RS-TCM/tca-standalone/wiki/Tutorials-CPP-Gtest +# + +echo ${PROJECT_DIR:=${PWD}} + +java -jar $PROJECT_DIR/tca-standalone-0.0.8.jar \ + --outdir=$PROJECT_DIR/tcm-output \ + --config=$PROJECT_DIR/.ahub/tcchecker-tca/config.yaml \ + --local=$PROJECT_DIR/src \ + --logfile=$PROJECT_DIR/tcm-output/tcm.log \ + --debug diff --git a/infra/scripts/common.sh b/infra/scripts/common.sh index 28aa213..a10aac2 100755 --- a/infra/scripts/common.sh +++ b/infra/scripts/common.sh @@ -15,15 +15,18 @@ # TFLiteModelVerification $1 $2 $3 # Run ./tests/scripts/test-driver.sh script verification test # -# Unittests $1 $2 $3 -# Run ./tests/scripts/test-driver.sh script unittest +# NNAPIGTest $1 $2 $3 +# Run [INSTALL_PATH]/test/onert-test unittest command for nnapi gtest # # NNPackageTest $1 $2 -# Run ./tests/scripts/nnpkg_test.sh script nnpackage test +# Run [INSTALL_PATH]/test/onert-test nnpkg-test command CURRENT_PATH="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" ROOT_PATH="$(cd ${CURRENT_PATH}/../../ && pwd)" +# Install path on CI +INSTALL_PATH=$ROOT_PATH/Product/out + function CheckTestPrepared() { # Model download server setting @@ -47,16 +50,12 @@ function TFLiteModelVerification() export BACKENDS=$1 if [[ "$2" == "" ]]; then - ./tests/scripts/test-driver.sh \ - --reportdir=$ROOT_PATH/$3 \ - --verification \ - . + $INSTALL_PATH/test/onert-test verify-tflite --api=nnapi \ + --reportdir=$ROOT_PATH/$3 else - ./tests/scripts/test-driver.sh \ - --frameworktest_list_file=$2 \ - --reportdir=$ROOT_PATH/$3 \ - --verification \ - . + $INSTALL_PATH/test/onert-test verify-tflite --api=nnapi \ + --list=$2 \ + --reportdir=$ROOT_PATH/$3 fi unset BACKENDS @@ -64,10 +63,10 @@ function TFLiteModelVerification() } # $1: (required) backend -# $2: (required) unittest skiplist file relative path from nnfw root directory +# $2: (required) nnapi gtest skiplist file relative path from nnfw root directory # pass empty string if there is no test list # $3: (required) relative path for report from nnfw root directory -function Unittests() +function NNAPIGTest() { [[ $# -ne 3 ]] && echo "Invalid function argument setting" && exit 1 @@ -75,7 +74,7 @@ function Unittests() # Backup original nnapi_gtest.skip # TODO Pass skiplist to test-driver.sh - SKIPLIST_FILE="${ROOT_PATH}/Product/out/unittest/nnapi_gtest.skip" + SKIPLIST_FILE="${INSTALL_PATH}/unittest/nnapi_gtest.skip" BACKUP_FILE="${SKIPLIST_FILE}.backup" if [[ "$2" != "" ]]; then cp ${SKIPLIST_FILE} ${BACKUP_FILE} @@ -83,10 +82,9 @@ function Unittests() fi export BACKENDS=$1 - ./tests/scripts/test-driver.sh \ + $INSTALL_PATH/test/onert-test unittest \ --reportdir=$ROOT_PATH/$3 \ - --unittest \ - . + --unittestdir=$INSTALL_PATH/unittest unset BACKENDS # TODO Pass skiplist to test-driver.sh @@ -115,7 +113,7 @@ function NNPackageTest() do for entry in "nnpkg-tcs"/$f; do if [ -e $entry ]; then - BACKENDS="$1" tests/scripts/nnpkg_test.sh -d -i nnpkg-tcs $(basename "$entry") + BACKENDS="$1" $INSTALL_PATH/test/onert-test nnpkg-test -d -i nnpkg-tcs $(basename "$entry") fi done EXITCODE_F=$? @@ -144,16 +142,11 @@ function TFLiteLoaderTest() export BACKENDS=$1 if [[ "$2" == "" ]]; then - ./tests/scripts/test-driver.sh \ - --frameworktest \ - --framework_driverbin="$ROOT_PATH/Product/out/bin/tflite_loader_test_tool" \ + $INSTALL_PATH/test/onert-test verify-tflite --api=loader \ --reportdir=$ROOT_PATH/$3 - . 
else - ./tests/scripts/test-driver.sh \ - --frameworktest \ - --framework_driverbin="$ROOT_PATH/Product/out/bin/tflite_loader_test_tool" \ - --frameworktest_list_file=tests/scripts/list/tflite_loader_list.${TEST_ARCH}.txt \ + $INSTALL_PATH/test/onert-test verify-tflite --api=loader \ + --list=$2 \ --reportdir=$ROOT_PATH/$3 fi unset BACKENDS diff --git a/infra/scripts/compiler_modules.sh b/infra/scripts/compiler_modules.sh index d436e8a..a0323e0 100644 --- a/infra/scripts/compiler_modules.sh +++ b/infra/scripts/compiler_modules.sh @@ -7,7 +7,7 @@ DEBUG_BUILD_ITEMS="angkor;cwrap;pepper-str;pepper-strcast;pp;stdex" DEBUG_BUILD_ITEMS+=";oops;pepper-assert" DEBUG_BUILD_ITEMS+=";hermes;hermes-std" DEBUG_BUILD_ITEMS+=";loco;locop;locomotiv;logo-core;logo" -DEBUG_BUILD_ITEMS+=";foder;souschef;arser" +DEBUG_BUILD_ITEMS+=";foder;souschef;arser;vconone" DEBUG_BUILD_ITEMS+=";safemain;mio-circle;mio-tflite" DEBUG_BUILD_ITEMS+=";tflite2circle" DEBUG_BUILD_ITEMS+=";luci" diff --git a/infra/scripts/docker_build_cross_aarch64_runtime.sh b/infra/scripts/docker_build_cross_aarch64_runtime.sh index 7da6736..011d14c 100755 --- a/infra/scripts/docker_build_cross_aarch64_runtime.sh +++ b/infra/scripts/docker_build_cross_aarch64_runtime.sh @@ -6,7 +6,7 @@ CURRENT_PATH="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" ROOT_PATH="$CURRENT_PATH/../../" # prepare rootfs -if [ ! -d $ROOTFS_DIR ]; then +if [ -z "$ROOTFS_DIR" ] || [ ! -d $ROOTFS_DIR ]; then echo "It will use default rootfs path" else DOCKER_VOLUMES+=" -v $ROOTFS_DIR:/opt/rootfs" diff --git a/infra/scripts/docker_build_cross_arm_runtime.sh b/infra/scripts/docker_build_cross_arm_runtime.sh index f1f666a..551fb57 100755 --- a/infra/scripts/docker_build_cross_arm_runtime.sh +++ b/infra/scripts/docker_build_cross_arm_runtime.sh @@ -6,7 +6,7 @@ CURRENT_PATH="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" ROOT_PATH="$CURRENT_PATH/../../" # prepare rootfs -if [ ! -d $ROOTFS_DIR ]; then +if [ -z "$ROOTFS_DIR" ] || [ ! -d $ROOTFS_DIR ]; then echo "It will use default rootfs path" else DOCKER_VOLUMES+=" -v $ROOTFS_DIR:/opt/rootfs" diff --git a/infra/scripts/docker_build_cross_arm_runtime_release.sh b/infra/scripts/docker_build_cross_arm_runtime_release.sh index ea66f17..876f318 100755 --- a/infra/scripts/docker_build_cross_arm_runtime_release.sh +++ b/infra/scripts/docker_build_cross_arm_runtime_release.sh @@ -6,7 +6,7 @@ CURRENT_PATH="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" ROOT_PATH="$CURRENT_PATH/../../" # prepare rootfs -if [ ! -d $ROOTFS_DIR ]; then +if [ -z "$ROOTFS_DIR" ] || [ ! -d $ROOTFS_DIR ]; then echo "It will use default rootfs path" else DOCKER_VOLUMES+=" -v $ROOTFS_DIR:/opt/rootfs" diff --git a/infra/scripts/docker_build_cross_coverage.sh b/infra/scripts/docker_build_cross_coverage.sh index 08244e5..f42251b 100755 --- a/infra/scripts/docker_build_cross_coverage.sh +++ b/infra/scripts/docker_build_cross_coverage.sh @@ -6,7 +6,7 @@ CURRENT_PATH="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" ROOT_PATH="$CURRENT_PATH/../../" # prepare rootfs -if [ ! -d $ROOTFS_DIR ]; then +if [ -z "$ROOTFS_DIR" ] || [ ! 
-d $ROOTFS_DIR ]; then echo "It will use default rootfs path" else DOCKER_VOLUMES+=" -v $ROOTFS_DIR:/opt/rootfs" diff --git a/infra/scripts/docker_build_nncc.sh b/infra/scripts/docker_build_nncc.sh index 418b50d..046bc8a 100755 --- a/infra/scripts/docker_build_nncc.sh +++ b/infra/scripts/docker_build_nncc.sh @@ -54,7 +54,18 @@ pushd $ROOT_PATH > /dev/null mkdir -p ${NNCC_INSTALL_PREFIX} ./nncc docker-run ./nnas create-package --prefix "${PWD}/${NNCC_INSTALL_PREFIX}" -- "${CONFIG_OPTIONS}" +# create python virtual environment +./nncc docker-run python3 -m venv "${NNCC_INSTALL_PREFIX}/bin/venv" + +./nncc docker-run "${NNCC_INSTALL_PREFIX}/bin/venv/bin/python" \ + -m pip --default-timeout=1000 --trusted-host pypi.org --trusted-host files.pythonhost.org \ + install -U pip setuptools +./nncc docker-run "${NNCC_INSTALL_PREFIX}/bin/venv/bin/python" \ + -m pip --default-timeout=1000 --trusted-host pypi.org --trusted-host files.pythonhost.org \ + install tensorflow-cpu==2.3.0 + mkdir -p ${ARCHIVE_PATH} -tar -zcf ${ARCHIVE_PATH}/nncc-package.tar.gz -C ${NNCC_INSTALL_PREFIX} ./ +tar -zcf ${ARCHIVE_PATH}/nncc-package.tar.gz -C ${NNCC_INSTALL_PREFIX} --exclude "bin/venv" ./ +tar -zcf ${ARCHIVE_PATH}/nncc-venv-package.tar.gz -C ${NNCC_INSTALL_PREFIX} bin/venv popd > /dev/null diff --git a/infra/scripts/docker_build_tizen_cross.sh b/infra/scripts/docker_build_tizen_cross.sh index 18809ad..ee0f183 100755 --- a/infra/scripts/docker_build_tizen_cross.sh +++ b/infra/scripts/docker_build_tizen_cross.sh @@ -6,7 +6,7 @@ CURRENT_PATH="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" ROOT_PATH="$CURRENT_PATH/../../" # prepare rootfs -if [ ! -d $ROOTFS_DIR ]; then +if [ -z "$ROOTFS_DIR" ] || [ ! -d $ROOTFS_DIR ]; then echo "It will use default rootfs path" else DOCKER_VOLUMES+=" -v $ROOTFS_DIR:/opt/rootfs" diff --git a/infra/scripts/docker_collect_nnpkg_resources.sh b/infra/scripts/docker_collect_nnpkg_resources.sh index 556c5bd..55adaa1 100755 --- a/infra/scripts/docker_collect_nnpkg_resources.sh +++ b/infra/scripts/docker_collect_nnpkg_resources.sh @@ -60,7 +60,7 @@ pushd $ROOT_PATH > /dev/null REQUIRED_UNITS=() # Common Libraries REQUIRED_UNITS+=("angkor" "cwrap" "pepper-str" "pepper-strcast" "pp" "stdex") -REQUIRED_UNITS+=("oops" "safemain" "foder" "arser" "oops") +REQUIRED_UNITS+=("oops" "safemain" "foder" "arser" "vconone") # Hermes Logging Framework REQUIRED_UNITS+=("hermes" "hermes-std") # loco IR and related utilities diff --git a/infra/scripts/test_arm_nnpkg.sh b/infra/scripts/test_arm_nnpkg.sh index 68adaf9..d00eb73 100755 --- a/infra/scripts/test_arm_nnpkg.sh +++ b/infra/scripts/test_arm_nnpkg.sh @@ -7,10 +7,10 @@ BACKENDS=("acl_cl" "acl_neon" "cpu") for BACKEND in "${BACKENDS[@]}"; do - NNPackageTest ${BACKEND} "tests/scripts/list/nnpkg_test_list.armv7l-linux.${BACKEND}" + NNPackageTest ${BACKEND} "Product/out/test/list/nnpkg_test_list.armv7l-linux.${BACKEND}" done # Interpreter test export DISABLE_COMPILE=1 -NNPackageTest "interp" "tests/scripts/list/nnpkg_test_list.noarch.interp" +NNPackageTest "interp" "Product/out/test/list/nnpkg_test_list.noarch.interp" unset DISABLE_COMPILE diff --git a/infra/scripts/test_coverage.sh b/infra/scripts/test_coverage.sh index c3dc048..12a9942 100755 --- a/infra/scripts/test_coverage.sh +++ b/infra/scripts/test_coverage.sh @@ -32,7 +32,7 @@ export GCOV_PREFIX_STRIP=`cat $ROOT_PATH/tests/scripts/build_path_depth.txt` TENSOR_LOGGING=trace_log.txt ONERT_LOG_ENABLE=1 GRAPH_DOT_DUMP=1 ./infra/scripts/test_ubuntu_runtime_mixed.sh # Enable trace event (acl_cl default backend) 
export TRACE_FILEPATH=trace.json -TFLiteModelVerification "acl_cl" "tests/scripts/list/frameworktest_list.armv7l.acl_cl.txt" "report/acl_cl/trace" +TFLiteModelVerification "acl_cl" "Product/out/test/list/frameworktest_list.armv7l.acl_cl.txt" "report/acl_cl/trace" unset TRACE_FILEPATH # Interpreter diff --git a/infra/scripts/test_ubuntu_runtime.sh b/infra/scripts/test_ubuntu_runtime.sh index 76e567a..f250df5 100755 --- a/infra/scripts/test_ubuntu_runtime.sh +++ b/infra/scripts/test_ubuntu_runtime.sh @@ -68,7 +68,7 @@ else fi UNITTEST_SKIPLIST="Product/out/unittest/nnapi_gtest.skip.${TEST_PLATFORM}.${BACKEND}" -FRAMEWORK_TESTLIST="tests/scripts/list/frameworktest_list.${TEST_ARCH}.${BACKEND}.txt" +FRAMEWORK_TESTLIST="Product/out/test/list/frameworktest_list.${TEST_ARCH}.${BACKEND}.txt" REPORT_BASE="report/${BACKEND}" EXECUTORS=("Linear" "Dataflow" "Parallel") @@ -91,7 +91,7 @@ do export EXECUTOR="${EXECUTOR}" fi - Unittests "${BACKEND}" "${UNITTEST_SKIPLIST}" "${REPORT_PATH}" + NNAPIGTest "${BACKEND}" "${UNITTEST_SKIPLIST}" "${REPORT_PATH}" TFLiteModelVerification "${BACKEND}" "${FRAMEWORK_TESTLIST}" "${REPORT_PATH}" if [ $EXECUTOR = "Interpreter" ]; then @@ -103,12 +103,7 @@ done # Current support acl_cl backend testlist only # TODO Support more backends -TFLITE_LOADER_TESTLIST="tests/scripts/list/tflite_loader_list.${TEST_ARCH}.txt" +TFLITE_LOADER_TESTLIST="Product/out/test/list/tflite_loader_list.${TEST_ARCH}.txt" if [[ $TFLITE_LOADER = "1" ]]; then TFLiteLoaderTest "${BACKEND}" "${TFLITE_LOADER_TESTLIST}" "${REPORT_BASE}/loader/${EXECUTOR}" - - # Test custom op - pushd ${ROOT_PATH} > /dev/null - ./Product/out/tests/FillFrom_runner - popd > /dev/null fi diff --git a/infra/scripts/test_ubuntu_runtime_mixed.sh b/infra/scripts/test_ubuntu_runtime_mixed.sh index 265a2ac..24fde88 100755 --- a/infra/scripts/test_ubuntu_runtime_mixed.sh +++ b/infra/scripts/test_ubuntu_runtime_mixed.sh @@ -14,32 +14,26 @@ TEST_OS="linux" # This test requires test model installation pushd ${ROOT_PATH} > /dev/null -echo -echo "==== Run nnfw_api_gtest begin ====" -echo -NNFW_API_TEST_MODEL_INSTALLER=tests/scripts/nnfw_api_gtest/install_nnfw_api_gtest_nnpackages.sh -TEST_BIN=Product/out/unittest_standalone/nnfw_api_gtest -$NNFW_API_TEST_MODEL_INSTALLER --install-dir ${TEST_BIN}_models -${TEST_BIN} -echo -echo "==== Run nnfw_api_gtest end ====" -echo +echo "" +echo "==== Run standalone unittest begin ====" +echo "" +Product/out/test/onert-test prepare-model --model=nnpackage +Product/out/test/onert-test unittest --unittestdir=Product/out/unittest_standalone +echo "" +echo "==== Run standalone unittest end ====" +echo "" + +# Test custom op +pushd ${ROOT_PATH} > /dev/null +./Product/out/test/FillFrom_runner popd > /dev/null -Product/out/unittest_standalone/test_compute -Product/out/unittest_standalone/test_onert -Product/out/unittest_standalone/test_onert_backend_cpu_common -Product/out/unittest_standalone/test_onert_frontend_nnapi -Product/out/unittest_standalone/tflite_test - -pushd ${ROOT_PATH} - # NOTE Fixed backend assignment by type of operation # TODO Enhance this with randomized test BACKENDS=(acl_cl acl_neon cpu) # Get the intersect of framework test list files -TESTLIST_PREFIX="tests/scripts/list/frameworktest_list.${TEST_ARCH}" +TESTLIST_PREFIX="Product/out/test/list/frameworktest_list.${TEST_ARCH}" SKIPLIST_PREFIX="Product/out/unittest/nnapi_gtest.skip.${TEST_ARCH}-${TEST_OS}" sort $TESTLIST_PREFIX.${BACKENDS[0]}.txt > $TESTLIST_PREFIX.intersect.txt sort $SKIPLIST_PREFIX.${BACKENDS[0]} > 
$SKIPLIST_PREFIX.union @@ -65,5 +59,5 @@ export OP_BACKEND_Conv2D="cpu" export OP_BACKEND_MaxPool2D="acl_cl" export OP_BACKEND_AvgPool2D="acl_neon" export ACL_LAYOUT="NCHW" -Unittests "acl_cl;acl_neon;cpu" "Product/out/unittest/nnapi_gtest.skip.${TEST_ARCH}-${TEST_OS}.union" "report/mixed" +NNAPIGTest "acl_cl;acl_neon;cpu" "Product/out/unittest/nnapi_gtest.skip.${TEST_ARCH}-${TEST_OS}.union" "report/mixed" TFLiteModelVerification "acl_cl;acl_neon;cpu" "${TESTLIST_PREFIX}.intersect.txt" "report/mixed" diff --git a/infra/scripts/tizen_xu4_test.sh b/infra/scripts/tizen_xu4_test.sh index 5521b5f..8f9e86f 100755 --- a/infra/scripts/tizen_xu4_test.sh +++ b/infra/scripts/tizen_xu4_test.sh @@ -23,19 +23,21 @@ function install_model() { # download tflite model files pushd $HOST_HOME - tests/scripts/framework/run_test.sh --download=on + tests/scripts/models/run_test.sh --download=on --run=off # TODO Since this command removes model file(.zip), # We must always download the file unlike model file(.tflite). # Because caching applies only to tflite file. find tests -name "*.zip" -exec rm {} \; - tar -zcf cache.tar.gz tests/scripts/framework/cache + tar -zcf cache.tar.gz -C tests/scripts/models cache $SDB_CMD push cache.tar.gz $TEST_ROOT/. rm -rf cache.tar.gz - $SDB_CMD shell tar -zxf $TEST_ROOT/cache.tar.gz -C $TEST_ROOT + $SDB_CMD shell tar -zxf $TEST_ROOT/cache.tar.gz -C $TEST_ROOT/Product/out/test/models # download api test model file for nnfw_api_gtest MODEL_CACHE_DIR=$(mktemp -d) - tests/scripts/nnfw_api_gtest/install_nnfw_api_gtest_nnpackages.sh --install-dir $MODEL_CACHE_DIR + tests/scripts/models/run_test.sh --download=on --run=off \ + --configdir=test/scripts/nnfw_api_gtest/models \ + --cachedir=$MODEL_CACHE_DIR tar -zcf $MODEL_CACHE_DIR/api_model_test.tar.gz -C $MODEL_CACHE_DIR . $SDB_CMD push $MODEL_CACHE_DIR/api_model_test.tar.gz $TEST_ROOT/Product/out/unittest_standalone/nnfw_api_gtest_models/ $SDB_CMD shell tar -zxf $TEST_ROOT/Product/out/unittest_standalone/nnfw_api_gtest_models/api_model_test.tar.gz \ @@ -157,7 +159,7 @@ else rm -rf ${GCOV_DIR}/* pushd ${GCOV_DIR} - sdb pull ${TEST_ROOT}/tests/scripts/build_path.txt + sdb pull ${TEST_ROOT}/Product/out/test/build_path.txt SRC_PREFIX=`cat build_path.txt` GCOV_PREFIX_STRIP=`echo "${SRC_PREFIX}" | grep -o '/' | wc -l` GCOV_DATA_PATH="/opt/usr/nnfw-gcov" diff --git a/nnpackage/spec/30_custom_op.md b/nnpackage/spec/30_custom_op.md index 504695f..d98521b 100644 --- a/nnpackage/spec/30_custom_op.md +++ b/nnpackage/spec/30_custom_op.md @@ -42,7 +42,7 @@ typedef void (*nnfw_custom_eval)(nnfw_custom_kernel_params *params, char *userda ``` The structures and relevant APIs are defined in nnfw APIs. -Please see `nnfw_dev.h` for detail. +Please see `nnfw_experimental.h` for detail. You can find example in `nnfw` repository. 
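The script changes above route the CI test helpers in `infra/scripts/common.sh` through the installed `onert-test` driver under `Product/out/test` instead of `tests/scripts/test-driver.sh`. A minimal sketch of driving it by hand, mirroring the calls the helpers now make (the backend, test list, and report directory below are illustrative placeholders, not values taken from this patch):

# Sketch only: exercise the relocated test driver directly.
# INSTALL_PATH follows the CI convention set in infra/scripts/common.sh.
INSTALL_PATH=Product/out
export BACKENDS="cpu"                                  # illustrative backend

# NNAPI gtest run, as NNAPIGTest() now does
$INSTALL_PATH/test/onert-test unittest \
  --reportdir=report/cpu \
  --unittestdir=$INSTALL_PATH/unittest

# TFLite model verification, as TFLiteModelVerification() now does
$INSTALL_PATH/test/onert-test verify-tflite --api=nnapi \
  --list=$INSTALL_PATH/test/list/frameworktest_list.armv7l.cpu.txt \
  --reportdir=report/cpu

unset BACKENDS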
diff --git a/packaging/nnapi_test_generated.tar.gz b/packaging/nnapi_test_generated.tar.gz
index ebbb8496ca4360d2954dcfdfc7cfab34ce362a0a..504dbf9a65dff20423750a37c0d05a5df6e5b96c 100644
GIT binary patch
[base85-encoded binary delta (literal 819008) for packaging/nnapi_test_generated.tar.gz omitted]
z+u)Znb+NMe3M1B)&{dz=RiEKipD(LE?^bx*RcpJN|}oC*G?{DYHMgV8fFA#_95{132+F48?ewhE(RBgE|bn z&KZ2j;(T!I0(x==JvM`8%%J;c(A_iWt{HU44Ep^Hnl{te@p)3sVOXWA)gdUSdeIy| zmBZ>g#jNXCCQQx=JB+Kuu4X8TeNL(?u6tiMQ@5>Nt={?u?#7fIF(abaj+h?NYDXkR z)YuWzA`0z@cOvfE5mO@)?1;pOP&;BugpVEZc7&51@m7SD9WgmV&yJWBfwCheMo8Nc z6C$?Q5#u9x?1*s@Q?|s|h+bP_Ohl_K5sav@B@!YEZHdtlcWsGL5ec@$$cRu|;>`#j zTjGrfCtG4fgq1BZJVMWw7#4xDC0>t^wk3u}Y_TO?i{P;(;v=SPh#?WZHpJkFR?fYN zsIehljVQDs21eYqAqGSw*buKogxU}0jldNxE{1j>f!8zF5& z^oiJFL-dZ|u_1azOr0QJjOabl*{zI!Mt4BOj(er(Vp)MpGlP>Ux{jnUSP4~o4_Hi9 z-vcYAD(wM_sJr&Sil}0Hz(Ojp2UbXh?Exv&^?P6x>Z&66ggUDTdqN#o1PiDGim(Fe z7e(+f^@AepF||n%%%|2W!t$vVir^z^i6ZO~HD3{YNX=G+J*1Kp!3Wf2Mc4x>s0iMt zhAYDEQ-c)2JgT1}ERT9Y5zM8!D#CK9_KIK*^_U_ohk8U2%%&PB!m_E_ieMI1T@jW= zRZ;}+QFkfA?oq`Q!MjvI5q6gfQv}J>^@=bubyWe(q|Pe9GO6PVUuY}wSepqrU1A)H+9MeWKEsOz>tX(3CJ~ZA_DFvPRW3* ziBmi*VyR_;BM^X2gn*bc>_a6P8Wb&Bd4=~ zyOEPCAZz602n-oI*#o(TPPTx%q0=!y*3iik7&34=0^}Mv9R}PDoD2Y21E+()kiL^P zkgM;c3ApP!sROe5PDo(rkdqRSd&o&1a6ja<3y?kJBn1rVIf((edQNb_UC#*sWc8c` zfuVy=Fd+A!6CdDy&}ls&d(df3@?2`mO|8t)p!}BtU#mvrPue*JXhdw!X8WGnx=bR>wz@@J8d4g6sw z?W%!aAygGcu3|WCG6&A2ESvgnu@Hr-wI~z)9idCh)ZI2PW`4 z;i)F@)bJ=1I59lH1fCLp&IEot+|C4kE8N5co*a%gfhUD4n7|VUd@A2VENrw-Py02$ zH>6$um?ZP!+Xz*TAr+Q@6y|qHKyK&PPe5+tmq|cw;a``46yWcVM{eSO6_4D=pB0bf z=MRfV^76ZIe&LQ^KOVV`UnU;O!@n*b$>!^iLo)eZ#UWStvf_|Sd|`3O1wNNJ ztL`V3-n>8o-jW$MB5AkQhwPoo|3&i3rSHpTG*Ry*Tcq?^3G4;L)T*OWvDR<1LrNS|!6-Ju|%Nx}R3{h6E6J%cZaeNds2T6mN`fx4ER4t|s1l zP49Z@mTZ()9tq!scP-hLgvg_<&f=fRYTunUs*E96ez z_d1dDHN^sVf+^!+u99Labol0Lo(Kd6Q^vyVCBH0_+j)z1BIlNTgocst&XM~M zGD0`6Pv_Ix8g%BmSSn61Rp;5PUXFllu;56Tv84L4V=Hfzj^?c1U4ae3g2Q3DlCw*W zG~NK6XES=~0z*N9jIe!@B}VpJ_!c-*vmK+;-?R1_^>)jH_4H6s- zlao|ma%|u=(b1gNOAxpbBsdTzDLK37_?j23^K42lM!-Etus=*#vSiV*j#oiPb4o8t zz$i$tFHAtvZ_)7;uZYgG?|KmevO$8qVZ4&+i;gwC{5qQ7^+E+U1PS(pu_a~~9IJVk z4m_LG3l%^l=*}fOpgI`#UHO!PGPQ!vj%eQJVIv!og_6=eWuCf|=SSj09 zRg4!MTE16{_2JQVY+sN)D8_EoW2Gc<8kQVdrmMw<@aSf?FNr;<&29|ka4=PjmmOMW ztHnm~=#OmQeD_(XVGl73{jf%i2tTYT zqs|X&&Vcx1F$_b0>{_*z=6KOW2DH$YrcA!|*cp z5+mX=Hjq(w85_cYT)|#v7+%5NU_@NOMl9|uW-uUu z*n14aKx{4}A`ttKQ5T3UU_h>7ix`Giu}>KhSFz6-byu-f3`h``$}kMV)-xi4ux}Z4 zLD&`sBpBPqFbu}FGa`bqos7ERf#C7VTDlpx;QD|Y2i!E^LID>Exk$)GLM{^id5|!> zoB4#ki9#2r(3L6l0~GpE3jH*N?n$8sQ|K`idOC&vm_o0l(3>gruN3+Oh0dhVHx<&w z3+c**tP;8Q7Ygl}m+Gc9=)r~bm_mAbA^mY7y|R$rT&PX!o+zX<3+bDR=;B3miON{=a} zrCL6|uch>fQaZDgzUe7l{3%`e=|C?R-?{kC#dj{g{|4W)2YXLS6#TMS zUQy1!lF|68RNMZaUd%1LNMpW8XTHc}zPQVLk;QzG!+ep)%-mbLph5SXI6(WQVZX8S zpLe;;@Kgx+AM(H-c$rua`Mec;mT@lU@u?5dCIG+e;5 z+c!P_cNMH)f`(VokY{^E>z%TbGwvSri_JR<0va{&rX4|8mn!;|=2K4sW;DF2hcN-)FsW zX_Ri6{$td$UD}+Vw*Id~%YP}f{(+qX(vrOVrXQPEeQqe*zjQ;cdwL)(Bd9CMD`@($ zMb%P6neNgC`R=uYY2=_C$zI{pk1eZ2-j?Yv-H`9zI+%7Z=v1;7IQ`hF3jMascxi(I z`&f2RQnFXlw1t(aLw_@->h#-vN0x3YbgSnumUiR?jU{_!Oix%>g}yDbT#{AnHX7XV zAZX7muiWX0V^w$GmK|HlRqQ@7xZ_cf$1Sgd>51c2HE+vomt^;JyASSo9Q5dx*VE~V z6IH!$%j}nO_jF$x-0>u6?3P#6^n^_nPh*+mlB`nqjlms-L3?g{)lW~@R!KLOxh~}@ zb>AM`Q5@uP+pA@I!mdiMvFz-Uta5km;EvLuN4LG&F@F@s|G?yR@7^%6<8JVt60aNc z7UCxz`juO&6gtZcSL)CHn4X(9GLK^m>Mb5VGiZ} z%lhQcbN9HG{<$aieZdI?yW-@XT1^8@I~!12+i%?<505K;m0Bdj-!H zU?o*AKdNSpRWA$UR~GQM*k2u(nXFdbRMsQ>9F5iNz$#;pIIu3iXD@cIHse=ltoh6z zeZ1JC-AwkXB-?@I#r`hIOv=PB3$VvHGUptoCRg#S3Eh>hYWC7i{6aUABT9F{t9{0T zorN#XU@zmByH|BtDeOf_)?oL5!!n-Ph-cDR(<)4h)r}kxt8wf`NfwPQ$zFC~H)gVz z*!S4IPv$JxG){FclXZq&r@Ly*9@b^6uwS!%*q`w;9J{>OLk=vJMcPVScWNL@xKcxx zJ+*&khRujsYGkjBRkL2R=O@|C)yx(4N6t!8b`pL;S8S-7l~%p|) zvzm3;a_q0wOKEtP?((TsNmgC;a#}U>1%B!YNA;rF>X4>A%_2UK??f2?*fHX)yu5{5}QKut{X4r>|ShSbWSy+?lL1R#7%146`=zQH^gdBdg zAD6mh@1i4&3c)Pu^)kXGw}qH4O81@#m)sI^XpyIv5hf`RqP5uDb0$o3Q;6zfO%J1Q 
zrZD)MuTGg}=1Ll^xtY;2ggZ4;p11VnUC=jooif$T6(a3u6JzrbZe_as^3s=QLEld5 zlqqGd+@dwVWtb1*(x%Iem%fmLzM1Qk$!4x3(vCJTVh3>hr^|(xzC;FnJET)4mAMi} zYktjW>Bn_Vl`|FxKP;0@Wu|Sqz1ddN@$)sqb+=)*NXPPPb=RlunPPFpx8C2HxwY-K z+HLFGxOTxN##Kjhgvv1E>i1l0UG{2_0DZBIk(t?F*;R&TEurZzcm{sDJoP5%$K{^8 zA*=^dQ%iZsK}P0v;Y_?%_R{Li3@f;5LFqbWJ#{T#qofjNUuq4Nw^2eVkTOtnowre3 z=@R8b%^}`KF(qG0ZOvNV#vMu*DJ3=34UM8o=PB7W*EckZD7jOTYYuH_gey5y!fV!U zXcSh$Qv7PDkVYY;lN8sQ>yXCnN+&4CY7Rjf0VNE@pk^(kahsAkMZJc)zHzIPDP`9b z;_$O$-0dwuW!37wlYRV$DZR182r5BW8YU0L?2&FR0Cn{#+MrEa9${Xs| zjT@Deo=~1q+n^iwC_SRwrzS!-Dk|ksh*S)8qk>WvC6c-ox=~&!lX8XH#=lWcDUEWL zn#jLVRw<>VOgXdPr=Oa>u?+F$h4&2gYa=7@WgFi{86^Tmo|?$Fai`Kv3Y?0G+$gQR zb>?7GKT_b8Hgra+$ptA;qrH|LC|MJ;KCZgJ{RMI2g`> zxI@Ux_PJB<4YwUap0-6!H8%!Dg*k`T#R)r6Yw(b@u=>jMCvku{vkK%u30i5LUMnFtb!TSh!l- zx6RB#NxN{o_TDx#b0y8fFSX9w%#JFl7dF-I+-7#9U#**QMSE`Q(`y_jTeX*hbwq@c zZOxrgQ8~MW5^ar~w?ysUB@}O~<2)6aBO?@RyVtoja<_~SXp3+zJnY{#lK5)(8MD~i zBqg-1Z^3|V!-GWq-DgZ=lS%SuOW%wE&4zP{uXdd=iA^N!LLc^x9e_6|B`^yX+-G&GtodPDU3=(W*nqFGU^QOi+_QS(u=QPWZ1qrOFrM~y}eM-4>{ zMD<1WMA4(ZM0G}eifWJg5Y_hJ-AK^8De`;0n_(>*#5ZY)_#ubw-xZRr^=@8oSuf77 zwZ%_rSo@t1`TfD0p)G60AzJ)?g2Q6(P-N?aH?OshVAtXnY&7%R*cOsiFbm6g>Nt5(YdODoGQmaP^E7FHHpELzPI%&p9~n71BHIBIot z%h8WV(!Xo<_*acx=@=<^b^6SU*h}f-TA%z2x+bH=jW0E~81A>3uz!mC+7hwj`TKYY41|tD_)&OXape-q{w0IE}N%TXXUPy|p4K zfOn>4P`q*ft;_P0N9YL^+X8r}Tl&Rc?~l0*pH!k-RU}>EooeX~IIq9nXr0koBOaqQ zMr=ctA=7ZxaK&)haLI7daN&TQU}irm%#p7K=$mnf?IF+!UkIIqPlR^D2SOX+9ifHL zM0iVhL#QXz5~zfigepP>;W^kd zLh_@;J#Nyj(k{}@(oWKj(hkyC>C@8o(x;?PO4~`>O4~@EkUlPbOxju+BW)#ZDQzKb zE`3z`h_sosskBLr+WtUQU)A%f&Z;L>F{-Ajhg7vxRaF&LcdCl2ZdHYLdz-GDRIiX`xh6im06^QPfrx z6txz$jGRV}BKwe?$Ts9#Bo+A_S%iE@xkn*^L@*hQ2SG3r3tt3L2XbIR0olu63hcQ{9gV&-}~XVH*IlhEwmSn$w?>%qcch> zMj}QmMl=Qg3&*&G9lfyVH~Y=~JOvo>Z;3=3QZm%&AF9-IZI!SCQVa2y;3 zhruCm0PF*MKsxvZ>;yl7?cfKn4SWZ-fKA|A@C{fG)`C>qWXTYNQAcU9 zWRiVLiWaA+FqS*ZWF3aVSh6K$H$z1u?TQM67{*Mth8aagVMfL}_GN5oMD*&M=XuWQ z9D3@!=lQ?yb#;BN?|grA>;Bx|{deCpbH?kG*8#7+UR%AuUTeMHdG>iWd)9asd*1WB z;(5W--}8*;DbE9*dp);$f<4!IzC-pQn~^ohV&pyK732k^Kk^Ln6!HLaFLEmqj9iO+ z=g~Lf-4pw2s!ER3ZR^}SJgG#{c!4~ZhkT98g70O>Xv=N4Ndha_2%^=^-a%Ho|``x zdEWFa<(c_2k!MYHDRt&`B6Usl6uLQGgx*9;p_$V}Xic>#wdS=VwM{iCHRd%UHBHnM zsyS7J+VnK#srgfpr%lx<)#lYA)lF3?RpwP9RZW#CmFATql}!~X73LKp6;0(SxZ>n~#f(H;tu?nU9H#HI1f>nvaT%HjSi= zn2(5zG!3T=n-7Z&Hw~o>nGcB!H4UZ=nh%N$HVvcYd23#ht86=j{b($wHGS(+%Ew5# z%JSuxmVKJj?aAyqZ$qryr-!n1S9ECp2Ah{*LVt6wXX3Ls47|G$zPSW#`DLj zfHj`HRhDZ!*H)>o@%*7me2wRtb4$37xOvGqnGPp3rZ z*+LW0Ebk)^K2RrP`i1DS(51;2vf*=}3Duz88Is@-NwBRXSVs~pB?(@T0JlqkDH7le z32=x6*j56pBLS9@055C;w{HSdHi0uXfkQTdZ8w2+Hi4x!ffvNV?c!jHI5Xa^-;boh?&m0tz{?L(lQ?Ams z$?r@h<{Z`YmD$)!GtZZ1V=v4+Uzm+Gn0Ypsjn$iZ)|-tzH}iaMHulWS^O@OLotbBy z*%;l-lWsOfGxMaGJqN!9kAUaF{9+KXU1Hi|N5t|iraZ)?tqb=T?I-Rp-Cwq!vcLSU z<{qo9NuX5F4bXj19*77k2i1aJfLcHupnlLeXcn{xTDNw?TCocn+V42P%Ti89*>&#@ zinmr5IgsI7ftk43zkI^a@N$(45`KLK<4z6B-bc9zGYQvF@2{IE$y#1*z9RR@{piWh zHeWOizE!3_dS-{SoAU$bA?F<~ZY~d8hFo^Iy171Z9dg~_=H~XmZOCnhyPNw1_aXNk z9&R2FJcc}WAl;A;kVD8Fo^GBGJcm4Yc)58!@EY>kf!g;&+O8<&s6A2YQQA@ZQTwBe zqmD!!i#icy8D$f7I?5@^CCVcT73FKR=)PdNRJBat)1_qs3){NL2IFDRWanDTrFzkQ zfjYX_kEm#RN>|>b=JYM~r|i#$kV%lM3_&cfx+-EnauRA&J{2pp%6hM7*;T&~TM6te zh?jOTTuG14=j{3;cw8)C{K&4;c0A%}fl>o)h$?E=Q80q+t z-LX4w&S)m4N2p#FYoPnFC&S?5Bp>!;cE{zqFSw{C)Vc$txUoLptb!(|ury8raE zrEvTv;eqBobraBCPhQ#Gx|7V$|M2!-;K=>kr}+OFh`)1Nf`2ve01uY^YzPB`js6AA zw@Ti3+j3Fl>Se?EMlBnzhj-cYC>ts;sFI%3QYd22F)37WkG>SDs7F}}^|%Krg(~b3 zltLBsEJ0BDJsb#%(9;P)<@LOTu%1=8HD#M$9_)uqTSARXRU$dA)`Z$4PeV~%bMJmqV&jeF8pXItP&U)62g6TS-D0#8g& zjOiaiW%el_LEY_x9zosd6Fh>t-M4hu?qGWICedl9{ijXq5`GGXZP_i~S9{?JKA918 
zhVaVNEa4{)n4yyVgW9!3{B3d3W~cKu-=b_K6Mtg3et8%({wG_Qqk_C=?S(viftYBC z(^E^+uPL{CrYQbDlV=l0B7c_*gKX#4Z1ka|3)lLLBNWcr()|! zeCl@b1^;C+aRmWOW{!%LV_t9j@y1`oSP>Y7l_~28pzps>!Z#w|X*x@2t>v6 zpiafa7=+Hm$Sx<#GW{ZmeJ*WN&J+fvUpt%LGUudL0Z-a($xiq?q7~a3tz>DJAjWX2 zCBpwG=FN!ugCqjaq1Q`r&jo8l_SB(3Y<(5fTB>Ndn3SBlx~C}kE*VqU-{kt)|S!gv`-XbPvy#qjUKA~s@TGg4zf#u zo9t>$d5+mCWh|elC?Tox*#1mK-Px3>pW6>+B`9u^RDNuGrligzrRnF4gYpTA;*yGw zt+#8V?1hHebPJ-BMIPKj!LlX(ck#J}n&q+h)XPr&_uyJP z{|WJV>ScN!nUz!jg3QXRA18M%k~JUe1e7^%vf6qRn~=@!9VP{7D}N!@XCu~I^5%|N zrRP6B?fm3_5Dt={u$x^j`73#kE>-nJ*l*|^>_EnTq)?o!Zj zI=yTU(wX+$Gc|oz(J7Y)4ad^c_b50UKli+{NJ`kuaJ_jhe*5j-pB@DLu{>|9ANQ8(JOb1=rEo~`M*MR3=Gm#1%(X(KTP6F0Ha=?mING0`Jq z&FK$|PPpECX?!D2dp%~3r#1{g+gq_1VqDCZVz52kJ8-HU_!zLMvZMB2A_JFk#4l{1 zlrlH298O8-;`DxpI9-}C>W2 zuPGJVxD_|L+_|c?ufn*+wVYy0_uSW(_a7cPni{=p{M5CF#V6eF zy*9odcVvAk@2>Ha*RqR`#a%pkJ*rj<{uk)is#~yo@~ZY9fQf|$v7e~fW}*D_?Br1X z`s~I~{#V(;Vf=5h&BFK@*~wx2?b(fC{9W0?;r#4uv+(EL*&Qc}?pq#BN_(rQX6;ef z?C`Msgx~$vBT1=mcd1!;JZv_5c;kfXeajc*h zUd*P;Z8A~D+DVsb`l9=K>uOU*^&U$FQv6D711W)}8G)3L()K_~1eX%Yl{Mk)E}aYz z^46FN*yLS06(H-aF&(hoyL8%Rw|DLXTD-?!U}?|6rzM?1`;z#YzZK+7^I9Q!L~G>Z zR;Rpbn-JoFM?{xWrGbg1XM&6EsrI|=;{7FVr3cHE=Do~)5~6KSvF@_Px0Sk;9ekm5 z7G>_AkY#(SY=bAu?KRfK@$>f&+s&;^krBp`FHSs0xci8(w=0=GdRXf%hE2wxx zHmeMHOj_zNhH3V7d)l#_BRXmq$+Z5)-ttFXZZqRFj)0=Dx1%ez~7KfwgF*O)*w&Qyn#H|@Y0`|W*{*(8%R?N)4(sSjCf+L3G=wRhLO zrY%Vu-HQ)p8bkD6@3>@qdGDipP1}<8xECMJJO&ASz2k~;W^rLIr2f&j1NozlpBMeH zhhh%x8=+(i;s7l^QBqQ8PxKkZXZTBn+YqL-Yl&KtA$xe86`$j;6z)WvqCHBqmgFKW z3zDL}PI+b`q_(xYwHd!qKU-p6oRqb^ahZvfa&C8PH||iMFEJ-2(Y#KfPEJgWNF@d* zz`XYic+s9T;Y%SiE{IN4gA{hkik&q1L=39GPDeL7NO20^TUd)oj6qVgpdq@v!HU!P zzQSjSEA(ABYp8Ut`VmAbO)&|8Q1_;}o4#RY!Hywr&~%f+ARsky57TDm1K0_~eVS>~ z4TzCinulo%^AXGvk+?5Rj8}+Xegvn4FVkO+)MlyD8p;PN${} z#j_a)y^DwP#LL_6mfF6HP9Q1#QtqBn>NP|2>LkC&C=Fgv_wF80Vm*n=;4ZyOVm(RC z(D0iHKy^00uq6kd)bJi1Na_gUUaV}HvrYO3fK90;%0%Q$l|udkurvtb;kU!t&V?8%-F|IYI-Ga7@Xw7m}u<_$fOl$mHRq(CS+-wtQJHu!qcIUZvK+P6b!7R}Y; z+aVxAPa`3gax?Umn%-V{elI>0jd8MRd;@RA!VpSI}nKL*ciT7bv5 zLj`wX(C^}+yapsrC#fdSxc9Uix+sOzeu#zI%F)=UAswfKtx+=Ry(x!&lR_Fj#0uET z(b=d0jnl!^Zb}@~0X&dL+Mh)jU%HtX@>)r}QG$M_pdX#l-8|TGF4c9nIQO&q@?~)=qDve}x zly!F-r)i^x`{DA@ZL|xl>}{y#je#CL<8sD zb05=oA^*qlUG0JHqwvlN(krpnT-N*!P8e9jAf6&yJC5sJGC_mxkw7O{KkVd0f;9}| zDVuA(l6&rS&qzse=KTe8#?P#I#}7x7ZAwKs}-Tdg0K4x!AR zbrki!w?==;B}H1Y3>8p!#JG*|8E&;eaj&`!S~icAV#zwJfVwZneI1|SUfWUJ8(@Pr z$fF%XCKi7?jCy~1Y1a;W4UxKq;@z9rO43`*N{G1KFT%DVEHMcw8aBu656q~Vf0=a z0zFtrnuW7Ma;I7#_IdwffE7cGa=smGT$_`Idtdsakz|qvz`Cx?Iku@ZJ7M8X?S_$F zK@2*9Ofmsj_mnv&HkCe1SZJCTUC_=`yNnZ1Htt1icS?#+~oK1n;g!YBn#l>FU2y{#( z4TH2R{XSSRGmqMLZ6J&XA(Av&{|X%~Odx3`Sb6lympX%OSd?Y+CZqZ4NbAn9KzmR> zNCrkGw(p~bZDt!MqqoO6fSO1UIE@@{f5bL16u5q9Ji za%ptnUGI|Yz{}@i=D-Vb5q9AH=bu^RWEpil=;>~w# zgz)A#CPM}=ki=_cRxT_>RZf~@X?@~?Gi`$^8=H=q4u=c8Qu1S`@NL+BCC8@Gs zrlUt2NP#XaUDer_my@i^B3xMeR5`aLOB)kI+-Se-Wh-ApOT8fB25v_Pyr*xD5a7^n zhYJkQH-`&!(rBS~DZ0`03oV z(AH0DriG+GotqK@e_AsoB=G6nq|n+=YbJ$0E}feYdbhM@LTGI197m{cX$?n+wRCP= zsCjA4xKP8=xiO)dr8Uk%rti<45jyaG%^4xx_va8od*81?ysCG~(Dl38LVDrBa#ceW zLjrA*Z1ZU4mTZ?CbZHBz!-M6mh9W@%?UHN>G(9VJRt}ocLK^pA1*)N*Kmt!E+2+&A zt=I!OXl@H>xgm^DLsdZn9b^h-BW+}F&qi9yCeKE~WzWqfaa1V4uJNX{ykhae~ zkg>MeJTLccN*o$;1*+>7-)Ur+aI&xJnBY*~ zzGH&jeO12*GW+)ZBG}khWhz+Lx6f3ts;}y(AgOQPQNe<~sw09A`t}_WyxmuISP*z1R#09Y|uA(9rg5QX!hvrit>B=Bi^er05+7IG+`=N&{d zpJxWLBcEp)B9YHC1^F?bXA&|;;F*975qLNdCV^)hQb*wFlw{uE>5#0u!NZay-Qa1L zd~k#3ZTi(VSk^$rD>QqMG~>sb*WxTnYeZlv2K4b;?4?&|&=3ja&-y`|!y}^+iKXnL zd);R1H=^Z+n(H+rn|yZt@t=@~4og$?Ku@38^ zwn4cYaT#&+jv78i#O*!2I}i?gc()=p@8R8y=!Wr1BW}ZZB@hlUUNB-aj8_=Zt->pS 
zxUItbBZ5|`$_U6Bqb7&p3x(v~J3iGHV8(oyS2-=4HA?jf!{-TQ&N@EU7if(sn^&=v z%^IPah2b9x$;~?6)fZ@v37%IuCYv=(RSCo27s`C+cu8O2XpHr+$}SoC`_$Y1_{)Nb z*N#WD1r871NP#%{1Lj!Gawt4jO zR<_G1x^$M*adxDC?FOGR_F0y%9;Xbd0pKVEx)+08F^Xo-l4h3wduY}(s!A|^n^0z- z<0V~zOEFs0D#p;PI;wCmev6P?pW}I5f#jGEQ!4tsEnC z%STZieDLc95!H^nGzHehq<5=ql$JkF-RFbn6J%F8LNoFV5moYV~c#s_xm zN3{16Nh^YN!vOV4mOF@(dPkR;XNxbQaZ9Ab5v=MGrS{{9ep*iD}3o})VzC%-aBVi1o9zaOA3CtuqUWb{gMmy3}>LrXN zsap{eX9WBb9TQ=uu;?u`6{>`>1a&P!!a-mr!7&tOx;wh3R)r#Ayoud+zINEr>n!{l z)G{mP;;7Hg6$Hc!syoTSc);D2Ge!T`UMN zQ0x#=;8LG17N86i+l6x8Q|~SoxELt52`Rj%o?k3DZJ@{y%9*7)Es`u>`HmGpbQEcV z$U^Fgv4V{{iZz0x1=Kxb*C%)x0j&-(+nA2)0^sLlTJ2-{8IH3Uc&JS4>6rTr$9@bv zNTwBtab`HSVBr2Tt#&cn80gy6gG9$)yx=FKESsa(5gqls;K!sa-$d6I2QyxqKC!Nh z{gA+{jO9s$SH=F6$gGMLNrYF&N+vR^W49#2pT_P?WIm0BCBmt(T8T_*tYIR&CiZY5 zvnJL&5ndZ>oye?>bxefQB+_JJEa;9+XtDyPZxj2l#r4 zv>&2V&n-Va4Cj+bleXV!*VdB#PKsMw_d6+XeZSvHWvjT6)1IyB zMoxRTS{ga2Zw)YV(%hP66s^w3yL&ACI0u{9SCdB)bn9?FdssNY8Q zcvb-4lbb4VbQ|@^vjVd{xt9g%w^DaKD>$?#H$~v+Rx1Cqf_-~(aRT*n)RDRZ%{_#q z$vGX}MM0rTYSF~xkdE#LL7@t&-^3(SM|W0GsGO=gFYNEEPFm+L+DojH(L=~nfnxG2P63tMBX^ZBl z!gNF_sxV#AdR3U7XuB#*UvyLzW+1wtN=0-|UeVC)+wdxgD%UxAK|{BD!>d3lyJON{ zLzlJTRRA@!WAcoKE_1^x3>DEac}hdKIq8)@t3)Vpochax&ntb_W1+w?>V^fMN_`eV zC~%beV%{fTpY=#6aDd3Io^a1#JS&O4#c5v!UyAmZ!!ntB5yX2Mo4TZY7&*xD|{`gs#o|%)JU(eS@fh{ zVT&s3reI(;^~JD{w>ImVU|<$CY1qeJo0aym)MVi8OTA&AquQ(#L5+LV#UURZZB~+? zMkc#_N2wIGgXU8NV<81fA=Dc*pG+9bRiIRo3ebEm!&nG`QVFUI%_j=RvKJ`bMD3{c z@q@8!1xm%KH)?$lFqV}-sTdWg^*IS+ofIeqvt`y9K&ald$;+^hP67rJ)QQ?jKiEeb z0R!=w>@NOW3ZC`pzVS-g5`OV&*?7Nry=)2p_=DMa|M*|BB{1<8*?3GmkS!4q@0^Vf zi1*5t2#m*M;{)TvvL%AzFJ|L|;*+u^g5y)Odt5Pd7Bb7HHBAK}Jo_B~3e#nDxCrdeX`<)D|C-gdpQ- z&k4=bKYe&C)HU0aqaXSvX7~K0o@~VcwKR0@(yLn)Y<(;EP1O)FiWMwWtlkP1Cf05R z3l|%;f<=feSi#PTt%t)R#iZb{^I{5cSXAm+y)G`zDRv$ zaC0H_2{l_Mltgva2`#2>*9k444r+(yQnR%~A5dMjL+??yYlq&Z4r+y7r)Fz~;;F7$ zp;xHewL+7rgPNg<)NIYrSgNaL=!Kc^^LF`@PnMrT`xAr%t|~`ecPsI}ae^tyzj0Dk zGJfNvrsVSGM8w$*6Eb

    BkbrizSXH%ogL1CoC3Am?f+u;>{8^5GBkL#E5wF1R0{l ziG*!L{D}l5Vvhl0E?;6vF8Wh+kAdUdZiyvObmgZ>hV0UNs(ttzMJQsn$6NnnZA>M1 zk^x<6rP`nE{M=Wr=t!dxB{<`{<&kB8PsPqTT zX>=dEj-G;(ll@K6Qzv8+t~pIjk;Q)W>5GvwkpA#lwKP!ICH)~oF2b+R8~gD^N~PAc z#;?loPtPapKP)O#M)Y&@KQ5(qRZcHh{icMBBa8E!DvTGt6krv!IS3n>nEwYMy6c&& z{>4@QVL#TVL}*Pf|4w0lU8VGgt8$-w`|Lj}%SzZaO2=5e-vz-+HyOR(0al4>G#f;N z8tpVfK%M4I6^>^b{=7>NsC4# zYSQ8n4VpA8LO_dliT!{SwYLspBhDeoQHfNE_Z4xo!dXH z`%bQ2`RstP+?%v9_pzI_Nq5Ctw0G{Fw`gz-P1r;64h`(#d50$9k^A|Xfw4QZ%^r$(mA}{VRyXJBKKy;I%6Pf3I8*i%d!w2K zZ%BH^Vm2f-vEVO~+F0*GCfsrwd{L%@v;b7%Anh!wVUQM%5*VVLN0|=MqEU%Mw0Km* z5DkkG7^Yo9nGVy^IRBKlu1arwZn@QY`aPLcW}SLP z?kIz6Aj)h`o4|7YpOS*;p+OlZ`dx!27U=Im|w+IS1a4wdOGUv5p-00M?zu z9KfPE@Ih=ahdGFiwo(w)d@^Wb{7YN((xjFi89p3~EQo=q!xGx}obi zu;mX7aA3Vq1rCgTGn}!4!7q!u@fFxlDhYITwz>S#y~f zoFf+=fOF?E1MI)^#PvSkKl@(K{T>Hh_GVdzoZ|w#1n0iMEWx1{;H9|W1!gHOasf`l#Vs&NxReEW87_T+S%$l_04IMh!EZC! zpRU@<7F@M^3k%gRMKA)!-eQ6FLZnWB7o9N9B+5`q5R8dyxAPvCShSWre`{&oz6)vV z?3I)2l}h}olDPX%=kq^#`JE&e-!DhuU-8i#zn8ox#AYP=~P@uH=g=!Aa&x+


    i>z)yl5Vj}?vm%X(j!w2O%OChL7+XJp+!L-Ff>wV_2)0GJkbbhRv;J zN}aH(zjT%V1DE}tx?el|_B!#PwQVBtAiUlF^KG&JX1y;eNM)yA+n-X-zsQbaep$_46AN8E<(Vs{pp&|% z@@kuH4&>FovDuqf``Tt}UTu>NIB#*b&aQo`NL&wDSR}56L>GyxAz?-0Fyz!@L8Z}x z$AUXYeI5&L8&!EMxMg&*P*7&Hpir=9VA4ylW1zrGux-G{OR!}?#Y?bhVA50Y#Xy0l z;IjcAPr=#&6;HwHfk~ub`9J|uux!8wDM%bpK?)WQOnM0B4HS3?J{+j%{H@9PEv0^8 z%r5Kv6?uG3IsY;A>T$;p;IzMh)}P1Q-{##FN8IJ9_qUODr8#$b433feZt<|2&^^bvS+(S9I1g`PE8B$EK21N{QN=Nl=nO-5e;V8OU9&dFbN@K<@gn10Xm3paGC; z#q{TUVD+K58~C1uo@?NvhdMOy)rRhCup+23S8>*>=>^{%kyKvkLlcJYp!+7OXP}xB zhBHu=iRx+SjtRqQ=;n#)DX7GR;S^MOqIweg0zDvQv>3qq-lXyOCa3h{CF}MKF7ayFA=Zg*Y z!Sh82o8kE`1gqfr&Ib$Q`OXDTUgZl9ZoJAD8a%$-&V8=@U%2V7?ZEOkVy$8>;(|iy zDjpmD5lkvE`Wd>pw|WayqStT>RJgZ#GxWz^!_Ck&z16bNxgJAV=wwed6gt#n2!(d{ zRLek_J%%#S#-3_vXkCw?G_pDyr7e2+e4>Ng^Pao5Gl1~>?aFS0OlYEj-6XSD|PaR`+ zl5a0Y>~s2p z1MGeJg1ziD`huP8Dm}rs>~ng8t?Yezg0ICHI%OeQ0u!e^?*evY6esnWEabCh)R&)fhFRUROH}wv7 zLqGZr-E&^z7un0h2$isg`?$h)uvC3s9Gx{Kp#wchM+^luNaIr2Ftjc&mi}Z)LIe7S zhF}IYKyc=4n7b}-68-v=gevqT4N(`=Ab}HM!yI*a6X~H-61$;qY7wNM(N$ZY2+Hb- zcA|2X$ek_ik|vCszQr20!(4S}p7bl@5|3q6OAs{yrc5kPJIr2(hNSzCOAus=h=}3< z(^hQtTbPv&&4YewT;h?8DiLunz_b|~`4)CuhvrV-J1&8jp1Fju^EEw+b$kX>&@$de zzt<;mMcVHY!qnIFFm}r`n4FgJR{DiLiDc=S6ojs?sR_2H4ko2#EJr`nCy^-amx56A zH9d&EQ`hm^_Rb1j`@FN`DvZ7|nyB2O$9Cg|r9tNr`aX$#*x@Rew5AJ)?$(VLkdBH( zDElO?!#=Enfi+!zpdaeS|0oTLL_mEKL0I=Hn1H4W4}C{Beyw!WIfS53BG1d6Yu1<3 zwI>Q2LUN~}2~X&diNczY+)3#ACxI2O?DF9qdg6gNVh6&V3vbgC55QqM5Y}9Hi=H?J zr`CZu%!N1UiTmSbSqLpI{Dq#l-^7!_$1COC<-F8IIK0a6H9Enqq=uNR*x9@FAlzr* zZMx#>ILr%#^$fg4S6mIJ_5yKu=B1pUm#f|k{H3m?Dz3Hxp)>=3u4}mmcd-E>GXtmT zTEcJ#8xR|2;7@feRdDMX5R23B3SFz+*0;4=v$6dT5ttG9b?w$H?EQxb=MgwwyY)WS z`60q$1b#)k^&WQHL&U)mc(QhDCbs_pLTv<|sNH%Od;bAq+Xy^XyY&v%`2j*~1b%`1 z+vC+MHS_b%&%fWo2H_QU?baz;;%Bh4c!bsfJV{F&fhFP*I|tx#TH;Pv3?3ml0Kcdu z?ub>xBYqlyM{0>XU}vu)KJ>%Gw8ZVP#H)zmet58!_-QQWDx$654%1UTA4JfH=HRkN z*mvfGvh<-3aob1Ok@G<}^`Y6g>|wU!d{CM`^geF;FxzlG2&)gx#AOe$x6B7c>qGD0 zwhys+=7YlZp|^0^gY2FUH&<_!zA}g|nXcjh$Oj8-^XZ^Vy3nn-!8$fZdFVnRI9D1wWh%%?7b<}p ztYxF8LT&mYyMv&bP(EySB-^n&NL&-T4!b>)ZP*2e>4z9x4o{)5WH7NfUag5FDkGoy8>$>Y;*hI-TrjZpGsA z2g?RgTvD4J2ZOWiU?1a>n)En+xOUcz@>QvHw*CMqNsHrz&A_vv1Eh;u90#lvp1ssh z3e)18#%5e)clMJoS{ys9)KzvxKgmmrV}s2|W8ds2Icss?SgAC2cs~iy;#gucQj2ol zuGV4D)!bJM*JUOB;VM2qn}6#EMnHHOPL{(CT$pRq4=3Ve$JrVSbIqp!uTO-Tp zVo!(Ih2at=(Bpc_<@8i8X%folU_TDA!{8D)=ofm*WpptvsT;~@XZwcOdEpYq(Rq5x zM0)CbQX`bn#@-WR=Zs4jYm4&yV&mv{@{8KXR_Q2KoA26pjoMdUwrelzmELrneb&zo zh0S?^F4Q8Z(eDqCUQ2Jf#@6<;bHU~`pi{I6F#5Iu(o^Y8c=o)n-Dzx2JsPb=P@>=O zCq0(ll*T6e+F8EL;kUoe4)F{1!g{}$y02BCO1B!Aqe`U}?k+0-_z3OTw}-(0k`eKLCi9jb@*rcd>2u0nKq+bjZDwXs_L8XzKN zyj!M@$#^$U>C1S(nNpVVemw=1@ot(Dl<{tyT9WpDHN}zkemT`Cy?pjBrM(-bDx{ZB znIP@`eCnq3@_AyVz3ZmJrM>CppC1{zPNK%?{1s-jb@Qp;4CK;E=ij#hf4Al`Nqeit zKLzCK&D6h5{32wwyGBnFEp&_(CCdH35QFuCP7B>+L=Db%f2)Yla2sPL$xsRr8UbT8 zYzMPYN+$q8M~yKzbX}_a_%JkSTJ1SwhkfQ3!dr%$eP&qFd$m6ZlNmFUJ9R_(g>F5i zW>0=8t~77|`)2bB^Y*`o@RjZdM`-O~DCp+%y&A2aIXn3T`px3V;!;vq<k_$S&# z*W991Wdxh+{qa|&S}Ox?j|fYe{Ze3uCe5h5WN6y|QDC3!evAFTv1g55|L4!CeK*yy zy@@MXblJ>39t&3u!WJN16|~a)sc~fJ7ve+&eTDnKzupzD7L(Y{sg4Wr>4jb=@qHog z{PkYiDvK*0^53=JXWL)>AiJw8-o7%TGW=8c;?m;$;==3#ca%H9nCQyys$5>sjY`UU zTxg+Q@|x{_p^i2J#GV5dL=LZn}@1l6c(2aQ2Nl|--wzDRO4&*<`Ms&EW z#fP_Z2xWM8<7)}lfykG``a(0OqUGbkxrS>%=T3~)z3alcy5#O&zb|s1*uDaO_)d?B z1C}laTrSW%+jraN3J;3)KG}cWDt3k+eR|7Fg__`tn)&}$_MMcfzu!k!Ck!z$Dg;*Zo0Z~qMXLW+zI_P2yIT6XbX*JfR^-1Y zt1m6QoZ6c3rvKB7^+js@Zpx%r$5u+#zgZqkyVvwxnfgEE6rtP$9%-^>{ zAF@n)`B!zmq3u}7)TdtNJ5l~WZAJcEn*Pv&v72ebR;!r#)ZY$eRr&Aq&%dATRtaS% zUtY2NKLzn`?%mz>TW|RvSN=m(-@p0Za6Ipys4a+ozOdcK8P%FQ~5%6Lig|wLFBm`%Awf_ zox|q?$;=zd{@Dqv;U5C$|14mXd~7 
znS1>&wy3whpH9q0BkvLeHm$#5*D$apcLp;hO7r{vm&sow$(j1^ZsV| z?tR$3~-r=t)Ml_%rSf6V%=SN3$rFrh!U$j?L~4q8%AY^k+B? z6Ns;YqxUJ6M*@|uaI!J4S8cRYY52W=DM72G=`Yny0^y8H(*&`8VEuDS>*YYD?GrY? zbQ1ATkV?Uf6Vrs$J|N*DrIkNWNsME&lSMQmA#eCF_Vf@eo&pIMD6KyRDv5Axwy=o% zNXRfB#&kEKr5Z?xqO|e`DsAAzW!_;DFx5c)d5Yz_KqY>TjVOz_m4rO#!#Lkfn5_bi z>QgL31C-{*Y$n=>tRf`QmErV?K)eYY)uULR4N!VFW;57EY$!sex-x!#MNnH_(4|-g z1t?99WzXJN{dxpE4_>uSeR%=Ctncxy;r>b&$8COUCkmG!Wjq;sm;{RgAi;ssS{0xa zIc~$#POK|LCLtIb=>#z>uwI(dsu7^%J!aFvB$5h|UI@l(8X+|aST99sRSQr;j@h&@ zi4O{qzaSWwXauq41qh{8H9*OA%;p7?h%ZDcAs8o?k4Oa8OHx{40ZJC5HV<2gW_idP zHjF()f`tc=&`fT9fKf6ZwYk$u+?R(8vtdjZ6I$GXgg4~YER51Gqc-WSM3uZ-t@rQz zdCdM#Dm)K!|9ex(#$xH(fV}Qen<@q|JRkYtG-ITkP#Xdux#TnsCXY30bG?PAo`ZC^ zWIWF$T(ky8ACuFZF?r8NY_7Br<#UjSEgA8-gmudch2%5@CXY5^lh{HO&q40AWE@#O z0uGE8kkcG7c~3`dF0>GNbC5q-GWeH|SYF5{r!oEW3WjYQ-ViIYkiow&My?TR)c_=t zoYv}}mpg0&e?uf>A+3L5Ji10WxV+#&PHXnhdoXNc_U8Dg`yUFxZ=Y;V@bJl7F7uu#NF+V*EX%$YBO#`H1C(LuBz}|2)}Y z8~xYBv-glwhZt{D2>0ax1rxG3_T5vo<&)RX&k-*20HZ&U)7JUt@ekR68;SC_k%#v& z;?EJ*Eidqp(?I@tyhAnujYRR=$esHbN0yIRvsf&+=GSrUKNsN-@w4%3BV6x0od+_j z^!~+nbCJ&uFyi9~>qLRkZRE70{&{IbHi}I|%e%;j2X=c}7Xp4gz{ws!wFeOG0cN^^ zH{C!{H*mQd@aqOnb_1&2fM_=`(*?Zg0*bnT%Uyt97jUu*Q0)RlyMUQa;7un`)CpYf z1pGRIlbwKSCm`Ah%ya-JuK}vp0MTo}Ogiu;9Vki%E~f*2>A=Z!Ks6l@O$TQ1z#BYJ zga%gO1Ff3uwUf9?id&Qj#hQU-!4S-}*SV2XS&Wgv)> z6-03fqR3B?2PVl`lVq1kviu}@V1k@AL3WuS%TJI8IOHr2*@Z)v=a2`+$ywuMmvOTE zIC)@Q`H9~e7A%Eg zNOo}~%R7<>9LQM?WETgryaRc_o}6V*cCjbR+mi=Qle12fT~3qbPtUt|{wXZ};1RDS zFB^YtA8w^)>9>%wNpDj-*rI0CqGs5lX3(Oh*P^D=qNdfNrqQCN){>t7y3dHgGGa6v zF&c~*HAajlMvP)3My?U#o)P1^5#x#xBhiR)!H5xR#PBy_cp5Rz7%}XO7^jRFrbdhd zMhsme#$F?aq7h@O5kuOD0XAX?7%|owxrQ4W)d6yK0H_XNFMr1Z9mu2uQFH)72aM?D z=;f|)FN&q+$x`!RskyP#Tv*FrW25H8QgdJ(XV?CL_G%k z>f9JrZmxExjO+n9djMn)uulWj(?I5FAnG)LI1Lz`2INiypwj>w0H^?v2>?+5fB*m^ z0FVO!5U{xS<;7LQV)$$MVRctcqAfT{~*>H<-^074fq(goyn0gx`h)&ZzGK&B25r2`;z03#hh zP6q(#0Bmi5stsgn15w%lLK`sB2IRB>koJu!X-pgC>=+>Pp0IS*DzH4Kw6n!}Vk9RJ z(?U5r0?5n~!p~Z1l;>RSY&kqJk{O6;qMRK9WZs33+{#?Fh&D#={95y{`u??Zqr6zV z+NCD7vN$CXujo(29FEC$OodY-q&R zAEQJ*D-OscgpaH?G%nAQykFTH`057!!F&6xHs$8R!Dg_RW} z=grIsbccm)U}`Inim`I+u&@eDZ2>GWR^}ZRCj(QP0Wpk~VTZ->z|=QDi@z1D!{TUQ z>g#g9z2J5Wlfcv_z{1~3q{G4}Ftrg7^S9#ZczDa=XN<)mVBHC;ux1M=#=-=sHMjC= zwt!$PjDd^hRzS0bIL2c6SMbcOerdMYh_N^TtTV4XCDgRq9)V=`b(6m}KE}d^cgaw8 z(Fsorb5LWuq$rPQgu8_~E@QhSb;L5`&l9fQ&e=Nj=tnaDdBWM-InqOqc**gR1e@DA z;Gsur$^4N7?b|s5LyvwSFBf;n?VPnkkJga+&k;U$r7lq(M_5fyJYNJJRw~Q{Zd*Fm zY2`K%KL$jWD-Z%N&JrGUiZxT5&RQLsh#vxY$`x(~UYsGgb&54moPw=(OvLvAkxy>* zYOcDK?Yu`m>2FNT%SAw5$)pgF%vb2a^iv4_jMQ=RCBTX>dIWRBG~1z7?Ehi!z2llZ zyZ`Z8>nIv=Ar+9c;sQiO4I(SGs8l6Ty&19B&@IkiON!t zC4m3|f*_j^0)&x}wvcU?eqNe`|x_*=XH6mJ8+-toa1&eISqfD!u&9N9SrR+9D)bBvf08(Sf?4zn3EzmX&K*`@7X3`ZH{eg^ zdR`ye=9O?257c`vFVg>!#%8I{S?W@TyL`9s@H!0dH>5Q|GwAdWH>|`lket zuaCNsrqnP$IN%P)?axv&|>5@ zqM+_0v4s>GZh5p4|G?IyAuD5`rK!Ua{8al2;v3$|sdXcAExMWLvu_z*Qw^=U_TU7> zdxK;(`7bcRW8E5xd10SzxFUQ1Y{Ts?UOt^kSX5XeyGD*nHu{D34&MC{+dl{5{Nh3%a8UgF&AHH4PVM}kS@pY(^Ql!YZ0qlFuLdqRS=h2g2A`h( zigSD33oqoZIEG0J3H{EkVafmUZbm(8_{BwXQ77@yW`0k=MTfgE0XKt}EwH6Uf;*}A zYC7ruBd`478blUZ{ttbbNb3vwvcKVuHk^6uyB2h5G4uJlMYi)0HtEKq2Un`3+!r{v z&pL}gbP8YK-HvGxp~3v5YUau#0Qb1Fv3GwuOA&9}sO{__jYq z{dZ+t`1gL18T&jST+G7_#2=myNEVtO zP55PTWADus|1#IkKJz8;RGlHpLx0JNEiBq!bI+0$7e3HL(3QA%KB!(qt6C_AsF7-s zU}ST`rbQ3l{|aXRZ13i7wbYpSJ0$<~SW~3^rT<=hr`uWmFGG{RF!lukmOl*tyN^PW z_vOdE&7LlzSA&VOi^rZ9{J#*vU5f@a*y&tsU2Cp-EedqUPUmdvS`*c4k*F8*2o6xa zb`P}9PUpwgwFau!B0vr2b@sNdJ*m2I(#OjFLIQN;jauSP;wGj>P%PU+E3yra{N&EG_{MHL%H)FcbrCq!) 
zFLgmIB*x}k+AsIz*Iy937Snw;ZSQ?~u?u2XVrF30p?(a-n@^K{khuYh|o3Ed|C(`@I zpV$|%h&71VsHhF&*kIr!dA$ z1Esbkvm7Y#JkqILKz%TRg|q@EvoXf!4V0cInxVCbGJ#HzDXPtgzgZaSgV6gAfkhEH ze7@-Z*}4`jjM2mhP~D6%#u+G?iJn87cpl+Y&ZS=AU^zoTbrZ%o!$8S6$t**gXozr1 zh)U_XbKoCYm8NiF1BV95%JP z2TQC3PNFf!vIa_;iDtf<#1TKICN}j!PmEe^sEL`oCb7%Usf11S=)pEs0AVOhvy_1n zB+<-KlX&0DNokOJ`V}_vA+YK=rumM(l3aq>QFY=?uf*nG7x#G5^QpB()}yJhX!W8% zH{E!Po>G2-8C-+d;^Ty4Qcu&dk$J!>IHtK(UnwWymXczgGm1%FPsd7#*e;l6n!XYt z!OT*FSm5IXXHv&sV4EUVO5RXCM4z||gl)$(pU_u|j5m`%MEu3WDd7!upXd{!iEWtXpY@gQ#GCzah`7?# z3DizqUx1ak2(0Q#Yo@@Ika1>wRpJahv6(FDLRjS7mqzSeROI)!$0xf+D&AVug|I%} zY?~VK*hQyXT~x8B*kxCM;iZ_U75ca}@n$k=MAeH+UiYrvVD78L};&BU6GL5Q8sPK7k869HRp1vrW73TG!;~(>npGbl zTG%-etEe`SSfT=O(kRV1T~Em{#tgHMsAuOCSw-C%iG_$J4AYDs=_&mZW0tg!sA=cq zT}AD_hb77bCxJBMG(9D~7_;bo#NBpIMpe`Y_plJr#OVw3UypFCc}l%|6)Ujb}`M72*zCr{fjW(A(H$n}K1K zv?yCWT*ZB}+bYCOwodXD)DyR{*S`aX_ohXi)5ATxZ+1;()jT-N837_j0M8Ks7yA_f7^K>!#8_KU`;fM+TIqyqa>feop^#6zI*A&~PBhaT1ajNDx^}slF_c(RiI8|nxx^bMkdYt;r zICYLqon%u-*wi;{YP)FMMIGs)zUiX2cTuTb)R$eB<)!_>isNd94=W3~wwbYSX>YG~48s_3Aq4A}lk(qnE+^6OW40Z)pa~TG^46C^W zgI$8vAYm{htR@%+3x?GoU@!!%CI|)#g4G1VV1clj02nL))>nK0rvk}PfnZf27!^pW z3M54ZlB@zrQh_9>K;l#&F)EN~6-cBCBtivpM+JgbfuK|%VJeVYDv%p0kn1Xtt16Jo zDiEX!1fc>6RDt-bKzvmo-YPML^<9(HmPu;;B=z|u^~of)V3L|SNqsa){cTbcbzD?> zUSwxz{N+g!V7{nZO*f9v2Qds#od%#z15}3rsKWr&ZUAaGK+z3AbOThI0jSLY)oK80 zH9)l(fLaVtGy@RL0M%>&YBs1Cg2-q~~fT6yJfxd^Kz%URPhLVMWWMQZ+FwhnlN(Kg!fuW>fAZZv%3I>vbp*F)n zn_;N$V4&|{C`lMd5{B9Y18st#Bw!#3*s;(9-|dBL+6&pZ7qWgYWbIzan!S+Kdm&VeZdvr?PK%9K)yk=h+2r5Gcn5F;fYBeg3=YG;hpju@%! zF;d%Nq~v0xw#G>P5F_<{j1)LVN;XDnON^9E%x%;oo<T~=tMa6(tCw2|X5b2vv(rBV542|e>yfH;ZfK~@KeAE!?n;xgpRGl#N zMxW!3DJqDN#jC3TqW6sKd1)wqA${roCS=SIS&w6trmj3e^%eE1EdsCBd{O#9WFhIC zIiRB$Yt?DE(=8o+u&F$5=rzoHERE|6bda!}9ftj`>23#`h;c)$Fz=Bxt~bz8gth80 z#JIk~0Ik|aL<0#X0O1+F&^F34kQf0tT*E?bBbnlY#=cJaApntwT%)CB>Dm3tf8Ye_N7q}tjP*Ur5W}@jQn6tmVh12unA)1 zRRh$BnsNRB3LhH6mcI-q=T?=Vcw7a$ z>)wGhj|#)8`&Hio-`Xlz>!pTaT~s1>_oZ4KrhL6{Oyym_LTAx{>J!BHel{UuBBKTW zSUu7Z<1u7-BBAOL@U6E(l74C<)SMk`;(k{0aTEcQ_uXj zj}`=oJO9e(Z_vHIXc|dk`vHkHX%C(n?!6zn3`jn!uwHN36|CA`iREbzo*1sZAG!qi z;jF^fddn_hhmk|$Nkv@D9>EJDp;5!z2cthqElf}+0`@*bW%>y;Y@@ojF-CugdMH6X z6|najx~88{hiyFMt%}j_qZY=iM*{X9Lr3%zDzS}f-kUJ`-PA+z>enZ{-qvwp?p8oN zZDT*A-2ac^&YsmDn)7>B-gJ`p+x)kJ|5BX`VW0A782>ibT7@H*7Fn5f2j5n7<7~Rq?iYwk!lAeR;V^f@01+AG z7v*p4XYAkXw@GtYQ(Ds&d<(H{>Q0NU&HIDBQ`rj}9OYs~=QnJt$+8+$wE1$ntPp{Qh=M9Xc zi!o{z}opkHe5`)KfSFT(r->cf36YA^4%{gixI-~La<`|8}C=ipll z5x>ZVCv4cw`)zEt?%45<$%FejP0tkI`LBEwKOxy4uh)N8@T1Oy`K3For}qYZMDX*! zK)lqA7yK>a?m@$wRaDk(*znIuf>VJ$otfM53DN3pd#b&h_uYtXCi;WH2Ym*A>fr@A zX70{^l@x8;uwlQ7%4d+-rAm~LC~KW+LUJ#ye%#q%7Lq$BWflxlqNEj_*GlO;O28%4 zn(LjN2Z)f#HYw4|yi4*ztjv34m%JP3EHLkresbTeTmWeRPIG;LvqK3aw^Pc@4TQ@N`5U%A z#Nr3!#&O+G34Y=y!LB#xe}iBq4(shMEBoo66Y4F8m8u86k(Rmw0zC~0Ijf_Snfyab z{aP0hn*(azKV<3EEv4=bN+<}?x6s*%Pu@VQzvVnoM)VSWXO%Qi23cqtLq7benDe6F zYt()YzCL$qA@TwoId|$aNNmyi4T>sR|C|iV>o%Z&LS`mITk63s1LCTaE2OzsLHws7 zg0mW&Lk;(x9bQ5X^hrs1f>4Da6&5@&uwbLlc<9fP@Xwn+U)DkHZ{X`b z=X6M!$(Akkdt4kORdZKKw_OJ@pGCzS{4oiiY(T5WI1e;JCI_XsULZtKh_o(_2LvaD~aKS8A))?003vv$MP``?w7eZOP5iptGff5B4Od|-og>@84DS;z$&o&8zK zF)j6aE)F|X4{R{M(D{{RamXGk9Z5oR7_GkCd7vBO#g;1a16dYVXyv^dEHuwNaBR~X z^!LbbX62rP4+~a~>;4M8GPmyEW5uffrBEM>zBlS}r;UzVR&stzy}8SPlIr9p>3}d$ zNqI=8jRvaZ0hU%zb#@qn9NYg~6HasguHz3>Dk zZM3XRUvK0y2viJL5n^nslaZC&)Kc%{;s8<2m6A3?fp8VZ_YeMvCL~wV>U*3A*pSH? 
zsemAm7CA)LS_e&VM;E^vEHtly0(=*K!Ue@yNW?#Q&@GA!|EkXyHaPwDfNt_cOTE9# zfTpUKth72Blu#K`Ypc_rmCy(M5L7IJH9ldtIqD&Qg)X19{jU=57vJw#W7GW^WPF9x z9T4hy$g%S}a@oo2TI+ARI2=(uuvJ>>4sM^@r4K-b>U&tDM>hHs(DBE-PpqQU8}wnp zcmGwQ7hhI({BwxE6KOJ~KH6pAr0V2$Y3^MR;zbDgypDBtvU01w=dX(cj74zATQ>6m zLG=YVVov2#G8}Xlx?|S%Um$&hL2EbcRB>Pb&x!ZewtFCWRfwvcPF!{}thGMP#Q{*w zm6vY2H?LU1S)GLuMuzBcC<$A5+9#Hf@CJQcaQ_qH-?m~wL6=TJo|1R8(7xftp0(aE z;(P@G%OrgJmU*Un0p{B~%gR!}sf+14v_aGUO26J%{I`kwpL*`_tNlx6TY|?%5gJyK zE{XzKR)^s2bZFVh&aL%CmjSc9M)j|LM-?FpgN;Sl&HDYt9@+~1DW8%BgJ)4N_^$K( zz0i4{u|V)F3a3Agv;Gr#ebm_fHsDgT1dD99%*DY*^?-_W?0pdIWyp>TIvaA5Z?#HI zEPoG4d;~B?RXRRI8t>0Pj`{g_bu6f(&t>YZ|3Jlmeti9^Oz1ZwSMRUiuO$ z7E*3n+5Oc4u^_FQki-i*206)?*7`=50Y}xz1JVJp%-l^Y762EEV2=0MS>$btndG0( z`<|V?Wxg8p{T2GL1;3p+!$@3^f7Tj!kQVZf&G&ahx2rf`-|gbyu9~YRZ59twstws> zuM?J&T;3{Tqx_e?2QfZE9@977K7=6e&F85lh>x#WL^mJHx`;UM?cS_B)kT&tO%De8 zM8$u%b*paM{npc3&vqH`Q}xo6R!;Bvf(vkl7QG-}r03)>(6a>hl*1*}13yYj{RRTnhb(u{S(=-?q^*5FuRo55HjVU6PTN<)mASHcR;J=>PokzfS3)UZ<4jB@ zw$+=#uZqs*d0?_2UXgNKkp1@V<&}3odhGuvf0n?lT|~=R04QD9f29R4iot5{uU5x_ z5}HHwopmt8uwM%Npmwnh!zoB zLnU-ksImxxdB=t){;LQ~pDngXGzt5H&Ag#48w76+$#c;$&r4RK*GIt}s#SCCq~Eet zMJF>fH+f;G@|SE8Dq7?rzvT34OkE6s#eN9@i}2Y|lnb(K3)utLiOx$tNv}_Z54=+K za+EI0WtM%LvcIi_R~t9h~KtoM&NhahS)37e-N zrQ{3$4d&$c`!(%D`d^O4Z%*7_>UqJhc6D9M>qCFd>>&S0QLQI{CY_h^+ddpp@y$79 z+Ut2gkH}@AKNu`jzV4youS~3qM|TB%(=Vq6emc9dR`;T|i1l$*{P#NfLX}ieg#YI^ zw*UAJx46yL&lc=ZLYr$cbJId(>Qt+)9XJwU_|X7`I55@FqH7C&e^9$(K1lj6Y3xrc z(4Q~)UHeSkl|1-RVehGNBb}D>ek;Q7i>^;uD5)ZDqp^r}>SGY|f&IT_yr9H?V3Hph zZ!Ja!;z)|f)A)o8c`jE^xm@e;+kUBm+y8tx=vDhd6dnBcHsbGU<$^zBBmPI~cmvuB z{;mafqe!_;Cc~RKdp=v__#wsLpJ)*+|4WW3MeEl^+>PfUQPFSVO+0;5d|_!Po#hgy zM7h?_)^Gcb?VrD2tKVg?KFiSi$o-$M;Fo;tC!Y5Ajr$AOe!d06{y_a^kk_%L2@OB~ zqag|aulOJMD3O)4dmj3y2g{$kxbUFsZ`RTSwB=mlO^S|eXry%)_SzD1$7J?z3gcqOh zqFpgk;lUdhQ11Zkn0Bw3sQKid8}mB_cLksS@B8Ps-mtxY&li|9*z`j?e$TqE`!D@$ zaYg%F-xl$|IFm-4`(({O-)prSz@rz@@bYPc)n6iTxb1T!kIo>^az{q!ZgsL+{{B8J zXcE5;$&J2S7aTO^QWAhHF&+meT52N4-nAywK&BxiF$*KoSS^gt*;B}qRhP(7w(;$ zLL#`6Y`tLg3s* z_IdtDw4^wtNqKhab$L!Sa&96;QnH_+g&4=ojm_1strZ5b2W~*gLGFUC^4XM~b-`wJ ziPoCIV=PH=mRnM>up`IbWYjiaRF*r@Ui4J+>t~d>jb8D z`I4Ez^6C|Jb+u$}Pgod6s5)-Wn8s5W4V6xv46Wd?H#Y(qP=ui8YIH^dQpgElQZh{A z5yAm>c`E}UoEt<_4v~DSXV`@pl zauzvPsXGi8j?YgbkuFDsvqE*=>+aq{sU8NO-)c=;AzK_Y+0j~u3|DkV@<)-+kcBg^ z8zN|UIwcq(989eaDq$i=DPxS186H7cM0q8g?d~4w4YgG!BmCLjy|X^!yoHYrk@^Ark$W(cxYW(EQ?#?L!$oI#w}d1C|H>8C@kg8Myd`5FR6$~a-XiZY}q zbRx_tTFsgud#!sSky|!&On%GH`!7*lPAjK(DyWqaK!~fZW4M z*Dv?gmO$q0p+e^-$4w}w3wOKusid7`W`prpy-UhEOSsVibJal;$~`lu1Nq9&ikhX{ zTUb7}!d&lu2@WG?g?OFtUL6?9pP1#^2&ZbtM-$M#O-RRR0~xyCqp>9wCag(Gg677F37h=f$>7^@*Xn|!`v*S`pLAc z#?PxmJAyX8wq5OBqF#T$b0!yH6B0t*jy|2)uk^+YN3?Ltk^QxVhidfh?jJ9BAP8iS z&!*gv6t5E*?qo-~YPlqMpV$nZ>&9mjxo(sL%^U{C&mu!WDzYn>3pA19dpm2{*O^CY zxgB-%?YZNaUs0C)MAJnKH9#~fQv&^Yexj)I1%-po71GT6ZY6vNMT@Z{#0F>3)_E_o zQ*8J1#)vC#B(1w~K~o$)Q}1#k4mW%ViVrHjE>x>!ri$A`r{)!$I%*{?q&Hc;{cFS_ zO0`nyQ3g*bQ2|`+H`ISjp!vN={)PeEBi^gzNfx;N7l!ID2#F^M&R0|P&RG?lauf-X zRnRs!gU)CcY6XsUl+RXU`i0!CUSA~jXM>{IU0oW(6slE*a44YR?ee?v8Fj?flGj+# z_HG5S0w8?Z=|b|ptplg-)fKM1-BSW{MfSws$kGxx69f^$?pG6DgZ`9BP3r*dJzC0c ztrrjzN3A#RnHnAM3FWdX!&VsNolBfMwfrlFsgPMU+=FZg5=Or>)DzVgOQy9I(_B@< zrV>+-EVJo+iaV7o~r!&gbM^s2p1P0ecKrG&gec4g57gUzmGQ;bDxdHUMh&Dc4^ z7*;AZV({*O&H48X)Fr@M<;Joqmd&cpzS?9kwIhFH@p`Y>)%)hQ)OF=Q$eUZcULfJ6 zW-Uu@{+Ywx61Tc$hi*Q$25#dKXWV{RthVQxFX=F88~(yA z&Z2w%icU|G9Dktw1lvQerX41GZ7II>n+5qq-m0!hBXN4u#h6ovcg+@D?&yv6+#pqJ z5}6wJCacrmvA3+~$Hv||xUws8Xvd|Jru4Y>TtjNU^Fb?l|9msw#=+(bZM{{#jnByz zUlGJztOFyAqNJwNY<8Tu 
zC;O4R`;oQ%!Z|u}jvu&6i_*)U;2`x$Q|dy%rzdrWpPA*KeI-efYP>e9b{{1@8+H;Knb^nq0+Xeoa#ER7R2Nf zPjMy-28~6pE|6~LAO>1G_g4=1^ITf1mp^3gWF6BA_IguDo@JYJ&QK<1yby!0yuUZY zR_Zj-#SX!gb#E~B5@_(e`XZcCZEf@5HHye+0tMyald|DcG1q)&O<-*RvFQyP}s zQWyNtg1Xv-2e-ZGlA~YMHr3vlol#hyeIl0(55hNRLq>9?7(PzAwVuB1b*>e==mu6w zJ|lUc=Jv*RY8lN0p364lxyuzc7t%XbXoYTcNy7CBRbM+<`e9S}{#YwZdr$hYBTfMm zZT>D4j}kLqdbx4MeSSfbX|rs;^8Klu0|k*rI{C%?5KHOdI1Ka0++6<5X=74rtY2J! zjy28oP+qvYoYz^J^ez=ky8dem~x6Qes3RVYv!ANd_Tl zzQJ1M{3LVdONq@D>_<2`qWbfhmgPqk=`StwKTA;&3(J}tZ7<5g3Ww^#FQ%k|hcCjy-Vc2+k3W;j8oF!q^y zAWt0sJXI~noMEd0cAhp`@*7T3--L!|Wjw-3JjQsEEZT1C-#()!#+DIimdVhiZ!$?f4L#o)eeg2Q1OB^Tf)_eaz@vC zW7Ll)KW=f?j9;fHaA8?4@xr{^pycRTSf{iLCr5cT#tt~1BLBRCt3*=L>?2Ea@5uS& z7IXTNS-JRELG4wpk28M6A&8mn_adsE(`T(_{qm32kdKx{;wGX>j6GY*eUu(MYbySV zA9k#k-=DY2eWcvCZ7O5JaD%C|lDfCz(Q8$|wwm;KuMmd&jzJ@yKK0raljoOtdMX{f z@sWq}4d;Eh^RZqfxZn%g+Y$mDC1mO4g_OG+mzb16tt1D7>qs5$OBK6!UcYYC$4u(u zDeiz{9rKl!>`giYJ8OuzH?0~BBmHYBZt07dE;3_ zvOSl$#=A6Xv+*UUOa5c)#Ct`?TJ>wzd6(qtGO(}Af7qV~Cf`-W@9Zx@Fq8|8BB?jnh64S>#GXJMx_L;|$Dc~Vm5Hj*o<(qrc%N*BIjpaSsOGEF? z`aLk&rFG$w_ZgGfm^Dd^CR?`)CGK{cPoE(u?5~bjP%^pl-5qqCWsHNytzo}M8*n`)q!hgRS~@fvG)*Z089H^nikYY0N^h_beVG0?;chpGoAp!5!+Ex~?^KGE_`f%ulEjQ9n zAV*0^nK$2;;t_SY)I9I=-hvC}3As#X!ls9v*H7#b@paHkTB!QKtiRi1C)`!fz@+#%F9=~o6>u9Nb8t{9dsPGU(8t|)ia*yWip za-t&(T=7aHeK}*?>HJXV)Rw*$EbT`CfZ|avb?VS-oG6`VsZn4Cg@k zCmg03V>)lr!`kGPriA0_r;kr(4)|3JMY(33s3$l}lQyTTS5lWz_6&Cz zk}U1roew83>1o;kiUhou>F~RX#fS+Zr*p)J#mZBLp|zF`g$(OuQnAe8^H|LNsUF86 z2;S^KUjMLS&Ep3bIX_n$2NL!v9!oAKtikIKhAV4I%PH>9s4H1R$M2WTU6Z0;Pkz$T zJwCL=;TM}ohMb~$ZejUAQ~axDPEAJYqmJBr5QpsU(?&7YJ9~T@4zy<$&5-gSbHqbr z0av#>6m~SaiJO%WPrG6B#3?&L(T!ng|D(_KUcB6^t0y0`s*KwR5!s+^EZf}E^deO+ z&Xo68Xy^r+abWucI(LavEYH5Exo=1;p~9<5H9jxawQY@Q8>;14v+*AJ^RL~dq%GV! zL*h-={koFB-AzZM#Bp^B{x&Y_ap6zxKI=_>!qFcI+7eVVzjl1}i`ff}-e*$u#zOt@ zaC%s@2cUs_N(qV71;Fh zur)2QyD$A2H!CnnSGiKawm57rbU_3OJA0b4i(bXz&FjQ3!#QYr18ZryW^k&Sv7;j; zu4m@W&?$nlmrT%oUPEI~n4*iZQB@90foNxNglCqqZ-lIEAL<(N78t!q4vSMIY%9*~|@xzL$?^rWJi zE6Z}-(d`ZC6-IcB(Rzm|JD1Y6!!;RELheSiGfv2Dje5QPEbG+nJbIufiW%j^ zyWSoxWbsOZ`uNfGU`qdZAIP!&8_T6!_9&p+)Ae2eD!Vt{g3&RExXa7tygE8<&MK!y&B- z7n1@n`^>TO^6RrGt8URBoZJW19+=MrbQwV7q2?g7gTB0a5%_lDM|&K)$tU{=(e00g zyyLR8bYW-PJ_~M%Vpbj>kv-*G%KhrmvHa1T|NHsB75Kju_`enS|I-SPsz=>KMT&#g zNeKuZvo&R>a@{DuKSwxgAUr+ZDyk3QOjrck2|;O2Sih)P5ad3UR4d@N#R*x$>3rd& za?f18aE>BmgE=w6iN>otl`Gw>=Xxu_49<&<{cb_~%Avx!okBWv?sfif1h;+~ouqvR z%#9JBix#oKA)Nw(L6WvCOK_A&@D@;lg~E6tO~|Grxfq79Gy^(H5wM>|?36?^diqQJ z&z~-)C2w|g5lu(!XS6|DG&ZjNd~#tk3BwjBW&le zoN|)3aH=7KB`cg9>!u@3m4)aPbx9V*e(pjvWwugR7|b5C7L1Q{zgA>)`=W!3{l>c+ z8h0vBZcmi;MGT4$Ys`61*R)#Go%9^}lps&zPKRG^On5t;zA?dZN>O)_TLXX9{Fy&f zo_QfLqpB|_IhP&Uklbkl*+ebAVB~2;P8bq7){|2yX)RW zI7om-og?|p;PeIg277|llqnie3zdF@#2{fmkDfG&oE~X~O=X#*Zx&5s^E>P)NNVSV z33k%ZQvugU@Hup}dcJ@%g)SLlH-<%vCyJELsg#s75W?T&lUbHXN(x$H9XW8W>xK9< z8OfciT%l~@Fe4b|v>{D$)Odo1p0FGWPcYjkDojjW_@^ZlO_g6MP}K**Ib;I=B758d z$7BfvBZT3l;#S(+(UE?ru2)-SF5h=fSrV!lFmAS+H%z-C&&dc9S8`~q$>bNM zVFb5{F`f^BTp$dGShL$CDBCSLi1A_NQC~}Zi;V@_jMPn>HDjbw-BNdLlmPm zI*qmV@`ttYkA!65V0n2d5-~PTf@+&Wo}Q?$TtbOMe@)2{WH|a_1Qif4m$$>PI`lG!{Om z$F&|3J}Ry`G_$9_tL~wdVHm#h$mwIDhindpgXTg_5Yf#fh7Z96Y8ClCc_emxK1-h@ ziyXKu%$Tn?V50%P3D;zpfZg^V*y_gEr&&KO8?zfMAQ_fg1`J+!!Bl(TOOS1ENzW^= z$czjcY$`oKAKm+;U&fc_*$&6nX=i?0nZ9(Rf9!~E|#Nr8UYtjl$HC}awTqqQV&oNNrCx?`*8-#&u%ne48a5!a! 
zvdH=bdRuA^tf`juop!$0=Ibacgj|SAIi_*DluVPJ9U3mbd!e-^t0eWviL36N zmPzaQW5ts{+rdZ1(D98UIT$z^ax`{gCR*E9KC+0PLbP($c0D_4*13!%Us4+|M-$tW zIy#x38&)K(l(0#G!d0>s?!jo0vtFA7*C<+ZUzr4QRuywItY_hbxv$B+?n`qjf2P}; zC3^ethj!sonO-9vjNTW0<6PTx`VFqbb}%%T>~Oqri%CvYt~v6)OhL>Q&wW1Ii4ACE z)$kyGGMi7ftZT^EG2Gg35e_9#O}AD|S<<5McFyXlHI~XwyNAVxe?>EO3d5_ES+)p$ z-V4$JA9H%fFaqnc!kc-q#6Wk~^u0Fj5dCrsl9p^7+x)G$uDqMB4YA&PJ-Xj!>Xf2x z;p6=bu44n&{n<#tk#q~Q%|uI2(n(L}yrejeW@86F-tDNb>&fca3gIPl+#FZ(8dG_* zi~cu8={>(bYvh~i?y(bUGQdj4r`UY$bv2QE-4m50XUiUDxP!08gO1p&OBpQT%<)wTy~y*G#q0w5F(N z%MRQ8nU>>PQ;lf(VBgHavlCBcNBcZQEeet{$Xa&(7UTfi1T6lP?I$Cy-#N!5THr1BRSR!aw;!eblFh3+`~nwGR@OP zxAK@#e%vY)3!VR68zaf<-JgoOidzv_aJ7%$lc2|Y8h*88ih0Xos6G31zp+=R_6fak z9H{OjGsoH~pLsfPPjs9A13rzZE)|WxQ{csQHe<=$FTCuxo(FMiZXns;|V zoq79r(KbI>gi3HuuYM72QyqRkWu{TNJk>|uD)OZJ?sbgtEPZ!^b?6P|>G<8z-IxlN z{;8>rp4EzMzpz^K$DLcJH&y1hXWE4-b-S^V)yvIYTlSSl9n9OE-Fr{;CUaBgYHiVr z+xIn!UT*%F*qf=80-uFf$Cxkg*!Mb0GJkhr?@OhtuCrd%cyoNm_;gfF{^R1_0VP|v z*}&>Pb63Vbe$;fnnN7y-Jh5{bl6gbtGPdVc*<|d`OSjE9EEB;B&?7@S44}cil@ffR zkkvigDA|t&^XcV+H3l{%c@O;}?p*fm6nfLFdW2539ekkyyTDi|>^Be5UEY}^rqz-C zx+1@#eap#*#q<-U9jP8puC~ty(|mojf^~N?&7*SN0NtVJIIf4If4({PWVHsTU`QZt zki@6YCXM@Z&q$qRJr-8QO(Px3->buq2AaGoW}mo|anGmIWZektuq>vTlwjs|bji%R z(eoaWhSWGE{*Hv`Y^R^6EnF>2y^0<*pa9GfryS!nLFB5M=r;8cesYq zR&x#x{^(-!^}$dMcSLgCxR%bCsg;fIp*Eq^{%>4w=F2rJPIY=+=(?~D$$PYvY`N&LAU9%jp# z4nol$o*=-#+A=A~5mES*AE8=49`%FV!6(*^e!?9;2-GfkrL3eNs7nM@Y(qvA%h<}) z`j`W)#JYXX)pUZ?a8m&5E?Vy4y0LXO)0WpUyp+PY*l3T4L~K4R7|pFvyb-58yveWh z+;rR#Za~CJYpC$~=(*{-v7#c0JNyb~=JtjjA3qxXHKZr7WVVZkdPdUb&FcD#laa2` z%?TiOlHXXzsu8zSmrS0NuvgBZp3P02()C9_VI(LHSKnlr*^Jn6@(Ig29o7nVB#T~e zdr)jNsO2?(lyhBcrSN*$EU8fnd0}iY$MwcRnNZ5{Vz-00CYY|})3)cB^sa)3MTn;Y zNyk`1JCp&RnsCga>8)n=+0GoSTE!iSzALNE{F&?))ARBo&%AeV4bK&uuLRzu&G36J zJZ5^1_bB3uTZX43Axl|ay0}v&pUxt|J74Abr#N1YyM_d*bnIF(b`Eoi)b?Tu4Y|sPm6_*d zJ(eyHKum?LoK1WjT-%*bP-Cj9&!lS( zv~PRv+|{qM4Y_528NzDc@IBZdI;42mFwMOT-YmJin(JC{K*)|?SQC1 zN~^^u^R+p-eRh~>ye&lBC6$o>LQcF-`aD8g^xwUkCNqMvyN(?{oWwuvQHt3TU*Z+` zpu{mS2b`%So9S&l!=d;0E|)KU>f2akH2Wiawqk?W!74GjZou`;sz5J%9 z4_I;V#5ePb6xZ|~i?Md|)!Y!&hQ0di+S8{8@~h}~mTio5(>!-E#Uc|y#Xi}v+;{3H z-rZ-yEZW8U3OAc}bV#{bIu|z>K7~CXFaOnb`S7_0$X%a9Ly_l5)Jf~}YFeIa2vEU*??jYnc7LHX>1_HNFbq1p&PVXL)3AZTSJ zCTjU}=cD(OXj=Mf*Ix=dZ~;soNoU$X7s+D4yEWgoHOCw=*q-h`aNhQTd`iqD!o*eB15WI>z+G zUOpu_@>ZN%E%%^3h7hrB`&bn+s4_vf+0Am4EV1moI`Ow^5LZnxM=L|qS#wn)$H4d7 zG0~gsiDQ{SemenL`+}cW-Zxj=C$Y?sEo%H>-skto}%N%ZYm z^gVIKd=gyJXfu6#j%6H)d1Xj9DgUHGeiolMQz%n89bMWxQ2pA~dQJXgo010to^Veh z6yYwIX`on+tOyuVt@oT5WcbzzrzYEGtX;kPxh!D*Of3<`G|ho^*MCVG4`ENgdY#55 z->P7qT$2B2EBD?siXrg4TI>xPk80W$98^@<)PrPb`S}aRlnrF(W(G*&aa=9U&O^dD zE>@tJMG#hbi0TMWdy0CHZn>)^k*xeF7V6ta*4#8%x@)KNZC316u}bUi?s3(=ar09a z&DObaO8*n0Zq=yINgpHbx3a80XdZ7`aJL7UN~uL2F?icN7%(ivyKdgIq@=AAJJKv^ zmtV3jDu1)lSh+Wk|Z=CyS10iJAkI`?(@1AAeo{7L*|Q43c@s z@YCu0L8Rh(mK>f>>QuARu6c=g{!1tOATu>tTu6BtWpOz%$g@x5$KdAceZ9?y48_`9 zDLYf9O6MzX$M=?A!F}^>VmBn~tOV>jw3S6Xc%08UgU*nJS_^TfStuUm9w+$ss*RFT zDW@~=e3|XK4&Bk3hGFJK-)6|LAgEt$tf*GgUel7tg zVNRo^P3t_FN^P&1hcH(8CUiY`psy^XPEFg*ZY0ll+5uNn+8EncFWtOmc#7ZlXkr&O z@vj?=+W%-Y!hLloFR56XGt+RVpW9dI)?~}C?P=tso}^uNNR<^ga#QR)P$IKEuQZR| zl(i)w{)v#=8N@J{uVU0^k7UpUD{@qzc2dzLfPN$N2)GlEC5 zg#VkJMv9Js{s~XNt3=VVgB#c*x>ZMOE)p54%PUofA3lcHF_lQMtCTYSKla`{tf{11 z`0Z|6?RJ26Kn0Yj2r4Kj2*?;`8c7$J}_2M8eqNJ8ct?DxFq-1B|Uz0dvbzlT58Q?+*Os=bS(ve&BLTKfWv*245V z?%rfy!VM@4pR|CHrSsk|Dy5VCcgTUXTb-7jo*Ca!{g>3SH2{+&|13OvUuc%N7QuBp z4GjfNSL}P;Z=5^ZA7+dTxR6(1g3TiHb*Uv`IPiV17lUYVx)(cG{KyyeDAC;p-~Y6h zQSR@FSgzjpEVUK@Y4bC2-vM=m&aj4#lLxN(JXV?)9V~)EYB8)scW5npdEYlt{qZB= zf7;Hn_Cg_k)X<*g{{7S3!R3eC=AI1sIy)qL`z0DZA 
z;%jJj@=o)I37O;3^PmN@M}9n3#!jP8blU-^Mw-d9vLdV@Wn<`|k}VrCmr)8erwz=B zW%hav&!~f{G}duy?LLX?oIMG({_EB9K=HEzUCn1zEOGO|kn;&_nE4J*FqT%Nf6bAh zc6An?B3%h1f9rRF)y(!K-cU*PnLG5P@(!6Bbg6W;iU3>xEz7{6I6r#h;qk(b?|NTU{*={r;BCr z1pt#&TKL1VP2Pd;{vJS}owh-rZmOuha|hdIU0?jjP3u*5VgJwcHnfXY;QfosA}jZ{ zHr#a@SAiJb|0_J9^w6DlG()0>g?A27sITH`KPjo3EM=H=Tzn z9ugdFWA+l0ZrTjVL=CP!_|lO;4uli|avH(xC8~Jnp69)*BB*%`RYvQ&-_EfHZGebp zQgq<`8tz-a>kyvy@8v@faLYD(UaKF>iD@O^W%Q+&x|;zHal`cbR9wiu=Ao>{qT!oJ zYw|m+)r;cJ{>pIouA4rl0pJfOuMe7iV#Ph*|GE@YY@6-E>*KORdCWcx=&-I+WyC&n zl`pzipoifqUbL#PBCYk6)%Lk+Ix6`R)Nz<)hA3CWnn<~6d&Q&iV6hGCC>?p$zH!ZEDYcOdO#!A-kOFS82cNu+K^Qqwl{3+ zYsBbawxEB{&Tk-Py_3#0W;yOx!M>iXZ>6@zwb_?mg!}E*bqs(r(7ztJRptTZ6nC(6 zic`*?E*Ag#ZU8)R@%8s{zwoq{i%EKOwWNWs1p&LF;=^_!DiDPT^Ea%9K66nVyx(YP zANCb0P=nR3_SEB3!G~aM&)p|m^^y?be`gLKoha2dNLP%`~w>@@^o?_5VmDwewj7lj(SrZ>%6O{Wyv|ZEwuu0s(0IvvJq7C zkHUwqYNjewQpDW8JcDqPTEDwf{E3L+1AmmAE&$ZFi@EE(`fF5D;3u6y*4DvVCycAx zh9(j(4YKWh{d6}h#RbQW>DJf0Q8W27{cdI3ryi=*l_FY?MA>=s$j^c z_aA1h5PSfN(LI+kJr`?WQ4d(^QFePCRiZi2>g)r{pU$&`hW%uOO#*2;wy=tP=L@l2 z8N^%cfWiO{<#nP=hcx88i#&JiWCtBoa#f|C!<(~ptGKOr_$&4lj~1u4INJ|20;kNJ z@kOS&PFrSglok6J+lr+@LT)?8;8M-dcL&D+6@h}ciWv}qD|_@86qIjWRJ)^AuixF6 zd6Jx5+gbx}`5MZI`+E&qyV3IlT6@~Kb5s;w#@X1jvNX3nXj@nJ9x#+8ScBGF@2^2? z{ek}NC646@y4a}ufe`DH5Vwx3;60-YbYgi(VzC8qh914;cO-OAvLbH>>rdtME0 z*f@9B);p{Vcfy#d8Q}mH}w3k%%K3O0{YAOMK0Wj+(Oa((KQE zUN`T65zqUJhAw?rvbv;OQ|5mj1chu2Qs3)WVj0*SApOaxQ)&qIuVSD2sRyJn>}~7Z z+dtYecf7?os4~Rnz-DxBH6?1z<$yt0TVz(*vCPqoCe_fw)ca7e4DBE1wa++H6cTusxvRy=%whs~d$_z~HHL zoZkzgqe`P~-Mjpwz9fsuQ?^-~V#dr1zD^fkHH>}r`}D9{E^4Bu99E=b=12V^)wvZl z*ds^kHTvGUY1BQmFDomhW8%g}FKwOnw&N)WQYt(DUTB|AdALWJV`JmxMWxr?p8gKo zxo2rprw4W&uD>O0J`H()<2{CFH)VL^V(}jAV0^kjGu!MARFeZBZtvfo8u6-L)j9L(Z4@tn{p^F|gm z+l^R%IjfL26*gSSpC<+1Zg$oQ2=qZ*Tz7?mxpT54zNSmw_cM6?`xbXWd}!isdsl1k zeJy1MRxj?S6iw|9#qmO~p9rbv+$a$@0svN5USj>9=GUvu4FoyEOLgd=?v&cwPY~BAIG2sN zC4Y7Lx_989B+oy->cmuek|c4qmcB;cL}SPJgMw+*-C#@RjXqjip_W}u=Q~>hCKXvc z%}o8kCzS0w8^-(dHyOTn=bg#^&K~ap5QtP+CSzlQ?%}jHihtd$V;yrbq@>J5g>kgS z*#@ ztNIWzU<6w*toT{dMd#H2Lgp{S5;SZ|=|S(3Q#}2=gK#C8Z{EFo*{&i-GUv{)4{Im2 zBvc!)%X7Eo)r}zA98~Y$eCtU&{DAkRWe%65VPk-I`Pnx9C#wEn+9D>26P_ZesEf;? 
z%6pD`P=J5_Jf9^%wR0O}RGAi5F8>wH8j(-1k2h`k2p`^{XQwPp$oM3Ed;Jz8vom3M z*by#vKQZsx*FXxEdQ6WUx~V-_UP3Al{fy1p5!n3I#_DAb)_B=YA2IGMvu&T=3=R}y zO~P?=?gPbFUdoczY-y(r7N`{tQ{G_zSy;7^A~-5)3fv&Uh_Edz9>kqs`X9!0mLeW% zR6L@7dn&`$xERFpp+6WJ3zGMmEWC@-2PgG6NSJV80Ez^nFFnl|KE7JRkj`NVOur@m z)QJ1FD;|NyIWv#G6WA1diG2Q5HGZUvG%s4)!xqNNtFFU^BLyqMb{25*ts*Sa3ngdV zEKx*aDfUUEFsJsQRzdbU6d)WZxdK~y^L~1v2f_8PFD+K<`V(fQITF{#%B>zsNTQt> z1~VLb6048DwZ}W-j1X5GKOd9{s&DCvYj!O!c@I}|!rNt){$;dPg+ENelk~!NGbsuW z&DR-wkropbnn#@@^)!&zD9k7xgXX>#EV-xcbeBzjyC%w{9TSWq{qcdpzyfMz5xA;I&ARM-DKr#4fL%?5e~aYp2JFP7?AX8==xSAeNLk?7-#uE8;H>sf=JcyzCE47+g5LnQ`#38RAZc39lvqBE<+J9hc%*-rP(^Iy zFgz#hYT9nm$g!=p$#qjUPPbz<3U-W+*#Cule_gzD^Rg8w6?Xc8evbSBw{uXStU-$C zB$R<3__!6=Rq0Ozr^u)=DzMtkGRahLK$cwpv@9(IUMADks{Gef3k(&TJ7e&?!3_WR z90fQ+2fKZ$#*;=KNKg2rE*wsafmzJbQ$rThc4yPnc*KiT?l5dl3b9OQi5#Qif;TI!Zq?P>Z4F>>$H z9HCk+kDB!uaga_E2IUjS%}Tp)kf^Pz&o_O1aQ~fvz8CqxTv9`O$0F*F|0nBO7U@2f z1B_5xWzLZZUFT>i3IO7cBC;0p?>Iq)1B{B`wB?oj<{ybYAv=(ZO51n`QCV>|C2-#?B z({3O6>^v4r5XxmJl-NOT(1j9HD ziRQA~<9F**P<0WNR!d8>%TR!&aDnTftWT7t@s{Kfi;~vn_}v2eY6dELMUlHgKBZ65 z(;zM#M+#=h@rU&_6d5nhLB?l;scEuuViN?$$duNU)xI8(w<}1-gbiV*A%)rsfi*5} zGDb@6KK*}0M+f+6|0nDyOjuFjx|W{&31 zFi9`{4bilnk=dW35%^IH`TlOm>U);lkZ7u_@Y0JK!uIAAx}g3%O5(D!Laa2A;gl{7 zwYdOjewb0`Y|D&@j7I-R_rM<=i*=61TG5^4Q(((}lo6sv)G!#cC+0csP`h^^=%E_NN`n!KMb zV(}OlxP{U;xUfCJzPM<=fI`!cY!+s2DWgi(Ik-A~9|PQ1@Q^AOtjl2S z^94SwCb1Wy9Njp!tN>qvK2|U#4d;9Q%|b-}sPKr5^fan?DGK7s(Vaa_zRAb2!5m!{ z!V9>OKnL-cDJ(=70lQ#b#;=+YLlu#vZX5+ER|8-cv7#XE9Df!ffPgI%)JPp%{qPq{l)e21Vvpb%KBfX5>e=~Sv%6a|5BSmKz5GxGI6l{#b0ekS$cXT$I7lzC2FJ?*DX|pOnuN3c(aN3X8*#lO&O4rzgdDaKI zNGmxw?R1UP_H`;KRM6lH>XV~$WQ#NSjc5v$*tN{*%CQj2D1d>VBbckun{IN~WunYM zv0W76>mgCZbpaJ)~8WWL&wPc6w*x* zxnFfg>7vN}noGkJGA~tZT60-hg~278Gv#thOpcN!0-r(si&HEQhbGGTLy9f%IP%60 zd5+st|BsM+>pvFmMMom$sZu_r{w;niT7LK|V}U$nb~XqCrxS9#(F#$S{H_%%~q+u+Lh~5kYHxtbF@<=QP)Mp6RPp<-Di# zYP^{3vjD0@yc&@XD{&A_+~yZP!_rKYyB|gQ*Ga|RCD8a=Nu&DgNMN4cW#Gu#9(wFX zeR%{|@^5aMB#}VpojV0X66n!s)IVl})A1%RhBzND{ixROod2Pr_4SZ5JO8F%ywxpF#-B{7l*Y!U;^a3iiOZ60(4Pg%Quz&cLbM!LOOm_W zWy^(!(EdC*E_F;#Eu$3Z6Kh6R9*TVk zp&RvxcRmoboE^Zw8Y<6 zJ2P#al~aSHG-HLA6hAKAn$*G79X>?f3!L4#yt(kr#8frO@%#|O@&Fr}^^$Mm`=?m_ zaGuQjTku0>^^(oc89Bb0V$)wuMrheqiRuk!`KM3iXBIF1T+&x|Gx0U)bqfw3MibM=h zS4M~h(2E}Rre8hKow%}8R%!&!ecY(ZuKsLzkx8pw|6ucAv!Yd}o(-;rG^lbiF(Bfw z%Ru@x2G{2A{dTUNtZsoBKv`i)v|YmJ)e#coygZ*zU65xTER{;Z4D)X${ zR``UTo;O~7yJo*9KX<*(_MLajsHzQl!^MOqj=Ug-e9-(@L+pU!&1R#z|uFi4;=PMh?@zu)O^b0Hay3rw&oXcD{i)^;$g~#NWDd zf+$(;^7=j!SfbZt75n3Q&JW2 z3DOPMt@7sZxH6xH!Z5@Y|IAE_h47_16%~4{$v2R{8m^0y7lz!Gp8()+>p~Gu`FN}2h|@QJCf7RTGSu+y zyyO1;n2V^^wRu#g=nrhBgyqVW51*l%%5d#iE}(v1v3@$NB^g`uri}???eah|dl}t4 zwh%I&9g8xrK@iu|jt9QR)eP=Sq^-G*>Q<-cE8*_E2p_dP|1y2{54Vd@8ECZ%8<~_l z_My^vfJ7by;~^LPh%LtWsbb`Gcq)7P*TwuWJ86$w{se2ehXvC4i1M8kCLSJhzotUZ z6cf7t8sU;~E2oClz}rRKb_=?P9Pt~MJ2$b92Vq$%axjJC{iImM#o9=H#wVl&pR6v` zgTMm_?dL1bufuXm{|-G06T6M->n#Qi^R6t=qa>5Nw~Yg*9xE*+L+b})^mSsxVtMBD zU$BumeG^anu94o$VHI@lswe&D74U?-K95S4_s(r^kPCj-pw7v`il+&cMkTLY@fpOS zMeQ251*S7qZfmu_|0p0kPr7w{aHPdO?LkCn@o@lj$6I^O1IChG;a~389!Fozm3^i$ z#(p9yl1uq-wk4Xu`rwQFHZhDd$HDB=hPQ z%6h9^xLft4SAF~y?emwQfm4AgPfQxgFvzM!tKp$WxJ!XPnRA(pDf>6h&GRGHAO^w&rV!w&-JLsr>5DC zG;D}mz8idc3Xpm*kZ8tFNH}eC+9q0$I=MUfP$2yR_LWEGA!PHR;7DIKuj$HtM2ICx`Ieuk^Am~#&$F)TlPjxF_K46lw5aFc>s4`i4a=daWOmuPW zRtEQ>XYE!zaMzYKl%({;=8r*MC}AGAbMNJ zHTba;WKO94{qQ;n9JV|pzLWHj`GIpOrqS1+j@wWW;3sY>C}4blX81_|BR=wHGtJ=w zw=bc+fNV|rZ(V?Ftw7^PD=n8YwKBC9rnMr{sva}dh>9z_?K*x*(++xn;=J|;M#IUq z`^87vnE9XfU%Rq*t`qADJ~+@CxO;j289@a>1$7x|W8kix*+;}%)F%^`KVR`Fa&WH` zp5n1+K~_Olio=ZE$;!j~*RCwss*Zbsr+yI5bWk{`qX{A=`h8*Bz=BIs9bxxmVYYT7 
zF2TN9A9`sqJTI0;+o{Ze;-KoCs$=1IALBNTqC-Lpl)JWq0@=%yt(M4Btw^x85C5O@=H zaM8o;?skfNq@koI{ZsnbTBDX0!a(e58=74Ku3PgMyc6@sa)DWL)^ee;22yYfJQ(W@ z^FYC}Q83wLs{lnL5jZj!C7Zys3TJKmHv!z^FV#uO_Os9LU+LGQ-dxMwiR^|DEs<`j zQsLkr2o)X7U13~8vczgZHMIgvrtl7loQVKCtW(o)1TcfpZRGUj+Vrz4IhF5_kJ!xY3IZ zq9NS#>et0tGslYlzR^#g+KPhdZy=*CT-czXkSY%z+J3#oju_1xzisJhkKs*Z=13O= zD9L;YBU(xZ7_EqY*DQ7L-=7cdy2_Bl!gfIIUF2wTN)DgvKHf=&raO#@lsegD=zNy0 zay=X_Ain2FyfJ@T$cm z6Eli(_T-W^gt{cy*(u?4o2KsdStxSxw9Ni#^+jj_n$gPLnkt*4f9|)Z*>JWnp+Ez~n;_`|VN($Gde-E|~_^94$P7sI;dvLm-JM?&k!9upl7L`V6v zmh`1igQYH+bVv)x1}_t{=49Urt5N5gB!%p4zYF)g2Y@8+L?lX^! zy_0vh_h@EYwDU4eK=%AA7no4dw2v_YQ}&e`xe{fRI>4{f#+ADZn=HYAo!9|0e&Y4< zQ1*Z-?~(&S=w!F(Lg`2Hza8cF4hSPIxS6P6>)jg)2PM-{;R^=Jvo~Q4zH|4FVyJ>} zPyAtMAyeo9WY&9(G8WWjBfGf->Rc9RGUl+Xt4iGE7{PoF=Bd@CKE%^^rRq*f@SU73M}(O<(nL;}bt%;Sp*=u}s|Y0J^NC-@`_(dU^oy^x<9$-HHsFMTssq zl5jOSo>-Y-D!^xZJ|Ue&LHWwZbt4rakz|b7rxK)PtG~nOSzZl#Wth9N&9sdcfx&at zvKvGph1wAhyeNr5Y$)hqdZCwqe3De3-4ljY$6|&V>!&P((Irm|l}})B-=78VLx1-R zLyaiEYSb7rO`p#^EGk%TyD?BxxOi`>fzcT$Z^!Nrx+02Vdg~MElj7X{6R9DV_k2u< zZlq2n+4K{A`h*w>e?<167DVjykVp{@4THq;LJXrL3o~MR9)-939R3>)R?cUUuyUeI zd{in3`*pn6>KE>{y*o1~zd}ie?*h?EjWV%_!y@I-Q4HxBBzZ>gUi6P%p!vEjIg2p@?2IKQ%H`YB0AUijFq>-zSZrG4P&{7?x<9VF z-Ke!6n<(mC_yCMUrvrDa5=Rb0;1h)e6XMeZR7~382@(`3cYw%6?NH*X6o72Wbl`Xp z1!4#nBR?(RGOl<`kRX#wxh*IM{tN0TXXD-$>0p(mGyzr$^&m5l!cH7Ss<4qM0Qn+z znSlskh&#}DVlazSCo2ia=7ga2=g^t$(rZho z&$b$9phVUG8HxIl;g_?#*(-q95}9+0JzTinLSXs}yR%>H39NOCrf;yx4#k|`b(F=~!his*=xNbKqW0};A`?MY{LfUK8i3v5#T=3jJI76KM_k2< zno;HuMMe{joGh22QP<>fmF8+iSg98Ot(=`!FF7LT-laqc864J*$+1!wcWhFV5S=|jd*^UIsNTT z8auq|ZQAjR{^4 z-isNC-_6=tRa3M%841?6z+JkDtz58M;xoTZdL{b7_yfVQtrXuzEWs=MetkGG(w~({?3}!_FMs(p*0T1EcZa<#rLB~Do;O173@Kn4 zveM=30V5CrqfcYGpsH0oJ(8mA4%n^nx`;%im)9jw9^ z#g;b`T30Z80<;<(5>_LsutNuXTX>26k&-c|gI&b3uD!3Q@!Fp{UQ^4e$~nBd=HB)= z*3m=QE*^6D5tuy$IV=yR{XS|S$ZrgPVhtAn8+CSvO@~`1Mgk>P!I~-p+r6_5Q;5}5 z@J~EX!+p1{0~6}}`aH$&-3|$&)(^6<*6-&Z%?ecNG_VxJ^_NADLDxP=n{t=myLd1E zxe$86V7*Q8@Q0VwK&TN;d@^S@sXx#{UUFtb^AJZR;qI3KR|2JdHNpjS(j)`+9@E(6 zj=gQrdzXF++Lre|gwUDS9XQ=sqjkwOu$8@YrFb7Y&G3D?UOD};lLhvE-M8UZV{JjB z**h{VJjW{OpgJ`)eQ+Jtobd+Pts97Ef?;|Zy$H1dYh7H!^^bHU`!;ztx!QZ+EM7E0 zyV02$7r`6wiNgBB>P{Y*L5MDFnnmm)lQ$2ZJXH z*+1<>&s$VFkcZRxiuh(`bQJpzOqsw(#TIpeULktldQj|UtT0xCj2ks9qH60FY*b1a zIfe)=-=ojE>?87~-A3=ehIy#54=hr;)QjU zcXYq@bAWFNZ9N&Fs|#VQYk^qMoEob&gG3sX<~EfSL=PEF0nL8n<9HQPwJo#uU!pHA zsO>TQZRxy^Uhvl6ghAKE+iom4?l9EATlsR%n7FAvnL-?L?1e+lAKP%3dO9IE$D_$o zBxg5bABuQC`)jlrB^_x4d&E4CGM_PK8n$+`GzU9&zz}<<4Xq*PwXBZcT=%?7^(#a( zr8M1JjATVWt~Z9$#;Q(mhK)8&dVc3w`y93=T2VT_PRb6%fM#Uf(U8`A;)rPCgYt_kM|?2KiZ8 zXAexiA+w{Fb5(r^$DiR?h8a`D*bwipepKfA=_-=d&0jSsEKgELN#xe2sy~Ux&d)iG z`!m)HZUwHEI3%avkcz(2E@C4R($ZT{R;Mhj8V58swtd%Q4~0{*qYxpx=p@#1H01_^ zs;9oCnx#ge{GmAGo%rNX<>P%u3CgFaM{4&dQ9FBAXM%E?v(t&3jr${R76J_aBB|aw zY%b)PPPF(VwGM_SE)JtZ1?b-_-qm%+_fRteXx$9-Opx-#>Aw1KgG(YR^Fm9TDl+1) zRAeq&Y~EagLY!H0@43R@mtXLJH}k(z4LB7pU`L1nz7oUC6I(V*gu%7IWCqVHItzyZT%dxo5Ltm+P!3^>iOz?qguPKQS!j%>bkI$Y&^_9B8WRJT@9 z&aDgugr4NHGM4BJ$B)_rLI^|aaJNhvPS>D+ zGp&5UZZObKAJ&=P-`k%y!~e|VpBN1Dx!w7S#mirzj4MGC_fyD`_^A6ztQDv6%t-eU z7yD7?Q$Z@%$37%wt0UeuOTJSxnpj7vx}P>Ci7U5m7pnQ9#x>cbw+@`b>i~eRSZ9m! zO}4=6qXvA1_xtNAJ1PYlN8$b63y{;BhM_XPD29Btnw31WEizh(BR)VRCxFfxwv=a? 
z(5^w+25uYDJPXRu6&+IdP1L%0kF)OKK_Q42h#hC@(L0y8w`Ii1&dk?%+t~-MxM#vq zeL)*`nR|2UWM5tOLchs3>t=*7#rj%O$(c?B_J%{s%M#t`U(a+NUScUxnNKVqhFN~4 z?d_@qNlgP?wlB|lfMprGFrb^P04c%ajkqi-A-5|djUCmGq5FF<{8vr@IB1~#rdh(H z7S`#}LJu2{(0WOLNA_xtg6aV=B&^Gex}+}>6tjSlB^MTx_2tV7!~L~9veKtfLQ&qM z!Jeet8qKi;R3n+RWH>uZ-+^ct+ky;Z_g_H`jjWIjM&)+%M67BjP%2$EdLj9;W->!b z=__OgWMpOcQt07_GCB|VnV~8d8p(Py2rd7znq__2T(w`39Ft4(9>|ucLQh$cI~K_W z;vteFEdiq56HN|uWdI`yDTq}(W_GjFvD;{jm8n6=Ki+_O0P{2@!-zzbjQ|l`xuiAs zBS$gB6$V&-1;fpG-tuo31dwj%p*SdwTFoGSlZ{m=lm3$CI)@-(I>oq>PvB0X5ZWk& zDkMP&1Z=~8e_las=%x7{HI2CuNhFbnyZ4U z9$)@!<;Jzv{g);sqIx^pdA!@!!tJ@W?nw?M_um>Tw^nOip(2}7*<@}?bam3QG4LNv zzD8MJZc0@3b+q$bx2>gHk+rT7ynRj6gLW2m+XCHYt#zgM@&RQlxhcr%o~qsoYG)uU zL%LvWP7mNW(&nRZgMz@SZVzHLed1D1gGBgN_Ey z->fHspN=uF03|{^KxBmyI5L>r3H%J~L>uedJehhMwKIkWtbt1&yH@1@iX*n_0@;KW zqf*Ih{U6>Nt};YEt?({#*(M4Lv&3|u1nBd}s#K<_a8=3cX#f74D6Ce+=Me%BB`a^W z#4T zGMT#i>x1^E>?eP1@grylr7s_jyEAa!IN=l?>lU(#6qBtqQz{Hk@i zTtW@DCvzE4IWGe$lg2~jvLP)bLV720S%Sk7WrcDP8A@7RqA4=-!3r5jZY-84Tq-`! z02)YS9JyFbmal|Q*2u{EA;N1DBmmy0@zPV^sIDpJ%I3wokOdG@ zP6ZA}6?>AdEW9(rmes7X}dY6pGM^ z9y(YOPxrq4M%_QCW5~iJ)x!JQGxg2ahrOeOG`;RT#^A3cpB+`d%q2V-s9WcMto*0B zPU3Py#l;Eb@8ez86m9PV&(+)KE3L}fW0l#RSc(`IDga*&udVanUJR@|9*>8CPi>fHzle?Bbm7Ti%|a{i#wvDcsZ1scEc zZaIT@&ON+r`JESbckba;{5Z(tLOp!&W2T+U*)uIav( zzSpkNHR;|E(Dk|bF~lV?eX(k|X1+3XhYTEcLtAog)yGuR<4Nemh5<{I+9E4-t}1P6 zcswS9gPUG9r}mmJe{^~I##+17o1&TVtPyM#UjsV2#fSd5>HBDT#EyW4t<|Puz{m2- zs}wyWD@6%)xBGx3R^@&8RD{A2B7js8yOuLl+E+-v)D30=eXQ~LFwA4f2cy6i3=J?*$D{M|>x{L!nM zlk(V7cYiQh0I1*LwMhQr1&U%-bKY=={tO`nSoU3-q#2>iSA`Lpe?EQF%c zm4W$!aDL=Cj=BH1PS?(^o2t1fh3HX;rGlLzWsm-z!}!@|@b5FP!5fx8SkfZaMhFP7N&tlrF(oZ-fxBY!gvo7A*tfXY=bH~09Tg+0<7X2gIhF;y+ zuytTpUZ?BduiO51RDH0U*#=gBptkfz3a zJGW&VytDI_>(%c^4jNzl5`MA$=A>#JNV3?cCR1| zn8Q}$a7yNtgwfXRh30NnjP9k`#8d}X`BatJeW5JZ6Wbvgoi_2;1rnAm5N!!p>QA;M zsIa6<+wFgp0v2mK<{DnBXM}kEcacG+Pv8xNuhp3$yiwx=wuZc{93Yhl~E( z+m9$GKB;P#Z!a;tyr`xvXWQ>Ek1g95?x#E! 
z|IK%f9!2f=X;c&UbD-WolQm@CzkbwqIKMULy~vEj=sp>;dGV!n^iM zgXe;o+FCOh_{&M8mwZ7zoJ9wpEzr=3LWt5uBS1=B7 zmshF|?vUnvy#UlT=>Ab1QkBm8@~R4XS+53;?Tv=>tC#qbzEkYM-nODGHeO$gw$$5U z9K8P2X*%|YJRB@oQ~v3Euh3xf#?!oiM2CIqji(mFXEGdBliFVXr$D#8yzw-%t=xaG z${_5zYKYGHe+h4fgU)$-MW5%@1b=t_YZ%2?LT{~<1BC{M{=2w+Fnz6~AH6BW7TIIt z!!?Tb)@xlX6!qywAk`br|YOiTTD|9{8!;xeKrHFWITH-)a-j+Oy}&aWF_Om zm@8)AGuM;=P<&tCgYt_T-FM_3ykvAz=gtk2p}$+LU&QU{|MFqd)!r`qbxxAcRN?}sna;c;*@;Y+P0^!CS>@{;xxZL^GUA={2*_LI?E53+`M{~%D-okuJ z_O9lyH0TSj62ic=GoVZKLMABihZn_9u^Kt2O7m);a!fyVtm4 zEOv4@=^JPB-vp4~Qt)PFXL5nV!fbMeRehmDplCANs(v<^nQn{-0VsY0{+k{?z2Wa4 zjrZ`re0YA%(B`L;eu@QPz-Mg1hv`Vmeu8J))X}p0OKT4sm9Skq{ovQDhSsV+4(-`i zWy%hLbZE4wS~A$XROry2V|g6)-zw7yi3BV02Css`j#FL*F~Hl5HwL}0(~+;!3p!T* zH{x<;hC#2R_uKy}E~;0X8a*`VeT@E>w(;ic^yy&Bwhlct+2oE#TY!yn@fc>m=QfTU zxw_E)@r#%QAdVT>UL%(56qZ>ak2(Necf>>){E{>lCqU&`9ZBEFyPFwG`oGwF6R;+; zWN#c|l(0y`qC`OwKv~tqjYS|Kf;+OQ?Jgh+q9Q2D8i*1Q5D^qnF+fmkaAgr}M3xXx z0a+Cl5J+T=$Pyqx60*GC8@hYCr^lYT|M`FO-8*yh_&miq^{Z2L-t$(~IfOd#y!MBr zyYa%ytv$7Fo8GKt0AVv3jaCmf%a;P!gkERJF}qCk;(ndQbKe>2{YruW_s_dNfAHSv z?Jur8-jcN=b4Rasp?6ExyR03QO%(Bq(Za!&td=ZCiUS4bv)*T>aInxK5cc_s_nPBt zvKlfPKyR4+Nj&KIx!!w?_nPd6`wgk4Nv8gv?LVguZKSO8S?Kera9`oR7SDH{;JIkJ zH>AbWk+Oqg?PKaQUD#gO-lDHUy1%te^Vsy!Vk5J*&M%z^f7_q4pO1o%gQ1-KTQouT zXTl)eHjOgPjv!oYt_S@t7DaWxi|!%|+p~-YdFEy4LqHE1ir=0lZ0jLGev^^#X`lX*kc%Q0Gh4 zERTPymX=6L5i0)Nm{y;_B$0H0)qIT!3?0mkeXcb$s|ie>t3I$neC+oP>XO^SItgqq zj{3Qw?$bP=_eZhWfv6Q)6Qt9f8J%waWPjWG`L*-cnKk$I9>Vnj_CatyJ_)cDH+D>e57P5sOd@eKkAa!sP+!@yrul+t0M4;0kQe&79 zu`3`%=N~>E$&4!eGRL-%C-Tl4Uv!Z__t#|7S^mbhjGcc=usXYqFG`MswHe@`>D@o7el1^1_qP7jhH!CMA?=}dLh?|04c+(@rNVd2tNhRj}$8XWDhAQWnqG$s> z#In%+j&C+@Lp)EnHgMvy2c`?Y}DIsd-z(GrY)uTqtT#JOKzIO`+_ks6rH|9|!B=kF)XRYNqZzJ#U z#{NR)f{QV$GvO~IXNObDTOZ6b?Rx4g6SN|xbO-$8{m!iDbrC)-Rg3ribZ`D=;$+?? zH=mHbdYPuyPgJ&3v_EaR2TDb<#~1Qjs=RSsPx02N11;PcNoPsEq@!j{3Ndd$Sxja} zw-7fBI>NL0OFvMx9XPR%C2-Oq5cZjt&;~dSOnUk~;y$GerMa{2x`QXG4aNnd=>ohX z>cfq>t)O$Bh#YE>TQ|4L2#J#rwL{r%JawgPw-;yNtV`ZW5#4I4AZIvUU5T|Z+s*vI zk3Yk+GJbLjmmKMSzYsaeDqvG1x)UP0)s?8+O z^VNr#jLlb#6bS}eX5yvAcL(O(A@*5+4fN2}o-A4QPfwvGTijLG9RgNdKNw zTi$MUu-N)XQd_FG)uL9i9{o#=3@o}rTnJ}FChi~a%j7t`wR0e)fhW3^>y74Fl%2f} zq#m}26%HG2t?>`(U)t&o{yvbs$KgGt#l+xZ!{*#n$Aqc=bMN?*dtJeClFnqqY$xG1 z*x4a7F_>ST`i(j(#ghQ@W;5esV#pJWmv1;o+oRL9hqg0{v#V#WXA2~Hc$~p}T=+hT zCq_Myc%t1Wi6`m}4U&z20+Xxtwyn7Fy48iw8S#H06OXGu-mAUy(&a<)Z{+2>c{_05 zwmI)P@f|YIkNLK3378rmnruAu@HevgkkPkoJN}lu9ftiPX9S)BEYAPD zc6x^x@7U@6`GuU1!;jv^I~*khFJaB{|3I?jPW&P=D>s`SyzgvlN!Zr1>rVP_ZzIu z_kgdPgV^{|jifcS-Qg!ChXt#W(20&anr&?Ym9!GYj)kY){Mp&pna@Vd%7pF_bWWH2 zykg6a^r7gdX}@|-iAqHP*BkfQz5ba&)oxgP^J4KBIC5Jhv-A2#~7|J1l< zw<%XVE;riv6YH+#flwf(s6)cXhCC5IeFMpPoVsaExfEB^VN2G$x-H__Q1q2uPj~( zAYgdit@8DnbKgbVvY?dInI7tB80Cc12TFh;M>dY4eX;O?a5rY z!f=^a3v^!uttmT@x+CdC@csP^;A>NvLC^Kjs#7B(M}eTDeQofq+LFY;fV~qdlLB?v ztAjOuZn#L^e5vBkXv*&!q(Z+MLGc;#S#kqdWWLI5so954o1}zN)I_-clju@vdlftw zUtpj$5kAmw*%#ImRwHN!PXZI+HG-5<)OyAm6}+9$N;mxLiKhEAiQm>d#7*XSMG#>V zDQMLCUD|Wot&_TJQ6Z8~2wqJ!`=oFEs&kiht=YCHSXk@Vm)N{U-g@$GHC1V|vb=Rt zvu*TDTI_Y!sNE#gEU!3d_1Jp5NqakZ*hs9woLpPGCcS&nSGj!!Ufb-N>?WHPsl;jf z*sm!u+PX7hzV4`lo8y@|l*ba$kf!_GL{VBXyZHNX9QDOkL(jnXlg=_NK{@49R)uUiReZ8nnG_B-+gV<@wJu8on*3CTVnV>s7lmY^i8^<~el2dCAWq_hskB;Ou-buiY# zG16@adO-dshrq{2_mS~;8wTBzdEfNPw-L{;-@Pnhu|t7bW10Dz1LB82ALUxMOpHi8u3f)Izv<0U(}$)m z*4=rWQ}&+G%+dJ;&n2y-Ks~l;a1m!R7FX#T$ghJ`u1^R&$n|A#KSeW)%H+dtS@_vw_H8Y_c2Pdt+PPS!{Kn)#J6*$ z5;&=~ott{Njk;u!|HQYXJ=xo*y*9r|EuHE6I7k(>Ut496m(y7>kPgD{hmsB(l+CK_ zmrqtZCb3idTr<$?WYg6v7CPMa{IoZuI&> z=uRc#<%_=%E1sGu_H~8PJh9J4PJ(Y*Cr_Tj#|ANMD-4; 
z6y6@2*B!r^pCHljO!KH&*0WbFddF9JD?QUJ=%@ypYOl~G^pcrL*#5DXh`u%&$#j3* z*7J$qg6^*eHO0jS^)avqLPMY{^w|W4$AmV2GPV&TPqOD_rsLi{gc6p;JcK$sQjfn| z?>OnaFya|v=iidCppom(5_c}VE_V$iv#no9!n4F1QK{b{!5T@=5+y6}c7w~pvu`)N zPhjnexUbrmsk+oU(1DjU#)`xbKJlMO@3S^wZw0I@_o$Ai#uywrEBWYe>-qPJEU1P2 zSzJWwWC?%R=74HnD%wDF8fV#j6KY{lvz10^QZ8J!weib5Q!WT!?@L_@j+MYmExdg1 zlN!C`K3`vIfH6%w|1;B|c573bWQ=oT&(C92`%?`K(n2J#odwF-;a;i)E^e$ne|?A; zkg-BO*r6sVk>u6>vbrm-zSU|(_0( zd2)SENC+%Pzb;TXy+4H4ayW#SxtAt*{%BCJDD>%xnf)Ip&)DK}Xg{nAo9*4Olr#gY zgMRJ}fgj%H?%shi8$0Ft_)ht+Qj=#`t&g(&s|xwxcu?2L-7QUrAK$^zIABZg5yYqx zHJxL>!QgNlxE3J9W==hS^svD`CTJe#{Gwl? zU{o?+urK*YSxpQ9XOvU=FZ?Iu=a%X}6&D&j6${MP>|p{kBte%iP9XBd3nl`oVN*<+ zkVWDTJf;DFD3=68=Zbl`q_zX)B3E{iNW?5#Nvl6Lpa_69_=>(2z(fO^BxNvq)+@LuArPrx_8SYnI;~nV~Sc> zv@*s3_}ve$IYlH=*EDdXh&Np1O5%L<0$O~5F#$=8p^7$#zQF@xGuq585S;dN>y^YNCn~ zhT!GG-d37;7%wh)x`3au~%&_cd!UUR^Lg?r;4H(4-GKj@53(SBxs@2Kz zdSW|gL}<38&;G^w7C0@W&at#eXm~S-w`TZ}W%sJ2^fg^nX+OclgADa3B^1|-y#lpGYh=u02X(sffs+i za>GOq52p<}e%(sCd-cqo^Do61)HE;m>O}7Gqg?Od*z3U9K3c%5)|ip=d_~vvD**Vt zM!x0Z-pLWywk9O^44=_UnrY-U*jb)4NFi2hVErJc`0b@^Z&} zh9=ZjGR?#-u1AD{6IJ7`KbKu(CjgV+{0ZBCB&i56%U0sMCD*W`bHh2gH1RjExP!|kUCL*QCQu20on3$0I2F90L1`ZW zb%Hl1P6)Y;U@eUxEcfnAP-siS^GqV=rDz(w+adZE>?M{ze+TvYIQzMxcm+EGf2?!b z!?W>5T55%dKCg(u=Vq=oX9@+ z0KBU4F+qHNn6jgT`YjmOkDFFwZ^vJjWkg%PfeT%@9?vea#oVd6qmH3eVj2_BEVlx$ z{*h=iRcxB52Xy+T+|e)*pAS}{N$xm!fv;j6(1h`ql-=O-!Sa`jBptb#+gtlfEp-?p zX%-XOwB}azU@B1qyg;HzYIFz*^3y+@*>=%4VII^7g*}~P5b@BqwELftIm0K=cl2n?CuijHZBZ)Lb z;MXo4*i=4S1_WNBV=&hoCYQ6VN4fbV%Rc;V@i+=MbkfRCba>R{VrTU{E@tk*Kowe( zs`z&1zMPWU^ECf1c4tR1TyNBo@7Pwghbr8&#!dKwraKXIqo~~}Bq)L>qrBsw}HfIAPqH;1Q+L3D>uG zgkIc_sJfbQuVJBYBOX6J5Hw7iY9n#-d#8s7MZErZsY3}`sU5v92YR{s{)bTS%vX6o z+kEMXcEbtY(D4BS8bEvV$Wc;8NfV2TxH%#-KwN?W_(QIw@e-zJnkFjEya2|urS@ND zKYKN*3A`Fr!;Vfn)KX;fgRn2@)S8|+v&T00oXHYbSaJAG6mZBrn7gVx{D5s6U8c@_T>9$XTGpeFE7tYpzb)?A!8WPHtE;iszB9O@tZAy#@$n%N zNkn4HeUhe21z9e+U@RBcWOj%sa;C2lGtvE5?_EESv;}Mm~{U$g}WH#V$ znH6<9nleX8egnCqB{M8=J1Bol3TL+BY3yMtmjeDL!TwC9Yl)!xMG3It%A0V09hU^O z0zDjj99V$JUaSbls9^Pl+;JwxPm&Pe;A@l_i%zo;STF{s8&#q?jm`>TteX~D*K*0q zRH7Nz`O3GW_%*`GeMjCiwYHUrgGa7Yr&sb92aBc-r_a!sq7JHee?oPCXfD(d;azsmO(O zd;|*?*m9n}2kT}z;@P6v@#D+1pY~}rh83=M{h@)MHI$jmuRPhGEPhl5*%1pjf)-NfWO~D|iK+z*Uv-PspuH|8p`q=@KBBCDGw4}>tx%H3*NU% zu|DA0db7P~qx|PWvy!5E@rnYnB6Dy0p@W{NTkW;Srq8By9lX-#5L%F3H~j4g`n9+3 zbU;eiAy3k+_U>cTL1(tn3(YLc>JMWII^wEUfLb}n0>VL0GJdiTEdc7EHu|!w zRT672*u##xpw;H2Xl-$Ml2sNPVZZQbC3>zPby^?3`eNeNNc*MtAB49Y9-q(g_B~5C zYdm&cM{Rz}shUXJ8IkY(?K>T_GaPV@eZ8GuvUlz>KzU8j%|189`@V1t-Ec8|S|26X zxSnsG_p!EGw%!4!Je3|nTR{4pIk0o`;mnP&Z@4JZ#2MZjQD3j&p_)Q7zIjr@8vYke zk=Mi-eNflW?SMNsaW@Hm>`kq^}LU{jWX#BfR z;6nUn;+IkD`A%Lu(?6(MOl%DXALTe!8+>2CO^kl!*Y?^)ZmVr23;TP0hHEa_;|h`a z4xzuy*5Ft1*=O>(RB`2^7s864R~x+pcWS~t2~Hul^;PMkO!-)MC+iXUHCguHPzjT~ zXg4T8%s(>W9_>5>+WFp?V{LeF-oGj0Dbb^0xQ%10j^H6Dmmww~4R%^z>1y_%XGcVR9S5;Q0fhQ$M|KWPEPvANllQcgg6;vkyE$ z|NBkJU-U+v8TR@U#2+gO=h-pzGe*kxXO3RB-Kj9h0vi-}nc&RTd$2KWy0PAjoDrc>)Wh1l1pXMnQc*VJxyz@44AmXwf6TzbU+=f>Y) zR$6N$vGmvCvr-SWBL4*O*82EeB#kd)VZR>q$zQDg-q>pM(O(cPdOu=iwVy>yo%2EE z-m_YPa4@rLy-oXd-dhInsXw>(X)ThZ)bGr#UF*caUXIqObmE7;OnuZk{)5`hJ8&l^ zoRbpn5O4U6g@7ym9pppzaUK{LzZd>R%CLV-63*X4dV@rNg0P?!e?Zh*d>tgypf8F7 zeR6g-f0#zC(PaseI`rkJoR_6}7oD%{5ajoUyi+^0(c}BG-sP_vRbc49_}Iidvn*fA z8l$9ybmC-RrUCe^z+Y=OD`p>)II<%yWa8alBdA6Wm{tBt_!p@k9KdY+UBtA}?oW^a z2cJJdlpB-3i`Ww>sYHpp&d&3%CwM5efii7wQsYf%R6KhmaSb_jabG9eK+eMQMYbGQ zh&F(6mAcb)JN+X=ulqHrj(S&GJH9Ybu-F;YyQ@3iKg!HuA;bJYQ17|!cn;V>GR!@K zdT)TpB{PS`4D*vgy@enGcC;Ag{z1JTKqT7CL7QP78q_-pB47v2FeeA~s`teE$Cx?j zGR)(Gde`>EbHL70hItZb2Sj4c9P}CH6wnTcfE`1Ic_wHFL@t{-7&FWtgLXg!>?~)P 
z7l3v^B+krXCBwV~v;!hw2g@+80`2t1`(H70Sj{l61MT$2bHL78hItEU2Snn{984MJ z9iSZ$0Xyal^Ip&nh+H*uSkEvY0_}hZ*s)@mb3i*Fl3?b5W0(s-J0JpfY#8Qqf_v5b z;+00X&RB1!We;+#MClKhYoUG_NtPx;VXxnZyfSNOJ6D2MevQ(Yz+OyTs{r z%p`uODanz=<_oRw(!3_QH^k{J%%qvnA4rZv>|l{~KFw>I`$3%E!Auf_29O-tY`(~v zPV?e%)q(V0W|A=UCdrY*4i;O#qj}A6cLC``%p_6hPb5b!n=iKRrg;guH-K~wGf5o! ziR36?2LsmQG%qpt1CTCYCIO+-Bu9YF2j6CX`LI(qesdsN zh`tLpa!^b37g+)vMzk5513@F87j0lrTQnL%P=Q^s@qmXk%w+ayFmzQuvD8d zD8vw9chN=xx)YrNAuNDBvGJP=S&le*(MAz^0R06*(14ZNjLn5$5s?>dl%O7H^c=z> zSiOzkJjhza{fjp9p(oL9a|oKSPMfiL5Oc(EaGeV2CNF?GGbR`g)US>rvgUU1Y5fy zPX%%naW1k#4~o)}ktb|{*=@k8LarljL{=JbS}sf zz~4_cVAUXZ5Fa8d^r2=t74ifJ*trdPYLIlqWMqW_6t5$rKyZp-7pc^sV>URcLoyLs zQ8Gr*tvXH$bQjp|4bAG1Y=l*mj4{+%CsBdE3-(}x(*nq2#K9;T6X-sjW(E2WuooMe z7eI0m#3-5NP@FGrRWRh(APRxW%?OdqHUrUq#v<7`ho?t zLB~&-?hnhbMJ#~~CbqE;4&PcieLzF-JWj-T9X{SOVum>$Obg9-Ek~2bl<9%63R{FW zWE^oN`hpd-Uk3}P2g90e6SW~zh$qn(tf3=1esFpy?29b|1>qy=qc7l~Q#yHYdKhfX zHW398A%>$b;GrU&F*uzBld(gfA+jn;F&Av0b9J!@`b8MRE)flxtFk<%FMLgcQY8Tj z*S&xsM8cNZVRax1D!XDTHb6CWD-eWBu(fu1IuK=*b1@aRP?WBW3Lyq&XNT2=sH)tE zsj!0@=-Q|dF2i=&<>^8esuadlY=mNTFQ^c%z>eBs^&pE?KEzaPf|}`8s1UBg&e`SZ zL9|sSV=6X7@wzgqwW4_+!sni#&3Wz`@!6L!(wZ?H>f}p%P@$r zRvC-UaD;~I^3-HAV33Vy!w7;3;&O%)^pdW*y6ioe>c$Mih>a@P%NfqltGYqzviD)S z8_`A)TUDGcXShIb>b_8y&4R7om|+yLL&fiM#!hIuE>B(d0c^uYv~h&9O5){=UC=CD z^98bxU@jXoj3aidgELxh`*kY#uCPBYL?@$)lBv?DAn(Hu^1x993BwXR{Alr|Y(mPykEaIJO*e zT*WTVW5p6AC*&a7Y;zXbjKFbi(nNS5i22QRIbEbI0)_6 z#cI%tVa*#uTJ`nGdo^TBV4pXBSs4+iQW1wf44u%utsz?m`?k>y6A`NNB@XQhozZR6 zkfp(9Hh#fGkW^%@pxvMlJtR_=4pZ3VhK(Srps%3ap$d9?k+M~=#hbohBcfGouAq-V zRrPKoWnaULH@U5fh*NR9g7$ze)@wt`R)Z5IbBE{x(HBv~Sh#HvO}CV(!obM0iz1of zJ9zIVibph}xRgr4?ru@eSoppm+KN(F4F+40t#OI}2J0~J<*2g5Kxcr8R=MewBn$Ht zvV{0|6zlRPenn?R{vAnYz5zX}j9*9m+jR>#$A%*pS$e#2NXhs29KOHE^5h!_N z`enDY@z(}yh);9&$o|k~u`a+RK5e&0_Q$s1bpc1>)Ao5}e`+gU7jQN{?T|-yYa4%E zKtz0+yGM3gn}un>_4u@79@*_}!KMNC;?ul6vOl*Kn+80KPxJA}?r7ti22{qUo$<)- zY_l*6cpsm3-XpuKE!ZreCq6CMBb(7yY!)yPpBCnk&1~bF1wgK*UG&KQ(q>^Epn5ee z$|JkGE!aFj_i9?KM|MwJv3bDit7-8b*}ZLi^MDOk)2?}B_f^ztrPkB`8Pe5WSA+f~ zT4qDxYvb{Pou7wOO??fX-|bKPO1~f7Wpsa zgz|#2k#oM5%nk?MPixA{(k9|>2EH#!oASvH)B86_F>037m5W~eGa>VLA}uy8^o)UP zXS+t=%$WR(>~t$e-zCQqxKIh7kq1sdYN>0 z^wa~(CEpsOX970t+Rrd@uKlaj^Zg+G7i63djVm1Io%shr|1S;86Rc&7*w{eNp0qnb zEPY08Y+yi7S~_n;_zFDc{flQ7vL^`rKQTaCEVuI<)XF)LY`bpDojivH<^ECZqurJV zcpZb4s1-v#21?9DRe^8E($)|S<}>$I1%4h&Gb0)(dky%dUgxZ_D>%cS3rluT= zO<*kjU03+$EdQ51?eD99cc{cuBe(tI$pHSHLo`o~y!HjB0^})&sysFF+jpM|SVTEg z?WyssJ^56C5#>;wr$#~h$5R0oltWFP8inl(d;+#o4z+k{Ja6Ca6R?kRsMS;Br{I!j z4J&;LPIib!I_Fqm?P>8bIeeZlE~Sk2Wh;yQut1S9eA%p|uOTP6AdWHo$gE_zAt$szkvaUyhOb*C#oQ>d-#NTiB@Aybb(^e@MZIo zrHwhU1#vyYkIYM!H|E3@DE1D&H7{A)m=j+R*E{^pyu?cRAI9$AU(vr4^pb%OM%M2v zY5tJ2_WAZ5Wrt2)z8U!Z;i1zfS}C9BW=lNgf3maugGc**)c@H7F?TZq+KUcxeKf*5 zl9`N^qD^%rypA04Pj$3wgg;N5|GyUh|Jw|ulaQ|0e|<-nI_Ce?^zgUAw<`5Y_faJS z5A#J;wMd6IeycAfNmhB9t-~Q0qIQYmENOT2EFEss%y<1 z-ke!|HA%{nu7uPlq@f^93esFC-4&$!qx4wtj}J)lsrFxvLJZW*Qz~kq4*GtpFD6M@ z(v^_p>Ha7^7X0G_l2NLC_fZ9do#rW(wdxL8eygLBq%7%5NPR*Y3euz?&4tok zLApOmj|KnufaINO-+OeiL5q3Ho7%MwS!Y(qCrMe-m5};`G!&#sL7EGtyMlColpYKI z@d4?Yrqh5pY1q+=^j=guZNQ`+A-xqnoX+mLG;DgbNT+0eyHCw&+W^gH9f?1)&ID+B z6@8D~Et%it^YyfCpytbt=B`Vlrss-gS#77Q0yKZ>$m_Z^VtT5`y<~oe&n&CFW31~E z+mu+;T{55P!#iCSs3~D>8a6#rq*4;!=JV>bUVvtPN6gQxYbEiWK7FV40yT>}YPvR! znw~A1WqmlE8KC*R<3ZP^5z`Yz&L#1meP&r@9bdaPu}wpY-j&2Nd?rt425Oddw58S! 
z95iJ;v$)kze5&!7|4NE~`+qQt-=0cs_+LqiPdA?T&!PBtr49`oG-JKAXl?ipW_1j7 znzDYfC~7G7Y4q_wP4VwY{SRjOHir3+Qv8{zf`LvmR=EYYq4+#DywGNb;vYuPyHU#@Lp7cLV;e1a0 z4`%r`hWaxo97Za4pwx_2X2EU<{!e91`90Hi_*7LJV!jVe0e33InEu0fWMTE@8ksVZdu)K(TN}*jilAT2G~U<48?J@lC#P35Tyaz~R=h zd~Y>b8&WNuF4J0BA=FSuTwpqJ)RNzMexjJw=U-jR3Qub4(x-;*xC7j!4uy@b=68Bb z6j$~6lWJL`*PF_gQ7yOQxXHv(<3;8E5&;8#=Z=ZuTYdfpwJhBArbA1qp<8f)iNsOV zzHoJkfEK^gY@+yNpMO&g>*}>8kuJ4lBaVBSII3POJcVx+bm~nM+k*OQSfhzeWja(# zTO2ovII6(!RGVms?)NWi>OxaP3An%r;^-WH=iG^6xjuin8Wxj;17u&cflq2szhZFjU5QyVo)c;Op}5CG^qMsj z2cOp16AtWp@eJ52Z0*5o0A)P*8`3IqQa4knw*YIcSXF?vRLm^ES}2kWux5%L1=w|p zwguQVih2duRf=;8uo%VOXV?{rRnM>{ikZ)_MvCNTSOZ0mXV_(mw$HG7ih9qmI*N0i zVNr^``Pe0jRry#=#ms!{B1LjORzuMvAG<)&HXo~|sF#mbQJj;Hg)8>vVdpDWS zGxMyt6Tr6LKoQvfuc;sTI z6l`;`90k2x?6|_5Tr69mHwQbSP?dunQpn7~4l0myuq*|S9BjXWZ4S0qK`#f}tuQAC z%T(xnitSRUdU}IaBz!#;?tkVODUe@R$2=^anc&`{Rq)JeY)tA4D$Pyo3o5ZD_0K8| zP3)glqD|`aE0HGl`IQKh`n*b*iG5zBj7fcNCC}JCw{pz5KBw}lv3*YE7vuV;m2JlM zPb-^^>z`EC7~4OotT3*BT=~M-{&8iVas8vp2gde~Dl?4hA6DKrwtrZeXk7oGGRD~c zLFEPG`s~UeWBcq%KjZqW%HziNS(R?a^*>hbHMak;(#g2~e&rTp`}>tP#`T$%=EnA! zm008YdzFU9_V+5$#`SkAk;e9SD-p)^8I>?&`;1B%<9cc(&&ZxyIc8L!UisC?KE3jb zQGHrvn~{B5WwTLzYGsX)eQITeQ9Y&dh0(~)`w7=@o6F_4<_u@vZ@P}NpIJNDyA+tW zXjPQ_P9!#p?}Efc4Z0v#Mzy;jS47piAeToKyC6-X9=afnqf%UuMp3aYNW-XL7ooS=3e+x5K_Dt1CDMm=;wDnz9?A?2fDosh7oU?-$p zl(!RdUerD(61B()DH|p4gp@f(%;x;qShU`Ch!MJE&vZlM z9!us3vACCswxwb@@_JP4xcnR{mM!0#jvbM&O2-b#XQpEZ<;m$-mb^zgwqM>h9os9f zmyYe0pOcPd%J-&WyX33VupRQ5Y1npoavHW(-XjhBQQkHU+aj-*hJ7bLCk@*q-qK`AiD7Se{J5 z7Rh^1u+QafDcAyeJqk8oehvkjE8lwu`&7Q_4)(Ep<{j)qdGZ}>w!Ft3?2q!ccd(iA zdUvpQ<>%bNQssM7uxavDDOiepW(qb%o}7ZcCGU}fO_sM!!6wP;r93fMuALwBrbj0( zdpY}dlu2lF&(^rjD}dT8M)t9^)zz7p{j4>o~Sxt=zc{t%g{YW)zi>DQdQs3{etR%fjdz(%fLNI)ziTJ zoT|QoyPxWSzWXWFEPeOms-F7pM^*Lp-Q83Nmbo8P&06NZSJiWw`z}@eW$sR@154fQ zRkN15Z@E4coGq809u(3yn35fx&i;^O5;{6aHvl8%N@VdCWL)IKEy&A}DO-@Sk+EBl zF_FPrkkOIeTacF`_iaH&MQ+`KjEuC{f+R;8Z9zsvF4}^;7%9I6c_EU&8A*y9+>8v5 zY~PFwi>%#@Bt{l*MutW{+>8u~OxcVKj*Q)m42lfij0}wQ-i!>0+_xEdK62}3C^CMBtH@P8_FI)&&W05kBjLO@arT$A-M;o1fY3x zH2hZ)eH4C{71M%9ba({j1^<0ASuxC_Z|hWs9NMU0-5qvF2`=>zc9BtJg+Ey`Yu zo{&qx9~bfiX1?&~eDWNa0pcn;l~Ae!-4mchrx8Ag+(zZj;DZE&4)~|=#A$K?>W0W| zSk4{Kn<2Eqe+o~WB0om$61jbobH)eF5L)0j!xJaTnJ9IU+gCXPp2sIN!2`k*Ib;gz zgV1e2ZWTU=PpE_c5S}JZaS0{xPhp9pWPj8Rfm^3sH;y+=D1!eK zmN-H_iP|M_`z-ek7c@;MfZq&D94339)CF#Ba&#PTijWHr2umCyA3%MWar-2fkHa@Y zmdsl}|D4uWtLBRtYJ|-ZzWO!Ee6tvP)WMmI_i{u!%Fn2m^T5G4^jq+k#Jqm;a@0Y7##_0a zxR?og68sJ^uaCSGrNz&vk+a3YCg=(95Mo|0Sqs(4%XlNV1{X62JzMh!ZO;$o3sF0h3gN@Ni@R`s&CRwQ6 z$;~L2<57nq?p?M7Qq;@Jdqg1Zf8pfsv!e55wb&-d)4{|eJvTh{>Z{{=(ENQe% z3$$Vr8M0++GiLWlYeJ$YeYXK`CVd@%c~ibFz@{nRAAn0!4ns7pR+2?7aMlgzefjSlOtu;bS zG|$yxoTk-FvZw~mCIbCSfjVdLtpX6>yE<@aS}c-96L2;I=(hvvu7M*&nCKZ-hbfxY z5XqtyIGYFb9|h{(fg?ab;OfAoX|YKbOyFz<&>sof0B)@jW1@ww4t$yxhh)J5&Nc)6 z_dy%LtpX4bxjG1FT3nI^8#wy~d=AhCI0C>#i(MT=G%W$if(x7-1D^x50geCxz|{eu zX#pe)08o|TU6C(b>kyvm9Nq+zPd=j^&+$vG?zKX*R4m}N!S;>hv*DZj zh01l~C#|roYZh=xwE@{*JB>4@1*G9^SSst`?}D#1lFw=X$ib#n4_Y~~uC0fE41Ur` zKCk_hGR|09HQVX}OT`M_ z9jw$u4$^+j!KPP_TP3otS;6~*mp74vwHr8o>D3&o43>&Dd@$I)i5#N+k&~BRJ!O^0 zx@HX@2|m?C4%P1BjHOp|ttwb5IQV$*l_oM#yPt!lR`aczS=Vszso*C~c9ONV@FLry+pq(3P#9P)V^^7t7e z=Sk7>A)mUCp=XTpCq>vHpZXA!0!HJLqP0Ul4Iw8B7>p-H=0iS>A&(0fqfd&ghJ2br zh6)(sCq*_xKFuK}g$(7VMRr3z??O%%GPIu-Z5i@;AM&`6vGQq={g6*f$WS2z_q51q z$mc_d$#aIo)1qBNJ|9C)K4%<$TC{h_=Tpez=Zw=&iw+L?w1x~lXM{g3avSn#3o-er zuY0)c6=S=UNY_B>2huS3Efw7Vn?LS-6D;^a7`PI?XTopUznn0ADWU^jz;rt;YT7qb zbif-3_zawWSmxUM^f{MVY;~}vW#!7kF)syY9s&D-_*m)jx=olRO;Cf_o?(mL*1v+(+!HP 
zrDq%1@7Gat-??HAbH1&^xN>IJVca+hrWkk5VpGf!kJp>DbNajY|JMoaFzQ`lyw$S7vpXML@;%4;8?~ozRwg+O%Ti?aL zC;xB88wv?~sqsK?=x$@~_~P>z7q0Pn%ueq5^O#*M$(rP}T1`um&j0*ZKXuZf{A=H1 zhw|aRA&2s@J`>0C$-a}0<-ESfj^)C>A;)r=eiNs1X#Yv4a#;Ukr*ftKA*XUgzln3X zdjCo1a%BHw=W?z7A?I>*Kl}0x%vS!L8?-o=nRSP6PMkM#y*-g^r|IPn#dG0yzO$ru;DaWZBne|<7$7k^taW;cI-GUf;ViDb+ken>KAFaL5fW*`4f zGG;&jQ8MNL|79}fApdPL<`DmLGUhPGonGyd!C|BD~*pHeS))Pk`Ty==i)iQcx*wmS;e9VX6v z%ytd`Ok(}ZU&uc-W9h8^m%6I-#sB-hc(VyVJ-C!e*FfqA(lC&w0%<0cWHyJrp{tlr8=k46@hKz$3+MnQ znt;GVq$yv3#}*Gb0^iuc7!9bxiyg%%fGK8^_)S26lPl0q^tE)Ljs0q0tz0uCH4|40@bvc$E2Am zfCI3YqAr@4?<#hk#EYMcXMh=3pok=L6`4sa@<}2`panPsd?azrNHZ-cU<$Yl1OxoQ zflgpI^9~gdT5>=on&5SOHb&f&D;D8}9V7q_P-x(+H2@y|@PMmuJb}cE#sf`40S){q zk(mNm38WbgsUIaCWr|)i#e=|9E+C@K^tuY`@PI2|M&hu=lhrgp08G#XOi=+%1c=66 zMRkCfDF%NUkm7(ArkH{k53+%2P?RfNCLS%qh)MtyNo*A!)#DSsP=(#Ct)%IF zGYR-dTyF^lCOMNRa6Q5*NrH30bgg(~r>MhVrpS@wOYLLgUem%NNCJ(C;zMDhyQ!KV zSu3ZCH-?QmQYkI0#>wIpVWV5An(tYsCyN(^joMNv%`D}~Vi9rFnyT5v%Hb6E6GzRc zlm?aqr?`PQx{9h<&l;U5eoh=Up;F$mu1*xE5l5F%HEUTbCyK#a{wOM?n$_Z$~ zMAdx5Iz3*zn>eaQrMzM(j~81L(`9ko^rm>0e=EQ9tT}0>(;-}t(Bw1VpTO^Y#Y?Wl zsE4PG)kV546Y6%i!f1B91(g^?c-UAS*>#Dq=S%B6+FD*R9itR}h!}o~nw{FD*T~`z zaQryI&-hlKi0R%q_*!aqN>gS%%VL0o-~>nTt;&h%hj8$f)a;v0dUdSeeoo#*unXTR zi zj_(jBkN!?pPEJeH?|8*ES-E*y8h(5IgheyO+Q5E3>n&{$c-s_nlk`kTSqJ6twUEcV z=gjc6O3!^`;qg?mhU<)w#R~Ba6M)^;P(?Hz;1@Fv=y3Q=O;hQSGknLnu}vg}o#Eo0 zV?7+IvNsOk^TlB}u2zH}v{DvUjnWGDo21j_sBUz}ULKE7}EueUGA-Bs!|zW#NcdajF56cpdH?D8JN!x=$gJ^itJ z3=XFT`OI|wXdd1@7L}=?eAqmoyE#fvL;0Zj>F&I!%!SJP%{{vlqx2Rk?=?T%?H84~ zK=}vrAG)2Q^cE=ZGI!|4MrEoiyO?k4MnvhUD?6Fvy2m0j)s!8~O}m>T_0*K@{||BB z0o7!-wrfKeLJ^Uc1T0t}AT1Dz2%-`h#ezuF5PB7m(2IhK6or6M=}kou5=;m+6a@tm zBm{_5A%GzyAWbAn5yXqmng5&_Wsd%L=H9ikp0#Y(kAW7TwcjIh2EMpC>!aKamb#Fd7gO5=$(86WlOf{#Z&KoZ8#zp z2G0)N!D|3skK(Hr*~EJQ{MmcHjxNlr4_@>ht-}cO>VfCHd+JPtdG~|4-i>u4!o0fR zx89^Wx)AR^@VIwg9Y%;(2R!1PQfDH>s|{v($JB`k@oIqwy@TrLg1nmGK5y4Lj36%z z-0f{yXCla}0dDs;suL09RR_0vYt_*Oc%fiwkQ~e7IsMx0UwB1Zc7cy+C3?#-jh|z# ziT=X7xkVOyL<{XL$y9u9a_u%BuSm;w@FA^8Z$75LbCGMJe7u`lw$7tzYsDz0;1ekI zhG$zSr@=>2I~%TSp_qW-s4Wc!TPUZ%2T>au)^4Gk1nZ&R*FO`boB->fhU>40QjUXR zsE&FAQHn8G4OQRqlzRzJIk%N?se10+5G8$ZWKZ3d&6HokyHR)Q4K`DZz&lXa>ep_j z90ALq{OX^HP~c!7%CY{62*nUAo_|8e+J`jk@q&Kq_AcIIEs0dx~ zLWg7ECU6lr-4J7FVklxrKa4qSa#-Xr{SfAm$sv(L^n;j#CI>|h(hV>MCI%t~^aGd! 
zCI>_g(DgC;Ci)`!bUlooiJpiaeLrTu$$pXjbX|;96i*AXmUwFp6u$<>6v* zt~eu{9F7miqQ4pm0cW8?$nT^^T(U#=T!n-{d?4-+d&pVHX~J-X@m+t$2Q zE&k(~Jt(Kv3EdKIM~lP0)=6CwOX`Iiz7FA15$}^ZtkdWpKQ7rVuk=<)uAHxYw5+i# zuPmm_walnYu8gm2w6w7_uQaCAwbZCou9UBIw4|{luOz0#wZy1Iu7s~-w79W2uQ;aI zwb-awu9&ZQw5YKtuPCO-waBPQu86N_l-NkjBgPP2iAF>@A|G+Iu(2?&Fs9J8(5O(Z zkgsr*&_rk%m8S2z7kFUh(d&2B?ZMGsp2eo^OAS>?9w(7P!ZMWENuzhbcY|~*=Z&PCPz~+w4H5_s z+!%fgeiZ&I+z5UI4u>1U55o_^55f)L2jKc}J@|gOE_@$c2d)j*f@{KIa1FRR912&1 ztHM>_%5Ww4UbrG$0Sfm^hI*Osq&O=7z`(%yp6Lm>7{5 zOteTeCQ2j<6UmHQ=LU2F+5xSBRv&d(OBVxEebhKhjC~>x=sB1ilN^y8`hCoOllvm~ z>Gv@AOzw%?qu<5cHMuKtm!6HuHpv#rre|TYOtM6>=$V*IlT49JdIl!LBts;Fo{mX3 zNf$|{r(x1e(nQkeshCugRFPDA3MR!QMI@y&c~|C!%#VTx7lyU?{l&*rI^6PDr?N1^ z>GkOk(yygErXNnt)~99{-5g_ppK;YDyFybw-+uQ33@8I`s#!B`0#N?*^TTxb`0B3Yq{5auSW+P2lEDF23-e@2IU6%21f@P2e1QPgT4z{Rda|ZBb@ybZz}>@OGCZ- zLq*Q^N#rUk+DJYv47HnmJ{Cy_q)Y5pRzylx7Y5o*J@1Vq0?tX~Dk)k|w|`{p!wi@V zhz!vCG5sd}BK>p*hGD`GVbJ?9eI|V(ee_;TuSu^+FZ~tfmB}lSSM(lCk4cY654{`H zZPG2$P4B{VnRJPC(K|7nCY>Uk^bSmiNry-Wy&cnT(jM;Ethe%8ReMjp6nROfW9TMy z5jwpM(`M3UbF(4-x|FEGZn&{WN{R~6*1onr>;*N_mn{L1m>Oy#Y-vff*^{AAb5zSB zY|<9L)bz~90kPl|Q85#{(U*ONxp?4Wc6fHoyz9KtyxctB{ODZcT;5#Foa>y?oZKAW z+~{oMY~F0ltm~}NtlTW$?C6Ka4|yMAKDd4``XKj#@5AW(#`k&eW8S;IH+nDkp6~rA zw~?F2jp4d-jkt1LKJMsD<4oR6%#7=d(Tv;--^}Q{#&>z|V%{l>a*q-(`XKitoosmv zAue4x*>bFY=0z~m2)|T>K`a=`R?NJ;g;~0yP=nlQ5hkXMUjq6d7Q}{0GjBhlR_O4l z;{|-S25ZLGS_;*=uMuBgD1Q0jnt~0s+t-Y*vD{b-StGu-aO34&YYNubu3hs{e@1^y ze@MSq|D}GD{&W3G{UUv={(b#4{agAs^snlN==)U@;Ht82$oZVHR@K^z_Ku{=D zASx6o6f2YfN@HJyZ(0zR`E0<_w_M`A{`vLv)-B5s&%&P~@5Y~Or^dW<3lUM(JFdK) z{OC>KTGi#+L>sdhg__(=7GZ|l@k=2G5etDD_Xd}z15%#eIrT1@BV4yNNguI1P1_na zzkbkV8kr;o4v+$yOMzihU~wt%tR%Qg5?m$;PLc!%NP^8J!7xd%xFmR10^B75E|UN! zNq_?+z~&NQm;_i{0z4}Y?h*%=iG!2G!2#l6b8#?C94sylo)rUkiGj<+z)51@fR(CQ z3=9(ki;ID0w}890fXlXkleT~Zwt&sIfMHv};#6M9vl2K|0T{>9OS^%h~&(TbUKhOLR8I@SsmXE6&inS~k5!gOX~8nZB! zS(w5sOlB4)F$)u!g?E^Rx0!{vn1vW-;Z0^?0<$olS%_v9#xdngT}|^$M@{958@o^-Q0(GP3>E?x>Zhb*Q~p+X7XGo`L#7 z??6kS9nkmCE$Zs(XVm@F@2Hokcc{Nt-=d+eaYn;W<+91 z)&YAD+oHK`Z2}}hDMBp*7NHZN7jZBG9&t3{M8xR`^N6z%$O!ug=Lq)*uLz&Ih5nD` z%e9ZDCToUG=gMqm@)!H3j(s8a*0Y)}NXx9u`7~+fTqI>h2)=|bLP*Cd)bv{-7CgyA zO&65KSA;Mx7<{_d+n3jCs+J~As#vcX_?jccU|dPx_Zw-JdHOVaS-`^oHC71Gc?GBA zZN`fqt6ib_LgA7YUB8&}xAU6<+5x5ltnb_=O~S7K`d$X@N|m;^ChrudwjWGNUmxiE z+DquK$dSFzgBD*3h086sE!oZ2%|uhTK3%924u9QL)0b7hbw#es`ozJZo%p4$-6(ds zaiqd$uSEWp%NG*(EyB*7A)PcB~K#Mc|otBC3~UlWEh>waw5 za?MjJMXk`$wy`$;*(I`awoazaLmLZ7g~^M^qf@3nnYa4xAaaM5c3H?*{Q9D%IP?x8 zXIOk!vRuWX7h*@fPWfE9wRoEptF~R=xKUVirtaF3${&W4i52=#gZ;**ghe~*HhfU| z*`3+ZWWE zznJTjnQ7SiJ!fWQYoqlu$XidaS}QN!ScE=$7u`Nz=eERDU-kxuYyIn*@K4YuS=y-m z86=q#2;n{WXwmMv>*LJ&58gu|TB!ZwC);>bye^}7;`Z4NY?j1N043*YNqkY7Aqp_K zheuCJA<@>MmK=s&0FG;r&wF&I;E4za64?v=3W(EypZDlWDIi)p6q1qX3gCGSvisA) z(-QcJvl3JO8TwmWvV})lqq!q>3tZ;Xg7@-8t#G{+x;0zdK{rEHrbSZNB`z9Du9IRh z&?EO~)6%;9UgnueZ)|{;*}e>{^vO;wZS9N< z@QD*bd-g=%zFqg?1#@`V+sEhshGxBZ;cu2)Rf*_gmnn7oKe=R$c!jpcPYlJ^_q}6I z&QAwi4BDqxExmOhUibnfx|~(l_wLi$bF!5(6zN>m?3c<`t8)sVHYZu_g`RaT zZgxy

    XPyZE8-mQh=H}By(R)(sZZ|Xk3;p)jD3>snf7!Tk zw-p|Wa4B!`$n4a~@rR8YcUcucEnNsLh|JQ;?DBSu0vF+u-*PH5QwQpAJ{H&UDi3Pn za!*MA^)wT@n(fm{FAukb+M za6Lx0@Z3njH9kM>X3b^;?AT=@yFe#q4x8$sANt^S%7YAb_Hv%Tb(iBR}T?^XG z@8usn78%7qxg|}$ge&c;a z=g&hfOUH8VS>>iKbRqtl|DJ+lc7x`Y_=zm5sn$Q=`r8K#VCFld_57nx_A^Zbe{49C z96R-?<6ZLw(o3(0ua4pp;R>GSX$2*gZiO$A4=awIPb5P;&C;+XXWa5$o_LsgR3Q-# z@jR23S8~ej?n}Lg!AHmMSZYOwN38aughyP=79Jd_n_HN^r1y6WTNm|wk365&meb>< zjBHx~D`rPG(Uz; z>Q&URawm4!=07>@`>^XMI7Lo4 zmGAk|#;cpN+?*(Ep@&%TX4m~b;I4!|FU`QJ%D(=7rMCg0<>OWTgP+8MJ_oQr`b3uR ztF|7R6u;0NR7L4C9PpY6NYt#VCf?^)z!v@)#;99lf553O}t~H&YfZ0XNm7Bcdam_ z?G5oV@r%tpcc$NG;AEoV>y*=x@@D@ifXKQH*uGzzvq=(No3{bJN9E`hI)B_* z+xP1L3E1g8{%;`OKPuVfb<;+mqI!yJA>H=Dv!fBI8V( zH}4x5n-?7pi_|`oa_CR+As_}+(FH{Jy1s|!XaQ;Tym)w^dQ%Qv2K~|X{gIplK*5)J zH{sFhM<394pn-1hjdF~D$6w~%f+wptJ)rMK>$tuDHRlu%{qlJcL=pGnfMpG)hVDx; zv$GQWK}Zyol2tQzY;NqqsP1(5m3${|uatvYmt*sbp4+N&4kqLWamS@{)Xbfln@9w! zXE0!y2@xIddB-Ei38c|C`U*~mrIneKqVb-0Jaarj{(Yl;a3%~`Zc;%DuCcID;fOgP z)i@dsNOg{x0}{sZcR*^92Z74Zxe(o@Efdp{L&SQA8x-*bRhg+tPSC_V$zfun!!640 z1QQvYu5++=VUrx%3|Ool;3m*+nX7u-A)Ty+_6Ks+XWaxkEb~-N9CFA;=tLk?Bfwpt z(-Ny{=71wxqDz6k8nf;KU6uu^Ru08vS9B+^SR>%PK({4c)tcqS;4T*2%&w6T4{^Ge zEFR){PeLNZ|6a1h%-|THt$H2pSX=c*T5MZ2KaJW}Ekpy*t2fh*(W}L2vGi&njY_YU zp#ff2@1PxfIbUrzN_m-(zxA9xIjrDQ$|7;jVVW|O@M3GaK0Lf&Ipq^^(P5r4lkjG% zz?UCI|5^AIlJVgAp8;DYfC>f>UpM4+;q&<23`6MOnG@h8soSn((B zbgK9hFFHWt-I+A*OBlhEI|L(mac5wJ3*2>@1aIzU&9@8h4ZVVpwu|K{X``bk;TCnJXb zW{I2if;&+>zcLI?r`SbGng5ee8kSvQ!#pUabmm8);=+qWMbw^Sh^EcuMfIhMDCJ{_ z=FOVL^__|+&0~m`&E>`Q9~4pg#}I9sHB0Kn_M(m)L%h7jZ8;YAOU0vIO77Rw&kkXe zNVr`}=dbsj9jZ(s5gtyH`7acF1MW}M<^rmT&p`@*2i*DcuNi(Au!~_9^GG}2CVxFt z_DRI3sOHD|L_O4_lZdg~+z>kXI%8Z^VzHj4hbleEm=sN3te@0FJvqsED=M*6FQ|`d zILUY?n!HrMS0654ge5yaLA8BT8>GV&-5woB(kv|S|m&lh0h0g4;9MYnklKzdp zgrSc)q|b6n2X26F4)H%At(R9axN+|05byzsS6=Dhjr5yCkOvuoQ?6@Xcp9OkPcx#o zNbG1p8=-ccW?bKryrbc9}J@`ehV?-dIb$7&O;P%h?(G_i8$ zhEglk1#?7(n5IiZrWbU@8nGI3&0&;G+;=PDLu{^O9?k{A1C$hne~rZkls zOMNpGP)W*CntBuq7#oVJB;8k<%8NZVHk4FJ%2k@mkBuE0%B>_}m8P(<)UlzmN)lda z3Kt6)A0k(hij<}bVvmgvbyYrhR`4kMr{O27kp0@p^9_5$P-q(lOG09y;dmG-(T2g6 zNM3003`1qwFh(RKJ~p7kP!DYwZzPgGHarePJ+^5{kC009DYijON(3%8u)|PQHi)+p zbBhf;;ix(rW~CVZ+|L18Y`~KFXXsR}0WtdCeK&fqUR7x3c}MVn16d%)_~uUusI`ua zHBuG_$kI1azc@11Np&9}@4tx>ab#?evM?Z9-$Y3{GB!$e8;~#EL~VCu@JU%5Bxl}4 z$vZOmrMeH2pWH;LIx+;LEDn+TZ=&IymS@n1JGIWB4|YnPK_BRxH%04p_L!n|J4vQ! 
zoz4_fv{q-3DH_&kX^K|w)G|ejw_B>Bx3p`iqBplos-icw&#Rz?+Iv*c0_`LfG=F=F z3YxDyNCmyI-BJa;p-T38H)3QUuXG zZ9#(Qt~N_SbVpmOn~FOJ-%L`3dQs!X)mTrPNh(mU<~Ri?tEHKw4)vnNorkgpnn_wv zua-C*lr_~%(uI!DZlkSM_9~Rx;y~&wej4czREZvUPMrm$k&YD8^?{2y7=7Sej)^{y zn~b#9u!vdYz`Dsb7c+`P-8F$@~K%n2lA?UHVbmExiSlK zt}&Pe+1Fet0`0CcC<5)MT3ZB?sd`2P0jsVM7l}Q)TNy>b?gH|D9E$GFC;?hLCR^iB zuiP1B!0yN7OE?tMok0Rx;K`Xd)Ejq3C9oS$eu6{2b7xcoEePa(9BRRx@f6riAb-N4 z)}Ck7_S%jMmJS>k7c3swHZDjUSTin27SfeU&jT-`WIa`0M(yz2_A*Q$ z-#}gSB{{JKwZ{{26jc6_Tv~!s_Cy$iG~3CYB`8f##0gM&JNZKiO5YQ43Z&UV7Aw_G zpsHMBC-stYVN?0&Sa()#F9{2q!lJ3}tg_yX<({s68_PT``!<$(8ue`~@zmVaMMtm}pac^>M9d3)A%!R8$gorFz0)}4TjIvzR!d+k_v9MV%T@|ndx4jA&^vEs{==W%QAkg~}GXUuE=w1NO_0iU12V>fkR@6IR#uC^< zpVHroTJU8ogS+)9pIT9C{TOScEe=qm+fcvwG1f_UAE4}SLy7pcu+rmPD*!r55%GO@ zSvsR6VI8Gw@t}LGqoX9z%4g}o%KR(oz_NUUbYM~b+H@d3|5+Lkn|~z@n452q2E3oY zHVv4S|11@lmVYG`n3Vs_40!h86*HjuLjyD5>4$60fF~Y4I|DrW@X8q={Gq`a;K7G$ z&j9rvJ~IXCJiKBGggrDc1*$zV8rz=1G;!|@~+x2Edl$Pt| zh$u~0?}#Xvt8GMB!IgXs%_vGD6Xghyu;Q1_XL^l~rW?3H_jr;Y(C(B--I`6u0PTINLJ$O*IUI#& z990*BAj$+5P6;iu3i-(=>FpO`d8~8wvc##`vTPmb>XhMgbuQ7~o zrmyvZ>U)tXwD%+_8O>srvu=+uZ#vT!3i5zQ&>Rb;1Yo|;oE3YFdD8FMLPb5`S80a} zrTAdnT4!*N@%nT-TPVN-eucKPP-+9rx7Jyp$9Qe}c3bFr5BO!;hC->X;fUs~np{ff5=t{1(XzFiOZl*b(hoqC$!Ek7cYpxVcB`4KMJtxlj7F|FsO&bm+yXH#LHsz$+smsPWU!@DE zq%9aipStEs(5COFTC3;82KYQ<`uW28!N)cCw9}sX`6~5;k7+)7NxSD)19g-2f(h8y zZ!;ILCvOW7uy5R!Bw*jPtxUkad0Ur&eap640sFRX;)3=sx4{JMJGPCyD{{q)wTmy}9HYG?>w^d1Q|E;*ziTU`C%NWtNtI$=7u-mV;THzTmzLGAx=%v-0rW(w9*W<0aCxJ$wcF#secl_qi0( zL_B=g^?_?)BLv#J^S&Q@!B1f>1RDLk?@TYa8a9Hb5zqUM^@1y5E_fQ|eCSZqy|cc# zUEq9;J?^wCXMMA}z>hQ@xzX&-))cFYUg%P`U*0jRY`;cUT*ZE!EKJ3IgRHrV{YKdU z6?;C}Bo%vp*)kP-0og7Udm-6b75h!H;;QzWWnrrJTV%~u?ZssSRP7~YlT_0jD4Y(W zwx34}I9-;)sV8cCd$fSkS!7NLQQO0#1(=>g<~$&3yLz-pr8|&0cZk}K9<7qvaQ73K zv_lU#0{Pkp-A~+1TmOKwl&7udej*{QE{8Lbr>*0DB0eo5hr`IzhPj_Wrya`SH0Non zxu1wjTc5+J&eK+MKM|W&cb`*`rwwsGc|%LUwPjse&SlQdyV`uNEo;*pE_1fr)!r}~ zxY2e^%V{Iy$a-_5xXAjmM$M7+NF#x$dV3?|sCs9kxTtz}qvoi3FC&5IdLJX>=wV&2 z{^{foH&cYKf4V8T={gT4Wz%&*jHJkQF${6!za$nxMpPh?7TCI&oL|yZuLa^rk67~yUK$p zo=8-mCIV@n7MQ{j%MFfjL)xazC38MJ3=VZeB0oNRyct0kl|aF0#A2OXGq+?U?oA z35r7~w_`xJM6-2*CL7AFAJ8q-Y#pbuLbxRZx&@l8W3-$Q?t=l{Jk8cOG=~uGodMkk zbLE3BQx>}SG=)cL++c3dfNrLy@CdCqnCm*Am&%>))D6{0K+({HB0c*{r<+yPjdv#M_tq*Aq zZsDO$QcSGVVnePgUDrxoSe)i-$hD;FnyCwm(V&K0 zBf6f+^s$_UO!`ujYnX$2jY`^}1dR~ODL3R&?}yJ-J)h(6^U2Myr^=Hv>}m3x4Et93 zjhXgz`Q4fJ?eg$U`%Zc1O#5#6s7(7;^0}Gzee&c?`+j*&rv0G&#w>fL{O&A!mOMPm zo}Ir?+Ph@#8I}*LF-^D0r(NjaNVszsOBVclmtdY@sj!-3={65(7hZBC+_<473mUyk z;+|pG%W4i)9ESzI;Ou_IkWD}5=o{_fb(8jqSg{`#Na1jL7@+iX4!$lPUU9Tb#EQMJ zzy?lk4?{HloW1W653d;7r@{(pSYRE;xrYHrKW7)ZXSiV*`| z>>19Xfttm*MWkJD%Y@kkvSHdx(Wc?OnTJioyjg)w!;;yAO~V>Ba(?AEfZ)+At2A9S!EWa!NJiG7?rr*zr?pyLm-|ES|?HN`` zYb{+M!t^^i(Y;H?>08~nqMl)QX|2TzD40Ht6a8vQF@38O_qIpaOoevs@dX;ZmM_10q&~z#6j+=_r(?5RddAmxvM@9KjyCbQ2d-b?Qon( ze_^m~>7?tjg!8?hDJaG@GXjRhr=+I2Q(%8q#Y5x!qpNT{N4@mA&84eJ2pC zdW|0j#_M3gtnMENTJneheB6WDKtdEy8>$f#)P`DwVsk@1!lSvN5mC_G(2SU9ZfHX& z(i%Dt9<+uYL;}um00j$Q&)aI`6=R$gPMvNO^ant(;#9YT_N0;eF48jOI#t#vuju*Z|4{G zg)PT_Gt_7zn1rEc&fl@gSpu@VJszO zblpj2boO&${m89h{Q65W5Hey! 
z<7Yt30Zr_Z^hFc9B_q(p9?9Ei;w#A_>za$Gy9FXLqiGz~s zM~ zUUn0>OJ3)0e3!h@J?<`<-@W-RS;$@B9(l97@jbG*d)z%T(7pK{S;k%9K6%I3>w3Wx zjDH$_F5VokY0Z8s7BC^eA=`TybI8tKaU8O{S2HJfpg8BT8nr?-6Vz9sngtrKSb0`^ zu|oAO2vDhd4+N@Iy$@2TRLueHt5kgeI##Lr5Ol8cXT9q$Y<888np`<0F7`74_4s15 z6Qw1@yP1~~<}JWSx$14qM~U)|Y?w(Ovru`I8Qr`P{ zoTSYA6`Z6j`Hek^cm*jwAT8QKG?13DwK|U-z%_oqjTE{`IH)OgLSMz0j4TKB1A)`=AVBo%A6Fv0mDgL2QtAU=YdD zz6>HoI)XuLl)lX%zL3sg5Syfn8APgd9fQ~`-OeD=q*)ANi!_%(Y?WT$Pi*`DC}8oI z{W9iv+_PXrAYfvmoN_r-kwm!`>OrE!gcgt}aiL?GPCby~VHwjYB3s5`ipY`iog$9N zL`)G!Wo}Oq-^k=l5yxbTr->x(_41k9pkMMCOgPesDLMa#;q|NGegP4ZjyqkcRge$Ad0PqX`o3i|?_DENt< zMCqy`2aAA-rg-aDL|ou*S6qQdV&dbXA~W-{rY3W%R}h7BPELPC&P)LIczF+P{D-0S z{B-rkABU$ao7ev%*B^zQOf6YOKA&*Q)#N>S9Q8Me$mx5#aV}sLW&UKse>_Z^Be#DEl>Vr&d?vAO#abM|3}E6-JYwMCZo<`HT!+D z{-3XF>VK+_TDf^CBwK$_;ZOefo16o2KjeqMEAguZvlahNe?)mscWw0aEbQ9o=IPzF z(aBS#YoncK*uo`FSme(rlS_3``d-{wky9h<*S_zE8eH(_eHQ~tzGuunH=?l`DQ6?{ zG=k1Xoc44cko z6d1lZ8&_c1c=lp}A?2(`fg#E2V!C0ul}EZ^nH3`4u++*p-LS+;H{Gz13=6EP(+qJ|1!;!ZSA?(YIp32Lr#W8wJvsiM zx<3co$NGcJM=DEOqOzY)n@Wc9*PXPRmlx2FTp_>hbicN~-|Kaizt`yg-u*@^eD-E< z#V@V2qof&Oi)Ch$K4Rf3`Q6_1OUL^4bkO$#?Ag`_o~tB~P&?`>2}M=Vrfohl&Zc%g zGS;SWKJtdm-TBDtHaF)ZV{9(ZM@HLt&qqet*v>~r+MJq?yk>J?J~F~aWj^w%&9?c- zD>j?vBQM*mnU4&&nVyRbvsp6|c>y{7<&fuVPNXNYjT7mCtmQAaGYJeJaJaq`UW}fE=$!4Bs2)<^XTEsk+rv@=f<$21O$$0i9_5Uu} ze~A0o<>hK2^i$KHt@QW2iPz_un!FhUh#`5$0HR2zVgT1jDlveoq)jn^%cSXOKp3el z8gP+R7!3#^-HZkVk-Vb;0i;vW06&sSG{A?nDH`BSnvMc^k=mjFr^==s0Vm4Z90A5< zg^qxuWj7rGMrGcP0C?FcN5J7S6-U6qvQ3VF17*_=0KKv{2Y_x_p#wmt?4|=itIXR0 z04qD?08lSuFIIdnNv;SRGJ#p87+BpY$3CVG4+t5Qt~mqHFMVwa*k4*}3fNbA*A$># zdf619S!!zv&?r4%3V@bwGXJS>fL*1R zRRFxjwkiOg;sYvx^~Kv%0Beiar~p0}y;cT%DymfmEEe5W1}qd^RtC%!*(w7*6dh0o zaErDn1Kt&_Q3kv%daVSQEUH!Nduq}1JvFYBb>pj7JST&H*W8~CdbEJ&lVxK9&(ca| z%d=>im%#JUG9`g$!7?U+XWlX>foIOrHGyZ=(lUYPgQZad&wER)1Rk!XTmsLGrDOun zJ4?O*ajlfv4_9wMmo80%*n_-Llg%^-^-xRMa7W_d{ z@4pnQ-zrtCpjWL3xx0&68h?nzqPr)G1Ge4gs(DTUu#TA7ZjtekBxa);$OT0$RAr1R@m5lUr+hN1GB`@n6;*n^-wnGf z{(5269NIazILKUC>-tWACRQ@-Q&N%Pp~Ra`t>E|+RAjg-MW<0yIX(|6##c@V`DRss zU6dZBQtdcCNflrxr9G)s1CCF81=vC9Q3`cC$0w=+Y^Ss*g}RpG6IKDX!9Pj{pV~WO zN`2Siqf-h#vDd|vO7HMdD+L?x9XU-UcKAR_!AJMHoTg$re0G$AjrNY1P*EK|z)~=L zuZs!Qq{C-(DfsZ-kyBLF4j=wf@WIJTH3}EM-yq#a^)0+|$%%w{-g2Uwl74k984aLL_c?8hO&`N)C%WQ#F2IwbfWa^O*N_Zzln zNN_50;9;`G8}{*#U<`5~C%Jo+y*DKII&$D%vc)J{FeLa2av(dodxSk192`8bsK;6* zFn?OTdzomxv&w8-t+YYzO4fYAHuMQjKp@vW{1O^XT%LL8!eJP%@0+OTq*;awU z9oAWX$-Mn+?ZDt>>ujIkn^sv4Nt6_}ymN4jRhC`SkrehX&cT0v zd%J9vnm-_8wZ`t#%%j>TnS%_f@)Ub+cIJHO!s91PFOgv`$~8sFgFRH`3HIEFnemVX z&nHVo$S@1#8f@}l2UU5DJ@C5-K{NqE)H<6wU?UdR6$gn zOJtW74VcZpR?JRkI}~4ZPZFlhq+02zQTI46)GB5tvspzK&n5{|XRNIBAk;lh3wIT> zZ?heWE*?!1ela6urMH8+$8q7ZVs-+XMZBn!Buwf3eqr^ckLw?Xy89VzN=Qs>Tcjx1|cO{$A^NgbqTQ}{(ngZMD)g6 z_Yu$U3-F}iCwoeztI8a@1145TuKWTwJKlyhukk(j?glV(&&Z4^X=#g+ekAoI^N+(n zZ7=`cb${J+{%+U(8@cMvu*Hwd_^&(rxq9J~BZN;&rP(#J-0raD-d3B%h4cDfNg%6R zvW7gbK_;k%q2Ha4vvFAZjryq3MIeTuKb$YHargur@3yQVf7D3IUiW47@*mRTzb#{K z{_E<}U#sv>6M@f+`dx*8)+@vF^U|XWHRN&|Rv+QrWcU2+`HPDhPd@<%!_X`A;^4ow ze8003{8}h(6_ZV{;0(mCa_!%e{Dk5EpymEs>~n7e8@m<~0 zrG=Av`+9#&%HQW>^0vVLn+UaH5vxQf&+w}Z!u0n7SpP`hcSQ}D_*hN&+6erVZgtn& z{Iu(OEe8Itto&y}@mC#JMwk2{3jf=MHBNpGU%uTM|1(CsqTWUL<-gN8S2yC7IsGrW z=1)evVhH}>_Wu^}KXT6s8JNGaLG@F5Eo-V=GWJhZeZM98xN6!Nxg@xXrGD3Zzmm>f0jqyM z!*x|!wh}xO_Lr?U6vlq`r`IUWb{j5N)-3Jwy+B#2jIoh;JAVI?bpavvy+7)yQd4)s zd?lZa#Nhb-P-_oD?1Vq+i4wNE;eBPymj)J;I0Cvo0>X@d5=KDi5m4+1==umKdIS_X z0*V*`T^Rv|kAN$=!6Tr+5s?1~$ae&U8d(h9w@Sen@?|0Fr}}^uwG;k*3j5zj zvpYW54lKebDDa!7A&*a9vDh=gcRi!vGkbs4%1@k!R*RICRCSTjmm)9!dTMC@&e~lE z2p7dZPL*RHW!nhv^vtFc!d8jR@OqgVyw~Zi1Q_(1nX-QrRNS3 
zOk(VGk73Zn4&49|nJT^n>*3VLBKRUc3#)55#phNG2Yefrn(^vFUh7Iv$&bhos@Lsdz{# z9-D%Pq~Nj1ct|oHn}mlX;jxK$NFpA42M@V}KbNWgyRhHPf$HtsQ9mp9Z?n|wyv@wK%}l(_j(eN^>TPD| zZD!zYw%^`uHE!{D0WVeRqBAQ9B&O4##hY8?(hV+2XLa zxEr=OcU#<1TO4F%492;GaYw;82pGo?#*Kk+O&}Z=gu4O4xr1;=K{yBq#}C4d0dY-0 z92SVX0mQijaYunT2oT2)#EnVenxt@8DclVyoVyh6s1yz&h2xjPjY;B;c48r&SpH7z zSO>PL1B>mz-sr%(cVLfpU?Ck?{toO|JGQAEi*3i=Xvey@V~@6DA?;ZH_A&G1jjQ^D z;_!o?@PYrB346QY#xmSh0`@Wi8%DrhBw#}b*dPKnfPnQQV0{Q!EgTkx!>Z%3YB;P4 z4y%O2D&nvZ999m8-HpTU#9?>fu-kB085|b;Wgf7SIIK7hy9I~cjKgliVTEv50UVYe zhvmazH{!4xaM*P?>>3<)8H=sY#Xir)*5+cL=3=XJv6Z=4QZBYE7h95xEy~3f=3?== z*n(UvHW!{l0tG|F_n+Tx+l6K8KLqp8dT0eMx@K5%(P)K?Y%vd>F+45Wk*Ks5BIU zLZNG+kV7bB5DLljuMHpv1IWMtk~e@B8$dn#pwfL1v=6$r4|3QC8SI1P_d$#IK|On+ z(!CJ07rM3=a@Y$Q?1kj_LW}o8J^E0oJ_PAQ*YqI=eaJu`lGle8>pQ&HE3-^iy$dPd zg%s~Xo9{yMccI_!LL2Ww>+eFp-G$cPg=FqRYwkjlcOi+p(CWL;s=LsNyU?$9oiM)$ zbVSh(BT0b(c`S>$7ApWbkM1@FUhV>lb^!&ufV^G6(_KL3E+B0ekg^L%-W9UOGz;a! z0L&SHHUp4i0F&>4rgs449T5Ev@OcNAzXP=20aEXPNjlI(2PkwPnhyBT0dqQ_O$Vgt zz+@}X)Cy2qf#_Dirxh@71+-fMsa9aJ1u(w@Xx{;(?f{cfKvNVzi2|ad0G}wpJPOc` z0;HmV$w;6n5}-r^(UE{pBw!v1Xh#B4k(egx=Na%>m*A@hHgkWbOYkcMF^y;<#52kZvoa&YTu31olFo&e=0ejHh)sc7C{P6j%A-Ij6eyMg zg-{?r3gk$EP!#9@1=6KJ3KU410xhLL(`1NEhFZu_1sTdCLn&k^mJEfEAwT|gBorG7 zg+xMrk&t5~go=a?L_)fekU}IR9SJRsgr*}Pb_CQC0aZjmc@a=b1QZ(qg+!3qlxMT_ z%1_QgY_*fo&G8fZic%94VxQjmreG$ILQNkUjjNJA1D zfk9a?2n&NWV9p+|9gcT*gcsn%Y3XJ5)ds*PMi3Za3`$bDF zH#uA2HHf8>;D!)_+Y4Y*I~42+6y`XMpZGEZKBEsOS{eoBy?}D~qy}ot@n)pb$KZxK z!axsTKMXzg1`g-RCt2bTlTN+CZ%HYQGD+M|P<;q2s)2m%0dj`CxfyET#CUh6Pta6n-74kuc1};KwyUaA7*%Xib0r(jXvRK0`RmNdUG1E&5&0ydl@7@ z4B*Eh17Dyc4|;70n3A9eAW%t=?*Q=qkbx)AL4jVQfCeIT3IvD*`C0(q4H>xCDpUyx zY!Z)ux=AF6pK8J{e{8VOq+=f8^?9J-8FcD6pqMHzcI@SR`GzBSX0pNXQ5$b)g)ZO~ z2W>G1!jt4*9>KRK8}uKw@q||B0OPUHkP(1Ql7D&x-;`{?K5B!7R%ioWu~3E)V3H)C zd<0*UY|wqw#vNL*4euKo&H@l4kdRNUcb>yj%SP6)?an7}~N2Se+n$ z^$?!+$iVujs)X4Claxb*@J9gR4J71HJCWhwWtO=u#Vp1o3Vbe<(Bmk(uSu6h&yYIDig<5_i{@-gmG*l#LM*$Qsjb z{48tSvG}M$0c3IHt>^a112| zfI_u^3I^DV5<3J6y#e~&0WFl+0Z^z0NOK2%Mh;Z zRP5KCZRY&j?+Qg*gJScK7X1RO z+YZb>DE7mA~HyZ$naL0oXh~(wGj*Cs^9l5cYfH zcav1RfRWtN7|?W3{-mW%8DYN{egjGMJ?2Tfz-%uv`Wpi9w=EI7C-e+O#}ZE?40r&& z#L^~UD@EQ4?C6n?GkWp3G#rRZmp3%ADS=qW@cfU_KLPA{Tp9>OrOGccv3UluKt61x zTLLc#rC5N!&M5lLK!cFEjN61Y7&s^(l7}fFl{IJ8KsKeg|uX0NsPNbAVB5&j{Eu4MkgFI0PrZ z+E@~~9~jN+$pTx3p=e7?6~W1?R+>a+0Hc&1EZD+^qEQ$U!3k3vOC(ogC{^iTp%=i& zgLVu^0Ut{%Y(K{QIX zFI`0!K3BJHyQA!S;QQ2G>WthwE|v$hNE>6om6e1=owb(+9SZ+m?byEY!t4Wp8DES? zypuO{!EeYZU14Ffh?qVfG`&FBTmXdsgZgzHbzk1~H%q=1?z4|A>;#N0cF#7p_&CXk z4sCXS|4>#_`e}E`0ez=!ZZ$VPs{S=o_;SqPd{Rf5JGWiwTj98?@k+*Wd)e1}9)#?b zx}ovgN5zfPvekR;hwPHNse?B@Pqb0h|Z72^3SIQ#?p4^E(i zT_i)9ABN%ww%}yMyEMbTw;McJD|E1Ybw*R;?-_?qC-A@7AY_MBfW~6SRi6~E_aufG zNZ~d3UkcFpDC3ZQ!s|fRgYVA0Ki}_17pYZ#%w;3L19si}7pIblha3?dP z+CHIXPg01HRIJ7}NB73AjPq6j0yCMR{^Qm6g$I7Bg#WQtzqOl@D_PZQ^jUa6x0|ol zI=%9T>h`ZH|E=}cpG2Ymi66cbv%d0!j+)jtY5tcVR^gY0%pmk^$Ymd7%n%4~yR3c? 
ze!s`?pys6wSizV+dp-XXrU#kpJSWecz2aYL#Ot&8T;#h@`>*KppI)B5UGjo~GbNjQHzYbSAFR%^VSSlC$u|?+4O;t_P;LRi?9l?nl2W?0;Cj z{PpztaltI;AL7sXoA@UM{O~GFU}Xz#`(Lr_pW}hYYQN0-zZgGXZBPyfM8I#${jNK zVvtUePp2$Sr}U&zO4BG%8s%Erq)E`y*_Zizn@eZ*?SFW3!-s=?lPr`C69q6)yO}6; zCTar{CB{SvF;T-ksE!^~Z4aub2bI}_O6)=5dr$#AC`=E^rUwP|pmz75)cMyXD4+zj zy9A|Pg4$4m5-ULom7s=;Q60so+G12uF)Fhdl~|0z7o!4-QJ7+sO)&~6M(r+c$>C4$ zAgx|`tzJm8S6;K1|KacQ{Lg<^@0C~Yg|_s{Z)8Io*zz0L(0aD~dNw4>mX~Eizp>?i zW1BDC^l`c|TLEnPc$7mx;yYqsf}I3NyW^X%KoeH130vEU#W!MsMyyyPwzdI_Z@{+m z&#gddZ22@cl**P*WkV#kJc$hv+44j-l){#eXF+i+`8XC7%aV^}K`|`(7#0-Gl8*IDuxn9zBq{COsHjwyeR3E`OXI40!Jl=o+vv!w+4;mObPzyL1Z5yT${ZyX0h zj)PZ^g8|3E^T$EIPC2;Km}5D>fyf&n0S z9t8bB&F0y&wGlT64;CS)WN(vu0P$pm6D;c+qnN+vu?COk|gJV++oPbS<= zCd4HZVv-4Wk_nN?gz#j7MIym0k#H=LV3J5Unn*aDNH~~CFiIrsPb3&5680t%_9PN^ zB@%Wd61FE2^b!d=iG*#51g%7ZMj~NLB0)8gpqxlhOeAbhB*-TceorK9OeCyN)b~C( zVxTUJ+8~S)6GjOMqlOotIu@X67odt3_z~0vBO*R!=?VRx;060IRwx@73V@+@!%*rl z)CL$z42BYdp@t<;9TKQo2~?2;DpLZLD1pLDpaLXN7zvb(1PYKq?UvA;VTO%1oW@g} z#*>`J6P!kCPUCS-qZOyolGBLdG+J;P%{h%`oJLbl<1tPn$Z0g;GywLSIZmh_NJM2#M%NB07?Y zC=&4isWL7iOP!2WCu^vaN4Ai&wve$~$QoP7BWmOtf598Q6*=olCi2}4OQ}p z3OP%Kj8!3PsE|jL$yv%|tTI_cnLMII&Qc;{mB<=O8v4)&(k9wJr-|<>7IAT7X_xoYq zaI@z+<#i~^w3ztdBv_e-=y;2FWf^E;JESPDLr@Jbh^J11#56?hTf9BXU<SV;#H~5`B2J0|3*U2mPqQ(=5Lx;eTM+mPscoI^~Y=X%v_Mpb^6WtGiS&tB=Z}5m712K&1Zi5Bx9dpTt=b)m_ z5qE6|W$q%DzQPN28O$A05dK9VYjXJ8__u5m82@AyH~t9@g>Hjz_YMQ{;SH#$o5WrF zz_;pE%jk5YF3f)ZH;A9s-*uB4A@*#RrP3DDevuExpR^)Ic6d{6GWJs@56bu4;erj6G z$AP0&^vrUmSqDa8fL!N^GR!54S=P@bq5bMJbD3r>7=>PP9TsIsA>vW>g+w%}KJzit ztQMotvkkp1fQ>rj1HNh%d*TtmLM`_JpS6mSJOY@gYHu*LRgB=lCz&_A!Mm+uNge^+ zs6*c1^;X2F>4?E{Z}42JSiDC-C#u>DbZHff_6Yb8TVlay7_3?`Z_I8>6rVga*NR~= zyHTh=K>7*A*=I!j)$(CkxupUJ<&AxCfqh`ZJt=Zw4Y4Z&p~b#-fU={8SdxKIW?wU; z6u!3Hntq0I;x+Ml2I4pN;=Po%YNAU9LXy3Bk9OLl!Vu!=1jO5}YwF}3A;i52h*w?L z)X0Uw#NQJTC0*B4$UB0G3lk8}x~?ga3xkMV_YpZ=*A&P*f`}#e5vg6*5ahyZ#7FlL zkGigHBJa3nv};{bb%z?+VoyYImzxBNd%q0j?iHkHS1sx*Byc$PLQ+KF(&w*L;B4Ey zmc2w~kO}sRp95Qp;9hH{V;}Q-6#j*@KeJL*MlX86;Y_s+Uq0cp)X) zA0q@{+_t})1_vexW`Bz|ys=qEe*8y-N>{oN>fzn874EPLirTMi7LXrrk67N7J_oh& zZrNOS*g4`jU45Eji>rUnDfh!TgB&cz38Z5&j$joQa|WbhF=&v4#W;ZRSd2XwhQ*u) zaafEU=#0hKf>v0J4QPzToC1+p%t=rQi#Y+pu^4Om49(LbDaL@}m4qdowE=@Yg~ElGDo- z?%8BqFsYW5bd>n!-*umJO^OAM5%01LU79xJuvau=7dcVpXQM7ODPjiR^hInKD_b#T zvxu8MA7^@jy1BqA{GE~^U#)x%#!VN&nFdlf)2zZ7N@aZ!He+R~Q#O3H?<@Fu?!qDc zyU4lI%e%t1~L_#$@ggf)K<4DQr>aMUoxGj9u7dzgcw!oK8U1lIfc-CX0G@JyN)&~#d zDSuS?*nFd!dCIy!Fz~>ZGxx1mE{#6@b{B1Rl%x&tx~d^Rj?~hYJMxeb<&L2 zW;u;8FZ%SXZ|1AFJc|`ys8ZRMv)X#8UFsjK_|n!4g+M}ZOX=H`Z$w4CM&s#>>eGBz zCxq;g3ek{s+%lv1`m{l{{XE|CKKPvftT>;u{`ksy8Y({^xbqaLA`w=BWlFS;2*yB} z%eYO>l&Xb4Op*FH=hW%vl)g-xu|`#2M_7KAh?RLDNr6@$&On#x$gcf#G28v@ob~UJ zbGfqrQNjP_rj;{a-}K$0T?1>XUobC{F{ixm7hBI?)O60lWS)tL9uE{&cJ5cSaE1}H zMe5B1nF_SwaE5o8OP)eyf}9x$n@K7g~jEF7+VC& zDK=rH`62&WOZ)h6Q{&n;|tJl?X;;nM=8i)(6!!XJMgFMgBpZ zjX7F~TR)}$9~CEhE*;qa_W2Kprm5r7LLSzE6lEGJlA&1cB5s?rM6Edh)?2vdt>(FS zMVfI0BdM&f#%8Qrb;=#)PZmL<0^=12gX2D3%yz%Et`o+)(GULtk+$In9{%{sSCiE= zR8Fq^Ryg~G9w>kNJop>&{?t3&@;(_`Pf@kp%dl3O2*)~bRGH=z$@ zupL{i<{RWymRFb@!FW^V(qp4FtQzADGva^DsAb?0CEDMsI~3xN^}&UBhki)(!^L=q zz7;;ex4N0<@R8&bD-jxLKOmUD=SZqqgusX|M8K1Q1Ql9sB*UTHMcp<>^6V-4FM9bR zaI3&DC0cO=qrI%J-)3w~)z=rMmtU?n@abZ<`(9;7z$yJA+|mC9(b6l%O=v^Pe4tv+<=9ai0GdRlvSlIk78UYzv?&dOKw5? 
zjB-@ve9Jxly#K+G^oW6!EBGglX zR;tcE*Z-sy0@#KCwO13!919EOb$uf$bvWPXW_#{Cbos-=KVm4~D=Xy+4@5B@mb)CW z&Do^Zd>iIeDxzr<7_CY>5Vdl0L7%9t0H%?F67E-=g@W*(AI!39>W547s=pC_=y|hf zrFnBkYMLmTG5*rAw2PPeGV9GC!^i zzWP~UzxZ+MIUf}6ES;7b9*(nEKttF%S4HNnE>8Qp(1ieYYnf@76&I}C-2zHnPlL$%yQSnF#MXZzNkLIFJFpVXO^v%dGAR5~^l&zUW)`NiYK6rH^n z{SEtw(pp|wBhqOfIJkx8b%&u-;Sy)7WvmvH1gosEXwW=23l9Y-cAfpR1kBEzw*uV5g_~b{U5g*4?^$vk(bz0^f zhFOJ6ifxXGngszytZn9xsRYs#+Kuqp*vltU^X{0)7fkr?I?$h}EI8*Iv#<*Uq%~e( zkE%_nO1nBMnB!wabW&h(bt1;-z$A5A%^ik)MPG)krg(^D|LyPKP-)h-9A$G}oXh zL^HxGT*_>7Y}7ugoB4SCk&!TFCn|!~{2CJQCI#ToU+dr5!r54mSBN|J?=|wjpZOV+ z8(+jOQgRH8(4ZMcGZHKMUfX)wtL0|EEE`3X99vxq`+{vf_o(S4z~ZV!s!j*mZK0{% zVZbW>NzFq4GAa2ie?cl~&VS)jujH3FwR4>!@3zkKto;7VTt6Xh<6NBMd1}@(EAw|G zg}(aLW9TJQnSAwqSM@CkOTYQ|_3re{c6C#Z{E+@VL30eI!%=i`KjJ&HrIT`GGI|kg)A!wi;IZYXPDw=T@CQZ`8g#(M4-WKuc!hb2}t9+LJU=#k>u=^}3XN&kJe=K52{V>3NbVIm8)z75- z-y#0{nIl%S^BT{(Qv!?qZ}MTCv&tI&iH?8KjxW6so;q&UhWcJm|19(Yoi~I8^|k*1 zA@i(6{fz17J8y{1ZaANY8S*!b|GANUX7gWd@61!Z|EBFPOn%1pdrtgAz^gt|%`am< z!$7NyH~&O$XsM*A zXdDNZ_8VXqb&vBL8T!c4hg5oXy+oa8TuhAL;CRAdr?IhO@tFTm_gD+BI#tViXzXQo zHAzMniJOQ{fWu(MRq$4yk!&lv_C(8gb;Ec^bvOTbL%gMh-c*G%*L&jKz|=U8Thq=l zg8NTr+a-zOsPJ99A!=y;);yiM>yx#luza|c)>Gf9cU?);Vx8zGXXCn%@&1EjEWGLX zbkb<0=*IrBw^Y7WuH%FtE8N6%OfgPRSC_oTq^FJB(OuNhzfGtzo{sY!dG|8ta?&VO zhRUVA9DGs}6y!ta+G+(0pUk*7HR#oScrQ zr6Ic)`YV#sV&eU z2_`LYZot_y!0j=2oHBmA2#vJr$h`m>6vUUM`g6+wOoq0)LQS6((7hU zf4*f?H_ca*d-FqRMhg8-)fl~_P`K&A4#P6xBWL^~c6#p$3sxE>M~hyt)KiW(xanp% z$@$RYTWVI$ys=Tt^bs0y@}8tz?-^Y4(WdpWP4N{E^3xwXAK39)xiZAxYFTdde%~x# ze>d-dEM!3k<5qI!@TrZCmd$WGJ@xf_%t^9Ex7`*^hE){rkL*617lwe?%Xgx=s+>~a zuAYe-uErkiPQ+2CwyZj-s23G4+u4mJImgV6&uI2qNVB_E8+$|P$c{L(3I}D5)EZly z#L(j-#w&&S--KwSaSxJOBR8tw%9avMaNf8FO}C5|MTsywKU}1qJ?LzF*HY7*w@NO_ zqJnAKdt$MdE-#CDOWS3-d2Tua%<{no5P)U-0p1pf{#o4 zM#o$5N9gs%sxK@mCYLJ{pG0KD?U1;LeE7O9O{?KFhV-s4H$4t74t^DZE_0Q3d*Z+0((prLa@tLGLysj|wvMv3Th*_5`wTTX!BAy>hJ*I! 
z&pS0Acy@kctMA?3ZDjLi*CE#ggBGdtUaKiDm*4f5kadq;S$5?rZSn2lwno*On$(TA zrcO-nij~do$n^Pec2_;Dy7P!Qlrs9vmiJh8!^rBv{GBi6m>r9w-QP!zzGTlViQaFF zqM>8B60&z{D6Yot;e&@XFd|DrXd3aZS7AYZkr(cpHf~Ni@@RC?F{|SD8MNqb`evT( z*y!$Mq|yqh$2Px_N-B0wy%`;p;#z9mx>jeha=CX%7U!i9!dzX`OvC99CxW%RS4_7DpsxZ`gVw;Fo|dxi$G$HG?k^j^%ObdsA2gtKkZa9hs^Ao7A672S+d8e%o}-I&%f3-CC0mvD0@Io|YtL zz7cH-sz_o=#}ngp$tOvO0c4f2QogPi);m|EL+Z?AzjqCnzHqPT2oL*&TV~KVRdv}j z?+CZ^+6{)+-Rwm&;pySbRdiNZYEHl3?tyUSan@<8fLzk{RuIlXgyr>&^=iLvS}}@+ zXXu^QotP}j=OclUzA$ntw`_3m)@4znzQ2{TY?PBo}TGkE~;k4v|dIf$K2JPq_A1eyDxcb#X6x6 zbT~@m%9i@N54>+1dXf-rAFT^v(Vs^XR8^CtiJ*Y9*%!;`ksi9?c>FCfx$&8y$_ zR4r@i#*6BDI!ifb4E6Xo-Mrf5=%uZ@?o5R!v1)-iTqH|R?2O;~V}oq6Hs`AIAzv?c zHC6ARi=W23iAbbwnsJr1Mf^}wCsP=nM?F_P>E!ypXVVDdz4q3mTbFs9emb!|I$WvM ztNylsU11_i1`xbZ_}Qv}*0j3$&Z)m5j6Bw|6Yjj2 z^!ScHYtuvI$&M+!QFo`Cb-TWmBh5i_1UaH_x({LU;u@ZKq60d1Ro*208vg!oz0Il3 zUNkpE^NBH^r$s7T!EmuO@M`j25j{+Yu{~l^-+WxfY?IyuXQ~UQpH22~tM4bs=TXDa zwGo^wkLleM6V3S^O~3lGoDS}~^4r2y1yKv{>}pUfbUn14vnbeah|aF&5uFY$>)sgG zH(~k;T;|%D+h#caDqa&NPs*9d!gev9Z|$_KW?%5xJ$g-Vf_@9k^&bKB7f$Z9Ab59V zUTogV8*@-8lljBN@zL-C$8em8u>asI!;zg}y`>ut^n4kC6kithtJOqrnPobBn_=}4 zQa8|K(TTQi7?veuTOrdetM1gILT)$3QJj68f;vMqn^sgC72J7hN~=+)k#X7Nw|GS} z->MVG0w+CV4|sgKhsG})gjIvrjt|(bHV;=N7ty0jF1q0R-%>k1iR>`fh$ zr{8T4tUu+u+iedCX*d5UJt&U(?bR=>IFV?`t z`<~1IRa{(LOo}KbW3=#>u@{|4JgpPi@ScZgi02J`EW5ldsyahHw+sy%6Q;Gr5z}k% zdA%l_Eb}}u75Z?}i-OdV8=l$v_k}%iRrNp>Z;jvZ)DWYeRvac1tB2I}aoH9>eD>m{ zcYH3CQPz#`M0R8}{mF##ic0z(^&K$cT$6j|yr>{rXNiBxL3v2>n8zB>x76BxhyCS0 z;xx9^Pru1sa235iGV>PML?@F5M~@|!9LZeP-PgxX&3!f3qH53$I5fTMZH)*@7AERj8$+S(-Y|(N~_A{ zAvd@89_LX*$LI#bQygKhydzV*iIP4Sp5usm*#vLI?G0;FNph6GX3sg;)AIQPWD=X96U^?Yyg{D$P*M1h`OZ;&4eSBuD+L=4G51%|cOLVpUU6Y_= zw^ob)OM13)t0?s!7TY%$_CM8wsTX%nj9I|rneAu&e2Mi>AMn5CVo;A0URb-SFg?^f z{OFPQHqsiqQ9Cj=eb5|1oivPPu5w+pA|lnrbpHVrYMOgBOEx<8ly!GS>hq+s*JrMc zhco~hlclzH4J?J|7Mmy3&5HBZDgqgXrmR&qn+#%4_@5k!t_Xe7a8`p-`C8TXgQhK~ zfu$EcZTDoJVAskh}dvW`dVo_P}FIFeRjl+{qC zL1}1I<#uY?8aJ^VqWv77JaHV!s_4yXplfJ1+lHB2JuFwX`|u#jPsTR>azm%}h*#tn zmuI4E2c@1=(O84ZlRD*kqJLg)9(0R}oQ&E^O)IakaZ`M9HY%=c%3@>JpU=5QmCyQf zA75gp*#{oq&aYnb&sV=Le(MIwve2iGS9a_QeO+;m=Ea}_dtjwYlzLeoU9|6omYzhR zqD792YeWErVdb){7n-II>PHwGn_wfN?QpGgdCS?yc^;aB$d~1VNO;?@(fI4!4FmA~_BXC5u(*n^f+Q@y?Has$P6jWSzTrDVLJ zO%U;Ek4rZxaCYop{o*onfcX683wjN}vE;rl)AvdgG8;{H$xbSCTzK36-ps(YiUiYJ$2XmugfkN#OT5p#;-Fw*#*FT4chza9 z>m72$-Oi}nlVU910Jl+iSjwE2JF0IAe|%r~`D0BDc=#^j2E3O-w1pv}Uic;}EM2en zL09+sNjRE!J2Q}#y8a0(?C{JFEJ@OD;@=MUsvAzzL~GV(-bV9I`Yo(96pCafm=3MI zBPL5xO-ZRe%{)Exim|cae?n4Jq{j6sfFnal6>$J+W^=e(@$W*+a%P2A_j51i}Lr$WHjVHNN zB*Y*Ur|ajhaxah5(T#Kt=9ND8r3Uj_ZO%@O@=^07H{ynC`|q5$GM1fbLnD1oMc(#H z`dgi)pJ85AYYXqDZ3hah414L}`%AWN?t*>$a>K{NJSwKB zJ8FjGVxK8AFe!B_&$o!TkiBr)zSR|RcRaP$Et4x{%=h-v%iRJj>wJZ{#pt*?@Or(s zP5WfxRX44x%_^qmtc=5VkzFU|0hWC4}aW0?J|Du!k5M)w!G}~D5 za98RnwLGh>s=l;N1^IbIUU{;|l9a6632nXS1++BtPDiKRT3NlX)+nA>h8r#j9Q0p{ zUZ}KlRLr(`daa%N$Qt;N@2m#&T~*RRQZ;0Gj7mn_j%#{&ovF5-Y8h7ZCEW@BfgB6JLxoFG9OnD zdjua(HScXoxFsi^6fcd^!=;tNgLZPtU3`nn>s$5cmnhslF~c5ZDhEgGH5t2JB74P} z;W+w|VAURpf*Zl?yqPgdWJTVKuB{VoGqwH6xQSMTxy~wQ=8U<;Y~$7!T2wf1psRmS zWx4;J(Si1*IUeI)xn*rfhMp}QRqRDIQ!JjkyN(X5d@;T)n%Qm=9~Xl{qy2cbl#bNee;YM$5{!G%w~bXd@HoTL^u&H2gM)+>o!?hy-@xc2AGi^y5E{ zpjfQ8P9ogN_X>Tu#QcFNeFAqXUUZPmQyiv)!6L%EJXfLAbGMx4+Vj$cw{iU*h_7gM zgu5Z;N}2avtiSYYzUn65?XB!N-RGWB+nMrH^p_pkNmXGq&Ic;KhqK4}U343DMHym@ zdip~ZkGq!SagFEid&*N{?1g0b@&)-rIUMwn-OW#aJ))R7ygRu}Hq?@fGTja*@rV}v z6z?KVqnP6m!rzYMpRG`JjNEpSB%{KK4|Xq7veLE9Yi%#m(;nb|poE?k4soH%^GV4d z^b%Y9AlG39O1*&?BSkX)P4p~hu~9{?bZOgU6M5jdT8h+eFgk-P?uZ(mt}D zQ9kR*;T)*kNPkwt>aC6Rz$^gbIlYO8+wTXi-x1;T0&W;x`F`-Umxt0ZD_qOjuB@JW 
zwYN(LigKGeTE>i8pV&l(h}B0X6y%jlkE&+>_RQP(q`%YvDP+v?89aFOBBM89IzfV+ zt@e;?IMXoX7-J0*az2{8K_m|?UTo3aMm=Ubm5r$6O@E@fI;uJ&K0zp=)QXrq@m_Hc z^XziRmTPM(gp)*#!jknyX2>o4Wc&D>QzHfIeVb&O)#7=?^{<+CU*?Yz9UhVTcp*0N zNFMKEZtEtlPs9yIHN*VO&Vt1*eW!+*-tA<$_)Q3v3MbwZ_Di&z?p^XG_Vv1#CgeLg z{%(n7ruOIo1CJ+7JQlmJ*dSbXZY*BTE@GJ|opt?^U1E@#5-2R5xeOUpyxc=CG)L?Y zr`$z@FSrYT!$8cOtPGxM)4H_VrdMnd{TkVFyF?M-(Z$xt)U%?K$fXf>!Yc<7qgK?7 z@1ogf^L8dP854$`*7wvxOg20p?kk!%KtbN{uRAxD(oP*RT1Ph|Y1Z*N1`MVmPV`UH z`Q1~r-)i7GXWOoWN;kTjpYKfR?XqmDUzaGcTFH%+O;%Fw5x#w8{Q9_Z*NtjNs}RkN z$35qZop^iMgJ6yr&|oWHI5J>2-gx>@?Zw2AH7=oy)Nc1ag1eHgS)^FKbNJ4}q8wz% zZ|QsF4(3kgvrOg(aL}b8YsqdC%Eb^K6YPO4I*PRAFECl$}Euq0)O!GP$+^VWG;lq^9W?q!BI&q?&()XG&_F8*%l?VMu^R>I( zx@)npTXC&aWO^Y}_J=c$!vT)2v^{|IaTXdYdH7edXu$!lWYEXMH}mQg?5I7B3D9(~)z!$hVDGTX<; zrCp{j2RFB@*>>8OXRFC&DULW%-kL9;%)5M#Ov%3Q*luaNptm!($p%jJ@Xhhw<(X{- zKF3uV!}C2|yr+ix8+w|ZCw}eH)Z;ysX+yRROi^Rok$NRF!*IA1OlC6g<-OLvMX#6- zd(Dn;p>xH$-80$mx>FJ*5_Su?II(%-IiG7q-6Qp+XqhT8m?9)HcNSQS4_ zZ>L(GC5=M$#>KeF7J{w)LX**0Q1=l~T{8yf0R@g}Q?1L6Hk3;bwo=}b<^9vLD-x($=NGxu!kx;!|i&ue0nT-wy>UPSNM-)vJoyM24M!;y0&9EB&%8UWyue z4E{Kqw&Xq9^Sqn1(!u5>Q|>nR*x^rAaIA1R(zaT+#LcSjo?n6OGttZ4jIFuCnv9(z z8lIQhhR^YtU6;8

    8F3{Aunz)4l$x*INUS`*L`ohVU_UpTe;&E zx7v``5LoAF!%%T6PHU5x(nRXe+SgX~zNqEA15Tt-@hQK7)m1nZpW*SUmpa_ZPfgPM zlxN@Q>H@1~=Xj?AdRbWgLil=_n3So#H#yQ%PUg8RuGO|zO-(LR6a-gj_mz2$k1MU~y@}+Fe{2ntX*U=* z=uKjO_?0ZqlPdD)6~{d-Fq_iWy*l8~qTih&l_-W-RQoL&9iXMgRB9>viR{q!pfZ+*ltr+zv@me!u1?UG#YpT&7`X0bkJ z$8%UFZP}XzrdYMRy&pJ+g$ykpl{EmmuY41y;X?espoRkdjZ77_}X1(uj7cM4< z;S{CeB!_>cI%@g5l@qh2r;eA`)(hb9$>P=a= z$V#p(uQzoK6E78d6n-!J%q8zrV`aW5fal@uHo`5zA-#e+c^x~sR?$<`b)Pl{TY7jO zO8T~Q>;iV?hfg%AWvw@!YVm@5Bkt=^x=YH-B8M#KD0qlW^*FP1jPr6_Gi(OWWiED1 zUpnu6g=f&52=>8ce;bt(BnX9dx4!}G1MPCxS<``}cL&lFenMW^;_Pu9{2^|!H} zzZGGc7g}+5d#;GTh>RC~IyhK8U8LmmxV4`#9@;yX#x}hgo!{CwRX<3L(}V9E#W@YF zeXGglJs6Yc9l5$`Y=XycU-r|ya9Ntil~%U>&J=v*WGiD`sU}y~4y}M})Ud&VFddRg;Cl+5R z4{nl_Rg^OuToZiujMBWVRg7(aTshkp7(?3R>q5V_aIpOu_0jC&Wu#lExS=uDK-VB{+eEz*HI&iS{qjkFlTW*c zitjLMX#8aZEhqt}Gr`4+@+NWq{T=n}u^=R`KA!u4Gc_7?>q*&2Q4u{(&#y)^Wes)Y z00PJDcIKABsS_uBk63vRy`MVe+bx&hS7qflT>LPM*D*D<&u37qqGlU6o6allcjomK zMObmUVM=_SLDk)=t<9$*5u+2aX#*zN5A*!(8pSkirzkicK|G?0f25_Stv=o#DLU9$ zQ>}z58{d;s3|pdqy?Uwtc^sB8bwY_aX?06se(ubfpVYLlFX@CiLD^ zkRn}rM-Y(SLlY1J2@s0(5{eLt7<%v7T-W{F?|R>7?`Q9|*Z%hI=R?*U=S(syYci8L zj{oudpOYr=zgNIoe)YdCfCjYu8$c^>`x`*x4#chWTE^iHuC7*>LzALWn4>n#UK^$e z6+be}ay`<1>~YE>6L69W*+P^ji-wpEwZZcaQ>^ELtZPjl(s9d z5UHabAAJ1#{Cs?gsoly(MmEHEcXlZ_X!p0zhML_*npz1Nt7Wji+xeb&=Q(yW6m__P zyVQyAlny#OUdqjpySfg3bUlufx>!FtFjm?%>Xf?(4qRS6*bgntyI~o3(jM^Z#syvs zVd{;zFsFHo((>tT7!c0?WUF_rS|Ip(^VH+w=KSdBcJ9@Z=*zScjQ>=r>&?{-rvAQ; zg^Wx)?)dDORXaB?$HHv>@OengP;Pir(fJ1t83^8|@55_<1w!9AC58g zEa}zlAAdLPmgBRsu)B%m4L5nYE^-z(^2_qguOJNzqh1{DI`F{`l)f_jA#xXGR-66Kxc@&3kEabud)O+a< z9lZ{NzXW5&)(I^<7CPy%eN(`^S}Uen7sAHWy6-07-uB!vv%OjO8almU$#5fd%&94} zshMGy*A-`65x7zV?{RG7DfB@Erd{*{cEP@PV}v1ue)!aT9LdyReSKyXyxXq zTS#)V#Q?+HPCUh-RX*dXTGVA>PcYqN@%3vx3F3tDo2L%9%`kcOIabc?_|GS5{g;KS z*RKz5p8AC(e_xzQySx5fUVV|3b2nb^M2+XNF!}oRz|B+JkmS0Pk$)}a)n{3YcK+^& zm0lK3T)+Nv^VBOOxz+pdpXG(#&!rPJhs(kSv7_7fZO4z@j{fe46&=HS6_;3xcH_rR z)iy5+kFH-|FjU`epH?V&-9*~9AhqL-{eiCJaA_iKUywTS#{NQA7Tyjz7yM#ttC`X#;=5`iDWd1D#8wvW5- z+YY=rRlDsu-nbk*0(=<4`bWawhQtP?(#==iEWj?B*gaeNf6OlsT?)US4nF(&9j$Z- zLVXDS{y&uJk@B<<_k*jf@hs}UzVqA=@j&-2`8;s)F|4;$`MbYmOQTyQ@OrVkcgIHw za~b)UOY)9aXJFIy0ptPwDXvO37uBBJEpYrjS|~Ya-?5#Rzjq8itUfziSG2T)kK%_> zDK+fhU$W@L;?3ZpfG4f&H`3==@d)T5)cE=wa4sLfc~if&zZCH|p>R6axA$weGE_dG zvnx-!yKS4$MdQgv==l8-H3!z++n$t*!~tepNFtYMSFJDh@r&Dbo%j9oPAc$C^%Ep6 zB;lX08&6DjcPa7$-y=9Dx&Qt_(S7`~Hh2l;(OP@JoF$abH~Ri9pyrJV3iuB|4Ylz+ z+-%WIR%hp@?@`|_N5HML!rvS>jxqV&*y`72LU@Kat7cm1#8lvj*3a_()#tsf3M1aR z9Mn2IsHh5!xK3DJl6%~sm@Fs#ERT6|ByNrZZw?CO90^*DqR)KyC5k3f_e&IICfk>& zN0~)Yl#HV96)34iA1koP+n&c1A{M6>V@@H9Z_oD9Q3GPL?XFozlxn%!B zTrWhqv3GFK2Uxdr{|qSj&fOER(#G8!z~07P5n$cQogYxp!krwj(##zhz~0Oq7+~GR z?HW)3O&zXHA4chStLMu7W^E&1$IK6(Fn#~B- z;Ipmq!othbc$PeKmY^FQHQY^}W#D;Z3g(|0&hz%hI(vp`#|68&dpoR+galk1{U@i% z|C4`2C(u{28kYTkvXE?2q5oaa6~kjtmn)yJmn$nJ&-Z>&SwK%dF^b$QbmAtXQFhM- zLcI2|I-PkD=ZBk{qifBBf2XFjX~@4b_iD3T`A*!~R#y@&na*yAZ18nN(uQ3LE$wN? 
z^-vy5h|k3ywo~>tKOQr=+Swl;ua8D<*wIQw9R`ix524%GyZ(K+3tv1Mp=YwdZWeV~ z`<8ZEpG9GD)6dD|`gn(3yq3C0i?3MuN`hc#Gz1 zkIRjdkzVAp`J0x%;}sA!6!%ZOU;D5rQD5(R?y~X(qYnmQQld% ze8}-6YnI$i+TkJhHn-gOY$KpH_G~E_$BOK{hSORGpI^#%cz)V$TFTAN$&vFLw?eE1 zx%D0gx$TeB`no=RE*{Vud%ir*BbT*wb3F?$JXBNTwo@xv%8>~=Sv?wOy;&aLC`o&s zdzcj+lHPfFlw>)G%^I(FlNQinU7V&~?DX|M_;qx4aCP=C7TY2mS-Swl1@Cs%+wIOE zpx@PB9c~En{aqhYgNE~n10?w{HF>vIrPy<$ac;G?ytPl3e(lZ8GJe6HNBvE-B^$V= zkF3}8SvSFImY^AVUw8MDp^f5|8 ztq^zqQ-yfwI=tvt@E;W-c>7-!;_82@5OZ<=REWm4|8W^&sU-euU1gikLRh#n;OgjT zYi0j@_M`LQ-0v<{RqPU3(#>_vZ-;Zc>WuwfI^^DGp}6IrFSp}eT~6)K-m4%~idEKC zQdOo@U@9jnb5I-z2SJA{oGdk6-*FDPmlff=y?S$u3e{)q3^?zpg0)sNZk-sO%$d*L zKB~tK^eyCgBq27Zp|ps+SYXAn`pldx zTHLmHb%s0ru70id@1Gws_5?;$i}!?Z?sO|$(W+iGY1I1v4>M8>BQg+48^1B)6c&sX z-!;3}j7tk&GFj|6D);eDW^Fk%-Pv3_{F@AtmGX11y-x4Mkaixu4;44s(A!BkY@>dk z8^3fa8GPz;0zZw5ANC;{GW6((|GC&7dbM_b-VeXeO`A6D#vXW1yQ~&Sa3jOgv+%Jk z8ZB?Ud$t#-7V>`WMCE$O(x?Kbb8|7YUoUhw{#4eR37Sc8)_)wL1tW3;;*i2qI79W5 zW8AGTWBdYloVW~>cX1CmuP>{ccYh?J{qNwv55j++3IBh( z3IdZ?{yY7<-l;F#Rb$PH#qHM@o|)=Azov$3+`e?oaGk+^d;9xsc~^V>^%k8Y+l}b& z*?*R9E6o2_mx-^IZ{_~T8>g#HJy)5|>&yBGwT<{V?cq4>PHDf3t^Umt`78LXbNjia z%!W8EPB7$jll$hv*TSjvd=q}jaPKrPJLK?e3Da!ljbpPFCI2poc~u6bhxeW{~|$a_WTzTG(b2vSN{9y zmP==c&r#c9Vf`Vha6BUZxqN$b;91tv)$vi++W8@_{V{d$|7?MN`)><0{e9Vs&0VXn z#SU2|XCoy`GI9aEUxRR^Y-AU=_UG#$HyXed`e3B$eccS}^5V{@L^u05J3677d3)o- z+h)+s+J|>gISHRr#|Cp~%e0x^rdZs5OWX8Z)k!N6KR09NF6wyzQJd9z^s$8!A@+Fy zYn9a%QBe^~IoX!9T#f9?cm2Qhp>d=g?eknx+IF8qaZ=o^?D?@Sx{d!v!!HUqcnh=K z%ATKa7R>aw+v3!cbSz~gNtZriTw{&*iguSOe+csZK(|xg$dNt|45F0*b;F+uvGjcM zi=v(ed*2U%g_3;?jc+||M=hQjhNUbM6iV*ix0AX`^pO+Zz3K~WYTo07caTw3V~kdO z_9Yg6RP!x$dIZ~EY0O-S65kx>?(d~8uGJUT^{GQw>I*w?Qx|L3O%R+{^z)%T-mCEu z^vqeUeAsrp)n(|+t?(F$RmAv{4D`!=l_di19KZ4}%#giR$0}umo z0U#mFC>k-RDak}pm#LPN>5-IU58G=qbEK!Kn!m|yLbWjt#kDjHM<-7^uV1uJoaUjh zW`lm$EjJw%9>o*ar+=iTo#sNSshD%|GYVr(h@Z{%yTGrflY1@sPvYPkYJYzd1#(G9kpHDEL_z8X*V^`G7p65M6}5= zS7BWu;B6h9T1!QY{3mIJ??DR6wJFLwGZ=D9dU(KsMPED4u)^n@nV)8 zrsc96focJ*I^0~cb!8#+s zws_gKal9)n-f0@(4{6BS`%4b8`H%#ae}!%Ar84ucPM6AP%2 zs-8N%sq-%}YYdM!gJK8LQxC@OpM7ksu;C$Jma5eM8Hf=ttu9u5%$&C@UgWj?-Lc#t zphD8oBV{SsfM0Ak4sn)&3z3T-vnG zG!9+Ue4KwHA6!S&M)i66Z{L9QB#t$PSS`4N+{`Dyp%Ge+HvbP)-$?$^3TewPAi31# zqpS3YUtS66yUE>chEqFGu0Mh7=1%OlD8Jjq>}Y(yZRYh^p19ve$x7uOfn21EaHPCy zo-6YUpc+gcHhg@y{ufE_#%@Gt~#)eh!ePfwfm2k`V^uT&KUApM!!IV({$P4=b{jMXze~m&(a5LZ0>+E0ZnA(X@|2pfx#ezx^M+1j>NM!xL@mLKGc`&`na!2C3W-`6ss29Qf4 zAY)Sp4Ri;pi?RUC6sa9P?)s8uP7-aZ&s43lkOB?DQ7xMf2sNtkJJ4R2P~8| zF5C>AnUz7B6n#$LBhrBQC=#)`l$8T~;s?v*%K*qo0!vwTCg z&wRsAzHNV~lsX-0<*qd3`(~k?#HNIcN)@(uLliMrRdtUb3LUQ!TX91S`kJ54fBYvs+|y3s+6of0 zoP&Tc=!2Nv;EDI7Pm?yySob-Fpy}$QdHt@9wOa(i>mr>V=G;31Us;g+(}J_df~`u0 z?Wr}~UsbUDRMvJF;5Q~N^1Lua6MWewi8I1>hsVnj}H zcv1SNow+mcZvF0*8yF;DGc+i)zwrZD?ITy$K0p;q&wN1ma|+($R`$hs!k58ANL8wG z)$NJ7p@T;{>O)MzbM4D0eX}ZvW+FMIuqm>#@3FE;st{K1WgX#^Gou5@S97bVs*)Jv zV4jwVd{JRy_?AbIADhp_VK?v4YCqrBu{(QR$#U9fGf+X+HaAIvX+?M@JIu!3`7D$J z(u?t{(LVM&bGx;C!H3_p&N(vHM=1yuEDH|(0@xsgkY1%e5uNG^TsE2u0D_`Ll2iSQ zxh&8CQfOaH5&4u@_&)sc#FW>Zn`y+eae!);OLD{&k1ja~IdMn7n%j=Y`RCpCM zux6F>d26k?BmXkN^r_TUbHZF3-=XVgKeMrpi0) znN;z{c1-lcguxbi@^Q7Y1ots6XPs`ESB}xOJ&-;v@gE6{NuFopL(jCu8973jmEO)+ z-$?CP&LE#0nmIAkXabzhU~bP>^;F5yGt3OJD&axme4fDeir7&>0bR8`Lqs|Q##D_j z)Jq$*r{dh-JYzmpLk|Ye7ZtP!UOyi85NBYSN_HD_kEQFP-%&QXS`g*sF}ywPfS(as zxsR=4gv~*JZ4`IOb(fOE-J+d|Q{0pa`CdA~LDk+6y4htW*9JJk@ue0rXO9Z5u5C#t z_9VQrwz2V1iQ1x+i&?xu#WbM=qT$yk#GpDH`^Yfn%>7K69wkJCbXn~_6a;YcN|ib& zJ5ux3X8C~mGN%1RD*6apPTW9kX@5K+C3#Nv!`sPR;^JHZtm38$FY}I=*2pyy+lL(B z&87O`(Z7LB@r(4(&|mJpT~=O4qodn!kxfAC1F0PXp^4vZZj@pQ&Me4u0WTI2p1@fx 
zwWUnkBL-X4T>X_N18wJBr5E;$`zd~ zAshZO;H|vU>@eaXuDMP(0oEH08MoJ|6?#827!@UmMZ0}_W zpTJpXr68$jKwK#(hFm5)mlOXP=ob?cc&6@?39ClfzMYCIycfoWCrlFy8mg;mKnyV~ zllb`GJRV5<=$1KUOA)caQfbukb(y5((qb{zPjHY^!Ob$2R=5+xMZ3@k@Mo)mZuLJ) zbuybF{P~cm1z;BPApK@j$?>(bIKdA=ubd{1RF=fpn5dzf(GPDk+hum2E^Qs=j*Yjc zPKlq2c2^H^A%$IP=a-v1F%tKb3(;_eK&ld=)oPB1HQRM4&m{&L_;*CHpZNTn%zAH# z6(3jO6dCC5T*EfD32fqvA<~R&`2%aHp~NSp@kV9a;ssx5TTp z&t5K*&*op3*&ZRiStC@w*1FMvRGsLpT8OhI{hDm3Y)bxQlcnSvQrsv}3h9!F*9%PNk!?P$ z`k3$BveSPeRlkYxpZy`E<#2qM(H*&7cp*3<5N(jUpzNnm7S_9U+s=G8ZWI5H4re}2r5Z{Zca*hmwt9d80@kS=$JBp0IQ zL!gIgS9atW+-!KWIHD!hd@1&w+UsWyam!nDFxAP2B!i%{`KJ}Sl z8;M$e4r+us>FU!lsL6Bt457hanT|k#V2QA0KA6n388|78VPB~RVGXYOF_#*hApnio zbrzXzBIwQNQO;H%Iqc9vRZS`;O4@b&*Ac=Fb=OJC1xa51J8mfyO#_ zf7IHOnxf-5?db^)etBYp$?MHF?VX#=ix13jTZD|Nd!kcp#^08S$BFYr-7g2>n_KI5 z+Z_BD!7(OhUt`(q={$v>Iq>Ofx|wq$MQHB9GY=!&l3vY`2FP%I;vq|1H-Rq5?aXQW z5{{2-TaNNoz%vdiQsK%`<80LkS5MD*h~Gv_(3ceCZgaRa2w#rsp77O!Am%a8K+RO!Ox{Nf z)8yW5{1hy?lGf*l&w)qO-a%4CmAVMYc%iSJJyJvG|O(?rqy3(Nr;;7Y(Vl-HT zi;%wF+Zr>SDBJywezro5iCLrAWT`hZlDDQz07; z5@EG828z&Pv55OUwO?D--nJf#oLXen&3H)oRO+dn4l=PD){kt|I!BS9qDKsFqwgt_ z42G_`4ily8Fv-tuv2ZdkigOUBS;>Nlv@h}CXk5WY%Qu$8@MbIRxX}xZU_{v3<;Hhoz)GY^95WW8KWo;XCsyX0ec??r=(_@cp0_DJ-}+b z_xLAzajSc#rS^yXS|tPC6UKL7+oEVQq|fY<8(=^uW|TommzE4<4TCd~rRkDQEkNdf zLt5nP?b>=QBPf{)>G5Q&^C%{7ewS@#XW048I~2sU2vG<&!R^yJ(AK0*7`cIg5gWC) zc{X%#KNQP9v4=mV&SRz1Lvo;@c|m&V_MovwJ{@_CvqPmi9NIvKvzYwfueMy>?(*mmC0Oq@bRjOr#lg`%9)? z!K(qO6+D06L|$*SkMQ+vfM0%wXI@T%ekJs=m?vS^jW;r^4Z&~ij(pM^Ut}$3I(*|V zl&Z3B=BNxvHG8ZoKdgMiaBpbMl0>4x3T{9yOr+hwD@~T$2w*T5p1NGctW;R&nKq_n zAoS`BjS9EZT$UH-SLox8jy(LW&VF?Cc8M?>!M=|*AJ*q8V~+vi30^1!KYcA5TwoVm zrIWs;4Tma(54p-X(fJ6?@dSg5oYjvOS)&<3#gsE*z`IEq^{SFxA*Sv9(jp5&iJ)8)5LQCD_Pw2U*2sMK1`l zs@M1D9#YYnPESXP6-;H^2NMpDHPA!l_2$J3^un0!eb6tFnzB6;iH4=LjmEKW)X7tA`|?A@mzF}w3I`3 z^3c~@@D5>tsxDuMBzpxvr8M5xR)g!A&(@Pcj)na>^IrC)!;c%~N7inN8-u@AIAoIF zjbp4^1EJ2emRXWpO0vj|3Wgj%=dlCY;w!Z0bI!tBzA} zd{FCIK5GQyGJqfix*HMX*Dt4+>tzq*z-Tt59B3-0@fR6_f48m449v3+17qI!yy4ZP zG;6fU%p*#!S*16vS(YuF&$+T>5V9{QcuR90lb*v|1JH13zJv?K`4;J_N%2yiUPNXg zJmK-goigQN4=U`!U@aL|<%Lx)?pDL!d;N1kJkxK;#FLkoR++$kg50r-!(Sn!tzT!r zoCL>VO7?2X=q1lz z!lYO_tgCE!o=Cn_8|FthOoql>%9%AQ=9rsV%PPW5j^wxsXka2O^46KRGrd92M>BZx zP>Df0BqNC+S=Z3Q5hdM&hLLBts)b>RIa9aYSVcs(0t1+bl7re-q$rLabcP2$wxF6exFVs zOw+|Hma=gpq_LO5t69uExerAuYz)-zc%zSC8CnZ^zdY>RfUPm8y!^jzn%~qwcr)mA zu<;&Xwt9FpRwi^K21dCGw9_6C7zHfGPSDTBjQE4tC_*HXzq*H z7Q&=TR-sWtMoGka%?olhCe`WTcXro%ba*0*`J(po16BkpQ)(hHc3*P9qj)@*Zi;Z) zQYmZS>E+-iHtU&z{J&f)$Tw!J9pp*KS~_h!)I2rVrc4qLc6PGsu#aF77`Lq?uahGF zL*Wm}LbfmN@li`Z5x0BM#F>qmzj)0ZBk0}CG?jT6o}fj4Emz*#q?l6cZ&*PAu4v`u z3sXCFE=NmXp2$fm)qdj9NxA@+PZD-@1Hch^a}V>zBLPxYN6nE_!oXa_-CeoqE(vrk7b8&!bk?=ZBS0w$#O?}< zg*EixpS=N|u+bA@o~A5hn}p1IojdHnzE&DKdz;RFSZRSPe%#`_5ExKgHTji8d9W%LNC@-UJ1$BNT71Om?i}sKJ>7kg!%L*Bp#m12{@e;cr-Q_c= zVODi)TCJzel0RuRGeT6mLPggyhTJ)}HQ%lB3k%;Ag27JhOm6EYozwY!oDd~rJDuI} zx^{F#OV^K7MPf>C<>{EV&z7drT8tGpx%4;R`E^pS$d&krX$@yE>E-^PWbK}c#(q+T zUj=nM2xu6b4 zZJXoeVLBcosXGq4oWuLLH$FE~K`Jp-q=sz{JZX-=&lcns6H%-+acnYJUX!etie|w6 z7J@ggEFZuXhJ7UtAV{_m{AfP&Z0&IBw=7N{!ckeggW6FW^9pl`IXoV(jZ9qQ>D<&D zxkgkL47o&4E!SAUUrf60#IY%$POcw%?Cd)8jKGxhZs*HAruxfBlX?xsm+qnAS0<7A z(+ZD&c!=pX&vO~Fy`Rz0GC$tUwY45hWAF{MEV}Tt!#%2)?2*YO65vKJqV?V&ds16k z)wjWx@!gs8di-5h7FvlxioRNtKqSky%xOo8tblC`uyHDE-{x4WLWHq^H2p=|!0$XS z8FCJZIT84?lDd3VF*&$#mGBo;ou0DVx1gGo%4W_Z{I_2V2i0GY90-Gx1XV<6g~HPg zgbSSzZzSLOX(g9=Z({Y}@9DE0`jXvfw>jUA@{yC8-Xkqf+MFk67p$e z<6J>ru7E>(;2S_Whph>ChYgCK2Freeq@(1)Dy!Pd%->_|@KBLtj<}0xNo1ss{MZuT zx`x^S1SOki9e?6^`h*!3qbPfRkCfPHdo5o<~-%KKv+;#?Y_-f{kMeT-_Wc9#rn5X4uYd(B&{uU*Q 
z8nq8TL$!qeu$??U;jbjqZ=MVok+srm;}D&-&J}95J9+>Z{xtSn19B5bJZt=e1g-th zS6uw+mBmP%44@`F_IdH(_-DkTFmOnP>Kj+D<%w1)3DlN=Fn3R@L~&AN8jYI z2#w8Ib_dU;t@+I}GdU747dDmZ4N}s5@smm#F2vgY0Q(C;5*9LKo$JInj_Q~TgB!$U zIOy{PHBw*xr8O~1TCr`$gG?t)RTOfh8CdcUOfIb z%#;q0&&>8690DnA%z5YXse)4CXX}I=)GtlZb-JsD9O-Eb23!sXWTY6N?i*$ z=HDMl4?F-7C=dRjAWHa{#n~mM+a!y4@dXU}kW%_RdnWYl(LuxBTpc~Q-uVy|j-P0r zALR;=@=I@oyb?#gB8tJ*iaO=o5geS3Gr-In(i_zjt8M`D36=&KRCP4~9_4d*^(G$1 z52I5}lGWbIL>qd4bzV1g<2aTm7q1iE;Mj_^LVY1I%E@3CnRpI#n4iD?7O_{~Yg2cW zt({H`%;m(}lGLfsf1;x!a#r}xUhdo)&(_5Jy=e3uizyUVE6>0=V{E!5KG%R%Xt=Yd z)gW?kJC}KFfs&=LvA5#kpN#hPsPcu{DviV3P4CnPI(FRoBgQ(|U6UPo)UZ9Y7!d`v zGeH@K0otnUlUz%|&+e~<9tQ`b_@^FGCmfkN(K;Ue(_j|2M?6<8kzaoJOi@C*wkjBxtO~&nW_m|dxz?oPDvDRiu+WgIdI>=;hx>y|l)yboy+*7TFa9eJaa)bDq2KQy z1GYZTmnqiQS+i^?Biiw2AO&qrnX%`CF$ktyA?r!KfC?O{;VJrfIKtQ&8~s3y!<+ug z*&sK@+pa8b;m7F8eEJl1Joe>ltG>Y(+oUHBCGS(ko{BvDl5Haw079qpQI^$HhiSc` z#8`U|)(AT{@Hx3ij%2)Y`8t-$9Q2TQ?7%Y%k|k}ONrtOLJ^yBMlY3rr+E6lNo#c6A4uXyt{{Hh}c9tU|_+3-o5V6PqBz=W%WR7J>u&I3f4xKO8RujWe)IJF7H?_zz;oj&#AdZ#9$}}OtjU5yG zSgxUh2`|T9p+PvTH_5J{z3V7qsnPo#OHTS--6upZj4ZQ_LsY$4UW?(gaCkN;&&&I8 zHpM1Kq2=e9Ore)g#w$tITctnxwl(QsU5Cvho48j znk|X?q;3Ug8}|DVemsw_9VES*`Ilfy%V-k47)VS0w4|XS^CdA$h#=SJ?V5o4H~_$k z1UxlHid+dS>kdDqBiBapZ5Id4J;QA*YRy__o*TnjC#gt+BJy}eaTbEan z)&Xo|Mx2c-#RVij1Qo8nNaDU@Ol@DG{)z$6m0TxasZ^CetNA2=@6(_giEKLD!$L_E zRcsSYIqLy#dGs`{<~{4z8WJ}Gc1JTA+x>$inyL?$We*oYOoJv)`!&`-_uq{2>-U=U zC|y5vmagx8%?H=A&$E#4=Ov%j`5e4)>AghX<-@Q4v~3JrP6 zp06f&6%^m4$^Z7GVC)5{6j_eAWQ8?da!SWqLVX1(RAKh1R#)`5{sMwBN|ZnqejR=9 z3LuH)&ihXM>u+m@2H7zTMl2Yw6Y>E?iAEemLsb7mf#ZZq-Am$AkD7e`a!Wr3!Nam* zQV>ZV7l-v-Q_H`Kej9e^g|#Puzdgdv6~;}@D6VB9%xWvNrYqj6+Evavl)q}+F#$4| zrM9pcR!#bP0t;Fx@@f*KD_Z=4fs1sm1%_#O_mCdF5=EWAOdTVv6AiQX0i*1&iie(k zvpP%Z3uOvFH~?SO)m1;@(`9B3@^Ix&XCp1C_y5+pAv(1wLL8IP zxh~`-O38lj!=HzMc#<(rd#bD(5^dgUbjgNeYH;#YI;U|f@`^YiBfG(Ecm(V$d zQFb+FwS*Xl-~#pG7|{CBT*?iPBG$-(Z6a9Dx5C~pClXfqWHxy&Hy>b2E_oF^*^q1z z8edAmJ&!L=u1DaZWx6q#QpdF{1j%^Ub{HjWUsncR#k4udqG3~z-z3p86SfZ?*AG@H zKb&?K1lVf*%Kx@Y&@%Bf56%#w_2uP+x?pBxgioLCM=OotS3%5f5##E{isl*AYx@iz zl-uLWqLlg)_q{l0@*8FbdX;5s9~ix(*$$kz^bpxweN6(zyki@#b#LG|q;`^3sbDtQ z{Msv?pH=C!Z7Lh-Y-O*y{UY&|mSF-5Pu4wC2lYxzaN=~^0jum4mYS1V!yc?|e2=T~ zE=}F`_{5sAm4snggWRJ)74|Iun#jE{dZ;4Z-8)m~FTN5Fy;?G!F%YBuasj28qo@Xku zEYI=2JRN)7Z|FyLL~cLn_Td5D6JyOy;#ZV}x+hB@do4my4PP<4@1bSCIA3v_r{nax+o z?J z`8yY5!VYyqL#5j6AU%c(GXr%zVzWY5XO6*?wH^pQm0ABJP!`KSA?inILJPF#@$agu z`!l{m&g*v%vF=_#J_{trZ>}%&cNvdNPsm|PHgq?F=k`Iy?Rf_8SMKv=+L}c12b+t3 z9C)PvI`8X{W(7XaH{H#KmuxJu)x`V{#VbWBewdOw`p$GzxB{NNMvJ+{Xf_yl-gB@j z7t(DuqW$r9osM9vS-1eP%HX3RqGMj5Rf{jjL1tGeMLLE&HN8dhRtj z9nMOdmi6FmjFZYEin~)4lzKr!rkVUoI@w5SaWWozRr=IgtJnydV3!5_XcZCs+H|{M z#YOL*Ws#9AC1QnKQm>`Vd1+K9Hpl$wTt5#?{Nj&(PEyYzl5Rbsec6_>ZjklDbcTf!mNC6cBQb&~*Ge@bsUsbQ~+*3Yy5mZ6J&+8^`wFCLs*h@5(QeuGkm=Ukk z9%hK}giyTo{|oYpKrgl-&W5U1(2&yj(T}`D70Ij*G=iZ94PVH0UEEJJf0WT`v8gyO zIS$LsxsjM^9ypS(&X?yNM|`o_NUtJGH_zr0ouZo)HWTdvt&Yagzy~_MsS}VZQ}4|^ z+xXT?X(|&0N&Pt#Ay`72$edyH+B5oLfWDWNY%R zXzG5)i0jMD1~#~BN{#B3UQ$-CXsO8cSb?U^Xqf0=grqbwp-*+xjL!1P)H~F{t5DF; z)P#_P!@A5H;r2G=v8N~s0juQ5Z0&A+nnSPhiUKR1l(aa+Bi&PUz-IsfZeJz}gMp8a zXUD0-aoN%|ou)fBNrKewv#qg>p8}%z^^}CID?vCnb&c@yoOqynxfh}5lS^~$W ztCe(lzIi0H=96wItE8+IN9|8POsF&$m2z*YW67dY4jZ~pRY;dmbsf>W9&Pyc#hy2R z&pY{!)KK;Zy6ugT1@^6L3cU3lG!xc4P<>~HAI6i;rI0_6z=uz>s#OkkJKsbrbKqX0 zmLroT=w|+WDhxl@3>Zlt&*Rw>LrR2Eu>eQ2M_`pNckc>eQyo4?wmO2(9&zs5sU|{- z*B?HgEyFt2IwV*g3)-CZEa!_mm2~HsG9cC*Uc;URWF~y<79kqxAyemV!Z&enpxd=j zZ8sGu((V-lxL&PthF54p+iof_h2ah>@o$Ogz*{LqUzmWWN&&RxUSwLG z&P|uW-|myBP>{j!^s{)1F{B6}G*J13WjsnA4qip<4EEI0k3Odp{iF{h 
z0@=;2^56SPM4@ZkQKh)_l!N1>{sWIDextzN$LSA+wVX2c{da2biE&RpevPQGdx4LQ zdt!X|i%LI(W(6@F8C$l-e1aW=CyIJYJ>S-~_u^eHtEJUigX$Z>@u&93L{hfPnBT2j9IK-K=fIw^dl6+}vMNXSJw3ZX#w)3b_k%(L-s z#|OPH&CD8g^rPcOtin_TUgLRxXa%QiD+TQsd|R^epswtTZH_#fGF`ckCjX{8k!(Is z_2dPyg$B!83uPs|n$2n75$o14gHQE8AXzpEWrozr@4BI7xvADw!4YbZw3M3no6zdl z#7i(yV|>=coC`j{>3#+a4ILa+4o0ihv*jbrY*GP}hewFX{$%rQWW1xfcX!KGO<>jPSKV@TV z8IERHN(EIJkfa|;mF|{=NJSD{^R+@#zL^cOemQfw8qD(f!#plPjBXhJIy8lM^-@#z zM^W*qMIlQ7Na^1Q7`_CdNS-tSyHoQ8iHGt-7$kxEaB8#r_vH zzq4((OFGN3-Tp>~r8JLI7O{fCnpR=tw!)730mk|W7=d$`POwgAKgpEWcCYPDccfQP z1FH}zQFZ1S%A--f+14P(;7+3@1IbpATa7GiPl&b7cH*NDp%UogCrg%As%f<>(Gf72ITFQjXamhhuH`rM&@G&2bqym zt9Iy`z9tzo;o`HF6-+r^Ww`oKw2_?(emxXYH13+Y2n`|8jY(5`y+vNCSYI?MJ4K*0 zCp?24f#@LqG{^|?7b0wGGYVF$m`rjGZTOFNNf#9D`w}ezvu69oep2Ec;qT3Hx@ z#-02bWbd>~1X{)9VZK2XH4(n&z6k)wX0_rFNcI=GQ$0p~MpI6m0~Q04Cr2a4hh~hoQkb6%_saWxBP&ig8+^eN!z{A}L2=dC2SZl#{;iQVD&7QilBYU zB(e<^q*zcSKlTQv3%H-?uJ z6NORMSiKx?dctMSjo@6+<%$S<(7n_ndF0AP$L6Tc87|NmqFpgeU8-R*n4rv}nkr7b zGmi-Ge%A;|NK8)dZ$)?%@cC|zWy#yx342!z{#^ZhK70(Im)d!|{(Qgixc&M0^zn38 z;Op`c&+SmtxmZ%j;IVNxing=@%BbIH>aCvwHITJ?8dRUEJQ!=@9#1D=4duR==? za`oW{No@3SLPo-2UD`#Hjo^yJ`Ppr(GqxA+3aAi;>*Ow_ZG_!n2(}QXqRw91qBmP7 z3zf$92men?7u}Q&bcxKyuCbzQUhFa`KKTAS{fzbWw{#_sUN+le485O?^Y$0KCqtXr z7!?IR)p^;c|E$dM4{D1%%5&YI2tPOr?I2GvQ1wjnH>BOSlOmJW0$#nOy1_DYD*o90 zfy!knx^nx6HIQFEexl1wn;ecQJA-~+(Ga!5+TrnZxVB^rf}8bdDAbQ0$w0(c!P2_U z_^Sx1+*AtBn?i?cuiyCvT|St39uIK2dBWHIZ+1_;E?1f5xAlEJT%FuqKOUZ*$Cdsb zj}gZqq2~7b`Cv_)vQ%gLJr$g*kW4?c9vd3=)Zv{$;X%n57|I268xW;$3Le^zKK{iW zy+lRrdAfMpKVu&5e(X5y2`GqwU%Fi#E!`3F@p`zJz7CUKk#V|5{TRgthosf&=Muf0 z9dtHw_wsn=ns)K}I6Xal6&4nFT3+w}N3_>J4%Wam_-WAfG|c?reWR;8@Y#{wYv4il zvij+Kbv`~kpWL4N{`UJz`=6)X<=YUnol^3+AV~So^ZiYY(lodsGy{2NXZ?rEQsL@T zdbo0%&jI4Dg)d?k7jMyjfx7-~=!BgG6N?&Ov_4Q_g)817J(G^E2Y-LrW`ui-)XJ$>6?5xDR_tN+~2`5f&8ck1|P#G`Enq$|fw zejt5-AF7MrAJIpv&w82gn;z`V&ruFs9<3rVOs#}{jtWK*JJNQbMSo3RV!($&*&>B* zxYyb}{_kM(*x_FLUxCBpi+k;V0S=ar;qLz>*glT9JN#E*{aE7e@Lz%bL zsqPN{71%wlxI6ssz(gqc-M-Uwok_c8GWC4GtkZR!Nxx+>?HtNHA`}IKkc}(UR(c)1 zk<3DTF1!du>9oOI54_Ayd!LNbF}jA)>PHyRyiQmzwKtrPsJ1Si&N@#9t#dV4nCl1V+wFn4>Qa%6&~0)sA2kiXi3 zS_=qcN3|z|>ZWX)RT~u&>?T+SEOBIp#wu>8cVS#vn!z+#qe1hLURB37m)Mb!%%Qhs z-}+a4z))Wf4*-UpDGudVrUw7m|5h_B6zK9tTk;sb=qq_|Kq}G{Z5^pEKoxo1?-ih5+j_$gG4+I6`m@t|UTT52 z?D4kEDoqZ~1hm8yi!KO~G%MG!lAp}qC$0U!TLi%(sY@yoVAg{0=IGY!T&AS7miPxx zeyIDH&o!<6M#W!STXg5pvu%!3wpylnlins67g$)JJyU#no0fhv^c z`NT8%+b|m5nQHn}j=)88jrGeb+nBVtwqqLEl8p!;U;Lt--Gm(2_Ai8Q23=2Gl`By} z@jyEyP-iPR~KIV~$WKu9&Iz zNDPSD@?4s<9Jf2Kl1dSqf+q(z#Buf0M+AcqFnnm^kGYuFc6Hpzq6=eFAV}(I#TD$Jcr7PU^4uYjBKK?27A@Njeo?jFs@rw|>?iYls|sP!`28us?)eX)-M! 
zGQ%Wt?RCA1Ig#<*Oc%vR>9>Vnlz}$5CUGqTm7kwM#+UPm$W?LS$=clBSgJK;}&}8`&epe~9I}Z>ZGoy6~m(^nA#EEE+=SNo+lt>-a(bULMG@7Y=*1g!mMrfnB zIdfCoQtpu~f`4wYL1Ir!8X4_k{}Z&$#aJE-@I2Hx+Tj}*0OAatmz>L_`$0>IxrFoYhyN;paz^C0bm*9=3lX}%jb4D^c!A> z5c%x(`Z+)Do*m7O;?DMFHFiBnWq-C>VSP5+ee8IS{1bj@Q+s>#`_UhHMZ@ma;j!K1 z>!yhDDSn5V&;QJiQ)bAU_wM zEd;)1#|u8}oZf=Dr4&bK9><=yNLJSZBE#)&gl}p zx2w$V+=Uvdf3crNVk}!2JT&7oQ8DHj zn+#xQ=|>tv`H!5+*eS`MV=PvnttFdtnlLB-wcGf-|D%>s20;eg8t@7qbj=|zw z>&M{`vkN^|g@x2-g>7n)Vbxnz%@<30VJmY~BUfeI^3rk}{WT_r*?!ZN#e^F0v`Y16 z=hb19*HFUcPD`WLw)5f^tN)IO1=j(I=f1(HeT^Noj1e|e71`MFM=#NRp;(i)iI2f| zIS|Ed1bPo>l!1Qtv&w`*n6V(L!_kOK9@R>n$PxCB(XYiXE{fXRFj&FbCXh)b+9{Ci z)#X(c0HG-_Zd)bGcX}Xt&Wf%Rq#V}{wQ(o$ubw#*xY?PPFt|?Wnc&t!A)4=|aHVFu zEVrDkGze?KHmPnN)p;A9yk`g(XUS~I8mnrO@IJ5gQ}FCsTHIU&ud4Cf@a4<)Bv_bb zKCEDUHJwEu=JEGUA+H*_=rUXujm7+;X1#w$keU6h6UIl6+y4FbR;bM~m;smef3B?EHz*1xsB86o#FFuq$7&=#t4GhL<;yE-idA)SL7q$h&k(2j`YQ!%7# zVI&zb;H{ml%4d_Tf-%-#F+it3;GelXOjLqDxR3dOTG1xQ3oNtEX9-8ar|~+TYrjBe z4?%@0UF*#YAp4X*Kan^2`*#5jDXLXCE&6Hh{W|k9$=SJ?2#I>zT#f{?jQ16sv7|*- ziVMF<2iQeK#CYgb#PyuXt|exMu~sa}xrI>JjP=DPL$7S6K3jQl+C=;KnL76?t!rn$ zlY@w~Elv)VjKv^+2j(;`D!RUGrqg(yhD7ltk2->KVYS9qs~KN0IP zPF{ji|Lgnnuk~y2-_hVkla9Z4cLzK6xVRnm9|{7w({H)21BNQ-qffKqM6J=ETM}$| zxE-SS46V&nTNJtK<96DhwwcLD%dce)!g}CULF;zksvoPF7rVb6X$*+F1pNJ&dSAZ0 zZ3*lDOwe6GeS`VWZoU0S9yf~nHUDB#K*;jb=u5LL^KyVF3C($L1dqMhr(P=ZhWBc@ z5_Z~^ISwO=xvzg!=6%RLd(9+t(&t!%s6C3cZQc;vQD15x?D~YwSU~$sKEao0C)K8i zH>nF$653U-_L<3lOc1v$LI-@ztNG_p%=Tt> zf{nall+2C1YB?EeF&jdTR#6{9jy5YVg=!l|snhHg{%RlX&n8rB!KDs(KT|!K1ncf7 z{y7BzvDv%|K+kMm1CV1juLJmHHgB-P4cSM0#0C~>JmNUjslmAr>D6-02F+@@=Ag)# z|EqkNEqDdK%!a2Xhw;ya0sJiIqqthSs;SM}poy*KlK^^F^J#z_D;4l!%WiKQMKeq- zbw(SspY?n>pxb)B8t`sCUk?zpnQsR8*|=btZv|a69~ppATaJvtOf5$yVEL9KGq8P2 z@jvNA5d|IBW{`o7YYWIh$F&s{pyS#Gir?G_mnCG!<2Z7$(Eer;LwMINp3DEgU^5*5 zLgfG6@t>hiE~d@CGYTw^ruua_3&I>b&z9?TINF!3{8{3ct!J)ur!W6!`+s!&F9_rM z_@6_1ZBC${wWyKO5|rea7*UZD66E9{?Azs{-3V~pY9REHkLgr^3Dg*AK7Y+?>z1$*&@Z@ia7FaBnjI8W?0}8xZ;VIMcW4 zW_Q27EFa&l?)36JzbH7raF1TfE6mKI^}Y^Y>c7A559fYdKkN^q;`VyqAMW41Ew3Lx zo9mB@(*YJ7_()fJbabQYw`P${#w75*oq@msB^o>~b z-*fHh22-v6-=^%pE%|>tvj0!`@eZ{AFB`Ba;ohP6TF1v)1p}h&ZUG)4L7wN=>#WvV zg`T&+y}r*E`@63m-iL1nju(^6B%{~oXLpFZAppMLfC0-X1^D=J?eY>dQ`YW&+z+rdAi%QV4mIX=IufMen07g{{kHpCd)(9IVfX4>*x%ptsQ{$hy;dN<1m!Q}6DU-zKivennU^`Fdz++?X*EdpbB7-h(|EoW*t~^HQ z!3P2S&kxJT#r=M2lx6A>3j_-Tp^!V{}W#i&40?uB37W4kGFudgF@64dk0H~2K!fQ?^h#cI3_x4{5yI&y; z-(N4e(>X%BL$Aqnw7RW)X9hTazDUs48H2v|sGl_qH{FgT}kPxOQmoYt| zfPtv3{|x?>@fpkbk%sr_ldz}$u!A$6_w13=A}8k;OPcBL`0{6Txi2>CtBKY~wADvh zC3{o7{}zbgQpRFe;MZR}`d2R1pBJ=#C*V>Zrmw%+ibk)jh<(mJ9pagL_`SYRTvAX4 z>f?UO@#<@T?)etIfBq*x_1XPH^y_ap^PwBai)_rlXw|o5CA!wH6s^FSUoK+~V4s;{ z!M`^@UCd_LPky>Q+;kUHm0Yx6u??@WD?es;$=8Ide&|GapZ* zJujDkaUI^IkU#x-K5XPzHt!8IKOGup6aUG_@Yv&gp8x2-xahtUHyze0t zmZRY0+t3Z6vhkkoA)NXvzS-|Ao;$1e{LrEGdDGzoHLG{;@Sk?;)a-A=H?0?NY{Vwt zgs{HcK-YDZGgEtqR{4C%+$V=tpV5+g6IBe77?AqGJE2DMI@OzX6kYZ#poD&H(MB*; zQ-b|*)4FB2XGsY2CRlJ8Ovp7Mrj2WS2BI83sF1`H_1|WHwiJ)%J$RnqR2-%|O;cwG zmHQ-J4bzPwUZ!spXnb)>ifW%2ggXrmR_kHUzpK@AryFIlZ(_yM_&X?b*PN_hvoKaT z@N|cq*0KNgW}1zat3=@9TeYG^_&2R&46C9?B~pHSUG^cmv4*ExwWp@=yg#-}6<-dk zuyH+1KY#IwycTcr2|E}EZDm@j2)&z-*tCE{$0-Tl%J6G7-HXWa{qIvx!vue~>fY-( zI-P%{Nn@vDw*^@SI6B2CqcqIt=L=oRHMfn)rQn4RBd!>i_Dnv0{g3E*%ffc!5T0E# zWsC^HWu*XBWT9hxvyDPJV-jX&a?OJA!rTIB+lsvqX;^p^gtRX!Ot_?Ur~ypkzl&|; z*h2JYm|oLI&1hvRy7hlNYJvJ9=Js2-gWmK?jv~_uPR#?>$k}X5w%~B1(Q|@NyW(By zo_F<#-t9o9v2ul)){$h?Dfk-uCR~Lz6-di6NeZm2%SL0dTr=bNm)|qxXm)tm`$`{N|X~9+Pq#qg1msK!v zH3`jLq+`|>mOXi<{!E?8`(_RBDV|bp;2g6F*tb9YnaWUMIc7^gMzVI 
zkJ3)VXZE2am8$`3!WKujx{+|d+muJ%l9ilPDPILHEGC3?NmO53c39q#+D2nj*Oa4N zF&*NE5K|&X=8b-%N9?BaW|d#_GDi!CcW|zHBqG4Sh0MI;2fEP zB^KRPFtLf{tsCsb0j9pyLEld6EU!wYI-%kR3U|44s@>XM z3jR?OB88w&LLz!q{hrl~I7RJD(qp~WybLlbKHx=?ts&Twn3G|s z2RnzSUs*CgJA+;Tajl4_I*DjVc`_Okj*+7029G7J zQNZZ1&b4~Hjf(7fD|_wUbi#^BYj!!f;;e9@QW!zD>q)hFtqNtJr0(@Z0&-b-$3c*B zyICbOIXYf4e{PsoB{_|I5*qzDS(Lqo8WUF!?R&*JJ#7)640^F_EhGWE6#MmB^fzG) zP5rMfdbd^!D8uq~xPGRibym~V95g72hDzRCvf>~Z3vtOhAVU=xOlqKjB(G{qhb8g@ z@5<{87GjcydU@p>N!Tp|i_0!2fwoPLm6tn9PgYE6=R7F}I!J~y+Qx;M`t3VK>3Ral zyRUplGh{ki_r};bkIHQy@lCIE4vqN7V$-<<1k6c=KemoGFbjbi@Xk&Q)u%53KFPtc zs}R(4p?52%VeJJ_zlKu(JW<@xI4TpR{B^j_ciN1#^oWfVq+xK_hQ*1) zVm*pAlySEhkN{~JbiT)Q=ayrcG$d9{te-gq<7JV;2)WLeY`~s0kqlJCmMl4E@?%Z| zaOW#aM!zsYrJ!d#?+(1V_Won7SvJLjT97&UhFJ2q$}X5NN$H&wz@i#D0xgj+%m9B; zEE&?VO8#EIp@gpVm0EW!@~3@h>^GLmlTv_EI!_8KfjNu#Z)B#L-D>!u1>@NLn;Otq zp$1u6e|1kwq)*cZSui6xn)NzjX5@8^dred1*ghue5Lw9<_!(la1Nc{bE4*w&L$%kk z{G0UFP|>VN3{T}x01XaV%6fLWX(N!%O6T-$;UU3FUVE+9zCaXX8v+W8&Je3oE?F@p zFDtqG0JG$*R1-{myc`rRo`qpTFt{xi)Kg$lF&gAhYe*M$6yVWdzTZ2+d!ti@Y2y^j8G>7p-|A05606cA+z0Ns~% z0ItA2Q`W9`Wjz8z@37Q^I*0sW{iJ!}to)h7>EwHc_-c4eM5Sf4^2`aDv`x4wC#C6d z#xxRy?>`wsJ}+;)uU({Oip+U^aXB-=A#1Nz!jmF3+f2};+SgR6I`Mt^&T8}b(dN5s zl*|9VN;I`5*%wbZK6s-`xo7nBEBg_i;hbo8OKS11zC(P(Z+vVqK@5O4_;U^+(2fym zam@#A-8F$I#vpAcPmV^t*12jci@QWw$Tl6cxT4}E4B|+hdPfL9H{q}TG~(Q$JTYGf zbR5kTGNE7d@Lz54HKr3Y;-xJ6r#FWhr0t!^zuSU}vYQ$f@jxuo2I=3$YZ}k`Jpp zThfn(O|QM7rZ7T`E@N<7TmFsNsynBGBI1+XCQRt_qG(A~wucq40K@VnXy`Pno6n<*;axK^3qJbyT#i53bHe zVa}$ev+3G|TgJTABCQaijak1zoLJ@gZ(B`oAg-JYlJZE28;E_#@P+y9sx}uuROZi! zyJE*tzT5sIT> z-cI@po9ZelAg-hVgM*y`G1CP@u2vg?&v_(#2oYVbNXZt1y?5#e5Ep;K7j;ekBJsKb zODjYSv`beIm);)4Zv-06Pw6Kr=sF)W47<^SkMrSY$;H`;$YL`q<~M)0D&$acDT``K zLXX2QR?lND!zU_%K}zBc!!f~T?6ebDUGC&W7G7=Iyf+=?UQdf{W$D4`>TJ<9nR$+s z?mJ7>1p;9?X66$V%S_>7&ZqeKEhktnp=H(nN62GjRdfW3I5K_7AvtpKYd%cu(I`$ei!vz}qR2{0D!{!q1lzSW*Nd}f z+Kcm03Q=mPyrzK0 zZrp%3whZH-*?~%~ke7=3q6x@?_4mzrV%VB`3YD&OWb4%I8`3Vc1k#f^yj_r^bZ_}d z%*xI=fDocN7$CssNpHSAt~kk!agDT$)#`B6lGy{*f$kON>qITm{PM|Cb%VzOQGrH5 z7;FX-vfFGHz(1Y{k1CwwZJ1!oX-C5wSFPaPMv-*t^0N24C|ezUT+9(cQ?i|;C#erNHRD7UyI+cfFzKDaz((+k zU;%!e->nHI%Vq1SYj*(xti+|B^-K+GBq=)Vb8cdvy8*;jj%8p8izz^6?3vpv|KFnZg zSp?2C7yEKYZ#v-2ar;C9CbQyb2z-!$3nC;X5@cH1iBPyLx4h^$;zf_~5A8pL{bU+j z*JfBB43VN?Q4x4D>9?8AakgCuKsHB=CJi;_Oe)Xmbo5gv=R*PDd&f}W3%Z6RBKe_t za9FyIu|~yMVm*t0bT5sInI>QEOp1CN?!kdZ}mFR^nFz>y5OnY`i#P!LzLvuGTK_(Br&W(3p! 
zJ}HlrY4|h+FFG)uodSE!<419fdHu*q@;9-YW3YmgEQa$dL-&cD$BwQ{Iwr{bM%G8v z)lko!Wx&a5IHl+yQGxCKJ{Vzt-E@J{7^z|n6rwpC)1_Hf%MgaF z=AwX;UHS<0mq*R5PmA@kM9g6`-K)qK1>qK6(~1Nh6RYHxZrz`-mjhHG=Q$_jd(w#j z&;zIx`~t`-}~rT*}mNv}k>%v+7RQ&xTKMQ6)b+e%^j zigEc*mR&WDo*>zuY!Pg1DP0x7dAtP*8vu_9ldU(*Zpd<(q@?7`RHiIodQTON?5C2P z9vt<+0J|bXj04nNF_RqgFD1|!XTx7p%YSTh-+0gwvY0W54*RJ|NVys~;(eRRW8nuQ z>56|+#_*)f8!k3c+a|kfNveWgR-Kv9apC2$=rNcK8oh!GgCSk?5lLj?)WfKv=(^5~ zO^xA7Ei?W&)|3_C^?r-RhHP#l74$^=u@8-k2p}3br;M;fF+H z82KWzRV9!<*I-XGdh?;E}AIX$NOr7TIZ;T`GNAz%iRzlc~)mEOZ}n8*Sm zN{da7HD`bbzIDxT%vr=vgYNG0DcZI?1sh}gP+5`C zv0=UL7;}$1stA&M<7*9AYYvL(SWDJz%sw_c3cTNS7QxzNJUiY*9JQ7c8L}D9O~u1+ zNOBvD<3XuhB(r7188>c14XRKI>lK$K)mBb=n2Jcsn)4=uW90#)bH}+sGR1M-y zNhdS*GrFj5SjvINJXx?-5ZM^GPoO{67p5XHY$T<#gzp*NAQbd5L>+q%Q@}LQiazjB z3s!_e16@5r7=UV>7StPA&drg59)(86AvI8_qFTk1GlR!Ua1hQ802yH5U93WSRrYPX zndwr~71mcvj&ePeg4K*rT`01tv+T<11aHSykaIoZjtSa{uLb@BEZ0aqb`u>>u8itY z^q6cJcXTU>@;#TIJERmx%g_ZT4VfoLjPeJyV>yPhY;ifUItxlk4xS-^xfFL#Y;R*A zeU^Y6W}*@Sy3$qJ>J+c81d{qmW4LxEAyubMh2=Jf11_!m?o&e9328tZjZ_@&+Z8(p z4r@Ra6OVCN{|vs`vuQG|z^v^Q!8Txi2?uI)YS&qB|Gu79q}LYTb}}hQP_TDstjMg#Z9xnI4}T0{ey(Vlo>gSDRUj8Ko9iJL?KqU zAP24JD>0VZ@c=;*`wk1npT`lIl9pbyNsMnVnFHGTplFddj+p5ol1oM-6I< zU)PayC8hFDwwsJONTGV@FX@V;8oRcB!IFT<2^2pA0)Z%yb#a*xn^Nz#oRP_VIaE|M z^!TChxI0|RIXJ4inh-K*va5nEbCoDrN?mlR-CueFS!a(4expl;2)|Xi9H!&@g3K^& z*JAta#U*Pkzy4LG^7L;-eeN)_qb=^@Yzjw=#6mhM3(s+)(TYUrWgSJsL4 z4OX6CAR8Ht4x;lH7ucd%2Mh2Uq+jIO5it_GSiT4>)ZR|yRz)I|9Z=b)l)E4#SVrUo zmWd-K9VSwsk;y04j}6LZrcqwTBZRGUX$88sApx#TQf-3>E9FHRZPFlVAJ+OKjyBnfQ6YV;Qb|Nk-Jwj800G-NrR?-;GUh0w zAE%XU2C*E$nf>;ts1RL9{q4$9iW_SbrD1w>FsUt$&jIC{4Y7p8hH>i*DbY}egIRie z`5?BDS}0EGR?pEuGDzA0%vXe;i0_=R7F)g9?~>}f=?NS3k_4P zK`z5ljC?DIP>Pavzhcx-2u4R2IF1!^I;tt|>mk_+1Q-SpDlaFz4e-;cj7+@Hv?VE2<8Z%=f3|awSJZP;DNX`Q$>sY*17bo@(mEn|t}?>;Yhg z*h-)AScem=TN|zla>2r^45YOAkm!)!mt3qSO8Yh=qyt77(tq zI~<2+Vs5w)S^iOIPk@omR|-I~hex#70Z8@{k_*2tt}QH1X;6Mp^6Z33t{>to>G+6L@>J8Ls~S2npxBVjh*^=1X_;-Rmr=?|cJVA@gt!s9(HOd#UUE(tNE*X^5dnKCYNLE? zsG3g?a#>Fn-n!Ww#VDFaeE`wGY^uE4%7XQHTDv5u>70&+t%-0UE)8Uy$oHw)S+27X; z7FJKYk{W7Z{_eVf`Hy(n#6;oq#v&}DMi2L4C}$Hx$#{d8M4OS{$p%lB`BU-1Az^Gn z6g16R4VhsGLI8?B1)~B@9n4-*PGTuwL_ZOc8J!MKfZ`MquY`dQtRpEC+Nrz*I7c#0 zv!Iv`J^a}~LI1_bz-_jBJt26Wth-t(8Uu;qkOwrU)!`3?Hvl$bTnF9N$yQ-3P74U*`K`5yZOvBLVkxovYg&D!|{$g}MEKzlk;T3_A zBx4hf;b7@j%Iu%d!xzK z2!}86i7D|g@tV40Ou$hsoZ&Rlt5)h|%9}QKp$00fWfoS3(BV?qC_*cU<={AE>90TBt>0*{)cI>|u7@bW$dX&bZV=fJ$33CBR6-59 z&ysW=3!RS{3f%gXvxY=uv%G0s6D1gkz$!%O8Kco20p5(n8e=Q(4K)4q6^ZohMhr%f zv6VOu=ZvRhN~vBnN077DkW?Xh4k1p)h)@I8rZcG6MVURSzoXQz1b@r?P3Vg>Os|V? 
znnORODqR$Q#VS~}{v(nhs=7iBK3-g9kY&-%q6+;`m6gFgT(yvwZo4WZ%sSmnX_5FL ze|{M}H*aii?H_D_64tB*LZL^qTpfPnJi-=8Rn5rMpqK8moj|PU=bN*d;OfLxr82xx zpc$`2^}%0*t?bm8fwMNpCLXNAxOHtF{!|c2zVD$mu75)UN~vsuWSxeR)XvFOOB;i3 zBxGf0xkv218)p(kVJ2)3tsv*HP%x{|wwf>QpVLLXFgtCSds_6(wo3GcN^G zXvmL2;#PK&23W{qmG+I<0;XEDxn5R=5bem+%Y9>NS^k!dy@C68ZdMZS;ogB)>J{A%662*ehm76E#qV_s)B`)@3!KgM>lOjv- z>}T|)UTE-{9VtPy2ITD$CJ$plT3)f5GWCFaGLWk~PKAjr?pC<^szsPLOxbFeiU^FO z+fUGWS^;=Or#MlrL^Wt9J5{Wra7bwpo;F#GS9x)d<2&2w` zH&b)3RAqLo``5lRPTy}ag?+D@2Cd?ju7h=H>F>nili>7D&S5MgyIQlC0uQ_ODw5$$ zC_T^u;An=he#L-_KU%FCfyB6DGtXVvoN849apl~B1|Aw=x8QipxBlrGm{Hswd==!Tj0Yx~{LVCup*7J_lK(XAJ=So{wYXAJn+|3e zv_2dBKIEaLr-{*>l!GPjPNzO5M!V^4&piWQ0IWS9SoUSBfHJeHdE zQ0eH~l52GxrugL7F_8vICp537o~dTINbQYkU_r%z_9YoWKI&uxGFSaj_|Sf#!Adce zu{N9hd9&=NOr1iS0?^NLxejzrUWkPPK~y>!&Vq}_p^lG1lYKBdgrno;me5RR;%nad z(&5yPaYjh>B~t*QuR_E`?fXIz%7d3O6yA zQ=US@wv48iH+OGG0%gxV}qM^V_KsC^=4{$;xqo*lZ2T`_1UUc zU-7;%*nB;}$C2Iq{v*XN%rNNW=qM|MzaPelCudpwU9hxXKG(=_>WwqLyf6cu3|&=Y zY{p0AIa%m=qP9L3VmdttBG(__JLHXwKR&4i2vmf|8Mfqrh#yaD+x2Di8ZO5VI<84i z+r|u;tGQFa(C+;Xo&S(DU;>&vm1we~8ZTLTfN7=!kNtV9EGfPt@Rd#B`NPx}mB5y6 zv%qIjlgx<_Z#4onq_6q|=@3UrXi;3MuFz z+*dC2MYSQ~3L{ry=(i*%*(OWF31{S`xpx~PmS-?Ra5Ga+9<_nTycepVMkSLPSxqqv zSuke6#kwD&)`iq=x>dTIo$gDL;%tkB4(1u1m~YWktGdUocQ-9g0_+6Zx70M-+>NO)gQz*RM7@h_F)fSfn*B3C`j-;{cUQF<$*REo3_2r>^d&^iu@(iDG?TDDn&k^G zB3C$3wkQ4M16by52uAQQnt!1a**H|35)9+IWttp@4JWM03K}w1aG^6a%KlWIr{;Cu zm;w^Zttfbt4b#bk##2-^##NTSX|FqI7T3}>Xi5tCu;e8i*`Ar|)C3loaC+^Lj7ew6 zlOpn4)fi>DD!l@zKc-N=j{!60a<%oeIUn=jS2ny^1wqASqkHTX*p=o|JZfo4=9G-b zJpC}pn7|U(@FD~l+E>NX-`dn6U}F+fYmE-s(CsMF7ggg?B#ktU3~E zYh9_k)k;EkOdj<+Lwlu>!hVZY6ZT=qiYH*Yp=t=wTazVb7Fk}VuwYKT5{#`{=BiDJ z9{*D#91xbZg0F5^4bLfHYoN|Um{NA0$s!!puL4vzq7vw~3G zW6n68nol#c7o8P>%~6OvxmGtKj?~+F5jc6B5xAC%q%tW-nl-J=DDsEHCSLyX#xH~Q7@@S*sz zhhf74xZI=DzBo)+Ms?_)e$j^5s^TlqjIv&=07ZMvr+6hQiLX{teIZoGC4Nz7w)LLG zQ7-&|fFk$G6H<>#O=-(wd2AWj*o8fvSft+@m>SL37h^E#;6h*#3~ZrFO8h3YVpf7p1eEXdJo$=lK$8kztU?-bv~Mp3HN*x&~yvpwuji&UuyTM>GrwoDba z=mV0p2ss8K(sL7B$mJ}7(WZ+FjV>ZeYU)@8ta(%thR>#sGi}@^Az*?373J!E=#3vkm-n$duwvlU`}|LWEp3^ zO=Q81ANL6kT1&^_H&d3y8IIE#TH@aYncTJIK*d;pMNy*>BAimvF(;XiKiWs>g6#G*O_hIkhZ2wy zEtOuAY^&c_yeDPMDRZaYrR*30R9AYpmKD2|G?=~lt{`RcizuuWlW;-fN2!u@%%Tjc z_n4R^JM@B(Exkq9LY*-#7t2IVvxN$o1zwJHa*uS+$aBD3g1uCfF(Vtv0pfmk^GKaa z4Ixu!i^V;~$oH*sS2Za&ZFk>nf1 zVtr8DJ1OFNDvf}!pKrI`20De|T}lVow)pFOs7I_+Cz`t2qAgFK4}~}7LA~(iW}_amxsY_Nx*u|{f0(CRdB!*wLGv)6hv~wC3SR3F|l&qBVYs-3QO)V%r z#v6QowyR_!c+rhONvE4@?6lShM^fdrVs;7YRk?_~m#PwxNx+9!lKfQ8lhN}H7Dqa@ zJ%Wu2$vWH7P4`8`>G;OaW_sM)M*XXS>9;U7j0CG@RU=b^^z6=j%&`)W-!@j*Fj+31 z+9sOn^f}mJ5oNa)meUv9k<~-wmcL}>f#tP6#giTIH@w!Bxfm^|Sg327QcwE4P#EE< zwGnCl??St+hM~tI%+23$Jma2eNGDZGI$$>2#3L%smHK~@UyzxbRUDf;J1;(|{lvip zI$;mrSWbj3TPLgay(S3fr0stv^-DOl>Rpp^U9G~r7+rbJM<~Su4bh1lq_@g&WNzYLP|Rjle8 zb?DBLhi_8g<<`YR_$zsB z>TDHr`h=%$gfT`-R4IrXHS1bkPk<|7%xx65Fyguo zzGW+}E)7B5@YFxVs&-1y20;^hk9k=B1`W|_6w#;XJxsliAOB9&s9Vbu5k^0Nd7S%e zT)WlJ1Z(!mwxJ{(^9|}7jnH5b`LYAQP?ZIh7|B#`Pw*6s@9qkTGnQI9mIWFEP{#Me zE_!{igrJKk($28EZo_zA=2xVQo$4NZB{JTAn?)G(Cf!j6m=3Am4eKA`LVps(eB7gP z!BcCr=s*2zMg3LmrtaqkrSxQpQN7btztBvT7}{@o_zjq-fo$2V`Mr{47VSFWWG^kJ zCaTyn_QdK=b z2UW+8*0U1~gB(8V!2obCk+0V74uGH6OQ#Y*R{d|ujdwe~51`lB_u(VF_SLz>ngcNz zrOHO>#aX7yv!g1=`E$QqC!QO#6|XI*Y^kE6bkB9=xvkAs`&lUl^JQD^rF~hhKjhSa zbW}9Syn+n|J$s4{2Yp_Pwok#@bZ%7ev(?0fK6&tf8XbK1HKV+rr|1CFb32E*SoCPA zoFe=0`CUl;0-@@=u}I8vb`1&pH2CkG<}j^4IKeCi6AmZAA@HOllEYR?By@1UIV<@K zZfKbI`-mMw=37gvXGoZBPF2}G%3o4Mg{t3wJz8=H9igPu9bTw!A0OMI6Y$+Ws8WPu zQqqUUThfii&;vE;ZvTUmH_;pZ3GR0xBDTeI(Q&cV`juvI4 
zoJdQI*+}Zwi#gy5Rx(&HNY6akU7+j+Q@&p@M^^9AK63w*tHe4L{G+ec9b}biItrX_ zW>S4P`21E?{ZR44*nkx`|M6Lqgz}sUF|$n*cuoSBR}HgK_rn%WA&idd253Lm)kdtl z1}CLXbCotSO89(aq0QY*X^9tQ;AhB-eSiYlH#eL}+VTFX2BH`%rWsPeCgwxZINczq zi{Hn={`PYH{(61;Y|w7ClmFo|3v#*B79Vl8c93!f=x$-`TE7@3?1k`nT-+RARZXrD z3J7qwa`8T1UG!g_ArC(E?%A!aUf>j}G@$6!pN!-!z6|(j8tAFVbd*=mr99_i&@jJZ zoy9yBo^f{ybaDB**`N2j?NOQms82h9I(N@#eDB_8cJcE+-!6{Ux!FZCzC+awpY%N! zC{)L9_r7`DUN>W0%jkIM?>9uB#wB>&x*GdDNBy;D>G$$KH$n zvOPGWPC3*MT(Gcz$2vPd3Sl%u*Z&La%w}H@A07w+v6t$<0<(Zf5*h^p2~k3(#%nU< zPP}bjacB!%eK(gL6#7Pc)ZiMQ^QIKdEfjs11_{Au4~qwSF;CNOFZ*_<`;fbFJIJ$| z_?fIO5klq4H0D$FbI5VC{qW1xrqk?;SLmOl9=Oi|jq6C(-7_x_ZKZQ&yVd!c;?@*7 zp<|(+47Q3i#_;+Jz1oX@CZ?C=%EwEp1y${oGi-DH_)=51FD(-~H=$3Fm^87H8rm=O zY+0UxJBls;$xfFT1;50p7?}^NO+BIPP%XjySXxXdqbr33$1!D8lXQgKd?Z?}vlAn4W zGm7a91^7?YeNgDsgUIqvn%2?J+63xHo$^L%s-}j9B^%h$ovE5?E*)>GAB~XMF7rll z!k0bbj&w#{GnY%_=bRd6WEoOjmfa*&KUuhjPwA$Rsej|36o+18!JI8u4!pCJFK3$DNwxYXzv+8rb8$&84H zb^VfpMW)ZU%F=*1bCGIw#)EF09%mhaJR>rra!H2?3gOi+3VykKkAXt^UM~irnygoy z{FCL(r9m=(3U_F?IJsCkHjKKd|@{Hw4oUdw7OCt%a~OH+aWA2-gRP z+hIo0G{1A#*i>{!Ra6p{+%lCY{B8_gtZF%%q{T&cEPHH3dJzE!K?!UW3(tooN{ZyQ zIa+SIIIx~BRb#_02XHe^rT( zFVPDp2y+ELes|3d}N>=rm0BGK^lWY$z`jfZ9u!&I95O&V=EMk2w$64{nsiZ`joj1)G@$$mu;wJ+fg#LIqJqtgbB$K!KW-Dt^ki7nIqG2B9LA zGbAmlLcV;K-d4?TG6=)4*}+@TK#xn7%%AHiKe2YrL;lGp0(18pA~HPx(y<;{_^nx` z5*er1=AFi`&9AxNOW|kJX)CebxQ3{Q^AP&Ia~nULG||YfR8s~}sHuTGC?k=Smf#O- z&XO@s*dvtk~M z-f60E}!g$|dYN>r-UhC-yNJ@K-!Q07j^2 z(k&3&1`y$3OyVAL_ba2%6MhVV&(Bf%s_`t#p(=27rS;$?N_A^!HedkyYEh9gUwEhP$HYcUqi|0WEpejr_4wM=65H!@oAB=y zf^h}2a4)SZCN*(G#&IrPEDdU3w$9|`#;b+7?{ElYEt-LYGUU;u`E|-6TsFEo!%Vo1*A1+i!tbCatby8>-s|yNoawC6^ z<)JNQu_8cAp%uE21QgjZTN+egyNs0LWkmOUVMT-G&AXGengb4C|61y?$T2bzvsKIf z=1BluAV!C;WzOX z*mL=5lReCM`0!<^sh{~>*9eE+dIbx-VQTn!2nj2Lp!A;$RJ~8LqP5*~7i8~0VALP2 z=CYcJB6y!x2{cD#>EG>1UkO+3NLN6iU-C;WHUi08EL+}^WOS(3TrM=(r5;+Ei}x8YJ7-4pI!(hx>$xCwEKUa}INe3w?J8veF{$FxeH zGo=M~s4*%H_DAorVqlf(2bugtym94(>Ve16=EJ)AroPKQ9v#x7`AnR;1-Bz%UN>#{ z&80~igoQ{Wp0FBRUQJeR0iU^j(=XIJ3ehEl81=2pG|@Qo#Y*yoW_eqCeh8uc;X%m4)^p}Ce{7Hp;tHGn zgQ42B7{b@mv$`!TRaDF_DHPqBMdxYR$SId&j>&hxA zbre3mZ@&z3kv`js7_*i-jh$zWlC6glsEkDZFsq0N`921_eu;e)iaIunlhUH}k&|7G z^l(&W8Ph9N?noQKIr$UY-QxZL8~sL0C7(n1sikfmft8?C>MTAo#}*qo4B{R_n`vgj4 ziY0PxT}i!xr<^S<ogb14bv<2sCu7?JUTi09Jg>w1asUaI4F zmw>cWYd>N1cO(V(X>3HgtNhaH^Lv5!;q-_ggJuN(lC| zahX#IfS~g9PtZGwwVW7~GZG0=zC|nVl11u}&JHq*^9mh!EI|}5)-3!K{Ugz_!erHaMuR$R_5@@Fhl<1rXDNOP32S%*WZ_M>>#;aqzq#Ha$k>ILn@-anKNY}m zSTnvo))Eqq`I@(4WqJ1wq>UoAV;Yc?Yl$M}iLRb3#@P6R6IZ#=bYdU7Or66qDcRR( z!^$)$rk&OhftAK~CYVZg>-JIDC=^MxFlyp`*(YyWw%sKu{k$3;SWTC@!fesmc6|Px z;*Dtj+F5efO|rlb1R&L~)_ivzCMNE?Z2tSjj(qhFNUVpuPsNbW71mw{d=XgK{B(xZ ztUj=5vg0Y=HZ0qbU4`d)ENvjkAHoJE7uwm$qC*c@VW%=%H36j()n1nZYZ>kLFAu}h zX6otS>IpKlG%c)k|K@`UdcktWK`<$7xV*<0+~?NKr=!LKRRd=L>Q>bM^~xJXl* zJhOz=wSx3lJY#j4H^f^&_=X&4!DqOyNsI2#^pp@GCFi_V<*7|E)zQ`KF8>z{u-;vX z6tJSn%(ZF#CthO#zed-GWfTR*wx5zVXH7mPhYR9`Mnn+@cl;k(3PM9Fg+7{9v06Eu zX$XVQhesJsdSe{g?Vg)_ns3$?C~rm{?c~5`g3-%WkX6Q4z=Y-(Fb)vl^vwCM;_jm7 zX6?E#>pSPOI;kKZ{mn#Nc4PkXb-6&d2``*ov{6O@amXoi$aB)}NB|4~O2@YH17oV9 zYBk#CZy`fet52459hI?b0x}HK}ca1KOzsFEK*0_ zEXvMW(7Vc|B@#B;96eaQjgSlpKLpE>Y1nZQBG2n7z6ZmPrdDzy+U+6O!lG9lE2DG?q%|>$Sx)e z0<+lhqGptl)B$AB2W|t5$gc{X?Do8kU;cnJjzAV^amJonCYUbAnc#`Y{J?PxZxi$~ z!jF`-i#B;MfF<{`{cbeA?UF^CyclbLt92?}WJvBS3l)9h`N^c2uk_)cwW*RmS#npJ z8l|udNqnT`f2kcRD;E9W`T4sfUzR-T<=`K+ZCUa)lHSbSSu)lv!XUEwZqstcX%txj zZ%?LtJ22bSH*i3?_50}*(wH8B!?xPBMbyWA=-sVobIl_;uh1{$ z4V{B8yj|{hhp$jLc~`)y%q)Btv6xLh(>JdWC6No>ZASnd$yYp)?N>aJJHp}Y3pbmG 
zF5dlAIH8Luv1@PwslhW^t@r7V-WRkXlA!bWYI^rCFGg9b>QqTtKPL)DIRy?0D1fS5>m>##UGyq8f<;hVK+1KW~BIW z0G>;Z?pSyi9Q%On)9(=RQwxjlr~PC32`KBQ;>&N=Pvbu;37(|IZ`wbVgly%>BiyA_ zY5Pl#pVy0CxTbClfdo14+xB50*UrLta>9J>LF?e-fqREqma|;Zz_)cOpC5hkf5lk8 z5rEgZ750-+z717uX}EpT3Ev?@8`ZLmu!pljJO`a0s zad~>Y|1<~4Y;TUx)o>G_hcPGqgm>?LQuQ^l#!)!O(j_tMjxY+V88mn|vkeZ5>A2N9(qSGGP(-ahIu^i5 zPC6DqV>#;LA99ca%aI4bu8m(8cs;%S5^xfejgIh}%*mM_n)WnWv1Jj?+B(@}uTV1% zMmMuFaK;sU9Bp+3xwG*-#RvB~*gQjw8Gw*Mj$f~ne*N{{Y0_gchu1A~1}w4QD%ZtG zkMqL|$Ntn6@Z!#I8GOa!32Esd^SaIucueDc+~Vv4-}v;t9-ctb**7kqs&1lh*8=dH zY9W=9ur^(I@3bYsZhM=VlVb)F4W6`?K>WgwKYA(WB5N820xC>ieRzlgUFt$e z8MR2ACkJ7ip#z0HL|c+ZS}({NFUR?LoYP4D6woXK>3`_9j-I%m zA;NJ&^xy~|zWQlY&k-XfG-9!GppCqBF#&(Wf;TD=Act&YK8{H0k}2~V#bXs;-W5Mt zw^%+T;q^S9KCjNHE0Dq?O3IbwY2=|+hvyiZuX5a+KW}KtS+sA7ZHFG&v{iqtPm|$( zeks;*7w_U5o-<@eC%QcU3dFGFqHXAZMWeyr|0Pr{E|FwGHPFu4#-TFx1z$<&V(X`< zVFb>*fYC+VI`u#?kjQ4#ow^$np02FVERl z6(?X>Vbd53c3;vDcftY$Bajc3%TaM~&-4vZCr->m%?C%l7#&$fzRY_F4ybbL@CeCR zaFjC{Ug>iTaYge`t}tq8OJdqm121@Bw1Yu? zSbD+N6L=Me+@aLBwPUt$MCKD<#7Tfx_%==H=^ zBnHegYa7aFFbgr23aokR?GF9KOjl$~zo+)+_T&_=@%xIXkdz6;%OHySn&wd!`mWy+TT=y=? zSAM){-*}MXt~sKi?L;L#hqQVnN15U)C`t#n(x)dJApG~#5)Z}t`1Dbi??39>i#Pan zt;5iazb2p5M+CNTx%^8k!VwGn_5ka>( z3BLX0z+@2#gR^A|RT5d0qhyH-t#HnW{2jHsLP;dBI7^3;hE#?%-xO6h_)Vt!`*nrD z+}|RdgSp?5rJ+$oOd_6{{6jw0*A0?OEwfryBidEfW^h(|<;M#J%W z=$`Rf2Zbf}PRe=B@7_Kxc(9tPdq7om&~F=$jQhHT7(%)3Z0pBTz_2rmKBB#&);Tlx zo2h)8uhIWhf8vYQ%1MS5o~VG4*vrQ1Q!!hQ4x{kn$XFCDN;cs(SvHmnNi~Xg_PS=O zKMn0$7FVn(#uvWYT;HasP@J)DQeQ}YhvLob z^%&FI2$M!%Wx8rHcG2v@jp3GTo;W?`g=}9LetGDFGR5P_Ih%J=T903sVV?-Pedxk6Sqa@}a?0k4l-%dkwySNsLGF=ltwx}c9S z*rtcInL&;?BuHE!*L&_umc}k`RQ#J+xA#!WSN)LzaX3Z6=xi!o3g@nkxy3N#2~Y12 z(CvcFHerr~uZ~OAB3Wl1N@TYj1uId-*^N(xr$>8*Feoi(tS*XY*qZXV7W3F)l#1qv zK-XiUP3R{<*h;k)y@sw490?XL4135yf7pR;A&R`TasjEX^2io$Nb%_Q=-ehO4(cBD z_NI2q;w}rR=}LDPW(GgqQ|^P%aHLqt%%b24a$wCah5_ta`ujy^yFTtrWFgtyyAgcK zY3Zf?lEueO46*nkAAL5=_>LG$_|P^k@gMra*eBGXO!t`LCRCH_Q#*AP(iwjJmTV*B zhbFQ=HOO{17j3rjaSRzE7!#DYMhMFuE zF*37MW?%rnLj$+ED>95U1{$;tQ)s?f#XVozEJRzl^*Wv+z-WrP!hBQ>G@VTqgd_~S zgb48=jmS4;Q)i&}C)->`*n(`gktQmB&C_AI^1Y#lqgxbj-zxG7Tdby(APD0ER1eY@ zGgUPWAH{P`N@eQnvsw$D!vg722`HkS>}PdcT~qy7h=N z^WDp%@|{q^aP-zESaAs2j9O!y2)|eA?coPfj~t5c4|H!ME1^7C&f$LodV7w}MX90V z=W24l4BYiWK=b+D$0N!VJ7PL`wLE z5Torx7DHTy+hJ}Lc{#FQSwEc6i2K>_ZSd+WEfwBbh>5;AEx79{jyGuPH9J5`o!$TH z@zYS$_kETe9)HSpzme1NFvRoW`3MO48Wb`czjxU`?FPBOYExs3KK=_j6hw}7-1-Dy z9%0-8gc)EZLvCJarY^hq>2Sti+BA@2?W;g`{Y>AB7Pr0{5@Bm8VLI9IEYy&yYPdg=^n>we?ZZ6vF5GSH+WpxpE1907ldftjh{qP<5#Y;^!{{SXQvzaYAZO*^$Hp{wYdMe|(XDPp z_f1f~A$Q}NsrTX94UCWNnVZja$r@N^A3>3GtwN|Kgh98{mv7}}$QW?gc_tPXhO&ay zxMsmx5KMWJ*8W0wG|2&_&Y>e*H*7f~(*`8bU)6EtSc(Sd%1hSz3QL9J&J557}9Ev;XsoH62zE#{y|Gn>m|8glQ$FaJl4$`8mp?9&Cm(4=_76ZMa zV^W81y-9H`VakXnI>$U8>5f~123gbugh10=2s^ufcJi5rXO+y@xB5J{LHg^xrg}PwRubeyaC>x3Z2}L z!8A@F$YOT-C$G)pR>;}^f%wJo8PBgG5AtvX@wpydd;QKe$YWC*0*0(T}b!p=_n%pDk9HLPIf0TqRvgP zi|3WP0E|Vc2a%A4?B0V8h94d_+`8^TWnEeA_p<`4ne7W3n`%|7>FJqWJiL5a(&;zi zcAXyyeVOzKo<52ui}>!WVK`WJG`uUlnXmlD4sXPDjs@UbU=&)L4en&bd)YYE%j)Ff z^763v_O!pe+v2p-%kqAh1Uy`VhWqc%uE!p_)>o4BRyMJJdA!meOKctOnH2%}EXRGX z+d2ClrrY9ygZ|keTe*LQ1V7=3MJGV{F5{2iaw6MhJoh|W2h{Pyf`c3cWqlp=ZLdh( z`2L50V3F$o5)drXqZ?k!cmMA!4Z=Szc8~ivmH|&s=j&=_a7AWVaFmR?I^G_>7n@jJ z_eaA{3S$tLlJ);We%$uo$d5Op1)*&JL4G{=N`9=v&j`Mo&6@vSZJ5vqaN{7hY_e$T z3O$py-*& zr~WT0g!vZfzf&P3kq26xX_WE9J6$js zf>R=NteTPrt0v_-d79ntI-abFZnd)Hz7bmN`1}=v@N{ydh9wTbc=ks3WoPT?h{15r zc*dHUe~B=JJj9~?eNX1|mBk!Jw@5@M!f1VsWobzdIqTFCoBx~b4t> z(OqV3IAAmNNkKjF_Aq-P(=(Jg2gNrur(5BCNUVF$cCwQZQq`1P=6Ozeyf3cgA+bwQ)5+%Bnb^zXRvis2(#$PSbjcX|`hi;( 
zwD;l$4!-RQ)v(<^nX7|;nZC1z{S~#7wFx;06*yT&A$;9lxlsc6JluT!H9D~%TztM) zhb>gk;DztNMZj?L>(66wd_L>_bHH>f1#9!`s9rC5$*$lnf0)M_O47|g!bMgbI^CDW zz*@nr*G2OMF+B;0cdX=1(8j#>jnI~E-p7a;C&{gBT&{2v6WL41(WB5IL8bf|Z!fPS z$DgmdCSw@cI*xP;^p+elp7$Kvl_C8mqc=pa%onaNZdt}tZ`vj9jZB_Fc@qOSrdj;E zV)H~86SJyZF$1LF;b&~g%h|;3BC|i+V=o}Unf&Zj?|Fyf2+(w+2ek{3F@5>l0hud# zfa%)!SSQisOx}Rk2=1A}g01UO%1RN`x`~HMF8Z>Zl(j{%kK|zIQY`8-jyo{wJQL?Wv0zhwa(6cInQjc&1vR_;;F|hTZFwJ!4?;Xlr?D=?t8` zi;;yoGi(9D{vMq9#j6i)zN-6v52qtAhdFZ=HR&XeI{)D)%m3Xf@;uyc*)DIpm$Qho ztStNt07R_r3#en>?-fjF161&M#()0}@`tNr7t$%mvj1`Q^VE`ZjZWNj3?HX`+m+k! z45$+Zqnk|WT}%)9!KYfFa&w7uS_VGKZa=@z@OFB)5#Q|spaM_td2hJ450Vi@q0(7Yp(aOMr5%_yR?k^CZ3#pPx_WQNj>k3 z(h1b!ntsjj4zD(9Q8gYcQ4WZ1yvJ3f>c5Gvu`Q0?6R~r0WfsShx!5x9DuJ~NV08Vm z0?ZhUiRUekS=RYMLL~jIzXgAw*9OGIGwFfmJ=9OGq!{&_FQHCPbB-D%)56z|Ki}$; z$0NI$Qj@SPDa&vZzomrOm~5;qkaDyO8s*w6Eh$M9Y|UD+xw)`^YOyGAnEAOLUcw)Q zExAuATsxig;Pl1YRmf`6A;*n-kZ0NWi#MBaxU9WndtSw|E74>lLz-oK&87)h;*%Z1 zSB3(LGfk6F?`B#ikMxy;>;<_*P!qn{)kK3y`YT#^#)s$a!}n~@?oXcm^Cw%|t#j%` z1Mxz1X>KQb-XO`LW*_ktC9Fm7=9q%pj**#W5Kk88B(5AM^Uq{!3!&*s=@1%RnZsR) zJsH`@x-9LcLQQZ9GpSf!WLlk5H~N-hbd3m5?lkhZpnVa&3@&Aw`WPmT8plRiP9lTC zb3UV%A5oK6z8)#8QBJ~OuS@z@#{tkI&GYl%@TwL+XBajL=d`%hm3h0X*O&6PDRx<1 zf)|BU8{Xi76A!hIgTon>_)SG1Se&E*g3_yPe<7Tptrr-U3a!UscFzl^mClH`ty==> z+pXJ-y#)ubC29R}v(J?=YN)FhS2~DG(<;5*Vm7BSYsIzo68&Yhl3fjzcRjf59q1@xYpA*yW;`tiYcA_#w8Dyl6Bo8t|hwW|gJnQN?a z|IASR^7BTe**Sfu~BSWFCYCnvYS0xx z6)uEwjy|>77_N%9_~44TT2$BrE4@C%C5E2D%#AM9Hgd7oagMG!q|JwGcvt6y?R`E$ z0?L*_n^8xMD|cG}M?n^18?@`ka}RN0WT>{_rDkWf_*Ms3@omwUfkgsA>F4d!7XIDw zjp)9I!@_xV4Bby-MM??9ATX$fGqiqq|Rx@C59=~@L-!@H)?XkGF0eZn`JFOQ~ zlmFRub!?7>YuO`&`F?zZ!K^bz*Qwb@JH3LUs0rQsa!aL=GVeVKw#V9{&Zj)bna)9- z-Ej_M5STD&ttE0v+G}agQ7c$?*FvsN@04V@jOXNFOsXSO5->&hwuMe=Yel7`jV>EW z@iy#|JcqNQ=`K2El@cVIeBz3s z130ggw3c@j^}2OZ`+aN@wDKjlqZM46Xh+`$ByP6mxo*3|#z{JfyA21^C5>`KpTsP( zhZuHwsilJJ82NbWrz-5>q^N_Jg)-GU6L5Q0tKpy?$30l7$`dw8=lz%4+N!wa7Fq)DMUM|B zE0syJ?cJj<@@t9qK!?Lg&`q^tmj@8*sK}MC+tn7>cJ$?}M;mae0=TF((pYLKBiuJp z!GI|X^uM*g`MlJZ*-tj}8YRR%*#{mxb`agxBy@cK;2_rqvgOWw8_Zo4V ze!sM;8Q~`dkhp`S1mjKPbUsvFJE?6lC2g@cW-Lb?H~ZRRAuqMi=HzxT7mQB7lCx7v z&#|9$exL6g0v2FvAgF627v5lX;<_wjWwzlDIz6w*{$gx!QiBQrLxmIY|M9TiT!xbWKibdDY;ahNP`l^ODiG`pXkKQRru3RC>{0nv~j54}JIM zKGSrO9Q-cKzeLm`j?-injVtuuBujc^*YA88rY{gA=*0URG*4m9ox=gqaC(S zGh-dLlYew4AQl=kk>_O@Nx~|zbQ=DV|I`Lh<46zRhyRiK|K9MIitt$k|IZZ{CC^5Q z{j(#r&AVOCQvg%*FNKIvWNvr(NPrl8q)B|FU2`V!0B+o8d?&W#ybFv!viqOfv1$G2 z0kuC8_TL-+QeEs9ZT`6;it=_crUm@D65$i6z2pn>rCUN5SuNW386CjPm~d!K<)sqm zZey4KDki3R&Hwv?;3f2DI`j8mH{agBXTfx~?#7hOHWrk0u6=q#4X0?h^qs)_9d)IjOo}?D zlpWgtC4>Gq%{6lU%qcr!!*rVg#Bcot2=7B!pSp>%#!*IzP@V%M_+w~>DOdQ0DObsc zDSeS8__Hz#ChExx>6vZ9ETp8h43|F5CMGvtW6UOY8ZOPK{ZrHT@16cqbx_(I#Ak*Z z{Xn6e_wIz)c0xWj5qo~t`p8VZr9f`d*G6fQ@AcW_GldBgb$|qp^vHeqAE~c6p!P?? 
z#fALwFo91^M30mBQQ|-I;!ZR3 z;?Cmo;!^%$=q=j6PXayWKU3jf&CNajU*^Jpn+0+PG|_+hyBk8DlH24A_sK}0inh;) z0pbt5q`ZPJ6PIpcyRURfJ7=j5M7F1_ko!hu2 zmtp_i`a>3fZ2!Np`d=y!cqIpE(-sAm2=(J{-`8W{UN#T`=YwWfy_bW+5d9yMd6JjZ zZr?5+Cga`A#P_ORuD1JiX1wP?atW>s$r=erHWuB=Et5b;|D;Vz(H9 zvt3>pjl2k;q+z_$#OOt2fjzOQkJ*FD=vM{;XGV1x#ezeex_4ARHvy&d%VZ%0pgJF%2@ z{x?L4=`Fx+y2Y;odMYJ*>eqWaQ5K)2HoY#{Q1G z)}u~9ZpgR&E8t+!8o!sv?dkYMQ0Sr5v=SV|D7@eATkro`YH0t5rRHCqy~eN3-tvES z_M$ibH)n4Ej4&eI%kps3>%jn01yN^utKDDd4;4K~{?%{->Q7w(11Fq>tx0O9&()VF zH1!&Rmy9PN4`~)gG@)`p+^6q>D_=RrV`XyeC$pLoY~ioS&~_<}PR^UXU!9H&(Q;k9 zk7az0cN|Xx7Y5+T6!WWrm-wUf8jh*I13&yg#PtOp%o9ci1pBndCBi9(RLEp53X{>S z)n`51MWCo@?#It3#`Ik0xu2rSIVHzOu`eVmOEYGbZGFp1`Mugd&=w`c<>)5=;WZJ1 z8V2iZ(}K@<<#}sdjC<te`}V*-Zl><(4%Ey)P)eGuRKtDZ-D z1V^3NBY$nIUn$yH+pWYvm==a17s1L1N1cSDTy3nK)=;(BzZIx}qAd)=E+2fWw3D`5 zKLg3z%l=+yVHkDMs2p|FNj$36PTFaW6pQ;?ffR_*QaR)zT1o4u6L%D@tv2QPr%2$+U4WFjlwE;@yPVyCl)9YVfrPuV9=*!moaZ8Y zlrhg&0nmhNAhgs8lQIhhNldz{RLruUr6MWBD2%0;91oF1SsZH6YW zn2yKPv6xQ6%&?eF!vtAOXJHapD#c5$`0+RFD@W)y?5jrjHtefMWH#(;Mszl`d|*BO zdZ&3^B?_l?T`h{GbzLJ$sdZf|%DFtVP2`qcMBloUTSd>aoZCd-x}4iZ&$E&{Xk#N> z7?r|79-t7w@3!w0@?VA7GE*xit)rq8%(vua zd0NtNGBsafwO^Rx100Zn23Ojo1xE?T2j@`tP6axcH+LY)9$ja%xoXAoO0(Ssu$7(Mza(dWU)}dn% zkJn_d2LYnjo%B1<0*&W!M3poGv6i^W6?K}JoEZMHH}o#Md6@w6(O%dR&zH(S4*#pB zD!o?*R~Nc#%YcWWRnTMG>dO^_jwXb%=r0fVYsMz@55c1|*ocY&0VD~on@u@NSFfi*1YO|U@r$*8M$6qX zKMlNQy@VKZ+-9MHn@ zNJ)#NQqtXBn}E_VQksD@2uMi75QGt-CIBJdY&OkvTj;c{<1(>|FJi!}QBu>oze@IrvkYIrt}?5q7@XVbHb9oW(OfUnaTZ zQ&s5&L_x-74tCu1^bn4R>9PXiR0`f=Xk=}0N7nR}@v990_#>c^#5wPX&Sv!T_i%lP z|Cuw>n)G6%_~@nlNPS1fne!?$u#w6q@91l;U69AlyzCnG1p$|0gJy>Tja0#TM+#uO z+coUP@kWJ%-_SSM;$fw9e9zr%#n`(HU5`jv5dK&}4H4H*mb? z__ z#V#m6el+l0U>pPdCqcw$J`a}<%~skgfFA3X+HMC+3}1FTXE>aFE*ZWv9eG+=xfYo1 zBNS5o1GSG_)avrZZ%p}_8^I85`%ieev{s<&@&8a@HviKd|I=K?Fx5=_uL6^;sp|hP z8K$t{m1Npd`DW7bY&lkt>Bk-5?NypAg}s!jRqV}Otjm?Fwx_37`d%_ku;@RlBfb6p zSBH9zrSGNE7Rslziha3#o-C)fviRD5LotQfMQmoW1lsoJ(RABIoM*BCY{zs37|%-JOWOqCpQUbdjcDdcoSZbc%&4pz5g9?eY-{E>mb;Rk`4ul~TQh>8NY`M*-JhkC zfM5iNhteeWFH;}#w{{La_LfI56H2k`q$Ol@{H#HRWKvuXZnb)c0JgV@pE3#DKX1K| z<14~KbR+JV*+>ajF-(xY;O_CV`!dq>D9i##^0;5KzJ+eqSV~SK`!o0TK; zcge~I?O``}hs^Dwipd?SSMv}ubOOv4oW)=ny4WKl#(M0g?E@N`7AWVZLC{i%8w^|m zrctzdmP^D1mbY~j&zV~>o$39M`(+uzs|=TU6a-`3s@nb7l!^QO{#CiG7(ZnE_$ILjYqPSxpnowHZMjxW{kZF}ICw2WRMNptn>(DJVfn3>9Ic;Hs(t}eDr;vV zPgwfs!wzsNqBm~#tb&O}&b1#f%Vu2Xig0B-JBsl>_6*2Ozxc!R#c#zna*xQVjqtGZ zVx;Y;J=Aj^Zz?B-0jX%HFnm^(0viA3kx3@~Lv3UrT{JG+GkWDB`-_fZuNnkKd;Q(9 zM?bqkJLYP(b`zbJA{RdjXa^7QZH+6+szPtkfwFqw_cZPJlfO&nAkov-4!b8|6LBl4 z9w`JbcC&X_k;6OURSOk2KuE9(G=6ekbu;RAeN{fwmN?Il^I(5`B_3us zE$n1yET9p?<15Hd2^+hxv}&F@dWf~M-NnUO-3Vmenp9ScaQ6?snlr{6U19AJdgsT% zApSg{*iguI+vD)+Wz4Yaft>LnCL(z(4kO*=(RMW&^gR4`KAoN^9!dSy9h9rd)8>qq zdslllO|@#Y2TrAR4M9=)$?ZcLt`}X=BiWGUZA9JmA|%qbIWRdq%-|qJPSylt#~!rm zbh~a1qZ9Uh>4E(suyQ#o4 z9qJU*H^-K$u0#3GK!GRM4ZX?ROKh@Oly;Q?4D|E3+nME1sTmzXpQ09@?p&X20QsCx zA;DoNJGNOMR(guvXd(Y%vt$+Z)UL6^C-iSo9Up%^#VNKv%I1ueNF%MW&bE&+r=qnq zV)pVDg&%456~1%RussLY6;mCz6<&n*{fhjQuxm~>%E-J#slfC`qotVZDwFy;&7q)V#y;`t5|jU9Msk^jP>`4L2S_2djB`>o$FFe9}yANhEX($b3a* z+zXnj9*BIK&lmYJx}EVGqosqNlg!`C+>0YMI$~qW?D)u+Hj5=aopx@r=SS6^Q5tpn z>q?IAw0K;NY#xF@$449h*-%csS));>MYoH8k>-k25KwU`+ zvvwXS-PBD-+JA7BMMnAuO#5~+HfCAPEuY!&P%yenHup4gGG75C2?bda5V~^wu&OVo z`l9VgCO$nL4)jMQ6dshhthJ!Ss2W}9_LAeGMs14_v-VbKyGq-`F0?Rz0xMI{4?o{EcyGazcDTJh2Hja>q-WQ?J>a@1m zQ+V<4RR9BVh-=$bJVw|mmrJ3dI@b$s9$Hfn0t!f;+1a(IF<&uQ4bf_u8ABqbi7qKMMve4 zd7U-vnguoF9yX;KBPKz6+t}c9R}`Kmx&|`UK?xTs^DcFbhPm1A2_Nr zh+K4zLF@|(_M%2)c?dqc!(q-~(tZor*||;T&WkgdAin(rlDS+RLYPC)j|JPcl*qvC 
zy9If-8UKhk?B>;&oo877(!WFHD4j6t^DNiQt)l1>uXsL->av!ZhFxDV)tp^0C+A$^q)YG(G4nQ4rz^Um3v$T&X z*7zSritCH;>ecw!t5u1q8|O`bX`$hfUAY#X6^?v0rmvwu;QgZw37~I%PWScH*A(BC z9nV8!3}*iveuN5#FJS8$Y{6keh6Dy@{4<`U7|6%-7a}@nT9jMy_!^r%d1TPzFzx)! zZeVq0*+&=A@d&>x66h#%7ZvwMih$3A3T){+{q1;Q>Z4Uv<0F!-l=ABBHP2hUiW-sV z#$9k2(fri2$YA3ZkyW!nu33;qwS&3rH!K&`JvG5;A|dZ*4q*eXf>|N|97LE5wH&gkMMBJ`&K<@DY8`I=lSkFm+KDXsvYZj{+}cko6>?@kM&N=uE+jA5?Y@F9c&IuELNyn zo~wqVaJE$qGuY z2X@kg%)@DQMRR0$JD{OoyY9f);3|fxHx#6P*sN+1-%<-@umi4Gu>IIA{KvPX z7=^?hKNS6$`RG_7`5e3r4N}dH(cbmFW&oxfF&{=kKl#W^#dk2XM217j_My~gKar@N0+I594 z&)ULO#t^|SoU+g*xaaYNE8yxX*~6Tb$Wx-=cIc~!%c#){;`I*PBO=oZ=otBJGn3^a z@QA>0Nu3^AL*kDD6;M>8+vTIy%!UC9n48jokedVTMf060SfLd5tbO~qQ!2pYd}Dej zodcnz1qSDDmtv-K#U3=vVmM4HFtov&XfT#g=ZfCxN&d~1PsjeCs=ExS9!o8&Txq;Z z;_|nU-14P_Vk&7)=bIX5BNCf0Ee<9x*%y=wl?#>@k9i|Sw`g;p;YAkut~Y-!f2M8D zBo}jatwQr~B&J>Tn!?30_&5#NAxAjAofxB960+(t@QK)Ie?>`PCGv2QBj$XP${{PG zNooaxxjosFd$)^Xxx{E}d!m~>BL16Tyv23tJUCxu;IRj8JKDEZZbSkuIwm}fm7h-GuBDg|PDo1Id!nB$GB(HiXpGjJ|y>$VBGa_=0g&2xhT zT`<}bSZ?Ki`T~d~!iCSX2C;K9K>8s3YbjirC3cISE?47L!P#|*)h?#qJh@VWG$B`5 z#%1Mcl$}Ad$e=GKD%uD*oq@s;J|~;~1L}T7xY&Q0D@OE@wH-+5Z@9_CYa&J^Xh|6S ztJFmDMW*PMuJq+G=6YjS41+9GMBiZPjOmu`4d~J52yAEN&%cnNB4~fH?9X+V>o+?G z_8*d{D6N7se*I;;_m5`TUk) zDD|Du`Kt9kpTYXCGswK#p&ZAhxp3wM)Q@bc;)v*jqc%7H-bY=5JZQUDc4B$nNtMR1%%webcFa_T29|M zbC~A>g3gB)jXz^+r}1omE)2i2`oFKUZ4(n>gw1s8NR2SPZ$9zj^u75cEGI`+W8ZZn zBOtSYSM|F&)Oluhck&O1RX6p#rGQdOU;pWfVG4A(Jj*d=#kF0?ht$2AJRQco?iH`h zDB&u^Ub?wVe_DV`m#W4&FfZOex8Lhp?=a>ukHezJE_5pJ$oO&byV!Drk$C8zv%d8+U9TE~| zZ6T{%$8sR7O42VKB}aqFLt|50zZ1-*DsD$79gb$(tQ`XU{CXwKxk(ZT&AcB%us@ezd)A+Roy(j`Gdg+<;hR^zFh6Z+9My{hSJz%feLby>S)T5_11o#r(K>?RwGi~q zzyL5$L&**3eLG6$<$y|$|A9Gw^)JuiNtVAwu$LzX721H5M_q*^gA4`@AMaI$-%ZOq zQ06NQzL=4=VxeP|^3RPrB-Q9;;m5&F=T(^uC7x+7X#O39|415_0sTk+qVN^N(F$LQ zk?s$NP}y8;NSrM}%4VG)dx>U%y+gN{tQ>L7?SE1$(|?vuxiN&A$fe>zvb5)En{16J z#*VjQ0mXz9@8Bov*mPsn95VB~=2r8ec{YZsd+QGCFNd)B_X9CaBK#Oel6nUX?vU~F zsQi989NPmyWOK;HA~|6Si!s)+$3Kx|qnZqxLSR&*bV95@)w~6xhB7?-?`vERQjieS z+dcDYR+C!P;VpJndVK>}KYepfXS5yBv9~zdIBMJ01knOEp}(W5nnWgo;OF1=hec<# z&WT=o-F{dYtr!9Zg3gckLZ+pvk(P*w6$s#v@1u`^PI@#Sv+2jyU2lo~H`6bE#2Tk} z=tveUFDf+*IZ>_f4VeCDi}MSOJokFkKPh_67^{A&GgwVc10&Z|SqX0RGiK)SgkYP|HH_PDqqUFR54mjE$IfX_opkE6%SQ72wivF0R;;G}=skP&vd}lI&<*7~Hi0o{l zV^ztY{85-Z-^&$iUDLmTJ|5MBmHqYcDYMjemF43Dg>jntD*)+neEvo(55{c4J+E;l zl{#BPFaqRGaz_9tk8vd*xfT%w}T~uTq!Zy%e-{szGdLE`9Hnz_;F+d@r-%uHxcEi)pnPBFMoSQJ~ zvub**_`@c6kfgu7;e#BHCTrmvvqnm`ms5E8mDc@_oNUU5o=`V_8B14D+IvW5V~tEH z_^wY@-P?8%`KaZGVgR68w14ct;4NqG!lq5eW47dl_-8EKJ*rXog#g05R2|MgI7x~$ z8 z>iu1#u~r9i-irdtB;ppW*MnEC)Q?(pvrH7<{V4-E)W*LVAZDtMBQd;5+qLIu8X^_Y z87neY-!N4+c@jKWPN%H(`t?G8^c4P{EA#zir6*g7ln8^=IKN0F|7k8^gR4Bv-o=*_ zcKand?KsXPrr0|6d4eB1t{+}V=se00!V&u7%5I^7SHXT?D0F(PihZ2Eyx;$+G$(s} z3PlK)5hpcy)e-yq&)<_U6Nn=gJO$GQJ)@+)(A~zAu=q##s#oQ#N8*_zHui(fV!!lO zzfvx?b2N0sX<5`AA?+O;f)d@iw5f=) zHsf2(UcaQ42TDBTOp|qC;3TOL%u=MazI(lC-souk;B~D<&DeQ}_AT#cwlH%XQ6-@# zGo8*#1F!Pqy!lCMIzUwF^D*BVe@*{5IWm!qdw^pmp7u05dkvRRmFAfzZdScd1NCp# zXO9%7W5b+LRQoLZH&L%+$)9KsPZ8zYvBian9{=_goEtQG_a>r@YR{n~#x-r+kqAdj zGz@FtGN`Vu{t5Q$Red*e+{rb*^9^^f1Ab_3q%A}LJmN5t^zV_=Da;tea zTFU?Gmhl5-=RbKh7J1AH6Enl{Mv61rcE?9cq$kzeIcw6i;IK75b5fm#3cshaS?m2& zMZ`9j{HE@{u}ee0g^#Rlw&o#|CSL)&-C2KGKbOG{tiRQ}Ksh*9Mbt)c=E^ycn@CW! 
zo;0ClA&Gg9U>oD94fBXJ%X_cxE~EcioO7j!7TViyooKAi5CE3=M!K0S+39wce_CPj z{PPp7%^XK<98Q-(>A~2G?37;_)1z#xF^ZheEC1yAyfYRb>`fwy2^i3Gu8^6YXr_OV ztWK|)&f(fBjSpW~sU(-ID>NyewpD|ZrT3{gzD3kxR`O6s{UHqltNgC-F*~Pd{_iRO z1xhQd$9CI(<76$)Om)^DiP?sTd3=){ka{~BuvSL)CvQESqeg`BE0GtloFT|F;iWi3 zJy~9-we0(#2qg>i&5t7+AvH|BHZ^Hh)5@-4HFyt zE_dTfC$srUL%kS!xsS_3Mkp#K=Hj=iUHfE=%&-X5`=>3|T`S<=V0>p~6pmbiNK_9O zJ0N8OJc^f*stmW6K#8D$lkF=6NfRbE8eoOV?8FfKPtW>aq81{SfeE0^ww|*UuCqQZ zb>5P>Z=oSisD#q?D58MEq0*N}nBdE!${lD8Xk>+D6vL)*&zuRD^)+@!r;w^GB!I_iftVe*9|wQJ zf4xlh&d7ML@6Oul;Z!aU{?0gs)huIrTh$MMDR|NU`5&>E<+pN0ghXhFW(mVDi|%Pcd8kA)2UAA?x$U|^6oS3hhf+ka!`6c8Lp3en=7Ad7Njd%P zA>@DX@6w4yP!@)2V(W%q*1GRsJ2Lk_{eW@56yBQ;sxEww9e@(St+z3s>g(66+C+<{ zBb1}p;%$ul4tol`ZjWa83!g4Z*W{p#E8zb}M~KX;@-Hux^JAMZz4)@!airEpL*ya! zU_W8(_Qm$WeD>4S%PU=1yDv)?_U`1;bvl1rj4N*8Kn|?MreI~z5|F3s;1JSWj{z&8 z&DZY!$?oDYN7yagnT?}*+)Q%Zlj(O_Ir&pzaXM*q6-dO`SK^f$W0ktSchIR~ali3t z_xkVklidiYodxYNmi9BUg5KJ#IfhSvZfQiDH`8OhLb1Tt_WxK;Vl|QQyXDj4+m8RD zo zQOxWzz7kcqw(sAf4qJBGJ(sz!N1Ol-be9$_^aP=h8=FVHe!w6fK$sc?=u;1?X{*Qc zcR_>iI*c*%QvX}1r(xTjVo$uTf{yPuMf*5g(JnSawSukaaOV3Y?agIeC{8&Kd8Cfw zq@+Y!VmF246q|;Q>z^7_!i~k8aBYvdPRpPCwW<%5-LYClyJ1<#K!E4@vIe!*@Ml-3F{LqNceEIJkI6;7P0QRWZ3$t^1DUrMI3MSIIERi-T#&!yLub1 zvZd1I%Ks!Oi_$j!GYme|vn+ikl_ngs?89BItD37un`u)B{2$L?AMO@i8qxQR2Ecz! zJ?Zz0%3^-lO9TIq>As1WiGQ8FxQjQrYBck%3LhoYCd#K6Vt*t^Zsw{n^cnxdsU;_l z24{X|>p!TX@wAIr%VY_#jmo2`vy1qf$r55)vN0^&$<-IWLcYm0pqpCCt8I}tA>G?D zAf!21>hfL|AKbr8Hx}}?_C_!H<^A=w&{J$vD1D)NzHt!AfiYiXTx9r`V+HG-H%f^P z0I(o!$eb>@rk;5b!rNuyl1ps@!wz9Ovk5a&+nzpSsz`K~BlodU}p=Ms{K(@`q+QT8x@HWm<4_rI+06%_8+VXdRK#m zFuUkrC|SqlRzf-MN$vT$phG9w2>SO=9L7(uoTQEexxg6U9W}+hnkr0P3WkalBb0ga z58Ji-cMcEBj-cO*0YBny%t^WHSLc8c{J+&5kyCX=%s|O74Yo5phM)sXF3|7pa&y;> zqoxyeym7aCb+=1FzIP2Wz_{6An5KZ2Nf#K#e~0kD-t$ZU@PI~v=}W7shu41{*V=wO z3+Q9`!UPTsA@{Wm`SV>pI>h>!didWHM#MY@{$IPTS9{BWHLupN#4Hp$hTjrB>-8Uv z_7G-~b+9pB6sD+-rBGELey2`HB^hs``~O?UoAPE4HXVn z)*7)^*}MJzUZ1Az}W=HS*#Q6RFPX>YoX5ycRU#<~-Gcw~#@&!9=xbJRMxNQ@g z)&$Im;YJ^&PX9_R$vw#yLS=c2+lv#y&iS`p%trk@TSDj-t{1=#i&Oxq9>YHPOh~+t zvUgr%=3h=;0}Psi8Pw+*^5b-t_V30r5{r#F13RSwA)NqI6uL)lHE3~opwq2$eIPEb z)b=$t12Y4M-0rvR`Ukw!yKM#%Wnc}N-B2%Xwb9rpi3Lru=(H;Ca0i{;eOG50?y5HK zD&#?Ou5LET!e2z&8VY}Zpt4ZKqD*H^F0Tvt+SyML9X)iUD^!?wh)s2GFRwP*Qv6g_ zJ8mc;MaF2)Q6*+7iETyK(VoY>A{y9z z2?S{|$>K}7zCD%M^vm8t%6?eM!*D(t<${4UG7y$a%gV)Zup%k@Zyq~#<(~i_I|M{= zYUVN@m!C3Hln4I-#W5L@VI@Z89|2)Y%`6FXZyiBDtGx_6i?QD0K7!17Ea8B|Y>WiN z9}dbF*FM(0aAIIO$5i3%J$Q1P z-8P%V&vi@t@i?h+W^2KzRxIBh7_sra{%CYF83_;DsPK}RRMlz)G!|v_xtM9)077Gb zEi}}nZMuf$?_>E|26cRY=D-$jD%clNwSUc#_lQp z7!X<9hibBKbCbjtui84NAv8EqPU)tD0&#RMge4uCLH^O5-=0Bv=&&?RyXL7EJ3Ack zzu#IkWQ)0~;?LCCs^}{_b)z11%xGN3)K`ml=yg_)$Gb-PmkVaGtEpI1d15wW+bwzF zx=%!JHn-W{N3DDv*V4(VY95Fa{t22P56RbLfw}%-*?iUY~W@PLaNtHD0Y{vP$7%oXcbw(m*cAo+SZAA+0U&hWL2^I&BS!ldD`0W{*e&0SyTAicl%L3QM%x* zaziXvuY4atCEo|RE!Dp7$OQ*3{Nn46<*#8JVNa5S)=Ig1FI@!{|FG*hq?JS&5sUk$ z>QI?vM16`iO=jO<>5|RXh)8^;zf-CF^bb7;epD#U=Hj@e5-Gm}mFnX3N_|Ipas1&I zLE}e;2Zy);{4@z?LdJSxOmLPsMF85h?h$^h*qRE8gSpq;>7nQ^v0&r3*BRx(IyZ3SMe! 
zuM}}Pl;fDkJgAH#ZSm=hAT+4m-jSjtoV||X4Hw^iSV=f#QmPgI`6RIf?PYe;6JJVz z;d(n6FIHjJr=Pq0GFdXzB*KK;8=s7S3$^6@Sm4>jbAJA0r`&#CHNioi3r5e0M6UP) zYa_LXTi5x#_*0{}PeYFbOFIk`J9A*)p0n}80Oa$O>+i0~P#nVgHI#@HbJoWq`K%n_ zdgps@l*&FJE{vbv|80pfSQz!m#%bT?eIhEL?w#GxtH(9*aAu*rPEC_+E}q$&=HpC^ za6f_8{%e{_1%(xTXBwp)=I$K|^-UYi980!WWSWNDL^z5}6{qC0TJL}W&M>O5(Ud1J z2DzuJ_YA^Dy+~E-zi^4^NE#{ny-9!K&Z|4uoAZoLE4x3Y>}~i>G;?uOA7e8+GtZzr zMb)Q6axTu}{H!kPySQY<0FSeRPi^87la>`dDjJ8ag_dS?)zFv5Yki}ZO)Txaxj~eWTpHz zD=sK~`rZoPC!-7S>F1~P&x?oTI!}Fk+RhWoh(!352=Mn|>cxvnLc_Mn_qnSIL{ram zg*WJ#DZPIXV<{LGS^T&$ov9Aj1F?_5jgmjO<-!6dDhwWR%g29xA+OIU_((yyfb(wm z*EQQEeRO|X0LyCF%5rvVw_FmDv zF<;x}YN;10a>gB8KSI7!zBe_=h;p3N&JX7SFbo$%}8`qF-w4Bh! zQ6AUUO<&Qpbk_Pw6=Dtj=`hSL%t4g&NheTLLqQ40PC@x6FVBzISY~r=0xdmfF0$9j z)PEW@f_cEX!1-?^*d2PCpO`D{2<+te0JMR<0?&AtDYtH z{If2k7B`!sWwZCsYxVtp3LVq{*SSLErm&0D(|e(<6->R~D2HsrqLnZjk@tR5jeifc zQ_6PKp3PA+-B*Rxd>xQ2h%S{(y`+$IBvox{ix1f>cyO_}RM)D{@+L&Pmel{kCP7L^ z_kN+Tw0w@L9O^O6XUaL*<2UBB(W0unQw!_Gsv3!Y2J_Dns}!z9gvvDr9w=E@FC0%G zJsmvAxY;OK6H4*)-dJzP5}Zw<5{6-hDVk5-@|SaPW#rX2;<;)lUxXWVy?aVaLMl&L z!`z%sOSEqmdk%F}kyi}KaA#o)Y<()q{`*G-_@ko5hF+h2#&5zopcz&5}I}xZAew;k<7!-UXdk;aSbC?D-vdi(j z=DA!Qzpn3ifn@(YlMnwGlg@+EjLWvLbyM-l>%VQj?UI@{_roxc1~dpv(aKx}u(o@D z4`WHW91s3&w+mw?+4R&%Zq#j`Al~A=XbN%~j>1hiSNu5Otmj z-3db%5BP)v#r(B?nF6i>Z?LK&iBS>0dI|n%C<*sKY^KMfZ(e-1IUB@zQeGODx#5;O ze-vrLX}3cX$i!p|tYD&>?Pa_CA0*5!=tE=|jYn~WQvt2}REPBL7el8jL7w;K__HWx zFHz5g10M&Rc^+J_Zl7jfavYSJmYg=DCB&U~!%G1~-Ha7rLKnjPG0(+}$ev0%s=4SP zV+JeOnx7gI&INI<5cdrKI~;gxpF|!&ek{lftpH4sa9F&B=W&JwJfW-c1RSDFrCyCr zjuup5Ca+6ZmM;jb+h;{>OKg#^wqHwx9W5Imwi0V41D~52B4K?w3olx3`yI5rk;HGd zY)`*4?3P0Zv;|HPF1jQpNtf(TZM^&K?B?WlJz)XZVd3P}(l>VV=EZf$-NKqp>=O>B zj|hlTx0B>Iq7oD9Pj=Qffn0ee%A`BA~$qaR&z zt}=Mjg`uy@FpPopUeA|CVP9p;AjX10c0PC3j!+U8zLS$Hug41|K z9g5uc&$+RS{^8#-2|iuncN=H8FmrF82ut6W(0*=p?a1Wm#R@UUn;l2wFuA*BpaNb6 zGEz;;xQV+`t8Vi?=+F4YS5`hx-NqoBvU(#<7Sh+;+2XKPsJnSPdE3&}poGZ}OzF~y zNr+C)Zuui{?UN7Q?ZF6EYiINv30jI9FKxv{5@ct6)(=^}70bJ4in~%6SSwjAf_V1e&>+}8m$fOfZ_){%8u6H-0&;iPx0A^q89MhOHp=zR~z{eWo| z#rkf6m{PCjb2T57t-3AP9P9qL$7P{Hy<0Ry$jzSHG8hK!Ap9jB)snOeCvWyH2`Ezr zJN1d3;kTPfexSD@-PRPr)S)c9rt3p!jV))-OE9OsueLq zNXzz7x@QOScaVVAUc8G@-w#9B2d#; zra2A&JA7ahpx8^;)&ioT5<~Ky)r-~>wkKhp5a*U_wKFlQL&{mmDU?Mf7ZoTGH2QSh zor|7#DmDtV*H8n5G@%W_~dlFV#$Qu+GfXWf%AR+?U;xB z2%vOx_}n}}f>Qw~?&&bqJ~r}rPysqY=BKf6rnfrEYrx5Vv-`A0V~BdMXtnz^(dJFW&h+Ms$}|9b#$*}1 z!_O=cWEr*oMPz@vW8Iy#4Jd536YbOf-j~Pf6USY?&)_HpO6U`J4AQb)_2KDH&g>{||7Q`isx9QO3CxD!^fTL#TD?JoX(PLTa@Qc9FB zkapouvkAP+2llWY@3PYr>V7K}cY9NoI_zVV5U?({ui^`v9HVy1^lBGWZic@_ewa`6T6w0ZSTPgc@m$O znBaKuwngVomPGQ?gTCRAsijq1UlBO%d>3J(?{8;N_9?mm*Tu-r=eILE zf%H{kK_iC_+Esuf--pD31U>u~lKY=_N&1%hrO(cRSwitvv?V0P{x2kOJ)8^KL&v}P z1I@RYD~Qk{{Cs`wpSk3>GWfvL?JJ;5*?T_a9=-2P=ZwCSeBEjGf2*N;`OU|~edI&0 z>=hC5`@quedi7l}fN5eNI6!GW38`@)BiNOlnl$zZ?0#u>)D^t5%eYOyN*&EK`Fl=j z{%8I1%UWb~Gpq*XIr3L(#zK28KY}M?2aI-zrBSoPD@Zt znPq>*y`8OsFA_f)*%|_}_CkDqBRJsQ^=qw_knor5>w#mVZsHPgEIn^&?>saXS~m>2 zC7$ETtnWsBu&2>b+-3&qeG~|zK9hK4*8oT~2prl;tRcDb@$AZjq(R0%&G@pPeXetm zj4VXSWZ5L;kvwkwMPGNieJ)Tvx}6ySd#d}zWWUr#OD>Kl;POews+-Tpb^z-phj704 zk`_~Zt1Q?f&fX`%Cqk&WF-f44q@4=&-Umi{nz~T4Rg#-_@?iVU7ZHB@>wJ^kY9ooW zdsb*w z5^R!SB3JIQ6ZzgvD;yz1ahxnvRG8Z5VU!?9EvHD=vk@>Z zbsBcIfcly?$kjbQPW@jy;Dm%fnn3CgwTL@B%$M)bQ3+>0bQm0_OCu2`9 zS1VZJLtx(GF;P9W`wdAQE|-jDe-(eA3K<^eon;5nQNxKQ^3evDuKBQ0dqQ|Xfy+Y~ z_rXp`K7cPQVb^XJaFZJzwPMJP`x%9u&;HVE0hAEw1!ON|mE<5jxaV-UvMykUr`b1` zr~6G|#(wL4qU_r}t{#f}RJA<=!vgu)WFt1IfWIu~7S3{8e?NM)v*c**fp5n)$STe& z?c(&kKJ|Btq$`G(W;1wUFUj~w8(n>#key3RV(J%Joolg zP?ONp^Nc_HJ>F(SmwDbZy8LIB-7Ff2PFlWOsIj@Y%zG=s+Uw(+-3L2=D=wV#M9Kv9 
zLhEPbKH`^DLAB!}BEW}Kf|6YNh*rU?s*rbM?h*pq4FLnZvXTUO%TS+J+Ks5JALdtQ zFAq4Tbyi+`#!-mZkJ+Ys+#fCXESIliIWN>R@QQ#qUtPH989RThJt^p=CkNpZd_8=< z8v5Z0lmC4@xhtPsKwZ4V{e<=#o9XeNIAvglZ+JQkU!oI&#c@BIrBQ;l_DgVn9OkRZzv`18qn_i9G!N@iRySxh;y z%Zp{RnA!)hXp|;W!+GHD)m*YC@y^A-Jo{NUXaxYU_kKVEu? z#3Lhrn#(HN*N(0JgzE~)^6j&e(OwwlFyaamE@5m zXYw@aRGl(zsaslqFMUjs>Scl@as5woxA-CAk)rq^8I!qPf&CA9WvFh49OZ!O_P7!^ z18dW!BY2HK3D+DC@san>uT(9m_r+!D&Qu27jv=2A-Bo z2wupgNxSdX-9yCSqV^BM&I>YK)ilM*~Dz@`X@2*5|-hB)_xm{apvxr*shfW zak(iL54s@=_*8BrjY?ITZU#z?*G0>GT1PkAW=FP2Zd)+^cjLHLprd)C*Psz6hv04g{pa@M>P)UV9O5)*_Fj6T-p>QS3HZHZZ%!SG-4qC9l`wnDM2non zA?Sq1c;<+|`b7VADJ(Q2L2^HbpULJX4B+Cgq)UiBX_d!PKwlqrH0@k07o(#MrdNI7 z^uMW>ZMN)9zElE@A&l#B}RcjsTNa2pnPhWTN7<;7%|g<(&d>oXLJ=j== zi_EjoAn-bw5^otHIFY42w#!zVUX(%u=6h5*b0?8*D=YW=sPhcP(PJt@agWlPTk(JE$oiEC#?JbV(-o4n%b5`VT`CaB8oB-3}IAeWhTL( z45AE*ih`hqK?TDkV*)`zP$5JG1Y`)v5fK>$8Dt2E1Z4^c%1l6pFo!@wAmfYYc7J_O zpL_fEuiy9jz3yNCu~&9#t*TY4c2%v~tM*=7wcByx82c%*N>^jf+{ntkb~(jka6ll8 z5x7#|mvoWo<7oPi8lU|)|6vL+ZjzZ?qSYS8H4i*?XY%Q*?+< zE@EX!1sm$PK7F^ZEu>PqefeR(RCtAl#r}<}HD_Di*BZtcy6g3t$4!ZHm4z=@CxpI+ zT$*;$FV?qgHh=A;d09W^LTE=#`ALkOC_a9lM~QC1G3k$3>O|A`xJl{J*fiQs_Xd0T z-An$a7tQ;-TG*YA4}jJM373?%mD!(5OS}WW*Qh=H`lF|@Li|r)oEuzbUHUrNanz)f zFrXu!)(nq#^m{v6!fTa*#M^uHgXYYrTDXqh<oP@P|T>$}wS&$+x}^AJa@z+>HwN?DIWrK%6<+iFtWuNT(r-F8=}9|`i0 z35|c3w0z_2%X!g99N-T1i42aWX77oF5269~B)L}``GjThs@;gC_a(%P`8gSSq0wOK zmJDE4e7)=QID&=|X2kNXbbqIot>x0l&sl$SajEI!x3b0#xm=y_9J#uiR@QXN`H~PDA{dICE!GKvr(PVxcGYJfE}O)8d|;J0B1T=iObJv2nR! zI@LtD4S->r7cB2h&P5SU+&5IrmW1~EWl0@e(g|+8npI=nV)=SbC~R%7jb?IF`OJ?! zxz2a>;_kObbB@1+V}RDg=CLuyV(HQGMU&XIjA0M=+G_VYtF5oI-d+sT>A&)msx_3= zPGf~$UiF~QI$mhJ2u|7daigj12TpC6mH)CaS9r8Ve;Ii3>lI6hg>|3Z(YlAdZ1i+y zAhFw~-${8Y^^I(gxs!T0rEL6qaaXl7oBZ~6yjpRbN2a~mwmtEUZWZp$`4j=z*uoGutk$~{L<;Pt`m>sg&j*} zFJAXr3%ooD$P<4k>-lc3pOy=GX#KA2K-4ghcItT)XUw-kq3p-?j;n2Vouq43JLt0E z(TQEwwLQ>to6tLYTCK<7PL+n|?6JaG6KU5{Tz=SiuhZ)%TJHzCEhx* zs69J%D;Ep)sg#uJ$80I?=ly1u>6@>4IJ;CKA#-~{*}iK&d%S?+Q&Ucc9oibc0p3!l zACTd;hxgfiz%HWHpBBN(tJ7b_VjklM>Dc-a$@Jc)ri&7rPl&g?cjrj++PA%=MsggQ zt-%tSb#hk-$icRIlJL}i%h*;)$j#68kuw^am9NrWzP5%O(Y&bt1dw(m*v3Q~V(RYMtczwO&K0Rv9&BIjyYJ1C(fch}tgF|S}k{mk>XB?PxE z#2y!QvFh2g1Y_xUVQ`+ww6o`1c;puRCG$BUxmUp_@7fGv>P^#M(dlSaovK#B&MWuD zOUkb|x0cD~dal+vF3Sk+$cO7l+B9N+#t}mscDvZ`lQ*wvUoiHeZ^=2!9hAD8=Cl=4 zj~g(lq;{3>S4&a*+I^?Qbnjh1ajrI_Vi7k~O>m1_DCrV4akHH{JVHLJ<0%r6qz0$e zh7hujC%>7QpdQNgtSJmMB1&mX&r9#zdqwf!`Pr81Bi>%x>UJSZxj*fTs|LTQ#ke5tWud6l=|4BSvizjudz9I3td2aiaZK^? 
zA*RxEy~s*@@(BKH%t9u&EPi3ZzX;pPb^reOyTGevabA-LGDbfSFqS2F;*2}NMVu08`L-JtvFt6_HCH9R*DEywltA5TXNYCIDURVIoby)=^mpA38mtK;)4+uz2>1SGT?QX||5SrJ@Xe6E{*%xuS#_J3s7v3;Heeu+(;jHsY=mp)xIX zaxn^CGnnfcxunfVdU5Z0B;4CL@9XIg5ovcmc`e-K=S5~7H!$1{7;j{3wWwdjy}ROO zwaWE8MG4c3HAu>Iy#B8DA=^R+{Zo3gnk)S(@9hCrw+2AVJM4eDb3cxHJLt|hgq-t# zd$wbn3Ox4AwBr5Ax75s_w1>^J-i3`Xb^0cE)lOw}$r{3x0zT=Y z9x0JLsby-2?`@S$)GIfiypYLUcz?Nj?@y20%=Z>*mkY6XlSAxDwcpZ5?douA!y~Rp z=hn@-4r!TO;-QmU1v?6Qvm08SUyfcVaW(D=4cMKlEY&_hkQ7F+BQ~oEj~<@)JvP&X ziy78kexmPzg+~ygC;BF@Fb=EN1gORTJT1y z%xz$;_{SD4_hSK1DidxqYA&DSJ!&=OGP@E2Yrb+m6U-~LcTQd})~+g)-ksia&s?WQ z`CW||_XAKyKB^{aV_@$J`$ViaPJ_3agCTCm-19E=)v;0GJoc&VnSZlT6EUv6C88tz zRO=as$2DSe`EfR(_N}rXAIjHg#Z13y_8-*PbGiL)g`|sc{LW`#>r8m^Et_ho+;!bI zA6OSPFK1O|iS5WjET;Gl67G$G$E$6!J6~w!i!h$i#*Xbfa$Vx!M(UMCv=m`)2O)oU zMqGD2o&Q=g{QK@LbE(5(FF905*KabXT@VkPOKP*O;k>;ELtiZ1br92vmi5}BK(wwH z0}5Lp8{lwv+!|QNWB*{|usc~gA!Jb-2l08~D+3X4i!-zMQKRxPUS{2QUb4MOt8&@u z-e8^>@Jw%woZDhoaTfdB1z1-Bv3lJ=!&1G-U8uR{Aw3ljMl72pZjVrbr+)!kO3^hB zP1*TKS?#{?zQbKFw=kkqs=d8(e}=jA>#H4;@%DqMhMy|SQk*QDZ7##Fi5|^`TOUx- zYx(+0c@Pu&_DNB}^m_)!iZJ#5nIU9OcRM#ljR&O*zfbx~kNABM^JJ$73d@>BBzC%Xn>18j7?&(Q2VfEoc;we&^kqNx$L1yfElJ#Y*c&h+^%mz zZ2g&6BlUIKTzNLnT;awLUMn)Fa8o?6FkKD4-S0$K8G|x48`!tgX3(E=FIFz6Pdt5U z%ZI&7TLs?22M0QyW;wRZyezjbeLJuoIC8OBGo!Z&uX-6u5a;1!%%MKgzd7bcP7e&- zSIV0pC*Os>r}hK&;w4y@GK(r* z5n01^t|dr~^qAg~eyVqX(`5TDSVhHs?b24y#2dG?%745k6z*tEy^;#-9mon%&yw<` z*Fok;d5N>{9AA6DS84>p6en-4;RtQFyYl1DkW;DbMdPT1_Zyvt2>05W z5ka_3WqcxO3aVwS@GMow_Db!i|a(4`e8z<_gAcRfp~(urwO}8(r$>J_G<~cV&_q3Jcrw;OA=>Z zL<}Xz-@;5X5=SFY9c-Ss^0its%etl48V_in@&<{xl#%&8gBQ8#k?S=h7PoxD>68yYl^ zpQ#JK$SYDky#8h2umI=t>Et!SzGb-7_nM8n86{-HS6XT&IWq(PU%JoSjGuS>^aw7b z_;a)=Tfa;zXt{WeIq!dis8o0CV^fKVx#{*J-%ASOOTwJ5W+v0AX9(QqBbsl%2aqbQ zfhUTu__ZX9-?(N>@ZN+7o8E@=RVMD&$R&Zv$&Lw3Z`yu1)p z9fU=rdC#i?lDDPi%y`XoQ+}N9o$K*)o(=twdu26;JQLb=r9JFE5NvMKK0UcWe3-iS zF*m;cn&N=?^zx@G@%}Zhn?_dr@`2oaR}!qw5)Sz8W}lgSpQMWP_Ug58B=2l8=kEaY zdVS1}>^VlTs_H~u`R<2|8jPj2A2Z(6zoJZDYwb?FoX7!t45%S0-dw3HOc@RLQG7vj zF4LMncZ%y@AAE3qS@ubWQTT^;l$d|T@zR|&;?9-3@!qu03TJcmuVR8VYsY|5)>7?n z6$#e!dkGKjy{qn0?WxW>;(1F8yh~XB$-vqRIe0*6K>vqH*dpRF-BHi~y6jEw*y;O& ztJYYSF7^;=XkQX z`%O+lJZMuZNykIOEM)jew&D2ehRU7yw~yyq-H>%VVE9|+F1DGa!x z4K<;qDmF)xs_D;5pBNytbZbY;n)bdx#Jh`$N4T!%ok0kEGkQ9hyH`kc>OQ;}HWT5% zQEUCq)fW>@tuvhCzt+oy?G%i#a+Pj;+4&QHv|X zcf|-ZKLQ5bDfPWIhm8qmKlf4LPXZ8u3m4KW{eqv@h-N2@67yCTG$db7QXx`eyXi!_ zoLx$pNS2?j$6{|QCv-?zqHDf8zrJ&5X+SOoc??5uscZUP?FBcx66%;~(53=Ecn@*J zaLo8djql%d_QN7guQKlLnr_PE9ZE)ww4Ua5O2qx|10py2 zYCO+XCNOu;NVWE%CcZ_B$JH8ulKfyZrs%%DvfeM*;sL^Hz@ssnS?lpjqX1T%U;38A zQuj8*8%8Iy?vL!cQ8=rwxm7>^{yAL}#l2eTM!I?9MRtTQSNGIEyOw<{tgOs^`J|Wc zVEs-fvvrj0yD~pD?Muf5Z3f^P4!H8*aAP|}CaZe;^!rV}2$2W5aOY>Zi+yE5MLoy% z2Iwwd^=oJkEL5~U@L6k1>-?rZ@V%H6;07Yjz-4;iEY@T(|M9AWPW;w|ja;+UL6_DS zX&5KFc&WaQ&Yrevq@bc$D@R#l@%mV9(AcYWZJ1 za_-&D445>ls&tougI;T=d$EX&HI?ikFj<*v>}wQ!yjq%ZOwV=bqHk12nb<)OCLv>^ zuBmIl9c%r<{Oh4XMR@MWm5`adVlflwk)Blk25nY93?2v6ygBUhr88`!%(i(cQ=&5j zB4Mra<&}n>xG|~E4->EYIzg)NhPUJPB#!r(`bOB1=1dC!+%HBE-z{6gv#Akr*zhbmlt*dVZQU}^llHhN#*K|Q5q z+jt{;dXosG(SE{&lu-6nY`enN3jekUZwJC@?5ADcH1_!3J9iPICdJ%=K)wY1M1YeG z7b;ZSsGQjap+mW&BcXQ-4O{ikv3f@rF~QemrW|jDcL0N=z*}TomdR39j=&CUR=5ji(mB$<`iAWblucz}k?XEvPxx{{KR>K#Z39nhq zwQgQXK{jGiNg-lw6dGD%{o%8J%ALoQjr({Wy#)PiV;vp3zPN^>`y6-q(ahtn@ite0 z+&ND?^E(k;#`Wi|uqbuV;UHEf)r<{dUsJgQuaY}Z0EzD zliXL%riCdAa%6l`L`y_DK4kjt7~vd|{PH*NI<58n;nLI%NA0gF-R%fl{U7=DKP4>O zliE)b2&2_E#gPwh+(TvrRtGG$v_H}6YB*T|SJFlZoLHXH4cr*>zTsrR+e^j z8{5m73A&WKOHb)G;WtIk;!OHfd%@wex6k+^JiI4ASwAn^Q@BJOMnWQ3bME`~BA8@b 
zX!y!v`3`IrB!o@DHf*e}wL&?)Xy)8ytSgFhh&Qu|mPGSEle$=fk0}Wl=Wg_3sZ=z( zB^Qm}a3@B_rWA%M12cK6JxZ<-o90A6=gRUbob&R9<<=#1>t&H=xG$*}?In`PN-4Ex ze&CGtBc@37*RHNmGy2Nu=U$6B!%AX9aL-AP zRgy>bgC|EOTk!KGDe=69F!o$O&NyPDWzxbH7{QEvgKY>|{c0AvL7VgBt;fhWUB>V# zqyyNV(@OB$r7g0&dQ3^bBrn{eeL%j6;`d&SLklq&%t}mJpUetk<+v_TT-T0=hkmWX zw?KUGaPm^HyWODEBX;D;*AY`GKc2LjWT5cr)3P=VwXPZxym01}YiJN}OK!s^y@2Xk z7N*1P)#8b%BiRhy1{W_j1M{NHBD};jYOZuy;8tz1!1_mZnSPA|4mq57F?>w;(&Xhex)Wvu~=}s249ojJAyOO+ywit=$ zv6Wig;i)EEq?BvP2`=(t`#^IO<$kI~4@UmeCoRX&s0)(|7@|juk4(~7-EY0;Q`&Ki z#}pYOYW>07Y9B9Ibi^5gz14k7bJ4wLHc;n}9?E!j)n|p>?AX zw}6=8IM>5vU$o$v6Pr~j$$fPH*vR&RDJ=ucMs-X-%OQNA3q5d-%|ZP1(dO^F(A#Df z9(AzYEF5U7w(cPYXZ5Z2rv_8H-%iI?3@@nKy(7ygOfuiBO}!equztY*!n?B0*+3;3 zJoj;&X6xPY;@aAZ1z@ldnmD|oRT4Rztk_kn0ovM0+<3AYzX=b}uto0i=C+X=o5Adlh)*Cgm z^?Tv17UX@;Q(70Y#0R9FUCJj<+0-$Nsbk3pJF;Z>`_+!>R}Bci&1tyDA+M46W=3sZ z*v)SGj_1EX*m^^cj-OOYF_NZmpkrrXrIZ24+J#nmIU7x3J%@@W^J-ge0>!W zDh?sGo`C|LJ~OMl8I`sbXo<-NMowCS$Dq?|I`ou`(V#TBd82n@m|C*h=?d5DGXagb zl2+-@sU%NVGyv-CchZY8ew86<=(Xk||F)M!YS=vz+kbZ)Qg@|ujT6Cc9=&Zl4 z%$R|qY2PWb(7t{kw?2#=EX|n2!zsM=px+&V316`ME4J+n^fMOMte-r5I8H_;1HmdYB~uITbex#0$z}y^ z6o=+cCF)62i(E@2c{21dv^AEoLAi^ncj0*6mOy@Hkio?z+lN@vtOy*Ol-x5WMT$8& zpdWmJ@05SaE^|Ia)Vr}6)0eSOo}?Fh+M;^f)H?>cdX)E$Ms?*q`Db^8ywX09+epFG zPgkxG@XWOrq%pPwa#VQ!L6F9~rOlth-Vt0ih7gH7iOq7jIYw&T&hJXqq+Yb0qx?mj#dsV(K0;#{D*nD!VX^!Y}leIjol5l@KpSsRl`0!jYrM|D#Dxstj4PV`cw$8bWi(MCA zlo~oox_TtdIzwZr*2SqZLv6^%_35>j=&Zf2#O?UYU_7#j87_ORp5;oaQoc-`>v=dgA#wv zg-P5PN@9zy@Rk$C5N#;zCf!SQZomhJ$Ij5VJ^4-gMQDS#iht-0U;;Gc1&B&W=Cl#eNN(_-aOH&}Jz!n_~~-^)|ysxHBQV$xSdPC3)?k z3@7s)Cv@d7gHhc=SI}#p4}sw!Hz+H}%gZ5idDHAnrq4L1Q@hUK`zP!|q8HD*0)Moh zY(0lB&LSyIEI+YzI>CCQMur~3&T5RKk}b68YjkoCGxouyoH1N>CZ!NlwFiHWhlxAf zvf~ol?(y21J`8#A5ivDkGM8w1!3dAX4b=fBm>#JGSK}6(-hm{i+0%D6^zoTM?-+Np zqa`=&7ukZw{5|LurEi`e-sC?Z(bN6hnx=c1GX!8#20wMBWY<&0;*5u1wN^Lng9!C- zy>Mv5Q(^LuF1P%CQV}hf&~+*A$M6-R5Zm{hhG$|Nmr+1Y9}6gYBmTYuYGGxBYo52n zG`<;yR!x5Us7^_zJLpZfhUDJ{QGd(Anes~8n{pX?s`DQXikG}nQioRcz=#>u6{+8l z_w~^=b#5Q&W1qcFrx)Z-=iz|Z-(Kj*zg!P4YTK0kvVrP)asKMM;&3tUCU2(%4*a|U z?HHS-M)aaccGof6DR%`8!f6b_lJnSEpQ>F$H+*Qxdw*CjTstXeePQuz&JPERC^!jP z%N<-JCh*qP(N$IS=0Q`$mvvOen@1F8D7po|!_~??sbnK03s=J#sxi)&Kh1u=No0J*5Y+i8fF}dqq~U>F=qeHQw6yxL76@YCgwX zrMGchX${QRj_dT8;`IP^a)>SPC}$XjJgB8`CD9HTEqbF{Bt?)f`+bvwU)tYxs*GYjJ@H z8zt$48k@5yTst7!JhyH*@@-lTg0Bg79cf@^s=D+7c$*TCz-C$4RXEpHx@c5;M9|{*ZFYhvsKi~TNDL=;A{X5Buc%U&G zxxxOrK}kQhrGY!eIw77RW!gaX2#t`Rb|2glc9O>O$fEGxu4jtNHCUN(j`{?pFqXo$ zej0YFEV7(oDt~hn2@Y@-Z1;1+J&|AEK-jg_Nve zhL}4Gq$ejlT#fH)*xnC55=dI>Ln+xvvQ%%no~|2w`_l9Y#Y zgwBXCtfq>xPPOT|GK>;CDWTt%6Td(n*I6%7E%YCEAecKct+F@^lO@bIG#_Ccl>5td z03j0f8_h~M6ZX6oel9liN&``+&eiiJFo+RV6`E%-lQBY7dzUiy@Hm=SzL}|CTi1M! 
zdhPBnft0YLSHr`|K(gT4U}8y_B3pu}LE;LsXscv{dS@HefzL>s%le!<&c!8&`ktsx z>e0hn-LJ>@Vu2>%xvNaN1*zoVgR;aAnGby+Us&T!+KIlM9c!3taWDHRtT@Zj;Ga`v z=}W?QDoNh;l<;!Y=mXY=VJ;$p_nw32(u0NZi%_IGMFTqU0G+~nVfMMblPj!1;JPkX zau~xK%#9E0ypQ}SP-Xn6#ui9Ox5fq}L@tyaNxxLJAP`(PDMJZqH5#KP{LktC$l-sK z;D3zaKhqPC^tin^F0IKHXuPp8xx+TAA!KC&9lAoqGw=57a5ZMW=Pg-qC{MV|_EuMu zFy`Fa4&G)HkkYV;W|!bN^HrNGp7>1?o~;lu!$Waq=i=CdYfu!myKc+{Lcub-Hn9?Jqp}tD>@+YxYJeS#$;)>r$C!<-t(1d3840Upc z2D4<7Jt>C8;X^iRTWwj`x{&qnt+rVatDF7&%*sX;r-V1xbD1A;p}9R2-g1g9mqUGn z=QO8aq1Rkmppea<)Lyb}mPvSm7~{+@y1R0IF#&%aX!8~x0UH&D)86b=%yLTwe!G7hJXH}h7< zacKJRD`9>(>CPL*GD(Lx^b~%yLZkJPg}G}{(6R6}-i9lb_1$M}f*7%eM{ioQJ9wMS zo?fxdax!}3XWV7ZZ=aie*uQ=DX<#Yw7BnI_<=n)L93p=)KA+be%x)y{nEiN56bo;R zTn}wv>Pc!WOrUw2Jk*9Tk5=x=nJ42N(KYA|PVO69Aew6uQSZ~BbAYr^^p*d+`CMal>G+YGp`y_t_s|6p*#Susxni7TC&tCyu z!cWJw;v(Y3xYRXIT>Zw+9z3)TH6l!(;4jRfX%sA!Gs}*_LN-_1`;iH`=+grmB@R#o z*y*5>0R)x7g{t+9unF%x&R;>ed>@C^~%pFNlP`Z>iQ0--G4c|4uvLZ?vXF8g%; zs|;2;-4nWmW{|p%$)eV$)>?(RGvy^&;nZN^h9Y+qb+H%Qz-}f*(8hr{lMv=&OH?Zk z63Vv*4G5a?3x-Jy9M8BP(eFr2kgc`>!`O@495}|LTrmCO-x!J_2c_inavFgaq~Oa1maTf z7@BsQk}1Ar=rx@iI_%A>g4!;Oony*)eM5p738-I7y??&P>?Ctm__;V7t!Ye}N5@|- z`^di=<0no2lG|9TpD!G=xEQuc(rdj~a)K3Xv?HIzW{lx@V~>wq^U0WA#RFga{nY9z zVLpM*A-ITCPh2BV{o;5v`;80lX$r5R9D*9@DnwP=6rq&+bA_IK?VtP- zDpwk(}AoS&@?ok8;hLP7;rcOg+wc4EiE!aN+dR zqyCC3>)m zd62mKpIRNxc<0-#lPfCm`p=jc&qlty{;CIBhw^FjxitGH@#kAZ5AFDuuK%Utn@;ZW zkV=T~lX(8wpQ#p!Zyo;|JpMicMT^6Hf&FDHe;~*wvv=~)KF2PR-P#2i5*MG^Pj}=$ zh*$MAKjh|Mpi>~1`{cTKbX3;gs%cBJKW$KL$mi#;n)87VCr|7d(7!qPbnlLqY)r21 zo?(4NbK_o#i(T=mU-JDu9JKZc5~D!5{HELf1$fdtStoawc7c9OivH~1tofVl{S}e( zTB3xL#DD7JpUM3rBtAR*rZG2+T9?R;G7|2d<`UX%UGds(NtqJvUzCk>+Me1gR!cVW z3I72WOq1MvV7& zOTtM^e^C~57X79Ae}F-C+Md}*cqGZ^vOAP5%s12Q6aIuO=G^%2#miXRlX`3X?AAO< z{r<+U+oeT&_h$GsMsNLVHvU1EM-|O~+1j7k{|9(dihlZW0XCCzxj#GdP0d@RSUt4H-|X{0z~$b6w2Tg8Pn~D>^p5-| z@dcjd7u_68bP7PZ^J(^nBra~d@Y^rC{4-*F!I%6n4~Ns{-*s|TwF~(E;O`;*-t?+w zX525Q@ozTz+w>CNdj1l@-(csDOpsf(PIgK7pM4gPAe}gsp+G)vu+JK9{Gt50Vmi(KaeSLj?g5Ep zPjdxrRbu>^zou}-?)xvN@dwzyQ2qPJ_Z+VO(&WEG_3u+ST5$T;#risQ@QH09B!~T( zd2p9qp=plb2`{@}zW;Ax?oC#EQQ0+gSi2xo;`>wkrH=fV_&c8FC)^y2bPA5-zRbQJ z!yo(Ks2NJL-z)LGEB?-x{2&hpGxHBRxsrb%vaQAB*Jbh^i{w%S6<`$nUqb;{H(NsYL=^0szuO5xi3 zZlk01$LQ~WAhQRMr9;T<5oGBYGJ70Z`V*Nwg)E&xX3ru^=aJcq$kJtG_6o9e6`4&# zmaZeSH;|=FWHuXF%0*@iph^L#Y(Z4%c2u?ys&p4Bdk?B~A1Zr4s`LOVTNG7#7?mxK zDwRZKOQT9u|ROwYzwlk{K1(kgr zReBSZ?S?A#KxN-TmEJ*Rd!tGbsBB+UsXr?F9;!45l^udA4MSz4P^A&5>_}AU15|c2 zsx$_b{RpM$wNodrL#{JbH%cEqzh^amPDkiCBANh}G&X)laFBG9?7hAJ&e^vo3z`ZQ zexzQFa)yWPeSlHXwbacUlp9LD6Xgy6xc3uAMbA<%Z&Yq9H7p7RXYK_gsOVej=S|4{ zOnnj+4>#NgOHeVeG{~Eln@N2cl>rai_aH&V(9$q(PHsN6AgU1lao?u|6(dWdyd}Bi z)Q?ehaOOS$R>j!TIFBl~n%WlC0XGzeVO2~lP4eh+>#0LgBk(Zc2Urz|B_wZCj+r_i zwFv(x{0XZ9wS?Y+3T~L?Lj~Dpvrs{SQ_7}-f~Rho3JRUdHx=A->fTG3o0`I-$Ol4d zj|8HGPCSZ?dSr9s^vytcq3_1q!Y_q+<)Dg}-=6e6xw2alRB;3#4eC1rkOk3>0OUZT zAOHw-90UM^96$g?&^-`98I%M9sDdg$fMcLO5I`M70|7KaqVfQ3&~bUdagc*NKo@jR z9-t3Ok_Q-qD&zsipgws31Vobun1V#X02t^v7;p;Y00x+Y?tuZ8pd>Kh45$JOI1B0n z18hJvFu)EZssK0-I<5e4068cCE`jbT031O{3V^Gi3I%{Os80dl0-`AZu7gAs0XIR% z6#;G_2StDf=$<0r7AQ#(a0gVOc>dP?v=8HoFdvYB63h=IrvwWCnJU2oLD!XF!JtSb zSSTo435EnUDxJ@J3VxPh?hmq(%{uFEXa)PKqF|+v_90Rk_863{42uIbD#I|Kab*}5 zB%lI&29i^OC4x*#QCK}_{3r|$5;z8H0?8eNHG@o#!CFAqkHK0&k;h3k=_y3?~xf{mw@odrLizU3_V z<#fKYV8`iMXTk2%%GU(@PV;waJwU#l@i@aLNoOB4QzvIFsq43{j;p0&IY^O4u^g00 zw^&ZN$hcTegvg3m4q8M&JSS3Qzj)4l5jpXk2O^r{IZ+~};yKYGcH%h?MXrnI#EAHc z=fsLcisw8Mc`BasSR`9K=ZQ$Ecut&1qj*ldNVj+nMr2$(CqZOIJO?WxAd&M_WWPkt zGZ8t7oaZ8%5;=(?rV=?xB6bov$s*S!a$bn|O5~)7L`vkOiaeFbNfXJI$ayJ39+L7< 
z8;`f^5}J+g?h;yww<8K|#CH>g1Tc2pLPD7CZlV1cyB;BNOm~lv9LDaOkTRzGn~)~P zu2)DO)7>j%im~exvcz=v3E5%nz6&{Gy1xru$80#~G%D;0zyQQlIxg-CNCOBHAMOfx z4M3f|Y~+g2UaSCB=nyuNYT0qf-1+A5 zesRBNJ`ygiu+X^-{g>3_0H-+s0N1DQR zg0oFwyTOg7u)W}MQ3z)&Qz;b3V9k8j{ zdGfB4>LM9lf;E=D#RsOe`5!m(RTrI_sBw6DWw$9<^b`OFK7I;t3hZzSU=F@_3SbFN zIt4fbt~doa3+_7wumRIf0qnq{rvc}|$4>(szz(MYm%#T<102CgrvX>N6{i8t;J(uU z7clKK;5t~;9B>nS+#KKrHt5fn_nM0r?6y?K2=-at!Uzsn=3@j$EN3x-V~a z5(H;0^AiLYEoT!1S1gsWf;7upSiud;e5@ebauzEnV5R(2P|)huQ$ZoC{HKC@tY)7I z?zd8YCMark>zSaqRsJ(UX{*_1f^t^M&jrC&x1I|sTjf6&JZ3ffTu{?WIZ^Pq)vZK9 zeXIOLL1U}gL_t$4t70nC8!*#Kt2Nj89aaD@$E5!}ar zfD%lz0jz*UZ2_y`aJRUde1KD9j*Fv_Rm0VLaSOB> zJ9Kd$&YDhaUTA7z27G{|ED-5T=Thk9#elC|=W^m{mh%T;sn-?uyZ%(PjvU;+d*gPl zsP38nriJ_R2Q9tf@rjyI!iZ0l6rw{$uX>s=5)hS+(DZtUG|=+8k(;D@=D%=pZ#k|R z<2Tc#KO*V5jA`!ln_1Q$k@Z~0Ht+SH*=H~U@?3t_e9nJH-C#t~b2+gY<3D3#FrrHK zD3M0I2q<^jw-3r_pNKICXq85!=`cDb@(enzBGPpjof8Cu0J1b9Q-{$tF=G(GkVfEi z7{rO~h5T105KUvt`4JT!rU-GNe1y+hw*K~!!STk2JuFR(K``i7+@iT zc&o$co5(W^xGaNsr^EO@K`;#PltC2fFn&zT7zPB(AWC!?{S(`b0%BzlWjc(32}PrT z6d6Rh4r6e_+$i9U4B~?hV`##|D4tqPhKNgg!S5!60`*wmCsqPl4G~xL7=@F0fgM)|lwV`c z|5q%?E-n8X+|z6N!FuT#wdee6zt)hZMtq;gl3yeS5Umh~Uke{%&JQX7KV5uFMaJ_3 z|Noz70{HTw_RgDRMA?XX(6U(jUbs(Ce9Htmt8C=>iz~bTG#&k|De6B5b~V&T;6KZr zf7JBr=pnOeO#*7kH6$H9RF)BhsD=v-K`=Z1?`X=m3_**SdDs(LdEO06qRj3r|(%w%2&Sx5?{ zh>jV%kC{BIvc2)+)8Ca2sE#4S*Smsl^FssqFsX~zb9dc!xQ|h-u;2FI3}{FR`#dZ= zI>Hc&#RWrt7GY=Vu_0@@p$z#k?jydFzIbn}`~W{J_%9@awmJ<7pR>A@vjnHaPUQb$ z?>nHH+Lpf|Mg^4;RGQKR1w_OSSGsginiK&e(nO6kO$;RkY>1SAG!a5jiVZ~22-1S0 zpjbc%0hAEwO$ebSK*|?<-+k}acVD~j{qJ38owLT-GkbQKJ@eb!wU05GG3Yxz;ajEO zS!E~_WWSGYU+a4mb6Xdmz z?p2?8<9+1~#=aBN1}CbLI;)apg8cT;{pvF_-dAQY&YzgZoT$3hS(PgjbZQ^{RDI^n z_mwvpPftwOoTw`4ta>aH6tIsTP@kFkzA}?BcVe1*qUw2PRh>*w$Ub^VeP-7C$}Gk{ z|7ipNs*jyjtujG~eRM>9CjNaTo^jrP8slHp(^=Ig6BM?O9#)^3{k}4r@zj61#=nZu zSv4&agf{TQstwMk za<`sw%8v(xfJ1;_01^Q}1^^X-9Rb)4f!z?;4S#JnwC=c@7Y`7?2S6MEX#hY0fP?@N z0!RoT;U5YKPd{>q-*^AH`QA@=S8QlInJat7DK{Pv0uBL!0Z0S@830rSb_8HI1a?DU zH~h8T@cJ^H)26$4j`Qh$+n(H^IHO7A1@A*Mh!XWA0FA_(Cqz|gUM7E-C?;_lt zjA=yOexbT`I%XB&?mN|Q!q+U*eS}{m^QH!b*S|p2Sf)D(zeMg%#f+kEH>%#Bj@e4M z`$6?S>h^xZUxfW@2T-?viPWo}o{o_v+P`KNkNgumcMCy{ZQ)P+__sVVg+9R;_u8}d9ZvD_^-+S@&B64FjRc*`LeMROs+B1ptv({JE8s4 z-R%uJnF|99+4Rhgpx*GcWtm3;vHaIP5PLKbdn`~ar)*{O=Vim=f-`dh;SrIgRqW5p zQoesX#s$C537n4*sML?7%GU;oeR-=J8X-W^kEF>f28xN7t-Shq*~@Xk-Z_EUZ!cnA zJV?wwNG!2zzAa$nh-_=gzJ>uc*u|qB z{hw6Q_o>_DrsO*}wruk|m0r5%pgJsYn}yq_f^G4K(b(;wA^i)E@@jdY%){xKQ^|U z_B-{o^ofU??89xEuAe?#);sZ(cD^2hn?Uk$bgxTZgSVDq^xfBb&&(A5F8Z|pDDsK|cAAIsnB|D@9A@VTS5 z1e?#Ae|;OmC5|POKB4~dD=IW$4u0!E+4+3G+L2FzZa;e&h|todx%`{&((o%TVe`ie19Ia;mKIedAYXj|E%?|_BCenc{YE1q2)aKRAtQ* zsOP7LS^@3pPyb2r*yhggL+6nWPdEBiZ{yGIUuompK$uS?|}NB|5?{hpHTnzZ`x)`#Y^`a zM>N(V^J9qnLR_=TAJ}%Ks!oiqg+=uq(OB2uep`nk`>I4zJ%MN)Qs{Thr|9rT*e$+l zeS`ab9f~_&rIA2%4=Ehrt6Vq2p7T|b4eq5nl$(5&W&-gv-zwIp$YUd{hp&=qaIesz zeB!IL5{Th^E56EeBTTUGh=z27`wMJ{w_5e3eXtdn3OuzG`;@ zkw{DJoj4tWd{tg~x=Xt;WbD?&sMksqvoL zHaKzZ9J1|M8>3XYD<0G|gc;Xry>0D8{iweb0JhVr;-mBDTqqdDsTtgt+ zDk{%Xq16))k0V`6Dm6N(ZC@v@oku!XR+dts)QN||NY`hT8q^}9u3Id!)P?S=Nqgf? 
zN+w3C!j{`nB(e+l>bb>+-bSt;u9TR8iYz=7Lc3l*=CSDhHko)s6}H!oayGkgR?ls9 zc&W>buja@b_ts?MT~(N~9i=q85TWn3I-=BN&R28%jr-?hV!0{|VMm$bTksXAQWuu5 zCgY8JUo!EvDlFBGvWrlNFmPLqE_GS()tr6fK9)@EP=yiseGm#~`3g*_3)@$7-s;(7 zLm15S0j4yK<6E@&#+{W+oKl62*ik+aQgq=V#oH++)Mv4}p(Rh}h+s9Ch&^RfPNB1r z+uO)eilCn+xY1oWg}6=)rfE;{&M9o;E22s%%ltHj8r>l&L=`pIA$!VgzJ;;d+Y6K-CpB0RzmHpmZG6S$Qp##SP3cDWohd|Lwev%96FDF8dn$6xk(84G&cv+~{tRLQGVHZFZnI-Yz8a z6)~kQYyC7;8{O?vh&gI7GY86bzJ-}v?6p#t^?sTfjqa}NNXdA<(`;f(DUyDg+Z)}X zDa0x@7}|jX$}L=P?q(BLN|E-{+|}srmqL7}2E#j04&)Z5nlp6Wk^W;7A=L1@y2r}+ z4*jAAd&O@*w{Wk8TWmt9i=3aPZlgOgg*d4O8*resI;`4}av{HPy^WhqS}A3lpQd}GJ28cL zMjb|Vq`cr;@D=H$lT*E%Kc5k!>{Ogqx*{#;uUomh>xQ;2Emu$_k~rwa-P_==2D$vu8WCmY>cQiuiWFo(mG`+SQ-Zeuqc=>oI( zoEl{^^Gdir`O~NNse;1ULvE|HN?mmPG(#HQ`%;Ln)nTcJDZB0zB5d7O<4axk`)MK? z-N#ah9qKURVanM%g|mD`cBzY=pJrI2`*aF%SRK}Om{Q8Ouyb2YD0MOL(?mDAFQyP# z>af|vlqtRiUy)PlV&tb8)#$z~l_<0ow%&=d>uw>!-fi`*QkMgMnwJ~hSEmxCx5D;1 zQO@4&r{0XJG+Tyzuwo)jq^oGd5q{RMPbF^K3WGXPUfeBAb#SxEEv1k#E6Q+$oi`^DD}3bT>#PVz$Br zohhsC7jAZPgWWBabnq*>ZC&ZH|ZD<|iUyN_0t+?xHJI9-?7o@>7Av{hVvBD>o-0VVmfZX5B^Cj(RLo zD_&P#PC{xZ)6>kmi{h(1Hinh$p9&1?r!Ks%Tz(y?p+Zly>@G^G@=ysYvz`jf=%?;~ zQ+b)M*i28e?k-BL^4Jzu<~|kpte+bGrgC{QQez7}?O=CNMwQ2&u(H!rft~%-3cf}1 zZ!eQ3Qg>@%5&AC+d$vr7g;_tzcPye7xjL$yQuBXl&3_n)Z4;DgnkI_mF^?&m;RKE^ z@-gAR9H?#dxwmfrNYHTtA4FGH%TH)W91%m5QS1KuL*?f!m-m$l#rd`)wnsYtl=LHYLd>tVeE z`*A6@{Al?PI|7l{EWh)Kh3GiMG(zR0yu%X<$#IBzMBYdFkS7-M;}FXTl@|G=Cl*`A zWvn9#THHrp=j&fM1s%88GoExP;!>;p$P){_@g%zlFj-!hXkj{@K zaQy~OZBRM3lOKrx-Pl-)#)mq$8*pD4yHQ<^7VPo|(EnCk{`WHh=<%O}93r_WKTs0^ zyXRp;bswcTmZenyqCbHrm^+uvNQ1%;3hLKK%AYgB8Rs;pH2bU@-nn{4dPVrbW%~6| z^3z7RsGJ7VW*?Q|ooi>LSA`#3u3vv4`o3Yr8{-6T zHC$@;F&*BiJR>a;eo$Dy{*wH;132Sb4SCHzPQyDjW~3#<53bg)zbrp}02g(u;bpUr z@9@suGt#p6fgl5N>pzS#xAL!~$-19Dc@JO!CjgNEWWn!|1^4m`(r&t+K79{h04D&E z0A#`Mkp=nrX=(26r%&Aj7{CcYBmi0Pdt^aT{?oKi?x$h*00wXZ5D7pQ{2p15o1c)j z-+lDtJ%9n607L?i1;0lYl-=X5=+2gO|vqd$U{5{bbedVB)5FSE~%{?YRJ9RhRqFQfsz@a!H96^4NdrWS2^lrOF zwZZ7DLoqHKP0q~i-Z(pWSKFdG*D%lm+m*mAm*a%!F7%zBX&4{Ieg3FW_J2dcAK1V2 zvbut?vkyGc%<9TYI+{LFCsj=6e$|@bPFNU1+o=srhG88G{a&9dFzh#+J58QdywM56 zF3wcw6qZ|T$ogov*eQmYqH%i^YPyE7m^t>VG4AsR*m+hTjX0!9KnFB=aQn;X^9np} zgJEdL^jo|EYj|Kf8jpD^ikV0L^i3sk~cJ@ zfSvlti6AXZGYzV-oX$I>?y1oxg|ccU9^02rppimDA)Yi6uUgA9J$eH{uadjanfYqw z>#JBX?#LS{G5z^YRRSuR&IsPWu;-j11|6DqLMvYv z(!2CIB+lEK@u|)S?M;T&m5U#=j!{W$1vDp!z^s(#r5%&MIpD9Lk7Z07E`AxfqhN^T zH1AcYVm=xVsyQl&R6wyla;OB<_Y!Uv1fO6=={P2mm}s$ zVvf!((s*3MZr<0~w{|>^A&S++r1NGx4W40n(FCpABo-2BNWyZyzHOUR#fCHAM$2WB zxRd@!5}M?xJ>gSn$c@8>@utN*HwJLuGPA4Ljg>FzTDcRiR0-AGWLNA$x0V7;8gmwl zWiIpt(A${DXryq?*Vs1RR4X0L>h)JZ&v7|S%)+DjAOd!wm)rJ0mBiu(VODjgau@1w zbkC}V$$?l`&l+_!;}B`8whqY~Xr*^^&Z3!t*zww_LG+?_begu--IeK0O!QQ6Z+Tyc zV@KwO+2BW?8lIv$B!fEBf;wG-Iw?W(^3!PnW#?#^j?n7y82?wHU7=~H$BaTZy6AJ- zl*uGgVgEz}f7o+RyAb+eS&}=0+=u7#vKo5R{I22GJRH?8+5`J-pkg^wW<;y zj`hsB^Q&>_o2t@%#4THbDs8xgDJ})&8}X^%f z79rc_dpVp93Xa(vt+@&QzaZGI|EMCmS5RnTjGi@p^1w;i0P*DklNV>6d8GQsGqwcW zKGJqFe;R!Klh&<4>&tuRJ~8#NV>jp14V}2#v4$=fLlkru^xlZ#xf&?>gS4SNjT$X;j=E9b<2oyGFBIenKg%8^)36mJY` z#4#5j1r3wfF|!2y-Q0zh?VeF!?tCg&#IT-AM1?MJFMc3!!?-gfJdJjajXBB*=xQZF zpVK--*zNk++_V0ZOR;uu8_X#R*vOeCW)n94HftG^U3c{( z_J#<1TC6pa=XGJYIK+*##g8LHa}9$t3rh_O#iDr*H4p^JWqt*-oi8p~64UKtxx_|f zo*B=5K)3leDdYT-J{leEHp8!yyKS(2E6rGPnqN^mcAl%S2uq#kSHJX%&97eTO=1+0 zTsl1;prT#BE>+i<<5z#ao?naJac%nj{zYQrQgz=~OZ50FZM`|&hrLUtg}q!pz5k8g z{YDFVG0WO&DhK!tK-SYUAI-s1nfzu#uPxEwZ}j9fCKcX=kauQq#*lsJm513Q$xLQ3 zt(rMaMbVzGV@y*q{6BgE*hLNT@9^)6-c`SQ|Bm`@oK0xn=UC>G}MoAT(xtI)dQI_Ed0tr_7;f$SeKWXnr>7{hHRG>SElPY^x;| zU+^>XonC*mOxm@?_7*1>Si6At$%YvFD&ZsGrvFLt;k+wY4%+}zaPoIFHSMP{F<2@ 
zx0X#={NbqoHq5u4g-ed+PwwZ0we|)niez|!WUw%n%O}x2A84L0JQX;zAHR|33T;ca z-xq(#;n}I&BIfMJH3aU$a53&jq_>H?Ak%e;IBRsUSMU3- z))1b~m%kqhFbJNG`HbtLg`vQr_~qAzxSh5O#gC$2n%TQP z*y@@xR!uo;p`#eb`5E~Vo4B-eKUWmv4%N8L^Jn*qX1xz?{+d}p&El_`^}NmdtJy$= zdd-dAO!bDVtM5h~THsIYrz81GLuqs?hNt~ z?fI3Jy7Fhj%7+-gI!uL3wXe5>?cbN-re+y62Kh&8emYn{J>f+8A{+9g1Zhp{s9c5`V`0*&dS9{DgE=~WJ)n3!%zd9P| z`RSV3j`hACiD=_}D>`L-VTsko`&M?!!6TYE#`}b*O7Pfb{R7&<)XnhBX7mB=Rh{bn z{dWyHwT)*axsN)<~Vb!Jeh(%m6JR{2u-MHcN6~nI^xQ{|ubsu`f zJfaQ1%5fisvFavz#6I&23fxCgtUA$NF^{W;UzNCzVpw(Cd&NHU3(DL_Nvt|sz9z=- z>t^nw3|3v>CW}DM$V!(JCX8sgR|-$pVLJ}9y$&;?q5Q&nY=k7;TL4F9Wb`nW4;Z)umRf< z#P+J=+cYRV-H7ct$M$OF+wco=*bXGytB-HPFUVs%BG_Kjd>ej20o#FPdo7!amV2%6 zR1tf!`A|4=gj#?kbjODyN2&Lb#og~049}18Iv)D_X@8}bAm4WDM`({ziOA2{E2!nj zq3)sx<^;70$?P7FVA83z$d&wmMaG<@zC=pV0#Hl_^)*tJR)k_sQQsl8Y2zsVp|crj zLDNJtXQ*VPD=h%coTYXk{b@yL<{Y&PiKLCAnM~>zWHe0^!(>tWk?FJm40E13jJ!iD z!Y~)8Uy)_BaSU^jI*DwcX=0gd>I|}-7Jy}Ps4V0Vtq9BHQrSo*Z5+$wEf-iNARM{E z2%G~-6#OCp5)!wJj0Axt6>9~eRtV`BeGvqSi(ie51e=~FofKa${wT6`Ie3R+qM)G2 zRq}_c{@mu>g@pu2v)W0OJ+iV85s?>QkgB**l+h}%lj0+gprp_fquS*lE%C@Gqcvb( z#U0B8WrfH_wJSi{;s%4o1-`D7vnI+< zXopFT4CsS+=oNuI;M+=yB2lM=>`iiHK@{=wD+2q#rAj#>Q2|21CQn3fu3(4=1`Ay> zG1>^~6IYBj`VRa~DOgkxE|h0tBnKK3KOSwQ3+_^y6cr2=dTC-L51JOwi8j&)e^qjK z-MY;=&Dw_dD8I09dusZq&GLdhLQz6%Od}OR%ON|i)*6GwHzkS-UKZMB8mR;lfdpTz zH32Jb5{C%J2pu$y+yq(=d33eb47_7gB1AA==!|KkGDr?Gd9~I8Y_v&SLNG}v&NNa5 zv>CD^rq&8vq<}8(mbqX5;{l+Z4P`Fi%Lu%t#$%137-p=rA~I)8snA zyF#XBMq5FLAvxEKoWX^gg4YWc3i+BDX@EQ+U#=Osf~z)7t`~eHbji$U8^{Nu7%QDx z_ReESbH*i{XI$;(_O)m(EuvNN%O-It!DmA6%p$jg5RgZ)wNUWnrbH>hYN4-Yky;=$ zWHPqa6D*)CE-m<6XpMQ~4$x)Dj<{Mcu()!fwBReDZRU|XLGh5_xLO~uqO!P*;2WWX z=8?NVsgOr;wSM3o%84?9?}g5oNA3pUA(L^n{$L|zaaqBSLUHDidqDY+9q~tK!ncj{ z%#HSf9zc%A8^ORwl_zBdJA_`E8|?!1%S^e2X7GU66!ZM(gsyRzQh{^flz8yt>A*IC)oxYHYX1++*Iw{@_VI>HJf?p^r%11GT5-oD{K!Xq+oIp6ZML9=4 zYF6lxMUFmb0uqW7K!JOe6&0f9g_`a-0ALJf)nY5XBLiw8$|8aUm?6KqMHX zqNo@pC@gN7V+2|up_3?Z0W6`CqZqY3rY|#WkI+hCJxhTDAc#a_V(lfcnu@)W;A-Kc zmI5Xq8Hwh^+AClkm1ZSDF=3RYfGJ2xLOiMVD%euRev{x@;Ver5GmyH3eNyc;@DY{f zO@dOwRh9zgpq&zlNwsm{lPdPgf*XXpECnn;-$^tl)h2*LRhpFr6@=NA0+zk36)D9= z^P(wQ#`BUX#m4jUWCg7QC8fm&J}TZ&X;u+b71py7um(9uBwnvg0pC@z-z=yheAG(7 z2IMBue7!aeT&~i*Sx`$DWhHPB83_wnAGZfx zl2A*Dyag8B9H|y%Dy(LG+yN9PaWW+`7c9S7Kt0M**wXsABPd1UZc1c6c>Cr^^{9iw zC#{bk24zVMr$pWX8*CQX8f7nh)%v&-C{IEyHS!+Vc5~#`C@0~&*2kSeg%T%IBOkn+ zMVoqB&cvH~TFu~1J*{VoOg(MbebuHe1;)eH$6Y~H62qyH#bC^4fo)O8g@tU69|66T zP)my}0ViyZ+!p04tY&lE4fIapWLo57aQ0>a&8U;YmNv)TL1c-$X`wawwij#DjGls@ zY<{FE7$^+4sr3NSBm~lp%E2!;@7OMQRyf6`_9$pb!YJLS0!-fgXuIHf;U_k=P!L@r zGTo>O+`oB;mSDIr*{1dwh$&H%v+~ZB5VbXso{DS@uD%ypD@o?>P zZkp0eB$cnZF-@oFsH|s&qaS>IRj(c6FtY~|1#g)FjS;ak(BC2O} zyz68(h;{}yUh&zN^|&WTL^VL?be-D<(a*u3uJrB8D(k5gQ4Q2NTbHpxlo~vG#Ya1< zvgeD4YLHHNUFQZ-TJXY^KJ6@0kASFZu+D`#nT?`7!K-8`bA8#r7L;kNZ%>NAJ=QV#vezivd|@`c7_; z{X>Sp4|4Zg0C#>6&oln1@W(mMulIT0QS`>y7b(gG+#5SzHQHvJeV1~sVB;RMKUyaA z>%2)`@lz1{$1=HJXZ(Fg(1(a~UozLyxYG0WjiTstqnSRmABD@$T=%ba_Kywew`+~* zLI0z1demQQKkDO1Y=S48dwZEQ??&zG#t+BgLdqh4-4cF#P`@8~%HMHsRs8ub^&cJ* zF4t7yTKjNw0|H|?0&t8gdIlrqUHwJ&` zYq0m7wYu3X-2BVJgTEsr7}$m8{5Dwq7c;I&y(d8!XSVp?(aCu6W}R(8@UF|r>RAVSd{?O+{&f*X z&;N2r@2~6su~z#iPRSO24U?6>+Y=)zf3GJ)R{p**KUb`qHsZHq zJyv~kklsT5;~+hDeQS^&w|+WEPoQCKupX#kcd#C~;c&3tiiUH+dO{7!!FsD29tZ2K zX=n}B6K$9d))Q}78=@!CuscL=UBlrJJ;{c1A$rmc$su~O4Ua?gHa4_|=*h4Adx`nO z1^(Ao0V#$|W-CyKuV3IBf2e;0B7%gvKRM>890 z!X`3TlES7wD_D?9N~1|(3z-e1kBa{&NdA|H{HFq^e=Ts-##F%u{{yI>teD@*1Iph zD^z#uWITTZhyYIjF#)6k015yW0=ojRKLW>s-##F1vpo^AD>m6J>s_y}r0Qdxj8|^} z5#R|RCV*4`Kmou)U{?V4N8niS+XtkLS?`{|0SMp&AP#^u0H6RsLI4Q?Bm|J~4~2wB 
zv);XW0}#LmKpX&R06+nNga8r(NC+U|9|{TevfkCb0SMp&AP#^u0H6RsLI4Q?Bm|J~ z4~2v;#q0;03@;nz8>Si(4dV@q4CSyD-2S~UV3*p3iXq38tIR{=CuMuS2`r0~jwx50 zhsV#zj_hrL<+U#fa;wb4;-_Q>_BO&&+SQ63{vw!@9pC#2_N4t}@yBDi)#efLOM-e> zT)TAf&SO4R^`Y?$*}iXrBgG}he5&ii<7Z{Z_EKPl?Ms5b(D=T+3GF&RVIH=ZAL|Q` zAKUw5CM&+%yK{yCl?-9N{pp!?@|JazvZkKuXDS9ZsI z$+XWpUEa3cH$*9R^?y)g<94*M*HJRPv^?ci{RKhzTGS08jw15ZD!f{Si18{PqEfQb&}3I2&9P4bwk5*5fYL9$pTJ z08aoh0i*%|3IG-Yy8^I30>^^iJ|KOpi<$AaHJAhp#M$p_8`KZy3w?;7oK5i1KX2Sk7;fS3SM0RRO63;##E!W_@{ z>qPiN>W9TbI`eSo+C0a_jYHByt0gRNty{0IW%Oc&N0QQNS@jZ=ua-^@HM$AydI`^3 zoC5OiH#vPmK%Y);Wa{53rftLmU#bA#?EH~FRRZonfIAT24g|OZ0q#J6JCNVL0|CAp z0lpgnz8m?4GZSzJ0^ETBcObwW2yh1i+=2Y|9SHE<2=Ltq@ZHESoSA?-5a133xC8kg z-GKnf2P7Ymd_eL6AL#tCzf0=v`Jgo!%wuguAyBC`6*b{CTRn-z^{(B@RTiPRN%p_Eh2DBg8GO!rZjzL zw@+0=D2^fD_f2qwUefJT-4KqOl^@fmL=-ZQ(7lHKEYF|khkpO|qs7_h{~XVs=Ld^_ z@Z-yWk{>Mo(T^`5JOAf+y3hYP9)t6Lj;H4QpW%53{H*u??AOd0xzUT!SXMN4UJEn# zn80KEW9J{xxL-;0bnKiVk3kri@ATwN1!0(m99PmJznDy#?+M~pBC*~#aVrTN4tA0^ zkLA#KUvRuLT(%f@hR4b^>?O=&c@ycRg>m-tATGy~H;bJh#4Su?=j_ZmBiIE3o4}p4 z<9gOTDKw3Zp$cX*zo535bw=9Y`@_R8i&B2lpS}X3_Fb^ zaf&h1p1fERui-6wM9h%=wT!e_HlIo0%|+ALb4@&g0%o=n+h0cFekSk+in+z4g-Q~u zX9zo2$zzZ>12pa^zv7@Lr;;~^<4*Awd$hP;X}tGhScV94XP1-9k?1=eA z-eLo(&5#?mRPoWJ&bXmL3~OA1xvR@=S` zQ>Br_VKt~0+j$Tk891Y_FU-uuj5ItzVfs;+ZWN{ig=s-y-k~t{C`>I1Q-Q)fL17-E zFn3Xy+bB#H3X_JyB%(0aP?$?73MPbgOFlSJh6DZ7a6viEeaYA8iQ5Z`U<^T$# zhw5I2>J~s@7O|LFEQXH7j9@W+SPT`5X~$wdVli*Am{(W~35$7##XQDhim;eFSj;Uf zCKHQE#bR(+Obixt5sN`%F-R;X1dD-TF@9K#Cl=#|#T>?B4q-7CSd1|iql?At#bUIu z7t(XH2RV0BB=Wcze|P(E?=SVq9k8jX7GSFCJlnna(jH_9ZA z9?clvQL~|5yN-2CnzpJ>*9)Z_N59Aj*io~(Ui&#qUYZ8#(>;z_7DvZ2#5ujBw_DZ{&l`Tb^>eW4ls*9x~8ROe)KEKkgWF3>DjrQt7QMs}7 zbBuuPP5XF9>6KFK`RQ}j^xlr4rl#4N%;f|_!}i7O@P%*EXI9^enCXRSRYO}2=Jk-B z=E#s8_05nS!2!*kAvG?SBI zu*pLW28PV}=S_=;x&x+;B_nTEHT6Tc(07JKzge{DhE8=~N2XO3_d_?+)x#{_ELwFR znmTqJdA$nP4^^Qng{^$EXwj`d)t!V)s0!+bDicKJeP0h9Fjzm8k!W`bCEwqwNLW6v z@_NX~VEL3xqFp44+t;c<;IR4|h71g585uY`6so(gRh}@*%4-WQfL;SrQBq z-=D^AN?$|pAIRk^p*&H`zNI92nt zJ~2rzb-dT@ydrHJr(iDT6P;97?nT~lLV9PMl=*6(%SkVEyi#_UODn~RnXmM@nDo5- zxYLd;(vY~7=F5Ddl4^C1uiLRgS}=}h#`DG|k;*;ET7y!ISf<(HF+&T~!vt2F=wS$9%OOO0~s~n9cNteXKfiOhqd}>Q!uy*<>&BV`T@le|w-5F}Bm}YcJws z#Sv)Uc3Y|Z*pFtzz2`nY>p1GWeY;d@>>IQG-jI*wM~uV!TOKa{LLPFyCoIsxUAE-vi|usPqSig*Q5vK&g88p)|Xs!HGAOgoOEBuIc2N) z`dioR%Bf@ zt*M{)E=We`!980f*T%%unRM2#cvSg?ev?q=(u(*-Vi*74z8C&3hXp zjb#nr{8Yj%=7FiR_tww6P0Vxp+y`BD3YOLzH(!%5iOG3scU!e2`L$J-%4vz+F*jb? 
z+}>Pr-O?&c#aco+Ch?`kZRL`r*Oo_Bwo0sxx%$%Nwo*x=rR7GIRT9f$B3~NbRw%)} zw&+qGg-l-!eW`m}t|Y@ixRK5nO zyL$Yk)@`Yh*wUp?|t<2HY>EzPnvSwnKK+P_r2y|yIgwdv7KTOr9;Enh0% zhLl{jG~Kvq73AVoqn8S|#Y&=In{+9S2BQWU&${o3JyD{FpSrr+RK|M^!>mqQQiQA!iHx~gmn@fLzyVj0gUo)`b_>IPF0?-h`Xp~g=XUBuO|t~C|* z2172ejNpp<#AU9oHeK!wf<%QH?NO8z7rwg8RM1-h63H^0QJ52(k6tw4cyYzCp@vTs zC}Jbg(J0zHn1Y~wf&xk5b= zsJ3zEJFd`Uh|o5wv%f18ih#GhcUEWVnp7T|ox8R~*K(gt<*cZND6oZ1dKt2#rS5cX~Q&I&Dy zDcub-26vdo)pnz3C;H=X1qzlx!78+$^Hb1jDnZh-=N%R%E35w9WC`}t;8N6m%)!?SkY4kc2VQ^80HZ*V0%#5kQ z&7ceDCTPOw;t|@=fcq zRwUd4nnJI`5at%kX+s=?NoEWQ$3_p*O|XQ8#SYpK*MP-*L&9;w% z%Ok-?%M?NOg0BU-_9I5g`G=!zsox<6$*G4eY^gekUh?I`D{ZOTh;B0CaKj<$KEx-o z-{I&()V&DG3~F|bE2T;3a3sFfPaZI<7+m0wBcRIS-$Y~-TlixT>*~o1}6qBDhwp+_-ARdtK zIi_37ZAIK6XE?fA%c&!B$uW*n)^chH0y)gF-AYaskx4$~m~JJv1(8m6b9A+m+l)vi zTRBQu$*CZ4od-$S|)ukp5X z=FEkCW|?{MEAUbXC^^KT%nUDya3gy;AkFaW5zb@>2W>O_I)nq+*nw$^UyC?I-sw&8S*#Z12ggW__J@Nov7_ph0WUqYyFN9DcN7*xt@hcH>vk_GLVhIk&F)5@?bGsJV@^Q}Eii^Tk+ZqTh-ZRx8Ho@#z>$27n%!pB=n?8*%A z3-FQFTXskT{5*W1HOWrf0MCN=v_{!6_3=zNwKc@9Odmf7?`-w5L+aya;ccxBcG~** z8TiLmV>_lEej5J1b*EjK9)1e`rd7!fsfTC4U$sK)wDs_l@aL_9c1&G79bVnaur1TY zPr#qG_ShnI@#An}>qlE{UHn&gN$Yc4=6?Jbyr{LrwroFs6n?MumMwBWegvN1nq;fJ zA3qGg)f#2X{0=_^$G3*qmVJjGglDvR*&@Hg55QAf9c;C~!}r6JT8(X)I`}?#eCtlz zG97#`JZ2hwPuo>8;z9L5_nu+!oz+>Q!4;$tcEMO?LQqBZ$V|c8Owpi<%8{=H4>A)1 zE1r$?7sO_Y23C}fbQQod69OuTBjf^`OwoXf$0P3wG%^#;RFsUoELfc>dZwayq^e-- zW&*6DXyi%3+nb`WiU%Ww1rKf}oUXVxl2;IWQ}lGjosp~p*v*7f75O761vWQDPgUfO z#1&}VOgLF_YvfYF>YJh`D+nW~g0YMQ{|f$m3*Kgk`d4I*oGEyak#M3SW5l;0HbeA8 zMf%9m0$4_ZUq$MOQ-Mu}s9#0$$iV`Qj0E3`q!H7C)fu9`6}S<-g0UM3J{9pJdrqK9 zVb4=J6PG<}a*Qo@QeVO)TFnlw+)1s2i?;4P*sz280=}ws^TFsH)aUROt?Lh3?4Z`d zL9NRVuG~Q-!MQE7HVs>n`lic5l(4wwz1Hp zK7qHinAxnyrN~mI$DET1b^DnW^JKCErgf0ys}=YL45#!*g~{!*h;VPV3&(0m6-t1kTE?yJs6w;hp)FJ^ ze^n?R4sUsHrK$?ef(N%$TMcc2X2Q?36j|Nb0=)@8*@Cz7-vZ5m`?kbescwPZfFEx` zTMcc7ro)f61X$hK3{8U{X@Oe#Z-%DAomviAscwd*!0lV~t%g*f$?$_MnpSsIpx5D+ zEpk@=D$pdjX^V)JstPm_Zq&lD98!kj;Cg3kXg2+gdxnGaDw95#SICZ>%*#x&HLs8! 
zIo^-dYI$bazDX_?uF-POGJTWWHMnX^hNbH!xfr-|ON^z|Cb_F{g_bbOb|p?)#pgs@ zlZrJXJMtJ&(1=jp zhD2MV3V{)UyhYpx!wT*&EB7b4; zJx|${E4svF@tHR_`skingoEF$&=S?UE|o5Yr+XYfqZ6| z&*<9m`_Qmk>LW(=AzLoAw~-dM#Grc`4Kq+Z{5~eCg9qrJR=aWojra@*%TD)#IN6@D*F#qaob<_Ube zq`pR)a)0@hooM1B;}!GBJ^DyUwA7o}x3LYey=K$iq0jk7H`Xl~si^afEUsx_?UH&M zvy}#7G_w^41Rb*#1{e<6@&i2%*>VFP9kMqLJa@?6Fi_%MAj%0YQgsu>ppCw&*~QeYW5b zBsg1ONH92?H^>Oe<_`7*Wpf5U24%Acp9f_x4weLEFAUxa%AOxg3d&{;Mg?Uv2Sb9g z=LWrkvS$Y!g0g1@jf1kM2X_W#PYo&sWitjLLD`dof>E8v zGT9kDw`8(!_LRtEXZAdo$5<9K?qSGe6M6(?vvYbNvb5cM^i2CAuB~vL z+bAN|GN~ehYnj|8BG#H?jBB0L6A>d%nu_4alLtk_+9sVuaBY)EMZ_qRz9KlvzvFI5&JZmCxZJlStug*dGd(}?(<}oh*;O; zOA%bxLghNN1f~v5$m4p7r}K;eiad;P0onmXp?Lau`iQgQQViwHKJlYlj}rr zJ(C+n#d;@IL~*^7+eF3sCijTq`X=>6#rh{rMREPXgyygV?ZR@Z@XY2@2hxS*w!qVy z-43`4%WZ}yH(MQ$5|&eeDtfB%Nkd`7Aij?T00wM%KMMQ~ZmC!OO-I9Q)phf}& zh$N5@0;FgD!QH*vzk7f8e#_PWJTuRkbKaTL-t&Gw@0(1XoJQkSK032zhn;BDU1g** z%Pfqv6pVixvX6XUaqD7L4_q9=5H~&oywY-?Xr= z8^3b380$ARY-6LdD;McEC2T|E64wHxpJCYA##yd`NI!!xx5i1X7Dzw+FqcM^3wMm4 zUYJv3ze~XwzsX^CjqhCo$M{VOTi*EG#bS)##IU7}l`dR_--NKmjkjD15Pn~TEojVk z2}JnmhRtnEaj`)7>4cdz5?r|2e&fSXjfY(dwEf0~&1m#<3Dou*8#cA^D;EoGKV+Ex z0FjxYfr3r9^_HAv-L6}ff6&i$JjT!}LwwN5H96*`Ri^bpD_22cK&5J~l=6<`1(hnf z1C(+SLtQH8exPKLyws&qZaXE0#88!va9b%mNnWbbA#NkZk;G7z4sz=$CL}Lq=>Yd3 zML}dJN(EdR4(OItD_-?c#I+k?ws z`e6cPzTLQU%&#yOGT$!T8KyIaEA{2zl9@{|1ybKmTmo|zCQ$0zfjh~Zgt3tNw&RX5 zRnACKIA7*r=YnD1*SKv=Kj*+<-&eS=nO`|u4Ew&s zZDcw-bBBDJaT}ORoC}70U*OghI|`wE=-jZSM1BgIWha43dFt) zoE`JMQ=r)QIc_=gxs!#M7lSbO8JXy=x95s$-5~1;?n~xXr}#lu18zR^ypzWus~$Ip zdCF=0Agc~%$_#g^6R~P>GnxCG;zg_)+;rwvCl3*;8fVB{<1}8xs=`fXu5hXwU_Hit z!JOw5Kfro~8^@gHPI*lJ-fjBi|*s)H?qT}R@ZpU~bs}eWDc;)CJWL4k> z88wdMg{*R%fN|fkPQWU|@fcSf;{~izTo2>CqlbV+!*w!FIgS^wN^otAaL2lS)&tyI z#y-dRe%5{5YsOYbkABt zW1WLhFX{@8%&>Q8`GCsDB{3E{q<=u=;o=#_4n7}HmvJW;x(-GkP`Nk~LuTK?MNx1u zj9&Y6E-D8X!FXfu!$oD|aEu0fBQEL^E|^hj-_nE1!UZu3?bCZu7jc1%OnaXmR3^@! 
zk!Ww!gUY~R8Bz8v-KccjPR0TI^lsDzoDXBWy-zple3npF?VCAn);XLv!`{B73zdfR zWGu8#??Qc#^I#a;`*fks;#Mi8ymcqMcDEDglRPMA@};pyF{R zj01M*9jG{*5o5cZPY3E0ZVF?aolytsBuIjajUwR0&l_xw*eE z<3BI7O>aX*YR3=fjXx{ zI9r|5DYRBEPxy;D#RYl%<-Nbw)BnZf{+adrI4<(enW+cxEz4cK`culeLG|NrdSmiciaQ}`+?1r1UTXMF{I*D6(!EDlmlgC{(9z9;zUO^*D55-3k;pgjE%)U$+Kj z5z49#om#gQ6&T8@37t{54^EkA%6$Kf6uh2j$0;wRl2U-^M6e94P0K|UfDDFE<&EN$Wh%R&k_g!XB++}$~FHn ztNvfQwa>(|;W_?OLCpWtY8zSLLT%s#)&Xn>FbrTSfSC|xLYN6*Cj6wBP|rdfVgo0z z4q!WgVE|JB%!Du#!b}J=;U~p}r_vT)+Rzd3g~b-T%*PE=y{?7Wz#>=^uuZ^FfJp)7 zLf9+7{t*rf{ysoTNjq?911DgnMYCOIZNqY}YoRu<2-XB_6EGBDQh>P-_6o3ngu{Zr z50Flz8DDzV9x%>gnO)|ihKXMLLu_CXtO?jAU?{+(0COSi6=44ehXsEhASI=3zx1p# zV7kRUyUdz~#a{bEZD0|s3D_oJD8Qrub0O>%VE+h*1%DqPi6-Pe1L$91XJM~Wi>W!( zCqDf}qVvCOdGqT%aDKn>%}-n5{JiDOZ}!0X{l+&xZH4o@jc}!T-16o(d*HN2eI|$Oj`){-V~Bmq*3Usuko|caSjS)O8`Hv|W`7QX zd*VRu=e7qwrbDCSaTRUxESz#7JJ7@7!zJli8x2rug!V*2S(H!o?Qj0z4BZeK_Ox zWKHon|L+zU%nqEvXuRX{0SuHJV&q>gOt`SPe)G~(uEMPa>KkP(<*zUD3d*8r?H$MC zCzx7B!WmczuqD7KfKl+jf&x!d%P3k`$M^AY237)W2`~y^6u>BeQ2?XhlS9EG)8a^4 zXUFMyI0GvIwgeajFbZH4z$k!G@X4Vd$h0_$*3)q@9?rl@fGq(=0gM6|1uzO=6nt_h zFflz6N$co{i-$9?5@1V!Q2?U=Mgfch7zLjk3U-C>#84nuq$iQ9eM|+1d+!Y0oqFNL z=IsYP{SPe36lMI$VZ+&1`!qS-*lO9X{T0t5GesJKdxpXOlg4{~b-7}E!2ROc{lB_o zZrXp&c%S<(mmiD|yJt1+|K(DfVZA@u*#B3To5n%47w_!<)g?1@|0UzV+Mh3%#U5NT z1Dy>xkiyg-lufey&qC0vyedv-U>!)GX+|$Qp0S#HS=<=)!70qt;gc%9jN&7@)_)5mZhtnOf*;IxPw@Cac6@hFD!<$Y+f2QpxcqNoD0w z?|UmK$pi|9>V!}OMd$%Sfe;rBiF_n7kR%cnVEcq5wGW``4KC+4qgA!e6!kq&kpqd* zs$L-oa(yD?P2L!VRF$nxbyU6sRL#I!>Jc>>Q1n?S-cg#~Rmne)R8^2FpCl2IAa8Y- z9`vDw!U7df$$)AojiU5%s$?slIH|D1SKnwdeeA(gGJY^fkTTv?LIcfcx}wiRh*1x{ zw}50Ein`B&f>%(X;S!R%!3dO=dSalK;BYy~f7u#!m{ zNkfxFP2%xrLg|;Jq22;OPNFM{7J-Tr6jh%P;Pg*2d@majE*n-+A;rMih-snCwc$Yb;US5D2x<)j+W1T`^=79gXJc5eG#D zBoUQ3^oB{2i;RRxs-_Y)b_B%V-%+7xB9jIi%Gt6eCZOmQRw~}TPNon?s)Xu3Dp67{ zRJY%Y(3ES+y@^te6i7f!77?i)!T&naIHGwj8o2teF ziywL=A8`Z~f^?y9Kx71{AC-d&;!2ZXldKXCSuh|=LX+g!LjgotzmPa`|L_Y%mMY$x zG?HXRS|_^ly3?sr2FfXuNtX-BBI<1gt8+*7Q3~8Q2JDw zRrxj@`y9)r5~Av`{a8>b-2=+^P~>|^@;zAj9-xffXAji~?LjedtEw4`7qKJ!2yIk1 zA+RHQ`xl-E3K!qMD!VG1v^Pjfd z0TzEl{|@xUSE}|D=_QFr${#AKCdqqGTN$x!N94C(4!gTx@NZKsc3~~)l_}RS_~ZB( zdkbJO!(>1?P}(#6YIB9^JYCP9(jwtx&FTjSX7vL}{!NlwPAWZew*vjTGf@(AUXL%3 zkT{$mHtk`HW#q14QVE}5l9lP>j0Kq$l_G&pl6P(Vu)2TA4??l>JNq_ajoioHZ7WFZ z7wi3M!~@+lCa(HsnLWq zE;!%>8IVsv7dW6qA%TXP;ycil>UX#usZk6QAyv%jz9Naty8f2}om zGz~26%w$KtW%p^TNnyuE)FvwVAV?Z1im1Gm$0qG`RH$Cm*z&sp$UH6B3z}L0c1wi? 
zptQjQ5^*U6WfdB)Bsv%ql`;D1DT-*HR_cxyK9z;vrt4FN$kz*d8)psf0x7+NtcA1A3 zX5`{iE*Gcvzod8J%RrE=_aQCjbvl7S3l!-EL)!{`1SP7D>5qVFKyY&!nsQrV_ta@x zAg1q?j0H+>PpTmZwp9bgzUZ71uvu95misKdKrz@QBoXem#MhV$i9qB_xeY(OMRjn8 zFbGsqRj27kQvt>E$|F0KW#h+#39d!P>PG47`0!}pW~6%gndTySRi_MXr2%05;hp8*1f9>0N$lMHaCTcb@cUL?mC4|nWuNa4~+ zgl*RkI0y*jTk>EafXZ%F%<5;jp$0I6%x#8q0-!yGV@ z@7+TZpMZ)kwn&BCNCH*F4x}cx)C=FWeMg@u?$#r`lQ}_(9QJEANf%2G$E!!`_>M}p zqV>agY)<|rLdtUD`<4#=xHfhRf0+iN>R^)cD=5sHgjYzZF@(cPqawu$wZeWEJ4h)> z_o)pUkj$FeEUqKROq{De%aoD(qhcly2eoI2Zu0L%O(OT)1)pnJY>bK7bBn45Qe@5q zRS0hH!D~Fa^L$=2NUXdAm9bk%#$%pQR>q;5A9FWHu8%x5P=b0KbW+jF$1x#wuO6u2 zTB&8WB&ABI?wvsMQ6{*ym!v>#2TA=hvM;|IB3@{b%!CM4=S4G-#CXSJ;32bN(O0Dy zTdR@ice0%`Nvg-xDm|kP{ulU3_+3;Af7l+9ltZF&l1xI9JqI8*#3_LW%Sm!R6Y9yK z%0Rtqoj*>K2@wQOB2(1#6KT-Bh3$dsl9UD$d&Qnr`0x+jM-DnEwt{SdG#)RIRYJWg zp{lA5l&zkh1}rMvRbr>U$L=K=`#+~B2h|I$*exp*6Uz5v^P8b_EhME<=q(x&M$n)V zNlOl-x}DgpQ7Nv1u35R}MVu9ViX-08Yb!`pFYY!w3&_(bQ1|*3RNHd!vi4i*2hn>y z@efL2_$IphZfcA7u$(1Elcb$bt&|;U7S5DRIy%~XU?&BV4$0VLHFXhI@{^WSb(&Rk zlln=UeJ1p&myR9A(M1AsRf`qrm7=I10#MiRXGw!2wn@i?x3OjKOUMvUJrqxZIze?h zq$&X+PC7~6Wkv6uE~EB5#!s3AeS@VKL#lVdkjz#Fs(YYeNWIQfEydEGs?ozMAjzIF zcmH9HEyc+#$28XH$@lP-K;KHpE$3RO3cq0^Yt~Neu1!Gn+3sMKs5wby1qlk$C0O-) z?~-zvAW*3!Qv-vTP-S((mh;ROJX10S5X^BHEwT->7^2H zOD<}w&n|mNQA;ms5i5z0sLW(YJ=9H-0qS?4T9icLS+^m*h|zsZR~e^93Ox>K-JS=h#Rb~ zGlKeUK@|m5aL8C4`8y#*AVB0DR27?;GYSROc+DcaNZ+^N6c#h}1>P-3Wm1l+i%-p- zK~xEAz;m&kdKG)YmKq6WwK6pDK}m~xIapmJT4AGnhbL%w1%wouu#yDzP}IXuJ%tpy z(&X8I3X4?=K$Qegi8P&af{BujTSH`a)N<)94(P41HJbYA4%u6X6BsqhJP~x8UHOlM z0|E6E(Zi5_I;hS%Ogy@VhraubFy3)3A836q7#1jOmrqwtyBRRl5zWF9r$+lFQ1`U+ zhy8X@${`68Qa0aGDLaPynb1f}E$Pl)u$BbW@fGhOgQ)T$~)1dR%2CDW4Z-xBYFoj((qV81zN;IkZIS6$-CPRA( zDmC_5kXrK7e9LWiop>x|59G=BqEL0>2n*l<2ktofW<30X+ znbC_)srXqTUXQ zNcb4}{tA&i4l1!Wl#R9GUs*`|l}_ z0EW@xZfWrAXlH2nUIkra#+DlJI7ul)qqt36+YK3D8(-a4x1UnElHTG$me$GKQkWLI z@Tk(^bbvJJIq`iuKj_<$5|x~#0!ff67_*j1IW(m{cox*JeyajYwuv-=L}`Lth^)bT ztpd}-4P+tj4yI9~PYA=wT`iEi($m;hUGEuLiLK!Cvk0Ii6D#SK&9qQEjB+bM1Zdzc z0@c!eYyyF=@ef=GD27afy8q!ZtTf7U8KAmCZRt;CeJ{MIk_N0q(Pg!FhNOzS4@BL_ zmJKeG#|PboLIsI>C5k*IaP;e&dPaRaRsLLi>Y`yE;#InP7)kFVamkWMie?f75+!w3 zm<|0ZN`nm>z)zL-D*z%v#f*Mu1xYlj(kxY$hR+uaII`_zS0^a-$}4Y@v*<4sN7fXv zOO8c36I3P6^TUjD;l4#Y`(N5_2@z8T4?5^2hiwvsY8OttHb5B#> zL;RcRkYfAC_yWDln5yygl{M}T&3R(r3@ z#uy!k9Kmd<2>TvVzhgt9BuI``=W9M;>p|)_XlRfkzlI^L@DT^ymgi&Hw;?8!iKQ=r zzSbJp*+SYbzRv~P_Ex?oK|gl=N<%Jvg!p(Tdg#t-q3Vp#hc4`v?J~auu`JHE1y2E8 z9<%Gir88Hn{6NeywW5Ul6b}WHq%CZ<7*LHo6|P<^RhsoFo)hoh2m;cD7)&x4-+D2 z?v=Z^X#hsq&0j^Bv=~bL=yZFrP(LK@Sm~{#Ry>00<6Y<~HM^yS)bNJN1%4G3NmVNz zs+h8srNB&$XESKw_YVX|u|_yY){{tM2S!r(_9QS#9KN0Iet_D7%h>ycH38X9bFt!#PlfQm#g$(~}uBNN{g6oGGAqW;ki^VWMTiom2EjOPRAT91dMOF}x2{lOazlPq>ZbFA5M=l9u7UCj*jbcE-HuW`+1Ek6p z?rTV~kfKrz$X}0sw#|WV?f3^EhWmtiL=%FuLJ=L0pJah$s?`aBB(V2U8`HT>@tlk# zDPQm#G|{zc_$xl7>hXr;S(?cOLk$gdFb_(q1Rtoef=mAhm~4HGsZHz%TH>I(j@%kv z1fifBxf$B&90YRvYE%3=tI2Jak}MsGFJBPodyge`LA!{{eB=t81-wSnnfQS z`aeD>Gy`v@o3U-wJA;E|_Z6h=sJcNv#Pk?p7;2$HbStv*R+1rsmAf$36t@P3e~ zMeNat`=2>>%byv6YLkl*J;UkPXH+P)hma~-hE>Xi-?l}y2GXU&>?TQ@b9$g<+pY*K zDKtMHTYUxt$h?N+YO&@#%qC&H=2xm?p!Z4%k${B4!G;RVac~z~IGo9Zpdl*6(L^aB z67wdB1r_=rhSJe;F;hZ>rjvqo0SE5LV-*DN68gg)@2_Yq+OQO^i{?<0sM4 z{c=xYYwuH_5UFk}v1PS{~rENJy znv1e6hZAKC)Nxkztnb;}v!iEk&%vJWh{fEsNP9o0x|rhLNK0O1F)z}RA6d+gwCsy4 z?sK^Uv>cs!*|{#pQW#k*jI#EL_KAG^ zMZN-&uh6OW$kFOt zXqJQQJYbfi(^+ViGqJPRY$f|<4##<6b@M7G>*_75o$RZdS39{@Z*g;4SKU$0##I!Y zj=Y;tkQDhip&&W(MM6PJWM@J_Dh==0t>_g`8tyfxUd23iQ+Mq>?k-NT&+X&{-#;Ci&Rb z^DU-MY!QkY;5zVarkc1l}mW?Bhd*qE)4K@8CnKkjoXn#Y^9>DLDFq`$Gs{CUeO*=KW 
zA^Wyp*AW{Z*E7JMZudV)G0)+M)*UAs;xDvURsZ@KJ@kfA-Thf#L!D`iQRNfmAO4WE z;loA^=ugKH&)hq@F!J7f${&)!j#TdmW?gv!IZB@KheQ`+iP0?0A8%+L?hJQZxVjBOlTF;ch(P}^SHUo259Gb;^hi~thQi3j}a7350yx-&dh`;S~J3eky%huj# z@gI_`EX3&SdV2l;n1rNvXjY)kiSXl=kB@(O%u#FlaZ0~* z^<)1}C)o??olClMFeA-%M0u+>TG8td5c&$ps-Zg+%+Zh};pi;?ct`WNm%w;*iGN?` z$A)UYsvn)@Z(94gi}KQ z-!dY!H03G5H@FK^$N5I^r(^A442&Aw$0_}`!2&X&^wqP<9`1f|b6u3&&Ku3`CxfDmrkDVn9;wx?1XIce5VZ2?k83D)i2a+)_<(#ydRSOsLoQ0R#RnWJ@oCHdts+e~hh#_;~UVp3xM zUsf#1OdPe?Kd%^Hc26$vXd(_l_9QSYTXeLv2s14_|e{AOT zfL&)2UH6a6oEdQBOoQwG@!6(f#52090(G+IgrzpFTNS9AJwGg`F>%%YFW4Dh;{A&U zo=>})u>_B+S=cjyov{?}UptVpu*$Y)B0FO_-v9ML$^lJb5<9~V@82_!Qm83RW@k9z z{YM5;dNl<-c7_Yyf1D^~VYO|KK0Ct=?>}9XazIlsU}vnw`_C7p6lw~F?2HX~|COSY zUQJ;NJ7Xi>-(6Fu#;mQ%6%UVrT5e z`^RbO)Y|sUU}ps2{V!>$28&Bbeznq$1zJ7MiwO@CUkB z?$zgbPxz9KQ+Np+%Y{R9vBGP>u}U}<#3{W79iIx9Yd&&%4LLRozXowCuMx+0!Z1** z@{&4ogy%q<+Dq=p7ybZ>)m}-D!C z*P4R7Xx5I5wa{^KHTwdwSwG{r)->b|v)yB37wPPFHJgCgq2GF3%LrL!);>1YO6QoX z*+j%%{TU~;jFC^wc8`l)rjzSxHVJW1f7=Ny6XaX7_HnVcIuBjVCL_Mn&p4qq3&}Iv zJwDbkVe{@6W2Z;b6-6j znYN!(|J_RGtlY1f<{!v+-MA{(5OG1Dc2a9T(q?w9ZtPb&A**tyAaeAjC$$zJSIsv2 zB6gF`nN_({5!dwXPHBCK+&nw?i`cC?w^!v(Llo;rp3+*3+&kNBLhN>(##On~5fAif zr?f1Q-_6dQ5bLKiuqxLGLD!d_(pri>eHc)wweewfc5(S~ke(k=2RwD1tetYWV0ezPKtrOQ=@vF5^hz@=I z_=k4L`q>*NYvXh-t=5`_=+)mC|Ih*1I=gYQc7#sFYAsWQNdHXyLnq{i*&Fq=V-jAU z|HAY9)fX?QhZcH#lm5hW_$p${%#$w2adTYsV~^|1aBDY1=oqX`IJOEoea=Ju*ftwjK&qXXSSetlkJ@T754-I24=+NBS=OLCFBqSc&fZRXFWlHQtomRK@`3O6M z`ov>i$fI)}PKnLYk-D{8?DZ|r8dq0f@U6a^!GhDrHX&2z9Ge<@MQ6d9-3t*L48A>m zYzs1Hj@Go;>pE-K?EVt*wZZk%$F?Gi=Ny|BdsFAzHMyG9+1<8Who)1##>EnWGssPH@k92_2{F>H(CJ#^ zVugq?Xisv%B9(InjIAl+S-B2E~XCA;iFPBaghpyNpgq|o5b0z-V65G!bJE>qIU?V{EXCyj za)bE=CbG$Fvw7~!*jGBY-E&tUiVZa1_#8s+H8(SfeWTOpp1Tt9z<`#bbp-jHd9F$9 zdz}IITw4U)K$@Zzf;?_+hKlXb(f7!;L)04BrD}bL{N6kl72B<|)Fam(!7zwS)e1xA znVZeJgYS79Z;9tUuCv4o9*?)8M-Lu1SM;6I!#0{#m=Da-9?&`I;o^kg7?_b=qL9zb zLrk@Ybgp=~I3xH5yU8vwNS674skT(7+QS8d5F6x@T?oiN^ANPQQm4zq1wbea+Q}{? 
zq|$r$h~vT%wvsp8`tK#BUT#H&T6G1znhzD9y?2SU~R4k z0%IsWt97P&M0Wu?cF=eM`uO0&1?Url_SdPVCp+sgU#vN?`+Ju($d|K24XSuuBDp7v7Rlj~eIAp8x@(p)l;&*p~A z*IurBWu41ch#{q%W>vOjtatx*Cw5}sp%`^Klc9ZUz^|@aot{K{$ z*Sdk+JTLdl*sZ#^*XM3U6dTrY4G3mU7KVhmO@W4l1x*EpgvFvFuA#lnq*XqMa>MrX zE>t9b-oPU5-MUCmmu-kDL$eDmcaX{RLKbTW=+5xch?3J>;c%1BOJWb}hIr=gM6?-7FKE$_kLQ_L#)j&i@yzu_^cdQu zYn35i%*(Zm4cEQxnd^rT7)GXRRUkX(nOVg~={9=i?m~_3oA$K=o+!VVEt;a~i`MFDDPw6h*kh=#lc}ir4RyA_Yd^79V zMBR-Wa`z&pPoZUK)go=?=UT_6=!R_YrA!_X?WX9BXz!uuj~MKs7>vx`LornItUDy; z+$_B>6jyGOzfjz~tv5mO!)@{eMa6BsiQ?*|Uh_k364TaQ+RzzbXpv@@S=}(#Yd+g| z(ee2BKe_R1QLtc4-M#9cYW(`ff39dE?1^A(tKVU+??DdZ@Y&nRwP zbz*en0W^;}lYXs?|M;oB7`E|US~YE%uT9m8WxmU+wl4EsSrxv_*RJaPGGB+P`^$Wt zs$MPg#Z(P1^L443w%m6Wr)b4;UpG$C*5!70`QOsc#4S2NI~NypfR-_CQrV?A{sCHU z+@gcDD{(;wY1iY54$^MM@ek5&$1Mt?{SX%vM5DzO1<@+v_(8OXaf=Sos^fwV(dy%h z4$+>)@ek1&;}#vJy^ISwOlyrRI!t55@ekA5;}#vEb;Si8p?!!eIzsD<;~$|7#4QS@ z4aEfo)1+}l!8BzYKbWQ!zbJ%;j1LN->BJX>&?d(7LumT(i$ZBr(RZ}D{>tJ5O7l`Y-_ou<_epvi5|DmB)X&7|LFhh{^aq!J z{g?(Af7|p&`25E_)o|4FNCfBFqob2tXa4ob-W-|lr7`_OE$QFIF7(OxF0qjMI-|3` z*rwZNOYySqRa=%P{GK5GV`lg};`(V^sf(gsnEh+3h4P8PmD;w%;2zof;1vfOY*YL4 zf{W!ncZ2_fwb0NVUH0EPyi(torWzwZLQ}VqAE{~V3~)#6S@ZI7`AJLvRlWYT`TfEB z{1e92b#!ace__%8N|S$r?yjTrzD{2izT|78)!{3?PG24F{E|A|t-Ibbas5YE@h4UO zr$mFa2TuQ2Ua2lT(J!O*e4^jQ*87Qmms(#X`sGNAh7&ab*8iNJjC$V@JMN#bg#Rgp z{}K%SG>Bb3x)GTnExHkxLwvr7C=6-&BH~Vn&jjdT>43z%C2K-l`Cgi5T={-lQe62# z+S9o5BeV(e_rA09x)x${DsADV4IKerSZuM&eB3bgFGYp6lI?r!L;tB~|G5_bcb|Iv zN&)VmQ_Rbcfs3T&C&3_Tc>-7@El&nCmh$Jb?>OS2VXpt(VgBm_+K)W30c#w8AF6JPuaGXX9U%tmBYwY@;wTORd0RP_nB*mfPnGw`b#Y_n`bc{*F z?a&D(lG~wEOd{@tnwUuLgqoW~+znl1BDov7%p~GosH2JGUg&C*h#x{dO(Z{rZZV0t zAG*^-azE5Ri+c1yXpo8If$Sj8B(-m3r68Cy&Q=h@nQpuM!+cx8cbt{B0vyNPRuIPd z##Ru{*>5X2$~kH)h~UK83L-fdYz0xAYqo-D&I4OP45!vsfakon6%aT*wgMtyD! zjI$FQ<4m^`#B%1_3667C+6hi@-0cJ>Ip5d`PI30z3F0_M?F8|hI6FZC=YpLek#o&X zaGLYLPLRZ@wG$+BUfT&$I6ZcPRL+Rqe+|xmdnx)cTaGO6_S$k}#qg_LTTpM_Ut=A4 zqPoTkIsKn)5dXb&lu}MF?axxsOGR0ZAmRS7iN{f~Z3GFYu+=#9zoy22Tl@dRiwido zuMlj$|3r-Q|7s?+4a>c*h1#T~9k{fC6EM@F*)9_n!J2?=0)_%i3NRPKUIF%xa9Hs7 z0n(#}iC+6dY)+&ZUwYOaFwSC`T_!AoH38cM3FbrTSfSC|xLYN6*Cj6wBFeUAcT_&8sI)Lo}h5<|kFcZQ|2s0tfgr5`>o=CH| z%Y+kH2e2K$Fo3B5W z&K}XJL#R=~Iqm}Hrks2ykEql^RJZ+;kAkM0d}oiC)Dcv_{af4*+;5qqg1X355sG79 zi%aItU|Q$+JAD*9#VuyuajF|c{WjxVH-dVHJH)*6n@k-J#SC?-6QO>aajqLiwcvc2 zp}%C}vp<)kAp7$;(z8F0qa^$DILO(b$6@7&JxcGxzmeS)@8dpYq-TE)f(PQm+;@zU z>|jUsQF=fAo$NCZpdEpu#6D%K?2g!iEr(zctO?jAU?{+(0COSi6=47PIfMmm&MZg_DO;dHC`k{R zu-&)5bJcXCyR=^uuZ^FfJp)7Lf9+7 z{_+10VSx-gE_0GFWpPlW!ZA4|dCf5YE@!{2|y-+%x8_uueu2H@Wez`q%Qe=`98X5f!86Z|d^ek%&U z42E9@!!Lv3m%;GMVEAP){4)4odl@_&QWU?_nZ@h)Mz18A_NwZZ=@D22YXY_j7z!{c zz+4D>1=v5rVZq-7NJ~rwN`#(-OnvNA=hNcD^yw`c+^RT;?{sV%?KBZfR;+VA`}<_F2Bz`vp{k}NXiIM>D` zOJ)eIe-sRx@||DD+>~q+-f8o%tcxZOn;!jKj?eC&aYz5<_w)K^+#mntSKX(4BfBRK z#*KJMpn9Oix=hd=l8ID)vJ!vX(^^@RVkKQ;%VX(VYpNtzCI z1_BBhlQfbJ%KMq>EJz&)iP6v-XF$Qns-Fwh1rRR+x)-DF3MNY5Vbwe;z7BUv_!&$(h}uXa`Jw)jASE5%k+98ATAex(m+K&Tlqd1Dq?H? 
zS+!FLiEK#nDm_5aFH|2^_i#Z~hb^cQL)>_*N`RL2Q=t(qNmc~$scL~0Q7WUT1SDlJ zNmc^NH4n;sAOWb#2bB;}ngl2$Bykm1nMRrhaj@!PJy8A}tCstK9oT9rsbbh$jV4N{ z9YT`GiZmjLAxeyR>Q0KvPp*P)&`DWD2)sd(OL`@clxs2uP**Bdkcz|Rp&@rjS|Y0h z8vykZwJ3OiYciffl8XIKFpVZ**fvspO}kiioS8lW!(QATG!|$(Ur$tW(pm~`d2gfK zz^o%g%d3YtTQR_Q7$6t}9KrwxFu;Bc;Ew@zV*o!4umc15V1TbNz$Ogfg#kP$09X!yGyuF0 zfOi3q3V=5Oun+*R0bo7=<^o_g0A2*ZbO1aDfM)?PRaR`wUe1ZxkAIph%n(glM#t8c zJe3|XW-sQ%>{G8$`;x(We2!Z@_vF^DJyM@2<;PdxzX(B$C(rg zpWyAR8kI^|S3VZOD3vS?T|fl7#03|I#03J)m3bXQlCuK|>LJxI*Sl?uXmWxCs8DoN z>Iw0Y?5gq{=CCwY=&jUTGMRb=B2Vm3p(vE?T-AKx$|!1s<5z8ggO+KMjEbr*)jXkP zRB(f%XPeJp@%NH*75QDNxx)ETRt=8p+B^m=zn7e;IMJmt7n(*1>m5DXYzK?aGUM*JY;mjxq*~EGa7YrooB{PZmK@JX0kxQS|Szs_i(E&0u z8vlZWGo=hllj|&;FlOvv9yLMF);V6%0fb4fKu~n-c5p!l5)V zTLZh635jJuOV-R(G8P0w5R2P`W#ewBQ{eK{wy>Djg(epWtn zI5MVMVcupYnw(g&mV!C+l(z4s&3@xM$v*Cj{Li0#Vz=*{jl1#XWN&v1|Fp9Ww1BT| z<{O_)_HZ}%KYO;`E?}z7IOEu4z#Z*RK3hlge`(z_BQ)98-NZlTY^|ODIqTXPdy=i( zjr^0&*3kBTZGCOVreq8EDgKFPtL^qqwT_$Nnv8bW^N&AUMceao>HZnk$wuxI{7;>I zY`5p!Qui6Nll9!k`=2=bh_?IdrSoSPB#(C=;~#tWq22DOOUKR7PKMS%dr4eQM zoe@ptt$tmWM~v<#y;;-O8QxU3&F_k(m(kUvrZpcraZRPIz5$k&M(2|n)^v4-Hqo~E z&b6FubSml5n)c4%rjpj3T}uSh!;?zausRPnJ=nJM$`Z!(eMxuLv~~tH-EZ9yu%vkU z)}+EUFFOx3{jhDv+$E{g*CbtD)7TlF&1eR~CCspOA}b(dlVbr+3#-^L*|U z)o*2ed2rgj)7xr_^0wZ&v3BdFFV|1YKfST0AaBc^!uqd$zx;Aq+Ud15`FWe}TwnV& z>dP;tojC1ML&@{LQ&7K!wU9RzciOHdEAOj2*Vb;iwD8GP|I^iyYR-; zEvFaMq~)!+4#Z z_=!W+QF%_*IXd2Wi?S*268oyd^Bk3xx*i{{xXJ>s$$4fasdP;dBtNL)> zO6#nIuPWy6m{OGZvid;Ya_fsaU*YFFOvy@oR=qFJ#yT@$gVX$(Q<4&^tM}wBwa(Dl zpqwW+j7hAh_RF)hPEYWxnD^E&DDj8t?Rkr>FX(vU=am`mOuSjWHSbI7^Zlf=ic(W( ztDrz{?z~jPHHnwqHg;MxrM0f>ntQ}>MPjDghR%6S-)~!YWv-Xuyu`C^Ydg)G&bF=% zm}_Y`Eiuu}t#fwMnQd$5&Yf&HHZj)Cr4!vmZuRIg7Z?mDM7cS2&T2~C=5fWGVbGlr z>Sotz(v;Hb9$;Q<@G9Ye+wxB1rsQqzbInr?Y7+LiE$uXFN@`uxHRp)I{eAj2Zs~Wy6 zUrI>8IH51bhyi_y)mW8@}|RW9{q;+ zMsbltG4H{R<{Oqp4mWwlH}2gizTt3#ckM>n4a*x2*LgWN^m+;Fh5pf$W<_qB#W3cE zWP7C^AN1yfWPa&gy*|QTp?M7DjiRxwUbhUX%Fy)etN^c-V#EEP)tA%C{D6Hlo*a=Hs~!QJQL!G=pn@wwucJCm+S$(6$F-0Ohii+ z)oc$nMl87udaDS1LL3RLRCKXDAdEuN0eV9OrBF=LK8A2XudI-B?MRlxVcy-ss%wIa4nfiM=(t7vK;-f- zX6mNIxx1}Ck(ybyk9V%n^V;OhGJjrj;i7A)8D)ETCkrQBo19UG<>3qYS5wo={CHu7 zPp?i+FWbpGR9JL1^+MTpUO-{e)yWsie0aWvL041Hmu=;JUFdmr^7*nYybXnmuBM(V z+r)D#oN#sWxiW8_Qz8FKYFgP>yyb;YuS`xW^W-fqEV`2Vec3wR+`^`L=y_%G*|OEViG_=reA1lgk`= zebxa9()ZQdnovjz{{8F^BWOerBaQl}X8zWX{PYPf0vT393 z>gPwOjJpfjuJK zvPBmPiXbF3QBhG5qN1Wg$WjngBy<4+A&H6-6$vT=DkKpRQIJGMn$Sh1iGUCwK%@x~ zB1}Snkc1E*&AhCA_HEAn?%4aB^XKgK-HaNec&jpN)~si~&-1?2m}6AiqZ76V$p47g z(022LVn10Y!niH;MDc#|!U%)5izlM?lYfg?({}QN?SArth!t)7Pbl`0=SM7S+kK*V zA9-HHqP7huqV|#JM*P;c;)Lx!@|=jdZF5g3U}O-j=1AbhFtP?c&FNz+3bVb>BmS^# zPg{KaYV#Mk!ilUzk8(c1i=D_bXdx#P9_2)yMh|cv!EK$$Q|LaFnik(LS6AHob2O7D_$LXD<|c+ixux1dL!q~aUClj3vI%QIL_M5t3|Ib zsN|Rbtj8<9S|XcYWIaV)g5DEud>!|6>>`vXT>sklDe5Bhj&S8`=ci+TLgR#sUvGMf z`V)FnIPdlRr(;McK{%5wNJAl^IH5GVDs9XMiWLrLXQiQhpwYtq?6|Zs1T;$6mF=5` zLO>>=Z0LW8TnkVSV-iK?h|TW$&zVssFQH=6(`vO4s{TY zy(&nixkI7C!B@%27Vc0xVehN(WSSdvudw5lN3w+*)LPi|YELrlIMhm5`)XCP#c}8^ zVdX2?W11_}LRj>Q^Vq@_xDv-UhR2II|AJ(jDNN2vBeSS??U`5*&~_@)C9s8Zwb4+9&OjkI})%7k%+c#Z9HPO z3Go2EOIoHc^nMlih;*oQjqvm<*GIaCN>>SwzcPPBI#{|wc<|MdN4f_~^@I+uL=W+< zyr3gF8xZm6b*+da0UHo^(5qYBj+kvg+(s{Nbvy$6j)+4qZQXe!=Xb;{^gmjSj|BXV zxQSlSy6lMA?}!`dIjw3Jz!X71t6D}~a!e6;^i&JaCBPJcLr+M`+M80CuA|=5Wt^rI z!pGL8B{UWJ3Q*7Q^Pe?>2J87Pvqo~AzfZ`3m}Ks$p_POUh~sAsi&=v@QY^h?PH-`1 zNTXUGnGSiC z)~@YI;-uu}+O>l`dgq>9ZLn#$4ixj6w&oLxpiCPNB$k01PP z_M#^e?qXwho`1)_lH@2@(VUGq>5ihX2;n+o*kF?s~!f~^LTa^>_em3 zahq7PZyM0yEer{NmW8a0xRLyA<4_=-Lvo*K%g zniOWvkG(D_NjkI*PLlrI#!i&}*alCOHg97;kbc_+ 
ze<1z5jh!GZ*#=LLzTd{aFMYEOeqZ`x8~dL0={ER1>BDX8yVASc;CH1rwy}v)%r-bt z8nTUjL-r3l_zl?{J2pW!WeX?B#%sxz!f~=@TXwAMn=L$6_Su$= zm6h1Sv9kBJ>=@Y_TX>A@g)KW;_S6<0EqiFo#>np4!ZETNw(KYw#ugqW3$a~4@p&oy z`}nt|?C;~vOW}>1e> zD9#*9ir2Y{c^vO@6_XlIyxRPNbrnO2*SUsy8Siop^E#e*4U-$sx`v^}>jYvx#=8V! zisFfZn9_JwAf_^2=Q`$Vyvub=Z9MThraqo^9n%!A6NG7vcL~CD#1n%sUGc0SOmDmn z3ez9&g2D{O6H%DqcoqsX7OxYGk;c0OViqM|26nEF=;`FpG(- z5R5KSCls@i=n{%qOC*M3))QHw7!#sS7-l2UC9DKK)?HcZGGvq)c$>7M^w7}iOxN4GD@vV*mS&pYCM_@BKeQlo$!*=` zr7#SW*cEa)sZio=s8=NMGfXPV+#jHWFh--B8~2;(AdJu<%@zBBg$P6R_2&HjISUc% z(N~(u`vVpt4A6efiTlkKBG#e*Y{u;eenaS^y_7i0}BwV(8rs1?$23(ScyK;Y`i~U0b&LEVDqy5W(yF@(fgX!`+)ffJ+wpf z=)Ro!2wk*YGjCtOe8e)eb#voBv-yan=v~bf`+#|fCFmW^`TKI_Ar_;zHIw%R%tI_f zZ*ET9XEqP<5A^TNxP8D}gbv!MIcQ(bT*N~3x@N?_fVqg@(5su>_L-Ij zgeDDR1{k8I6Wle7Dc~m+OK4IvCV?MRFM_+8F#$AF?FmgPh7|ZlH6*yJ7~{ZasuJI% zWQ+kN)NZ`Hk}(3jr^1=1w5qMMt8@e{Z~RpOcyj9vgk?Z&w)7(GA;wG7uZ$>;*EP}6bllZ+q0pHwWaNzUj1 z&QZN^?s7&OaDr-&YnouR0!OHZIQI!ga~gjWzvLsaGI}_jRoU`_6Kf%Zdu(UBqDKJP$##)R)N`PKUIF>dFDFQku9$1S}$S0tQvIk2WfqVpN zDXXv+Bart%B}EoP6G3P|5rq?DA%eUEXq18&+At&+cuh%;u^5KD0VtI47@83B8c3yh z#8?O+uK*HdPYi7c@)C%rtctN1g1i9m6j?M4fKY%)3MbkEfMftDNEkT_s5WfKPV6LJ%n zN12Zq<3k9*3|SC`;zMwNlw1`x)(gP`!{n?eR4*hN=qJZTjqxB+Ko{9J3dMs&0Ig)_ zsIeYMI8aaC6ou-6gaTj5^P|SPA;CZ?SrCcph6Dj0$yJeKU64Q^mz))e>VjMaUXtS? z$2uVaz;m*1B&ri~8F)-~jvV^|@dfUaH$|d;$QAoe(?%Y>u^x&giy}x|24Xg2Zivw3 zGQ5E6)?O=EU-sHdtQaj@eaGLBIq1(>z0FIN*BS>wGlfc2*e^zwc z7;wO0HZLb}7;b$WhQ zfu&?~G^v>}D+c5xXx(N83{Yi=!bxm~BQTNC5U$H+*aIUOdEulc1_bz(kr1xi#IOZ= zG6KU%jSL&0EyFcjw~?_2_?}@NPWsN+4OC|=3D^D3ums98L}8=`#!lc@;+0@pM0LP@m@1HdmMAyl`Pp$~Xx1cs7o7;Avj8LpwaHH=lj z@eK1&QZ-`*a4=&@sBSew4{*p3g^;Qk%K+<)h7jE<#u8vhMqUW%D`OF`IU^xN_bWpO zFvU5Rbi5k`s@;UIN5pL+s3m0zV7)!T$f@YRt0de4kxt#`l^!@4t1`Gd`gaOVr+- z^Hq*t_?uLYReWcaV-4R|b#=K}c&L^B|3_u(ZU1ur-+I;m(K~ZQUA22HL2!Q7k6pEU z)Auqyc|GcyUGDxkyJy%QqpZKj!ixiq=%E$0)9V$1M(;zv)Gji#y>9d&w5Ha;FzUL| z$I!ak?S{qIjS4~=Yaxb;>qehKTWXIQ+6EaFhPKz9H;f80Dw?+FwhIyzHAeNso7)Eq zDii11TJ+e3&hCNrl!OVsC)(TU@=DNxwnQ&m60amu(36O@)$J|82!17|+md=qVgw_J zWwyHfl32k+Vz(`cUxF8?5|y^PKTB>17LW|>NIy$%36_%V?R5J}ZVOhEyzEGQCGi3y z64p+)zvQl9Gb!DU)L(L6u!B@)rzY^swOiFWc8w|IIo7A9??Opl3a3d1I$-6yFFRK0Ru$u1zr}gsU5; zZU6s@$NwUX+F}2Jk7>dA|2a5Sbq*QdwUrhFtsiJl0ckXhlJ*h1#f~jgQ*2;C~=N>hoJ{=3)2jYu#%1 z*CAGSlDB6Z*LG=<(E5ay3R+RnYN0kNXycVC$^!rEACL2|aJ0=o9n{Kec8U5Sb6u_g*FX4jnf1q*-AG(dU&zDs=yao!S0 zi?&kMT{9W`)#t9Qv>0gp;C~klZV{K5SYS$i_Gt|*2wG3jvOvoMEeo_P(6Zof$AZU1 zR}+iql3#sVLkoh|6SOSQvOvoMEeo_P_}j7IKGEEShAHXq(;8Y3w4R`4ftCeY7HC2i-guEv{cZFf>sN)Q9&CYwQ0eBmLOGPOJsdx zeI_tyEP7^J>=UOyObe50T^d|cL(kP-G$39!`hOWYmorjBFVtQ&`0tDrssCIarT%j{ znflMACH0@n?$m!SA9nbjul@MovO$AY>g62`=TiTzqv%19LAO=Wi<53X8xGtJ3@yVG5s%4AbL^bAv)UXzX}GdWTKKqP_d}sNQFWp zwd5zEWZIeUSeMaBD)WqL5>&jQc0C%NsjXfV~hG;kz=peZ0= zyj|6RzGbgcdO?RENkdjn^*87R8^lgdBk?MsrPDOe5~&&>Fe+7Irx_JT9~mq8!q+6w z$7;A|9}(1&V4v8T8Wt5dQ-fO;?KNc1FGA?tM(yixcrJen`F@)3L4U952*Y#`&!0}sf0<;yPF@l zY`l*^kVxp@WP_!XYA8VuhVQwJm(&(~7C#3MefN|xMFN(`Zhs0ufYl5n4ifkfD@wpk zexX5QT6#(S3oblU-AZKTgloPc9cL^-Qay9dOMlNy*iEP2bXFUn%mb z#B!uBZWjXCXlfcfJg~E(e5Oi=JF0#d{VRg-kybNM_+%E1o#9A@OcJz@wxv?lE6--# zK{nva>t#=5`Q7iCYdo6dIh5c^?|Pd9R1LO-Fg_)bJ&hIB{*w90;33hCDzPV1`74gNer4rNz^u2cXw&@j)AbUjz|sVsG=isq>eXqs z&q4M3M-qqGUzhO;zidxFKrMH8p>Q_4!}4^|F$EdM`my7Ts(8yJWrb>(LmzKlQ5`rq zIsBmpr+h}}cR}|`5{JBYf3Ww7%9zILj&zspb4sd;!Wucj2YX2kfrA9@pvu-%%_Fk( zR6*imOa5*!@0q4nOfcmajYew}9^SoIGy#4NaG~ZP(*k>WEV9zdI04w%YtO8~n)~|@ zq{zNrAd0Qi+*`R~%6ZT`_~R*X{AvsAPaa-bM_8FRMZK~UuO}%@iw@f7LluOdX6MO) z_8EJpdHm~zc(3+e#T#S+sHh2s&D135F;yc~;7m&zbE><0f3M7CWEnmOd^0m60c9g^ z0X37_FFmXzFlQe~ybMpBud=kQfk`nMQ|Jj$rl2(CM?>lU%-lF?lo5YGuJqbnHI!DR 
zU_JPmgLJHz0cScqO?(U*i;-t4q>^%}WV)UuN=Xu=Bq_{IfxO~r@0v1Xjm&bnW?LG! z33QsCYUr+Y)U0DJm}-(S2is*#nyA8|Z|1+j5~5KIf5T8FaY>sF!p_O@ireDoC6lP& zFTMApd0h6fazdDn6X?)OJ;1D`D&0P22}#YFR_kyQ1}th!Cq5xINS zRRTKU`#rF@A*vBHsT_bc$);5xbx?X(#^feG0M7r6qDbYFn#bcoJfq6kUK931O_#8h zu+_|sxkgqA?4Pb$IKYh;T?JeseLl^%SvI8-FqPv>pEyAkA3rfINS&!t ziE^r(U{^Ly8P6_AFuST@OA~P17iJ17k6HvLsN+Bd4vemnppqsCf4DgN6o8;G z0ZG7U%nRXKaw^+}NNBe*>ir(!O%KKs3-cZsVyM+}R<}XGSMmGN&55Gl0fH`^wH( z^R0JbTNw%@w?|1&7*li4C^VvvSTKTD0hW&U5)jd1=H80gpL><`M6sINq0ofm36axk zk7tEWNX@x4?st$dEg{U}jMsF!FjX}!$k^#k_)$Pg*1$|Fqcs6WY%wVN>(Mn}=_5(G z&bTAVlT2SJf_+ua!^!<0@uBWY)gVT!@mJ+PJp_K}{W9DaSGuePxHyf{YXCY2sQdv1 z6BKalrhJX&Xxu?0-$8@LjmN4KW{mO{mDpw$kQoK;H32q7UWI?TY*0-aY_OkE`ClY- ztSS@a7$mZT4JIc^Rlu_z*?{mb=KyBIRvEH{wX@c3bY?A18+=RgD##7Vj>!AT}T zRg=JCPE7Kfl+(xvI68K^V=H&U<(E?RhfV_}B6G(03I(Z?L*I~gw`P0i(Ue-JZ!h#kDl4oSxV4Upg zlgU-v@N)a<39@=r{4U3`-If_{C}Tz30lsKZdXqrsh@!OIU!&-cIR%TZiB=CG7Z7s5 z1c4zFY!v0OW&frqGA7L0ieg6K>z&5QJt_Q2n28O1PkSI%=mRTP{fbVI1NM&CGVo1j zH&p6_3oN07vH%I`4mVbTe{D~;JW;Q1`!EV^MWh#L~w})7I79YQWFNJReb6%sXq&>w3%H= zEZ;Z5fC;?24@AwKaTL#+Fe^zSjF?H1a9Huz@F^7v&Mw;=JYZ3mu=2D?Vrz0ZU48AMHk<~Et za#Eg^RJPdJ2Q;tXx1TZ_vGYLc@VS^cfjER5TU^og^7#Xn3V)+4w`_~bAVGybl z#KZ@`7EXv86$U2p8-l53>I#0N%1(LFR5Z|iz$x~jF+|U@7~lp3a(z1GQZlBJyM`wh zC4>gWnogD?w#?XdNWxtDyNqDoG9o3-lr^z}n^>Xx<&fm495|~Ey%#<4LBWaghZ(v2 z8coivbD*OBgtbjKuU(aFPjxEm(0in(5F6`}_3}t`e78i7oo-Z0+gCCps*qS~WEqFv zQ%L9Eq{D{kv#-~2oe zI7+;ze5cL|XzCXHR5=H0sCT=pKW5Kk*sPP&BbuxCwkv}&^-OT@ukOtb%3Ll@~NCh<_czAS{ z8iYAmQS_F^0*oXiStA*yGRj8jMv>9f&yx>un*)J#3wz?8mTFOe?G zphwR#v7xurjpwy{r*fv#MFDT(N42u4SKbWfMP>Hf!I+c;3qGqignzQ_T-?KV8qvAF zYz#J2eg@O~M|pavWAGYE@nZ3>&$xp8ued|%aviV;Q7Ka+!GGw?(Tr=r&Sd%s=mMjF z`m=EZ93jm1RW@Z_|hE*}f7=IEg4`uaGiv;VO zco)cn6&v!oR~XAi1nXmq(;~?L9&@3oYHUq0F*fmsJf!T41Mmax+b^M|?xsL&6EYaV zR`QETI*vDniRT8vs`2Y}P9_l^4Qkp&?d80~ijxX+-!EUQ^ z3u8xciof7}oJT($X3%h6em?tq{*r&+7jX-sdwd>>+V3fypN7jlqRQy@c>Qm< z%TY*XbyGRaQD*(wl)fSq`}vXQ;f~u$SCqn<%8s3^(Y-X!>tr|#lfTj3GO*pvncILo z-GdU$TpQ_D`;@T-KHMHEStQ2{DNmHuit@qA)G)8OM$8}d%yBd}X?7fAO!SN>!uR+j z#cUo>*o>X&sh|;~GoZY~U*vt2&O)BAacF)PC6#dS9hSk}gE5tJ9q%cZXWWG}iSC&LI)8U=!)-=d$|`DOzm+mSiM}bXMVtC&E$5aB zgu;>DsFWz)=jjl>6m6_Xw+{f+pW*^HO-y`r{5q*TRzzn>L%bX*vJhaZaOTUmGFmLG zs{g{XQQ7O?xM#lLJ|Q=ayi@Mo-Vu$hppQzbg7P87HvNT#PQhOEKs$#X_?eRRFtK@S zUQYMk%Zi=Vfojj&w|p9D$|i?kRNc;(^|pt8(#avCa$yi%zhtE zsuCJ~NrBrK=`E_RQ%(+*Qw?Qy-Kt@SXGGav^-$pb?&71e2m&hoo$LauH^8ff6{N<= zdx}#8Hc{pLuZ7u8V}ZGWk@gCgnEY%7E_sbIj~J2f+kp$3ENQ7=t7wcYOjwz;u(h-?Tl&0}*YS*-`ntX`Kf

    A4BcM!sm}o6)y4+ zH!F5&u#D#RE64)VC^H$!`)&ggW?BOA_#S_|XvF0)y%#{_9g(CR2m9Ie37boo595;Z zP2hMYueLBMtcTia7pxa@nIHMegvQGw>J*1IdQea?qp>Byt7tYcHMr8q!~}ZN?b68H znaIT*@^bc0BBrOlHw>f7>Q<<81m}wSxQCk{K6+f9qnIA)olt=QW2*I{sXU+V@$PC- z*MY`TW*7IfJ<4nPes{ITJDh(D+3?i5p>oiq81iR!Mf|KU^vb)r_9kPpBjUv8|y>5ZPsf-=y=hy>pFEgnQ(!Z83z;Emk z7(@o02^-Dr&2hjr=1^#rbTva+QW^P?^l1%S9ztD?Oc!=1s4=DeL37xJjg{~adYZ&( zxv5D~LQokiVsyNA=W7;ZM7`)oaq?<9H-v`@HRk3DG}n!j%y>MMI`nxse^r8Al5!kI z6$yZ zv57_Zl=uV23Yqik0G@7QxdSMf+^HJHsm^O8>m4m#~& z2cE+5<1uHE6gwOg^bedGoE~LFpoNvqh zW4Kq~F!qe^58*mki*?q~f;w>~6D1v`Vs@87#n~@O-SX~bl!~G=o{l~GPq;f&PMu}` zE4f9gNGO3;V(Q$Tl-{TOBFATBIv@rv1biP|TFJ6qRKoQAQusrM7)RnFzT4RG%{k0-CsC2+!;sM6Te1J4;l0tm3(tnk9gcZZf$-HKgrN2 zC$Y}P_`77ikJrm?YbKo>uRM4O!6{Y7NV=j+9n*2O>KpE4#oHzu#yOumxs}W%Ro?~v zqP<%ueH6~t2F1poMA6@r`!m#b=F-gyVcH^Y`HYjelG3RZ6$g%XxAm?6h=YZEG68lM zFA_OFdw_5bH&)z?aq|r_argKTn7m7|UD}{XC<)Fm)qBGia|Im}k$boym3mUahU>QU zjbnQn17U4_bu4Eeg$_FCO|a~)sHRdp;*&lop!62bp8^XMsK!vEx-Wa_)8>Qq8xB4E zllvjBaPn}7=f|i`==VFP6UN&Yq^>)G(O*z9=S|A@W&&uHmzS%fv$FceoS1I`2QN1* zEe-#eC-u&O(k>nPmdg^ky(y&nJT9((A$In>8VGOiNE-H`<8u zpKmV8{KZ@u@x(B~gSgT?CJ*7$oohJ!^w9$6!o1DH1_zdm7J55bnO{7f*#218Bjv8w zK@Y=x?vwBGyd1>U19hpy^;Y*DW)G_6&KtJF_}6|FS?)h!d3Ixa=H{y}yQWt<(+o-~ zhf3GB4s#!`wBBNL>Pox(ceY3Oo@4$eug5uDN#x!BxcRqi+78K&6F*iSHOT#%a3Lx} zCwYI&ha)H6`Q9u_`5pUQ+S^ZlOzk+on;!ArWv08m(sSn<2G)|?-n!8E zkGB`ktm|6rfAQ*j8-MGgekHg0E6HZu&Arc`haa6hV_Z9MWv8gw|;9VNMErSEaFXk6~zeFuwzpXa3f*xRwypW(lF<7Ky+ndh!YPM!JV z^w}VEM9@paGg$1^&!>zQmy+2(Vx@D=Sj!RZg&WX?b6OKjcWm0Sv9z#cL+7O(cUDQZ z(4zaSmnpy1T(&-WYYyi)({IzE@B=&HSji6?vT1d7iu-EfuxsUZnCGIq8}iRs)QQW6 zm*92;>j*QtD79BxUYyV^6)j7@hr+&6*PYIacvJB7f%_$64_Eh%cw?+bDt&1MtExpz`Gmz?$YwRHOMRL_Q4 zv%I@BGub<^`-gXqJ$f%u+_fOV$6P+ZH2>gbny%8+}%4`H~`aPxt}e1q;e1G{Myi! zfO}CRPmo8l3Satf$0j8r%KaZ$oHew3*>(qN)t`HP_|;OciqRA1XK@cwZdv1({q~AK zGrVqdZP50cqDy4)hW-mbRttxY)xSz!dF{!TmSa0VV(ZYk*x~NgdW#GjPQ2kBg>I>S zeB)JVw7_fru`8#DE3RG8uMSN;UAIoqZM~iMBENBd*gzjz_HjK@eli2M6MioXCA$&B zxwP{n?OYL*^YKrQCHnELqD#w$$MY8?6umyz3OWa!ShPH_;*1&a;J)D1ZBE4&T z?hEtRxnXDLo>vPM|>x2ZWO*g zKIk#+_SV3Pf4|^p4zlnFudQ>R^k>k=B!5}H#UIZKLT$P~a-M84jX%99!t$}xA(?aN z>E}Tajc;^0y~N7&_~ltC)_IRtbOidm^(u{`*#tccd!cL55n^3*G;E_Nb0L50>y<~3 zO`wh~w3uvPO?xnu5j?Xc{^i-|wv9y>AOA=@``~a6c?~1EJj?O6JL*c+!7qiz`dbX& zt=e4P^m@lz;x!-a@!dudo9dgb9&hV>A9yr1(b?Ew1j>hQZ9nxbrkW1(S+&jh>8?W5 z>44V*rpG=oO5BO6Tpiay-|NH8fm>5I2M3LPfIPFfe=|3E&cIRY+h)gl$F@!Til2rI z%z!-oyn#-M6MP3%yF7x2_$LY zxQqS9J*Z^g1{CCSR>q2Fp?OAsb)@>b_|*zF20?5-X1w3Ek&{F{?AJPV^`QSc*s=lc zl0C1(JUX|lPRB3ao05IY?Q-s)9-UT(M(6JlpFbl$cQs}W64HYY%AM|Dsi!L72DWEQ zM5~9PcMkSBA?}o&If6N)io(puq_aDI;40G{&Tlg(rl%BeUX(BLjOHV~0DCBEgi zT!8@My-8E`MGFm=ZGRf}nRRa0_B(uxV3|XK3&&YO*>NkXW9aCGs}F;TZ7j2d9J!xU zbUHed?EUril7fPXf`8=iz4vBs*K$7S zh^He)HDWR&L{m(Q+Nvo_;x_g(qmC|o^pm(|b5H)QFX_=I3p9CDO858tm`JI=-0uSu zi}_>dzTRTKeB4WLF<^zf!u7G!$2R=Suo={%ou*b@F-Nzo?J4D+0l;^<{OW-p4X#+O z9ESK|`1Oti>3iHo6=g~{tRd)cj(a0^jyzj<)aB7$!H?iD;{d z7p%3S_m7-Ny4QrupX$a6AExU13CB}(FQk~AfkIWU2jIpNy3oPwO$caZcGd;yrZ94{ zC`0qY8<@!OV>H!8xYNR+I@o3pg8_oE6L*h6${u5vG*P)XFaWjWMl&qFMeL_7DUE)| z`6EX~Z+HLqjxfX%(aTiWKHJ5mDAqpL*tyfYRTQg zun@irH4c0tdhDI{t8_DWOOPYk-GV!CAGKFIp_DyNHMFTdr6>e^_%|+)T3WN_kexrtXm={^ z0?sOXu2H)!(jRZon;2~2Ic&~g-IV=AnY5gF5p{*?H5#5Sw=y4m^DM@tqw!tFdHaGG zfA#mpx&MBPi1W{8XA95!r#sFL6cuPh#{WLSZ1canX0KoJifcmsgxiO{ZoK&D{gHE< zcdT5yNZb5hMMKE>bc6|Nl*z1wnu36YxU7O24$ML$B2Wlm^1TLAP*EmcOOHf0m_}(N z{)(aR)usfje3;11OXg)&aB7%4XZHzZe5SgO(Q`hy3+B+9lHMC$Ma56`b)U8byCsr~ zG3=UW^e3GcG=MEwp_0vhZvv`q`4r#X>S=(NmDS!C;rB~IK}urTa6Wh=a7PB%X}{U8c8*#jQYa&~D4~J1iq2 z#&!j;Bc2wBuijwdnURcGMQ?&@AObaneoNj%uhE@1M{iNDm^k-F*}Of=@t*_q-V<&R 
z=W5OW+BoRU`LF2Kg9_GS$`FrlG`f?d&@XdIXs4IaeN1U4poYjY%PX)o8%^iHhN6;8 zC8bj_4Cv!XN3T)GJU98sz*r|cFF9KsBdqlC-|p2l(9)uTMb&SyxAb9F)&--b_gl>O z;)XgtRC-Tn9)(s&I!b5`?C&3dFoYYV!~sJupiGk%uJ&rR0=*UAVHN@)6m}e@fc4ZY0+E1tSmss za;=z27k;`hZ$J*uOVe@D-|@MdWzPy+=xm`BwTNs3kQw;tsr;@7rk;5<&(2kI0s`_l z1(#cH36cYJTr73@Bl#5dW+Zr7@#{)NOsP!fxk?{c+QHSKpLNO6VY%eA`=pp6lyRyV zuSNE~$KGvd+SmJ3LRhxX0z$mrPj(FGCnIceoAQl)$$B5)&UlV^iDU`?(6oiO1X12! zrnxU}UBle%Eg>^7!H+FR&*=NSS~t;TqBR?sJf7axGWs3TlSAMDQOtJ6m`}agk{N#Zbk>Faee@=;Y_nLY4 z%RcN}(9{I#WIq9W(IziSa#_&%3G%drDD~}Rs16wv67?$eSB=|L3+y4_?u3@7lsu#(n`Gz z#y-ME!|pK}Y5)tp`4Q1C4rdCj78}$`@+7FNV!_?w?iK8fhj(Ociw+6j6FuhgD0cY% z5b2|`jr7(23A1Y>GjjEGj>+8Kn7NehbKiie(v2E>H7mV+7(8o8#HRUhYY1DrmPn|J zYFC0hmgumAf2i*Fp0Xy#nGwUr{I8DGD#tWu2m7Xpx|3w~Npi!*7sdhWf37U<+0wM} zwd}5W|FH#%^X0U=pic#4#~ovdqRcF>+f5TaF;_xe=Ai;&?t#~XjU{|sIchbC*lBR^ zrl}pZf35G;$lRE=7{14Pg>=#US6j>1kG{lxt6~1*a$%61bSUu&b z{`5>S8u>2a>cd}0duXvozGOe-N7BS2%~*962gO>tee<3t@FKm?U8wT9vdXC59My(; zTwJ(7NB_VlCz9#d0P%vTuDxYy*DqN5@A}VGd*7*_Y<_sj;&>+4#ZW2DtF<$Asei4g znvyvi{aRFOo)NY8!D_ery(G_)4bNtR-lNtL5O%s`dfTN>cUD|3J&~;NFaMfjAy;EQ zgnwmW4w=q4Atk3FYwq9S3?a6SUU9RmZb)7fUa2|(Tj0y0S1mt)e1i$Bu=JL1ofueY zdfWYvG~$mM>1U^-&M9Kw#g!j>yPZ;8+%Ut>pB7<9UAe@?4EX4$%8W~SNVlj*VE2-=ab@yK=9P1Wlz1zo%NqR96X z+4S%|*Nq1b)&G)sNYDJz@jaHN+ne6{U_YK!GSiHMMWXa^D}%O6pWUu&ZMJ|Q4WSXO zc>}@&z^QjIKaakiq4(#n&I!IgG0^LWLwe!XUC({KYN0(^E_{`CKmOG===r*HCo`qJ zQ%;csIDVdsC3nJ^o3i#H>iD5Or^L_Sm2KR8py#gUj6A%e@KA8o!x>3_Pe)?zrmtSZ;z19ZPKsJJGA2XW&3+;+nx0H{F&g74Yv19C^~0&EueJQ6RHI9#{YA| zRMhKpYxMJS??S>VC;ba|b-gFAXvu{I2YMQGb8CO4^iI<2(T#s*UN8enX}OQL9rhd{uMzboo<2C7yuG8xKhWv%$k;lmxoR=}5G?B*Bl5%kc3_xK6Z!{y-e`{_n8@`<7;r+YPV)y8k}^)2TU^{jPwd?~Je zuyty^^xE%d=WVuMy0T0!E6bXkM1!2TcjEE(6?YG|_UpTTUFD`}S&3Tu6_bd~xa}PG zzLNjAq%63$?Dpwv#bu}L6EYWcFE$#SJd;T5xZ8b|bGmBXB{yL$Q}^-MQV|mUSikEr zlv?n-BV}`#x_jFqH>2RRrB0EewGKaauh_Lx9F>jU04ZI*e!hk=e(OUg%D3u$bB?6+ zk_s4~8u*gHs~V8hNrTN;WmwC|nusZSL^re1e;&dXWMQ!vK~wQ1_7rqip;$2pD56%_AX-7DD{I)Q>?|a zI3DKmj;@gX4)l}pBByfKRYl&-_bymgI$=Pf*YQQAdo`jCfWUG9?LUH56@O_{g|pQ^ zlup;VI#KBnXM9iAq^b?;&9Gz1_KG^Ha&;5#KHww=oU+5I3l~>;A@iOc!f*$l2A7{a z*jeWHL>js*^T<_;T@*0E6twczxUOd|)(21d^}cuP>{w#Ip*PV->;%>fls1+0H1Gzd zXTO(rRT2y*2_;l+;_UW<>%t8`b)MKZ8PBbnwzP$ShuEjeh8q&#WM=u)kLiX4HkmNg zA4g3344h%3=ng%CKb}u+Y<%DHx^nCf;YPdYp`U(hAC=^LEk|7^&^~VP?4+6pRh@|+ zxaL-RY6ZPbVBYvyhf{t4pq)N{DsHsH=~P?;J&z4e+X0$3ZlMx{Ms3j8K9Ui}fZ5F! z@Uh?WNy53#hQjxJ9%}Eaw;rO&i1`o0Q@WPD9;cBHML)AtwL2^G&L2z=uS_x=JIhP= zJC z{o?+Yh0SK<3hUwrbLe^CdQ)yc^MwQ{UiyDViwy<`Z*UH6*0&Kz`oa>2d*Zk+m55ZK~wgb zEg$+cm%QUe_KKZ8^-13%+;jF0=?$wE{`-c=zwMFywF5$TSuiw258FDs5z-V`HZeRj z``P+`+Yt#vJQUq>y5+6^tMqs7Z+~^3q$T{@uE<`xx4!)RK||eL zB@GIW%EuDgr6ZW>VqSqh%#!7V>}pK-%q4;82>{qtnN&?sNJ>wt6c%;A&nV|G4F<<- zxkN0T=7Jc_9{lN5+=Fe}Sd`$kyu1CQ@c&@%J%gJ1-o9Teii#khAczRkF(}d_iXa^V z(j^d3dJ=*H2}rdd9hBZe=!6nNO(>!LkfMmxkOYE&(u9N}y~FW$-~Z>uGiRPN=f!z( zUfeT#X7AZ=_MSD@y4Lz$Ykfa!=kBl)BxSfNlkOb9w3djp_G!E`0J%0LW8f1m&vkLl zf8%K9?%iz}W}wHFBMAR-14m!>vCwMvi{qjIKg6|TOzNf2t?cKI1OFBj6QY5iJis-o zpU*sNVlvVU>lYX?? 
zQ2PzG{knCiYS*pBzaB_b_*NX(`9)>d)Y+d#A`VxEb}ERF|5Y`!(q|c*Joo(s#Af@= zUAdknsg%CSZxS)tQxSVt3a_xu_6XfJJ97JG58ACU(HP&GJG&y0{gX>=d*@~Q*Oj^Z zN5R$<)|i!{lbXIhc?Qbc_bQ~BZ^F;H_ShrS!%M~v(#AC3(N>O^Wy+2{Tf~nVQ)z6> zH!De(^8R5SOd-Yo(Ea||HoN@emG#cmV{FZh)!dr(+?w;pRBT;&wBN&35++%(<>C^= zePwSsT*xggNX5D=9HGn^v{o7M}H3?&WH%_Vha#w}EH~sPVJ?#Hs zbJ(9|u&MUsz*nsT#CQ|7HyLX?6&Uq~`_J4`mdlm4@lSDKL}7shQn+MXHY$1V0=ABPc*P;(Y z+9^}fhl$fx_JE1o*DF45``8_C&Gt<(r#lB%M;(a1ueNqJ+*fcF!Pd=j%7^wv5eI0K zh`-%1Iu%#reD$xR$hIk&->3hw-JOXy_GW?>Kq`i~F$ye^lu9Vd`tsC@YqEZnxrYE49a)_xCxQN6}h;yc$&dUpFuP z7(fr4ZMOHQ3-THaK>Ha`dscW~v=Cwz5O=%8)uJlzf5op=Rj05#=xF(rzUlF$bD+Du z!|6VB*Dd_HKY69ju5n-|)n@ue!0)j=)9^_P(+KZlWDjR=jJu2K=Z|`_QTMT>_86ui z!yo=6#Wj0s;LEKG4@+scV0SM#)F|%@SbS0+?)i9uGWv*K%w6NLZN&Zk?_yJ+H z(wFx*`sF3FgO!l3<6OCps^=jFPVis3B&RazOK9eQfd0oMdZ2m1#P__1D|GH4c4NUR{F+U-bwdG0HdRF|Zg z+;;Pd$cd@V!GDEc9{U&Zp0P85JpUz9x8T#qtMLa48M^-w@mlxK?|7I}M#?_qo}QjQ z;bj%p_T?S!QoC}O!bhW6(TfH`4n2o|?r5}qx>fJkR(7FWQuu*-&E*GDZZ>!8&&1gr z{40f39|p{(FtV%!#n1oEmaafdsn3feSDpt3|M>_rL7ijg_vY2D#nNQRGzVMY^dO+jW1ipD2zS2(LfA;vW>_$=@G_Z1Q+C^e| zJQWuw$g8Cr^I*QC`i8=#CtlFZ-0KcePtsSf21(QmXuNb+>ywS1h`N5gHQ9F>(zP4R zE1s@Ly518g$Avfc;XC3NFJO}qQ?lcWr7Xav&k2Uh=wiX*?{<0jMrfb?)MHYSs-#-7{f$F}#h zL>_dWx951&W(|2<5v+fWJ^o>1#7d?1BjJ~=i^PyZJ z1JvdFL^nC=>R)w@5ffxtNbe$$vFEj}g|-;yG7Z$u=6j3xoQuC8YCdBY{EBy0L$AvY ztdv6C2vKg=s8~U)XxPhm3&-JUX2|lF!atkXu?p{+j4hCwh7TO!Z_-5W|FCG-1p4{A zy@z8i7PiM4^o0#giY;uE@qTk|3m z^wn|PsiNNT_w!7JG)hlmTj$q6l?i5ws-Yc)@ zr5gO?f?!4&C@^UO-18cGX@RikS*ho)9?vc-zxHK-_v-!#TkoC7zKwER>5HcIs@jYn zDwpWKT75C|8pRosQ5Em4vS<-!7jTlCv9&^7ProX^prkZZNb%)G_YsYv)g>rjqVzhO zz#ObQne=|>$ES+#*4&1&p9E}hm!T-t5!7Aa>d5QF?;=p)n?@Z)k8HT7p<#Q)Q>+Dv zrB~zrnBI7DML+%qgyX4=9o>UBz{ed1y=?|czur-PnaDrWpeZm`@cc^LfMJnD=)AfK zO7t#(+F`V4I7y;!=n+a-LpwfO_Wl&PuR$J#rPzvlx!_o@P(Z>-n~C+=%1IO4$-a3z z78+U?5DMp}IQHbtYcb&PmJG%CQ>jIcDOuOj7afubX-x(t*DVL*+Gt^%I_V-Q75!=E z5T@})UPsGe&g;!#c1jV8!t^7{V^_>ZINppf&e1Fn@ylzAv|}SSYMJ7Mc#G$ zYk{@3Av@d8x_d%&Ii3P)l)DF#cQoFgtNLDLD-M%nGj}b!__xjVo7E49k7uHRRbj~n!;>rZ<9&j>typMSr!iIE|})?+3;G; zo3$4$+&jMON>dhAwGUr-XJHO1pOMAHB{#~itF!sq+N9*TbT+m~#mh%mcySekXd!V6 zoF^}rTxk4Y#@S93yDuy?lv56lONs%s*r&xS{7yti>J%5QiW`vhvh*UmgS$iVpx%n8DHEMM zPjI%i2cf~cAy#4399HKKD1Rtm1*-t!hZW{~Df**!~>%Yb`Huo-do zIf(7{izm}MWm$w$=&7re*j>H%EW7biym_Hhe?s8BcYhTT94?NR9TcP$l`X4nXInx-Jtyip&Nil;odrs5~XYgxAz2nSY7ls*XP1ZD; z68nu7*_UW(Zt#j+?Ob5TLpm=gWQ$%CrIe{ZbP*)MKE;06pq%QNj(&kT@JM=I!xyls zgDnPmDCSy2BhEyV2V-jrX-tRG1#JsHXQ&oz;d7e_<=14T9#M9& zlVJBqTTYrFLhg|91PQUc$~)-)fbfaaz`)5YHgfQ&g!NrUa}I#Me1ib8F=h`kY@f*z zImmFYj;41|1oZbWV6JhbHyPCgAsMH}>;oWn^qUV#_sboG<|u`wzEjvH7-ILVo*1N8 z5C|D!z&_k0RD{D2d78!TZg$ljul?r4Y#f}zrw+W*l1iNAMWUpV8eQj*mCFxRugyXz@ynH4Wd~y>(>2OB0O9*8tl?Cl(jNvpIc=zn1a>`sjrEqPy-ouQ zjRdi-Eq?9kvbiEmIm@DrpD4BkEi4JOaU3h$)qyEP<%5Y7K9OIb*vCX}KCZeni74O) z*@7E3Mo9zaA@l{)nUu$H4QW2h{(@f$mS`wp491_Zs+~BbnP3VDYHKP}xR0DZwZdEYEe-m~WjsTtr|Y`tRC+ zjD}5!@v1K{?!MvS-L5X;g(!08O0HN1XH5pfLt*4f?$(l#$)&3IOGd=^ZFLgkFrXFx zI84Uok>2CN&1Mbr@1@1BK{}|jftl}dnO}#qX+!Vvb$U)$mO3Jpt4f5O3zHEzlO37g z7ZrLr`hM zF{2*Njshce%}j;vj9se;D^P6hM!RWT<@U6y!V6ncIHQNnZJdV3GR6!p@PWoSgc_-` zk=@2pYGNLBCDn6kkc<2xuM=(*SW9?@*_|$yY6zkJlJd=XUX(Ebj+J$69Ma_wVQIE7 zZxWHfcA=i&R&_OQQJW6c`5LYwI&VhjrGdl$e4G870$~vRdIY<`jPYz@+5^2fxJ>}w z5HzO_&jGx@S|e&%4lA>W9$teDE`rT^z5(LN=JVH1zvu<8I?R_ATK;y0&)ltsHfF>} z>1r0SH1Iyjm+mVfdEe!7_qNKRUt zvGf@l^?hJNlf|K?k3OM4#o0ZP(?a$TF_QBD1JcE;M~D5g=n3?o>4S2Z$VW zdI0@n`Uc`4G`?Fv%)W*I&r5V?&se!c(Yd?58E?@| zYDQ4OD(|WkKtaBy2u!pSmg|I1DekN&r?aviW9Gy`;o&!Axg&e78_xnGa(pZzkWYIq z5u=3GM81tIzMsjY(gtM1t_ACU&y%#i5-=`RtmOm@rcpk0�-_`Uo3Y2@v-38sW& 
ztddrdyduIv1)66bTjdhTSxW=fiV+~mMD^`WH&97HwyuG3N#!$HGN1Y>fFScH+Ny{i zmM?PJkPEH1zLQJ0|AnQBYl1@)>7Ath_=OCGYpDYjv*UVv{`%WQf9md*mFN4zdsi() zw<61rT9C=W-&j|5FiGn=@SX#@;cC7>xJQ%bW)pNE>hD0OmNG~;)O6Za{{F%k^e%0(kfXzmrNjy~$WEG|NN zfOMvOwHqj(r|wcyzjm$Z;M;2KD&wCZ95}iwXoT z>kW(>2{RGZRqTB!Y7xb-K)sf<+#Jr`cEjGPu#&Uc)Zpu;NSNMcO4?pQ5EAv_-4nH% z2JYU1R4+Td*^a&8+NY_H@p2wXPcAkb{Yr<`&kVHNS%FQd_D>B9mdb~X zg{QbVRoz1wEnv#o+Dpl3mAi6kv;VZ8P z6@?yfo`LSs#^we(!f4lpfWHbpev?{#>V12GvwPd-Vc)`HA=pwNFQtmnuU`R_!@l&< z34h%YU!GNK$04X}=vkREJ!Ni{kV4xPQLr}#zO_XgW@tLJiTUJ`!CLqxUX+1Ffc&yz z?+@J(k#Q(o{nlkpL#y4S`jU0B>Xp?i<4*2`kAkt#CcQf|;AiE(!ZYSvSYj@_Dx5C1 zPG4bkHZP=s6P5@i4YV`ao0`@HX~P8ad%zEL6SV*lKjYUb3|gYgUVv^ z1=;Q|mSMPp%f2T4BlT3KOR!nF_(I+SEPf!ezp9r~f=T{U4%}!;eMgqG*ZQ1!t&Fb9 zfk}Hh?Gl+$cs-+J@W!apL(A=3eQ!z^q2oe?ct&PXU7G-&vf0k<&AfvixDhb|my3-6 z$L5Ah`o*)9PFwNOa4qs^DwCr%ea$34V3p!f@kk%{a^5h@+``&bs$s6W!%_E>Vg5hO zA44fIkG(2|2)FWSDstrK)vs@k9mG-P!aUMW83HcSH1PLYEOU(8XBG+{uw2=<9IeR% zCp6+g${egX_FwpqAr=8S#Q5&@ui04r{FE0WNwD++7Eh3NbEHDXJ$1VCg*+C_k%A}> znCDL|sCd|GS3X1B$K3s~K1UAnEDrsA!ZJwyqbvmGR2RLW?!n@uF!*acH-Q z?uvn~tJqd1BFt7EbjJ(x2fHtpVzN~h?m!`dSIpP%QFi1xSFnEf9`#p4yTBqQ?hc^! z0KMOVL0xvgv*h0m`JGKXj-OEMs?%*BNdjY}7Mw!qQARy~h%TbL?>xSZsMgZxPHgj` znIFb_e=`5H2o$Bp#8>w{K?iFpW9~ZxG`^G+TvOPIb!t{l8JV% zW85h@xmx^xs1kL!qke-+jIo$gn3>zM0sJQofSXuii?Vj4l>=I*RF3P*_H+uV_fv#D z)%r7_l6oHDCFg*H1U<@+EW-Jv5`M-p0+q+TykAjN+`i`Mo2kJD6-V}TWflx$6tkbQ zdGfmrLWNx}tf2~s+8p41Pf9*(d|Tv9U7Yc$;0K^{+=okJTnqAW`y^}eZ(=WtUYXO~ z_^w?EnlH6B0M>Z*2Dt?*`nlk*)Lj?yI~WGdp6i`KYjHHHZIEqMJwM>Xk$FhlyMhb# z!TVaVZcE(Ec~!FFSeIL%Alyc`wDjudIaAS8^jsgG^YhY|(QGNU{uxHOpO*+K(H5&?;>ezg(dZEDAq$6Vl=4tPc0exGR&BzB!(vVJz zlB+k`HsKym_kPg9sshMs&nQ2KaVmI?0)7SYNvQ&05_PJwp#3?Xt2wOGYzXH+$e@2t zW7W^TakbUJ^_1FKhV=@am2N48tt`$P4zAw5Nv?}>T8}r^Xfl@N)i;aV@N;0~u1jY72_ltQsD^TMpTdpeQDMi_>@b<8} zCguyQs3Be$?eNalyTg)Xc{g(GsegrH|zA){i(q??vDqwI6xR9H?7%H@?h4T6E z+RC%lj6Z2^Vw5Zt0VX17in{_XYfUlnyDgeWWQZrY;Ne_8V7V94uX8|lV=4-KRc|? 
z_V);?Dj>gxl}D#v`y8>$2cuVjR=|Fmbp)jrqQ_G%|I1&^nbU3(U$uu<@e zsXESAU)Zsz#N2^o>Yl3q^xiO115O7xE2_t8tuJSsQ*xD7Nf$Z7cXyO9{%BiVhP;zw zBkbPbk>tjtKe!&7yixK4LT)@J6 zlXWtvy6{I@%9)vZp9|0P=A4LL`EndZyViN^CU|$h$N4>Juo7MQ2ZkZgsjhtE-Xdr- z0)OHzyA$m~U@)L!KJ^biFvn2r{v!}Q!>A9h}OF8)IP9S8|5umVls>E%TN3`wB}hOXf!rR zjhx2xDc)7PDT;ERrb^jCzphS-9Y*C2Lk}q)m7Qewc&1h0O5YO?>tfD&%xCMk;lo zv`jL85jA1k3)MX5uNK>Kxfb-n3SMD>sf|(CcAI$93Odcz;*R?blfarSVXa@cQHU6z z)Gf=rtB*Sd9yD6YUDJ6K%XQssTI#kB1n^NB5r8k9$hql5HPe#gxpXdu>>Y82>gSgi2%kB>KTQBnzgw zk*V7Q0C>196C0ZF5=tMGf@UmJI=Xy-SrnIC{iy;E!tS>lpbGp~J|zU~Q?7HA%T-jZ z8BN4i$w!JQ=FNm3eyxq3J&DRLg8l=2g^02A6U0J4`GJ?^Z46RZKhTxRX)xWT6Oj2K zr3IRw>ff^B?nB26i!Ip-KKvR~6oR-Q`taAD)J5$N0HZ{k#W)^b3n?I1|F;J3>K90P+BI%)&^MSW^gK$8kl2*DByjHu}GixtX$w zY(uP^2pLl1t9)hUd=x=aiaZ)msdiiX2gD@_A%dR(@OyBODSIn)Orcmm4a%!uXm$rXrY{!Cm94qcf~?+ zZ_ah=fIElb)KP`jEaZpc!j~#uh>LR~$Y>~E=uo6G!mFHWv{p_3jHRl=K5535LUt3c zNV`oFV4s;t!q)oKU4?Kr8Qk68)He{rU-FYSUq0ACtv-P=x_UZaGG>0{wE}wz>E|)% zN#ccckyj35HmvpJlgE}#RHz@~g!Fu_9&s(HXT+@Vb7UkpzpXUW4)M%pFny7ma0X;$ z8m;x^Y?YbI?cRc3h_jCE$CMx&|7DVbDIZezWu0}DI*+#bLrPu^GDzV$*lvTI0KwnH z!r&qs_aTY?yAM}m@KcVcZ>G{G7E1HW@kEN@s7h5;`^|i7Gb} zmKm&kjc(~>Ty4M0x|9TnVAF+5HxXK~LtrbO7Rpz89V#a%D_e!4kXO4W-wx>wLi>yQ zm~no9PN)wMRZ$Z!2HFatA56081R=k{6SVKJk7vPGjf&1}*Y=KGyB>k)3zM<*kh_ao zdZ&O-rF^Cd@ynt;huGOJ+VYO2qx8`tg>-@{OuUtt( z#g|W8{vwy=!f`1vR7)mr+Mr6fY;A#-_$=|CXX1Dy-YNe{XG!u+)RuN}vN!94ObpCz zs`QE36}HW3c?BaKwY|+**HNe1VKdAtT;*BJ{eiAb%AH~1{+|jvaIOgC?`Wt=%0iDu zhWlIZ7F#}uYgMS?rp1@RCm2DY109;NoF=lq2u^N9`5E>Ckd^}+^k+5YxFxXNZ(=XZ zTN{!!p?!)ab`y4lCm*8d*83`e*t&VkBBEj90Dxy`+SNcVY_IMHaQh7c_Riz$fS8)WjbZ(r zj1B^83ozU5f(vTmHO)QAD0W@J-^sHLU15DEGVI zjo51H3p79wSytv}0P%SVZV?*HSaCrAWI|**6}>47Wn>o@mgKLNpJ~5-DcYDa^*M31 zV#6`{AU1NmhZ#JSI&6tiHS50uEt59uylkTAG7uJCA)&d}N%2kn@%Ea>JtU34j ztm`EUm0b}*Afo8;CzuKLHwAPDd!Uh;PaM(G@WhKN)!_d zpX8L1@RLY?vq2m>^&L@G33GNy9KxgJe$^!w=Z@aa z(K36``#4FBF_U0fLsPj&ZqkMr$O0H-$vh$s=^L|ow(ijiQ!d%R=R#zZ*)u4*U*x`}YH^=#~tm>qyB(&FS(8oRE&qpY; zE6@D3zw0J?3p|3uPvNiSgi$LPzX=167Rt+K(|t;wT4c;!iP-6~eC`E&2&ZSLhvN?E z4$lE2#@_O%SbKhdYC|2|yCET?t=pfW+7BHolb`YHA;u$3VJ3joZ9VwbEjPkL0GX#1 zK164jK+Fa#<7dv2HnkzlNs$M-=Hp_ob>}?-qxVtfUVOTPsD0+?4|Y6oqjeX;R*APd zzb^yBNJATi@xiQr%)bwTqO^?!!xW9_@HEQY1=!^uETO1)#|*L5Ep=DPIt8|FJU-kI zK2Dt_E|~RpVmTE)!bGPYXhBGPJ{jfWE|B0B%2G$5vOUh2B=v0!%9h7a=dYxiJXbRe z+-?Cpj{**GWen>YRZN>H5Qma^5&Nf2M92z_MeA;mQaBp{2BkWi_*$e<19U$mY8WgH zKLW`eqZY-CWMe&gsK(w=GeNY2%&c71CCyyD3g|ohGGv(ejzvMa7QjDL^>`ANtj_#^ z+LzPo#0H^|R2O@87^Tb~aF*s^Kk9X>Cy{qVY=9&sT3a1?S+Z^x4z#MY;n(#cNmR2Z zGN8(~UlclO$`GRtT3hZ)g&ZW6&l$UelI~iW6yaLZ>qXI?Pl#!$UITQ9Z|OPIF!+Ai zPxK=;I$-HJ6dz71sYEoQ9+dXH%7>dd%9kU4g=$8;K%B$Ixz-Q`5(t^YbFiHo$Xd7C z3vJY1FZcv66Zvr02}RYrcolh4KT&fWnzN0#L9$1;OTAOiz`nG`)coN5Q81=Wlzgqe z?F2npn*3G9@S;<|_#F?#Ks)@`%ln6<)Ukn%5_ns(Sr9&`U`%7=+A&&nulI zpQ{~WaShSa23nVw|ISYzegSExiQQ(LKn9!`kMhW<;Fhj(5Xb3nJx0OX1#Lzi$6Q?MyzpSS!S_j zdj@7b)LfFT^sqnr^_4`dz&x@qI5S0QM#NFyv@l3sH^XI#xfRo!HkT|yUX%u0lNS`Q zHF|}U9R%9)Q^<$g?l=_5?sN?BP4B{h35G*R%q>mdr>{h^Vo3arSE2{}9X95%M0UBy z&d6*cqp0C9j|-uVoQT0*{fYV`YKUr4_3Ny?rxD($p|%TPJ>M&TX!d~$vN^plVce^F znZAa&MV|q5mkxjA8ZN!VxDl+h1eI*Ys<*$~qY89u03jHbtN$ZM^0gcnX=R+=f-^ctWBe1<3#Xy`7?}qPQPc>&3y2uxdoB+U9bhFQv=cop-|HMF#kO&q&VTIj;1iD(E@>wW*iroW+y@ry zS`Y{(a_l>JI_c*~+0tM>SM4~Vh@h7XVx31$%cALd^f&`hvQj)MGE_|GZtWe=KAB-` zn5ZK^l9}L!Uj$t}3v%sQZXUhOc6Kqo9SuS3g#~-*CIHJ)gToc4HvBP&FI=6Zy6HXEa1I*z_(IiBL+q_2^C7 z7LBSZV+h6ab=||CA7VX8E>1MsT*4NPl9C$)v80AiExi(CXLnLp{?dL{qrvSIMYE?o z>%+#tmt%F1D@6YrMOPG7BS{thfCZNEBHU7VuAXV>P1XLXXwfp-mG8M8RUTbG&# z?XD@q;{WJ8D2;C;gw!vyJiBS;T5G)c+lbs!qEX6!r>s-bDEKvkAK*zN@4d4h?w4=vs~`dF>=1VxyD6lp2%={fGr_DJBy| 
zFM3EDpZmK}<0x}EHLgw;(|3Np>l9t&I*?Mlbo;vI9Uxh(((Poc<+a(d`7st3zN`4b zKcq>grRx&w2*>vuAgAeJFw-VVsrIGwYUPx z*4+%E7rwO&WX4!>px?6l{m#6$x?^jc-KWpne$GCe2j%0WhKTY&hBq@vNR`*Ic&*4a z#b%-z%LBmq!SPXq?vh?&BsONxZP|=7;SK=U{)+w?%uGdi)qD_pf(eEq(O-wf3Km|c zDOiMSDw?7%PA8yK#^)>jY2TlH8`B`%&$~>a0zdwZFktCe*IeT5>c%0I3Bt*|PDIuN z@oH!k=v0I%Y4NZ?tW9h?e@pA;FQH}Kl>bxV_{9N2QXJQnrU8zr0Pb+b(|1?kw5B;_ z(Lj3C;{$W_D4y#dQZV>5nr4%<++(oU#UvEZ*m!a1r~_)ve9hB`cgg4rn{43U3Q@Mp)l@dAEvb=KL}h8ZK{ zbQdy=H~d=dCBJ!m;2H@LJSK0veHVe&WFF+#3VhU!q?~9hoFaw$VYViiX~CFNY8yh_ zLX;Q)Q6+oVJih9pNEup#OCX=01b#FOSY^ftCqC&dX>tmyEw}u1$%`D!oA)SJWL|}S zT|PTQHD(E}neICZ=c-0dr*~)R-Q+H6005-{DL=;}sda8*v+65QZ$L$1c}hC-4ihS= zJA{w=7T4PM^y-Up_h~MG%3Z}H`yc>AiT0(x*a#()mbw zc9>yoO%UUZF0Q9bjO|LNS1{Ncds;DQe9OX^@<$=F26%{ji4)B4I_`WPF*#2+za#L^ z1r%I*{DHdThZNpf4qSbt>Nc)d?Yr~?W?b$^XJ3@}E!^c-h$p%a1oq>@ws!oV$1K}e z6j%=RR`>3wZO?()#|}2_-XQ2y<%(Nn3E0H@jGNgZgDM1KwPL{|qv5FDc3tJuJ zDHTjTc#W&!yO9|Z^(z9pE{<#Bf|EWg883M2k6B8=PJ_?_=k)|Gy4~K<4#Dsx z{^opY6PLr^!%o(oB*n&b^!KL0iU;y89)`gAKZdFP%0;N$ihqtygrV{bl@7y5KYfHF zW!|}=6P)E_d*6D(&NDfcFs_WTcxitr^)VK!Joe4?S!0qH$-*32U?!cl-x-^*UL8Qujw;BZr@2HoK$;6d=vre+BIM=?LSuE5$=MqwLoGlC*-v5^8a$IQOY zbblb{WcBnU;@-Nw9bP+yzf`M7&0EQRnLP4n+uI^1uu@|zUHzOEyeow#X*VTH_mgqS zhrVD1xRk6s)aqz7Vr4A9D1ZGu1R2#VqB`A}#c)jISDe(Rh;f#~ z#Mryw0%*_Qkwd@!q0x}pYb04Q_3ztoCG<|$U z>^1G=qnGrbLy_8%DHau9_ze{`hsyS*8F&5`I^px9)q0)|Ez8%6*5ui2w)}i2iJwg$ zIvagK(?#CMWsRF}FH@~&5JeS2w8qkpq+BEPJ-^EyiP3RN{xE|yo&8|Hh&Jnnopmf6H>|~mELH9oT#Zs}fKzK$UW@xbx=m9v+$Q=!03o3f& zr)Pmmw<6{?&z-<(3-fJOfbI^9rnS?}CAvB2d%5bVW+1tfo5c#`k1|86sn34z=#1-E zS*j|0D`Wb`Tu$&Mo5IEkio~B5(jf}*1DG{z$Q@+sM`|DtABX^#h@JSXBTz&QQrd%7 z8`<4g>kX&gw+8Gi3>si#9T-;}6Gtg*9*iM2h>uD8N|TZWjQ?%K$@DuJMrRZd{y9{s z?*zeQdB5H*3Qs-ow~>MGi_&!lf%UIeUN=A*<}STRK>BuCe&Y(t+EPq>t+73j&S}Z^ z1so^PzE^vZ)UZidG%fSJfB?K6*6)yiU}um?wyb-RfdI}gh>%U++WD%XU3OT>sQG7O zqjjAcLlqEojtBC#>SU=b$=H4@+jT&w9g5WbGJ&nK8NXGxR8_!?9y@8}51W1(o{TE* zKhum5Dvfoezsg{g)RYwmrBm@ z$}_XXEolN*keLB|iA$)1LMlH|euX~tp*XtHMzc8lqEce+IdTJu9AVU%wl#2pa4x&;<6nF-&UaSJTk6%E-z4qga$s$ z^g!1tFvKgO08a2+!M6;Q{mW znytX6g=V@%RZGO!AX^xJPeJff{aJvhQTUJf&p*5L8AX3fwJC1`rrJ7{AL{?XPZT|Q zPewx3dc6i<^%%uSie58nsiJQQ-a~;r_^cz|A^*e$DdF)3cuyIZMP7fv&r^Z`rpO;d z1I#ztkxkDtH6Jo^&yY{qLq33mt=?BaElax!@%J8HeNTA&+vZHU$rE&(!y0c86ZFud zexxCjVa4>YaZw%_>8f`?(?tF4mG0Btmc=e<<8v)%HxXfF%R)nn?KEpHJi`&YaL>QP zC0Ygb;Mc;A3ck7y;j{50i^wX0@Ntb`e%T>TMVCmkcvbAI@sBE(BZ|d6otR^PC05Cm zI_Q|sPPI_|qv|ZZ`Zo)@^Z!C-ZV>`~kCf*x@*LBSRgaH~2!Th_$HCi>=HG%ZeLnRK zt&=D1nn}w>T&bzc@~dCAC)4}O6jy(wQ^Z<+hp8R@X|F4>JF1(WZbu$+N!s)rAI!C; zzB%eyUuiv_8(U}J{+;dn>7!-1;Gfs2HeDU;unc5_^Uzhc+s9cC_kZ<{hGgTjNDHqv zeb(wRmv2^tL2qu#l4G<(cFGc8N}p1GT)O#*u2Fe7l!&dsP)k{#8aCgIq6ZE;}R8RHPc<7A1b4)K+ZdCVg85 z$gt7rHM?_Gvw{h=x!kPi?eV7tqdrdu`c)+EN;y`Qg?9HBUksIU42nP`jQ{+`2Sglr zARr&)T6!}FArKpD3!U%qL`DY_%2nQCbfI6)otEN7)O!j{B$!K;-JE<1R{v7p^|CqZ z<-<%h%bmkLrSH1Out&!OdrJ7*`#TqO&$sjn?M&YYXa2w8!axqCs`g+j*3Ac_N9gwU z$&|YvXQB3A1ycfA1(sTXud7sn^yA+~4Y1#xOxjA$>s`t$+h5;bpsC-#Y?zIXQk|{^ zSxfD@PXPrrkmex5{G%NnkJbN!A@J+K|7Hlhw`34pxAT891WNxu8Uo{SpNx-C`ui={ zL+3voSHErm+E}*zJ^QQErs2i*;?K^h!)=8@%h0Dj#{_@BS4j0M6OYPw9h(;`blA~^ zC~0}KJ`&Uj<{ik?c5?XW{nDv#z5i3w^HEoqQCAb&22;gHPl0CFdX$Ch-XVn(71U!(kozH^sO+_|fJz!PI@bl}VWFJO_2GA?%E2d z#_5u1s|xVD^%*ZIF@EFtAyIdPNeE3qvL~*l{DWJMB?=SyazAwAg+zXXk*~X_b5vl( z7j#m*!8oOz(&`KSg~t-1t!w{c2Bt&zvOTu{ z%TI>xe3L2?@^=7DRxr#nCBMX5fLUwgcftDkFoOAT`cB^G zv7TkJhxk0T&8L`a1R3AF{Mp>Gsg20YGINjrtog1+t?Ws+7+4*2z3TLxn`)Lk zS7K%RIzDd?jScVck=dR3tD?WB=uYIp!Rxyy9DT!)sD@_JzMqv%`Dw*Ejm_?|L15r z_JnQgQIB2mu?^FI?O#`+`_anP1ob&zNE30lX|mpqx&2W=Av?+-OY3H3l`W(E%vx!< 
z=o$Op!DWaTpxxU{kGf1wU@+s~*04IKbFN~diL_MqX<_qdr+;J6w(Pj4?&HqNqkoAq zrA|M9b2eEwB_?idNc(zNotf|Q+_2n#nXPoE6~E-Yi9C3&y)jI^+iX8}1Un4#3ThZ2 zGDp)q$1t0B-hZ=++9Gd%t$iRq_LZU7!{|NLaB~wv!qfjW7iuqA79k_f9DE4{DJ8S(tpK3dQYvr39Oz^ zJox0)_br(_q~)E??pXKg@$jCXp6(rFj`O^sJ#79#Gl0G;Y@RibgU=oNPfv#)P8zN} zJNEm_Jm@{H*SMc{6o7dU!Cyfn#hd*_AK;h$tK%+Nd=V6xdQ<)NwhP$af?3fcso+XE z3ntw(NVQ*(5`u6ST!|_VN;pRSKG$HhZqw_7w=F7p`O#qpC{9z!cQyHX{4dtQc0lsm zTC45vf8YLvI`p6>7M|E+3^G0nLD2e^(}mjL6uAG4{=Pb!lq(mY7*o9P^MMX)^n!i&x9=d zf$3;}e-rk8rZv2Ly=yjawchQ9jMTB{thV28eA9Q<_*zogQ-2WyxbwI(~v;>{(D z%n@RB>vQMvrdogNu_Pe-c<1ktzg8=|U(n)@&-=dL`3t!GtA1~ zi?vs|_iB6H_kCSil?P`Bwc4eS#d*Bfe+`}N{gD)eZ88eg}XZb1#uV`w78oG!#;f3giK?DU%&f=5!c8}2$K8i z1Xf~*!)!LGno)E0J^t@YdPl>b&3t;hLos4M_{o0;jikVrL7wUD^lB zBE5k$tHhu&$7-#mWvV)X%Nm^(nYE@Vvs)(an3WbWFY5Ui#7LVk?^=WH4TH zvgO2SSIcfvMA*ID9p^J3TYx{T`~A_;mfh@Q0DCiWfkN_=P|)vZOJj$Ga?D)$z%MKc z>%oSh=Vk(?I%71A=kJdM{z%}D1pY|i zj|Bcm;QwR^d=@sEKSqGTd-$Egk&UB+OO5+!R|l{a*E7{oGzZ&5hI_AWk(6pPH_CYB zg@E;&njLn*=R^A_uzcUD7#0OWK)rs!<+F*1og<+|o7+5&9_z~Yz}~o292QA3v^?lJ zhMnJq%pRoCU=MzfAaXs?i0}xKu|t(?8+P1&GR_c-=EQGVvB~yBVzcRBxQLM61K)3p z06VICV)>S%%kgsBJfUY^M_U2>7Nv+3AIH<>#aj85E~u5`lL(K~EgM(Q8gD#+xIP}5 zJM95&vw(MHt>;mU_j$rxOcr!~eQ*fq1KQcNLggV0OOFN}cMsc&pd;&TyCarn3+`v@ z)nYeD>>HUwYQ;avEoox`q0KudIz{zd=rKm6p3Qqh#Jy9NN3mb zBCv>189Hh?-AI5EI(FfN1o1n&f9)8j|9!`(`C!i0nV zbJK@csJdTW=Z^ev`_JVcIsEAa|CbGheP!%~J?Vp{h<)!>)_H*$aQO=QyG8-#K;2QrXnB8N@3-efli| zx;PX5UEPW9yTCr>KMhkhT||t}sw?c=M*+xy;Uoq{7>dq`iF#CfJ&`Zt^tIZh*JLBd5Cjx;P+*w5 zN?Y^{qC>U2yo5`^7$=L}!`+3o6PxmBVF531&zfVXc5AU=AR`9WW9IYX)(Ab$Y}d)P zruh3&M?y%g(~s?`wvY}C%GxTww}=gEdARp;?c->RkAV;Hm#TBqK|%2e`O8*tV-VuO z&8bv7;M0!KPLkINQ9Y_~nnrNY8K&7~rC_(NeyCI2r-5s+v3>E%HQ4sa5`pVquhXL^ zy-JOn?nM(mA>u&a3G3OTA+YeaSEu;?{N(47LhE9{BiGkkP^C#qX>p2^H&|2Ky=T*H zw++v_l+5TY(-W)ih0ozGRHg^+xQi1Vdd}-jeAo~>c5Do)gb04`Z_jk?u&3A!uq556keOZfH(CZ(7 zE%uF>Vj5kJiXoR^t>~I>exi;p&z6Mb6?~t5 z^i`kpD?edRg*g3U1r0hP20EXtbQS;)z zk2&@*01K8e^JL^?g>-!J zwrk4z4P&|6RVBi=TS%^JmZ076?2$d&PWT-bfUD^*gFt4QsrJ{gIN#0 zJe4rF!VyE>=@MxS^j#MO3cf!56~aLDm~Gouj^fk(Y5Wke{zhP@!z5HuN#K;$Fb?Fq+>)t&ELHSD8THQ|8)~ z_C4;TxRLzA_vVSnqDNE`>l11pHGvh^*CLkFhnn;IS38l?SH4I&@{saj;%uN!A+;@YmNq6$iM-%N> zj#`_VcT;~ADNzPfIA!to@$s-mR_L0E>VLNBuE^ZFpJGpq_f)e$Td?D>M-x!=B&FD2o)$h5r9i9>)tS^Ajx3HnBe{^ zGj;vv3UYi`rl4l>=n-$O&?y4b2;MhE*|%a+)ySJvOX5qm&GNS)s3K^;IX_~Im*h)W zn%mnWI^DrjW9pE*DBYa1XBPeX_shh7ifLBvrcQq%1aHYOYePb_AQ0eJa^(3z9%ksS zU2BfNyzOw~HVLw}Yv{*GNG>394sl{}cyDg)3T78zQd-*5Q-x3`^=Tb6b^CHES|XOK ziyMxp?W*AHaK}(;ML7C;sxR&dB+WtuKA&bCoy8Oc*|HUI(NFje)R^~T?Y_7VeUP?c zk1ny{_x!Z=!O0#qd6`hG>0ta-u7Kv6x>4GwPt@hECU3bz z*_Ph}riLRK8t>dMNgasmdG;Z2K;Av=JKu8F#3-K$YA;QXE;lIWO7 znIl8=)eGo+&hQ9dUGoee-F8}&0Xwc<$Q`{uweEE6;_-Fx;J|<2wW~pW&G#wG2z1V-cU3jEHz$)@ioY>a^*qp4?HP?a`X$_A(Y^V_ZA$xAp?)5{ zmI^HBbbosjDl>ycot`2(&eL6SVn^VXU(^LzrgxTnCWleBXM;~1X+@12ecIV1axVo* z8P~onoIlJcEjj}G`nWsP^f%NV&@DL+vYRfb)d+j%mj;m{2{sP-9Jj2o3YZc-2t2OAlZqnC&A@D_>M7*#@w?>^I z4kw~;2>hM|jlV^f;bN1uHMuK^;*pXy8+7sGYizi(RAXK*L+)^AnP1-7(XHcOrVN5Yusjf${OW%l)D zVv3PI%V#K{7W2ZQc59791p1V3|2?Dxhd$g#_-*6vuoQw07lh+LaQ8>uQJqGGe`-tD6Lk*tQbz&L;i$6QkQB3#S3T=dbl}&lQus zAyTOt*%n&|6H4~y2Yib5CW7)NXX7f_V<(c!6&XI$Y|1(jZxuAE%{`})pWYHI60n22 zne8<~A03%64ZA)fE;#FvS~a-6NMMb*cetP4v`E$E%#p^`{{$~RpPi-Zq*i=zQn?4 zKDb@Wk$`G|`>UpBR}OwA+d6snsZAH%%Kg(;5yBTxn*lQN?*B=^e_%v8#U&p2DN2pA z#3*wgqg0lp`s_7_V6uP9*{kP!I-j*Szdwx7p^V_7Mqu`Ogb_@_N1tjSXC*wf`<<9k zw!Jq$x}#n$J{(X_`$02?nT()Vx3ag~83ifZ9sN-CmC?&QPUQrTLCQu67D*1L$#PzN zL`gXLPTI3Xw`#!D*QY5pWOHC9RNk3|#AQ=9C)M=le2<8gz*(vqD=)rV5TpN{v&Gk?C`I8G)? 
zR{UZx?KSfG^ljU1@#$)oyyHh`I~#^at2uJF5^WAE8iKk5<5v_U4YPP~ldN)VpEesA zB1h*lXCYrc+B{_zuJ@I1((N-V+->p417TBXkyBl5Jmqv4byjv`1c3ZNY zp>yK18WL_Ac4%qLTpLfQxRDL$q=~84ToI1C8&ogbQ}dE3St{`%JwvT%q+ne9Gy$kj zcHWA;;hBx%io?>5C zvA=pHnL~Mx^)-M;<6G5XvNZeYySmUx7a<6L@;p05R@(r^O-j!`` zeU9rAua22Mi*G~R|ec5Xb16?Lfh+KGH$Hf`>_4zbuISHu^N!D@HC(ffR ze!1vX$(cQ7xzfVYz1M^@KJ>;tnl^m|k4Jbi3~XZ_OYm;F6|ia_bA5_74ZdgNSbz4F z^Gr@w^d!1kHc9gga4R*4*Dm3Gi+NRx3a{6V=#)w7i6blSUf$@X~911 zG9>ZcaP%fZX5h-czR~ny^`noh>mKy&Q>x+w|sis0ry`=A+!*YH6H(2^dJi zz%Er5b_2MscQe&Lx?2t&Z{T4XHOaNuzpG{M&z-EOL!{K%j8X_M5_EefS3>mJQBK9W zl_-@)-if0^cT*ucFUG_b2ov_mIAp3rk|B}qJSXAe!r1BZ&O~_w* zA>4<}H`<@W6HhtF?^4i2pYGl8)A8r2nf{vS8dWw4jhzY3pE6fl+SrRRB+GmwqsUzTZe4|+}3X}!Zf1(qv82`xR)oeVS7Si(x)*Rnby>(yjRYl1jG)Z03 zC_9foc)8Fm_nxRcxO)g=_1DLD22cnDwpJkfq3J30_1JZ$E)L_JtWKQ+lIM4ZZ z&AVHEm6{>K_X8O=M)#4@oAOyFE%YqR-;t`Usm(6{)sYNGCUUBR2}`*=*q`3o>A>5j z#hpU=QbkF3lT+_wjidOuR!Y7{NdgsMjcZB;EN*(UH#MVkl8gAP1k2rQxm%LVN`f`> z9*$TPcy|bltEb+f9Fwq}qEuwEz+_M|S$a7B$ZGjQRu~nw$Ch*Z2?eM?)HE0n^Riw* zGfh$fDVw8^c};U#>bp(8s2nRyDye;dtZ0fNN3gU0_mm&IDqJSM=7zcOsHqCjnpzg%u~4fuh*1}hKyLDu-uNV zPt^T#-IG(~H9^(+zJ{}EG$1NGV|=Il?QB*%ZOsb*Eo1XW?qcJ|8-X2G15^%!R!j7umGmz;HkKygN9_sT>-s+a# z=)}E}PfAcyYlud1+DSyxmF|e!%Tb~270@qMZZVyuQ0aKLxL4)o3d+Xj{Cw}d6Jlss z`MfA+y5u8GWZ>1hT?X=S`R7&}O7OTS!K|R+HB+btg++!5-REQu0a%=*^5Q2Q2^P5` z=_qp8yDkpJXbrL!KO%O@5eE7DfF3%lhxVbKtK}57cRh?|sv`T}O#+i7lk%1veD9>M zbOD1Hz@N#+T)-Pi;u^|wzFi!YQ;-?Tcmk`!KZa6=nb)j2#ZCT21kU+7-TD%NXT zP#fQdwGrO52rI3o@X!Rq(D8BIwHa>VUtk>M_t-njU;1-NX+g)@TUMe-XNY;l9gG{g zl#%-Hie3;lOur;On-Di(XEBSH>QEZ!XjtG0Tj6=V*+sTNk~|o0H@Z<38C_13mlHo) zo5T|TQ@j1yX8IVL3mL0pNja~LMbTeDwDW5_qZA1rs4Fmc zs0R5xE0(pJxx8pNI>Qt9!Q0hSNP#83hg0usiRSa&_bt?HVI8NW_43=ZdB%x{Go;+z zLvAHsHC8{%J)OAa=cM7fu#jdF)E^z+Y(;2UUbjT4Vfp%L-a^$lI^l79&kwSq_@*G< z+c!veYO7xLM>zAk-({V&(HVDd5LDWI5U1+u>tv$Mc#o*EE1;Z|P&0i>ZnZ1J9GfH^ zi(=BTy_9{mvF0k1;i?KWSc5bVxOxAzg}k*qrGWx>+eAgd>4yoz1v%@&*u052ORa@{ zb@qzD5LS&I|BL|-swzGhwcs=#?L$b8Y@sr`;GAE!No$Ys?o_;z+&3EqCkCo8>(2nk z(vQ( zsZ;EsHToeps^+S)+5O%1X!_L_Dx2@Rf{S`5&( zuc9@<5M{%Z+Xg@vfP&HCrXf!O%Y39nQG~+irj=J`+xT?;%XeY@pDMzbFl5xEo$&^S z(L;qhnG=B21p!%3@|(e~JVRELhFQ*B{)ZoI!!aS)Nj;HR)Bf>!>UDcE+`r)14YUf*{i_AcZ5ht6q+*d|dL^7|USIIN8?QBrc?@6XJ1{R_r>X z;T&Xb6i#72=(kCoNm{az0m$T`u^v%qew<~7?#t*@={|q*%%WJ{`!O#0Z~Ouvcu#jFdP#gvH+pR+PJnXmUup zZnw0+P4`s%y=5+9N*#oH&HmhLcaoQmj)#cD`6g4_v$%)odihtk3knu#NsqLFYbGA7 zmh-AJ8VrXmE3`s!UMWp-+06q{N6hLf6Lp;9!)gW}N69v2=IhGS&8@L4D`_jl3rTJ%e6>_82shQf8@#^8H2m(~ zz2!Y>)}xM+Wlt-Zbd%RjH~B=~Pkc<`*4?h7Zcnq^zWYnMeCEC9N0Y9+*u25vy7wwE zWMr!5NSw}@O=Rk&swipX`|bo<;75!)9z)>Phujml+b>eDFVw_JFp0908|;M($r!Tc z^WIKq%@bNGF&k3fi$`$wNUvh}^dE=0r0a3%yk_s0vm2>ZeU|eEkT*#5j-=39xL?P0 zBZq&Zm@H`1N>JANZpW3t&oo5U|L%{1;t+TB3iy{_s>r5SmrK-OP6oyj^Xyhv01u)yrA zcv8*v4nOi{6YJFSUVB$JH4np}%K_iR9-NkMn9lM@GTb%ajY{&-@}Tl}oCu$j`y3wM zym2B&DWyVKz!6_qZTi<|%u+Dyevo6$p{o86gOMK4N~63I@QAwW&TE<5R$rF0sH`!2 zF)b@iq%_2WZ96P6yssy&k@@5!f!EU??{e^oM{|x{zZ;tE!1R`)W4&uHC)?hfjYv8S z)`e-*a#w7r9LTXwul_kilDp(XdRy_w?OYlry;Y5{*P@x%EQ=*EIxkq%dTXQ=xaW;% z?~z(Yvo&4k$*9fAXT3Qv$^81+HFFQnTn(Zq>)=f2G@rt^!*Zp@=qH4W&KuFA8Iu*! 
zBJ#ut7?0XVMPX@4hB6_hoTpin^Q85GYq5&j1kCJ_ps)CN& ztW&9_T6Up(E_FpJj!Rr?dzn%1+YOk)sl7`$^qDufZhg5LUfy(1L6bm4A{oJaJ(IX6 z=wRZza_L?Bo3T&=-bH{<%!Wfj!RC%{Y8aH4hEaY*-I#qM{th5}F2Rv2{S@L7Y3i5} z8MjrBmEaFsTdRfUx|%E%9J<_04gsn+>a7W}--uATE0Iqa{+Om{`F7ZuLMc^qoq@H1 zYitjZt2;jx0K_Z5DsWE%`MIF21{t8&%%$K(zlkdubTpX3^{k(QdxR2kVEPMZD_}yX{?Hkaj=m(`i=dQG@F;J# z0l<1`)&(M`uUw<%Zv~mWW;Lz<^Lt`Zdb3HdE!b_({Lr+#)ythlR;)B5GPD}JP;jS_GDZnK-N2rIF}MV zAkxRK9MXYZRxtQ-V|;$7VDMk1;Jt(&6I1V4ux1eKvf+L_ms!tu+TiCyn#Q#T zkNNl6lbJa&1g$nN&DBwJLbcp<-&~;e2iAz`K7?<7zqya(vbVg3ua}P4^%B|<)800M z<*(IF-}MCb!pajt@i55wd)CG)(1w9m%28gm97jfg4X;nw7l$5Do5H9A$_pxPpuOY)kIAK8Kz@>$z74ij#_5|6ZLjd@5@r}jQ_|?XaH^aw6I5Mt? zmG5UNG$N~Dc;Duf(h9icHTUGzQz1^%LW-pGlz^x{X2PJh_BqLvfjnATbc5=Vlh}{~ z8z5fpH-joYZhIR@_+h%0lu(4n82W&Pa!ajp>`-PwfG3$ zq{9wGVjB*CoRz_B`yc`t&IGU_-2)_U-`wv5iXOKqvHIilu9!&p``ruQ&39+0*N^?$ zzL9*1Ug*pmMTSeXPAM)khlJ%mC1i2tm$@+dmtm>dd9Gg0cwbn(+)Qvzwj=z_)X~LQ$guckFZ3<6L1Fn9x^@JG==?KNI5j)TI7+H#Og1GD;D&T%U&gSu&n(M@xXTWr_ILUa-1 zxfoX(TvzPD-I3^sP#Z)vs|+vFt4-B{wPw+% z2c$Z(9#?4`{iNHlOr)w$b@Ko2KKNnoS#sE_?F5s-&-^%B&{E|MPk#7cJaP-Gr|_jY zbo14(@Fk^<7y-WrBDRFPvv#|W2WRn%$SxveF^ggW)&&7H%%CpeT26*^ER1#Zaf_Ml z;7&n2oiTnc>Ht$P`Q@Xg?~?=aq~>efg~4uf{R#S%Mb@fa(0mFl%f^~l!uITBGf!Fy zkkQcUk_sC*Cu^ymW+be%vnF1+`q~^oA8I=@YKNErb=7nWFUP@!xHCYK&5Rol)KGz* zGULLT@VkfM`S_?Tn6T%688EItkVDCO%8Uv}!=nzuvq6uWe??Iqgo~s8$G~Vq_&W+JTnsllBrXE!H2>Im zpoJ>?pDLH3qP4#?*Z-z*{+~zk9SiOSoIL_})BpMTBZ5D@;D0d?1`M&jc#p<~lG$S| zG5HWt-R=^ntl200J?u_?h7hpUUOk|i{a`&7xcthJg*Q$(?r|Iu(iSO(WI_YIG=()W zZy;l7qv4~zqjsZ)qspVAqa34jqr{`wk?j%q$lyrJ2xKH}Bz(ko#BRiJM0rGXgkywm zgm?ryygdvb9vp5Nh76|-hY$M>+YK8ID-Vkfa}3iB6AvH3R$vpbPFM{r7Zwlmhq=Mb zVOlV07$58jj2w0yb~LdvF`;Fq4baxqR??Qz7QvJFu+uvUTfZ)frqOiDlDB))6UI`n z#>8K^Bpo-KADvuoeGon?SPybKUc&Y~i1m63iaLrKQRe15&V^^3n^t9o4s2wD-T@!o zmAy*(LGt0Bpa1V7X!lWQ$0Gw_mv)VJx26_;uG#D!M1HOh_>G3<1%dUM$EyecPu|3$G12mC*L=pj7&OM&p$)oo@;e+JPYKihtJg2sdQ{U8XPo-ZLMS%U0){t*> zDXzlHsY*y>4vj8C5&Hu0W-l|FP`DsRi>Q0GkdTOgs8ER_ZG>3!5yAX?iOaEm>9mMT zf{-96u5z`Zck1}%DXc~mu;CE~o|DLFj#bWt}IgBMot%Qf8sD7iKC0A{L zr!Yy{bu8bB8mgSNVfjWZj3s|}ihJ!rVU~2h)f=@it3owR-om0MrY4!7 z{03^McGfNHHzr|)#cHX%g;mnk6Zz9gUyu>-j3nzfu3;MxwPoJIW@#uaA4{$IHjCQk z)TZKqSkpL5&*n{7*haaU6klPFwB}Sk2aTqA%=Vl#H7}iXNr2kE5gvtx_n|aZnc>D3qR?`Juf0P z*Zf{QgvRc5mjaa!~@0-7!<)=dT zv==vtJ&c+7M$j85T9bDkTaC=9=n4q7s8vI(sCs$iKLxS6qKr22)Im>Ivp<)5z3I`;{#&%6$&>lbNFi5na+&o(=nS`Mw+c_f`ckDO>(AiuhJlM)u*}RC2X&JDnnb~?V zAZYpX%E0XuFGE3AqNc^*&Bm9Lf0sKTka|%SAFmViwDSPR7Kg ze$m>LD7eUYQc(SByz^*nfW9No@s^VoHGdIQw{qyYMRov4+tDY1Alb)Gkn!9rUlZ&+ z#J-wsNm4E0^s6qmcAFs<>6U|XBq$&DBYdg4mOme88z7e3RxX+{MrRd<`&}0Ux9%@K zxq{5+UUhSl=4qOPa#!fBR^xsZTo*8%WBw~YWX7?3l2^DOYqQtMdD#j+nJhjTs%&aC z{>5lhz_f+!-^tvbrQln}>f^PeNlhU#xmDRuWw`**{AbDdr}Bn>WnLLDt?Bx^vc$rT zN^~|CqOIDwbY!1)k;A`9Z2C2lQ1p1#N%@p70&w`NVHmoDs~vyGDF+a z;Nt(5gR&H;+=X6C)mqpqWxu4>ct`5-vqR)O$Ze4)Kg=a^?CT2T1IWN}nJ0h7nkG+t z0EiGEvg&h*82efyc}0-O3X8`MQ{9L0EBj58rY5%y+GXM2l$#(UdsAO9Yo7R=AqJlw z+t7KDsJ4D<3x7dl$$-yD`wFV=cfl_%3T_cO7_>{dULAGa310)=op4r2%5_&SAAB{? 
zoA`ol<5uuhn|Sx|q7JBwJWugi>1Q~Sw8)#x`3;EvNQt6@13KcyzS@<7Q?B!Zm3}=s z^V02CQw}bgQvA=RQ2(PTgohVRdG)I))cLo@zi_R(J=xhVJSFzg}~g3decI=(KD zAdu#7k^HL8(68$71*Fd5FBsXEc=A20FS?*0(Z_?1AK-+wdglZ`>=y&e<2XPxCcJNyNA;omMXhN94S-a zYTUi%2f?_bfK?}pW=q_TySAO@5E1xvKEPh1`OLnnD_~uO!lFgp+!~#ZI6K}O0^@T& zMtFOLcO3kU`3j4Q>hkjK*cTD=!CirrJ76tJ-+e*Qv$|)3z|K{*0&w6K9lKWRc8jM^ zo#z>bo-Vz35S~(HuZ;VO5b<^4FHI<7oJZPJ9~Wb1D`TMWWQwBYqB&r~A|o4eZ|xl} zU3fY8!etw=#xLa2v35Vi&1F*us9d!i^|U#^mhuvc&efQ%#G`6$do6LO8J?1c$0;I@ z@D&9-z==$O3_){<+5;x;lLd$4wdJw%#Te~I3x-fk#Bw7p-?)5IOu&G>Wl~Jg_Yk^} z`v2mw9a6a%13usW34_8_TAK}#|IzJE;`l$g-DUJ<7ys>cN80Su@)?8Fl-gX zEFAQ8*=r(w4NAx@jNd5#{`Qo*`9JlWS29Q$Lzj{mmV_&lghwbB^(N2*Fn(vJ`dkou zpKv-PQ%n+3R^{sLtV$FY5BS^{>4e^Hrl2TtnfjWeaKz?}z8gTDJ50pyBxAP})O|=| zKS-xlrW8-{!nuvk#5H?&|Av*j&&~yCDD8<8)46w@<2TU!1q!4;{MdRoJ?^B`d64vP zm%PVRwZtd!c?7}yftcRZnfqA5U7;!MjYb-;UPXp27Yu2H**hWeFjMRs9N8XlyQg+R zUTp50-97R1{tk}XnW>&+XiNuod%ob|xr1waz0`txWjADUmFrt$l)}k>z;Bem<2Ta! z4b*FJEhWbiOnCo@GrRJU zWNPoAW83^$yKs969|*lEf$h->dZ8y6gIa(TjM)^HDjRK^nx6mLADY zG9JiDa+JVaN6ayPSN?|~-SC;@Euu;XxG2#eXCySNn7V>)1d~= zpBJ5o(XZP%?V5qJ+s{h^^DUxAm>I&8KUwnA!t8Gst=Ddf> zj%l=TZA_KjjF|$2A;99D>;4Ek<9(!CA|{Nm`bT(n>s1wfhIY2&Wv$0(SyV{Du`fs3if>4<-Uhw6Cj}+^Qm3r|PiTJ^$@a`?MCDCA*F0^bpu1 zAMqG56_it}Tz}FVhRA&fcIdeM)M*au1TLjT^re9QX=#G4oxRt|W)_JK4#}B@vjVK@ z;+%gw1vyunmXWPZ7wU*=9f}eUPo9x!PdE+@UKwC`tS(+qp%0P*U^db^&?z_g{U<)t z7Npg`sigH*{sk0kPprV>#ktuk9CUa_hN~AX#nnXpjS@TY*zYUqiob-tV=fuC2U*gEP_b&kQx%Sx(0tUO~T(n$1fN*`y8^HUKxLaGQ5Dvg)>oo`asVeV&(I)E~T$xx3|TSW@kU3 ziA<)k&9L8Ai{jDGt)>k&iEo%Mo5$;N@MVUdLYb7^oAW?z2yW7+Trceb=kbL%j$LE$ z9N^5trCNxTf&emr=C>(ZhHUf+tPNo{mTkIYeU+HaR( zYw2+H0-fMuT-~o%0Ft}Diy){Q+yy4pZyJzb15u)5Cw86HqMu?<@U|;n0A8%u+1}5N z?fJbE)AbfC<%_>cLvRKZoDkgF?O!f9d{{4C?i=T#rdwOzFVA>1PS$%4%_gl9jCQtF zE9`G^DmHKu;UI(@;e$$&wZ4INu}z2LrDZGh5VWl?eMmW>F|Y56YjIv5-gz1_8F<_t2{{_b$c#61sN8V4-~+zG;tIdgu*)7z7Lv4(-`+>Ooyi2M&*e<6i~a!l`>`{;ChFR{N;*>n3eN=xvY z!|b+7hJBH5I-9Y-kN>kDM*AX0)I+f^%~zT&?kW@43i$8SWm9IPCJvK24esLo*+Hif zF_jVZS1x?@P@Ji_T$%58|DK-B@DRF~Z|f?XCXKbBSOWv+^w+g3VoGR*Il7gz$?fNc zx-GXIk^tEU_A7opo zaBPgll7AH1s^epK0T^_lIDhnAYXt$&T#tGy=6d%$u3Lgm8mw?)mRS70(h2nz40f#$ z@+>C}m{UHVen{6eg&h)K5JNHi>a%1x03%hqAN_NZokiMf77eL9@duKY9I01UK~?@n z-v=Wl+E;Dshmxh<*OfQ9Rj<8#7c}rCHTxzQ71?K-pkQ3C^3xTH<;)Y~1g6vC#CDsF zFWXh_I(q&eF(OgW!0BYslrP455Xk!`nMYtyEy3FR+r#0a`T?qj+z>Nt0f@`Pg zynt&jL;YylYKrhpE}{SkiyzJj^8y>04(bQK*3NPbF*!)4iRHcolV-~G*L^NMPQg9$ z!8l(2hVR}w;N3Hl#VOn}e!ir^HA`^c_)F~VK$eHo`t3-N=HyNW?Ug;b>tWqzGI8i* zSq^cgQWBr$M(nZGG0dsOuioso`#IWvMdEzOwV4sy7y)~7*s5>=5`knZG`QNdoJ+w! 
z!&&lR#i+NO-fUTR>UORT<-LjHFfK--n2F<%@>ODzXL}I`!kJ9Twa3>1kh&6}B zLsi)YX2zhaUdLGc)QIsoK3J`sYkdHT59sOwgZCRwP6re|z}mq+AiHy!aWoiy@Q-yR z%=FH6-*2X;czQ2E23uNMVjAgy8us!O*U-tM$5mzl-=l&b6ih}1(-hQI^ci`?{<`oMYolksf^; zk01vu{SSK`yd};$IeMs12%M!!La6_)i4a&xZFFB}#8O z#VxV)YQ`5m*S3T#*ziqpHbZJWE8MLJiu#Ha4ZGFf_?+eUE^~0^K3el$`|3=-9szW| zTs!xeTneWum9ETj-t;iza>n<@*Pp$LGROU-8yr2wV$jcub{bY;_YdhsW_->~kA)Gq zAHYJ~mo>^?n}ZE8e9sFW3Sd8N+M_K&ho5tWSIiNE$)6#SOs&_^jv_BPN9=p-0+Kk$ zFL4U`J>#l>w%hXTQhCX`dC+HD;paJ~6jHV}H-7-MXvd8jp&w3;=PoATbN+yUR`w1U zeGsQ1vPAf7SxlRN6D%qW*iPdh8Lv$9m344OqD$KVfw)u3fB1vWDRSQ+7W*dUuZ6Ou zcacTKuS$)tm1+WEaF?m9)EUkb0D%ITxj$sipK;b)Mj)ZiVgL1$J;Tqk3ljT+AgR0KbA*bCDG-M_s+%vD_#;jjQFSSjD6F|6?r@?RiLi z7WrKUOTiy@B|XW>6YZ{7pXO_~SFobeMdDc}s5bBBwOB9tx%NKPuItDsw&!E741cfn z`Q+65e!Kd9QgJ)vX5l^%NKcmn&GD&CJ3C&x(g|)nJWYq8!CN>-l?&6QRVKec)U*;2 z9~@=nmk1UZLAqsD-Z<-hVoSl#az;t&ZDcHNc(wV-_>H|hhHgne{R8}1o6&C;#h}v( z@>o;X4?{%61~yGy>sYl`q>bjQ4_hfG7Q`5Lfq`lJ^xMl9lJUdOJUe~_pZJf9R>?;u zelO$qW=w2bO;V*k38Z#Zm+$Z;hTd47AY>YWYqi2#vBw=xE%Fj9juDUVG-1yI+#g{# zP2S>ay*%cgVjB}p@7(Y$<8;-KTRPKrn`N>4F|}vLoX*Qr!bg?$gZCY$IOq~zILG!= zKN5r;y=dt-ClilaGr~gEqda+!eU?wojh|!w+UEdjr{Nq+U($6Q$J{%Jgze!@xV z=CacQ-bfT*jxZ-xI|$Uew`f@q_Y&#Wxvriuao}J*gPd3)b)M6ERT`<%?!4I%lfSv| z4!DbZP-4h20P7e)_#UDNihu9kDRIqt7u(~$UywKrN_mh^CAG+F(uZ~bF9<|Po7gsXiiY?#&p_bF$A!{H1!n%vI#KM`(%ZS4y#NG?|P zaU2TQ#eWjOboq{&w1=|AS~-^H8egR&j?rgn!`6+j7TarQf`%CCc+gK7!JEkNZTBu` zQ=dk&)c1#3O^|6EDj}f{#=s73wXa$+IX}ePwNY9qfgH~ROmMAUp7_Rl<(b~OWVm|Y zFYR`2p7-48<2!#K`v0&L{9$PLKaQc{TjR(70%7j-TxF2M5ZMxM4_Xqo0J}E}O^Hkk z1I}g^ke@?vo8UAsPD4)=w+n8?K;Zik{Eade>*#g+QbPbJFQ`uelytI)LH)07eIQ^4 zv;TbljGf1x?Qf5aZ0ua2Lpk+*IT2Ti;iy$?$TV6)R9c8N33{EcY=w+*(zj}Ii$SzP zUU>A6Sy&W}RWA2Iqh_7(aP3<^*E;5to1vBIOPj4*X|yHEDj;9%Mx!54+e_^H$g>(0 z9`dh4SJr!7iz=n(;_mKvK+q;41_d>&iVFF5^s8lTk|fJ9%p;?hhtBZ+`~NK?`{(LS z!A^_Dh+U7Ap}mp+4*KarbxEnylQBGe9dgv-7l=#4@{k=`rSS}x2FAcjLA?k!!zSP6 zHhY)7;1O0nA3RxCQ!(Kl|Dxw&iDO%bOe(MF7nzXlEHUK$QvN&(JTPlVR=SFI7g*&e z6$UsyX7>q7YyFSF`A^TVCg^RkLLIgv5@PKT&Lfp@piecP$mYKToS$Kc2$eYK0_!#z zq_P*)`Rp#^EfdSU+;)t2v6!S0HH{p_zt7TxJTY}?#9Ew~&5<8;a^`7l>e$-d52h-O zsbm3dRF36y$_Jv;kL{N?+aB}RomJl*Shw{}Hoi3yu3Zu*X9SbSLOr zdjeaC{BAB=B{w}-I1#NIk1N7ZN>J|U+xx%ajHEyK+!}fIQ>M4G@lGeJ`kEr`#?WN! zRQz#ELM`L^&xW^zZ4m8bH3*gZhHX8}6%vyB(kbE0-MYfhLDUH<<1x5f@*H_OeWwjw zUU2%b=E-$gZhG3?i2Wz_KhqBIc@$p%@ z6T05VBzjanYiWz>8`5VS*RW+{B{CG`5#_HWrP`?Fl4ot4cszz#JDvW2*!v5psJH%q z6sH+_=!T)B1Oy2IC1>aux>H2DBviU&=#Wql>266$B@|GU20>ClIwYj$euksZbIy6r z^IgCHf8BN0y=#4!pY`J1`@Oe&_Uyf9_8upNC3fim7mX2G6DDbIc}hH2XNEstEI=r~ z?jZKw64&Boug|CCssh}*bdkQCn!-l`!sB{^6emR7^$qp)I$=j}{PujuE zP~#{|deZjc`o5_g`oJQ3Ymenbn6-`p=<#%hRWjZ^5n}BbAeJAJ`XD6?y5)PxDLE1fdF&cKOy4i8((A z&6GM{a}$xOJ$m>Z>Jo*B$T1&`-fq^CI0xT#Hni$K2saXVOm;U%kX>3O$8W-`K>5W+ zK|FzunX9_L0ES?UPhopBth?t)e*5iCNGeNg#Wg+-Gealn1f^9Yt3!hk#p~wteClwT zoty8TTMOiq=8LGWf!7!fm~$Gr;~tAat^A5H_#F(kqUl?!21epBDh+NKFik$v%Cp5& z7ry&Jv4*d3Fo8+cyH!gq%z1`RZrw5Ox#7zJwnJvd+AE_Ic{Q9EZj9;{!9 zbCmpd)C~CY5p9~wXOyK>$cYfb6V1d

    [GIT binary patch payload (base85-encoded binary data) omitted — not human-readable]
    zxhe>~rIkF+RR`i>^l!fQR;mL<#-7^xf7p8ysHV1Le^^Cv3Lp+40y2n#Qv_55R6s?7
    zUPVw)h=>7XhytQANq|HJ!*DJ}L`4}xP@EW*K@>PL35X0)hKNE$h9C%mB#=4h3-;~q
    z+uirB?)TQ$@4deNtaYets&>_`UAyYHHz7I4u>SNCNo?>E$rquHD6`eIc_Cqm!HOzx53z5K>lV@%J=gRC9(r!K-<
    z97EpREFT&$GF{7fy7TmbQvs~x)Qlhc1!eqM`rH}y-c*k6x8|9i>uew0T=%16-d1TE
    zj?IiSU^+i|DN7{zuy&OPBl5et2X`{Xk-nba+DNzwy_2@;F1KbQ29`(O|AqABPt380
    z-E_av>V9K+Wa@8__3j4@8`jJJ4Vi&`_+M=N)!IyJ7bN}9Xp4Gjv?!wy(uNpSnDypu
    z?Adq1({CwhTP$N)HngTjE0viij#}omrs9;2q!R`$Z;@vHVBl!H4;z{rcB*&J&ct3z
    zTr1mTr#_RMwvee^?%h(t7+h7fwa+Yy`|dHfygYLI{O>dDni=-8@E!EwRL$0=omlVc
    zum#M}I}n?t*nRK2#N1@RnQGEYT*?W3p%8A+*V@Yg=cS>Cjl*Gdp{DoIWLc({j(uzmjy%%yo;U2d#e~
    z?0lzN*DjjBPL^u@P-S1TS-Wn-9^$|DM>3Qr@@Ot0up1tMV=wx<$<1#`!^>Z`<}DlTp;`5Sf0qO-iGT^Ug{6
    zf4MR;MCoUDXx=sY3kSqW)y0j>$Jv@psbby_rQScQbVOQ5tG0k%_j&k_*{}`hyd|ubWt?-6K2RST}3iQ7yFCkME)SJqm?iXWJm;~bkT==
    zJU=C$AZWsf1)2mwv$n`y*ckra7B8`dIMLvn`a9VHU;DdA*2#Eb7pc^bA)5c`;n)5%
    zn-ZYDzDWb262?q2Ryo#M3P~SfCi&iliI~!qw9>=HPobyc3~@P~%dmJ~s{dHT*T+vc
    zLFE!jlLfmKFI<~WfQ&Kb)6mxf-Ut)wQ^63XK#@3~FhznUpAx<`@ai$*9$U#bP@HeU
    zAF&XRJjF;xN-+`Q5NP6)^oAt<(71)99uiDRpQrW|1JR|AHh>7blqgU=H1-q{P5Bev
    zUf_bFe2kc@Pw4Xo#nBmrDSgPSBaXC)J=r0#041%^L=*k-CP?gmDatHk8j=h`pBY1+
    zl0JMihC(C*4~IVO(hmz}5CX2I&d+Pv
    zOmJrlbu3)zNg)H%o49?B+2z=Ed5TP4DMe8)gfZokRW
    zT*>rPq4^TJqc5|R&_6R8$bX0t)%Y_$OFrOrm=UI1
    zG9XC?6IyV+T{KiV5Z>2XGad1|GGB%6PpF=m$0tJUs6{^01wxnSLz1Qb1i=&?kAcK{
    zvfdMxKWuLWKrWh-FMh_{*%eMN@5eEs@sHk=g1mMWkUoE0$YP48^~x(=NQ6^e1TF?I
    znX)BFvt&B|{1|s~S|0=5`&i=d0!8q55%!e}K|(^Q#fX6EES*L-ldJkfbNnMG8b=Wg
    zfuHEmcNLH?m}(SR2fi2WIXPCLR7Gfw{*rOAXS&{DB14d2S3?-F$lD-&Y!7dKI7Sq$
    zc^?#yC_#gzphO>fuPGkh%8xWbmXn4oMzNBoCQGEBFsiW?+nShtw;1FU&>uzgW_8YmO~*up83V1slyAa0m+9-AjL&o4fwdWZ0K;S^tmz_RwgDIR*zpRtoI
    zy4zZ4!3y}M^mPtXIRB02i^oEK!4@ZBntj7!nr$`{$L#UF~^#)LF{&NO#HZhK=|-F
    zc&C3N;sE!@9G*jh41&NGqZo8}!a_XN1@b;7;n;eZITDGOF160S0QrZNa|w`DujJ9F
    z^kyv$m!z`Uk4E?gWQ)fNqDr{P$80{Nl{b$*pCSAbj?t353ZDrM2?Kpk+BHxI8I@dy
    zBz%2g$T(#7gcTv?CS4L{Fraq
    zFY9Nx^CxdjFt#R-H9h>8)XLV+U}%nppG1X+H$gv}g=Zu}XE!4VH0OQkn8DN6KXa+P
    z%Dv%W#SafY=|j~s1u7mIB^}U&rYv#dduCtgA!3zRcqvwemop*pFSC+JXSG-|+st+Q
    z)$o7^D+phE;6iU^Nq8AfI7DbQhMegle*zoi#>&?#ws`a~Fr^H7DPzv`$*LAj0vlyJ
    zEcC7{=JupQ9~mQ>Z*TW!T$mtyAmBBc8KI(SHZPZPC5*8jY_;IHuq9jPODh?4}0givcn
    zh{^=l1qw~4e}sgCb0)?h$!dQ>`xO1DPJhH8H#(?mno%2PQP#NKF`SXIzO6N6ywo@<
    zoL?cH47)b=3DYmknUdS-Beu)ymX!BsuX+_gFK_+Us>Jj=9%eyT*a?3a()@}_>aI_>
    zZ`rCLJiYomyFxhFENogh5Mes4V^Kg)I&BeEN|=<;rF+#PM*;{5^u-}ug7is!kWeL5
    zxQq#1@F>rh;HB?=d~_5i7K=y4m5{En$o$01Rt#=D`JGK7L!WJQ-g_{@a=@A(FWGbQ
    zgyb9QYZ#x`QV6B#2tGCP(;UJXPEhcZ%9JE#_Lonw!gDu!`MlA_dN6up`waAo1QyUX
    z$p`5zNmYc7WdfnNix8GUzX!SR?B)JBB20~y%%>pzRTC>TSh!g0wZDt6xiTHMp)x2V
    zF~Xv-cmA;Rn7XuKGgBg-7|M(-9Hs
    z-$(K6S0s>qp&V4=YJTmLF#FDa*N3KsTxba7vorh%qY_N`R>-tpECPQ(l$cu~UCY_L
    z=%5yfI+MfeTs0r+bcqw^lHQrc~zEMg(4|>cW^8FkxVV(3;>c+kEYGLRHkZz@=wqnws7UVwn
    z3SqF57#sK}*JeB&2)G@dcayC-UG|f?W8)8|x8um?+*P{MCq092r!zKqOCr*rrZP4#
    z%HD?N9SzvYm5y~h_v1IgNzd)&ouaK4j*l4|rcOS{c*^9L5siy2Qpe+(C%95i>lhb;Zx5b@59d$)hQD&EJgR~<((oz@7LXuj?e
    z_O6e6cPbFkic@b;Z&ZJ)-lYCcy;;2_VAm)0wxI#{&Xm*LL*(uA=eOVjPJU9SZMQCI
    z&h!iL{iKV1-mBiby}q=0_POs)>6V{|5T&LKe%CLi)O@}=Ankqsllll;s}AjdU3#PH
    zo}o*nreFN7Upn|xRgUreismE!0W{}oq<4iW%TF;PB7r4bEIDd
    zN}Qdtgm(Y6uKae>0*X^JZrQw)O|+;g)oIgF8x7w%Zix$0_B+{&y(+w-`8mdAuJvU$
    zTYM>Qi3%y_Y2xwgfv?&>tbdfKlo8I8!|$*PV)Z2|1s4e;SB*bUKSK7dXpF8w>uD-F
    zr-hri2DAFw!xmL;kmOi5KODB7S}$ymSOg8!e7s=6pJ-n1(U}s?8>rFK{65TWjJ7oo
    zpRtt@e(C(-Z+rBgd?
    zYd-xKvfUPTFZ}yZT#z(me>1B;&#!5%LD%a5N
    z!{7X3y%2xrOLXk7#@ok=Y?KD@&nA9x;gWmvfR3WYw|CMR|JA4Tn5}>6Na0b>NN4qn
    zufOP4EeoQARx4Tn&A(RqtKk=-^YykN?Eans9X(A`enX@6Ve^ZughWzm_`rZpyrzmY
    z%74T9rPmG%!c5?Q^!i)H&!XKsrSbWzk-vzy@D_$j9hBOgVeQ9v57qnOdxz$q!+-50
    zeSWTC5y-YTwg|TU8{JUeO8j4ye(j&f1^b!A|Ha(jNCJ}Fep@mySTc``9C}kX(0|Ch
    zhFc8{+bx3PaBOE&h}?r(AD(*HMxYN{V*|fyji+hCW)l2|3IoLGNh~j
    z_liI4+)Ozj&CWmhaUK(=X8-x#*UshK>=&jFUfDm_;68k@$LJsI)s5x-p1owD_h0P&
    z-Q-T5Yq(&2?p&jd`B~PHL{f5i`#_eirV;;9qqU9sqC(R@HU2?n%7x7=|8Hpdo!dLJ
    z{C{ichyJ4Rv%e{6WX~$I*&$7ge;CEaeB?ZRrXHnEy@+f=o*eJHxGd?_i_dQc&GXJl
    zvHtVhv_Ct%E}dZVCug6OZQb=d)-FE17uFo`Jd{OEB&COQ2D0=t&G^HO*0$1*xKiIV
    zk4gjhTauQZiQkc=e42sCzcKTRe}y&V+BYz
    z#PDquipi@2cVa`@nB=}a^^td?aT>ofh`!D8m+L;D?uCDreos5)@Y{j@!{+0EA$in0
    z_oTQP!n&aM_oV9^dj>^z@?ZP|d38p81}lC`q!vh-DUY=9&;LMdPB_mb->--Nvm+Wf
    z&)*bnR;olM`~Rj$s=B?W98Jp8^4lZ0x=sEY?7ixJw50BTDbJ9HQ-zd#4t3
    ztZ_)P^V>VMuw&hQdmi~1kL<`Jqj_X!9@&LQ#_-5)Jo0HC*^@{1=8@0v$UZ!>ACK(M
    zBVXW=19;>h9yx?Z4&{-;t1BLh5gJdaG`krR02L>~Dz
    zkDSCK-{X-}c;qx5IfF;ev`lx9=VA}Zsw6&d1NY&{DDXA;E}s{~UlYVm0*Cx=+EWBTmJr{4M56)&gHeoEY9G~Q9>
    zIC`og%!t`h;V3*M4l|nQsB&D4F$*`E?5J~8!$gD|u{s(ZH)1Nnjo2N{jyo~pa3f9!
    z)$t(4EW&82qr=e|6A@t~y%Un752hl*h}+TQ7={r?81Xv#9080O-bi{6WXBXt1l~x{
    z@y+omrUGvy>=<)=g%RV8L>-fkO&BwRk+@^Z@e?M3U?l0_JC0&12u4tc*incP6K2m^
    zI7@lf%30d8*3UAWrG9z#Y}i6qyID1J5Qx`pv*hO+?7BRA4oung#;kyk`deZa88}_;
    zn*&3*Hq45YzuVNZpf!AC=ir>!aC`3eqo>UzQ`P8w_7|n-AONIy^g434wn-Yc5gCV8Io)
    z1+aat>*ho%Bd))`JeRn}VBHnBg|I`e*g28Q5b3XH%_FWe*nh=s5zN8$(VWN?h`iUA
    z=MgshMjaBo)f8psC+$3o@ihYbHxn?^Ke}!7r7GA^7^toaht)TD{cxfUstSL
    zq#C04^{n|s6N82;Zi=u#*GF=Zs}SR_OXljXjM6|%zkWTRXl}r}(x(Kw?7CoXq!vQH
    zYWD)-UIWC{z9q02SLd5Hw`}f=Eqh-0%p_%3%I=hXDf`t{&RvVxQ02Oic+kN0s`fHi
    zvMX(_oE~CF)t7}tTZ7A2wU@&vu84VZ>k&Uyxh^8w8<4MRuYf&qb($x)5pkmG%Oawq
    z!RxEqaM&}~yYu9bh_h9$i;2z#U#@Daz+Sr2=E>_QAz*(u3~72MdkV$F+n^vSb1iJM+ogqahY;@7TFZ#{
    z4BDbHbz$4xUM!S5jJQ}$UPeqan25^MgPFVW7RuQnu2vs?rQDzuQ*F0gKFdJ;+U)hP
    z18#PUYK|iAR<|vee`v7l+UyOmBW^bq)i@v?RoktQe`;{@+U$)mv|HPvn&XHU)om-}
    z^9{nU&E5oab6c{w#tG3-ee|vKJLeYXHs|-kMBMXd1*Ju$FN{~tJ&E{Qt))UNGibY(
    zxfvGh_F}QzDFmyUtU|0Xn7Edy55v3h7R$LJ=G15*h*btlh?xd3qMHd!&KaF2UqH5UXpnRuUTx?1-6LV0YYJz~sCTTWZKFiOmM*iJ4nr>25rj92T*+
    zMoX1QHMl{{+y={bvktr3pmnr{tV--KC?IAU!SdWLDaiRE+-tPdh#w8wh?(1AC2lVi
    z$T*TO$vbkt<@2Aa0%)M&*<{_sFo=u;^vgfUg3PDuX%+erE7{o-o
    z?Sc)sty78&MYPmh)*x~W9!0yE!M?j;l_J9sy*0Bmi9CabXg6~h$L*0)WCUWo=CUSH
    zWH21W8V1HxS;92kXDM68AokT7td?JD
    zcq^uAA571Ex3Xm%!lAZcwLILgD5lB^rtf}P+45(EN3DU5ysBY)Oce@d?EYHW@;V~0
    zwm?T-!;lkGWeu}%pS5)3*yZ=|bz@iF>#iHS`hNSmu}Hz^1$A$`7g@le_
    zBbpgb#Ae#U@b0{2a%9AuI<0lYJ%&r-GVNeQca!CE6vWax@;aiGp+Q{c5!iM2OUvc%
    zBUaaGttTEZw2R9;3cKU}V!2#4VoM!)J<-PSd|akIEZv>AT<#HKZ=Kc#;t|6eahVRV
    zY&P33#|#VNGLOOX+;hb--8UYsitWDn$Sk(|)+3kL?mN6P{e;_%T7h*1
    z8|66j!3TivWe(zxEs(u
    z0ekO$7cN(fcv6=a{5WxsE1>NR>vpHX%Jg~{)U$U?US&t?g$mRGQ`_D*UiKL
    zLo(pftg9R8q$2kU(OvgtGcm;QHK2V8#&W-_B3FSJsdLpQh8cbVv@tM&J55CnL
    z=o1Nsvwqfgh0QsQK*&`g7QJybAVwN${jBW7&wmm8==^`2CCcoZ^jPwziTs_v*)GD2Pt$0$8n-wAa6^%1WF&QR7~wx7TpyO59nZMoK2bYq)A9?jlhmEz{e3
    zxNjv6N7Ts3WOxtHR>fT)w%#K)++S|4oaqJY#j#-jA+5(I?t_o&oOHyasb0f}q1v3{*Qdb+>h;v!N%*~QS
    zYumb=b3?(*(~|VDEnqu`tYCJ=lGNSyd^@K=!OYK+^r?-#o%343?1ClfbK6#9PMd;R
    zkR_?7Ex?%bMZql8lGNMw+?X?=U>0FXqPMY)IkObau2_=3v~4xvEKxMOW=ZO63ozkm
    zDVoJvlD@V*H{lp4n#Eg^`rFthoZX6M36`XR*Z;i*bGFsjiTu^tPUh-$4<~HLWj8Y{
    z{hZLGVrqaN$5qYjBZ^d_{p*hZX(jSYnM{P!>|9oLzKe3%Wcz7c5zq!i4IpyCj@uc*qC)SxG=tnrjlXT*f`pl8gM;S`{JUz{p-u|C1
    zjLFXDgN<7MJ9~C7ZgZi=p&Y$sqtsg5_Ck%rIS-eOPS$3$>Unwo8w-Qi5jFqcX#T$r
    zXrhtzeYis}(wF7UzczZH59jbAeMMgMwb3(u8KlL8ymC#P>x*{#Z+(lq45oj{x8{wm
    zl;C2(^so8#JZTC4UPwL^=}4FUqshJaKO)IPtEy&Cn%YF(HnO#pGun2em^k38?pZdL318;Le(ap%2RF5r_NHdDWEP;Yc8NJQL`zeBGj4-sak3_
    zFvnx9b()NGzpP1Ks7Q+KP`6jAr9H5XCs)NG2WPHN4?RM(W&#_g(pSs%A&)JTtF
    z{eIi%D7kan`cOLO8py=Y*thX-K4V+r<$JL2<54}>&Uj)Ewmbe!54Ja6z8Cv79@UE-
    ziYNABN8;b~V#nj<=~z}gijJL*C(^Nk_&0Q{BwqdtcFuLw7p(ks;uq|q>uu>t78rS8&Vpm^BeZ}ftCw|3lxc=rVcJp=leysHC%Kg~w*NOeu
    z9oOIVfAQFU*sK5FOQMGkooTzJYjdW(osqNtcxTc2_>}kM>lIR0NB%*p;-5_`Em@FV
    z=$|aiuJTV7O`8hhZySkhKG3&G;-#DMzkfmg&Fd{Q|8FlTS%_sJ{-;2pdw$E6#D%Gc
    zzIe>hTV`_ab>rN_GL|e7vYe2Wf(#U7SSZ^SWc#D+Snw|#kS=VAO+1i#=&Q$4y+o6H
    zwT){I>&jTNNXT+RRthpukYS;0SCH+GvSYzNJs=Hz@lZT0BeEFC@<3JwGANKCp$rLS
    zNGLeIASS>`m@fH!eRcW62^R%L!R2$Us4cg|b~iwm-^_1^>bU
    z>B5%y#51Xf`aM+jKA7Bl)41($wu~i6I~M%Y1Jck}kIjc=
    zL>2>C9>~f-1_d%Clp&!E31vw5ABBW(GW&8@JgGE%v6i
    zgS-Bq4beJ=wW-Bh{*e~aMf@#xR;uRUADDH}RDZDi7W-(+AGP6Z>OWY1n|-|HkJ`w;
    zh`-H#nu-||ZD2T>f^Yf9TK=f4rkZ=ZQX6GN76Vxx$jU$l1u`U*A)yQjWk~p+g@k3h
    zQk!aIL>2>C9>~f-1_d%C{C^S&EzCYcJWc7N!wIGO5EOoIDm;EF{L0kH2(zS9twCJ@
    z)Txsf%#xf#Bs-ek?O@-|6!(t;sK+
    zXOHAOHuc%il8k$v-JSE;$cG)@g0uJRY`<%?!pkNCZST_Ae#hvxr;Q8R{@C#e%(>1K
    z+_UWDoX2Z@HZ&&hc$OWP^H|$wLqoEl@FpP+?Z2Tu`DJ1D*__8KeKx#F#ua9t$a%cN
    zXG7f%(I=^0olb`sqbWC=ASZjZP6wh2KOK47BCGoCBH%sc#X9{(KQGN{xUsU^8yNpc2`w2;L!R&kT0T-P
    zlnkdJyS;$CkCZbd!}pLUynyQ;DQ+dhNyu(b;M_+Fx@7n^@`NXF_#@>=$#5dF+XLAA
    zk#eA9I01RW15o@(*;6u1LUx}9#%UDOlHqvdiPJy}jbd0b3?RGRfjk;zeaUbv@`O8Z
    zokr0v8IDGFy8-8Dl$9mJ*N`XNfWtJ3a>;NcvfCBdOrtC;8NPx%;R-0yD6>n33CL~?
    zFy2Ms7Y|1uPhfzSF3M!_a2T@t6p+_N`Bpp}iac=&xZXwSD;^F(cDn%Qx+ouuhl7wO
    zT!6z}6l(Es0J8feu(^xUSUh|IdEz9X*hQ%-9`;9eI|JjLl(OQJ3}o61^VX9&IRQzB
    zPwfc%#(BU_I_&CkgV;&QD8_mtU!no8PD)ZS))Tn{4WK$Hq+;x8c;$4hIBX=AJh#eG6G1d|J
    z@)+RNL2)d`9z*Up2B119w#8U`BJ57&%fmqS2MVJI
    zYl_@)7$AP2(2KCfxw%jGyP!sPu-JQV-VR#-*yn)Dp^>vklM+2{z&}uGim(Pq!9ig9
    zJ>^vqb~6%p5a@hQd0vFwgcSS)l)R@rEy8X<;(h||yr*OpVb>uA2Y}%Bl$0W@E)sVD
    zIR2iJScF}J6zm7Kzo!62*wskfegOWS5?O@RLJF*bX(}bG2&;j_Sp%I^NX%Jy{IqIf#=}yDOPkZY6&{f4{noU)#^okh7Rj(AQrBWY(?;*rlSMB;m!A~{Jp5j=s+)c
    z{5>m-7xfN0&=YQR&&t+|dK3M4=cbo+l(0=N8!4|gWt5_w(8ebMA6k{3XwdSzYo+2v
    zjY4x=;mEsI^S!8-(HsnXEXj)NNexGHPQi1NtQeluOK6S@+%L(h%aa<2=A48hldS4J
    zsprugXZYA1t5Q#@FPd`#o_ojYp(hoK=AhwzcdU{;sUB#K6C8QRD%z9kism@N$8KAN
    zcv4TIImh9-x2?QAsZMClF}UAtD@RYN1DfLiN8Yxw_N3aOIri|eTUN%N)I(^_QF!hx
    zD?Lx@el+I@-0zl^iYIj+nqvn?-m;qSN!^X+*uuvWt+*c4U1-i>cy6KK)>OtLv=KKWDy=nE(gSrmQIRN*&X_e$bU4ypf
    z?hLB257`yeWY66d^nuOJ)7v;&`S4u$%{{6*KxG33`D{3DQ}Svcy@9g!*>KDz)ztvL
    zfwJn^FmY3|HgKwevf|lr)FxGJz`TK?@ND?%rerNZqk%H-+3@8}s#<`go+2(B#&1g2
    z1bXW!Q-#Cfn^ZM{%6iIJ;V^DfvIdY|PZ=y6zO+eI1Hji)dJ2bwHzltEPSsO73WozX
    zsjdPdAixg-Tp+*(0?Z%)2?47hKmh{85@1XM^hkha2~Z&cawR~T1c;Xa5fZ>p0=P&3
    z8wp@00gw`4l>|_b0AevPCI))MK(iRA5CgelAWaO!i-8C+;3o!L#DI+$FcSkvF|bMu
    zD2M^E2pAIqJtCl41XPHCToI5a0^&tLgb45xJ#CM$6YdeuH_~dqVz+p&Sxl?$+>K8a
    z_Q-#H3DU|y+6$0Y3erkIS}{l~0%^}d+B1+=2+|5bT0Th218KP+EeE7M1!+$}+GCLR
    z2&6p(Y1ts{0Z6+K(y~Ar1*BzyG%`rb0BPwUEe)ilg0vKnmJHJFfwa3IEeWLE0cp2E
    z+AWZl2-0qXv;>fL1Ei5a+I5f?57K@HX#hxz18K1!Ee51TgES&Yy9UytKw2b7y9&~-
    zfV9gXjR4Z{AT0uVs0FZVOq+I}M=Rul3NIM78
    z{6Ly7Nb><{XF=K-kcI_m-XP5jq8q4rPaf6VLS;-g?|<<^UBOAeRHAae#OZ5WxZbIDiWW
    zu;Bn^9017yR&f9Y4j^U&V{D*@4K%ZX3O10-2GZC-JR68$1Ac74g$>xS0W&s$WCN?%
    zfC3v3vw$%c(8B_nSwIB~$YlX(EFhi*M6duq7U04HY*>I93qZ1fRV+Y(1&Aksu}Pq3
    z5@?JAdLycGl2*u
    z;Ku}9n1BrvFk=EpCa{VLC@=x>I50L2^o#?|<3PnYkUKv5WNUtBIgWj{<>}7+>2d-4
    zY}->y=pzOCPJMa+`uG6y{Xm%hW}!BfktS$8F80osJV)oB6I+f6q6Je*M&C6@Xcq0y
    z2#>rWyAP7gJh7#pFj_FJWHhch^4X&OCE-yBWXG0dj!QM90i}q^5qiD!(
    z8I-wMV(BM}7VwmeSehdP7VU2dkCGs}tx)DUiRGA7z*jQjXpRh9w7(}jdI;IsLz#I}
    z4RN$Upky?yIr812y_@i;6tW9|GRLJFQh`v(h^IL+VbT7T@TeZLOO%cQS^7z$1tKLQ
    zf#wLuqJ4z$s0*@tE*%519Fq#fN=72h5uQami|~jcoriP`sRk4+kSG~RG)F`h?E=Ch
    zu5=#KF{Bz&0i-nha`fESmUF*o%S|kqb7}wVUA40`-tzU{@Y_u?)EOW8!>^{=Ca@1&
    zG`DHdd&|G`mJe^D`h-`f+TLRy2r&0+(QDzin`Z2?HihQy$k@wB91d4av%SxL9B6*8
    z<>3c@)Q*g!jKq=f6KS?N?8iaoWi1c8_;Yt=xHA&JhbO1m7O@`(n}2M1_=z92Gvgv7
    zaXh>`&9^&yi#*F2+|
    zk;n^I&9EI{KMps)*ZPpnk224A&qx%6pUANN&VC$WUPdY>4OMJv4nrq%pKW7$p6P9R
    zU!L?JRR19R)E${X77|%9$jac4gA5YrXR%AGU(X$<`8^EB_O<)RA8!?OA(WQx*PT0W
    zec_e6i}N-`E_QF+oZ^69{Y*p;xXCWsjyZj6-`*Y0x<+02t`lK-hV|H7<1l?U=GyNS
    zQ58uA+@;f}s(TO8!WPPyvVh2vK~@MdWcYs?6&w5|9mcapQ!Y=%zZQ78Mpzb9u6Lp%
    za{LuLjTeljL`}t43%uMSY6>dPIMGdV{53j_myD*wOvTphtf*4?1Ra*^AJb`UHL8|1)l3!0pNU8;syuRnzA4xLZl|%$sM^D+
    z<_`k-vk}XRD-%x859Rtl>NGw!s`hlMxkDiD6OmY4Nj*XL&GmoLY3wqpmOs_pC6M=x
    zSXNT0?94hZ+c0H^1=;CIc5?jx;nW;^EBavQi(iu_;fmv;kj+X-tn=&t?Sz}+1;syy
    zhCUMH#=zKV`hKB*wpf$cEmWmDg+XaeH4k>3eEUC428AbnlI%?S`?&mFnEtK5hGW35
    zi=D#f0s3x}Mw=>Q>KqGsTi}FYC!XrR-7^vY$(euqAiqwye>IX#8Tn4^wLX1PQ}vZDi=nLh0qm6w_w^Z6_z3^=ry7CtXU-;HhLTNx9-LM`;g&FGE)
    zIuU&6vfk#WqvXgszxF?x1XZ5s(7br8@{LN4Anq9E*@(~a2`d0knbs%EUF-vsu7KX%Hd{mOTpTBlJs5_`hxmEcl>4E6wEzDikMtl63
    zpP)3tg}3>K`Gf00^FftIqD8kc6qb(zKH+WtQNHH}@bI9@6VXq%F;7@N$MA)3^N;ff
    zH-M)GRdPgew=vIHKF9IYxA`ado*Th)gDQEVx3@7bSw4>V$+!8Z_=6ik{GdvKXwe-^
    zHOt2duiTXH&iC8|ULRC>Ci>|P<}J$yjW=w{_u>z30@DXoibQdDFz;DDC-6s_^3U=;
    zk^d8!D)4E;D|hGb6?$T!w#+G`%B5pT6!D27%+H+!#(z3Y{5Nyj2ooe|I!x&KroT&k
    z`yq5AecGs!$8pS%us+OeApZrx{V$=jtnN7h34I|WvKYwnKvo7a
    zD3Bqc3<+gOC_}>kC?rgXmc9!oBeEFC@<3JwGANKCp$rLSNGLIwT}aWQaO2;wcQ&0rEb9&{G~=+yQ;l6eqp~1&+{nx`+==S%}$?
    z^Pq*qn82n>o`Yfrf$Io;0tF$E1*8dKB>vEZRDo*?^%2AbC||Ns;xDm)rZdE2bdbOc
    zfqG_41wcvO&^H2y5IPM?G9;kLLXsrC?}LO7-Gs8BE>P43eQ$-Vp=Ho~saqlv)Km{K
    zAQC~G4-E=4Abo+RUl9YGxWEsv&Sl5;nxL5Rrtfk`%&pDM4rqu?SU!UV^A4
    zRgB?umP5R>P>9fb*8+khf_!Ka6blJ#e@H@@3V}wU0lHWziJuP*`AbHaQp@}hf|yHS
    zhY%oJh)!Ua5+oLUAwf)+Oi5z{F+fo%B=VOCGNdu#gFJ6&j4o-Wi+czHx|IEtFbRoM
    z2ohV#D+1dX5?Sz^MVgX2P>=$OUlAtrp=l=XplB2<=V(eG9-H1r7k|?fb$~+l-Y?SE
    z)(lA&^F72n5<$;~CbI-epe@u7@iCQs1b#3ym?BQ{pTbm93H%HV<(Rv%^yO74@QCS3GkwSbB#pe9N>Mz!G8y)XSK%${HNZ3BU}+p(Sj)8e%2k{U3*))SiF(8E{=2}CG`h2*&=xFB1Vyb(
    zzag#$J8W6M-ZjnT_`^KIzK2noOYtb4a-Uw5<`TR)Z}Q8-NKHljb{_SMUZf@rzZq{)
    zLO<5RT*?TEV8437cljvV?1S&x1};vfcf4aB9ST{+eig%asS<6*;JY@1i<9WbnwU2S
    zLn>IWocJz}MVsyMUF*Qbx9A;>%=ZH!F05DU_$~>e&DQv?)!^a;`mqM)(t!~1K+veJISwXI3F>RBH#93US0iED8-hz&jDwh5o
    z!ZT5d(OkCB%%H7Wg~JV-cwkR}xYF0UB)s$$N9Rq`A#p{o@hldXK*;CD-{6bs^wEf=
    z(mdO+UnI#l7-T|=PDvZl+dxPtl!)=nk8|A?M=2~Rv7m`uMc^l2t|}}l(W9lh`dp1)ShALO%k|*b
    zPdCm#{lX=<42dQB2X~E^ZDKloE-VsOXpoNYa+-9ivs^)P*kw0qQdhH_bW|k1^z&Ek
    zIO}y9-yd;aJjkJ8J}NYeeL$*gDRbyH=S~-5cMk2RNYQT`6+L=qAhi^j2c_A-IBKG
    zf=-mRyGBf?`jFdQYd4LkP}q?6UF$EJUCNu*ToMHte`@ySKI?fKtoJG2ms{BL#+<(N
    z+QwJ=FWmmRr7kFL@e0Qcn~(8+#xrAm=N2l|>Mn8K?K`VbTr;HPY~d>^7^}%vayIwn
    z74+12DLI?@atfMjbd{WU`A!s6)C?&)@AUm%kXw_j=)A*sxFD^@OVQcXcc37?Mpx0<
    z#P>@7GU1?TO)v;vnJF9l~K-}VBV8eIkFZN6;}y)sR~x)6#m3jDkXpNaS;`?_!`rRlU|0WRQoVV)
    z@X|zoBa@rxW*IxSW_u#-CwHA#zct$vXnWn|V#jp0lW05K`(ylcw#U=9xYxvtt=ln+faqC*rtzAc0yBpbSNQt{_SO$&H*N|@RI>6Fu
    z3|K=-*k#39(ztaE>Bg=-?iEB-+`hQIal7Np<95aEh%<@X9=9!SOPoR6=D1C98{*c*
    z>Bg;zTOFqrrxB+fry7TdgU2n8TN<|{PB9J^wfTQY1)uD_f<2e)8&rPfo&
    zf#dDRYmXNlzkmGJ@u=g0$32ca9N%|*+wnEWmmi;doO5j8So^WsV@1dAAG>udsv~&x
    zI~V7hUKsXfdZo}Cri(C$#{P~>pm;g&;`L!g6)gM-69hiwjP9F{xGb>P?!
    z*tgr)+85d1x4&f{WglqoVeeqS&)&%1_LA9`sqn6T7uyiC&r`l#SuVE0X0)l3#^9=*
    zdEe91Qi8|3%lsIXX=))k3A>u747#45BwaLXoYLwFI7zx-RyX)@-XOdW-UI&#?|@U`
    z&G1He9lQ!&0WX6W!wccL@W=26@Jx6b{2u%^JOLgLkA+`@Ux7!!L*YU23vfU98Mr6h
    z4ekO*!;is_z-{0M;8yTGa5K0m+z4(6N5a>`*TS{otKci)E8xm-1^7bvJos$5_!z%q
    zD~>d5mdU-`%MK+m%+k2$d!L7rhRp79U3&vUNrPs$xpuu3EbVi>`SD6vv{sS
    z@AFHfuV%4at=@o3>D^1NP%kH3K5|+4GL?`(I6_b+Q1J=)BY0&zHKKl541M&1e3kax
    zhf6?ca6#p8MMK4-ikJ#)#r}$Q6$>hc%NxocmB*B0%lDVBD_>AP{Ho#AqgOGnu&?&N
    zTK8(ftKpXoFCV>(d5L|w|K+-u3tkSFHIzLniz&mF?JwIOthl-2>WYgi+*cf3v3JFm6{}Y)T`^|`Yx&pZ@0V9Ef3}>m{O0mq&$IM&
    z^s;nybhFm#tj$`ZvnESNM<;8w&gv{}9qlYF9jz=)9nCBa9gVD2I;*nOb=0%ebkwp`
    zbyTxf>a5H{=peFGbX2n7I`FI&IxDi4>nzV&rn4+-sm{_YWgX?LB|1y8lysD`6m=A{
    z6m%4_U^=j@#X5_#7HwSA)ELvazi~lhLqkl%{)Pn&4fQeg`|B6fH@t~?v;WP4Hw|?$
    zb^GfU)HT$`)b6icP}@)wQ?tKjK}|z-f%TPb@mt~z;y1@{sz|>eseF}s@!~sv7<1nZ
    z6bW@56_5HE1)$VA2hKfQ(qOPK@pe~~bC_E|x6^;r+;ZUKdOQ;Z3Fe(TY
    zhzdYmL|s6gNBN`9q5M$3C?C{W)EN{O<&E+}d7?Z}r%~=GHlcflruPxBRbL
    zXQp4nAx6$Ts?W?x7)Ys2xt|i1;*qj1WlhT5l!4^hpgQ`YVpIh9BGhx#GgKj}
    z0F{r*tE^TAyW~i&6@A~gTbG)aHCOqy_yzc;*nd**9(uRky2P}w`HEkQPr%8P58djW
    zLpj^6i%lOl@9}Fn6L2WSzFYnM(9hegi%c_{XZy8ypSD)4Qm;@iQ!iF8RL@m^to}ef
    zQ$0=np89R|1oe3JSoLe_SJWfaL)C-SFR1&epHcTzcT;y!N2?!GKca4MPY(s4J^0s4rBXr#@R}qxR9c
    zV^%`Zx>|)j56b+u|k_qW9A%P(tAr2w?Lbiph30WR8H-r;B5ZoSI8(b88
    zKloN~RB&LhN3cWizTj=aYl4>t&kg1T4Ft6Z)dm#>-4D7I6crR0PAA^`8?4vDXU9y=q<+7>DtS%cP%k^6p6vRaZnI#hCX6D!XKTO

    D#BZC%Tl#YK_K`V3(tC6C=0+M0xI9a zQpKXMK6n!g4~x!s{RRDb{W<+v{crj+`qTPT`jh$-`s4az`lI?I`osD|`h)sk^#{Ih zYMOWKTG*Z`mnD}imm^1%Bgy5;<;gwcejfQcXyXi@#CKh$HfoOJ>i6zpNr8pSFM}4w zL0_i#1V9u^kD>QRU3f>>$ZQ|S2a5!C*PuxO5_%k3p281^U;ZK0@hd_8;5MB+0R|MgAf$&>FcnAo;354ea;aNfWnf3UQ_4v>0 z@$~ihw)OZo>+!GG<6o}F7p%vV*5jY7$KP9zPh8)3WPI+PIv_dS9Pk=a>WRkUE_@&) z(JiMA$dE@RHw!F@Zk{uHu1R{4_mu9MuKj-5b9mqJB|7k_V03Rpirj1#8`KF2VAKH& z0>JD6Fe(5>3BV`-7#M(&2Qab#Mh3v_1TfnH%r*eC1;9uE7$|@d1u!DOiAN2OjM5v@ zjUG2VHhR+V#OUdCX2kJ~?wSa}+k#I83k7Qg-wS>b#LIc;zeO0SHK-Y>H>ewFG-wzh z8<0kt4Vp$;4O&Lp4cbOJ4LU};)A#kd2HZ=E1H-fp;NKbvh5$t3wV$EFfngevk;rZOBtdA*2TK z9`Xh94Z|gE_zthLiDVtpJ;^WZPBNqg`zd0??u0eeiP*q6BUycQx`idW+8S~ z%ug&rEG}3-UYDSosGFpFNB6GoJ>4|jhq~#yPjoYMvvhNGNxFHu&vjqu7SQJ~{&GRx zmQ!_QA1$YjEdjobFc)&+!`_XtazV^t^5|Zk#YNiULXw?6!m-FzHKV4>MSgKnwYaca zT;QLef_yEzd%+zYAxRoo4-VJC@;Y&H0oz{hUx*p%Paf3FL$be=n~0O2vYB~yKLuMg zg)EXcPC_tN=F&%Z9hEz}`zZXVV$`hik1JRcj-PMG?&TQlE&l)|C$V{Zbi0B(>OzXt z{*0J>VPn-^_}5|lN4`qQ+uA%qs0Hi{z~r-<;d^&un$2wCptM`h1?*|RF*p9H{cm_lUoaaWf-p!bN^L%ifKOXw{BXUReF12H?F^6d< z-EUsMadS>XL;tnnVZ#&dSFS7Hgx>qOeg}t)n)cNq8R?IB{!xH)h8{MD-4}@1s+}As zY50my`eS;r(v-8I5ufy9Jbxp=`GvlI0vqaw(A7*%l~^tL{hOT~ch&yzPul5WU$GP~ z1V$s7C~nvamUhAO=L4L7PDWz#95AmaG2swyIiZ8{E~o?|(H2uq2@Id%>KCk%b1_TE zCR$<06xZ-Jt_;D0axSI`nM4$ZNU;eo;5sMRa=c+Hx1l+zLITR2g zYkNp|B4X8(^~Gsv;Jy?1vB!XKBOUn#`#+M~D(Ef@>>f8n5+eB+l(rM>%#Y8z3j75b znMC|K6R|6;@Vh#)xqZi@Bk}|h7|GHuzvArGH=C1o>^vfWOGKVzW|v)Iah0^$a>rDX z#HjQFc3N)~&)}<*#A(Y1k>{42RPB7?hiSld4zqV53P>kw=ZOWiMr#jLNqRB)ibn9i zUL&ra?6x;WyakBq$5c-cnSQNqnge7BFDpKcY`l1ja$qPp*lk^ulx;HW7;gx@CMLuU z9Cg{&=l%;j`-&=DG^ZH%9(I?3mV_LVjeT*ISM+C5sw(c>KyyMC$-@3wl~eR<(MVPH za|2BY86;Et%-%pfYjUo=HMxcK-M=+RYd~9a@-E+OJ$}9qSQx{6pCNwt?+9{=9U)fx zwAv#F6eT7__%21^HLCy#CMI?yDrhmB{6t;h=HHNGFGUSViBDbw&l=zZ9spWzF_NE& zu@_tQ)t4hThxl|{`{mlyH5ZXYghY^X14VosETwEPK4!JIgnquUTfQyao}L2 zi%_C`F3N%U%H~?ZwsQxnB3uL$Wpj-k9=|etGZxUhP@DZ8WA>Gp;nixQG4NE}s|j3m z7H|EWnJuazjlXjPpuNWQeI$Cgx2mZPBtyLn!T$@nL(N>@Q!Rsvu{P{?w`$p&6Dg-b zqF%G^xaOm-g%p=eiQzk5jZbF+??&jKr-v{4FOs#yEs09Dj%79e@e{I4ll+XMX6D&S zR*o-fT;iK#(@gec95pr1R6;rC)L6wom$f&^e|pr!{Hc<$V|vZe`1q%NFJfkuFM;qS zp@7XxOink^4BIM)7!ZU`a>ZN%A^9XyIQZ%>Lt1N1=^{!)-Q2Hson7rRh|HWdL&E>@Fs(p)h_$Kxgw0;Jc)=gxtM+bvf!P}>1flBHDEJ`%2$Vb ziPzOK9l&ylz-pJY7uPX$7m8_KcRs6AJ#qa5&52og7S7LTPIq3bM|!gN4>Toa+!Q=^wln_XY;=9dl!C- zn^*Y#ZWYVZ&l^tPtJ`-YT!@=j=)IhZ+3BnX)V-{Ir^0V>@e01*rE==@!v=$U(fjzr zg}8VH-b>Go6uq2JL}vgwP4t#8!{s+bBIr!7sSs6 z0`XpQb*2_sDi&w*-a0(2JbXERQnt>-B16UWOvYQ|hYt_y#e2!t8CyJ2Ie8}S?f!@1 zhuPvMch#M;NUxx%&50pb`m-SRF+i)h9zQiRtR>mm2h95W+~pe@qyKlrv6A|ZYYP70 zi29c!Y6e;y)V|aiDDv**PaX;Z;HUL3C)8ePakTfP&O-g~ULN*P2n5&ED^8zz(dhD| z>15i&Jx7k+vXECroy}>qdh-0F{lomj$KowyRgKT4Hy(Ww|8{7owve{}N%%>&hm(iu zZdgdG8lFuWum1eL<8Hwyy#_BQne9*hbx|s(L2iHjA!g9a>jhABr}OKyYoB)zTk&`I zp1B)R#dqjmH(p5h(Wq)y>jaN_U4M~QoY)gB7>_z}^(P2YIzT~PK&Do(9bPT)KQcWR z-TXdN)%+Z(**2@h!sEGi*Q=o}Il26BC(FnA;jWf4FT%YnAHN6>w3H!-<18PO!(%LE z3c_z$J}wAPv6Lwce`NW%uyRJrggwOm@Qz2ucK;-}8Zp>+tR5CV&&Fu%?}#IfqECMh z|AzkbJ$#+*X%-xp?I#x8X4})OI8obAthjBqr`O?RZ9lEUDcPQ8!>QYTV#DpTJ-r@x z*!I(U+)3NhAe@EmC(up_6UK=hKY>I)4>~18PV6+X!S?>kgO{(d%kesiPQM7MJ>sTc zopu=cv|_aYJzRUQ;*U_nlzuhO!qoXGB;CYR}Z7p-o2eMj`1kdtE+@FdR14V1R=^wUJg93QP&lB*dM7%h*E`kC`~%> z)SzxE?y^5s^*G7|lBYyFXG-MdZ8swEinkl(^6qXo%H!2+H+sfcX4)SJ=ir-*?4 zmFms0yCf`;RX*ewNla3;eO&dn*h3OY$b-I_qX|7PUmi|dbXB!)TOTSOwNI>4i9RpN zRB1j_`>r)z(a8Q@)&A&cQ7Ki^v$c(_PZW*qpHvw~XNlUYHlM9+YRynY*^{aqqU%J{ zR87y-Hn(OeTGC*Le zEvYlxRM6qQ)CPa?+wlEk@uy1#>K1>ez)l4n-AjGruX!78FeZ4WbbE9^ub%d;z@zi6 z-xMe9zgBm|mPlUK779AL&^o91&3>YKAohbKpnWUo=y%)GJ8-6UYXrlSjO{yqf?E>` 
zW0*;xG+4xJ&sHbB+A9fyipq9cX8tel=b*-SM3y?e5#Ign=>c^9r+%RT`Zm*tZl%xa zTJmQYd(aJ4x`1WAUz~jdd-BGj@BkONU=< z4w4)B-yzq3FZl)Y!j1(~#!pan(oTNI>Y@3C#ApHI+|~fa2RXR$p14wpx5E3sMHrwr zfA%l)%@!{^OC!|H6fU+s`{erkKPTth8LRXt^`jB$XTGVSb^7E!`E%aMxi@y;Vrc}8 z@W$;!%!l|^>#M7W^XK9&X?U60dGA;~FqC&_omxFS9Gid&Z8D79P<Vsh(Qk0jQ=pV0OFYZTZY=cT{GK}3&tzR*dGR8B8(A@^zx``II z0dEj?YCA=7GaY&r8@C^EXq#Kv<|(;<7)W1{n)TPeF|NQK7I{5 zXMhOW4%OLgxP?uI}h4BYcHfhtN?qXjZNAyTT$M|V8oE-CEHv%_j$dhXRX^52_dAxb`Ff|)9YlW$+ zd81DsNW)H_K+MZPg9K>WTpU}%`Qs>?4Cp)$up3SyK)axq1!%fl99zS0#8FNdxRnOz zvl$rbaaC^(AB&@e7|{1Wz-~W@;NJy}5HLJ&I61FE?Z`h2@);Gmhkt_Nz|f8y;xY-y zogLk;GOByzZ&8S|^2w?~ zU@`9W=x}Bg#n+x*PC|4ol)TyL922frO?hHZZzW*|Q3xOTWTa4x1h;eSj#3Lo#y<^f zOf$YjsVBgaj|#^;;&y%)4t%D3zDV~e#A;b1QenwP!ZDAzog2gTzEE~~(h~}?hSrG3 zuw-N5n5W#%P2s>7${9~Oxe#k(jUd93QNl5q+|JG6di|6qo>%K`wYvP%P!>OJd3E*R z=MEm>PM;39pQES+(zldhucHw)@Z=t0@ECXcOgM9n;u}cUDZ}1JBi_Q3KMR8=xYNId z+s`xn`)qNKk_UuiCb^ww!-09q=Rmqo8MYLS=!Pc`6UeSt`){oYeC)68z4a5!FpFN6 zh%e*L7hOE5XmgR=a^QA(^In;sVASJs?KQRUicl7aVK9%r4o-Osur-b@@CwW0fZ$Mq zu7?WTTvwlBA9IC2Rd~!5@^mA90;| zihabj`w`KOYu6*J9oLRW3U*vu9}Q!;#2*ncT#!dt442>|1q|1wN5i&UypM>sT%3=v zwp^e`3btIVkA~4)^ACw=u9=5eG}pvK1vFQ|J**hl^Lq+nT%>!$qFh<`h@xCi?rEjJ ze4LoKIOt5IKdZ%BIU(T6&;*EKEswqvPFWeamq>4{#kx2l_9{b@A%-*_eP>*rGC)ss zD;;yNH>K6{aGb{Rs{osB(|PK!SDX+Bl%Z)5+8g=tSjvLT$5_g|Ok*r%PNpK3GAr{e zmhw&JK`doP=4LEqS|%)(G9_~{mNF^h5KEbm!7haM!fh8qd*DV3p&#Ko3!zN7!a^tm zzGWel4&S&C+6`w}2q3v+ueCT_4>U?M$JZ?U;6`nc} z`WhBD5c&!hJP=w9!w!U2!E6VVLzTlc&I`PMGdWatFv()X28n~`o|{h_*ujyRcNWGA)2>32RCL0ux8TN zH?OmVKb}};0Z*7%XAZwIu?_|InOJ8AKRvO|6por$X972vSZ54JOsqQvmzh|15-u{a zP7fyYaos+c$j5cMuniyA>A$G8?nCrA)t;}_ruvg4=NEn&9P6PIsxlTQsIa@o= z#UQi9aCef+qq*OcgQK~d$vV;8Z^^-jxNkkvImCVa8O!28=2^J`cf_+`gMNDh-ur6T z_CODb(H`*P-{MZ40Z7m2R`0P2=Mjl}pvS~$k9hI#aPQ6lBhTpm@3A`P5%=~$Pl)v~ zm3S~}5BES%iP4_$ax~#?oCQuir)R#$qRu0-^2^V2zt0ao&)uA_bDsNcKFfJ-T7J1p zzmMbKSA~3#o#n=SkgetLbC8W?!*h_8CGj~(%e>(R2x(5d0a7=Qy8%L&V{d@=m>b;y zshBI=04bSoya7@$AHEKPnKxVq$(s|egJjL)u7hOEvDZO6&5f>uwwo(l2W>OocpbFf zOaTj8XSNXwVlf-O09r6@xB!|nm7H~g)tOh(6`0uDZU`D8*%=B}0ZWVGrJ{kvD!LI9 z`_v85j7UBQ1@8e%i{aCvfwn3-mWeHNLv$jNFF?Tvurw4e6$3!3>2XYKjT@pDk?dh* z8w%Qi(g_7gp|*sAwxU=kJOm_$N(}))P{AP}VU%qMND!qH0^qCpcT zI%v?C$rdzd#DoP68Zzm&0ev+ow*mE=q}qT!n*`f{`b=zXKs_cpHXx?S78?-VgvAEb zWzua8`e0IS4Qe+@wFb4Blxu*BPNiyq3Qh%UfL@%k)c`#|rK2&Ss8Q9JJPQQ}f~B?b zX?KCPdiro5_L~P{K`r?^6dVke*1=1q0+0s!#!pzTiwJi0WHxbdXw2Biv4hW&LE_-c z;B;L)GZpY{pzD0XZo7!ste(ti-F*!7!MOYwsNFdA7^uxS_!y|g*!CEx$ynzY=$-MF zW1u(2EXP3g#@$Cjb;jjKK~&>zSy0x=a#>Ku$y8a;las-+p!AcrvY>}2b!0(lC%4Fg z?ww?j1>HT_y$f{bWce;o(#h0apv05GyFi4Kw!1*_Cv|p#Zk*h*%Va5GvITQ}Ox*(J z>Q3$EhKk)oLzpyv+{)*XZ=xVeDxigenp$CPCxn@fSr!_~WO)wlv5GXjUDbjv|VO)YRhSM4&r6w5rOAtnI z+Pg^RI76ibaTHE_8<{rF*j|E=-%UFlnf8|P;2A<@H|v*5#L;omg_ZvAdrh7BwP z{uba`KfQJW`^gXCsR=ETpjq>A<5z-Y0NOeSNTj6lL@F_FxdjEh7BRG!8jnf8+5Ktzbh(?F5-FB!%} zgn&G4eWcV&#(pAVqdbi*l3C7BAtHF>Y3m}>${E{&Sm)_s3)uZZh%{~JTS>#ueAVN4k8gnPJpI7});I`}p$%=8H0 zQr55}LN8~uHqt(tVVs6&l%c6aNJTUDry&|-Xvz`HD27TJqE?2c6p8F)ou zb&fP-;Zh`2lwJAg9I4O3rQnuEw#!q&OJ^%@S&-69T^q5W!gVm=_VQ z8)`sR$TGUp(4O=LHJ~ii9#sjmC%r}uD&|HT4{Q@mGN_a|NeVX}*di!qP`Pk|6kt3c zA(*6J*?EHGX*>WG6w|LPJwZBeJRmBVbg(kz1j*5OKtxdNU}fM5lC|-GkYLh*N}CfT z6XQVv62xc#Bw)!}X{bZmWHi7g(9Ti`(;;yhbx8!XB93gu$ZmHD4WB8apZv9wHq->2gD$XnW<(LnQr^F4qJ!wpX$rB59v= zi4`c4s_Zo=*h^tv7#X)j&I#3*R;FDT`Dlrp7OF3)w7)RYXo(ybsxPjTx-e2Kz^7IfbMM99O7h zJ32CV5?Ly699{_=38;tl(p_r9t|bzc_??_6w4)-oIgcn~&hB)n3nL{G_we^RQ96%` zBy&b8W88PT(8As%64m*goGF7xMN&Aa${4?$F7;s}i9{{_UT4a}(UAl*WRD=9Z>6%$ z$Q3grU69YGa?E`@k{PTDou$iI(N 
zy+m?=n+{ZlmY{NksS|e3ZRDFHpZk%F;ig|I_m!a1g{k9q@iy{r`@d;z!t)4obZF)B&;$e6n$jgf)6BzPppuE#{aIKtD6G$3ngU3ovFRFyD-xP-*SNz0Fh z9ZDg-=0E65kv*Q!!&!vHBuLATg`G(u*6~;QQq+$p^l{E2F{#qDz913ayf^#@{V0cz zCw%7A(8N5JmY)cFl0s}cC4W4!=?n?6yZKS&+h?dmA?|l}))w+d`u!HOtjt4^JI;`{ z?G}1a>Gup3Bb4#RPSGO!fVrzcMzkI2M5b!Q!-J%^GD4!2>S-t^fs81-YbP=lBX%4l zQDlTfD!<)F*$8Ar+PR&`+#T`o04Y~SNT{;#KFU-eBf{>)iOgLQI}Q}w@77P7u9FE= ziaaHlYaMg5$K7q+(O6n=pMXno$n0LFP z)Wc2E75W9d?of!QP+nZ`3^DDyqBO!i(-npUCht(nPoaFc_>W_lyP}Zc@#zfb=hG&; z1C1lQ8X3~hrw{B7G>Ytc$LM=Dt-3q#Xk^!0M*6eq9lHY!BD>x&oS#h#?hf2f?J{z` zQE8n^J|r~tiXmj)u{Bc6!7WJMHMa5tiL53xRmEsVbwDD;?A_4vu9212B=RnysS3s| zRL7=BF*`SHc~^Yp2O=3FG(};UqB=Qea)rAl7#TMH?;_!TBbxBi-pUun)7M)hOPN|` zs1)T(f?he4VG~p-*E=)JtZWoId?G_3NN_TT!eWXl=i*0U*yN&Y!v!-HE(`XJJ?4oq zr*NG@VYua@FyX413V6XuB4vvyszTP4qw+&486q$h$}lzR;EWK{b(55FWvjGKC36Z) z1v7+Or`O~K(kLOR@M%F){MnxWfK5Z0P z|7_YI^7yl9oyhvUX+-4lylI8V`rK)m$m6-wTO#X8(;|__Nz)r6>xt7F`e)Y{py|GX zDQ16wd2+q&Et-vm$WiXdGZ>}aE|0?BJR{x~a(c&@qpMnJSZr0kBb5J+;$wk2&OLSp zqrIEwaX9F?!aboqjg%`Es1w}EXEFM_d7gytc&_kJD8Dh{dlaMW{&cL&f>^}&Fh*JG zG@s1ELM3|f{HJix3}eVHtnzj@U`uuHy*3s_9;F8Y9t{Oz^Ge(Nn z0N+1qU4En9?vAm7Wm3a^3y5%G_Ff9bDj|Ve%md>9%d8DgEFdZi<9aD?trBi?TY6wz zV3~E{1qH;t!t8w%hE+l`cajI@A}o^@-d6Bx=#+t0_{Zg-rZPTKPS^~VZDmzJc6aMx z{2=T#bItCsDkyOG=;^Jgi%*hUA|S1N$)ngFdJPbdm*X>&_=Vu!(^Er@C&fYMkgMsJtEgf%p02f_bX}rt= zjOO0jKHPkVLaLb1MIg+zwSiZv5OZK}?I)c02ZaY>w=M!VU0dIt|65e`&)9@@IS-nx zUW$t%@3~ElxiitNAPJqk3AO-ab1LCl#&EQiM%n?KsCtj7V zAPt=i15&Xl0q_k9<`g1|3vWVKkb`=K14JxJ$aQ%hVLAVmd-+=ovaqa+>#p2DD zeTVT=cR7w9`ApOj?@gB~b5^i)6nYG-_iRlCOEqG`)oYFL;$IZ@i{E+-2za*M2irGd zqSb3p;5EM}7>El!0k(U#K3IXrzgLCVqN$5&JvgK>=n*ciq1=JNYw)33UT&7xisfzb%LxC+!0=_fk>;RsD=Du_g1rL>y_A6E6C1=-Xl9b}kf~P};DBl3 zjwyv6iTn;A;S$P&&tni%ugO!0KQN{6Sz^xz;PEAtCtuzmrbUyd2=6$h@KqxJgV^%= zwPFygE>oWJT7i`n1JSDe%@61F#q;jVm^-So3eJ*~13+p3>KWgSNz6N~sP}l2S%q_w zUS9!X04ksF-6W=6E2qGIPXZ8fIfwwW%?@32Ayv|g^zP9$fkUpIue?Ic=8xfz`Jb55-)63B^gDNlX8RTc$Z|I6ebDjG%e|kA~lPi$#QEJb<9$17R6)*`QKeCfmKt?n5%^EGO&nFYm%{;XT}%2U^=V%nc6$6x|=ecRL`2) zJE_{4G97s>nr$6Q9C+-=XR-fU~nOR4K_=P9hSZRg3Y>weGkw9fWD z&;7dYHlEvcwrxDu>$+QcBI;~gc|z;DTX_8HY+HET>$;nH&eqvB^VrpOH}P21**5W< zteg8hn>AcOSx#j~HTs{g;CWWkV^i~(yBEfGsodT7GyGcL{-NOgrM~?`!Ap&Yo0VG4 zxcB^S5ok5z)>9oXY4-cY3MJ}XKy^SiaXA4(ryi^nG(0fxRtO^QqFSjVY!L0>5jJ6M z+YvTFZJiM|e(fzIY@4)MM%ehYyNB6$wabUuxV2M<**LX>huJu^ZHL)F+B(B*Y}#9f z*;ut%hS|Prbq}%4Yq8MSx{%#dTSfZR^W%(|e0AN1S)J4@Z=Dst?B%Z(ARZFz+os9Kqf(iwDEL zHWaZHYY>ar3N_-2*vJ~#BDQ=DqawBhHEbf=Ej6P=wwr1SiEP)^HYT#gsSPKv#i%tT zutlj66WAiu;u6@ds9_V>aB4;gY+-5&32dQi8xz=q)rJXdfocr|Hh;CxT9=n|HH)VB zCz2eCUrCbLch^mM8C0t5(-fK^7!Czn-P>~t>cPBBz2>kcu4T-NPk zh+I}FF_~P}ZDJz1tXsu4vh@TsCKPebmC4nGM=5$%2wa!0h`Nl3D2`IC?&QR0)3+oGvY zLJ~#eo`fWb20sZQh+?0F+!D2Y5)v~HN^G}&qPhcKGhIa9#PXY9Ktd}Vp&10Rkp>2H9lZ#VEtc^Cbg)e;q zz|3P!wV_RX(i4j#NvxqZ^c`RNIIwlTacRr-@_yBR+GTAy>tfI(o&~N=VC^)aD89ZX zz-|sslNj~RBVABM4pC;6ZukRf&GLBW(gc|ZW zzXRTkW0f?aNBR2R0;F-QtR@r*?u!9POzal~R2}Rb16*Tb83-r>+!qbFF|qFv&^=)1 zXy61B`xXIJ0ry1#ii>+B5Ktwsa}=csG9QE)yN-N$7tcDLg`_)h%F*C4jeOte$f^I6``@? 
z(n9DCwg?ds8VF|}LN~TW2#CfOPAURA2Z_%$1_r=lML_8w zF-v3M3pl9|NEswP)EM{#7Aph-2Z?ty2719s1AOKspRIO#T!(o1}(wv-s<-BojO1{Wm)PQ65enu{Y?BM~^*ON>=>u?H6= z06Tk$cr_OcSR(;o?}e!&tGeu~3oH{{ z>h1kncmBqM1s*NEeYGHH+dnt<8s)EWhuw8Ld3V5FT|uRta- zxbq1ZVW-wG$fPZ9cLIjPsWlifiN<{;V5S^f{UMV!xGVyu&#~19GHH!FPjG7q=jf*w z`1xc?q&&srzt9u?d>%-oJjQc;q5JyzBuk_`!s9>F&Ha3CN~Ao%b9|<&`T0aiq}<2j zKhYt6K4B6msd$c0^ci0te~FY7Jif2(mk!Z#3BJdQLVkbu&8Xu83_gAxxcqS# zs)jrWK7I|*`#8*_hTI1}9tW^}9G*iUwZO+?0f0F?fIzB)kH-L)nZq3jT512a|@*dSDFz!T*xU#%G&ZL%j1=6el zY<5Duyctze8#MTJ&Ye%Y0~ned&eNPb%cuPwxG*_< zLvzlKPrDTuni%%boU`E5ZU!z)44=}RJISZr2n>x6D{Iaj=F@%)To@nb*PPqOr`-Sy zjSY_>7t}r2BnHcXuioTY@ZK(Zr$&b#USkN$6dx=C&U=&l!FxZ@Q#3l<@fw3zKz#58 z@YRdl4&K{Nx6$Z0i`V#yO%or?1I~Mqso=eBB#y)tdMl$I$!m|q+JVvqFFdLtt_Wb> ziTr`bJBjY3+;I+PS%*!BxT*jFPUJcs??n1e^lhH5>+~Z^{`$C_SHrxJ(k;N0{j?uXR}7u1xB{b#eU=6CiW%0QsPQ_n z&ZqP{qMeWF0ivCc=+>g059pero%iYDqMfPqc}Qmp{R5;knO*|vyiLCY=}e#pKss;H zts$K^>6(zv>vVBQXB>TAq%&rw^G6@)GXGwL+aIA%)yL-Y>H!@@U9MhTWHK;2bqTPej-Z8iBdReu zZvN5Cb5JyMTC}kDdo9O3r4}QDAbtU(lLTQEn3_p*Qf1$+6X=l%oN0~Lh zUo1BWUYrguwN-Yfv@li;UuQ~#dZu#b7UH|gMrX|ezt;?*XXfR~mWck*ldA7qPNUNT ze!mbw+xth4s8+T7b}=omctEmZw_yk49|MVBhGda_&wXrG2VSDTi0Nn9xxJVlyPKWk z77*Qk&I--^Cfd?MF0GnS&CVH__x|m8_umuW%Nh#HGbiz5MD{o7?CQ1hhYIg0>xz&p z3(^k!9yqr!KRrJ?GCReb8d}_o5ZhI@cma7iJ}WoZ#5BLA#?-t`R?gDVF(cC@{!fWt zrWbRf>Z_K?7wP{ytMnYST!G1qi%Yf1 zDqYU6&Z*~y>heBz?Lj4Gh<$0~yR0IwO<&*7H0v#uoXcbYJyS`2@87JE%}IxUKEqi| zpw^P%EZ%%4@6@kJ&429ihqHaUH1%9l1X&bkfnGTO3xr$bKMbo+1%9LqQ?m<8Q_r8$ zI1R5Jmz*_u)*k_t^rGM&XYo?Nk6GCok~;O4)D>cgNx@>>v3&UdGCPF*dV0BwJ%;e* zZPModFz=ZR_@xcBj7_RZ<@`m0^Q*V{O)B)YkGi;v@^8xFR;*sunA}YUbh#~c>3?5x z{IJgVOFi7*JpZclJL#SOxVA3Z`|`Q@?2nHQE#s3+zh^DW;XtcNP!4^~&Xx9lW#`K2 z;#MvMa#1W7Vs#lQ6PiCqDifMH7qoP=ta{F(BByfJ;#p4RH;V^3l`|GMb1J7T!g4C7 zEH36$PFgtRR8CkJ=Twec>|e}V3m+qeZ}e>&wL+#uR(X(oV4LeJ=>;-K;mdv7hOOrA zM3#DxykVoX$`1vPHH1C-wheaFx0C);HvZ1T{up4d(fOR66S#hS>&!Ax@b7)jzuWly z>pth-ZG8UsKIh-gf&abF`FC^R-|u$*r8L#L+rC^-Tuca@^KY#=!8to;sP*%%=U=u$ zFSG0U$5!aY?}Hf4a-Rue{R&T>2j!I+O$xtSptPbl&T?adSO?(#^Prrvh6!Qv0;L(< z@QvF%h_xSXH4nN|8uv~Z`;Br8P5jKQ8W8dbUONs-EZz7{*!COc2papDdv`#{LwM#m zD84lAt+3Izz~pBuq^zP#1MEMor2I@1&m^l&$~xPQoHxXUnzx3t>( z@+A$cVGwtz(Mw^OY07$ZTsQY7f7Vbqau_sIyz#DZ_Za1p4Yr>9t8a)VJhKlpQ5=^d z+%QJ*wNa?&?(q#l!lU{?BgGq2gv-YQ{nJCL`h+R$;ES4I<4FsmtNY! 
z1qMK&B??60^^^O`ZMIZ%GktWS@GvF_TRa>sTs*RGuT6Iqce{_S7~Fvgaw*0}3ulh( zQ?*I0;(q6&D+=Gw1f4D(juK8D*{5t{Tg6TF(S^XbGeHi;*eGG+ult@_>pbU9_j=g~ zyZIh;vPj{q@cys+9$T|K=f3Op@*V8rdyruf@rh|_P$K0cQ)hdgjb~ZExxPAhGzn03{J5PguT`pZ% zjIepr)HT*AmX4nD{)_GPx0lpDl_ZbZ#Q4I~iwSz2RLD@zl__`1CAEs8WV&_CFihHq zz*bD1`Oq^x=2=@ip?0Mp8L*1Mz|s{6dI?lWZO?{Y_jMj> z!3D`>RxxN;I-CFysBP4qPd)CV?rJ^-T?LUVXm{b@q4w2vzJBAWw8-bbd6vsETlmNlh7Xq)zAB`?o2Sb_J4F zV!Z1dd9A$^u18b3&jZ`c%Um+ilTGmUF9hoe>Z5Ot$LDh&27Wl<{BAtg*{@BhWFWz2 z&r$g7L&Dd$RM*drdE-Ae9}V2J*>e~^`+#u%Efx0J@y2+rzF*t6l7TpzJqGaEG{V<6 z)WJ`V9^<)t{n|uJ2BK{C9L$-$?-F3`RSFYB69RIm`7Mqu{kfk#+r|oAe678TVPZCf z`E06Ji{p#_+&0g){z4Z|Yp+6>nDtb9-_O0kUsB~CI~Z9d53z7=BVG7d(Y!T zJ^kAJXhoo}Z5c8LEztr$-cP_!Q1ia^oR}Yd5a?@EhD=9Gyn`SAOu&y*mB00DnjZ}d z^wlZr8Y@^q;<>!1R#gf7HaW}a^S*vbLjyvHj-qN+If^lJBi-66$O9Owe1&Wc5#$4# zloRmS#{}s{D%+QynekB{KVS6{WU@`lF?j4Fg7iD;<9aZQSiKNIKazT*vgaiuaA{}uvK{>|D_6EF7lRZ5GBVUU zr5wg*OX$m?GFy5+_m2*H`c4)i{jBd6qpZ-)&4h;&j*bE5D0FiZ!EwUTB*5Gl-P}mn zG2wV1z}yJk{EpBw?x-4IeiYsOmhf=Aq*Sb<-CQ4i>kYwV+;LNYxgPpf13_@yku|_t zJ8yqsW*m(B0rlIbTx4Np3@kE@`utSF;g{A$T$!+kDJbZG@r;s!Ye%KGur4GUQB7 z*pX0bM0f5MawZ3i8bUqWohw4l1i_AkP!D(KZX{>2!KlGh+3ws8!^e89ix`axyzMxyoT=`o&B(QMs1m*cc8T?c4;X78#leOaQ{^I~xHl=u8Yrju6z!P?c4pwZTI(5oQU+SQ&=)Y`Mt zs{n89x_Hv2wTIoSAjsMkO(<&V>Gdq|wsy59XteZHc@|u-cC{oFHTOL7EO54VH795^ z_uxEB&?E81{!(!CE9%t9sAsXi7#v+i^&S~DDfZ`wqbsOW!=tLj{@idhh3Y*#Dp>5# z21l3A;)fQ$BDt(~ubJV+DNlHY=S;fa48d;(?>FP^H{Dp8Z${d0UbqI2 zo#BB=zU<*A`?C3#JLjoMLB2!q3-dj9%8Ew_Q}d(w=*|ghQlM`={K8C+cv&%> zFf}{sh3@=HO$zYMfnS*Dxl>yFX6n+94wW@BC$i6Tq!^acvREMd*a+A6GkjMfo?~XL zQ1$Qa>*+B(!Jzofs=KY7xRsSbC6Np_xp^^G{(J5{4bX)k_FBzP~1>v;bp%J~n*y{d8YVWDdS>Vz^l1 z&xk|k_*ZC<%OwRZHsj96KhMg|H)+nVS?rd#%bv4z{8QqBR#)$-(^}y_!~cAOdATX` z(%kt!2aSJ!`gk@by{l||cJU`--wvU_%^xWX`aA`nUwF7k{Nt(SU(Y%JVUD~sS>`QW zJSDq|7r%NDt$gPSavPfFbT6YUC)PaB8?S-)G1l#*a~fXftFynhJ{K>_w{P(p_;$SO zTxJfbwCbB^{~s1hGvuGf%H-pWHLKf?jB4#KXEj>+JN->j9%x^V$3@1+&VB3Co}U)g zF0G2q8DXBA``vOO`~3G4>t6EuPf6e}1*x&Cs{l!vgovwe7=D$UJIe(4zxM;cA zd;fptm1iYh>zQzRCl^OhN{ryFGmh(P;8fnzhNdNIhzq0wmMR_i5 z-(NnWc9orzVxtv?HwzmevD4Ew^5FKjRCiTAu|UrpHjRir-x?ANiy zf!1mR{{M1k)l%bXjoi{I&aU8`?ku%2mdy|PFO*fR@D_^1f>+3Jsumkpf2)4~Wh(SX z*|rb8p!9Jq$8G;Oq;>^{R(J)A@8zr3=Kn=lYjWa$Ce)#_^g&m@^X)OS#d&qd-7G?Q zTLm(lniA4JR|BPd z+)warK*K#=(9GQJj}UMD+>tS;k%8?@hQ`M6LMnPm+JI;k_Jp{amhP z7Jp>;mpdogIDh8&#|x-W$32%?hPmY!{y~eMZ~gDsoBfvZ;syM_dqpcV?QXN|yZAwl zoPkLcF-UShf#1L(k3UND2Xjr%-%~O&$u$u_!GPg^`RM=M%lel>{Z4mRX7u-(Zk>5{ zq*XB1`X?}%vF=$$!Ix9|Uuk`!|FAkQ`+ELmtgB`C7tCPFks1PoC@qwb zP!m$;Mep$*6+B1pd!IjkzxQ$Yd}q(vL)dGtwO8BY zfO|#!#|wEdgXB)P%wcPoqZGD~4o5I%RB~J|qti1We#@JiBR7(Q?laa^aHd{Hdu5RH z=$0{TjU%YYb)QT7pG?%GT<_kER(wRd_11opSuedEeffg*-6*rw)=O_icb>O)i85Pmz4T`E z<@46Zqs$grFTEb!`MdSDD6=`%ORq&={@r?El-X44rGG?sBCH1^&4ibi`bA$xSXW1y z4P9R96m?n2y85PB#`&cVQJu@J({GwRJHON}O0M?xRD64!j8CzVg=fn|B zKeA8j`N#uSH@<7ClSLKW!XXkt%$e9^m==mE&xDNn@WI-XBP{=DNgb0OXKq2W+$V(`M0EMJMAuphi=TJy0 z6!HuTNr6J1LLtdeND>s12!%X>LK2{mn}(1ZhLG!q5Pw6+HABc%L&zV75Ue4@&k%w! 
zg!mdld<-Gph7d19h^HY0Z3sabLOcv1?uH*(;}f73ij)aOvO z#G))BF^iaxMGVg(`ezZ{vWOS67F&7wh=4{Qv=M+e0Ph622%gubT0UsGgXQvfn0luUt>DY9e= zk{42%=E=T20h+Xy{fg5bcW%lww2ufr8<3q10JDJ5Spa+%AUg{HWPnf^04@W_$^gJj zAao`Gp9#p$1b`Vp=nMcp1CX5o0Mmg`3qhp}3f?u0PyUydEltqRqp zz;!9Ix)eZ%5~@Ri>riBMD1bI4RGR|VrpRhjfE|?39TfNuitG*wphXGQqQJE%vRV{i zJ0&!=3Z7aen_2}ts|tNq1%Fm0`>YB`sR~W0f~Qo;rc?n>t3sbv!Jk&iKCJ?ht3s2j z;K^08$yGp7RcKNbJgG`HsR~G}3QeqnCsxTORsm0{LZ4K%kB4kc(f;(i{`5e9`c;3r zmp|RbpKj|Dz+o8-wYqg1vmH&CUG#7~UNWFBrqSh2dSt z@czK?d@(#v49^|IyMp04V0bnd-X#q0cMQ)0!#j)Nox<>rVR%O{yaO2CUJP$1hG&T3 z>0x*~Fubi8-ewGM1BSO2!+Yt@d+yGA>dt%O&U@(2i*x5ix%2M3^X|Aqkx#x0(Eq&j z_;Kt8zcaZ5urfXGBf_nxs;sBt)=^c~QE_XjDr>2@HB^-~RNQK+%4#ZZ6;)*w6}OVA zvXY8ZrK+e>aVk_56)H}dsv-yAWC0af05=~{nGfLR0V?wV++09qE`XZ@sLTOyvjLUa z0B#nbG7G@T04g#7ZYH2I6Tr;?RAvCU>43^~05=U#nFipd0xDAh+!R1%3V{0!P#LPi z4OXcPR^bM!R0gVW+$t4r6^>J-!l`08{6O`s$f=WCw=Xt+LhopvT6Q*fVqhTTAq*C7Ra~A+8Ui_j>)Yarps~i5YI08ATpbaM01h4TqK`^%;q4`9HcG>$?r#I_ao8$ zNZo!UpN-6BBhhT6E*r`3LuU6O(S1nWzEUfRQz^hH0h~$zPBGwA3~=57PVWFt5#Uq= z7uh;EGmRSQTN>%gjr94AbP0{#PopzvbPA1LK%>8)(c@|K5E>mrqdU^*mNfcN8r_IS z-$F}o&x^Gq`q&Z8?T8RNqLLj^WJ_$eC6a83v9`nX!|jtZ&ky%%2%R9^ABugv0}^0J z>enOn>5+Q$NGv^4ryi+YkMvQG#MC1(^hnKmB)T4nrbnvRBh~7Ws`W@zJrYHaRH;WQ z*W)ciE7_tHY*CACQF6AZxwa@7ThugL6ljAI*`W9~s9_rv*9O&RgJRjB+HFuw8&tCm zie`hVwLwvBP?a{QQX5o}4T@}oBH5s_ZBRrT6u}0SYQtXAZ~dBW^_qR=H5>k#z5g{E z`kJltnyvAgz5cZ?@3+akohQ4uDZFb^iH@EYSdu$Ji3h*c{DJk7jI&W^9aR zY=~y4MKjh%GuA~j)aqZumE4CQFX3PqnaqA`eQ1R@%Mh}aNO zH$>C{5q*G&S|FlEh^P)CdJhp*K}2N`(L0FfHAIvT5#>NcuOOnA5Ycmp=qW_>1R{C} z5ye47Q4rC6i0BSP6bupFg5a+3S6TBEt$7Qrd9$r~Q>=M{%e=wMyq?RvkC&lTjUSl2 z7s``s_rx^yODNA0AXSI0@k7?CL)Q2~Yt=z({D8IUfHj_Lt;)5=bF5W4*7$yF)qZO{ z+gg=vjqkHo?X$-BTC4V!XfZUJk*58`ARb*?#J-0?6+=nC<1GnP4I0w4j~Fyc&lIvx zU{Hxr(lNYcGF77%Y1%^!8u60EWh46MU{F_~Bq-hzM@<8e3M``iFg;OlHk#XJkLuSZ zjl@`bQ8e{RL{s(D#-VYPEBJ7pP^Y@zh#L8NFeGj|M9pz|1+7WLV z4QL!e(h`Z4HC_cnJf3AZpm7js`h+;QhCbTOc6CQ-8<3VM+gDpURi)`zD<~1|L+FVm z>_Zn(kG7G3C`;?AwC&akibVTh`nnSKoQtTd+en2`mR4118rBL5MEf9mVllho0_yxW zQd*SdnX0r+)(Y}O``h$&#b=K@*i>1UI9#e)U*hn4RadctMO9LJ+hiO~MD5JG7#X#v9G1@>Wg&1g{^pxmjpp;CsHBd?-ni(i15)T?EJt68F zC?ya#8Yn$pvnTr~fGVf2*Kb~fH1BW-h;zI{l(=LW0jN@XmtM0H(!AXP8Rr;G#JFT_ z0Z_&CB)#UvNb@!a<2c7#M9GybWdKz~x7KT(i!}e(ecgvhZS5mw3)%5dp9E?vi#zqNUqwfSoYnJCBKh-8pRmJ-PlB3ayy#%gle_3;HcL>h`$16a}Nb0I#KNX z>G*=DM4CJA9$-~XpKIV_gw$lQ>*ETdh%{H;3BanHK3CsI8L8RMJ{?zZi%7e|TLiE^ zm)GnghGRPOt@R0Jz`vV{qIr`q+))|r=fAdeBQ#?%N<{7j-qEVU0DAUTk*qcIHJc9; z^;7-$M5dU{hl*ZM{a8fi7@Kb-a-{mvh|Ce+LRNo&3%jHGncye?&E@C* zi)Q%*L{H!&wVSwQ-WR(6sgYAfr$L^#kUCqp@SSqel?CP;hATJ1C%c7|xIeYp?$z=a zU!DG%h(G6L@t5uzIoTuK=D}(5HV}=B3wfvEPD-*xKoBpO)SA)6hp<&bg}^Cid(oT?11Q43>o% z`xy?kkCT|gUs{uN{yb?oSH-(6K@Ixza527YRpW|llnEem7#LOWv&7|e1=@cLePj7@ zlXp;wZ@%N}Uqi>4%>SC%ulTvAx(;^8@4k40;P_BNhjDq>j|kcxJ%Nd85xBG77yh|{ z4<1@k3~LnoQ@#a>S64@reQ0jiPeUKo?bMG4_0Plp!5z<24x4N?N*{Swv-qDILb0D` zXq$!u=WW-w0B6kbp_YYf{HlRX%l+p1&pUXRJmq5fFTK785s_QuNNYh+eje}z!9O1V zU*r44;Rp*;_BYIaeRETcushJ$#L&B~dT|)ebL3runu^ijk4fojMbV9Mfs_f*)AEM` z!D}#P0$@tc47X`(T1|jaWEi_FYz-__tT)vDUv4zi{Q^(poEOY1EbcOQm;h3CfordJ zvX$FU!zdCqZtH%5s|TtCv1ezgI?PHiSP^FGmp{}#PVyW5tNm3sEk;fWJ%g^`(?f1PMLaZ^B5w8r_${|0C&B1Tm3kvFD4FU*pYa zQ1bkvq4A)h4)x;jw}1s;IpUZ~Lhet4keG~}Nc7e7CcO1%;(L(( zBMMH>yRADgK`rzr8UU+xbVacyU3`!7ElBkJI~F*|zUlgDU{iCO%EIodu7U0M3l=XE zY#X}RQCb!@=mY&%V?xi*5xU%rM&4B~Ha1yd( z*KRX$S_5%cS@R5$AM~eD6+M9;lLp&O_Iri4Wja_@c6|%dswr0#bkOJBei~5NG_PEh zPVf7D$J30zYWz%w61to)$?;V>@FhF=F5?;$NCs9E2h!uBDBl7Dm_NBAZblx_<(?l_ z(U}0T(X;-V-$|zYXGtY%zmwI!eldOXYd&nM_Q>Av#j+4@pQhF}QBv296rq%^T89ZB zt!lfnUI%%>?WZA2aBngt`XNUP@%&Y=Nw}!WmYJZ5pFZSm%<{k2vv0L=e^dC!HU|x$ 
z>w93MoL*-)`UX)Z0CY{EU}>y#`UDX9rUi(9iMemq(Z8pFm^mpu??I#`S2~GPp3G5yp|RY!6>|UgD!G-WFsTjke4aA+lV;+x_zZCgdOKsMz0&_4 zMB{;lmjkY}YXXD@Or(9jkLW+?Y=33o{wuG**S=TLxp9dYJ;-u64t2#P7ZxY6?VnQ-B-u#L}9v|}ed6jD%I{^^m ze(QVJ{4Y;kT{C%y#E&!SY)?AQzY!&HVsf{ZIGGjvvCz6d*DkVOc^!`Z7DQSKz)^b3 zeag>+b^PQ*`b9|&>X*KmOmX*P(*kg}e##?%UTUvAR5~D?!RBP8y4p{+j785nQ^@+E7^6b=dJ+=$DIq^Q{a2 z&ZAGf0GRI* z1*Dsc&d*}MjT4|> zE}?8n(~5_bpNHXG^nYck3FRl{tmM~>$jod9w@RH!fHxkA$T>>icl|sVBEO_mlWEiB zS=J=xLiz!@ZnB8^7da~_jW0jkod1e40aW2p$X8zbV}Bmf34xfSZ2Jp{#iE!-+PlK_ zJnjD`Nw9J;HQEz+=e8cFH5eDjI%0@y)BhWiGxHkj;d!Q-^r-MCN=qqIPjgI48p+FLO#q%a@8%O|4oSNoHH?pA)aKlypLvT5{_ zv;D8j;7@s$u)KeX8$|VwMA6(#_8WaR`2FFf`h6-szd(6Xtr|@ElsNw1Dqgt1+Rv!e zzZI`bEuo|U!eO{@Lw(RKr1n;2J|_oVT0o-41t3QT#=j+^x=730bH5yQ{Hr323={sA zxEOD*@n7KKN2{&_Jn#=I<=@SwSvI^o%HK_J;wAarG?TdH^^*8^L@(l~}Kl$vRwv)n^ z;Yr#1krC_}PWhLSo5&*_8TX%v&#UCkf7_IQ?G~VJ`rnJ_N4$Nu`9g=5Z-n-hefM>$ z`{&j6MQ~s4>|nJ%_#ZTt!hG7V$;B$z>tr9Aj?M#-`OG=s5d}f0QSPB#1u|k4WH2zocU^!k2W44fm zx-b#NxlmA#ecLPwfJN(p2VpTHIqY^m*eNrNhKZ$EDV>iMnT(HBlPPVh93OONELKEB zfIL&Ccz`P{mc#NXXzVZqJ0!vmO7M~a8IUd>rZL4`ea#Y1EFP>b^oI#0XtJo2N)`xA zuwW`eTCWR}h*Yags9{nr7EB-u`<+2p!`L7VTbNYI2SFpcH~<7CFvK1M?0`hZs)5#7 z9tObv%9jdanb`41`!Iqbf9$ZxxlqD%mdb!6ESm`;g;ZLzaC~!k z{K>yj7+9Q57L8I1Mc!DjEdT+s$<57em70EnROdo*I6};6Mu3Bn;OqFoK4+{nfhp~W zNvb2ku4YZLfJf3~ip7yk$pD`z5r=_epty@rDB_{PPP}A@IX*LFar37WOdyKwMo1M9 zQeiCDFE6_VCP+p|8DMcMQ@hkBKxP=R`vO;PB|H)&iuft7Jt1gd`)hMo3`tL$_A zkMc*?(FgySxzjvPQOGs1s()ZMf6>}me_Y?%p}lLpopqj1UTQ{~_-WN<{f{5m1Jzyq zEV^M4`(23CR@DBnTAeu5Sy0ekzc!*s>OCSZrgPy~EO%YkAiuf4>&UU*k&krVhy&T= zYI}GbE&@&%1=I<+x^bb$y@9AkZ!H1FbWL6_opv@}rDk`e+n{JM%T4Gj_rLK8o zFGa-Ssq-%eEVEfAI>sX*R~A2Slh}ybrEjPK$s%K86RDTcx`FKy>QUQ7u-xU=nX)?2soi6p zgzFf$YrNwlX@l9Iw1nZZF~7womz?5C;OmaR)+^26*aqz0m`5aKz=5o-owdlovg)Ke z`uM(%K9rS0H`c_2itFFAz}fl3usKa?FdvwTnK{@4Wk6 z#E1&FBlecOEseT61l)KT^74ZEt47)2N5ILU!N8DPafXqzZ#- z-Vr`{DIcvVF`fQ$>JSjoS8^h~r^QvbQCP9gXs*&d?_6CaKI-Fa$Y3tYkT){qk^AP1 z@Aif!=b6o2H#!ZTE?;!5kGgPA@J3PR*89;{;9;>yug53w%j`cE?YO+evOnMe9$ph$ zz`c`Q57~Z*wbrW{yH}-QR4Mie>$W%)wJj^TqH+s0J>m&0DnfNhqufD=>sP`S#l zSk&6n=(JfaKJnz?&>r%_xk>vQsv}%tm!mWNf}Sqzc3h^mc18EQJ-ZkHL;DEIqvxr` z(gm+Ak2J2?S-MN_6*aJA+pc3P`2nepNyfVtJNQ+HZTQd>^-N<4P4xP_ zuR-$l+?%iF-LiNnJQJ`_O>%vHtpV$Na_FKc8i<}o_(gwDRk-GY~oOA_Cl zjhZnh1E}e}F#Zk7!PiHD@Yf%wNTvy71~hlc5^7e>_l_!4cfWNjJ07ngfHuc3v9Fh1 z=%?{!%Nn}t{ zDbsK4J5E0YP5!tUrcFs@o>)lt+XD14=RTiD=dmu&XPb5jv>IC&3Y^3YV%r}SEyfk-sfFR7*a86UZ+-@~|jlY)cMbO{Pc*TmfaD)%y zMzd)~I(t+|8kpwexn$cu#q*Mo=3zoyH)+7B^>L0ulV-9ZXU%gXxMX|Z$>#Yd8o607 z$%~iwdK)n6qbf)h*ca3AD-4Yot;{&k4K?((`-V(@5k5N(lUPaKxc3UOtDJG>wq^zB zD;;@R&eM_M)rMI95h(=Q&Vh#qz!@bV6X-i6WERKmO5|?e*y()qP{BQl7dCuvX6;$2 zVvNH59ZOvAfUeUccxBWRCIg6J^_%MEVL0^sUcPgtV71yK9RfTn*Odl4a z=QC1UxM3RA%1~ahUyMBPGli-fkNs)>g7n);S>`n5AytZV$)jz)@KHM~}g#^pp7*ke2sw^;Fa`fmii<8?WzLw=9!@$9$ zf!lIJUHea3lqbKscVb73UiQZ54bf02r{Lx#pX zx4KGqKN@+oH%c50(Q&sqE8T3`6MJSvSzU;d#}IB89^c1IeC&LD;rgkSz_iNv1wqp6 zMM2y3MH=dPJ`XsjoF+i7V>Zlc3*nltlYk`)AI|+5jOSVX!AV3 zy2lT8zHHt(kP@_uG3&VQth)JtyZ?GdhNbQb+Lnir^MlJJB#OjT-ha~_r`59w+!>h8 zMG!T4s$dW7xz?$~3z3cTeb?@0xw`xv)Cx2Vs8ebZIACt%CBr;bO~`>!_+Mn&~VSV5GT@mHD!+KZWE|KfhWK zW1gj2lZb&mftm1oQFm7J*ic@Je7TZS*4oGFGZ=2Q^|hSymaQHOo3n!&Bsh`v}ymzlW23Aq$XA4TKA?~8vY}Y;{n_uwK$R{qvss|Spy>p zd3u@qqrIkwyk#~l?bhJt5%M^ICF1yjylWTu4SWY4CfSDRQ^!{h!5rVYfh8YVXrIMN zPb1xlomz`tC@10XVQz$=d~ainS&br{5!PIZ^a7J&z$*-rh#KXS>W*vn|&!^!;-|?t*9a>VJe%et({Z(gW8$vqnG~3sYzH!4eJ$9?yUN6dQ8W^eLmd zFY_H?t(CS#t;+o&UTAqUGkeY zX86#j4Aclsyez@;iy%2J7i7N3PkRE!x6ga#7a92Ksr@Um%gdU8TkOQM499eDHTS|k zlMs3H@WtrZ#VH>o5rm*#Y{l7l2URW%>N|AFC&S45)xH;aCB5mI)!=Ho!Z4u;e1yD2 
z%|yc0u`;azTd!(8mA6_yqVe=M*oteqDKnUB?q}?uKO$d>!)M4IE%SKM(X$(gFga>@ zkR9`T&4W6X!BaGBo-idQxy0bzXlI{6{(GS@FyFGK&lVk3? zEW2FGFkN|PSuqJW=iCqEF*($Ib`0N-<7OTl87nP1d7R8s;L!Q1o)W|z7w^jwrhG4$ zJ)Cz>3L1>ZZ`eo=N(;zj7}-T^DcfQ#v@@64>2`IRV(Z1Hk^!Y6+j(KzT8eL}?LE8g zzRbHtt8ojT&8b`XZqaV~uFb1fLlz#sHMTQq%`LgT4lCPcEmWEnKVwdW*yPr_y>So4 zgj|6GnKbGh>s-(|d*3ms+I8;=?1clvJF5xnh6i`J5Fcl{ZR$d$g}Q>*d)yE!R%$ zx-zqba{j}L{lNKd3kO4gt*zd&MXarUveUY^UX$I^z4fN7fA`kg8rnKVipvfJ+;v}e zC?MT^*^$WR72^bxfGHliF4nQ0dei)Sw?30C>fM?qJFRbPhU}ictyyV1tVGd6A1de^p*TB zkiKz!?caGzCQy9`L{ph_{Vr;;G@iGfp2-(={b$7I%Sd^eT z{vus0L1l`knHb^HCl0$cM($AS#bytVFBU$0Jh~WQ^genQ8`xmF-HU5+ZI*>hCBE_y z`J&<#L9J>cYbAf>GnJgW_Sg!8Q^!+pkK{a4q17F}HKNiZUmubBPDQCcHE7(0zB!em zFkqEdGdI=dYE)~wt@0xJo=qp^V%d)#6s#uDEhn|*E)ZF~`X0PV^9T6kH%4IV43dP5na% zI)1Y1ZAW`M9xid27(z=}EM3%K*JFVwcq=wixb_6pWv>S-hD z_SbIHG=@9m=UyR|(i)$vwU{d{N_zvUH@g=gS6wk~5T>187?sftP(-9vK~871U5r*W zEWT`X6`%&VoxE%mU==-&rB;!9aHQfbKQ}M~w|7Y}$!J&FhGj+V5bph)4N_20TckC8 zY=>WqoNnv1wi+SXMH#H;VP}Q*!Lo<)&WXVrac^ee(YNO1_)$_)Q_d6-{EAo0o+{ky41u`2fp~3F9+xN|NI+pk8u&~J+r_YEZ5OXvv3yk#g8ft{Dy-o+OFI?pt zKOG4#GBuo)s@sq*A%;_Qn^zN~WCoA`GboOjtHK6Njc^^by;nngkF|mPIOe$F^rccqv+j&dAhZRnlTOvM=OHXSMLk& zf15ifT>ChN)chQ)Ti^1jh((T<6-ZTDu#sHw9wHUIvqpj)SNd@qJ06g5Nk@Bg zC0ySjzu|K^=&;RERgXQ9bbZlw(^=s%ZxU^(q+G0_8jRG+%X}Br9C&bj(^}S(%h~f- z*W9)n8;!r+=5RFR(eDLnIMvPI@>02*h(Xb9MtMJedN%WDyj|qy@|HyE_Az{46?ROB zIC&>mZ-jhHNQjF6QaUq49^?B2oK916y27*K`8{gPyLh1wzQOhqe7l;*GZTr&aS8Tg zNciar_8NyVojLr6Qj)>d)}xU8BcQsUJQ%6W!)iSGk|?HLnVDIHK6^96a>|(p^#t{h z$_&8~j#BNBN@kveRd|J6YODpV*&w&j@@Tg)uisUerJdr8*4lz)sC`=tnbK;ZWH-tsD{=K?bDFB4(E!l_+{DFDM3dnk()Y5 zu)ENwyQP2-k$|BXC?!9RE+rXez~sg zYR+$~30Q+P=xv|DgUx2?_-LAv#?4Nh45x;&`Ag7<2*&Kh0nAbT;~5rnKQzx>=oEZS zciZ)XsJK}~gU%ET;iMYjenF0tiI1DrZbaXb-J4g16_L&~B0nC<-SN}hxHOR{B(yac-a2 zdzH)HRZf#nm1}9e*OK!@TSNZr%PQgTURvd}CF!Is7r56se3i2vO?~vN^N-4x@vj!d zE~0ys(jP4U=tLfO4)Clngkcj01~Ke?D@oe-Q1b<7QDHv{T0H#nN80A~`bEyJQ#$hp zCn(Qob<2&>Jm+(zRinYdr=Q>HJoS7U_|{+vL9=qybB*bl>T5=`-{H+sRsjXIDMZm~ zG2AAOTf7ktZMh76vPqznuW=5I=}S zY<%{iHm?N_t~YH0w!Xp`-+Su%cR;jh4V-M>!X?ebS`>% zs#W+w$J7WO*TdtUg~;5k-{V!LdjCNVfvA+r)VE#Wv|+x=^|?282PHjpXdE@$Pal{` z|FBVsqd~KB(RIbn?^%$xM)@spFksB@Ns<4U&Z=tZsfydmc#r$?&UfUmX0&l!`{gh1 z%endd7VWOQOZKIGi4Q;AZ{E0T+mm;r`}X$ddn>0b(99?(k0WclSshn-XJ6*;uIzOO zby&rsmgs)?wI6hOvD%K>>v~;+wvFY1qjw)leYVP_b#8Xm)%~r(qa9Lu&y0I>$pPA( zsRDx&D^JeJH_CW_)Y0h!wN9EpyE^64@ue+Uo0t7oXk9yMlrOVwS46?wMXR)+1J;DO zZJ?|QtQ9e{+vvt`RuSN~Vh@Qz2MAxKCFP#6k|Qhvv)ks~{LSwymF8&PjUs-0Tz}wD zcG$sHq;_@`$Jq+ewNgg9R4AXB8Yb~LFNv(h%i^3#PnaN!H&BWP;C#99pH(MJ2+!rM z3c&#L9;`rXnPYhJpUU{D5c#(9I`_nxEjt3@8?F?}v&Q^mA;run(<8Q$ZGAxmIWD6{ zp*-T$YC&Gtcz7@8<5K|Z+3Zit1evW z*Et_vs9wfsdcq#kOlumo;d7lK36XjoM3GpQTy7^vv9b39uZ_~gmf0}s3SPP>j}D?UQ6k%YPqwx4cz>wGG3 zs7)t#1aR4ynQhWNJ`XH^WAg)JrOhlPEQMT(*(uS46P|-NH*WnjtDg!K?-c96R{9=S znm?i8L{oCawE7eLCUfs2P&>+d6ov$UFlBp>G4;u*d9Xs-J|S~3oQRZx^({r{noP}Y z0a=J@(87HLWK{Io37P6~y9&6) zc^g|?Y>8Mia(I4eBTqu1si#oZWkb}^-0O$Ngx0pS%ZJ&r7%lj^fYxoYiPmE-h2v%j z=O`E2%ItE+7wnNlzG?+27eQTo4#@pn_t6F(KIZQ#`iO9oY>_9eQN}_v_i(RAu$(HQ zQwG;!FeD(Pu6r!+80h0IZvU9zpDcR2nQJ^he=k^h{+tLrCn29Y3wC~<$c8wVi`rMf zkp)=7Nt~Cy8|(htPJ{(Z+GLE@ZtYE*!)!=Ks%d8NbxmY(xKP+wmG@Z7lvG`3*Q=c1 zfkO3XYXyAFFt_v#-gI+&{%=C{`gPySRZOPATXM62) zIsf`7;oVV0WF2TAE#>V!%g5_(nvS9iO)N^mS3-pTOKDMme%d9FyasvYMmHmFfc5P4 zO!?3^bFN~>jN%8>D{d2`&$%X+s za?%_@Up8zy*$Xvb0cC_!uYnu^uj51aU{4o9R}-N>1_MPw&e*3Sgf9qV`X;c%lHxbj zW|Fp%NF-GQQrjt^0qz1(HeTS2!18azrKlNsdHb$ZE9krq!|)lj({f<*;9#w{VDZN+ z9@w4=Hi5&4!ZBVmp%4mQ!Ag5RblZqVlvTK+I2S>DUQ%JJ*66j+yI46@KYklq5JY6g zOQo%cExJVgD7W=tUcvzfAMY%%op%7P4}p4ldX9AW1eG=iz{*Oo`g;0JI2=bZBG3(h 
zIje#LmBan*Zs@_zkHr4!o{xjoDXs4YhiX$bJ5;RTd|ge3gc8SfekMi}ww+QUygA{R z4``hxmAr~ve(~P_xBvM)8=)bv1z2aI*Vq*|PxmaZfl>;BELk;1A^Nk1dT_o=#b|1_ znv8Lni7uGuC8&4<3YZ9A(H;iaCISUrN=7WMrfewCtB_1K^g+?1HMnF{QYDitsltkc zB+ZT^Mm^=V?bGyowy24FNZi6S?=fa*7`~wrKPx*ER7FT=Qam_RoXzwb9TfzKhD~IQ zjhmCX1D*taa3!}+dvp|+qTpUd;P2$Lvk9Rp&Y^e**-)Kkyd2I5n^(Z^Q{}`vP_5z` z)snHVk$fmAm7tn!JmijSE|nX-sfsHcYnC$Mp>irP13jbWFa$@W$#1^HRaG_;u&iKC z-0EQ#cT6cf-!*is?@&wwcJTHpI6n8z;VPGyd!WNmb%2;|EqrLwiqM2pW#KxpR6hfT z3g|rQY{<#rqDR(Qh`W2e_o2~SZLU~LK*Lv6(&dJ#N&*l;$f_VYP>RDjCYpBwnC*JBU#O*h%HA(O0jft zj#0B|gNVV^)$F|PKfb3In~g`-&JACrD^BhY3os@eDT(CsLQUOFWkr3}!wN_;L?S7O zj4EV0XUA$yNv;h*ntS1(@}VXwDk?_a!nQ%)N4cRvge*KXD>>N*hJkY0R-k;n#vFXy zy*+*1pS`cfx{uxv3oCo1U6*sngO#wmQ(`0GQcY)#sV%D(pHsz0&gplpZS8P-UM*OE zE>81N-IC^5x{R(#N|vryOoyZ8+G7XmNlPMhiB3Z=tc5T;m-t95;X8fkeB`mMp>J9vLRN{&U zmIJOGL&Y8W_)1sZsSD_xd2#o8msrF_;sdwk8t{{OUO2m`-C*>_s#cO(!?5c!ehs>| zMRSwMiXzwEQx?(Q%m=H(j&ICDTR1Qy1t24cNSgK=Ep}M$dO2%{XuIYxHE`<##ah-0 z*L30Zn|{Bo*Vt#h|6FVC3dZK$sA%3Q+8v=;%HG_amT9{CqIa$v4O}0-x#GT+-^P8> zyS+HW+twnQIwI_4G>7S&16@~c&Dm(yDu5=ObecsJKWj0Ih&C4$RA`oKZG@J+x}Tf# z`|a1^=yjT<<((b=UQ_ZnKJXgy5F~bYjMUw|vUKecOc$wp#?r^_^I8WL#m6_cqB}h9 zN6>AUyR z5jn~3NSKqAbhC_xvmMO-9HJ#4gIS$3F&`pe6mT-E+XhWcv zX5i!Hb|(i7+Hcn=6f90%yh39KH?@-Sv)7^E1a-vCYY(o~muI}SYwa1l5ht%U>5GWR^tvo!wWkctE zusI+I?{8NKlMVB00V_etF)7ocB~{AYI$qLz@adVf!+QRCA0V?zR%YzkK~T!^js2jM z$6Wos?eTlXZSZe=LFpxh#ZdWn5 zefGz_^!&8zXVpcPd|Ly( z%HsCY%qX;Ou+&!pWZFq!6)xNR5)%HbKM-7IJ|LsdNLxgwr5k3U%@?%F9}TTK*!3iW z*Yhcj-Ft`Yd_y0{(Olu$zU1NIQL=3(c=$c|Zo}p;WrqvE!$qF94TR1(MCoPM!x`@K z4-Hebi4W9=;SXb~lEp#v6Y~!rn`{ z88hA6V;k2gX{yyTuygFTR3RTJ{VPwKYPJ62#gos|!sUEodzozudN&^8>k6bvEzG*4 zX?e2`m!CD>4=h-8^Ujv*r&e{$UUhJm;_g{T_c|?DVvo;0%OfrKF#*Rpe@sMQCse&4akY8@T0aNjcLl2B75m51?5=s6Fx-2b{=164d~I^d zKKBA_r(F4mvgyXl^)go6ZM!AjzgAbHVDZ%Lg?Bk27S3zE*HkKPY%SyNch%iay4w&E z=d{*+O{`K%OIiE=MY`K_?iPl$IIeYCW3RNaxs0|yLuY&D-K2uxy{Ix^zn#ud#@+iN z!46L@u2E15Y%F`be~!)&;jV8;mHiXTHLT^cX=PdapJ@+0zk4ZUp8XTkHQCDp>&u?) 
zZ&-gq=Pn`e($PzHs{@wLt}Das@7*z!bazKcu*%mz!8yFc+%x}9_hB=j2Z_&z2 z+?O5K9kGCs@yL9Wd!@hA!S`FHD+os z_G0JOv$6@DTPMDx^-ah+SQ8|2Hx7cclGLpcmLnGtZ@!IK$Z&oBtEf}qJY?<1R}*B<|G{^ zlgD^6k$f_UcP^wGLOeAP9LcAYD7umf4zcOqM%B?EZxoFvi}tPwPcD^ltucp>)uNdg z%h3U{vMTI_vwjZ4)Nw+*oNNC4OJbB!S=Nqmc{7Z05%Od#7;gpmf`=D7m_-510zDsT z8ylTWCi53X3j9cnxbllQs9`e>*AM0x-8(t{JtRMf4W*Qi)aFu{c*s{Y)?)cpkf`gD zFCOWgn;Q_hy-QOvRTA|^nt9i$U0$3!I7hmL3eucS11gcuMmgw)K8XdQuM3r`JoDfI zzr(w?9oZ(1T{aVaRd5jMwOhPKx)3}7J^lY^Lo#!W-$x*~lC$ZCzB&4a2F-?J?Op%M zqpx^@pVF(?&uLg^D-*F|MSQMUqDjSJ_`W`_S4PI%hY7Kfgt)&?r-c#z&6s+WxdNgS3Lp zZ|Dhlo{|rnR3MeKCa|c4KeH_L*V|u!ZSuPh;b+@KM78f&2h!J`bRNPxCdm!GBjj7f z;gsVXbnp`QJR>Qym+9GZnaS-w+$gQC^-oY$HPnZCp{}eKgB>_u9=Bp?tXhCcb}Z9K zU*C@>=$aol$Q~PP9Y*+(vjsz*ZtlKWbSTu2g!bl*4O78%j62GA2wSKx_7aT+AaKsA z*vzffs^I0}cAf*mjDsdK^Y1T~1!PqnTm5j9JyljM-S{k+ib}zDdqit9#OkQ4KAT`n zMZ`%sBYeKq1ss;lZ~~?m!&PyPCRCniiQW$@4LLz>D~JCoFkCnSR@W;G^=ZziLCkqv z%A*#{Y@Z+O50fc*L)c}P`?hstZ^zDrN7aosb~|*>Eq@ULgyh)@h;jW9wG7unbBzxX z>UtJ!nX#h2>{Rn`D!^+1dC`0tNcYNiG9KDo#4~fuwtmN#WRz$2} zX_KG!{0e>P|6%VP;xmo@+qP|1jEYgQ-q^Nn+qNpUQK?kS%B^$y+(G~P z8}zIPefMAup5Y!m`@h#-Fj9{7i94CR>Yl&KZ`*v@{qKkL_yB<)Znth&wy@5p?v& ztoQX-;_uf!1w=aDnXZ4aMt=Tw`QGy7^v^c){^dTmzcg=Y#=Ns~J(8e)hnx8L52s~pO#)N{q1i~8x1P*P&!~(v5 z-F$zq32*an^&-y?7o$gQmoSwhfH~anY^7#F5{pT$I%l!TP`sMg5Mj@;oi|XyD-`;;a?-zaZ z=go6=y30_If!qDlJz@!%L&vDFLx6yQ5A>4J?s5vO;J?$l-ytsy{q^VHPCE--2JQ9i zya`Oa*jTN?dc$Ahr6wvY0X?!J+*Wfo^-$cUS*J-ZGp=w$`O8MN>Y3BWc2P<~`CAdL zkI4v>-QG;Q?|%;jqOB7Ggw}W>#Kv~NkClyes{far^Vh-RKm9RT5jJap`QvZ2&-pW2 z1@W(G_2sXe|E+wecNvRa?e}|~v|`S?dC&js>knu0y7|~qL-dpHF$}$(CQ*BNBg5f4 z(33}`H)Q2BZ2X#OdE2Z-zN;3e_4Cwy+l6ag+6&BH9?JRtNv(K1lY2H(eGzsR=%ua;Y&_6nGT5{V3v)^F!*;nVkSu-Yv|EA9TJEQ)aMGIxJPOlFAOEaxG|VO8>L_O#=w=Q?;UONhE?E?cNwO_=HV$VIr$EiW#2*)_m8{phHL0Q-OYM{HXCsueIANT#&M{Y{{?A}kxq)f;^<`_N1L-4RNi~(}o`f0+huoWlD z&*`2mmx4>b(og6b_4!Z4X#n{iR%&?HO&lHcQ>s0BaKTVdt%Ht#G>qUKhwY3*b}~?ucRj5j%s+0ljWebfw}(D~^hv z@J#Ocano=3KLy_qg&zLiFJ7oWz9TX{^Z!vLZi!D&cS@da9Co_ss-&-kv^aMpCB{@T zecl|NJ3d`YF{& z{0ZM((UGrTp&uYI`U!{m9AC&z3-r(>+r}LIEW66~du^B(=dRtW3SmpX_*fft({*@w zEhL(*BfWS>TQSeN?}WUD)K2D;S0uk)=JUyQdXmlIU#XC@8egaGoa?%)lCSL=%L-070+NnCFd&bUgAEZKRvWotbaIJ=JE}Ms`1u+gbTxZUT(OVK&R3avJoyR3G=+ zgtOcj*VE89RJDWLlaV>)F@91^19M9~c)Wo^U>Y^n=D^uZ`som*;wmKdH0{pn3-Dnzx6i9Uw+zi1+Fh_mEE9iBHP1<*#5G4 z$3@vSa3ZGqe@!ucgD5Qh-=49(eutiVDYiD|Zjq;40R+h8Z@A=}ZuXS=n5w>h_&hOO zi@!O3`bL6zRsTIJ(-AYc2*ADi>g4$!t&nFNd&O~YNIDA=q5abTrLQy;cxHO^WNL4J z{n&E(>mc~$;pmi}pReW6Zu_puU$gmrRjJzJMq=*>*{7Kaobt!A8k$wLu{vqERuWZ!*sgU z%@Seoh@BHp+xZvYJ1=Qka#t{+o#|A=WwCv5l=sws_d2S2`A#Y>x>RXelHii8oA)8P zOyk>YW6O3|V4;9&#%A)wTHlL-+BJ0-M!6P8=ckvV=#9U3jI2N1lP8LE#k|mY_vAar zeCeg%+#G-U!*t~o^URY1E8IhutW&+unSMQE#MGLr9v{$?97WT2t>oD7Pb0%`F~$kj z%im^hv=;TkiIx>V5c%i^HS4*_2?>(2t3C^+S;K__naa?5=Ra{JMHJM3BY6|N_8vc0 zUw*qd`O72|@LnT5_1#o%n$l0KCDSA3*)s1Np_ff5hi>`noB zb8f-~%*uxdWmPW+I=`Hx>bscFSZ8Ral=s^?ccT}GbPQb`+nZk8Guib3Z}li<(o}z) z3{{`*J&jpX4D%pV^P{a&gc8G4_fSRZGj{<4OrpFLr(y-9&I-0V6w*g<&qA2|H!ptE zjmeW06nR$5Bixk_rBdbX$#Gtpboy+C=JW#OJpF-znz~T--kuxkiFI ztLr*GPD{T-x$U0#1b8QOv~neuObgF%XV^N6XZIoI7VC0|cEITcL>2Nh$(^P&nDzHP=LC-a?h#o1LCuGGz2S z#+ce>=-|6znQ)zS)3DAA6-^Q?SAfri^{YC%TQVQ>2xcD*-%O=zp3D$yF-5^4U?5*h zIa^x9^v3KJ11ap#DOCQJ_&Dy2FH`szkY~)R(Xl{J01}~b|G9-M5>d23Wk4dv{#41> zpEsP6y6h5Ls0FE&N$OH16t1APc>0J-UsnBBB2kv$7pzrgg{4+z7T6JM^~b&F*zmJ) z`uq4#uv}w{%c-6$_l_`i%n8dWO40HPFWULDR${Y9U1nw!wNu?h-B61pX+5b4dFj*b z_+%SeLXC%rD8NDiAsJ%tMDCM;>X@2S+qX<_)p1oX{fyxk<>P#VOZ5Ochky6$uE*U2Pqztn!h zaZf;At_HEqm4ypbpR4uU66_u)TQ)sgESW_|<*b8Yw;i5oOt>>}s%vhkdUyzoH%p?YyGm{qirNx?XnT>+d z{$^zp!MvISim;LrBLd-uQqm*gwCQBf(C!Z^xE_fLn~H3rrQ8Y;NsY-$D{zO-gjUO? 
z`&p{1>2Se)TB{$&gJu(UxLjMGfJ@nWKJyAhRxDW9**o$n95#f;MvaCW>_>HL+?L(Hg?#&9bo&Hig`H*Kh*WkGtjx26S3v_Bxnu(o7Y)t`k^ESNb_32@;mg zoiH-)tC$nE500@()@XZfRVKbZz~{!Brpkmx8i|}{H8@&xiR>SoRRWBllRwZp5!&mu$h7qb7z5P_KHNJix|QxSke9yVt=YfcSUI9RwWw>6q6!tUbc7PEq!iID|vNHAb0ooxH=q{ z$&kq4wAqAevbSdL^9V#*U3Y#{u%NDu-6$r;tk~el;gqCqxBPeBm>b_ zcmm38;K?6r%txzbvZ!zlp@$+h0+!YD%}GXd85S!}jE2?TGE}l zk}N=DC`Mu8u0Qp(we734cLB$xY^H!-mTwdqjPz!ECBb8n2z`YNO&p~I{f+bv!K#)V zGk+*1Df-OV4x6#&BsZ7})@|;!BZ3~S4BjZ1Lkwci=m>8gLKTl{l}EnP%MgGzzYBGt zDT6Nfaub2l4<%M;#^eB=>^vA;0I%OIX269*?|LgxRO+r+ghuRTsQe@!n6qUV?+jj+ z{f9;8*?1YH=nUvKb34;)tkss<=e7!eNo(Y~Eg1I}iKhBO0%+7ksJU`!rrl(sF_enZEZz{3ot{P&RbBH8mne}5{r_-lSK+`ydaL88IUBGBJZq)nc+KdRKuqh z+(}%10Z(v~bLn#!7z(!cP31E?$#OHaZOd*|af1yWZe)GsZt%BU-9;&?uuyvthG2@6 zG^v<3e=M*n;4~Hd%tVYjw=!R`UHD$0*elTl=IL3Enq(G|hCwHGM{{bkf& zF;J!Ool_d!zNXNCQ(P*5s1CZmJn~!OX9qgaY?%C{G`;tg=5Xa2nv)$0rn|aUum=DI z#2TK5X5acyD-!n7p3Mk?){0<;A#-Hg&^@@mlKaj!E)l3^e%~kNd<9Uv z)>el;#qXa~P>}+Dr-bRIH84v<@X3Opk!sf7GE!Jz0PF19X~JRsC3`Y1alNS#{>h%X zNivnZP7NP@w%F)hT zl00ie@5O*VT1iZH$TZ#{U$1z12E^r?*8a>L$i!lP|}pM|Ab7O zsY}bO6>8<`lyt?5a|8$c)|!jV-86KQ}89WgZL( zIV`uDUdcz*xSa@(FWPM5B`RM@cA-coDplnq(*l+#aXN~{g&-d5c=KXWmQ2G;rfgs1 z7rNg(xm4CkCw52RpA}BUki?5~7b(kRZrOosT8I_3-F%zSMP8zQFR)?Lu%-DeGEUdFGt$NFQ%vL)dyiF$ryUHW zxJw;o^aI*<_7#O!+2v@`gpZe4P$|OEFZdJ507ExYRmX@~KzVFsoY_ZYZvj>7%w2(; zFbqq4Dm4&b7F{Kd?5LO+NToxTMv&tn_~UnJ6*m60Ky@nJFeGtN;c1Ky!*Jt)T5>1f zo$QQ$7`dqEw#|aRqB{W1TPjh!nfGoKjcFiPXkaMgj=xH9xFXHl+y|0Mw@DHVw%&b# zvlX!8AZLCjy~vKqZn9T{Lr`G-NzRkL>Ocxf$xxVwfg(LQl@2&tE8!u%3<&fybM z6F=+t_UGJ?+H^%W6A-QEsKp$9o2(!;4Bg1?^Zz|3V z1J`-}R)o+1C`g6gak}EmY_{@(>@01it_nJwVL6Oe!_RdVYw8nDdjmT(+#Kqqu@7l< zl^I=XP!VP`Q=cS2Q$G$3p&@Rors2qH)VLG>Q5nz<^V=N+t&o$A7%NGUlDcD9S{%`d z@_Pg0mp(9fXclIm=>52$pIIFG!2X^=RyjK@C=BT2Oka?DW^+zZCnrodHN0`)kSj6J zAO^Px3rualjG{zj?)1e}WUEIVY&KH{e_e&Qfo8w3bY)si!#R!=&v5JF&=zMQZ^Y8^ z{lv`%up!zjsLf_%7wGD&?VB_Y$V2M(!ehvB9+3_dd)Hbv0d`>x!@?ky?6Iyobg8;( zx2DH&rWy@kdon^8i=&dR6&y7c;8Uw~jHdYh_2uR5sNBJuXq|<*si`ocWj(5BTX4Ox zmL4=cf3t?;N^fcM9!)`iSnLaw$&Za7>@o5VRQ*NtTQW|hE_5;{mCZ_G?tDHfHm21u z`L@Vn0uWD0g_9uuo;fPRwI!CqQS*V!a1WOK$KLt(oOtUj3s@>?fqZ&Z)iT2i+Yn+w z58wR<-*y4QlfBu6Li4N`>#}>)^e-On?S$J4%5K64@rWA&2`S3@9}G+C>yt1=92bHd zWU7=3rp%?1BO8SaJgY9Lw3J!}v#={ZxkvM37UI&9=mN40-F1o7WVVuv29(A-72d7iB(%FJH&q{0Y0W*{tU^u znT$qI*rTl}fvd&zLJ1ob{mqIAbX0EhpC=GB-T*ZB%5BAzI*3QDyZjX%=>d}9)etWD zRl5g{W6(JkAUY6PH+6U@>p+f+*7!V2jE&k`;<(B?99t?(UyQeH*7qvN@CQV)Bh;XZ zipzr{h;!2+@FBN!|8FYs?KK0}?N;cLa1%A}<`~p2%)^FlENO=IX=m>Mw#tStSBsO( z7b(yOMD22n0;$Q^%B8rP(+#s1BYGGz)5PFDjF@+U4K8tVR%H! 
zr)c5tbk@$ClSHDSYJ@mlO}EiSJ+33e#hQ*ZWUln0n?B4;p|h+h*o6_V>!23esI=!- z>{9DhHqx8%h9AT~7V{vnLK)IiEo&DX3~5{`kpBQGVDTWzw_#pH$(Ez*H*8mSBbqtP zT|(W1<=B%gMepJ&ol2HotRo?S zuY-+{J<<(1_vU4X@34JcY$&<-s$7#$p#4^S`XVB}2Z0#u3lhWrV59g~vka|BBh zbVZX8Oz1?TANZi-O1@NJC5D1UA-oR&jGNz6E#)l%yJj{x)m2EXYy@Iyofay#^P=*2&{5gn$vX76OB;ALXC)Hsp zk;*2ICYSTa+D*3=F1^G!9F3Fw!wv`g7D2)ZJk>cpepO&lbr8%jJ$O_tiT>5j;vS zPGZI)z8)4%&Tuu&loJ=FqzEIDOu`~|>Rjv=!fv3!x6NzD{aI**^8#`M=vAsmPY9y$ zc$BOcjzbf(=qA<%EF}yD4fcKzFZLrYf|+tmA*wdZaePdh>mmoznH{IFres6#lUX3X zk)M_sF=&vDAOqMZt9gFnQsjugZ|=Rvd_Wmp7Q@r>huo!nSS=i#4<%hM{F46%5w84* z&AE(alQp!6vuZNjxHp2StO}$3AimyB$sKbPG9F_Qn5a-u^A^00@jGcwLN`k{URmI|IMwKrsw z-6Nz{0UBxW5(8^wY#~t?L82t4udNat$Vb;Gnv^^x$#J5~r}^*?g`E0ee3Ae($N905 ztF%c8uc%%WH!_gjo2uzlgK7*SQP!=**@mU4TY3hOxdQ z1BNRQ;zThlNNcmR1qsn{mO!DQ-WDf>RFK=b-0Xxu8>J2kH$YO272$9KHi0&T#LHcg zvW7St4%yh$t*eOcH&r(c!#ta!xevdWdISOiee|$f;<&V0px*}BdN?JdpI-mSjI*UR zKQ00;d|MHT1VhM$nqkTu=dgr*70b`cL`c$Y)@@EWdMNnNJB?-~a|+u5fO-vfuJwo% zyc0YGH2R29I2OFJu#OklTX-0ZtTsGiD65pp(dJb&qN%~zv@RI}Zlo(W(XYnv72(Kz9BY7u*L=(m@;I5;SSfK#=*m6)B4n^ zUFieLJ&!_MQvb5lkWmS(ELgOH{Rlm}GGER3IGt#6e;r3~1pG2SK=XX~ zmWfr!3S!?P4<9Z~M?8`+#M`3X{!6I!{of;91!?_u7+*!0GVo3;qj52wbX3pqaCMXKAJ2L#NO*YI$DxcAbB z^q85EgiaeLvd$ycHJm1(pvZuK7^$GjL;qxvNI|7QvoMNm$)QIA0mFcplnAq0V>p{$ z%D%Nz>mV>oVU2OZmPJ@dZP`M=;lkslFdix4-~<<_G|^Xmkp#MjH6A-*-%I&r+6(@|8Ug!f_| zL>I2o4Gc>k(E2PKiA%%%rp!RLSN~&-Y+^i0GIr{*Xm#B39sh$`BOy)lDA&2PkXD~C<1Gl3Cxox`YQ z&>a^7OZ$p4=1AxhxeYLymkVKZH+aJ|k0Zm^cKrqJN*61PE*yQNVsffXv7W6dtx53- z$5|bRj%?G04H>155i-Lc<}0y1a8Xa@NS2SQZ~(su zGEgGjY&)JO&xC}g|177_WropC<8?W@(-vJ$iVWifntCQp+BrI3KRD3{Ix%qSSod4< zDaIf+>2;|;E-qdsT{Q=@Mdc-(pA9PkOo+4in8jgHo;-sXs6m@tMTL5!fDFyE?F1fl z!#8TJXUB?Nrens5{H!^cwW;HQmFkaYg9q5s5=yMxNoW&hfQX)Xk-jO*!~lq`23Us|KMyf?zOpIc)T?GTcbp* z@iw8WyqN~|I;3p(wKjEWEYwx{b>^7xK|EMF2z#>f)xX-|@+0D8!nFwBw2?Vi?ax6S4 zv>UF|GA@Uw@U8B_d-uXVzL&C69It zpqDvg8B%&^TPH2yMDdsty$SrdEsU&C;*u=*M|9vttj0Gf3EL&ulu?H^c(0?0!Pw8@ zHWVo3@ru1RgOO?U;mGRNJxFB149ptco0MF9vnV$p(M`b%?HI`6?rsqveXl>SI zJ@CX5#F=(8=0WB)qvTsnddBJAOI;YZdM1K9T7CPLzmzo+R=&}V5@_Wf1L4epk-)~U z>IDc4i-vux#gLgW6!I~MA3$Y}6y1?n<#1I%Y;=`i7y$c^7X}YH%ob=N)|`;mq~2^* z*X(E$xR~Peu@5nIJB*h$uS3sn6WQWx^%QPx_k9X$A79*c6wg2r#K)PDIhc4lG=A40 zDL+(C(N*P6%7Bc3G%VMyZcMsT$%nLuD;Ddf+G7I?x!N#gR>l7?JlTXzS}fl(!(^&; zMRWX3Q@sMBU?=JI%v%0drZVf4O-z(bOFdj2X)}mXy=}GEnhZQTpiL%R;JVO6+rl$G z18HTZf-9ns9E-Dc^zXb#%AXz@GuCT_lH$x8vMQdBNh*(ns^(RZS4jqiEA(mCkpf$Q zfXcFME*4m=bwXj74iB8IQ%u-oj7BaHHN1xE2mvvsAcnDpV@p4bTHs>Uz{Wvx4+7Dw z+L1j*KtgxjFO4(^{Fp6M0fJmh@mB+`yY&q2p4nQ&Us49NaWqhbjakh8nd56a@^#eD z^Y<7;AR*V_AYV4kh+Mm3L0X$ReF%VO(u1G<(5?JnSF)lt)!a6`{HZO_p$%F;)M0R& zBW~#KE#byw%R;Z|!%bP$ra$Pq%{w(=YiYoK7D;vC`;2=g=+rda=!EeGTuEJ+4E01F zDm`PKh?RxB4%mr7UWr0l#HUda*QnGHTU$5p{0LBqcP|H^?BfVTY%;GVjp>&rYL;k( z?c_v=WcfxGQvp?ItbSS(1r3dbDc7{Dx3nMUz-Cj(M7TQgb(P-C_Y~`kIhgQa{}F4l@%gkopl^57Nmbxr zP#)->z_R&r`!KCF88v0ITnm-!**ub3bc@L5jm*=~&@!H^e4t52FSsrbWDsz4O`+Ec z?e&jwFS_{DNhLs_o!ByFMwOzli|fFB5dtFEdct0m*f|2ivsM{z5}a9@*t1?)^mE)o zvn(5f8-g5Eab)A%q0-YrX)LF0Doj&RbhU@MTDKn#T>rG%#ZI@NJrU|*HJ)Fl?&4<;s0yBZPn;tsYo-NKD0U*CX6LwYxqv|*msN5pPhrmTynL7{jn%De zy~tzH0s#q(z{y5j2O;LPTHzMm_LZG=RLYQ0!eNSF&3cWcUcs5?-DfgrOc|nKY+*uf z@|0^Zsj0M{RLfb9kph-NGL55*XvNfU4Rx}KZsRcQ`GZ3NfFZ3jh$6Pm#}5gdWzLJ% zdEJ259%SMKCuTu$Th(Kz!Eg|o#lgb-j{J9lg<#8H?}%o}*|lK-=e)Jd2o#>7g_)ri z{jC{Z)|pzjM#^5U+y=i$woWc~&~XN@j{3Jik&=R@2t?F(GCHIb>#GGz%wf7HQNja# zBG&0|`&ucnO{hJgSqxk?Haat)Q$2g!7lhR&T0eaEv1^xjWhjs77_4Q#+Sg@6Iq}zQG3R`G{?~gn>Q_ z8h@@5>`uG-c@i#8aJdkQs(UI$66VB;Q{6ABYn)^wF9y6f2_RUUwXk; zz_hc8?4RMVZX)ZBYAFLd$;oqC!eQJt|0TOzi%KAfvu%RGE9*4`$H*8$FUV=@eX>P# 
z>;k`VOpH;OE3L)wXb<)?THtj+ZS!=~iKgr9nwvO)KC*;a1|2W2qOR=y@W2qw08~W`*Mkr6#3B~VWmHw&+3d)m3HreszmwNOqV;=^+ z!neJu8?T|G41vDyq;4xmQsJ)`xA~#|3_%f3bDdD+W3(C?InOdmK2Fs@?1WutaNJvR zYj{QDbuCAUrc&AlM#vvosj;aFp73|X>I9ybJH^N#6c&DVI8+ropdt-2%Y$PID-Inr z9aBW3QdXGyS)z6Pg#Z>l>J4UXi-X99!sGWSmuoFfAJhgyDMcIYPyx(+f%AS zTr<`zlU7K{_4U|8rvZ~6wL#%vX+}{}AXeH=7W85x$i~H-V{0`rfJbDjvtEeo$wjBy z-RK-6)1vAIWolZ?uF|oFX42q-2wh2>sbcPO!Z}nCcV4eWZ;d(PAirEIRcl#OT$6W+wsH_PIT(@eS43+ZfOVa_FD!M^^4C5PHK`j=^e9h19 z)Kkt}5g#u+5xOpU4V}>GbtG(upe?_ZszlOovi=8#JD2KJba7HWZZRC->7~@990eEs zRiL6?%4$Ve|2_>zh^i>TD%iU@S(D$kKR!oHEE_F6L(M}9iEs~HG3BApF$xFl3&$mh z41M)PH5%ipi2SaoAl#8f5t*-sk}zD0S)uj>*TDPcJ$I7wm8%8OtU1ZgZ7ePvmq;j_) zYcgU8#OU+0Vtqd!XSHdZu#-%fR(rI6Wlg!ho_+*YM^}lFk1wn`7Qqpb$F)7aEnZOx=B#kXz;W6tViKraW>w!vQvfvqf1mc>WhER84v!ct3}GmU$_9R2i|TR#=uC z8)}4bV^+1WhPFXERhq)cEo>ceE%x?Xd_|G;$C4XUG;&ED4SG32!Kko2{GA zf)+EaVwZeN>=*%t{8(<84BLP_yXJmXvF~kw9<8;xl2bG^Avqu@r)k0wDp1k#c%bb^ zw9pU6QYT(yrBdrLQ6rpIb85OOM0UlLeyu?J>!L;yPx`Ayc+t?U9&y$vaNQeWjy9EA z2*{jvaviwvwJYJsjDoaCbYbcohBKZ+HzUyhpgcmq7yn{4f+?SGky?E5fh#Xr^P z(PCf7=_AMu4hv1PY2)xt@t%wt%IZmPU1clTX&KsVS@RwA6Dw%JWm082R$KR%_k>J`MXsq-Ov>KE6LG$?0SkRBH9BY6|UaW+$A$G|55#%ZIHfU9w zPWs=X-+FD>TN!=g5A}d`OwBVuOF=bJIyuZo$^eN9cy>D_)Zm`#v?0%|>6j|EjKBHPcA>5g-kRBIKZPnjPw)0?M$*D_$f=LY6MC-cEk?IWbotnTO zHBx$JbjI&aEX+VGajrqQBy|>o7W3(RoW0S^in+=e5iLr{SMI7gAbmo2s#4Awzu!DzRjOuYWZWGkO74o+3_63BOhi?aNqNz~5W zXOnG$XoV2ES0MuCR054Pb8ctVyD+t-d-e^ZjJp^&QJ$(#b;uF#1YbpP$QHg6OMwUL zQA}Q03nnPo!UJ3&1j~Z)UJ&zsB#mp1FKBZNUMmwky2!S%HMGh4G&1Ieg%b7iQtQV_ zL3o^|yaf`TQsIk%0j4l54XNqs9(;ogbVvpE#*oyHXw&A88{9f1^Ww6FEU-sb`k|JN zTMKBCLTl*9l%QwLUW{KgdY2F!^wDz8AWXc;yiUytjY?MXlpQFe+;F#~ae^)FNp(!c z>+duO1w4yIQx4`4DJoL#6#b$`@d9KWrP2jda>$Y=Nr<&`X)Jf404juCNt_g3E|t!1 z#P=%Pr>79c3n^lJJe3syZDUo@zY>}1zNR>SQ9oJw4EA)R_F7X6d#%=vDq9`K0-T`e z;wKx&-S4?lGA^>G4d^SmQX4`mXGCFdf?V2-QQNoCAO{CZ{|3S!*3q13x;_6GO~AoDqEIN_9mVD8QA14j7U<0 z(h{HR>?sOTku<2CV6%_~Jssz0STYM4;Tv^r{ZstO8;&4t9z=ggU_gMT5xgo83m zg^^7UX7f>2muqB!W~$YrLHT1%mNu(?+HsV7>|71I7NUP0H@ao*JjtWB>HKDA_exzk zX7IQU^bqU*NZN+wF@(WI7gKH`f1Cb`leUTDvjXKPi$nDdVF!LPUXyH{9czxoN}^~T zx)CJU1||Bv!N}dgmek`4)$TY};W9#mw_g5aHgeifmS%wPgUtxhhiR$~X+04BFKwOe`|Yls1=Pn#ESAlJckPZaZDR`4oIWksEJoj~S% zaa-m&dPN~SqE>}@nZ;QPQ+MFuPx4=C3*A)H(DSzvG3Q<#Jkph097T#&*Gg<^;P6%X zu>j#t8$&)RVQG1+r0=;5B0GhsMjTFvmk+vrdARc7=I#JHC-Yxe&@Q!OXm`;LA@tXq zJXcL@X71r>=qgY45p@+D6T4Xy>+IzwC6b`sVH z1{Qjuwd^FJYFy(vH-aCrAGwD(i5WOa$vd|=#O#^v6M|wdg0=8OX~rrFQxjP2{pi@W zOa^Zuwh-6NLZpAhxN|=twx|In&NEgZK`J3ULuCgX8cs~`>g!rG3pIus98ruaTB?YY zuKxyG>(%TQj*=NG3-fBm;Fz!)2>e!+<=fpzA=IDWZyH1y9fF#$UGg| zGAKuKOxL1zr=d42R>e>c0-IM|C&a~BB}ihLGCJfjmf1r`vnnF-MtIiwgurN#{3R!6 zHsn-HLb6l|7)_o5?o4LFQ&=7>PZfmvD{ZXe&vvlG3#(yOTKhlSf+I~eB@p^%3Nt00 zQjPG$XxP1~?N3#Rmfl3xZ9t5on7_yM&X23{FPCiFVXT7S*Vjq#Z(e{O5lgI_=^Yi` z)`AxGlxPYzBj)hx@s|y*T#vdMdyl*7>P^I+=ED=9JKy1EKBQQTD-5dd56^UWJuM7}9>uNkuT{gf8Sbt4WX4FO~c;C?GUmpb}};!CxsLdeTQ6elrl$G4@?s3q3JXgmEGwFyAGvR zd0-HG6)Q$8vTY$&b%v2XlDLd0wl0zT4`1eijm95%jfE+&J*muR!Dy~{uj3Pd(Z#dR{7?M>$^tx zM;iLyhaFF2ZNU$x@p0v=6XnpY&9C_Szjj~q0k4N!PW^9hAH$!o&|7HQR?{kt$1{udP_>UI;3W zJQ$5&le2A~WOW#zvnIL=5P;)LTU5|wSx(+n7ZBu=BpTMQzxvxLGKu^ff&6C|lh(9O zz1>#%Wo(TcaUsQ;5Bd$xtb1UM?sTA?uk5qZs0#?D#QVCm+Q&geX{FPzPc^Oxakwk1 zeUMVW4{cfJNa{yUjTq)#nZx7sd*g-k7**#dwxMjX8g`CM*HpBOTx_8T=7K+I#V$D; zel+o=I|})b7T+SaVeC`aSp~M8x}e7Dlb$HAw!1K?`(!Y?N|F})ZNq#)I3|cvL-C3r zyJcYBNs7;de(5ADhER%;ZX=r-TdTIT=7C8gH)p}GoA z{Ig9zg$ng8Yz)+gRYHyR-@N1Z?|^>4Ctcp)+=pAo9YI`nP4)J+ZI(_VNhw^K94)ye9sS^oCvWt_)-i#TYl{oG|@%QtLJZp8QMAk>S@$(3KfVE&*c; z+TT!vlC2jcIQSyJ>t~1|{n}Py`7_jqK;J(%65g!RJ%dvHJaSI@eUgW 
z$^g8MSIm%qt=0Aa3GJ?a{A>RKxLaJs`TrxdxtjBH|0ig5Rp;maPtfk_%Fq2DpuNRa zp8r2Wo2wmv`+opW$FsR#+nu@m?XG7YH`{Z$Tb-`_|Md~UiNVOF-8oHPXPU9+1nblV z!Lu{0cYDa#(UfJ4x29R=tZ~jFcbYHTUFa(2CvR=@=8)YSx3OTZT6fLD4rcZ7sR@3A zFr8R}Dr-3t0v(ZVW5rSEz+orDc~p~*LqFSy1alZ|=tg3O7Cjdz$}v(pB{DEJUwlf4 z1op=(qNHo0L$IIEl^Um3*Dy-4Mnpu7Q0Ss@x2H!W zD($ZgqI2{ZS;PJA8X_LHqlVmm`!ZIjhdG4n8&03d*wY1vmg)#Z@h}vX7EySonKz;o zP}O0k#EHC+9=^94n1}{Jcg{yek(Sn@7qrl{5lIuw)IevbT)|Ib01~n|gv%5hd}lYs z%sIBGxdXVw&dAw<(v7hR8&4d_{lw@~tSp{Py2UWIR$tB#Bm6`^4-LNUSa7x5?!a}U z6u(WmNP|x4*Y^OUFJ`|x1xS4>O6OV#wKpSl7DQh&GW4yFTokWYWBit^Ms5}T9!Uy^ zAP%okpyt`yLSh)q>)xu}0k zDl$PXG%}>Wh<&ajV!AZevrJ9pSF#h}qH`_9me_c7$RLbmpTlXNIT@}qLi}(o%19Z! z4oFw*L@)}auRHVK#B?5N%LS)k$f)gKQ48bNi&5W?ZJ;Q`eyAq7K6RevRW2Tmt7Pa< z;dvA8_c6+bEjf~|wsld6goKYk~kpc!{`QA$^Ys-=QQr{W5FiUgc`EG z7n>=fZ3V+Kbr#uBtcR-Hl#^&zl*n!|Xtzaffl3R4S%X965Yq3 zx@?JYP1va8p9B*w`#8kiPE`khf;q|fHqFW??TT%gZSUK~XuDX56Cwekl<`q?nZ4)& zOWyh?klx2c9kIbNj0|W&%R(0sIv?`@N^ADM01yR{Gn9ucO;%ql2eR>}+HDOio7psG zyoIrb^n%XpZIseNeN+>5aL1h_jh-}J0S41j1BqlbnT!bVDM}BAjlXpUiOWIUg%jRU z;Ynp8D3K|s!%Yv5jiemI_jgO}j1RKymTupi!v!IwiqDs#0}3|=s>CR7eLk5ZF7VGf zotifwHH&o_Dk7?M(%WcWB(*-Q8UacXy|8_sh5U z%*32?=ghfxB<95I9Tkz0k$);GEAz)%nd^DAad{@62S*DW&U)t?ZWvsXVAhIW|BCau zx!qe4^!{@9PoBIxMR`Nqs8)G3dxzV%q!{$x82zifnaWap(*f*_Hz5)8++F_|B^EQ&$ zc2(uktY9KBb)nJ#Usp+k(w+Mw7jxIEa#l7h(u1 z^mgZDqx%CVrbLCnmdlcDmr!ki7hYN5TJ?Z5d#7k|k1T%bZX*%-yO>xz1z(=Xn*(Ah z4_2GTqXRZGblTvLM1Zmz(LhOpZ-yfuqxXvnvvkNshTPf<@E>LrJ4c7r#4Hu zQ-KQVgsx>RJAd!|w&AUIGU|g*TDBsG25v>&74UQH*U?;o_C@v zpCShH^?XKtMO>YL%zmppx2z=P#^|d{#TQEJS%e3wZ7mFv+2?^69jWW-4U+j))-T56 zvfqm_vzHwyEzSnsrv^o;@mxa^A&MUrnyI;mJ5q^hpN(2^LFrR2oj8Fse>`kia zM62f$(2GS>o?NY-A{wFqTHo>(^UfKoHJb!g`bR41@e;yqejl&WbC6iVm+5F?wOeB` zeU2xVa_CKKWoxUU%dL&**A**t5is=a_0aB+O@>)vVckpfnJo>(qk7#`)0H90l9Z5# zg;aBDlh(WzP7LvZ5egS5&j_W>R0OKZjf_}*K@o1Q*z)5%{iE5Cec1PiM+C#(mCkqe zkq(ZM{=WT|+@k%f6LLu&S!X|V8HW48kh-`*aC*@>nLb+u+xs6?iZC->~-D(Yt=Qn0w8HL*QHpo|riX1{RPm5JO40 znETC?Lqsj8Eg6z$qNWtj_^ug}r!w~1vAeOjNqfztLnd7GKs$}!KuO2MUFJ-c-#~lq z#NAq4wY_HIzxs>Ac4EJ2)a8YX{O6$Z9rz`)I9358z5Zs=nQ(DFQ<#b@o{dUV`XY9f}PXaapki9 z%Hr#scriTk;P6wU;DR`+nSMGh#Hc|&ZqTSfIW7dypdL2}Xwa^04(G$dXpCTVz-Ufi z-@u;kYv+K)864w)#f^zGxdj)I*>}kmkri&OfMBK%g*P~5fTROBb&edjg@BkbAjJ!s zF`&hRnB6ZdYTEh9Nzw$~IhgWUMLc0FM#wv1EJxTqVXQ_VIc2OzFgpEiWSZ`~?S!Ql z4C0Js5KQEZWfUyrjAasR;M{N}Fwrf^1J}@}&I8xjXU+rH)c1=AuDLIytwlD4-Hg$K z@xTi}cEE-Lkaaiil6!KouU78n&_<*$xKz(yW7Wx>CNT#!R3+F%~n*}-{;?Y_TMf(4%2?J zPanRpT%9%Pd3v5>oMw4*b8@fncl=?#{-vjV>7ke2`39oaWczN$ zaD?^raJlWmFp=5C`Lcd|anaFl(z$8FVRoHdggDt)WcDIp6SWs#w;LB6ED@F2`K#UY z(cj}W$v-ad?D%*?F3a2D@#g*tv5R+;pWg*j51)S(KLa11Amc<9mqDQ8S@?XrAmUxR z`g?NWB18+k%H?4Y^EgKhotyT|39r`fX(y${<9WB2== zetxdEul#Kf%j>`1K7C>{u2p`o;--ho_3id7|QiSDRx)%{ny85%%gj9L-*f^dGNmcimdiKb>O{7rN=|86`c-w!6!hAJ!^?!Esi6^p0AB6Tb+x%A3 zGP`T$-yg*fOWr~wTi<0hH)Me>{Iow0_lMsNM$1rl?IrORz0WWN*rZx_{j49@TF{5& z-06X3^`aXCq+6z8=pY&V}`d*_6OhRIFEG3^SIB`g=HPF3V17!bUpQ9_^J4T%f019(+0eWogzj6J#yqdIG>U1fD&Jh`(5sFB?&n8o2Ls!B%LmgjoCN6$$OjgA+W!jT z;k~whXeZxwSonPQdJO|AZoA;#7?K3vE@{XyUNEoWE)VmhNoRsnP8Ag6_3*_mHToT!V({MIU5^(D1!b&iX1MVt&5 zYwh_1mv(=}rM-DMUo6}~$u9W7eRK~##>qOqePA=+c-!oMe#M9IL^k{-0-(Pdq!JGW z^FAGsX_O zIIH)0%0?>g1Q?z-p{BP z?}?uxi%LdsPs!w6>m9P``8)zr0>~1FpP9!L$=8RNd!wqRQ}2mDhdb{Xv&RLyw~xOi z1wUiOH*Lzzscjb!r35G%1OXjO+F#VzFE_WW^7Wr8z=0W4@0D>Y4oq6YsBPFqu_6~( z!MYN6EikVa5APj8SW`*@WOkRIPmzefM}4Jh3EELSGylBmD9nR;=rS)^n-Gt%G^?CF zNywx-;ZL2hlD*uY{^504CcvD!I9NF#4i|kQcZ+UC?eNcuoGjJ?W?hzJk(Fw#aO=Q6-ilsR zK7%xwGfI9z6vtNE9K@MS4o@rGz`5qUb{{!an;)M6zUf52-)lt0@A@TP-$GkJtT1gP 
zxu!a86(r0UXB?D;8-}QLgkk-0J-?F>+m4H=NkcdaQ-1D)grMh!0Z?H3YFZOTfrlW;&E^eZAqlte-CJjQlT%6I2DoPUKWPuzq@kot&IbA zr~_pF0^0QUtyA1|R0s|Ubtg-0X$wxO zM>#BJjyaM@1#$|E4=JONolILPhgNRG?4AJFAZ?o@3UvORXrq8qL<=(( ze~EWENrJX7<8RA;C3Ro|!>Lc-`~Ixn;x~>iq)iF(Xdzs~mnMU(01>HGKok2Q6eDJ; zbE|+X)a`DYS}lQeKWzpC&9qsN5EI&rMFS^i&`IP`f;p6w`yVsf48xj@CH+nIQm?hS zTjMTM)SzNYv;wK_GD5O=AEx7}2#j}Vf?+V-0>3KHSJiDef8j<8WZ(89T$(Owe03Ao zsPr1wHY%R?Q85ikFO4FKQ8goe(r|I~gjQof-K=^D*E88${tIvdXu;(k1wt}=j> zT;YaGpJ*)jydRKLq-&#@Ni(W`GQL2|!f?U~^Apwq>m&3v&N5ZIf@tqEbz<19{)4<^ zc-A)zsg7u=sTb65;J)HE%SGd&0S)p-O6YrGwOR}@$yT~jjLKnoH$SxSBE3n5tv_$J zE*|Bs6=1k7jNa%7H}J{NAe8q{e7rT7X?{}e1o*HBYG&N)CEBeldOMQGLb%{Mbl4-R zK3Pz1oMbB*^(HB{zJX34y}_1p!m?Nf3f@HPFm#z@FoamNrkJ6NHfl8bw!WV#qpAjs zA5UsNQhWd{+$f44TjfV8V)U@l<({iH(=AG%NMRsf%vd?4;?Nw-G=ms9wfZP+yqt0X z1xQ-DG7pskYl9&rQ;wOi0u(A5a)RYF&Z#aflJzR&XsWJYb7^wi2*0#M847a|?%`{{ zOboa~sZSb0SV)K~TwBEN5O^heF&LPWqa5Zx#3}V@#(v*_CL@p;QDhXO_HT1f6iWB7 z<&`p^f_YYyK8^SfL@zR1HNGIv#pyZ?%3~YSA|RyhOT;r7V}y0f^|b2E$sLO+t+&7C zcgOv38s6FqH_xY8HJQ(*`gUy5)OPyi-mQAxhrC>V{p==?%}_{=I6Kyh5JN zWUkwwR}MceVwv_0ag@=Ce*)WH^a?mhTql#yIoAB`Pmr;2$x$K!+IR)0JCbj=xeI34 zS~*K1Y6q3*?q0H3V;UtUo?gJa)f+XXF|#yMLFxRLP>q@VUw=dk)W8ylhBTAjQ{hCM zVD5>dw6oYNR`*if;J5XW-RMhmi6iP-BlIDu}CdHcmjkH079>Mi-%X zWmK4|#kNPkul$cgwB5lZFe{BYBNu}KkNkn-WltHYsXwXHv<1aSXr+2?70cyp4Kro< zLm=Rg4c@;P7}vn$dK+$J4*+7Wu-+&`%mv6@h*&hop^|F#7pyK)C4}rs_1TMAM7H3_ zE4M2c`=brdV*6S#$mM#HwaJ^7S9;G!mVKZap#+jmY0Qa7+m2giNP6S^xweAkJ2CG- zMxF2mgOlM$@~c9;Vietom{9B9g8Va0>@_2lo0aQ%9O!1J^7DUYG>NQ66>}eFce#oA zQ$Ii;$>;n0u;NxoJdValus?2zAaggdoF?(do?zV~mDP>9H72h~DiX zpO5T;?(15`XbKEvhSm4ds`S zP&%eDH%^4JN>jLwoRoiqXDf?R8Y{{F!IH9r&Mk*2$sAoHr#;;rXrc0i<{Hcv?;6qN zBg=>wMkzi(!KN!_=0t6i^9M>vf=7uwC_6g?8ck-p-VxE^2hVz6;@O30j-Pjxp0d75 zVRLF8H2Z>lWhzmg0OF$G;;d-hu-95BRLyqG^$c*s1I-R_@0W@V>c2j}fUgVY2Nk}3 z=$B&cMKw*(US>Mh4O-TR`4yQXQL|G3`7fLkM(Vx9j^nZt z(3{9jV{*#8kY*JBbu@Y=AUK~a2wL89nKc8sIfjq+x{XFe@_qibkyE3VbhS6R5{{@V z3Qj)~%nO;2ui`|;cJOSQzpu`G``r{%9p14hG>RZc)OWP610}i~X;Bhzph6BW!raU0 zjebMS3hB>8#OL;KZMQ4=UidLNbsqDZ&}&5K>W9$0N?-A}zOuoq>PySp{5js+wcQ^c zQ_a#$Tg&BwmuUJD;6igaz)Phl^5{DM8p!dMk5N)BituF*A&7a(;G5SWF>XO(a74jc z0mm)USy}21G5avuJmPe>7pGA_1nv$Tc@|^F^V?PxJq?*20!LNW<&Q;nv7Y!)s~M&9 z+@cH zPJqgIaq8!ifsxtEtnJwa$}zX(34|o`9Hg2)lEgQLuxWug;v6m>%(GJQm@yRW3;e>D z%f=S7+UP{C2m-Pqs2C%-a8LcDNj1#c!PWAxV&@vaB-fE=N?bGlur8fZ*NIA$+5(eF zux(nIv|_1br5%$_95^&H-;B#=m>Lr9@OCyz%7K?okx}|P4Jri}BCCt2+MBe~mrs=A`;geFw=40dx zH>h!;VRnA_ZvaX~qK8?+a8t))7!qwk&Tm052IDss&alNB#&BkF7)N61KskR^_*a&Q z9oYb%P1MdrCzZ#1JnIZ?eRA)jwzO570^#DT1KbpOami@(h|~4rcEIji{QswQHUJZ% z*x-LhXTz?r`v0u6jbI$}9(;_vHc` z=MwGZjG$^2XrSWwFQ`j%UCR_&zXu^TgKZzwv;l6L#88{l`)j5kd)h@1h68$W!f(->YketY-4G)sm7$fEq%Cr6ohey=G-#WA z4@Eb>5lR;HQcC%f4jv3dWg#%Kqshz0`1-(T6tl9`*GI^)2JN)}UUn%YHXt%aR_;ob z1^e)=dbmGuc0;s7G7*c`P`F;AgM}dbc+#Km0BJTv(s#&1u^P!q`u00&R)|Ekn0Sg> zx5h5SwqS}`VTKR^`@ux+wJO__Sfwh9h7pn!<6802i!lTjhDh$`T0Aw_-DDE2Ao2tv zb-N-68u>rzkxde`Q(x$tbAlvPGKKqNR`|x%%(Dr$hA+M&jdktFW?0Z?{`I@S!W3(tVRB2Ds zorxCufSXltz#yaHlFZ3QD5IO{K6J8WVM8yX6)ok`d5(>MLWf+vqLEs^JzO8bsf67( zL#S3&nZhc;zQB@&6!AymWLjISL@SyZ^tc}sjruK0F4+Am>GRMBAe_sh`IsFnsiWw~ z;JHna`<){YQZ}PKB}FW1@stW-D!O4oY`it{(W4>HcH;x3Z_4`=6zGecgP@6;=XG~K z|6Gx@v{{IP$0K0BFVSWhpZZ6zt=Bh-md%PhjMR~ z7rRhTt+Jbf5b0&~f#RxSK>Brq&8N|@w==e_^@;yxk0QbseMFqZe?=HkElta?3Fo<- zPjEe23#pYN7X+(XS4X~^$~iP>mxMfiM!$6%!|ba^S><0fjS4fj2N00+hCOj0jVL)$ zlPFRp52J(>mQ34FplBf*h6~7zF|cKN3}69;ULHbAMj=?ThQ{qp=kTIs>V3rNkzV#Uz$-I$1(@;K^g899U16yi6n zzK8i}ev2l|W1!B!X~Jv=X&GY`GKT-<0D{VW1f?d_AoZ5v6TH3aPtG67nY@&11y>%3 z3XVEI#$fo#Q4RVnp&C*4qr*LsMJ>2K-b+ 
zcWX&6UWIs6-*}MzHIhq3f?)j`vUXe@za22qSCE+VjF2TUh+_ARTQP-J69h?n&bZfZ zLs%U}8CL5uD3Pb+HT-kQTsWm*bkxzGMo?8`H=_;)KZV-_E4%y<8SRx^juA%|E@FFZ zh2BDS5fL0i@oMg)8!oI&p)>m9o)!RaI;&`BiLKA5k5t~z?Rr^V0xD%uLv>yvpCt@U zB=Vi0*r){6dBUwpXzJ-bVh5xNjs!V0FM(bVlXnbv;sA{y{uE?NtvrI0;@fF;jF=}z zs&W-k)yjUzKXpVY+a!%4(T!f;f?hxp+lldTy?~+h;cUmD^^~L(~SLhj;u&tBVY`TiM+u_WWU8oLO{o-F1y#dMR@;2NCFMaWRcE5a)Rw*Z|HZQj34yV_ zOyBjak&uZDG;YZHbm!=fGp7xGQx;Sfs%`MH4Qs8i!@!cAL=zc|lBf2qOg)oVfH7sG z*qUXJTPE`flyX_>A-5VJNA25el?k9uXaAUZ+r zHMZLe5s@5aMzBj>-ih6Ns>#mwR01<^YuWlDFuO&!7Rf@AecT}8J5oI|=im?3kDNtz zL-f_;jOXdLR@INjOO}*N zm7ZZ_%iCsw|J2$3hjcbSa&JQwLHS->LeSfu{|Gu;tLXjzVx5gT2PN*>57iV%(mXIp z*CScwCn;!hOSZt zSsZ62j~vRgY7Z{zl3~bN(U1(QL|gMM_@z`|)|!?l{JTITD|~=8P1f+7jpQl;hOe0E zD~ZdjOqh=-*C7N1kzlXkw0|*N z9!v}}uwd#4^y5;w?# zZDj-Xc89btE*S3pyoJcBhsO3DhJnLlO@%F*z3Ig^Ex%PL!FJSb={+duDjGM*J@C1S zu{E8J^tgnCvLqHnMLAR7+J~i=kjQJlUv%D|f(DXo;8^w^tf0>uAMy6CP4UxlOqe)z z)i1q{@ERL-LpP?u{_6GX;;KSUG=o+5yIL+eENQ6;E+U2!g@GtD;{)xZ&8vIqn2%C(FK-4p!wbpnk*q97YN~5($W{=oj zrvTA=8XMVdihuJJAXKeFXw1ojW9-{tFykv%%F&52TRt)P1ynCiBT`*8hT^DFf%Yr) zO^+oOt>s8hQ>mps0#h(o+O%IbuTlhZJ#z{WLV93cJ1S&D@`MYsf#h*p1XUr!Dok*G zl7tnZjKCzrYk_c9Mh?p0rrO|ZIlywl2i)m;40$vQ94Y5|KyuW+fMSOAi_N|Lg`;Q< z8La03x7*AkRLckMXb4h|{iN~sx@45XDyV{xLv4fU-uw^KMX}oyXESlP)(Wuf&O9*? z!ZiW|vwH{I$Ta8h>O|y#pd4VUQu4Y()Z3$ z>&Y*vK|n!BF|7p6LER}>Ijaa*XS7A#A%ZfypnD~#T&Tb@M?-n=8h*g~sIm@E3Dc&7HE;~}8jF`sKtYM9 z#$AC!h|3RRXr`W3^rB|o0xIhpbAN(B)TlIJi{t0g{^18kcYm=@ks}F43V77YL+TW3 zgQ|;W&2C`DL#;{ykhXA{B46`PGnvJhDD6K_rNffXN)^tzoc_9(WZ0~lCN zwK5hvZ$w_L;`u3SdN(sOD9Jy8wtoU`{{-6pj{t2o(k-E;{k!h-nL0qHQ~N$51SfOh z5LyS4=-wu*2o()fEK1WhUxsnuVa#qdHCm`9bR783HuiTsn{=3I+q|HO@@l5P zT>V-I6)ub#yNUXT16;wDvg?;Z21NOo0%-)p;rUPAT%CBqmHoA_2`?QYu&jg24afpur&J&$nh1VC$cO8mzNQ8}up~Js zWE8L=11GAR6I6Al74(PNt?p8$eZ zho=pLM*oOY&cpf6@cyjp%8xy^!5e^WA-sGX^fC(0>i0jaocApt*=vM)}|A{02@9fc*lg3+>;g=ASeZ5;>Xfd0hRYbxI>fWf)~`1fvXA*rExNY5ZAN zT6V%U8cmB;pW2j@U9|nB3cxL)K2JpIOeHxibh7-fF*WH@+41@O2l96M)+wZ0KNF|9 zq5-*xEo92HzK|fZpTE%bC+<%=WQh%hR4=j{W7`hyMXl0YrE~bzBh6xHMWn6XPhC}t z;rP+(q*g}zB_)LdzyA~iy_$71w$kOCpNRHlom&jT zOA1PX)|Vk6fdT;@3pkxi;@st{=u<_UuA4ZprO?d&%mf?3h$s z4V6@bL%#yr^ir%sA_6Dob*!!>cx4OgL?tO;ak9{dBsDf(T)(>QbixjC)oo_%e_5M1lvnD)kSCQGCvKy}d3 z3Wbplp}q$jimjGAR%*%69-|xd^PM-0{aM@!AMEy`A=5Z|F+<* zd72$Eg${p4a&kqUjxj+Kb1AMa%7s1ND%Ap#0!-YTA5O(r2Yt-=rHQenu)Wq=hs)Cu!k-#!9OI&F6l1`m0sp*SS_EXk=2_>U2LKs-&GSrP1kB&NRd^?YJ5Q@ewW+!vy$<3|D1I5$zV!|SKZTBoeeXnA8Quy2MP$kGaj70<>=N z2O5aAbxc!8M&0uLl;f#Sq~Am2(M=c93Bg>|=mIZTldx4x7>#&6XG#Th^fF=Neii^ zI#G1oltW3IZ}XBX;*>B->QY!W-tyUT2Oj!(mMY8;M3vJXf9&%us?;ftV(5TJ#D4w6 zZ1SXX{wqjlc_7jF7Dh%v!~HOJd+7!}h33m@e@c;6GH#$gs1x6kb3RFSh{;A#^2EbIsoL7J(dj)xivoEnFQf|!c6=37+9 z#&r)oYuvS_ZNv2#9vc)EnoW?Xi-;&iEIlR=nCee7(kw@e_&EFO7=xhqLnW?pLDkwk ziG!bzbHn?Q&8|=-lf(WHf=wqL7h>koHpL;7c%_{{eXxyJD1;$Sjg zkN{ygJ2H9JEq*%C<#3he1kpEdG{!&DB*1AhE`RYKqo4tI%6M&ldd#x-U`=2^bt(9M z*^q>#O`cwyaLY+(s+$<6l=&&nLYySykr%MeI?9pl7BG)yPnT>1cj$zPN=?K zl(|u@Xf(`$o2z0ZY$4IG#_~CoMVBZViBq;gw{ycElSXkhR>c-kr!MWvV+h*iiY2TL zB2eW-oJe~4x8u8t+Db{RBfH3gYKF(uAs*V=cvBcu0X z#|$Wnc_M2)izoSD?%b^^4bfY+F_BkSzQ5}8fEtByVP>gD3jJgJt4qBgt+ALM?UMyD&0{n`?cG@QVt0t}!b%_(nu7 z2@<720Bu?#W|CJPX|AqAC|Yi-QKKv*A~5I?G?g)=s|vn~0|$r9BJGXa+nXjpP+FuB zN8F#Q8va!v(gZkD`cjE2-Dzf}-!bC%x?bxNw{R|w)}r#9n${(KXr9bw)!Trjy&FCU z!>0j*iXkCgk2jY1#zfn|bXyF)7Rx9zhS7>1OHd|SZAP1JFcBhJ18a!vZUybPqu+6| zGAI0JfoNeEXS(A`^PfQ5|D`}1q_0&N|H53u43sdFIDADwgu$s&N)0G%5ayC;HwF&x zj!}7Ajahbru%P%*KE7bUw|-=H_@W#)2ZiL`Mqyvmv)o(-E8p5ML&_8$jdG!~jr;;4 z6MhmLEo)?BT4VI3;=t6Phab1T)xRBr;Q6m!zl9Nj8pIFu>*OY9645S*%FYoE_AFe1NLpOdd^ep?^#jG3i`X#- 
zX_F{+Kz>nZtoE;wI9tk9MbNFRj&uA|@v^!JJmY_$NrU>hTb;+~yK{Ie6sC1cQetx>IkKuhJ+c@5~YxZeES zlu7r=iY?{^A%+KrQesL^e=~rg6f{s^rYz@R?pLrfodfz(NH6}14mEg34;E$wVipVX z@fxXn;p4E944TYQq>RrmEPRwN;WLA>=7P77lR(+1fG{eT3911BdgSF?zt=D2h5Bs@ z5^GA3W#iLmpAk~PFR8jx7m84C3QIN%77SJRGllr2%NWP@&9g@x#K15`C z_$oe4rJwb)Efg>yMe~}k3M?Hn!r9?f-ia6w7bikOD?1XvSubs6epPGq;lqfij>a5U zg1qnpuWZEtn)z#_2azT=l1{njx!n*>@@vt`G(P^UL3Hdx<+P!Npf##!_&{k_Ix!cK zGL+Uu#3)?eaT;TYpcT*}6$~mU_^!{kF?wa--KCOUYT}#UzHz>YVUcc%(Wnsm#!-0S zh#lw>C&yk74n8!;a#-H3_6H|;E2hTf>Pgn3iVs@?_xVg#{Dx4+jQKZX5~IDq`GRr2 zc<1i7r;ftJ-9J^o@qhl#vO@UHzM6%Y$>-hL@o-9To?$hqo4!O*AjC5HhOq#cb%sn}^f=c2@Vp<6`>eaPc*=M5Q4rtKn>Pw*EaF6AL{wqtY{Qh5BVo1=OK{&6o z_d|VoYL}<`e0QKn2Ht$n);frW@{jwQP8~2dh%<{9LgvAI!&l9a%jqkJEuOk7hfb3U z+!xJ`ov81%zr1%>1eobk5~=rx-u?ns96fHM-l{*8mOhF$KR!IK?)19W1eW>vkhEJ0 z=^sB4v1z+2^fx)>-q8PKh;;#vl#X14t#&kK=xKrz^*pgV{( zoDJ_PN=Te5r<}{LEx(MJI5Cpq8%E^`2bc?|W^l8_=$6*7L667+r-w66EZ`_Ziy(&) z@29wwgBOf3gY8v-$ZWDd{A1(~UeM<>`BboeLEJpKf@G8b>`$UA`tT~uHUO64&C8+VC#~yx8^K@NxtH$AtNW9=rqmz3V)V^#5foSXDUCLy{g;Xi zk!c$Tx|xbLeTr2VtcG%0Bvk&sf;vBf7z@{i@Lk?0tewUVCGT9$(yE+4T8IQ9riLy~ zW{3W@IhR%QO2W@^2w~dpqQ41aY7w=u(!%nn^XEy&HMKh21S~HZY6aM6A^3N_dY0^i z5=Hhtr-^^fDVy}`S`I$b8vV7bWT|;v>FBCl=$BGnaP?uX?pL17qaIuJhoUh$zj}d# zr8Wd`?D@DeMnMco?(dlefEQbdKXcW*yxIeA#I|}5U#m$(CQ(8i8m|r1`KPh^kIJ2~ zUV(g-{Bk&WXbEtV5KS|V7&)loF?XJJ6?d)yZL!uh(M%#g zg(zn>vcdsKa~}8k_7%gXK3!iBTR|phK*m}5b*Kk`XL3%XX^n=me#D3hGilVMBcvG44?2o9k`P{}~t$bQ8J>z9aK!-K`G~*RL7MKII$Z z^x#{l4hl5ESxSAhEycpi#<>HXsn*>k^&$yO5}oDeSI#TjTDA5I=As=CdT0$ zwWd)q@U5idOh6r}uw3)&NwFhS)RJB$>{%;e zUs2&jj<9<@_gNb6TZtZZsX%m|*{UnWJ-m`8gwv+WSm^D{&KxBg?*osxa8&Nsxw}@6 z2&qk9m;uk*2ggkSWXo*569eI#rOFGR@p8P^Q03ye(^O@8CfbW1GeD@A9)5au8u3Lx z!Vn+?91yv@tCyeKMFb99D^3Xw0#2X{m@&MlRF9*pSMP$SNT~5y=Wc>ZD9hK05YQ!e z?ZM;c_2|Fd9NV= zr{lcr9TE`2N#(_&$lN@pvo0VtfU~ihi#mgbPK>77yinq>WTQ%^;VT5=A`B;6 z$t=OylT<(FX0gYiTQ46(?(k2J8evw3=@v!L<61qaV;p*OgAXSVAuds=bl87dBck@MkH1kdf>JVb=m(F``q$w= z#Up>2CO956vNQGB(PFwAEw6={khlbu!NTozBfpbnz+i5>aS|LIThRkjyZL(aH+BM; z1pqwL+``2I7of_C3spz8B%T=HbX2U*fjtglZ@pXidr!U%ZrjWZQ!zggl>$9}YA`YQ z%7IdK6XiIxE>(0QijSp!$puHGh=*Q`>8xQClzms8glRnPv?Ik+UW?83j$(fX$VBG_ zUS<;!W`uVo7Coj?G1>WGH(P1-nL^qyq}Q6$QN0Qe+r;ZO+T?0|`@tX&xx81m(vKdK zxvt-@8c2Pm04IvwAbQ6a(qr*t*MQ!6%<((jSoV*9mHdTt{WQL$W=^eoj{Fnd{)$55 z$ctenruxmZpaut@g_Gj#A+mQFLa`O}0{&gsU1c?gYtjstMr5pd>={LP%*=)a6)^KU zU3${XIv8?Q8Y!@@eIlRtF5C!l$&0u&$}vmo)(8{DN+|dIQq`bU@7oK7IZrR@l1dnW z>{~UP=LPEYk8iUW+Xc9z_r3>z32w z<1*9u(0Eq%Nn{gXiXKptR01kV=aQ7xG2BDSoNd6l3%}9byVgcpXm;Q5-uIU@mOW`} zPl8uyBysUNimQ-;!`=CMDv7+jUY)2S4{VlV4gb>Wkskw{w-p;<`G8=SR^bv$v8L$) z1z*3{FO^(Bdkk}w*!{&M?W>qe%vT^Cl6K8I7Vm%?1VVp}M$7l9!U;59z0^EdYIV^XDyzYXr zZjIkGC7mS;;tz5y@nqR!u%u-))b)!oPWIoR(djCP(*{V?ZDm_`cC)sY`Vk_hLpWVv9R{$4axdX5g8OR*IRLZ$Oi zA|2)x`ivTxg8hZi>ma!G0b)q+$FemBre|`C)WQ!m;p)BY%yacFTC?wxxYm{PM@Nm! 
z)M@nO>}a?Ni0a~h%4(OY6m@X@4wPK%9JQUMzU=I9*)46EJ~7@7h(dO_O_zbPPG%M| zHUVydcb3JsZOzfG5lR~fQS-avb=6;A~InCRHpxqH@`c5Bv0g4mZ)WruLlnV4zQjFlm2hgCvl z13Huz*Xpgx4zrz#_{lBs+op~GfEJc|Zc^eOvK8=Vn@S)3CXJ^%_xw#v028RR2;+aG zE#&mOu|(z}J(R6Ff(osmQlAId3b@?VDOzd~Q;T24AH2V0qEGUHY2=;2O=9*%NEFl1 zW~DD?jIe8DPGF2Lk3VfMZ-41nsX~MsFj3nk}yo%49B?et|uf9#e;_R zU{ypvuck$=e%Vr1D7eyf#yUW^56@_Cj`ZG;L^SZXn3~Ve4qA8M=q+sv&$PmRtMo2c z>Ed{>?uV4yGF`y~E`IiE^<@v>5sp1$CPA{L?45H7e)*)h;mrsQr6{}8mBUGZIC1hw zaMh`%vrMQOv)?<@ilFOP{kpgF<&784oM)G}Q~G)0&AL5ElXeeX zk}Z&J%^tZ)dw@ah>J2}!>~Wu(|GY+WvNF7~%`j#Z%x*D*a5O*D>ZzuBzY?I_cK6qS z;(EvNrP<^WKlBG7v)jiNg9PLx0)dVie7o|8G|d|W@US=oxAXPN~cxK-F<5Uv;A79sa~(~=gzZiZC~XlDSA^ukivZb zdt|9C_qiifq;hyieZv6+*zCGcJPs?6QA}{od!Z=cK~Tl9@7=Zv{21gfsoM9Jc>ehG z0q$Ne!nyog32wRw7(ddUHpf0p-#Ko&Qa;9CFHAZ=4=vmxsL#3#pwr=F;u|9qk zy^Bi?gEd+@kc@V6~CVi;lRYKD^lE z!vgHosjpAu1sFtVv3vvlGzrYq50R3op%^sW>?Fg%<8cu;9Q|zs6Zbtuc{G0pQij?@ zkt*&x0b@zLR6&Q?Uk4QdEA<Cy+igZzFQX;)2;ySkN_$BjsDK_Jnz@m`o2HD|IS*h%|T6T+}ZN0>DZ0c8$0!ZV2qsaD}5$dQ@bHr6ur?Rv-mFoP)9 zJIs7nPQ{$N-EIC@?9?q3TeuX{8fr4bx-zIc^pqddy{U2D>lNkN(DD*jY1~+w2DKM^ zvFqSlb~MExZ;785^PhJHKf@TF)z zV^k6h27!*%6cdCsYUn4<9D3_HNZ-J>s#F0{zw=qg&X3!SBw%d6!kY=E>TKui##B z$egobqqskW`^C00Q>>o)OvI&iF3q;ME=Ey7illpI#$wbIQ?Y{6dQN0N4@aqIk8k@~ zCgX=kRDS>#Sc?O)U|AX*WC*DvxcCJb5$^AqRO5oDyE@c92E#8`xZCxy9XkjFW0AS@D+Vf{&zLo2f zmfP@<9FfYxQ_g$1EUxA%j6_TS<6hY@;Tz$C)V4<99Tli+1-q6CG|yU~NfCO)KdYal zZuETp^i~Mb7iXI)#O58ZIq7Js^_-9@>L zG@sNzX-4dj3C$hu?hSpNRe2`io8|{AWTHkKLO$U^X&X(2m@++nQH?g>Revp`{_j6a z9p>tg4+qyQzGpT*`JTqL!#!}SnU#KsQMwTyChV15aZ358-}U%+PvvN4_KxN!UQ-^V zU)x#PSj&muR$W9P`L1BjsB`A&@lH)!CyFUxLtol+jc~yj$iBU_m79LZz=<%ElPMJ^ zudD?>zgBc`E8)ef;)BhIL8isNWd48xN9DRYA7S*->9%hbwvPY&;AonC^hRn9`x7zg zQq$A2=?@>zZ}pM4Nap*KmB9|_Z@s^#L`d7+K5IsO?zZ-KseEVvG3vqe^P+CQs#(#_ z>#Z+!wqW>FGIy{YXoNzfo7MV>$~(z^$P3-id=N&h)IXLii!_DYTw5+iS}d=<91K3S z|LkF^6xP}5w9wye$vz7v3VsT5+a{os*Zp&}+62?w4fk{k1XmHSohXy}hT5{986jC* z3HsZ;H5LzZx&=(pnOkF)fPn#lYNQ9+j=>F#v1ChlrSPAhfP+VtPMs~-;F;h?OKPwe7jVmgd$W0~EpPMKg@+SuC=7_LN9vU5=Guz;5E2u zoN=dRX0{%QJE7DXwFbEsoW#%mZ&XI1Ah_^vJR>fT^hnVOD&i%LO%QvqOTL**r4MZr(Xpu$*Kt+>~q2 zJs;XOai^Pa?O6Ni9i_ZFI{ovGyr_6;RIV|VpfGA3*(fUh7!dtpP0^OKO((giOqYmo z-ObjlKVdA3QXULgWW|93ZuF-`UF(%+ske&%U~ofcmhR|d%wm?MTSi+X#IrUAb3bN8 z2X}kU->Spsu3nGijO6(Rv>8R^ySv3MK%MoJHk&v51?(a^H;m_+E%bVIw{)3`an?_` z4<)`jX2hdUSk}Fc3IUyw+z%AlmxEh>QG3*wn*##mvNzU?;_w#^dJpwo+dy^0>~Q-i zkL*BUZZcrR9m4gwCLMpdhQULB=$jhsZUltZ$KvheXcU8SJ9`?YR%^W4z8 z@h(1lZ-Cx8#S2XtQ;xs%{}c0(@XBhuGv@_KLtNGtazAmjo}drOQ%@f$8t#)^S0@ z1NTI;{IEQT5cTua2_`Kzvu;h!@2?4SFOk#viBpH&or(;pBWHzNrX~-CoN@69MpLpv z1+bQx^@3VS7fMoYDU8uLN8REzr0E@E1k^E;=OYiL(QaMlsh^#mc9b?~Au@t*Uk>VP z(x)4|{If!gH$DxEbMcZNHNce%*^X|^iWkoOrK|p2niAfD)!+a3MY#h5npdv%{Y0MV zsDFWJvItB&WObE-l5HM-_B}y=U!{ndkcv)H(W~i`(gn>C23y+&_K33}tkU^Ey>Zpy zTpzV}&xIOUA6r2sNtC(Dx!n~;`@jGBVk1-jxZJ~%0Iijr>_!t;Dlxy~FnwI6)m$Yo z@I+3`C+>Y}&57fNw{IGZn@Dx8oeR9PU(5|1rC)ru2hJQjb4g&xVDQYXV#fIXk4+3K zVH#Zy^XjjVIByAi`X@%!q+EvA8Em|WYSq~Sn^5KU!&W=MA|iu_f>V^o^FxQw#!8y= zcjd17mX|a4haM{u6FfwYd!{k{dHC$|(5{O;sA|y*Q%y>ar{5lzak4pdDOu#TL+o*QUUB%PB5{+s zAr*sjl!ya<#2TPiDnmldyp0qPi+;X8R7UBnv%&st|3=?q_UYm#%TcP+onFK>Td368 zNoQRzg%8%EkCWq8PzU8dQbqLQezxv?g+IPn>_d`;fcrWG3e!KI^9lKo)lhLwx$xON z1;X%E^C6iB6CxVV3IpN%Tp;=bp!?%jIAs#H4h=jtKhi}iPtW-`Q$jlB~%2d$*W2K5?8%szN99w=Xqw{<`$M}>US zimt2kel;ZSi``5qxO`A}^z+AuV>RETdw8$XiA)+(5+A+lB@2; zIp$Mlb0Rzb)rT{ZNCNiEp=`pyEw8jfH|VbJ7lcdsuK=k}Nhg zWje@w_jis%nTNNaD)!KIZ_A%0f(o}Uw{vp~?8bXuHr-kmdFh^KU#K1=wEVm*(C*Oc zp^^6esD$EC1JA!={OhhLez2B#aZ=^pDJ#hzX(iW&G*S&e6fEBlV@m8<$j&3XzW;b; z4F2-&!)bu=2xHAuLn#+<7`+E9APPaEGu 
zlCOxmev?rb+mG=^Z!|XTXmY>ts$MhuuU1R6eW-m?MV*5}yIOsI{U@`s$5S6!!d)>A z8G)yF-ZnvbEstlJuDgZ}jV@s93txGQTs&t3Zr!E_-a5GF^E;)!pi52*)<+#w^N#_mH-}BM5`&!N zS=h$j$LPgHz{JCmfP?=o_;5NV)OKRCwa2zZyXKMo~}(zS#atA0w4 z6cbz%4zNG!t{9pFDm?%(7sQ?FUZi8@Jxu36*w&aIDHl!p^L%?rmp(X;&ChGoHH<$FtYgvJrLB$gd%Y$QA;Ec`br) zN6Jc!mlJgCkJN~?IoRxIuzuzDr_l|Y{g?~FY6Et%O|@zXh$Qr$Es`t|sTaOIbNWV7 zXk|^5sjcM@Lwi{O2>~jjH(054-bOXFCG5SY`eyMXtKTeZ$`DC@#-Yu2!;@TMyrUA! zWgE!WTkuc#vKS8O?J|(a_|BoJBzn)nWW)9zX~UfBqvsU*Uu&5ci(hg?3jWa*S)3%y zIv_5@>e-?b>Cfmyttd#;mkfm;e`H@e;<<~T6-49dzZd_`!N2R^-)G_9{ovm{@!xad z-{bKACrA(_b%e)kL_rxF0|SjD8AGGU(Den`B;H0NO4~3Zd`-_8Dpi@qqi!tbq}CRL zlX&<67ZQ6F*fv6OD6|Z+3y%$O8)5CCJd%i_2QA&Djf=r#WI(Np$RcfL2T`!?9Du8h zV!J_(xI{3ghp$5xkGO0WlekoEK@cy-w$k~0hKp5pFvkeSMI`}SE>T-}5^r03UX=<* z8R_clL;YW1*sJ~DgFZUcLc%=K1^sSvSd>N-dy23uOXL#pC}6wt^Bl?2GKtvf*2ZNL z7V)8IQeJ+iEwMF%!GG(pqH0uF#5RRSo`va#2Zn4jDP$Uq4#onI?R6F`9J;-pD~gKX zt*x(67`7f@*N#$du?TUWBu@_#xQ@G+dzPckV}HqOC?1x69BDuA^my>cx2M1=T%R3d zLTXppZX@=fILu0HzD#RGNKimX(Dr<4Mx{$`6n|bexBC_mvcw@JFmTd^yd!zBwnwMsI$Y14K% z#~ltic9r}1maGqN$+M~d#l-sN6a|s0ZEETQ1(@r@u=CpkSSKcLx8b+ye58&pn>tWy z3wF*&3W`LErBMceza69uBYD&73$*;!@QpQbQ~28JjLLja6gL9}gRGa@ntD_crSm{g z9(B2qmd~3OhZ#T*i!xx8JFAtUirZ))V4bjtc(_R*ak&F7WI;t*zL73}4togd*M5$} z^tjZmvB1aNXinVOWvL<3yr*FhSCV+|t;a zerF+9gs2N!nTbYQMmfF#Oej#uaSM9Eoed8UEH9|Gqf$C!4n`H&a)DR`2f6Y>#nJU^ zC_%&MSd!bY_fScsJR6sTt1M?|>@qfQY_q=pUPBgg>B<44FXuK-+J0rp86jiwyk3GI z0*tkAMjCSG76>`A(?~A;YXZcR_x>A>zI90mM8Yjj7;o20)-RX&FfE&H@O(4IpFe{k2d#CxrULh`~hE6y1k>K^5Dk8=+yq z^lC7-Ayt2$qMy4Bw!pkC=+-f~Ylp`ye=60m%NT=>x9MCh6Q<|+Pwv-+-mFsEj7;{AYCE%VdG)f;`azvne@E|VssRSMC@Gdl6ubME!P2)VYe_;HCY;;e zTy*KTa0bB+r}2CPzF^i+UO(~y^L8zsch5hTi+JSTz%Q7`My3>8%u|*gy)t0%I9(BL zeFV^u3{h_Jwn9baH4{?XBZKI9u1?$5xQ{Hkv~~Q)l$f0X{8#Jk3&a>KbHpNv4o1Cwr1=oLNb0bk6Q3jiXT9Kn$_i2h6xy^WF- zv68;~G3wU)8Y1NT#$4+)mfv(LP7;IW96mrI>5Cw*&xPdu)oLz`ygWv1+p0Y?O$*`; zHMlRn;4*~Hi7lTWLH!|ubgm}G+1FuIXJByN>dg8kMMO1zZOEZ-H{vaD0b0rRpms5T zS*tTJqd5tmgU%ldgG=K+vR+3~tDi2nMjg4qT0M8~&_r$Ar1J+kM2utRImcl{OyFyQ z=Fu2hmXvx^C4X*QLg;M=1UhaQHOlPUe8kgEWSkDE;tjs#pwIXJm!TT5m(hkg%#$%%v?#&?W`H* z+8As8`}gk*{5u2x&cOfI8IZMNG@`haoXVs`K6qQ$8bcJC0%7YY9#$2`j?78~xBw52 zl5RB!mj;7)Fr(bu*3?B3u(mc!bB4F+>PBue7f{mN4SwES7Kua6pqoP$$Kq`j=^rg2 zQ}8Gjf^>d!Q`WgPq8c*Yp5ugZAE5)B4FJTOhi%P*VeDm6uDbu$Jk7Gd-@?VQNI`{` zA?`?0B}HxT61k}Z)QV4JZqo2z7^@wDGeWWvjWA%t#kvm#=fk~t41`TV3^vjdQTm4Q zmB`4AIsCkbr88%BeH~9iV}O9Qxs`Pao=>_@lk$PB^+g)6w!OZns}!}tnOz*i&sX*n zzrC_$TgoC2X>l|iCo4}mc7VeA@q=oHth$><{u{tHJ~|2Q4d*;T+>s^D&PL}TC9-+F z0Bi-2STVX?1#2~dM6q!mmPyWr`g*$hdR#^~2B&I);^B=T+_`mv2zV*25qMJxnFjHQP4utvFS_&EX++NrX9w(kTt7R?ZxaS zN7Gy4u{?H+9-f=RN(HvRCxp!H!E*wTfIE|BF;xY3M$oHAJNj!&(n$r$oyVYVCb zWZ1*XQkpB$)M*vgzBoWKjASg$swRR~|KDKFF+S!T`QI_84*vgyInUiw>-gF659WNo z!N;7eso$9MZj+4s%7BYJfV!N3fYGZ#>`4T?0>U~MgxN<%Elxk+J+8l~^h#A*lJh*q z@vC;sT+aKc^m*?H?YymgT}E_wNEi~v$DytIx@_KNQb1Xw1!tVT$gzyEn#%_g9?rm` zKdAwTc(VPL3rk2LDr2}j(!+72iq%*=d!V$Pl5dcLHn!Pn4B=x_{YzR zn9R-Gu6Yf#%yi&!_~5Vld5{NP7bDwi6}y?+`JbzJtR1AE94G^4Y~~ib(RGus;s13# zaSSd=4txmldyP#9|C8Q)5v1KU-?hna&Y2FhZiplYT0{Keu;$_A^yaf5?auk4%?T-Z z4mmIo;+Kf+qc`hP~HfGrQtX=LYWM|{^eoGF)Ca~27&jX~m#iPV!c)*EfeckoJ*0;<|Dm$K-S zGqo44baW*;&h^*aLUTlyZID{r&X^5WK)#K-PkgzhAm8Wpd$h{uv|-Ro3Y%r7Az%>r>P&5A=#BwN>mP-gzt zie~9^=S!p#KOLfQi<9WUTh8mpaR?t`ta5MrJuA*VVmN#M)TG=xxn-f;h<|_og1`+; zr!PP*a*O&)#)0!@S7v4zis;`qr~cuE+q}r*2R7lGxg}dyc=LQMhi)VuzW4e&r9v_L z;AB$KPU*M?1~M;Wcc!~x7{PB8qFa|v$&9>voe=-_254Fat{X$#u=_UE;v7$YgF~-dlHQr zw4CYTmlFmppOE1}R!fq4K}*#tzsn-O%QU~sEVeLy|2tFhv`a}U=bp(_3vEyLyvwOW^X+0%TNIt+v^>l^ zpL$8P8&FK9w{F1;zHtBxzFG#*!uO^Er8eJL&_6gij9RG9mrZ$(S2en)?|ulsKpYdW 
zSopwFdC?>$y)n1m*$oH3>}#m3*?V7j^+I&K<__<_BWWLSGwZ50$)H`pNq5ae? z_w;6nQPK)s+qDkp_?&{#)E{`eJn@nYmSk4fP~cp4jnW&Z=#_Jia3_T|@bwE7CON_g zF~YA(DBm_7aEhne2jLzd#PId^5O1C0DfT|UuDpHl6`diEF;HD8` zr25SYw~6}Kh;)CP8r8llIbmwN?Lw3>6>eko=Mm{{xSAFF^f~3&5}y4!KStWHLkHG{HX)+`S|8#e`}K|mHwJ8|nAHUf&NOZK zrDPK~Y5eD7jwrhJ41ds08SoQ6VKRnXR%SHu- z8H|{2Y-deJNr+2e<2H&b?$OMChMA%L3QZaBm*ZrFc3<2H8I9`sV^v04b{QDdUjb4@ zn7~h0u{l_%zat8;-24NNziO>PqstQROd6K%qM4P1JVg6wbUqZft*#E8 z8Y7?SbbAwvXaNN&y5%#~b@=`Oyv_A7|B>&-aIf8?W@{+jwN^gLe9J~{8>s)RbfcNK?k^MD5LCejW3bsB&?^R}_ zQI~%R&grY(YrUHr>RRk2wu{x@_ud|$7)lg;x_5QaFmJvbLQ3bw%bo#+YR-G8< z;`em<*13AQ+;%}$K%A!|H>IT?Np#Ic(sipttjPl8z$Rn8_}6Nh(#GR0=P3=>462&U z#;MOO9~yLU?aCO_&Y@$|-ARnv{grVlKJyV9wci`C%NuP&Sg&A%mzVttPjax_uKm-< zFDJnzQy7GxPo}}j%WkNbVI;}%68Lfy>B2G7q_qg7+ZA^olOCwC<~~fXRx;RM? z2h$G^h~Q9%>9Q6?!z~#MbgWN5T166wpTm6Jghy$9>J%RZ0>#lA+c40y#EMYMw-Ti& zZ3hcz{_Ney7vDhzyeb;jUa7|4E)IH4*~1HDoV z+mwrbX15$|1Zw4p+y^&{?hQE$RMHs09N#V8@3)f!OUV6iTfVPk+RFUI9_o|5UpAak zQ`9HSWSgh0hDO1y;i@U!1Ydfv;)6KVFUvBy` z>1v@kN@PDd+q1mh=Q)@r8d7mLec5=ftrT@b6eV#abi9%vRlC9Sr8b8ilu>y} z%*~9#*sOX4IM(sd-KoH<>@7z5&VaFPlA0M7kfTd#oKva*{1yLUo4ZM(UAPDy4!7fT zMmewP>?Ke50!Ok@)ToG}&LIEsDFZEer48iFR7t1c>t!~X`J>G1bhKor0+}p4mcB@D z^Pq7?8^Fp*si&9YpMl}G!*l-*qeg7`IEXsdM{h5&RTr924wz`L`oSQtb0g>!FiZ4X zqg%&KS@0vIT_j#xzJX%0?c*T(pm4&62-H5}X@!1cX5%aaapPyheoMgeNyC#?! zt)J=Dtwxl}7=P(AG|WrzId;p`)<87@HNae!2;Gh5o_syX!*XW6tZ=0~2P(70{pgMQN*G#Is|C>U5ZSdq;q|5e#qIX_nk^T65+T=h>5FpPgL@{6F`1U4 zw2Y(q@W#I%d=A3ikk?w4S)mVb;f%l)(GUvC72j@vpfJ&iv<2`p>-N|H@KzC=X!L-`YfO%~I zFwY-&n1^cdr~*&!&xQC_4ek8Uo$gHQbys(euIjmdaxgt&9F!BZ1}%D}GN0c4 zb;7I@mV0f>L@}pEJvej+g81`khSZ}Es>Jv!DSysLON%D}$VdifadD8`sLB^je7(7t zC-G0;VOX#?tP$A2^=2L|PYNdqG>@uht~iZPt(^k1Od!)IZ4+4X=7{R}pC?i0V;)s7 zYjr_wJPhCct2_?pV6n3q!ZnJ7am6KG|KrmHnv=M>qw<0+KOzUp;`r-a+jU2z=8~@x zawTZJ&1+6Yds7P;LwfY3I|)rSd{gk6a0Hdqlf z;-kZS?&@@>OUkNVeD>!0B5^n{=jdt;2b@WA0aP31AN?c!#2ZGRopcjj_KnawDtN#~ zTsp8d#;KSX??}^9X=bTfvW>20KsHsnHE8n@kOy2NuISaRs0kj*>HrQ#9088-&)p#Y zsgB^E>KrG%4c3zl)Z&vKGWJ2AQTT#4ZxQR-XXO5fb^a7WJmC+Ob@U5f(+^oQ`GzB? zQSx6rR;S1jch%iF)Mm%T9ow|r*35IkBX!~48E75Vlwan!$d@xh?Z z@;MAK#SRzjXgj7SLauZj4i3p{M5Pap1jmg(05F~Nu1;NZq{A+Gm9*LQ<;Xc)uPP1W zf0@Tpkr~G0Dv9|~6`(H|Ch#LX0?bv+Qe6Ax^2o( z8`v5HOd_QEd8{NXFC)-<-~Fwfx{k0*-WNhg(E%}m51ZRT=k!;iYucMHj_ocO3sG+H zuC7-9i2t%sl@xz~UF9h`D#d)-=PHYnV!HB+Z2ncZt89i8)4!VnKYo`;wNE%F!d_ji zN;)9Io?NXG;TQX8RY|J{{=1-5CH*+S-oY=h2iOOftCFht_m-)w2{WF1z!^8;=NYA~ftJjk7w+5G zd(+#eUt5DFnavh9cWL$?6fms_KA#QRRLhkh3WX9mS>5Dz3mKY8;rHs{t|Xf>m-pXW z|GFObV%O6C*{(b7TZ4B7L?6FWIyzx{#bp1J+OUqs{PlrmB? zY`1Jb#a7{|9ZC^I}` zUg=RN>uWE#F3a45OEqYuCei!oMC~to^%QD!Z;?26tNhj3Ih>Paaw%gy> zgM?B`Z3}SW)_z|k`=GIS6Lwbko5LQ8XpRkQ>aOD>Xds>ok8e@(n@yy^E$g;v?M|Dk zkQXY?90*LMhDumA#v_vJ+KAKD2&5&(cfqE2w&_A6IO|)r9>rh_Y<})sHk?^13I5}W zEXorE8MLD_vpUM30(QQgKGnF)P*j`HJ4 z)_31$w5?T+J(QJSN^{G6N#Po1tWVMPDCO~P7Z_s=sD+;*q>ot-oN1ezJer~htosF# z_5i?og~0&qGjRdQd@xDzU%xBXzh zMKU$uzYz9gsfkntFv>hyBN-C0u=7Hb?`AcNE`46`Gk(MbR}v`M?aAvKIORlEl(~LS z|LqtXu6xj1R-xAAk}&s+_jF|Pv29l@GeKK;Nd-bbvl^a(s-^1PeAIhvbMD#f&&BTE zv$E6vQ#`n35-eR3s9RB&-peXAQgE!yH+gIsj|Cue2Cij-_V;v&{3U4jjCzWeDgQbeZ?A#j;1hUua4lGv<;RFU{q1z{+e<(JW}A5k+HS`yJ8gHJNS>e(5$ki6>z5TC8-SpEIDr*cef)+NVz$#! 
zkGIU@ZC3K#0&>U-WE9`kTm7s4an5WQWsjf|Q&hCFO=kg33}5wDBgU`aYA{KmhBmTx zKUowMLqb=1-9z*!Y4I6YF)Rwou^WMY`z0}SxPjn6cUkQ}`T~g$=9m>ahT9?5NEr&_ zj!Z%ljth4KJcvLR6V}C*B>~l3SptyEg`58$m?YAqBJY3h{KG!s>qmdFPYvE<+M;o= zM}Ws*!GCjB`Ycgklb1vR7I;Z3fRFtnoE$=Itp0n2Nr*B(pv@_M#h&@%wk+k!Ora~T z2K?qI(91UBTMK3ilv=anDkdf?#bU zAsP093Iynb&^H8!5ZoBoGqi-z9ukU&R+{JG+YpEaY9)d}%|{|OsANQYF)MWkWb`Zv z>XiaX_iBvZM)nr=7<9UlYtUgx5Ppdof1Xh{S{D{cn9lQGA2ZECn_O*Qp5vzw2K9r? zKIn&q+t0hz*eSRKX(CtJ)%t3il(SBot2@iM9nWwL&b!hsgG5%SRWuwJJZGFa?kvL? zOhp`EyggO6xv5c>1xc`m+|Tv-imB}@wOnpFfLYkcsYRQLOU}98Y202fXzBXc0Twx) zG2rYJ6Fju}`ErqFMH{!^xpK{DX32T#NoO&MbWOiF<)z2FokxD$7jy40is z?_-f;$_P(l<7Lrdd-XOAuvMokAR}Hf(C>>6^%~G(cYQLX&^{`Z2O9Y`WyzTl6ON7b zttDFXz7AbyX%8a@JnF5Ao7OH|JX)XO5pP)3O#>uhbMe3-ASLt)eYrM?Wn4SKM=Fw=6xs%X~(&r$7FkO!k zJ7V?f^OHU#mjqHmjN0Od#c}V*k-@S>jm7c6Pu{i{BasJ<3a1k9+Dfq^V;xX8A7kRg zajpk^3m0o5Ce{fPn=wOM$HsVLsK8Pzfvwh+AUv-D_Ai_go`SeO3FGQ$ zVf#%*vA+A!_69+eL!arf!S;-{E{DOWKM!>lcH)#9H4?%m7*C%pzryN#gm)R@T@oA) zTOATds2ESYce)HJ*{3TWdn;+5XY-YXB=~W%gwVq)OkP1{xNR}V#~IIiC?o68waE<^ zP5ObaJ?J8q+%4A&-DgQiO}TY^>gbL3Ti##Q`*f{>R#!Xr#RUUfo5N>@zA?R$A=!~K zM>-vk?tK3spWIwm=f2uG_o`rXTd#ubnto)b?4GMgO*A=|^;}_vd0sjLiLiF4>VY_Y z)1MBsx+dJ9cvXh<+CfI;m|q<_Xy$5LZ#Jy|kZj8eyjMV(zOgUa_4Ut3*!0uL^G2Do zg9en?WPVy$*0pwT$4n>ahVgM>I3!P=7=H<`{^?}?_FKBxM|u=PE_OeV zoI9fDaCDiGZ)Z;bc6<*bH*vA8__~C(Rf&aFrb+cuq!4T4+%Hd zx0egQ=gWoP)u00-*PW68!c;dyGLSp`%M=@4EpYQ4ukjQ8FI0VS~gLdT>sy58iu_d~yFSL9{o%4qe9qE^a=Y7*mFYoPklah<7y%2xfG9CFw z^L1>1PA(x(k99Y2W|!cU%a+ab!xoatfajZz${qvvN%ow@)XAj6eQ}Zp6LU4OJev%( z3`)&??jp5Xp?USijeZO4qX&q&3nws`Pafp`fj`+yo0sP$im!R9rQLH&8rt+FNu7VZ zzPZlaXo~h9a}7Gm%om+zZqA48(^ZD_Lp+E4y@ks$WclTRn*jy+_;Ee47r{k;x|$o% zm4Pb9^iww~%aj}U$5Rpoh}8SG4AEaPV$cn(#*Kf+zX1o z>h-7?+mO$=_P8)s(_z^%sl%dx>sG!@FdUCcUbQHvWJ`N4?2lWyIPQ&zp~1?_6G|gP zzkl1B79lKW)e{!^UVd9kF}1~ZSWKl~>&ASDqaI+&ZRJ?5k@>-MlT9+f)>DrQTZpn< zd4Bd_72nM;*$M)CH{LTsveLti^+O2}nLJJSW*oD2%SgvaZ!<<_yUOtG;%EkxFZ3C* zr#j0R8M${KG7EniT<&vDP^7)6~R`myu3!=!i)9-RvyXUTLUUA(pA~o=_!n%gxdRYh&?Kt5?Mjf%BA8EvK7@)&+nWI0-uTMYo{3j+ z;m2)<-&}#2p7Eee;Wh1$L(Ueuumk#G?#iXFl;JBCZ^H ze&e3o#OLVX<;0)W1MhTIbd}Z0^&jgltmHq5RSOQE{ETV6R-rm^p4VgMsv!Ll)6#Pj z44EW`Ssq!8XTo3`#SghY(I^)uH!hro0}}OOmyL-3Nlelg#rL}?5J=Adj_Rl{>Na{2%P&+vSjb_3~yDqSpU0Nig zX`bqU1`NiP%}Mypg^>XeRpRC!mAB%Fi-}{+ZXx*wmJjobTsIGk?~IrfU4&kz`%UMr zHbOyqOrAIJ0BEZQt9(b&z9N(uZK_L6o2C<)G;QunS*; z@Hq&|)H8<5b8EFw&a=dL77P^5mo_&S86H>*Qf|JXZr@orER3-xA|qnwsY)O={PZ)Gq1_Rd@nVJ?rax40v6SxiQc-e(ZfS#Y(z&3%emldH*Dm4176+E`bmAF!|l7)NXM!6;FFf+|v&ACNHt z*f}=yez8`4K!5mN_103G6zvIta`R2;{=Rvg-0f;*Zm!Y}+0#+W&FhtSKC^M15f+&U z9k}sYsPxYobUodXZPfR&VUdqE?aYig_S?h{$=ZvqkHRh(WOfj@e@Sz!r45)AU}Z#- znl!_~bQ^)d3ZS5$3#ZNT{}-Xv9v1|WWr?*p{*=A7@ME|ke7nyu_e5x)I(fP+*{DXyp zdx$g6DsiRpg_G@J&F)8sf~ih5vWuB+PdGzg{Yfc3e7mx^xROsL7tez&13xwzol6fN zlq~+6Z(F{EL~)I*r#63ax_l~cN#f?t${&5MjT5_P-c^#TEXh?QvLn#ts`%a2MSxKn zdZThvc{6o9M1YhR>PtH}@;a-zXgy>43rksGTUS<`0{H(&)UG5U zBgPxI$DJWjOadLnf#Ikk$kTvi=S$WcY(}Q~pCI5+P3(QOFuU0;yO09d%T^L~an+Vn z>+N+Xtn_Ozu0WXg*zDzseVJ2F#Yg$QS~3qF@M*eqcEjvJ{IgTPbm=g}DPNDc|4pcV zt?WzuN2qR)M0iX5k5K(tIi%(vje5NPCS_~ozg25S#mCueftc;z8a1DTqg?#0Q6qn8 z)WYQszlCZq-NaoAtnG<(=g+UtND*yEu|20PyXu5xlrRkkmI&F z>DR@u)XBR&rVStTd=Uyg$7ygL@QBnNl{N35E1ovd7?n%E3IWSBK--~CUE%A@76X}# z9~XPoViSvhvAy%9a}E4pe}}&<6Sd{?M<)CXSQRcs6H@PptF97c;ZWHT*9KmfffrQ; z^j&REYXQUD8@=o*AddHTq55BD(`FV2uPwRcUI`D2WNIhz*K+oOEmAHZ>Xuc*{T10T z4&zJRcZnYtFS#(eOeha}9x)zXc2_o}KEfzq1aGdLyAUHn6v-lTUni9^0n*EzZEt}e zf+MhbUQ%elL>mBy@^E+#y({o*5xSW@#TjKgQ#YuA95BBfYy zEAH;06nA$oPH?wCTPW`CuE7HYhvM$;9^75C>6~l5@5g=YKafu&&v?dt-RF4`*FC3{ z30*iSH$+-L)a+l+W^e!|w$CEaD@tof-19-qF%k{wBB4}IHj@C&YL6q11uSO9hfk6H 
z-z7k65d(+k`Xso+aNir)-Q?uUPKAL$3dEkwppVTV0*Yh!W)erkbq}=^P?2QHgJNqV&q09sCU3g#xNl%B`C1KCt`az%` zEOX*-pXXe3cigq{%go;we!F~jVE^iszRx1c>BO@T$oEMhndjMy`(M!0kQl)u*-RyQ$~ADWNuq&YFp{Msw1K~jq14G<5a8kUC_q0^C$wXi&ISX!c2agVc z)k78Rm$xU^shnA`zy?~!O@(I*uE&OFq+TMi*Qh|khxI2x00iFqzYsri>{HbWnBesM zuFW+Heg}R2IQwth;`z8LmeD?Sf?T}XKpdayT)1>av*Z=(W^ZvTGrCLs(_kSWFG5r~ z8GLpt`;UK6-RLb`Cuc_6_k>SZt+>-i4ZaXBr#ClnS9AD($_ZO` z*~3Ve!-Ld4*>IA@}9$F_B3mC+ph|(me!KgIyaCPXxUc< z%6(|>Z)Yhq+g7L5L!2J?;@S!gn+5>hZmn#U-Au!w1h>b-=!L6ZYc5!fR9?(|6^ zQ|P|7q~vMNz0)r^EHL4l!OPyM`_=LVwNh`d>9!R~oxxieNKu4rIa$T?4-SDsHQJErlk)&r2FSq7;U zcUp>mW8SoqJ2x|xWm-VkUR1{*xxnmiaFlE|*4!ny6AX8HNbxu2b0;{YMB zT)^As2`AtAhPKBg1LFsof}_#B$IOd6p~MT{^2N-DQFJ5-;mbb5;;%!bb9YjOBmcI^ z-&9OCRX?A4O>LH+JBMeZ4GQMfSLZ17#CBW^LIvpz=MApJB;w`LoY{aRn$Bv=QibkH zHyVkSX$!Lq`ZZ5)`N~UV)73-lfKx|YjL?G=Q4rzHYEX+DPAAs|;lMWrx$pTKBpF71HD7$NaF!cfb+3(n^zyzBp|y z)EWk58k}SDV6pQR9EjY{3ur?eE=j|k!>dy^=}Hw@+IA=dp0kGN;~B2x?yiiM2ZUX9O z4zAP1dYc@BEHzCSiI{ENbSv5>;Y~}=kor}=_1H6Bf}UEzr$d{b@bS}1qi#QROTxfL z%f=w|gZgiw;(wsW=#0;roBeU)33Wfx7P}*l2YH5$c*7xGoKT?-dgbvc;*-YGCWE|z zqK`YKR&JBCM))bM9y9Yj4WOY8vV&sg^&_W-pAB|CO{>47aO>sk_eeu_$YgF$U$wbh zQO0#ROj}!{>sqrCEnXzp&kGk+fvhhb=rYGCpVI=Dfk;==8ws6q0cx8Le;0S8kS0w# z%0h2AexRcz_H<#oEnSLDstty39;3Q5dpjj#MVdq06F&Z?u}vCw7XZGi(|9+!$6H_P zr%iK(UH2pWG_}x`XnivGPJr3H^lXHO^l`=`EYpG{T|IKpUbMj78`?1l@&)>Sx?tIa z4v&9Nl0BXI%q<k8>*~`E=#z5e=)$&*TQoEttYv1J|O8ePwYNU+>4W7`pQTl1Xk{1y$7Ud`=o>2cS3=JUH|W8+djgAVGWj{- zD62PrTJ9tBe%qMV)&37IEw_eJ#DQ&c$y~ru{txwPvn$*BZ*kpMwo5|GPBAqu8I(Fo zGz{0>38l`+(6eVtsj1$Q_qOjhPU}T@E~Uw8HpH| z$16`Ix^tQ_#y-A2JK(d4R!)!Ywl)?-iC0yOz?4Ckj^4H+gL8va9 z)z(W&=V?+tLH{}8eQ0A&5JRMP;Y3IubFRFo5uVbQiY#Q3+{$Ee6Y6S-0xk_&)~)nl z+4^p|IPz%6BahAVxt-Dld6ITcPULljY29}Z_}utXeSGq*P^>v^JVH|qEL(RPC9%?{ zs@}<9tK68uB81$jc8Sd@iDCamtk?K%xsFBdt`3+Oq%od(9j5Z>5tpWYAwF7Bcin#) zpE;A<5<8aM-@SSG)yn5!a5j2$ZCB*rtQ{eg>NOx^^HSnj@^I6esigb z*am}rC+P00(UPgRT-RSIO_g1lZ%stcXR$^v)iuLS6Yyb&Mx;lgLs7Xawgh|ku^aF>||nb+|{j6M53B{t0NE8-Zjx>UdkCUJWgx;J0jcu>`ZL=n@}M6ZziDI`k%UTIw+9+f4y z6F&jOgEfV4)I~*e@yIr@OlP!xZ3K~N3SnW0&Ss(x335wxcAaFNjL_o4vIG0`@qvDd z`jRrDV}P_^6pc;?30-yg#MhXc0jJFfe+Ac_KA-oupgirKLqck(} zodCX)8l8E0teUOf58X}8Ra6Bu+@IVQ(0p*iXKn{RN%qc2n0~axir||3pXD=B9U<)k zZLQyRA@=m52(+={Tz2jO#sD^v0|sL0E%T>hMjgN(gHZ8h2&r@T&f}Tma8cZj6m7WisIeC{>2n@-+IaOJE*8z{bZ#+2nKK!K_PuLy4FN&s9 z2`}WnArKUcH3IVqrBo_mUNQw*T_?jD||rnqbd!IVF`8h#%}gK&_lg&d);)oA8~ z#;sS)xcD66Pxo+>GC`wG(KSv;R2@DYTT({mmpNLF&^=3Dom8XqIbQQ5dZK<%8-xY0 zK0@GN&-r4lXS3KX@#j;#*vZa-nd_B|*mD*{)tE}hQ064GpU$?Z(W!r=H32Jfn$;+I zN_B}@%VF~a>ilBmrMOs9X?#V5Ou`G7hw;HauBbexUH^jy$6)Sd={pwu;SeZ@L%5KJ z6n!ZDMtsOM|IAqFNS(&~!Mv!3ET+)HPxS+Ke75-;gi*a}60{l9w_d^*>g>H~`>~&; z6tQ-vb2rAnVQjeoug*mMNTn}4DQq2G7Q{UWf06G7p4LEwuIqts?>%zR^ft?`HL%>q zzxA|SA13d2i(LOwMsr-~&o)u*tZJq$h^Ucla1Kie0$gm^}MO4W(3O&rzTIscH3pHIcV@{>^<@%x0kH4I3 z$!tfp4W`+OHlE=zfWgx9sza?(_0t;8sAeShgkP>v`w~Uhq-ddv^P-kqm-=HK`Z)y^ z+O!9fm#AVmpE#ASe(a1XDdg52-&Uo_M>mUtlt;A%>06N#7>A2LLX*b8)tA7ijjkrPaYhBB5QaV>WpZF&8!bb~bi8z9oXYZdXBpxBiWD7aFQG>R_ zMXyVKoDn*HTDm{<%EZ0=QOI9mk>9h~59p%fq+lmT=i@S2+Se7hV@;`}iO9i{l|e0t zkg1S#ytm3*nvN!6T<0B3zy8_Dy6F}rXr9ZrQE(oB0;xiL{ne`s)z~VdVUgu}?WT`i z$;{*d_P4!SvGf6Rd&%E6)Ln7?^RCqRAr0@Y%+N^1>uu3I_)91mvhFhYC8gJfkyukO zAQ#O(hj1zeyXDKe0!Ek`lW3|6QSl~+`aK4(3|D{3BJ+4(Nf<-#2_i?5$w3Bg&BqSk z9F(F| zpzy`}qEr>gL#O>yhTEy%RiG!gw-WIQ^Fve728M(YIh(2_p<0Lc*s=bG_zleGIs;Yz zM4a-=iY|iv79e*0W>d*@Th{>^?wCj-2F_7i63-Sa6B*?-34RDt2JrLO^ho8f=(2ri z|06okWZ1<7yE(j{=3%Y+VftGPC%d)_*C#%RWccCnTA`&r={ywp+q#YbxKQzAp zoUK^KH;eR@meIw-=!)?wQS|5~Z(nro-eY__v zntL5J!sJyJpdL4T;XVcnFVKwx)Z=R9S8R92wCJlahb7Exw#-s!Rm>@-$78T7jjoyJ 
zhNl@fsdY-kU?|k1vzoqzm$xH+!{7K0P^F%(AX zf=!ZwdQRtN&Aa=o13@29A~s{#PQrZ@Xs$y}c)xU5Qvn88;na~+k$d@@S|#sQEDNe% zRAJK28w0W`gPiHl2f%<>i6Y^ipb`f2L4uCA6eD!}gWmONULkRA!m&d8)A=c-wl6OZ z+o|RoW6nM&mM$fuZFd(Qc^8x3T)Z9qs)BsAQj;IlSIwMSS^D;-+^Q!^;Uz))?~gLd zDk_mdgAtDj?@TZ3X24@PC`cGFe?N$us@w%Zl>|P^pn< zS`GFV(Cl&oCBQr0;LooWTy`NyZWjsp`ZucGZgY^6E@iW-OmdEp3ZLt3qniMMAiXzv zDlop*Ovt{$FzzU2@0xuJ6dj+?L^U{0@XYJ8B~lT zYZyw&O_$kk4!3;?D3kh9A*GMwHqKf<*bJOMGjP?b$`oaoxDr>Hat_5$bA*+tzt*MPspJK|cvu7gER){Z{2H ze}Cvpr2s}ODvHTLqgyt3N&<@>y+-w~`NKGsTWTkP_S6siu-cs^srfLqU!oq2AW>EJ zKz*-GO50C86>Jrw?!|02BV;~%N8?2)?~XcIne*?tW45$D4M@>xsc9)0cVX`3{&kA@ zlP&hirqZSKtFgRU0Xmh$aIPAk z-ySSkU6?o;hm!3lcNL07C#HUmD#6%la<|CI=Af+iE@7BmoE-Mddax2zPO^}M(}+X3 z6NGwOu^;>Lo;m{>q)|ZD$S|!?`?d6Elf^u0B%s8CMAR?e&Cv5&jyxOPCE8ZwS7zr! z<0hq7K^mbpJi4-`F0LBWb6?L&rJ%KC)OdhwqiRR;Lb2vMPCI*w>>V$ozv<9a4q`L= zAdw>6V=GHvRQNj}x_b8gT+FWA@>9_l3WMYj3OP21L<+kQ`x4Ku``dZ`^i?A$Wg6^C z>ICHqkabH7w{9X`+*5lp(Wig`GL--n{Kw>UXzoUgwbbWa_YQ_2U=}4S#-5ZPKmnvw5WtAq0_jDHGih5c;0wD<1mFipTHQkmBr5%Xx(x_>ad!N zECFVpOE^#)NEq{aM~)AjZy@27BOy)of?Hh2nLC;IDl0jKagnI{oeimtdo)rZ1iaUW zG03lru3IYUs-s(?<*N}xw^%dXSr}*A6>cALwe`-An*>f!No6k zn-^UK-L8LSY6T&KBD7`ib7E%foBiQ%!vn_Tp~5pW@qi)0#big)w|{KDzR&11`~tqH zZ>(ukCj3RCmOQNYaPJFiP!eH@D;JwLyEJJ@i00L~S0IpS*cY$@3Fv}v+Q_y{NV*O<8X$tf6>82r->$shDi?4gAB?IJ-O zT;Mb-APlv5CC>d=D%4GpQhT&djl*f$2pnK9kg?KOI~2Pgn;bH%7AMibSDrqthsU-Ph47|ic2l@s?_r2H^CSNp$x>(eF2sV2_h`d+?6?Vc(dcPEhvF$8WV~E_)_g5FwtvEjaaQ7 z1_rR}sH?pibet+Qz;_u^r0V4Uwmq8N{{CC4ormyrF(h0#fx*GW|`ki^~X!`)n#9YEUfDW7EyJm5_&kk zr_dFT=}Zfe2{{;jjj)u;i@Y$S8^z!gaI9k)u1aAz2=scxZkmcB$SpGn zWo>a#Ib(~NM1oit!Ds!6;7O>KS<71hlO#Id?PHc+bnN?5?&GflGa(8i*9*;AH>}>Y zJC~||5^Z)9?rcM5L?7ws6>vivA)chwieniG;*%iZNckEZej!4tt-s>=XzPc-_{F^u zwLJMf?zPuuP zVg%+zmx2RNK{5DmP&2FYGjb?fc(cx;-lA~k8Kz7|?2mWp;f>a&Lr{7yMu?xQxD}w! zxsI&ibk<aXawT3%J**To-`W^hA>OO8$LD@t9;nT9Na8CRMrV1z0Mog>i`=%5twAB(54?~lszY;aQz zl{lZBj?Fk}8K?(>FWG2b2mcsnoCsHMm%7fqDrMM{jANL0GbE~y8}6IH_{Gr?B^jQZ zQuKP&TFI>Ymp?0#BWiFGjK}JPdDO^Lq#>a>hhBNY!c4Y1cBktX%~1)cF5I`HWTV{c z3d6*;ZYVxHbn0-GbAe*ocz=GM5d3o2^Dj!V(}8JY!pL1x69CBZr1I?z^KKBi5*stK zOr(syIIY^+y06m0>G!1{6bQ?F0Q-a1BC7G6b&GXRo>+g$!OQx_Atim}Y**>0 z=}pcJZPUD0eO77kluW?+{cTGTUP-S6rWP12E6s&tN_WwkHuX+er4L0ZDt*u*XM6Yz zUnLmDaOklaxsI}s*oBU?P*R=jh<9vk=aYB_-)!0-XG5d(r-HH)umbXkkxm$*M1RzB z2d4)e{p`x8NNXOe)P|xJn%i9AMV*gIhzv;V&F3Ch<+ZOdSEIDNhTo=h1FCO;vL3Vi z>lxjkYz-w_@weGdNX8@EuibcHYsw@>LnR6LF_G^p>D23Vvpm;7f5@n`dYlrie ze%2Uk6nFaaX(X{FK)pKQ>w`KgB2_=3yb7oG=Yf(gp6vMSM(dYBadCNX9=G^qn#6+9 z!oye&n3!IaZ90<=KQWi&V*RqZtgp1dT)ktulZ%Q?(Rf6QAuHDWO7l3DI3@m0U&c^%XuW~7E0uFyu=MN$k1|KGiP*?S>a1&yrS;>QK8?AM zb^JCkB3{iVdGa`afgrlVvhiB@ryEK<#m;um!A3o5aB}1v2Am$|p zGiV7|bCRB}93etDBb3`qw@N0OV%g?u+)<<4S~Eb6*%mm=Fl6R!YGNm7FzZofP2=4N zD{!rWJB5?%&8CKGUS5mIOOz~8DxV!Hlv{B(?OSoM_*9h<)KJe`*-DB)kgRyV6b`?^ z&u@>@l3f4Va4k-fl;#MQN9u4?SGf}dB+-~!WtjrKa?gh{F7q2FpcqeYP`q`P3%Ji? zAT<0t`ftsJiQ82 zwJI^*`cgG)n4}xR7?(|JGoHc!F>b4SZ z^9u0Bd9ezzb-TwUX8I7GwhSef?OQJRHJzlRJhoyi_@>G;iM?WCgBUWY%Kz(&9hI&! z=(_)}b2;md@}(3gIA}v@d;d!yrY%Tkw$SZLRCu{JUBK(rLuah+gK4qU=AtwOQr=X| z5v*845~#CbQG(%^orcY!fb}O87f_Pl9nkxb=2Sx3zh_V0GHH2O9~rr5C>{Bd$~NMy zqbU7S&ooB^ONE>7DArXVp>ERxhtb|`cPP&+p3d{6pvOFz?xz1edjVExg+|w_Q!tC> zU)Rn!4SO>2FL)iobTe9|#R8Of-gkfs_YEOk)%O!9yt+q7vA?Q2TiH{1dn*DmVp{|a z8xj~0z4nsR%ni7JTHLmH+_c$KINCV2<)9$*A?qfE$w*I)1i$%Q-oh-iXlqR8v+lG5 zgA74&b-kYwdB+>^H#$asmp5WC(e4k! 
z%I+$0sE?A(u<4U**6^`Lq#|SjWp-F;w^Jr7=i0sdw%4jq7nd$qc5-9}@oy@TZ;rGy zMgorN^|21lE~f2py90SWGac3AfSGt|ljcV#++T0UnexIZ> z|1ourgdxeM+K@Aw#X5VD$-RA*MMa(M%ZI9;A&dg3rS+V~htSKo&die-A z1j?XUKVushx_{$&kL2|?4$fZ_3hOsjRUb)q_g9Ujt`+6bJPU2Fv)07um!<|%uIAL_!xAmN06 zZ1ShaL|D9a(l$ym}g0YCc`HG{8>E+>{_%ikN{j;i%_Bz5ZjqRJtlS3R7_a z(<%SgJdn^6V5mvKs>#=y-_M}&)~(+(7rC9lNiIQJ(TOCfN+BxUsjA*j7=1LNP5*L7 z+ZNa`sn{QH+Ps0c4f&*f*JhoiMH>b#NRf;&tS8ev*Ds&p4VS%``5x$mX3!qQ`$`Wt zN@m-PGLk*^In<`sSp&P|alJn(;_)kmParHVrAA@Nl;U_$>g*pl4;YuAN@n zQWVWC+^M3DoGdQFH(*lR0H0>o2fX1h%x~~^9KtAtSh4(?^Y#bZT)KJ!Kf&?i73V*{1GtaDRI;ZPUI|(WE?{1uH0VmW8>6z%_ZNc!6)6FoUl$?qJ|jpSXmnPxr)_ z(VhkmPlw+yRwmR1hkX`P#35cxM(|Ph1f71+j0`G^gc^0Yvl|B8D&5dlVBLvuPIIm* z%j#Ul-m6B6#TiMdA~w7=X-7U2O?dm0%ht5afU5t>^?4iuTK1BnzZ%An) zl+84X7hqE`aE+4VEf-a$T{+*mxobOM8+2&cjZVe(4vYHr#p$EIsCWm5MnDpMi6tj% zOmhgNyjoplMvpygID>XeGDU{vFQucssrExmVS~JK;i_YT4;h3voKRuRiA-tfP>M@E zOS;!32|Wb~gVy35tV6yV-`0;Qe$^}loy$TR@=gq1QzoOcr?37FWDCm6P|8;g?Ir(| zK)3f9!bp&N8JttQ@fFGNbLCIBd(~IMOb?air}dnvYihDvjc!N><=SZFMuuW_4F}lO z{aZI}GboQgU@;qMEcaMttGd!ToyiLiU5?oG^7_=s3$vNJI8K#^ixW|`j*g*~feF`l|>_$7BCOuQ>Bf@jia|;<(PLXQg~p@50#*J-zDt<0(dBIzKU6 zIGnkADoK^vf{`xh)gnQbT5WT+QYJq1T1&IiTIN{2&KzPht&_4RQg%kjDKq-H?)l)# zFdK5HQq0xQ#Wqfzw}l+99r_NYuo>HW)j`HTh^N^S$fq+0y*xhD4c5N@>kwlB8jtSk zxF7EO;{|*p8$oFPdkVk{!;eL35x-OAf2u9Rc7EkBJ9Y^bw;9$PHH`-=k%;ZJThSO# z-2n{@_!k|F?aa*Dd9LwX@ zKe@T@B3N!QMf_lG3d?sJADKT|nFR++qy<0vm^P5;g3RBm-o_v-zMU@v6Dbx-#MBat z$C@LD1SK*3#9fZ3E;_u0Y{aYglMiMeyzbY&=*#js9*5@0lvf6V=?ncR>LN%N685@9 zv}zUJ_fOBe=hgLFU2xDFZb&J_XWYs2(UO-1UWT$aqwZVTw``rq(TyHvs|MN;Bd5|Z# z@A%!{fQHdu%P^gq-a5IxMWMuWY5j0NOSdU?S6B#LOEOOXD)g1ss+cizL!GH0+NZ8V zE;}&?CPY;yIJ^wW0Jcege#kMIZ+l%Vs z->QU(zUw|sr}gSX-2$+@le;V$PW$M1pY|}R6WNi9cF$7vyASeR9`d&PCDGOd*;z>g z(f%XPrf&p)j7UTn6EWb$29;=#czHu0vxL+ZCWFt;v81%h6uA9wrxI$=Ur7#I8Vi0W zuN-%!K5V;dO5)_QSIkO9}ttSJkMMr$_OA=L~~lm8fA7 zJg7acb5&!C1n?#Z8aj{;BcB=yOCBxyFaY&@Y`?@FsB>0?!_0JGAiEpe&;Z;vz)r=L zX?Pu;|7ndsTV>ANp#4pYJ;0^f0hla|>1>E})QxkIE@(vD_ zKLbqXs3iyzy{PU*lt|>53{+~Ead`K3CfK_CFjQuTgMJh|T~b z)0q5rqj;}tY)TdB*|)N2w5=EY*(zXC0%t36lJ{z8kJ40QE5F4&0$o?L zZ)o4?wAJ+0%_qU(rG=CYttN~~)gv~`(6X=rE_!XRHp!YYU~uMAL{{35bTcA!oUeU=TkUhwUNJC`)ajo1 z#3_$yC2>O+GLOg^3@%fnF{Ldn^5p7jULZ;UHaQzo1U8jOCy&cI2fs+|VM-ERvJ_hh%i%dx?;c z%BYo`I3-Mzk?E-zU1wx*zZHo{X2HyQ%UwnI8FI4hU)vN*UJ>6qQ`Yv8)r!9I!~B<} z^H2w!-Qoh4?X}&*TEF>LA#pjI8gaQisdW|rcB5(TwNj4tDAq`&yi`tH2VvK-&hM=a zL!0SZBauTijDs|8wk#6z#7=zb%LF?=L6B0y(o_}~`aI67a!!Hi#7xg#O? 
zgg?FvS9NLri5?jLkbHmr>&^lU<6w@=agye1{->!?USY$yq^_b0BsN(XO zrA|9`p^>K7fQsc=g(*LXcyx%3)mOqX%6nI(*lBzrKW6q2PgI0lW8i>5Ys-_!<>52E z&|m55*PmPs)A(kWllV)k<&sYXoLytHJDKbBWAM9&7<)XcQa>&k?u*MeeYqC$0UEEAPHnT>}LG9lDClV|hY^NEc_{5FlS&pbehGX%WhFpGZ8TQvPsNFBMm<(BQ{yr=z{lFo`d;Lco z>aukvI1hbCYXw-mzcn7A)CCR;_D|bB5VOvIr0gT;NgEA0PU$-L?7a!N!cI;c>@EkT z4q6}8+V(B|&62)BE$s<;&pWc*YqCS=E9`6RI; zXc5k9Tz_mSYK`?#O@E8)E$3WTfA7@Mh~Qfwt$*4$YZelV(%X|cXIK0jZf@LyqB2Fc zY8(m6EdrWQWroBdq7cqGb-RvNNr-> zdXm+$#779ay(!t8#H88T{`a9P9V;xh;^TqTmuuc|@hyn;=aOp;R)vk2pPU_m?aHGS z7{Sqzk?Qi&G}C?6kXmEDgjR-kE5ssJm!=L~U^b_mKyXz!=MXMt-p_U;pJt=L_{EWh z1TbsKvy?pEvLBl!t)zJgZ8vNxo)_<-$%i2V`%oE$=gmP)Yb|6d^pR#PYD8a+U93cZ z1&_bG%AR5Do9CFO&RM9-Np(Enh?~Edo3-nkdIR$1ZH>sd(j@@XLUj9}ZbbCz{Bvfo z8&Oa@tm~|EE|@NcLf|jqx=kGg?%j9gm~TLr`JL~koO?=E?_5J}i%_jvn3vPb+UOdc zP`x-d3YXl#n%HHJfWgK$*o9YyJGE&p5dw0XJ7_aK1T5%m)cC|>6l}R+7cQ2`66N|c zDapK8a}x;;i?>hnNU7i82X^StpAoYocCVgR0^zgpf2R31LiqVA{G{!Pp!EXo!^7)U z_jrC}px@ejn94@vdn%Xw{4}|`|f|p$G3`{^j8FFk74~DaEYj;Lic8^ZE!{pr#^frnQ)!|{DquZx!Y%O=-B>70W^MIrrzh*(&m zm&;@5(CPuD`W30z^pt5Q!5Q~+t`Y>j6o~4oEYqg4Tud4wl+D*`?QU^a=X2uf^VR%h zuEuDD{78g6-243eDs-{}evNL>#tXXXO!eG8650Y9_;zpZ*w=JDy~ys}A6HvnAl|cs zpr4zJRuS6{UOQ*8vlVui1f9^d7T?+<6#!Zw9A-p*9>0a=5WAyzs4v?N;agJ<*e z)k4cGn9U4u6#~x}HT4dZmg9fu(VGvWhE(o<24-?Rm*^H}r1moF7Eq^WJ)cshFU`)^ z)hbt7&9AOOQgodnTR2krFNPnx?&Q6#PFIi*8kuy>sJnoq&k7$Jz=*h^#rQDY`*dW;`JA2j zMY8i>Haa$z25Y4yB&5;G`(YY&vR&o@9#FjF8-2T-Clvu-8U81kVjDH&b4Li`MOvn# zs2ONKL{!_nIS3KQU9x@)1^*v9To^kj7TDqPb#p* zUH^9Ewa4vopXjN%!;7h)j2y5RFmz0t+v-?P+7Txz8?T|vfX=jXuv_DA$5D+Ala{}EW0zrfNTnM>?I z1Yd}S9tH?VG7-)#G7-<;UwkWnypjm4mL#s#m%Kg_*h+b9%}e7xwwYSia=B4oUUpd` za#<2|$;kzHOPCJa8ULeI1wl{84?=>zUMZYUB-qoBp|RYn>BI+DlWl_+ZAR`waDo?W z*MIbt8kZ;qz8Y(9ZvM>L0zSAaY^tJB-=GEhTBIU)jO%#ynB%t8&X)Sr)36_m1?mQ_ zgqqkP8QHNPfbdzm1a7AaAQIbEb#ku1GG0Uw@$)Oo{}8r+vh(FL9Hva8)0+~Rmm1_P z!{=Te@n`O#p8tS$c8@nNQnTTQ=tdq}J|&3Lc^Zz#30>VoOn??yo@4LId}2#p?1~C? 
zixi@3`ezcocic30XKFLAN?#M3*;l=Y&+SjrFFiS5A)7+GN%kE~pUn@p+O+Im3>)yn zpD%*5wjCjB_-gwZ-fiYG`y7G&AMg!T*wO2wx31=t#$BN zumgPbzv{IY^zuywir3)!-^F3>mDXG^j@NY`bhp5H6N#bcIa=_ICW$X^@w%V=gz2gS zN21nqWrJ9vNl4#jZm;Q+Z_=i3>OYLO?d<7Uh)ALw?n&J{`~V+dPkODd9_s@u1I#a; zSC&jXL@zIUySaL`Ym{%d$}+r=BY zFK4+g*`+@4ve}nt9I=hU04%6NmAxE601)vAA{$ZM;_hl(-`NvqS**9plh&fh5Q0*01U} z#K_*?m>wwac0`U5ALfa1w|(taTOI}8wjI@fg{O+GEW%aHRQ}^oVTQ|$8WyF_@|26=SE)ffd1SsR>Z^)JbjQN*xcMH5B_iHZX>iU!Q)Hfb>JNjnjg&IEaHHp zY@(HNvj$~Tl|;A4l4BLH3oY#|dM?e7^!7+cN~T%_WcmGpb>Eysbk^@%8BczN6*xV_ z(|~1NR#5FTaf5GMfn5n&;@&{QS(@QXEx{W(2X71ec>psPdgCEt2m{+Kgu}SYg#A$I znU%?GqP%k$l;#XiLUW4CzFlhU!5$v0b}aLDAS`g8RdY}XtWP~!k&EH>m?hvg4-;C9 z?CO?gw+`ax?mbz<=@ry3T;7^a^yos5ct98bWk+F~)^kziD(8S;yu{Eu9Q@sv@S0@S zoeSU16y74X;aCfjFK{5lR?u+?E@V7nBq*dsu~0{Y@o_yQ8ifbGsTgDRF|=`;xky;I z+6a-8JNs)*s9JC>5FpiwP+*Q7akraj>2P&@SG3SGR=J9zp=MC{Gy@KjpJB4i5ZCyX z$Qa8^-|U^?&1R=ys0;J>EBB?#RDaPJGcy1?!&>EbN#L0H(`d~+WN?otar|xRcs#}Ey8LwWPJN%H`$+v4NxoSn%C?0=k)lC}Mu{Rs0R==PD2Pf4 z2%)G*QA9v#7K$QZp(jC6L@)%ziUKNKP*j=s5L|dMJ3h-}%TstyHTS$L zZAD(=zHP4zEf?T6`hLbg%#z^+e6A*roS8gr-=*V%3x56lQFn;n2oL-n!?iZLa5A6TzdNB4%^MUV0af2k5PsY2l>DHVTb_uVU@^pANgR zG>X36w`^0j*u>c8#VJo0N3zfMRlfOl^356AZI7=$XX-A|;?Ug1iQ&nW$k{dW%(7b- zrjn-8;!onG?+3f@6evXG;6E5szin#PMwYs}onB;BX}Z=!KTmvI#7UI5{at#IQdQvO zfmVr|()?!wlWwiD%xyO`uTnNjlL^V4Cu&|MNrxWaqWIc##kU}r8(}q?v-iw87C*fC zwR=P7C!-B#W+myP?FFZ<^i-vfJ--LA8b-Q?fUt1>r6Fzbwf(e;ZJ)Z7AvXy?mj`%= zk6O|CpUXp)-de0D$6rJwQG0r|E>WgY^{9sUHx8-I+nQQlH=j^^ef=@F z>dNx+?oo4{LL@cxPo@!Y!kSytPP|CRkVfAW(W!(wba40atD`DtG# zdPrNPq8OkD3dsHdRi~5q^EoujnmjB*FD?wNSZmuBQyn9q2IZ1p?EF|$m`dMuBXsby z$(YyO>&!_(qVqsg-`ppMkW)+nW9T4+`;p)DMKGIw=r8xD27p`LP_$nI_pWu$H=8Ew}CcU9gD;QDNZl2aONsk_WaeJty z(2tHeAG*|1aJ#%Gn97uqk}V77-S!{-v~XCVK+3-UT~DSaA{P5-!g;W3zsvF&AuAF*jA4r z$bZ_Hu6`$AQx7o#O0o>MJ9utKM$v_9C_Q8=Abkrm;7B_w7wb=aKm!%xA*C z_biBX8^K?;&B~i#2b_Z_+ZOOQ)Ryf#3VWf=Ee|-U{EKmqc*HeYK zZNjf)fQskoM-Bmkz*EeVoUdKvipa3f`bW15`|fNK@N&o;Dt@}${k1+L%Ujzmwf3el z<078hL&p{M4dO#*N9#^;mSs6pfa!LC$4&>n-UP2GEySM|4hT8CMWW`++P{MHpfO(lVMkM)c=M}FXL>#^5(%$@<_i^i*S^P#$3Nn{sUNz|Hm?+a_m5{*bn9Q?D`pEOWU< zMJJxyM|jd-Sfg)=zYbo=JaQ*vt?;JoD!&M~o~+WLv%Ps+6l(^9PVY_&`*z#={|w*^ zg$ui=LS{p|r6m;?F~cA4Y0khKa@(oG-kIUAZ^uGsgDQ@3%iF<|{u|RsM%(~PJeFPq zi~;HW0^CElLKRGAyM^&&Ii2n9J*>g3;O7=pi|7A4DZ6;3d1n@lY;TFP#V5UD4rc3VfX%X>q6TnE_AtA1-eR zp-jFY^LQEL*Ye=iRrpijrMQlfNB3oCfoF;JOc${pFbdt#xU+H z2lNJ6R9jnOFJ@$h*%boL7sWC}=U1{h!lQ#esT7vFX1;xf?vY7 zG%uba>r9h6b^PfuUS_WMqImhY1C@TgM>c&Wl;Ki`Kp4z-CEb~ zQ^^*`vB!S?4?i7h>)iLK!K@y0hqyTKn)J0v&%hUxW^}rNrx-d&?wK!~z zma4eU8ZC8k`!!k{#Dmsop=vc}?l{!O6!*!QUn-uItG!eVftkk^%fV`6uc+JIE53Mn z^`6WHajP9NFUGCjpSd7@wM*v3_}z}X^|E3SzH3xsSNNK$#IE%9REbsaJ*N`8>b5=G z)OV4R>5SDPq$%HO(P~q1>qTo!q1KC(ogZ5H?}S-?6t$MK{zr2?Vt}jyF z>1wW_yi?}vvTLQW4(7iB3RgbSz?l2StN#E-V#AmHr2f5)UxJZ+C1>}>#eQ6F|07tj z4e}di=1hOg+7zD+hh=F!_z`GMUEg`Y<@Nd4XUpwl{uXR9T`3M~L19OK0J}9`_<^DL zr2J0quVB5q*{l64T(eHt#brKvwk6a5dfA#FGjr#7_3vPMr}!N(6eY2negNQW>i3H= z*Va5J_%q;y#d_%Pxc6Hw|4h^T$b8r3I*<@MwcP#(aLV*J1PuN*<#&Gy$Y{xfIrBAE zJ>V~(ZTXJFxh`fti)Wd3dtJ1YbuT}(xnKeNWwVyW&+dVpR(VkIXF#Myyxc$Fpi{A1 zKBu#@FgEuorZqNprK>sNSE&65^%yX%RFwTr!1RSyV=<-CK@jl9+E{A7yrte-_`!v^B@a~6qwm7VfKgPb73&OKZ8CVuFZ41 z`-u=8o8n?_6PufdX}$0(R2*vtPISK=<~CXn)|t1q{t5KRFy%ZL#-+-BelWc?53>IZ z3d5fKtJ&<)8Z$Mw*@62Cg~|8Tr}E(2a{JA{LYJRO+Tk0Yy8@ZLd8`U%*EN%TubZ0a`V=Ye**PFv_*kwu}Im^7gn1IJ z(9&aGsLol5eKon4fcAtuw77>7Z=>8xarnJXJo7q|A5+@dv0T~DA6BCL;L~z@&0nF* zptnrH@mU5dDUEf3*&qKCXufBlEa=mt*xV9KYh0|$pFu65=5yKjkC+|1V!9Ws(7)B0 zpQ*;(z z0K)8I#JHkgaY$7l-EOc_Od|5qN`@lpi$UuxdXy>kz6Dki%3G{AVc^jhlAA;oF(aE1 z1d9=TMZcMls!33VEG7|kX{Cq~O5Cs&F&Smrci-ZiBvedRZ?U1r>d@SzJ`uBOGr|H{ 
z1XRgSHx&G1f!$!Wn8bxkD`83~Rm0X>pi2LJi;I%bMY4Ju4Lv+Sm3|Sk8Z&~7EMk?C z-LmOd|Hu$~8(TbI=MS%5>nq#brt8Qdzw{h937ol>rg6Iy1sDS;PhH=tGD$4ZB{RgF0SN0r(r!;@Qa(?snRQphiq0I|FY+bFp{ek`HqL0JZEIe1H zwy4`>B(J>ax36`1L9;L6mEQJ3`zi~yEek&nzqc?|NbOWVk#SmK!O1hHGCmL*Pr@sj zM+mRehQdTp%9txsi7<_0<`9CFy^81w^Ed@ql#0WbScqbh zm!#%C>g<8t2#}-l_(w&ZCQx$L}6FcQy6ehJDW-EMFZ5 zzpQLxpLqZ-azz=FAr%PMP{Kemtza6Ws+c%yn2CnNzBtIPqyYzUk2v^kWs|*`9dMDm z%9tFfNw|hF29j$9TQ6#kiL-~9tarE;2ico6uuuG696V3i#4+>0GLZshOukg$G7UEj zq`(TMDS8AG=L|E^bZCu(xF!wk7w?OMzfd-D%IsJsQlX3~l$ue#!F08nH57zv5 zqsqO~Lp$yo3?59J7WoRDO+CHw!;9p`i`$)AMZpVS+{F9h;T&ZX_skA?5xz2pEHx>w zA;dtctY9cnaf`TfFcXvm{0byGX~08#^%c0Ziiv0D0fY!t1ydsxh|o~7fYe&SbVOAx z;x5BXbR70wfy5^bc!_&lfv-?8Ih@&n5Lu;yp-4?4G>k1EZ>(UOM9nSYl3*sA9Ijn~ zBqt3V5x;i@uAyRbH1og;kqs)Cda1w_8g3Sl_g1jYqDL&^Zo^DAJG5T8qQ}a9u)OaI ze4C1icV@>55knPBlhou24V(q!gB46y^nyj)eVB=^1DpWKNE+}FUrm5ptC;v^9#|=| zTLsf16}VC(%>vSD1=ADFw1~@tndmv}BS7+#2K>Z52v@K%J8bNSPs9DL?nux2kfU*p zo09v%dwuJ^i*YU{TO6(tAVo<7C&cd&;Jzv*{+S09L{6w+x}*XXG0aBkda7x@G0e)M> zVmBGck046Cs01c}=K?8Wz|Ddz($8MOXY%nyU>}a_O51eh=+2T8T;;*_M!5 zD|;i+0?W8DSd@{&5h8?^DB1Dkng%_VB z!UZZ-_)IqP_VLHZFmE z3oD4Uwfzp!-Bxi+mqhJw$V`MRyXtjO{7E8QOSS4^X6PCblqv=)ow!Ehm=#3kZMV17 zCwR57<&$;NOy~1@Hcy(CjxSxX3v08iAa{X@Dn?FPPFW+y3IemXHy6EP6{oT!%G^OU z39|O8*JW{365K|$>T+hNvWT531}>ebtdVU6S!QjI5iPKa(_9jTaX69$(YoptFCLf# zKcrd}pUGAh@leGeq~%mJURyy{y!|+otKt{Gc}bLoLuL|0@2VF;{7DkrU$u&m8LA=@ zsES!7ov5Pm%?hGuZEq<$ZWU*+B+Am^OA^HBsuxjwG6{}Vts-W!RYamxF{`EJR5c{5 zA#1Gdtwf>Lab`=RtQ=IYLNHgolEhJ0;RMyHq|8uNkwjIDs&t~N#yV@rT5Ef2(GAvd zyOu;*I~=(RvAOD%EFO3jo~l}voXJ)dxu=RzmzG=W%!N`@2D*mWpH!*F^<~){W*AAY zndOnK-3pc_r>g_k#jCEupQ@VN$h@^yq*xWBCH-Wr#z||)M(dRqN2w5OodFolz#%I(DK`JLAqT@vw7Pbq#Xis`7nt z^=t4&YoqRG`lyR6UR!4-9i{Hfg9=g}P=|)wRRzYo$k`*TFs`0Gxv7=DclJ)WqTe%I z`Xl$2Sze?dpV()U|+Ihl9uMGH!Ys(d~JTi(q)E6jZnYk~O zSjvtkIQ*VKorOk+y4IGR)fT=_?H6v@`TxD>KCX)b_9?-^z21Ljc8gy%fiz&D^`g1G zI~SwB-Y@IZsfW2>T>dw8n|U_K&+{&6(;h8COZ)4*x*+c8))K=#?#P_wZ$>n~v@Y3( zCERsU%_+RPxBkn_*;9V^?~)w$Xk9=3Wd9zm8>gSREN*4Zp7N`{OY+*Ib-ShgeIDlY zdcWhJ?~+dL(Yo8>)R>obn%lXpBy^9vQO=E~k+Gpx(e2m;sV+Nm3h(cYAD(eMV+9IOG)O?TsIuaXjnioJxAUN2{R4 z=~G_TS#Ia{lIMHe59Qow9~m2K6*a(KOm*?fDSWy&etgC;*za~Ksd0~1af?%DURE&I z#h|2PkGp@)jjoX~TI+cOY+b6$shq+Wd*df&97Fs*r;;Z2XjQa0_2y-T#Qm4De*bZ` z{jI;6QLS7Tr;-jwciKbk5XaHu)%m0WN3F27_KJs?!u5XtYbMZ7h2w80NM&JIT<+gj zC%_lx^Xo>rerHP#_Ed+TwWB%}GR7til$ z$-xuu+NH~{7#+}AKFUjZTHU#?G!n& z%lIbOW#+}dI-j!WBj^6k^6@{N>mwiA{R-;L{fjm$enI#Xy8p(aw`UIR<(6t>k+$p^G018Cq=BVh>om-w zr#W=x>;pF#zZ;@VCMh3Gk^d}Y|Hj?^VR-+@*J673zAh!j@d)u*>`+E0Hc2dlxTGT`I?mMF1*Pl8B9hy_9l?e?>r5-4+wNDm zHmmr~Fa8DJUlq!z%z<1xHB@JYEV5t#LwjjsLWj`^amzDe-mFeF;$^Hw$`9*C$Ow}tcm7(!s zKE1w=9r{2z>yua5V8)xcUxM|STNi)3Z2un#&VRCS%|~E90`n30*G3?kFy)o8>clm9 zg|8kN6&>o53Sa9iwsoj$Z+Ta35%$!6@s@Y-gi}xK-A-Ja7oCs5d<5nrFdu>W2+T)d zJ_7$2BftUD?aRBK>KZLsA8G7le`)pY5y6C@4!OkY7(&-dC~{))RzJPx9veRQyzaSy za3+OxZaI!!(dBbYwSIk;0X6rbTf)0{p~d#{TSh-x9O}s3rEu0WW21lTMZ(K@;-3x3 zvX0yj77MnlFSb8NSm?jcBV&icJZU}>^Eoj;73Pb=d@Y<`73SB+`E9`;Z;-Bc2>5bTgYtjE4Gg&tn_d7$gohDC(TD< zJ}2gZRL?}8#Xk}B84Yh8 z?v74~Zu}c0ZE<*S^lt&T#o;~CzXdi*9%9|AH*Gf>KK0?O?8{YWdZNFR0`0n^UzrBU zE|jb@QeOpU-J2tPG!2v8Df!h%UGW<0-Wj{z?}YV|ZAQ7P_Ob4rvg?U{XL?rlJ3+CP zbuZYC8r^A%mGzUHHOieM1byg^erLr=5z7<(GW75U)MwiCPCM` z3ek8T%^eVsg$!+=X~><=N8+YA@?+i%I-WDu1YQhQA3ze`NUzn$D+&7XY~j!(k0NU< z5OShz!vuXTvjSo|9@d&J#P(PVMyTxxqqYHjaw{-H#S0l@HN?RhA_#|Sh)on*Vw#g&>Wh63}C_6)uNJ|O`}r-P` zev#Jc3aZ5!X?`wBG1BZTdT6BCRrJzGv%Bb>QIS(er)Pf8kRR(sC-!l0I%-Sd0e z{a7WPSR*F#)!6m6iQ^?a7aD1QdEqZm!Gh{8C-JfC0=;VC1CQSj=x|RfaQd~*FVz1I zw4}3ge>v!g3(YOR=XgoPqhQefe@g6hZ=TThWwjUc;yVS7v_GTzJ4Rmn>F+Op+DT|T 
zSrB}RwViwve7wXpzsH64UnG$=O39j~WGyBW z`*il*?>0|sJ6RYUQPP#)<3?lpl{uAHcK$b#$ob{uN9E+ga`Mx1a#8uOnr`$9*`=23=7uAw>jn~wYc2}gXWBL!!wlacKLk(;zdeoTb2A*oeiP#6B z;|g_$ks3<8os16eI0|X6A@e@ST1Lb^3>|j`S(-{b7e>br$Z|4dR)ehNMC`NB@jD<( zONr+PYQ4j0$JKhyl_wpWVstMMFyX0BZ$~npsm-3kvdAeI8f^f zg>=}ExdvpdAY$Kvwm{ZqrE+H3-tI1~2^xYGOyI?z?#7MMRQ-Q9(qpqa3O??)IkSN06|CY%XydRex)rz>@W zX2%N7i!f-b>>2G%I2+LZvg~wESLFoFgBAQV!r*gd&vJSy4|{&jc-y75pN? zpoiAg^O;g!R2br&PH!YpQ?XSByz?PUY5KTA?`O-3>Bw>ua_T+m$qxaR!)2N?kFnIL zM&oa(*aidM-4Lb`eLTANvwUTdZMZjerqOsb729UOD+pm8qL0_~e!dAZBD|^0M&pT8 zY>xr2B7}*hkBd=1%ae+1BfY8YM&qee>{kQc`w(U-ecX)t`6kG~dQ-WL#>`YK&4AY( z!mI>sfEwf?Tinp>F?=wMHyXnHOdl_&e!fI5(#CtI3mZv-)OLlJp-%F=JpB-cgf=K7 z9?F!N99QZ4>{M0M6y;47X)+d1!%7?S;Gs;d$??N|pW8vkId7_1ld)797G}s(4Q1L) zj%R=x)kRI`y{QYDjG<}R6^1dQ%rQ86(oLI)=O>p^x>wXCw%{nav7%;YAkZ zOzbv8-kDG)Z4!9K3TgOsY$z-($6vVcMArCv>4Pi;E9dU>ou$`1980g&zOyK`lajVB zNQ9V~2-TMX5ic1D(zIPT`INX^1D}K~9Pq|^$0{87ywmF>XOVDjd z$~O9_Yn8Ixx<%EuQ`IIT)Azb=!`19VA9Z)9+^}xZ_3d0|Gnna{s@ss9js2)==YsS3 zD7%N-Z;dt?xuo6}QXags#m?7dz0KY%-+W!d_3V<5x=ty~oh=@|E*or;vwVwm2{*Fa zKI*!r1n+7I^mW;2(~{*|u1mPNA9>Q9O6%0N5zF?i*0s5nEz_dwm9l?#%N5^F9UHT3 z-?zFpx3jfcbdRNc+TC*3w{x>ibhdA^u1!j|O^dEy%6^-c0^d%(N5+uZOZSAUThn7) zw6o~J4R^A!ExLgz%lEX@`gU%$QOWWBs@rfkJGDjkOv;TtEv>$t+iec#_|kM6QnM>t zbVE~?+qQi1?KHH>$nl-kZAi=RY0-^LxnbKf`Y=et>tke%Jus(U9TcyCLfpUWPbmONhrJwi_Q zY^!d13UhBul%I>8&C-XyW_pC&>}74=2DjWM@{SmvyvBLH_uKUT+vM=P>Di#7_mrMZ z$g=v#ZtV>H_ddIfIvt3mtr=Rg|4Nh@7IDjMbWq|c(V*7&B#ifbXg`=H|0KtzgOFb+ zghXV>4qW#_PLTE&N(1;{hU;F;2||CN$RwqQnB%&aaDr^VP;8QrA(+u}!64}kQ9%Vg zafSa}Z)&jj4e<{6kidU#G&Lv$IjZQ1ME-Nk)S%K^;vMkeD*w67)S&TOqQYAA#5MkN z$EiU#kOMy4;6Fb!HHZT_>(CRo_|K0{4W@w{@F9i&+<$721aj0$vXuPuEL}&NZ-f{B zh`vCotp^kCJ9~m+QBuq?zMJ=$9{itl z>9;%jr>SY|t=g7<6oOicy)4q(C{l@e?_p z`ueA{>^8Az<8064|K@`Dp7PWmIaG>W)y)h#)iu=YkDqm5jSWtg5?i{A69RZ!$u_cq zEjPkTvfTXhoLooS{)H%Q`uUDZaEvb^)t2K{Ip@v=@vkoNw!ZydRSHX6^R;IEc2#yd z{L|1h;?nKtgkk>HjOoF|M&jP>-^b~H&qX(Zp;#Fy#+2}NXr)7T^nA|RvKlb7)b#i zYUWy(y|qI*Pa(-MWbkxASvL(US5zLjbaUQ+HiLrc|iwg6km`7J~;7$JZO~5 zAmaT!lQ)5)Yv~nd%@gMXGM^0d6JfqGxcd3+vhmN$)6y%+$}Vkfy^+%C zuy>v~ACUQEn4bvql>w}ZH%ePS-f-!3*yZZyyGzSIFH5VWc%C>Pkojbop9u4n0j!G0 zclrD0J<=*E$to>v{dA+V)4_F~I3JMtWSE}_^OXUtiV3By%{R7pI)MK}#%GtJe_n>x z+2VQPd_d-tVSXaaR|ZShhgjd;0$~%Kl1`zdw^P#b2b+u^dn4P6h-05Av*Ux|L%;rE z4Tk^nM;K%TJWm6a(2s|Z?eWC1I?C+8U^pYFlKtEbcTgMs_$IQwo;dcEGCMjLJ{wfY zeeQ-mSZMsX9oarc9Gj)gatFf&L6w~6y^#k4j34VG+j$+Au;VqxOiv{52@%^tF&;mR zWImPs7m|aFng7@i<59oxtGnjWA2sV1&BFpTHp0a-u{-vrRz1^k=j z5&X;;Fxw;?r_P4B2*(QrsW*T@vVhJf3t7Mn63Ysi(icpl1#L}a-W{P9z6L*Q3yk2Y z{4M}Z=C-9X$vkp13wVlS;sFZKlrCroXlSk)@C6C5xb72V;Y~E~RU6O90ThH|f?43` z20&k6+kzM1ZwnQH8va*gBp{0icw`_PMw{7pKJF@!b%8k%stW8BZ$cuo^A zlup8rk#(@+)*wI;P(U+yW?lm$-v+@2H9J`zJ<)&IZ}?Nawb^Q{0KCM zD=Tnkh6osiIKiECpw1RI#S(rk#53}NfOPyInNFQ81QZ2>JU5)c-=Li2Jmf#GxBIolaS-iA8iu$+X}e`gH5! 
z@r-yX|4K*~o|z7OamTat`Fwr+lqH%|24D@k=|e&R{xvWv4-DAiI6BjKfh0JX?wmvY z<@hc2jHo~{*13>K=kOP`Z6*rhl=eVW(bmB;dt@fiJEt4p$m@HJ`ln$ z;Va@F;VAB7RL06_17QkQSSjdnHzbcy*H7Vofp%(?1yvmAg}2hjOUs z;8~tD7SNvw0kY`;*Otr^b`_16oi1kxny!<`dpI27mdAXW{QV)GHg~W*;4Gf!&FM-u zBX@NWquJ#~v1FQivM`|#5VGa*!aH~l8h8sPO%a;grVZpwQ|Qw|Tf7h)40*HxnIazz zJT3$#+6Db%Xd;~?oW8>xY{vsSLN<%brBfM=Y({)FzFjRD%i{3^06I$;`79t^mOi8w zd2sUdl>neK45>~YbLTNnC-DpbJ#gxA+&S$y{tTyFFfv^>Q2&(PJ3|%HS-6%VRyu;( zdwsS?ThQgS8%qXe*fi~-F~Jl?f1FAKf0^1hg|aLPoz0~;f&SsSg~Dv~G%p{|?;;DQ z3)vILjX9k3>+KxI7#Tp|`4LzFiY;$%b=z$Xj%Dw+aw=5a=Jfgl8WI>Tq`li0N8DM8!Gs3?Xa=Ndk^D1qNNGfEk|q zXAVP#Up7$mMIaPL;4}H-d?9i@+5n9!;Ip4jrN3T}=O5rGU<>#OA>+JNpV0!LlRG#c zM$x2mKnTeC%n_guOi=rxzY5vaQ-hNA_?6{+M)cQ!nd%4hAu`wc75LV3{sfXr#^)P5 zP0)cC1Ni8sWdP&!S4z`$KsT5(IP{MC<@DE4bR_Q}hnDkJIKm>2&dHY>JMmfUgPe3c zLk$>k7k=|b&sE(oWn?O#gw#gfWS8?arfIgwK%VPIE)}0m4!@6tRZ#`!fF2-<3lrgk z0TZB~8XIpGenTF_i>XeQ7tW+lQaY&D1JZi&&FqP$!E(GFj=zmUL^|OOdRx#h&y$Y^ z^pQVPTIh5rj&%`-4w(#i`H7gm4~=sWY@I=(^Kh990n1P^K$4TmtsH(u?sp)|A7$$w z8lyBlB=1dk9O5z^8P(DT79UodG5W5huJ8Qrgb>?9Lwmr$EmVKtl37zpD{I zp@$*`ZPR!f9gFWS6NC!s_-kn4Bz1^S#Mc-MQp8M!hmht}{L`QbS@$S*J8znN`2kym z#b?GgQ`2Plvt2Dw_=xkvWrNR1sn=#{i3Q~FjxK`_RFZ&3--_o2p9{G~#+KI_yrYs1 zeNp^^|AN@Q=WaQEcGZiJ3qTm<4zBziC-{0uBp?7`D1ZeVdtqm(kEa$-kU^M0n}bSz z{d3UwJM`(N?=BB;3X{B4dC8ewmS17%Tnj?U?~rn!mWO0^ISzC=_Nr`{gJK(|6scU= zdJb(py?@ypZ2k_bzr&}?W*fKjg0FLfue097&B3a;kkIjngBWc2yNClAJP7xFhj!oL zZVXwht+)K^T=VO{r9NTO)=z^?GT?Kt|2u4kPxqdw7-QNmqo-39=tj2&%IOtUG?~@g z9x+`hxwoS|f;aaMi9nqVRhi!N?a}*)+xna7$9fy-f)jL^hG^IE%B--!_)2{}*^^IM zrQfQxn7ti_&+B?!??{$)9QC1x(`nzOs^_H2w$4ed8=8~)HYcT!3Q9%fPxmd42nbf{ z_M+1o=bG#Xg%^;QRg6{Y8+~v0y9WDvyXkWp)894lphf^>jw&7swho&Fo7vn~t+k(j zZ1yoLy9#WIa1Io|BL+62FEuB?P%MuUQrT^i(c64j1s+$eu3 zltkT2H4Y7|=M1zCvv{LjlCmeaMeG?a@BNMiAPt{?t-wz9-I!*A4J;=2)9Gw_I6ti^ zfXjc}6u{$?ngaL%$e~k8BjvqcRlXde(?A<{%Vd?hPxTr%c2R;FQCKj*TN}MUf^Aa5 zcw0ef+F%9tZW(c!8T4I7+W1TF?04)(_MWLc&STbJ1pk)3^(g;un)-#rx6=a9n_pY{ znGJPUpS=Ex=g?0ZBw@R?7Z+pQH&$S~%ZNN?5Y>31i=s!G2?gDL9)M(h7x@nkmiPQp z;nCjNt%1MdY;<42$bz{m(Bv)Y;gMmr?Us?}YMrbiFVFm?MNqHN$(0`dn=J+)_1ROj zId?&Ker4;s_&Rr4(pcqhaX|5bMejo@f7JQjP*-FfcP@J0`viLME8~~6&v90=u`oWf zKL9CY_Pei|;FFf%c#~za_&K0Y%Ef^(`1$cW?PpNqG&0wf#{QP?s=ov{&NBe)Ne$JQ z{};efIv=3S-grJhg~^KzF(M`Rxk{66_T9Ps7en70l_lwf{hqZi2B}~dfAZ>Q5B~C~ zO*$L?durbzp8lR1lJsJ*yqrbr>9VZA*8Ttx)Qxii|B(R-_w$_EM=)jfW-e&IrFjbXaIS*c2OwvE0M$h6-)nL`=f&7u z2P?5~boE!u=X~f*@lqf71K0cQRcM zeHZv;)7zyWbFAO;Bo%&fE}TD_DkE-OQa9JVmhG{Mt&2}dRz~-kGFFG3n4X&o|6%tR z*qR4+_MvtSW9k9OX{OHK0E-)Q{@yAhhW!A{hJKf|YwKLi`7JF(j(H_mINoUw&jVKA4R${X(tw&H;H1M*IynW;g%Ax~Vsh=(y_Z(sAzaI_;|i@-~e4TsFp$udIn{ zUPhLRcfVGr{c%9vpAr9@jXA_u*2Xo!(VGJ8I&h5;@_|_NBPQ|am_2pQUv#F zueBWp^b)e<+}ddXJ33CkwX-2wc}${jq>{>J#!&e2cJDK;XHu^yvXqY8Ck+ekrg;NZ`A2 z^rr)cs2|8#_`N`>kRWj7>9Ydp)IM?@J{i~o5QMG*eO>^II!XSDZvjdH0^kbh0|9_L zf9PA)XM`QCbk0cuF69qW;5#ExivCSKaT@qVhV`Psy>Mr zeN@%CHH)FAJZ_1JDI!LzJS1Zbo|KCw%j}SaMtJmznIRGUs!2%+6O{OQQ#oj~$7*r2 zH3(>Rpp=C9lk%(n8eJ}>NR)z;TVrXOziC%o>0192a@Ra^#iQ3EHdaST$LvP=ocCM` zz3VY79<7Eju7)g(u|vh4_k=?;J=7OOYasSiM=gxmhsr(gxeQw1;jmU-U zqo6TIQ0md1E1>T^d?cbbBhsoNGBG|Vt7y-a&{mIJiD*5<uUTWe(PjvxR}ffCEXwMFr!o}onJX2IL2P^#1&g_Y^10xt0$t@fEER2uFn$GD z5|fCEz2K<|RrOStjx$dge+; z+aON9idq_T7d3prQw?h9IV>G*i@?2tz+>(`=_rSFtd4zU2bah|t-2_p0k!tDTUg_O zxb^A-Tp|Z$cu{0M)ZR03Va+~7?kl@x68Wfu7ezFo&Ym9@*6c?-fAwLRL?J5jqR0lQ zr>ESangfV8ue`3aHqp=aE~<122TUqoOnK4Udh07cukL##Cok~=_5R|yjnK26PZrf2 zLQK9&l$WSPjbA*c4aIs+E~;@yKx*U=64fYZ%sCYFq9+Pk?T$IO33}c03AE-Y0#%c^LZSh6BIev?XsYKVw8k4@QX{ugq8W81 z=A14x%M&G2U#S)=qpSyg=y^a!>^Q=urf+3TJE|&X%@*i0&s#EL{s_Ms4~3X6RA0;* 
zedtTi4jHikM0ib~LJSoJxwK|0^pz)Ku~-lyuEt|k%phv@r8V22?>rAI7CVhdsp(r4 zGlVj^v}QZ>qvs3O!Eqb;i1xbP;HRuZ;{rD`-3@Px>jMIpp%QL80Hb>OkpCQwN)6FIZ<2zc$2)%zp7l@u;SBa~_( zYht8ytYSTPK;d4wa?uwN8*8K1#6Wd?Vm-~EtGtHgqAwwgYaz-payqfGp5{>}h_dnHO(rh-5fxKpp|{f^bQYCot-sOcD97TF24 z_exw+a}AMOYqwTHQ|I7ikzG({uMbOVZXll5epoA^trK}!WH;2)OKxe+EySBzyLA$q zb?#mkv4Q$}*)6R}L3Gr9SSO*cQ*l{jPXSAKU`y;aoe!7q?|}w;Jzpx8hM20orxs(R zGkN*GEfnX)UMhAEAzHUuJ;qc=F7Cb^G{$QyTm57>o`o?!G-V!7CIlmWfcV zTdfgetz#E=-vN5V>p5I38?mYGo<@v~PGH>qz0fo-He4(hv7>JF`WSni#JKzWpxItq zmx(<@IMm%+ALFR=B<{W=^s!eZOJzdla)jkT@73ol)d`!+5l#dDkG(GeYx3IGZXI!; zs3tHLn9Z#3;4@OT9GK5M{F#Y6%`~+xV(qLW@RccBeCIbF{?5dmZkkg! zwszJr_*W@g{O0oxUpH|teKUM$Fm%D2!}TWdk4zucJ+Vy)gJV-f2j_PmHdq>8Zpy0L zye{D+{A!BGfBw6}o=f8&o4&4lvMwPSUY;U4G=J=H($e@U)3W@lQ-=u{N(y_z`ZJD*A4|L7=B;{4-M%)|2%K32>)W z(f9Kg1SXlr*P55hkiWgq186SR_;gnp#CgY?%GKu10>> zAk320uq>zP>4(|Ts*eMS9rB8CK{+F4RB}%P{ws-~iV-7`jA%k%$ZJhDCZ#(&WySvs zcentSkT)SvxA3Y&2iURlv^~9kl`$I+$|R6 z4vif$Tw#v;nMJuv^Ef`-ZjLKwQSQ^&$A?wsxF;;i&oqzFVKWO{J&W>y#tt2Jw!pn$ zQ6ADfP7M26;NGw(k7(=@!?6~)ZWg7S<`FiWWr2IgqCBRt!-gv?aAPb=70n}jxZMJ$ zWKn8p?C@cg1@0p@bmW{p(gMw5xt_f(8qo`3!y_rKpK_kYT5e^Dz^gvS2QJ_irCPHMTc z%F0;svKF^dYUx!~WGs=^qH?^OuC)IZA^%5c{U1|A^RThyUz7fS{T~1L1b;``L|IhB zJ}Yq+;dU!2i)%d2Nqj}P14@dCo*4=L3n!i-e=_C<$CKv^?U{-fV{Q(3vY{}FspyXI z`h?x!pVL(LPj{Uqgyy;I^SwvbdA;rVm+tr!0l<{%{9}#AgvMHh0pP|Y8A*jcT7@YI z^zjO(tdu|Kj%yyS`1{g7()gFcFw_E?5l<-!HusOE7SW7a-&lnVuW*oOnQL|%PTdfv24O4 zj*(HB=(z@)O{n1@yMw3^baMIk@tTX-gy$UO+aPKb-RN<I-@aCy6p5Yad@+>FK z?i|7w%}5grHHB`(YPaeg&X1J0J82?v2%DObmLQ$~w?AN;!r4c2{WhVc6&W&^h}jj{ zPvEyA!$PQzrA9f2!TRxEDm4D9j%%T5Eh7wfAQdNI+DG0*jp${#e=JP@$eYx`W=s`; z|2!H0yOg-3u7A4F`2fPj$%_@a;X-<3PHOLwWmdgTX6yzCf-C{~1QZGoDL`BZ zWd$fdLS@1G3#1udt0Oj?NuQImv-`+2D`TguCk+q;SpxD2C=?)4fVdFK3Q&H8%7XV7 zNGrWoM{YQuzAa~G&yht|=}uX-4G;ub0`dtc6d+Q7xDd(;P=18Ug7+6li@gdXHk?ae zle1HFWUiHuQ`WNv2!bpD`2-XS5Gg=h2xSE*KSE`}`wJw5S7GFawDjPdoxMlQt$0pZ ztOf{zECKlh6bcY2KwJoA1t>p4Wx@Lkr1@S!5gX#u&2pN$kIb^NcglL&06~x?AfJFj z0U`y63!$t4?j)jwD4r!k)E_KuXqkxEOx*O)*0!!A

    2BGQ=uQAaCT zs4@SeuA=0U#{7@E(vm$I^FQjUOI~Ws|EQyvz%}N->u4@zT@@qECt|`XqdIasMmwX$ zk3L{XMt4C~{@?5>T^N<`OboJ8)}G59#YcBgeS%m&`QFu!)HLL~&+|QWd6u0^-P%mP2l@ad#C5Bs4)_HG2vWyEWr-6i$*zL2ng1mMjB-PL;SB z6{D|^adSzd+9)yA1^AF5@T#4vC^0wpLmec#7{yqQx^~8v%Ob?0W@he(06dnR5)U-< za*JHNV<@ApU9lekkm+I<6EIrU8CxMcDeiA>%>596N?Ejcq`4!v%q2WVFk00W`vCxC z7g!8(6yK>Tmqm(uo7uS^0`ORNT0GR;mRsV2j;R^Ncd0%Afa-#ZA&zQ0RTZ)*vACI= z`!5gp8z}}vNVm?SM9oA5)pC@_B*c|_L*V}=gGk7hqe4JK0?_)UBsqqp7n9_6dbLD9 zOp-AGIv_*oF;e|7TK9^f*N`R|Y7NO&fKs)Ql#ZmyHb5H=43iWXV3ah*(2{fvAfBY< z%I$!sC>=?gPpTEF0DT8TJ;dlBjgH z03;+8xU7OSK*a1U;9H#9hX@y!Q zFoEhM>4dshXkZei?+1Pauz-qWj|S}2B|(4;2$GZf7*`np3A`ZCC)9T_^kpOkL)`&P zlB5zn^Qe9RrXRzQ6bb+i$hp9%5lIt6QUNL^8qlFtl??D)deAPwq#ZDV0+>{Q&Hyq2 z@M?}ET_Qu*0Sq!ULVZ3-3TT)p00UlC5B}+`Kcs~LbqqDQ%m_o7&Cnmz@6vK$;W{e- zMgk-vp{|ak0?j87lHBXiT8wZ9AO*f<2zO2W0SqX3B<&E2YO3s2^pmtylJ>s7H)aCH z$Oe?TTDGJIMVt-bd%DGT0RDBiIDQt2x z3Eli5%wANwx}C?ps>fcT!kkg4a3b-IR(n&Wg3Dy~Z^g5lxZasmm@NvmpGdr=)qbT? zMUKnped5^-T<=R%*m4vKMI>Ui+CHUbT)5Dq4zS2uZ{WKpO<8O7$`qop`C58!n@w z`%w%wiFh8+&H_}EI$5Pgyi~yrAEBb#QH&B2(HGDb>QxRpnTuMybb=c$rlL70Mka~4 z2GDNRt4=pzILe!5RQq0^AUrG8`>Is60eOgpX;dCDquR(il;Sx23R!Mb*#==F-d@(# zRAx_I-fMgWPfYbDOWWZ{XO@_yY&E5t^%@7_15&+Hq`q+E8kV?LdD)b@u-Eu79+~Q$ zC~aSlG-HXMDZe$P&gnJ&9&efI9WV7=kDS92KT$3=rGDIN{2gAO;!Tpa+aXnT;u>Y^ zQmU%Q7>yUCc;lqLcF2J`ah3A&QmVAaH~?Rh;vFMxUx#e36F*jdyOcW6V|)ltN%4+~ zeWc<}5^+yaB)z|+%tybS zeQT~(J79l@iqeUFQ6#lmE%pwWE}fgyhN@DFN{veJ*e+%?(d=>FN?hY;HsG&Va!2x#?}0Rge2hSCy=dJ=9RVJ-}x?`KPw@s%w3v zD@#_!e%es%8Q?xX_op^;Ra{@`ijrlqUG>FV1s3DY7ZvBKZ2H`mOAKOB^_5!$^Txd{ zD$Z2R>vQ`|@=+|nu0#nm(q^)PP&L%+29qeU40ffPU`XmkR>W2{_PSY0#;_!IrHi0L z+I&HAx~jO>?NiA+EQ(!O(R<|?cgHy@;rey>3xi`?d$pM+70+_Zr@*;Mm@0d|*-+{! zmUDg%yx9PQw|{Kbks8W!$iD{XBn7$JUo&f5R^z0uu(Ts%3AOW91}v8@8oOGGv+c4KGK6Nt- zmQSX!>jOLCgQiibPArT3G#bk@FbDqJG%$5NYiWK8t*-vCKm3lVPpUP`IR8AY*7NW( zc(SQ`>SrvY{6reF{`=1LA*S}JrmT7S=V;G7zt35}-E>9j0@m#Oc-qtY@BG()VQQRe zz?z2J0pA**6wNcdC)pI zT6KMZzuldsyp(r!vb*yX2+Jlzd>*g+vPkB-&%nPTL)gSD%9W;qbDX;6u zJ3)Koc`(QJxk+Hk&vk8i$7!YYe*U(1Ong#q)$#IzX%9X9mf0qoxTjpN3n7-EOiXFOWv7Kj6Ri3lrm%q&j9scm?S`J3CBpTJ}hT zff*do+NhUklJRxbc>(Z}r2SPk{3S!lr|K&5{NeWv_Tz0HFX>1Qtt-v*gWpTq=W26p zNn`SnI$GWV_+5j2+ArdkR3`h^-OJktFHYK9^@ZP(;^c4ZZs&aqzhkf$|HY;y*~vTW z3i7^z-%k40^$YVQDal{e-N@Swzh&^Pc8z*5F4?s%H_r=Rl;l&jhQIiDGNLXkZzsIa zzz4tP@#2HYYwND$ZHE^medD_3+TtC_E9)-hdBSfRe519FTkMj&tS&8YD?C5xrJU-e z^Cy##b*Ss=_SxGPuSi}{mzd{HyHUT}-};Ni#>obC@p)Tl*FASHv!1(ndh$ngq&yUj zTJPOyHE1k5udK!8xzQ+|-Z@s!jeF0J)yCwx&~od&{H^X7^UlAkjmmST<#>86vr0B* zp6{*=&vT?@*YE0F9b!yB|EBhMo&)Wg=dPU9+d*G^QH#m5r)Aae^k4mjar*iC+Q2+G z?W*U_Wvk~J$De;vi_Wv7W!CTLTs62T>U??a!8}`9hUbo)RnHd%p8vUaf1VBPO8s{K zRd*KoIP2p(K}sQ?B+1X*tlg7m4Zm!#UAr=Fk<0m<+Fg08;ps`+s#f|fvN?abc3a*` z_)iAg@GCbhvN)ew`*q&u@JmUat}D$K%{za#c1zx8@QVhX+7;@Bnk0O!Tb?DHoa9lp zg1>Mm=~S(A-ZJLTerJ%;9NCTU}RNTiBR%q!ylM3QslIs{K4}VP%qk zt!8em?0d*U!xtrX+n;yF70J zJjviI?Q-=3T#{?;r+J^WVJtyqj&l-HG{VZ;QOVYAh!@ODW_@pgWpZP7YNm^VxCvOJ)M}sZ+&o(Wv zNSa?eGjAIFOwwl8&&(IhOPW#pao$Jp9}G5YVQM2yqL!(&lvkUIBlmUOE%dden+KIPhGe|@qIl1~?7ORdZ6BB) zDP@OTcx7G;-*@nC;Q?Ek`QcJ#NW!bR#S8aY++BUZ_Gk0&N*N)`y&c7c`)y0i z14=7HqF)W(dAT>cc=dkU`{w?oWg%g&_;+Y~4;B~hv%P2TS4s~FdiDIyrM(u#tM}R7 zH9t^V5)$z0@tw%M9d`=%+7_GdD=iM$_v+4_J$tk7tln#T$NbyUqCxjKYK!KoKs5U ziq*(BiiR4pobX7(Bv3~+SfN;h>{iTSkd*|M#GObT*IX5fwa9mhZ49!S@LIwoQstU* zp;(6;Q=DgzwS>14cM?^tc_S3-kxE4agRCdWB}@`kuTcucz_gEM{xD7EnYi+)ruknYx++p#`Fg{)mCvaf9{4GnqIdjGaBPf?61 ze?Z*2Isf{l7srl$g$R9d^|q~#PhrMR!z@1BAuyz_HR-mYk8z>NPQ$BwI|p<~bu0Gv z$Tz%#j2(uVd|QWuA*HRyZgaj#FEH6*n8CMk*dKDQ^}y|W-}n?{Y&X2ZU*q5tQqbyo zJMA0e0+a2Am-$u>-XXcIPPb2d!@HTW%`lz6%3(*yl~(K9yS_=kX|m1mC;kctkC3!h 
z)7!S+_}t9!G`z%L?%*DBuGQeS;Wx%NO*{=R@?j3B5JIc+*2o@Seujr3ng6MSOUUWg zceglu((_F`3@`9492`SVw7$7@Z;wxY##X~LzF8W&iFKtvG;ZUT?7UqqtXZ5CzoicA zLJqb*ymfRBGcWpUPO{$;hc7~WT5sO^dQW_w{436RKVyg0Av;-X(7{FXWW{(d-`=b zibL`P5c*&ZZ=$H*n@Lq2-%;`4@wWkY?3P@8uqB=$8iYJ}h}T+F;O$P0Mso0e zD#UnjFR!-focDC9+>L|tQy@lzJ9rO^j(RgG(QceLze&VkFpqb$=xgtIirkeG>nB6J z4Q}LJE?VX7PKkEqVEv?so?s^LY|%{b=@hvOC&o{L5Cm89P8IcgF>|9`IMIG12!1e~ zcciG!D?V4giF4X-2=O|&nD=c_ftP!3^d`BQJP%Iitt|T5D?Uf=#EJClMX-WXc#DfxHLD8*{=UQa`hR66^1U1-=hbnStc6{kMP+D za`2}-r@|9Ec~^CshI#jgF?QOqnHGrTU}N6O!moG6UzM-teDB9ZBm~dnEiPQO)BS4n zdd_!#PZ2)`Pv^}joVj!QRk$6IZ%b{#xgFCp<+hxIer1TT;EtAu1xI%%|rb zpWuQP&w{k=##c_qm*5ZMcVj1K}QguEn6haJ%tk6KliWd@2GJOlVQw9NET8&#*G|=I0_@ zf={=+yUE#>o^E1g=*7=QI0m0+d2{pLHlOs2)rPzHSqS^!qb>C})3zC>o2)k6$`RNF&;9V_OZ`yA2`6*+i;Wqvy z#ERgrTas=XZZrPLWTl}epNxP7yR~5Z^u%?SZ0K8D zJ<75Hwt@rd%52q-HbAsLh}B)O0X9wzo`QpIj4M+Lt(RNi^1?lYj+H4{?nEfoOU)b9X zZoh06cd5Thu@ZY)k%RW&=%1HC!Qa)B2t>ingBYM>*U71YT3J-O)EQheX&E^2%L_I@!^bB{+8)J$ z+d^KiC3*&WC3-O$fu4_Ei;e)*b|z1$ZTkjPTeb#NOJ2NRK7WmOsC32}ql@EOYchF! z!rF*DK5R|CFy3cvbYZ;Hnw&P?YHgG@-egTq9j~=EN*%AVCZ~))v^Gi^ziUlS9=~aA zlsukeO+G(<+1lv*c&aryY5c6UQPMcxnw&U(%GxM#e7+O;2iXiKqaS2iM>0t^;b=sX z4Lg#FvOY&6qO8-AOpvuY8WChoj%2*7*3k$rt8ygcWDgyUaI(9O&n*8ewI4M{o#US!gU9eiFk9J9gL+U z%nrfkC3u8j3lkDUuy+#JA=vu~vyWpRBzPRhK1xVDj(wcKK8~$Pm>r6Jmf#VJWhEqr zVxK3lL$S>Xvrk}K5|g>r3_F@II~+Tn z;1Q0MCnSbr)d}oytUh6O1a?}YM+A0OVqye#ZX!DZ`$^*LNbI6Sk4UUZVqzrLJdquV zwM?9S68m|g$H_NV2=8OLFPv_i_>y-vV~(LQe-&cKu`4gEZ|wRq{jA9x!$tfR2#;fF zFHCRPe(7^I!@zJMe>uYa*tr)5Hw?ctK5JrNxPT8!6}q1&f57Hr@2Ab~z&=Rx==gaH zJ$~Qpv`_fU5YERUUvyuu-Vzo6dRCet-yGp^EcnHX>$zL(j_Z;>mTw z7TJ%lXQs{Ln;>kD`M&u1`k^iKANS2no6BE}SaWR8i|g0jwnY8-dPdqD{vyPxW7}R_ zy#DDH`ycnsNHgFsKrBDD<;4%zb(>{pUQbV(&Hn`P=`rUQ5!bsm)6eXio;Hg=A7OS3 z{^Gmq)tjTvyq=aelRp=+HJw> zVl;cP{`#TK^dI(poHmU=12N~=!WW-?r>DT{uDBh29$4s#c^+T*=KAu@EJ5|&DM)`;Yo?-0F+JR8)H5gz@vZKz#!NvU!SJ~csNbV1i9x!%YO_iD0H%eT zN8N(L5#MU>s?22ce#~?3Pt@fo7h;h1uF_0O7h#@pf24kl(h%Ni?kdbA^e)U}?n$Z$ zSxE@e+?AV+(AzN&xQD4vkvPIz_1#IcA^I!K9qu>O8%P&IkoxX~*#P||CXf3iT4$+Z zU2&V>_y}F)X@=L{(wi_>x$CID$YT6fRp@wDAH5!Pk!wkHMjpqrl%dkB9(pY%iTerF z3~7Vkstg^=>ZU)z{J_;yRBl5!mLgP=C7@Sfu-s8fyIV1Ct0HtXtCL=iiQoz-6>i6I zEP3ci7N7nQbBx{eOm+pMM`p|NSj0}S{{bA<&W17pMemQvvA9veO0)aTn^{}W~f z_bbXd*XglxX;ZH++x`N^f{UOWb!B3srA@`EjUH?2_I+yqBW5~x24$w}bgW#`B=lw26ET|RiQIk{W=yoCN#I*;ABT}O_vO~P z#K*`-o4R}}?V~Y6&8@iwF77eWqfMQ@74|1Fz0I|`=Uk@8$VZwwe9P>^FdfYgbC0?( zqoYTf_`ap~AsAls&D^hD;-lrmP3^vP`yfnX^X1%CF7DCM!%c5|Y4(RP%rK(SSN+c| z&p)5VY4a_y55QD5pUUmu#5^56)b!f-p1mK2-h3ptZd3ee`C!v4-(vfHnBwMda|<@P zpN<}EYW2Ns{|zR;`K#P>o2H+Z4>a+7i|oBH+0BUDqnntgq6eBmLi=Eb?IHo2dQe%JKE_lEtKnE2)yxidFSKP4A8aeb-wNDQudBB$S( z85J#VYWB^w--L;3?#rojj*pVRZQ}T5+i%1iZ*I*gaCVQ1e%sXKn`OTN6WCmvbIy5s zl)S&G(Kpk69p+&3!yOVL9zi#gSWkLs?m=bW_aw z=75|Er{j^Vp3u6iWcp%^b+cDamXl57)}GMXtR(sZ%xBHWoLHx!2$m?6nUz4Fk1=gt zm*eYH9I;gt`Yh`#-2k(o*)qr3>39UIJM?MRkMtQBgXT|i%$#f@wswa;$@+o*5$2<2 zeYVPRD4Zn>Wn>ZQ+8`xoG`rogIDD%xv?dErR|bu71lbji$HQ5I(CVx>`b5w>PIGpa zqfNL1=dXm7EF+1|h$yC`mqZk^(HkO)`6x|9u^4?S%2{?HeNN8K?jzHzjGeNcG|cc? 
z9kJoe--UYYhVMq?IQ`>E|J+jlBeSSQ3cymxz2f#nXsmU6CbZYO)e3#IZY-){&)+ct zwRFYCq`zyB|B+k#6A$$d&qc8{3^Zpr7~T){W?oPD+A!UGqeE)Q&s%bKx@HH4F8HTB z;U8}Bk6h?Kr}*=sz$#W;n#x?`Fe`NJo2x(5z7MQrC8wELxNJ-aUE6;30qtmD6YE-< zvjuMBtP^YbS0B=X16x^l(tIr{Hzu4QHf}6D5!#_;fmKSZg=XWdu(h36%fFA`8g{qy zY6UGS@Exlq?Y6~1M^@O~uB(-_n7}bsPg;XTw&T|DyMn9LG+dyPHJ&zL(c#Dnzbm}T zppgPUVo$p;XPJf5)`+{^SD(`20}a@VF05I0(1{gsS9Fz0OAK7V{`|tWW!X+!Bk%TH zt)ry`nzA=s2wv9V#EQJzdzJn4ePQ>Xvze!hPeS3^U0K)ECw4_1J?`_iaCnzl&f)m~ ztQ-CJz4Ci`=-+FQ=Yt?!R)!}C*D`d4!tw`FkWAkS-+cBjOJx6t-tJ$>&7hc|b})3@d9>^ZW?D%~lowgG}5OF%vWg#tth5EnvO0m_e1S@8Y>X|Y#f z#D;U}YjSpqj?A_4amsqu06~x?AfJFj0U`y63!$t43WfI-(B_mNpv_D)$(8z2a> z1mqJ?C_tnDaUqlyp!^7x1@AAA)_Mg+Zb(V@&1vd6Vq!&i%BpLCAjlGsPe7pnkpjep zA5K8Vxo5vy&675}cM56gd7_1W$f*?ykJ^_USL<$fWLRkUIk5F0gPZUVbmd(u`G}Ch3Sj3scd9ME5qG?7;vE@&g z5CmBQ@(CyuAX0$15XuTreuT<`_ZLX3EZ0dR&L{40_3sfaGRnl3*D@gpvIOK4P$)p8 z0C6Fd6`=eGl?CrFkd|29AB#AbXyfWH63sQ*k1c=3gdoTgkWWCN0FeU3g-}+2@*`9h zyuUzlu)HsgNJ~8K>fb9eH)_L{vzQPBSpxD2C=?)4fVdFK3Q&H8%7XV7NT0fPK4n53 zWCO?#pfG@_0AfOj2_YtgnDAeU2@Ng7#vYk%>L z-hc5G-%Qirw?@;yZKuLMkT!bi!)TVLK8!{=^-*9A9bDX z@iXA9BTO%kCljR&IG{|SSIuLn02QED0R0S(2GBF~BtS-bO;w>til-jbW(LqlCF+Js zH0iG;qycX~piKx^0TN&!T(2ue-9}BsFx1NU{iJsa?HQmyT>lo8M$`z2T4~AKAz&*P z{aOQ%fiWNsAnC~9>e@_TjEV#@HE2K-Pe!W8_6xg96#XRo*TQ#b4qhc}s_I8KF|;a~ z5lN~o0+=2QtfZHLlf;t+nlTtkIT=oz6j6ajU_#8$jq!xK3@&~ge_}ew;o`e89c#i=Aq@$p~9AOW72 zTeXTS_V|iVtyWvcJdpV|0PXhQ%-7Hrlp26Bnh9fQ`gle}jQ~fI8&UB}DFD}BVMvCE zIx97H$sm_B`P731WReupaFXt6QAx6{5=EM<0CpAC;Yf-`G(jm~s479r43QLqj^n+rbEoEpYxG2@cQeaf5Qy7u-18jzlp%?g>Vb5FqFc04)ik_K)(lCxmw00 z>FH#lk}@$^rjx*QZ)2$XY;XMt6_Clw3IMezQ9@7;W{+BtCI=F!c(vXQKc`smF8F<9PMBzk1wt6B*M?)!K{ERh#w# zQ)e5Z6w3!!#!aq(Im2ecR>79Q9ALSb((%0r{JkcvFyNDm+{~!) zex_``EBeA|e^{NYa-w?vas6Vl6`!g!9w8rIr@pgfC>++vYxKoGvkhqODzekPlQqp0 zD9@^7n&@hkrr9|_{+8;@brf_N+4lBVk!sG#_fkqw7j6w_yMJ3s8pZnZbZ;@w`AEkn zQC?Kdr-9B^0__H>#4%Q8H+e@rp2MNu@emK=LQOU zf`@0TO?2buSj8p0o>y(hYq+RZnGu&N6Lafi-a@6V_D471trJyaOV$0DYo{xPaX%t` zW&ChIa6>l<5GRUa+^BipTnvVxKGquc4aTAhif_^m==b3@IS#b`X#ZS>hBt=&h+egWa zN9{k+-l1uKAXr+XwZngnHtmffRj`kyQ00By9`N8*%y0`dO2X4r^{P%hv2huXm1*0G zBsDc?O>9wBW=eR8uzppPhB6c;jvXot$?f8X@0tAROWp8Il*CG+7!(baCB@bR-wA+i zQmYO(@T*5*TnmT2{Lvkum$-`K?G4$lZtq9)Wzi(^N#bC?R#f;3d|#bzHdW6|WaunE z?r!4%!)t^G01gINnnhL#K|>d*ga^5)>?X+&p&rYa-wV_g6oJAB9v{j9j?LK#|* z#U}zjR13lokG}p6Ig913amD*T(eUD?V(qDrI8G#zVn}$M){f3t58$?L|83<=aOFOU zg>|8ZA&Is~((Npfm5jv3Z6J)zPd!yKM39+xT`N|U#p=k6R*TPGS@0SdRf=;+bk40? z$#05|=Lxi`-t)R5U9GUTs29G__-$arQeTKEhWWk5C?%_f(10h)nuk}3rxNxcfbpQC-O68hxWbpj%Sr2^1@Mvl1#5bCm|S7#98ZGJ$7eKx7y<}7%$PLAcP z0Fx9b`9?%tBNY&}k#wy~wbcD?TA|T6 z))7!VvhRg!G_$<)Q9GW*?3f39GJjy?AQK<`&T(Bxr=plx(J9R&mZ++=z5Q+a@ku*k zc3ofD_B-rV7f2%*HH_^q!=b7fT%IDbSzq7W&K{mX_e(VrK!+UF>t?k9&8lZQ1~H z9p_q8!afMbhN^q1M1y7kFaRZ-r`OQK9{^GK?{x~5;fmiv+%CtAyr$c+ynW1Ndy%kA zBkTQ%?X8)k@PN-7-j8XG5gd^mC*fsB^y4Kpv2i5TeqraRHk+)J!(a{Chv~6Rdl+JN zhgK-j%QY3FMeH^K(19K!MGs5$FjP}-msVH|nri@;IrK%G{W;0W+x8SfDI}3v$dj#l3NrcZUznT@?lVuzL2^cccOLb(VR--^E*=XW;m*bBrz6yo@0 zz_nO94UJ@kn}Hdw?Ake@o+}#4&7xXreDtPV7Q1PTERz}v0L7?G zib87pg#fdNS_M3{k^tkX+Y{;Gt`=HwJEBx4eB3(#2QhS+oo?4|#hRfH8!rc!EWg_Dh?a6`OnO+)XAHh?Hi zk%UV!#cZ&Gld>im_;#QjreSEoSg6!1>cGozHM6|0g?DKBE30O{Y7?4{s~Y1qZ)dF! 
zqJF%#=2{W8I+UtW!5Ex`_2}wz-MxdHK)v!%b1aF_tQm?Dps6ywI!Dnau3{j?x=CkA z2{)GHEY|hsC^Dl2u>EzU9?W>)G_?*{69Qyr7F1Ii+C-qw#a|10-+F+q@0n!mH-7S4 z_RY>UBX1Kq9@vXhq1QX4a!G@+`{ez(5e!j!e@&B8pk-q+&4mZx#Hh~)(AU0YS=cl* z8N*_H^l-N;QaaMqZuB#ob$=~AkY-zsG{sM1l+F#BpSePyVG;VA7ZNe#J7*DZCB zF$SO84FjRx(lHXMekyfPQTCRyDwH)HSl-47cy}ch1w>dDV@Q3ndsNo8U?jJw*8=70 z)kHd1@w&Hi1op00Adaza62mNUe#n|$q*oK_wy}&oJ}A$w0rG(t5SZUVWU4Ab@+>GWiIzk-DC1SUb{ zV7=ls_)H+blmIYr$pf1JpX}qrM4rZmD`ZoSky6D&fkEmLIY?JnhfkGbujsNX6uEvj zL;Xs&OISk5L#ZbYjtNT`fKWIF(+$c32vfn{Ck@m0W`oC4bu-j^O*xwwho5St{)!@` zBW|cd>SgCPjG0U@FLA8G!lN4Z!%&4ms1e>I+(FYztQt`ZCB#WReL^b`%y@TPOcJN) z6%A?;iFA=&c?jTv9NSx+qEdY07)W;qhsKILdHfxai(4e7On5GCyG9y-~S67B>ntQ=T!vz4^PJ_y2Yf!(Ew+c+|Gk^sXlB1P5Y#b3- zi1Y?uJyJ+KB!Bhoi90>6)N~b{PZKv1GGz3JKNFGPQ+qH2_GJ4>D`Fus=oD!#xvU8c z3Q;&xmk3B3y^;6;zD9EYL=8i?c5Fxpz!^USykc*+*Hbrf;Q%#*P1vSs4C%RaSmhHE zu`H9QE3}i2hkc!E7yhxVT%#uAyy`dZM9J+>1>};+QBwF2$hPDuwk@GN6#`?g%*JhU zfS9Vq%2EyK5kaIz$H@cr3in?g76<0Wm0+!T82AkLwBumeSgTk!4^?xb(OWZIbEynA zN?WLfad;Z?;X5i@#~%S>MvhXA?h+Elib38>Q4obFq;5naQ1@n%+V)SX(w(zm>8~VO zWwHWLN%VukD$TSX(3QZBwxy#0_5t#@q>KS+<$=O5Y+U)g>}$L@8;_C^jJS%c=XFDZ z^AjhtlpinBdp0xK%LcvGzXVP|xJ*~Z&?)25T5bG(Vd6uPqO9g4@JzX=V<_rbc-7ox;ey5EwnBAZqJNYoH1Z#^O2D| z#Ma{D)l!C-iu7t_*N%HDr#38t0dE0pO;aH0o|RI9HLh<%12`tIw^{&5T<-(AKj(6# zZut|PkqQwTV8PhWP10dEX#*)`gtC+t$^DNgH78Nx7#C+)QGZ~D9IIV!8IA)#n#y8e zL?xiPxJy9%(+7fFu~7`*p;{^gR;;y@0Mb9j-Cod2)0Df`UG0l`v$1zXBdCcD979*d z2;P_kp~`|syB>_RJjq0b+s*ztb491_@NmCrw4#&XoXl8OCuxf~)ZMs8OIxhtQKxII z3kyr;#r8?-dp$DQu*t0 zfwrGwMm7Tz@(gA5@bqI0aTiK3GL1ucP4Jzo8E+fM_Ndy3Jt|sLW6j`vl}S21F7V`Q z^tD^TEt3-!bJ}^?3=>^@?cgge5^zCZSfr|m^(BVIMJHA=cqo^*pjCJm`pIz_@Xa>Hh=>|TxRFJgmrwR_YQJwC!cRm?Mf^jdB2l{#iLP>-~vcKz%=1~WZ zq0Z=TqsDcrG}tOvkmnf+luoGwKZ8Hz`VMQRt5GiEc^W`=grVOE0y}2?NtGy@irTOdHGeTF)2>7XXmRL9_0!-UG6=4 zbE+2jaVn7Y`gm_Z#1z(0g&hZfz;m=EREl<98{j$~sgul(o%%R;4}3$pfM?K)>g&+2lh-ktVtqfxH-d26?WNBrGrWmXyKn89h8n38cKWozqiIrGs`7d zi|53+N*|!8qg+N;pC+UYCgXUO=%ptYGj=Qbt@S4t3mx0T-iQ~6Ro#xs7xdLgPhQ44 zZ?9Um3Vq=xJCBCU{63xI_{qyv0_8Wo;4h7P#`~=&PhO65-hQZ1d-Ae6$K-xyerw~N z!Czpa^LECv0`!FzJCBzKDMA2g=P{6(Khd~H-ES@a{f{0)bbcQD%TLv^qX8E-*m-1S z<~KF&dE0L-J9)VR)A-9!bMi95`FYK^#~t^M@Pv$os8v-kWkyD<0w=i@VZy zE8Jw1uWBR}ic{V-ML4-z7f&)^N#-dny{#TJAIW{bDf9G(q0rX@crGSt&dJk}2!*9D@d|9tR=t{KA{zjK^cx#SJ!N!aY@3C^IUr0~14nU(N2_nwA*5{>)W z;%wLF!>hl~mk<@$vbmMMZ$_SlnMODMoSo3MahUae*(V1HF0T|kjp3wRRA$5=DAU7& z%7uHwVMNVXN1_nfIbo=_luXGSyFLML`zO?2D48^*N|Xw$no<(3R^KlP*Qjes!nMCF zF~W5BUar7Cj_s9;9t**43dIhtGwKpXk~8DH9ya;f%<)t#`epR1&_5~*T+-v+)>UnvdVpsIRn7)&y=ER9 zR@?W^(wnz}Lq=?AnZM2FW`{@T)Cy%}PtK!J={E|mu0JjL+b-a~Efb)JOwIXqV-R|6 z>#M-t>!?A%sz4+CavTu-Z831-%Ner2Rr*z&Z#Lv6`1MMEsWGx*pU-!kGm@1``K2;) z6`^lrMK~;0Q{UksK#t2V6>giHQu|Bj*I9??6H~YSW!7ISCKT$XZvWd_p4AC2QVgn- zUo;v7cy#s-Tsa%C{axwVfSsLFJN!CvhuzLmY1m)v{M!QUaRpJJYyI1ae#5~-=4X2S z)lPq&v#i>r6136Z+~ybE!MfaluCC0OGV)_W>qveW?6l@qhesE3P`;;(yrJ5E%C^75 z9}_MQO^QKN={;ks7>g&~itn}^RlJO~}UpEOj(NPosVJU%_NxcrZIYJK+w^DlP#OWl2$wx_y!0?Nw?6C?TI zFoNb?hldcUmcJ|`*AN~Kd|5xG_qRAW_0ax(Q%~~O^M6_5)5qUVJ;`6L^KVPHmQttA z{@eQhhIe>mu3J6j=D+Ow`t+JZS6i;EIdrY1bj_ihp21c(%6;#FX|MkFAjrp^r~l4Q zFDL1LXQz^N^#jSB)hlB(PG#h%-*tsM@Kav%dqEkw_i+#w-0lz6ZlqlNdur*A@5KH+ zwSvZ*e@~6~cr@-$YWX(hkIxn#FMpg=TwMM*wRou9zrLEyE9mTCR}gakrh9os1YZ0P z1RZSOLm{tA-{NR0IlPQqL1-EY41+~!u6D3Hk#FVj!j!hDqx=ppBS#)G1T)Xy&YzmP z^WC@_^pC$<=hr2^X&aJ&{_*Gae_Pte@1rR<{hRH7S?>7PB9Ni}=yV=C7Ehh-rC*|n z%R~N+{!i}39DC|+W9c{k9jEv2=oEjw_+DP;(aRld0a7lfm659n!y|#=Fr4O12m4ya z(JBA^W&GFTAwO{ic)mYR{Y@NtO!N-i>u*;2MST0pttqqrHucZi85tmp|KUa*el}Ay z^yjI6)_&R3F=f|Z&-!P4B+#QcR)O_f()!@~>zSqNZ(Z49YZ!83cS^k9r$=HhPHwSX 
z@Zih(Maz$$UAySP_WBLWht95D98xg0ZO1upKO6jEk2Uv9khI>*ujf%8ey6;-glT1K zh79J6ZA(e;_H)3uJP@V7Hl#j`P~N-(iatdKkAR{?Z$CGD%Qz@9qCWfq)T*_zU5*Sk zlx|B&^7i{0-?E`6{q;iX!;7F6C|ZRKwgpA!z5RCKTTX%^W9q}}pjMrg?HXk8E>M*0 z?Y9@)3luG(KKvQf0!6mS;1i%I#oO-?xECl|N`3eQ)M8oL!jZvgpeWVb?cbbH7ASH+2HyikY2JP(z`a0`1@&P!sKvIjbq4PRiY|Ejod)*;MW0e1j)7XB$PK&~ zC?b3N5x~7b5sdoqBh}3%^;Wi9khiy(4EPpcvM8tHOa^^#|3CJ=G^)vLO;@Wd6`91S zD5IhUI6(kq2#}N`q9Li$Dp3I$L{U*75Fv~aQ9>A1RH6g|A~OaN1rmk`1R@TJ5*3gk zK!899gb*M=LUOTs-FsKxbGoZ;cc1Q4u5azNHhVvO(|*7Ad7d}a5~r^hB#5SlPw!Y?mjQK8Dx5<&TM))#1k zv}C-zhBXgWmX%DD(^*=u1X;;s`CAqQro2!xRsNB68kVq7GE?5g3WF)jOJ>UlSPx+d z@{;*-0gDP#R+Pxfr&wQL35pVVxr{XrTe4*7lBF+~TwC(<67Uk6C7@+s$0eX2z^9gg zmV+-Y0j&UsE&;6sEe z)CdFD@M;9XH6?Yz)HS?1L3T}PHDOsaeldxscF{^}2?wI_YYFgZrFDc~qw(tquF*>C3Et88^@PjON}2?8G+vVs5v{a=a3dPO zfsh!jw2^Q(8o!Z1j8@VjJdVa|5lW(!v8Lsj;zZkD_G$4 zWEG#r@}*x^U%6$u$M~+tF(t$M9)U`R1dlsPh7UcSDH#$yT9phlJtmb5vpm+UGJND= zzRK{i$FWt0c^-kQ3=2H&tTHU}c(%&0#G`eUA=zVcmElv5HOhw1Jj|61pL-lrHmvXn zR5q;gxT9=H^?0UiSmV*EY*^HGciea-ypo-xuk2@-cuRWfr z7{2vrRWW3GOsW{Z_gJH9*y>@fYWUIPn5yAtk3dz!c8@!%_b+cPS$A)N659q*~>u>15U)R%h;LQ4Ey71Qe1G@0f^*40kU+U?)@Sgf*dhq`G1A6eG`Wt$1 zemz}pCo1>lG4E~9rz~8Gw&Wb`%bh)Ttx#)iGu;WV5P4KCe zpjhz5l^`5AbR{Sbj9&@DgCDE}#e)l1f)c=#m7qj$%SuoZxML+K89cHQlmebx3AzPV zQUcuuZ&U)^0dG?RrGjmgKzG58N}zk-Q%a!w;EPJ2G;ru*s(|rIpalje z2G=X6)_^}Kr`Ce|lvC@#BIQ&Xc&SS2OR$DYYCU+fN@@exOeM7uyjLZa4*o?YwF&I0 zlG+SLsiZQ%(JHC0z_(RWTfo^WsjtChDyeV4^(v`v!5>so-+}v7Qkh_pN-7JyR5kTI zSVJ}S19-D)YAe`G)puO!Sj-(Q!y?aTT81T_ty+d;&q*!Ar=DxH4WD_MYa2fIJf>|} z;TfoHSmk*~+mPz{Oxv)=vsK%$&T~@R@TKP(9m57sa~(sv=P@0_X3sz!!&jbnbPQj6 zKGQLL>)EPf$n>1lF?{d2M%S>_^PRnB7rNd}vn#khQnL#q>d4T9m`lzCh;M4G6@ur~ zKoejkb&v?WQ5`f1-lh%`gKgA7Q(#AR&@}jzI%o!bQ5_@!hpK~S!FYAh9Qc7cXdYas z4w8Z?>L3}oMIE#N?obEG!6WJ*1$a&!w8TtlHE5~X#?_$j%(krteQ##78nn#JaW&`% zvs0^WyB_2(Z`upjqc7VBH=rNb2REeO*azQEr|*NC(3kCpo6`^Mhg;Hb?1w|>^!;!s zec1u{KKg+J@I&+)2jEBO^aF4Nec3_yG5Udn@DubK2jQpb^n>s-^ks+OUi1Tp;OFT# z4#6+c>4)Hc^kok4K>7g(crg8j13ZjQcYsIHmmL-atXNz56H0xp)n1h5+RD8s{f|CY z`$k@Mt;$_~(-HoVPIrW7(w7~9KcXKv0?(u0I07%C(~rQ(^kqlk&*%q^!Yk-Ej>4&Q z`cZfteHk3yu=r5$X8H{{{572phcoHRoZzkW15WVI^czm_FLb&SyobIFak&b1LiII~ z@E-4&L1@RfWDt7sj+umEd`l)_0`Ev7%;H-}geCEgS%j7GEm?%s@s8Pq4e>46giY~| zj|j%`EsqG`c*h)qb$m+>;iq`V$ArW2EsqI5$2;Z{PR6(75Tk;42@s9Zf zOngf|;aa?70U<8FrGRiN-m#ECh;Jz*WW_rc5enj4iU?2R9g7K7@v4$7f%^?Ywgw(G zkZcV+ZV>Wgpi94x(X(De8eoY*vQ3ZB)p^)&o(llN(OLDS>YmnlcxR996Hyc6&hgv$v^ zl>~GGzLF4;pj1V;k$|rvBqk_P2zL|k6aq0piAs2!fTt2l5|pY5&lB*~gqj4U8Uj55 zUqg7Cpj1ovn1HV(bR{U&5e5?Qbp%0z5{)pGfTs~;2}&;s%M$S~2`Y(7^@O#F_u~;(~cQI7Zi^-8{zLqJ4La zn?<{2Or%A-bxejuJ0zyoqTMd0+oByBvtZHwQ;hEJ_I=!P``zsaxaDrU(cUv3JhrF? zw|eYQooV&3P~Gz?SWA#!AFCxOsK2izD6G%Z5){=}XbFnzo3#Wb^`Er_PwI!X1myY| zEkSAha&5uW`t{m^vU)>p!LxcxZ9#ecA#K6)`V-oM7xm}01r_zd+JegZSZzU7{e5i# zr9MwvK&`LP7F5?aYYS@XKWhtW>xZ-jb@ema0$TlY9l^``^*Vz3dP5ySL%pSrpt1gt zj(}c&LPyY4e_lt>Tpz3>VARLz2wv6S*9kN?G2;G5Iz7Reynpq&?b<)Y?z+;>9x8Xb zg7&U>;pwhzS>frUT~YB%%zxwt|H0?_ho08r5-9fTx&-dn|MR~j{olyJ@65$ap6|c( zGom?lA-CNlUN^7Zi(hV^hxQJB;FZ2@=7CqvwmruGv8ew;SNEsh(iy=(;lFzC|6hc! zX&*}(>uDDzsdCyyW97RzXm88Nvu#!tk;nf6Nzx<8FaD3O{oe>wdaY(h#O-d)j;PxU znjP0f<+>ZLde3-xgAV;uB}r8Kr_=vl&i@x$)~u(|uNC{wnf_|I|D4sY75mS@KC&zi ze7fDS=C}W*C-{FBGyAl%M(ggs>+k=*`~Ro^t3UJO`LkZZhUa~~W(>~zi7b8F}@Bt|xL7!xxZI<^yQ8#;CpyBlDgCbGd-B6@6} zU<31hpI{?1uTP-GtmqSHGn@Oq*tjAL2fX+F`7RSfT6e|B#0V5Ry2vd^Dz;>Psq*DMeaCv1 z#J2S7lqGgfoa0^TszmiR-_$npyYT+DwzD6Rg5ou2Yp3}Usi@KBGVL}$Vl_(GT(7O^ zPpmSZII+_8*m#FU<8amDa#Cp^U#%7(g z0Ad45nPH~W7C>x7sWbNKXrhR86o~PQjx&nbgwkYq>ZGBF%_waKN~aA)WYqs-^X70! 
zp3cL-i?0g{Nl&)b3BKG7IzH}XV|_5@pE+ay@9Y0BVbbeQRz94r$e`JGgO#Kat2)7K z`lvsX_F>aMDPjMMm;X;GO28WbsXPLB9sm1vY(*HN%<-v?M0mgV8lzO}2lN&I0wMwA z1V9Bq6aXy*TmkS$pe*>~0%NX<0&wG{8aq9;SEdT^W0>}w~3V)o z1I%MmHxl3WdM`J!uzpbA0zg0{fSdrR0Ehyhg@7vn{s@!>e_SB#M4U&NC#0StzU}v3 zZ&Ysmps59bfJgv20Z;)D1wab{R{;DGC=33$K+-`B1(@SfZHRCCyw!|?tRFPC01yxf zASVDS0HOeBA>aytKLTaJ9~Ve9ifiUrcUBv3M?`Mv_=`#-j7)gDQjIW7f05KhdgL`|C99Y;9T3N&gb7 z&)TC&(_g2k9&3N16;qp+wD_BU+!|ERQ?U<^#Uy3GY_Y*|fvjV${96DN0$I73#e~^@ zs{?i0Z=)e?zl~;U`)xFT+i#=k+kP9(-S*pPFxzjTwKdw?cd>yEhyjoX01W^s0F)3= zLO=-tCH${aLMf+lLB1e_(Rw(X)F%5=UH{4%=`0ze=j zKr#SC0F(hxMZggNHw4`9FXn~{!*O@t^oKWP!{}&0%vp0ptWg1wa%4Ed*Qv@JFC5 z_`?FpZz>=Mr)}Rg+;>RbDy-^5W0D>K0g(W50-ypQ3V;>@t^oKWP!{}&0%T70CEDL0w4;276PsS_#;pj{9%DKJ{6FUv$gLU=|iZ8gjKN{ zGxPumhy;)m02KgH`0v(2SYo#u>qTcMX?h`QmH+i$FOj$r8@UI*+jjSn!Qc;v+dX!e zd+*)yV{n`HVw0?Vc^Q4q+)%dL#C&;g2PUg}(!#DU*7G_OHY5N{ofS^yJhydmNBi@7Vi503|QfILQxGvtoN@5Wj$wW z8DS~YdZYHA!%HM5g=IaB%x|ToWa!(%f};eo_PH|gSssJ=4FGmy-$ttud?RhF=9_2{ zf^VbAzRbN4`|{eDr5WzFwBV?*Zvpt$_O|%CPe;Vqx6oXHUkdsk`=y|M2X%?MVnSUp z=`Iuaz!Y;V`J^*#p#v-CCw?4`{HRmSUKkdO#WtGtXO;sf2_Pz zF{UpQ=_}G-*axif99RMEsyaL>u7aQaa*ot%YE**g|i+UOs(cs5I&Hs1}K>~vR*u@sZeiZ=I! zZg<&)7$)sj%r36lA3MExptw}tjg>lJ=M)PQn%EhkVhBf@HBxMsGib6={ix}5nryl> zs*x7wuE30vWH8yddm~5D?=BYzC9-kTU*-Lz1-JPHMNu6#JxW0@pN~5ehpGg`I`MJfJW)DC{&8 z<_v}X3Wfawg(0A@qfppkDC{5 zp)ftDa4FP(36!<~qs_x;5*Td?Miaqk<1pGNj5Z9T4Z>*sFd7F&>w?ibV6@LL8XHD? z52L+<(O$!73>b|Lqt(M`bud~rj8+Asy@1i4!Dyv0S_zC+2&3h}XgM%i7L1kwqdkPt z(qOc^FxqVxEqRW-k+Yi{em>$&qL3g`wWxD%u6#4=w~@1x8-7l{NA8+JdnI!kDmBNZ z&AoZcIWLEZbol4r^|O{6I6rXHJtEW-{i8*?;JT&F658yHCTF2P z-918>;2$Z+G&$4#>1QHX3I3rXUDLY0CQ0>duqJ1$Kiw^&G{OI>D8r=g zLz9FuYnkSgjwMUFmF2QIN>2z%Am;JHIPb2rNTkt7r2WEIRMPeM_bDxOVJ=laKMFP= z&c8n;&L3a=%PN3k=Q%!+@~MSs9*_S&!kDJgizBsu<)bMB-Fpf_GpjuB>9fyZcWa>!d9VS zsA-F>V~@k6$sNh9nwWuwEkeCe;VWB*9;-?6ZB}fA`9RQS(yBT5E2Di-^~sC3Sl1)g z3hj;Dgy)DQaKP6r~w43|j!`qWXQ|~_T!fNdM7c~5DVQ_Sp$4LEG z=eo=t&H6bhWsFy6yUh3-f)cHpwR4innh&2Pn!Vm|HWA#cnUhe~^vcWCtZ2jWMB`>q z4!(?j*h|MOe#8F6P0i{#xU$Aqo?YNe8!QqxG%M%aENeLINdzC+uqAPI^NO69viesZ zuHfw(K#41xzt4#-dwJMH2fS*-_lZlI6_2oGv{&w3rhH9t!fccDQREwz25D5}cs*rJ z#QokzdcHc^v_i8ZVf5g%SC~dE=FD!>`q8T& z4|aR`X;fldp6~j+{$fJ;!49v>8Wppr(Yq?vpGqh`_{r;%#tY2p-Mj9ucTC7R_`&Of z#`D=zQ#*s#+a#nPeCKsuqa1VU`OXvTw#PHB@EB$I%9Dh4$K*Omd{|SB!5I?*$RCdA7aASkD>ha5)t{dz{ zWf}jhV*K4&1zt{%GJv5-1<1EX{U9NJh8|{MkIFPgCK@$>IQVgTuz@WqqW}RndIVzO zhv`x#iF(hHp48hU|B@t^6J4ECTN8apK#?gNqVO!|2P3)F)G zINZ#Fafhj7j;ON^Q``7s6E9j>UcA@SYA5HwRVU0VU zOg~|;19h+92z+pw_G-a3MX_DNUU6rQ1O5^HX9HtV>ae3=Yv39y{6jk2z)*DOf@8_n zV{44?cj$)=wu){KAHKZRe2pePo_@fTE;hkI8tXYA-PXEbZlPG1_L9k^~LlPH8 zw=>WaC0}qT+0v@PiwmS%8|a9Vh7Vod@=T*G?h<{sftD!o!l9j8?r1RLyy#$q4WfkM zg93vS~v`o%D5&R+-(5 z+h@1=;p(GtTj?5)R+`;dxlgt!Y_)Bi4qf%pa2mag<@nc4M~dm}bM zR%^$p(0_RJomq6;Pj;KMRx8IXqc455#O&J2pJe*;>QbDnaUoj{#>T;F^uMT&;-(ts zvSr|?m9PkXs(L3*&^Vnv4~~q3+UY-3e}fxn6lKqXBUVCXdSU9-xUR;r>=|%)oPCYn zY4u{<$HtNDDR9_I`v^UVdOGfHo{xL)a|60{YFl=SQY2zHW?$x7 z$VWb+*y0KKIi!_;s2KFbO$A6hIfA(um#e9K~XiAg!+E#IWLyoCG=@|tg0oLXY? 
z4DyPvR~%GAE`v1l*B2izF)4%4`D(=$CFG}&2LAG5P>IP?$V_ zrH~r_cu{#VnGB)whl)~*O~{Zceos+QG5HCkg8#Ybc(KV7$a6lk$fB5B0(r)7E&>&s zlt7;H>x#rhX5$g$)AB%oH{0gl5wlr71wLrzDQ z2!kT^1yXO0gB02+>8nFZB5Q==Nc{=v4UU5h%9U)UA?G6B3WK8br=*WL4hzsxi6;#y zjqDPNqx5H`bdG}@Dwbr^kPDFlVGvedCLQ27D4AS3@tCtom z@m}JW0lCU=E!>|!PJ{&UUl(r4_aj13{D#8s^T*R6{(Nd7H_tB}a)tk_urY7^A;dSV zE-ZG(sL!(9tF)AoVh@4|#!wfj_OzH6axZt^|qZ~R{ikLIe~qaNqO z3wPuu-=+S-KTxQgt9F-)Oub6!lRGXZd3R3RSI`JPQFFm%U@Qg{8;T46~>npjOHY#Q0@6s1#fcH zQmD3kK|yg&ax&G1KTwd6qn1py;&&DJ<|HRk_wYX!9L-TnqFOw3f7^U-@N$^t-tBW65xJHMbH{ShLbYRu0n2!F)HQw{lq0+&Y!JasGo zR)Os!W*pUkA6KCL2oXo!#J^S`&1T}LdVEYlXEp*y)!_#eRA)0|sapIC1?ky{Sn3A8 zM?rWt^CopY|73wnHsU6AE&t~N+id0y>Kgvx0_|+X4eDzCPX*E}W(-x0Z(Y!tg@~c5 z@WBPuSPr5mg77S6G<7+DLxD>cBAU94zq-ISi+PRu9e-tkb{66q zbqRk-ft19=QspDF`JE&LmMR;W@QJ*#&G|&y>!Z`T-yPRNPq1r|JgA!#Enq)E=0n}) z(ZlQvqz}|>4n4q5LK;EcX3@RuNF>kRO@i)XUqR;EyUn26*=|T5d$(!yN468v$lh%V z{hn=)C!yZHA+q#XT ztJvKLA6vIE^mFz*gpsYA0R5C*i{L@rM$skgCy0E=Vu}^8GY~!yw-NMXb`rt};x>%V zVn-r)Hf}sLk$nY`Z{s$ECa~QQJ~nQH=(}tugprNg0Qwf&9>KGA7mzN{x2%K(wArSqNCYM5fZBm4my%8a;mh-=s}0F`=d#);{*C2Tf@m}cgB14 zUiMNaiA4qrZO<0LD=jjZXd8APJj5d79eNM@1Ki3Y<1N~ZT@RO-XS_l0Vwb@y%`;x3 zx3RO~A?6t^=pWg);a27uuh5&>(Qt`b1_Q0jM!_r1GMb4(E7PiHoZ5*un!AUM^U?_j zFKXP#ro3o`yI>#f&8@!AJWahcQjnW|A90#`VI(Uz{66y(_527S z*X2Iq6xDm=R<7-R=1Hp8NL;S=eZ)zs$H=u@={=@1^~?w+xAPvtnd&+ckXwC^d4hU+ z$q|p-@O#YPs3%5F=DOUINDms51MzTK1Qds+8k*!*3CgDv~D|+W~}=N zjceU_#MxLk8y(ZCbVSowHw%qzopWq6(j}q8TSpwzjC3>6A*~&b&PKW!=%CgXM@=JL zBHF)|;@D=Wn~wHvEp$vX)P0EdX?@`6Y^eJHeYO?vsA;H6K)bhwI=20&n}&92z37F+Pcj#ZL97bv_tDgN9V1&x6%7s zl^iv<>fS=bTIUY8ZP87M;0S*VJg0@WZtXtov&Ah5y}R|@VWTZ>iD>X-wdRQHW8d4! z2dV2uetmrOw%S1|XaxRv$L-_;RE?1XkCkt$9iXa@*gYP-6?L@x%HjOYZn5YMt!{^X zHoM(KuWfZYY_!?!26}a?{bAlFw-~fa>#oE3o7}FWSGMXN_SxhXjb7HOde~@_+cor( z)&&Qiz8e-T`!ME^ukRLxp8e46;G^#riJlTwe`4OF9UXI>t7bFrVV|`!mC3S<3%u8y zzVnNND?_h29~K9xE$7b!3<{HXILpaZfrG->#evD=eBW``OS)+`Q^>jDNz@Vbe`dq~byO@@r!+<~>)v^ZFLeeoWOXie8STlvd- zG`70yT=*tCXSv=A);(Qa0eENTU+0XYstGf!8n_i#v;V3BRSYc-|Jq;(4zY zpG_msb)pprRg3m|1jzt z>((TFJ@VG%l5MP%$ysA$%H)JGD|vF*7@0iTYs^ZTY&S+GO};l~B~HFt{O~aO(wLPn zS!Ik&n0#u?ik~bnM#fKO8ME+{1Y;zA@|H0xZZggo88>;&n1!3f7$b3$0miIr;+59O zYvLu=EUb9e3W*g@Sh1qS!&b;Bajz9CQrvEZj1<4OVnv8wSs^3DFRfVN;wmd-xcI3R zD@T;*|>P?ZkT( z71q0nUKJ#BVpxU6oS0BSvLt^fE0cs9we%i|><`B52v3e(aS?MT z*5M-Nek|_d`z*#qOlItgOPEKo4wo=_vA9c^qFBZy3^{g%FXmaSgD<8c7Uzqh#xi^{ zb+IchV;W)|E@PTwahEZ#V;Pq*%-9uIFs-o;S1_MraaS;3Vi{L3J+UkNF#WL(ewd+H zoF9fC%kaaD$FA_lOvXC+V`gG;{+Rh#hCfCgyCMMd9nK*DvmA#Dz^uYC0x)X06)4Oa zoC6B89*09=v~Ua*Mh~|l5MzLI2&_U5+hvd5NCr`zhIePbxuFK49v$A9U3?>X4b^dY zYj(m7wKY_SVV!K>8_62fgTrgHkKRzzpza@5$=-1zc{O$K@Um>>8)~bmFal@)m8xfq zFBo#ticZY4B!|xTS>-WgH3(#QC@VF_L=9p++>;d)Lso@Y4u8%%9%G^k**(n6vWOw8 zK+K1mvp_K>DiH8+U6%MdSs7w7T#?1TZlVm?IZVzfzfN8S**=_?m3rM|6~uTrGb`vi zSqWk|d_U{>brU7X*5TwVi|gc-5QE{^EYNk6m5@!tQCZ?>@(PIFaBvno+GGVpXV@>R zJes^5qBVRzD>d3=Ib_4|nXI5_@(+;p!zZ$iN1OZrSv!o#vWO-xgRB`olm&`5Sq51> z49yZ>BYzK38@9}1Uo-h0qB3lfRep{99c0z8VOHujlkXraPjW2ss-ES;L*L%z!9(A# zRfQ}YR?D)uMqUE>Zg_bX=$gqA$dX|>NsJ{cY~;Kd5*urxu#xe`N#$6w+-9CPL`ub) z$ZckMJ)|HkdBJ9e_nCAYYqDT7#bc5zuwp0WADJ5NfDG?}q!=ba%1MUtm&KJ!kHjz^kI z+kE69NEVUgDVtW_ArdIkWNKRS%8D$=_RpLjD)ZkxyqokULQPD0!`n$Jj!2%QwD7i) z5+c+lDGZ(t$u}ZdL}}u!B^`}W6Hyv@Dx@6|$rF@%-ZGMMgxUm!#*<}^h9?UtwY;g! 
zH{ohRN;OZASsb1`PNDDyG84kp#wnG&u1w$XrV6 zJQ}7pLdoGJX6^_}9;RgTZe%KlsSQ&|yok)v&}1GZgNM$16RO6er1LIk7KbJeQ6BKT zdm@k?1Ftjdhc_qg9zL1r5{ek4+~xh8X&cHMpxogd&eRS?3{Y1#(*=X*quk)F&a}ladnwm>D>Jn* zh+fJy-jYmd2$MsJ;>~7sh9EeU2;M|SbqKSE62=?ONDo2uP%ymSjPMXU;hs<*j+(=h$(0%4_5QmYj$3b_P2O%p)x^jY&nR_6N z;jTwP@ys6~xZ$qspjhTQh{JH#q8KoLfUFqqT1t4_a=5IC*gFTW6FETtv?KQ=SMHuAiQ2`IH@iYCvM z=!;5jqO9ZnN<4~EYodU7aN-VBGM%EqJ3v%MsnIFwJUilOKyo8RmA9MtCP1x`qRiV# zEDlI+peXUS5)%T{8YnAxIz-=q?|CZ39RbNNDNA|Fh{^$KFDZ&4 zS^B7dGL5n@G?o6wUyVkQ4hhnW{gdk`b3+5^3I1wz6vWBO5lwOWdJ z=xzEAf2qBRjIo1fY6tEDHQJkZSv1=3?mFIRZ`l>pXm3qSZA6*lQyq!$e(yC#sn!qZ zEn5+W|28CV$XJ&QV*L9D{kcp1PaI`#SE~q*fH}`B!6GLlE3vEz$!aW8B-wz~`nML| z_+rby`M-ZVPX7~&`!nzF_vd2lQf2I}^VM?guvcBN$Ms{tu|_9Vhdmco8>KSdnf-IG zwwsKqX`a^N`F53}UN--rZFIToDT>~_PTS{lR~hP6^N-s3m%E;!-Zt-9q-<9?>V5M; zZKEq)&ru(nf7AB4()9w>K2&{~`#h=Bl7rGF3u=<2mTFy90bIJODkPBmI?2LnyvrKR zZB06E<=1T;%>9xSWHsJxjp6nurCRy*Sch@>N#$1KJ=PK2$t1RwAICb1JD()B8s}JF z<9?S6vi9q>zRq2hY+*g#Ykh;eCi%FvU!Qd>S1UQldc4m%j%$#dYVFr=9namKTy8zy zZ=J|BPi9;DajlcNkYus-A5!o?ms*3&yZeuuIN9u(?ta++n(Sm_HP~}E=J@WoKd%A* zxd-?MukTM>{+U~3(9FATD;&o)N{XPN&M`Qy*dTe*^1mpZ|HP92v@`(M`qNhUSEm7x z6Mv@g$qO*QnYxV#@AF<|blmzuL(5tO00EHzasr?NAPRsM0RxE|7-$y-g562gCr#1AqpA6aY#HC?TMPfD-<- zl(5fR4>1IEKn#F90B8V60ic9{5&}vHDB)j83D2X<6H-qR-}Za2H!8P&(A2UM0YE?` zfSdrR0Ehyhg@7vn{s@!>e_S9LMlHQN$8hc|<`z%=Hdhq6r;b_|RyX;#d*6KB4Cn9d zeuJv{+r4kTZie$Wd*6Ho{$}r+ufX5zee*l;_jbQ&`@6f}EbfQ%ulK(Bx*3kT?YGjt zwY_hC-^b^<_}ggD#NS4%5Puu3O#E%MZ*A|Julx9X-7iMgF;_13;jx(C%4Rq(#NR** zvUeTs+Xrld^FOu;4iNhQAof-78*l2FOaYMy10n$kGC&2uk^gd9*!7`N*BRxQ6P=<9Z`1h$r$?R&$i?ljcLg9I5Q~*Q)&_ci!0DlC^fhM*B{y zw}n-GZhWCfo(_0|^R;&cARrPzP5@K@L;=u3z!d<01j>RxERec}`wppFg;jlMOw#k4 z3dq4}+XD~~iGOfT$gwly6)o>Zp@OGpMvn`M5$0Q3ttE|$`imA^`o+`-bU$s$LHDuu ztw+uuy}jMhW(Fhc4i|n`^?YlZLE7EnT$RMw2Ct<$c0~R0?|Z+q#8mb6UB@ZwnE(SI z2tXzPEC5&lu;3rff}?sSp;ZHYcW}S}2m+7^01E&X04xAl0I=XM$ATStWK30mUpx*N z06_pU0bl{Z0)Pbo3jh}UU)R-20##iOaNE_umE5Izyg2;e=!!!5BKGD ze`s_Y*Pd~jc{j6b=E=;>$cyeBK8c=xr?qTIH8VU5Zzd#~Z9HqbX2<;0E+<-Y?b6U#Q7HEZ@tY}$}uw*KtS=HG!m&6g5CZ74Q7akixSWKx;d*{=Zj z(1tCE8qF)u-Y{#};Q9MB>2~wsvyaVMH(X5IeMoD|;?(%;U9;C4&Lx^3+L)90%kR@K z8?Gc;9@5T9`p|sg>~pg(8zB1MNF$-4BN450kHwC>RHjh|67nqor+DQ|p9~g+bQL-p z@p-O9{0#u^=(^#E4|9*i2YCrh%?wk>w*dUcD`q-pltSFE*6|}g%@v8i0YE(i$K21$ z7QYW2wnOVd{!-JAp?tfmnGj?~KJz%QQ2bY#nr5UikMeTGt)U||=rfSN)bw-cXwB6g zh<`>U(}nj${MVT>8&&oECIVt`+IC&NeaqFYFje&c^xqSSGD!edE+)aGQv!hx%r<;> zcCMqA29u__FPzV0)t-&iL2GVI*#Wuw#O+K&Lxh=(Le)2==icWkTZ<%6Z z)Lr4Lna zol0eoo7PGB{i0w8C~daihanyEaksB+SHpzKoAmWzv-5pCp0kBiCgPeZxbi-02~07R z?vjX_+l!AIoK$RS>z-D)j}K|EjXKY|nUuLw=a43+dQD(9oeLM*=D&=0`mprv>}{n3 zAuN+j8Ry}d@EWTH`Ihc0^-=QSdT!}>D1k&s`?t(P*=>xzv2c97j*y*PpM>2T#i4j zYPQ3_B1SP+&t_<{GP@PsEL!ziQ@aE5$-Xwtl*OYaXFEh1li=YO?w4~E7bP7Xq&yb0 zx#G61lwBq3yTp0U31P<$R$t+KN|N<|7V>3kq!VA__0Zu;rdLAxt1 z!uE>k@+&j)`A!~IG$J)QK3y8|TgX**&0vsDa@q${NQ^V26AH{Hk(tSv3%l!ITI&#B73`&aI= zm=$K>)xB) zyB4;W*rC1qsa!hQ>5wAF$mdeWq7)I*roP`Y9n>?Uu8R1E3fV|g&I*?iPNi`Kd*1(a zns2GXiR2n(XP)+dmR9)yb$V^x#r>660!}3CgSpmbNyrHeqVD3DG=*)ANtL)fO||rl zq#A-&tncYxN53){yzrt4N*pSct;*b-*6KA4-=_J9-UrC;~MXmonwv|tQ``Ns z;)@BA5B{QY`s+~#tg@u4&bIbiu;hGibqU2!oE254Kd-CTsh%$@#>6RpO6g~;t~^S) ztbHpz$nN0Pu#+(R66}D*M6miO4)=v?!Pm}I*h`1%d73;HiKv*V7qhS-nA;&?DLyg& zQ~IOx{i6!@T_c6aGq^HLE_wI>6=peWX*%J4C7vUmmI=eN<>loIKE5-g;*n9v>=a#e zHY$P!+L2iGuufa)6 zr~eW(TSg8S9mg~D^V%XeOq@s!3H`L;N!!5>5f5L(ZWl(JPK+$6PKPR{ z2a0hd7zSN;h0S7^a(FQcTzC3sY_+qDu{#5*P%N(Uw4!^r1)}(+Rn)-fc%380*$U@e z**R$R*nPHY3OTT!9pcp;^6;+Ea+^CLxefL~@d5dUtu{8G8H z^c6_c%8$G^rV>+W4SV0j;>$jn-yJeaYgZ%PcO#xrNH@;V$*zEtJpm`X!G|~j?U=deS6}R`>5DTZal=A;-}r=H 
z?R~?0knc3)P}foeN@$cgj`ujd>#p8L4Y&F&M3^TS@BY|bo8C|zay(Sx2N(7@jdfR> z)rDJq4iVnvAWdkr*ma{9CHu$yPAagX2?>uDIX_UZx%d)iWcEEX<>5IB_h3`3JSa0b ze_DZS@4mEvWxuty{~nXjR+KkNpIH5nlEmiUEld- zj!z50`ixCX=bOqRX&1x#^5<@n?rTP!cAwpLW_FwVY~h*NLZtR=O4RpQ-2HTR`096N zWP8=uQg+n+X1%rUig(WxPPF;-FZ$2xWa*7W{Z0);>H;@YkpD^GGPd|KBB!UkY@?uz+T@YeJ^u~;){i%@csKY4ZFN>7jb5V`n) z-L7h>4Kl6NjunKEoEmHg?*pc7E%Oi|&Vm95rHPl8`(aaMz}}05yHUt5Zi< zF};7_licR)+s+4W3t*?5^3n=^+7^>7ha_^Vj%u@^!HGud&@;HyVz?VJ)&LGNsTE+q z=;;r=p(#IATohESz@KJ>2!m@d+uZARa*}3WZ=?lWIaBq%&E!jhW^a@4Lc`VYKCP;| zix)`L!m&$^n`SV4b;Z==#_qE8K&uz!=@stDg8`39uF~#*G3g3E$9Vo(e&KB8!lUkh z&dJx2$Erh1?n&L9I(xc_;=cK@z%TcYN%e;ad1Isn{Qb-={D4!H%C&ECh2^_g!8ZCv zZ)Ot43`-mFc~_c-ANpL$w@og8(sL<<5dJ2(#!TPu3J**Jhu`9_jnVy?Jw* zaSBpld%5~vUgNH^ckjm#T^!Af7d!n1uRPf3jNGmsS~&WN29BJUqV7a`B*vqIVuQKeVnM* z1|4U-Na3zaUbLKQd)=BU85<#$%q<_DCJTR{wJ4m-YGo2yo^_<08zbWd^@5%#8U?)Aitb2R@*5^c@qS(p%kqtCY_h!)B5&=?ur13ZOQcy+ zdCo?L>GPT{nj)e(;ssWL&l{W(&+c{>w!1D^e?ANw>~<0=BE*ZnDlWz_dHVfHSyr_Q zi+-xYrE;2M!edKe`raYc66~2piJR`}tksy}UYhgOFCv!Kz4UiUYr1$Um}hx4#Ou^d ze|}0b3|=aI&q~?-TiU3m=T(g%HoZ0hByszEp|WF@4h`C`Td|yxnpv?b|49R5W81~z zfG^u4?t&!2E;qT~PmgSrxpmfxq_n7OnoH^wbBgEoBHD}$ONo%fhU_Bc!heZI?qZd( zv^U9rHX`_hv!Yh)bKJ!9Jen0w*zjfzyS{0wUDb>HA0E=P9(X=;GUfEs^#uVLz9H7Z%~WzYsVb4o)u>pS);Q?m%i3XD-|bM!+55ro@}FL)j9L>I4*q`!xi^SUYTx0)uv7xf3A?_ z{ZS8oMlU=gSgd!}#FuA#K z{Z>n%Pr<2_3 zR?Y3!aSoggV7NtHWcBsW=)8c5&-IW$$a_-E-Z3s#z2i*B*;IB0LwdG#hDvU?o3JmR ze^)lt_clCz7Y=V<(GpVS>-#XQQs1ZT7jfTqe~XG>oP}$iPAv(Prg?P_Bkf1^^`WQc zHysbD?Us!8T{kV4J_!jD^6G2ey*rtL^OFqq*%>s4*-ctvbjX9j!_RE#md;nl86!Kk zL{y1Pg;@)f#BNL6&=%#Qqsrk`n?Iv&G>9$z)s9DuaWy96dE@G1aa`4Ew_CSy%GH-M z=zB-}tY)8)ZgzTBcmJ}NMd@a^>xstEA76F8KPp6?q9in~<>>9-uI`*&=@9#cbE$LJ z3){ovUCeoKNB)%}r~k#iTLr}xHEp9YLI^Iw2`<5Pa0u=moPhwr9cCcdkOX&k3$6n} z2AAOOGPwKT?sM|J=c`lSU#HH^x%w~ui`}(W*WTT=SJisD`&qp{YSfui1{u{+Vg2zO z-aP$Ywft@oDv0IL?|w4eR~Dny)7Bhy^otV3)qb9W(qzXtX>%n00S)3A;VEXc?m5qW zF8@8W?#AQ#dX5`ww`$p(??|mdR9g9X7A$JLcXbdM*5zp*JnGx9G{Y5X4)kmR*FBgM z^_A3(z@?B?k*?q041u&9wM_jW$&5=M8>6CmnvYz`kE5^R5-2HM>NldmN9V3ppcItF z6*0;Es7vB1QQ4zD$_t}D*LuTK0YtqLH)YY+iVINrcMm~oEG^&W-Av~+Z2rNx`93Ar z5_mMR@NSi`n55$rq@-&dQCl8N*1l`u(ek|b`WrcAIa1skIZNK>LU;TR*o#uiFol1; zX^vWEI}wtstF@Ec`_nwxhU*>pBa}K28TSJ7N8P3QUUPfuOybOI9J-K~XqPc#A%htt zpxtk}+l{IUY-u*Kj_!cHh_g^Ot{tw~EMjRBJ9scTF!S$fU>o`Xoqp4NEtjH?4N$nj z+R_u`>-Jv4zHd51FdD$3_U?96L``+H{|*vSclbGDeA+>%YB|C_$!fpU^U~L9>TD{e znZeVw3i=1f9B?mW=#0b0X+wl|G2o*~>~rLF)(3>8`NBdrpuSheG+3Ez+Bg(9mhv*$ zrD`N@+R{~iC8Wf-OB2%4A#V{vF1YiZF;ztC7MH$#ZnrW7*^ZPnWw?8q)?B$YJ>qzg zkyR+d{wR7izKK5=f=WN&$?NA+oBol}$mOk5X3$27e`i@{rVn#IJPHwy60H(ooI3Kt^K~ zg;u=?oydaR0MLwOSdl_WELo84diO-c$N`8nP(?(c*Yt=^A`wbAsa}@m3na1tvH(qd z+ux9Zm*WGrMzc!esiqh&JU3|USeaam;jW3s8_0c~&@o`7bFR_k;_Iz8W2&)aR%{&j zN}9o8sfE1)QM)c_T&95C!;* zyF%$t{_;a9mPD()HmWFS;+%0eXs4@Z0LEvW`3C{5_kXl+HO^`XCtME0uKbz&y%_Rm zg|NeIW~fO0wYT`?PRBO?{amhnsHiPQirEmUGx3`Cz=3^9mWZ@UPEg0ZqyW$~IsH@Yml??{*0X8Q z9n)RL0C>8KHJvD0%IalR!~Vj2lTp=sXAZH&S4R7?_|j;WTfR!s@pLUY%<;W{W?z^C z^O<}FzJ&SXPnkluK@-4&3ezYHGwSbF8z*#6_NIjasC--Jxg|-`7HH^&>_@07p3v>B zUEdiVw43x4#dp(qqM_)%GvnitmesvnH%HWQPOc(v;FtRMJSx!iOm=N(`2)2qI)py2 zROo}1&enQ0N;nM^%0VY{p+R6kBA%RWuA&le25mrr%i0$Y$E|qUTiK zmgboW#whQY0@qdK511GG63-zRHcx#PljygwCp_PsV z*XppLTe0Qf(7Q(FhAhhhyHhn8XJ9)jQ^W>!#C=~QY$lD#v@Rzr_Ii$G%(f_^V^{e` zuRIfTdgk8XJEEy{+w;KA#j3hNjbQjCL;)F#;+Wg)k`+ICf-7<82HfT%QHrl;uPo1VJ0J_#{)= zanmQ@AAFh=ejMphi7&!jW6DTib9Fj83F$2}UUZTbIf&mfJa*Jkgfg*as&xb)zJQq-U#T4u3_%ie!Lzd5HWiGt+{{E=l^w8W6wYnoS?`T93J zl820alx=(B;O!{C70vqBLqp0i#!^#AQhvv)P>D{Vn(#Q0nb~16JqbonbTC~5u>nVZ zf&)=dn{K_fg&8?2%S5+2KCex%wM>RaUhZuFe0|58&@#5;q$Jog*%asOxP&yoaNT-b 
zpzv)u(6%EbWM@rToj$_hxT9WO*+|lf+I60zGoyxZI+Ek6Ffk&e2CWexd6AxrOK79` zabYmt;Jg|YPY$^GZr@(Xy@yX&OH|>7gHca-OlltjSiPXeF_6wh{4h=8aP-N}7p+|r zGrWdQ2|BzI{zZ?VM8DD|ZZ0SdsW`3SG_68mD?rlf-U!p?Ur334BQ>jycNI|*cx}L* zos!7eYX*}DF0t)q%PonVwNoU#zsjosI6w{^#my{)KhstDwLFwp!}7_b>*02Up%(DSw&U@disL50n!Fez!OBL?xJBNN$T z6DA0Zr<-ahlx6pOm?h?WU-@!$82a`z^^Sj>Q_`E6XVOa#hF=is52P0s0XqATUqr%Yhdbn!hV?fJY@*=ViFP^#uI|E>(R7V}~$Qmj!q`#m`{ z88IsX{3G#-3zdy-7;oRJ1`E$arJa3a%ZnvezR4V$S;7`tln|!OhSgn#IUw(9JTfPs zNNk8=!lP|&BGyn?#hzj?&~-hsd*wg>0(ZX6CQHX`Q7=8kVu(O`FfHsVNl9K)o8^nK zLv15q0&{A`nDh@}aGc{V4_~&(kI=$49w3>P6-tcp<5!ALxD&9Apl{)ggsR=i0Xu=g zo^x{S6?vg}ZyGG`>B&mc;>$%Sddv-l#KS_%GMTXWW_`3EqwhkfVa&-}H<@>xDTqZ4 zp}vJF6gJW?g9;w4raTkC=vwhq+gdEJPW*&I`DXbR-OIhXx776Rhla4fJ%z*ogLJ-Y z8oQCJQHrI)5zT06kkQ?J->ZIt5of8+G~JwWx9R9!84duWd3hMkMy}4+dQsS)~-N* z{m;QQ(2b_u4|O?pK6^s!>4WuFo+6qRm>O(2gHOYnzS2GqKIzN{i1Jr`D#pww2^TFS zJ+%g`$a2V5ei%ELXabxw>wl^KuK2AOQw^ihy%7sruQVqwlW*nB4MH6`8v+5;`)L&IYWR|scu;Hr^$(# z1O-NVy#)G+un+Tj--LFu?g2egoxMT?q~JJJ%qlu6F8&iUyU%DU_QV~NO>|K;g^Ht% zbA<}~Zyg*ysuyDyr6=qA%viZx>tx}{E$_U%s^~GuHEMJO7g}@_5>iLP;60>sr_eXy zE1InP$O+OfucXWB54JKCTz=I?@R~Csfk?LU~!|GYyTS4`K0P#F3J@KMM14fa;Wb5Doj{7VOBj+tMclxLpDLS$WB}3 zV;;&09;nekXZ&}Y3w7j#;R^&XbGYbS7-J-+M<;^L(742~$s>o)A=C9Y z7Q3N9+9I7`?A5o9`XQ-EI(jb;zSl$#2h6&vAa=kLo|LKfM_7nM3f&FtL(P|M1$6;(mDy+q;xL1gzTJr|^nZo-%yRiB zN&;vhPcBNoL0>!dm-{UX6Ddhu-CQ!U?ov_70KIe5Ife`X`IUtwb2n}v;cJWd7uyQF z0gQ?(M@)B8Cn?U`_eg(6#M4Fm6ZdlS@s(S8$olJx?4vIXwCLh35M zMZ8#{g+^?zLdTx6)J1FOR9{w-ZBJ|cErPlp5}ex9kPD;u2(eH8Q!}#umwL1Ph~RXe z>DxbbhFIhkoP}q!fy=6`jk>bS*T`-jM5y=b@>1q?*s4y=gfUr2gz~OVu-v+w)Ck ziz5LN7>BXZhFDT(!Zlf*{aQwiqKkc-@}2>9n(ox{)9TFHYj34XJFG)kRKbpTd(ma@ zqHBg$I8|*0t7%-7-E=CjNB>?XKxa| z&L|mlG51?r_0FU$^S>2aO{{PWHorlz{!LOMqFT&uHm>-Q18&h+k&N*AA40XPpw+0T zVA8mPY^zDaDiWcWXT;wR_WzHl%Ti@bMkqqHE6BE+H2xn^1s5uRG+EFmYnn(#2OLl= zXsCJ*+UN=%mC-b*dJoxn2p(||g}vCLSg=ri7`7=AJmM=_-l<*9aL<^^a$5ki6Q)iG z*WP5fXHPx5E&NpV9=9P9I+7~N+^Z!5GMg28Sj!0=DHH+sYdJFA3#V2<3z@19Q#Qd( zMs!*y<6_{25>@XRn|h%m-69G^tyQKwbgB*Pi)+~`)eN^m3LSkd5`g6DEEV%2UkqJ%)|7UN^$UZWeZp zAC~O0A1YGfsxU5-CYHJBjCy|#tvQlRX#e)G&h|cXGr6p0+R0D#lA9RG&5#ie_ab~^ zeP8e_aSBYGlJs39sHaAFYkzbh>tD5443JoF^&BruSdsvQG<>X*mNxok=EK;{%zQJYvwvW? zxVKDKe|ulF-*@oy`OMwPC4b5+hr_@j*iL}@dGzN z9^-wUlKrPPujc1h#!Mg{mqYibMJ6T{*c{GhR}8q-}xZk<0zBOjJM%7{=o zwxhlM{J_pW$LG=i+JVlxhOYKI&erD-R&^>lmhVnhpHCB)Z1_ceA+A*Xt(!sOPp4h= z{=nz@`1su}VRm*O%s}W9@^PpA2Kks=*@k>hcjEXX@OZcDzqxsJf#~nw^ab}^_=2yG zQJ221Z;TmFaJGx8pQ@ic@7y|r&p}}476YW~>BjcOmH$6qYarj}gIZs=Yt*OX4x$$J z_AF?00dNc+91Sh`C@w63^z>~yXO-$SxJH;X#5|;!!|5AGQFo_^%8t!NFLw|#^LHKlwpTr4R)}~c*`r})^YMxBJ>HY& z`QzH{d|nY_5eL?&Ol+1|%*k_Cx7pR@-ksM~b5Ri^N1IVAklQ$e{J8b;>c+dQQcb6i zakuPhed5l)=Aw(E{ju+U;BH{Q{eNt{_SAsAXk@ilo!BaW$;0xAZsm56|3w~Sna3&| zWWe_G{J!X_CT~-TaP7*!H6GKSI{E49cDw9iHS_7Y!wyCwUI8z2s|vA-9)nx$K4_z> zs!~!?h=Govr+h^D_Z04@TOSEhnhid2ChbNn_feOgU&RLd?_A3sK zz|E(wJR+-y;P#J{EdpMyWv3@iw;aNrexIJ>Q(|sU*p2zakSok2^;pR3Wx|(hZHrJG z^`%HB>a{vZ-;t*aMs%k!>&q2_HcUeld1<+!3+(WasKW97)yfc>G<5ut^g#9p$M?pL zTPTkJ`i|gPV_s-W@ubbD_ zW$pr=6#+kSc@*3(C7S-Oz?fzN0VPWQium#^A6>eWpE(L|$aIH*2?{VA9T~@-=;ER! 
zKP)S_Q{E|j@4MGE*@mmpAem_X3UmBbB169;H_`3(v-0zU86*Ftz3VGvm52@T0?`%ubH9U_cdUl$^ z@DP>_#Wy$lEfpb1;}$Ptbm5H?Y*l!D`r}29cekI|$eGD79`bJGrrXcstV!e11NIFE zrES+I%rK(huJOS1s!fhv>-D@Rc2mfYlk0Oqw8Hc-kmvmt{;Q^kSBfDw3hn<4t42pV znOrRTvkQ^sFR!EKaFD!=63aOqTdCEt4~C`(!u zCl@Vk-~V-q2`XLPrIs-VP9Eimo#QO870Ib8q>0S*Vl**Xne{}oW6s_}BFuFMwyMa_mo(HV9Ma}bOq{xJ&udfZnW?3lbswV=7}hQnYd6-+H{x0e%d3{^{R;{5hN zE&gS68&w9Exqft4?}TE|r*y$Mk5bufa<6YROVz8ywm(LOF*1VaL+J_d&`o#srq%1x zOGVrr=Aea3Bqac(K%j;}1eOuRN{c?p1ZODa|>G zQF?hXg=I3h8C9Yog!s$3MgT_SffLOu8d6&RqA;g}P4tK&DlfykvF$^CT z3DYhxEHXLEMX9hUDh2aJc4n192@Bi`Es14PrSRvM_;ZefMSzRwlq!odf8+FvaGFNH zC=le8U5|!?PZ*eKuKdepmx%1dVh_c*w{&kmKmtwHb(1bx*buRm)jO}vYvnBm+2q{77SfQJ7X#6G-N&0-Oh6(x zYpgFc2^}-q=+L>;?PQ@ugDd$~H_ZhbbA zdo-wjT6AO>2VeM^T_#^6L-7p%?Q)_@vPh&6BS{2i>uTIRy5VR0M#57bkL&`z?2jVi zhT=xrAjtiJ;`g&skDRhkwOLKhGrhs}N7+?8qduQf{a3v#SQnfXL@G6c5MnJV`0hsI zp#~i7E8fUpLuIrkz%v4{>sQ2$Z1(tXaJnHJpgQz!2U2!A%xbI1d@#9jD$afkR9o*X z`o6B!bo#G_Y1-tL*XWj4lX&IMV1%Z#8PMm0h@?4Kp^r~cvwJ6_sPyw}!(`(!`D~;e zV@h}b+@y3^xQ=nr)0DBhc>-~?+a90^PfhX;jP`t1ia3jWYR_bR{tu9~*DJlHUh_`i zXD5Hzb;rRkh?E)AY(5woQUkWNJkI?Klw&~1-noAf_TOFYvExdSrA_9vXYP^OnZmYp z=6xE0#*?*gxUmiOZ13vp#C7Lm$_~d8>(-gOTwx5rk`Ud4vqa0aRvY6SH{DX!k`6~Z z%94)s%Ca%!9p~=z9Ae?gPpW|c**ttF?s4%Cr;$P5aIV`;=2>e0h%Y$;@@q=KYT(x~ z=!pw#g{O`gGItCnw`C94y5S*&s|+9cm!M4^{*;A@X^tFyIX9zllxjTFt0w7c3~P|s zNskd>Mh|sk#!?FLuQ^W*hD(DzMA+@g%-N7XBa)Hb0NrD#lHj)6woJ+6B zR%AOb9bRCdHKAEdC|5gPl9N9xJxzwlf0e2D8~+_>;fE^W_+$tn$M!#l-Iu!plMvNL$=glF6^Kv%>>Q+e(qGg0<;)jQSH%vCn?_$NQ)=58n&-4+ zr=H^t`YBf40FhU?yMB8+M#;N=bZSpG|M}rnc9Q&;C!)xS5%1S&5380`*F2TMt!__n zZI(KB$k8CpuN}i9g2?_haO~`;5~IetV_Ss~@@Tsw{_6Q|;CU5mH2bkd{OYc4we6kH z(QSLP&$H8>&_=tk=Q7i?cltM+r`F(AWu=?+^sZLje+^yNno6fo;bT!E+{JB*kfU;Y zp(my5LY6q8M6G6;YM^IaMZcr&n(PrJ z-b`b@Iv9PCF#~&u^1wCSLe<-Ar$(fow|=^DO1=u8^wl_zy;75%MdKFlM;sxLzNs5T zk4NwqKtrUr`6K)x>ca0Io0)feqBuZx?;TC@n+)-M^378JOV8w;Axgo%R+hSWj;W;i zlPhcgjCj+%t}gO0LA}1wv%_$Z@wR(R^m&o{yty%=-B1VoYI70vVgi`4hc~)BhB!LQ zZmzis$NnWp{j}_ix3oVD>xhhGUwAM|rU8EO(z&?A$%|Zj{#OEo z^?YWJEC{I?IeA~d{9-TjEL)&tDNyO5y*Wc4>H8nD&q?uZ$Er6*aHD1-)7LwmG9Qwn zQGkEgP0Oz6iRYO2%0<@yj>CFSzpJWkaxn;^tB+Fb`{Qk}?(Pl5xe|K{sU%*ftG7jo zyzLWzK0CShlV+#<f<-jjY5cb2y!nWwy9R zg#ojhRLUz;S{^zBm-@)JwVrb?s6osB%CS7rtKR>wa!kSZe<{blWhx`tQW9p2ly0<@FB|6(^eGL(Q$Dp46iGdknU|Zb_7+eY^qt@D0vyj0B83%f_xi@j*Y|Czx?oSD?WCy&#I`oN2;tYQ+tZ z*gLr?|7Pf@jZ!jdf@tL}<+eu#r-@vGIIO1IVBPOo|**Mlw7 z^R@RjcYA$XUT|@TR`-&#Rr^TvL!do^U+B@CK2YFBt)6vaxZk~(_NbBCN~5>qvJ5?w ztZC)p%a$OP=b;;3VAix90gMRcOeSM9tF({lV4E@Uz$e>RCrqaPf$IGZsyNp64!O@> z9CJNzsgKURB&|gCvc*j>$VGFMO2>{kn0#PQKiI z&3UQN_D&{#+4xQgG{b;qes27cuKP=(j=g%wvL5D^Ku`s?7Jj#==aQqm&FSnL348Uy zyeDV=%P!6xugX8!-M=hqvOI}3aj4h=I#234PmFl|F5n-|kTw1n+3+ILMHM!;^zrtV zicqAC)xWUC*f{#YWP*!a?Lf$$A!p(w_WUx(viu>)DmlDD6I@CUaOmz^|MyQ_Gfc;7 zdx8ft*&zA5iNN!uxMJ=(+Fx%3Tf}bv#YMHt^(_z*@N%^P4nE5sdXjzO_oen5S^0-U z|BAyilSGVHZ8xdH+)s;js(FDEB|qk7xc9y^FmbBzMU@EU{8_vpAah^B=Aa>XeDMpx z7z$1=Ui{&Rf1vyB*b$xmxN8XNQmv_3iIAN8UJYwyu7s z1g!K$-q>vVR{RxXLA^TS6xDlj;a8=6O4shciJ+nDr2v1elE&N=2rwnhQSg`Lt!+2H^1iLCA)o7vDP7`qgiIaFetWHzW`38GOx5Kbq@ z>Eo=kkt!i;1qCa3XZsR8#z6PCoQy$ZZ&XubKSs~)`Ri&pn6W~wrsAb7rDGh98)bvd1U6j4t6~ zzOEum*T>7;m$E8dHw&3JP&p=-J8EXOQUy_`T83?ut1wQFh4Di9;(gb9Qs-hR7P!!Gz*)Bpe}n#{ zw8*cJXi-DL&3#$#FK2Hi6)aKcS_t~o12yU77z6Z&lolhk##B|<9JYoN8Z{0Nte2_A z=nv)?0z-b}TMEg1lEyAAW%0DBVsJOJx}Tk?pp;HeQ(TCIoI>n`XUyxksc66;KhTwfpc%lTjy^OyrP^gFM+eDp=}5e4Q5Zz-=+>S zwGPte{T4B+zQKeVx_v}~s;&DFYMdZYM@djAcA0lKoo^{(dEqA}@Ao}D7r&RhVE+I= zQsUCK_LxwCPl}y6xm!r4Ci&cFWkKT68*qM3h4&`Kf{-z_iPS{?lqyLOj*w<6_-Wr8 zhck62o|y6Oa?4}TZT~UXQ!Lin*j}sjo~m`Q{?zNG=r(+c50rv 
zG#xYNNt@9w@UC(a0tr)S;%*+a!LpE5{4$rW6#LP6GoZp$fiN~8Cz7mcTxbgJ3( z5H@+F<2x1LTKO?%6hQW z*Bdn&Xdm@iba##I{zH9CpP(%w3`3D$TX0nWDP*2QJZ#LCy{DuXGlz@ry+wr~M4g5| z{XH8t4Ff0OSdwG7iNO_*KX8st(DLkPBvC1XL)Fjzh*_ujjtQ-o)E#d! z9VMxcs7vT!nYE*k#QRLHqEy8OkYmFu{i|oc(fViDyV18KRTjQHh&Y=5Y_?zcM8X)B zpR3=Tww9$P@g@cELkq63JA6`!0L#`48i&`#+AMbNXlOmY$!fW%X6%OQB1xB>!2O|8+g5zg!<0 zm;&caSqV)KD}`MV(WPP}iG!N*sr3S~YAfYx0Qxl3F5Db4X1OA6@oZex=3ofDGO2QP zLN2FSRr6)5kPwXKh3a9RGig|-ENZZtJ#ei{@-9riORk7bh;ep_M%o0x%m#2+_h?xA)#;2eufwH^(-!&Z%I>rN1t0S=qwa|;B$#YslRiQ|e zR(*eo?^?89lO)YI2=Og6f_VcIw1z_Qgm{WG*=20dY8h(d)UOM#B}iv~>2ny@+65^n zn}(luN7$L#V9^0*QCN@AP!)31v}g6?fR5hMp1sq@9~202Ic-kUlwcqkQZA=}R2sRb z!+!eX!6ShD@5X`6WB4(4s>)T?yG+ zEg+Hi(-}kBM#4i9wY=SdCQb9C0{7zBTPkG(&5C%FN|s;U>S4<^`hS1B*c-O|Hlt*P z5XyY?Pk;Smi!M23%Ii#?O*3%?u^TiR$Gx?4K_B=*jWd~?m&aPM_$+H6I3;uqD*0*u zOLcB)@$GLeHL(0-Y!!2b^cqbqp-))*)XxN$V?QoHj<)(Qc8Fq9i{kLX@Urb6T1kq< z%v@qKl*LS26fJv=COBx6blHyGP+Z}2!Bwl`m*LXlhJw7NSIlD_+6vzecVV?_M}Sb) z>TE$=Mu2X9OKr6iU)WrtPsbz%mp^P;=c6(CFM;sJ(E=OyG9`t_wdL_Rl99hbjf8pZ z6?ScI<7Ot9VOlK8Z!m4sR|&%hLLi3uc~&AuqXSW)-2b01U>o`G|b6v^-5)6 zon2f?p~36}MlO#yDnmm#sPf2;gxznpUmt4dbw+t-%VcalVb*ms(_yYkTf9U~J)ghV zYqSsx*GwNgw#Yowq_@>Xe37O>NnTAC2IRced%Y)f}+WrjRq5{?m zNpoGhaml*=ozJuxOtvq`m326`Ba>(|S}BFl2=gsWgO!^^rkbEPz6gNJ3(pGa(ArSK zx(MR{AZt))&|9sJu@#YG4Qs(xBBe?|*+7a8p&?<0(k}ul6kRf6fTXX+jDBb!tsB5> zq^a?JDT}80YD?U`d>N(!!S7AUr0-!Vfr1h2+qcu2jA+K)*|lxg8Wbj4Nx3_O?*`m% zE7ECA&xUjK#S{h#D${_vR;DCjr>xxHw%^%Ii?|;FMixh`6e4)7^$S15!Ib&!lx!tJ ztf2yi@%CtWywxQH+eDHGX_buaKEnjs=yatZr;eUtDI7w2kGNHERVr=h!*G4r1%dkx zrxv8Tr{^H%@sUg;m_5xNo3v%FQi(O6#l-NzW}+}KD(+FC?dvC)wdbGTCc>O!v}zkT zbVRUU8A*ISuK|5{;QkK%DZ8Uq%JMMihKNBD^Ku?vw4LQy+SHm|9Tx7yXyZ(nBUK>Z_2WFnWF zcv<`s1g7kRcKFF&9S`2`Er^-zRwuIM=SkC*$8EsWt2LCSH*~GsB|DM7?-ZuitE4fs zHxzr#jTmvKM+4+hVN2H97_p2Ry_Q6DyO$G^eHjL~)Lh|(-?R_GCEvsQL5~$R0RxHW zEj2Blx8&?Uqo05iCN@+iK1He+YOAJMbDEdy=LQ_<^vwp;@#vF*fmU{kzz1eF(V|YX z9wWOqXtic_9reYkFibgxw*tfVSZRIHmVSX`WojX{?-^%%nnstwBc$O9O7g>l|Z4Ju?Wn8q=P z6Szz}nz3SKFP{HPLno!QBNWMMFtyji_WC3|mXDcK-GH8oQ;a!GTP6MSX}Ij*R1Wjdtln)8UKCwcS<){dmTNc(Kf zx{OUI6lrF%WAm1@rAg-wJop>^Xr69rzqc?j?%!D;y71_Tg*pf_qtX?IRABwBH0 zORuFUL<&XK*Wd*Plx5%O*S2GR;;EO{Yv&u`YJ}~)fXevppcg2AY zMQSy|n;0C^l!{%6=2;_bI>&^>H1H3z#MVv*4G(P@<=2xCY8bIC@auA9(FXy{C3EFA zhfyOh`}9S6ai&CQWmtAHNzl>i=_>4xwRUN1C1fX9U^ObF(ywI33_yiv&iKcy)F)p* zwcpO<6qIven2RTCz>c((`oWxZX?*J* zZp>v?i_M!UH!Ar6$fK-{G4ZETL3*u{r_DL1MMTp zv<3XS~xD zQ+%42pJxpuP)723WYEltDEZvcK^`|*U}TcHRX8b@1VM-P>X*!^g->H-*ui&uJ~WxH zbXEK1E%bC$iqQ#S`W{4mW9d@mHfF;W>Pd3)X??2RI;aT~uCTfK!7QSgxFrYk&Y$urqSBW_SeDnH?PzOuW5q`jW!bvqnP#{ZFgMa7WRuy_ zoD~)3s%v(o-A~Yapb`5w1*qctDz26|FHy&{K zSTd@dv7%;J$|p_((YHdY|7$>7J3kTv#k%%U#UK0XnOwUjFy|w%>}rFtL(cn6y`U=E zpjKt&<0lLu{JasyFH%cy4M1rj>HaP7G<}+r^vKguv`>~JgI>N=9d~!+p}xCHhLbHYrWmMubcZi zP_VmCVDQ~OKCaJYG^$6}nthQW&OU(_+z%MNkjP)|2{bI83dBReVpJGg!)Xl6%6fc5 zZFXK(Ln@+e3yh{>ONzSP^2ePTB=+4-px=cKSUIQGa&+BnCG*LyFsroX!A#H|Q-3Uf zm}&$m;g~|@flQelx_OQhy(H|MEnuR!R?%jk*aG??g}0rwbBScqwPu+Fv?W|mYDAfj z9Enpjz}dZek;^TFwxXz|&=+m^z5>>{XD_WS297MY%<_eg930iA2_^ppe{)dTO zK0lbJb^xrnN!1o9wRw!h<%IQ7xrgTzf8cGjx^FKj2hQ@R?@VyrD(*0fBX*5f^?@N{;oi;Gv- zSWQ#TD8+J0aou~L;rW{_sp0qC7u{Z}bgFlWZIL^Bk3$n5#Sg5NGIqmkinaxrW^t1M zQaX&jR%McOnJr)6S58$vcs_3x^Zg|BR4|x{ta~Hh(iQlt82(Aw;bY^dlsp5s>aOVy zfI}h61eKuN!HcVw59n+QY0|GtAcM#Kz=MyzRG!TkX66@q?UI#Ee;+e5IExBuiWpFo z$(^_PsEp5_wK?vSHoFLTe|r8IlJ}R0|NK2r8fRx~G_*SiAg+G_gZw$j>yNA0x4}|# z+atXrl+05-v(sn{OSv7?`S~SpZTHU?&6^Hpp*XhDi(s%+;si{eR}v68;#elYAD^b} zX#X)2l#mpi{`77kYK0-nLuj$(`FqlIRit6rckoEcTcF)KltcR0^7szQK+QdbR$r;f z)T(CH4B=}p^_zyr)Vh@zbZu!h;E%;wOC7idp4UK-+9ZOgJv7PepP(O}d5aheAk)&r 
z;pjsS5mGhxfO#z`YCdKLSYFag)@k-Uk=b7gudUIGa~m#aP4H0u9di|Z#}HEF({-Z{ zrAVv5H285~P`-#L4btg@7uZZ@JM6iaYf*+v%rcLObSdbrVqRQ?nPClNL|Cu4czxED z*yG*3XRP{rA=%GOtj#f{t?wo~k7yAbvXMr;7l>|)YEuc&wX4+5tc!qMTF?8zR9@q^ z`IPP2G8Jka?dX61G`FBi@d1Zr;ysnwp8skwNjPn`97Y@itL6SLq12?83S(ZS_rv^M6B6K_{cWt6A z430$%m6Ffs5b5J~eBDByiJC4|?!h0`tYy_9V~v)H=Imv%Dq^kqWhy!OjFL{(YtnP) zB8@%K7C&P(CQ^SeZ0Xky%o5nJNp=q%a8L5(GLsFpfD%h1CvPmUessC?4TuAaL!wY z(9oMpOPagjJN(t>aMcOBI`t5l26h&!Uzw1ZFXb` z(d`viDb1fTFT=G4@n5o$dQ?E4Dd;I}K1>zsTfraif742{`Le{?8FDh_7=GN*V9E@c zV%W7k%4aQu+S`tFdC}Q@p*YCS(38mC)yCH-%7bMS`IQJtyXpHnw4Ec&-tXS62r(=v zHa9suix{&*v>11Fo@VsfEoniH5IFOwc21VPjm*x5CJBHgO301p_ zGC2630CUU;4F^5L3Bg-IYlXB53^u|{!tT-3NZxor{XkR$Iom_sr3i>DpLZ{2#j%DU zo;`w7bSAe3)Jnq1#?k9JP@#j3TEmC;?@o$B>tojUUY)ZuA&Zg^KR`1Y@Yx01LUPF^ z`9Midw(*VX0_z2Vs>l-gH!-|oPQ4D}1#%bVFMu@j5uoXxL7td7-6g(;Zh{9bO@vfi znx$z(D2PAM$hYAZV& zley{Ce3i-KvOR?};OvhQUF5RD;eot8FOq#40ZpT3N)88QQF)B%l+=&A<6}J1poSv5 zsb;We#5SQ2i?y;txjuZxovku_!E!c3Ke?zr_J@YK=ukVjsa;jrPQ0k#jBxp&Hpvq& z%^g@L(aD|u$w`@V8em(%Rr#9yLa(Kwj@-cT#@y;b(|Sg-Z)&kWSw|m%f7MhB4J0t1 zv!<)$F7y&KQ)iU-;`p@JLiTnsINC!YIpoI0(Y+#hRLo*r)2p z-(JX4B}17KH$po)pO?P{Q>@h4Wet;CL`uzp2o}7}sgr-ew5&6p7nG)^8rt-QCfMF% zXE+0F6epZyCrk{6`awQEAKC-B#unDHk!NCdJ`Z)U_%4Z7jcC`3I!yZ7=ySLIFv?tc z_98~`*=G+GmDE?~)mAt8ckHwH8+@mU>862Rj1x14*-;>Y2P{KwbNyMWf&-E18XH?~ zuAaJu`VG6uip#d;?7e`7I|k{{{4J}vH&n)%ta0N*O9Aw7j#-Il<|ey3S^MgDmt&F5Pg#?o{}ZRR*HSo_Sr?S6Frurs&=`WB7?; zs7>6pSnerWbdRY1uAqAcywxNaYSXgASHrDoH`XFFif>zHKuH{(MX~l30FhHRjp-}| zT30(d>AO;=#%{U@Zmj2MrWRgxYdzGo8YblAuchsLgH?{y;$tm!eE*lVl2fhPRSdj8MGxa#I8U9_vdO(s|uarC2EtB7Uz z__2(SD=XK7^bCHd;jFGHXLIf}NQus?leTPMZ|Cv8UsM^5F!3bM-Sm~lb7?~G7O^dbsc3ydD}n|o#vS70p{ zYWZ>S735x!2C4nXu~pq^v8yD7`*ym+4;3cqNF5S)*w2r~=^7Iu)x&X7;{ZV(MzRa^*(NjoiuNyQl> z?iwsK7uny#v-{*@0LU}{Ts_4SH;n_l;#KHb_XUHdt(+c#ZFO2>0_>|R9fj0a6`nc} z+88Co8qCUs#3PUR(^+SLPw5{S5zQ9rq4_AG&UlAx6b4X_7_p^c<{-zYDpNnu69Z!F z`p|NY4DW#>S7f8&@x=Pi!0peG`bxnGX7ECd=6ou)BJ>y+Db%;YRt`+&h!3>;+FQ@w z^6fDt9h(?8XBhb*V1xwt)T_$0c0N_W3R1z8*)k$*n;pRM);6|C1n;Ce zQu>U-^#cRf%k?b-;|;ck`|e}F+&Q9_Jnx#iKcqUH3I%&+1P6PXLxjuh&*v;;sQz?( zmSr$Le4G&5i#x0M1y&)(Z={m1wOGI%4J#`z*BgJ4?m4Wgr!sdc#;Oz;B4qgO4Wix) zq?aSC<}>kS-kG}8U3ydCuAp)pJt4a1fAn~BL9a1qCZqK*?G=W?*;BjNPg%#AfX zhJoZbEn6DA*mb=tn3y?lfhEI$=#^r`gJHQsbpk_PM*R@`TrU_l!>N8`%>GZ)V?+Qx z)ojpXZpjtH8eB=0CWXvv<{z-~2dTn$vwa>S52{(25=LCFWSQo^Zq|^guzpAVR7yqp zSRREtnFisz=lh)+5Il!@UKTzECO15sXw6LZq^w^d9-Yb2s>oTyy;hJP>wBncAD<44*g|=12MXIE~k`+AowWeG+~KjtT-< zr9hb8pWjN*%?$`u)b;lYTxLsrj1htk4n)P?3?Lv~hPbrE56hYCeb*r{J=Mr@5W|A5 zRmD+|PQWBaycggsztJD$obfT&AY#~+8!W-it81sPzk%}eUwon_=^lube_J=gJv#0N zD{0&!H2x?LYRB_{gkJ!?InZrq_Cta}j`6r)ebkqB%c|MdM$gm37l1br~odyb9_7^N~ z-gk|;o|RGjKH2bC!(Hc?y=N>%*9^7Zq@(An>-59Ua?p&v(ts;je)Fdy^#;2dI5W@l z6jCwaq-g;rYeIjVjFc&_u4Q$7kP4=3_IoY%T&0>`UBJ!G6>lg@DdHB&(9k5QXPub5 zHEL-xN3vJ^IvFvow8_koZz(ECWNMSB$}|h0?zs%L1$yP&;}Y)Z%;Q}Le16;D`%*5h zQA6{I%c4kk92?-aWt_3O2|LZE%M9*IJVY-p6Tc`i?Raa~&8SLGkL??EAoO8}h7Oem{) zai<=rfVFDW;IU&XA zd0w8jP)AGZz9vU%R|nof5E-Q6(B`TwX>TGlf0$^pNAy%-1K0;;&C!WI7R_N4*ErK| zh;^Lsh0XcBOr^(L?-*ihGwkxB>YxNhfCYfVNVB`PziubQ37TbiBlLOPT*IJJquw#wjhmv!IS4V3bi^uO zFfamNdY1D?&6-Aj!P-6j=Ub&y2<@CMQD*_v4AO8qk*%>@luDLU_)2JjGuy#jIP=Ar z%7Z#R9#Dw}QjRC=1E@mfB5Jq#Ktm(KhBRQ%xRi~N`DSoxPMFxVXI*^{$kN14^&M-d%J;q+SJem) zv*N;0YX;?C(sjRN)2U`hAvq!orlW>Q9FH1i^y7;h;P&Y}m!6WLdn0@Bu?*0s*(nX0 zVP89`_j$6`FGmbM*kfu`923VZ>b8M@i(@?L>MGho`C!ouu9LleU4ScDko+m72YV zv+@gGUX$OKdZT(^ARsJ8r^4W-+~7?V@S*W6qQZ07R`I-r`15%&pw@utPcWwCA+)Ta zXyI0kqJps1V1}-~QVY-r7VZ`C6x!-N3r^xleKwxV<~}=ZZvG@Eak!Ludic+|IseF+ z&M~)-W6QfO&!vwKJ%x%??>fxtPpFK~#XPcvpgkpbLGLm1L&l@kHh0v>yh<~393e}9 
zVK_z!$v7^IEdqwTe!tBB-LbfF&(smV=uHK0U_LO?`^AN$A9w z4}E?8ft^Um{CqKfbW^0t0hq65*D9;}TV_%ny;yDcUCMD+qj|plckl6~xOwwm;~dXR z;AO>?UyM1IMY5ZN!tEXS-I{7-D}`0nt)a@TrM&WU%6JR(qm1=i4m6NIiVKGd^`vF0 zrP2^-TYsuJA=|c#$;Uf|D&g&y&`WOnYZJWS^T-O7o{2kU76t}ei#X)3>$;1AK1`{^ zH<30&!YN{xAB$Y;^UQTxc=v93 z!%CT7%Vzw`4Q(BsoMy_w7?kpxeATc5T3Dk1;xBFVNhC%l>200LKzK^B_4oVyLC|iM zjg(%u)$k0(F=$7^rZmO}6IYSuNK8z%H5tAE-IQ=Kz@ZGiC=`K$Vt zx2%D{*!oTHZ36}1p?9{W%*mt-EvUZ+(AQnGUubOalI# ze9QVFU)gf0Qz>jD7z&bnT?%0B*fPxathVM5u}DR{1haU%dPoa+GkN~FIA`%JStL0X zTp;xvH8$OHt_tt)ES+{q?G)9}Y@iHhWYEin0fJ&g(mN&@_qX|qbY{lV zZ}G$JEMdM^9W|T1)CRA!YDu4-nKoYjzj! z+h~7PkK#)f@R`(;*d+@d8`e|?;duNwl}}BC>dnH+BV|5J(aKQ$vV-J@^Pjk8MsI`q zF4#(qqXm+Ip&59Ki56Sp2|A;Qv0BTxONxWa`3}rK12p<$ENU*#6%4bHfey@3aG!8r%d*LZwmpD*% zD=6h`0)!XKeI2&a6==_T0(aa_vS>2^L$Iq0=eS z`w-W{{ANCcCwMMszc2EFLAYS-l#2}H-lH=}=$ml{i1Uy-8%rV7!RyoimJVNs;=-EH zEj#ba&c4m*ACx78db+hF3T~7;!bOpwNw~s#{vWM-et^*f!UEvt2c!_RM{1(d2$Ho2 zdfhTp!VOb1L1tP{(sp@*I_vCGiv37ki}Z z=$2!{F&Vf17Z(x%dZSUhHS+XC0n)aix;6l>T0;cYhB*g6veZ&u?0c5p8Bg_`o6MN= zo~JStaY$M!U}q~{mye@I>fR8l{*2U-QTkZ{E)*WB+LL@XrU)}@tS`1HOa5ZTd6N7Y zF!K}ar)=*i##;salEo@4T>8hhR-m97WI88X+ZAjc?nV^;Zki*NKio2>Opkkkr@`J8 zZ*Wfq%AdDcThR+Ol+k6gaGimX+NGAd6FdtV_S{PigjybNn;1eqP1?RQmeT<5&dsc| zZKd&w)#pH6DoY1~0S`2AKLJk+F{ZU@#hfh+!OmUB|+kv8_Df)B+V^EyS5oSZyn<=FO&x18(nk=gZ;+Fca{9h3~WWXREgjBF{cConGSUrb=Z zKE@9#@027sf&wgYA!!x+es&jH5|3K+wKGPICbLUDXAfuSHs4R%dv9*aNKh0_Fy|f5 zoc8vLo7zweJ;n4oJ!=~O(h-@EojLksOz&nltnb_L^&4iWS0+Br2t0p9bV#!F zm)|124ikEA&Lka)NGuI;3yM|^wGIBUr9Kb8`2I7;OLBDI!SE>%UM+c?36+zT80o`Z z*PE=RR^;M5n363*yEB}pTIbst&OL#<(#qq4s%3wY^Vi`pS9~3BSe#r5E=^}GZqsvL z;^`XS0Q4pxTR6tOk^P4EY5-73eX!ji1@fuhf^;4*xU!Z}f2wa7Zh8_rOyauj)_HvB zs&DCp+}UgrqaakD+Zl-$h(5IMA+=_Gs94#3Nzo(d=upJuY=J@=3lLO?BI|)7W(&^^ zY4k?veJ*=r#(pzX-r8L3Ejy1Taokj5FSf5TgE5tU*Gv0twca;3+dt2J&bN0Eq?{Ga z@HLFU9_XUaeMxVm%!n z;VrrdVP84&<{v(=8hWbB_x0NQp8_xF<=AjYX+vBe#^<1sszMwCM4IQz7`!lM; ziY9y%UWL4CPxZ43M{(_ApxgbAPbnaU+b>TIV zceI3aMM12i+=?-ckY`V#VY^47Ig7&y^6Bmq>$ASe8AU5+5qsO#wo zeWgt>Rh_K57rm>&Iii8=n|_~d&ub&1`SrBv+~fjzj_qCs_=2#kOVk2L;z5K@@1#|R zuu-cGq0C7)GLlT(Z;?T41Xmusx(IkVg#!e4%gXrzCw=iSdBz-LwO%d z8!*v;c`~^ZQToY8Ut#L~FHxF~aXY>0h%wt0cM23;Km>68>)!UzYQ~Xm%`Pqr#AsWtd2tDVRjzTb9CehD~DaB_0sjHj%t*cv-Ivm#31=K~G#K#T(CY|In z>G=LS`Px`!8GG_Yk7Li=p7{Zj;@wDvdKVjfGS-t2u ztd&z!Q#GMXi{KJVxOPO-Fp`uO%8epQm&C-xJP+5l(_;^pS3m8qRkkk~`1;leiwl{Q zKZ_orNlT~kyI7N0-HwC%r^DNv7-=t8+RHM^N6%Ca{LZeX?3;f5yxttxUO3|)*ZHFE zhOP5+1NkVtFqWvg9_P419by>Em!B-8)jC4rE(w-`aheR5=P6Q-kB$jBKb(HiJC!BU zt%#32iyz4jwaieTkrI9}@Fi^Z(Nlm2`yN>K+8rkDgSKe(Ki%$W!+N#LV^6Cy+bSZk zK1u86oCDxk*N~|Hq=t0+?f=qTfr|6**k10<8yEKmiLpP(YvloKg|sr)(-dCRI93v= z8P2dW1XSsU<1X$eYzK0ZN`$W%M;k0UH?(|}cSg}O?j_XZMR=Q17Lr*-U;1&3-o>B8^r*+#NIT@I2wGeCa zg+j!?!rwI1Ry!I@n*={6)J{rw&y9Gz@5tF44QtIa=(}(`ujiBSJTf`ThnT*O( zyO#?dk(FLH?ia;FNl7%#nGXjm&$EY~Wr+WgsVmmp z9$xT!Q$Ftf9I2q&cW|?8mL@BM)|B^$+zA`iB?X_axz@JQ>ofyF@?{>aXzGwH}0gBkYQ}26i@$kMMMKM^DIWd9>dL5c%QlqqE|Rn5&GXHkQqqQWGdgkGz=LoWrO5;HB? z#wMIkcfA1XvtaAi(5$1x9Ly$F4HZW}pawQn<5l~;h8)A(Hs3kAqxRKd-;w3>=oD%` zrebO~Di>C0EIFZ(f284gYa4S>Wa5^7xA0-7gkhvhpP;eWdRV95-pCpUgU2Sw6?W-= z%MW~XK{H?&&szM@7dQH-0Uj6F#q`K}1L+axcI@aK`z)o&P+rfj3gK78I`mSEct0I6 zL3Kt`zn5MSV@wY|V?YPr(|aVnZ5suD@6;LXKy|X@oBNF15%pnaFeeX7>i4-aU_qGQaQ7YRwM<_f)WnDHlac zg}+`VawIUu_x~g?{B!oFcdDps&vmqJALp}->r9WAMw?9RRr^jt#H*7>OFz zEB~PW>*!{W1)G~b;@FOpCDgp+qF4D;WSSK3E)b09O0XyrzIpWm+oes$4ZkeW_cu~4 z7AoZ!n?QenT`TJ1Y~{R)R0r+GT~~@o^n+8E?4YPMVHIB`qSuHAO&o#?5g%@ne&Z5j zRW%Z^Yripy|AZtOTXPR#U5w&?V*%D+ZxCBtCVx>A{~`nU-v5gY@DDNoU)kV)kO0a? 
zSrBW+?wJW#e#CfMh*4D11|8{(g0`QFmS9L&SA`8Lm>whtD^ zj+MN8^wc;}zhuSihr#2Gh5+iM$S%t@f9WQTMN3vr*G`H2-#XL|iHzNLqU`e0mgRD$ zfOgg!1b=V(v!oI1CuZlHzQ1&w?X+YvScEK@txN89dcz_cVk!ms>iOgQcR#=V`{VBv z{JjePz7YN&x);n@o*fCy!~dQbl;!+wEgnXzx`@t@8YU7aOL<{X|0J#OXHqu(%DiYe2r$w_x}cl ziI9k?smEq6U7y`#VvnZImi&)*dqS>`M<4a|p*HppJ)EFlhxVuVx*+VlhT~jE`-dwu z#_8=o;G5>!B?*sm3qQk3-HoHEgux}CDcaiR;m z1;_I{fK!EMK! zBD)@yPObEn=B^x1HZDt|-Ooz7O9fBJhgiHiw}iyB~%+vxlVEXw~DkX~qt zEv3PpHSh0aw6_Zjiqo_SU!i*x12oN!Z=h)r(IULS}<2?8UJEKK6q1mgbHTSTgo$l)(CY@@etAp^iSY8fv^YR^e@# z<>?~EdyS=*xcN18F{HcEE^>tlL@2KJp}oKy(VnO0KE7^kSO;IWhP2EyadBb4qwA?( zmxpRcXcK(ZXoR)3w}k)2!Z;_YE7Q-nNxbEI4?>P}y3bmUsn5E%d*B34;s2c4*!`Jq zW!&Sq{>p1_=`=;B;k(*is2q9XC!W%apTCotWw{~WioMZM{2yVlBRsUbn#6q^=EeT; z;JB5A+44N=GRDAvSBM)fB2|rrdHO7tyP7X_+3P!37z|} z!Xk_RULKzNZ{=aMM!qKXKg+|j#u8l59DmcV7rTP#8~35OlzaW0L^|oRf*Kwqee{M( z@*Iba4_C;;>|a)JH$MDH?rXnU?%eQjf!x`?k^Lplx6e;4Ufm<>fBW$7-M{nj_agZJ z*u~(2+!32KQBOp3D)?h z8%+zuJ;xlH=!{NpTroPdUtzZE=RbY&JF!6TU8Rzj9I-_B>r03Tx8>DU(j@QK4&8Y9 zwBtAQT9|bVT_KR?3t``uP#I714aZJQcb>RB81&xAY?HiMFoqpWddx3cuVjfYBp(8o z4>JN#XUos!z<-!aJT=?T4L>v{mvZTP8Jt|>D(iRr(r{;CET1om`@sdzUXL>+jQebx z(ML5d3)hF<|Hv;?`ZSj!7Z;;qNn>l0Tt^tSfcXth-dWwchzK+hH+<>=yiV$(#P+Zq zW~wp&6A-zS5G(oABmExnev1Ad=T-o0+nt~9ev&@<`k-KdO4Z+w4};O*qS5#(m*WUEX*WN4?nE_lnT@q&zb7vO&qHbZ zO2v&@j_4AU+Qi|A+M{ZN z{vgQSy^ZRt2~X*b>}emRjbBhSHU8P>t8)k9lQ8Dk24!5~JRgqa+PLW+J0ip|pNL^vNTRptZZEjZ88)@q?N!HlrZYPK~tu@sD z`7_@)gJu6P=lUB9aq`zXvY@11pke$XhZG=Bg|S4v zTR~~)5eA)OTn$jS#TK{Lan}^f|FW`65!4H&3zM}DpS0zC<1On`@2>q6w@Wr{@HMgR z6bY*it8!xDrujHMF$-{V;<@aQR;r5$MA37Yk)I`_NA+?JDSH`yPU1^$zoZ44mKLEEa4VyAHJCN zhvzdh=K5&C-_I3Z>x|1!q0F4a^M?i|t6rk%z6Yk<-<@mI`|~JyZ9{V^qeENQz`^{5 z!&-r8ETqMlyRHYeUa}@#)cY1~&82Blrf5A~_xN~he)Kt$fN`07^{BQj6RG-%4bN?t zK=s`Re}Aq9KDViBW!1M!cvddH5mZ`#Bq#to=oosQ66xx`Nsx<};v%6@7oE1zQ6Mf5 z%%$@fC`%ML<4J8sdDo`)>?C@_*6&1&!|~Y`bNls7RLuF8#Y*EP*1mcc-aYlcF0=!* zYC=A3MPpW1w-Y=kriO7Rb6O!cKG+AHyA6}-#duywOfly4#52urydF3F3UX8Cmjm(n zDA8)agIQbpFxyKLW=i8hWkJen#-+I!?XNk$IQ-c<`EideZvf(+&7ewI*TB~hb2^)XQ6`=vh$9*3#T#Wsk~7liUJwHkb3BVIk)zgPAv=Y0z@ z6&jjVX5aJYR(fVe z30q9#Q>VjmnUhmv(-qolq%Cdv*#;8_J<(ZfO$_3yrQ!qD`=8T32iF&E*h}f1KRgz0 zI62U_mJ`S6N@dvA2*e8H_Ctzi2D2vIP@2%Q*6XhX&d%S;_=n>}D{gvbo7a$g(l4Y` zbh`6U#A7Yq1q zvJ-sx6MSfbZ)=Wir$00aPx;pjQ1(i1ki zv32?LCD)=a_=8QIk0oY*XCp8P^$dzxLZjPn{p*_BMt872w=nx@EaSkde((0|CFFTJ za9Pl&+wm$?Hc0t3K}7eShgKKL*F<+0dwp~?uXHPhr#@)60#W7;ZLDzo_-~=y5uW+mvJIa>~Zd%wVlhG5KEe?uThq$u)MravFtga4Zy6 zB@}n$es(=~nE4ZML+)YIs+2zH?6<;=Q_(|!nw=zW7wGx7>I zv-6AEVP_%emuA~Rar=R7<3aK8CPyje%x5lnVpm|v>BcnR7tp`)Q%EzPTnk`w%2h2? 
z#7$)HgA7|saiRfBF7n$@+vev@r3{3@%zv>Sd1wITezhaQv2#XfVIqXRqCsgM9->V2 zv-^B~A)ap$)7a(Si~S}zh9-97`s~&p;?vEUcC2L88TA@b#Nm4AT9<0=Ldji^1!KkY zz)`XDS>g3CiX%NZ{@tVWB!wrRGv|Ikjb6-N;I>!ZoZl5M3S{B#a?tQ&C_vl3K<@Tr{6Nl=Mhm4;j3u-)c8i=Va}*@)q1EnnpPz`W%*^VcqE|+2TlwhH+eq4A^xE)VHu;=p!&-I zn%=gXw=PE?5_D-M-b8;O8x?Av>APeOp273m#sL~#7y$R48-1`C$04s(V$8mIg8 zW+-dW+LUqEd16NR(h_26o5~WxD|&u23ZxAiWVt6@ADX#WXU+)0Obh%$6@$}nT9FUp zVkleX7f)umaVexR--KyGvP;tw(nG)Qm!oAxg3c}{bAGOjNdN&3089EAuePBJlQL2qig8*zJ=?@!!k8EA>`9?5R<<|Ax(}^Qd+N6Hp~lzo|ZQls-4q@31vymr0$&QiTEsg!S5=EJ_Gov z*MW`&u0rkh8twL^>Q{kR62X|vt{Vx=O|*&c*)Xv5kNpvB&ujGI#5D~m-PK<=j|=SW zz$fufF~b%08(=Hpx5gmaX!GQwyEsp3D_>l_={Obiq)|9`k?@R@JWAU!xS`)lxF`{X zR*9W5BMf^QeBKhMByCUmeUs_$+~WeFx5W#!;*^~O*g2ADz9vZE4D@}dB~hp?X{UI< zQPZN_o3)k0h# z?SvO2r80UoDH2i2fcKO?ld}8tgV!6qE_MH&2y<$d*HS`SPD>GI(5LNc1oxDggU@$~ zyzS#F8gAXnG>cMa))sTz_j@*{UcS{l=^!6c3Zw|(z~>)GFf8|DZE~n%BuZRA6FQl8 zP+g^0CcD+^S)kj*+bo(H2{Z@-imL-?yiL|#DU~*Qh2>eLgIyDMM5dX-VNM2??oU0F zL+ztT3m22V3M9V_zlL50*Ojk*mb8f``BTAXl3s}?wT*xV%<)}jKI~wsB4Nz7EG%sm zC|wlIMj&FD#;~}(@WDC`r?hy$2ddojA0hhwW4jhy*X&A34a(J_hrF;5nm zW>gcg*5~T0cmdFVP8Muh_T{rirxfe1;BJ--ej(-iv7B|y3dhJzbI7`yy@J?WUqPufq})K0+{>QN z`l2?@NxD2V6mMuT#SJ|ztnWL>Xy;e<f0C2CD zcXEfeO;7hEljRDI&_7H_xw#QF17#{?)o>$-{T|-T7X;$mxKo8p=Sq zEOul)6g1pLWsMZkn^4*si;lLr88!_@k&;XKq`^^Q0SkzAc2O}b4hUS6-73G zCD==?Cb&mdg49l^`)2gRK{rG@O@^vTpPgS89YSAK{aE%Xn(J^Q?}e1*9O@6ndS~hK zTqSRX6MfEvu}?fum-5E1Tl;3WlgH}Uds|}kG|26 zp-Lx$!ESltz~AvV6|5RFX7X=i>jm#&6X2aT*QfMtg=w)~uh39&QITtV zr{Lvwe;QoI#-(PvxU_bRJ92w3%zCkqA!Sct&z5eD{KN@dfv)}3+qbCrOWabG z9LXSDWZ-SNdHuyo9u-}@W->FL9{m2U5~;&er=lMoF_8p7FXxN_DN%*b(s(*){W?WF z>CGk6azY zpc{fVtbKG#PJ(<&iQNil7*oh_qjn&|IO6zF+O@>ek5|#V6EPHzC*a#nhftg0={$*< zXHbgnc{dpqNAe_}zn-@SS6jTNR|P|-{NZxl49do*2-_MSv~kt;-SUOnz4f$zEk{Si zUQ0+k(li&7jaO)iV_GuVP0OD<8Lc_N2;3BI*3lZ#>!l?dA<|Kl2xZaMB!8&1z_5Lw zqwzd~r~Mm-|tqG8ti zA!Z&j99QDndgz?N#Pw!!7d20-vKKXR4=a3WpJf1jv^sm{fD>G8#ZFHXlf$a1;<`HvhC)O-eW_e_&K5@+#^oQ;Z|u&12U4ASq~)h{oX74)4tilekTTcq z7PqEU8}2AHcUoypNe1ItqMV|GhqRRu}HlcAjD5tr%; zVUyC0H=Od zJxj93cbkVw=GATQDq=;>r0aiN0`bUJt($emsy$5!!s|P;<}FkW=UppeX0t3g?P=Lb z5R~Q|z{hLZ!tCH1ejJ=5uBj5daZB8LvG=dp*`FU120Y+2djP>3B3BbAWNzzvoN^pT zArhg?_alS4SBcHyb|QadG}rPb_1)w?!w2Ae8E`5B%< zW0j9|0NN5`UXr54`HGfyM(I`z;!ag`5si()2@aIVv?L9C*YcQJr>;B8`XzbW@ieG* zjph1Ony`MTgW_E)lQ_Zw8!6VEa8`GqzBggSQ6i9xt zl108?_jx{Q3gjR+ZR=5rwR`Z6U4Qo8S_NS6O**jak@N)j!)7{tpI4@8bzcjUx^s1$ z=#P(kKko3`Bzq6)FO?qem-fNSCOJrptIFh-pE#w`L$j^G1S?Y%@G-o4zL&mFbcU$u z-&bp7R@+NIk0p&RsGiWShywQqS`O7dq_Ro1QTnbM1Vw0xo}lDzJsFh#v`kQbQPWWt zsc4Aarcv$M=InM$U_H+h(@|^;+f0;$J+Qc&d63o+~Ny-!W;JrSs z2h^gTq~p!raUB-_5*$>`pOmx;Co-Od)iOuro|q+MIplg8cYTT&>3KPK?~33|7jvj1 z7H{!m_EY4EdsUk*dsV#D^14p1tMH5|Lo5CDY&YR>=$y}}q&17q6^m#P<6$hHev(_H zX*#cM=XUukmf>l=$9pw=xnOnDx$8k=y4}kwJsuCIX-T@-Ere`Zno22=0(X;wa7ux` zcGwB(;4PDL1zyRjMg2zw5t!BklYaiwwLp7vls>H_HPlI5jp-^`qZBR`;qlaunw`QR zwBr4=mhwclJ|1PHH;&K-Pos6Im@W~?+)Shq6U00%L;Nd}$?Wc65H#n;0_XkGF z?r`Ug^a|1M2$;Nd^<}mFWwXm1%U`8k{%}ua z!w#I+7_Oxv$NBjkB>7E}LD`IMgU)ub5yjyi!%VT|bo^ZW{c>7l-maG1ZV+DBCW}2s zA(vQsulG6!_~>nV#P|jDRFc}2MeKq{UHrwjKi^DKngm?w+dlz6`CM@B_k=g_;c{D| z6oYsLT*5DA>NivrmLeUeUf!cp52g zKTStvt^FQd49=2Foeo&mh*)f@AJJMn6K*aNuax<0zSbL!(E_`eQq#peuDz5W7FRLu zbxy5cISW>l8!`s?#WIe=rR@6a?bylQuK7KlLWOtVU-Bv{w~pZ|udFYgzo;M>)(p*l z{gA|}XQyDU?b@O)P1mfen*ODR#W@2 z+ocomk#I8Onr0gv$M7RiV=2v1xB0}HEa9E7COb<0ccmOxueP~ylJ7?~wGk>=22vLf z?-SlLm3E2|B5B6_m- z?H(zRB(dlXWbBf{JZ0qC`61IPF@@7`28Rkig>#VzP<@B$NsOtF@<**Y=e{ zt&PU00HHzJuYeX2K_hp5g$+JJsogKNACYI~>}L5q&mB&!xGkC~ZY4t>8VoS90ndQMZZ=7M={;L&3W{|$KX*~g3Q9e z;&*=Dqb?DXt8`yR=SSxaqH?}F(X(hq(Y@NvT!xsU%=n10HCkAgLeHW28X{9w?c^ku 
zY7vfDrY{Z6vhToQ z_uKZ{?oF1~mOoTnCC5$Xtt!%RnB-%#BTqcZ$kS!SNKZ)QrBg_E4(tZSHHX|O>^Pxw zDZqrf#ibZyU&`D+qMx{?WqtVJ${t zpN2*PvoXB>#n|;mb3!b0<08`~U5(b|2AZCJ3U< zLR|r6(Rf_aWFThem9xy5#%b~_v65*Jk-Lf_J3=3vd)fJjaV)B7NvTkv!luz0j4at1 zuk)1Fev{Hs#?pxgBdU;OfjvfJ4~@G(3>zpP(y(*LCczPn$j&N)~-ucYeI%W z^Ex?)MNLj(G;dMj41^F+MK4iBF1r_Ja zoJWUMFD{hRxXSb}fIXblL9yi{#qtBHXi_l0@kD)Sg%1zxsEGr#0$xMyUMvm42WBa4-=4FWuv;)Uk??p82zqH4e1FyE5 zMl+Ym} zA}Ruk6r~rbAs`(RNR%odAT>e=kuC%XHGzaAHvu1?w|wt+zyDfy{nx!~JfoO*a=B_d|Y%TRp>AYNd zh|elza2!1l^L@r|O0L~T^y+mVTXp=GKxLH@S*0bS$5!qLL>g|3bsE;J|UcG?JJq;MqPD~%BE8&7!a`<4*r z8%KEK`-bq^_cbBbH&Q=bB2k-=Ie^;oVUzI?(>)@j26ZznRw;vF5PQS6R$HC_A8pYlQ61p zmE#xRE!JQR%U?H`%k0z0jjD`I^^IH0m8WlV5y|J94mJ{@Z-qJhLQUxP!ShUi|M@?l zf+cgIQ1W6;B%3LuEf%2NHVe@PTT)OodUPBEDLicxi4Ca<^jyA6ttAox2ij((Xq&hw zF4cO=41QjLbSTwrgVdKsqb{-m+tYme`)5TCc=ao}Z}zjP1}lA^1}j|)P^q7sXha!) zAEEVlP=jbaV0sWAJaUV`6xEagR#8x~Rz=3VQXT^Ul(7H@eCVfRoKn<<{~z22^!$)9 zMHm~RmPk^`VX8WSnHHl>!x4FsI{onVwblJ8`pWgajS+hJ;&b;z(!_I(p*~p!b@O%N zdB$BnMag$S{e@nNgLm@h=kBSdi5En*T=~gYlyfIxpfFL<#5g~2{+>~qbCIzLs0bp( zVUz7w08<u|HckT-RoPoj>&MLl#+?P{AgXYkF#>BIa~frQz7M%vs$V- zd;W#yLV)MIX)1|BJ~2ZSnWdVY-U4x0nZI8j$XWVNS$JdOSQguljYIV+Pf!laBzEhJ zMx4#&D)01-Jds|dj*9q5F#}`%d}V_G!4RR+6Eh}_!J=%Xeg#hh5-#ohtk-=9IJ74I zz*_h#){P|e=Z=lU!ZvE>30vDr9}URcAS+;LWyK3rzBh&OGb=DBP1ofLVy_U z0|H~l@w6-oP6Qf8j6WXYXh0ToL5JAI5;=O7vJsM51TUb`(w+G+%1|L^x{=haeRxP- z&HAXqNWY__{^C9x*O@ zl`7UhR_dmotS=#kZeQF1#$aFO>|InX8+QTVFuL8r~D zJ1j9$nF2IUfYC-jou@5Q^{X@tJtJ|Q&aAM@5ABj#uQLOSveEsSiKUFj z!%1`ol`?mvkKW;gC;!Ic>oLzW1FXBetnIWz*N0Ljqp!pJGpkX}u_il6E<4=z>>@qg zMUtc=+5JWW{YLU;s6r`g2Py40(yJjlQq0_sHs}sxpjZZDyPAgMP0r92q-3?NzyY`u z4r^ecv%XsOYKL_qvVzX2TW&aA1|P=g+@YwZH@3P#hxENoq%);0^O`{n`csK~y>-kB z3vvN;btN6?YmB|WE5wevP`g6J?+T&M=v(JM)wS^pvtbu=M}*&)h~JpH3{^SoV*34w znf9$V2$)+oM>n?zQaWs^*1N_8W}MgG?fNUezVr-qoxyBzm_Xe`hm&CqN@mAq82vC$ zXK?ZHj`XrIJJQoDYXmaX`C*5buHWi>jKGk^kI^l~`lPDqKN$8eBRjE+RLMYs{zlrh zPC8Pz69eh#4ie}$(p7T?QeL<~%`Rhf<2Og_YML>U9?|W|XJzuSB*i;u!7x=)12k}Sb?6RtIJAC=ZLEP zt`H#%by@AI>+Y@)GH>WYKz<8xbr+Mki@B?;KQT$aF~u0FO54R`*!V7{`s7kbLTes< zV)`}X?idr5asJmx>HI!Sx2ozs4LV9WV}g~)zk#vd9b$~3QNZZWeY?^t>~KP_%sNLO zsEe<6dD$^+#)#WT^2I}B=#~=1m@fY^>|aKv_X|m#-b(dvq+RP|AmuYuadH>w+Mh_> zjtr!Ctf8a3j4_OVYh*|28nSinXSzN0x5OI%!UMuiSC?;{pyc+uT_Fezb^RLddc(Ww(xfw3 z@>_`AdBPXFi@7VppP0DcnE5gcOsySEhK=uH9*Tw&P0pmyXIJi-C+o|TCOgJN+Bv^F zQY3A5XIC~lv+ciU*WDpTZ+tP6(Vwi2J2Tegj`R$5eqan#ja^>oGc0|UW>Eb^HM!@rSst&@S2%236fU8LFHNI8sY$VPF~b9dTj82_}( zuGC9G2`qGby7TwNh%wXreK7)}mS$xN{_pg;+a{=e)@UH7Og0U+_YZzvai(;Bj!No5 zv4xVG!uvP2ApL}{VS#n{0bp<{s$!#T*%v|gl~yHg`h;xGw+GT%K1-AP&n=KClau+h zQ3zn+dcfAI7P7ZpVz$7nG6&sEI-D&Dd_F_G+(wTPm-bE@3oe^i>AMQ_Z%|ZHRj`60 zHn&_uR>9$590)}T(FO{I8>9>Hss^pEPp!9nt~)r{e_#~@erHyk&VLw$6si7Vl8}0L(Q^8d#%Jgqp`G#&zC(+DQuc=<6pztxF_XViyxXz97fAo3t z`4^uz^MWg4kY7G;nxz5@X|6w0EeWW}q|$D%AAx+J)MRldPSV4B-TWD)J>5jeiPZn( zyME2v^TsSvC9q6IMM?3R%GHEYRoKdbtqs&eOJLiH>m>Qx&X-?(23~sg8QJu0`?vEU zv@eCl&A%y~LrV3BVG}QGTuUX6CqUGT(gax}bGQPgS|lw$=9}?3Z7V-HV~wZK!)2a4 zfIudOh9W?p{N^^6Hhc0_2uTx2DNXHPaTzPuT@=26=x@q=yofPn5@ zE=z=Y(Nx3S2^E{3Z?5Z{-C37B*J(qM+br3^fVEoh?3k`=O=?@J$;Ae$ z?}M)aGb6F_lPY!@H&}15mL=r-L&B(P>60|5fdK)S{^f13DDkp!bL40t+aZtDhSPRq zp1CcNKQuVc#|V6i{Ax-vjhULFZ}^`30A9*~W~A~8ED01I$EpUxz%X|%Om|s*kMu%f zj;GS3Bdksk1yV>d7_Pf$b}C8Sx2rA~L7fVDwHCP|Z4i6s=5dwmpa3%44=a_SdzHh; zA6Cm!{#^B)`fh7-;kdp&T=*pP*-*ZPU+39ods}SkY7{3TmeW0+Rci3Ka}(lP=GKc- zXE?`|+0kr*)jW;xwYvfRp9$^Gz^lE~q7CqYww|F01%jR*efaztoi_0J4-k&#QwB_- zrA-G3gmizUA?S`7JWyItzU{|@%UZYQWpaxUM;5JI@qLNh&FjW)Qkhhpk8HXWquQQBlGUF;4hmkX~ZZvU!KYF=!;iKn$4am zmlkK5&`m>=Lro43sCcjQFn4e65xL-YIjr{r<=G(8uf2Hnxu{g*1VExbVw{Nn9*ne@ 
z3B{=|){4wYf9{Wg^a&L!UfI0g`Qunu9aixvwmTEukb$s5_Unk*Pn>(D$h0YT_=o9U zlIgSSt~R|*?(bI@YyL<#jaaP9X9Hl;r7xX2edIRt3=^3tl1Ur$)@kH@;V7=7q2yl^ zB@67w;`F_v<$FU$RDK;=2$Dz=xm17b zmB-$GIbOhCqMRWstAB;jx2c}doYBIs=OUt3MeMOPk0d;Kz$a9>wADMFID=oZ+LI^k zohSO-=-+y+!(9o`@H<&!0Ic?s_r=dnfOLTL>izIhw-Xg-6An&Yv5b08;$-ikM~ zj3QQ2JZYVJz+6-3!;^>QF7RykU{Kpc@QtTD8Y${A_oOY0?z193Fx)hv06i`;K_WOL z{9B7c8~DVLc@qA|FXs~vjVlL9V>Y7eoNXkG)AjK<@7Y)D21?l$2nC@qO<+CUL*?c+ z;Eq4m4~Vj&x?51-Dpc=Y;0Uda97DqHgxIta=HCAVwT0e#S_PH2C<}QU{8DAJeEem< zy!U_{@JA}T{_)l^_(BQS@k>(vHP3Tj@`-#retoH#;XC0^Ss1bi1^Qd@&^D5YV4wTc z+uMEz7y)HHbXY!PgT9LPeTJYTAIHukzVt6FII;L)!liG6S1{G&K%Ur)bb&EXF#P#4 z-fhCOf>Q6M*I!a1ASzHar81)Bb9^Y`ki_S4Rs=na?-Z?KW=2)>*z(Axv{el430pJ$ zy&}wLn<&>Vcr-ZU{=JmR&l{GBPyj(6{TQe`m8&=+NqFd90Xw*tYu1G8IMY~*CDOoP zb?hROyU@$9`Xw4@niLx@f}^axr8I4Hu4SN09tucP*UI2BG-;pT?+nrB!#DBQokh#G zEl4Ac-7^HBxyr<3<{zhCD>4U~^wkQ^nUA01c;x&^gpXO#qP6lz&I|dc{wJ`v;<2YD z?)x}+YCS)6TD5}Fi&LX5od!#YzyRWL9GGhYDboTjrq#YhMNc4WZn4naY-+pbupb|x z>bSR$V?***Fi%`U4vMYtl(G?F&fCPP;oi65vt*Hc@;Hv|^1fvleduqkl484!Y4EqG zu2wKM{-`>V$gKpg{Ny_<led)CgCUULwOF3#es`x;Ulj&+ED$Bk>3#YURd0Wrsy z`vdm&cbv?SOU$yQBt>6)Q7sq2O@#yZm}@Ew>&~Ge8gEx$87|cuo{%1!*$r_`-uf|3 zYJ%=X_)>vil4fXIQ^U0Boe0h)@kg`a#5J=A-rUP_R|_tfZ|q5Nf^$WbK}r6#9n1t~ zR^}LV=^=OJ$yLsoRnFs)(KQOs=2$nV%OAirR=Q>Ds9X#@_AIIIA zIPKnaWi3KglFW&Bo-~D}z#V znuMDVOl4G6IgsQN0#gYs=XAjMQZPcC^}ST-;al5W8ur`7UCq2X`tUPu6iff!&xrnk zYCRpUh9Db(p9*OM>4es^cD(&DN_jW;yIXX+INWM0IGgAlq``H5lI_cMN$2z|_oa}B zrH~GNKyuT~co*6$N129fQ6Ra1m$l3|%eYPgl=4`MGduufrbwFWDf zNaX&&j3r#h7C5pry_m=r1IEPql~ajbYpTIq!?Z2BJgGF|Gh6n(h_(gf$8{(zab7{* z_ge^A7vqw&AFM0#NK)v6&zH5xgjoZ1nL{W2mN#N3zz-g(xA*ujl7IrzZQz(S?9vRV zf`sLWyRm1J-#dLYCFoNvH2=o9#xRThfp9HbYIw)C()py!I20K%W=#w7T-L*2n?k16 zuuNQOngOF2pKJ*=Xe?hpN$`lL>}^Ybfpa1y5h->ZC|yHfahE*mZIZ$+x}GT~EQK z*jOzp8@&KAum9S@GT0O^BJPwOcR{>RZb37*B`2d%6XNXR z_1Ev!R*1iJlTqM9krFvX{S`XI>s9DpfTY-^MPywCO&dt##um_Oc?0KF{Z?Q`l-XnL znG&=La)`*cY#S!_~J6xo4zsYRmm?sDdPdrp)S5k<}fqn`kA98ty3kfZ|tSTLQfe?gy{d@Y?<~{;P2Orqk*e{H@ zDKuL7KGl11q$Y8xY{4Y_{8Ih-6LFGL^l4LMX}hV5f`lIE}9am`>a7$*HL7UMEE&6}oVY%UAK&=*C7D9Qc}B zGuV0p89z)Tx?9V?q>braId}vcq~;%l4191L8nViz5^*HsJHHE&4<{tlY9-G2aW{gAD}YetqN+G;%$SUo_WQ8=0Aj4o==u&D(>;{rDy#} z>c5rse}}SEIky*Uu>2}!P<~ZwKE8}nev0P?3J*RUx-s!1HyAF0Wu`TCZp?jN86Td< znwCmiZ3Ag*gGAM1WFTdezU#$-xJ{uifo?4i4wp(>Tl(d9l08P}R%7vqH7t@cE>YL- z9!T1rQgaV%I|e3=M|vE9Z+nP{^hDw^bcX+RuggReWotHka&lvh=wB~y(K=0ZP9E<{ z0Nm{Tgr-x z1;>@rydDMCt0v*c_-m>0!kxT~vCZ)1Wmr&cV47xP`S!nsVfUmJ?fE|x z^!{J^T-D&qWfgeHtDbPL#Mm)P*UI?()ZF|J|0?SAkCx6^+yS}*KhKf22c5_>3v49% zv%LS}JS4rnlD?X3McQm{Ur{AAQF|WHGD2xI;$RJ!5?D%g+X>yi{a5Jr&d$j5VDGbx z#pI~DsBZYXJ}$^oxauNDDZpX&Kt5!UOEm*(K=oMmXjpK6{Avt?{k0n4g+KHM4cUiFK`R-li@X^pKB;a)-D7;AveumRwyOf4c~49+ehv^v(NGJQ{qIJQw1 znxe&UW`7>~wV!>0R-xJCVX%|w8w)dih5R4`z;r(V7bknu<@w z;e;;}>&E69%&r5$+68PwX4Yp$6@1EMDo$`OqEmAdG+g4gLRcGQQ%+tJiv=e4gSt^| zJs3x!;|)E=Hq5F7M8}OpQ_qb!PhLgbOzQS&+{qTHT2j^eAN&_4E`K?x-;j~UcHx;2 z(&@B0IuLg%>-vx$Of>z|d5Nh3z(kIY9*3()BLMEZ9J-K)ooiKoO8O#HrU5vlg@t>@ z9kP>^<&G`Oc)oeMt;G7WE;hfz|0opOD)s6u@BTMk^uPggUApHSk`^9rtypH()R`|3AuCUYjN|A_8ljfFZM?*rM8eMF&$I25^X?!PCYzg8qvc(v686@k$!v?ibs^0O z-~l~H>9sp4`vz4+yC0fs{t@Bv-}$r$0&>;HVoa?1-jVIkHEZc3@#?2*CJmz%rOfk$oS zY|;j@-`h{wJ47HYbM;>Zys8pm=|zofoYh7F_8txq8SDAZ$I{;_dN~iY_#^xMbnix& z*#0s;WAB-ZgnVlWmddL=Z{J?^LI^;N;u5*!mFJ>(;<$$|_Vk&o{UrZ4UhNT|QB+Rw zs0m~k5G6V`0>BGTe48#hl07+gwD70#hL}ysOHqQf$-Z+>l#-tx?ES+^L`ulw^c8|o z{RgsA`jYg-fc2p_YfUC#IQ#wE{f65;=S0TDPua|B8mC@6?#W@*d#VPdS8pnv@Q33S zX)oyMcDkFn!&hODb^o9i(+eSgS(!Ebjwo2)~Jqpxt=# z>C*L#U_drVTFWt&9~*@toOBs+Hue(q5QcgA8XtYTrE}*Tx!t7zS2$$wZcakp>Tnk1 z{R>5erOp2KlZ~1e52$W@6TIOyt+aJd>J&@x#zJEzJI 
ze%6$`_3mN%2r;YmbN7qSrLpJY^Znwa?q@jmx4vM0lP`85POIoL)HfW`YZ~Y8`LI>5 zCh^;ss9XciSRDjskpoDGGwY>|`$|c&S*LGv*k;@h|3d}`fGIY`a`kVYKf0a6d|gBJ z1F1I6Khlr~*ytv|qEHu>dGUkomHDn0ojGO)b8cBU7TWR_&E7t-Y7UcqxooPc`rM$z zK&E+K?g+zweBiEtbLp4K18eBs7FTYA_<>f7rIi|-PI+Z)o9ohT6Y&wNB1H4Mk zT-@uV*YUKyFktfaMa`tmU|seP36>71mvRbG?!w9G>ZhHY-mI31kNn}3?XTc54Eedb zZarnPR+%qL;4ex)))#kyeBx*2g6S)R=`VVd@0vTRG(K*OjCA0P3zyTr>y+pGbr1n4>F9hg))VcAsMethjw8X@k%jjbB(0vwMq^abI zhSco`M%kW@*PQsng>N@p;CpLu(&JLHu3nsNOD?;mx3NZ4g>XKni`o4*ncz!o-}OdS zzixTT@QenO!NpK1gUNPR>EZ4&Nb3j|g}8g`8db6+H>li~9$(3RknDZ-q%rXeUT?Qf zt#H-_Zr;G{9a1;H327fpT{|?LWp1`{v&4RO^->Ax{m=4%+*2*0BjF%x<8u{wb>SN4 z$fs6ajZv~P>h^`xt=+u2S~mSno0r$^e<;muE1T<{kEt6x+hR(9vI)l*+=%=@XurF1 zLpfg9%VyTuMOVSD&Uw|;pd*e}WzCLdm-Jjx0!Shh;ygbt7{Brb@&!XZZiDi&5?qB{ zT|jp)x0~E;aZ#YVX*Wh@l}nzmy;ip08qxak#O5>8l@qg5w++dTRR?Y<9O$%+l+s_x zxgurdk@Xm5p0y&}TTp5%V|zLBd%MT`fsQqF8p0!it7~R4%nQI{7vEhCn}1!o=?u_Z z^PkAc5uR??|4|mYSn<>&eC`G8b6tM%Mgy#3Zng#Q3wNv6(lyC~EF=qH0pVMQ@@bkxP^Z>0jD zszVh|kD5*#Kg;uVq@a25WwDO2>$EBs^~6Tn^x;SGfDDU5y}0+s_Mre)s54<9n5YZy zPsC_AM`*N1KdExyPveN5x*<8l-l2lJ`{LW{cM>SyH~#Gt$5nlD+MzpM+D*LHHd;r7 zcfGWqnbN(qP2YUN-siI*DVbp*oMulYZdgM+6rfBN&~t~t8zK37107nLZrIkgYMszy z%eqy6UUVqe%Es*B1Wh%FHgc}c^F!2RAk?5&0sP|=%T#*DtNPv`-@VY>>vDhS!Ce;b zFF@3tVD@?7r9yY&O|B!PC9t(nk@%`s`IiU!1`g&kGEr~Z&vW;j40!QsG&WK9j93v6 z{cOo__43r7hF5xw6) zWhpGI!;$u9jyu=^WcT*7Yj@2^@J^^%bLbyY1DzAecJG?d+4I9T+i_&j3Vt{_F~9lp z2iT2g@h0+>!k`eoV@$FIeK8l1PYZ74@bDJkW1+Z&a&mwTD#3z?G%@M@^xXO5cMuWk zCDY6T;)zhiJrpj3x;t0;e&|1X%1G(k&|*S0h*dUV(kIGM&Hc?#veQ1>TYEXU#c}`= zy~iet=Uc^$K32(``yoEQ|5(?c5XR9QGe5Mc?<09D{865JnU3>_lD4$}pj-#e&%DOt zk>D+K_lFnH%Dd`Q3wI|^77vrJ50q;7^%%;_7`mCV1c`xwy?6aSxPSf9y*e~9 z^!3oWZ{kjl*|h<(9v@>ftFk=K7wBEYsXP&y|2cBF>pKjT2MMcYInWDotLr@jc&UXs zKah+r)WU3E+D!8P9_iNlM|()#vkTEQ9L#i6q1s7)5k3fNOGthf&9#?x{v}7uIk|oZ zyFH6_DI2p(QamfP?a;MN5{9(mOL!*s`arBqAJzEfIh&nN!_?3$*Iq=9*R8&z(V2ITK>gL+% zs;31vH6_p8UcTJ|3pP*F8l)1B`T%O?T@*u@2ksAwSGr|<&SG^Ln9PRf+O5YKf1vnz zlLNgg5LlJb!HXZQ@bk|;!v?3T6rsnyqpADGp){wFuv|)w=9xiir<0b`KGu&p7;sh> z_k5n96g`*$kGo^ramVj@JmS?Y#QUuxO=ORFP5-g2^zej+}C%H8g}4KrbcxUh~}WGawsuoS_Z@X;7%x zB2u{T3n3$)JGU3*a4Y3=C5cuR9>ENEfM^r%ooH45c_Uy6<+oO1 zW?v5dXC895G*8N-ZDVtY?m(vcOy(g;N%INXU z;S$kL*{*>2#@*yLBlqftn6k;HsK5A%)ua<3zBOA0;eAop@21o5gqxA0C!l{#?la2n zUpx%UzR1a3aMO3w@tf*$**g?K#LCX11sKiQ1(-Pi_dF z+0!e?&ieQf`||^8$_MxF7Zltt#KeA_>FnNvhr-_N5j1UDWk1DawD<5pjIUa0g5tQ5iWFI>(Q}5pAtYOQ0Zd681pH(?Ov5#Hg#LC|1 zY%ezVG)HO%@0rjLQu*Via?H+2dj344;Ljw^N-bFD%F*X+w-WcRoNbp8R5?G)!lC!y zf6(XVSAai~I%NshUMFCuf)4bq{q!Q8kZl%h;g%Ae7VjaB~;axt%EC+&=zd;(^Pz!UjXobMh^tP ztUu0eTBxL9B7NOUS@Qa-e6F^2;8d=5%xtdqSXvT)fFJ!QCSdN3-#->~A#=5dXsnbVz>3@-XO&ozdQ`+ls*3f;z6(Kwe9{dBJ5TG8wXCM zc}?E`vw@v9|BbJ6X;-lwsw zqT!1ad%ulGD?MCD|3=yh5E#0aW&z$p(Y88)qCMCQIk+})x~T_`B~G8R=bJh;Q@WK( zBg}dri@opW$;;QIC>mdALef1^Gx%PB!9hWjq>O^XArclz?b#5ZZcfW%gDDQwM_~V& z;l*nCef-kFR~wH8%1Q-am0Eo`PqS_y5$0Bw=2qm{kd*DDymv_V8m`z486~xD+Pt ziN=tAs#`?@O@Wr%LOjhf&mkZQ!vaDO-@PJ=wPoSR71Jb={#9jLT*+hK5k=#gaX?X^ zM$S!0r!K0i3+CWviPP6FE&S%c=Q>YY=B8RWS(zvUri(92K<5dz z7>?;AaX`(Thc011baT`0LvLfMk4c-z>Z4&q8zFj| z2_86HnO;eGRWz$huk}?S&)lMl;)hj^8BqJB=G=~-j?D1U*T z(=DFE`~72$;O#X10pbyB76oY90EoVaF2ekM!YM&pyM_)PV{HD?!-1~Q(*B24m4Dz; z2D$t*KDFm1nLtguKJx((OR2*hC9WYKyW1x%b1lqZ5z}Td|9BMI%yA~VKiEmGCEEOT zAb$QodnR-h(+C7pHlli@rK}rZwPV+ZnZu$nBYwQrcI6B{sYq97C}GG#TNc z_3lP#YlKJZP3y0v8S;_R$)iAgiIh9m($$v>ogs(EyH{t>NCp{L4}pv&B2szVcnv+9 zksxCb2*(TpCq08UURp0hS%mZH7vq;HB>-uTTl|t4p(3YwKW8P#HX= z{=2o6YAAKte}jyr86FuMqh%m1X_XD6rCIpmAS&$yrIWrZ1Ri4fwwp3j!-X8+mWxG| zO0RAal)rt{Htq*w5DjAi#Veh&Nj2K%jiV?|e#G_7lt^qSRj=tP7~?L@@4^}4zfl|F 
zZ+VWz9S^j^TVs{2+<+kf+ICwP#htwTXlscWX?`h=JWrzd5+8-Yg9KxmQuwV3@bvdZ zQBlAtF0LydjiKBMW<>-yFN?vM#jhF4ikuMQHgt@u$~2U`?(bW5N(5q;pJdY8;!qlu z%S?Dke;^_*`-4MFEQpaQI20O`FN>RBp84?bp;fP{qhi+V}omHo_)BqP3DeB0{do`!g29R+-U9fFHgllD4!9yov!uo@|A1F0- zu;g#wqdtrgGOD#F!BwdrC2aJ_9xr5yC@4oQr-hA?p%Kff8C(+gvu%Ri4f0#@9Lb)s z`B2_tpNvh6s{$Qg@~Z@0zX(OfJ?o)$u*uV0Ol+xKx{MZ@wo5O%HJ+W6T)GzphYd(v zf_8qNzk4t53MRw+ei6S*&B%wV{xC|kGRU9+{2+e zvaRs6>Tq{LcL5<|=i*mxNhWYT=)J2MITlr*k42S_ zOz2O-C8fE>7qnYOvbgYtp}Jl*+|*k6@eGJRXp!TDN$F!!g@eq|R% z-njFEv@V=9rbEpMSE|s$XWE|kqg>ug_>(n&9ySkwJOcEI?c6){gsEmO*yK( zcSO8*R7rM3Bs;34I3iLURZ<-hsg5dXhR|-AYssY|$vq;;c#&i_(PU}SWCPJ;57FdU z(d1H5ABeqv%#6m%8NHV?#xG|qUe4IOoN+na-_0$5?Y7b$jI;gQoUOdHPPDGfDE2 zlC9h|bJyx0uD!#J6)g@&zWqt2=lGe<7^2SgKOFZYsVTf&@=5>cfaM;=bhDn)5`T5? zbm67D4?Z;NM7%kCj^powzxDBd${ZY8b&ymNjw=IbSsw`UYzV5~#2_uV+rS}PU4D(l zJ9mJcE#xk%kd;-uK?)}45|~224fMcQ6H_pQO)aXx?TP8_8dWgaF754=hPpb;!22uQ z)D5%;Z9!+7j7IY$9e2k~k2phV6dcWxHXciVKI=k(?M-}sJyC_U48#)crnV5lUsQh> zacsJslg^{>9*1~j@Z;0@!E4Lt91ogLH<_P z|8v#_+T1_^k(=8S6N8W_9A>fwPo19FeT+P5PY>xuUy6}pJZ(lLfx#mS{1IC?`g#ob zAD=pEOO|#R0%)Vj9ctDMx$w!%ZooPBtAm7C0zI@yvC5}4mbtlcPj9g2 z$}l`~aS@w(yJN!&oL8tdg-hvLTEvfyjv=wiL*Ihe0w`#ja8<3pcSuFUW>*RsO++G} zJGYY2kWP38I4EFcaIs~Hu<|enMA_VGnjFM~X-$NU$wdlXof`&d2tE{nwhqCF^&+?N zQ8nl`S+P-%(4ZB)vSt5ki@@bxo@MFwmx z&LU;@ucHd*NI)!_B*wm{hFDtSSL%^oQeBFGfT@!+p*11?wZTNPhU95-2#692 z52{6AuCs5A>(ggaKbqY}ON;bG)RFr|Ty zswv4HLDdunhfrH0BwAPYakoi3^HJ!FXZ5ZK zUJnsjQp{ZhMMNz*0Y$agtM=;#o;BI%Ovt{4LM55(qiMI4U2dCEL@M(aGm7A6%G@ZX zwGESa)1akl>&G*rooLnzret|DpuGBAiB8sD%Ot(d4)KfdC0+BgWv({SGSzUwh&^_& zk%JI%M_Tc1JIldakC_VLCnB!u`kpYnb_3~r$i&YtH`Q8999nY0bbqCsnK`BA=lOVx zYc5I;9>e(4o(>Kj4)&8>5sQ?SJzUw3={#O7#hX!FlG?~Ktb1ge1=ct6P^@T4M&hQ2 zrNt>Dm{eO66$0-V==oT_IRMgk&YhcgoNwDxq^J{-Q4v~m7~4`5Ix7+2{{WO=)KFQZxI(HL zkmfM?X3{wBXEBJi{+48Qzt;qH?VRZciRvL%x4Envc#o-5#juB_nF+d^r0Kb|z$f2%@W~9#djlm7Nbko)Bt&&yq=w z{qQqwNJm~`8$Hrmo%0H6DiOSI!@=kIQHKYmxW{LVxul%8k8gj{eNj7@P=Uz+Rht}0 zGk;&~88n>GSYz@&L8EK$q?%u!Ec<>F^q^?AhjssBO}9#Q9FYPtdIlT^&8?HmJCR%v}1l zmk;va0Cg5-a{<>_oL?3^fGk6YGOFNF#YTdJpRRvI4}3S2LdcqcQmdy%%sRN~3qW3M9@?(Kbv!%Pg{;gid z*kRWS!9`FfVey-iz0SE+_tGfrRU4%;$?Y9-iPqVR5cCAh-A_T$#lm4g#c?F;`(_FH zmAZuUs@$>5u7d4FMd&3H2hkQ??(aO`U=b7htJUTcecZT?Ro~VOGIn*p2WlzA>;rih z6hWx=*f$WoDYtV2sK;thGG%@<(gd?Ve&ngN4pTK(ELawVZt;=Mb1zOATxshouh`0# zTa;MH9q3wt3A%m?J`Xw+SS{dOTYHoh+;@^g;h|7o%)4O!a`Nl8oU%8QM`XdK5Z;8S zGqLLLa@JszPs5L--RsI9JbXauljy@e-(X7)wk}mayN;wOdnk%|>(3gz*~)<61ctoz zDV2pNTHCv(?byVOK|qRF))iWX+8J3|zq8hR688k2z8OGSI(aF-e?+`oHVA3Fs9K_Q ze5RKB@rm1p99Op%x~8;y=hI6*Wt)64ycHFw;<0fNT05KikgAGK5eXY>aJj6Z_c5Q6 zA1#wki)B|&B4AHArPA0?uli1GXPSE7LXD7UR#sX5Ly7jIZ697|83!7DVZ%O>II2{m zF5k&{Cf9oKKq07iUcEvf%v86?F>5$!ST$yymhS79f&xQ$=5UY>6t%M!vc8W@ z0ypw+&|Vdg!5{nVC~ZPc_h`~t+_?hZ9oTMT$`q~d@es;cf-b7bzd#7+Ml5-NhD%ak z!!{=3^2meM)pxp!Qlt$BbKUI6K=KhKHzm;er+#+Yr(-Hx(4Ezyeuoy3gSn1VnVrlN z<)C9jpS<9&GI<lXoWC@oo1@xw0T+rhrODE)REBiSds*Sv{c2Pm(4aYp`O95_j zX*T|p!HX@`3iql2$=OHyI|;N8GMP_RX*K4_uU}%zM-PmZ2;Y z%1``l33r#TKiJ41{9*97^?izFrH*2GG}FF z2=kcaPen#vtwwF{&6>=J_KqMAAMKmmKVxeD2(ALIm(a-7yW#m z+|-wx|BJe>0E=o{`=_KNW++L4p-W*X=^7dokQStol2A&e8zhDf5ozfZkrHVTP(q{x z1Q9`LkcRp1J?g#ZobR0b-RH*hJ^$w!fBW~=T8qVE&AZn)@75`|OC~L!FrB}@nq0Bq zC40~Kqy^^UOSn=6_daht(hN@ubq?ey+8f4mHQIl@9iA&;NOSVxq|vM~Za+`)JjCPc2C=U@DXn;vZN$k-gKLKgCAgoghkwhmqdY3fJ@?JMm2ksg z_|#5JU-50g`Kl&i-PXLZdqL}5`qind`jFCW!{VZFCBi~K{ zM(AbnDZ?qxrP%pu_f0OcU?$6KkH^PMgcQ)g2stJJd>F2=8g~RWj*gPxg4}s+f)lK^%lIdz1PQZ2Cc~r@Q!ql;hNzcR4e++NgOwteR&(2R1)9mB z!H7jQ7;Ypa9zjIrp++X4)p=3%Hg;zt>t_EcsV;kvKjRAp*a2JMlibMOj;Q(d%vYD8 zCkY^s9Y{>~E6>q%&e0&|XmqtLnlINevp#2LJ$EZUignpOo{&{L5W$T^oFn@*2%%KT 
z$5Pd00+pD2XUTxFSeA@(h=-jUsd6r%NESw^l8a@}c|JK(mno8vRgo|UE>xu4`Urcs?a8=D8TGh zV8oI6%#lK@34k)&T!n&w;5(#9O;)?Chyi1Q1j7MO>x7Ew1$KoW#|!Lp!^|&&vwML@ zf=}Sh<}y}k1WCZ4`0Gq7c-)DWO3)9O9)J|U$bi58V$+wAo^Y8R$4mAF_T>F2`*YUc zFmFpC%b(KY=*nheVN&X9+>=#I#{11hPus#zhHOt9^1#9ezXgTKn3#r%qzU)|fwLc^ z7pdSK(#v$zghz@tw6B46CiqGkM$xUoVvC_ZoKd8q-u9+Q!y1>#HozBx$4&k=k_ID2 z3R#UOv?tqracBaY=7Fm&ZgQmDI(~h?i*xOKOwNI>=Ob<~eZX3v!mp=p4stDp)JMHc zH;xY}WF$I+j9sjYvX3)Xtm|>?OQ?!)h$9nl)`Pt+SL^(kgJUr6O93Z8`OwXt*H2*> zHB0j{&-b06s?2KefCUr*BUD8|BRN^2imbU+VZX$D96*E+MyVij-f+R7&prc8yDJj$ z?q<+ygkxpz6wk9g2gZr~+P$c<7I_Vx_Gdq#)OX@i=QEhbs3yTEg|8O0kkZl^T9}-} zd#q({SQyDeJFcgLM`s{Jrzi9iKhLu#nQEksJ2QTKb56!;qA7x3T6#D}zEK+?8rA*| zGQ*kw$oGoM=OUsbUTSI!#$?hJB`BbzQ*_TmI#Uc2hw|tmM5#X_mz7v%?CvX2tm=@@ z*yYn{4&uZN;s6-UR9u{%U7cPs**Q7cIXDe{W8eMeE2ME`9pR_BJdTFg*V%9ZTAmy;mLuUm0%M2A$aPvoEAuWL z8lpB;mqjD|6qb7+MqxuPkkj7*XVm2!#^h1RQA|E#kp`kw3}>ekPH-9qizYkL3&Kk+4LoCCn|mAZ9yZGpcB0wr7vYkZ}! zpr>TfMD4w+R@_baebg6NXvj&FL}j{Fx^pD) z*Og9KP3kRHuL3evdK;Obp*61hfO$L(uK;$~(eq9ss&os+qzRayp(SqAj8h~Hr{@bc zTUoZ+P9psgV4s8sv)Nuj=*{-{cf%uToO*x<;kBWi9}bvD((s0MvfEb4%0(Az49J14 z?#=g?gX`h%JxSPBft>(7ujaXu1Ko!ytU7{Y2tYpTB%*lS4-82mzNJ`0!3dcCayHvn z5wB9x0^3b*SmLU?gr~LX2uINHo(rArk?Fo`iAyMY8|W4sRaGxI7IENBVwGrBdbv&! z+p+JJu~1bC5pNgrnYB5WYYiLrL$@RXVG`mp7VK{jrC9UDJ?&h+Q&}6Wc!p^DwqEAw z%!v7Sk@%-qpm~fOu-#9HXur&ep*V7ZBe&{91`ey;Pu3xRnbFcI(NZbDNBt{uV<*(U zyb)4V@sZesQfjmT)(a6D^jbpBBNzj6)(c3DJ?PA1a4z~fq3Ksx$(5~xj!CY-$ivZo z36U&tHx>5S-cD^NFBmx@^Owk+zWOOg0-OvmWI-J6rrf0Y5%^7n-kDLhP=>o9*eNV# zq3KM}Uu|(JH37l9b_jN7Gu-6K_{iz-T(lp_-+Er)B$S9J=zhS_Qi9arh9wgA<-_2` zf2VEb6hE|6E))SG4X{ThIhqx{>;`s;JT-j>`IM83YT#m+T8whK6Z&`iXa&RZ>BS*- z1DrfU3teb5cZjwke+d)TI-%vq3d^1167JbU>uM^T^#A3s+^&>K#1b^uXajW`R`Ro z<+6`0c3g@Gv3#MA{5vWy4+&Hsik^JLYM}_-C{Qs_^wLJ~uvO{AQz2!&x|Zhb3UKtf zN8Z@`b*U+4h%6+`D&`BY3_S&6jJYC{zj9U`Vt*Uf&X$c+lDim_yJJVOILtYer;Sw3 z`=J_SG-TIG53m7#05OW>;wlYA8}5W`IAuz4kw&NuH}`d~FBz}j85S18g;R=)HDYWW zxUXBKXS_yWD%jBSj%uaj_6q^26_8%F!HJXYo9?RH81s(mq*L`j(=F_R^CjC?YpB~S zG~~eD?2fpiKA_a6jU3APMbs0tv(P+v(Etf5X!A1Uf5Kcc69MXuq724q_9mXD z4E0Id2q}qwHms?VmjFf9#sk3CWdK@BSWnb_d=oa~TvJ2*8&HA3IMmx)5rW(V+hl`9 ztVv!xeSH_SvM*;dnaKve3>a+HWzW!74M?_AVzWqckSj_82&Oi;28N^6f*yf^7Bzx- z@Da%i%Om3_bdDw}IBC&B?dQagg;Cwcl4Lm?ESk67R9(gqJfYc1@nVQTlAyuXVKo|V zF=MXIif9dzs5fy{haARSh`Q(j48mQ0r?VJ+!mUfUpXuQH+2Qc8uRcKrBH(8V2%D zK#s5z2h26e1@Jp@0vZ7+;0aCy0mhTZX&B4wfTV%(TSyB2*$joMR(davtOf=e7|1b< z3}iR4X>?_qL5KiB7l8W^cu~N_IRIP%Fb9+DY#eye&^)9!8%v%PfL!9TE)#-HNI#3oT!yL3!h@*<+ zaxk=m*1kqurQ=IdgGC-(^S8>MDa_KV(@0 zAP!m(K}(?%40DkE$dik!NXg-#^#FJy`FnHXf71MqKHabXY!n@@6;|`JHxIC*AG#ld zT+{2sB^)e=VP$*CAr0&-D0575LiKQ*LaFqjDaqj9#G7_*tTOrYrTBS-d}T00?q^j| z$f6-YX|^Bf?tVoxuVHL>UMS|dfpYk$5UnKx!_`rIDG(V*JODEbCmQ9^Xo5yh0Es<7 z|7ZE7!)nZ(9@e;8**xYuF5sGENdH3qIMICP+A|%M^R&6i^07mZatg9A_Vyta>I{7B zoYF{sA+#99Ds&oPLuO%z6A2iLVi43BH^rf0g-w8pl}IxrC!Svjic0|bjT9yV?hWlj z2ou~?)%TK6j&_mU_>z?Ig$L&z?p~zn){V-X;_?XFj>|_~yH!QjiG}L?m4fx4u%td1r8e7o#8ja&%)A*k&5Eg2o9>1PO`^_`Q+;Emho44ha=ad1K7l?RX9?XJ7JFv zbIBjNTSi{x9f@QUZ@UbzGB6iMdk>f0u!KBJ`9Nkr@y7c`F2TdSDIj4K8cyj2ju@YZ z_&?O2BwN(h<_-J+ijJi~I*urHB50v1uVH&N^@e+T+3<-X`RzO_U#hjbnAq<|&jv-r z-`^UBO)VQTV!cx8IpVEp1@%G)pq@CHzMNv=HK=yD>t6OYfkY9Yz70St1wEh}4(L3r zc%htPB*+6;!pepKB?|+nSR=5C8ImU=P&KmSN^%u3Sf+I68bk=)Z&NHzaSqjKBOUXW zVS)*cd4Fv-`5EDKRsfGTpx=s8xaro(LvpVRTKVytS0~R^Fbms`V!7$o%MQ(a0V3^dLc z=)~NuyG@-Ihk#~41MVznFnWMC9DxRHg;$^|XB%N^x>eM+b- zC7oDV1aZkkuZ7|}t#VjhNd!j;|HoJ+#%^#Ej*u@IVm3y4S%Jv_;*#F-9&hi4wh8R1 zey31!KI8KHULi^W%}|Z&&{1799TU)0)VR((s@o|<3I9C});0U(RRFGG~24VW|soZ(pUqL^50XNt`@5!Pg8+NC7=z~EghaEMKC6Z9{s$#x2 
zbG<<)Ic0(!@Y9Kbw=QK~iU*Y{4fxxXzKZhq6e}MFm}<9(^QuR+d-N|T=t|-c28cPF}|mYiCe&Ej_h2S^}e*?4ByL7aOT@m zWt-9L&e9zzXOk|@Zwke1VZFDax;v7%STKIlmPN72Jwvyt8tB~9!>E$E9;0XAE-Jkc zFcRfl4v5`&G%A_S<(sOMGnzmqx?Z?7DKsPdi5P@WcK>!NI%<&~``R5i*)j6?LYZu2 zk^Tq3xig$ah}9tLOA+E2$jyR3j@25##ChIFS5Aw{kUh|V$@^Sj{myFQ;v~m#?X`qk zPY`dVRkM7z6QYg{9;SW|Wi{=hgK#T89SDk>P815#fp9i` z^dD>$=Z>vs*Yb?Vc;sK2b|pnIn}Tqo+u+=4fokSO-T4{aT{&n{kSv$D79oSL7NUqL zgb-0QG>oVCf-tC5_64D|yliNWkc9!0=Q&1-3H49q1ikHAesq*QH^@sA!yVe1&RAn0 z7d%nevoZU7`vzO@L%agLFNApM7`8XbHlP13fTvuO^QMs5a|{0yn#fq^O)Hl1vKi4i zd9aM(QQJJJu64MVs&^{BJ^)4Y37eO+$pN>8Z;2sZA@!`y=tIs0Q?gmM1=EE2WPCSO zNGd}oTe@*%QA&sd0M@2#SVm+>021U1j3J(EmIevL$x*CD8X={uwOr7g1PHf8h#LHn zj&cSIzyXC*(YyGi2-XlaL=kK7*K=x0Zb3ytyVx^t5VSWFn7E<22~Oog@nNZ~5wA&d z6NoCGRdcXKsD%`<^6=hXRVBzJW9BBv^?4c5Nl-O1OzJLi!QNX{z7nykCSQ4>CJtD= z3`uNqM*3h2ChDDatS0^3Bby00cOscmakfU&e%DFkxc+gpY|(OpKWjd<-N;z;7Vn4g?c}39nTzBQZWZFp#reux83ctW*EdV;$Lumm-`r>qILD)2V|u-=uE*yO=#mEYu< zIR9}5|AbH=4}kC5&;#gH)T_$nL;Mqm%fKD;0cD^WGYB3Z4RHKI{{$MjetMwYXuu=h zKliuJp{ziit+V4nSqVObz@_a2luVJo4^hF*=#`ZK-w^&X#C@DN zK2WYjIK)5rKs3a^HkUWW&-DPCa+1bIfk_nq4ghxH#Pd^36#!(f2;Ka z?-S>YX9o%oLxqBug+p$RuB1oCX4BwQGH=p8d(JK%WD98yd*wK^o3V^f%UuR)J3uW( z5VhO_K@Lj$ zm(|%q#=^92i|amG#?O$8pld>5Zcu?iUJBl#5nfh#q)`_LPH@{?Om^ks35Wy&<=FWz z9c9O=J;htIYlHAn=buw}W{#vH8V-^(=0}{zwFi{$8*j+?4D-*aCs(PQPqAx*@fqfx zd-%*elFCW>_cHHD5SuaD39~g9U|`A}PsKYNq-b1a-NOeDWMi*lAUCY}K-K0#Z~{P| z;f>BFwq_8HqhH$)EKo5!_p#mpQ>Wai1j^_5*6Om)8IXLFrgU_6wK&!fWuIGn-FC_z zBt!CHO(x9LRR@Zwt2__$%bxDSo#JJlgeCB$TZ6tK)(@vt_9na0+L3;CtJ_xCw^NW# z(7qJj`WWJ8MnN?mz&?6OWXP7_HC2vNU(xi0>?1bLX_D6|*-oIZ>uu;b03elhhAyVa zuHU{f9x2h@+@b6ha_<72-CYC*3BfBU<_o<=Rfg)oZxgSebXTDFbd=wHMAFi02P-(Z zB8W+f+n~Z9z3w20(TSu^FpGn$3UO>v8?*40T-@+6*zd%5A0ue#K_A&|ub;8#Jo4{h z9%z>g4`rv1NMicxKn3;n2n>3%02VA+MNgc2;Ycq4_NJh((P`wAqVg+GhAIZkY33&q z?KKL#ASkTnD!7tVuj2RHahtm1zlo26Br*6tT!v)kiOOnu9*Tc%eIpf;}1Jl^J5OTv%)ol7)LAqU2LV)RkQM*rEs)?#16z zERW~Eya>?&L^e6__`eG3?KDt1PdSX0`xKUUlbKw%6NeHmr|8)%ziEJT_F9_;NXU_! 
zH{(kZ>ME8K$vS!Nn_6e{lo*6N*!fNb_)IV@b>J>`;4Y(4fKh~rQA8Gv5r}?C|T@ZuQNa>KIbS5LpI_~wZ z`6Pd6^hdyV#(5P6q%vK$G96O+#*Xr;s_@K>Pl*nVrdM?E7Qf*>uB1a&qV>cxQy&v) z8y`hV73bWb^U+c8I$s}*wH=AQ8;SjA7_8p@@Hc0i|65{-pv{${M9^Y=zX7?1uPY}f z$-Jpfkc%4_#~ne7lcH+Pge1a=m-|AFw@8k+{2NTw#gTit^SdDg7hR*zzDAF{M(-}| z2KoN?5&qo(m=V4U9}{MA1Dkgu|ES0CT?p`5$o?w>U}pMCj+eTdOz`P3x}hHdKlMLG z_m_rca^TF+SF2g&;{|*_D9teTZVL8x3V@DhG15hOBP((e#t=PV-(^A60o$^^A|nNX z0>lEJl}Nkm;xnEbWDj~NPi4pBrC6)O5qrqai)51ufG!p-FRNRUMta8s%I|7q4^(_m z2@}|T@_-rqV7(@sbr0ElF(Km|8G!mVBeX*cAp3?>bRKQaS@3|1jAnT-M=tkFzf};?|rQ4X8=6@ z)o8Ld=syz;u363nV<6o@i1#Z}SnKGym(-Vng zkC})aa>>^$uc>{=jlUt}_E5=rxRY;6JJ3Bi_HQC>gCcpo-6ECd4V=I5iNga0L4xyk+(35Y$uz z9Xb!%z|Ac$skYoCS$h`CjfAJCiG8zTu5KMCjSu{o*vSR_S zBM{{WM4`Q+Q1QPq457`IFC%4NMh2h}vZIN!t%Vn~_`4Cw-M|0_c_+T8h%2ephg z|8BHrZ`xU})+=SXc9LHbL99dw)7ONtymzf;R#T=}K+4l=M?56f7GEF_2lnIHr55*O z=a|KX2ow6wtfaJV=Fuybw9hnRf3l}4LCyghpCat(;V~?QZVaF`{5Fa5;sVlyKK@~c zZ5WHRTQ5iK0gl^zgd9CQf(6%YBFz;+`v%-_VxF^PxiVb4vOo~N1mxsb@^OJgy($@U z*8|7h1LuxMi2Acn0tU*m2FgrmjA#_c0$vRu%D)?fivN{iXbjpr2EEkusLO-_)080) zjm%%a;xB*2Uqqv}f%3A+Ul@M$_o6eE`qw~!;e-^GvSgRCAkm00Wyk?+cffJ}(eR_c zd9v@!OTX#wOuzlO)Y}mjO-PUzMRHT0;el@FMamjwN`~e#q#Ac_0%39I^gFN1z**c) zIjKqwAStott_6}3+gwtIBL4eY)|pY%Prp(>g>@Gr-MB3?32A;(2Y+1^I3@4^8=zc| za%i&-v{JX1hf)~Mou3d@ln?IDkpb*`MJijxT-xzL5DVR1Sn+o);CMAwtNc?wtVcEU z_<(ym+3Cz&Nq1gP(}4KQd<8FDM(Z$F;(|N&to)F7@;dt_Zjdw6-{2M9Im7JyX;q(# zs$NKMyr1xK4TBr$j5i}TM2z8r;g0=^4r3czvZpq(z;~-gyzJ+bF__*+r9|6;{|*1{{-@5 zSeWxI;M0E5DsSvxP<`2Q#hL0!&j|76hhbv+*;m8FCaVvxG(PI+adXF3J~u*Kvfi9a zr(1nlXLyF&Co{@fYo3_9Q7%-J3F$>m0sa@x3jnBb0Ozc3d;pjQF`1m_O>~_R6X(9; zuDiTk)H|%5la~imRt}AgjiP55W22P&NQwRot2yWu4+MbeLlF@aoTYHn0q-VY!~X?q zA7~!=m!njtO1(v~E=vjiI=nS^IvcCr1|!koJmNMGoY^<_)rS`>KQnuW&Y}WDnKlV= zTx#>yD}nD%c73^Uq4K7w$;QRYe*2d%_#Z7vATMl)dVQ}<{qDll+A=-s;%Pv70p+vx zkmH0A^&KcG)9^jj|MW@gN$B>;seSDOcQfgelQUnls}}F&u3DUByghXBKilgU=dihd z_ij_Mpbh-wTm9VGWc6(7-4FMAG~@h0@Z(myD9b>$!KZf7Pq6>~ zmnqR-0os0e-rgam1eFpfUpMzHTW4*5L2WE$t4VK~*T|xF(G_=9qgJs04Q#7#|H3($ z;7rb@aPxgTBS(LBBk4W|ZX>RQWS9}xD>Y1g!YZABWUMlZE;Vf^qS6-@p|tu%AY7N;dL>h2V`Hpehz6^IEBaAZ1)B}neqKGV z%O>igl8vh2Jo>d?Ur@BIGc})+ILWZRmGZb6P9rb=(C>;>0lx}9bX~8imHm*0ClM3D zAApYwX~Td~%i>^1m}5YRnB)tDJ8yUoJx?MTpJ1Ri#!*ne^@Q|RSr*a7ptii}2|xet zO(yD#I32;}-Vh?v8Ku4O+UT%Nu`6e*L<~VW3jFT)VI*xs!(FeWGOKm|?ie&LSHqDI zy|&1G>N9eKs;jbEg5O;wjHGXfnMRydkUC*r4lbz@hS#^rM01g~BiPy-=@XmTdZAac zyxu^dsar$+ONRdyt2*0dBoRYGjspDe!3yElzf-OTTkpxiaa6)E`XEe)?t3KL%#-R{=F1piB>xD0gzsUdy}=&6XD&{v@g;CYk{@^y$`{H9 z<54TkQM^e~ChGp0Y$Il)cW3u1+y-@4Gi+|HMpECQP>8kc)g}^)VpbMdV`8f>{ICvcoijYXjlV2Q6qgc-#wD}ciphS3Gd)>xXJ8mO;JcX1po%Q+}xL#=-p zj5M<<(jn4|%25;07LZKm-KOM<2`xy*^8ts2Q+)h}&_ElwubY z$!L=oG_=D#KWLsq!z&aVYFjl#M1@vT>pOOsTTt^71&3Nx@e@&nwYBpbQsbWgH9mNZJd9T7Iv)kG{Q7M<{D!~eX}uReN??~_;m z!3-Rv45Ruz2QvaQ9vWT@qozIXk*wU0flD87(J)XbEM*=YL#{Z;HqsJF z#pd(~>|P1G*faEr`aZovoYk`1e?a-v(yY;PxtRJscUV?b&Qp-?7(^fPbe~XKoka=w zccryj4wwlnRsK*HHc^hwM6EcVQ+4uw>35<-mYdbj*qRhl^yRSU(u%vVdn2v5De>cq zu&V|gn}T~mG)dgJ1yORvmRcp$|2r*V;f%D+l{|Z$p&t2m)I`lEYe{Ee2jBnBV^gvf zhx`HKyIiI*Nk)#B3QFVQj)Mj6!m;L;CWKndM|XoXN6(9BUjCgh)Ux5)t)>m`)5B#H zgMWh#HGwjUpHN*!(XaLkdeq#?D1Jh*Ar~c2fn|Yi8ATXRn-HQb;IX}@?IaD4VwDcg z-++}zu|_9tov^SeXr;ouk>|fk6ILp$8+p>#)e4*bb*dp8k)f_i$8w!GcS&dDU7v2S zicsRm6?I^KmAoJQxF`xf%zBwrq2v%!-;h)hrfJZzCAeo#bHW<@#CkK4$S+%Kf9V}Z)j zv~}*nrl^$)%l~UCK12C_rK0MpSExrLPuZxc#p89tZ|(+$Pn+6*Z@SLX46a=rmgd}a z&1+_A2G}EG=A2_h1sOTIf=LcxI)hpzG-|>M#kT*R(!y+%|GZUNsK7uwwXFqFEq|C1 zbq8;>1y%PyQ2%xl{mp**@6z;jgi-lhr!(*tJ&XouY%N{Mui;-0Lm>kJM6G)hSN)_15Z5Y&WL(CP$nj18w$h1tr4_xqiXVRQS*G zVRTabZ|}JO)OPA@E%>5?P2ZaO;h}3}GNY1(w27r2$*0R3mj!&|oQ^$TbY5g9YV-jw 
z(e*zr&wgC0n_D|E$S0@PfaFDk+7Ji_F4uv}lXLMyB&ib}{73>^c$+LcbuSKfx;Ya$ zk#$g@#xEsJFY5!`K3R6)ik)uFlt^Tq5U2r1s^Y@`ro@$po2c3{u?8nt19VpfpxDtM zjRt9OC`egBdfl&&D8%~cQhFd#ql--tl3-s4+7wd;WL5sRT^_yt{pp_gpHs#^t|Lh` zjJGhE0%qEeI)2YJQco;ev8lClSE?rb{XE%&G=7Y=`H`&-gy$P^6D-dZAL=fCvPnKi z|42;s8twP<3AVRXcS@;~U`=QmGdxHXX~(h}@!>uL^W>gs3p_&0b|ZEo-TlD>45YiNCUJ%3LPv9wKdL8uNxvF$S^uTm}DmC zn3Mj}(Uwo>x*OV74%jr}g=?uYRw`^8OVigK3Y-2b^q<)`<69!ZNe13KEQ}c@k;~v5 z&rphJ%&{~<7%o+Zt9peJE67`a%jsa-;|IN{Bb`K_p_i~d;4G!l$m0Y|@br1~1oMdi z6U-M1PB5&A0E{Qthl-n6l6F+tQBx6j&Ue+e1=xS zd5wQ5Ygtm--ohm;X%=_^8aM#b3Xsb6f%HB|F}p#k1k$#4kdlFvtJTJ{6Yg6-5NlYt zkTMDFe>Mf!(qT7jm3c=^``yRkGj$qXd}#Wc9QeOVS*9E4FIB@vrEGhiiPwbp&y$tj zY3x}|d#NnqEC$R@;<|uDSV*rYU%2|KHwmQI1)>qp?Iz7E10q-7+M?a;YLC_U7xNA{4PtnzB*vLZrwLP{D-vl zKet8yJr#bGS&b`VMZ7}eXfF63nf0F(291s^s84iDP{dA`=rS5s^RpMg>E_g%*MEAf zPE^DF;qxxA6A^V(S{~>TwMNYw2%K<-nRRWJQu|S&l>>X6xJF%0ah9BVAk_e=p)N>e zKpHVRuxGdDDO@X+F|Q^#okx=-1kfC1keY+k0s&H9kTS~wG=1CA_JPo-*Pv6JEq6uw z+OS3`7|c~;`&?OYW=z9Nc+_;z-QZuL9q!}&Gr}5P#G|HvMMcGcnK+{x*_nH_)XFUN zUlZC$b>86c_@($!hv_7)7v8`%x_ory!n;-8Lw+ECjauw5Yykxb=-Nr+(z^ zbiEoNxiwdK$-^9Dm#$+ffRBJq}CvHwE?LbNQbOI$`4Y0OOR556lo4n zjYGa#wUm`68~r{3N-qsOhEa!}vIMKB)ik|3zYa3~!<)F~n?meIn&G=n;#HIGDsTVG z0-j8YSSye(&OEB5wk~ydd1-m@Jt*m}+#WysDSz0L0>-emW=sA`>H!|yohX2^mss{z zq(zocr}5b4a)-@Dr58oXJqOjCpy!7#}yS3?>@KavM95G_fcv1~>^hNzi)wOGLK6u*; zr%M%Q-)qi@7wf&Ij|bPk&M!Q<{K;JF zMnBhpcrVw0*^U@$?d_bO(U*J;7iVWtk3x+w|I@u`83{7f+1_6FcNy*L<=G$=o3wwiIwqDhz z_B&_mw#SE+sNKWUy{#2LRDXwzaI5c`k!r)PXH)6i)~CI%lr7H*s|HMnb<`$HUf5VymgYPqM zy{1bmCD@KqQNCNA$1m7#9}l`*_A3^2x>#vuwz#wAHd7^cz2`?!Z{AIGQSW^1-xu|! zLl^b_#^L>=sCVAp+-z&(Y|G1gjZ-g=D$Wk$9v#o@&ChSoY>?KO=YJpje!R9mf8@C@ zn;9x9)u=UhnmTsjLTkk3OV+pjU%r3@ez)&CHTO5Z^VT?<)TUx83aCe{F>}At(edk7 z?(tJT4PQBYns+a$>+VHQU8qYySNrB$qxfz$KBA=Pn9Mt6m|@5^h=i@Rj@LIa_O{QR zRkc(?w?5^wo0}b9%lNeSq+;5C@TK_AXCFOuP_oA&am128ol|x;*{3X@_HT5?^>edm zeNi+WsCWDBw7&T%sxU@NJ)%&tlm@|xrwEMcVJOxy$?sJ#jjFlIm2vnBsC*%T)cvLG@5-QduLegsZfLZ)KisX z)Y9JC_iw->9h`eLA0KY$Y@D?6hwrskd|rIrJ91{m3e@gqbX2n6CVg~WZ*7YuOaIic z-07HvapTEzprY&K2TA%fdp~tyo~#(?8Rt}`qQXeH1;M>~7=mmx{^sIk{o zRA!oVN4KxL9A8tfJmm=00e`{b#iw3nGzAP3w`|7zR`n?9B)YnB3q4VmlgT3*nP!0x zr>^QxUsL5%pWeK#8oxK!G56&8w7=8%cB#kt;JUO4+*LCaOYD??jXlcfS1I?@5%BjU zob!~p$~{jgbuhv7-eg&RsQP5NEuW*&lLi9c=X_1MiBHO|rr6FrRIdgqgwG1c1U~xa z{|@)iQ}iC$i^|jbnyDQWg;L7*xrg~~pZs6iCWXyaaf!H1HAXs{is0(CZwhtwO5bHU z))qQh(&-;7u#c-K`YdSBKeG2Ao(m|GK6g{9Wwzpx`Q<3w`%`lnp^W|~8(YDFzTf;; z-Av1d1xcRQ{^O5gO-z=zLN zrk=x}FPLKH^2oU>MpO_DQ^Fc`tPP?%#oY^fD<~$DVNIa8supj`Rl{?x%56Fs>wwdg zYbY7(w?Jrh;!#jkK@sYb&!dQDW68OVI=Ti?>uSTMT#Y=6UIlhO)j!#C1;tFVWuuOU zLDY(xfGJlYPuIu&U(CH=qJm;N*|JH;$slS?4TzzVx%-n<@N{_;yr`fUPhM!$F)@hx zrsh_}XUbKKP^51V* z+g?()fZ`QS;FxG~B_-}K~~(G*HO-)#*gE2w7WAE39aG&K;??MD~-VU1%bViPW6^aY*yV$qj)I+2(Ul^>rGvc=f@M?D z#h$}lJi|lk| z0Pc~#24Q(jou1iR_=yB6u?2nSkadl!n&3<2m}GbwpNxO`l33^Jo)OQpg~4aubiDDR z@6)1$6H>fm4HbCbzfz+@I&*bROZ7{pj5}R4lLbm6>W4n8i|t8GaC({bebTZegZ$+o z_v(Yp@70!%Q^{{$EwS2~XUy5ve$DTw%E;T*?a#PTS^3=H8~6Z-8^+6g>{l>%D^dGB z{sn<#DEoLJ=M&Ki8kTYl!Ox#>uuDFvXFKCmI)7{QZEnol8+7VTn`{LN z$1Zp8*3LdDM?EpDd|%1P_?{Rg>C=0#K_*Mzg%Z*{b}5ic0t)X%)_s48I?O%&Q0dt; zIP<+UylL%QxIsR7IlGB;v!k)8neT(G!TLEr9wxzV~6a(yQK!bfQplo5N0M!=Lz4rBxl&sM2rt_N-a!#d|$a zQEG!c#@UJuhr9Bc-$rZtMr+#aC*Rpm?oK!@JHd}0BNJao9xAm`4u~GrnZL~0_zXO( z$K#at+K}0R*wrq>!kmo+8`>k7QDFw7h>WQW2C0x}RYW{($K4p8?)a$@=X~Dx8|J5Z zCaCZItv`=(OOgEOzl0O z5@{YTN0LWvgMX8hz%mD>si__*jd5q@J%>w0Z3O#!duB3h-*`9H z-@z-Md&On(%rMv`}y ze4)KU?u3|wh)YZ1nBDV64ZSDi6PIsvf4C-6eRb!Zt!v?4#W;%kh&{L^>GN2cr_Njb z0jpZl0ezm$wbT1Wo1=q+ZwDnl+%^+cP~&)pdkTYTKfT6d$!WXL5KjBhdtJc0KgqSm 
zPq{x+_X+Tn-SsaUz^W?rHRDyd%4Nk`&0cfu@J;QIL-UZmuz2024H43fi=uOLPLY*+ z3@6YzuA9BZ>B=o6GP^70pYh?b+7A)0#xN(ZQj;z=DCzGY@7iTe+j0~iuXW6Wn1YB-eLhecgDcH|s+ul|1jC_ylk4Z#w&W z`5&XkP6P>Ed8h67#gjifU%WU+T6v4F$+`LFa}H~oc&n;QqBWm*LX57y=#QXhLG;M9 z6-_nIow%Q#t(b5@Km77s#`2t>giZ} z)YBdIIGZiWgWl4+BBwIuPE2P^>g~-L&P%GIt?+G3t8|W;dv-TE`z7~G74MQ(9FQ8g zboai;FJksG=&)#_o9c4ej%nzkrk8Z~+&GZkZID{Yx+UH`{t9*d(jv-UPWAHH7qwEo zEtCn2Zyj}bsxV4Jv+>2~OCrZoF!@d~`Of17UE^DH-1kw>1>%Dlm?gH_&f*>)nAi7Q zJxStNzmvtE*K;TK>zL&mPvX}Ehn~~*MnCU9{>pLTt|g|%b){R3 z9;I82&8I8t&*H9skyv^B{_twGwC2l1-T9|YgX<1J0pv4Q9`__)gyOZPVXeFSo;wH5 z5Bx_AB&-=Ug~Q7}B@^wut|U9oe}Y6MHIAoSESv(bYp0gmyO-HHd<4=Cy37|YNW3BC zSdlWk7%rDk_N$$%^%NWyL6{%h8+9US4Pi%fM^V1Sh+})$wZGByt`mcKF zo2GlnTeT8*p4E>nW@hVp?8S>YZ(Q@~o<1gTU0gej6!(7Mzo1?&_$E8BEGcoD^fAr4 zqG2*xnA&@K_;p4} z{dXbqS4B|au&ZyykMuuHuX)wWWbI}=@*%(Jns|y6)9BcOb#fg0)$TxXC~5uxezoPW z=uxJQ)Ca2<=`pmoL}THxf;W_Du5Gtue_#<@^^@Mh{1*LG>f`zsH=3z?!Q>yiYU9@X zZh57jbvUtYot7w5cwo5)eR%p3Nib)5GRXdFeX5J|+j%lV6}|MNlPog#YHsB7GOnY3!YhjBGqAcp zdwbuP`K&H7$>F1t;ydCMo$5Yv%je3S1L@B^7r)4SW|1aZzcnj!a(cFvf5|F!C!cg~ zNwQ#i;ry$@gLXRKH6lq9mdG&f&gmv9)deNz8mYV_-e&r;=YOlP?~^vSosCT_4-&F46P-F0LCJNe=@@BE%` zkJgNFF8buDQ?u}L%sFp9v=6b^UJJ9_+1guo`Np`Ysv_}PqA~Dpl2p&3r{<#A4%@mdr9))?ASAAgcoHc+*?&9$rZcS zH-~E^U4W5hh?!;_m^kMH3BGrqpbfq?+P`X9r8yvN<$Lwbl{%8*&DtFb1*r8czEy0g zZ7yzsOOk7np>K71n?2|r9Ug_hva)@qY>|(Iis41EFGR=s%zATubvSjMaejG4ovZfU z3I$Pgl&!e>nP2qw(&)OkgA;w$gZ5)_VHotS zR0~$H+0l=CtZODiDh=13h;(5oIf%U#rZP8OkKd;)HA=f}JvPzZ_jEwL%k$<<%|6M4 zKw!gOWQfjInQeQ`*eBr)6Hf5jMBHPoOh=Ai?UfLcHsf{iP@7F$-`^j+{_y3fZu*@k zg>hE#jQu_tB5|L$4VOlD?=4;m9v*mVFJvpNSs<`uFCLSQ$Mb!3fAqqgE%|ODv3Fw1 zZ|}v@k%%qQ)nD30Zctnf@m+Fbw+7zwd~-LY_uZ$JQ&0Bynb(I0wv9{4cUo_YXP_vb zl;hZ0$TSTx=ED8%&+oK;!hIK4CA4ty@DP=ZRAYSUf4Z+01lL+ZrK(g@V9v9 zT`>pF=Q6i5v^Q^>DmT1wt&$igZER^M5*#Crc{(0fzdwBZOh*Ay*Rry{^-a+?HOJ-C z&fC`GqsqB*)SBqAgRslq{KG7#*7CUp#m7KHW-?OF{)wq)ryHoD?y5ue>wTA}qLYr` zBHAQ#>(0R0#i#9-8R?3Z^T4y-T2G(8^*?PkTq8~OIlH#H3}3k@b<$c>Hz>2&TD;a; zS9svhm!P!ej}T+8I(4&`uCH}G)p)}#Lhf=*`pA66s+yI9@%$l?j}Ko*y$Nqug^2@3 z>e0c5*(=GLJx5diSg|KE9()`HCdqqqFnfx#o!0_^gkI3jchZ!JU(JX9T0 zf+;7+k!?$-vv$Ip69(dQ)2zcr?mP(NvO|5x>!$OKyLmR={>8sQa~{Pu@?y{IrdZ*# zbL4DzYwbxN0z@oJw(r(m5WLJW=BZ(=TF*jG6C`3Ie`)-shOO#z;Y6`0HgA!>FQ`=! zD93DG9Lm7>!O7+P|3}rEheP?k;ltUplq@BMu`iLmP#8jpA}T6bLRyrN>;_p9*|TJ8 z5lOZb$ujm`LiS}a)&^rRW6bj0@8k3R{oeO@|CwVB4&#~Ux$o;buk$>w>rRe@pWjC^ zvPg(81F0~yOnYgFoHYJ&j<3JN;X?SF!JK4u^atJIXkJ0x1opxVZRy*ZG^6|>q01f9 zAAKPYA8lypD~P0^o8c1iDh*R6#K)yy@DwAyU`V}eupxKv8;_3CDJKQhn&cZ2DIFRJ z!GSp8uBA(YsqO`LZ|Z(n*hc+4=08V5;lR~hM#KH@B@0=c>R_JW?%5gF6(6wJO8WA< zb)Ar&_-@Bivuok~XN%eQhEdhd&2qouyN;!W@W^fE3G<(JpVDdn0Zu~c?P&yH1b+zL zCZ0V?)cU6fkJkr!f!Hp;+W5;{fj7#>GVM%@le(~3332z^<6XQzaa}rVU7>)u`l<>f z7_j%vGQ&rR+ssyjHwrYg$q5XUAlvbG33w(Aj3D`rkCEMo*_G>fbg11Busw^}LZ-!? 
zr6sUXCg^r&C)bugL}oeIZtbBQ4`yGz@jd}G-AVvI)=|ho{Iv(@c;oy~pi%zJ1m~tJ>vLurNB_9M%epu_-$uUv-Gkb5_X`_SR&fH% z^W&xZqIZt(q?qYI$RyDdSiH&;x=joCR>i*mfSf@v2d>4aN-8(}e-<#DuL;kr+9z;s#x==I9Er=?aT>Tww){=;H zMJ~|wNgoUN4(TZYP84KgdwT*d^jEO}asNB&&lprBv4?mz6|yEv|EaR4FR|&|F=-rM zDqyzRl^?V+dmNR(*A6}@5D)^USTeF$$hs40 z|Fn_DUcvAkK55M^J1;~q_(Xtm*PiM2J^+%X3Y1btz zivOPONZ$^bwo}Cp_OY0u>hsBzyw0TpnDrk8)n{#iQvT`v2Y`k;tmBI~QJ;UVyvn1u zg_#ADe6qY>_Y;_wkL@Pfg(9)7Idl%~rFoehBPDumy)vaSKXg_TJl(`h-8 z(yzQnvv`8pDkc)2MEyWq{pLM5rc0(@}fCDVC>+qp}3t%EJQGBtHc zo9te@Mk!x6I4RFI9Qrr?WZ#N!T7FaBjlC=e6VhfO5E+BYc(9$dZ^8WHcZxr$;9u%jVpt5ZZppluVbXy-Yh3+L~9|u1=TwdSG}Y(TszPULBd9 zo|$pPTsb`hdC<@c-Kiv?`hXwEq&kN)Y?|$y)_YoyVP<}l-No7}qU*#QG?0IElOzwagUtxavdO2h>8NMGzY1~%v>=B7d7`K-qt0CPh{@IzD`QVe3iBz6Sm+GFo?)t(f zHSNFYDn{nPdzWrM8{ppoc~3?#7|fGWdk+zjEP^;;IAEUSfSpHvr=yB)xPlI&!JiN+ z&&ynp-6Tyojy+$BE$(nW!l~3Ve9i3A>*JDYpE?dF-weMRk?-sw-%wIBVX zxh}wHYd1vu^u;7h)=Z(ORM37B936T44fMcC6&R;FGhz|xOWxYGBmMur-c=*~1m)~Y zy2s4Ch!u(bzdJ@%gL>PnxOzXaK7I0)UfA%~V*_nkPqu+5;4RN90K^|WqbA6Aw6Y?O z9eXr4@X6`&xi7re6OdTdTRIad+moM$czWu@V*vJYbH7%-SLR3*C^#S4}OQ! zf}oZN1PZ-w zun@qbLQtUsW}Uc<-k)@oYJI}bXHIkr#XTBGSf{VIP?jhjC0#b_y!siMe1Cg@6_w`k zE)FQ}Bc&T~vJ3UAX*gtJLcASj_`~fR(`tHhIWvLF^!5>f-4cDfQ_kF0f>y`Gq#WFv zE3fDR9g%eU7ZPGyVAdSI^NMQ`?3{fcu|T z6Hu8|wcPLd?+$q1lmMQXKLAR}?RzMeZk0AV&eC?4?(^(H==SKX}15s8rd%?(EE9YI>lnwR8+O9v% ziglf6D#*IAZ92cFqigzb6UvR|Y9W!dJ5jtPv*}qu5K;l-U)LrY*3*u9LPzCWx`M)e zz|$|^!OWB>J63%_t|Ejl@ynk1S<&sgycb22Bp$HvZZ!y+ymVdDQ#I~BBB{o2w#j9^ z0{piPN9YO!)GxBf2QpAdxky1pb*psYw?F^VJh|(O#(^@h?ez#sY3OKiWj*%S&y0>T+d*u>^(ChNjB1!@XK8LjR#5ZosO?(!f_($)Pi{3|$4IoY z1R&-tW0LwksVyW3KBg=I7ybBkR`HV$-of;H!RTquLw<_WyLO&ot=@-<{ucG`Em0aa z8nVe51$Y%KV{k%d>5y_Oe4LrPM$^nf5@oW1(!ruRQ(ODSex}kwpNgmNP8JvArYDJg z@WElZ8zWl}w+=Ys$$7_SpakAaG`#Eo0p;rKFeoo+g+Y0g^FAn-+6U!zFevv97B|qN zve($XP|uVazj%2`RwArlnq?dO_<=?~ywNZtqQrPh6!NHrm{Jx0lRLJ7L8|jBgkBY` z5h1Fi)2Jj0G`;uOb{f8?=INbvo^TM&I=y+a^U^YnEkIS8elTF`=w!uxINMyUzjv?f z^-$cEKW3!>HxEj?DG9EFopgk1x5@*Fm$o%?@kg!}iQP6ZEY_J6;VDW_o>CA`6RUh= zXs$h_D=z(J+E_eStp2^BUBf1^@(KgMH27<~uj^PGoYjTgsHo@L;MXRczl4DC8)pu} zHX~E}t|IrlfU+oW&mi)woY!*lgB#0U3_069S|YnrDa;$3$Cec3D!}d9DwG^hV(1{` z{i0+30eC+oT^R$fF~z zi(BwZ+eO19S1oH?_0W$`LQIczOtSlQoTKhm+%s*W;Vpcl6CKpn8UMgYeum;va|#EJ zUBWJ*;N$Fo0Z*o$jB^j zHc~1NV5$Oq&>MB#vI}^1??30kqsR_$X>5LfiMIi!H_qW&E442?gaa0-uZ{2qeJgpooR(sxlR|pmiHW-r0t2j@;X?^cA(yihWLSc`F-kV#eY>-IzezS+pkdR~;+JEdO z!`bIQD8PuJN>OMccKGG(cgbo5-?cfup!Hbky>Wq^y)ks|9L7?;t@^#o+^9g7IFvWJ zj;yLIJ+OWoJpL*GuG@x&}GMoCg=Z=C(=pyx#!=rZR0nTegFXfXwQRA?Njd znuFin>JtKdi0?9x+v(wx)|F|dMuT6K4jrV8Um~(V?vc{#V4|Hj?g(^;gjV9ft9ezZ6G_URvB+a5cH`VI}gz5J^cXR z#@d0ZK?hIV5yq1=T7;gK0aQ6~!ydS{2_sAaAoo$odS0*X)uDNPqe#})$X6r&rDV$@ zf#g?wM^h|1VqclEd49Zmp@w|bbI(Pn=%=5c3dPV$9U2wOwWu6O$^Lg(Jp>YnQ(Q+I z4U)+Y^lp_i>b6UV2R=J#?S(P4d2yd{`&yBkp3Z5I)^$$(gl(eA2}M~6jKCj-tTQKZ z3R#u5S=?#i5R!Yl!T%wX$(7se@KR$#f$CpW2O50qtVyWNROg06qMv7Tg+)nsKURm1 z!FAOdgD357)MG|{t*8w~{VknIy~MH2V9~=`tWTIdJ1uP~f(2x<66@hyw>=L)QK|xN z=ONDh$eeuTI3T7vZyiv6BY)6npY=j;o| z5K*$IMu60D*FGhd*b&X@pYk`howi)hjK#?v2N>PD$*IRJ!4@(Z;WH#OVHcHH6$g}3 zE^dHCn|RYeeaTmccOy;M506WTUkBr)*v`6l3o|@1+L^(UyjsmVQoP~gbg5+j!NALs zm8rT-#<@X|$H((@KYID&=gm;lWLIzn9r;O?ke6->^9sd6d~gj_+D~II3YoCD7AXDO z0-ee{*P$jkuB*6V9c*{1WxI$=F4>|>APO}CBa>SOAgY$qSH<|%m!3Y73;Me!R6*_j zUNc?HXrw!3>f((hDOyOY;Y_kF#LH`k;E?gx)w3btcNw;oBXzrmCku~>=nDf zSrB0zL{EQvhH*y|@)$wI%!3-sb53*{b1iA5Ju7-~-CtAA+P~4adydw7TXMpC)mdb)fFkLFBDBWVrCYO3Sm3XFn!- z>#GOT`~~_AQTyf0HNCA!Nd^6Dx(_%%3^ts3$>F-De?ixkhj+eBI!GL*r_dlI&<8&3 zx3;0Co<2D=ee2HUvkV{G!6dn#_q*JFSva~6P*ShxeOP!WlbC$+kFd%0kWyX!rN#Vn 
ztT76!Gy38Ne~e5z7b)yr3SGZ1s2W!shBM4}L{LA4y9fRgr+%-+`!dFY|H+`tIg>W2g|l%CYfdp9^74x3mS_yB|O^#pFJvO z^mKAL4&Ay!bhx|lzn>kihs@&c04WZtE)Hf1bwBUMJ!%ZVqE2nnP&PNx$=tE|L-U}& znpHtoRgi#cy;WYG>)k;{luIilt|$DM}-5X{58M^l-yN69-Yg0}}*+PANe4d@PL1BiN zjk6JbyF=%Y7Y!oZy=LF}fk0tNQd=nm3_)*cf!FxC? z84VUu(Fpkt?OJrT)ICxNzBFNCufBMJMs2{uipl8ijc1*?_!7r^9lIn-Ieq5pv&JWQ z*_*bYO&SK8+S60Cx>B)<$%ax?j;q3F4Jc&?3z+l-0qi532MpaRO~)24?XF+AQH8na zf^d-z9N7KFTP2Zx`hl@mW>8jgO5a#|P*rM4*Yc_7%WHCT0As$1{rYD|%CR0oP9Q+K z=qu1KzRC|ks+YOLp(Cl;wkMBtAF`J_awBBMhiM=^#lh%^%j$M4x2j~&0#gk{WKB}g zDdrZ}fx{5bp8ahLg87UxPwDC@tVHGJ35qfENPFB01J4H?Fr{}TYr^F)~wEc*h$VORh-CI$-t9Iyazr*r92W)v&{2j)`12o%0A9eW?n; z#Ipv(2Xn1hUHh0OzXVipqh^9bS53RGICtKOQG($RRx8(0IUakb|22mBLzick-nVin z_Hq*igT^3#uOFD`0k^0GOaz>;Oiolo8lQx$(;!w?*5VL@7zWOemM_15jA&4Jt$Ko( z=5{zRdg``QCruVch!-PL%mO67wMo~#x80h4Bm#e!bk4j;eZ>ov8( zdYP}w*(!mo=eC%??cLYbO3%@Ir}X=`@^bde`%ZL$v>*sIwKh|-%t;?F5P=*7*Vlpk zKZi@S<^B2|c_W@Z8*>c19klC=toHTEi0OyR>Df(EKyvkDv93)u@*Y{RtWZ->NvXYs z0mAjyNBOhAE2F!mF49p>aS9Lu-ZW(RQ{ z!*J+3ddcqqOR{0;sBtTC{oAv8KPT3|FAb>=)6$(iy{~%NUOr#lBzAONG7y)^=RCZ| z96kK)br3Q?xx~>!Sk3C|tCA@;j~6mnl#jE%vhjgVPYCmY2uicU;OJpe*alw7cL+H( z)+MJ5GzQi1=r_^2`-M*`YGMEKvNNJRPzCNs2X*NAzxge%m~wS)_~^3RRuHv%k^ z`2kWqIDy*(?6NqJ4(AG_ZY#56{+~w9g?zb2+sDgWEP`j)k0)ITjM%YYx@hPXBIYd3 z_^~;%4><=fUF!P1>KWJtRL=Xz{}@P+hH7m~UIF+#SGekW(AXoDrL9Ti5N47`T>Tlzyk5=$MR`1G0Ne0#KhRWG~g zsMH=0{`u1%ra^w|)`1(W!}0h6+^B}cQH;0mCRNz(YBFLwB>F~HYxJo7p^~Z-*D!JX z#h*{mL)dIm7dq-N?}w!csvHCcD;Vw1u!v&YQMG%>>l5%d=34?wcMkBT3FgsH)-X$2 z4c}abSQQ!BBLnY2UOjpjVrKl&PMrF8zacJj|H$Xg3@4(pqF+dVe<3Iei@G@)^D(4% z2|%ArD~mY&-x|e%s8b!OEMfJ#2X7oCEgvs;DF*{fw+u8_qzaB5CEdGHrn=U5LgD;c zU()SpO;K+zr}@yjHr>8tMu5j7mSBw(4O#dT(2;Ufq(Ul8>>*$VrOfxvrPoyX94Fh2q8Dk!lRc&m|49D?HBZNZ%|8)QZ+#9prUH+X;Ii!X zO*kS50DMWn48^c&T?bj+N)73s&DSe~3p+ zy!q27B731Ibo*n)<=a$h9~S3Jt@*e}0uSjZeZAFv+OZ9Vg9BF5tm-E=l{v3(Odnbn zMO;cXJ$01ikE$r9oEAQ>m?HO5JUyweEv4t4*~PS;rBi8(YjU=rVz21-U(?Fc*bbE1 zV;buA5^Raaj-Vt{ox8WV(VdP&V^L*Cg0W?P%}7YAz23vsXr(Osy*V>hPGnHn)UO1~ zvo&iH{HNn+&B^`1X0o=!7io`lKbg0LSZEq|4;O~s?i=A$>D<3q^^{25}68Zxj_pL#wbX^QddDIZVDMK z7N`q(+>nB{RUbqVQ4ba~g8Lj6ZnTW-wW;R*s!}4=A)e_t%(EoiY7vlgUiejS!rn6S z&qy`iK2PAZ^VHkA`#efBx@|P%y%}gRpN@zH9jXt$gO1*Tyl`>nTrp<}WF#?rz|H0U z-$K2!xXa0}$@~X*$eCPmBz~S?LJ)b-%7SL&Q$;S@2Qk+3XiB$=1I^})GE{s{2P&pC z5*&uf9(>VlUO6U3gdxZ>0it}_P}7;RL(goapS8d-!kL1?&OiD?FKLRkkdyv!eLjDBJ+p)OT3yxuEd z^!_nTaI(5gqcG;CUnV%t#mH#JXy>yK$}*V|EW zI9~!miy5leY9;v$?0GvuVOB#R=oP%~m=6b`8(U)YIixN|z$M>0?0+E^HckCAMj5pOm4YJ|gy3u#t#J zPc`rvg_P(pDC{I#z~lx0uAIV|xLy7vLEdFKtGV&{^KA|nRZFd9b7PkIIifwYFJ!vY z3Nrl8J+U_bRlXyW9!|Ef(D42h@}tO?jv!-^^2MD03mI?{P2IGxkI~h zS9E@V^0gHi3w7bWE&@hZ%q2rF8*rpSWqWmGskDLL##+JA$ye^-OERUwV_NN7i>p%@*&wfv898nyZ z4hb%cq~d8q99MNec1%O4{p)W*MTB_je@ zmGsJ5bNvtjLD}5&u=&81IZvIPKYk*=Wa6VmM9>D@@ssa4jT>gw%jt-SD?&qjD_GPa z2u4arL7wZN(E5JNLdzu9VHSdc-t=8{0-eG7dlq&XESFxMp@JOOck zIJY+%T@BQdvM+SG^*TshHq!+f(jJF!zU9!>Qh-TEK|?kOUC0=_y-GkU5|C-861%7? 
zvK{&Bd34p5={%+wu`DFp^_<7}Fkp+QzF0nh{P5j>%30Z-+jZh zs=genXYB#uBHa=^d(YW^4h6sW~nyiRAP zF+T36JJ7RYJ{w7KHo;SY4RohKu(Wj6E|RxcYsW@Qs`=j~jnl^ zw~?@$hJ^ei#CMBPKPe)$hzs3F^Aity`muS{>e^MxEJIHVq20N#{@75>p;p_KUp1!mk>hI=m2tt`FT zo$dsNZ6$d@8ReYQd`ikWa&ad!()!xDdtJ>sczPF4RbkiU(2$3SM(7LyT%QHS*KUGp zleguN$Hs8*-jBeKHcP=7KE5w(h7Z}Pr91Fw_|;GT8Fr8KHoFU#w;C228c2;V-zFPE zw0{aS$09mxqjVvgW(QDMqJTrf9av;kKFlt08Q>QtY ze|+LrsK<|}rJk|MIiq}q=}q2Oho<1XhpVPs+a253@R&T9nB$>G#S%BxDG4CBThG#i zhFqrt+1vAI#gvN#aD3lJ^x_Ft4Hnhu-;ryAZu7JzC3SuBi!GV)5X&cd|JYo+H))MD zxnx8}y)vzlKMnx;gIanAseBvr{XIBK{0+j1%P{vKuy2z%3Ho7d`5yQ+HGM{1aBmDn zeOU5-O)PXUf0%*HmC$KMq;s?GV(->?(yzrYB!-+an45CTfCKTa2gW0$SxLUX3K$GY z$U%EdSPXVYyyWipN&umQsEsvIFo!ppi>NuO17UcR`s_ zIuRY4Z%qRqyH%z!c>-CR_}vIr>UN+{?kEZwB<7C_R>Uml^?%}n$abGGU?v?;sNQWu z-qa0xAa?>(QU}X0k00Xv)E*Wu-~T5J9QGg}dhNd*od7bg#~1PFVIz&5L!}MbE;3x! zZp-%68&WzYz^Gg^1w{0) zJ=Niabvx>qsmu?M(yd}3BdTzInD3>@VfU8CSD;efX>(F3uLyK z$jTgTxbyuvJ>g>;8*CaF%~C-$VTLs6>i+s9^{#-+A-_=Wc#l!v>d{&ig!!T1);dSL zgvcr1dGEoIfqj#~a&hVTs)mrt2Fym}MeS-4%9MXot2cMi+TUH+7Y)sM{(Gcs;9y{1 zjz5!+I~Sky#KWU1Yx?DKr9qyOQs>_1;x;w+Im-X-slKexr}(R73x}GZBNaXmLbt`? zHe#t%=L>v)lm^QchK<-x0|A zMsP@51!Z4}yP%W`_6wiF4pgSrn%UWjo-t7+gk)9EpG3tL@fsJVpIomeRh8|`qUM5W zn5l;P(%M|yBsH@Uj2{3F@TU4#{G}DxOTKPKy%XYVM9KCTLj!L@#t_#ZNcV0>n;(Eg zQw^#5k}7|Fk90QDZDk{g&Y1*>AY-EWVZ79rf8673@>wa}3IB3V3Mq-QKpK->NK(~*{bM@Sj!`FD1dTm~A82pH#@9WuV+ z8vKVrDjHDFVlozO=e|kC(_1ij#ArCpuOHSAZ|+N(qX2d|r`n@z@BbN1c*(UgXAI{J zv@RrWRljF>ZaSLV^+j%=Y%;E_BR{2nC*$b@zK~|Q6`=8`dU=8@Y>g+EZSA5M%KvjT zmI5Ta^to7FdblsLZVfU;v2P`)jb`ZM&CCvox~LTx^L-l+e;@hv*^}(|GR|Iq-pe>t zoQ*A*A?K;BF-+A$0N^$<`EyCAt;HQysLKt(w_8}2-{gd%a8M-Ia{56QVkXxgl%0iMzzwUg zruILt<}B>hWZm~_KHK+dYQkR4WTH`u!asPuufkqUN7$?R%W^Bl@mum0xC+JxZ^E3? z>)HbH!#P6U2)O;~8|0P=f7CY2opVWYkV^C~eZ|P;Kh#Zm+*y|q5o5Aoa_dz@orf~- zUBTL<-Xv8-7gyV>|6_2z(T_fZIoj|g@LZ)P1E~u9K!aM_|4WC!z~>!QZk|V!;H5_( zivF3~AyDk>Z@D8=xDc->3nG_X{B)$VKkQ_LU76X&X%e^4o4epk ziQc}P{CXS2pX%V}eroSH%d~MvWR_)P_^BdtbF;}hch*jIo%8o1qsP+_t!3^FGijaX z;C*2_lHgFq&AshMM_A!02=)+~081Nyi33F_7B)XOqb;xM@1)Zs8JZkO#7uco4}q!R z0y@$B+y@R*W~XZ_7ZhiE^rZ#yi=wKTEx|N=l}8bu@=4CPN8raciUn|fx=Mz9kj%$6 zId*Cq(Np~gLk_vkM|QrD8EZ;0_ll5NR*Ur2Y7q!?d3A=dX%Lc!GYFsC%XLF}*d8MT zb4JanUr+|QKMRTWnnej~(nR|uSfyfUc^(QTYktL{^lyeCc| zcSMo0@hkduWAPS}kvP|x-~0#itLf;$5Sq+Q66(&r-I4EVylGEJd=IA$wY=Ei z*k1X+zN)GY)XG9~u5(lGrCjHVrwsjs;k94FiW&B$EsGr9>CaLbp?xFC8MUVGhQ6z0 zc$z$>`|W?678XQ!nX52_B!wM4$jo{?{9Ug1c=#BnNhXuS*~dIVU5&D2My=W--O`s0 zWN&?K;WX1BpxfYa@0Cvmu4;(3D^RpA1=F}M`O(V0%Hfdvyb@{|c6}(%4%F@#G9@ev z3iE42p$O@-ep{}GIeA3-bk(i=)V(blH}B`pdA9J4vqf=REGUjVdIpzqNCTO7zBJ$m z@XzUJyG(f9CaFKB1M?i%L|I?F7yX=#cNnsKv`sz7O5kd-nJ}(B=vkzKTM9hKDYw^L z(|-;&WiG8QEiEtc;j7AU4sJ2bx)14Y9l$D;i2Sb}fg2)NMX#27Fj(ECON(CL&UDvX zYLkr96E_~@kX#5>OFYsu7UmM4DrRy|Hf>fA_Otg44^!A~qB^|yP#Rcdb2lupn8UEQ z%{m@FxAF65(fJ;zIn+{#O6fg{T_iJDH{CIj79Nh241cQlOiZ9-5grdY>vbe^8e~za zwBYT(<|ku^!wC+qFna_KurPT$3zp?O+(kmE;oG{QY!wyuq0w(qN054?yR80<8{S+y zEK@zp^$fdrU(gv6ytuXxYIj>sp!QAPz%OvOy8UOTpznbb%V2s3M2@)9eD{OKK66Hg znX@UFIdg;dnKL@foRx>b%-J5yoDpE=tN~`u#&pH(e?okw?}6{hY5LqMJtTUm>->Hz z0RsxKm2^W1Fz6$pOip#|2#Yp<;0x3+B)Yiw0`41Pe>|sq!mtUJVPV-vPBe);ywA2gT%zLPP75xY4XE;(GA*z%o0- zP%X5q2|cO?m5Hv9$NrYLzNA&a9L~|rC4M22m+68*1$LlHFd+TL8wiN6=0hket$WAW4)$Dm&1NVW zoxr&da0y{#vwwmz2i1u20ueW@u&nPFLJ2Qy3mQBlY;y-+(PXWa!DxWbZ`dub1x=E` zCpicMU)nZG4rUzp1X2ZiY4D9Jpf`W{eP&r8n7xP{t*5CoDe#ijIylVmNv@K3cJ@MS z=dlL3*=QC_!oXGo+4qnkU=qmBwV{p{z>en>7=qr?DzFVTDGEFEvWu&nv4W%Dm5o

    |Baj(jFQidutO19|}==gXp@4-UjjzrYFDR zzY9Cda6I)yCIgo%>t%+hqq!c3l_{!&DWPG0PmU=ci8p6rRo(M%eCc;jTv_}M8KjuC z%p+q=>7lw%`rtAu5hg>GlJC6)lK1|xEnGqzW52|;F5rS#c+nog8%&R|t4bPpZ}=UP zm?A7Jb7^NsVyotrj#l8-9#~%@Op_XJZg4vEU4Zg(zC)E84E+0*^M3+>9t1=7B^a^~ zh3rH25E!x_M8c4r1Vi>=85pwX!H|8=Jgs>X;m*N6KhSF=~d zx^V%AoFMmGy`cjbHZJc+rD0CdThIRrh$?FRkr0IrdQ(^=9jtzz7kw*-mpIbLOXPof ziL}Qlsp?wK%Ip8ibx+oM7Tm68`G^12@GE5dULbn9{)z7$ZG|N*SG#?|41ot?524Wu z0-(%jP!nKyVCRgOU*}^+?m7KT)}LJ50qr$GFQXg&q*@aaX%CdJVl_)yx=SM7$KM2l1)hu>Y># zru~Hw?`Vm+hC9OXhJ2G+LZ&--Y-2>=*{6=*KCuzS2g`}bp@PtWQE1P)aU;909)Ts0XoN6uv4VQU$)Mo#;1jZHXA`*eAOC2ku$9tHBZ$uK(n4bS({Dj);tp2IZMP%;zzlAxRpJV;WXXsDFTK?vQY z5C%M~RF^?kSLzPvTadE@S7B!g{%vH7n#X~^E#T4#4CuLUJ@Q|V2dIECFs15v-f%5~ z%3fF8Vs2NDddGaadNhS=%AVL&MOSGl_R}$X@2AzCR&z@-D89Md@Xn9$bBp;IyT3)j z2keQ`#o2R|hATz4q!+}Ac0DR*4y8F2JmO$_bRs==n7{q+wL@h^Cv1m)+I=l}Xn7em zWseTt{WruxzDBM&`bre_9MNxt z`!M^G?{1^jxWVowI`M)niiR-134WNI&;jTB%{|<&_|C4aXK#PkQTw)6Gg>vR&>Ql_ zI%BN$Q9u8q5Y^$24V&>gb|NJ*%rE@)b?8kR=mG+6{MQA{XS)xjUGHw_=X^|){Oo`6 zOCi*-C`cA6If^UO+4*}uK<7G^lq-P~MD!e%{}OQOM1Ag;m3K+22A30rI9cC4E(!c{ zqF&_2n`z?rU-2XJ_RRMd19F|dORGi4LvG4CJ_+{yBmNO*y_Zu4@b%o$H*0d;Gzsb^^XM%czK5iH{3e3~tSM(}Qfv z@cn(KElN3H*qX57bHfSbVsTo`v3G~92Y%`m6tw&~u8Dbl*uk3jc_x3YNo2$Se9 z%&~JLZr>}t_GFH~s5V%7ABs~G^9(b5%_gyt=&HOxIe1zz@$-s_eDGA4Y1HVezcm)} z^Nf2}ZX9{=$nK=G;W~A~$TKYWUYqt01oxkY=&tTVK?NxaSTcTa)bADNLYzg+3~9>R zfrg;{*2tn(-`?p97(4H#!<8ksk_Cq4VHc2dR^ky?8Y)a(4(b@nzgobIU)Iy*F@JouDTgc@hIO5nbc( zftlYVDC2yJ-v2+a<+yR8bC(5j#C`Q*T5gzH8Om$Pe8P# zR)IUC=e1kqe655Z43qjoHtwZ5Vs+7%A9Hz1rp1?|B>4?Tk-yR=MZ;sHd}AD~nEZ6A zAmbor9O@kzz%BP>AV#jEm5BV{K~K3oaj0;C#G62ZwG8mUBjH+K5|MYw6vWjL zP`3j6e1iK}*3%y5+~{nhg2%LuH%YGaW$_175CWDtzfq%Y?w0RH!^W`R2v{_o*p=AY zEV_f#9@xo6&E{^qFvPb>{R}YipGLL;2`_@e|6&l zxNc;DBSJ>#e%%-f2LwR|xNc;D>qaiPZafdyjfeDQZz(i!!kVDNW4s9FKaYHYAu7?v zdxt3i3m&2%i{>wD;QK!1ud~lk{jVTQuh^wz6utQg1q1=Z&g zIa02=gIceS`k#e~Z|C_D)MNNh*I`urh5%d#B|*Yo6=e2iYMER0ZVTFF5t)eU41Ek) z-DIW4N=}dSc}dWWDb+h>%d#}!4%;_UzTab>?YP{TO}4l$u1(Ik>wb!$v9-r!kNsln5ERZc*XW@v zp0bUIdnH3Z_;Il9T#y&+DeobcI)%fN1>td>9=_gR8UHP!R?9$YFNdsbG$FYZS@!;790dB*d>swU)uxWTu|#6+dDb2kec0f5%2 zeHOTB(t5!5&>5B>+aO@`ao-dPJMs5>Qg0qiR5)+Tlb9w{t}aSH=v~6o*^vMDZkS8K z&X+K_aePnDO0_y(G)1oFooGtoSWJTIel3a~*O{80&xf=*83Oh&LR8eNDDdO-e!n6- zhX6Ly@8l^=xPkAE$h|@wl5f||!7`kZ;-B1`*E`3DvQloLW@^h#^t;G?1xbbP=%8hi zIK93ef*#WfKai~o{-^r?*l%6_G>#V#{7!=5>1!Ii>VB*{D~S&0@4611_IFUd-$-Sw zc^)X67A+l_L4gq?#wy5lF?xfuSf^`#(1}R@pU&sV^_T8jEz04g^WqNs3dJnx9 zKl)U9CPCXToT$?!xg6-Fiw#Uy3qqI_jOiXxuye5b32<0CahL=TV)3R2#L_j-3~=nk zgM*ti5IFz`CV0FY7>)BXA7?vz!@V5ge(io^L;1kD{vSy9f06m5nAcr}zIR`nvYoGJ zkMN>)v@fPmJKNi0*U5%fjbJis9oIqR{sz@?wxLqHnc?2i3;*>rV*wmK{yxS9LH6+&byo57WuB|I z!{AoEtaVCsU!V1CSZWX>Dz^Kt+QQ)Qy{ zD>#Fooj^sIZt2D|+)g=AsEFsrm%$<|-0S3f7NFGg7rkj!e-Cx^ac$ z-h@P^cG0m*(q@7(P^a~Aut+Qzr;%XuFZprp!lNw(M+E43L!<${RB}R3eiFfflImo+$_vacKJpJ-hc z>J;{IvhNZ;ioEBTS~%q|m1DKhEtR7WO^;|^%?4QP*vF0R$B&T#)y?y})}CiAWeOZ; zzs`Mu8C7!_zUpfPc;9)$UXg!S1PX@~PCuA;g687aAkQc# zL@4J7T%Isu`TTaMoim3$jXzm4Q8*VO)7*4po%}^|gtQcTXeGYP9)A`_fN2BeiKN;s zQv=uA@G~^LmF>K(o(H0k%h(T4_rBT1v)rYdITI~Ad50xv+T8V+MDS~b4k1*P8kUXt z$7|<}gf)C?5wXE`c$d7<1BCN?g3OzEFj5XIGf>d^VOfR^+`+4*Z1@|sQNzmCwh!_! 
zt}4mAj6+w|cEei=&<4%vmzb`*6V45P-&2*wS3|0?0hI?f@6#6uaMU=)`@gvwJR$_D zlm|yY%WS*W+A@eOmHEEhp1d>!Ik9pT_o7A{7520ucHW(ud1K}Hs4vTUsibmNDl8;l zx8hI!l#3+dw{Cp*`%?yqwlpo3@jpEJ&GQpR~JC=es$XURr%5V{TF59A$iD>+Zg4Lk!`>yd zRJzaIUw9<7dDBaLculs&sd|^xs^%D+uae;FcYewtIT|{}jtj+pcuMpZAgY1VUo@fG z3QgZeT<>Pvn~rMoLcZyS_357!o6AZQx=od0^{38m8;h+b( zmLLJ~{h1i8J~~rW#|H}Wg#@#puipcEvQjNF*$0xX!`WFG*rv5UcyDbobEfL8Gaq&s zDs5e?0eo}id%K;$t0bvSnkL(Dly8Fko13z~J^NTQVYK zci>W9>h8d}tdVj-*!DkE7_Q@o6U5^+2HxmylF_!1O17?*^G`kL#dB92MXoZX*|j z+k<4slo!zaHl*n!0baJeSisA~!vhpeK)^D*%f?uR;Fnz@0lf$@0mX~k(EsThV*!ep zA0E9*0`VXRKjL{2Wy5vdfMTK|C@F54Zh(?ZD>TCWADS7vTtdG4+$YTjwM< ztY4_#k@QgFsvX&APO2$?UWY8HYtL)mZl}AP5mx#3)GECfc|lB(w7?d2XJEC2>#N>( zkc*ZuNk>EU;KA~FxQ@a>s;GM(SXZCdNpGhk{Nq@S$H7Pe7B0`oA z50s|fqx~~mF?<1c-vGWq?+aMq2Mw!Oo;UD@T!10nA$kO8J}{zzhNz znST$w^4iM=YPY4A;K>{}=iuTqvVY;?mf2V>$T0>su;`y0ES=|U%C(*EfViL^`aH-6 z%WyC8>K0Me3gs3tq(x4U|KoG{iXv{7xc=RcKUcEZ8X8uAB%7QuSkAZJlWnCvoL2EC z?|gm@{F$3qDf9daekgk|iq{cJXMw?2Qzsn67RG~Wh|xb?Ucm$-k~i1Rpd^W<(_=l| z{Xiv+)S=}im87l@Qnsr^jFoV8MXw^T|<#s{~Igd1oq$ZPs`3 zwA%@E3@qD|!K>N7QrO2>RR8pia5GJ&8IgCP+9}#O>AjE1dFiG8%SVtSz??qH=AXAU z#Ve5f(GBGqKY%WQu>9nAaUMc0|np9LE53If~G|N2hNi?kaM#(|eyM80moU8%WxUG$|M zsN@=12W-$*pq%vYS=;;^jkmh}VF~8pYgSDjwv7TeLjh8T8Y=4Nf2PAHo?B#LO4n|) z5+6Qt^rz}x{yWG+IvT{Jtj3>I98{!wspn&>>ZLf^Skj(iEQy-bVrw_Tiv|ZH5Nc)# z4vl<&%|r1do$N(qgu$0=7yTwU2p=}$j7(s8B^ zR!U8k2Z{P0i#@ePj4B2BobL7m>At#)@~bt-$9r>M0eW84JYo)QLm(hbHl#qUz5*m$ zK+80Owtw!jPRxcU~gLQ|UgB0ZQe zPk2$AKTZ5?5)APHCW^bbbFe^wK_NP2^XW@IAy`XTTPm9ichz7Qn7!n@5YWj=KIHn!;GTbYV_l)9u0l8UPuOU_d*h!Bx$+s{=e)hQ&djCFR@Kpb}g;&=cV)Nlj_$WGTlu331zeuMGqR5-U<@g>K>W_~FN zv07eDLCa9>Sj~^?9~r5qeufC7PE}cI2>Y-mJU_X(0tP=0tx}Ax0lSP9oJ1Ma(K(I! z6I^;FGjsr?0I`}8auv|g z?KmJEgFJ>Jg!$h8>QmYh7+6CuW@X2tWLw9DKK<)|j;E0GLl-tn;!JjV2qW((Mi)sQr93`P)YS%%wf_MMN5K@n_F&ckcfvOg`%no$(lub$+s zRZ&AJ4^p&}XEiTlbty#zMbfKTo}U$0#wM^Q9K=B=JC`cK588pfS{&E)8;bwX##QJ; znH}fG`8x;jyYeDlaO2`3?0Z$%S`6x4++L9c|1VUyd+HjgE@GK9oEE9=cz*^$$xVb- z6GnhqJWwusY8d?PDtg|<0aD}FfY}5bDs>%jI*5KKo7AvVvzjc@Bek!)H177*zZ|D_ zztWt+WJ9iuGKYV4bJ66+&&|pQQkk2T!@>aR;d^@EJ3DZC14*cO26XS8z|BS8YylUj ztD#bDTCk<%7m%^N$bavszLR)VLPVOGaVX`|h1fpIBI4(jGybaAw7oU?Gv3$mBJ6t0 zz@zisKj1z-*yZ0DVUQ7_^AwETg&Jq!yaqy%jr^)PS8BbH^SI89$xq~$>$~2;%XNqJ zc5I?kuiAFgWayvtFJ~A=b80cVm=eG)y9)Ec|uTi$8^%GLcG@ zGD4^-O4ddbvZj9ZbFl`Wr_W$?=K?ciqFRC^;e{?SNsfbq3I>kN12OqY4x@jI_g^XI zKM8!$#X=kH)3jCp{GIA;`7jn8wy*F?$c@|xVWF{5EDihCK6QxzVg7yyk8p|Cji)MHFsw@YxS z;OP43jlr|!!>}Pb~Y#SCC)W|KF~CLTWq)sc&{XZFh(cBe`W?O z6feWteQ|d>;@uOZR=0A8zVG`rt;=N^B4(p^91qc%f+oM$hl!fDj?fY0Ruek&{cH9<~k!PB^>-T`(JbB5Bw3c*Aou$al1_K9_g$YAj7FQL<8Mhix-_AbY+405#*O6Ud2I;A3ot;2etgFT!mf|gR5MpogYU!-~ zi?R(azg;XCrWx&{w`^2%zJ z*xp_QwVXGYZP>|kAvWT z5OmLz>tyXrhrM=0vUV%A>ihD8ROU?nysNGkM$LntCTI5ld1du6U8{TLKTx6q{Bi@8 z=Ds#QQ*q{`;*bVoyI@!@>^}-|36y#3FA)6eRrW-Sq0|_tJfcgt9L zo`0rAmPi%XDPOkn$D5a2g3vv|vtb&Pzdnz}+Xul|d4V6*Y%ud$i zvk6JaXk;TYc`Fd%g>qsK>e>j(3s$cDbp(g(k5$TxH%1rC6P0EA~*-S--J1Q&y)QLT6V&OMnev8KDi{?nwaHN zhG{OjYgXc&qVHN!a5JNd+O3$9)9UMBX9eC!G6+x)@Bimp_zI5F9Y&X6*Tk&OYLQZo zQ}aLXR+pbChIappfHC)$od~6Pedc@I^YuuP>I3^3ucN*AtY!5dsrDzc6W%&I-X>ir z%f3Zv+CZJpT&)FM&c%=D?EL9oZM=OzH~znWj*I{Jb3P%%FkQMQn^Z9`Efj&8bzY}A zK0rRuUWT2!nI$2q{k%*8NU^hJIb6Z|7HT)l`yML|>YtovXu9jGIHcwrP6dDM)pOat zZ_}ARu-Ru8$er*}6API#60LA0NyKjs!R`rw_|FR;?*9eQtuV*Af5GsWmZb96Y-GXe z(Fv!$gvfmOzR&S@*^o2Mu`%|oJ(o!NKm}gA=+>E`o0mJm)&BYVPN3Fz9?^Wi6F?5; z5FDr*Zb!pdIF$%PZi;(oBoI7Y23?~e&`VX^0dPnm9KdhP-XaV|L5p_l)mTs!5M0}A z(^oM8)xYfb2VhDU3%hH%3Wq{lmXO664tM`$>3mk}&w|L8ClZkNiFNJrfs_f8wcygm zpBqez{9p7mMc9WfU(0{}k2&K;=E6Dq0^4zWhPS*WPPtQys5?d)6ybmY3$Q73{67)} 
zM90`db3X_yWU=i(x7aqFTWlvFi|sqeV*8r!t|MAS8?w%y&&MI3l2BIspIv2? zlm?|3e=7np{0U6w$}%{I$}m+ZlQP0;N`xegTV^<1zZv2+l7LvvHHbx15Fi+xNBj^U#=p@fJ&cN`AV1tv zdg@-#eW53s>hNEXAbG&(L6};;vFA*IstTXV6-EiR<@4cVPk6&LWY{yC+~dpq3m%&b z;Lr?g|A~M1aC}gh_XO-({~Bgl4^IoXD$Vhq4H_oP<ZPFLGF1~{x=rt+2Y3SA;v(SFjszgdKi{Y$xMl!}VBlOBwF6CcJrlDe4jfwri| z(Q8~cv?h}lPL$49boo_j(vpxHUu1#qge6_+b3^exT7_Ih{K3+6LIcdd$1T6$kyV@i_zL&G;7&TqCIQ;&41_~-Rv6etw$~WG$6PlT z;15dcJ27XkK1QEGEddkZmHy2-cwIX8BrNkT{RZEKH(a@KNkFhxyc*;Z{UB7o0Y6iX zp@2IDJ6sl&HT2v@{D#V$9(zpUk;$wx@4PY;hFEu_7xj8g8WkB)TWXi6c;>q9xJo>U#!e*4Ek4XXsXuwV^Gw5 zf*=#?!$3=KH#!O8xlM?=x9M7`k^>F|1o)4PZd=lJ&#`CGlH?5J#6d6Y(c%5^F3YMX z|MV#%wtF6}a*58%^#hic2P-VD?dxXXAxWC95*m&Cft!r{{qSRsP1eMux{$BppI z9=B`Lzp(K0St8TD5XGOzq3r79%d~N2MfOs_o7I@OP%W zyo|08iW!d@%!-p{aWW!)Ddg9$A30Mv`}RX-pst(5UdWOr`Q?XtGX^OZ!sA(}Xn_4J z7S}3$FN)+UHRliU1nP@4`i>-V{h}oaF~?$3_fB5w(cqIp^v~eNCfQ^q;CgZJCeb?%VrB`WjPMEf*@a6{GtldENfR$E2&Gjd+`kn#cE zoJd`m|p?&-sNip-k-C%dx^$c;iU(N5I^7$b-WMLY8|1(r){MDA~?zubtATo4HiSa zMb#oPJvS31VVtHE2WgARiy_|GQBM#&)@va_EW$1DJE8{u=_p)<3D8bC0_N{;pogp$ z*BOXrcTK~H3?#z^3O-Bbc7!>LYSNR;uA049t*s}&Is8mSn*R1c1yRJusl1o|m)mDR z|LRdIeA*Fb-UYw12ghCUJ+m>@*@L`IJRvtiKV8b<=EL~3JY-PTx$md+E{px~u5sCt zC2xF6eZzZ|tl6oXWvzW&(POShvU)XyA@;VN7Y@vLx&pxT9PnixAO(Ce^E9p-@Sk4C z#^6Cr%}EK&R16*%8T|sb)}aoECVDuei>GFC)c0=b3&#!r^Fs zdv5mK>`1Ox<->?mp=$GeYdJQ$xkny*FCYO~BhSOSyqs(u-Uw-Dco>a_A2h{+*#v4e zN~&3YV}D3Dos@Sag0b;62s5z`~Nw!ZDJ3 zbPQZuTz8;|47oL-+E0CxX36ZP6L)c%uoP449eX_i_f`&Cfs%d?9WLsDnBq6pf|R;7 z1REzUW3RwlxkGGIFPwxKH0y!OCX#<1GT|mW<0YUIbq%NWvd0ux$jSfV=lXN5(u0m1 zBD?1j@n$CD2}_QB0rzM}6|8A1>cZIDHfxT#g)D%P=Y(R7rv|xg%`qHG;d+7gG}X}q z36VuH1eb=h65!N2qP^#Xbvjl?TqsyOYm@mfAC)FJx-~;#A2R6Vax9mn_Yp}> z7a3^TvUiM15U~JhCw6oKZ)n8IYg1XDF(WgIdD8Zq7oMR%nPROw^Wr8z z!nO(>{V%Y0jAR`h1%iv~@6kCoT;B{yeR`;@XsDr(uSiU?GPqBJ;%n0_RgP9G?2vB~ zL=D)zY&?^__V`uRd=Z9$z9pCoJQu1Uiq*!4;Cc%zeL@arKCnE-0k15tcrts^D$WV4s= zGep=C{f@Hjbp4`2=1HRYQt*lO^A)1EX`k6uZ*^~Ke`{)0U=}YOLwUabcK~~7;5r|Q zGlCus)=eg$K^;?xMtrmhzzt>U$r4-?x7C*5-MYlkZh~}S2Qa!uB|^y}Nf+4WYFUE& z;s))To-m_9$x*)ks-t=eN8DDyf#Jz)u<3C0Rj{z)^UA1|BFe@j-Mm2Yc$ z_K(^$oHBpG7&~p*PqP=8Fu8$}RjVN&B=%gk2DX1(?Rv%wkgf1Up%`ErRdWm(`~#oB z8?-G>Zy`#hVYhTN0uiT&B;`icG%r;Kbe;xdeByq(m}~vbUwP3$7+@|3TY<2T>OgO_ z5zvX@jM#x|RKsr|#f=%-cwmgxyS0FjwWxlH46`Hfcsv}CNHsG)xWF+!v|{=I%dFj` zxV7J_!v^B$R*0|Qfg~?TYHQyC<;H7hqkJz88ccgtFp*@DwZG^kL71w7T9~RKZ7)0H zkK`zKds{+SF`cj+OJi^ZyOut|)0~8ha}>w=@(-X4-9ARrjrIXn8g0aHn;-W@wzAqt zbkHlWCmwv2fD3B6{uHm3W6xZ1O0_j-GdGn;vdkXtbMRHbv9go{KE#^Vc&>|36yH;m zbvR7z(;M)bnMWS)37s9-Wgg}9&@qFJIw?Z%746|(pyJ0`ci=y21i0VqT> zuuBlKuhYNc$}tCH18yhE%~yvR@;{a?$v5Zyx{K?I{0evr+sol%#kB-G=#iGVlHam^ zffc5j^vSw6J(O5KUuW;i+jvw;p+Hf|MVu7=gZs7LPkS>xC>1zHFkpVlt=@30T7vUT z`nU9B1rM4G42ZejfHfZJgrDU1LV>bmYeJH-97Y&?%ah81@vy1> zWpJta=pBk?5x9%CtM8an5tm5YY^&-%4ECRC73RBCJvLC0%>j&L9^YOcyc2uv_8&`cS` z@aIuymQ61LK|<;C)7oee->_b^#KmDOa@Y?G6Z>rky4Vxv6ji@i5-ue>N9>YpNOI3o zdW&c_uoRTB=2Zxk!a&rujwdZu|!o)kXZKq2^WS*5?rD2%@XIkz=XDEA!g z>8tY;UklX?icgsLFIDko-`PIJ*y%8ATi-39zDDu>#wV4J-_=lNaVk$ZG`ZfN1>5Ge zEWlQ6{J4hMZ{rQad$Bf%6R_NV2H#G;L?|+m!|HOPX4J?rs|Hm=0bkHO8n5$Hg0 zTcgruh%!34^AtVd;+8STl`X()h%8feO>JA4}tZHd)oVxPjt2?byzUcqw1k=_**JcM0TGRQ)0R5m1jx? z?f{pgZQry|t{1BBkUpaE?^$4n6n~2Wnb#J=0C-Rd{ z?p|HHLU32w4R-T;_p(P86xnj-MCv2|zIAkacUhjxSk$gQX4FrlkT4H7*w5k>spR*>Gz6H#a#It=PMHLysK0%YL5eb zo}FHN+$O8=sM~}hgzyb83Ek;{yme%aIcztzp4yg!Xp*>J&R~{$UqtnVr0P+;C*>2v z)%Y;yjcAxyu^D*9n5d!!i}%RB z%eTTuyaGv^*yYai6t1!^B$yxiGlHqTQ? 
zt(=0l#X#R8++3g;4U6+Sj>9d#>}$t8&mkHKosrCqKVmV6z0#W0C6TzZ{QTCv5G4_F zB|d}=M^BfFAGY00T?*_4H#7{nBY+<8B#8PEYSr`b$YL*!!?VoCy|^aUSNWc`Tx&#x znH@5s;JV8a-JX`-y}{ovlMcfykJN*!q)W%PJYNfAfrS)?k0>1r=NW-4u+I+=rk}P5 z2v+)AlthI#J=9_UMioes+hpJr1tMA@RQz13hQwrz|2R~=ehm_rQ-r-zO_(=-&2FHN zTbpbg+8A(eFhrP6DOkFf?SW-=P}Fh;FHg3{m8i;LG{Z+9OH~%olJX0tbC!IrGoB-h zE!1$eem=uF(4(02ucuJN?>YZB>UCIPqV2^Z_K;Cq2XSb;gxKhyeo$etG2L{;^Vb%KYwpTi=O} z7utHqEB=HoNqVgBBHQheIPI;s(&Fyh+m9IR_u#wvp4G>$-`TbbdVu4&)vt`cnM-l|De#CnUV@Lej))3A8dYQw5??0Qr0cV8f51~|U!|GSk?Oqk$U#>~ z1oNm>EI~U{hULx`JxlrNNNcszfs zxrN%9A6E!gI^^2KoN|L*u z^`Bv^@U!H_EN4cknI=c|Xx$XE?!~MR9NvFs2k{xnB%n>_(kBx@NnH*SU=XAW$1K3J z`$$<|l&KPpbYEb76U;%&sG9$&fczr4X1eS(+gHnBpN@2*=-wIlhYNQDa~Nz_{j3i; zz;?RP=utvkjvJB!qO?M0po8XR4dy_Z2p_YpFbTA|8Mep6TTSDT8H{jgAr3uqd(6z!CO9^yP z*dNbF>$1X!mdt3n1S~~V$kR;?bHeFt67I1}ZF3q?#u^YVJVLUVfg$>rgJ6{=<~`ZI z?|c=lSNwem@^^tz#NI7ax+KEkwcO!VEz<^$t_#l$Z7<2qQl(NE);@2nc_n<0V047y ztRvjtf+G@v#=WKU-XtBgG0~ICt2$N`DiyFXD6xN%wrx98J$Ag}9iijvWX;$k-b*JK zKy;?|%=clUaqjfSzM1wRj_=kDA}2zL(HcO9Lnuo_`4FzZhVMQKsW}Rc?%5+=LlGSX z{0mT8!0|qrOB`EFw17G+?8d5qCFd)GnC??xgMk}!G@RGJ58!g^NE^+5;D=|#S=!H- z>PTfA*)`JyzR@fBzJIeIIhGmGB~-~C?3*&a;G;Nwun8^axxw|^*RbC%XAqKlut*ZX z&{3HH9Yr7tsk^v-f3I;n=JVcTJgf0gBUkii%c&URFNUs6us%!-W@s`e>` zUGomc=ZzKOi|&WYd$b()+P-k*ysOAqz3Gl^meSfH(93EZ_Q6BnsiTggqPC(2t8*>iDIrD{$^fi_$YxUu;k(vlC{Y| z54QZ(v(u5XZBBfB-+4i%T=v4u)b0S|GvK#WX`ad+hAzg+sKx=7%eSG;I`?5a$(i^V zYVD+FVN1lv9;zK*=PrJl>mHsXy7@ffdPL!eVa%1kBO*G#ZcNEuv5f!0&>KBC{Un_1 z6n2(nEP7&I0q)HKIh>2$?-fFf7!O_|?C&g%N7xmSeO5+k=W0LI?Z|G4@V~e(mIX4k z3tXQDihoiTp0Tp4-EM?Bg2-%~#5W@hzpJD5A3IVgG9UE)ld+Q69U*AS3> zcNJmS+WKRgmjsCbaDN@1-rl_kw}4g~2zN4kM{XrdoV1I&T{fcaN%rtW7OQ^P%Z_{- z+@&tIKP$@IY#LbfoT=ZU}&vk@JRbEfNKA|^OpDk za=!wds6oyled6fr zIaSHmZCJDXk7K&l7kEcD&Uie&d(u9u;OI9zi%Hq7ZS=i5m*=-LQfT0WLrw)glfl@0 z2Bfg!U0{5Wa1L9BKqE_{3jKT&duN*^j&5P}I)!a-j3x4Qw`(q3{A6^M@8g!aByFc+ zE?n5BE0ga6zG9`M+_q`+YW$-%Z>8xlnw8}BSnWV#+>mA4Ulm%Zs!qMY~cors0^TJ#X!N**^$bv$QxCT+QLXWg=4 z@KDR!Y%1@?iF~b+r3gwqSX4H694d~^^LWu z7w<0x6>=z^9-aLCdTGV*$TN%smvzpH>GcGvG!AU?Zgsoi%*?@52e1%S#<4@#Dm;Ag zWnaNBuN>1y2Q9*eim%PZ&wAcW{WSqhx}^!G ztxJTVE3QDDokOI37Q|W{A0u_-FjwFojqV&(^ALR>q90EmTr-T&S-jP6|7_@+)R+9l zbrh|`quQiFUBcF{$0R~I=GcIWgcOs{|=3;C{O4$Wa4{f zvH@-uECc)PefUnY2M&?%b(}y=rjo!SO7)C7o;UJmRlUFZ8(p?84id$b4qlSRg?2#_ z%id*{UwJzAY`;R9?um&efjLq_P!2F_kCcPBw7_4!O^WV=yRCwbhXlkC)X(i;5F&l>_)@qVE4XcgWqE*MF+RI* zg=Tqh%3^X-eSdt2cVr8m5i(%9^_t9)#?ZYJ{s1*^zkLEXg`jlFBe}o~!oL@Nrucu7 zYBZsTo$h55CAR93PAc-aR2l@2$B#PE7+s6Wv_6R&8$C{bjooJIH3WcDLVzpGrfgF? 
zAJszlHmHm@L0Bi~=~;n45@^OCOrX`ABz<1;KKX+diYsK-IO;!J6fDItWst7aGMbUC z9Np4VJ9DpUC@5rW9W5xN7!+wzjcA7#DYwHRtT{vjZCrDwtsO3;gOlCl0B@pSgF9~z z=5@pf+-gh66la|5XacxnIEm!4Uk34L#KRjEXEfcTR)Uv;5X28KeAr_GOm&wqMDN-O zK1UXr@f3XXk-22tPZM|HA~LEHt-7H}y-XHWE_@KtD&sy=ko=GfcE$B_qEPy6?;p!y zu_!RFycQjb$36K*z7igD@~F5z^&^O|^xxo{Eun?$6^efA7di zVw^}SH*qNGiAmUyXfr#bG?NWotc4sMu#e3Jt{g;nSpkza@naw#57E5pm@`DQiAueB z{&2ftQT@9w#|8nNBC@*oGqOJGbkuxO+d$bk5-zsfKEhAH@2bZDyQrE8q`q9>I;lr( zP3Xd{&nR+ufAn#TmpU0HvQOf-gq4TIyjPAeM!Q8~RGY-Nwk_x~RJu22dG86z%}*|; zy}$&dm5+8Dy3P&=KsL0tNCLwJ$RO8!-kfC;CZY9Ih8TC{uVD-KmuHl?5V|Rra%#3e z9A13tjikHbKeQI2nX$zqG2PCtFAi3BIJQ6@tm{qr9yj`4nl-`3>L0N411>SQ@kpM< z^%c}*tV2k1xskx7xrzPPXD>Ih*aCfY@5C`=IXWoU{wpzoKg+$h{o&bBbHJOmSYcC8 z!S~UKA`OH>#8g`3AUI+Z*6fT*_OEbb3_J$|zw&%*o_=E%bVn8qMKRXe ztVYKUk=IErF--sFP4zO-%(^>6v1BQ7L(ylaeI<2PgPMy<;v*T(19nyjk?5ym0~$Z1 zxQX~D0hK7zeQ@+l z1TwUOA=ANwV-=$*zgx$joM{=3m)2ll~T9VZ=U-wGht+UV&KeH7QH{gjE za40j+C`v#4-f0e@8Jf4+C?54y206d)Nb1|)qS>#+6xhj>aaB=qyARN$X%8DeHN2h^ z`Q~LGZHC^ke{It@rdx^+=X6L{sK|NBQ^+lJ{{rbJr`5O{43<9~6%h}T^aD6?;WAii zS%ey;W~>dQY1N;72z1;H-WVdTZHD^8*&}aj>lbZ$mGOJNSMRKvW?`&+5q;liJiR#$ z{bI1r(7nSjhH7&Tyfrj>=6$wu_E}1Z4cblyXLhjx1L>1`=POAJ>%yttGzoFmUqPzq zXWchqMYU{s(R7hA5krQSG%8u`*0Ef%Wzn(oQ;F;b=iuspAf`+ejgRN{w<)thJ=eaA zLz3M)K-dwqOTaVeQKqEPX3#4LBzeY?j&s{82)?S}OCl9OlJMT`S`F`)=o;302YTCa z4WGNKK3OVo-{O$Oz)xOUuD9tNoFZ=HVV4OBh{*7B#9{=FqKcA!mIOK;E8^u<5o51z ze?ZDhNQ_QiEPVvK|N6DcN0qD%o9Kt1JUIf z{EZ%%46_y0c}!L~OJPD@IIC_#Q8++VN~%^%d@-URSMI6L0t6j=P&Gr@a9o=<45P1;F6a8?# z#ZE3SGqk$B9n@|%%eX!ZYJqD&vOvKl6yoaQO;e(V4Kr)vz`hTbB!Q6DOT0(on$u!7hNLCkTF>0OrN@YW&*EzHLIie%Juaum9r-p^%#C z89o5Flm|=7okAaFJ7PmskY3Y6E`D5R8H`XHfAO`#ms0Icv)pt7h&=$dW?+gQQq$i< zYC6FkQM0<{^=VwiozWz9}HxBWz5BYgQHk%-35oN_mp*m~o{O;ps^`VIBgP4p})=A3` z@;MD(ZoeuEz1AmImIhIa5ZMGBiPHSGJgAKaaaM$^|9cU)&MzVcx`;d~=NB>X{30Gf z7mxmuSw6PcxX1$P%iKju8GB> zj~#9zcCxe){UQJ_NP^0PU;F*%%%vuE5D1U-t&|8pv?5H{G%cXoAsFo`F> z9;>qXp~EkPtY|ww$#!|>fCzt8}Qer1~j*_K&rNuv9w0jR#%-TlL) zj#J;WnEuC)39?gotFy{|!>_m(448~KfC)rEt?N-|l!GA6qk53gjfEmExFc@>NYCnk z_e2#Laby>`h;Zq*Q&Q2e(9Jz?&vx|I6tIsFS<;bIBNegLZxK!4W|3ivPNKG~3t37K za0aKh5i9d6aXnPFAAqdkC$vPQJ%nkXJ0GqN-TAzZicY>UJZvq_DNrXIq)pI_{gEnO z>+jNYD8+4%sr6&;-}4K#@0FZO25clpFo>(24+NV3lmg>%(0-0ZQKevlnLf>F{!3!U z_?+7hdab$56o!Y>&txTsQ#~GD@V{IeZ)4t4Za?;UdZLCX>*-T2ImH8#I2NE6_Ki%m z!?{3$sLlk4zQ9wy0}uy9H~I!-T+l{-vsIga_r#7#^h)fbW13a_gv)8d^zv-CQ?&#y zy!9eUCx3TX=VT|OD_8yPO*K8GZVMicxt==f^W=X7?^93vGe9J|CP!d4-YlS>5`!2r+=8oQX@Uf<*kkCUHo*Bu4NZE!U6}~^ zX@K9{qcYimzs|wH)D{lmW#`2f;xxqgxAK0Fn^=F>O8yxu&{*(RJp5&{G|OKqbD_T` z-Ts$P)aUdarT>+`+xepil}IeX@eg*M4k$i9MgCup%P8b=kyh?2Fx@-%xV(ToE*Iq? 
zkINk7ajAklF8Qnb@#(i>#$Er(-q9!Q@tQ{tsqRC66j}oE@?_WHY{ZJQfEZh%Nwtw@;HlhWwivJ`RfV?kzfogWU$5&kjZJuzl$Hk=*52g`1($&op55 z?0G8i=IcLcuZcx+<^`VL+?){(UIPo~|vNi(LxbBrEs|qdh=+vEiJ$9|dlU>vmMad1Gv9onK_=s%mdK3SDHme3LEJ zRJ~NdRU?;yy0kq}$1;vKz9ywwoKjOC+Q#@;VgL`10;@|90cVW^L>o4n+ZBdAHf<}R zt{*~O9Ya5}AZrePz8L*!H8Gic*#ZnrGHi^wox$p7^RKf9m4b_R1|dln@*9{z9T~wF zUNcvs@^CBI%|H5B89Peyc;5Nq=9vt4`SLX4?y=&)mC>Esh_S6iDw8GfASgm*ALfSR zi@bv8yQ~74%P3H7Amu`gE}HLGTKYPy(3iIbt|_5-@g&L7`~5I1;>JQZEP_701s36} zPzP(h(ee$}dg&eA3^NM|sFtz7R`9P7U`psoj^*OdI4$>_8v|Zh6uRO` zn#h%oESQp0Uvcaey6|D(VP*7aXT-XOEB8yK!oqq%^?M_KcO1S$c-pAcozPUiURR6u ze9innu*(jC-Dg85x?8^VF&1e)(QZ1PN1|#4AIzQK8#~M{`GS#OW*04>8K@A&6Xe$b#xt!wiUvDgxzvC7!s{}M`QUOb9bh=Gj3t?FhyeyjWrDgGt1BieV=Z z$LejbLH1t?qj^E1NHxUKtNKTXqu5@NVguZU<1U}=j}VQQmELb3mKq2!+|iGFgHILf zNURoKnEkjCgkvy})2&%IJg{|;i}M@`$`G9JCBTSN3!nl`ViN^bpNfnqH1*mn=YZ`< zhDt}xM3Bmnrko@J<6beR!Y7oCQJ|&a^Ej)2 zB+-p&5u4Ic?hO?*!;G5msS(Xz|4t=bM5T;&%R;`VIIgRoIPPE1kLE9QdJ8n|Ey#*u z;aZ)7<6WiafsK_Xa)&Kc`pZB;J}VHXbGPMY)Z|C5YgvQXjbzq<|0J{uYTei3f$hf5r!$7KyZGmwcynO(Ce+}TH z5FFaZ*BKFj-{J@sHo(0AoL)s-L&5jAR~^oT9-qRjpS3;SZwmW+4HUBi2_Fl&YbJr) z8dN}?;N2iM$lhnB_VG;wPVTs^$k!wJm<+mA|LKR8a^u;dJ;xP*P%mQf`z?3brn_YDC93@cNuRvWlqj!bAig0ngk4KcJ=4enI~~7^1KI2W>K{Pj_whM9ru%=Z zfExeZpxB;hS-S_l8n1C}EV*4R%538U3sniN`zrgUtoxiZ|2(oZc$3HcM!@1X^9{5k z%|VL-DA;hom0vpl7-otQe)e}1aGih)pCR^;rwP{xPON_e)NDH`3JJ%>dgHf#ItMlp znJ(!&qn;ku6McJU9>jd(0B!^jDXJ_4ERZ^&pdbo5j6ZUad5(KJ$6@vsJD6}48bQZ( zM&Mzr5!hoAJk2snlp@iR59S{m`6AK%y&E0A|0wDtDUD|!nfnYy?QRY9c ztxHwfpATwQ4X#Tk;JW0~9qHL={|H={288}z$k`fAo%(Y7e;yaoJqdIvSv6-B-2yfy zGtLAg9dTs(psG6CH{+r#@GQJ2NjPLiaGi1G5pEy>sK%c#M)RotlU+HE!`zZ{`B~C! z_3?@kiB>kphb@7UHz@T&vzNbsKXS{8i+4&-0pSv?`3jg#OE$wW8c3t`gE{@J96OU? zxMdy5_+qcNBK+UzL@Qm?&K-eA0?p;cU71Nd`SA;aRG88B@Wvzsa zABeL2^ta_v#XVhIGrT9))U38LCdh+RCDvaZ=J|Ct?`zO~5by23H!87a>w0WXh-G=I z3<5PO{wNr2qJm7}H5BAF8IdbNTHXwcQUJo^35s&z_?ZC#PSjlVvKX4xRzvdHyZ}^#89w|NkgsHzfx71wtH- z`tw1YVSU)oPsdHT|3TvJ7p5#Xvt5rjcZGW&kgwQC=V76B+{b_+gEjH76(C(`~KLHqtzr}EL`9DZzl-SHbf_2vedGqqN! 
z|2+0HXXO%??EauBK5FGQDZTfK2IikLQtcR8yPRI8+F}hspwtxm-_XJc4lPH}7uCu6=*@&4K%lv0ciIlh|GkMWp`umDV4Kz-YbJ05880oV!n3Xj=U z_Sc}5FPo6&ACx+OXqe@>nXZerBzGGtdLH;2#6M-yBqPZn?CaCjh?`PjXim-zF`5<8 zBWEfN|0ePGpsuG#FmJjJN=H~MwS)2|&HE=F5^pxrr)+fc^Ut0$Hm@>$XYzexNH8bo zzqG#RLx%GdrM&@yUAEkWXL1wa%?Twk5=ksgFJraZOACG?}Te;>=zN8UrI9ETnZel!mzKQZ(jjtt>tl>6~I_cnV# z(j@A&KUBp%7U9hI#3$Pa{77%93H>xD-Dme;1D9Gxbx({G{r^2u^rihXaf7?+C8*uL zC4{}5jdc3pt4qKW23NG_NAB16taxbdN0x>T21XOeDODI8r^qOjuj%$# zzj}UWDm2>L>1~*OV7-T-h%&$NaNGxpC(I%0%-v7;t==U(nqa!G7xPMbh+6U~NECi5 z_)1}W0MYc>|>-NLKfQxWQyfke>zTfMj{Gzg13( z;M^?M#Hb|2mVwc(`0-K|Vw4ulQjNQ4Swb0#V*v`|Mfq)C>|I&ZGUgV z-m=@?a>^b_UirH(|2KEYZ|<_+N6bb=nu__AQqhxnC$=?lB z+<}GFNldU-Z$`hXV6F_Eab8)^kf3gVvhO?ytTM&q!w!DIIUw547{dTEFyH%IC zl3)i8dOd3g?F9axh2L6rqB@;@M|lGvD-1aS)TF&{AVvXfl&M?CVEhuRVldf*#<8kd~D}~WK!drP8zAhC(!j;28BNAVlz{L9G zZ&+wFjp&e@WuJ5SThTS17gX%hwa0*Y-7Bn?Eqt`)Ka;ZW3Iazm!gvnRt`Xwv0RQSX z0~UVP{{(P_gc5)#iuJ<_SfHlGHnf*JhS>GBZeI+>RpC=47LnPP0?2+&Po?`7Sl@@l z+eXf21~1>TF*(@Fk+q|{c>Wwc<9}*Tj%?rob*#cYXn#sCv_{4Mzu#06)Pq)wdmTd% zydQ1g$0&Bk(M*GW@KvBt;_?Fp8|B7W$k1AS*?2_Ho2h~6i_u)U389_+1|HQhx<5=hTTU1d>a<;V2lEKK8i9aa6<#su%cdNIabSSY8yta4*D}{JTW;OJm4+Wl#U{pCNgX_ z$X3V5OaT(<*k@jRWmqz8>=$+W=8I~X^Nclp11tkKR+k-r7yDo`nE6S`Dr+qYT9>_K zH&+m*@2W_$N zkYbsQ0>wWMML0D4>xpVP9@BA#ekgAK5}iyhOhy0EQfEc^(c&X}jmWvYBXd#FYoOW( zHfeI-g*;5LYz)w(*12NW+_$<}=a;cCP{RrUoXtl%p_e3Y$GccQe!~Rs-B%012&Vd8 zw%b^Ek1S|gUk8L~HaGG5$~ZdsW1zvG$D#Coi*fssz`G!0rsb(u8?yX_U{}|tmzNMF z+GZq}&1$#_a;Wh0u_?YIWoR=B0wJs97r4)J6@T*?kv1e1vQ~bi-=~%T;>53OHWcXs zcL=q|KyO^+;uap?z|XEJa{exa3@AvnyW$p<8_XxvC>x>7c|cWOp~={2<}~Z?ozDg05IcjiK>a1~ zoofNcsW6^sD0RyLA*_%VTS z`5q)K>_Y={KaPd7f=`*ZDm4p`7WcmOje+bpp4~z!lrRUVJYl++IB#O0;_F@3U>J-# zenFIl=BM3<@W^9zmxoWDON;(^E9yd~`S5lIjN<3ocg^)d0>M69n=qoW@9m1ZOF))1 zvHb=#?HnTV{=DnpW%V^gZwn{5t878xs6|m^KlJe8(-uK6{*e|N0Nzvmr@2Wlk4qK( zVz0{{gj-E7>iv+C<+W&c4?$U2)nQ##M7}->a^ir2A*-;dte`}g2kyqE69H__!s5o| z2hp2*EZ>DusX{ff{;VViDP~gjd4Jvs)|$7n#_VUiK&nTU9d-bjxhI$6ASXdavjLE# zqP1^;Fb`Ch;9V6542s{VwbNlzuUAUgUZt8Ybxtj3X0&a`P3bdxv6f*_hx4-LGgsrT zM@vp0=&?{P5x2Y}Xf6OJJwFC=K7_7clpkT=h5R^_JjNPh1QALAG58-BupwPPxiR%Z-=WghyPR7*kKv8IYy?0;E=L6 z*piA;j;>HPEN?2bXX{Tt@QI#>{#X_R6lx9t@F`NhQ$V!v5Q=}d8Lcf~Jpw=~;N}Bx z_~f{2Zx{S`HT)N>dVK(~-G$g}0ug4MQgbxb{u6LGwDJeoktqwnLILhN^x@JiGT4GJ z+u+~IT3@3JTK2z}wr^$&3VaJ*G>!i-i|?m8Z!btcYqDb*+yus83AX_hNeIBmB?HW; z|K~!B0U6-ubgrvyd3Mo!O-y4`SX9Kyuj?R#!4`8k%l)`ZmPc~k=Q2wio?%UAcm2@> zXQ?m-nBgaQ$qZynT)}j9_9CG3wOe0Lu zJ9vSUdEqien1AtH;`v8;^WGna$ZYN6>kigb-=?PZ-=%xs-|-L>tWwr&M#k&?nh*O? 
zmbqGF>jF{3|CwJ4av#0=t3#rvr0n*+)uK%+7E=31zr|{33oVHAkHNpir>Ngzl$(e& zw+k-lb(vi)qAKX6R9a7SYaqJgI4Fa~a!;8&zVttOnF_)pS z81_V(!d-w9;(iGMX-@1j?z=&jU6~45&BE#U)TjNFQ~K?UKD4T}X5@9!V{sN*jKhS+ za^V*5-mMi;wfBhyz8SGY z7gavtsdDK8OTXzd-*hmSBH3nwB}Qru=AvKSC)>)mOfs+fes?YvTXF!{UY*^?#Oek9 z*>6K-g#3*Igbtv&AT%#fYy*4&r$DN1M8-1lt>(W0xIHx#2gH4;_h#viSZl zqUb6hG0+g0B=u?mY(NcFUgUSlibCc;95`93ibMe9xmFnZbLLxvrB3RKuwlk9l_2P% zrNJUslW*RAUpi3AxjG5 z0hl6$u;9Ck1?X=@h1$PtwbOCp>m^R2w;^(scKIarIF?VhHRh3WoC^o(WZ@Og@mD9h zS7-bSKfs)h3M~fSmvE>85D)f)=Ry?<#(UTH3|ej%gLORxPri$l(LJc``@g8JDS|@4 zn1UPC)lWPL=+lK@ON^v3fThMi;7wJ03smL`s4WM;@jZNAFAH1hap9vvI-cmq!V*nSDTD$MquJVFo7y_Ss!RXEY;y z6v%>u$=zcJxKPGi@wZy61%hL$C0Nsw8N1~Gj;XzR;F!9=uV7>8bu*@VfMcrYV{lBZ z7w*8a*X>ANkJICPq6PR#_|O?^uKxpt$LSK;{PU++E2-w$%g&VpoFdQgPk-Ra|6Zfp zjl0cQDP_d|^o+Ql_YW6+&Py0rVHDizH)s%{B4vLMKR^~I4GXyg;9JF;puu#q(M?po zfGpf}(qCz$bed-EnB@=S9}?PaN(+_(Hk!tKuh?XrLUV3s)z&9}`tt5k6w!&ktnYXVUEHwKQM2rN5D zLWJR1FNvk#=-Mhuhn$%{?jJ)P!Pn%XaBk*x z*VbeF{He=3FTVjntyr>HWgjs8adrswfjPT7$&&&eEQ9N~CZs1sPP;BIMVJc(lquVg zUz2-BOqkyLT3<$*c(0ywW-Ng4xH{;xo*?|mw%QrdB{w%)k@%qCcpxg0a)XLI$D zWB%(1V=dv#8_&ZD{bA=DG5>kB_?3Jw$M4g-W+@-HYGf!tvV#R^A|w>h3$z6j>v<9Y zbO#`DTR8zG%V0E2SsL{dKel!2CoySOCyJEH-i-E1Cz%C!}f1Ff^S~#Et_2y$Z;6L^qYNwUinZlZXf=;6K2N)mU zmBg3@uWt`%d642aJ3J{F zW&MQ!$Z-|<&%4-luAz`4kV%j%y~eF3nu7*=>gjHnf3;tZWlX$_jbqpi2S!MvaB=Jl^=RAuI%ZR$1P zW&m^;0FJ|p?9lAG{XhZbG1R^k39-fFK8D`93q*%l$5yQ)ku0pk4FPI*-GA& zZ~Y>Oalr&DSc{R1ggFTD{Xav`<=MnoFdJtYn0~_?9zo;K<08*Z&`k^=P7+F@-j&SJ zFD9*{Ty{IZn{%o>EABqt^Frx%jo+_tyBFIJktpldClB1`XL+Wa_Q^T#%OmB&YxqWN zH?+g!uk8j5~OUhLkqRD48~;k^O(TUCyRezu=pK( z5RNcj>l&a0ep7;mc5Flu)jn5=A<;p~06vc2KZ;rU#-g{6RFf0$dW(MKips)u`*$?x z#?rE{-26nSX>z7IkqH<_aRwO)0=#WYBk6Hx#*%H&*@{UAAzo1Q9c_&he0d1a839q! z?qF367^i;=#_1`Feeu>QbdT|KmBB>+1`FQv&x2~k_75)T`R_cUUw;K|N! zEYk);n3rAJr*HI*?o)tDBFofO7dZCcaHn9dpk>ebd|+{}5kk1Q+8AZG+nMg?@O#NA zPA8CR!P4r`#27YoXvqI$)$~FCmghIFFeVIa)I&BmxW7N)`cRS>{w2qCIMEFJc~jxb zV0ne!u7FggqCd*qP#QH$bX3RGhU-dF!+&HbY2#YY^Ts-^J1_cxzM1T2WT^4be$EqK z<%N>p11t@36=0}5bMt|Fs)fpS7Y-UO;&2Up`4QZ)z>m%uFaa7~Q*{UN=ezp~VbX5i zMUOVi9Xd)U1nV|F`o3+HtEjWNPWfA{>=t9nN06G%Q#1oOVydTp&JI;;fKQp|SU|-+ zei20f9V(%^(*70b?=1eKhEV8pzf0)ck4Nps_{_RcF)tj_n(d*f;XE!f$4 zmvAs`0&cjUBq%?0EP-h#2xwsb*V`Zg_BPm4fV~X`H@yv2H@yuM{MoS*pTORRcCfdh z#-oX6U`Vl%Gc_Kf6=FKcxJeNHJ)!70nd71%wX74{#wfT;uBG`zxm*X4uYlyU$WkuUMun~V$h0l zo{~+>S;ANDrGmXtMQAHnHwS)RLg{>wTHcIC&(%$ z0H*bjL|`BWG@(HK1SSCLC(t&C(WM@&OadYy2UeK`IfPETijjH+=Oj!c547*2KHJhe z1Y{?b>?v_ENLO>?Z=&-1)`@EO0%*Q9QD4*AxZZnD`*fLTzBtE62Wi0C$rCX{eU^wa z|9mRLhWGp8KZb~F^ws*kwo&jd3Yx9SsUU+zk>_1oPfdLE^829uo|vtgTO^(r;HWVN zZFW1GrN?1YM$mI;{aqj)TwtEp!{ODf`HfS5d!9nkc-|8yTjPR1f4Xp#th83PpOM5T-)fM5@c~&iLZIPhYTT|X zH*=epYM`M%zuWbTuT66$efi`U*LeC%Nb~7e7{>+%s=5p?Ucser-4N74hfw=f5g)2> zHQ69e*hL4mWJgJc7UlluTLM0aLZa9JF7nxuxEvfrO3gw_mmG5wM^98303vLG6$ON@h6KxoG#P zO2Wh^cvO&k%1p^9|HW1P6;qP-wN+ME)F!=ufc=w_URF^QAQUh!7noC7w>G^Y;BvTG zE}M}!4W9SZ6Ig&F`Y!gv4b(*qLR}QaOOuUY-$Nt)iI1qY)F(QgntOk|m2>Vns~I!# z*Yz!fbq^Iug;{lK0r%X|u5OmxR|wwGYxoeb4yuWjF9(qDMmHuB+^RNubzCWOnb7W0 z>4X71I=Xyv;wnGf+FXeeY2P|qoPI)Azo~*AV}p_?I1?yO&KctI`xrM-U5XM7VW3Pp zC}&TRV|@CJU|_wl(6~y(WEnRg4@h*5RHeETTh-&JQxBabFX+V;C_o#HPJ3MDzLG+@fI+>(5A!WxAJ$hI_cuNlV&DPH&8AkN89+XH8}w%hyUVRapyYZx~cRykRXUT|`U zb?(_xpB+I-0c|ECFqs?Cih|Pru=vTq?H}DI!0lAH!9m-Z=VzYcbZ^RqOXpoQvF2Zc zwdb@Fp;u{rmAFDY?pAi+{HZ;F>*}emw}=-OuwAWyTFk=#o~z(a(So)?S-S+kq3muD zmetqu>0H#LHbp)fSz9oROl#S-k+!~UziYdCUpQ*UVd|!mN%*_iAC90~e_W+8>&X4H zVhCRH30A5A12C!IJgo&W)%1yM-inJ-&Q#pFj%^%%gen(G3YvJboG#q;jB7mgOVl1u z_oFIO1``M8r32n4_w@kJ!8I=IahrrUe#Ig{?1@f-fe7<3>CPJTB)o_nZC9>NiL$%) 
zphnsSByZM0vN9M|ppCC>24R9vD39#+JRTqN_CE;46ies1fT@D$E1Yq@Pk|p1$AqY^ z{5)*ZZ|z_##G(^OB4Q#_aWw%vI1_IGnwAG9RnBTN$IL@$Ncaw_n$PV598TAvb>Djq zaXN+UeAL6gpp36hw*fxFZ{OV+wf%s&wmo+3x!*j%Do}|$hXAMk?}NSEv#|c|?vf{o zSjLOQWS>t!A9TZZr&Oel9SpNpAosrSTIK50xjto@e|WfWjhDFxtTLW$KMOuk{OjjmlOaF(bw?+|T3!Xlep zeErRibLxcQwmvz;qP)xQ3f5pB4n_1U$NJ<&rYV0J!*SdfQd7u3bC!))02UyXXZ;l0 zdMGoq?lsdQ>N|!tcod+@f+$T6-Pr1yTEZaIfdQTKz+KU)VI`QJJLp6or*cf6zklzA zf~TI%Ox8`F{yLuSn?~r#e|^+=HeJ|MyEOME@W|VM;*EhRX|MpVQ{IbHnsnhRi`(=l z$dUVV@Wv97CpS@*U?h}wj%o3sMnl`kL;VZxPdS8N3ql}FBk?%mov2LZ*U2S2dBV2j z-ti3oq@vF+ELvY%pHhvVj_x}s3>)MdOd7|Gk_}3cY0b5-ZII~w8-A|Svv*(=LSZK1 z+NYPi&(F}L-iQZ@$?{$+czyBmYXR>zncOFITlm4-*O|n7SOep8M=GcAa5W;Z3y4F>v%gq)lGg_b7lsy*gv|C;~72TMOHRgMeDO z42OmnvR5C{wxFO6Wd)MNFbeP_#r+jY5v7PTe<6|c2)D_tnisySlh;JVy?{(}H9kmI zV(C`95EMN9uDH;RmK`*R+cEjx!o5WD#YXqwhkr}cDL(RgJt;LXGMe$y+x1oE>_I+& zmxJ{gFr2AOkN6@4|5;jlJE5gbs>3%L;0!65vB2C9UN#1LLTF1}RA2FJN0lzC>hjPY3)vQpCp(4htI{=F)x?FAW% zypt}0Ii`!;nVu#p*-hth=Mz0Mnoxq1t&D69%hSL=(p6uR6veVVo<8(dWZ&S%pE&=t zrOi{(KdxJ@|2{yK>xuGcN5s-_+LBWf%?o6lNv*q~cg@zTq>HZo zrB-PFUgw(x(ElW=2*i(j;V#K@w`W4_!5eMPhBIzE3;(pIiM{&|UQsH&OS44&s$aPn zG?3X7=lERrM!r15VI_rvLU{5Cu{eT`nk+4ykWPR>=NW^SdHHN>8d0S|_(pI^Z%ysT zh7a3nEm7yjNj-uqGwmTwaYM^8lsIHe){0yfsyPosgl;#`9`OdgQVci@FE{!Ap38CL zX>L17|DBzO?|)HSVkFo;CN7zz_^U&R`@4oDnEb7Y+c(%q(*ue-suShDe_1_Wt~lL{ zxM$gslh9HHQ?I-{?3uY*t8=O0++!uw{jl`Veq+uk7+!pcQ|v?Z_ZHu5*HcAS$-@`C z?QipHOiM2GRhr@Q-5P8`!n2!6S9@$A(-dU$G2w9sU37niRjkDCHy0l{5c1ntR;>}rH)&2iuAz9IoA4&zaKOFd`^L5U7ItH zqgob{`reOaj)=UJGSj)6?F=_ISJ{`0)S^wp+^vE*>9y~|Bjl!#DL74PsonZ_8B3_M z0VPsWUWIP5x1mAlDGGS_DZ#=@sU9EO@E)sXo6vSZm@W8HdQi(7VqBrLn$eOSB5Ghx z6W7%iWyv6i6MVmfnIS*W_C{`}mgMSq5h z?b`>>{d*1%?u4@IK^qP-@cQDbIt=uVP8-*7J1S=-k4hU?6sL@S=I>NUw9PmYCv`cK z@SojZsFX7k#{96Yt`gSP=HcC$n%im9tn+dtsQZ|6!1(m}nK$h=C&ecYJ;|V;D-qX( zObncd@%()9`%hnuIz&SrRjVs5$nVjwNn*KvioQp(8)&=(c+(O%8$On?T7;P~zwB_K%o(6=hkiVLP^%c~G<7fV! zlyWPhpRn{Fbk@sKVfX|oIxqTWS!VS8 zYkZqnA3X4*Ngf1*j$vH?cEKKxN(@Xx^5OkJV2Wu) z=-YDhE{>)Q6D2KuwvFJZr%OTTP*rigoXzzw3Izp+EkaklAl8!xQ_C~JCCD|spM>gC z7!T|a)lvNm#9-FYNtQRQWuTRQk45D(wr6|X-&fn%#XdAtmxeiV>ZQ%`jt%3#r2DuT zdyJ{2XLpE{a*e3%b3X-lv`Z`SA7%a7i`&iBADykL>3QkkYsP_t#gj6qWtMgwz=R6Ae zqcF%vv1^tw?W?O148NW#SwQ-C{JzMYZe-p7MecEK6cE)&TwpO0= zpGXv`!Kq)0^a zjo8V30rvsD2dGXV9FY02UIRc8V4QMi3c5D@^XRKN1U>9O1@Y-upV@wl=`}rOMbo5? 
z0@;GGYhzQC@OXUC1*YLShJsmzoc}_889;w(LXTb-5g(3%lLeyu(MH2V0-5MekFv28 ze%jS8?X6SGZ!DrX4**yaei_!#g?15aTi6W)Ui|t=O_U$iew(Bm z3auxvMgF^Hx?{#3zHfK?Rv%V@DDu053zwJ%Ql|rg^7T`S;H2o%r0s}Wkf2bC$Bbjo zjN&WcMaD*+SdtYy(Ly->3H4Q^4PjlYzig0BVd}Gc_=eqXx9LFUr|0BW(e>1vW!76lGfcqAWBg@JdnTCs7 zWWX;{SxUjr?2}rJuZ0b3WVMerN{4uM(zZDxHF_9l>V3@F6b)`3~| zN$lizQQwO$wxc+iBbZ$Y*Yz8m5H!*yQ#gD<&rHY zzbr{K(a$I&Ljay>a>mu8&X{Ll84SnK!X>~*qQZF7eRc+YCK)+?{bv+4MT(Wm)0ISb zvRm4W@faEkiF7kFi-01?f6=y5#P>m$Q^<^Adxv7aJ_su@>0n_T?=K&kn zxk^is^(oGdKDO3D*5#8(k7Gy-j(zOg!MIoMd5!p_0%c1_rrK?eQvV1wr+@@)Pxc3` zUB9$ryztewuGI#R2a|JiPzDB@Fi#rRonqeDE4?;btXu)sDvRnp@k6}P-JdA0qag48*myS_A8R63gAC6!T_ z4@oeqo2dO^uWx_;RGnQjQ@y9hW~@@1<%A1M>9Y|mLNrHc#0)4qhJMV90o=iEJWz_X zvhMxaSRG#^C^v@{E-HLmZ6G({mIKkB$#6*;ek3!I@i0IpOa5sl@xn75rSi_g33?6} zhSUA`r;!uR>b-~t^uWo;xArKcGQ(7nu#1AKsnbGp4Q$+cV%`0N(z*Dm?kCE z1uP|@XA;N}3`yKDT*2DE!CrXj5^A(DigfpiUcSma9#=Yc)vjZgsY8r(7RjgF0ub(~ zj>kLas({`C^q&|KitCL8u|hH|yP+iI5(5rCUa{3U!}$AKJ^vEB*)7%Gm^cFN-&u}Y zhwL1LsE{o;;d%UxdyVcwXoEdfMNB%!fEtF;4s0=D!?LSOI)wlvHM!)RkdB9arUKs_ zKfE1!fkn?cZm)pQL;NEY;N2p`5&Rsl`@ty`p9tWOzY+ex=OQSYp>Yu%g9^>AF{0XCUh)m5of7?{jpST6t*PEYt>1ZEB@2rh}D(Hh~Vy$c_9yZOi1sOp87BRF~*0_ei&$mQqLkKZVYd@Kbl zvK9m6BDzOTo1r`bd-Ak;Ox+v*LMMC3Y?JKqmyiF6^%LlE-nX^dc4i5~|8|kOggsnC zFS!dH49t`;ZfUs$Jl0MV;QsSXby~W}L-ZkU{_edd)6Bt2?MR%TB;GDr z|Bf!-?O#E!{PYNX^@r|Wssw-2hyhr{K$-PFY}*IRIQ(w>F~WDi#S2mNQ)V*(6RVe4*4G~Kyb=#Fsf>v|^P=iY~8Huv+c_}ZZklQ!(3Du#Py-koRWDIN>vI|%7^v9efZ7Ou~+s;g;t>90elxEa=?konmw7Yt`PY!rLuf6#-W5AvyeWqG zrW;^5gMx*hV^^`}#ntDsld7-)vrr!ndN-A#3@ZL{Rko3@DX+?)xMYN5a@|Iy`%CK^ zhy?W@<6Beym6J2}&3BnOZv*`1Pm{)zHp?LVAZ&!ptOCMKZd{QlpguKWRUjxcaco*T zg&)6)g8NOdv^)fJw1ZFc&V(X4T7-?G#`lu`&pjluJ*~Za8I3xQF2hq!`8&TKgvb|u zPqzNCG_2Nbw)_5*0{bTgq6^(6mG}_p6WU+ot9a}Vw*vPGI%$zl$Pa$WOMcbpVH2-e zn{spGBoWTpE8L$g<&$J%Qzc!e_GA6`!RTaGzK$z{YC(pH`=f2W8!txF=Odr!=lQ4Y z8H-}1uYcrh4`Og0Ak~I32DcAlpg73^68W#&LK=Ro_Yrc(d6K@p{#Z`7G+1{4x&^4L1L~n|K!bAYF~nq!35_S6+;2@qO&DByKffTWnRTs6 zsY1=$9@_!IFKW8K)4fFYoa^3eSq_1g%nc35;D zG(HXqmv*~xjF*50Yb#_u`%<5_1zkaj6g{2>sh%QX%opVmDkUJL28z%)ihljbFsy{z zBtv3Uf;Pndcc2yX9Xlsl-|~OYrG_7*-TyBN5n?~L@U=x1ZpXT?YnEbc-_<6nj1>2yUg{g(8v&|N1m6)7(!n(#!4ll9wN;##42J|$g`x4rF z#vD}v%Kcv2<#_`LO}{1mr70bA0Yd!yEW}fP*1f+pkCgvZjEVWlRGQA#RIU)6WM??D zaO;`&TQB((&2!|%A3A0gZQC8WEauZb$)QwcjwDx8xzL>>0`H!pw9Wy0&m`R$wxT%s z#KLoX!QI~v{=D0Rc#Dd>|8+}qRm%PyYmyE>+ZW+Gl&7`UFNP@1hwiw(gnVP3j`%0b z{*lBI4T;-dwm%+#aa?v~egpOnK`l2ObiG(Uw4p!9H=_e0Y+>5LK8)RoW(2iJ_DRH! 
zo{)f6qPj>kcEJ5^0|ro28|=H8m!p85R0^^yNQw{r)w0eS#K#Btv9Q!g>^&jF(S#pS(C0opT*OPM-bQc!;YFEuFrF>TZz|0c1N@WB*RIB z-`1<`i~4TSpA|e;(VmzIMa;e6VXuY*r}g}n({`6Xvp%j$db+S&;zc8Xovtk|%@|;v z_W;BGl-3ZKqTIQLIw7v^+=6{>c1=?=^!jjRtX@1X<6TB3z*0O_x&H2izh`LlmQ?5{ zNzwREVSQJ0G0nt~DeRWWHp2lbH(=!8>s<=%hgcQ(GwuHP*f=(jtE9l6YMZ23WLW z@#ZNvRd7zL*bVCXGu9Dx63sDlj*U})KejPNuv9sMT4>K$^V)DhW9D~thW+I#8l>pC z;X*Kf_o354Lv|RfiGgZc$a$f=Xg;hdydhqRE6)-9m{E34{(s~&$x_3aj1pe(oZH<$ zTe8jor3?G}@(Zr%nWqO%^-=#`_QIt_ue+1 zf!nr7+4U<9{k5{RWze2HCcIFJMt^A}+t~XOtD;bF+QcxgRI=hs;x{4qORF*KM^z+> zLBLTdkDls_qr@OpM9#OScYle0{d@oZ9E-Ga=egHO;40M%P5W8(xw~tHxGD}_1-T1o zN>&lcCD+nZEN5w{t59135XkrzBo;ZQ=A8q~sq*thV3I7sI}tx{dv1YiyA6Q*T~zu` zpGMOy{fACmRRI0FL-Hd^K$rfswoAN#<#l{@Ya<^Vj^6H3}HDm1W#y{~Iu%}PV`TC1#y@qW^I%ap^U(u;g-oWn*AL!k6)Ma~; z4o3E5ogg76<8D-27qL6D%Ng0&D?9or6(yRdjR>a5DX|ZMcikmuA>>Dmaon@=w)@1R z*T`9QrSm=M3w8H%Qe0&n3B;9nL9<_N;QW)(Z8rt?QNxua_lbCM*S0*EwBhEt&j6%9Fy%6kI7Fb+fORg z5nBpYsPH$^!LkxJ?*xYL6(FTg+%Y5E&?NrUnG})I8*TPjpx^IY-5gG&{X1eU zk>o4RM!3FfD1N8lN${zM^WW!YYe_5)F+c3(%C-cQ_-03G9mDVU{A$ixmcA8su<}u7 z@n~J=J3|%6(_J>5uys_2PT0rd+q{KI+(4@v#!fCV2Y0i)?CM9@+j#Y-hbg@1VE<~tkVL(xnAx=R>z=n#?cLo5y3J=b>9u`SdnJULkE(bX z*ZCaG?%O5moFCktTx{3G28dylu$j#?7bc&coHL3DeR@&%SwK{fEwwt&Hcg$+iJLQH zHk;GDEaBhgUQ?Dyu0Z&cskC#kM&Y8|jiRl@f$OLO_vF=`N8D=^BtOX;5hqK{}L%p*y5Oy1R2=X3jnMd!Bpm z|9`@t)-w;$0U0oSqPLVrJ~y{{6c3 z!;nEa3PG@BYa|ETGpqr>*>2ruFrD800gdC;l2boiP z2TA;-=ZYKpHK|z~QS`y_#TgR7ZoakQlHFqLV~R@dYlkDM(|}tyk4qCpg?ae;!o_z+ z3Z14mjSN{QRg|iV_&UT+xaPx9IK9l$yB)Wayg;AF7-)s3z>OucmK+eO4M~s!W?4$l zm`%jOBuKclZMoVE&YXjx5CKjPdufkO}E-WOfXU~o3(&?-3v z1zrGuaxQ?6@jn3?DUiWrnS)}k(gxyW@b>9nw51?F{MZhq$O8&Yj`(79FlnHoKZl_z zvdh5p!YDxDEM4>@w8G!g@N*z?6t8EH1DqT+GugtPmAd-o8(8-iyOS}sE zj9!^}5@$1w;JCOyY*Fr(4(mc1W#Q_LxV1zxU6OuwBN=|G{_#dAXah7|nC-$k?|M@X zZiHg&X9@wiJf6B$p|a~}b_!5kyo1&(^YSCyZJF232NxgEZRM0k7XNJM#z6ZGa5`2E{H; zKlHYXyc`=}+$-Zt>vJ55(q(9m90EZqn)YA0Rd-#a>-1S#-ZI9XhxQL`) z*pDrXZajr-EiIqnQ^$<`njK^^;}#RI{@Gbr!;7=-nr!e$L(T`)BlMHagl)B}>%B-3 z_OIku?{JqWI`w@&F5Etg>u$VL^{ zb^?hUxQkf8v!Bk=`qArp&#wg&p|7sH85;XZLS}!s)G4zlg}>lOU4b=vXAQmS`Pb?h z6>y;CF5F52q;t_z^TOVz`rDk|k_Z@Spnu1Ia;AFv<;jP>py8I!Y|nv_w5z#;%+hOU z%tOJ zh>^SRi)JK5UCh1<$(*wlzK{jLN9+9kF0KVWV#=l(C`&pUW4_H3Tx@G&KcDxGZvkD6{K?8V9p>>NA0IhO#)%Yz)+)cb)z%rEqu z1f?1gG<5%q9eE15ZE%f@n|UC4y=eh(%klzfrHsv=-w^5!V^7gZl+it#{$a@&I+2)U z58Iji*{RE-Q{alLKS0qT-hl;LDaQ12&`&ZjR#f4It?cx4#@R!wPK#VD+S~#JoT=R3 zrnq;Tl+W@loW5WOTH)(^CGd~Pz0d6G!}mjdH(|>b(yJuIK{UcD zYy0b2=iO)C7&6ZF&JNMtcqh-fyQNv&s#YjgpGpzj8z=iqbk_y_DM6LTl+Es%8 zd!i6#z!y$T*30oU|IESLFxW!+>gf+@1g}+uoK*#7c3jU9%Gm1q?d#VD+=hVo`(~|+ zdDh&=pSPs=iImV*d!RYztje&J^WEzX%j0Vp3n4o%s)GbfdRT^@uR-3=3OAs~&wbDf z=_y1g(Et{68Y{Wf46a=)5B!^^j=4#7ox??LLXHEMlo>2Lf2==WWniD>*m6pI<$4*` zz2D)%r}gE;Z|tvocaKYqrqQ1CV2Z|T8bxNa{b}f@L>F4o$+o1?LY{)Gm>x5uaA~H{ zy0T`bE_BDD0WPn%$cK2(>jcr5V^$3Zw1&F5RT3Mb5Zr{|M3Zr?Y3QCn!d}Ri7MH}r zG$~IP&ix+0D$mg1rt%w0__@1w1z|SgEInJ5{@oq6K>BrL2ekTbXYik`xJO<@ z&+`lFb}Y@2g~|3Yk`_lxLAcH5_WxK3Ur>;jes}3CEuu+u_{kIj--}{@_k;1~pNnX( zRsO5`f1BR9?HHu)%i{-vi{48$eWS;nQ_wG)FIRnik_J42!s;HPVHI>juWS&tXdC;E zfOXLME{xmK;&{PWi|~0h=}b_*HS6+JH!kTy_}bpUIAq);ZbW@7t|x8UKvi-K`w9D% zM`Ratq!yBHuBBs8^#=_*qza`Yy9!WlK`Gh(ttiJ!-~r8&6c?N6R4VkNDX*y4wa=%J z8zGE)-`}s|zV{LSbH1(UuF*y{=1lDDjK$^3k&UtXa15>Y>w?|yy+}6H$^LnRk#*G} zqC8W`PZpO84ByZ)>T#+dtu073iq1?JIz>=P#eh|$

    y~03&``zf2-P!2|s-}={@dYYSJ*^_ak)5 z)SUY%j;+~$-zC3EqJu&P8kdrYxvx(p)mU3ys=3`}DI=rJCvE=M8Ev z5BTn5CS{cS-lg|6DD_okH-<_I@k@e<$$yG;S2sA5p&B;=O34LQXX)n>qfGb|O|a|tf8%XNaI5u=(v1Jmc@kJ= z`pLFJLV&+#%A(i45~H?=?=w55W!}aU4~|%lnmc%X?-^{KZYX^qh%Ufgod~%|XdReS?PB+F ze<^D;J1K40#eT7>o%V!W$WfT1u%tI3vTK3gR%aF0)ls~*W>nW_lXzl!q6P6s<~q{o zUByn-b6;IloqS-F0U%_9bejHtK+*U_AP_6EqhdlZ=_U2}o-@hInvRrOgGwQrF0p|g z)a=}H;)a!D^45FJKAgo^efQ%USpUx7wp-yrM!|{sM^KdkzuN760XJo$*Ax%u{M8pC zBbHoOG{H|8m0gh_oXF~SoZX`zZD8d7wyCbqoI1yp_Su3aMVoNhZsBm--2CYw^mwY{ z1}{I(LTJjD2W@k?8HXrP%6=^q-^+~Gw2|K-yZOp$i;C;QxH$ER?~%o1gCa=Vaj5>d z&kVb&vHeZ5x0>!wL69jEaAc9|8KLFLhYT$}UudYVoywCgbul(1&`XY9vgLmoo>V$f zY_uv%ugT|i&t%^svrF)PyDqde?$^{!FT24`>BU1hN3TA!ucdsRUGqtm=+R@&sfO7o z%8RqY-;2#GiYtkI{NTnG8~!E|2bNjXybc}Gxxyu*0M!B zUzmJ8t#1&*M37}Rc(aBn49v^|)3D|ANo(LOtelGZIMJjwcfVbf`4}y|R`cwgw!bV_ z$)VXKj*oJO<0Pz83uX{}Io13O4bSYxr>-=x>lYc#D0R&afh23B_p|dCN{<3@zv>Wp z$%#b~+cDZCX9>xTUY9~WyI;1r5HLjuDJDryD z;$i!I;@`~F!xpthK z6JC-m<*owN#P_5eK@Cdo_d;fY{qH|o$J54tJ0t&8dsO9*TN$#3p6aYBuu^wRuC~v- zI#g=D{V`FZ_$Zr+?)Sa zuZyv4WPvvzx42I4^(D#Vt$ugzD&0b-9Pt-}Du4d<3QX!4gI*yw)}z#v=6w5#kE5j4 zmS5?qAC((lCuUGyT9%@V263PSJyu>mAGM-aGk=93)V+R1inog)9;DoaN2q^ofRjq= z_U)`)z2@I%K2AcX4v-T-e7|k4)w?NNJWDsY^2G~b-VYSiOWa5IXO2Smlnr+1;lt2!>9g@qU)-G|5kRY&Pha$8$Soa5vN zA0b<(rC(D& zp~}4gPx8l5V!Q12j8bytODdfv%%q7=}#;=f8EyZ!~3>UC|wibXQgnq+tgGF>_VN)JkP6~H z?@%*^wKva%FmomVnZIBK)bIu5GWf0&5Nv-Bt$J9uCBzSqQRyAJX7;%7e^Slbble=6 z6S8qZ1U|v1;x!V|bGULoeg4uT@bB;A?I|R&rVex5(ksx$cy4)ma)UgS`0X1Z_2M9o z770yDF_VH%k2#I2$G-s90w;h`b$G!;{5vT{ALP)`%WwMyC|F_L4+%Jd_$swrE~t+O z4>e{R8yqF6JRvFoq<;!}6iG2wPj2-)rkgaueR|V!bjh6c-yhLYFv3V-FQqkhx;CT_~imXj_OH$Dn9NWH;!`q?zSr>me#jJ9>^kda;~|9dc-dC+b^(mt`=7 zMj2AL3fk6}xi%P&YfP(r8hN70PI~{3jN9_?1ev~uBwqy8^T2dc-8qn`gyZP zul|tWoxOQq^Q0(cNb^#<(O+=!!&Sy7G44A3#9e|cxZ{fux(h(u?D{EXfV>^xX8c{nie zH)NMs&KqZfsMeldb4cbr*U5uh^uH>80{by-eu;?}W;F>C740bTjib2oMH6PgM@(fz zP!NUMv5<+IoTv1rgEZ}ydK6ffGbCtMclk1S=WsH6M_h)D0@>MHXUsM&<0h81>6~$} zB~1-?lzf0}CXfi4CP66Z7{F+0v78>H7T&7!0nwPiu|25iBu>xZDB1^`j_; z|0%`(-SsDtc>}`mlP>ws4BnouE^e(H!$9Fjf7^&S)-#B3lz%^J4FMZQ&E+`4nPcpA z`+>mwzReQC--L5)et}*7KIhZgxFJ6KT!^eR2r}1u?er@!m$) zNnhhy@m_mKtqY!Qa17pz76BSoV?ko~yyw(>g3D?KuvPsEb7ARdekr~k6>g^54FVNm z0boTLXf=Hbj8~bl;z(fsK-mUCG)7wK(#ok`*XYT@y3Q>0)dXXVY#hH(z=%uYJJR7zW%wz}>N4>3?%q44N49lBCH{=5&f-w% zSrK9htU`s#L!7T6+R7Awk^sG!E2Q<4tlD8X%jdVW2=?VI9DsX1=-inUSJp31g?(pQ zZtd$ZL%siJLF!pV$C=;b0SEn>kDKY=qc<=vac#yT+wbe8N58I#R(bH@S^om#|M;Qc zXLSgY(_%oLin)juG}#-XRQt2@cGq$?%Wh;fkuadmqqFa$EtI9Y_adcOcrO^9E`J5*Bt30Y<{Bw9BHm zzbouMh-RGq!1%(!yoM0Rymq?Lj&?z%Pak0|*1qIg=Q_WxN<$zUtg7 zaWx&jf9oAD>_mur5PI~rawEm1aV$4-7I@_}#bf)T>9QQD6ORvIC0u}X;G+QYhP!6h z-@sabcdG$Ja=Agr)=1r6KjKgtbp#Two!V(*fuNMHqe4Q_!~# zi!@hOxT-;ZcM_dSQ;GozUhL+b`fcN$V(6uK)wVA0W+zn0!s_rOkl68 zYql`VCX7?rrgJs5MAkZow`r=?YV^)Jb@XXuY>n|+Cr5z0+cWU{u`HhPu_tumUu zS`nobUe*1#gl6qpuUbOVV$ZZa>nP0fNarqs1KVDD}+4$i^eIw{VEd=Ra^rE=X{aN1j5JXDj0IRdZk>`In?> zEa@0Ed7MJZlL=`24Ym=J8yLUQ%CnSc3)x8=~bd(@8JY`baEA=i{LC zSE>8)c}P+&R9@YRiPc_#pZh!bC%X&XJ@~MxbN@97Ht;|eeA%!LU6cH@CuG9ZMN;g2 z4~-mzsZBuUS;~KSR$I?x4iDMm^UXfVgh;DC%MhQ%?tU-En&V@0TE{2tpuNvDtLj}< z8m}3}W5?Rq8>E5n;WQl)+|n-dEyI3{^_$o8z9X_BS#_S4!l`J5Q<{JF)KPX*GbY7& zrIvK?qLsxwG$JGj0Yea|tCNeJF8JYUF+8g1e#ug$S4$(duLC$2D0*avanvf8_7>$%Vl-y*fh-!@p*qazU8 zI%wkk>vql1oPj>&tE!WDa>e!OFRa&+k3Xhw$sYZ(KuT7lh5qtNf1+$l=&#c7D(*h& z+uL&{#{!Eijtl1gxvY{PA39Q?gXClw)l*mjfO*Yd@{D^BXG@D-Wer+G_5xF98p8K_ z`H^acOr?i;r_5*>>!&JKRd|?yVp!zpqk~{p?>pl#6s?^2_M_`tJQ@>yXF|l+2Kd!$ zRLnrO3jD)c6a!zPYzyV_J(KszVGbVMpFOW?A^b5%rXt5*?|j4lizGu^$m72PzcYrD zj#j@iO%83XkBn{2uP>x7E!elA=dl)_J1tqqYe!x9Szocc|jLY&z85th>O=j@9FTFK 
z3!x-l1o|8ap6$@z7$o)sAk=~X0!mwV24fuSfTHzwFUv0ba4(zBI6rP{iOPd^JbgX) z9dSg$k|$y!2z&aHxI4tCsD<@U0_=Ke!myic-))h$d7&dWyvv9n&T(OcJj zj@~D(f_0i-lfS$XdT7Z1xVAIrC4U91vpuc0^SS=Z{Xm+TUIF0&0{&lhtwqPZ4lGq4 zWC%%XEe#lI~7wo|C|HfOpfOlV^3NxQBiL zty^ccjgRb^^04Hcdv=4RwP6eH50`hQw~-JE-~{e1186S$3RKM@Mu_?WqlPSLnW1X8 zl%ay9YNlVyDl9y8I@ zyUIv<6)JA>ajh_4^2E55dkO6t`o)21eh6vwzfnS|QHhmvx|{dQ2I$N*pMFyB&eneH zy7rP&;j)B&$&N0X4N8pRx>>7kZ4GFdR%NO$8ma%iD2(!~zMGMs$vtBQNE35^o z6XfB5dFc5m;8nG93Nb{54*;~hz}oe{WFkcx=BleKHtRARDr21qqqUxmph}^XAYN@r*q_9=d3JqTE`qESHRY zsosP0Hh(tr(}H#zcH}OMOhQPy820l3mfh)wtYvWqDf|=pD0&?+{yOybs_z7XBkSx& z>&&$H{Cl{#bYQP|4x0mKi$NwT6J1|+;Do@hv}joOJfD-svlaK1*%Z=?PaLqyPO~SY=s5t zWQxyZA)>z=jqm`i{a_LkuJQO1`L3l2nMmDjLy{BN7@oxC8H%+edb&;AImKXZ4V7*Q zxRPmZQ7)HZ+Pb_%SXw)Jj<`%s2}ce?$Kms|xbbp;l8eF$Y%?3I9=l*VH7o@8u)iw`N}!$X;p5}+U&`! zo`n#i;b@agDt#qgoGxI_7R)z@eU|XaC``tHW^FOY9XbMah2{=(WDJ z<5z0D5On7VXrP}QZPJh}aiS}l>%>r+z*k!MfMx$9;n`^yw!wO3YtewME7#bYbf2Fu zr-Yg(iwE}Kz+~P49ZD_#dBm~}#lnbg z+y3;&IdG>5%0LW_9?3QP|M=i`6SD3sa=0#)_kjyeke#0dF+wjAhlY0*0}p-u0zg^| z|Btw80}3iMts966MgvNTq4BQK$p8ojJ>fWhVfu%%3m;)O@Juv79{z&|!TJ7wE~GPx z8fB;dE~Id(502a-fxBTU2@`m8Cnn7hltiC(;%9azadc=3PS6b!g~7}r!vKO)R>pCv zDs^CR+1lrbl_RDb^0FjaByeQ^+pyqlHjRI%rC>!xpNZL559E|8v1$UM&+}1n^Wc&L zqI)VqiUfKDuisEJZ0P-MmhV20WoT@eZ(RV*wio|9$uw7jlSfstu;8*%Gwqa2i-Z{m zEK;JkOwP9G%~en`|hb9trCS=S<&ArLZiYHYkg-&0{BO) z`suwI-+lAr?A?U#uGa2phkO_@!rA%P12rycPvg|kB_4bVe zezUZ$XOP5Vs6a9aui;?l6O4@PYOes*v_~(40>ddzr}X?aHO^E7n+*I~=MD>Kd(+V1 z&t+RqG;Cfz@6UZRQV4Se@8S5$ce(#*p~&mqwb>o_4}EW%pD%d(QwnhMa0i5R#T8zY z(LfScdkR5w?C~%BfQC6*!sjtan2}(MHoc$0$&!RP!T}+a3^sHw5!=iHHTHx7=LD83 z=3@rXF(cd-Y)SrJlIFlbY;9F?DK*2>67ga~&5oRJZ4zvW(%54XShX$+w!)6XP@kNL zUl>N|1=ytpMH(+&R8uL(E$}OUC5h{T@deKu`86{tT{O#?&G0(L+2@DGzGJ4`Sm=T? z-^2auNKlzxaXE6yM_$Ntj#bv?@ap>8CBGa{9@)9{^(|_N`_cw*VyM44(k@H_rd?N^ zcCZ|Id3)akXgy=xwOqnK87JvBofwg>OK}gZmvFSIzi@Nh5O?#jynx#99*tf2d8CWt z0?3#^IRFR!k_Vwg36c=o@OMD4sy(6ML7U~Hig^ou!W+!%829~Fq&@l>{nl5^Ze4$I z;l)!p2=Woiv9OZ4Cwz2cCTigK1SMk-oPHi7@%^Dd|IbjSpu2w%mJ9~f65&Zkc0ODW ztGd?ioA)m0xKah)eHS=7h~fuq5j@J zkDjQM534P`>Mec!AU$Q<9;RnLTw6WLlsISd1cRax7}%UW;6z4^?>FYsqjjN#+RzgK zvfD2B63ILq-02Sh?paz&^dBW{=)=gIy$W`X#4zmTmwmh^d+bqE+kbUfTeuCkDs-4eTli?p=|<&Y zx{`0dxv-lI3DJ_o2DbT{zx+l*K@ptrURmJ%LkS|nFJX(e+ql2i*&shc_1n|`$PulT z!d^Z&Bb8f-;IR#8<8V&WTSo7rFpyhd{AAjDGaRF#bo*x9tvI2H%BztLkJ>D`F(gg% zkbIG>nmCz}q>{M(Id8g&vvLV$wF_P%SIyM3tN2huu}r?L2T64(JNIUb97ORW6h#l7 z5g@8&hyug)+CU08YUIhL_m^*;%1Y2n1oN<#)fMv{;y1xvx{MYUO-DKx0fgOemDv1% z(PRxcR-N3?iW0r zmGS$|N=W5EV4MX(H;YlY4`p~xi6Z%Ah{$H#4Fo#Ui1G~3>ZNpOZwd$y48iSHl(&%@ z6OF7f@7eqa0?8KemtUJJg#R(Z#e0iHd`|s{W`Xlug1o7MWgtptM+zb$n`65f!KK3I zd2knc4anLG60W0fWyJ{c#gTeTlyw~8HAJ%rDxJSn&ff`SN4_U?vZCmo0fyX8;sX@L z_8^@RZ9w#0IhY(MnRC$NyoMK;Aj8vM59V^M?=zKlT%5pfwO^)N-Je%Ue^~;E>tH`< zf)bq?zV%g$=G*c+aVpX?vipnq0A=^F5K+zzl;7ReMgW53SXOj1_vluezZmF?CH!x! 
zzt=E_#_+V2e|E_i^VKz=7ndCi&EQT9pk%S1 zeA8e5Ad8$Q>-DLx!s9=0`j7IlIJv^-c;M3CzF;pCb8TOrUL&p=)|I7SUf3m*aH}3_ zGc2e2*0RosUE)8x-}^C5TIz#C@XQT#{rJB)@(}0}8~F8e+gA*D3eobIMPJ}s5pSlcOEAO;5341Vbf5kkai$xI6zodflDOxnJb$Gzii|xHB?@dk zEw`mVn=7cU26&wpNA5E4Tp0qd9zHv-=^pNCD^?9L-v`cCZG3L?olmi?GKDa4k9lw7 z5<|*>{qV^D{^Bz$3uPy0OO8c8LCzbpI$B~&k1qtw@S?%AysUr1%hG#K`9r8X zN(i4DfH5~*3rT_YnLA!9bSUyEyXA zwxU9fts;cK|IzkV&UNh;8Q(hN+E?*VrVYA)@`b8wr8Dpjn=uSHG^Y&|G2BB)nwgKFEJjWijM(vW~p1JP`5oO0r($7m}5$c=y9H_%wOB^z(IQ%VPv(p zRj3+6qdMoP|fz0*@UyJK2hG0Dbj^=$|`}8fs$@Wm=zU_AdYNy4I zN^%m{`;2f{h-LYW@R%nrlovzOLt7eOb3HC2G zBDORZhlqdex0Qi@zC%wZACC(eq?h*zptd+kQ!q)d^?GH}Z5k zWjiueX+^1xL8aAc(npUkpc)KHUNjdiUwCZ%;06`>eo)uE9ge=rP9_ zaQ(gayr|5}S)Crd*K5ix0V{v*PANv`5EC(CLR<~~p;BuZWntObW18>qp?M6+At>$^ z-2;>X#Xi`MLjMIYT7j+1fO1r>@wL-0d)+AX(a=C4x8H0JNuwsFFbRo?58vHzzQAsJ zPMeBBYY(aMEKE}VojzMMzG^=NyvWg2b%>(!vR)EDLp!^rWy#*p5v2A9dW%jzqNX13X z+v*w1WRAMo};8w4t|JDo~pd_s3+7z+u>vw}fR!f)Ak>831#@{Q8oH5Kdq} zN(ueP2w=8+yi}@DegWL%!#uIB@^G4|972a z>=^(7hZ(9e#xP)OXY%{Zd!(kfp2u9tYgvya^INV1%N5nWnq*?45RffH-LNp*Mot2p z@Yzg{^R7ZO)u!D;Jsnh)H^76|-|G{{o>IB;B zDWEN47{K@20_o40#}II0CqG@o(gJMIo(v0>8Bwt)2?c}wjQ6h_-K4V@-o5(>ZnXgS zH&A`OV!Z1J9Rti~E0WO+8|{CewuZez<5aT5ra0xivyv3X*H49lj5&t?Wz21>e)<0} z=H3m^X}x(FMx{LZlg}g09$RLL%7lm#tMM-QAwu$Vd9dluS&a#?=cE2JzFQNd^n3D) zd?wD+zHu?5-y?%|JpXr%DLs4nOV5zIH>c;8u8GE(nJwx2!Y}AYOBDVeBMHN|pX`Y< zh`4`k$hm_+)Y~lpoEV3Jkrj}h){G!&v1fQYgX6r^qew8Lp4sz^tnqfrZlV{f4uJ&k zYsLi6=k(I$xlqWXg}APB7Z1BajHX>goG{pwssYycQjNiYeDHR0Ak`l=653X6pPK3YB}OzPq>q z>aQwZ9Q|Z>jXBJ^2|WBM#HDi-2cgC>ky~!g6almj#cj=y#T^${H7DpsAJ0s*T!jv6 zq`|z7FX-CXiz;pPyFB83S*O0Kq}YE8#CJnmdp)n!?ucEPAci>$c z;cM070H-ZeV?3G?j-oys$}{x|DX8p@iet}i0OBgf0gql?l!<{D|12gg2fwC<<&3Iq zW1FoYMTI9Pi!G`{g|KseqzJ&{IFLK7X3`7c*3KvE4nQKl!>uL|k1lV2P=`I`YTa|S zshen@PSoEf$7~?42Il_jnZ>xOWQC(h)>tAVktP8sC6GN{tDt`iOSLDg>5_j~--Ri= zpVog5yG56W{()0@5^y)ZZ_>>#Ed<_fmbdcYN&#pis9&nzX<4ZJSjrt{94K`pj5U7` zeVjj!Y&cFLE4p;_x|jU850#0x8~>26UGmohBC>DKh3F`%!nn=kGxG(~(0xxYLD2@R z86RU*IQILaLMn=(Z^;u`-^uMd6r@S|JtRcwRrc#>N$a=I5Xr}7l1`5jO?0X|*3Nnm zFMZHG*}T2m&;!0xVTzU2@9aTm|C%gqx^hNtv6a>94BS8dYM>wRLBZw4+rH1{DfzLt zl;wl|vS%-d*Uw4WBNmaa-L`GQu(Y3kNJ$mJg=vtA>+_2W0?8~D>KaZPRZ*R^;_l4auy}TiO+fH9TUxU7x z0K1_WYWAfA@#VFmxE+P@-S3Xq@rxNFk$n%i+-KQUK0Z}ETH6M{;s%hm@Yh1*F0|#B z{!-%P<8L?)BAs!$eWTpFJ0M*i>Pw@4p0>m3GRoWj z&tJ@Z_n|JD?n2?Zh@RPYVMOpFKYX!0k6z6bbaQnnO9jtQqwY)E;w$e$m%$djBn&HWDWsEMn zDeTLZZxqmv`E%xAdPTzL$rJdwW8xz(4|jlgYIu~7^%8L~`*-8Av_uHA6NG8YAPn?@ zPOt5G9*v(-V3cNh&2C0IlB{f_DZ8Pvhqrjd-k@IjqpsNL07h0FU>F5jl7{ncSn%u0S^REV4%B< zad#82vg(jS*ZoGSo6z+@g=5cEgwkp~wlMt56XG(^*0ANDj8n`+NQ72iCtamA;+h(p z^eZH5KC`n3r%wK}`|3WBU7$#LxUhe=WuGE&ik0pEzh&^vJQ$F#j4TtJh38OGaukeq z;|Ah!1x;1{0F*zV?_eUtXl{u2eWDp}YluBj3O)Vox0N9+!)T(WHkLHL6p#+m`93I0 z!INaqOVv2s@M0Ia62U76fLWk#LVmIbnUZR}TYSu_)e81_b37IfD03QsNX+aafI=at z7%&Yz--Gm9b`106MAgW)z6o3v&fR_fR`|NyNCj=jC=o%*hC^#ZRE6RDhFPwJu8i6* zx1{}^i&K7)@Uw~+c8b4vU%o(NlPkp>+v9bq$Zo)BF2*C&k!;n{v$=X4Chdl z{@d)pC_vVQv>pMj>{bB%Y(?UkQnybF%Q>)SQqFJ`juS{q4^4FE_% z0_TX58^VOV{;sz(A2b@9t<2MCD6w?8#6_J;RV4Z~^AEzi+$H<0w+7*t8L>MrV^BhW zbf$LvF7wY1_KYu_RqOzhMPNCj?LT&7SJgwm-RZXOKo>`Il&`C|W~(mJ-xG7Ky^Vp9 z&1Ds~Sv^F&h)x0ndIDNIf#;QXXY0qPmn`&PKvv%I=J#r|!~*7vS8@)75)0NRh|m2l zK|x^zm?SL7RvFcISfV3^1!#0h+IJRQ|Gcqbwes}ww-%cHQ_{FgTA8zVnXYTkR3I^=&jD)?_(M#$}jM-E& zs^M7PxY}vVfX=~@6Z4N~MO5v77uCj~<7|wHb}0Wz_9-UV-6Ez8L8FQ1t1{`P zpZx0osiEk(F@oRZ@ZnM~2yLxDl)YuqS%DMRWzPLNTM3@bFu8ldeVer)L*rDt*NoU+ zIDL3j>36h*{|x{5ogV7r>YWjHc5ZP&;x!o#P(06(UV@%`3RpmXoWe92>aIaPE!f5i zlFsO1veW1+twe8LnV*2$_4(o5v%`ue zF+qT)J+a!I^EpU1E9+-M%1?Ae=E4Yd#d45gP&;k05OKGG4A9|*#QOE>|Le%^P~O;> 
zkYi;02Wgj*jB*N_?^R}}!++;AtuXF{8Ib*dPth7U!~adu3jaS-wDtS`i;bPWb&h69 zDSvTR(ha-W|6_>mOk_`#KxM-p+hQOFIWYLv0C@#C)NdU`&E2&PX@C##4D5(;h4kLc zy(Kd}yljcvkfu^&H_7QyGx9v(<@=F;q0(C9^)MQB@-E_t3mxPE)9|2!lmn2v0RBJ; zkH8)9I-z1wd){>K)iquX(dzfKB*F;VwqHu8C>|ccE{B+(CY+N3qL87To2{4Ng$>==R+%& z1h^s=0jxt5*uc^ays+vZ7n~8c$(cJ`!wine9eeTM_OUs8ZU0n#<)bgyFC1f&y>nWW ze(f+e!w)5Znos5GLc@9##xg%DIw$E#oMQU6n*4kRS0RfF7eiM$D)~TO$RMBu*~sQ> z834c*>FYR$S=yP~;=Bz9Ub~&X39x|d8AHw|^}k9#z}hSF;m^^b%vrzoU~Yk!~xe^L#$trJbIx{PdV-YejYq}77aI~|hdkwy@0^&kP( zfi>r-^g{AelwJx}tRua*8_w!E(*94Bd^0bSg-fbazOLfOK~^%*^o|c%Ikw-tYbC`Sh&i8d)qBOK@h6Rgu z$Vn$zSQ^(^z6s!^Irw}ZBnN~@?v2;&g$CaJw7#AE5PGq{{f?LhXw#wM1-_O6;R&uV zNCYUZd>4AY3V`%`iCt*^-gIQJUH3HHgN7{drTbeJ)p^WEzDoH%@#qk?-^dN^O8#?O zQ(VGQ->b--w3<^?*`m;-y1Vvy>ujw|Z6iLU<8bCSn5OygMBCmzHB@EjiDvEy%#ZuS z1nVyjHIj*?p>|UK3+8GXZjyj3(Mj%=|1od#DTi50-g=pbYVJF4i(bWqh&8!VIaeDe z5IWDOeu8c{arPn{zC~hop-U%iL9!}=d++o!y-P53<9c*u)VXqWdkt$5hl_ezfn1Ni zt#6Hak6E&#fJktA%W)4w0hJ_6pYd_EeO(6Atb5riFb{jn&HJ31gW7lHY?+$!=ECH(Sk~-v?a|byHhcUhgTPcxcEa%(JR`Pw6AL&;EQaDE`FPz_9 zq9}1A%G;sRP5}mpGMb-{#3$Tr?{muJLyZ3MA+YbnS9M1=7Xy6TgWM^T ziGoFynse}OC`1sw;kUO8)ACm0QUCE z|An}G!pF@)`pWcP-8WuFSe?Pv_$R?&P)dR^hfL(T%o^tTgy;7rdJiPz^D;~`eJG^W z7t&yxi&a!cS0rz|x&rmJ`+Viy*f@%@1YmM@lOZ@xKe!eEJaN1T4sM#?j*CM$6?!5^ zCD`9>2DloDrDf5$>aUUBzLLFe(hH89jub|1?;+Kd=BjrOctcI9NRgR@+}3sKnrRu# zI{C)3*d{w4M$dfoUemwRQ_&o*QgeZd^GtXXU2y&E#h(e1ud>z}&-*98k&k9#pvXmO zquI7)Vf8LjopvGjihpreGz5`|ExHB%?*Ip)YJ_nzsKW)NJRactFW&YLH{c4}OX5as zx&XLf>}|~>VJYKH+cKSC$p;Kc9j*jn3=3nVb911>s zA!VfOLOG%9y#1oeR>+LDHoaplWH{f65vSibY@NzLLwUu>7~iyGk^0*PD?lqjvefeI z>)U0gJNzUBT~c{o#SP2pk1zoj|C3= z9p^3T_)dX#0{bqgw<4+U5fzy&De20x;ty{qta#>W08U~rhx zmUA99PB}V1^kDldF8}yz*V$*bruM8?EN^{P(99!EZNtBNb{FBRzFRs|X0g2+MewbO z^m2kD|BW`q0guIzPg4IiqOUX%+K!uaJ7kpO2%gI1SUAKu-3?*XNx+X|~0Vi%k2l4RO-n zuxP?Pg1TnPz3@JSr1ta;ykFPmBk4gVP_rJQ#?BL1&7<~0SE+){ z=V6kxf@`X$H^rN%Yy96W8Rbm<9B-!G$)bCe&q(BBr#`WF)cbk{baGl{W-ezRj>Ps= zLgZ8NA1ewQQf>5WqC?3Xsjbe=_^%m>ea}Me>Fe} zM*ZRzzHbht{`vZOQVaM5DfuO)bj*L4w5*ENQq(jsIwvT69dXExaz*cfFWh`ZHSe`A zIDfoIx_z*!x+*xoc}VfxP8)}iYqRA`#wuL|2R`ZU`fgdvJF}Cl>KlP4uP!a058ky= z)t$8Az$lXeOYjw!1qRXlkvHjjOfQZI2=G?MhZ>3R>zuNOFUNKkQCS`$v>`LGJP%LW zl;Fl|(1zPl*FphAGWrZ5vZJr@F<&T5km@u!#g-@(wh`AxI}5H{m;Q_1SoHPQF1~xR zkhkUmO49>Txd09<(tC9EXSnuI2%7C$`;$ zo#zR_HFFG2lz%qSFrEdizOwtQ_T05C2KPtXmZA zygDsS=-WQHW6n#FQadXH4Bu&Qh9yxB_e%OkbdS<6_NC1kYho;h_4=7rn|`$!K{+P^LD@E?2_*SdRIH zlV7&nu10=e7eqQ4lc#VA}EfiVJ{Z>ltWYlZ0`A zMGGf(&0@pKI);3A{1A(aiR`18@-o96X~kM)O@QyI4$3|43~TZN=$zRA)#tny0M%@T z(^7E^5>3Uv@la6>#@ZnKBNj#=m7MnBXhb;KGE~zzCC7%59Kn4_91W~A!3f2FAp?PN z(Qx~lAF{r6W4GVReT|4v`aAMGmnj2xO^JW5A=weH1(uqN%v-w^FO<5X7Q@xe@@sML zg6(YRkd@bg4vr?W^2|f1AAmWIfYOS9Rma^`U}dPcl@v*L2oi7;^1J;7$OT5Ah%4TRa%=MqqX~(<7T}KKu6-HxVdy*{W`~%Oxm`Te!YNX=-Qm; zldz58VDyG{EMGxhATS9Q;IZVp&R`lSdW8wOhQ%)_u5BUEuh@F!+zvihkDg%9GtV;? 
zS(Wh!adc~396f$c_z|v63u2*UMh?&0&?mZp4UqPp+J=5*x%py&?u0hCj<+1)G^?`p zfmd4*Q+Xc_t^Lh)h|Ji4P~^nRtwkt0ZSO+CiP;^XCxk(8fns-x`}T4?W=`GRZ2+f+ zsVK}2Rd>ZQv6!1#vw?bHERK5kHDc{6vYU*wN%uSd;86LK$3$p1#>_TU4KYW|f=qdFP-q6#e2( z2?h6VJHF5@u2!{q>vD?s;X};1=z5(r0w|J?xD=mfJF!@PA=+jt5(B>rwu{?(j6wfC zwu`$XN(Lu-qZI@Lr)C}2k5v;SY>K9by>0(8n?_e2nQ6>It!O_`4`l(qpNg9z=^w+0 za&X3`SMi++ne_=lA{zM{OX9l7$JvpN>7SG?Mb7;Lcp9u3#shd(=mY|In!1FSovoBz zXRr*Elxk=74K8JP1`dtBJWUqzO2C(G@L@}!^UwcrE5_E$qEN;5vzJZW8jY8kRh(M7 zY5?;GtN1SOtWbvoY1ms_S0*IeEb9cb0pZy9DJV~-CrdVkI5eHk4EuuAdr+~+Z?ZqD z_bdc+Ioa>icgF%e%XDBEd#+^Qbbo_bs}~w~4S5He2n`YCbQ7__#$d=-vIXXtBRG1?NycA?OSoh#8^V?ow%sb9OWev+W#!YhP*Ja)@ zYw(klP36k1UrG2BUDDh&^;J*HUsv&rf+3Qvak*QyZLc&EkI>r_xT{>tD?HKBAJ5;}@K6<$8 zfTnp1VLk)Q+m3;}nfu_yKb|M80Dm?#p-Z}3c$^13U<+?JrR%?+nEz2it#c8U;G5TuF2|P8>xhT{jOss z)(OoGrBgPbYWSBKzxqc^9f?nkXrFqKH|`P1Tbaa%{7u&fx8PP*u3v4j823_?)9j>e zFt|yj+B<%9vWX#bXKlZhfhuYj(Wizo2#}hsi{JAOj@ytx&M#FI<3awkmGnEIl5c*= zX9=C9gCLBejNzMFg~+6V|GVNIm$F2&vUpRe_r5p(@FJUKou^9Q%B+OY8x7ZU&jLh5I9T;^yQxb*d~BE4uFYC?1}(~%F)1DRGR>z zeBhcgJZ|Jvd({Vuc|VcBDn;;z zt9It=r1(f(hIrOK53Hay-PeIb0Ul7^L@K?fXYBc=T~N($E%3$AQx|(@SBs_MgJbY~ zeB+rlU1|8$p-&d3NYt$fGe%6_y9{~YqlwW>sDR0$RHu^5&^KLsbsEA?t&cEAx8qyWp&Q-i3Ptl9?>soblS%;V8fgrfKp!klt+be#d;72{B?6eIHI7T5P2BWeu z=>VuCpM^Hq;(3yk@Xx!TN}FKHt}=a-k?!z9tv=5BWOb*TieXV>ws*2|R2m25>QkOV z>%oN!;v)SWFycy{IevW8hW#CIN&zD0Z$L3v2G9+5BOf?2_LOg62XVtaQ^9oJ-59)k zvLh_4iJgA3b?an}A&Wf`U^mGMKg=+kfq=pw(52(bmU;Hjy)}d9aPkV+x5qd?ZNq^x z1c3uT5UYJ_%mWq>{4h7@B`2O>;A~qqU~169Tj)YI@;X7MPQn}o#;F>4*8=%TxkM5? z9m)ZD&UAysfyevF@$8#ApW#1h9g};t&cWHwGB3*x+7{c@9;id2S5!8DybCa9odP$? z`>WvXr#(!Vg+_=A*zSR0*OTiS^RYdA{%#Cku7EJf>f?!5anA5BF3T6_-QmE9+l8*) z_036m_#8v~4dM9Tk~f2R{sVyLbF>UH(y*5AYpRINZtZ+V|DRn*mdyn_*0=9j&n_Jq z7jO;5iWZ~XyTKj_xG?Kl|8E^RwBesR@~4K}qC2-OQALMHkxM9oRu~J~uK=eE3JB_j zc9$U{=_YCbFtsHOo!2^kMuw69HE20g@q4ver8Z_u60!qo?}TXDs!}qzV)TJ>P23$> z#7aK#onxH_s5nd~q<|dO0buR^R$&781hxWZrz>ZQX+MAW(R`}8@Z8A=cV$a3|D#c! 
z+UtqE$0y1~lRY!*qp^Kk96JL$q1?G#zn8Z-fn~y1=UjzuFZ_DilQmMgU;Q4MJN!g+ ze-9kTLpk|8tA_-?Qpc=LXaCV>g`k7d6mVU}lOl&i#92eZqDw1=6tDDyHi7ALS3eKt z^P!xCmtMulRU~5j`9W^Est<-)aa-z1KG=nqfh)Hulji~T+;pwE@2_NY zzYmj-|6&x)H=LrJ6`JY9GV1TdI&i3G zktHTE|THiPTH@3Q5r)a&RZvT>(GERq29UW1?6Uj0vb~UdvdTLy`x)n z%jwJ^T21tQXm6NH`;_tbPAN&33jT+3PT8NdHizEsF6)LK>xKG$D=Wnhkh-3Jo~NJ1 zf#rLnbm&~w$-g=;#Xi1qquhe#f0@Vm<$lzJZ3Ec0nTbYOpGhD6{Ha;~5X93e1UWd+ zbkqUl?O8fK1;w^f@XUpdT4a^G*zacyF^Sz9;32~2t{i+pTgx(YO8qJ`&3?}*!O2DR zY*|}jL}+hAPke6Q_QM_13Y2~$@q;FJ+P^v7twYg&a=J?!crY!_vj3UW6%AA)HU}bC z=@p9GkdGlmhCq+n-!xej09s+FZk7^w1pO|Yq;3|Nd_+8TySspl(hm*%SSTgk0NW_^ zD?qu0cRyS;gc-NmiV&G#qTpd5OSLpg`E14c5q8Ca!mN9P8cT~I2nMl3YX0GJy%#lBAl^!FZrY;VrPq9&QRyuPu>@R{Fz`b~CF^$yd4)d*RWj6zTOg{ic&?E*f zdmVt$r05mX<(%xMP!ep%i`XF>N}T0wbTED{kTwdoa0aN`LG74n^CVDdybUn3w(mf3 z!3a7_T(pc0#M^TMDJQRlk27q*k9 z>*tpU)-jJBI~Q%>R*EYZt63jnLOy#X;p?OsrR#$Bw{KH$N4VnjZlmru zx7o?foZsj5)4X7-V>R)5$tfHlxnI^jJp<%GW*TL7(P;Jt`q)QA;2@;E+|S`JGA+mC z%Ea4^FjPx+{vMU)Q*9FSKS60)?pH)^g3S=KQE2g9-;NuL?@|cpDVOH7S^{#>L}8TT z`ECf7x8_S%Eej-KSBb+`T6Kg8^4NcljS+~IYqec5`?0yT!0E++#*DbsK#X-AC*z-d zBiK9)xq4Xs`dEDSOix#D^xOnuT(Cm`VNL{hZ@=ZSvCsYf<8}?u4M(2txqH^hZpPQ1 z^ZSzDe_@)dYh1k1FLWXFPG;rD{1Po|a|Pk;0&)+j$hml(lL#xg@#4L>(v4r&_j1E$ zMFwk4Bq5pzH=^5-du8N)+bbcARtzv?n6BW58EVp2QDblRsA6HjmA_)9_uzd^XkL(+ z6(MG=5`Cc_-iW)PfwraSgljte zm;IEg=6j^p)CBX&4<~_rIB8iA|4u@$H;IN5+|31PHhM64?q z7X(E1F8I`@P&>WfOD~8FeKm75LFzC9ikQP+wq$ePg8CEeP}iyArX(mXYde%s&H9VO zQ*SumKIWHRD4qtGQ^g0XS|jn%{!YvKNO1~$uf~zK6Z9BIpQl{B-v~lhd$KD#Mpv%A zK|ewNe)xt=I`G%MaApKg>BCOQ0@G8zEE|ZeVW>O5<)hg)0sojsqQ(;$SGQaebqb(G0I zJ?mTXPTm)R0EPncxZH^A3KHCdzcm;mcGjyW!SeKP_>?6M&RkkY)^F@RP-EZM`KZt4 zEb?0LAD#XKzuEDad#$gkA?l*td?D|UDslXO2(1AIC_Rr9#2cSwe zL;~0g@x^{|2BJFSKaXEDQ5#0n7`)Tqbm6Q^ZKmlQ04c%Ti*%;geLsrR-)_9B@ZhrY zfoYrig5bX<18>#9e-=QoV01Qf5a5u8tI>{KGsJ_3qi1@zS$s4KNp16y+>uvwNx*jI z?P*A*+b1lVh&5;t%lE2Hx8!!F_iv9+u|H?ATo{nPYjKdBIe7nf353nB*H~xXgssoM z76HA4KInU_(6I&6*#=11*7zfemew@F@|@ziFP@V`ol08Wa8qyGa&bW~`jm-ccRzE6 zD>}i&33_D=;Ck0}0gUHeMe=0wrt|zOBGhl%FmFQY-agc0lqj1;-RJO?OidS$N!Y!5 zOuhA|>g|Ib7HCirB-WAY?qjU!hc^07+N`4Km+RpprvQbi;x+Gy zRZl9Zm}kH6sJB#$AzK)QH;0cSz%7s|5$3Q9>q`BNy`zM}MYcv1tZ0jvIyRN>Q5G&F z>$Qisw?^EFGFd164+DZ|*2D_0H%xa6YByafrGaRKYg&MUI{(Pu@!Cf(m!(V7j|Ahn~H~oXi8=HmRC$re9 z9tXh-j)$;-78@`T6s)i(pHdHlPCci}V-=u&3m;aUYmn0V1rfG1t{yyX2@#Nz;C_OmE zqub2Dh~x`>S{UIuhu&0YgXa4JK2>Y1;+d>_5BKdDor}P{-mS4(*0AfK{0guk5B$#gN!v=O+w~@@bjNHu0nsFIeT#_l9*jW&oelAPK>&}us{`J30s95KZL}9E4rBOBbsOkiPBQ-C0BuADn#q3A;Ue#&`VXY5 z`8o)?v_Hl)i3+-Qm^|v`Y6lVUDtafzZ{X9MPWLxOozVOKWv+1}(-MDAd3%!8$eoFj zP;@{g4&&1oPAYh^(lkMb{CvL#LVF+I>3Wus27U>Dse`6Rozj9A`1~3DY!!ovUa;+N zt~KS>VG}BW=77x5Ft*9On$r1rN=>P1u0oQPptk<_X2N%vUPc%1fuY6HJMHE}%<}c2 zM^HSS8w`pJ&LovjrC*Km`S;UKheAzDl1m1?Ih~WZLbHFUJpRR?3qH}fGd@xijn0;2 zP7OT;2Z$lbR(YtYSJ+{}DnkL`Nh)0}Ifpjn`gIf8QxRkji9$`y-(75N%Qu8xM4q4W z&7=5+ZirlGDUgu|btHi}i+z^e#!n8x+kxUgrTK zGq@oAE6Sfl>nBAyimus#QuJ0w#VoP5R_-mrd(9p>VLC@H(pljbFUiepGsoYxU|!-2 zy=`If;8&^6PO{unr=QH-N6L4@_=Oe5%Ugk&WG_qjlAUU`Dx%zJpL3I?PU?7wv7hxO~0fQ{{fa#*rYp~0L zrwilYRgKTiS1TWEJE_$dz^0KOpQ)01@~92PfVKXWp;)DVyKDzrd8JuL(=^AUm~)e+ z`-yvp>8(pJgoQ0av^V;7_V(lrIC_UV?2<|f%7>>3b7fXn+XugWv!r@9gtS>^w;vQM zQOKC+Q`{!3W{UtkZy>e4Sz;A~2!Kq5c$aaL!27%&R#-qV1G01(6HI8&e`fI@b+ zDpa)<XKxX$ugft9NtXZanKp#cErIH_z;OxSy9!xh5@0m22HxW_$kM88sOu(`Lzj zf1}Sa%5#qLRNb8(w>huTL#Vmx+a^q%5jm#~F!X3*mhY<}f{V-c+$OW_W#&mu0XnO_*STHu#RwN_-t!*7Kg%jKDGAii{R5O)jzB`S>{|} z0SM|abE85e1BV~r{rSV#FXV9&7cH+ge!pGI*7KY%BXd8SqT&=Cr$W8=YqC+JcK3O? 
zeuSI4AvV$s)!x3AZ0$Ig`#qMJBBo~1Cs7%H`j(=9EsavmL|nq(^!R=0(POTKyz?J@ zoaV84R+4(ro;5M&e{kXq@uCc#$i8xa{KvY_Tr)y(Co-g~Ik8K5cICP5TG%d!VAt*U zh%@~U7nh$G-b4YG)!w%WzC91QBw-C!`-CVtozO585}>jW@ROwluUq&NrvTPF^M3AZ zCSEAssO8k*iB8f{O-KQ7KrN z@7#oGZoy34{@v@!*3#e5cgIjTD>kUq2GPk`AmG>t1?g}bpBcCe7*2uRj2HiS{f5m; z5=GSC7&L9`l z{qulTXUPoE^h9ZD@!0u$JJ-$)GOdn0gZl1tAp0}nzp&8PL!kpFHzR?DEh>{O z&lEqN|4|MC>U@1)bXgP|BkXGH)s}$jp?GyGU@b@t2nG|1_Mwf-z(WvL>FoE;N=wJ( zOJ^)6!3l{}HcedkY>PkhTfgvK{*5>JEM-Fn8Yw;VZn2DmCcj^rY<4DmR5NM3J@;Q^ zv^|nvIBXvuVg~~sYQluGTs@m*09fir{Uw{A)2Wi__bqv6O|Epk@`~X`Z?jc&yyl5o4bEIwq80P4{%oTU)sX!pru0co&_Da(i?CF zi&e4zR^I{2#d<@HM9lDO)M!|0`rMq5#`@_# zQTCy?2<*&IHzV5~x!g$Cw_M#M>b{rSfh=M#yga#j&tFh^{j}H3c+-aYqWR5Y8+icP@^BRly$8xo)I1+E8ZQG{ zDrY5zD0j3C+9ekLrN$?&Ho2VH&6YkX+(l3(o*1(swD+6t2>@=0=>&Xk4KoU^KrZj@ z9-iYu`Z(TJ%%}Aw>Dj0crS;xD*i`fv$8Qg$A>wS0{BAecf!dUsuDWPg05w4&DM@E@ z5N=bA*o>a~Q?RJv#eab{k6+D_35TXGTFASesHQaOuVv;+3Ez+Z9&Z5H>f8HZqp%vl znWgGnt)BU1YA1LGJv(b~dn)8E{&VpThq~Xte$6?INnAxh!%X;W;~=?4L_0$6%LnleTvy;CT4jM+(~HE$T|$pQUMU%Th$qG z{LUQ9O`Y_ODJ7svXJNyLwz9u^HR;x8xpN}=pkte4f%d>(2R7Q5R?-}-@{b#pxv{u_ zSbYaTQVasx`hhN)fWI%C>nxDk`V`aPOpFnU4R4d{;T=I0mT@GD=p*@OD@qz~rKNxN zt#_3tCMHgvEq!?Ad~&76F!anD!EPk@v*R#kK(U^C9jw_SpsDtehG{ZTjAeiWjFgV3 zA%pP2dw&0iw_V^&;G1k~1^Cys^A{RximFk`igZY!e&%4|SxgZ%=Nxo!&+@=ltP>l8 zBAx}I*B801e+_DU;cd_NW*$X8jb$%p**?U5wF)uo1W51XVGgw7;86n{d?U?Lol^Pj zrK)be_H7K1?I57WOm0DaD*09RjYR^3h1f`WGG!mfeJUria88V+AR{=?je~8;h=RbL zA=BG3h4vuP{hF_Ca#*OYtM`VFKz0z=3e7pXzpD2@bKZ4z-jr8VL&@JWux*C^y0#$? zG-n`v9b4(Kr^!gK;%SQ>U6!?_fSf^0#+CmTBrV!zh#oi;H@yr({e&3JVWP5>7Iz-<66yI!?-x_0G&j90EkP8?O z_L%W(N5c=sW~v#Auwl>kl^T#KM#p~im_)AmA{s3u@+WJwiS6Sk9{wMLc4D13HvWLoByP;u)MRLQ_e)aLCl$3WvtQ^C}JL#Yr9r|^o}EP&~Lm0;?x zuDz*c`t@lk$fyfn;jS^fy+slH=tvUdir(u!O^7z{1!f437Zug1YSIj3rsm;7Y8;8N z(?@`Zr=V3wP7-d;Ck;*Sww^lmpKneSdOn0Y{isG(Ah_5MCejC-ZY6yNIVyW^x-n?) 
zv7DclT@iejq)QoK+Rq5C7-Nfwop?~)eD+TIxG_52z zrKL45A@)50#WjO6)!zooE+hdo{LRM3mjN6UwZu)nt8~8TGY2fzjA$qwtD$o92ayjr zqwST*#IOjK&wF#Qn2>K1OB>1DK9fr_$dp@h4K^<$ z_Ab@l)N)?5R6gU{7na<5_KnDfuwfRnAM?gSKuZ@ky0N9|G*TSn-rtQL_XHsx1IFZO z8}IvAp}@n>(H%@3Wb%wwNZik)(`GHK*Re{KXzUiK$Ozf&6fUb7p7Y^EU@VsDGHAvX zgQK%E76xRslq+kRm;`r*5haH7y+Y82bFzrw3H$Vfz4TQRY5JNujk_aKR z)<&VgjCA9|crk5lQ%kuvy z3f(Bs&38L?q&Qo5gMoKgRGJklS9~UZoa`zAb z*Gs#+arx-LjHmPiI2*WyzCk!3n&v;lYKLFkbvs+csA%7?}vVwnh9e)sed`nc@W zV{4d&mE6N{Pe2pWxT2P#A&^Gk@m4oFhkE0kReKfy?!_bm=op&19)Z3)w zr5^jvKaia-8{Sk?lBhld=0sl}x#)8$da=DhXqzwr+yvfY05`x;2UN@WoVKAP10TQK z7=xY}+5%qO{dboB<4erHW|i(I?sR5GRFdk+bRa_g5(1f~v|eOIs|TNvr?cQf0MT_H zR%o@I2|+3+`cuQ$PVN?vUf;2*f#CpKANL8=HRWF<#pAMMT%4eP@3_-SqKYSZ?c&iX2Kn7O`9UKoG_j5`6 zm|cpS0j4?)=#woszSx~CpF`@?bI8l6cBy#Zx2f?hm!5lt_QisLO^(QH{SOl>p2CTe z1-!LLLP%Fx#-0>Ae7EtriN;q{+dhb>rF)KKSVzOJLLFJ`K(r{-EBF%hK-fbk7B z^~h&((%!VCT17jHWAzpzHJECLRq0>9Q>FQ);PmFT+2j|Ld?rej*QuB}jZm}7u|pBk z;&)dP)-BnGxZfyDytp@)X01~wmfACXSUMdA;glP5JqAy(+*o-|*_!(<(oS&oR6aBa zt~AGiY$p%1hB)xhY=5NtFV_!#<%5laSnK?tKB`H6qJFL@MN!By&*AX{Hw{T% zW2P7DPVh+%;2DAy60czK$a3)#dH=maGiRLj#1A3M;X~y&x!*4qli2N4tDDIb?&k*) zaRu(>a=ovCM;~Y2fBG|v6)Gdd3{8AGl6V&DX6Xyxz3Uiw)TMOvZ8QPG)zG$aN0O z_O#rh39O%XyPnu<|JFLJdotVq{PFpM5WZB!pkbAbE>_oNanS->h$LIIL(ms1=a6K{ zRXe4v9w`a-Zwt}VfFk3C3Q1X%bz0idqm!_u*M&G~HuDb$Gnq4xB&^gP7jTdz8b%kr ztjN!{VLr+~cjpv;@Nh{PE@Xb=_H|!+w5J*h#b<8jeie$(9CJEiZvNXmu(y;?X2gqM zisn!7mbcJ1AE8oB8*F2_*y*5242yZXf^rq7$%B0}K1_vL%Jz zJe-L)hU>#$!?RjLzj*|Gm=NKJ{`aBTh(p!9?T>vbWAZyP3P|p%qQu0=hc32j9 zjAY>`O8QVD*8Sh!4%J(wvdHSca>%S!B+b=qK{S3ovZOCAeFW!vPLeB~^edOXD#E5(h2_$>0%hvkpl?i;{L^_2t z5ajaPZ-HIfc7l6S=>ecKOVzxc#tDM3imfrK*?_aW2V)edAA`&IVn@rRj0}jePByMX z(B8~St+u~00$88SY&0|fL2*-JL0$YtohX+-Q`d^GA#u#gqx${RhRYPNy=i(=WXrEh zPm!=;I$o(ZJcJ!{)wvZmS^GA{D4>RC1>1;!WA!PYDf4GWJSG0NaK;KnGG10U^(YS+ zD<88UtOEb1a$zEB$s-QxVc}e#IdgQ_Ole$Z;#sW&Et1e)jG@MLlZ=-n=r9M66ETw2 zwj#g3q-2!{bLnsI4Adjw(5Z;z(u+U{vBt|iEoMy!LkPW%m%}Spa7lZV5VrhA9+MbZ zC3~5%l<{kA@z7W62hXRd8KW~vm9uG1nZkrnf{5cZ(ENxPWlD%S!x+a>#fME;%r5on z_T#|M)x zT}byrmJ3^2DTebG76l;SSP|cH&ETYyX#5+SlaPM!3bk7}_#4Xy7gQGl7XE^9q6(x8 zUnkN0t$?VJ?`=iNv?*t@69s;}_>vs{%x(imDNWr21t{m;&rPhOv1L9qyX(IAbEDb( zlzqXQs|3SJyA@yI)MF=A?LwCBVRwp7rmcL|;|%VePu%Evzuk~VhW?Zb1S39Y2=tT) z1P^3CS$&}_J@Tn8O?D%lW5i?ie?3g3hOo0E&U_m^`n4=bpw_RZ0Q_8#^+yTbGbP@m zx5@f@D`c_@zmF9sBj?koqv5byi)k||47DhAZZo}T*?F!^n?Nayr@yedmPETPNaDym zrs1iNmTQQmVf1L|EmODuDW~s0^*#=D&9$cR9#A#SDdNajkQRvDIe@@iy7{_PksWn_yP}{HAp5R1rtmqmeL}FYr09E+x4069 zE~4)LMPzbhCl2)OUF}@RdtRJe)5QzadWCW0wLxAj1Dem9Evf5q0FHL4U?33ZzPG4O z2xE{HG9o&Uomaq0#2iiy=~DdDQOe}!k)tF|Wq)l_(IuOok@b~GguuE_=3t-3I<%+z z`=uggNa$Ma>+<*+xUomg(5U;nW`A^8mUkWH_#FhIM2pBHpO^z4SDEh2mxrRTF889? 
z`>=+`alLM)4DEQ0m<3Y}fmZy@1FN2tWR-@g1Q;CWFfTIZcwy~;- z91V9N-@mYJNyv(*u&bJj>+E<3u?+xRNbuyY9a>~TEQ@X9xncWW)ajai@RxVUNdQBr zyCq<*xeN)dtOnCDqX_PI=I#>GapQvSrzT_LG+`1pb3*e%VMI_-CR*^-09olijvofTMk zcL23zR&i2jJHqJ*CvR8JgtDreb1zq6O?WbYloi0xTNw#bx4@JrFO5QUK<-E_-r5fs z$5tb0{rZQTM+$$ed+55foUlHkU|u;l{D-TQPb0w?0w54Zk1h zDxCB)1YcbpE*g=r+0@60vv(3IL;G|xK4@*TspEudDp29^X>q3FdkO2-3U6MR z_sfkQ79R)hG}}SXUq$q@O3pMBxzQe6P@V4|<=QEbg;f~QLdiDou4qhx7wwLSpZ;E9 zKc%*JKYCn$HBUX?o!Y{&+#WtZHeYB$#~2#@H~n{=Kgh$m@eL*w>A`1F;2yfF+~ppt z(>GqbcMOR=b(1P`!&IET+P`dY_cuKHC$ks3Q4H6@G6Y3&r<7!XUA5=PyobemGdM&y zWkOr=Dn$vp=^bTOP0gzuqIAvPz2W$=i zA6N%B%68P(r%ORa&V63boyim$C}^6>`UM;7eLMe{-_8ci$Nx66)m@RjYPFnC3Lyzc z75;rPWZWcvh|EXPk5~%w2jShVi)X`zfhVwr#?MFZ?zdg>BQzuc*yw>5up=7Dk=vB;Nbz$cSxPG+dxp7 zVgRD9U^7IC5J+L6;TyjbDAC@!A66FcO}eojR!0Uqxx{rZ0asmK-;{HJJSy}9I3!}N z{eJ%ehqnnfTejf0*>_U%lD1}Rw^jH6YoL|QD|Hk+AmCTzJDfE^&BD6AvA>RGCPWGS zQt(r=0Wc_FD(ulwWAq3I(QVk0e32El?_NX&BKL^6n9cJ_&R``=I+bzLrheX3dLXZh zwe|aZ45>X|<@`hANRfr-3|6ripDt&Vz4ZTtmX!@)d3mgu82l!c@veN~5F7L*(W1un zNj7b4I&IxXJxMVD^*Z{!P#;}_KYDS;ee2BosS$-4KI)53;S1Q~mi!B=v~{kKr4 z?&3rn7V11c50J;Is&~_tG75Z@Z8`@i+|3=0P2S=l@nvNf0Wq5DXBw8SDQs;kleUsg zm<<+T=Q<<)hQ7C+l=?G!>+Ctl4||sV(kA&HMA^6*rd~~56i+`sH_bKL9QJtZ)#QUG z9MtPDdr1hw)?v@|_(tq(${Y`A@5{#--4sW?p*N^5){DP;iz zvrsFYb)fru9bgdm0HKEIfM)}L& z5+6f}zn|4UgtoVm2iX;yawjRt zX0b1uSurnbYvpcefAB2(2xTtP{eVN%1ejN|7|!-)&tSM`mzP{vzjvTfaxhCAq}_`) z+$U#1zekrUjbD9dz|2Ez+Ry)ot+$Md>V2a?3F(wBDFXzhrC}&Rr9&Eqkd_vthX!es z?rx+JB!&(F>5%S5V90@)Id}a1?_GD@_%a_3!&zs}oOj;+>}T)&Pz)vS+r9|?+olq2 zbZo0+zXp)g3$N1|p0_xpb zJ$*vXSud4pv}AYHNA{6wk*l^1HKr{4OdLfSn*F*k9YP&Yx_b;~D#+$lwTP zolA%+0=`B-e*mF(Ca%1j*1D>^LVa(6rf738N7^-d8@|jnkMnnVkVoaq7i%1cgVjfC zS^X-1MOqrNt9Q_I5+jpEqOMNfL!~Pu`C=Az7|L`Cyx(0EOzxL@Xkt_O>><&t<#mXO z{zRO7=cl(d5>lG>2auk#WXpL5+%g^j^tSaDqB1ZExw_3&8+rqXpl=Hd^e~6+@4;zW zbrj{g+ETX_aP!BQv2tEs*TH^ZvDFXvz*knP6Z~~``z+zeUDtS4L}Cfgei+E?pT0;1 zrDndChWOn^aifJauZ-4s+_D;G`u>zi7C6JMEMDC=w`kfDIiesmueP8M*_C8?0h5G8 z#Az>M+}(q&3>=4s+qU;1U%)L0GMAQ5i;ea4409H}I`F>^?4Gjndthravgp}T2ET{9 za<~rN3n|)Z(vmtPQ+3^*xVQR-og(|)jZBZu8oIcfOeH=|&h^Q-z5J#Xq{qigT55w1 z_VWM4KSrje9x&oMy}&9_fCw;w2(S4LUbqiLwPc>qaa)bFE?PXK{Q~S^d%Lr6Y2KNi zJMu&FdC|(5b9KDs(E}g;+z45%c%U6jev-<(fLe8?ZjAdqY3TqPn5|{M*?|NHK z7jq<>@yOnxZ)a9il`!NUj};Z7IgOHkPAY}Ur4*@3V3ZjLt(3d&qkBm|V-Wg$ zmT7UU%CL9A-SBx}a;@x@XB|e$VcbLg&snNoW==kOdV4PD<`WUs$82uVyaeoL%^XST3Y>CA3 zKK}^JcYrUAux@#1bd3wW={$Xd@uKohT9pT6^2psT(7bqCgCxxKyQn0ExXC4nFjrvX z(UPi+{_>&AnyI;N@cn(a$_Q+!N74)D$MYAOnu?q;lg%*`C2WlN1RvXdUAx9ZN4^X- z%5f0he(8-Q=xcsKNp*a{uBH_^NXb}W(N@d)V&bnOCGK(T!dKcvZ*|I>lAx*y^_A^6 zyu4J55)XH9jzta6vV_|#By=|5&Lc#Q&x3>7BUL}UGM!OwY_jvysJ!hOXHqmZYj#Y} zGK)`jfyr&7MU4N>%zO41#_q<+YZ2IvabiwQShQA= zg&ZrYC5Mxjs;;BCU1K@8gu~UBv2Gf|5D>-VsW+jP=Wk%H&I-;}nugV4Y)r7*>+=`;l>WQ;qu@8=D?a zXDyIn#|sY8ub2-8J?&&&p0p9g&E&5Su?EW@4s{XFtI;qeiVMEudrDtWFSJybqywWI ziF-)V{}zi)GrSDfQj?GFR_47@ZZo;&3QiSG*xH{3{U|;>41ucU^YVgY-#8qzh3hcN_CwX6q7#G5PF{P4}8#`l@ zIUf>@`Gf@f-92N+)l4yRS;HzZpm0FNND{Yte3R_;`!(=i_2oN33Y3t#&-am}0mR0a z6*7nmp|WxQo%)rbriQw64fVN0hk9V}ye!UsEDoRX2BdO%%}77TG{o?F4S8&YQlP7z z-1DEf0I9h?yM~bZ10azP9{>+nc*q5W!tYI_*Kaxdk#0#0MX#=t9WKz1+O(-e66K4U zyY|3Qsceq{lBeJUgRthAKBlV0Wd>HVrHc2J%xhis`Mn_n?JFmYRmN5ELE4>=Gp(Jo zz}#kKLF+By(IC;b+@LEvm~VimkB>2f!ALDi?H7vzcO_il)`+QsUTeRNfN$ywXqd#5 zDFrkcqHaO?HGrqQ{~aK9gdsEISTFzfR)z#%Sixo}KJ4sYt8Bg{igw22=k$w)ey&-N z3@R%IzF>^gZu|ZCRWl#okO}{Iku@06*HhVtB(28qI0r;WLs1aduU-fnjt@mZemj5F zD{}s7?h#Y%ot|J7tc(ufLw&n(3#kqY0yQ<>i-Q`yZ(AFfMv_keloonC(Ot?E>RQ@-vL*bN=MgSw#46f>{<`7vIN3wz^(KNbreV<}|sZ9iZ4njwELni&6v zz=byW)iRJ{xI?2W_ZKSM`B36mx;)BD^AEA5a>o%0BJUfXYUO5=!?Vw>J+Zdbo!@#F 
z%p?LJj=i=kiwlM3Wp)nmO{e(Ei7`Rm1u*y}4FIQwuAM5$yXasH{j*Z~>B*Cy6xQ+fk0 z1?P%PrZ&1gzv4!B8W(u|S!SrSjH#*ZR+vS*l_+GJfk80I9`UK3gb z!qT?CkN4bI-`#I*e|(n~X_;ttj|6xwKVNwR;d^JQ92|&I81mz(A8RtNFRn9oOEEe( zz<>On-R>T$U;(7QxPk*tLybW6^hH2=*l8J=$~E(SSJHFW;^fWyE-%$d_CIjv3%51y zy=aaVcsv5K3FnRSM?xtVcM$W#j3Ik|Z`Ir+!Zf>^P}ZW>o(;I`j;6S=6^jHF;`U_N z5^o-vK2o`eu2eUQw}CVDUvP~6n+t#z=J5c$Q5N~AXhcIi zAkNls$BQl`a!By80ZiS1az~u<2HM4O0^Oaa78B_%VodfNbL^2*>=@AW6=+QOGpjmZ zE6&-%Sn3sQ!%0qv%PVT9zHxJw(8k;&Fj)=%_RG4K(4A|Bm2IN^b8*Q9H7s6j|Ha*R z80|T#6KRB|HP&PF?GBwZB;m4thdX@K-_P^{0R&V!QU${-CuvYW$YNV)Pf6gbLMq4GtYnrsj*e4|f_l61I9{KJZ4huJ30qA0r#sT!4 zeiovI04W5#X-9pQwZ4OH#!8)Ee=P0WWgQp##wYX%jP|Uk&}uyLcxRTmP@T6jyriue=dYqYaj(^#Z5_HgTv+2zU%l|>NCg`ya0@b9h!5n&)KdDRZMLT<;#@aLOGBX9_ zf^{pQF!)w)uZ3EGiIsPi_0dwl#w}S3Ho$gu3;tK5ICxolk=x4uNGNhsc#qGX4tv)D zz4c?mq0nw^GL6!jgyNQr&DUKh5U)QWF((igAgZ4 z%~JE>Ugg#nFtPidV>)ESuEC$$Kl~?ma!uL!Ose=Hq?2bZ6Gv>(AR6o75<(F+b^)pe zF5ytMp;}bLUaa|S#~)0V#XwBlV*}w35e%Uj)4A;j%dR?7xhP>NXj(m`J%O11>W*+P zFB67`OB-SC5$)f6Pg_h^3vA-wPEG<}o+`2?oWY8F5JEfABAaq2VJ8i27EC>Y1ucw@ zF6>Z^5ZcMV`e_S{+>Qt(6owo-6$^HnS2CFTfbe#yejVwT#JwlmC(m*LwrfH#)D-X5|UD{~(6N(B&PN6mwtN4{q?O9C1CMmy#piXPD>Bi+d+P2tk467o z0AZbV{H#y26qXErlI}TbBVqmEgRZ`n3*0~Oec#UD^lqY|mF}n)zWXSbJs0EXFWwn5 z-FArv#rs3@#C1^Zm{mVXo)|EwxrG4V0*}{#VUm+Tpz9)>aXX@H?kur`PloYA)_A}p zQ-kn@M!Ur~)2Jw4+qcInTfH@*NR3Vs%G(Gx^zv)Q#_AUF-A-MnqTn85?1`e7Nvxd- z@O7Sxo;_wXm{i8*C21F;Gi8`j?pJW+%>n7$pZ0DpS)k-t9NG;3t295jop*tz_X9$6 zS)y4q?#B17Ml5VbQXLUeO%UkksX0Jr&NLT`)dZzZWo>~Z`>o{+wB?yzf!LizMMfA2IK)7paNAj48foq`{=fD7|NA>xez8FJfX(Az)if4Zmku%;#72*wOXYh&5vr zrV*+p4*oz6U(E9tb8c^@^KU5X25O%8iN8QW*f)VKaySCaF@p`{?vdbSfaYx1R_w)M z3^5}&ls4l0EHSPYkPgIoDpt6jI0@!|E{-l82_{I)*PIBeDi!1*<+ZIGtkQV z_`c?wNVqeu=3;Aw2hW z`2>JB4fchf29KVBAFR@$Mx9sj-b~-MIz&CNS6jq=Gc~1zesx1C`1mh=r_&;x_tN|N zUO*$yK__-fc0K;RhWJFirq`q`3ZZXNwD%rD#mXG;D*5d4#A2}ZqTjx0X;iC_am?h~ z#fp#WWdOXZ7OpSAayL^CujNP`3zwD>f$Ec^iVLq*_8$+Pluz7KABkTWEygIVOhsjC zujKzJ+GHLp(TTSdKCnaHLZiJWao^rEfBkK@K4&_MJdp4!?s4GDz;uQs6O4@Njc}}F zX&~(|o;tw|%VpYswe(`D8{gf#l zz-o;n)FI^DpW_Kq@OVJcK^`dwvVD6gLq0F7OoR6*SeU%nmNjZ9?>VrTncts^9@@IN z_^L>4g5r4)D{uI5z+UUewbm*pv4v;{+`lUN`4F!g9kRnSz;9U0x^4p-y|@%!0_x%D z=mG-zJIG=?zY8r!8Chhj2)jF3f@Vp?z`xKh!2GusIl0OSo1 z3t1Y3G#Ms7)tR+@Skhtk*x~xPMk*v|A@P{p=0<^_^ENFvI~N5(?@@)*ls!f}l*z{3 z40sm~q)A(Uj3+pJJIN0Bfd-#)_VyuF1Rh&b^CpNH7$l)bk64F=M`x+Gp~?FQ)OhKj zp7izU!I;Kp#M-kv304%#pej6m>VY@nGsg#J;3f#da{K@L!e;RA55P^Al{5RL;VqGE)x#<(9Rz!@g}keyBA>KNbsM*eU&8= z|G^4-6*E13Lw?;&US$OQES05nuWGxqU5G@VI7G$3M3a|cZ@e+3LzU9hk|q>x$DQBOF8Z~k^~ z0o|LQ+jPPO(7{el9B$VC$lJy-dKo81?uRGQ@!tfaqz0wDYW*vqL@aV1(~uFJzDuK{ z9w0j>*K~VO3CP%;USp)Ce+GH$dB66VSz7{i@70zyt}FJZySboi5s_Oo3rIvuqM(!l z(SWeGCB7XneQg_^3y!4InNYNvaLJSR`@H8zLoJ+gqZFsSLr zoS^|@a&3q|_$}OP{vuBe7(~Epj)8EO?SIs~B=FoDZ65~1!4<+-W{zN0$p9CV_MDX$ z;6O*+q%ZDSq&d|&oue|3YiP%7gQQl%<Kg#;d4vOc9bsLZn)(3ZDlK_ak%-cl8S{UTjKWp_HASW32LJ#~o$o=6AXF z_Pv4OpU20P$3UB=*E2RY z8k;jVBI%f5vMp0bdq$)}+SQX!RBX8!G(pc(5A2`xq^or%M`~!FNi|cPmwe;E_xJ;G zcgwRO4PbmE^9jY0)qD&UJ%^)_=M3s-Gj>BFu-#%|a45k+Cly;f*I*s`#A_9c}-KM4Zfd>&z#ED-!x-jKye7g5tC<1JvEy1M zIW1Z$*TofPPmaIZ?-fj^$bPv?=hfq1*!WevR%GdOga7N?;Y^$?-fjk$+@a16 z%`(8ouGFv{cj7O#2V1N;#@_@`mIQcew7?hFXIX$e)jdL;qtP8%d{BC1i!Gree14u&Q`uMf*$U0fum=p=H4K5cKh{SyUeVKtsS-Kd7j2Ohz|76!v z_K^8oJoV&)i2`=4<`lOv)lu(j5a&ixV`9PYg?Gr;`Sem(p)zR>bfTLqFQ$~@3i9~J zUC;wLO5|iMb2Rg!1WIHZE9h8+Y)EN;Sb){MN^O0yr7f;<%B=wZ0E24OuP-GK{n6nX zZjZGQ@|kPMTlsEOuQV|Kkp2(EZPmpLU$K3sS@|#76gaPDqqN;!`kRt93UNI^lkGW>KR{I=Nv?0!iUBn+VO{nv~v0-m;S3=lmLccxI5W{qUa z>m)u=pb^tsz#vntv0rL%=>p3W%sz?g)~crdTU|kj1v$0v@seyca-1JRrgL 
z9}UfhDZ@|MoRPy<`*Zwvv@_#xVlskXZ_gr9eP79du>I?&`B-(uIN8w$6E<942ie0BUTciURO30Jaz$w4V;W zl*HKtk!#(I%olcgWPdZ?q9SyyD#N`vM;w7;cNf!O#H@Qa%PXNFN zDcY7kE<7&Gh1m!LeKABr1>%AAP|qPi?<{=eUEoVm;nh021gK(-K;X-?7GCT#D-7o- zLl}_mGQNA@YB`BrbOk&~Uk7B(cQ1hKe_gii*basJZ%+>1s$=nEBh){N2M^VN`*C!t zMkF9n-?73vwfhzKs{%8ZdAZ!YrNvrBB|6sqK>fuX^ESCPmHrm3ag*qC0FR&S0v31UrYMQL zY#0RHd9`Aw-KFbNl~-$s$d99sHW>K!P5w0Jyuw=P>n~ui>E$_D0+7!MXfyo-$DUt- z?Dma*pfVDecIQQQT&|@rqB9-+;@$6Dt9CH#wjnf6^+$nWNmM+ez&Aj3l}`GG^2tf2 zsh|5Gv}91-;hpUwVXdzpdxx|m#?wYmgneyV`#5~ zI@`1GzTcszQ7$i~Na-{=7X93*U+tM`aHv<Ud_@;%Byi9!O2*Z~w<{;oFF{ zd~QxA_ys;NrRn?ZGI8r#XCFDRncDeOfCp;&+swT}LMT&bx$sZYq3h@~scIJBGk>XW z2}gfqGw+5q z4!e86AAaQ1(LQ|cas?6nLYWVsm5%>@g-nrvVLN|20lD9syl|Q1H>0mMh{hHHSH78W zHue|Q#t`LLlv~E>NyQTQa|5*Cl7mD#W`Dkr{}~MQ3%8L)6#j^m9?NKrUxIpEPZg)I z_5|i|NmcbrCmQrPcCeQ`ZmmK`G{SR#g@D7T-JSe9f=!VEUDWSpm~pg8vd>> zfbL&HlKMyx>IHyi#uH{>gdjZ|K>y5D?cw40*JYHrbhNzhea*3|1`OU0#MHO`d z9p&RDWjSfr<*^Hm7R~;0pC@PS7W*y37qy8y^LjC_X`<*DR9N2o(IxxQIi|1vb=5>) z$7LG}JHg|hmUj<&E-4zsEFw7sY%(4^gN9o(g>PBn^s)u*P+Q`pM8v*;b{vv^Tv~PQ z4|5$3J5Sl^zp)D1S&BY?!E*W=_C-ELJ>rA++ge((5!e^Z5$*xP7i5eE0lu@3Y}K_R9nWaJ*0K)CjMTLPms_`NRfg~lnPUo1yE9B!cVRnq+hb0M7+k?%s4$$!;&a~S(-(2Hof#kH1>Pwj@yUc zMy-GfkUWYPrH%sY+$em1G(*&QYTdUs)~r;ZVPEz)TAwB^T~{w9xA{Eov4^ZBKifKo z{X0&^aqLqsEmE>uyG0xO7u56}ZJg^i>FpgY=hqgb6Ooqd_j4#dUiy&GsSjZTXOKkoA!r$==Xes=$4~WD{8BDNaQpX!{)s7z!M|Q7*!i21yGJ) z!%z6JQJW2#`luR24FVW_1o}vDdVk)@^8a};cS<|0OY-HgpM~%aF&ej40g9Zv=U`ia zHtAXUDeyNm<`C+zO4p1s(t6Hq2g9mn(aGw5z3k1p{*G;fD)sdtHkX2_I;10msUdW@ zmZIC3+QAm2{ncm(aDD!D!?%1g1snU##m=_XN?wN3KslXh;iDdxyI*8D-23093!62T z*rDtL?<5CFHM~~z%eos+%k~XlqJ+dUEu`L5rhXR%y;mYN>}ToGOhwjHI5E%Kj-=I%al|AkZSfhqo1|&wvSP{P2B9w<^zYIukX2Fv zjvOn2Gce$*9vt0KZJ4cax|pcB7r_7|(EUp1EI0~{XNQ2w_1K^`h!RdEM!#j&Vpe_~|!P~g7#foG~tsm-&3U(;fO-6wd^m)$4w`wM0g*l7<~ ztqr7Vz)85*%%q1^};g|o4R`jCC%zpbzL}&TusR}IaV@L{mj+F%Ork}7l zB)Mo3z%SKDyOYszRLcp8sMPZTMj||;gR)6Fk*eP7?aO-Uvbyht*jJim-^WmpHI;5{0#ns41{#s`T8z0?7+v8w|!|YA>z{K{pq;!=^ z`%o~=1RUt$bhrk(IfwmFrIZ3kz~e0v4nvtre4*rHE-DEWOqeQgQDVR@Fe3Hd*kCs@ z&=7~L;Qa%RMY3PP4BEU5Mnyh&6YQ1tUK45aHNGqTe_S=Sv~0h__$#7Q#jCuOvAG0L zccz)%Zb~na0OIyjDx_abx^Wj{CL7RV`f&x>{m1i60a~kc3z(e!xPQyxJ4&w!P@g}{ zzG-nUy&99V`KzI<(Dx&g8SH7^M~ZJ+W`Jh@mK@jy}Uh(b+4zn##7JKddW z^GZ;>eFPLcAi@sBXF2OVsM_iT5rEh@H=k6(W-X(v^|sZi#P%ZcjJ^uVGRqxnGBK^5 zq4#Vg!Yjy3$VW>C_Xv&-OLgfjT~B_zAJKQKZ@YZID6rz3<-aU?=*JFq?TgiRsb7i> zLHzzRm|ZBTd7C}m>YlJwja`lpGa7`?eL>9euZr_j#&)qz@SRza`#BplEt6nW4gBpt zA-cpWiR_Cl?$5y3xM{wg`%6v}XC|QfV}Yi#AlU+Gy)GbmHkVnH0>o!IU+@Zz-(*(a z)!=V@Is%mC4bdrdr|42t5e33=){n&~Z(?uh4HC~)_^QiuiXg=Ht>6gMP+8Y~smF{x z)fKo{lQuuUxXAlmWy)+GE^nX9Mh#yId9hxY^P&#aSz1r ziYLRR8;d8y%*2pGKILt}9Ia}-v#}tre=bu5=GPul>v+`nwO*S*1UdKf>|j_3c;mnu z;5^s!9^7(^d#O6PclO$KRo^!nogDsB#Ns0!o~|X(Pxn*~k&2j_|0f=YLBv++*iR#V zv>cZp-1Nddz6+l{VR(w5IejQ>e5CfVRjG03{$4|7ycCt<-23GIN7lp0hLdd!LA`BZ zk{09Oi{S^y@5$CPA4&@JX!#cJw6WtgIDdEz-9zTw_^!_7)TBmz!8MhHlzIM%=>O4= z2$P=kY}(8vWj-C=-VB12C^h4QKs2Dd%Ur60B6XqSg;mtTS-_h(z`Otp*%?bGS?DSDInA9+qc?ik#90UHst<3Zu-<(Dc_&Ms(J9{K zuKgNi&rPV8h!or6@yWg1Z72C@39mwChrcua8e@;5bEL2h97TTm+j-?L;0x(!X*G~u z-u)jVPvm6w2&Ej_TMdZtz(D&TU!VpXy#$xK8Rbg?yRy1eC27gyIR1=8JG0`Q>M;Zg zMJ|&1Xk`Z6+4*>ReS#?m`1#z`R5r-*ed8ebwZz1USJ(Q0H|}PF8nJkPP_3y1#cEyiR)qhS}>%t|G%M30abz)ZGp`E|R z`ZN9apDJ6eu9CV762iD_qD%Peb7 z$4E^h6r^J;cW9&x7v+5}d1MBQM%e9n!g zN}0|CPErY^ZX0cI>npExDxs?598i^uhUw`org<*Udy7m6>{Jd>=@03g&nPuPORfka z#%#t_dW^nc9Q=kD%tZl%Xj#;8Xv`%9&slT=MSyceD4%zD+KC&>c2HxEo$S8s{lhSo zeU$Kdc#mQ9l$P~33OAz;%P5cT2wic}_1(+qTD(#YJW80svs9&0j?w9%!yuQY-wCS& zKGh;yo;p`R#kKb-uG!+P`r;w*HGgOXLfhy7@CJ!6l?4?L(S55}N#Bs@4_X{C8x!ah 
zOLHc@J`7%>Xu5#d`2)?}lqc6qKd6g@0g;i6Te36+x;ZmXkiG+8d0C$6Ca}hoJz`La zd|!(FDNOgBLs_)?H++m=kH1%lH$S9fS%IseNyA)g5x6=F;Fy2ntSS!ARY>~=q-_Ep z&>{dAIYk(FJGQ%_^Buv9=TBd+SbGsCUykAS_Cwg2hjT`Jx&=6tVL>!0LV5u0S0Jf& zt#~%NSn(eJV`Hu=X~aCQY13tC++BQ4^p$n@jaMWo-U$#l43PuKk`$SNKLqr1kT+hI zL+mnXmurLJZ*ZKP&!`}hazii+=BD-MoTTv14UNI_FHUS8N0 zNcad6RnKAwXj5Uq?{bk^P!l*9e3Ak<+=IehxdCnk6k;z_Y)U)W` za{QtWx})1~xf~jOJ5)b&F?MX4@7iLyvFok+gwvL@n!7-3kT39n1E=8e3)v2P!-Y#5 z8rv$@a~oZRK>;a8e{SLCc-gJsOBB;l29u|c#L=?|OX-oU74^Q)qEEEmwJA`|Gz9un z{ox2L8Ert<{Le&PJGZg+?RWMAb8$?+@IP#Us=WHA0n28;WlZ%Or;uuPj+a`xc5e1L z=x>guTs*TljU&8s8lzOl)|LRi?cX!aqgKYP@bXPJ%p8%QIj*v!!9%?gv2qxnWcvc|IXrQ=I=8L9)id5H;S(4vR zaZ*Xew-Ds)SDia~F8LeE~sZE5zw;EL#z1jceS5FaVz_ zfd<3k|7viR?n#pY_sw14)WvJS;<~Fs;=b5_rU*Tb$b4ZGKt~px1Ir2NNM2Ytc5iO6 z`_$Lvha@+2vsh&&G9ASJ8*#@^Vl_gs67nHG^d-^kDs>3nYcJb5`q1tO7WO;PqdEE7 z>}4y|po#h8>!JyHTt;+n;9Am2g@a>hCK;x=u;YIhRVt! zRBwpIvV0)ogemEHVb$T+l1Yxl&#{;neGYyp)L-rz4BF@WO9CmEV{k zwO5k1qu9KXVIl48A^b$oxr3)jC=p*yK%(+$VFlAfq8(d z-;8a%MxFJr`2Q_k8`D&G(OQD2AwZg;+LJ(t_9@^h*R}+4=H0SUnkM_>M7&)MvYE>z zg<;uZSOj9~yEuRtYu(eEN%R%^KBfg_A9J5>}<18%9 zH14pYUn30})y}l=+-P*(+McTPQj!-QW14^27@&-n{P7aW#{>JJF~=awRl0T=KK_>^ z%lSfnHBngJ5)^X@is=*!oJ$S33fJP#gZ|eeq4COAA>%0>qo_G@Iir&OIkco)pST4v z9?#hWSA7WHhk^Lsg`$IM(Vir1iC3YdJgGS2&2~=HRPUWy z6U+ruKsmx)?iuXZSID1a6jrhkzTLx2k!o1So!A1`f04bQfT>VVw8nMkBK}V@-;*ux zqu%A9usTn9HSk4;gGCH#5P)je=Vjg{X&KBu#Js!%ZJvQ1P6Do*;}9n_r61yDStE{t zAt?J4KX;uDv+7O0m+2!;&eCJdyu7UMc!9wd>(8f;^@j%R;(y!|F#5>8Xt_XtU0#5~TL94iYybejV{%A(&CgJM8av z(s2_Fw23LDE4;YY+yG*XtDlq+)Cx2C1^IvYOM3qEmozfU!Y#kXZWY-cqAVjDCOMf> z(GkE~qAwvTO8jaA{RvNEU4*y-Vgw|)AnC6k$4tRe3x#67xh>LJ8R(kQ$8O^5K?e-d zigun3IV$Y5yJWpSDEnFL8dKn+gHj2#rYT`iSmZ;yORy6?`|xxYqUGAti^AGp($S~O zteOt4M|~4DE8MVpNpO4kwo9?MCMUQi2lt=1gkF?DkZ8fy7o)Wwmx~T}@v`Xq|Av7;6N8b4}ucg#DX z1DP-_uc?emEjLl1(x@HpOa#7uLIG4Za=>D&VNWT(b ziqz<~_2U=~N;uJfXeUM6BkGe)UlkUpc%WAU;*-a6va+TWrf))!8#5bb_^C8y{SpAS z@&pB#RDo)k0ra$kO7?10e4+-B-1p^Qb9om9A$Jy)3*}|(pnelG9M^L8&;eE67Sre< znjOA#@sp@u=sNh&ApbQ^%|a+CO+252%sljlX%u(;7rxwndS)C+v*2|jJi`|+xMIn0 z;mStov?*xuyW{C@n^V`^6mT@ga%4kzYD#mn_y%(Z4tW@-fcRIbgV9wsD)lTp+7dcT zgV8H@p9@C#1kmerJiQ5d7(xUP?w5UN54Uhcv!q*QN0s(IVJiWnv57OGXG);Q^@~>5A?Y`^z)o&7=VRB_?~(AX95@ z+S^?Gr>^XC!1@c=fAAHOD1ys_!XyZo;~I55HAP8kM$*2d)2_MftoE z)BSW#lHGJpa-(4zHJWOPasSq`8%FgcJ3Y_!FKp~2Plbr6JB8&Pzp#B>weV9vjLz3F z#a_Rgj7(-s%evd!QmG&2xD7%d20~^xHB!u33BqSKa6*-`FSI?f3Y`2$c8|o2%(tJh z+i(w*y!<_9IF9WiY)u>?_M1kemE`v>t-XuzYIeiph^$Aak8ErVe_LM+%KX-48ZpeO zrps&+HRAW;`)#cf%^ufqk~6wU^h(tC#*$$}_;XgajHuDnUtJ657>ug)0fTUSU&XQu z3d^pio36Tjb*{t;RYcU?bBRM6QFU815!|?siFoduXyWPj`kA7aNFv!MDi$dWbo@|ZDbwd-Y!_Ozs|5x&yk4#tHJ6oi0J_Xz3WHBa}xbyHqeNAoUykwIP-B&H>>$rFsw$yX!c0o&CSQ) z-oBZ@MY{EGTPYfO<@%504n@@NV@=JXXWmjGLEFGN`q85QR~dQLm}wkx>8gDi|3al} zOtJXIm2R1n_H|W1QQ%xHO$fHr?;O3_fDA-V?!)Q8N{i&#@38o1s8>vGDf5jvKIF9k z2Qvjgba4yDL46MW+V@Xrn~y$rdC5`an#4!Pe}WjLfVs3pILc^-%};t1c2e5@ArSHj zQ_FkxHL9MZ{Pe-$N%(2VA`(iYJ`P41JJE=}_U#dwHE{mt$&4&p!y!Yb<3LSY)KMTN5s5%nevYY> z=)?aopO8WAe@DUAAtLz>!ZtW2dIx`kPSC=47oRjR6U!r&Rxi!|jHgF4;1u7zt0mjj zuVh@__8Lp5`z@=wTWHuAbKcJj$kY%}iUbLw#b8PtAFKgKZ%eZwFpW}B@qK2or%*}| z)>)9sk3%FAT8B`Q^807dm0->L1y2vQ4cYRZ=7!+8r`zc?tUFgnv!Cw3YL2d{H&M1A zwB#(88EDjlzyGR;8swxpUvs6n`=?uZ0=#cWCxGZ_V@|~NA3eQ)g_KgL+U2R0Dd*J4Y1Bj7Rn_&qx<8eP2B#(3 zL~I}2HUzD<@{03UYJp1dRaeBGx3X8(Cy{VfZN*PiL@C|`XV~?4xCgXjY7lsL&#I~;}BOfOQTVZqmn5ZIOQA7 zF}BfJ_)}JFKSuLL8?QMmP+{`pgJo8(S+ym&XYt?3;mOiU^)c&#r%%dsSu=A;g zAKZM#H%3c3{4UTmxcbpxrlfVObTpWU#pxo%6iCHV7B$%Tv?>)KMJqh$2N-*g zLJ6_V-i&FyER<%_m=R5&2|6P4fGjAot@(I5_m_!83lgbX3@hKsrhDPqQZ1FOmQH=r zk$&`J^uPT+^(gV*es5aVl4r0w`#&NatpIg0wE;yh#~PWgK!j9RPw;)JHu%V 
z-7sC^Ft%S>R#p_4&41*WV{r9}Nn6B_^~Gp|H0iVL1{I!oHTASrYbi5VG#66QW_mQX zf98og9eI_yC12&|Cn|*c+%tkU*1UY@hpeF#7y)Ks16?E=^o!BY30wT${TTX8fFx0p ztqUi-7dLx^MoX{O#lm|PkUUb}ZHZN=$6#LF2Ipu-j%(0BFoJ9v&8l!TDkJg689Pb) z=7ZXc#S`xC_|5nFFCwCnK`%OA?($y#{h>_tA9~xR8NYHT@Pi@r5rI<>=^8MNvq{6J zEs&N=D#h_UE=H&@D!z6d&Aea#GyTK?3ux01LgrP=CjLI5R-0?B6M8G-yW9>G>htkz z@bf0w(cl@~;;T%O``0@(z&I|X8^WQ3={`;(x-E-xKkS1v7$A^j*@*aF&Mz6~5QTuV zD~Q?<9pI=@$Wm;!-dz7sa+Fg0I-Eu5aYc_a_o+Sipxk}%3o66a(|pbb-B6CRkS|bY za+;}YZ-&r_l=t?z*oXph+A}1pTZPuAc-u0Q zUL|J}|IBSlddU+NvB6GK!In+y7fEC$|G?4UL!VxhOiI`tJ3aUhiJ%8;_HW`dSoO;J z`}#pbs=0U5t#9MQn+_6R7UuXF?d>j@X5QrFil;14hKwHkVZ-4o_leM6TXNY zz9><*2g49&2v^2){YZCSEkBOSu+YQ?b0k|E*r$HL);G9e@R8TxuC^H|~5QAP=lYs$ONtd^^% zu1bbtd_2t9W6H5U?f7{a`ASEH)Asx~%G>D`i2Hhb@NymU7NQsKlB}jL7NlSNkcgMiY+~?+b{%7xf z&Utg*a1HT-x#0fA`mXg^_4wSsg1-A>nep1&yneF&CSx|yyxVGd8|OYrB*ID5)=QSg%=;3r=Zg`s2hnNZuC z@#TL41nzv2A>T^PF=6>EMxcHdaie7x=M5)hw+sPB?_^xAcQUSt?_XKgkq_G0G#9e?}N>{Y?);4>xLeVH@#w z_hH(Ub%juyM*mlX5Lgc>K=lwg#k}nE@dY(-?X4kpa33f%_}m)cC@XvmiTJMA26Qo= zAzD*F+)C*WQNt=!#xW8yc0p})B5O1T(gjZ;dML}nH(J}?{jS3^R4jsLAx|7kVXLMV zk^jUBGlW@O@m9){`Sv$E7B}Wy(bHboLkyaq{e9J+NC07Z()f#p?qs}e3l|~m7b`p| zasLKTjk9hGv<(XHcjO?7!dWzX_sRcfn->4CZCZHszqaX9ul%T|&!_TRxvz)zYe$<{ z3Cs7wbBB=22m_?BY8VjBJaGg>Hz7dw-~l)RqimYx%6QrVZk(NUZ~c6I{20CupcFfd zu*_7fKW#>2wA2PER@HrupG=FogF@lg4<+R00ZCHO6qwxt#N3Pgrb{#ZSS#Q4mkPj- zn10vC$_BqyItZ(9>ndU~GH|^H===RholDcD3AA_+tv*dgLpFtQmqq)_`h7cXPh_Mv z#}2@O`_D~t7mDJ9{|lC%MElZ4F8HbxE`051bfd{Bp%%765uoRaYSiYcUfyP z7Por6HFDA%nF3W3ubISB;{k|?5q`k*2K|Le?7;nDiX3n}>;77?^oV5PJrHTm!5vvh z`9Kf1p}+^vDxYF}eYaiYvi3VX@7C9}sOZW*x@kpPkGfd#Trj^~VmT*J5iM!z^NClO z!#_PgU>Z!!Tq#~Me({JQ%q>=bV zl+@ug_|%1A9A(<=Tqpb%?0V;uE1!>zxI|vVqAyYlUo0{#L`}SLi2m|N&@P&2gGwWO z(f3uf*}Jp1e`k5AsSgA#r)-B?1}}%M-VEN|O-l2{oq0a~F8e|`{#8xQ_Lcm*NTr9b zqBImAhDA?WgtAP*sD<8JKvvr2up9LqPOho#c6wgqF#0GirnmGJHmswMef%lRn=`6e zzM7gp@yL%_ay~+JLcO5*a}O6iOr^)vS?D0D}YP`4W$F$xy~)vAwZZ{&UfCr~5FzbhcJE83CU~ z##qxo&BGsO7oiTpF@t|6son=89aEWGiI*0XDHl-Tb9)1^jMWFNU|7=c%5d|_9P3Kh z5+R!NblKpJknD1KW*Z8citn_Bs5{;U0Ck1j)(*0*A`KzML>u$SP@_!<)D6bkq#0-1sE=jrw8^ZCNskxty`7*hr($X6}kbwr^YHDB|RF=_)s&)`nWjhQaYPOz;hR2f_t9fyr zM!YaLS%|NeJ-KS6{(cSR$dW>QO@BTH1ICEL5%gmd>wx&L#ajrNym#1$$pJzY?SJXK z_BAWox$P!d7rx}!4X4_Uo`hX?t72&vnjXXSraQ6YoBo0TqM_zi}7LTD~EhiMcbvq0}JebRAK z^XJ3m`FWZr?;C-~A*I~FQv|)59AwW1!}bo+sk#FMj&YyfE@NKq0Bxt;CtfkisxjX$ zX41qVc`XFXJ95$<0G0Gt!pqp_()3cAyS|KM$XaT6E&p>DrY)!yC>y!TH?Bbmoi3HMQ-;ZLX!r-1UDyw?jtJcG zIOQ74`Nwdc+uFRYk2}r@uP+wi-w2!+-0WxcJ$IBh8(9J!TyL?i9~MSdJ`Ejg_sy;M zBo|2*{B~-}h z6_(X}y0kXf6TgKQ!W&XV1AI;2(+AIZx z1!fl`3{(d_Jr^zvLtX|GSSc%r3#{;&j#)Ab=y!ool&a?O*@re-Kky0c&qYwP*QOVD znSYk*QI*kNt2e&bF>-0mWl(Q7x3#WVCacXrO+TDF<^;1m{Zog{!k}Lb;k_U;tWM@b zenRT#r7TeIYQ@}?RNx#r_4saN@skkvtR{SKpGANbA! 
z0zYEMZUIVfI6J%L2B34^eR9NLyGfqgtj-j~JKr&|HD{T;!~p37HrIZY&V$*VSEEBNDI}ZI%fr;}6&^-~Aj3-1R&D`Ug_9ML5)GEBS(J zixua-8m`TC%3H%YL|sgmbm`#VG!F)Cq=|$x(gYo-{tG+@6%e`RbKr%|6O;rvyNkr7 zI`TXb>h$z{Sbi#G~OC%RA2WVVgUc1H>u#Z8k4_x>Wpc zt0F923~+Sy`yh4!137LXr%5D5^nr?4kwmpv z*+_i1jc6$3%lM1QHeNasj7|9@~u;!|<0G}(#+KgYW{ z`YX=<+CEaLyL~$J8Hbxs?mhF9?N966YATaF(H4(Bf!O{>J#P{M^b-X(@`GwnXb4Evcc7hesxDT{qG znoh)&yjZ!|sI6$K{XDLRAH3t6>l7w~Ze*9R5*;(&L;|kyxw9I`@g6As3@+l)?`S~f z+X`@g11;(8MZN={_7!?7QdwqfQ*Mn{v2TME{l2}y&1;+XU<<_T2(=@quoQBuLK03x z3f;ZR1EneKEg%!*};}U6a+nJ>bxyonQoZADZxqXoY;E5QiyV7p;z$ z_{h!!%;6ZIrsoIniCS_1pqg`ImjNGsuyK6VAcB6kKRbAaNf~T1^a#EF=EmXj!U~JZ zyV)z0VB*ul;8{d}*p{@nTG&f*sg+#2Me}Th?=6DpF)a64GCRNTnzxWuQ<)gLs*I!#Og z3Bg|OYd{AF$t6T7@P6e?+yy1!s}8`XHDcS&Bdbf76Kg13Iposi1vy-HF~~Y1QRWjW zh7;~y!MjP$BAq4_Tb81Ge~4`vtN zZa|v`=+b=AT3`Jh20mlL!@j~-?!6nGz#}koNOrP6xv}B;LyH%AzN>q99c}v_3%1+y zHAnt!5%#|E2kxRvUK05U)wH<{Lek&qVC=CnUkhax`ad~snw5xC4YHN|+ls^AgkTDh z^T8*0D+dAdzf892^X`?0?W&k6xlah&pTHUH%rh+gF7|HZP*_n6HSk(4jKJb& zK~%%#uGM9e-&>xS1bs{^cKJF*nR)U-a&IbcKVhjI#ne4n;Xro6hvASPP~{4!?SO1F zl>b%9Jc{w$%K&2BAk{zZUFkZfHbq8)5B4If;vG$}k37qC{n{A>!)j(4ir0Tm7^4tp_GG?cH@^KjKi&c{D7ONL{agQ1 zEa#;U>*KukM8=pu1Xu|ok)^-x^G{%8J5n^X>;?nFxh~PhcF~M6+hG`AEOujUw>Itq zfbHCXR)!>{MJ}c1ErqwpWD9V`JWG^|`r;a~;3_<&%AL+|>E2C2LqV)k792fY~!G;u7(>+*+fEG&R zJSdwq(>b8E)Ra6FJD{$*XeyP?tweCk)IcYmn)BiGaiBS$G} zgCVMN2$t^}FqgC())9*;Za6-0dbfTOvE1|tshm#>_=5SmOqeubBut3Z3}#=`VzdF} zjjJ_D6ZO{7(Rhp>N1js*DCX?~(SO9wPa*8!{b2s5D3%u9bF43qn0RDQ_J95|>`;5T z3VSs*7GuJ`zxgj;BP#x%FkRxgEA2z$W_b!bOa*_m$lT`N@8Lk+AFtK~zoaWqOnU^B znB<2{aq2t@a}j5LMuK%0!<@oKh7S!HbHzywI;o?gTjj?q zY-G$DXR#Oo)`hvkqQ-Q@tfvow~)Vwov^em0yJ`j;tKXSClaS#^LGMOvr>< zFWtrAwQPQbXf=6)NpSwwEb*nAGQNX%8WI5gw*q-2go(Kcar_bg4)Wo&yKJag?`@SG z*3F#zYF6$TJb3Q^W4yG^IRr@T@p>peLOB zluelkfy9NYatq;pU@FP|e(2md<8tIz{`OF$Qg2P!Z8{RxpEP4WiQchg(dW0L+rQP3 zW+*~^d$9mFm%EAoGAHyaWcc9Ayqoxy+_bl zy~R&#N8+ffEW9h!PGv6Qw_k25{-zv(aQ2+10w zRtad`TZi_;bA3JhKJ>xSH!bui0N=vb*bauhCd==%w7!7G7CZ+axs}fPy1zbE{Xf9g!OhvCOZbADS|c94pr2kg@NB% z$6aGfjBTc(V_B&{ zOF@%({VpLi;LTWo$%YWn{%)I}k&k!aBcY2SB2fOpoNPeq3#GS6-vWn;bCovUw}m72 z0tNCtf=1IiYU0uo^>w$MKR3dDk}G@&|3zHW|8vQ78y$ZpdrX4W>YcmYuNQ_nJAr?H zc9y(rPl$5%hDg z!?Xr(nj>AbO&Z)Nl2_KNJOzStyy`A z*(m_6`cx%%=@LM$c{nS>xX3M?z4=OfzB$h@bbi|-*lx}KkIb@kh4NKeqko$0mG`oA z?jMxTdLm6qZg0P(eSYCmiH6OG)FyCn((kg>jg$yIe91E2V{ui=d}$mV>{E_J7bL|6)#b6bE=A4;n}69^!92K<-{#cwxL>=G<9)t=)Lk+$J&c#%1SivPmm#;aJhahz7DL zjrj1w8P8R9c_7WkyUXQkL1@>wb-j-sidVvg^Mvfx%ei3t1Y*E6B`23QLGLyxnXHOe zXaCszlfE@gB~RU}PB7smGRR?0CO|i)?oPM>&u}m2rOwQHDt9?Yz`$PcSk9sVwNo*y z{_K3R>w+J!bM-zExMvXl0La}Nuc>=$-&yB(M)Qq$kzE=!;NqzsI|J*OrN!A&1WJeS z*6pXOd&x}$mrv#!x89Trqog9jbip}(H{;nQ_-JqgT7Rh(5}>piIX{mGvO=e}P*6@V zWP({FnaqP`y}Am+qIsqO;rWFi2Z8@f0ndE?FyxKJ*hU-u1gn49@3x^aj9a^#Pd~~m zJ~)@9q8b0bEn2}Y=*Er?Ke1Qprd-KXy65U8H7Uhn7J|G0I=G|A3~7V4oBoLV&ooxH z_-&Q2&d^0`bMj~3qQsvs{g?Wk%_ymQgE4pon2~T4S!rX0?=DSkv-L^V(D9@=b2Uq9+&WVOyTcxKNC(j`u&YHoJ zA(%jEXE<#Wz%1{=u>(EhG67aWs3r6RNUIs4hw-@=NNGq&RJ)YB5a%b*&?7uuy+8Pn z=sgAKoJPjebUXoegVc1jv1_QjJdi6a_m9Q%4gwp}YB`TBDCqDEI7>}ex49eAN_F$w zW?zQ&{qvRkm$5zIOT(ID!!1cjv$#DBdStVF#(w7U#+l;s8gq9I;CS{4sRcGdNH8%2 zTI^1BxTlpO+QR1Pw&;kf^+V9x{+h*KGwSW@*)M`yOMwCUoeP6w?7OS5QUJa>n)NAK&#ZAmW5igOkZ1m(Q}q5Q91T<{MjC)|VH(%;Fbw7UgQ%-B-bV1$4?|NDZQj~TbH~A zmYN1A#Jjw$WRLS~s6q1tAFdEcqRhF3KHD1xN1A0o=|}KLj~0 z<}A~dfs*<gC<*c6J$d-JLbd*{Mj)3sFcg-9}J7Yp+Gi?|# z3M0c`my9&j?eUI6kip<`nLjI*;iu&Op{m1vl9;mH4v%q7QO-1(DvD`pACawV& zg+|zYmc{VU;`5xSJ(8U*-a?z29$m|&C2_H1g69$&EhgZ68_XP`dc29WdVAoKkqfy; zZwkB8Tjc%ay+0~2UkiXVJDH7pnw@xvXuXd=yfPeG?t7wXsf7U31L^eXM`xvCDq%l= 
z&^Li@X&sg;;CaQ&y~XA?kPWhTh#5KuxNGsiAa_3f^EZe>=;d~+@S{^muk$fzdIdd6;1!b$@$7_= z(>!oa*;&#abuJ|;4K0ZBivPgaymOi3m$O0J`;1>3x>8(jK%@O*e110`y2nmQf%W0M z<2yNR)|>29@_DC`yQmXr0pwoUX9!WBWzO_M8v2uS^`TsDBkBP8Vrz`Gg8+MWA}d8}nu$oxtTCY^47&eDBUsb?ivJQ!~)M! za1r#{DK|(H@Zb+9fCvES1O8%^ubKi*ccz=BsKvLz-R`E*nI^S*M{9ezSPe&Qy$v!p zX6!Gfp|dV1cAL<+>b=&?e?l0(h8y-xi@YaS5ZEQ;%C;xXPKUJ+eqx#o!tag zKqK}1XP4Bk1lk0tHdTeqsg!%T@(H(3omc`pPN9-4Slu7cbw{NS=?WcluC`R0`M}-?--C;BPKE z)@wMn^gu2bqvjpKW8T7m$5-1>QOjdEkVGKctkUL-;qbB@=2i(2J#Me}_6sI zEkFDf9|v+Bs{bD^Jo!~p@^0kXT0^l3gkCsCih-*Jd* zm{s*{mM3h@fR6TU0oueG5M~I}J|crbTafbvpidaQKybmrqg2BU>3$PA z`~G!B7XBqX7hp;P}Eoqd!6n!5K!j(;y^oWjI}xRLNR|4*tY62^CJ?Ed3YCx zYC8-qMjneP%OJsjkz0TRyhwAQ*cGGL?gx6Jp<7JB5kDnzOlW;66A&*yR3Jck{v8Z13zd+~z~NetdGSR(ZV zUOTk&5`pvQ=ia34u}{D60Jk4(&{374NG$_|37D;LmzUfD9xlzqkP|hgBdFtRSFuC{ zG3j3RxX<&L`R+m`e@#9_Xc-5EpFu5y^hJih%<-l48l(a@X9osjK0edn=KUX+GGRSU zx)}oC(C9>JQTBl3z56x~RwsNq@v z+1Y_rcn!S6nHH;K)xX*|PpH!<(W0&8U_p`!J>1dQaKm0zz4WzUuJ4D;gCj5t9N|xa zYoKTLlJ$>vvG1ql!3cxTgSA4-#VBO{5ANGysw5v`!!a8~Jh^8VzPY$6KROZOIeyP3 zF@vtOfTUxh3=W}f@qHBPAmy1XQ z7=0y-2^uhfSN00)^wZrAh9ex?tQ<+mDuY}P!9R!mZd%P5Rxtc&*Cx9hbtY%2!D?TLG+)318+eJ(1+YX%?p?R_6_T6l>W<4*_Tvtf|wD%9?+T&PlTyX^k=PeQgHA&y;p5a zHt3{YxdZc_tqi%hWSCdwL2#Y#n#gsKG8Z0-tPm^ znr?)a{Su`)=e8J;fhSF`Ap`ns;(sK2ewuNLQ_!>9TqLypOIM6UR|M&b!Kvc0>AX~= zVd5Wp-_O#^NaPy<Mmu$;CW*6fNkS^WpE&Owe z`mVc~=zJm_EH0}Wj!E6&LjhQoi*NYOu0Zm6HWF0uv8GT9%+cRmOw(W~8>g%=nzm=- z1frA`;kQ>kXcxqBD0KYU6L=|sNCi2L=ZAKYM`k^!c@Q8Rc8}ybeh)@F#=EF_&sw;d zDhd3tQjh^#T7hXx55V&Q+(m&JK8ufPw+=~&1b#FA1Y<<|pf>#t4#pPbwU!OaIc$i6s@d>EJyETep|EsxN@;_{qIe zKjh6xSwgJIJWAl`E+D+Q2U*WV3W1JIkVi0fU#NnDLP}wEjZ26U?2IKq7=ou`MQ2|c zWYwZ1LEOJ;!?Zc~i7%G>)*|nQOb3x48)fv@gSQ3(re=m;_4ELJpe0o^2RnYeBNY9k z%p(JP1~8Vn5n_7>ZC%>Z+R!O*GnQXIKbT*$mId{(1W6zZyco)W*_%Wdl&oo$35SyU zysOl#-c9GK=3-@6BJFDB-Cz%8Yzm_ zmH(f7@_{>**i0mt`m~0)i^3dzWMEcF>8WXS+jaV$^RId#={FROgs&RPT0X!b^u6;i z?Y=<5NT)~`o`2KfWF%C=tBL&;HywPNgntWGeuJ6~{C?ANY}kGz?mhu6GA5FoW#l48 zaYWkuz`ZH$L_4{mzh{gqY!HZ+mEsf9eCcf9o6XoBU+3BY>%$JYs!svBbg)MXTV~+7 zf%OUUm}7MlItxgD@w-Qoc7VA1ZX)}Pt6FDqACP!EnC z*}cXsGqBbMIF@8yKF$;Rv9=}a7~8)(9(%jR=H7_CTbU@E{)&II_u~^y0grcB8601?mfM$Vc~KYtV&Pg9b8HtncVHF?Xm0>Hz0>y{3OA(@qM=c! 
z9k&4y@4f47j5i;DeyI~`8}^4LH7t#gnQux~OGXv8ulrSeWM=R@$+jJy+_%zd>RhGe zpGtz6JX+f^Pp5=Q#*2)KfUK_#|Gp0?4@5vzy3#GqavO@gXeXXe^p2)BLuFcwNw&-C#nZ^EvzZhZwEj(}Xgr zajJuZ-|!!8Q@+gla5eJ!rEKb4rwW(y)^JRA$pB!;?bM1A%eI#vlI=txIh4w$U$|%; z_jlg*$VUMBrZjw2TZZ33CT<{1rrJT~pwFuEm)7tgA9)CLH&2@MwwG4-*EMyx$!%P3 z9o$Dec(YgE(^e$EPNkA_ay>!A=Pd+zY_54|f$&w8$jy(pPkp3D`@OI!A^(Ib%6g#J z$e#xvX*e#-MH-h_8mHADp0&lrpIVtn+9d&eH)-`l7P(%3VRbsxVU=q%q5cqYL=&Hc z8TqNSEttZNft>0eY+q^uc$mE@@>5IhX5_wS_Dy;`@>~dRW5c>;xc`*35xW=l=4S0| zocy}NU>{5HC)J1je%Ty+^q)C+diLC3_w!`413$h$BvI83IU$zH#$T51OC3p6?7GUBUo4ypJ)lsj2m zg2qztWaJz&9ZZC8)q5PzJEPg1nyjgVCcN>1YY;kMjZA+CKD8JjXsw?z?EUL}+vV`G z*T+r1@bbny{7AJspY_>yPIPP)%7lHBu0vUK0s(_P$04^XP^Rgd^cf;xoFpnY6R8<^ zqGwovySm%(>5=~f+o$xj)DAY3j2<5&(If7Q32@5YPTs%9B1$!zPKU3DLkXtk`Yqgj z1b<(_oXTfsGywDsfKdRfEb#+Co3jTp7F?}*WQAtZgT?`t`1lQ^*~seTHd1^DA(>`u zcryVhwQYTO-+#Ajl4dhqsMO-)*W~N{WE1}N65xmgLqrs6%^;frJidbf{D9Nt8tM4N zt@v8XpY~Aiq+g~epAr3gEhauF=VzGckyvC_UMaXIKhwf_K4LvmXyy17#mxc4#74%% z$EJVDg`Mv1ww4U_`f@K5@W5}mIMVG4D}vX)6g@dA+4Qm+HY49aH29>-r*$A0G2A7=1%gm?dy zoI+o`57tzSliO!}(;aS)@0l@9D_?TJUL>jU2iB&0X7`DPpjS^!ARt>rM*rSZlUUdG z<^kmbsqr9qd8|RK{XZTLK1O{r2ECuI2H**oIl#Xl`F z`i~7-Ji~K6)s1dSO=e+7r~Bpz@5>oo#Qp^||V>1QZozt{b1ubG^?7 zIUfePCUWH2xX;S!ObhY~q8eyYpx7?F*NmTjhQ+PDQ}ZlvAT%}?J)-4{1g3UpNQLdI z?xo)szq9|h{IJ$2!@adJT{IE)lVqz>BeA0*i2Bpq4fM|@kh^g$MzB6`A0_TZ3av%A zqoSHU4$_FC(iJoHy6_2gB3!nXS==tRXv-EKmceLNyckWHO^ZBNK zhDaW1;(KDzY6fE!i(lUW$L~P_ z7WMVd;YNdojJH1{O@<3?GYgrt_^hN4)p=lmaPso#kW5|g+0OV?KARDeaP1C4flTKg z`VY0ns7b(bPpjbYwoG@7khq;Qk2bh#D|$`4n7lwJB#nYFx#uZ&7EX$frD#@sBk0>D z-6ioZ;FNnOBLx>yjNN|@5RS%2GXc`m4Jd} zE$Tg2C8q>88FzMenWHuz!NY57-Ib4e4hlK}$?>$*GQ__u7X(|SAMV47NYNqRH!28en`nVBtQh%qFp&G6trdGj* zr1&cz1FVAoWsn}~64#buHe~yYzEvjV!=!id6lLuZy2e6=3W`&Mt|e4X9)XR)X-h{R z?HyX}2mXbNtsdI#m5`iF`r_P#(OhV=nmCuTwA6XJHcdBTr=7k?cJv=}5bx=xE$W!N z>PIh>uUYu@w5=`qVUQ=SH@jy8udU=k<66aFY8aIyD#6#Vxh22UcSmK2-W=yz)GX0z z)a=KWlxf=~s>a)AB1eIn#h24zo1Jd>nO*f5PO|t*fg5U`Ge|6M-uZ>Vy7I@0?>hfr zbsiGT)Nk=p^SjoeTC`z>9J1NvZ1f82lKf!htYR2Hl2l^FiNE`hOF314{AhiA$UisX zYvN%ePSY_?>vG>|zsHhimsRo|ul)gPqf%n=N4&|T#)HP0)t2W)hmsaNBKDd@+Qcs5 zVW07&6hl=0Hkt3TI`5Or5frphZU~|rKkASW%`=DBxdlIAqkPg$*(3*=MQa-5BKt)| zZ5L*^JzP%zKAoqoavL-fv0ZZx&Y2R)dri`QsGV@j3tqkd36AT~LnhlP$V30RZeDd9y z3UJ)I`knm+zRaXLWQz1{Lr#I(e$}jQB{CV_-2+YiYKAOL6Q!vRJ~)UmeJb$!^@pbA zW=ihOTd#!P>7}nv76wro6ll?}T-KrPw_NS~h2yNm+t|1>=n0qIeqq0&SN zPXnh_J*%IqC-tFdSfKPF{)q|cpUYv8f-xb-|HOnM;H(RE|J6Qr=2(xa74$SY%x4{p z3Dxqb>+#mRdbMex2gi(Rp?{LhvIsuJ@6eQh3kK>-B!irmrq_ey+N#w)@(cq{y~Eh> z^zS#&n|=p=_78(wMH%}~q^Y%y|GRl?SH$Ma|A}we|0xyp%mP3N`2J#-p1{M>e+O8p zmI2N?hY-<;4&=L&&|nM560EJ%4#&)rB!YmIU;%AOc0mPQXWE+BmdQLY{PhLxp$)4C z@3|Bxk;2cs8UApUCe66exycGFXHU{{F z0gD0qI*2d9k`8@5XW|T|h0^?dA;}BR?9{&G2fjCa$fgAf)FPL>17#Fn<5n*e{?a}v z0(Bk)6sY(+9XT1lmQ&jD@nPrd(+(7ZwMyLUKeKj0)q%Ghz3>;ZXFV{V_Q zlfqiCmgo6)2Ho!k#Pl&k3hDH zDJ=opAs!OIE?8^sT&Cco8#gA0S~nLf=CWO4w@=Nu$f%jjw7S8 z+clab_OvLvf??5d_l<{X&*80yDVf{lq3!5~!|#N|$r+d7_ffe7jofP`kyI2%zE%T9 zk3~zOqWRm{L0lOo{z>3EK6%HJ7WjSmrBUOrwh{)!UwarN zmhYw_^bLc<(D|9MK#*?bO3t6#&}JGI9@u7T$tT{h-?+!e$)81kAB0eM4B}R5*3EAR zqeVN~V5~VfKdj0GN(9dvJOLwZwhN~)h*PQ{+A$VEH@?gd9?Mr4H(jS>nc4z6E&-K&kO-k6KpIuXMo6UVxb#_39P`qU-@| zPWVSyY8IJ*Lch^`itw;h%LDt}J%7UrNZesDH$e{J0kXDzj(U_b%5T8OUwEq>;b6{Sm`>SkM4ZL z`#bi8(Wfj}N#y0EIzL0nN4p(`6XGuV4uJ?~WwVI@MYBiQoUqfdlOCMxX9+(bVOcn< zu8LA6Dj6eZUv3xMW}t|3CbM4%u**(~XofN!r$6T<%oTNjxC zapJQVix1Sa^&u8C@$Zy3WX8@k|A_s$WUa7m91x3|IVY;5Qre7J7pBSHWLCss-SlNI z#mzow?|nyI-q^9xRjx(!#ijG;$3XG2R#K+DeUyt{bF5C1-Z7by#ScC73d&sOE8afr zlJ`!JLJ#IY?Q(EoTd2#(BkX$>n69GiC|~*ob?6A1mrAnKzHHW4Y;g&h?NX_0_({ah 
zSTZ%RR%cF|vSOK17Op5=e#a)kITv7dRUV69F8ZcLMT#mW(*?#%6-~6h%&$p$bynd# zQ1@x4=`_#BOppAsw3Q;b=Kb3ROVx-xNPCxOl*eO8~jS!TOu{oEwHZlsK@1! zr?;EC^;P@oGri>xQ7UBEG0aldCcK(V-Q>><-|O9m|B2AW?Wq`yT;rgdbmA+bXQhi4 z^zgLQdUej!KwHpE8q4$g^fh_+q`gjDbYXm?%lC2>#$a8&0HfQa+1Rh?v->fDVP! zd}Gwb9p!kIfT3#sqs&w0L835i*JDy0#lQLsv8fsakVgw3 diff --git a/packaging/nnfw.spec b/packaging/nnfw.spec index ce1cd0b..a1157c7 100644 --- a/packaging/nnfw.spec +++ b/packaging/nnfw.spec @@ -1,6 +1,6 @@ Name: nnfw Summary: nnfw -Version: 1.7.0 +Version: 1.8.0 Release: 1 Group: Development License: Apache-2.0 and MIT and BSD-2-Clause @@ -30,7 +30,7 @@ BuildRequires: flatbuffers-devel %ifarch %{arm} aarch64 # Require python for acl-ex library build pre-process BuildRequires: python -BuildRequires: libarmcl-devel +BuildRequires: libarmcl-devel >= v20.05 %endif Requires(post): /sbin/ldconfig @@ -62,6 +62,12 @@ Requires: %{name}-devel = %{version}-%{release} %description plugin-devel NNFW development package for backend plugin developer +%package minimal-app +Summary: Minimal test binary for VD manual test + +%description minimal-app +Minimal test binary for VD manual test + %if %{test_build} == 1 %package test Summary: NNFW Test @@ -83,7 +89,7 @@ NNFW test rpm. It does not depends on nnfw rpm since it contains nnfw runtime. %define install_dir %{_prefix} %define install_path %{buildroot}%{install_dir} %define build_env NNFW_WORKSPACE=build -%define build_options -DCMAKE_BUILD_TYPE=%{build_type} -DTARGET_ARCH=%{target_arch} -DTARGET_OS=tizen -DENABLE_TEST=off +%define build_options -DCMAKE_BUILD_TYPE=%{build_type} -DTARGET_ARCH=%{target_arch} -DTARGET_OS=tizen -DENABLE_TEST=off -DBUILD_MINIMAL_SAMPLE=on # Set option for test build (and coverage test build) %define test_install_home /opt/usr/nnfw-test @@ -126,7 +132,7 @@ tar -xf %{SOURCE1005} -C ./externals %if %{coverage_build} == 1 pwd > tests/scripts/build_path.txt %endif # coverage_build -tar -zcf test-suite.tar.gz infra/scripts tests/scripts +tar -zcf test-suite.tar.gz infra/scripts %endif # test_build %endif # arm armv7l aarch64 @@ -134,8 +140,10 @@ tar -zcf test-suite.tar.gz infra/scripts tests/scripts %ifarch arm armv7l aarch64 mkdir -p %{buildroot}%{_libdir} +mkdir -p %{buildroot}%{_bindir} mkdir -p %{buildroot}%{_includedir} install -m 644 build/out/lib/*.so %{buildroot}%{_libdir} +install -m 755 build/out/bin/onert-minimal-app %{buildroot}%{_bindir} cp -r build/out/include/* %{buildroot}%{_includedir}/ # For developer @@ -154,13 +162,14 @@ install -m 0644 ./nnfw-plugin.pc.in %{buildroot}%{_libdir}/pkgconfig/nnfw-plugin %if %{test_build} == 1 %{test_build_env} ./nnfw install # Share test script with ubuntu (ignore error if there is no list for target) -cp tests/nnapi/nnapi_gtest.skip.* %{buildroot}%{test_install_dir}/unittest/. +cp tests/nnapi/nnapi_gtest.skip.%{target_arch}-* %{buildroot}%{test_install_dir}/unittest/. cp %{buildroot}%{test_install_dir}/unittest/nnapi_gtest.skip.%{target_arch}-linux.cpu %{buildroot}%{test_install_dir}/unittest/nnapi_gtest.skip tar -zxf test-suite.tar.gz -C %{buildroot}%{test_install_home} %if %{coverage_build} == 1 mkdir -p %{buildroot}%{test_install_home}/gcov find . -name "*.gcno" -exec xargs cp {} %{buildroot}%{test_install_home}/gcov/. \; +install -m 0644 ./tests/scripts/build_path.txt %{buildroot}%{test_install_dir}/test/build_path.txt %endif # coverage_build %endif # test_build @@ -189,11 +198,16 @@ find . -name "*.gcno" -exec xargs cp {} %{buildroot}%{test_install_home}/gcov/. 
 %manifest %{name}.manifest
 %defattr(-,root,root,-)
 %ifarch arm armv7l aarch64
-%dir %{_includedir}/nnfw
+%dir %{_includedir}/onert
 %{_includedir}/onert/*
 %{_libdir}/pkgconfig/nnfw-plugin.pc
 %endif
 
+%files minimal-app
+%manifest %{name}.manifest
+%defattr(-,root,root,-)
+%{_bindir}/onert-minimal-app
+
 %if %{test_build} == 1
 %files test
 %manifest %{name}.manifest
diff --git a/res/TensorFlowLiteRecipes/AveragePool2D_U8_000/test.recipe b/res/TensorFlowLiteRecipes/AveragePool2D_U8_000/test.recipe
new file mode 100644
index 0000000..7322e90
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/AveragePool2D_U8_000/test.recipe
@@ -0,0 +1,26 @@
+operand {
+  name: "ifm"
+  type: UINT8
+  shape { dim: 1 dim: 8 dim: 8 dim: 1 }
+  quant { min: 0 max: 255 scale: 1.0 zero_point: 0 }
+}
+operand {
+  name: "ofm"
+  type: UINT8
+  shape { dim: 1 dim: 7 dim: 7 dim: 1 }
+  quant { min: 0 max: 255 scale: 1.0 zero_point: 0 }
+}
+operation {
+  type: "AveragePool2D"
+  averagepool2d_options {
+    padding: VALID
+    stride_w: 1
+    stride_h: 1
+    filter_width: 2
+    filter_height: 2
+  }
+  input: "ifm"
+  output: "ofm"
+}
+input: "ifm"
+output: "ofm"
diff --git a/res/TensorFlowLiteRecipes/AveragePool2D_U8_000/test.reverse b/res/TensorFlowLiteRecipes/AveragePool2D_U8_000/test.reverse
new file mode 100644
index 0000000..e69de29
diff --git a/res/TensorFlowLiteRecipes/DepthwiseConv2D_003/test.recipe b/res/TensorFlowLiteRecipes/DepthwiseConv2D_003/test.recipe
new file mode 100644
index 0000000..a09afc1
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/DepthwiseConv2D_003/test.recipe
@@ -0,0 +1,44 @@
+operand {
+  name: "ifm"
+  type: FLOAT32
+  shape { dim: 1 dim: 4 dim: 5 dim: 5 }
+}
+operand {
+  name: "ker"
+  type: FLOAT32
+  shape { dim: 1 dim: 1 dim: 2 dim: 25 }
+}
+operand {
+  name: "bias"
+  type: FLOAT32
+  shape { dim: 25 }
+  filler {
+    tag: "constant"
+    arg: "1.1"
+  }
+}
+operand {
+  name: "ofm"
+  type: FLOAT32
+  shape { dim: 1 dim: 2 dim: 2 dim: 25 }
+}
+operation {
+  type: "DepthwiseConv2D"
+  version: 2
+  depthwiseconv2d_options {
+    padding: VALID
+    stride_w: 2
+    stride_h: 2
+    dilation_w_factor: 2
+    dilation_h_factor: 1
+    depth_multiplier: 5
+    activation : RELU6
+  }
+  input: "ifm"
+  input: "ker"
+  input: "bias"
+  output: "ofm"
+}
+input: "ifm"
+input: "ker"
+output: "ofm"
diff --git a/res/TensorFlowLiteRecipes/DepthwiseConv2D_003/test.reverse b/res/TensorFlowLiteRecipes/DepthwiseConv2D_003/test.reverse
new file mode 100644
index 0000000..e69de29
diff --git a/res/TensorFlowLiteRecipes/DepthwiseConv2D_003/test.rule b/res/TensorFlowLiteRecipes/DepthwiseConv2D_003/test.rule
new file mode 100644
index 0000000..edfabc6
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/DepthwiseConv2D_003/test.rule
@@ -0,0 +1,3 @@
+# To check if DEPTHWISE_CONV_2D version is 2
+
+RULE "OP_VERSION_CHECK" $(op_version DEPTHWISE_CONV_2D) '=' 2
diff --git a/res/TensorFlowLiteRecipes/DepthwiseConv2D_U8_001/test.recipe b/res/TensorFlowLiteRecipes/DepthwiseConv2D_U8_001/test.recipe
new file mode 100644
index 0000000..5e0b6b5
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/DepthwiseConv2D_U8_001/test.recipe
@@ -0,0 +1,61 @@
+operand {
+  name: "ifm"
+  type: UINT8
+  shape { dim: 1 dim: 112 dim: 112 dim: 4 }
+  quant { min: 0 max: 6 scale: 0.0235294 zero_point: 0 }
+}
+operand {
+  name: "ker"
+  type: UINT8
+  shape { dim: 1 dim: 3 dim: 3 dim: 4 }
+  filler {
+    tag: "gaussian"
+    arg: "0.0"
+    arg: "1.0"
+  }
+  quant {
+    min: -30.3175 min: -0.779597 min: -10.2751 min: -10.8594
+    max: 4.35049 max: 2.70807 max: 11.0269 max: 20.97
+    scale:0.135953 scale: 0.0136771 scale: 0.0835375 scale: 0.124821
+    zero_point:223 zero_point: 57 zero_point: 123 zero_point: 87
+    quantized_dimension: 3
+  }
+}
+operand {
+  name: "bias"
+  type: INT32
+  shape { dim: 4 }
+  filler {
+    tag: "gaussian"
+    arg: "0"
+    arg: "1.0"
+  }
+  quant {
+    scale: 1.4758e-16 scale: 3.15185e-05 scale: 2.20685e-05 scale: 1.72205e-16
+    zero_point: 0 zero_point: 0 zero_point: 0 zero_point: 0
+  }
+}
+operand {
+  name: "ofm"
+  type: UINT8
+  shape { dim: 1 dim: 112 dim: 112 dim: 4 }
+  quant { min: 0 max: 6 scale: 0.0235294 zero_point: 0 }
+
+}
+operation {
+  type: "DepthwiseConv2D"
+  depthwiseconv2d_options {
+    padding: SAME
+    stride_w: 1
+    stride_h: 1
+    depth_multiplier: 1
+    activation : RELU6
+  }
+  input: "ifm"
+  input: "ker"
+  input: "bias"
+  output: "ofm"
+}
+input: "ifm"
+input: "ker"
+output: "ofm"
diff --git a/res/TensorFlowLiteRecipes/DepthwiseConv2D_U8_001/test.reverse b/res/TensorFlowLiteRecipes/DepthwiseConv2D_U8_001/test.reverse
new file mode 100644
index 0000000..e69de29
diff --git a/res/TensorFlowLiteRecipes/FullyConnected_003/test.recipe b/res/TensorFlowLiteRecipes/FullyConnected_003/test.recipe
new file mode 100644
index 0000000..0ecb561
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/FullyConnected_003/test.recipe
@@ -0,0 +1,55 @@
+operand {
+  name: "in"
+  type: FLOAT32
+  shape { dim: 1 dim: 16 }
+}
+operand {
+  name: "weight"
+  type: FLOAT32
+  shape { dim: 4 dim: 16 }
+  filler {
+    tag: "explicit"
+    arg: "1" arg: "2" arg: "-3" arg: "-4"
+    arg: "-5" arg: "6" arg: "-7" arg: "8"
+    arg: "4" arg: "-2" arg: "3" arg: "-1"
+    arg: "-8" arg: "-6" arg: "7" arg: "5"
+    arg: "1" arg: "2" arg: "-3" arg: "-4"
+    arg: "-5" arg: "6" arg: "-7" arg: "8"
+    arg: "4" arg: "-2" arg: "3" arg: "-1"
+    arg: "-8" arg: "-6" arg: "7" arg: "5"
+    arg: "1" arg: "2" arg: "-3" arg: "-4"
+    arg: "-5" arg: "6" arg: "-7" arg: "8"
+    arg: "4" arg: "-2" arg: "3" arg: "-1"
+    arg: "-8" arg: "-6" arg: "7" arg: "5"
+    arg: "1" arg: "2" arg: "-3" arg: "-4"
+    arg: "-5" arg: "6" arg: "-7" arg: "8"
+    arg: "4" arg: "-2" arg: "3" arg: "-1"
+    arg: "-8" arg: "-6" arg: "7" arg: "5"
+  }
+}
+operand {
+  name: "bias"
+  type: FLOAT32
+  shape { dim: 4 }
+  filler {
+    tag: "explicit"
+    arg: "1" arg: "-2" arg: "-3" arg: "4"
+  }
+}
+operand {
+  name: "out"
+  type: FLOAT32
+  shape { dim: 1 dim: 4 }
+}
+operation {
+  type: "FullyConnected"
+  fullyconnected_options {
+    activation: NONE
+  }
+  input: "in"
+  input: "weight"
+  input: "bias"
+  output: "out"
+}
+input: "in"
+output: "out"
diff --git a/res/TensorFlowLiteRecipes/FullyConnected_003/test.reverse b/res/TensorFlowLiteRecipes/FullyConnected_003/test.reverse
new file mode 100644
index 0000000..e69de29
diff --git a/res/TensorFlowLiteRecipes/L2Normalize_U8_000/test.recipe b/res/TensorFlowLiteRecipes/L2Normalize_U8_000/test.recipe
new file mode 100644
index 0000000..3fff5cd
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/L2Normalize_U8_000/test.recipe
@@ -0,0 +1,22 @@
+operand {
+  name: "ifm1"
+  type: UINT8
+  shape { dim: 1 dim: 4 dim: 4 dim: 3 }
+  quant { min: 0 max: 2 scale: 0.0078125 zero_point: 128}
+}
+operand {
+  name: "ofm"
+  type: UINT8
+  shape { dim: 1 dim: 4 dim: 4 dim: 3 }
+  quant { min: 0 max: 2 scale: 0.0078125 zero_point: 128}
+}
+operation {
+  type: "L2Normalize"
+  l2norm_options {
+    activation: NONE
+  }
+  input: "ifm1"
+  output: "ofm"
+}
+input: "ifm1"
+output: "ofm"
diff --git a/res/TensorFlowLiteRecipes/L2Normalize_U8_000/test.reverse b/res/TensorFlowLiteRecipes/L2Normalize_U8_000/test.reverse
new file mode 100644
index 0000000..e69de29
diff --git a/res/TensorFlowLiteRecipes/Logistic_U8_000/test.recipe b/res/TensorFlowLiteRecipes/Logistic_U8_000/test.recipe
b/res/TensorFlowLiteRecipes/Logistic_U8_000/test.recipe new file mode 100644 index 0000000..7b2a84d --- /dev/null +++ b/res/TensorFlowLiteRecipes/Logistic_U8_000/test.recipe @@ -0,0 +1,19 @@ +operand { + name: "ifm" + type: UINT8 + shape { dim: 1 dim: 3 dim: 3 dim: 2 } + quant { min: 0 max: 1 scale: 0.00390625 zero_point: -128 } +} +operand { + name: "ofm" + type: UINT8 + shape { dim: 1 dim: 3 dim: 3 dim: 2 } + quant { min: 0 max: 1 scale: 0.00390625 zero_point: -128 } +} +operation { + type: "Logistic" + input: "ifm" + output: "ofm" +} +input: "ifm" +output: "ofm" diff --git a/res/TensorFlowLiteRecipes/Logistic_U8_000/test.reverse b/res/TensorFlowLiteRecipes/Logistic_U8_000/test.reverse new file mode 100644 index 0000000..e69de29 diff --git a/res/TensorFlowLiteRecipes/Net_TConv_BN_000/test.recipe b/res/TensorFlowLiteRecipes/Net_TConv_BN_000/test.recipe new file mode 100644 index 0000000..65248f2 --- /dev/null +++ b/res/TensorFlowLiteRecipes/Net_TConv_BN_000/test.recipe @@ -0,0 +1,149 @@ +operand { + name: "Const_transposed" + type: FLOAT32 + shape { + dim: 1 + dim: 3 + dim: 3 + dim: 1 + } + filler { + tag: "gaussian" + arg: "0.0" + arg: "0.1" + } + quant { + quantized_dimension: 0 + } +} +operand { + name: "FusedBatchNormV3" + type: FLOAT32 + shape { + dim: 1 + dim: 4 + dim: 4 + dim: 1 + } + quant { + quantized_dimension: 0 + } +} +operand { + name: "FusedBatchNormV3_add_param" + type: FLOAT32 + shape { + dim: 1 + } + filler { + tag: "explicit" + arg: "-2.04724" + } + quant { + quantized_dimension: 0 + } +} +operand { + name: "FusedBatchNormV3_mul_0" + type: FLOAT32 + shape { + dim: 1 + dim: 4 + dim: 4 + dim: 1 + } + quant { + quantized_dimension: 0 + } +} +operand { + name: "FusedBatchNormV3_mul_0_param" + type: FLOAT32 + shape { + dim: 1 + } + filler { + tag: "explicit" + arg: "2.00834" + } + quant { + quantized_dimension: 0 + } +} +operand { + name: "Hole" + type: FLOAT32 + shape { + dim: 1 + dim: 2 + dim: 2 + dim: 1 + } + quant { + min: 0 + max: 255 + quantized_dimension: 0 + } +} +operand { + name: "conv2d_transpose" + type: FLOAT32 + shape { + dim: 1 + dim: 4 + dim: 4 + dim: 1 + } + quant { + quantized_dimension: 0 + } +} +operand { + name: "conv2d_transpose/input_sizes" + type: INT32 + shape { + dim: 4 + } + filler { + tag: "explicit" + arg: "1" + arg: "4" + arg: "4" + arg: "1" + } + quant { + quantized_dimension: 0 + } +} +operation { + type: "TransposeConv" + input: "conv2d_transpose/input_sizes" + input: "Const_transposed" + input: "Hole" + output: "conv2d_transpose" + transpose_conv_options { + padding: VALID + stride_w: 1 + stride_h: 1 + } +} +operation { + type: "Mul" + input: "conv2d_transpose" + input: "FusedBatchNormV3_mul_0_param" + output: "FusedBatchNormV3_mul_0" + mul_options { + activation: NONE + } +} +operation { + type: "Add" + input: "FusedBatchNormV3_mul_0" + input: "FusedBatchNormV3_add_param" + output: "FusedBatchNormV3" + add_options { + activation: NONE + } +} +input: "Hole" +output: "FusedBatchNormV3" diff --git a/res/TensorFlowLiteRecipes/ResizeBilinear_U8_000/test.recipe b/res/TensorFlowLiteRecipes/ResizeBilinear_U8_000/test.recipe new file mode 100644 index 0000000..6ae87b9 --- /dev/null +++ b/res/TensorFlowLiteRecipes/ResizeBilinear_U8_000/test.recipe @@ -0,0 +1,32 @@ +operand { + name: "ifm1" + type: UINT8 + shape { dim: 1 dim: 4 dim: 4 dim: 3 } + quant { min: 0 max: 255 scale: 1.0 zero_point: 0 } +} +operand { + name: "size" + type: INT32 + shape { dim: 2 } + filler { + tag: "constant" arg: "16" arg: "16" + } +} +operand { + name: "ofm" + type: UINT8 + 
shape { dim: 1 dim: 16 dim: 16 dim: 3 } + quant { min: 0 max: 255 scale: 1.0 zero_point: 0 } +} +operation { + type: "ResizeBilinear" + input: "ifm1" + input: "size" + output: "ofm" + resize_bilinear_options { + align_corners: false + half_pixel_centers: false + } +} +input: "ifm1" +output: "ofm" diff --git a/res/TensorFlowLiteRecipes/ResizeBilinear_U8_000/test.reverse b/res/TensorFlowLiteRecipes/ResizeBilinear_U8_000/test.reverse new file mode 100644 index 0000000..e69de29 diff --git a/res/TensorFlowLiteRecipes/SpaceToDepth_U8_000/test.recipe b/res/TensorFlowLiteRecipes/SpaceToDepth_U8_000/test.recipe new file mode 100644 index 0000000..ec403dd --- /dev/null +++ b/res/TensorFlowLiteRecipes/SpaceToDepth_U8_000/test.recipe @@ -0,0 +1,22 @@ +operand { + name: "ifm" + type: UINT8 + shape { dim: 1 dim: 4 dim: 4 dim: 3 } + quant { min: 0 max: 255 scale: 1.0 zero_point: 0 } +} +operand { + name: "ofm" + type: UINT8 + shape { dim: 1 dim: 2 dim: 2 dim: 12 } + quant { min: 0 max: 255 scale: 1.0 zero_point: 0 } +} +operation { + type: "SpaceToDepth" + space_to_depth_options { + block_size: 2 + } + input: "ifm" + output: "ofm" +} +input: "ifm" +output: "ofm" diff --git a/res/TensorFlowLiteRecipes/SpaceToDepth_U8_000/test.reverse b/res/TensorFlowLiteRecipes/SpaceToDepth_U8_000/test.reverse new file mode 100644 index 0000000..e69de29 diff --git a/res/TensorFlowLiteRecipes/TransposeConv_000/test.recipe b/res/TensorFlowLiteRecipes/TransposeConv_000/test.recipe index 79271a4..1313e26 100644 --- a/res/TensorFlowLiteRecipes/TransposeConv_000/test.recipe +++ b/res/TensorFlowLiteRecipes/TransposeConv_000/test.recipe @@ -10,7 +10,7 @@ operand { operand { name: "ker" type: FLOAT32 - shape { dim: 1 dim: 3 dim: 3 dim: 1 } + shape { dim: 3 dim: 1 dim: 1 dim: 3 } filler { tag: "gaussian" arg: "0.0" diff --git a/res/TensorFlowLiteRecipes/TransposeConv_001/test.recipe b/res/TensorFlowLiteRecipes/TransposeConv_001/test.recipe new file mode 100644 index 0000000..ad76100 --- /dev/null +++ b/res/TensorFlowLiteRecipes/TransposeConv_001/test.recipe @@ -0,0 +1,45 @@ +operand { + name: "out_shape" + type: INT32 + shape { dim: 4 } + filler { + tag: "explicit" + arg: "1" arg: "4" arg: "4" arg: "1" + } +} +operand { + name: "ker" + type: FLOAT32 + shape { dim: 1 dim: 3 dim: 3 dim: 2 } + filler { + tag: "explicit" + arg: "1" arg: "2" arg: "-3" arg: "-4" arg: "5" arg: "-6" + arg: "7" arg: "8" arg: "-9" arg: "-10" arg: "11" arg: "-12" + arg: "13" arg: "14" arg: "-15" arg: "-16" arg: "17" arg: "-18" + } +} +operand { + name: "ifm" + type: FLOAT32 + shape { dim: 1 dim: 4 dim: 4 dim: 2 } +} +operand { + name: "ofm" + type: FLOAT32 + shape { dim: 1 dim: 4 dim: 4 dim: 1 } +} + +operation { + type: "TransposeConv" + transpose_conv_options { + padding: SAME + stride_w: 1 + stride_h: 1 + } + input: "out_shape" + input: "ker" + input: "ifm" + output: "ofm" +} +input: "ifm" +output: "ofm" diff --git a/res/TensorFlowLiteRecipes/TransposeConv_001/test.reverse b/res/TensorFlowLiteRecipes/TransposeConv_001/test.reverse new file mode 100644 index 0000000..e69de29 diff --git a/res/TensorFlowLiteRecipes/Unique_000/test.recipe b/res/TensorFlowLiteRecipes/Unique_000/test.recipe new file mode 100644 index 0000000..887380c --- /dev/null +++ b/res/TensorFlowLiteRecipes/Unique_000/test.recipe @@ -0,0 +1,27 @@ +operand { + name: "ifm" + type: FLOAT32 + shape { dim: 4 } +} +operand { + name: "ofm" + type: FLOAT32 + shape { } +} +operand { + name: "ofm_idx" + type: INT32 + shape { dim: 4 } +} +operation { + type: "Unique" + unique_options { + idx_out_type: 
INT32 + } + input: "ifm" + output: "ofm" + output: "ofm_idx" +} +input: "ifm" +output: "ofm" +output: "ofm_idx" diff --git a/res/TensorFlowLiteRecipes/Unique_000/test.reverse b/res/TensorFlowLiteRecipes/Unique_000/test.reverse new file mode 100644 index 0000000..e69de29 diff --git a/res/TensorFlowLiteRecipes/Unique_001/test.recipe b/res/TensorFlowLiteRecipes/Unique_001/test.recipe new file mode 100644 index 0000000..9beb516 --- /dev/null +++ b/res/TensorFlowLiteRecipes/Unique_001/test.recipe @@ -0,0 +1,27 @@ +operand { + name: "ifm" + type: FLOAT32 + shape { dim: 4 } +} +operand { + name: "ofm" + type: FLOAT32 + shape { } +} +operand { + name: "ofm_idx" + type: INT64 + shape { dim: 4 } +} +operation { + type: "Unique" + unique_options { + idx_out_type: INT64 + } + input: "ifm" + output: "ofm" + output: "ofm_idx" +} +input: "ifm" +output: "ofm" +output: "ofm_idx" diff --git a/res/TensorFlowLiteRecipes/Unique_001/test.reverse b/res/TensorFlowLiteRecipes/Unique_001/test.reverse new file mode 100644 index 0000000..e69de29 diff --git a/res/TensorFlowLiteRecipes/Unique_002/test.recipe b/res/TensorFlowLiteRecipes/Unique_002/test.recipe new file mode 100644 index 0000000..67b947f --- /dev/null +++ b/res/TensorFlowLiteRecipes/Unique_002/test.recipe @@ -0,0 +1,27 @@ +operand { + name: "ifm" + type: INT32 + shape { dim: 5 } +} +operand { + name: "ofm" + type: INT32 + shape { } +} +operand { + name: "ofm_idx" + type: INT32 + shape { dim: 5 } +} +operation { + type: "Unique" + unique_options { + idx_out_type: INT32 + } + input: "ifm" + output: "ofm" + output: "ofm_idx" +} +input: "ifm" +output: "ofm" +output: "ofm_idx" diff --git a/res/TensorFlowLiteRecipes/Unique_002/test.reverse b/res/TensorFlowLiteRecipes/Unique_002/test.reverse new file mode 100644 index 0000000..e69de29 diff --git a/res/TensorFlowLiteRecipes/Unique_003/test.recipe b/res/TensorFlowLiteRecipes/Unique_003/test.recipe new file mode 100644 index 0000000..375db66 --- /dev/null +++ b/res/TensorFlowLiteRecipes/Unique_003/test.recipe @@ -0,0 +1,27 @@ +operand { + name: "ifm" + type: INT32 + shape { dim: 5 } +} +operand { + name: "ofm" + type: INT32 + shape { } +} +operand { + name: "ofm_idx" + type: INT64 + shape { dim: 5 } +} +operation { + type: "Unique" + unique_options { + idx_out_type: INT64 + } + input: "ifm" + output: "ofm" + output: "ofm_idx" +} +input: "ifm" +output: "ofm" +output: "ofm_idx" diff --git a/res/TensorFlowLiteRecipes/Unique_003/test.reverse b/res/TensorFlowLiteRecipes/Unique_003/test.reverse new file mode 100644 index 0000000..e69de29 diff --git a/res/TensorFlowLiteRecipes/Unique_U8_000/test.recipe b/res/TensorFlowLiteRecipes/Unique_U8_000/test.recipe new file mode 100644 index 0000000..d3985e4 --- /dev/null +++ b/res/TensorFlowLiteRecipes/Unique_U8_000/test.recipe @@ -0,0 +1,28 @@ +operand { + name: "ifm" + type: UINT8 + shape { dim: 4 } + quant { min: 0 max: 255 scale: 1.0 zero_point: 0 } +} +operand { + name: "ofm" + type: UINT8 + shape { } +} +operand { + name: "ofm_idx" + type: INT32 + shape { dim: 4 } +} +operation { + type: "Unique" + unique_options { + idx_out_type: INT32 + } + input: "ifm" + output: "ofm" + output: "ofm_idx" +} +input: "ifm" +output: "ofm" +output: "ofm_idx" diff --git a/res/TensorFlowLiteRecipes/Unique_U8_000/test.reverse b/res/TensorFlowLiteRecipes/Unique_U8_000/test.reverse new file mode 100644 index 0000000..e69de29 diff --git a/res/TensorFlowLiteRecipes/Unique_U8_001/test.recipe b/res/TensorFlowLiteRecipes/Unique_U8_001/test.recipe new file mode 100644 index 0000000..b08dd85 --- 
/dev/null +++ b/res/TensorFlowLiteRecipes/Unique_U8_001/test.recipe @@ -0,0 +1,28 @@ +operand { + name: "ifm" + type: UINT8 + shape { dim: 5 } + quant { min: 0 max: 255 scale: 1.0 zero_point: 0 } +} +operand { + name: "ofm" + type: UINT8 + shape { } +} +operand { + name: "ofm_idx" + type: INT64 + shape { dim: 5 } +} +operation { + type: "Unique" + unique_options { + idx_out_type: INT64 + } + input: "ifm" + output: "ofm" + output: "ofm_idx" +} +input: "ifm" +output: "ofm" +output: "ofm_idx" diff --git a/res/TensorFlowLiteRecipes/Unique_U8_001/test.reverse b/res/TensorFlowLiteRecipes/Unique_U8_001/test.reverse new file mode 100644 index 0000000..e69de29 diff --git a/res/TensorFlowLiteSchema/2.3.0/schema.fbs b/res/TensorFlowLiteSchema/2.3.0/schema.fbs new file mode 100644 index 0000000..b7f41c7 --- /dev/null +++ b/res/TensorFlowLiteSchema/2.3.0/schema.fbs @@ -0,0 +1,1094 @@ +// Copyright 2017 The TensorFlow Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Revision History +// Version 0: Initial version. +// Version 1: Add subgraphs to schema. +// Version 2: Rename operators to conform to NN API. +// Version 3: Move buffer data from Model.Subgraph.Tensors to Model.Buffers. + +namespace tflite; + +// This corresponds to the version. +file_identifier "TFL3"; +// File extension of any written files. +file_extension "tflite"; + +// IMPORTANT: All new members of tables, enums and unions must be added at the +// end to ensure backwards compatibility. + +// The type of data stored in a tensor. +enum TensorType : byte { + FLOAT32 = 0, + FLOAT16 = 1, + INT32 = 2, + UINT8 = 3, + INT64 = 4, + STRING = 5, + BOOL = 6, + INT16 = 7, + COMPLEX64 = 8, + INT8 = 9, + FLOAT64 = 10, +} + +// Custom quantization parameters for experimenting with new quantization +// techniques. +table CustomQuantization { + custom:[ubyte] (force_align: 16); +} + +// Represents a specific quantization technique's parameters. +union QuantizationDetails { + CustomQuantization, +} + +// Parameters for converting a quantized tensor back to float. +table QuantizationParameters { + // These four parameters are the asymmetric linear quantization parameters. + // Given a quantized value q, the corresponding float value f should be: + // f = scale * (q - zero_point) + // For other quantization types, the QuantizationDetails below is used. + min:[float]; // For importing back into tensorflow. + max:[float]; // For importing back into tensorflow. + scale:[float]; // For dequantizing the tensor's values. + zero_point:[long]; + + // If this is not none, the other quantization parameters (i.e. min, max, + // scale, zero_point fields above) are ignored and the value of the + // QuantizationDetails union should be used. + details:QuantizationDetails; + + // Specifies the dimension of the Tensor's shape that the scales and + // zero_points correspond to. 
For example, a tensor t, with dims=[4, 3, 2, 1] + // with quantization params: + // scale=[1.0, 2.0, 3.0], zero_point=[1, 2, 3], quantization_dimension=1 + // will be quantized across the second dimension of t. + // t[:, 0, :, :] will have scale[0]=1.0, zero_point[0]=1 + // t[:, 1, :, :] will have scale[1]=2.0, zero_point[0]=2 + // t[:, 2, :, :] will have scale[2]=3.0, zero_point[0]=3 + quantized_dimension:int; +} + +// Sparse tensors. +// We use a modification of the TACO format. +// Reference: http://tensor-compiler.org/kjolstad-oopsla17-tensor-compiler.pdf +// +// To encode a conceptual n-dimensional dense tensor with dims (d0, ..., dn-1), +// potentially with a k-dimensional block (0 <= k <= n) with dims +// (dn, ..., dn+k-1), the format needs to specify: +// 1. In what order to traverse these dimensions. For example, to store a 2-D +// matrix in row major order, the traversal order would be (d0, d1), +// whereas to store it in column major order, the traversal order would be +// (d1, d0). If the 2-D matrix has a 2-D inner block, the traversal order +// could be (d0, d1, d2, d3). +// 2. How each block dimension in (dn, ..., dn+k-1) maps to the original +// tensor dimension in (d0, ..., dn-1). +// 3. In the traversal order defined above, the format (dense vs. sparse) and +// index metadata for each dimension. For a dense dimension, this is just +// the size of that dimension. For a sparse dimension, it's the same as +// the compressed index defined in the Compressed Sparse Row (CSR) format. +// (http://scipy-lectures.org/advanced/scipy_sparse/csr_matrix.html) + +// The storage type for a dimension. Currently we support: +// 1. DENSE: each coordinate in this dimension is stored implicitly. +// 2. SPARSE_CSR: only the coordinates with non-zero elements are stored. The +// compression technique is the same what CSR uses. +// More types like a sparse dimension with a different compression technique +// could be added to the list in the future. +enum DimensionType : byte { + DENSE = 0, + SPARSE_CSR = 1, +} + +table Int32Vector { + values:[int]; +} + +table Uint16Vector { + values:[ushort] (force_align: 4); +} + +table Uint8Vector { + values:[ubyte] (force_align: 4); +} + +// Variable-typed buffer to store the index metadata for a sparse dimension. +// The widest type is Int32 instead of UInt32 because tensor's shape is a int32 +// vector. We don't want the per-dimensional index to overflow that range. +union SparseIndexVector { + Int32Vector, + Uint16Vector, + Uint8Vector +} + +table DimensionMetadata { + // Whether a dimension is dense or sparse. + format:DimensionType; + // Index metadata used for a dimension. + // - If format is DimensionType.DENSE then we use the dense_size field to + // store the size of that dimension. Each index in that dimension is + // stored implicitly. + // - If format is DimensionType.SPARSE_CSR then we use array_segments and + // array_indices to encode that dimension. array_segments represents how + // to segment the indices array, each segment corresponds to one element + // in the previous dimension. array_indices represents the index of the + // non-zero elements within this dimension (as those in the CSR matrix + // format, where the first array is row pointers and the second array is + // column indices). + dense_size:int; + array_segments:SparseIndexVector; + array_indices:SparseIndexVector; +} + +// Parameters to encode a sparse TfLite tensor. 
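+// Illustrative example (not part of the upstream schema): a 4x4 matrix with
+// rows (1 0 0 0), (0 0 2 3), (0 0 0 0), (4 0 0 0), traversal_order = (d0, d1),
+// d0 DENSE and d1 SPARSE_CSR, could be described by
+//   dim_metadata[0]: dense_size = 4
+//   dim_metadata[1]: array_segments = [0, 1, 3, 3, 4], array_indices = [0, 2, 3, 0]
+// so that row r owns array_indices[array_segments[r] : array_segments[r+1]].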
+table SparsityParameters { + // The traversal order of the dimensions defined in the `shape` field of the + // conceptual dense tensor. For a n-dimensional tensors with dims (d0, d1, + // ..., dn-1), + // - if not block sparse, the traversal_order is just a permutation of (d0, + // ..., dn-1). For example, a 2-D matrix stored in row-major order would + // have traversal_order = (d0, d1). + // - if block sparse with a k-dimensional block (0 <= k <= n), the + // traversal_order has n + k elements. The first n elements are still a + // permutation of (d0, ..., dn-1). The lask k elements are a permutation + // of (dn, ..., dn+k-1), defining how to traverse a block internally. For + // example, a 2-D matrix with 2-D blocks, both stored in row-major order + // would have traversal_order = (d0, d1, d2, d3). + traversal_order:[int]; + // For an n-dimensional tensor with a k-dimensional block (0 <= k <= n), + // stores how a block dimension in (dn, ..., dn+k-1) maps to the original + // tensor dimension in (d0, ..., dn). + // It's stored in the order of (dn, ..., dn+k-1). + // If not block-sparse, this field is NULL. + block_map:[int]; + // In the traversal order defined above, the metadata needed for + // each dimension to locate the non-zero values in the original dense tensor. + // The size of the dim_metadata array = the size of the traversal_order array + // = n + k. + dim_metadata:[DimensionMetadata]; +} + +table Tensor { + // The tensor shape. The meaning of each entry is operator-specific but + // builtin ops use: [batch size, height, width, number of channels] (That's + // Tensorflow's NHWC). + shape:[int]; + type:TensorType; + // An index that refers to the buffers table at the root of the model. Or, + // if there is no data buffer associated (i.e. intermediate results), then + // this is 0 (which refers to an always existent empty buffer). + // + // The data_buffer itself is an opaque container, with the assumption that the + // target device is little-endian. In addition, all builtin operators assume + // the memory is ordered such that if `shape` is [4, 3, 2], then index + // [i, j, k] maps to data_buffer[i*3*2 + j*2 + k]. + buffer:uint; + name:string; // For debugging and importing back into tensorflow. + quantization:QuantizationParameters; // Optional. + + is_variable:bool = false; + + // Parameters to encode a sparse tensor. See the example in + // tensorflow/lite/testdata/sparse_tensor.json. + sparsity:SparsityParameters; // Optional. + + // Encodes `shape` with unknown dimensions. Unknown dimensions are + // represented with -1. + shape_signature:[int]; // Optional. +} + +// A list of builtin operators. Builtin operators are slightly faster than custom +// ones, but not by much. Moreover, while custom operators accept an opaque +// object containing configuration parameters, builtins have a predetermined +// set of acceptable options. + +enum BuiltinOperator : byte { + ADD = 0, + AVERAGE_POOL_2D = 1, + CONCATENATION = 2, + CONV_2D = 3, + DEPTHWISE_CONV_2D = 4, + DEPTH_TO_SPACE = 5, + DEQUANTIZE = 6, + EMBEDDING_LOOKUP = 7, + FLOOR = 8, + FULLY_CONNECTED = 9, + HASHTABLE_LOOKUP = 10, + L2_NORMALIZATION = 11, + L2_POOL_2D = 12, + LOCAL_RESPONSE_NORMALIZATION = 13, + LOGISTIC = 14, + LSH_PROJECTION = 15, + LSTM = 16, + MAX_POOL_2D = 17, + MUL = 18, + RELU = 19, + // NOTE(aselle): RELU_N1_TO_1 used to be called RELU1, but it was renamed + // since different model developers use RELU1 in different ways. Never + // create another op called RELU1. 
+ RELU_N1_TO_1 = 20, + RELU6 = 21, + RESHAPE = 22, + RESIZE_BILINEAR = 23, + RNN = 24, + SOFTMAX = 25, + SPACE_TO_DEPTH = 26, + SVDF = 27, + TANH = 28, + // TODO(aselle): Consider rename to CONCATENATE_EMBEDDINGS + CONCAT_EMBEDDINGS = 29, + SKIP_GRAM = 30, + CALL = 31, + CUSTOM = 32, + EMBEDDING_LOOKUP_SPARSE = 33, + PAD = 34, + UNIDIRECTIONAL_SEQUENCE_RNN = 35, + GATHER = 36, + BATCH_TO_SPACE_ND = 37, + SPACE_TO_BATCH_ND = 38, + TRANSPOSE = 39, + MEAN = 40, + SUB = 41, + DIV = 42, + SQUEEZE = 43, + UNIDIRECTIONAL_SEQUENCE_LSTM = 44, + STRIDED_SLICE = 45, + BIDIRECTIONAL_SEQUENCE_RNN = 46, + EXP = 47, + TOPK_V2 = 48, + SPLIT = 49, + LOG_SOFTMAX = 50, + // DELEGATE is a special op type for the operations which are delegated to + // other backends. + // WARNING: Experimental interface, subject to change + DELEGATE = 51, + BIDIRECTIONAL_SEQUENCE_LSTM = 52, + CAST = 53, + PRELU = 54, + MAXIMUM = 55, + ARG_MAX = 56, + MINIMUM = 57, + LESS = 58, + NEG = 59, + PADV2 = 60, + GREATER = 61, + GREATER_EQUAL = 62, + LESS_EQUAL = 63, + SELECT = 64, + SLICE = 65, + SIN = 66, + TRANSPOSE_CONV = 67, + SPARSE_TO_DENSE = 68, + TILE = 69, + EXPAND_DIMS = 70, + EQUAL = 71, + NOT_EQUAL = 72, + LOG = 73, + SUM = 74, + SQRT = 75, + RSQRT = 76, + SHAPE = 77, + POW = 78, + ARG_MIN = 79, + FAKE_QUANT = 80, + REDUCE_PROD = 81, + REDUCE_MAX = 82, + PACK = 83, + LOGICAL_OR = 84, + ONE_HOT = 85, + LOGICAL_AND = 86, + LOGICAL_NOT = 87, + UNPACK = 88, + REDUCE_MIN = 89, + FLOOR_DIV = 90, + REDUCE_ANY = 91, + SQUARE = 92, + ZEROS_LIKE = 93, + FILL = 94, + FLOOR_MOD = 95, + RANGE = 96, + RESIZE_NEAREST_NEIGHBOR = 97, + LEAKY_RELU = 98, + SQUARED_DIFFERENCE = 99, + MIRROR_PAD = 100, + ABS = 101, + SPLIT_V = 102, + UNIQUE = 103, + CEIL = 104, + REVERSE_V2 = 105, + ADD_N = 106, + GATHER_ND = 107, + COS = 108, + WHERE = 109, + RANK = 110, + ELU = 111, + REVERSE_SEQUENCE = 112, + MATRIX_DIAG = 113, + QUANTIZE = 114, + MATRIX_SET_DIAG = 115, + ROUND = 116, + HARD_SWISH = 117, + IF = 118, + WHILE = 119, + NON_MAX_SUPPRESSION_V4 = 120, + NON_MAX_SUPPRESSION_V5 = 121, + SCATTER_ND = 122, + SELECT_V2 = 123, + DENSIFY = 124, + SEGMENT_SUM = 125, + BATCH_MATMUL = 126 +} + + +// Options for the builtin operators. 
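+// Illustrative example (not part of the upstream schema): in the JSON form
+// emitted by flatc, an ADD operator pairs the enum value above with the
+// matching union member below, e.g.
+//   { "opcode_index": 0, "inputs": [0, 1], "outputs": [2],
+//     "builtin_options_type": "AddOptions",
+//     "builtin_options": { "fused_activation_function": "NONE" } }
+// The pairing of BuiltinOperator and BuiltinOptions member is by convention;
+// the schema itself does not enforce it.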
+union BuiltinOptions { + Conv2DOptions, + DepthwiseConv2DOptions, + ConcatEmbeddingsOptions, + LSHProjectionOptions, + Pool2DOptions, + SVDFOptions, + RNNOptions, + FullyConnectedOptions, + SoftmaxOptions, + ConcatenationOptions, + AddOptions, + L2NormOptions, + LocalResponseNormalizationOptions, + LSTMOptions, + ResizeBilinearOptions, + CallOptions, + ReshapeOptions, + SkipGramOptions, + SpaceToDepthOptions, + EmbeddingLookupSparseOptions, + MulOptions, + PadOptions, + GatherOptions, + BatchToSpaceNDOptions, + SpaceToBatchNDOptions, + TransposeOptions, + ReducerOptions, + SubOptions, + DivOptions, + SqueezeOptions, + SequenceRNNOptions, + StridedSliceOptions, + ExpOptions, + TopKV2Options, + SplitOptions, + LogSoftmaxOptions, + CastOptions, + DequantizeOptions, + MaximumMinimumOptions, + ArgMaxOptions, + LessOptions, + NegOptions, + PadV2Options, + GreaterOptions, + GreaterEqualOptions, + LessEqualOptions, + SelectOptions, + SliceOptions, + TransposeConvOptions, + SparseToDenseOptions, + TileOptions, + ExpandDimsOptions, + EqualOptions, + NotEqualOptions, + ShapeOptions, + PowOptions, + ArgMinOptions, + FakeQuantOptions, + PackOptions, + LogicalOrOptions, + OneHotOptions, + LogicalAndOptions, + LogicalNotOptions, + UnpackOptions, + FloorDivOptions, + SquareOptions, + ZerosLikeOptions, + FillOptions, + BidirectionalSequenceLSTMOptions, + BidirectionalSequenceRNNOptions, + UnidirectionalSequenceLSTMOptions, + FloorModOptions, + RangeOptions, + ResizeNearestNeighborOptions, + LeakyReluOptions, + SquaredDifferenceOptions, + MirrorPadOptions, + AbsOptions, + SplitVOptions, + UniqueOptions, + ReverseV2Options, + AddNOptions, + GatherNdOptions, + CosOptions, + WhereOptions, + RankOptions, + ReverseSequenceOptions, + MatrixDiagOptions, + QuantizeOptions, + MatrixSetDiagOptions, + HardSwishOptions, + IfOptions, + WhileOptions, + DepthToSpaceOptions, + NonMaxSuppressionV4Options, + NonMaxSuppressionV5Options, + ScatterNdOptions, + SelectV2Options, + DensifyOptions, + SegmentSumOptions, + BatchMatMulOptions +} + +enum Padding : byte { SAME, VALID } + +enum ActivationFunctionType : byte { + NONE = 0, + RELU = 1, + RELU_N1_TO_1 = 2, + RELU6 = 3, + TANH = 4, + SIGN_BIT = 5, +} + +table Conv2DOptions { + padding:Padding; + stride_w:int; + stride_h:int; + fused_activation_function:ActivationFunctionType; + dilation_w_factor:int = 1; + dilation_h_factor:int = 1; +} + +table Pool2DOptions { + padding:Padding; + stride_w:int; + stride_h:int; + filter_width:int; + filter_height:int; + fused_activation_function:ActivationFunctionType; +} + +table DepthwiseConv2DOptions { + // Parameters for DepthwiseConv version 1 or above. + padding:Padding; + stride_w:int; + stride_h:int; + // `depth_multiplier` is redundant. It's used by CPU kernels in + // TensorFlow 2.0 or below, but ignored in versions above. + // See comments in lite/c/builtin_op_data.h for more details. + depth_multiplier:int; + fused_activation_function:ActivationFunctionType; + // Parameters for DepthwiseConv version 2 or above. + dilation_w_factor:int = 1; + dilation_h_factor:int = 1; +} + +table ConcatEmbeddingsOptions { + num_channels:int; + num_columns_per_channel:[int]; + embedding_dim_per_channel:[int]; // This could be inferred from parameters. 
+} + +enum LSHProjectionType: byte { + UNKNOWN = 0, + SPARSE = 1, + DENSE = 2, +} + +table LSHProjectionOptions { + type: LSHProjectionType; +} + +table SVDFOptions { + rank:int; + fused_activation_function:ActivationFunctionType; + // For weights-only quantization, use asymmetric quantization for non + // constant inputs at evaluation time. + asymmetric_quantize_inputs:bool; +} + +// An implementation of TensorFlow RNNCell. +table RNNOptions { + fused_activation_function:ActivationFunctionType; + asymmetric_quantize_inputs:bool; +} + +// An implementation of TensorFlow dynamic_rnn with RNNCell. +table SequenceRNNOptions { + time_major:bool; + fused_activation_function:ActivationFunctionType; + asymmetric_quantize_inputs:bool; +} + +// An implementation of TensorFlow bidrectional_dynamic_rnn with RNNCell. +table BidirectionalSequenceRNNOptions { + time_major:bool; + fused_activation_function:ActivationFunctionType; + merge_outputs: bool; + asymmetric_quantize_inputs:bool; +} + +enum FullyConnectedOptionsWeightsFormat: byte { + DEFAULT = 0, + SHUFFLED4x16INT8 = 1, +} + +// An implementation of TensorFlow fully_connected (a.k.a Dense) layer. +table FullyConnectedOptions { + // Parameters for FullyConnected version 1 or above. + fused_activation_function:ActivationFunctionType; + + // Parameters for FullyConnected version 2 or above. + weights_format:FullyConnectedOptionsWeightsFormat = DEFAULT; + + // Parameters for FullyConnected version 5 or above. + // If set to true, then the number of dimension is preserved. Furthermore, + // all but the last dimension of the input and output shapes will be equal. + keep_num_dims: bool; + + // Parameters for FullyConnected version 7 or above. + // If set to true, then weights-only op will use asymmetric quantization for + // inputs. + asymmetric_quantize_inputs: bool; +} + +table SoftmaxOptions { + beta: float; +} + +// An implementation of TensorFlow concat. +table ConcatenationOptions { + axis:int; + fused_activation_function:ActivationFunctionType; +} + +table AddOptions { + fused_activation_function:ActivationFunctionType; +} + +table MulOptions { + fused_activation_function:ActivationFunctionType; +} + +table L2NormOptions { + fused_activation_function:ActivationFunctionType; +} + +table LocalResponseNormalizationOptions { + radius:int; + bias:float; + alpha:float; + beta:float; +} + +enum LSTMKernelType : byte { + // Full LSTM kernel which supports peephole and projection. + FULL = 0, + // Basic LSTM kernels. Equivalent to TensorFlow BasicLSTMCell. + BASIC = 1, +} + +// An implementation of TensorFlow LSTMCell and CoupledInputForgetGateLSTMCell +table LSTMOptions { + // Parameters for LSTM version 1 or above. + fused_activation_function:ActivationFunctionType; + cell_clip: float; // Optional, 0.0 means no clipping + proj_clip: float; // Optional, 0.0 means no clipping + + // Parameters for LSTM version 2 or above. + // Basic kernel is only supported in version 2 or above. + kernel_type: LSTMKernelType = FULL; + + // Parameters for LSTM version 4 or above. + asymmetric_quantize_inputs: bool; +} + +// An implementation of TensorFlow dynamic_rnn with LSTMCell. +table UnidirectionalSequenceLSTMOptions { + fused_activation_function:ActivationFunctionType; + cell_clip: float; // Optional, 0.0 means no clipping + proj_clip: float; // Optional, 0.0 means no clipping + + // If true then first dimension is sequence, otherwise batch. + time_major:bool; + + // Parameter for Unidirectional Sequence LSTM version 4. 
+ asymmetric_quantize_inputs:bool; +} + +table BidirectionalSequenceLSTMOptions { + // Parameters supported by version 1: + fused_activation_function:ActivationFunctionType; + cell_clip: float; // Optional, 0.0 means no clipping + proj_clip: float; // Optional, 0.0 means no clipping + + // If true, store the outputs of both directions into the first output. + merge_outputs: bool; + + // Parameters supported by version 2: + // If true then first dimension is sequence, otherwise batch. + // Version 1 implementations assumed time_major to be true, so this default + // value should never change. + time_major: bool = true; + + // Parameters for version 3 or above. + asymmetric_quantize_inputs:bool; +} + +table ResizeBilinearOptions { + new_height: int (deprecated); + new_width: int (deprecated); + align_corners: bool; + half_pixel_centers: bool; +} + +table ResizeNearestNeighborOptions { + align_corners: bool; + half_pixel_centers: bool; +} + +// A call operation options +table CallOptions { + // The subgraph index that needs to be called. + subgraph:uint; +} + +table PadOptions { +} + +table PadV2Options { +} + +table ReshapeOptions { + new_shape:[int]; +} + +table SpaceToBatchNDOptions { +} + +table BatchToSpaceNDOptions { +} + +table SkipGramOptions { + ngram_size: int; + max_skip_size: int; + include_all_ngrams: bool; +} + +table SpaceToDepthOptions { + block_size: int; +} + +table DepthToSpaceOptions { + block_size: int; +} + +table SubOptions { + fused_activation_function:ActivationFunctionType; +} + +table DivOptions { + fused_activation_function:ActivationFunctionType; +} + +table TopKV2Options { +} + +enum CombinerType : byte { + SUM = 0, + MEAN = 1, + SQRTN = 2, +} + +table EmbeddingLookupSparseOptions { + combiner:CombinerType; +} + +table GatherOptions { + axis: int; +} + +table TransposeOptions { +} + +table ExpOptions { +} + +table CosOptions { +} + +table ReducerOptions { + keep_dims: bool; +} + +table SqueezeOptions { + squeeze_dims:[int]; +} + +table SplitOptions { + num_splits: int; +} + +table SplitVOptions { + num_splits: int; +} + +table StridedSliceOptions { + begin_mask: int; + end_mask: int; + ellipsis_mask: int; + new_axis_mask: int; + shrink_axis_mask: int; +} + +table LogSoftmaxOptions { +} + +table CastOptions { + in_data_type: TensorType; + out_data_type: TensorType; +} + +table DequantizeOptions { +} + +table MaximumMinimumOptions { +} + +table TileOptions { +} + +table ArgMaxOptions { + output_type : TensorType; +} + +table ArgMinOptions { + output_type : TensorType; +} + +table GreaterOptions { +} + +table GreaterEqualOptions { +} + +table LessOptions { +} + +table LessEqualOptions { +} + +table NegOptions { +} + +table SelectOptions { +} + +table SliceOptions { +} + +table TransposeConvOptions { + padding:Padding; + stride_w:int; + stride_h:int; +} + +table ExpandDimsOptions { +} + +table SparseToDenseOptions { + validate_indices:bool; +} + +table EqualOptions { +} + +table NotEqualOptions { +} + +table ShapeOptions { + // Optional output type of the operation (int32 or int64). Defaults to int32. 
+ out_type : TensorType; +} + +table RankOptions { +} + +table PowOptions { +} + +table FakeQuantOptions { + // Parameters supported by version 1: + min:float; + max:float; + num_bits:int; + + // Parameters supported by version 2: + narrow_range:bool; +} + +table PackOptions { + values_count:int; + axis:int; +} + +table LogicalOrOptions { +} + +table OneHotOptions { + axis:int; +} + +table AbsOptions { +} + + +table HardSwishOptions { +} + +table LogicalAndOptions { +} + +table LogicalNotOptions { +} + +table UnpackOptions { + num:int; + axis:int; +} + +table FloorDivOptions { +} + +table SquareOptions { +} + +table ZerosLikeOptions { +} + +table FillOptions { +} + +table FloorModOptions { +} + +table RangeOptions { +} + +table LeakyReluOptions { + alpha:float; +} + +table SquaredDifferenceOptions { +} + +enum MirrorPadMode : byte { + // Doesn't include borders. + REFLECT = 0, + // Includes borders. + SYMMETRIC = 1, +} + +table MirrorPadOptions { + mode:MirrorPadMode; +} + +table UniqueOptions { + idx_out_type:TensorType = INT32; +} + +table ReverseV2Options { +} + +table AddNOptions { +} + +table GatherNdOptions { +} + +table WhereOptions { +} + +table ReverseSequenceOptions { + seq_dim:int; + batch_dim:int = 0; +} + +table MatrixDiagOptions { +} + +table QuantizeOptions { +} + +table MatrixSetDiagOptions { +} + +table IfOptions { + then_subgraph_index:int; + else_subgraph_index:int; +} + +table WhileOptions { + cond_subgraph_index:int; + body_subgraph_index:int; +} + +table NonMaxSuppressionV4Options { +} + +table NonMaxSuppressionV5Options { +} + +table ScatterNdOptions { +} + +table SelectV2Options { +} + +table DensifyOptions { +} + +table SegmentSumOptions { +} + +table BatchMatMulOptions { + adj_x:bool; + adj_y:bool; +} + +// An OperatorCode can be an enum value (BuiltinOperator) if the operator is a +// builtin, or a string if the operator is custom. +table OperatorCode { + builtin_code:BuiltinOperator; + custom_code:string; + + // The version of the operator. The version need to be bumped whenever new + // parameters are introduced into an op. + version:int = 1; +} + +enum CustomOptionsFormat : byte { + FLEXBUFFERS = 0, +} + +// An operator takes tensors as inputs and outputs. The type of operation being +// performed is determined by an index into the list of valid OperatorCodes, +// while the specifics of each operations is configured using builtin_options +// or custom_options. +table Operator { + // Index into the operator_codes array. Using an integer here avoids + // complicate map lookups. + opcode_index:uint; + + // Optional input are indicated by -1. + inputs:[int]; + outputs:[int]; + + builtin_options:BuiltinOptions; + custom_options:[ubyte]; + custom_options_format:CustomOptionsFormat; + + // A list of booleans indicating the input tensors which are being mutated by + // this operator.(e.g. used by RNN and LSTM). + // For example, if the "inputs" array refers to 5 tensors and the second and + // fifth are mutable variables, then this list will contain + // [false, true, false, false, true]. + // + // If the list is empty, no variable is mutated in this operator. + // The list either has the same length as `inputs`, or is empty. + mutating_variable_inputs:[bool]; + + // A list of indices to the subgraph's "tensors" that are internal to an Op. + // Internal tensors are those that do not flow in or out of the operation, + // but instead are part of internal computation. As such, the operation's + // implementation may manage its memory more efficiently. 
They are needed + // however (i.e. not just an implementation detail) since they are part of the + // computation, which may require relevant metadata such as quantization + // parameters. + intermediates:[int]; +} + +// The root type, defining a subgraph, which typically represents an entire +// model. +table SubGraph { + // A list of all tensors used in this subgraph. + tensors:[Tensor]; + + // Indices of the tensors that are inputs into this subgraph. Note this is + // the list of non-static tensors that feed into the subgraph for inference. + inputs:[int]; + + // Indices of the tensors that are outputs out of this subgraph. Note this is + // the list of output tensors that are considered the product of the + // subgraph's inference. + outputs:[int]; + + // All operators, in execution order. + operators:[Operator]; + + // Name of this subgraph (used for debugging). + name:string; +} + +// Table of raw data buffers (used for constant tensors). Referenced by tensors +// by index. The generous alignment accommodates mmap-friendly data structures. +table Buffer { + data:[ubyte] (force_align: 16); +} + +table Metadata { + // A human readable string to uniquely identify a Metadata. + name:string; + // An index to the buffers table. + buffer:uint; +} + +table Model { + // Version of the schema. + version:uint; + + // A list of all operator codes used in this model. This is + // kept in order because operators carry an index into this + // vector. + operator_codes:[OperatorCode]; + + // All the subgraphs of the model. The 0th is assumed to be the main + // model. + subgraphs:[SubGraph]; + + // A description of the model. + description:string; + + // Buffers of the model. + // Note the 0th entry of this array must be an empty buffer (sentinel). + // This is a convention so that tensors without a buffer can provide 0 as + // their buffer. + buffers:[Buffer]; + + // Metadata about the model. Indirects into the existings buffers list. + // Deprecated, prefer to use metadata field. + metadata_buffer:[int]; + + // Metadata about the model. + metadata:[Metadata]; +} + +root_type Model; diff --git a/res/TensorFlowLiteSchema/SCHEMA.lst b/res/TensorFlowLiteSchema/SCHEMA.lst index 2044e2e..73dfacd 100644 --- a/res/TensorFlowLiteSchema/SCHEMA.lst +++ b/res/TensorFlowLiteSchema/SCHEMA.lst @@ -5,3 +5,4 @@ VERSION,URL 2.1.0,https://raw.githubusercontent.com/tensorflow/tensorflow/v2.1.0/tensorflow/lite/schema/schema.fbs 2.2.0,https://raw.githubusercontent.com/tensorflow/tensorflow/v2.2.0/tensorflow/lite/schema/schema.fbs 2.3.0-rc0,https://raw.githubusercontent.com/tensorflow/tensorflow/v2.3.0-rc0/tensorflow/lite/schema/schema.fbs +2.3.0,https://raw.githubusercontent.com/tensorflow/tensorflow/v2.3.0/tensorflow/lite/schema/schema.fbs diff --git a/res/TensorFlowPythonExamples/examples/while_2/__init__.py b/res/TensorFlowPythonExamples/examples/while_2/__init__.py new file mode 100644 index 0000000..af1c745 --- /dev/null +++ b/res/TensorFlowPythonExamples/examples/while_2/__init__.py @@ -0,0 +1,32 @@ +import tensorflow as tf + +i = tf.constant(0, shape=[1, 0], dtype=tf.int32, name='i') +x = tf.compat.v1.placeholder(shape=[1, 1], dtype=tf.int32, name='Hole') + +c = lambda i: tf.compat.v1.less(tf.compat.v1.size(i[0]), 10) +b = lambda i: tf.concat([i, x], axis=1) + +# this loop changs i's shape from [1, 0] -> [1, 1] -> [1, 2] -> ... 
-> [1, 10] +r = tf.compat.v1.while_loop( + c, b, [i], name="While", shape_invariants=[tf.TensorShape([1, None])]) + +output = tf.compat.v1.identity(r, name="Output") + +# by adding the following code, [[1 1 1 1 1 1 1 1 1 1]] and (1, 10) will be printed +# +# import numpy as np +# x_val = np.array([[1]]) +# with tf.Session() as sess: +# result = sess.run(r, feed_dict={x:x_val}) +# print(result) +# print(result.shape) + +# with TF 2.3, tf2tflite throws the following error +# +# Exception: venv/tf-2.3/lib/python3.6/site-packages/tensorflow/python/eager/lift_to_graph.py:339:0: +# error: body function result type tensor<1x1xi32> is incompatible with result type tensor<1x0xi32> +# at index 0 +# ... +# note: see current operation: %1:2 = "tf.While"(%0, %arg0) +# {body = @_functionalize_body_00, cond = @_functionalize_cond_00, device = "", is_stateless = false, output_shapes = [], parallel_iterations = 10 : i64} +# : (tensor<1x0xi32>, tensor<1x1xi32>) -> (tensor<1x0xi32>, tensor<1x1xi32>) diff --git a/res/TensorFlowPythonExamples/examples/while_3/__init__.py b/res/TensorFlowPythonExamples/examples/while_3/__init__.py new file mode 100644 index 0000000..840846e --- /dev/null +++ b/res/TensorFlowPythonExamples/examples/while_3/__init__.py @@ -0,0 +1,33 @@ +import tensorflow as tf + +x = tf.compat.v1.placeholder(shape=[1, None], dtype=tf.int32, name='Hole') +i = tf.compat.v1.placeholder(shape=[1, None], dtype=tf.int32, name='Hole_2') + + +def c(ii): + rs = tf.compat.v1.shape(ii) + r1 = rs[1] + return tf.compat.v1.less(r1, 10) + + +def b(ii): + return tf.concat([ii, x], axis=1) + + +# this loop changes i's shape from [1, 0] -> [1, 1] -> [1, 2] -> ... -> [1, 10] +r = tf.compat.v1.while_loop( + c, b, [i], name="While", shape_invariants=[tf.TensorShape([1, None])]) + +output = tf.compat.v1.identity(r, name="Output") + +# by adding the following code, [[123 1 2 3 1 2 3 1 2 3]] and (1, 10) will be printed +# +''' +import numpy as np +i_val = np.array([[123]], dtype=np.int32) +x_val = np.array([[1, 2, 3]], dtype=np.int32) +with tf.compat.v1.Session() as sess: + result = sess.run(r, feed_dict={x:x_val, i:i_val}) + print(result) + print(result.shape) +''' diff --git a/res/TensorFlowPythonModels/examples/tconv-bn/__init__.py b/res/TensorFlowPythonModels/examples/tconv-bn/__init__.py new file mode 100644 index 0000000..ae034e8 --- /dev/null +++ b/res/TensorFlowPythonModels/examples/tconv-bn/__init__.py @@ -0,0 +1,27 @@ +import tensorflow as tf +import numpy as np + +input_ = tf.compat.v1.placeholder(tf.float32, shape=(1, 2, 2, 1), name="Hole") +W = np.ones(9).reshape((3, 3, 1, 1)) +filter_ = tf.compat.v1.constant(W, dtype=tf.float32) +tconv_ = tf.compat.v1.nn.conv2d_transpose( + input_, filter_, output_shape=(1, 4, 4, 1), strides=[1, 1, 1, 1], padding='VALID') + +scale_ = tf.compat.v1.constant([1.0177339315414429], dtype=tf.float32) +offset_ = tf.compat.v1.constant([0.015628524124622345], dtype=tf.float32) +mean_ = tf.compat.v1.constant([1.027155211195349693], dtype=tf.float32) +variance_ = tf.compat.v1.constant([0.25580066442489624], dtype=tf.float32) +bn_out, _, _ = tf.compat.v1.nn.fused_batch_norm( + tconv_, + scale_, + offset_, + mean=mean_, + variance=variance_, + epsilon=0.0010000000474974513, + is_training=False) +''' +python ../../compiler/tf2tfliteV2/tf2tfliteV2.py --v1 \ +-i tconv-bn.pbtxt \ +-o tconv-bn.tflite \ +-I Hole -O FusedBatchNorm +''' diff --git a/runtime/contrib/android/api/build.gradle b/runtime/contrib/android/api/build.gradle index 70eb802..5c17043 100644 --- 
a/runtime/contrib/android/api/build.gradle +++ b/runtime/contrib/android/api/build.gradle @@ -8,7 +8,7 @@ android { minSdkVersion 26 targetSdkVersion 29 versionCode 1 - versionName "1.7.0" + versionName "1.8.0" externalNativeBuild { ndkBuild { diff --git a/runtime/contrib/android_benchmark_app/README.md b/runtime/contrib/android_benchmark_app/README.md index ce165cd..19640e3 100644 --- a/runtime/contrib/android_benchmark_app/README.md +++ b/runtime/contrib/android_benchmark_app/README.md @@ -28,7 +28,7 @@ make TARGET_OS=android \ EXT_ACL_FOLDER=/home/hanjoung/ws/temp/arm_compute-v19.05-bin-android/lib/android-arm64-v8a-neon-cl \ ANDROID_BUILD_TOOLS_DIR=/home/hanjoung/ws/android-tools/sdk/build-tools/27.0.3/ \ ANDROID_SDK_DIR=/home/hanjoung/ws/android-tools/sdk \ - TFLITE_MODEL_PATH=/Users/hanjoung/ws/ghent/STAR/nnfw/tests/scripts/framework/cache/MODELS/mobilenet/mobilenet_v1_0.25_128.tflite \ + TFLITE_MODEL_PATH=/Users/hanjoung/ws/ghent/STAR/nnfw/tests/scripts/models/cache/MODELS/mobilenet/mobilenet_v1_0.25_128.tflite \ ANDROID_BOOST_ROOT=/home/hanjoung/ws/gh/moritz-wundke/Boost-for-Android/build/out/arm64-v8a ``` diff --git a/runtime/libs/benchmark/CMakeLists.txt b/runtime/libs/benchmark/CMakeLists.txt index 2af0ffa..748b2d1 100644 --- a/runtime/libs/benchmark/CMakeLists.txt +++ b/runtime/libs/benchmark/CMakeLists.txt @@ -1,6 +1,5 @@ file(GLOB_RECURSE SOURCES "src/*.cpp") -add_library(nnfw_lib_benchmark SHARED ${SOURCES}) +add_library(nnfw_lib_benchmark STATIC ${SOURCES}) target_include_directories(nnfw_lib_benchmark PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/include) target_link_libraries(nnfw_lib_benchmark PRIVATE ${LIB_PTHREAD}) -install(TARGETS nnfw_lib_benchmark DESTINATION lib) diff --git a/runtime/libs/benchmark/src/Result.cpp b/runtime/libs/benchmark/src/Result.cpp index 7a3f9a5..df573da 100644 --- a/runtime/libs/benchmark/src/Result.cpp +++ b/runtime/libs/benchmark/src/Result.cpp @@ -166,7 +166,7 @@ Result::Result(const Phases &phases) if (option.memory) { print_memory = true; - for (int i = PhaseEnum::MODEL_LOAD; i <= PhaseEnum::EXECUTE; ++i) + for (int i = PhaseEnum::MODEL_LOAD; i < PhaseEnum::EXECUTE; ++i) { auto phase = phases.at(gPhaseStrings[i]); for (int j = MemoryType::RSS; j <= MemoryType::PSS; ++j) diff --git a/runtime/libs/misc/include/misc/polymorphic_downcast.h b/runtime/libs/misc/include/misc/polymorphic_downcast.h index ee885eb..412b864 100644 --- a/runtime/libs/misc/include/misc/polymorphic_downcast.h +++ b/runtime/libs/misc/include/misc/polymorphic_downcast.h @@ -27,7 +27,9 @@ namespace misc template inline DstType polymorphic_downcast(SrcType *x) { +#ifndef __ANDROID__ assert(dynamic_cast(x) == x); +#endif return static_cast(x); } diff --git a/runtime/nnapi-header/include/NeuralNetworksEx.h b/runtime/nnapi-header/include/NeuralNetworksEx.h index 87f0e30..d15262e 100644 --- a/runtime/nnapi-header/include/NeuralNetworksEx.h +++ b/runtime/nnapi-header/include/NeuralNetworksEx.h @@ -558,7 +558,26 @@ typedef enum { * Outputs: * * 0: The sum, a tensor of the same type as input0. */ - ANEURALNETWORKS_ADDV2_EX = 50039 + ANEURALNETWORKS_ADDV2_EX = 50039, + + ANEURALNETWORKS_STATELESS_RANDOM_UNIFORM_EX = 50040, + + /** Splits a tensor value into a list of sub tensors. + * + * Supported tensor {@link OperandCode}: + * * {@link ANEURALNETWORKS_TENSOR_FLOAT32, ANEURALNETWORKS_TENSOR_INT32} + * + * Supported tensor rank: up to 4 + * + * Inputs: + * * 0: A tensor to split. 
+ * * 1: A tensor containing the sizes of each output tensor along split_dim + * * 2: The dimension along which to split + * + * Outputs: + * * 0: Tensor objects resulting from splitting value. + */ + ANEURALNETWORKS_SPLIT_V_EX = 50041 } OperationCodeEx; // extends OperationCode diff --git a/runtime/onert/api/CMakeLists.txt b/runtime/onert/api/CMakeLists.txt index 0cce338..49a5aa0 100644 --- a/runtime/onert/api/CMakeLists.txt +++ b/runtime/onert/api/CMakeLists.txt @@ -4,9 +4,9 @@ set(ONERT_DEV nnfw-dev) add_library(${ONERT_DEV} SHARED ${API_SRC}) # Public headers to publish -# nnfw_debug.h is header for runtime developer, so it will not be installed -# But runtime developer can use nnfw_debug.h by linking nnfw-dev -set(NNFW_API_HEADERS include/nnfw.h include/nnfw_dev.h) +# nnfw_internal.h is header for runtime developer, so it will not be installed +# But runtime developer can use nnfw_internal.h by linking nnfw-dev +set(NNFW_API_HEADERS include/nnfw.h include/nnfw_experimental.h) target_link_libraries(${ONERT_DEV} PUBLIC nnfw-nnapi-header) target_link_libraries(${ONERT_DEV} PUBLIC onert_core) diff --git a/runtime/onert/api/include/nnfw.h b/runtime/onert/api/include/nnfw.h index 031aabd..ef3678b 100644 --- a/runtime/onert/api/include/nnfw.h +++ b/runtime/onert/api/include/nnfw.h @@ -99,6 +99,10 @@ typedef enum { NNFW_STATUS_ERROR = 1, /** Unexpected null argument is given. */ NNFW_STATUS_UNEXPECTED_NULL = 2, + /** When a function was called but it is not valid for the current session state. */ + NNFW_STATUS_INVALID_STATE = 3, + /** When it is out of memory */ + NNFW_STATUS_OUT_OF_MEMORY = 4, } NNFW_STATUS; /** @@ -432,10 +436,10 @@ NNFW_STATUS nnfw_output_tensorinfo(nnfw_session *session, uint32_t index, * *
Supported backends differs on each platforms.
 * For example, `x86_64` supports "cpu" only.
- * Can set multiple backends by semicolon (ex: "acl_cl;cpu").
- * Among the multiple backends, the 1st element is used as default backend.
- *
- * @note Possible backend strings are: "cpu", "acl_cl", "acl_neon", "srcn"
+ * Multiple backends can be set and they must be separated by a semicolon (ex: "acl_cl;cpu").
+ * For each backend string, `libbackend_{backend}.so` will be dynamically loaded during
+ * {@link nnfw_prepare}.
+ * Among the multiple backends, the 1st element is used as the default backend.
 *
 * @param[in] session session to which avilable backends are set
 * @param[in] backends available backends on which nnfw uses
@@ -449,12 +453,10 @@ NNFW_STATUS nnfw_set_available_backends(nnfw_session *session, const char *backe
 *
 * This function should be called before {@link nnfw_prepare} is invoked.
 *
- * Supported backends differs on each platforms.
- * For example, `x86_64` supports "cpu" only.
- * The backend for op has higher priority than available backends specified by
- * nnfw_set_available_backends.
+ * The backend for op has higher priority than available backends specified by
+ * {@link nnfw_set_available_backends}.
    * - * @note Possible backend strings are: "cpu", "acl_cl", "acl_neon" + * @deprecated Deprecated since 1.8.0. * * @param[in] session session to be modified * @param[in] op operation to be set diff --git a/runtime/onert/api/include/nnfw_dev.h b/runtime/onert/api/include/nnfw_experimental.h similarity index 94% rename from runtime/onert/api/include/nnfw_dev.h rename to runtime/onert/api/include/nnfw_experimental.h index ecf0597..4cd5c58 100644 --- a/runtime/onert/api/include/nnfw_dev.h +++ b/runtime/onert/api/include/nnfw_experimental.h @@ -14,8 +14,8 @@ * limitations under the License. */ -#ifndef __NNFW_DEV_H__ -#define __NNFW_DEV_H__ +#ifndef __NNFW_EXPERIMENTAL_H__ +#define __NNFW_EXPERIMENTAL_H__ #include "nnfw.h" @@ -62,4 +62,4 @@ typedef struct NNFW_STATUS nnfw_register_custom_op_info(nnfw_session *session, const char *id, custom_kernel_registration_info *info); -#endif // __NNFW_DEV_H__ +#endif // __NNFW_EXPERIMENTAL_H__ diff --git a/runtime/onert/api/include/nnfw_debug.h b/runtime/onert/api/include/nnfw_internal.h similarity index 67% rename from runtime/onert/api/include/nnfw_debug.h rename to runtime/onert/api/include/nnfw_internal.h index 7af06a2..eb4b6d6 100644 --- a/runtime/onert/api/include/nnfw_debug.h +++ b/runtime/onert/api/include/nnfw_internal.h @@ -14,8 +14,8 @@ * limitations under the License. */ -#ifndef __NNFW_DEBUG_H__ -#define __NNFW_DEBUG_H__ +#ifndef __NNFW_INTERNAL_H__ +#define __NNFW_INTERNAL_H__ #include "nnfw.h" @@ -23,4 +23,16 @@ NNFW_STATUS nnfw_set_config(nnfw_session *session, const char *key, const char * NNFW_STATUS nnfw_get_config(nnfw_session *session, const char *key, char *value, size_t value_size); -#endif // __NNFW_DEBUG_H__ +/** + * @brief Load a circle model from buffer. + * + * The buffer must outlive the session. 
+ * + * @param[in] session session + * @param[in] buffer Pointer to the buffer + * @param[in] size Buffer size + * @return NNFW_STATUS + */ +NNFW_STATUS nnfw_load_circle_from_buffer(nnfw_session *session, uint8_t *buffer, size_t size); + +#endif // __NNFW_INTERNAL_H__ diff --git a/runtime/onert/api/include/nnfw_version.h b/runtime/onert/api/include/nnfw_version.h index d787870..320271a 100644 --- a/runtime/onert/api/include/nnfw_version.h +++ b/runtime/onert/api/include/nnfw_version.h @@ -21,6 +21,6 @@ * NNFW_VERSION is a uint32 value representing nnfw runtime version * in 0xMMmmmmPP, where MM = major, mmmm = minor, PP = patch */ -#define NNFW_VERSION 0x01000700 +#define NNFW_VERSION 0x01000800 #endif // __NNFW_VERSION_H__ diff --git a/runtime/onert/api/src/CustomKernel.h b/runtime/onert/api/src/CustomKernel.h index b4fec87..a42f7a6 100644 --- a/runtime/onert/api/src/CustomKernel.h +++ b/runtime/onert/api/src/CustomKernel.h @@ -17,7 +17,7 @@ #ifndef __ONERT_BACKEND_CUSTOM_KERNEL_H__ #define __ONERT_BACKEND_CUSTOM_KERNEL_H__ -#include "nnfw_dev.h" +#include "nnfw_experimental.h" #include "backend/CustomKernelBuilder.h" #include "exec/IFunction.h" diff --git a/runtime/onert/api/src/nnfw_api.cc b/runtime/onert/api/src/nnfw_api.cc index 0747583..d65158f 100644 --- a/runtime/onert/api/src/nnfw_api.cc +++ b/runtime/onert/api/src/nnfw_api.cc @@ -31,6 +31,8 @@ STATIC_ASSERT_ENUM_CHECK(NNFW_TYPE_TENSOR_INT64, 5); STATIC_ASSERT_ENUM_CHECK(NNFW_STATUS_NO_ERROR, 0); STATIC_ASSERT_ENUM_CHECK(NNFW_STATUS_ERROR, 1); STATIC_ASSERT_ENUM_CHECK(NNFW_STATUS_UNEXPECTED_NULL, 2); +STATIC_ASSERT_ENUM_CHECK(NNFW_STATUS_INVALID_STATE, 3); +STATIC_ASSERT_ENUM_CHECK(NNFW_STATUS_OUT_OF_MEMORY, 4); STATIC_ASSERT_ENUM_CHECK(NNFW_LAYOUT_NONE, 0); STATIC_ASSERT_ENUM_CHECK(NNFW_LAYOUT_CHANNELS_LAST, 1); @@ -57,8 +59,9 @@ NNFW_STATUS nnfw_create_session(nnfw_session **session) { NNFW_RETURN_ERROR_IF_NULL(session); - *session = new nnfw_session(); - + *session = new (std::nothrow) nnfw_session(); + if (*session == nullptr) + return NNFW_STATUS_OUT_OF_MEMORY; return NNFW_STATUS_NO_ERROR; } @@ -338,3 +341,9 @@ NNFW_STATUS nnfw_query_info_u32(nnfw_session *session, NNFW_INFO_ID id, uint32_t // It should not be reached. return NNFW_STATUS_ERROR; } + +NNFW_STATUS nnfw_load_circle_from_buffer(nnfw_session *session, uint8_t *buffer, size_t size) +{ + NNFW_RETURN_ERROR_IF_NULL(session); + return session->load_circle_from_buffer(buffer, size); +} diff --git a/runtime/onert/api/src/nnfw_api_internal.cc b/runtime/onert/api/src/nnfw_api_internal.cc index d03ddd4..eb0b743 100644 --- a/runtime/onert/api/src/nnfw_api_internal.cc +++ b/runtime/onert/api/src/nnfw_api_internal.cc @@ -73,15 +73,33 @@ nnfw_session::nnfw_session() nnfw_session::~nnfw_session() = default; -NNFW_STATUS nnfw_session::load_model_from_file(const char *package_dir) +NNFW_STATUS nnfw_session::load_circle_from_buffer(uint8_t *buffer, size_t size) { if (!isStateInitialized()) + return NNFW_STATUS_INVALID_STATE; + + if (!buffer) + return NNFW_STATUS_UNEXPECTED_NULL; + + if (size == 0) return NNFW_STATUS_ERROR; + _subgraphs = onert::circle_loader::loadModel(buffer, size); + _compiler = std::make_unique(_subgraphs); + + _state = State::MODEL_LOADED; + return NNFW_STATUS_NO_ERROR; +} + +NNFW_STATUS nnfw_session::load_model_from_file(const char *package_dir) +{ + if (!isStateInitialized()) + return NNFW_STATUS_INVALID_STATE; + if (!package_dir) { std::cerr << "package_dir is null." 
<< std::endl; - return NNFW_STATUS_ERROR; + return NNFW_STATUS_UNEXPECTED_NULL; } if (!null_terminating(package_dir, MAX_PATH_LENGTH)) @@ -156,7 +174,7 @@ NNFW_STATUS nnfw_session::prepare() std::cerr << "invalid state"; } std::cerr << std::endl; - return NNFW_STATUS_ERROR; + return NNFW_STATUS_INVALID_STATE; } if (!_subgraphs || !primary_subgraph() || primary_subgraph()->isBuildingPhase()) @@ -188,7 +206,7 @@ NNFW_STATUS nnfw_session::run() { std::cerr << "Error during nnfw_session::run : " << "run should be run after prepare" << std::endl; - return NNFW_STATUS_ERROR; + return NNFW_STATUS_INVALID_STATE; } try @@ -211,7 +229,7 @@ NNFW_STATUS nnfw_session::run_async() { std::cerr << "Error during nnfw_session::run_async : " << "run_async should be run after prepare" << std::endl; - return NNFW_STATUS_ERROR; + return NNFW_STATUS_INVALID_STATE; } _execution->startExecute(); @@ -241,7 +259,7 @@ NNFW_STATUS nnfw_session::set_input(uint32_t index, NNFW_TYPE /*type*/, const vo if (!isStatePreparedOrFinishedRun()) { std::cerr << "Error during nnfw_session::set_input : invalid state" << std::endl; - return NNFW_STATUS_ERROR; + return NNFW_STATUS_INVALID_STATE; } if (!buffer && length != 0) @@ -270,7 +288,7 @@ NNFW_STATUS nnfw_session::set_output(uint32_t index, NNFW_TYPE /*type*/, void *b if (!isStatePreparedOrFinishedRun()) { std::cerr << "Error during nnfw_session::set_output : invalid state" << std::endl; - return NNFW_STATUS_ERROR; + return NNFW_STATUS_INVALID_STATE; } if (!buffer && length != 0) @@ -296,14 +314,14 @@ NNFW_STATUS nnfw_session::set_output(uint32_t index, NNFW_TYPE /*type*/, void *b NNFW_STATUS nnfw_session::input_size(uint32_t *number) { if (isStateInitialized()) // Model is not loaded - return NNFW_STATUS_ERROR; + return NNFW_STATUS_INVALID_STATE; try { if (number == nullptr) { std::cerr << "Error during nnfw_session::input_size, number is null pointer." << std::endl; - return NNFW_STATUS_ERROR; + return NNFW_STATUS_UNEXPECTED_NULL; } *number = primary_subgraph()->getInputs().size(); } @@ -318,14 +336,14 @@ NNFW_STATUS nnfw_session::input_size(uint32_t *number) NNFW_STATUS nnfw_session::output_size(uint32_t *number) { if (isStateInitialized()) // Model is not loaded - return NNFW_STATUS_ERROR; + return NNFW_STATUS_INVALID_STATE; try { if (number == nullptr) { std::cerr << "Error during nnfw_session::output_size, number is null pointer." << std::endl; - return NNFW_STATUS_ERROR; + return NNFW_STATUS_UNEXPECTED_NULL; } *number = primary_subgraph()->getOutputs().size(); } @@ -410,7 +428,7 @@ NNFW_STATUS nnfw_session::apply_tensorinfo(uint32_t index, nnfw_tensorinfo ti) { std::cerr << "Error during set_input_tensorinfo : should be run after load_model" << std::endl; - return NNFW_STATUS_ERROR; + return NNFW_STATUS_INVALID_STATE; } if (ti.rank <= 0 || ti.rank > NNFW_MAX_RANK) @@ -463,13 +481,16 @@ NNFW_STATUS nnfw_session::set_input_tensorinfo(uint32_t index, const nnfw_tensor NNFW_STATUS nnfw_session::input_tensorinfo(uint32_t index, nnfw_tensorinfo *ti) { + if (isStateInitialized()) + return NNFW_STATUS_INVALID_STATE; + try { if (ti == nullptr) { std::cerr << "Error during nnfw_session::input_tensorinfo, tensorinfo is null pointer." 
<< std::endl; - return NNFW_STATUS_ERROR; + return NNFW_STATUS_UNEXPECTED_NULL; } if (index >= primary_subgraph()->getInputs().size()) { @@ -499,13 +520,13 @@ NNFW_STATUS nnfw_session::input_tensorinfo(uint32_t index, nnfw_tensorinfo *ti) NNFW_STATUS nnfw_session::output_tensorinfo(uint32_t index, nnfw_tensorinfo *ti) { if (isStateInitialized()) - return NNFW_STATUS_ERROR; + return NNFW_STATUS_INVALID_STATE; if (ti == nullptr) { std::cerr << "Error during nnfw_session::output_tensorinfo, tensorinfo is null pointer." << std::endl; - return NNFW_STATUS_ERROR; + return NNFW_STATUS_UNEXPECTED_NULL; } if (index >= primary_subgraph()->getOutputs().size()) @@ -570,14 +591,14 @@ static std::string get_op_backend_string(std::string op) NNFW_STATUS nnfw_session::set_available_backends(const char *backends) { if (!isStateModelLoaded()) - return NNFW_STATUS_ERROR; + return NNFW_STATUS_INVALID_STATE; try { - if (!backends || null_terminating(backends, MAX_BACKEND_NAME_LENGTH) == false) - { + if (!backends) + return NNFW_STATUS_UNEXPECTED_NULL; + if (null_terminating(backends, MAX_BACKEND_NAME_LENGTH) == false) return NNFW_STATUS_ERROR; - } auto &options = _compiler->options(); @@ -596,15 +617,15 @@ NNFW_STATUS nnfw_session::set_available_backends(const char *backends) NNFW_STATUS nnfw_session::set_op_backend(const char *op, const char *backend) { if (!isStateModelLoaded()) - return NNFW_STATUS_ERROR; + return NNFW_STATUS_INVALID_STATE; try { - if (!op || !null_terminating(op, MAX_OP_NAME_LENGTH) || !backend || + if (!op || !backend) + return NNFW_STATUS_UNEXPECTED_NULL; + if (!null_terminating(op, MAX_OP_NAME_LENGTH) || !null_terminating(backend, MAX_BACKEND_NAME_LENGTH)) - { return NNFW_STATUS_ERROR; - } auto key = get_op_backend_string(op); @@ -627,7 +648,10 @@ NNFW_STATUS nnfw_session::set_op_backend(const char *op, const char *backend) NNFW_STATUS nnfw_session::set_config(const char *key, const char *value) { if (!isStateModelLoaded()) - return NNFW_STATUS_ERROR; + return NNFW_STATUS_INVALID_STATE; + + if (!key || !value) + return NNFW_STATUS_UNEXPECTED_NULL; auto &options = _compiler->options(); @@ -693,7 +717,10 @@ onert::ir::Graph *nnfw_session::primary_subgraph() NNFW_STATUS nnfw_session::get_config(const char *key, char *value, size_t value_size) { if (!isStateModelLoaded()) - return NNFW_STATUS_ERROR; + return NNFW_STATUS_INVALID_STATE; + + if (!key || !value) + return NNFW_STATUS_UNEXPECTED_NULL; auto &options = _compiler->options(); diff --git a/runtime/onert/api/src/nnfw_api_internal.h b/runtime/onert/api/src/nnfw_api_internal.h index 1154f04..1c3c370 100644 --- a/runtime/onert/api/src/nnfw_api_internal.h +++ b/runtime/onert/api/src/nnfw_api_internal.h @@ -18,7 +18,7 @@ #define __API_NNFW_API_INTERNAL_H__ #include "nnfw.h" -#include "nnfw_dev.h" +#include "nnfw_experimental.h" #include @@ -127,9 +127,15 @@ public: NNFW_STATUS set_available_backends(const char *backends); NNFW_STATUS set_op_backend(const char *op, const char *backend); + // + // Internal-only API + // + NNFW_STATUS set_config(const char *key, const char *value); NNFW_STATUS get_config(const char *key, char *value, size_t value_size); + NNFW_STATUS load_circle_from_buffer(uint8_t *buffer, size_t size); + private: onert::ir::Graph *primary_subgraph(); bool isStateInitialized(); diff --git a/runtime/onert/backend/acl_cl/KernelGenerator.cc b/runtime/onert/backend/acl_cl/KernelGenerator.cc index 3ca4058..a84f983 100644 --- a/runtime/onert/backend/acl_cl/KernelGenerator.cc +++ b/runtime/onert/backend/acl_cl/KernelGenerator.cc @@ 
-31,6 +31,7 @@ #include "exec/FunctionSequence.h" #include "util/logging.h" #include "util/Utils.h" +#include "AclKernelGen.h" namespace onert { @@ -76,15 +77,15 @@ void KernelGenerator::visit(const ir::operation::BatchToSpaceND &node) const auto block_size_index{ node.getInputs().at(ir::operation::BatchToSpaceND::Input::BLOCK_SIZE)}; - auto ofm_alloc = _tensor_builder->at(ofm_index).get(); - auto ifm_alloc = _tensor_builder->at(ifm_index).get(); - auto block_size_alloc = _tensor_builder->at(block_size_index).get(); + auto ofm_tensor = _tensor_builder->at(ofm_index).get(); + auto ifm_tensor = _tensor_builder->at(ifm_index).get(); + auto block_size_tensor = _tensor_builder->at(block_size_index).get(); assert(_ctx.at(block_size_index).data()); auto fn = std::make_unique<::arm_compute::CLBatchToSpaceLayer>(); - fn->configure(ifm_alloc->handle(), block_size_alloc->handle(), ofm_alloc->handle()); + fn->configure(ifm_tensor->handle(), block_size_tensor->handle(), ofm_tensor->handle()); auto acl_fn = asAclClFunction(std::move(fn)); @@ -96,15 +97,27 @@ void KernelGenerator::visit(const ir::operation::Cast &node) const auto ofm_index{node.getOutputs().at(0)}; const auto ifm_index{node.getInputs().at(ir::operation::Cast::Input::INPUT)}; - auto ofm_alloc = _tensor_builder->at(ofm_index).get(); - auto ifm_alloc = _tensor_builder->at(ifm_index).get(); - const auto input_sub_type = _ctx.at(ifm_index).typeInfo().type() == ir::DataType::BOOL8 - ? arm_compute::SubDataType::BOOL - : arm_compute::SubDataType::NONE; + auto ofm_tensor = _tensor_builder->at(ofm_index).get(); + auto ifm_tensor = _tensor_builder->at(ifm_index).get(); - auto fn = std::make_unique<::arm_compute::CLCast>(); + std::unique_ptr<::arm_compute::IFunction> fn; + if (ifm_tensor->data_type() == ofm_tensor->data_type()) + { + auto l = std::make_unique<::arm_compute::CLCopy>(); + + l->configure(ifm_tensor->handle(), ofm_tensor->handle()); + + fn = std::move(l); + } + else + { + auto l = std::make_unique<::arm_compute::CLCast>(); - fn->configure(ifm_alloc->handle(), ofm_alloc->handle(), input_sub_type); + // TODO Support converting float to int32 as round down + l->configure(ifm_tensor->handle(), ofm_tensor->handle(), arm_compute::ConvertPolicy::SATURATE); + + fn = std::move(l); + } auto acl_fn = asAclClFunction(std::move(fn)); @@ -132,10 +145,10 @@ void KernelGenerator::visit(const ir::operation::Conv2D &node) ker_width, ker_height); const auto activation = node.param().activation; - auto ofm_alloc = _tensor_builder->at(ofm_index).get(); - auto ifm_alloc = _tensor_builder->at(ifm_index).get(); - auto ker_alloc = _tensor_builder->at(ker_index).get(); - auto bias_alloc = _tensor_builder->at(bias_index).get(); + auto ofm_tensor = _tensor_builder->at(ofm_index).get(); + auto ifm_tensor = _tensor_builder->at(ifm_index).get(); + auto ker_tensor = _tensor_builder->at(ker_index).get(); + auto bias_tensor = _tensor_builder->at(bias_index).get(); const auto conv_info = acl_common::asPadStrideInfo(padding, stride); const auto act_info = acl_common::asActivationLayerInfo(activation); @@ -143,8 +156,9 @@ void KernelGenerator::visit(const ir::operation::Conv2D &node) auto fn = std::make_unique<::arm_compute::CLConvolutionLayer>( _tensor_builder->acl_tensor_manager()->internal_buffer_manager()); - fn->configure(ifm_alloc->handle(), ker_alloc->handle(), bias_alloc->handle(), ofm_alloc->handle(), - conv_info, ::arm_compute::WeightsInfo(), ::arm_compute::Size2D(1U, 1U), act_info); + fn->configure(ifm_tensor->handle(), ker_tensor->handle(), 
bias_tensor->handle(), + ofm_tensor->handle(), conv_info, ::arm_compute::WeightsInfo(), + ::arm_compute::Size2D(1U, 1U), act_info); _return_fn = asAclClFunction(std::move(fn)); } @@ -171,10 +185,10 @@ void KernelGenerator::visit(const ir::operation::DepthwiseConv2D &node) const auto multiplier = node.param().multiplier; const auto activation = node.param().activation; - auto ofm_alloc = _tensor_builder->at(ofm_index).get(); - auto ifm_alloc = _tensor_builder->at(ifm_index).get(); - auto ker_alloc = _tensor_builder->at(ker_index).get(); - auto bias_alloc = _tensor_builder->at(bias_index).get(); + auto ofm_tensor = _tensor_builder->at(ofm_index).get(); + auto ifm_tensor = _tensor_builder->at(ifm_index).get(); + auto ker_tensor = _tensor_builder->at(ker_index).get(); + auto bias_tensor = _tensor_builder->at(bias_index).get(); const auto conv_info = acl_common::asPadStrideInfo(padding, stride); const auto act_info = acl_common::asActivationLayerInfo(activation); @@ -182,8 +196,8 @@ void KernelGenerator::visit(const ir::operation::DepthwiseConv2D &node) { auto fn = std::make_unique<::arm_compute::CLDepthwiseConvolutionLayer>(); - fn->configure(ifm_alloc->handle(), ker_alloc->handle(), bias_alloc->handle(), - ofm_alloc->handle(), conv_info, multiplier, act_info); + fn->configure(ifm_tensor->handle(), ker_tensor->handle(), bias_tensor->handle(), + ofm_tensor->handle(), conv_info, multiplier, act_info); _return_fn = asAclClFunction(std::move(fn)); } @@ -191,88 +205,28 @@ void KernelGenerator::visit(const ir::operation::DepthwiseConv2D &node) void KernelGenerator::visit(const ir::operation::MaxPool2D &node) { - const auto ofm_index{node.getOutputs().at(0)}; - const auto ifm_index{node.getInputs().at(ir::operation::MaxPool2D::Input::INPUT)}; - - const auto ofm_shape = _ctx.at(ofm_index).shape().asFeature(_current_op_seq_layout); - const auto ifm_shape = _ctx.at(ifm_index).shape().asFeature(_current_op_seq_layout); + auto raw_fn = acl_common::kernelGenPool2D<::arm_compute::CLPoolingLayer>( + node, _ctx, _tensor_builder, _current_op_seq_layout, ::arm_compute::PoolingType::MAX); - const auto kh = node.param().kh; - const auto kw = node.param().kw; - const auto stride = node.param().stride; - const auto padding = - ir::calculatePadding(node.param().padding, ifm_shape, ofm_shape, stride, kw, kh); + const auto ofm_index{node.getOutputs().at(0)}; + auto ofm_tensor = _tensor_builder->at(ofm_index).get(); const auto activation = node.param().activation; - - VERBOSE(MaxPool2D) << "IFM_H: " << ifm_shape.H << std::endl; - VERBOSE(MaxPool2D) << "IFM_W: " << ifm_shape.W << std::endl; - VERBOSE(MaxPool2D) << "OFM_H: " << ofm_shape.H << std::endl; - VERBOSE(MaxPool2D) << "OFM_W: " << ofm_shape.W << std::endl; - VERBOSE(MaxPool2D) << "KER_H: " << kh << std::endl; - VERBOSE(MaxPool2D) << "KER_W: " << kw << std::endl; - VERBOSE(MaxPool2D) << "STRIDE_H: " << stride.vertical << std::endl; - VERBOSE(MaxPool2D) << "STRIDE_W: " << stride.horizontal << std::endl; - VERBOSE(MaxPool2D) << "PAD(T): " << padding.top << std::endl; - VERBOSE(MaxPool2D) << "PAD(B): " << padding.bottom << std::endl; - VERBOSE(MaxPool2D) << "PAD(L): " << padding.left << std::endl; - VERBOSE(MaxPool2D) << "PAD(R): " << padding.right << std::endl; - - auto ofm_alloc = _tensor_builder->at(ofm_index).get(); - auto ifm_alloc = _tensor_builder->at(ifm_index).get(); - - ::arm_compute::PoolingLayerInfo info{::arm_compute::PoolingType::MAX, - ::arm_compute::Size2D{kw, kh}, - acl_common::asPadStrideInfo(padding, stride)}; - - auto fn = 
std::make_unique<::arm_compute::CLPoolingLayer>(); - - fn->configure(ifm_alloc->handle(), ofm_alloc->handle(), info); - _return_fn = std::make_unique( - asAclClFunction(std::move(fn)), ActivationBuilder::generate(activation, ofm_alloc->handle())); + asAclClFunction(std::move(raw_fn)), + ActivationBuilder::generate(activation, ofm_tensor->handle())); } void KernelGenerator::visit(const ir::operation::AvgPool2D &node) { - const auto ofm_index{node.getOutputs().at(0)}; - const auto ifm_index{node.getInputs().at(ir::operation::AvgPool2D::Input::INPUT)}; - - const auto ofm_shape = _ctx.at(ofm_index).shape().asFeature(_current_op_seq_layout); - const auto ifm_shape = _ctx.at(ifm_index).shape().asFeature(_current_op_seq_layout); + auto raw_fn = acl_common::kernelGenPool2D<::arm_compute::CLPoolingLayer>( + node, _ctx, _tensor_builder, _current_op_seq_layout, ::arm_compute::PoolingType::AVG); - const auto kh = node.param().kh; - const auto kw = node.param().kw; - const auto stride = node.param().stride; - const auto padding = - ir::calculatePadding(node.param().padding, ifm_shape, ofm_shape, stride, kw, kh); + const auto ofm_index{node.getOutputs().at(0)}; + auto ofm_tensor = _tensor_builder->at(ofm_index).get(); const auto activation = node.param().activation; - - VERBOSE(AvgPool2D) << "IFM_H: " << ifm_shape.H << std::endl; - VERBOSE(AvgPool2D) << "IFM_W: " << ifm_shape.W << std::endl; - VERBOSE(AvgPool2D) << "OFM_H: " << ofm_shape.H << std::endl; - VERBOSE(AvgPool2D) << "OFM_W: " << ofm_shape.W << std::endl; - VERBOSE(AvgPool2D) << "KER_H: " << kh << std::endl; - VERBOSE(AvgPool2D) << "KER_W: " << kw << std::endl; - VERBOSE(AvgPool2D) << "STRIDE_H: " << stride.vertical << std::endl; - VERBOSE(AvgPool2D) << "STRIDE_W: " << stride.horizontal << std::endl; - VERBOSE(AvgPool2D) << "PAD(T): " << padding.top << std::endl; - VERBOSE(AvgPool2D) << "PAD(B): " << padding.bottom << std::endl; - VERBOSE(AvgPool2D) << "PAD(L): " << padding.left << std::endl; - VERBOSE(AvgPool2D) << "PAD(R): " << padding.right << std::endl; - - auto ofm_alloc = _tensor_builder->at(ofm_index).get(); - auto ifm_alloc = _tensor_builder->at(ifm_index).get(); - - ::arm_compute::PoolingLayerInfo info{ - ::arm_compute::PoolingType::AVG, ::arm_compute::Size2D{kw, kh}, - acl_common::asPadStrideInfo(padding, stride), true /* exclude_padding */}; - - auto fn = std::make_unique<::arm_compute::CLPoolingLayer>(); - - fn->configure(ifm_alloc->handle(), ofm_alloc->handle(), info); - _return_fn = std::make_unique( - asAclClFunction(std::move(fn)), ActivationBuilder::generate(activation, ofm_alloc->handle())); + asAclClFunction(std::move(raw_fn)), + ActivationBuilder::generate(activation, ofm_tensor->handle())); } void KernelGenerator::visit(const ir::operation::Concat &node) @@ -296,7 +250,7 @@ void KernelGenerator::visit(const ir::operation::Concat &node) return; } - auto output_alloc = _tensor_builder->at(ofm_index).get(); + auto output_tensor = _tensor_builder->at(ofm_index).get(); std::vector<::arm_compute::ICLTensor *> input_tensors; for (auto &ifm_ind : input_indexes) input_tensors.emplace_back(_tensor_builder->at(ifm_ind)->handle()); @@ -305,7 +259,7 @@ void KernelGenerator::visit(const ir::operation::Concat &node) if (input_indexes.size() < 2) { auto l = std::make_unique<::arm_compute::CLCopy>(); - l->configure(input_tensors.at(0), output_alloc->handle()); + l->configure(input_tensors.at(0), output_tensor->handle()); fn = std::move(l); } else @@ -313,10 +267,10 @@ void KernelGenerator::visit(const ir::operation::Concat &node) auto l = 
std::make_unique<::arm_compute::CLConcatenateLayer>(); const auto rank = _ctx.at(ofm_index).shape().rank(); const auto frontend_layout = _current_op_seq_layout; - const auto backend_layout = output_alloc->layout(); + const auto backend_layout = output_tensor->layout(); const auto fixed_axis = acl_common::ToARMComputeAxis(rank, axis, frontend_layout, backend_layout).value(); - l->configure(input_tensors, output_alloc->handle(), fixed_axis); + l->configure(input_tensors, output_tensor->handle(), fixed_axis); fn = std::move(l); } @@ -327,75 +281,15 @@ void KernelGenerator::visit(const ir::operation::Concat &node) void KernelGenerator::visit(const ir::operation::FullyConnected &node) { - using ir::operation::FullyConnected; - const auto output_index{node.getOutputs().at(0)}; - const auto input_index{node.getInputs().at(FullyConnected::Input::INPUT)}; - const auto weight_index{node.getInputs().at(FullyConnected::Input::WEIGHT)}; - const auto bias_index{node.getInputs().at(FullyConnected::Input::BIAS)}; - - const auto input_rank = _ctx.at(input_index).shape().rank(); - - const auto output_size = - _ctx.at(output_index).shape().dim(_ctx.at(output_index).shape().rank() - 1); - UNUSED_RELEASE(output_size); - assert(_ctx.at(bias_index).shape().dim(0) == output_size); - assert(_ctx.at(weight_index).shape().dim(0) == output_size); - const auto batch_size = - _ctx.at(output_index).shape().dim(_ctx.at(output_index).shape().rank() - 2); - const auto input_size = - _ctx.at(weight_index).shape().dim(_ctx.at(weight_index).shape().rank() - 1); - - // Check for reshaping input's shape into rank-2 - bool needs_reshape = false; - ir::Shape reshape(2); - if (input_rank == 3 || input_rank == 4) - { - const auto &ifm_shape = _ctx.at(input_index).shape(); - auto feature_size = 1; - for (int i = 0; i < ifm_shape.rank(); ++i) - { - feature_size *= ifm_shape.dim(i); - } - - UNUSED_RELEASE(feature_size); - assert(feature_size == batch_size * input_size); - - // for reshaping - needs_reshape = true; - reshape.dim(0) = batch_size; /* H */ - reshape.dim(1) = input_size; /* W */ - } - + auto output_tensor = _tensor_builder->at(output_index).get(); const auto activation = node.param().activation; - auto output_alloc = _tensor_builder->at(output_index).get(); - const auto input_alloc = _tensor_builder->at(input_index).get(); - const auto weight_alloc = _tensor_builder->at(weight_index).get(); - const auto bias_alloc = _tensor_builder->at(bias_index).get(); - const auto frontend_layout = _current_op_seq_layout; - const auto acl_layout = output_alloc->handle()->info()->data_layout(); - - auto fn = std::make_unique( - _tensor_builder->acl_tensor_manager()->internal_buffer_manager()); - - arm_compute::CLFullyConnectedReshapingLayer::KernelType kernel_type = - arm_compute::CLFullyConnectedReshapingLayer::KernelType::GENERAL; - if (_ctx.at(weight_index).isConstant()) - { - kernel_type = arm_compute::CLFullyConnectedReshapingLayer::KernelType::PREPROCESSED_WEIGHTS; - assert(_ctx.at(weight_index).data()); - } - fn->configure( - input_alloc->handle(), weight_alloc->handle(), bias_alloc->handle(), output_alloc->handle(), - needs_reshape, - ::onert::backend::acl_common::asTensorShape( - reshape, frontend_layout, ::onert::backend::acl_common::asRuntimeLayout(acl_layout)), - kernel_type); - + auto fn = acl_common::kernelGenFullyConnected( + node, _ctx, _tensor_builder, _current_op_seq_layout); _return_fn = std::make_unique( - asAclClFunction(std::move(fn)), - ActivationBuilder::generate(activation, output_alloc->handle())); + 
std::move(fn), ActivationBuilder::generate(activation, output_tensor->handle())); } void KernelGenerator::visit(const ir::operation::Mul &node) @@ -406,17 +300,18 @@ void KernelGenerator::visit(const ir::operation::Mul &node) const auto activation = node.param().activation; - auto ofm_alloc = _tensor_builder->at(ofm_index).get(); - auto lhs_alloc = _tensor_builder->at(lhs_index).get(); - auto rhs_alloc = _tensor_builder->at(rhs_index).get(); + auto ofm_tensor = _tensor_builder->at(ofm_index).get(); + auto lhs_tensor = _tensor_builder->at(lhs_index).get(); + auto rhs_tensor = _tensor_builder->at(rhs_index).get(); auto fn = std::make_unique<::arm_compute::CLPixelWiseMultiplication>(); - fn->configure(lhs_alloc->handle(), rhs_alloc->handle(), ofm_alloc->handle(), 1.0, // scale + fn->configure(lhs_tensor->handle(), rhs_tensor->handle(), ofm_tensor->handle(), 1.0, // scale arm_compute::ConvertPolicy::SATURATE, arm_compute::RoundingPolicy::TO_NEAREST_EVEN); _return_fn = std::make_unique( - asAclClFunction(std::move(fn)), ActivationBuilder::generate(activation, ofm_alloc->handle())); + asAclClFunction(std::move(fn)), + ActivationBuilder::generate(activation, ofm_tensor->handle())); } void KernelGenerator::visit(const ir::operation::Reduce &node) @@ -427,14 +322,14 @@ void KernelGenerator::visit(const ir::operation::Reduce &node) const auto keep_dims{node.param().keep_dims}; const auto reduce_type = node.param().reduce_type; - auto output_alloc = _tensor_builder->at(output_index).get(); - auto input_alloc = _tensor_builder->at(input_index).get(); + auto output_tensor = _tensor_builder->at(output_index).get(); + auto input_tensor = _tensor_builder->at(input_index).get(); // Convert to ACL axes taking into account negative values and possible duplicates. const auto &axes = _ctx.at(axes_index); const auto input_rank = _ctx.at(input_index).shape().rank(); const auto frontend_layout = _current_op_seq_layout; - const auto backend_layout = input_alloc->layout(); + const auto backend_layout = input_tensor->layout(); std::unique_ptr fn; if (reduce_type == ir::operation::Reduce::ReduceType::MEAN) @@ -443,7 +338,7 @@ void KernelGenerator::visit(const ir::operation::Reduce &node) const auto acl_axes = acl_common::asCoordinates(axes, input_rank, frontend_layout, backend_layout); - l->configure(input_alloc->handle(), acl_axes, keep_dims, output_alloc->handle()); + l->configure(input_tensor->handle(), acl_axes, keep_dims, output_tensor->handle()); fn = std::move(l); } @@ -453,7 +348,7 @@ void KernelGenerator::visit(const ir::operation::Reduce &node) _tensor_builder->acl_tensor_manager()->internal_buffer_manager()); const auto acl_axes = acl_common::asSet(axes, input_rank, frontend_layout, backend_layout); - l->configure(input_alloc->handle(), output_alloc->handle(), acl_axes, keep_dims, + l->configure(input_tensor->handle(), output_tensor->handle(), acl_axes, keep_dims, acl_common::convertReduceType(reduce_type)); fn = std::move(l); @@ -469,13 +364,13 @@ void KernelGenerator::visit(const ir::operation::Reshape &node) const auto output_index{node.getOutputs().at(0)}; const auto input_index{node.getInputs().at(ir::operation::Reshape::Input::INPUT)}; - auto output_alloc = _tensor_builder->at(output_index).get(); - auto input_alloc = _tensor_builder->at(input_index).get(); + auto output_tensor = _tensor_builder->at(output_index).get(); + auto input_tensor = _tensor_builder->at(input_index).get(); // NOTE This operation must not be changed the layout from frontend to backend // So, PermutationOperationPass makes 
layouts of frontend and backend the same. const auto frontend_layout = _current_op_seq_layout; - const auto backend_layout = output_alloc->layout(); + const auto backend_layout = output_tensor->layout(); assert((_ctx.at(input_index).shape().rank() < 4 && _ctx.at(output_index).shape().rank() < 4) || frontend_layout == backend_layout); UNUSED_RELEASE(frontend_layout); @@ -483,7 +378,7 @@ void KernelGenerator::visit(const ir::operation::Reshape &node) auto fn = std::make_unique<::arm_compute::CLReshapeLayer>(); - fn->configure(input_alloc->handle(), output_alloc->handle()); + fn->configure(input_tensor->handle(), output_tensor->handle()); auto acl_fn = asAclClFunction(std::move(fn)); @@ -503,10 +398,10 @@ void KernelGenerator::visit(const ir::operation::Squeeze &node) (void)dims; (void)ndim; - auto output_alloc = _tensor_builder->at(output_index).get(); - auto input_alloc = _tensor_builder->at(input_index).get(); + auto output_tensor = _tensor_builder->at(output_index).get(); + auto input_tensor = _tensor_builder->at(input_index).get(); auto fn = std::make_unique(); - fn->configure(input_alloc->handle(), output_alloc->handle()); + fn->configure(input_tensor->handle(), output_tensor->handle()); auto acl_fn = asAclClFunction(std::move(fn)); _return_fn = std::move(acl_fn); } @@ -516,15 +411,15 @@ void KernelGenerator::visit(const ir::operation::Tanh &node) const auto output_index{node.getOutputs().at(0)}; const auto input_index{node.getInputs().at(ir::operation::Tanh::Input::INPUT)}; - auto output_alloc = _tensor_builder->at(output_index).get(); - auto input_alloc = _tensor_builder->at(input_index).get(); + auto output_tensor = _tensor_builder->at(output_index).get(); + auto input_tensor = _tensor_builder->at(input_index).get(); auto fn = std::make_unique(); const ::arm_compute::ActivationLayerInfo act_info{ ::arm_compute::ActivationLayerInfo::ActivationFunction::TANH, 1.0f, 1.0f}; - fn->configure(input_alloc->handle(), output_alloc->handle(), act_info); + fn->configure(input_tensor->handle(), output_tensor->handle(), act_info); auto acl_fn = asAclClFunction(std::move(fn)); @@ -538,13 +433,13 @@ void KernelGenerator::visit(const ir::operation::Softmax &node) const auto beta = node.param().beta; - auto output_alloc = _tensor_builder->at(output_index).get(); - auto input_alloc = _tensor_builder->at(input_index).get(); + auto output_tensor = _tensor_builder->at(output_index).get(); + auto input_tensor = _tensor_builder->at(input_index).get(); auto fn = std::make_unique<::arm_compute::CLSoftmaxLayer>( _tensor_builder->acl_tensor_manager()->internal_buffer_manager()); - fn->configure(input_alloc->handle(), output_alloc->handle(), beta); + fn->configure(input_tensor->handle(), output_tensor->handle(), beta); auto acl_fn = asAclClFunction(std::move(fn)); @@ -558,10 +453,10 @@ void KernelGenerator::visit(const ir::operation::Slice &node) const auto begins_index{node.getInputs().at(ir::operation::Slice::Input::BEGINS)}; const auto sizes_index{node.getInputs().at(ir::operation::Slice::Input::SIZES)}; - auto outputData_alloc = _tensor_builder->at(output_index).get(); - auto inputData_alloc = _tensor_builder->at(input_index).get(); + auto outputData_tensor = _tensor_builder->at(output_index).get(); + auto inputData_tensor = _tensor_builder->at(input_index).get(); const auto frontend_layout = _current_op_seq_layout; - const auto backend_layout = inputData_alloc->layout(); + const auto backend_layout = inputData_tensor->layout(); // Set initializers for indices data such as order of inputData int input_rank = 
_ctx.at(input_index).shape().rank(); @@ -613,7 +508,7 @@ void KernelGenerator::visit(const ir::operation::Slice &node) auto fn = std::make_unique<::arm_compute::CLSlice>(); - fn->configure(inputData_alloc->handle(), outputData_alloc->handle(), starts_set, ends_set); + fn->configure(inputData_tensor->handle(), outputData_tensor->handle(), starts_set, ends_set); auto acl_fn = asAclClFunction(std::move(fn)); @@ -628,10 +523,10 @@ void KernelGenerator::visit(const ir::operation::StridedSlice &node) const auto ends_index{node.getInputs().at(ir::operation::StridedSlice::Input::ENDS)}; const auto strides_index{node.getInputs().at(ir::operation::StridedSlice::Input::STRIDES)}; - auto outputData_alloc = _tensor_builder->at(output_index).get(); - auto inputData_alloc = _tensor_builder->at(input_index).get(); + auto outputData_tensor = _tensor_builder->at(output_index).get(); + auto inputData_tensor = _tensor_builder->at(input_index).get(); const auto frontend_layout = _current_op_seq_layout; - const auto backend_layout = inputData_alloc->layout(); + const auto backend_layout = inputData_tensor->layout(); // Set initializers for indices data such as order of inputData int input_rank = _ctx.at(input_index).shape().rank(); @@ -704,7 +599,7 @@ void KernelGenerator::visit(const ir::operation::StridedSlice &node) auto fn = std::make_unique<::arm_compute::CLStridedSlice>(); - fn->configure(inputData_alloc->handle(), outputData_alloc->handle(), starts_set, ends_set, + fn->configure(inputData_tensor->handle(), outputData_tensor->handle(), starts_set, ends_set, strides_set, begin_mask, end_mask, shrink_axis_mask); auto acl_fn = asAclClFunction(std::move(fn)); @@ -720,10 +615,10 @@ void KernelGenerator::visit(const ir::operation::Transpose &node) const auto rank = _ctx.at(ifm_idx).shape().rank(); - auto ofm_alloc = _tensor_builder->at(ofm_idx).get(); - auto ifm_alloc = _tensor_builder->at(ifm_idx).get(); + auto ofm_tensor = _tensor_builder->at(ofm_idx).get(); + auto ifm_tensor = _tensor_builder->at(ifm_idx).get(); const auto frontend_layout = _current_op_seq_layout; - const auto backend_layout = ifm_alloc->layout(); + const auto backend_layout = ifm_tensor->layout(); std::vector pv(perm.cbegin(), perm.cend()); // Reversed @@ -732,7 +627,7 @@ void KernelGenerator::visit(const ir::operation::Transpose &node) auto fn = std::make_unique<::arm_compute::CLPermute>(); - fn->configure(ifm_alloc->handle(), ofm_alloc->handle(), backend_pv); + fn->configure(ifm_tensor->handle(), ofm_tensor->handle(), backend_pv); auto acl_fn = asAclClFunction(std::move(fn)); @@ -747,17 +642,18 @@ void KernelGenerator::visit(const ir::operation::Add &node) const auto activation = node.param().activation; - auto ofm_alloc = _tensor_builder->at(ofm_index).get(); - auto lhs_alloc = _tensor_builder->at(lhs_index).get(); - auto rhs_alloc = _tensor_builder->at(rhs_index).get(); + auto ofm_tensor = _tensor_builder->at(ofm_index).get(); + auto lhs_tensor = _tensor_builder->at(lhs_index).get(); + auto rhs_tensor = _tensor_builder->at(rhs_index).get(); auto fn = std::make_unique<::arm_compute::CLArithmeticAddition>(); - fn->configure(lhs_alloc->handle(), rhs_alloc->handle(), ofm_alloc->handle(), + fn->configure(lhs_tensor->handle(), rhs_tensor->handle(), ofm_tensor->handle(), arm_compute::ConvertPolicy::SATURATE); _return_fn = std::make_unique( - asAclClFunction(std::move(fn)), ActivationBuilder::generate(activation, ofm_alloc->handle())); + asAclClFunction(std::move(fn)), + ActivationBuilder::generate(activation, ofm_tensor->handle())); } void 
KernelGenerator::visit(const ir::operation::Sub &node) @@ -768,17 +664,18 @@ void KernelGenerator::visit(const ir::operation::Sub &node) const auto activation = node.param().activation; - auto ofm_alloc = _tensor_builder->at(ofm_index).get(); - auto lhs_alloc = _tensor_builder->at(lhs_index).get(); - auto rhs_alloc = _tensor_builder->at(rhs_index).get(); + auto ofm_tensor = _tensor_builder->at(ofm_index).get(); + auto lhs_tensor = _tensor_builder->at(lhs_index).get(); + auto rhs_tensor = _tensor_builder->at(rhs_index).get(); auto fn = std::make_unique<::arm_compute::CLArithmeticSubtraction>(); - fn->configure(lhs_alloc->handle(), rhs_alloc->handle(), ofm_alloc->handle(), + fn->configure(lhs_tensor->handle(), rhs_tensor->handle(), ofm_tensor->handle(), arm_compute::ConvertPolicy::SATURATE); _return_fn = std::make_unique( - asAclClFunction(std::move(fn)), ActivationBuilder::generate(activation, ofm_alloc->handle())); + asAclClFunction(std::move(fn)), + ActivationBuilder::generate(activation, ofm_tensor->handle())); } void KernelGenerator::visit(const ir::operation::Div &node) @@ -789,16 +686,17 @@ void KernelGenerator::visit(const ir::operation::Div &node) const auto activation = node.param().activation; - auto ofm_alloc = _tensor_builder->at(ofm_index).get(); - auto lhs_alloc = _tensor_builder->at(lhs_index).get(); - auto rhs_alloc = _tensor_builder->at(rhs_index).get(); + auto ofm_tensor = _tensor_builder->at(ofm_index).get(); + auto lhs_tensor = _tensor_builder->at(lhs_index).get(); + auto rhs_tensor = _tensor_builder->at(rhs_index).get(); auto fn = std::make_unique<::arm_compute::CLArithmeticDivision>(); - fn->configure(lhs_alloc->handle(), rhs_alloc->handle(), ofm_alloc->handle()); + fn->configure(lhs_tensor->handle(), rhs_tensor->handle(), ofm_tensor->handle()); _return_fn = std::make_unique( - asAclClFunction(std::move(fn)), ActivationBuilder::generate(activation, ofm_alloc->handle())); + asAclClFunction(std::move(fn)), + ActivationBuilder::generate(activation, ofm_tensor->handle())); } void KernelGenerator::visit(const ir::operation::Exp &node) @@ -806,12 +704,12 @@ void KernelGenerator::visit(const ir::operation::Exp &node) const auto output_index{node.getOutputs().at(0)}; const auto input_index{node.getInputs().at(ir::operation::Exp::Input::INPUT)}; - auto output_alloc = _tensor_builder->at(output_index).get(); - auto input_alloc = _tensor_builder->at(input_index).get(); + auto output_tensor = _tensor_builder->at(output_index).get(); + auto input_tensor = _tensor_builder->at(input_index).get(); auto fn = std::make_unique<::arm_compute::CLExpLayer>(); - fn->configure(input_alloc->handle(), output_alloc->handle()); + fn->configure(input_tensor->handle(), output_tensor->handle()); auto acl_fn = asAclClFunction(std::move(fn)); @@ -823,12 +721,12 @@ void KernelGenerator::visit(const ir::operation::ExpandDims &node) const auto output_index{node.getOutputs().at(0)}; const auto input_index{node.getInputs().at(ir::operation::ExpandDims::Input::INPUT)}; - auto output_alloc = _tensor_builder->at(output_index).get(); - auto input_alloc = _tensor_builder->at(input_index).get(); + auto output_tensor = _tensor_builder->at(output_index).get(); + auto input_tensor = _tensor_builder->at(input_index).get(); auto fn = std::make_unique<::arm_compute::CLReshapeLayer>(); - fn->configure(input_alloc->handle(), output_alloc->handle()); + fn->configure(input_tensor->handle(), output_tensor->handle()); auto acl_fn = asAclClFunction(std::move(fn)); @@ -842,20 +740,21 @@ void KernelGenerator::visit(const 
ir::operation::InstanceNorm &node) const auto gamma_index{node.getInputs().at(ir::operation::InstanceNorm::Input::GAMMA)}; const auto beta_index{node.getInputs().at(ir::operation::InstanceNorm::Input::BETA)}; - auto ofm_alloc = _tensor_builder->at(ofm_index).get(); - auto ifm_alloc = _tensor_builder->at(ifm_index).get(); - auto gamma_alloc = _tensor_builder->at(gamma_index).get(); - auto beta_alloc = _tensor_builder->at(beta_index).get(); + auto ofm_tensor = _tensor_builder->at(ofm_index).get(); + auto ifm_tensor = _tensor_builder->at(ifm_index).get(); + auto gamma_tensor = _tensor_builder->at(gamma_index).get(); + auto beta_tensor = _tensor_builder->at(beta_index).get(); auto epsilon = node.param().epsilon; auto activation = node.param().activation; auto fn = std::make_unique<::arm_compute::CLInstanceNormalizationLayerEx>(); - fn->configure(ifm_alloc->handle(), ofm_alloc->handle(), gamma_alloc->handle(), - beta_alloc->handle(), epsilon); + fn->configure(ifm_tensor->handle(), ofm_tensor->handle(), gamma_tensor->handle(), + beta_tensor->handle(), epsilon); _return_fn = std::make_unique( - asAclClFunction(std::move(fn)), ActivationBuilder::generate(activation, ofm_alloc->handle())); + asAclClFunction(std::move(fn)), + ActivationBuilder::generate(activation, ofm_tensor->handle())); } void KernelGenerator::visit(const ir::operation::Logistic &node) @@ -863,15 +762,15 @@ void KernelGenerator::visit(const ir::operation::Logistic &node) const auto ofm_index{node.getOutputs().at(0)}; const auto ifm_index{node.getInputs().at(ir::operation::Logistic::Input::INPUT)}; - auto ofm_alloc = _tensor_builder->at(ofm_index).get(); - auto ifm_alloc = _tensor_builder->at(ifm_index).get(); + auto ofm_tensor = _tensor_builder->at(ofm_index).get(); + auto ifm_tensor = _tensor_builder->at(ifm_index).get(); const ::arm_compute::ActivationLayerInfo act_info{ ::arm_compute::ActivationLayerInfo::ActivationFunction::LOGISTIC}; auto fn = std::make_unique<::arm_compute::CLActivationLayer>(); - fn->configure(ifm_alloc->handle(), ofm_alloc->handle(), act_info); + fn->configure(ifm_tensor->handle(), ofm_tensor->handle(), act_info); auto acl_fn = asAclClFunction(std::move(fn)); @@ -884,13 +783,13 @@ void KernelGenerator::visit(const ir::operation::LogicalAnd &node) const auto input0_index{node.getInputs().at(ir::operation::LogicalAnd::Input::INPUT0)}; const auto input1_index{node.getInputs().at(ir::operation::LogicalAnd::Input::INPUT1)}; - auto output_alloc = _tensor_builder->at(output_index).get(); - auto input0_alloc = _tensor_builder->at(input0_index).get(); - auto input1_alloc = _tensor_builder->at(input1_index).get(); + auto output_tensor = _tensor_builder->at(output_index).get(); + auto input0_tensor = _tensor_builder->at(input0_index).get(); + auto input1_tensor = _tensor_builder->at(input1_index).get(); auto fn = std::make_unique<::arm_compute::CLBinaryLogicalOp>(); - fn->configure(input0_alloc->handle(), input1_alloc->handle(), output_alloc->handle(), + fn->configure(input0_tensor->handle(), input1_tensor->handle(), output_tensor->handle(), ::arm_compute::BinaryLogicalOperation::AND); auto acl_fn = asAclClFunction(std::move(fn)); @@ -900,159 +799,8 @@ void KernelGenerator::visit(const ir::operation::LogicalAnd &node) void KernelGenerator::visit(const ir::operation::LSTM &node) { - // TODO Support dynamic rnn - // TODO Fix subtle error in the case of non-CIFG, non-peephole and No Projection. 
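// The removed block below wires up ::arm_compute::CLLSTMLayer by hand: it probes the
// optional CIFG, peephole and projection weights (by checking for non-empty tensor
// shapes) and fills an ::arm_compute::LSTMParams accordingly. After this patch the same
// work is delegated to the shared helper from the newly added AclKernelGen.h, in line
// with the kernelGenPool2D / kernelGenFullyConnected call sites earlier in this file.
// A sketch based only on the call sites visible in this diff (the helper's exact
// signature lives in AclKernelGen.h and is not shown here):
//
//   _return_fn = acl_common::kernelGenLSTM(node, _ctx, _tensor_builder);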
- const auto scratch_buffer_index{ - node.getOutputs().at(ir::operation::LSTM::Output::SCRATCH_BUFFER)}; - const auto output_state_out_index{ - node.getOutputs().at(ir::operation::LSTM::Output::OUTPUT_STATE_OUT)}; - const auto cell_state_out_index{ - node.getOutputs().at(ir::operation::LSTM::Output::CELL_STATE_OUT)}; - const auto output_index{node.getOutputs().at(ir::operation::LSTM::Output::OUTPUT)}; - - const auto input_index{node.getInputs().at(ir::operation::LSTM::Input::INPUT)}; - const auto input_to_input_weights_index{ - node.getInputs().at(ir::operation::LSTM::Input::INPUT_TO_INPUT_WEIGHTS)}; // optional - const auto input_to_forget_weights_index{ - node.getInputs().at(ir::operation::LSTM::Input::INPUT_TO_FORGET_WEIGHTS)}; - const auto input_to_cell_weights_index{ - node.getInputs().at(ir::operation::LSTM::Input::INPUT_TO_CELL_WEIGHTS)}; - const auto input_to_output_weights_index{ - node.getInputs().at(ir::operation::LSTM::Input::INPUT_TO_OUTPUT_WEIGHTS)}; - const auto recurrent_to_input_weights_index{ - node.getInputs().at(ir::operation::LSTM::Input::RECURRENT_TO_INPUT_WEIGHTS)}; // optional - const auto recurrent_to_forget_weights_index{ - node.getInputs().at(ir::operation::LSTM::Input::RECURRENT_TO_FORGET_WEIGHTS)}; - const auto recurrent_to_cell_weights_index{ - node.getInputs().at(ir::operation::LSTM::Input::RECURRENT_TO_CELL_WEIGHTS)}; - const auto recurrent_to_output_weights_index{ - node.getInputs().at(ir::operation::LSTM::Input::RECURRENT_TO_OUTPUT_WEIGHTS)}; - const auto cell_to_input_weights_index{ - node.getInputs().at(ir::operation::LSTM::Input::CELL_TO_INPUT_WEIGHTS)}; // optional - const auto cell_to_forget_weights_index{ - node.getInputs().at(ir::operation::LSTM::Input::CELL_TO_FORGET_WEIGHTS)}; // optional - const auto cell_to_output_weights_index{ - node.getInputs().at(ir::operation::LSTM::Input::CELL_TO_OUTPUT_WEIGHTS)}; // optional - const auto input_gate_bias_index{ - node.getInputs().at(ir::operation::LSTM::Input::INPUT_GATE_BIAS)}; - const auto forget_gate_bias_index{ - node.getInputs().at(ir::operation::LSTM::Input::FORGET_GATE_BIAS)}; - const auto cell_bias_index{node.getInputs().at(ir::operation::LSTM::Input::CELL_BIAS)}; - const auto output_gate_bias_index{ - node.getInputs().at(ir::operation::LSTM::Input::OUTPUT_GATE_BIAS)}; - const auto projection_weights_index{ - node.getInputs().at(ir::operation::LSTM::Input::PROJECTION_WEIGHTS)}; // optional - const auto projection_bias_index{ - node.getInputs().at(ir::operation::LSTM::Input::PROJECTION_BIAS)}; // optional - const auto output_state_in_index{ - node.getInputs().at(ir::operation::LSTM::Input::OUTPUT_STATE_IN)}; - const auto cell_state_in_index{node.getInputs().at(ir::operation::LSTM::Input::CELL_STATE_IN)}; - const auto cell_threshold = node.param().cell_threshold; - const auto projection_threshold = node.param().projection_threshold; - - bool has_input_to_input_weights = _ctx.at(input_to_input_weights_index).shape().dim(0) != 0 && - _ctx.at(input_to_input_weights_index).shape().dim(1) != 0; - bool has_recurrent_to_input_weights = - _ctx.at(recurrent_to_input_weights_index).shape().dim(0) != 0 && - _ctx.at(recurrent_to_input_weights_index).shape().dim(1) != 0; - bool has_cell_to_forget_weights = _ctx.at(cell_to_forget_weights_index).shape().dim(0) != 0; - bool has_cell_to_output_weights = _ctx.at(cell_to_output_weights_index).shape().dim(0) != 0; - bool has_projection_weights = _ctx.at(projection_weights_index).shape().dim(0) != 0 && - _ctx.at(projection_weights_index).shape().dim(1) != 0; - bool 
has_projection_bias = _ctx.at(projection_bias_index).shape().dim(0); - - // NOTE The input_to_input_weights and the recurrent_to_input_weights do not exist in CIFG. - // true: no CIFG - // false: CIFG - // NOTE The cell_to_input_weights does not exist in non-peephole although regular LSTM(non-CIFG). - bool has_cifg_param = has_input_to_input_weights && has_recurrent_to_input_weights; - - // NOTE The cell_to_forget_weights and the cell_to_output_weights exist in peephole. - // But the cell_to_input_weights does not exist in regular CIFG although peephole. - // true: peephole - // false: no peephole - bool has_peephole_param = has_cell_to_forget_weights && has_cell_to_output_weights; - - // NOTE Although the projection weights has data the projection bias may not have data. - bool has_projection_param = has_projection_weights; - - const auto activation = node.param().activation; - const auto cell_clip = cell_threshold; - const auto projection_clip = projection_threshold; - assert(cell_clip >= 0.f && projection_clip >= 0.f); - - auto scratch_buffer_alloc = _tensor_builder->at(scratch_buffer_index).get(); - auto output_state_out_alloc = _tensor_builder->at(output_state_out_index).get(); - auto cell_state_out_alloc = _tensor_builder->at(cell_state_out_index).get(); - auto output_alloc = _tensor_builder->at(output_index).get(); - - auto input_alloc = _tensor_builder->at(input_index).get(); - - auto input_to_forget_weights_alloc = _tensor_builder->at(input_to_forget_weights_index).get(); - auto input_to_cell_weights_alloc = _tensor_builder->at(input_to_cell_weights_index).get(); - auto input_to_output_weights_alloc = _tensor_builder->at(input_to_output_weights_index).get(); - auto recurrent_to_forget_weights_alloc = - _tensor_builder->at(recurrent_to_forget_weights_index).get(); - auto recurrent_to_cell_weights_alloc = _tensor_builder->at(recurrent_to_cell_weights_index).get(); - auto recurrent_to_output_weights_alloc = - _tensor_builder->at(recurrent_to_output_weights_index).get(); - - auto forget_gate_bias_alloc = _tensor_builder->at(forget_gate_bias_index).get(); - auto cell_bias_alloc = _tensor_builder->at(cell_bias_index).get(); - auto output_gate_bias_alloc = _tensor_builder->at(output_gate_bias_index).get(); - auto output_state_in_alloc = _tensor_builder->at(output_state_in_index).get(); - auto cell_state_in_alloc = _tensor_builder->at(cell_state_in_index).get(); - - auto act_info = ::onert::backend::acl_common::asActivationLayerInfo(activation); - - auto fn = std::make_unique<::arm_compute::CLLSTMLayer>(); - - ::arm_compute::LSTMParams<::arm_compute::ICLTensor> lstm_params{}; - if (has_cifg_param) - { - auto input_to_input_weights_alloc = - _tensor_builder->at(input_to_input_weights_index).get(); // optional - auto recurrent_to_input_weights_alloc = - _tensor_builder->at(recurrent_to_input_weights_index).get(); // optional - auto cell_to_input_weights_handle = - has_peephole_param ? 
_tensor_builder->at(cell_to_input_weights_index).get()->handle() - : nullptr; // optional (non-cifg && peephole) - auto input_gate_bias_alloc = _tensor_builder->at(input_gate_bias_index).get(); // optional - lstm_params.set_cifg_params(input_to_input_weights_alloc->handle(), - recurrent_to_input_weights_alloc->handle(), - cell_to_input_weights_handle, input_gate_bias_alloc->handle()); - } - if (has_peephole_param) - { - auto cell_to_forget_weights_alloc = - _tensor_builder->at(cell_to_forget_weights_index).get(); // optional - auto cell_to_output_weights_alloc = - _tensor_builder->at(cell_to_output_weights_index).get(); // optional - lstm_params.set_peephole_params(cell_to_forget_weights_alloc->handle(), - cell_to_output_weights_alloc->handle()); - } - if (has_projection_param) - { - auto projection_weights_alloc = _tensor_builder->at(projection_weights_index).get(); // optional - auto projection_bias_handle = has_projection_bias - ? _tensor_builder->at(projection_bias_index).get()->handle() - : nullptr; // optional - lstm_params.set_projection_params(projection_weights_alloc->handle(), projection_bias_handle); - } - - fn->configure( - input_alloc->handle(), input_to_forget_weights_alloc->handle(), - input_to_cell_weights_alloc->handle(), input_to_output_weights_alloc->handle(), - recurrent_to_forget_weights_alloc->handle(), recurrent_to_cell_weights_alloc->handle(), - recurrent_to_output_weights_alloc->handle(), forget_gate_bias_alloc->handle(), - cell_bias_alloc->handle(), output_gate_bias_alloc->handle(), output_state_in_alloc->handle(), - cell_state_in_alloc->handle(), scratch_buffer_alloc->handle(), - output_state_out_alloc->handle(), cell_state_out_alloc->handle(), output_alloc->handle(), - lstm_params, act_info, cell_clip, projection_clip); - - auto acl_fn = asAclClFunction(std::move(fn)); - - _return_fn = std::move(acl_fn); + _return_fn = acl_common::kernelGenLSTM(node, _ctx, _tensor_builder); } void KernelGenerator::visit(const ir::operation::Comparison &node) @@ -1063,13 +811,13 @@ void KernelGenerator::visit(const ir::operation::Comparison &node) const auto comparison_type = node.param().comparison_type; - auto output_alloc = _tensor_builder->at(output_index).get(); - auto input0_alloc = _tensor_builder->at(input0_index).get(); - auto input1_alloc = _tensor_builder->at(input1_index).get(); + auto output_tensor = _tensor_builder->at(output_index).get(); + auto input0_tensor = _tensor_builder->at(input0_index).get(); + auto input1_tensor = _tensor_builder->at(input1_index).get(); auto fn = std::make_unique<::arm_compute::CLComparison>(); - fn->configure(input0_alloc->handle(), input1_alloc->handle(), output_alloc->handle(), + fn->configure(input0_tensor->handle(), input1_tensor->handle(), output_tensor->handle(), (arm_compute::ComparisonOperation)comparison_type); auto acl_fn = asAclClFunction(std::move(fn)); @@ -1107,13 +855,13 @@ void KernelGenerator::visit(const ir::operation::Pack &node) for (const auto &input_index : input_indexes) { size_t input_rank = _ctx.at(input_index).shape().rank(); - const auto &input_alloc = _tensor_builder->at(input_index); - orig_inputs_acl_tensor_shapes.emplace_back(input_alloc->info()->tensor_shape()); - assert(input_rank == input_alloc->num_dimensions()); - if (input_rank != input_alloc->info()->num_dimensions()) + const auto &input_tensor = _tensor_builder->at(input_index); + orig_inputs_acl_tensor_shapes.emplace_back(input_tensor->info()->tensor_shape()); + assert(input_rank == input_tensor->num_dimensions()); + if (input_rank != 
input_tensor->info()->num_dimensions()) { // This means that high dimension's value is 1 and ifm tensor is applied dim_correction - input_alloc->info()->set_tensor_shape(acl_common::asTensorShape( + input_tensor->info()->set_tensor_shape(acl_common::asTensorShape( _ctx.at(input_index).shape(), _current_op_seq_layout, backend_layout, false)); } } @@ -1135,8 +883,8 @@ void KernelGenerator::visit(const ir::operation::Permute &node) const auto ofm_idx{node.getOutputs().at(0)}; const auto ifm_idx{node.getInputs().at(0)}; const auto permute_type = node.getPermuteType(); - auto ofm_alloc = _tensor_builder->at(ofm_idx).get(); - auto ifm_alloc = _tensor_builder->at(ifm_idx).get(); + auto ofm_tensor = _tensor_builder->at(ofm_idx).get(); + auto ifm_tensor = _tensor_builder->at(ifm_idx).get(); const auto rank = _ctx.at(ofm_idx).shape().rank(); assert(_ctx.at(ifm_idx).shape().rank() == _ctx.at(ofm_idx).shape().rank()); @@ -1149,7 +897,7 @@ void KernelGenerator::visit(const ir::operation::Permute &node) auto l = std::make_unique<::arm_compute::CLPermute>(); - l->configure(ifm_alloc->handle(), ofm_alloc->handle(), pv); + l->configure(ifm_tensor->handle(), ofm_tensor->handle(), pv); fn = std::move(l); } @@ -1160,7 +908,7 @@ void KernelGenerator::visit(const ir::operation::Permute &node) auto l = std::make_unique<::arm_compute::CLPermute>(); - l->configure(ifm_alloc->handle(), ofm_alloc->handle(), pv); + l->configure(ifm_tensor->handle(), ofm_tensor->handle(), pv); fn = std::move(l); } @@ -1168,7 +916,7 @@ void KernelGenerator::visit(const ir::operation::Permute &node) { auto l = std::make_unique<::arm_compute::CLCopy>(); - l->configure(ifm_alloc->handle(), ofm_alloc->handle()); + l->configure(ifm_tensor->handle(), ofm_tensor->handle()); fn = std::move(l); } @@ -1183,12 +931,12 @@ void KernelGenerator::visit(const ir::operation::RSQRT &node) const auto ofm_index{node.getOutputs().at(0)}; const auto ifm_index{node.getInputs().at(ir::operation::RSQRT::Input::INPUT)}; - auto ofm_alloc = _tensor_builder->at(ofm_index).get(); - auto ifm_alloc = _tensor_builder->at(ifm_index).get(); + auto ofm_tensor = _tensor_builder->at(ofm_index).get(); + auto ifm_tensor = _tensor_builder->at(ifm_index).get(); auto fn = std::make_unique<::arm_compute::CLRsqrtLayer>(); - fn->configure(ifm_alloc->handle(), ofm_alloc->handle()); + fn->configure(ifm_tensor->handle(), ofm_tensor->handle()); _return_fn = asAclClFunction(std::move(fn)); } @@ -1198,15 +946,15 @@ void KernelGenerator::visit(const ir::operation::ReLU &node) const auto output_index{node.getOutputs().at(0)}; const auto input_index{node.getInputs().at(ir::operation::ReLU::Input::INPUT)}; - auto output_alloc = _tensor_builder->at(output_index).get(); - auto input_alloc = _tensor_builder->at(input_index).get(); + auto output_tensor = _tensor_builder->at(output_index).get(); + auto input_tensor = _tensor_builder->at(input_index).get(); auto fn = std::make_unique(); const ::arm_compute::ActivationLayerInfo act_info{ ::arm_compute::ActivationLayerInfo::ActivationFunction::RELU}; - fn->configure(input_alloc->handle(), output_alloc->handle(), act_info); + fn->configure(input_tensor->handle(), output_tensor->handle(), act_info); auto acl_fn = asAclClFunction(std::move(fn)); @@ -1219,12 +967,12 @@ void KernelGenerator::visit(const ir::operation::ResizeBilinear &node) const auto ifm_index{node.getInputs().at(ir::operation::ResizeBilinear::Input::INPUT)}; - auto ofm_alloc = _tensor_builder->at(ofm_index).get(); - auto ifm_alloc = _tensor_builder->at(ifm_index).get(); + auto 
ofm_tensor = _tensor_builder->at(ofm_index).get(); + auto ifm_tensor = _tensor_builder->at(ifm_index).get(); auto fn = std::make_unique<::arm_compute::CLScale>(); - fn->configure(ifm_alloc->handle(), ofm_alloc->handle(), + fn->configure(ifm_tensor->handle(), ofm_tensor->handle(), ::arm_compute::InterpolationPolicy::BILINEAR, ::arm_compute::BorderMode::REPLICATE, ::arm_compute::PixelValue(0.f), ::arm_compute::SamplingPolicy::TOP_LEFT); @@ -1238,15 +986,15 @@ void KernelGenerator::visit(const ir::operation::ReLU1 &node) const auto ofm_index{node.getOutputs().at(0)}; const auto ifm_index{node.getInputs().at(ir::operation::ReLU1::Input::INPUT)}; - auto ofm_alloc = _tensor_builder->at(ofm_index).get(); - auto ifm_alloc = _tensor_builder->at(ifm_index).get(); + auto ofm_tensor = _tensor_builder->at(ofm_index).get(); + auto ifm_tensor = _tensor_builder->at(ifm_index).get(); const ::arm_compute::ActivationLayerInfo act_info{ ::arm_compute::ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 1.0f, -1.0f}; auto fn = std::make_unique<::arm_compute::CLActivationLayer>(); - fn->configure(ifm_alloc->handle(), ofm_alloc->handle(), act_info); + fn->configure(ifm_tensor->handle(), ofm_tensor->handle(), act_info); auto acl_fn = asAclClFunction(std::move(fn)); @@ -1258,15 +1006,15 @@ void KernelGenerator::visit(const ir::operation::ReLU6 &node) const auto ofm_index{node.getOutputs().at(0)}; const auto ifm_index{node.getInputs().at(ir::operation::ReLU6::Input::INPUT)}; - auto ofm_alloc = _tensor_builder->at(ofm_index).get(); - auto ifm_alloc = _tensor_builder->at(ifm_index).get(); + auto ofm_tensor = _tensor_builder->at(ofm_index).get(); + auto ifm_tensor = _tensor_builder->at(ifm_index).get(); const ::arm_compute::ActivationLayerInfo act_info{ ::arm_compute::ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.0f}; auto fn = std::make_unique<::arm_compute::CLActivationLayer>(); - fn->configure(ifm_alloc->handle(), ofm_alloc->handle(), act_info); + fn->configure(ifm_tensor->handle(), ofm_tensor->handle(), act_info); auto acl_fn = asAclClFunction(std::move(fn)); @@ -1288,25 +1036,25 @@ void KernelGenerator::visit(const ir::operation::RNN &node) const auto activation = node.param().activation; - auto output_alloc = _tensor_builder->at(output_index).get(); - auto hidden_state_out_alloc = _tensor_builder->at(hidden_state_out_index).get(); + auto output_tensor = _tensor_builder->at(output_index).get(); + auto hidden_state_out_tensor = _tensor_builder->at(hidden_state_out_index).get(); - auto input_alloc = _tensor_builder->at(input_index).get(); - auto weights_alloc = _tensor_builder->at(weights_index).get(); - auto recurrent_weights_alloc = _tensor_builder->at(recurrent_weights_index).get(); - auto bias_alloc = _tensor_builder->at(bias_index).get(); - auto hidden_state_in_alloc = _tensor_builder->at(hidden_state_in_index).get(); + auto input_tensor = _tensor_builder->at(input_index).get(); + auto weights_tensor = _tensor_builder->at(weights_index).get(); + auto recurrent_weights_tensor = _tensor_builder->at(recurrent_weights_index).get(); + auto bias_tensor = _tensor_builder->at(bias_index).get(); + auto hidden_state_in_tensor = _tensor_builder->at(hidden_state_in_index).get(); auto act_info = ::onert::backend::acl_common::asActivationLayerInfo(activation); auto copy_layer = std::make_unique<::arm_compute::CLCopy>(); - copy_layer->configure(hidden_state_in_alloc->handle(), hidden_state_out_alloc->handle()); + copy_layer->configure(hidden_state_in_tensor->handle(), hidden_state_out_tensor->handle()); 
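// copy_layer seeds hidden_state_out with the incoming hidden_state_in before the RNN
// kernel runs; the CLRNNLayer configured below (replacing CLRNNLayerEx) then produces
// both the new hidden state and the output, as its argument list shows.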
_return_fn = asAclClFunction(std::move(copy_layer)); - auto fn = std::make_unique<::arm_compute::CLRNNLayerEx>( + auto fn = std::make_unique<::arm_compute::CLRNNLayer>( _tensor_builder->acl_tensor_manager()->internal_buffer_manager()); - fn->configure(input_alloc->handle(), weights_alloc->handle(), recurrent_weights_alloc->handle(), - bias_alloc->handle(), hidden_state_out_alloc->handle(), output_alloc->handle(), - act_info); + fn->configure(input_tensor->handle(), weights_tensor->handle(), + recurrent_weights_tensor->handle(), bias_tensor->handle(), + hidden_state_out_tensor->handle(), output_tensor->handle(), act_info); _return_fn = asAclClFunction(std::move(fn)); } @@ -1315,12 +1063,12 @@ void KernelGenerator::visit(const ir::operation::Floor &node) const auto ofm_index{node.getOutputs().at(0)}; const auto ifm_index{node.getInputs().at(ir::operation::Floor::Input::INPUT)}; - auto ofm_alloc = _tensor_builder->at(ofm_index).get(); - auto ifm_alloc = _tensor_builder->at(ifm_index).get(); + auto ofm_tensor = _tensor_builder->at(ofm_index).get(); + auto ifm_tensor = _tensor_builder->at(ifm_index).get(); auto fn = std::make_unique<::arm_compute::CLFloor>(); - fn->configure(ifm_alloc->handle(), ofm_alloc->handle()); + fn->configure(ifm_tensor->handle(), ofm_tensor->handle()); auto acl_fn = asAclClFunction(std::move(fn)); @@ -1335,10 +1083,10 @@ void KernelGenerator::visit(const ir::operation::SpaceToBatchND &node) node.getInputs().at(ir::operation::SpaceToBatchND::Input::BLOCK_SIZE)}; const auto paddings_index{node.getInputs().at(ir::operation::SpaceToBatchND::Input::PADDINGS)}; - auto ofm_alloc = _tensor_builder->at(ofm_index).get(); - auto ifm_alloc = _tensor_builder->at(ifm_index).get(); - auto block_size_alloc = _tensor_builder->at(block_size_index).get(); - auto paddings_alloc = _tensor_builder->at(paddings_index).get(); + auto ofm_tensor = _tensor_builder->at(ofm_index).get(); + auto ifm_tensor = _tensor_builder->at(ifm_index).get(); + auto block_size_tensor = _tensor_builder->at(block_size_index).get(); + auto paddings_tensor = _tensor_builder->at(paddings_index).get(); assert(_ctx.at(block_size_index).data()); assert(_ctx.at(paddings_index).data()); @@ -1346,8 +1094,8 @@ void KernelGenerator::visit(const ir::operation::SpaceToBatchND &node) std::unique_ptr<::arm_compute::IFunction> fn; auto l = std::make_unique<::arm_compute::CLSpaceToBatchLayer>(); - l->configure(ifm_alloc->handle(), block_size_alloc->handle(), paddings_alloc->handle(), - ofm_alloc->handle()); + l->configure(ifm_tensor->handle(), block_size_tensor->handle(), paddings_tensor->handle(), + ofm_tensor->handle()); fn = std::move(l); auto acl_fn = asAclClFunction(std::move(fn)); @@ -1362,12 +1110,12 @@ void KernelGenerator::visit(const ir::operation::SpaceToDepth &node) auto block_size = node.param().block_size; - auto ofm_alloc = _tensor_builder->at(ofm_index).get(); - auto ifm_alloc = _tensor_builder->at(ifm_index).get(); + auto ofm_tensor = _tensor_builder->at(ofm_index).get(); + auto ifm_tensor = _tensor_builder->at(ifm_index).get(); - auto fn = std::make_unique<::arm_compute::CLSpaceToDepth>(); + auto fn = std::make_unique<::arm_compute::CLSpaceToDepthLayer>(); - fn->configure(ifm_alloc->handle(), ofm_alloc->handle(), block_size); + fn->configure(ifm_tensor->handle(), ofm_tensor->handle(), block_size); auto acl_fn = asAclClFunction(std::move(fn)); @@ -1376,32 +1124,15 @@ void KernelGenerator::visit(const ir::operation::SpaceToDepth &node) void KernelGenerator::visit(const ir::operation::L2Pool2D &node) { - const auto 
ofm_index{node.getOutputs().at(0)}; - const auto ifm_index{node.getInputs().at(ir::operation::L2Pool2D::Input::INPUT)}; + auto raw_fn = acl_common::kernelGenPool2D<::arm_compute::CLPoolingLayer>( + node, _ctx, _tensor_builder, _current_op_seq_layout, ::arm_compute::PoolingType::L2); - const auto ifm_shape = _ctx.at(ifm_index).shape().asFeature(_current_op_seq_layout); - const auto ofm_shape = _ctx.at(ofm_index).shape().asFeature(_current_op_seq_layout); - - uint32_t kw = node.param().kw; - uint32_t kh = node.param().kh; - const auto stride = node.param().stride; - const auto padding = - ir::calculatePadding(node.param().padding, ifm_shape, ofm_shape, stride, kw, kh); + const auto ofm_index{node.getOutputs().at(0)}; + auto ofm_tensor = _tensor_builder->at(ofm_index).get(); const auto activation = node.param().activation; - - auto ofm_alloc = _tensor_builder->at(ofm_index).get(); - auto ifm_alloc = _tensor_builder->at(ifm_index).get(); - - ::arm_compute::PoolingLayerInfo info{ - ::arm_compute::PoolingType::L2, ::arm_compute::Size2D{kw, kh}, - ::onert::backend::acl_common::asPadStrideInfo(padding, stride)}; - - auto fn = std::make_unique<::arm_compute::CLPoolingLayer>(); - - fn->configure(ifm_alloc->handle(), ofm_alloc->handle(), info); - _return_fn = std::make_unique( - asAclClFunction(std::move(fn)), ActivationBuilder::generate(activation, ofm_alloc->handle())); + asAclClFunction(std::move(raw_fn)), + ActivationBuilder::generate(activation, ofm_tensor->handle())); } void KernelGenerator::visit(const ir::operation::EmbeddingLookup &node) @@ -1410,13 +1141,13 @@ void KernelGenerator::visit(const ir::operation::EmbeddingLookup &node) const auto lookups_index{node.getInputs().at(ir::operation::EmbeddingLookup::Input::LOOKUPS)}; const auto values_index{node.getInputs().at(ir::operation::EmbeddingLookup::Input::VALUES)}; - auto output_alloc = _tensor_builder->at(output_index).get(); - auto lookups_alloc = _tensor_builder->at(lookups_index).get(); - auto values_alloc = _tensor_builder->at(values_index).get(); + auto output_tensor = _tensor_builder->at(output_index).get(); + auto lookups_tensor = _tensor_builder->at(lookups_index).get(); + auto values_tensor = _tensor_builder->at(values_index).get(); auto fn = std::make_unique<::arm_compute::CLEmbeddingLookup>(); - fn->configure(values_alloc->handle(), output_alloc->handle(), lookups_alloc->handle()); + fn->configure(values_tensor->handle(), output_tensor->handle(), lookups_tensor->handle()); auto acl_fn = asAclClFunction(std::move(fn)); @@ -1442,15 +1173,15 @@ void KernelGenerator::visit(const ir::operation::L2Normalization &node) float beta = 0.5f; // pow(reduction, -0.5) = 1 / sqrt(reduction) float bias = 0.0f; // Don't offset the reduction. 
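// Why these constants yield an L2 normalization (a sketch; it assumes the surrounding
// code, not visible in this hunk, sets alpha to 1.0f and a window spanning the channel
// axis): with is_scaled == false (the last constructor argument below), ACL's cross-map
// NormalizationLayer computes roughly
//   out = in / (bias + alpha * sum(in^2))^beta
// so bias = 0, alpha = 1 and beta = 0.5 reduce this to
//   out = in / sqrt(sum(in^2))
// i.e. per-position L2 normalization across channels.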
- auto ofm_alloc = _tensor_builder->at(ofm_index).get(); - auto ifm_alloc = _tensor_builder->at(ifm_index).get(); + auto ofm_tensor = _tensor_builder->at(ofm_index).get(); + auto ifm_tensor = _tensor_builder->at(ifm_index).get(); const auto norm_info = ::arm_compute::NormalizationLayerInfo(::arm_compute::NormType::CROSS_MAP, radius, alpha, beta, bias, false); auto fn = std::make_unique<::arm_compute::CLNormalizationLayer>(); - fn->configure(ifm_alloc->handle(), ofm_alloc->handle(), norm_info); + fn->configure(ifm_tensor->handle(), ofm_tensor->handle(), norm_info); auto acl_fn = asAclClFunction(std::move(fn)); @@ -1466,17 +1197,17 @@ void KernelGenerator::visit(const ir::operation::HashtableLookup &node) const auto keys_index{node.getInputs().at(ir::operation::HashtableLookup::Input::KEYS)}; const auto values_index{node.getInputs().at(ir::operation::HashtableLookup::Input::VALUES)}; - auto output_alloc = _tensor_builder->at(output_index).get(); - auto hits_alloc = _tensor_builder->at(hits_index).get(); + auto output_tensor = _tensor_builder->at(output_index).get(); + auto hits_tensor = _tensor_builder->at(hits_index).get(); - auto lookups_alloc = _tensor_builder->at(lookups_index).get(); - auto keys_alloc = _tensor_builder->at(keys_index).get(); - auto values_alloc = _tensor_builder->at(values_index).get(); + auto lookups_tensor = _tensor_builder->at(lookups_index).get(); + auto keys_tensor = _tensor_builder->at(keys_index).get(); + auto values_tensor = _tensor_builder->at(values_index).get(); auto fn = std::make_unique<::arm_compute::CLHashtableLookup>(); - fn->configure(lookups_alloc->handle(), keys_alloc->handle(), values_alloc->handle(), - output_alloc->handle(), hits_alloc->handle()); + fn->configure(lookups_tensor->handle(), keys_tensor->handle(), values_tensor->handle(), + output_tensor->handle(), hits_tensor->handle()); auto acl_fn = asAclClFunction(std::move(fn)); @@ -1489,13 +1220,13 @@ void KernelGenerator::visit(const ir::operation::PReLU &node) const auto ifm_index{node.getInputs().at(ir::operation::PReLU::Input::INPUT)}; const auto alpha_index{node.getInputs().at(ir::operation::PReLU::Input::ALPHA)}; - auto ofm_alloc = _tensor_builder->at(ofm_index).get(); - auto ifm_alloc = _tensor_builder->at(ifm_index).get(); - auto alpha_alloc = _tensor_builder->at(alpha_index).get(); + auto ofm_tensor = _tensor_builder->at(ofm_index).get(); + auto ifm_tensor = _tensor_builder->at(ifm_index).get(); + auto alpha_tensor = _tensor_builder->at(alpha_index).get(); - auto fn = std::make_unique<::arm_compute::CLPReLU>(); + auto fn = std::make_unique<::arm_compute::CLPReluLayer>(); - fn->configure(ifm_alloc->handle(), alpha_alloc->handle(), ofm_alloc->handle()); + fn->configure(ifm_tensor->handle(), alpha_tensor->handle(), ofm_tensor->handle()); auto acl_fn = asAclClFunction(std::move(fn)); @@ -1518,7 +1249,6 @@ void KernelGenerator::visit(const ir::operation::TransposeConv &node) (node.param().padding.type == ir::PaddingType::VALID)); auto padding = ir::calculatePadding(node.param().padding, ofm_shape, ifm_shape, stride, ker_shape.W, ker_shape.H); - uint32_t invalid_horizontal = 0; uint32_t invalid_vertical = 0; if (node.param().padding.type == ir::PaddingType::VALID) @@ -1528,17 +1258,17 @@ void KernelGenerator::visit(const ir::operation::TransposeConv &node) invalid_vertical = ofm_shape.H - (1 + (ifm_shape.H - 1) * stride.vertical) - (ker_shape.H - 1); } - auto ofm_alloc = _tensor_builder->at(ofm_index).get(); - auto ifm_alloc = _tensor_builder->at(ifm_index).get(); - auto ker_alloc = 
_tensor_builder->at(ker_index).get(); + auto ofm_tensor = _tensor_builder->at(ofm_index).get(); + auto ifm_tensor = _tensor_builder->at(ifm_index).get(); + auto ker_tensor = _tensor_builder->at(ker_index).get(); const auto tconv_info = acl_common::asPadStrideInfo(padding, stride); auto fn = std::make_unique<::arm_compute::CLTransposeConvLayer>( _tensor_builder->acl_tensor_manager()->internal_buffer_manager()); - fn->configure(ifm_alloc->handle(), ker_alloc->handle(), nullptr, ofm_alloc->handle(), tconv_info, - invalid_horizontal, invalid_vertical); + fn->configure(ifm_tensor->handle(), ker_tensor->handle(), nullptr, ofm_tensor->handle(), + tconv_info, invalid_horizontal, invalid_vertical); auto acl_fn = asAclClFunction(std::move(fn)); @@ -1550,15 +1280,15 @@ void KernelGenerator::visit(const ir::operation::SQRT &node) const auto output_index{node.getOutputs().at(0)}; const auto input_index{node.getInputs().at(ir::operation::SQRT::Input::INPUT)}; - auto output_alloc = _tensor_builder->at(output_index).get(); - auto input_alloc = _tensor_builder->at(input_index).get(); + auto output_tensor = _tensor_builder->at(output_index).get(); + auto input_tensor = _tensor_builder->at(input_index).get(); const ::arm_compute::ActivationLayerInfo act_info{ ::arm_compute::ActivationLayerInfo::ActivationFunction::SQRT}; auto fn = std::make_unique<::arm_compute::CLActivationLayer>(); - fn->configure(input_alloc->handle(), output_alloc->handle(), act_info); + fn->configure(input_tensor->handle(), output_tensor->handle(), act_info); auto acl_fn = asAclClFunction(std::move(fn)); @@ -1571,13 +1301,13 @@ void KernelGenerator::visit(const ir::operation::LogicalOr &node) const auto input0_index{node.getInputs().at(ir::operation::LogicalOr::Input::INPUT0)}; const auto input1_index{node.getInputs().at(ir::operation::LogicalOr::Input::INPUT1)}; - auto output_alloc = _tensor_builder->at(output_index).get(); - auto input0_alloc = _tensor_builder->at(input0_index).get(); - auto input1_alloc = _tensor_builder->at(input1_index).get(); + auto output_tensor = _tensor_builder->at(output_index).get(); + auto input0_tensor = _tensor_builder->at(input0_index).get(); + auto input1_tensor = _tensor_builder->at(input1_index).get(); auto fn = std::make_unique<::arm_compute::CLBitwiseOr>(); - fn->configure(input0_alloc->handle(), input1_alloc->handle(), output_alloc->handle()); + fn->configure(input0_tensor->handle(), input1_tensor->handle(), output_tensor->handle()); auto acl_fn = asAclClFunction(std::move(fn)); @@ -1589,12 +1319,12 @@ void KernelGenerator::visit(const ir::operation::LogicalNot &node) const auto output_index{node.getOutputs().at(0)}; const auto input_index{node.getInputs().at(ir::operation::LogicalNot::Input::INPUT)}; - auto output_alloc = _tensor_builder->at(output_index).get(); - auto input_alloc = _tensor_builder->at(input_index).get(); + auto output_tensor = _tensor_builder->at(output_index).get(); + auto input_tensor = _tensor_builder->at(input_index).get(); auto fn = std::make_unique<::arm_compute::CLBitwiseNot>(); - fn->configure(input_alloc->handle(), output_alloc->handle()); + fn->configure(input_tensor->handle(), output_tensor->handle()); auto acl_fn = asAclClFunction(std::move(fn)); @@ -1607,13 +1337,13 @@ void KernelGenerator::visit(const ir::operation::SquaredDifference &node) const auto lhs_index{node.getInputs().at(ir::operation::SquaredDifference::Input::LHS)}; const auto rhs_index{node.getInputs().at(ir::operation::SquaredDifference::Input::RHS)}; - auto ofm_alloc = 
_tensor_builder->at(ofm_index).get(); - auto lhs_alloc = _tensor_builder->at(lhs_index).get(); - auto rhs_alloc = _tensor_builder->at(rhs_index).get(); + auto ofm_tensor = _tensor_builder->at(ofm_index).get(); + auto lhs_tensor = _tensor_builder->at(lhs_index).get(); + auto rhs_tensor = _tensor_builder->at(rhs_index).get(); auto fn = std::make_unique<::arm_compute::CLElementwiseSquaredDiff>(); - fn->configure(lhs_alloc->handle(), rhs_alloc->handle(), ofm_alloc->handle()); + fn->configure(lhs_tensor->handle(), rhs_tensor->handle(), ofm_tensor->handle()); auto acl_fn = asAclClFunction(std::move(fn)); @@ -1634,13 +1364,13 @@ void KernelGenerator::visit(const ir::operation::TopKV2 &node) const auto k = node.param().k; - auto values_alloc = _tensor_builder->at(outputValues_index).get(); - auto indices_alloc = _tensor_builder->at(outputIndices_index).get(); - auto input_alloc = _tensor_builder->at(inputData_index).get(); + auto values_tensor = _tensor_builder->at(outputValues_index).get(); + auto indices_tensor = _tensor_builder->at(outputIndices_index).get(); + auto input_tensor = _tensor_builder->at(inputData_index).get(); auto fn = std::make_unique<::arm_compute::CLTopKV2>(); - fn->configure(input_alloc->handle(), k, values_alloc->handle(), indices_alloc->handle()); + fn->configure(input_tensor->handle(), k, values_tensor->handle(), indices_tensor->handle()); auto acl_fn = asAclClFunction(std::move(fn)); @@ -1659,9 +1389,9 @@ void KernelGenerator::visit(const ir::operation::Gather &node) const auto axis_value = (axis_raw < 0 ? (ifm_rank + axis_raw) : axis_raw); const int axis = ::onert::backend::acl_common::ToARMComputeAxis(ifm_rank, axis_value).value(); - auto ofm_alloc = _tensor_builder->at(ofm_index).get(); - auto ifm_alloc = _tensor_builder->at(ifm_index).get(); - auto indices_alloc = _tensor_builder->at(indices_index).get(); + auto ofm_tensor = _tensor_builder->at(ofm_index).get(); + auto ifm_tensor = _tensor_builder->at(ifm_index).get(); + auto indices_tensor = _tensor_builder->at(indices_index).get(); // NOTE The frontend layout and backend layout must be the same for this operation. // If not the same, we have to add a stage(?) to perform permutation of output tensor. It @@ -1671,43 +1401,43 @@ void KernelGenerator::visit(const ir::operation::Gather &node) // a model. For example, if a model in NHWC has this operation as output rank == 4, indices // rank == 2 and axis == 2, this operation should work as the axis W and C, but the axis W // and C are not sequential in NCHW. So the backend in NCHW cannot handle this case. 
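  // The asserts below also rely on the gather rank rule stated just after the kernel is
  // created (input n-D, indices k-D, output (n + k - 1)-D). As a concrete example,
  // gathering a [4, 3] input with [2, 5] indices along axis 0 yields a [2, 5, 3] output:
  // 2 + 2 - 1 = 3.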
- const auto backend_layout = ofm_alloc->layout(); + const auto backend_layout = ofm_tensor->layout(); UNUSED_RELEASE(backend_layout); - assert(backend_layout == ifm_alloc->layout()); - assert(backend_layout == indices_alloc->layout()); + assert(backend_layout == ifm_tensor->layout()); + assert(backend_layout == indices_tensor->layout()); assert(ifm_rank < 4 || _current_op_seq_layout == backend_layout); auto fn = std::make_unique<::arm_compute::CLGatherEx>(); // input is n-D, indices k-D, output is (n + k - 1)-D size_t n = ifm_rank; - assert(n == ifm_alloc->num_dimensions()); + assert(n == ifm_tensor->num_dimensions()); size_t k = _ctx.at(indices_index).shape().rank(); - assert(k == indices_alloc->num_dimensions()); + assert(k == indices_tensor->num_dimensions()); // Disable applied dim_correction - const auto orig_ifm_acl_tensor_shape = ifm_alloc->info()->tensor_shape(); - if (n != ifm_alloc->info()->num_dimensions()) + const auto orig_ifm_acl_tensor_shape = ifm_tensor->info()->tensor_shape(); + if (n != ifm_tensor->info()->num_dimensions()) { // This means that high dimension's value is 1 and ifm tensor is applied dim_correction const auto ifm = _ctx.at(ifm_index); - ifm_alloc->info()->set_tensor_shape( + ifm_tensor->info()->set_tensor_shape( acl_common::asTensorShape(ifm.shape(), _current_op_seq_layout, backend_layout, false)); } - const auto orig_indice_acl_tensor_shape = indices_alloc->info()->tensor_shape(); - if (k != indices_alloc->info()->num_dimensions()) + const auto orig_indice_acl_tensor_shape = indices_tensor->info()->tensor_shape(); + if (k != indices_tensor->info()->num_dimensions()) { // This means that high dimension's value is 1 and indices tensor is applied dim_correction const auto indices = _ctx.at(indices_index); - indices_alloc->info()->set_tensor_shape( + indices_tensor->info()->set_tensor_shape( acl_common::asTensorShape(indices.shape(), _current_op_seq_layout, backend_layout, false)); } - fn->configure(ifm_alloc->handle(), indices_alloc->handle(), ofm_alloc->handle(), axis); + fn->configure(ifm_tensor->handle(), indices_tensor->handle(), ofm_tensor->handle(), axis); // Revert disabling applied dim_correction - ifm_alloc->info()->set_tensor_shape(orig_ifm_acl_tensor_shape); - indices_alloc->info()->set_tensor_shape(orig_indice_acl_tensor_shape); + ifm_tensor->info()->set_tensor_shape(orig_ifm_acl_tensor_shape); + indices_tensor->info()->set_tensor_shape(orig_indice_acl_tensor_shape); auto acl_fn = asAclClFunction(std::move(fn)); @@ -1719,12 +1449,12 @@ void KernelGenerator::visit(const ir::operation::Neg &node) const auto ofm_index{node.getOutputs().at(0)}; const auto ifm_index{node.getInputs().at(ir::operation::Neg::Input::INPUT)}; - auto ofm_alloc = _tensor_builder->at(ofm_index).get(); - auto ifm_alloc = _tensor_builder->at(ifm_index).get(); + auto ofm_tensor = _tensor_builder->at(ofm_index).get(); + auto ifm_tensor = _tensor_builder->at(ifm_index).get(); auto fn = std::make_unique<::arm_compute::CLNeg>(); - fn->configure(ifm_alloc->handle(), ofm_alloc->handle()); + fn->configure(ifm_tensor->handle(), ofm_tensor->handle()); auto acl_fn = asAclClFunction(std::move(fn)); @@ -1736,15 +1466,15 @@ void KernelGenerator::visit(const ir::operation::Abs &node) const auto output_index{node.getOutputs().at(0)}; const auto input_index{node.getInputs().at(ir::operation::Abs::Input::INPUT)}; - auto output_alloc = _tensor_builder->at(output_index).get(); - auto input_alloc = _tensor_builder->at(input_index).get(); + auto output_tensor = 
_tensor_builder->at(output_index).get(); + auto input_tensor = _tensor_builder->at(input_index).get(); const ::arm_compute::ActivationLayerInfo act_info{ ::arm_compute::ActivationLayerInfo::ActivationFunction::ABS}; auto fn = std::make_unique<::arm_compute::CLActivationLayer>(); - fn->configure(input_alloc->handle(), output_alloc->handle(), act_info); + fn->configure(input_tensor->handle(), output_tensor->handle(), act_info); auto acl_fn = asAclClFunction(std::move(fn)); @@ -1761,11 +1491,11 @@ void KernelGenerator::visit(const ir::operation::ArgMax &node) assert((ifm_shape.rank() - 1) == ofm_shape.rank()); - auto ofm_alloc = _tensor_builder->at(ofm_index).get(); - auto ifm_alloc = _tensor_builder->at(ifm_index).get(); + auto ofm_tensor = _tensor_builder->at(ofm_index).get(); + auto ifm_tensor = _tensor_builder->at(ifm_index).get(); const auto ifm_rank = _ctx.at(ifm_index).shape().rank(); auto frontend_layout = _current_op_seq_layout; - auto backend_layout = ifm_alloc->layout(); + auto backend_layout = ifm_tensor->layout(); int axis_value = node.param().axis; if (axis_value < 0) @@ -1776,10 +1506,10 @@ void KernelGenerator::visit(const ir::operation::ArgMax &node) auto acl_axis = acl_common::ToARMComputeAxis(ifm_rank, axis_value, frontend_layout, backend_layout).value(); - auto fn = std::make_unique<::arm_compute::CLArgOperation>(); + auto fn = std::make_unique<::arm_compute::CLArgMinMaxLayer>(); - fn->configure(ifm_alloc->handle(), ofm_alloc->handle(), {acl_axis}, - ::arm_compute::ArgOperation::MAX); + fn->configure(ifm_tensor->handle(), acl_axis, ofm_tensor->handle(), + ::arm_compute::ReductionOperation::ARG_IDX_MAX); auto acl_fn = asAclClFunction(std::move(fn)); @@ -1791,12 +1521,12 @@ void KernelGenerator::visit(const ir::operation::Dequantize &node) const auto output_index{node.getOutputs().at(0)}; const auto input_index{node.getInputs().at(ir::operation::Dequantize::Input::INPUT)}; - auto output_alloc = _tensor_builder->at(output_index).get(); - auto input_alloc = _tensor_builder->at(input_index).get(); + auto output_tensor = _tensor_builder->at(output_index).get(); + auto input_tensor = _tensor_builder->at(input_index).get(); - auto fn = std::make_unique<::arm_compute::CLCast>(); + auto fn = std::make_unique<::arm_compute::CLDequantizationLayer>(); - fn->configure(input_alloc->handle(), output_alloc->handle(), arm_compute::SubDataType::NONE); + fn->configure(input_tensor->handle(), output_tensor->handle()); auto acl_fn = asAclClFunction(std::move(fn)); @@ -1814,15 +1544,15 @@ void KernelGenerator::visit(const ir::operation::LocalResponseNormalization &nod auto beta = node.param().beta; auto bias = node.param().bias; - auto ofm_alloc = _tensor_builder->at(ofm_index).get(); - auto ifm_alloc = _tensor_builder->at(ifm_index).get(); + auto ofm_tensor = _tensor_builder->at(ofm_index).get(); + auto ifm_tensor = _tensor_builder->at(ifm_index).get(); const auto norm_info = ::arm_compute::NormalizationLayerInfo( ::arm_compute::NormType::CROSS_MAP, radius * 2 + 1, alpha, beta, bias, false); auto fn = std::make_unique<::arm_compute::CLNormalizationLayer>(); - fn->configure(ifm_alloc->handle(), ofm_alloc->handle(), norm_info); + fn->configure(ifm_tensor->handle(), ofm_tensor->handle(), norm_info); auto acl_fn = asAclClFunction(std::move(fn)); @@ -1837,12 +1567,12 @@ void KernelGenerator::visit(const ir::operation::DepthToSpace &node) auto block_size = node.param().block_size; assert(block_size > 0); - auto output_alloc = _tensor_builder->at(output_index).get(); - auto input_alloc = 
_tensor_builder->at(input_index).get(); + auto output_tensor = _tensor_builder->at(output_index).get(); + auto input_tensor = _tensor_builder->at(input_index).get(); - auto fn = std::make_unique<::arm_compute::CLDepthToSpace>(); + auto fn = std::make_unique<::arm_compute::CLDepthToSpaceLayer>(); - fn->configure(input_alloc->handle(), output_alloc->handle(), block_size); + fn->configure(input_tensor->handle(), output_tensor->handle(), block_size); auto acl_fn = asAclClFunction(std::move(fn)); @@ -1860,13 +1590,13 @@ void KernelGenerator::visit(const ir::operation::Split &node) for (const auto &output : node.getOutputs()) output_indexes.emplace_back(output); - auto ifm_alloc = _tensor_builder->at(ifm_index).get(); - std::vector output_allocs; + auto ifm_tensor = _tensor_builder->at(ifm_index).get(); + std::vector output_tensors; for (const auto &ofm_ind : output_indexes) - output_allocs.emplace_back(_tensor_builder->at(ofm_ind).get()->handle()); + output_tensors.emplace_back(_tensor_builder->at(ofm_ind).get()->handle()); const auto frontend_layout = _current_op_seq_layout; - const auto backend_layout = ifm_alloc->layout(); + const auto backend_layout = ifm_tensor->layout(); auto axis = node.param().axis; if (axis < 0) axis += ifm_rank; @@ -1874,7 +1604,7 @@ void KernelGenerator::visit(const ir::operation::Split &node) auto fn = std::make_unique<::arm_compute::CLSplit>(); - fn->configure(ifm_alloc->handle(), output_allocs, axis); + fn->configure(ifm_tensor->handle(), output_tensors, axis); _return_fn = asAclClFunction(std::move(fn)); } @@ -1906,13 +1636,13 @@ void KernelGenerator::visit(const ir::operation::Unpack &node) for (const auto &output_index : output_indexes) { size_t output_rank = _ctx.at(output_index).shape().rank(); - const auto &output_alloc = _tensor_builder->at(output_index); - orig_outputs_acl_tensor_shapes.emplace_back(output_alloc->info()->tensor_shape()); - assert(output_rank == output_alloc->num_dimensions()); - if (output_rank != output_alloc->info()->num_dimensions()) + const auto &output_tensor = _tensor_builder->at(output_index); + orig_outputs_acl_tensor_shapes.emplace_back(output_tensor->info()->tensor_shape()); + assert(output_rank == output_tensor->num_dimensions()); + if (output_rank != output_tensor->info()->num_dimensions()) { // This means that high dimension's value is 1 and ifm tensor is applied dim_correction - output_alloc->info()->set_tensor_shape(acl_common::asTensorShape( + output_tensor->info()->set_tensor_shape(acl_common::asTensorShape( _ctx.at(output_index).shape(), _current_op_seq_layout, backend_layout, false)); } } @@ -1959,12 +1689,12 @@ void KernelGenerator::visit(const ir::operation::Pad &node) // Disable applied dim_correction size_t input_rank = _ctx.at(input_index).shape().rank(); - const auto &input_alloc = _tensor_builder->at(input_index); - assert(input_rank == input_alloc->num_dimensions()); - if (input_rank != input_alloc->info()->num_dimensions()) + const auto &input_tensor = _tensor_builder->at(input_index); + assert(input_rank == input_tensor->num_dimensions()); + if (input_rank != input_tensor->info()->num_dimensions()) { // This means that high dimension's value is 1 and ifm tensor is applied dim_correction - input_alloc->info()->set_tensor_shape(acl_common::asTensorShape( + input_tensor->info()->set_tensor_shape(acl_common::asTensorShape( _ctx.at(input_index).shape(), frontend_layout, backend_layout, false)); } @@ -1982,13 +1712,13 @@ void KernelGenerator::visit(const ir::operation::Min &node) const auto 
lhs_index{node.getInputs().at(ir::operation::Min::Input::LHS)}; const auto rhs_index{node.getInputs().at(ir::operation::Min::Input::RHS)}; - auto ofm_alloc = _tensor_builder->at(ofm_index).get(); - auto lhs_alloc = _tensor_builder->at(lhs_index).get(); - auto rhs_alloc = _tensor_builder->at(rhs_index).get(); + auto ofm_tensor = _tensor_builder->at(ofm_index).get(); + auto lhs_tensor = _tensor_builder->at(lhs_index).get(); + auto rhs_tensor = _tensor_builder->at(rhs_index).get(); auto fn = std::make_unique<::arm_compute::CLElementwiseMin>(); - fn->configure(lhs_alloc->handle(), rhs_alloc->handle(), ofm_alloc->handle()); + fn->configure(lhs_tensor->handle(), rhs_tensor->handle(), ofm_tensor->handle()); auto acl_fn = asAclClFunction(std::move(fn)); @@ -2001,13 +1731,13 @@ void KernelGenerator::visit(const ir::operation::Max &node) const auto lhs_index{node.getInputs().at(ir::operation::Max::Input::LHS)}; const auto rhs_index{node.getInputs().at(ir::operation::Max::Input::RHS)}; - auto ofm_alloc = _tensor_builder->at(ofm_index).get(); - auto lhs_alloc = _tensor_builder->at(lhs_index).get(); - auto rhs_alloc = _tensor_builder->at(rhs_index).get(); + auto ofm_tensor = _tensor_builder->at(ofm_index).get(); + auto lhs_tensor = _tensor_builder->at(lhs_index).get(); + auto rhs_tensor = _tensor_builder->at(rhs_index).get(); auto fn = std::make_unique<::arm_compute::CLElementwiseMax>(); - fn->configure(lhs_alloc->handle(), rhs_alloc->handle(), ofm_alloc->handle()); + fn->configure(lhs_tensor->handle(), rhs_tensor->handle(), ofm_tensor->handle()); auto acl_fn = asAclClFunction(std::move(fn)); @@ -2019,12 +1749,12 @@ void KernelGenerator::visit(const ir::operation::ConvertFp32ToFp16 &node) const auto ofm_index{node.getOutputs().at(0)}; const auto ifm_index{node.getInputs().at(ir::operation::ConvertFp32ToFp16::Input::INPUT)}; - auto ofm_alloc = _tensor_builder->at(ofm_index).get(); - auto ifm_alloc = _tensor_builder->at(ifm_index).get(); + auto ofm_tensor = _tensor_builder->at(ofm_index).get(); + auto ifm_tensor = _tensor_builder->at(ifm_index).get(); auto fn = std::make_unique<::arm_compute::CLDepthConvertLayer>(); - fn->configure(ifm_alloc->handle(), ofm_alloc->handle(), ::arm_compute::ConvertPolicy::SATURATE, + fn->configure(ifm_tensor->handle(), ofm_tensor->handle(), ::arm_compute::ConvertPolicy::SATURATE, 0); auto acl_fn = asAclClFunction(std::move(fn)); @@ -2037,12 +1767,12 @@ void KernelGenerator::visit(const ir::operation::ConvertFp16ToFp32 &node) const auto ofm_index{node.getOutputs().at(0)}; const auto ifm_index{node.getInputs().at(ir::operation::ConvertFp16ToFp32::Input::INPUT)}; - auto ofm_alloc = _tensor_builder->at(ofm_index).get(); - auto ifm_alloc = _tensor_builder->at(ifm_index).get(); + auto ofm_tensor = _tensor_builder->at(ofm_index).get(); + auto ifm_tensor = _tensor_builder->at(ifm_index).get(); auto fn = std::make_unique<::arm_compute::CLDepthConvertLayer>(); - fn->configure(ifm_alloc->handle(), ofm_alloc->handle(), ::arm_compute::ConvertPolicy::SATURATE, + fn->configure(ifm_tensor->handle(), ofm_tensor->handle(), ::arm_compute::ConvertPolicy::SATURATE, 0); auto acl_fn = asAclClFunction(std::move(fn)); diff --git a/runtime/onert/backend/acl_common/AclKernelGen.h b/runtime/onert/backend/acl_common/AclKernelGen.h new file mode 100644 index 0000000..9f7ce37 --- /dev/null +++ b/runtime/onert/backend/acl_common/AclKernelGen.h @@ -0,0 +1,315 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. 
All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __ONERT_BACKEND_ACL_COMMON_ACL_KERNEL_GEN_H_ +#define __ONERT_BACKEND_ACL_COMMON_ACL_KERNEL_GEN_H_ + +#include +#include + +#include +#include + +namespace onert +{ +namespace backend +{ +namespace acl_common +{ + +template +std::unique_ptr +kernelGenLSTM(const ir::operation::LSTM &node, const ir::Operands &operands, + const std::shared_ptr &tensor_builder) +{ + // TODO Support dynamic rnn + // TODO Fix subtle error in the case of non-CIFG, non-peephole and No Projection. + const auto scratch_buffer_index{ + node.getOutputs().at(ir::operation::LSTM::Output::SCRATCH_BUFFER)}; + const auto output_state_out_index{ + node.getOutputs().at(ir::operation::LSTM::Output::OUTPUT_STATE_OUT)}; + const auto cell_state_out_index{ + node.getOutputs().at(ir::operation::LSTM::Output::CELL_STATE_OUT)}; + const auto output_index{node.getOutputs().at(ir::operation::LSTM::Output::OUTPUT)}; + + const auto input_index{node.getInputs().at(ir::operation::LSTM::Input::INPUT)}; + const auto input_to_input_weights_index{ + node.getInputs().at(ir::operation::LSTM::Input::INPUT_TO_INPUT_WEIGHTS)}; // optional + const auto input_to_forget_weights_index{ + node.getInputs().at(ir::operation::LSTM::Input::INPUT_TO_FORGET_WEIGHTS)}; + const auto input_to_cell_weights_index{ + node.getInputs().at(ir::operation::LSTM::Input::INPUT_TO_CELL_WEIGHTS)}; + const auto input_to_output_weights_index{ + node.getInputs().at(ir::operation::LSTM::Input::INPUT_TO_OUTPUT_WEIGHTS)}; + const auto recurrent_to_input_weights_index{ + node.getInputs().at(ir::operation::LSTM::Input::RECURRENT_TO_INPUT_WEIGHTS)}; // optional + const auto recurrent_to_forget_weights_index{ + node.getInputs().at(ir::operation::LSTM::Input::RECURRENT_TO_FORGET_WEIGHTS)}; + const auto recurrent_to_cell_weights_index{ + node.getInputs().at(ir::operation::LSTM::Input::RECURRENT_TO_CELL_WEIGHTS)}; + const auto recurrent_to_output_weights_index{ + node.getInputs().at(ir::operation::LSTM::Input::RECURRENT_TO_OUTPUT_WEIGHTS)}; + const auto cell_to_input_weights_index{ + node.getInputs().at(ir::operation::LSTM::Input::CELL_TO_INPUT_WEIGHTS)}; // optional + const auto cell_to_forget_weights_index{ + node.getInputs().at(ir::operation::LSTM::Input::CELL_TO_FORGET_WEIGHTS)}; // optional + const auto cell_to_output_weights_index{ + node.getInputs().at(ir::operation::LSTM::Input::CELL_TO_OUTPUT_WEIGHTS)}; // optional + const auto input_gate_bias_index{ + node.getInputs().at(ir::operation::LSTM::Input::INPUT_GATE_BIAS)}; + const auto forget_gate_bias_index{ + node.getInputs().at(ir::operation::LSTM::Input::FORGET_GATE_BIAS)}; + const auto cell_bias_index{node.getInputs().at(ir::operation::LSTM::Input::CELL_BIAS)}; + const auto output_gate_bias_index{ + node.getInputs().at(ir::operation::LSTM::Input::OUTPUT_GATE_BIAS)}; + const auto projection_weights_index{ + node.getInputs().at(ir::operation::LSTM::Input::PROJECTION_WEIGHTS)}; // optional + const auto projection_bias_index{ + 
node.getInputs().at(ir::operation::LSTM::Input::PROJECTION_BIAS)}; // optional + const auto output_state_in_index{ + node.getInputs().at(ir::operation::LSTM::Input::OUTPUT_STATE_IN)}; + const auto cell_state_in_index{node.getInputs().at(ir::operation::LSTM::Input::CELL_STATE_IN)}; + const auto cell_threshold = node.param().cell_threshold; + const auto projection_threshold = node.param().projection_threshold; + + bool has_input_to_input_weights = operands.at(input_to_input_weights_index).shape().dim(0) != 0 && + operands.at(input_to_input_weights_index).shape().dim(1) != 0; + bool has_recurrent_to_input_weights = + operands.at(recurrent_to_input_weights_index).shape().dim(0) != 0 && + operands.at(recurrent_to_input_weights_index).shape().dim(1) != 0; + bool has_cell_to_forget_weights = operands.at(cell_to_forget_weights_index).shape().dim(0) != 0; + bool has_cell_to_output_weights = operands.at(cell_to_output_weights_index).shape().dim(0) != 0; + bool has_projection_weights = operands.at(projection_weights_index).shape().dim(0) != 0 && + operands.at(projection_weights_index).shape().dim(1) != 0; + bool has_projection_bias = operands.at(projection_bias_index).shape().dim(0); + + // NOTE The input_to_input_weights and the recurrent_to_input_weights do not exist in CIFG. + // true: no CIFG + // false: CIFG + // NOTE The cell_to_input_weights does not exist in non-peephole although regular LSTM(non-CIFG). + bool has_cifg_param = has_input_to_input_weights && has_recurrent_to_input_weights; + + // NOTE The cell_to_forget_weights and the cell_to_output_weights exist in peephole. + // But the cell_to_input_weights does not exist in regular CIFG although peephole. + // true: peephole + // false: no peephole + bool has_peephole_param = has_cell_to_forget_weights && has_cell_to_output_weights; + + // NOTE Although the projection weights has data the projection bias may not have data. 
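  // Put together, the optional-feature flags computed in this block read as follows
  // (a summary, not an additional runtime check):
  //   - CIFG is in effect only when the input-to-input and recurrent-to-input weights
  //     are absent (has_cifg_param == false);
  //   - peephole connections are in effect when both cell-to-forget and cell-to-output
  //     weights are present;
  //   - projection is in effect when projection weights are present, while the
  //     projection bias stays optional either way.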
+ bool has_projection_param = has_projection_weights; + + const auto activation = node.param().activation; + const auto cell_clip = cell_threshold; + const auto projection_clip = projection_threshold; + assert(cell_clip >= 0.f && projection_clip >= 0.f); + + auto scratch_buffer_tensor = tensor_builder->at(scratch_buffer_index).get(); + auto output_state_out_tensor = tensor_builder->at(output_state_out_index).get(); + auto cell_state_out_tensor = tensor_builder->at(cell_state_out_index).get(); + auto output_tensor = tensor_builder->at(output_index).get(); + + auto input_tensor = tensor_builder->at(input_index).get(); + + auto input_to_forget_weights_tensor = tensor_builder->at(input_to_forget_weights_index).get(); + auto input_to_cell_weights_tensor = tensor_builder->at(input_to_cell_weights_index).get(); + auto input_to_output_weights_tensor = tensor_builder->at(input_to_output_weights_index).get(); + auto recurrent_to_forget_weights_tensor = + tensor_builder->at(recurrent_to_forget_weights_index).get(); + auto recurrent_to_cell_weights_tensor = tensor_builder->at(recurrent_to_cell_weights_index).get(); + auto recurrent_to_output_weights_tensor = + tensor_builder->at(recurrent_to_output_weights_index).get(); + + auto forget_gate_bias_tensor = tensor_builder->at(forget_gate_bias_index).get(); + auto cell_bias_tensor = tensor_builder->at(cell_bias_index).get(); + auto output_gate_bias_tensor = tensor_builder->at(output_gate_bias_index).get(); + auto output_state_in_tensor = tensor_builder->at(output_state_in_index).get(); + auto cell_state_in_tensor = tensor_builder->at(cell_state_in_index).get(); + + auto act_info = ::onert::backend::acl_common::asActivationLayerInfo(activation); + + auto fn = std::make_unique(); + + ::arm_compute::LSTMParams lstm_params{}; + if (has_cifg_param) + { + auto input_to_input_weights_tensor = + tensor_builder->at(input_to_input_weights_index).get(); // optional + auto recurrent_to_input_weights_tensor = + tensor_builder->at(recurrent_to_input_weights_index).get(); // optional + auto cell_to_input_weights_handle = + has_peephole_param ? tensor_builder->at(cell_to_input_weights_index).get()->handle() + : nullptr; // optional (non-cifg && peephole) + auto input_gate_bias_tensor = tensor_builder->at(input_gate_bias_index).get(); // optional + lstm_params.set_cifg_params(input_to_input_weights_tensor->handle(), + recurrent_to_input_weights_tensor->handle(), + cell_to_input_weights_handle, input_gate_bias_tensor->handle()); + } + if (has_peephole_param) + { + auto cell_to_forget_weights_tensor = + tensor_builder->at(cell_to_forget_weights_index).get(); // optional + auto cell_to_output_weights_tensor = + tensor_builder->at(cell_to_output_weights_index).get(); // optional + lstm_params.set_peephole_params(cell_to_forget_weights_tensor->handle(), + cell_to_output_weights_tensor->handle()); + } + if (has_projection_param) + { + auto projection_weights_tensor = tensor_builder->at(projection_weights_index).get(); // optional + auto projection_bias_handle = has_projection_bias + ? 
tensor_builder->at(projection_bias_index).get()->handle() + : nullptr; // optional + lstm_params.set_projection_params(projection_weights_tensor->handle(), projection_bias_handle); + } + + fn->configure(input_tensor->handle(), input_to_forget_weights_tensor->handle(), + input_to_cell_weights_tensor->handle(), input_to_output_weights_tensor->handle(), + recurrent_to_forget_weights_tensor->handle(), + recurrent_to_cell_weights_tensor->handle(), + recurrent_to_output_weights_tensor->handle(), forget_gate_bias_tensor->handle(), + cell_bias_tensor->handle(), output_gate_bias_tensor->handle(), + output_state_in_tensor->handle(), cell_state_in_tensor->handle(), + scratch_buffer_tensor->handle(), output_state_out_tensor->handle(), + cell_state_out_tensor->handle(), output_tensor->handle(), lstm_params, act_info, + cell_clip, projection_clip); + + return std::make_unique(std::move(fn)); +} + +template +std::unique_ptr +kernelGenFullyConnected(const ir::operation::FullyConnected &node, const ir::Operands &operands, + const std::shared_ptr &tensor_builder, ir::Layout layout) +{ + using ir::operation::FullyConnected; + + const auto output_index{node.getOutputs().at(0)}; + const auto input_index{node.getInputs().at(FullyConnected::Input::INPUT)}; + const auto weight_index{node.getInputs().at(FullyConnected::Input::WEIGHT)}; + const auto bias_index{node.getInputs().at(FullyConnected::Input::BIAS)}; + + const auto input_rank = operands.at(input_index).shape().rank(); + + const auto output_size = + operands.at(output_index).shape().dim(operands.at(output_index).shape().rank() - 1); + UNUSED_RELEASE(output_size); + assert(operands.at(bias_index).shape().dim(0) == output_size); + assert(operands.at(weight_index).shape().dim(0) == output_size); + const auto batch_size = + operands.at(output_index).shape().dim(operands.at(output_index).shape().rank() - 2); + const auto input_size = + operands.at(weight_index).shape().dim(operands.at(weight_index).shape().rank() - 1); + + // Check for reshaping input's shape into rank-2 + bool needs_reshape = false; + ir::Shape reshape(2); + if (input_rank == 3 || input_rank == 4) + { + const auto &ifm_shape = operands.at(input_index).shape(); + auto feature_size = 1; + for (int i = 0; i < ifm_shape.rank(); ++i) + { + feature_size *= ifm_shape.dim(i); + } + + UNUSED_RELEASE(feature_size); + assert(feature_size == batch_size * input_size); + + // for reshaping + needs_reshape = true; + reshape.dim(0) = batch_size; /* H */ + reshape.dim(1) = input_size; /* W */ + } + + auto output_tensor = tensor_builder->at(output_index).get(); + const auto input_tensor = tensor_builder->at(input_index).get(); + const auto weight_tensor = tensor_builder->at(weight_index).get(); + const auto bias_tensor = tensor_builder->at(bias_index).get(); + const auto frontend_layout = layout; + const auto acl_layout = output_tensor->handle()->info()->data_layout(); + + auto fn = + std::make_unique(tensor_builder->acl_tensor_manager()->internal_buffer_manager()); + + typename T_ACLLayer::KernelType kernel_type = T_ACLLayer::KernelType::GENERAL; + if (operands.at(weight_index).isConstant()) + { + kernel_type = T_ACLLayer::KernelType::PREPROCESSED_WEIGHTS; + assert(operands.at(weight_index).data()); + } + + fn->configure( + input_tensor->handle(), weight_tensor->handle(), bias_tensor->handle(), + output_tensor->handle(), needs_reshape, + ::onert::backend::acl_common::asTensorShape( + reshape, frontend_layout, ::onert::backend::acl_common::asRuntimeLayout(acl_layout)), + kernel_type); + + return 
std::make_unique(std::move(fn)); +} + +template +std::unique_ptr<::arm_compute::IFunction> +kernelGenPool2D(const T_PoolOp &node, const ir::Operands &operands, + const std::shared_ptr &tensor_builder, ir::Layout layout, + ::arm_compute::PoolingType pooling_type) +{ + const auto ofm_index{node.getOutputs().at(0)}; + const auto ifm_index{node.getInputs().at(0)}; + + const auto ofm_shape = operands.at(ofm_index).shape().asFeature(layout); + const auto ifm_shape = operands.at(ifm_index).shape().asFeature(layout); + + const auto kh = node.param().kh; + const auto kw = node.param().kw; + const auto stride = node.param().stride; + const auto padding = + ir::calculatePadding(node.param().padding, ifm_shape, ofm_shape, stride, kw, kh); + + VERBOSE(Pool2DParam) << "IFM_H: " << ifm_shape.H << std::endl; + VERBOSE(Pool2DParam) << "IFM_W: " << ifm_shape.W << std::endl; + VERBOSE(Pool2DParam) << "OFM_H: " << ofm_shape.H << std::endl; + VERBOSE(Pool2DParam) << "OFM_W: " << ofm_shape.W << std::endl; + VERBOSE(Pool2DParam) << "KER_H: " << kh << std::endl; + VERBOSE(Pool2DParam) << "KER_W: " << kw << std::endl; + VERBOSE(Pool2DParam) << "STRIDE_H: " << stride.vertical << std::endl; + VERBOSE(Pool2DParam) << "STRIDE_W: " << stride.horizontal << std::endl; + VERBOSE(Pool2DParam) << "PAD(T): " << padding.top << std::endl; + VERBOSE(Pool2DParam) << "PAD(B): " << padding.bottom << std::endl; + VERBOSE(Pool2DParam) << "PAD(L): " << padding.left << std::endl; + VERBOSE(Pool2DParam) << "PAD(R): " << padding.right << std::endl; + + auto ofm_tensor = tensor_builder->at(ofm_index).get(); + auto ifm_tensor = tensor_builder->at(ifm_index).get(); + + ::arm_compute::PoolingLayerInfo info{ + pooling_type, ::arm_compute::Size2D{kw, kh}, ifm_tensor->info()->data_layout(), + acl_common::asPadStrideInfo(padding, stride), true /* exclude_padding */}; + + auto fn = std::make_unique(); + + fn->configure(ifm_tensor->handle(), ofm_tensor->handle(), info); + + return fn; +} + +} // namespace acl_common +} // namespace backend +} // namespace onert + +#endif // __ONERT_BACKEND_ACL_COMMON_ACL_KERNEL_GEN_H_ diff --git a/runtime/onert/backend/acl_neon/KernelGenerator.cc b/runtime/onert/backend/acl_neon/KernelGenerator.cc index e471867..1195b83 100644 --- a/runtime/onert/backend/acl_neon/KernelGenerator.cc +++ b/runtime/onert/backend/acl_neon/KernelGenerator.cc @@ -31,6 +31,7 @@ #include "exec/NopFunction.h" #include "util/logging.h" #include "util/Utils.h" +#include "AclKernelGen.h" namespace onert { @@ -74,15 +75,15 @@ void KernelGenerator::visit(const ir::operation::Abs &node) const auto output_index{node.getOutputs().at(0)}; const auto input_index{node.getInputs().at(ir::operation::Abs::Input::INPUT)}; - auto output_alloc = _tensor_builder->at(output_index).get(); - auto input_alloc = _tensor_builder->at(input_index).get(); + auto output_tensor = _tensor_builder->at(output_index).get(); + auto input_tensor = _tensor_builder->at(input_index).get(); const ::arm_compute::ActivationLayerInfo act_info{ ::arm_compute::ActivationLayerInfo::ActivationFunction::ABS}; auto fn = std::make_unique<::arm_compute::NEActivationLayer>(); - fn->configure(input_alloc->handle(), output_alloc->handle(), act_info); + fn->configure(input_tensor->handle(), output_tensor->handle(), act_info); auto acl_fn = asAclFunction(std::move(fn)); @@ -96,10 +97,10 @@ void KernelGenerator::visit(const ir::operation::ArgMax &node) const auto ifm_rank = _ctx.at(ifm_index).shape().rank(); - auto ofm_alloc = _tensor_builder->at(ofm_index).get(); - auto ifm_alloc = 
_tensor_builder->at(ifm_index).get(); + auto ofm_tensor = _tensor_builder->at(ofm_index).get(); + auto ifm_tensor = _tensor_builder->at(ifm_index).get(); auto frontend_layout = _current_op_seq_layout; - auto backend_layout = ifm_alloc->layout(); + auto backend_layout = ifm_tensor->layout(); int axis_value = node.param().axis; if (axis_value < 0) @@ -112,7 +113,7 @@ void KernelGenerator::visit(const ir::operation::ArgMax &node) auto fn = std::make_unique<::arm_compute::NEArgMinMaxLayer>(); - fn->configure(ifm_alloc->handle(), fixed_axis, ofm_alloc->handle(), + fn->configure(ifm_tensor->handle(), fixed_axis, ofm_tensor->handle(), arm_compute::ReductionOperation::ARG_IDX_MAX); auto acl_fn = asAclFunction(std::move(fn)); @@ -127,15 +128,15 @@ void KernelGenerator::visit(const ir::operation::BatchToSpaceND &node) const auto block_size_index{ node.getInputs().at(ir::operation::BatchToSpaceND::Input::BLOCK_SIZE)}; - auto ofm_alloc = _tensor_builder->at(ofm_index).get(); - auto ifm_alloc = _tensor_builder->at(ifm_index).get(); - auto block_size_alloc = _tensor_builder->at(block_size_index).get(); + auto ofm_tensor = _tensor_builder->at(ofm_index).get(); + auto ifm_tensor = _tensor_builder->at(ifm_index).get(); + auto block_size_tensor = _tensor_builder->at(block_size_index).get(); assert(_ctx.at(block_size_index).data()); auto fn = std::make_unique<::arm_compute::NEBatchToSpaceLayer>(); - fn->configure(ifm_alloc->handle(), block_size_alloc->handle(), ofm_alloc->handle()); + fn->configure(ifm_tensor->handle(), block_size_tensor->handle(), ofm_tensor->handle()); auto acl_fn = asAclFunction(std::move(fn)); @@ -147,15 +148,26 @@ void KernelGenerator::visit(const ir::operation::Cast &node) const auto ofm_index{node.getOutputs().at(0)}; const auto ifm_index{node.getInputs().at(ir::operation::Cast::Input::INPUT)}; - auto ofm_alloc = _tensor_builder->at(ofm_index).get(); - auto ifm_alloc = _tensor_builder->at(ifm_index).get(); + auto ofm_tensor = _tensor_builder->at(ofm_index).get(); + auto ifm_tensor = _tensor_builder->at(ifm_index).get(); - auto fn = std::make_unique<::arm_compute::NECast>(); + std::unique_ptr<::arm_compute::IFunction> fn; + if (ifm_tensor->data_type() == ofm_tensor->data_type()) + { + auto l = std::make_unique<::arm_compute::NECopy>(); - auto input_sub_type = _ctx.at(ifm_index).typeInfo().type() == ir::DataType::BOOL8 - ? 
arm_compute::SubDataType::BOOL - : arm_compute::SubDataType::NONE; - fn->configure(ifm_alloc->handle(), ofm_alloc->handle(), input_sub_type); + l->configure(ifm_tensor->handle(), ofm_tensor->handle()); + + fn = std::move(l); + } + else + { + auto l = std::make_unique<::arm_compute::NECast>(); + + l->configure(ifm_tensor->handle(), ofm_tensor->handle(), arm_compute::ConvertPolicy::SATURATE); + + fn = std::move(l); + } auto acl_fn = asAclFunction(std::move(fn)); @@ -183,10 +195,10 @@ void KernelGenerator::visit(const ir::operation::Conv2D &node) ker_width, ker_height); const auto activation = node.param().activation; - auto ofm_alloc = _tensor_builder->at(ofm_index).get(); - auto ifm_alloc = _tensor_builder->at(ifm_index).get(); - auto ker_alloc = _tensor_builder->at(ker_index).get(); - auto bias_alloc = _tensor_builder->at(bias_index).get(); + auto ofm_tensor = _tensor_builder->at(ofm_index).get(); + auto ifm_tensor = _tensor_builder->at(ifm_index).get(); + auto ker_tensor = _tensor_builder->at(ker_index).get(); + auto bias_tensor = _tensor_builder->at(bias_index).get(); const auto conv_info = acl_common::asPadStrideInfo(padding, stride); const auto act_info = acl_common::asActivationLayerInfo(activation); @@ -194,8 +206,9 @@ void KernelGenerator::visit(const ir::operation::Conv2D &node) auto fn = std::make_unique<::arm_compute::NEConvolutionLayer>( _tensor_builder->acl_tensor_manager()->internal_buffer_manager()); - fn->configure(ifm_alloc->handle(), ker_alloc->handle(), bias_alloc->handle(), ofm_alloc->handle(), - conv_info, ::arm_compute::WeightsInfo(), ::arm_compute::Size2D(1U, 1U), act_info); + fn->configure(ifm_tensor->handle(), ker_tensor->handle(), bias_tensor->handle(), + ofm_tensor->handle(), conv_info, ::arm_compute::WeightsInfo(), + ::arm_compute::Size2D(1U, 1U), act_info); _return_fn = asAclFunction(std::move(fn)); } @@ -208,12 +221,12 @@ void KernelGenerator::visit(const ir::operation::DepthToSpace &node) auto block_size = node.param().block_size; assert(block_size > 0); - auto output_alloc = _tensor_builder->at(output_index).get(); - auto input_alloc = _tensor_builder->at(input_index).get(); + auto output_tensor = _tensor_builder->at(output_index).get(); + auto input_tensor = _tensor_builder->at(input_index).get(); - auto fn = std::make_unique<::arm_compute::NEDepthToSpaceLayerEx>(); + auto fn = std::make_unique<::arm_compute::NEDepthToSpaceLayer>(); - fn->configure(input_alloc->handle(), output_alloc->handle(), block_size); + fn->configure(input_tensor->handle(), output_tensor->handle(), block_size); auto acl_fn = asAclFunction(std::move(fn)); @@ -242,10 +255,10 @@ void KernelGenerator::visit(const ir::operation::DepthwiseConv2D &node) const auto multiplier = node.param().multiplier; const auto activation = node.param().activation; - auto ofm_alloc = _tensor_builder->at(ofm_index).get(); - auto ifm_alloc = _tensor_builder->at(ifm_index).get(); - auto ker_alloc = _tensor_builder->at(ker_index).get(); - auto bias_alloc = _tensor_builder->at(bias_index).get(); + auto ofm_tensor = _tensor_builder->at(ofm_index).get(); + auto ifm_tensor = _tensor_builder->at(ifm_index).get(); + auto ker_tensor = _tensor_builder->at(ker_index).get(); + auto bias_tensor = _tensor_builder->at(bias_index).get(); const auto conv_info = acl_common::asPadStrideInfo(padding, stride); const auto act_info = acl_common::asActivationLayerInfo(activation); @@ -253,8 +266,8 @@ void KernelGenerator::visit(const ir::operation::DepthwiseConv2D &node) { auto fn = 
std::make_unique<::arm_compute::NEDepthwiseConvolutionLayer>(); - fn->configure(ifm_alloc->handle(), ker_alloc->handle(), bias_alloc->handle(), - ofm_alloc->handle(), conv_info, multiplier, act_info); + fn->configure(ifm_tensor->handle(), ker_tensor->handle(), bias_tensor->handle(), + ofm_tensor->handle(), conv_info, multiplier, act_info); _return_fn = asAclFunction(std::move(fn)); } @@ -265,12 +278,12 @@ void KernelGenerator::visit(const ir::operation::Dequantize &node) const auto output_index{node.getOutputs().at(0)}; const auto input_index{node.getInputs().at(ir::operation::Dequantize::Input::INPUT)}; - auto output_alloc = _tensor_builder->at(output_index).get(); - auto input_alloc = _tensor_builder->at(input_index).get(); + auto output_tensor = _tensor_builder->at(output_index).get(); + auto input_tensor = _tensor_builder->at(input_index).get(); auto fn = std::make_unique<::arm_compute::NEDequantizationLayer>(); - fn->configure(input_alloc->handle(), output_alloc->handle()); + fn->configure(input_tensor->handle(), output_tensor->handle()); auto acl_fn = asAclFunction(std::move(fn)); @@ -279,88 +292,28 @@ void KernelGenerator::visit(const ir::operation::Dequantize &node) void KernelGenerator::visit(const ir::operation::MaxPool2D &node) { - const auto ofm_index{node.getOutputs().at(0)}; - const auto ifm_index{node.getInputs().at(ir::operation::MaxPool2D::Input::INPUT)}; - - const auto ofm_shape = _ctx.at(ofm_index).shape().asFeature(_current_op_seq_layout); - const auto ifm_shape = _ctx.at(ifm_index).shape().asFeature(_current_op_seq_layout); + auto raw_fn = acl_common::kernelGenPool2D<::arm_compute::NEPoolingLayer>( + node, _ctx, _tensor_builder, _current_op_seq_layout, ::arm_compute::PoolingType::MAX); - const auto kh = node.param().kh; - const auto kw = node.param().kw; - const auto stride = node.param().stride; - const auto padding = - ir::calculatePadding(node.param().padding, ifm_shape, ofm_shape, stride, kw, kh); + const auto ofm_index{node.getOutputs().at(0)}; + auto ofm_tensor = _tensor_builder->at(ofm_index).get(); const auto activation = node.param().activation; - - VERBOSE(MaxPool2D) << "IFM_H: " << ifm_shape.H << std::endl; - VERBOSE(MaxPool2D) << "IFM_W: " << ifm_shape.W << std::endl; - VERBOSE(MaxPool2D) << "OFM_H: " << ofm_shape.H << std::endl; - VERBOSE(MaxPool2D) << "OFM_W: " << ofm_shape.W << std::endl; - VERBOSE(MaxPool2D) << "KER_H: " << kh << std::endl; - VERBOSE(MaxPool2D) << "KER_W: " << kw << std::endl; - VERBOSE(MaxPool2D) << "STRIDE_H: " << stride.vertical << std::endl; - VERBOSE(MaxPool2D) << "STRIDE_W: " << stride.horizontal << std::endl; - VERBOSE(MaxPool2D) << "PAD(T): " << padding.top << std::endl; - VERBOSE(MaxPool2D) << "PAD(B): " << padding.bottom << std::endl; - VERBOSE(MaxPool2D) << "PAD(L): " << padding.left << std::endl; - VERBOSE(MaxPool2D) << "PAD(R): " << padding.right << std::endl; - - auto ofm_alloc = _tensor_builder->at(ofm_index).get(); - auto ifm_alloc = _tensor_builder->at(ifm_index).get(); - - ::arm_compute::PoolingLayerInfo info{::arm_compute::PoolingType::MAX, - ::arm_compute::Size2D{kw, kh}, - acl_common::asPadStrideInfo(padding, stride)}; - - auto fn = std::make_unique<::arm_compute::NEPoolingLayer>(); - - fn->configure(ifm_alloc->handle(), ofm_alloc->handle(), info); - _return_fn = std::make_unique( - asAclFunction(std::move(fn)), ActivationBuilder::generate(activation, ofm_alloc->handle())); + asAclFunction(std::move(raw_fn)), + ActivationBuilder::generate(activation, ofm_tensor->handle())); } void KernelGenerator::visit(const 
ir::operation::AvgPool2D &node) { - const auto ofm_index{node.getOutputs().at(0)}; - const auto ifm_index{node.getInputs().at(ir::operation::AvgPool2D::Input::INPUT)}; - - const auto ofm_shape = _ctx.at(ofm_index).shape().asFeature(_current_op_seq_layout); - const auto ifm_shape = _ctx.at(ifm_index).shape().asFeature(_current_op_seq_layout); + auto raw_fn = acl_common::kernelGenPool2D<::arm_compute::NEPoolingLayer>( + node, _ctx, _tensor_builder, _current_op_seq_layout, ::arm_compute::PoolingType::AVG); - const auto kh = node.param().kh; - const auto kw = node.param().kw; - const auto stride = node.param().stride; - const auto padding = - ir::calculatePadding(node.param().padding, ifm_shape, ofm_shape, stride, kw, kh); + const auto ofm_index{node.getOutputs().at(0)}; + auto ofm_tensor = _tensor_builder->at(ofm_index).get(); const auto activation = node.param().activation; - - VERBOSE(AvgPool2D) << "IFM_H: " << ifm_shape.H << std::endl; - VERBOSE(AvgPool2D) << "IFM_W: " << ifm_shape.W << std::endl; - VERBOSE(AvgPool2D) << "OFM_H: " << ofm_shape.H << std::endl; - VERBOSE(AvgPool2D) << "OFM_W: " << ofm_shape.W << std::endl; - VERBOSE(AvgPool2D) << "KER_H: " << kh << std::endl; - VERBOSE(AvgPool2D) << "KER_W: " << kw << std::endl; - VERBOSE(AvgPool2D) << "STRIDE_H: " << stride.vertical << std::endl; - VERBOSE(AvgPool2D) << "STRIDE_W: " << stride.horizontal << std::endl; - VERBOSE(AvgPool2D) << "PAD(T): " << padding.top << std::endl; - VERBOSE(AvgPool2D) << "PAD(B): " << padding.bottom << std::endl; - VERBOSE(AvgPool2D) << "PAD(L): " << padding.left << std::endl; - VERBOSE(AvgPool2D) << "PAD(R): " << padding.right << std::endl; - - auto ofm_alloc = _tensor_builder->at(ofm_index).get(); - auto ifm_alloc = _tensor_builder->at(ifm_index).get(); - - ::arm_compute::PoolingLayerInfo info{ - ::arm_compute::PoolingType::AVG, ::arm_compute::Size2D{kw, kh}, - acl_common::asPadStrideInfo(padding, stride), true /* exclude_padding */}; - - auto fn = std::make_unique<::arm_compute::NEPoolingLayer>(); - - fn->configure(ifm_alloc->handle(), ofm_alloc->handle(), info); - _return_fn = std::make_unique( - asAclFunction(std::move(fn)), ActivationBuilder::generate(activation, ofm_alloc->handle())); + asAclFunction(std::move(raw_fn)), + ActivationBuilder::generate(activation, ofm_tensor->handle())); } void KernelGenerator::visit(const ir::operation::Concat &node) @@ -383,7 +336,7 @@ void KernelGenerator::visit(const ir::operation::Concat &node) return; } - auto output_alloc = _tensor_builder->at(ofm_index).get(); + auto output_tensor = _tensor_builder->at(ofm_index).get(); std::vector<::arm_compute::ITensor *> input_tensors; for (const auto &ifm_ind : input_indexes) input_tensors.emplace_back(_tensor_builder->at(ifm_ind)->handle()); @@ -392,7 +345,7 @@ void KernelGenerator::visit(const ir::operation::Concat &node) if (input_indexes.size() < 2) { auto l = std::make_unique<::arm_compute::NECopy>(); - l->configure(input_tensors.at(0), output_alloc->handle()); + l->configure(input_tensors.at(0), output_tensor->handle()); fn = std::move(l); } else @@ -400,10 +353,10 @@ void KernelGenerator::visit(const ir::operation::Concat &node) auto l = std::make_unique<::arm_compute::NEConcatenateLayer>(); const auto rank = _ctx.at(ofm_index).shape().rank(); const auto frontend_layout = _current_op_seq_layout; - const auto backend_layout = output_alloc->layout(); + const auto backend_layout = output_tensor->layout(); const auto fixed_axis = acl_common::ToARMComputeAxis(rank, axis, frontend_layout, backend_layout).value(); - 
l->configure(input_tensors, output_alloc->handle(), fixed_axis); + l->configure(input_tensors, output_tensor->handle(), fixed_axis); fn = std::move(l); } @@ -418,13 +371,13 @@ void KernelGenerator::visit(const ir::operation::EmbeddingLookup &node) const auto lookups_index{node.getInputs().at(ir::operation::EmbeddingLookup::Input::LOOKUPS)}; const auto values_index{node.getInputs().at(ir::operation::EmbeddingLookup::Input::VALUES)}; - auto output_alloc = _tensor_builder->at(output_index).get(); - auto lookups_alloc = _tensor_builder->at(lookups_index).get(); - auto values_alloc = _tensor_builder->at(values_index).get(); + auto output_tensor = _tensor_builder->at(output_index).get(); + auto lookups_tensor = _tensor_builder->at(lookups_index).get(); + auto values_tensor = _tensor_builder->at(values_index).get(); auto fn = std::make_unique<::arm_compute::NEEmbeddingLookup>(); - fn->configure(values_alloc->handle(), output_alloc->handle(), lookups_alloc->handle()); + fn->configure(values_tensor->handle(), output_tensor->handle(), lookups_tensor->handle()); auto acl_fn = asAclFunction(std::move(fn)); @@ -436,12 +389,12 @@ void KernelGenerator::visit(const ir::operation::Floor &node) const auto ofm_index{node.getOutputs().at(0)}; const auto ifm_index{node.getInputs().at(ir::operation::Floor::Input::INPUT)}; - auto ofm_alloc = _tensor_builder->at(ofm_index).get(); - auto ifm_alloc = _tensor_builder->at(ifm_index).get(); + auto ofm_tensor = _tensor_builder->at(ofm_index).get(); + auto ifm_tensor = _tensor_builder->at(ifm_index).get(); auto fn = std::make_unique<::arm_compute::NEFloor>(); - fn->configure(ifm_alloc->handle(), ofm_alloc->handle()); + fn->configure(ifm_tensor->handle(), ofm_tensor->handle()); auto acl_fn = asAclFunction(std::move(fn)); @@ -450,76 +403,15 @@ void KernelGenerator::visit(const ir::operation::Floor &node) void KernelGenerator::visit(const ir::operation::FullyConnected &node) { - using ir::operation::FullyConnected; - const auto output_index{node.getOutputs().at(0)}; - const auto input_index{node.getInputs().at(FullyConnected::Input::INPUT)}; - const auto weight_index{node.getInputs().at(FullyConnected::Input::WEIGHT)}; - const auto bias_index{node.getInputs().at(FullyConnected::Input::BIAS)}; - - const auto input_rank = _ctx.at(input_index).shape().rank(); - - const auto output_size = - _ctx.at(output_index).shape().dim(_ctx.at(output_index).shape().rank() - 1); - UNUSED_RELEASE(output_size); - assert(_ctx.at(bias_index).shape().dim(0) == output_size); - assert(_ctx.at(weight_index).shape().dim(0) == output_size); - const auto batch_size = - _ctx.at(output_index).shape().dim(_ctx.at(output_index).shape().rank() - 2); - const auto input_size = - _ctx.at(weight_index).shape().dim(_ctx.at(weight_index).shape().rank() - 1); - - // Check for reshaping input's shape into rank-2 - bool needs_reshape = false; - ir::Shape reshape(2); - if (input_rank == 3 || input_rank == 4) - { - const auto &ifm_shape = _ctx.at(input_index).shape(); - auto feature_size = 1; - for (int i = 0; i < ifm_shape.rank(); ++i) - { - feature_size *= ifm_shape.dim(i); - } - - UNUSED_RELEASE(feature_size); - assert(feature_size == batch_size * input_size); - - // for reshaping - needs_reshape = true; - reshape.dim(0) = batch_size; /* H */ - reshape.dim(1) = input_size; /* W */ - } - + auto output_tensor = _tensor_builder->at(output_index).get(); const auto activation = node.param().activation; - auto output_alloc = _tensor_builder->at(output_index).get(); - const auto input_alloc = 
_tensor_builder->at(input_index).get(); - const auto weight_alloc = _tensor_builder->at(weight_index).get(); - const auto bias_alloc = _tensor_builder->at(bias_index).get(); - const auto frontend_layout = _current_op_seq_layout; - const auto acl_layout = output_alloc->handle()->info()->data_layout(); - - auto fn = std::make_unique( - _tensor_builder->acl_tensor_manager()->internal_buffer_manager()); - - arm_compute::NEFullyConnectedReshapingLayer::KernelType kernel_type = - arm_compute::NEFullyConnectedReshapingLayer::KernelType::GENERAL; - if (_ctx.at(weight_index).isConstant()) - { - kernel_type = arm_compute::NEFullyConnectedReshapingLayer::KernelType::PREPROCESSED_WEIGHTS; - assert(_ctx.at(weight_index).data()); - } - - fn->configure( - input_alloc->handle(), weight_alloc->handle(), bias_alloc->handle(), output_alloc->handle(), - needs_reshape, - ::onert::backend::acl_common::asTensorShape( - reshape, frontend_layout, ::onert::backend::acl_common::asRuntimeLayout(acl_layout)), - kernel_type); - + auto fn = acl_common::kernelGenFullyConnected( + node, _ctx, _tensor_builder, _current_op_seq_layout); _return_fn = std::make_unique( - asAclFunction(std::move(fn)), - ActivationBuilder::generate(activation, output_alloc->handle())); + std::move(fn), ActivationBuilder::generate(activation, output_tensor->handle())); } void KernelGenerator::visit(const ir::operation::HashtableLookup &node) @@ -531,17 +423,17 @@ void KernelGenerator::visit(const ir::operation::HashtableLookup &node) const auto keys_index{node.getInputs().at(ir::operation::HashtableLookup::Input::KEYS)}; const auto values_index{node.getInputs().at(ir::operation::HashtableLookup::Input::VALUES)}; - auto output_alloc = _tensor_builder->at(output_index).get(); - auto hits_alloc = _tensor_builder->at(hits_index).get(); + auto output_tensor = _tensor_builder->at(output_index).get(); + auto hits_tensor = _tensor_builder->at(hits_index).get(); - auto lookups_alloc = _tensor_builder->at(lookups_index).get(); - auto keys_alloc = _tensor_builder->at(keys_index).get(); - auto values_alloc = _tensor_builder->at(values_index).get(); + auto lookups_tensor = _tensor_builder->at(lookups_index).get(); + auto keys_tensor = _tensor_builder->at(keys_index).get(); + auto values_tensor = _tensor_builder->at(values_index).get(); auto fn = std::make_unique<::arm_compute::NEHashtableLookup>(); - fn->configure(lookups_alloc->handle(), keys_alloc->handle(), values_alloc->handle(), - output_alloc->handle(), hits_alloc->handle()); + fn->configure(lookups_tensor->handle(), keys_tensor->handle(), values_tensor->handle(), + output_tensor->handle(), hits_tensor->handle()); auto acl_fn = asAclFunction(std::move(fn)); @@ -561,10 +453,10 @@ void KernelGenerator::visit(const ir::operation::Gather &node) // Converting in reverse order const int axis = ::onert::backend::acl_common::ToARMComputeAxis(ifm_rank, axis_value).value(); - auto ofm_alloc = _tensor_builder->at(ofm_index).get(); - auto ifm_alloc = _tensor_builder->at(ifm_index).get(); - auto indices_alloc = _tensor_builder->at(indices_index).get(); - const auto backend_layout = ofm_alloc->layout(); + auto ofm_tensor = _tensor_builder->at(ofm_index).get(); + auto ifm_tensor = _tensor_builder->at(ifm_index).get(); + auto indices_tensor = _tensor_builder->at(indices_index).get(); + const auto backend_layout = ofm_tensor->layout(); UNUSED_RELEASE(backend_layout); // NOTE The frontend layout and backend layout must be the same for this operation. 
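// A plausible spelled-out form of the FullyConnected delegation above, kept as a
// commented sketch because the exact template-argument list is an assumption here
// (FunctionWrapperT stands in for the function-wrapper type; NEFullyConnectedReshapingLayer
// is the layer type the removed code referenced):
//
//   auto fn = acl_common::kernelGenFullyConnected<FunctionWrapperT,
//                                                 ::arm_compute::NEFullyConnectedReshapingLayer>(
//       node, _ctx, _tensor_builder, _current_op_seq_layout);
//   _return_fn = std::make_unique<exec::FunctionSequence>( // wrapper type assumed
//       std::move(fn), ActivationBuilder::generate(activation, output_tensor->handle()));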
@@ -575,35 +467,35 @@ void KernelGenerator::visit(const ir::operation::Gather &node) // a model. For example, if a model in NHWC has this operation as output rank == 4, indices // rank == 2 and axis == 2, this operation should work as the axis W and C, but the axis W // and C are not sequential in NCHW. So the backend in NCHW cannot handle this case. - assert(backend_layout == ifm_alloc->layout()); - assert(backend_layout == indices_alloc->layout()); + assert(backend_layout == ifm_tensor->layout()); + assert(backend_layout == indices_tensor->layout()); assert(ifm_rank < 4 || _current_op_seq_layout == backend_layout); auto fn = std::make_unique<::arm_compute::NEGatherEx>(); // input is n-D, indices k-D, output is (n + k - 1)-D size_t n = ifm_rank; - assert(n == ifm_alloc->num_dimensions()); + assert(n == ifm_tensor->num_dimensions()); size_t k = _ctx.at(indices_index).shape().rank(); - assert(k == indices_alloc->num_dimensions()); + assert(k == indices_tensor->num_dimensions()); // Disable applied dim_correction - if (n != ifm_alloc->info()->num_dimensions()) + if (n != ifm_tensor->info()->num_dimensions()) { // This means that high dimension's value is 1 and ifm tensor is applied dim_correction const auto ifm = _ctx.at(ifm_index); - ifm_alloc->info()->set_tensor_shape( + ifm_tensor->info()->set_tensor_shape( acl_common::asTensorShape(ifm.shape(), _current_op_seq_layout, backend_layout, false)); } - if (k != indices_alloc->info()->num_dimensions()) + if (k != indices_tensor->info()->num_dimensions()) { // This means that high dimension's value is 1 and indices tensor is applied dim_correction const auto indices = _ctx.at(indices_index); - indices_alloc->info()->set_tensor_shape( + indices_tensor->info()->set_tensor_shape( acl_common::asTensorShape(indices.shape(), _current_op_seq_layout, backend_layout, false)); } - fn->configure(ifm_alloc->handle(), indices_alloc->handle(), ofm_alloc->handle(), axis); + fn->configure(ifm_tensor->handle(), indices_tensor->handle(), ofm_tensor->handle(), axis); // acl_neon doesn't not revert disabling applied dim_correction because acl_neon's kernels would // use arm_compute::TensorInfo::offset_element_in_bytes() @@ -621,20 +513,20 @@ void KernelGenerator::visit(const ir::operation::InstanceNorm &node) const auto gamma_index{node.getInputs().at(ir::operation::InstanceNorm::Input::GAMMA)}; const auto beta_index{node.getInputs().at(ir::operation::InstanceNorm::Input::BETA)}; - auto ofm_alloc = _tensor_builder->at(ofm_index).get(); - auto ifm_alloc = _tensor_builder->at(ifm_index).get(); - auto gamma_alloc = _tensor_builder->at(gamma_index).get(); - auto beta_alloc = _tensor_builder->at(beta_index).get(); + auto ofm_tensor = _tensor_builder->at(ofm_index).get(); + auto ifm_tensor = _tensor_builder->at(ifm_index).get(); + auto gamma_tensor = _tensor_builder->at(gamma_index).get(); + auto beta_tensor = _tensor_builder->at(beta_index).get(); auto epsilon = node.param().epsilon; auto activation = node.param().activation; auto fn = std::make_unique<::arm_compute::NEInstanceNormalizationLayerEx>(); - fn->configure(ifm_alloc->handle(), ofm_alloc->handle(), gamma_alloc->handle(), - beta_alloc->handle(), epsilon); + fn->configure(ifm_tensor->handle(), ofm_tensor->handle(), gamma_tensor->handle(), + beta_tensor->handle(), epsilon); _return_fn = std::make_unique( - asAclFunction(std::move(fn)), ActivationBuilder::generate(activation, ofm_alloc->handle())); + asAclFunction(std::move(fn)), ActivationBuilder::generate(activation, ofm_tensor->handle())); } void 
KernelGenerator::visit(const ir::operation::L2Normalization &node) @@ -656,15 +548,15 @@ void KernelGenerator::visit(const ir::operation::L2Normalization &node) float beta = 0.5f; // pow(reduction, -0.5) = 1 / sqrt(reduction) float bias = 0.0f; // Don't offset the reduction. - auto ofm_alloc = _tensor_builder->at(ofm_index).get(); - auto ifm_alloc = _tensor_builder->at(ifm_index).get(); + auto ofm_tensor = _tensor_builder->at(ofm_index).get(); + auto ifm_tensor = _tensor_builder->at(ifm_index).get(); const auto norm_info = ::arm_compute::NormalizationLayerInfo(::arm_compute::NormType::CROSS_MAP, radius, alpha, beta, bias, false); auto fn = std::make_unique<::arm_compute::NENormalizationLayer>(); - fn->configure(ifm_alloc->handle(), ofm_alloc->handle(), norm_info); + fn->configure(ifm_tensor->handle(), ofm_tensor->handle(), norm_info); auto acl_fn = asAclFunction(std::move(fn)); @@ -673,32 +565,15 @@ void KernelGenerator::visit(const ir::operation::L2Normalization &node) void KernelGenerator::visit(const ir::operation::L2Pool2D &node) { - const auto ofm_index{node.getOutputs().at(0)}; - const auto ifm_index{node.getInputs().at(ir::operation::L2Pool2D::Input::INPUT)}; - - const auto ifm_shape = _ctx.at(ifm_index).shape().asFeature(_current_op_seq_layout); - const auto ofm_shape = _ctx.at(ofm_index).shape().asFeature(_current_op_seq_layout); + auto raw_fn = acl_common::kernelGenPool2D<::arm_compute::NEPoolingLayer>( + node, _ctx, _tensor_builder, _current_op_seq_layout, ::arm_compute::PoolingType::L2); - uint32_t kw = node.param().kw; - uint32_t kh = node.param().kh; - const auto stride = node.param().stride; - const auto padding = - ir::calculatePadding(node.param().padding, ifm_shape, ofm_shape, stride, kw, kh); + const auto ofm_index{node.getOutputs().at(0)}; + auto ofm_tensor = _tensor_builder->at(ofm_index).get(); const auto activation = node.param().activation; - - auto ofm_alloc = _tensor_builder->at(ofm_index).get(); - auto ifm_alloc = _tensor_builder->at(ifm_index).get(); - - ::arm_compute::PoolingLayerInfo info{ - ::arm_compute::PoolingType::L2, ::arm_compute::Size2D{kw, kh}, - ::onert::backend::acl_common::asPadStrideInfo(padding, stride)}; - - auto fn = std::make_unique<::arm_compute::NEPoolingLayer>(); - - fn->configure(ifm_alloc->handle(), ofm_alloc->handle(), info); - _return_fn = std::make_unique( - asAclFunction(std::move(fn)), ActivationBuilder::generate(activation, ofm_alloc->handle())); + asAclFunction(std::move(raw_fn)), + ActivationBuilder::generate(activation, ofm_tensor->handle())); } void KernelGenerator::visit(const ir::operation::LocalResponseNormalization &node) @@ -712,15 +587,15 @@ void KernelGenerator::visit(const ir::operation::LocalResponseNormalization &nod auto beta = node.param().beta; auto bias = node.param().bias; - auto ofm_alloc = _tensor_builder->at(ofm_index).get(); - auto ifm_alloc = _tensor_builder->at(ifm_index).get(); + auto ofm_tensor = _tensor_builder->at(ofm_index).get(); + auto ifm_tensor = _tensor_builder->at(ifm_index).get(); const auto norm_info = ::arm_compute::NormalizationLayerInfo( ::arm_compute::NormType::CROSS_MAP, radius * 2 + 1, alpha, beta, bias, false); auto fn = std::make_unique<::arm_compute::NENormalizationLayer>(); - fn->configure(ifm_alloc->handle(), ofm_alloc->handle(), norm_info); + fn->configure(ifm_tensor->handle(), ofm_tensor->handle(), norm_info); auto acl_fn = asAclFunction(std::move(fn)); @@ -733,13 +608,13 @@ void KernelGenerator::visit(const ir::operation::LogicalAnd &node) const auto 
input0_index{node.getInputs().at(ir::operation::LogicalAnd::Input::INPUT0)}; const auto input1_index{node.getInputs().at(ir::operation::LogicalAnd::Input::INPUT1)}; - auto output_alloc = _tensor_builder->at(output_index).get(); - auto input0_alloc = _tensor_builder->at(input0_index).get(); - auto input1_alloc = _tensor_builder->at(input1_index).get(); + auto output_tensor = _tensor_builder->at(output_index).get(); + auto input0_tensor = _tensor_builder->at(input0_index).get(); + auto input1_tensor = _tensor_builder->at(input1_index).get(); auto fn = std::make_unique<::arm_compute::NELogicalAnd>(); - fn->configure(input0_alloc->handle(), input1_alloc->handle(), output_alloc->handle()); + fn->configure(input0_tensor->handle(), input1_tensor->handle(), output_tensor->handle()); auto acl_fn = asAclFunction(std::move(fn)); @@ -751,12 +626,12 @@ void KernelGenerator::visit(const ir::operation::LogicalNot &node) const auto output_index{node.getOutputs().at(0)}; const auto input_index{node.getInputs().at(ir::operation::LogicalNot::Input::INPUT)}; - auto output_alloc = _tensor_builder->at(output_index).get(); - auto input_alloc = _tensor_builder->at(input_index).get(); + auto output_tensor = _tensor_builder->at(output_index).get(); + auto input_tensor = _tensor_builder->at(input_index).get(); auto fn = std::make_unique<::arm_compute::NEBitwiseNot>(); - fn->configure(input_alloc->handle(), output_alloc->handle()); + fn->configure(input_tensor->handle(), output_tensor->handle()); auto acl_fn = asAclFunction(std::move(fn)); @@ -769,13 +644,13 @@ void KernelGenerator::visit(const ir::operation::LogicalOr &node) const auto input0_index{node.getInputs().at(ir::operation::LogicalOr::Input::INPUT0)}; const auto input1_index{node.getInputs().at(ir::operation::LogicalOr::Input::INPUT1)}; - auto output_alloc = _tensor_builder->at(output_index).get(); - auto input0_alloc = _tensor_builder->at(input0_index).get(); - auto input1_alloc = _tensor_builder->at(input1_index).get(); + auto output_tensor = _tensor_builder->at(output_index).get(); + auto input0_tensor = _tensor_builder->at(input0_index).get(); + auto input1_tensor = _tensor_builder->at(input1_index).get(); auto fn = std::make_unique<::arm_compute::NELogicalOr>(); - fn->configure(input0_alloc->handle(), input1_alloc->handle(), output_alloc->handle()); + fn->configure(input0_tensor->handle(), input1_tensor->handle(), output_tensor->handle()); auto acl_fn = asAclFunction(std::move(fn)); @@ -787,8 +662,8 @@ void KernelGenerator::visit(const ir::operation::Logistic &node) const auto ofm_index{node.getOutputs().at(0)}; const auto ifm_index{node.getInputs().at(ir::operation::Logistic::Input::INPUT)}; - auto ofm_alloc = _tensor_builder->at(ofm_index).get(); - auto ifm_alloc = _tensor_builder->at(ifm_index).get(); + auto ofm_tensor = _tensor_builder->at(ofm_index).get(); + auto ifm_tensor = _tensor_builder->at(ifm_index).get(); const ::arm_compute::ActivationLayerInfo act_info{ ::arm_compute::ActivationLayerInfo::ActivationFunction::LOGISTIC}; @@ -798,7 +673,7 @@ void KernelGenerator::visit(const ir::operation::Logistic &node) // instead of 'INF', and then the result of this op will be errors due to the 'NaN'. 
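// Minimal standalone sketch of the math behind this hunk: Logistic is the
// sigmoid 1 / (1 + exp(-x)), which saturates to 0 and 1 for large-magnitude
// inputs. This only illustrates the float formula referenced by the NOTE
// above; it says nothing about ACL's quantized (QASYMM8) kernel, and
// logistic() is a made-up name for the illustration.
#include <cmath>
#include <cstdio>

static float logistic(float x) { return 1.0f / (1.0f + std::exp(-x)); }

int main()
{
  std::printf("logistic(-100) = %f\n", logistic(-100.0f)); // ~0
  std::printf("logistic(0)    = %f\n", logistic(0.0f));    // 0.5
  std::printf("logistic(+100) = %f\n", logistic(100.0f));  // ~1
  return 0;
}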
auto fn = std::make_unique<::arm_compute::NEActivationLayerEx>(); - fn->configure(ifm_alloc->handle(), ofm_alloc->handle(), act_info); + fn->configure(ifm_tensor->handle(), ofm_tensor->handle(), act_info); auto acl_fn = asAclFunction(std::move(fn)); @@ -807,159 +682,8 @@ void KernelGenerator::visit(const ir::operation::Logistic &node) void KernelGenerator::visit(const ir::operation::LSTM &node) { - // TODO Support dynamic rnn - // TODO Fix subtle error in the case of non-CIFG, non-peephole and No Projection. - const auto scratch_buffer_index{ - node.getOutputs().at(ir::operation::LSTM::Output::SCRATCH_BUFFER)}; - const auto output_state_out_index{ - node.getOutputs().at(ir::operation::LSTM::Output::OUTPUT_STATE_OUT)}; - const auto cell_state_out_index{ - node.getOutputs().at(ir::operation::LSTM::Output::CELL_STATE_OUT)}; - const auto output_index{node.getOutputs().at(ir::operation::LSTM::Output::OUTPUT)}; - - const auto input_index{node.getInputs().at(ir::operation::LSTM::Input::INPUT)}; - const auto input_to_input_weights_index{ - node.getInputs().at(ir::operation::LSTM::Input::INPUT_TO_INPUT_WEIGHTS)}; // optional - const auto input_to_forget_weights_index{ - node.getInputs().at(ir::operation::LSTM::Input::INPUT_TO_FORGET_WEIGHTS)}; - const auto input_to_cell_weights_index{ - node.getInputs().at(ir::operation::LSTM::Input::INPUT_TO_CELL_WEIGHTS)}; - const auto input_to_output_weights_index{ - node.getInputs().at(ir::operation::LSTM::Input::INPUT_TO_OUTPUT_WEIGHTS)}; - const auto recurrent_to_input_weights_index{ - node.getInputs().at(ir::operation::LSTM::Input::RECURRENT_TO_INPUT_WEIGHTS)}; // optional - const auto recurrent_to_forget_weights_index{ - node.getInputs().at(ir::operation::LSTM::Input::RECURRENT_TO_FORGET_WEIGHTS)}; - const auto recurrent_to_cell_weights_index{ - node.getInputs().at(ir::operation::LSTM::Input::RECURRENT_TO_CELL_WEIGHTS)}; - const auto recurrent_to_output_weights_index{ - node.getInputs().at(ir::operation::LSTM::Input::RECURRENT_TO_OUTPUT_WEIGHTS)}; - const auto cell_to_input_weights_index{ - node.getInputs().at(ir::operation::LSTM::Input::CELL_TO_INPUT_WEIGHTS)}; // optional - const auto cell_to_forget_weights_index{ - node.getInputs().at(ir::operation::LSTM::Input::CELL_TO_FORGET_WEIGHTS)}; // optional - const auto cell_to_output_weights_index{ - node.getInputs().at(ir::operation::LSTM::Input::CELL_TO_OUTPUT_WEIGHTS)}; // optional - const auto input_gate_bias_index{ - node.getInputs().at(ir::operation::LSTM::Input::INPUT_GATE_BIAS)}; - const auto forget_gate_bias_index{ - node.getInputs().at(ir::operation::LSTM::Input::FORGET_GATE_BIAS)}; - const auto cell_bias_index{node.getInputs().at(ir::operation::LSTM::Input::CELL_BIAS)}; - const auto output_gate_bias_index{ - node.getInputs().at(ir::operation::LSTM::Input::OUTPUT_GATE_BIAS)}; - const auto projection_weights_index{ - node.getInputs().at(ir::operation::LSTM::Input::PROJECTION_WEIGHTS)}; // optional - const auto projection_bias_index{ - node.getInputs().at(ir::operation::LSTM::Input::PROJECTION_BIAS)}; // optional - const auto output_state_in_index{ - node.getInputs().at(ir::operation::LSTM::Input::OUTPUT_STATE_IN)}; - const auto cell_state_in_index{node.getInputs().at(ir::operation::LSTM::Input::CELL_STATE_IN)}; - const auto cell_threshold = node.param().cell_threshold; - const auto projection_threshold = node.param().projection_threshold; - - bool has_input_to_input_weights = _ctx.at(input_to_input_weights_index).shape().dim(0) != 0 && - _ctx.at(input_to_input_weights_index).shape().dim(1) != 0; - bool 
has_recurrent_to_input_weights = - _ctx.at(recurrent_to_input_weights_index).shape().dim(0) != 0 && - _ctx.at(recurrent_to_input_weights_index).shape().dim(1) != 0; - bool has_cell_to_forget_weights = _ctx.at(cell_to_forget_weights_index).shape().dim(0) != 0; - bool has_cell_to_output_weights = _ctx.at(cell_to_output_weights_index).shape().dim(0) != 0; - bool has_projection_weights = _ctx.at(projection_weights_index).shape().dim(0) != 0 && - _ctx.at(projection_weights_index).shape().dim(1) != 0; - bool has_projection_bias = _ctx.at(projection_bias_index).shape().dim(0); - - // NOTE The input_to_input_weights and the recurrent_to_input_weights do not exist in CIFG. - // true: no CIFG - // false: CIFG - // NOTE The cell_to_input_weights does not exist in non-peephole although regular LSTM(non-CIFG). - bool has_cifg_param = has_input_to_input_weights && has_recurrent_to_input_weights; - - // NOTE The cell_to_forget_weights and the cell_to_output_weights exist in peephole. - // But the cell_to_input_weights does not exist in regular CIFG although peephole. - // true: peephole - // false: no peephole - bool has_peephole_param = has_cell_to_forget_weights && has_cell_to_output_weights; - - // NOTE Although the projection weights has data the projection bias may not have data. - bool has_projection_param = has_projection_weights; - - const auto activation = node.param().activation; - const auto cell_clip = cell_threshold; - const auto projection_clip = projection_threshold; - assert(cell_clip >= 0.f && projection_clip >= 0.f); - - auto scratch_buffer_alloc = _tensor_builder->at(scratch_buffer_index).get(); - auto output_state_out_alloc = _tensor_builder->at(output_state_out_index).get(); - auto cell_state_out_alloc = _tensor_builder->at(cell_state_out_index).get(); - auto output_alloc = _tensor_builder->at(output_index).get(); - - auto input_alloc = _tensor_builder->at(input_index).get(); - - auto input_to_forget_weights_alloc = _tensor_builder->at(input_to_forget_weights_index).get(); - auto input_to_cell_weights_alloc = _tensor_builder->at(input_to_cell_weights_index).get(); - auto input_to_output_weights_alloc = _tensor_builder->at(input_to_output_weights_index).get(); - auto recurrent_to_forget_weights_alloc = - _tensor_builder->at(recurrent_to_forget_weights_index).get(); - auto recurrent_to_cell_weights_alloc = _tensor_builder->at(recurrent_to_cell_weights_index).get(); - auto recurrent_to_output_weights_alloc = - _tensor_builder->at(recurrent_to_output_weights_index).get(); - - auto forget_gate_bias_alloc = _tensor_builder->at(forget_gate_bias_index).get(); - auto cell_bias_alloc = _tensor_builder->at(cell_bias_index).get(); - auto output_gate_bias_alloc = _tensor_builder->at(output_gate_bias_index).get(); - auto output_state_in_alloc = _tensor_builder->at(output_state_in_index).get(); - auto cell_state_in_alloc = _tensor_builder->at(cell_state_in_index).get(); - - auto act_info = ::onert::backend::acl_common::asActivationLayerInfo(activation); - - auto fn = std::make_unique<::arm_compute::NELSTMLayer>(); - - ::arm_compute::LSTMParams<::arm_compute::ITensor> lstm_params{}; - if (has_cifg_param) - { - auto input_to_input_weights_alloc = - _tensor_builder->at(input_to_input_weights_index).get(); // optional - auto recurrent_to_input_weights_alloc = - _tensor_builder->at(recurrent_to_input_weights_index).get(); // optional - auto cell_to_input_weights_handle = - has_peephole_param ? 
_tensor_builder->at(cell_to_input_weights_index).get()->handle() - : nullptr; // optional (non-cifg && peephole) - auto input_gate_bias_alloc = _tensor_builder->at(input_gate_bias_index).get(); // optional - lstm_params.set_cifg_params(input_to_input_weights_alloc->handle(), - recurrent_to_input_weights_alloc->handle(), - cell_to_input_weights_handle, input_gate_bias_alloc->handle()); - } - if (has_peephole_param) - { - auto cell_to_forget_weights_alloc = - _tensor_builder->at(cell_to_forget_weights_index).get(); // optional - auto cell_to_output_weights_alloc = - _tensor_builder->at(cell_to_output_weights_index).get(); // optional - lstm_params.set_peephole_params(cell_to_forget_weights_alloc->handle(), - cell_to_output_weights_alloc->handle()); - } - if (has_projection_param) - { - auto projection_weights_alloc = _tensor_builder->at(projection_weights_index).get(); // optional - auto projection_bias_handle = has_projection_bias - ? _tensor_builder->at(projection_bias_index).get()->handle() - : nullptr; // optional - lstm_params.set_projection_params(projection_weights_alloc->handle(), projection_bias_handle); - } - - fn->configure( - input_alloc->handle(), input_to_forget_weights_alloc->handle(), - input_to_cell_weights_alloc->handle(), input_to_output_weights_alloc->handle(), - recurrent_to_forget_weights_alloc->handle(), recurrent_to_cell_weights_alloc->handle(), - recurrent_to_output_weights_alloc->handle(), forget_gate_bias_alloc->handle(), - cell_bias_alloc->handle(), output_gate_bias_alloc->handle(), output_state_in_alloc->handle(), - cell_state_in_alloc->handle(), scratch_buffer_alloc->handle(), - output_state_out_alloc->handle(), cell_state_out_alloc->handle(), output_alloc->handle(), - lstm_params, act_info, cell_clip, projection_clip); - - auto acl_fn = asAclFunction(std::move(fn)); - - _return_fn = std::move(acl_fn); + _return_fn = acl_common::kernelGenLSTM(node, _ctx, _tensor_builder); } void KernelGenerator::visit(const ir::operation::Mul &node) @@ -970,18 +694,18 @@ void KernelGenerator::visit(const ir::operation::Mul &node) const auto activation = node.param().activation; - auto ofm_alloc = _tensor_builder->at(ofm_index).get(); - auto lhs_alloc = _tensor_builder->at(lhs_index).get(); - auto rhs_alloc = _tensor_builder->at(rhs_index).get(); + auto ofm_tensor = _tensor_builder->at(ofm_index).get(); + auto lhs_tensor = _tensor_builder->at(lhs_index).get(); + auto rhs_tensor = _tensor_builder->at(rhs_index).get(); auto fn = std::make_unique<::arm_compute::NEPixelWiseMultiplication>(); // RoundingPolicy for scale:1.0 is only allowed RoundingPolicy::TO_ZERO - fn->configure(lhs_alloc->handle(), rhs_alloc->handle(), ofm_alloc->handle(), 1.0, // scale + fn->configure(lhs_tensor->handle(), rhs_tensor->handle(), ofm_tensor->handle(), 1.0, // scale arm_compute::ConvertPolicy::SATURATE, arm_compute::RoundingPolicy::TO_ZERO); _return_fn = std::make_unique( - asAclFunction(std::move(fn)), ActivationBuilder::generate(activation, ofm_alloc->handle())); + asAclFunction(std::move(fn)), ActivationBuilder::generate(activation, ofm_tensor->handle())); } void KernelGenerator::visit(const ir::operation::Neg &node) @@ -989,12 +713,12 @@ void KernelGenerator::visit(const ir::operation::Neg &node) const auto ofm_index{node.getOutputs().at(0)}; const auto ifm_index{node.getInputs().at(ir::operation::Neg::Input::INPUT)}; - auto ofm_alloc = _tensor_builder->at(ofm_index).get(); - auto ifm_alloc = _tensor_builder->at(ifm_index).get(); + auto ofm_tensor = _tensor_builder->at(ofm_index).get(); + auto 
ifm_tensor = _tensor_builder->at(ifm_index).get(); auto fn = std::make_unique<::arm_compute::NENegLayer>(); - fn->configure(ifm_alloc->handle(), ofm_alloc->handle()); + fn->configure(ifm_tensor->handle(), ofm_tensor->handle()); auto acl_fn = asAclFunction(std::move(fn)); @@ -1030,12 +754,12 @@ void KernelGenerator::visit(const ir::operation::Pack &node) for (const auto &input_index : input_indexes) { size_t input_rank = _ctx.at(input_index).shape().rank(); - const auto &input_alloc = _tensor_builder->at(input_index); - assert(input_rank == input_alloc->num_dimensions()); - if (input_rank != input_alloc->info()->num_dimensions()) + const auto &input_tensor = _tensor_builder->at(input_index); + assert(input_rank == input_tensor->num_dimensions()); + if (input_rank != input_tensor->info()->num_dimensions()) { // This means that high dimension's value is 1 and ifm tensor is applied dim_correction - input_alloc->info()->set_tensor_shape(acl_common::asTensorShape( + input_tensor->info()->set_tensor_shape(acl_common::asTensorShape( _ctx.at(input_index).shape(), _current_op_seq_layout, backend_layout, false)); } } @@ -1094,8 +818,8 @@ void KernelGenerator::visit(const ir::operation::Permute &node) const auto ofm_idx{node.getOutputs().at(0)}; const auto ifm_idx{node.getInputs().at(0)}; const auto permute_type = node.getPermuteType(); - auto ofm_alloc = _tensor_builder->at(ofm_idx).get(); - auto ifm_alloc = _tensor_builder->at(ifm_idx).get(); + auto ofm_tensor = _tensor_builder->at(ofm_idx).get(); + auto ifm_tensor = _tensor_builder->at(ifm_idx).get(); const auto rank = _ctx.at(ofm_idx).shape().rank(); assert(_ctx.at(ifm_idx).shape().rank() == _ctx.at(ofm_idx).shape().rank()); @@ -1108,7 +832,7 @@ void KernelGenerator::visit(const ir::operation::Permute &node) auto l = std::make_unique<::arm_compute::NEPermute>(); - l->configure(ifm_alloc->handle(), ofm_alloc->handle(), pv); + l->configure(ifm_tensor->handle(), ofm_tensor->handle(), pv); fn = std::move(l); } @@ -1119,7 +843,7 @@ void KernelGenerator::visit(const ir::operation::Permute &node) auto l = std::make_unique<::arm_compute::NEPermute>(); - l->configure(ifm_alloc->handle(), ofm_alloc->handle(), pv); + l->configure(ifm_tensor->handle(), ofm_tensor->handle(), pv); fn = std::move(l); } @@ -1127,7 +851,7 @@ void KernelGenerator::visit(const ir::operation::Permute &node) { auto l = std::make_unique<::arm_compute::NECopy>(); - l->configure(ifm_alloc->handle(), ofm_alloc->handle()); + l->configure(ifm_tensor->handle(), ofm_tensor->handle()); fn = std::move(l); } @@ -1143,15 +867,15 @@ void KernelGenerator::visit(const ir::operation::PReLU &node) const auto ifm_index{node.getInputs().at(ir::operation::PReLU::Input::INPUT)}; const auto alpha_index{node.getInputs().at(ir::operation::PReLU::Input::ALPHA)}; - auto ofm_alloc = _tensor_builder->at(ofm_index).get(); - auto ifm_alloc = _tensor_builder->at(ifm_index).get(); - auto alpha_alloc = _tensor_builder->at(alpha_index).get(); + auto ofm_tensor = _tensor_builder->at(ofm_index).get(); + auto ifm_tensor = _tensor_builder->at(ifm_index).get(); + auto alpha_tensor = _tensor_builder->at(alpha_index).get(); std::unique_ptr<::arm_compute::IFunction> fn; - auto l = std::make_unique<::arm_compute::NEPReLU>(); + auto l = std::make_unique<::arm_compute::NEPReluLayer>(); - l->configure(ifm_alloc->handle(), alpha_alloc->handle(), ofm_alloc->handle()); + l->configure(ifm_tensor->handle(), alpha_tensor->handle(), ofm_tensor->handle()); fn = std::move(l); @@ -1166,14 +890,14 @@ void KernelGenerator::visit(const 
ir::operation::Reduce &node) const auto input_index{node.getInputs().at(ir::operation::Reduce::Input::INPUT)}; const auto axes_index{node.getInputs().at(ir::operation::Reduce::Input::AXES)}; - auto output_alloc = _tensor_builder->at(output_index).get(); - auto input_alloc = _tensor_builder->at(input_index).get(); + auto output_tensor = _tensor_builder->at(output_index).get(); + auto input_tensor = _tensor_builder->at(input_index).get(); // Convert to ACL axes taking into account negative values and possible duplicates. const auto &axes = _ctx.at(axes_index); const auto input_rank = _ctx.at(input_index).shape().rank(); const auto frontend_layout = _current_op_seq_layout; - const auto backend_layout = input_alloc->layout(); + const auto backend_layout = input_tensor->layout(); const auto reduce_axes = acl_common::asCoordinates(axes, input_rank, frontend_layout, backend_layout); const auto reduce_type = node.param().reduce_type; @@ -1182,11 +906,9 @@ void KernelGenerator::visit(const ir::operation::Reduce &node) std::unique_ptr<::arm_compute::IFunction> fn; if (reduce_type == ir::operation::Reduce::ReduceType::MEAN) { - // NOTE NEReduceMean has a bug that does not support NHWC layout - // NEReduceMean intermediate tensors are always NCHW layout - auto l = std::make_unique<::arm_compute::NEReduceMeanEx>(); + auto l = std::make_unique<::arm_compute::NEReduceMean>(); - l->configure(input_alloc->handle(), reduce_axes, keep_dims, output_alloc->handle()); + l->configure(input_tensor->handle(), reduce_axes, keep_dims, output_tensor->handle()); fn = std::move(l); } @@ -1194,7 +916,7 @@ void KernelGenerator::visit(const ir::operation::Reduce &node) { auto l = std::make_unique<::arm_compute::NEReduceSum>(); - l->configure(input_alloc->handle(), reduce_axes, keep_dims, output_alloc->handle()); + l->configure(input_tensor->handle(), reduce_axes, keep_dims, output_tensor->handle()); fn = std::move(l); } @@ -1202,7 +924,7 @@ void KernelGenerator::visit(const ir::operation::Reduce &node) { auto l = std::make_unique<::arm_compute::NEReduceOperation>(); - l->configure(input_alloc->handle(), reduce_axes, keep_dims, output_alloc->handle(), + l->configure(input_tensor->handle(), reduce_axes, keep_dims, output_tensor->handle(), acl_common::convertReduceType(reduce_type)); fn = std::move(l); @@ -1218,15 +940,15 @@ void KernelGenerator::visit(const ir::operation::ReLU &node) const auto output_index{node.getOutputs().at(0)}; const auto input_index{node.getInputs().at(ir::operation::ReLU::Input::INPUT)}; - auto output_alloc = _tensor_builder->at(output_index).get(); - auto input_alloc = _tensor_builder->at(input_index).get(); + auto output_tensor = _tensor_builder->at(output_index).get(); + auto input_tensor = _tensor_builder->at(input_index).get(); auto fn = std::make_unique(); const ::arm_compute::ActivationLayerInfo act_info{ ::arm_compute::ActivationLayerInfo::ActivationFunction::RELU}; - fn->configure(input_alloc->handle(), output_alloc->handle(), act_info); + fn->configure(input_tensor->handle(), output_tensor->handle(), act_info); auto acl_fn = asAclFunction(std::move(fn)); @@ -1238,15 +960,15 @@ void KernelGenerator::visit(const ir::operation::ReLU1 &node) const auto ofm_index{node.getOutputs().at(0)}; const auto ifm_index{node.getInputs().at(ir::operation::ReLU1::Input::INPUT)}; - auto ofm_alloc = _tensor_builder->at(ofm_index).get(); - auto ifm_alloc = _tensor_builder->at(ifm_index).get(); + auto ofm_tensor = _tensor_builder->at(ofm_index).get(); + auto ifm_tensor = _tensor_builder->at(ifm_index).get(); 
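// Minimal standalone sketch (not backend code) of the activations lowered in
// the surrounding hunks: ReLU1 clamps to [-1, 1] and ReLU6 clamps to [0, 6],
// which is why they are expressed as ACL bounded-ReLU activation infos below.
// relu1/relu6 are made-up helper names for this illustration only.
#include <algorithm>
#include <cstdio>

static float relu1(float x) { return std::min(1.0f, std::max(-1.0f, x)); }
static float relu6(float x) { return std::min(6.0f, std::max(0.0f, x)); }

int main()
{
  std::printf("relu1(-3)  = %.1f, relu1(0.5) = %.1f\n", relu1(-3.0f), relu1(0.5f));
  std::printf("relu6(7)   = %.1f, relu6(2)   = %.1f\n", relu6(7.0f), relu6(2.0f));
  return 0;
}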
const ::arm_compute::ActivationLayerInfo act_info{ ::arm_compute::ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 1.0f, -1.0f}; auto fn = std::make_unique<::arm_compute::NEActivationLayer>(); - fn->configure(ifm_alloc->handle(), ofm_alloc->handle(), act_info); + fn->configure(ifm_tensor->handle(), ofm_tensor->handle(), act_info); auto acl_fn = asAclFunction(std::move(fn)); @@ -1258,15 +980,15 @@ void KernelGenerator::visit(const ir::operation::ReLU6 &node) const auto ofm_index{node.getOutputs().at(0)}; const auto ifm_index{node.getInputs().at(ir::operation::ReLU6::Input::INPUT)}; - auto ofm_alloc = _tensor_builder->at(ofm_index).get(); - auto ifm_alloc = _tensor_builder->at(ifm_index).get(); + auto ofm_tensor = _tensor_builder->at(ofm_index).get(); + auto ifm_tensor = _tensor_builder->at(ifm_index).get(); const ::arm_compute::ActivationLayerInfo act_info{ ::arm_compute::ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.0f}; auto fn = std::make_unique<::arm_compute::NEActivationLayer>(); - fn->configure(ifm_alloc->handle(), ofm_alloc->handle(), act_info); + fn->configure(ifm_tensor->handle(), ofm_tensor->handle(), act_info); auto acl_fn = asAclFunction(std::move(fn)); @@ -1278,13 +1000,13 @@ void KernelGenerator::visit(const ir::operation::Reshape &node) const auto output_index{node.getOutputs().at(0)}; const auto input_index{node.getInputs().at(ir::operation::Reshape::Input::INPUT)}; - auto output_alloc = _tensor_builder->at(output_index).get(); - auto input_alloc = _tensor_builder->at(input_index).get(); + auto output_tensor = _tensor_builder->at(output_index).get(); + auto input_tensor = _tensor_builder->at(input_index).get(); // NOTE This operation must not be changed the layout from frontend to backend // So, PermutationOperationPass makes layouts of frontend and backend the same. 
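// Minimal standalone sketch of why the NOTE above matters: a reshape just
// reinterprets the flat buffer, and the flat offset of a logical element
// differs between NHWC and NCHW, so the rank-4 case is only safe when the
// frontend and backend layouts agree (see the assert below). The offset
// helpers use an assumed 1x2x2x2 shape purely for illustration.
#include <cstdio>

static int nhwc_offset(int h, int w, int c) { return (h * 2 + w) * 2 + c; } // H=W=C=2
static int nchw_offset(int h, int w, int c) { return (c * 2 + h) * 2 + w; }

int main()
{
  // The same logical element (h, w, c) = (1, 0, 1) sits at different flat
  // positions, so flattening to rank 2 would yield different tensors.
  std::printf("NHWC offset %d vs NCHW offset %d\n", nhwc_offset(1, 0, 1), nchw_offset(1, 0, 1));
  return 0;
}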
const auto frontend_layout = _current_op_seq_layout; - const auto backend_layout = output_alloc->layout(); + const auto backend_layout = output_tensor->layout(); assert((_ctx.at(input_index).shape().rank() < 4 && _ctx.at(output_index).shape().rank() < 4) || frontend_layout == backend_layout); UNUSED_RELEASE(frontend_layout); @@ -1292,7 +1014,7 @@ void KernelGenerator::visit(const ir::operation::Reshape &node) auto fn = std::make_unique(); - fn->configure(input_alloc->handle(), output_alloc->handle()); + fn->configure(input_tensor->handle(), output_tensor->handle()); auto acl_fn = asAclFunction(std::move(fn)); @@ -1305,12 +1027,12 @@ void KernelGenerator::visit(const ir::operation::ResizeBilinear &node) const auto ifm_index{node.getInputs().at(ir::operation::ResizeBilinear::Input::INPUT)}; - auto ofm_alloc = _tensor_builder->at(ofm_index).get(); - auto ifm_alloc = _tensor_builder->at(ifm_index).get(); + auto ofm_tensor = _tensor_builder->at(ofm_index).get(); + auto ifm_tensor = _tensor_builder->at(ifm_index).get(); auto fn = std::make_unique<::arm_compute::NEScale>(); - fn->configure(ifm_alloc->handle(), ofm_alloc->handle(), + fn->configure(ifm_tensor->handle(), ofm_tensor->handle(), ::arm_compute::InterpolationPolicy::BILINEAR, ::arm_compute::BorderMode::REPLICATE, ::arm_compute::PixelValue(0.f), ::arm_compute::SamplingPolicy::TOP_LEFT); @@ -1334,25 +1056,25 @@ void KernelGenerator::visit(const ir::operation::RNN &node) const auto activation = node.param().activation; - auto output_alloc = _tensor_builder->at(output_index).get(); - auto hidden_state_out_alloc = _tensor_builder->at(hidden_state_out_index).get(); + auto output_tensor = _tensor_builder->at(output_index).get(); + auto hidden_state_out_tensor = _tensor_builder->at(hidden_state_out_index).get(); - auto input_alloc = _tensor_builder->at(input_index).get(); - auto weights_alloc = _tensor_builder->at(weights_index).get(); - auto recurrent_weights_alloc = _tensor_builder->at(recurrent_weights_index).get(); - auto bias_alloc = _tensor_builder->at(bias_index).get(); - auto hidden_state_in_alloc = _tensor_builder->at(hidden_state_in_index).get(); + auto input_tensor = _tensor_builder->at(input_index).get(); + auto weights_tensor = _tensor_builder->at(weights_index).get(); + auto recurrent_weights_tensor = _tensor_builder->at(recurrent_weights_index).get(); + auto bias_tensor = _tensor_builder->at(bias_index).get(); + auto hidden_state_in_tensor = _tensor_builder->at(hidden_state_in_index).get(); auto act_info = ::onert::backend::acl_common::asActivationLayerInfo(activation); auto copy_layer = std::make_unique<::arm_compute::NECopy>(); - copy_layer->configure(hidden_state_in_alloc->handle(), hidden_state_out_alloc->handle()); + copy_layer->configure(hidden_state_in_tensor->handle(), hidden_state_out_tensor->handle()); _return_fn = asAclFunction(std::move(copy_layer)); - auto fn = std::make_unique<::arm_compute::NERNNLayerEx>( + auto fn = std::make_unique<::arm_compute::NERNNLayer>( _tensor_builder->acl_tensor_manager()->internal_buffer_manager()); - fn->configure(input_alloc->handle(), weights_alloc->handle(), recurrent_weights_alloc->handle(), - bias_alloc->handle(), hidden_state_out_alloc->handle(), output_alloc->handle(), - act_info); + fn->configure(input_tensor->handle(), weights_tensor->handle(), + recurrent_weights_tensor->handle(), bias_tensor->handle(), + hidden_state_out_tensor->handle(), output_tensor->handle(), act_info); _return_fn = asAclFunction(std::move(fn)); } @@ -1361,12 +1083,12 @@ void KernelGenerator::visit(const 
ir::operation::RSQRT &node) const auto ofm_index{node.getOutputs().at(0)}; const auto ifm_index{node.getInputs().at(ir::operation::RSQRT::Input::INPUT)}; - auto ofm_alloc = _tensor_builder->at(ofm_index).get(); - auto ifm_alloc = _tensor_builder->at(ifm_index).get(); + auto ofm_tensor = _tensor_builder->at(ofm_index).get(); + auto ifm_tensor = _tensor_builder->at(ifm_index).get(); auto fn = std::make_unique<::arm_compute::NERsqrtLayer>(); - fn->configure(ifm_alloc->handle(), ofm_alloc->handle()); + fn->configure(ifm_tensor->handle(), ofm_tensor->handle()); _return_fn = asAclFunction(std::move(fn)); } @@ -1383,10 +1105,10 @@ void KernelGenerator::visit(const ir::operation::Squeeze &node) (void)dims; (void)ndim; - auto output_alloc = _tensor_builder->at(output_index).get(); - auto input_alloc = _tensor_builder->at(input_index).get(); + auto output_tensor = _tensor_builder->at(output_index).get(); + auto input_tensor = _tensor_builder->at(input_index).get(); auto fn = std::make_unique(); - fn->configure(input_alloc->handle(), output_alloc->handle()); + fn->configure(input_tensor->handle(), output_tensor->handle()); auto acl_fn = asAclFunction(std::move(fn)); _return_fn = std::move(acl_fn); } @@ -1396,15 +1118,15 @@ void KernelGenerator::visit(const ir::operation::Tanh &node) const auto output_index{node.getOutputs().at(0)}; const auto input_index{node.getInputs().at(ir::operation::Tanh::Input::INPUT)}; - auto output_alloc = _tensor_builder->at(output_index).get(); - auto input_alloc = _tensor_builder->at(input_index).get(); + auto output_tensor = _tensor_builder->at(output_index).get(); + auto input_tensor = _tensor_builder->at(input_index).get(); auto fn = std::make_unique(); const ::arm_compute::ActivationLayerInfo act_info{ ::arm_compute::ActivationLayerInfo::ActivationFunction::TANH, 1.0f, 1.0f}; - fn->configure(input_alloc->handle(), output_alloc->handle(), act_info); + fn->configure(input_tensor->handle(), output_tensor->handle(), act_info); auto acl_fn = asAclFunction(std::move(fn)); @@ -1417,13 +1139,25 @@ void KernelGenerator::visit(const ir::operation::Softmax &node) const auto input_index{node.getInputs().at(ir::operation::Softmax::Input::INPUT)}; const auto beta = node.param().beta; - auto output_alloc = _tensor_builder->at(output_index).get(); - auto input_alloc = _tensor_builder->at(input_index).get(); + auto output_tensor = _tensor_builder->at(output_index).get(); + auto input_tensor = _tensor_builder->at(input_index).get(); + const auto frontend_layout = _current_op_seq_layout; + const auto backend_layout = input_tensor->layout(); + + // Disable applied dim_correction + const size_t input_rank = _ctx.at(input_index).shape().rank(); + if (input_rank != input_tensor->info()->num_dimensions()) + { + // This means that high dimension's value is 1 and input tensor is applied dim_correction + const auto input = _ctx.at(input_index); + input_tensor->info()->set_tensor_shape( + acl_common::asTensorShape(input.shape(), frontend_layout, backend_layout, false)); + } auto fn = std::make_unique<::arm_compute::NESoftmaxLayer>( _tensor_builder->acl_tensor_manager()->internal_buffer_manager()); - fn->configure(input_alloc->handle(), output_alloc->handle(), beta); + fn->configure(input_tensor->handle(), output_tensor->handle(), beta); auto acl_fn = asAclFunction(std::move(fn)); @@ -1438,20 +1172,18 @@ void KernelGenerator::visit(const ir::operation::SpaceToBatchND &node) node.getInputs().at(ir::operation::SpaceToBatchND::Input::BLOCK_SIZE)}; const auto 
paddings_index{node.getInputs().at(ir::operation::SpaceToBatchND::Input::PADDINGS)}; - auto ofm_alloc = _tensor_builder->at(ofm_index).get(); - auto ifm_alloc = _tensor_builder->at(ifm_index).get(); - auto block_size_alloc = _tensor_builder->at(block_size_index).get(); - auto paddings_alloc = _tensor_builder->at(paddings_index).get(); + auto ofm_tensor = _tensor_builder->at(ofm_index).get(); + auto ifm_tensor = _tensor_builder->at(ifm_index).get(); + auto block_size_tensor = _tensor_builder->at(block_size_index).get(); + auto paddings_tensor = _tensor_builder->at(paddings_index).get(); assert(_ctx.at(block_size_index).data()); assert(_ctx.at(paddings_index).data()); - // NESpaceToBatchLayer has a bug that padding's values are 0 even when zero point of QASYMM8 is - // not 0. - auto fn = std::make_unique<::arm_compute::NESpaceToBatchLayerEx>(); + auto fn = std::make_unique<::arm_compute::NESpaceToBatchLayer>(); - fn->configure(ifm_alloc->handle(), block_size_alloc->handle(), paddings_alloc->handle(), - ofm_alloc->handle()); + fn->configure(ifm_tensor->handle(), block_size_tensor->handle(), paddings_tensor->handle(), + ofm_tensor->handle()); auto acl_fn = asAclFunction(std::move(fn)); @@ -1465,12 +1197,12 @@ void KernelGenerator::visit(const ir::operation::SpaceToDepth &node) auto block_size = node.param().block_size; - auto ofm_alloc = _tensor_builder->at(ofm_index).get(); - auto ifm_alloc = _tensor_builder->at(ifm_index).get(); + auto ofm_tensor = _tensor_builder->at(ofm_index).get(); + auto ifm_tensor = _tensor_builder->at(ifm_index).get(); - auto fn = std::make_unique<::arm_compute::NESpaceToDepthLayerEx>(); + auto fn = std::make_unique<::arm_compute::NESpaceToDepthLayer>(); - fn->configure(ifm_alloc->handle(), ofm_alloc->handle(), block_size); + fn->configure(ifm_tensor->handle(), ofm_tensor->handle(), block_size); auto acl_fn = asAclFunction(std::move(fn)); @@ -1489,13 +1221,13 @@ void KernelGenerator::visit(const ir::operation::Split &node) for (const auto &output : node.getOutputs()) output_indexes.emplace_back(output); - auto ifm_alloc = _tensor_builder->at(ifm_index).get(); - std::vector output_allocs; + auto ifm_tensor = _tensor_builder->at(ifm_index).get(); + std::vector output_tensors; for (const auto &ofm_ind : output_indexes) - output_allocs.emplace_back(_tensor_builder->at(ofm_ind).get()->handle()); + output_tensors.emplace_back(_tensor_builder->at(ofm_ind).get()->handle()); const auto frontend_layout = _current_op_seq_layout; - const auto backend_layout = ifm_alloc->layout(); + const auto backend_layout = ifm_tensor->layout(); auto axis = node.param().axis; if (axis < 0) axis += ifm_rank; @@ -1503,7 +1235,7 @@ void KernelGenerator::visit(const ir::operation::Split &node) auto fn = std::make_unique<::arm_compute::NESplit>(); - fn->configure(ifm_alloc->handle(), output_allocs, axis); + fn->configure(ifm_tensor->handle(), output_tensors, axis); _return_fn = asAclFunction(std::move(fn)); } @@ -1513,15 +1245,15 @@ void KernelGenerator::visit(const ir::operation::SQRT &node) const auto output_index{node.getOutputs().at(0)}; const auto input_index{node.getInputs().at(ir::operation::SQRT::Input::INPUT)}; - auto output_alloc = _tensor_builder->at(output_index).get(); - auto input_alloc = _tensor_builder->at(input_index).get(); + auto output_tensor = _tensor_builder->at(output_index).get(); + auto input_tensor = _tensor_builder->at(input_index).get(); const ::arm_compute::ActivationLayerInfo act_info{ ::arm_compute::ActivationLayerInfo::ActivationFunction::SQRT}; auto fn = 
std::make_unique<::arm_compute::NEActivationLayer>(); - fn->configure(input_alloc->handle(), output_alloc->handle(), act_info); + fn->configure(input_tensor->handle(), output_tensor->handle(), act_info); auto acl_fn = asAclFunction(std::move(fn)); @@ -1534,13 +1266,13 @@ void KernelGenerator::visit(const ir::operation::SquaredDifference &node) const auto lhs_index{node.getInputs().at(ir::operation::SquaredDifference::Input::LHS)}; const auto rhs_index{node.getInputs().at(ir::operation::SquaredDifference::Input::RHS)}; - auto ofm_alloc = _tensor_builder->at(ofm_index).get(); - auto lhs_alloc = _tensor_builder->at(lhs_index).get(); - auto rhs_alloc = _tensor_builder->at(rhs_index).get(); + auto ofm_tensor = _tensor_builder->at(ofm_index).get(); + auto lhs_tensor = _tensor_builder->at(lhs_index).get(); + auto rhs_tensor = _tensor_builder->at(rhs_index).get(); auto fn = std::make_unique<::arm_compute::NEElementwiseSquaredDiff>(); - fn->configure(lhs_alloc->handle(), rhs_alloc->handle(), ofm_alloc->handle()); + fn->configure(lhs_tensor->handle(), rhs_tensor->handle(), ofm_tensor->handle()); auto acl_fn = asAclFunction(std::move(fn)); @@ -1555,17 +1287,17 @@ void KernelGenerator::visit(const ir::operation::Sub &node) const auto activation = node.param().activation; - auto ofm_alloc = _tensor_builder->at(ofm_index).get(); - auto lhs_alloc = _tensor_builder->at(lhs_index).get(); - auto rhs_alloc = _tensor_builder->at(rhs_index).get(); + auto ofm_tensor = _tensor_builder->at(ofm_index).get(); + auto lhs_tensor = _tensor_builder->at(lhs_index).get(); + auto rhs_tensor = _tensor_builder->at(rhs_index).get(); auto fn = std::make_unique<::arm_compute::NEArithmeticSubtraction>(); - fn->configure(lhs_alloc->handle(), rhs_alloc->handle(), ofm_alloc->handle(), + fn->configure(lhs_tensor->handle(), rhs_tensor->handle(), ofm_tensor->handle(), arm_compute::ConvertPolicy::SATURATE); _return_fn = std::make_unique( - asAclFunction(std::move(fn)), ActivationBuilder::generate(activation, ofm_alloc->handle())); + asAclFunction(std::move(fn)), ActivationBuilder::generate(activation, ofm_tensor->handle())); } void KernelGenerator::visit(const ir::operation::Slice &node) @@ -1575,10 +1307,10 @@ void KernelGenerator::visit(const ir::operation::Slice &node) const auto begins_index{node.getInputs().at(ir::operation::Slice::Input::BEGINS)}; const auto sizes_index{node.getInputs().at(ir::operation::Slice::Input::SIZES)}; - auto outputData_alloc = _tensor_builder->at(output_index).get(); - auto inputData_alloc = _tensor_builder->at(input_index).get(); + auto outputData_tensor = _tensor_builder->at(output_index).get(); + auto inputData_tensor = _tensor_builder->at(input_index).get(); const auto frontend_layout = _current_op_seq_layout; - const auto backend_layout = inputData_alloc->layout(); + const auto backend_layout = inputData_tensor->layout(); // Set initializers for indices data such as order of inputData int input_rank = _ctx.at(input_index).shape().rank(); @@ -1628,7 +1360,7 @@ void KernelGenerator::visit(const ir::operation::Slice &node) auto fn = std::make_unique<::arm_compute::NESlice>(); - fn->configure(inputData_alloc->handle(), outputData_alloc->handle(), starts_set, ends_set); + fn->configure(inputData_tensor->handle(), outputData_tensor->handle(), starts_set, ends_set); auto acl_fn = asAclFunction(std::move(fn)); @@ -1643,10 +1375,10 @@ void KernelGenerator::visit(const ir::operation::StridedSlice &node) const auto ends_index{node.getInputs().at(ir::operation::StridedSlice::Input::ENDS)}; const auto 
strides_index{node.getInputs().at(ir::operation::StridedSlice::Input::STRIDES)}; - auto outputData_alloc = _tensor_builder->at(output_index).get(); - auto inputData_alloc = _tensor_builder->at(input_index).get(); + auto outputData_tensor = _tensor_builder->at(output_index).get(); + auto inputData_tensor = _tensor_builder->at(input_index).get(); const auto frontend_layout = _current_op_seq_layout; - const auto backend_layout = inputData_alloc->layout(); + const auto backend_layout = inputData_tensor->layout(); // Set initializers for indices data such as order of inputData int input_rank = _ctx.at(input_index).shape().rank(); @@ -1715,7 +1447,7 @@ void KernelGenerator::visit(const ir::operation::StridedSlice &node) auto fn = std::make_unique<::arm_compute::NEStridedSlice>(); - fn->configure(inputData_alloc->handle(), outputData_alloc->handle(), starts_set, ends_set, + fn->configure(inputData_tensor->handle(), outputData_tensor->handle(), starts_set, ends_set, strides_set, begin_mask, end_mask, shrink_axis_mask); auto acl_fn = asAclFunction(std::move(fn)); @@ -1749,16 +1481,16 @@ void KernelGenerator::visit(const ir::operation::TransposeConv &node) invalid_vertical = ofm_shape.H - (1 + (ifm_shape.H - 1) * stride.vertical) - (ker_shape.H - 1); } - auto ofm_alloc = _tensor_builder->at(ofm_index).get(); - auto ifm_alloc = _tensor_builder->at(ifm_index).get(); - auto ker_alloc = _tensor_builder->at(ker_index).get(); + auto ofm_tensor = _tensor_builder->at(ofm_index).get(); + auto ifm_tensor = _tensor_builder->at(ifm_index).get(); + auto ker_tensor = _tensor_builder->at(ker_index).get(); const auto tconv_info = acl_common::asPadStrideInfo(padding, stride); auto fn = std::make_unique<::arm_compute::NETransposeConvLayer>(); - fn->configure(ifm_alloc->handle(), ker_alloc->handle(), nullptr, ofm_alloc->handle(), tconv_info, - invalid_horizontal, invalid_vertical); + fn->configure(ifm_tensor->handle(), ker_tensor->handle(), nullptr, ofm_tensor->handle(), + tconv_info, invalid_horizontal, invalid_vertical); auto acl_fn = asAclFunction(std::move(fn)); @@ -1771,10 +1503,10 @@ void KernelGenerator::visit(const ir::operation::Transpose &node) const auto ifm_idx{node.getInputs().at(ir::operation::Transpose::Input::INPUT)}; const auto &perm{node.param().perm}; - auto ofm_alloc = _tensor_builder->at(ofm_idx).get(); - const auto ifm_alloc = _tensor_builder->at(ifm_idx).get(); + auto ofm_tensor = _tensor_builder->at(ofm_idx).get(); + const auto ifm_tensor = _tensor_builder->at(ifm_idx).get(); const auto frontend_layout = _current_op_seq_layout; - const auto backend_layout = ifm_alloc->layout(); + const auto backend_layout = ifm_tensor->layout(); const auto rank = _ctx.at(ifm_idx).shape().rank(); std::vector pv(perm.cbegin(), perm.cend()); @@ -1783,11 +1515,11 @@ void KernelGenerator::visit(const ir::operation::Transpose &node) std::unique_ptr<::arm_compute::IFunction> fn; - if (ifm_alloc->num_dimensions() <= 2 && ofm_alloc->num_dimensions() <= 2) + if (ifm_tensor->num_dimensions() <= 2 && ofm_tensor->num_dimensions() <= 2) { auto l = std::make_unique<::arm_compute::NETranspose>(); - l->configure(ifm_alloc->handle(), ofm_alloc->handle()); + l->configure(ifm_tensor->handle(), ofm_tensor->handle()); fn = std::move(l); } @@ -1795,7 +1527,7 @@ void KernelGenerator::visit(const ir::operation::Transpose &node) { auto l = std::make_unique<::arm_compute::NEPermute>(); - l->configure(ifm_alloc->handle(), ofm_alloc->handle(), backend_pv); + l->configure(ifm_tensor->handle(), ofm_tensor->handle(), backend_pv); fn = 
std::move(l); } @@ -1834,13 +1566,13 @@ void KernelGenerator::visit(const ir::operation::Unpack &node) for (const auto &output_index : output_indexes) { size_t output_rank = _ctx.at(output_index).shape().rank(); - const auto &output_alloc = _tensor_builder->at(output_index); - orig_outputs_acl_tensor_shapes.emplace_back(output_alloc->info()->tensor_shape()); - assert(output_rank == output_alloc->num_dimensions()); - if (output_rank != output_alloc->info()->num_dimensions()) + const auto &output_tensor = _tensor_builder->at(output_index); + orig_outputs_acl_tensor_shapes.emplace_back(output_tensor->info()->tensor_shape()); + assert(output_rank == output_tensor->num_dimensions()); + if (output_rank != output_tensor->info()->num_dimensions()) { // This means that high dimension's value is 1 and ifm tensor is applied dim_correction - output_alloc->info()->set_tensor_shape(acl_common::asTensorShape( + output_tensor->info()->set_tensor_shape(acl_common::asTensorShape( _ctx.at(output_index).shape(), _current_op_seq_layout, backend_layout, false)); } } @@ -1858,17 +1590,17 @@ void KernelGenerator::visit(const ir::operation::Add &node) const auto activation = node.param().activation; - auto ofm_alloc = _tensor_builder->at(ofm_index).get(); - auto lhs_alloc = _tensor_builder->at(lhs_index).get(); - auto rhs_alloc = _tensor_builder->at(rhs_index).get(); + auto ofm_tensor = _tensor_builder->at(ofm_index).get(); + auto lhs_tensor = _tensor_builder->at(lhs_index).get(); + auto rhs_tensor = _tensor_builder->at(rhs_index).get(); auto fn = std::make_unique<::arm_compute::NEArithmeticAddition>(); - fn->configure(lhs_alloc->handle(), rhs_alloc->handle(), ofm_alloc->handle(), + fn->configure(lhs_tensor->handle(), rhs_tensor->handle(), ofm_tensor->handle(), arm_compute::ConvertPolicy::SATURATE); _return_fn = std::make_unique( - asAclFunction(std::move(fn)), ActivationBuilder::generate(activation, ofm_alloc->handle())); + asAclFunction(std::move(fn)), ActivationBuilder::generate(activation, ofm_tensor->handle())); } void KernelGenerator::visit(const ir::operation::Div &node) @@ -1879,16 +1611,16 @@ void KernelGenerator::visit(const ir::operation::Div &node) const auto activation = node.param().activation; - auto ofm_alloc = _tensor_builder->at(ofm_index).get(); - auto lhs_alloc = _tensor_builder->at(lhs_index).get(); - auto rhs_alloc = _tensor_builder->at(rhs_index).get(); + auto ofm_tensor = _tensor_builder->at(ofm_index).get(); + auto lhs_tensor = _tensor_builder->at(lhs_index).get(); + auto rhs_tensor = _tensor_builder->at(rhs_index).get(); auto fn = std::make_unique<::arm_compute::NEElementwiseDivision>(); - fn->configure(lhs_alloc->handle(), rhs_alloc->handle(), ofm_alloc->handle()); + fn->configure(lhs_tensor->handle(), rhs_tensor->handle(), ofm_tensor->handle()); _return_fn = std::make_unique( - asAclFunction(std::move(fn)), ActivationBuilder::generate(activation, ofm_alloc->handle())); + asAclFunction(std::move(fn)), ActivationBuilder::generate(activation, ofm_tensor->handle())); } void KernelGenerator::visit(const ir::operation::Exp &node) @@ -1896,12 +1628,12 @@ void KernelGenerator::visit(const ir::operation::Exp &node) const auto output_index{node.getOutputs().at(0)}; const auto input_index{node.getInputs().at(ir::operation::Exp::Input::INPUT)}; - auto output_alloc = _tensor_builder->at(output_index).get(); - auto input_alloc = _tensor_builder->at(input_index).get(); + auto output_tensor = _tensor_builder->at(output_index).get(); + auto input_tensor = _tensor_builder->at(input_index).get(); auto fn 
= std::make_unique<::arm_compute::NEExpLayer>(); - fn->configure(input_alloc->handle(), output_alloc->handle()); + fn->configure(input_tensor->handle(), output_tensor->handle()); auto acl_fn = asAclFunction(std::move(fn)); @@ -1913,12 +1645,12 @@ void KernelGenerator::visit(const ir::operation::ExpandDims &node) const auto output_index{node.getOutputs().at(0)}; const auto input_index{node.getInputs().at(ir::operation::ExpandDims::Input::INPUT)}; - auto output_alloc = _tensor_builder->at(output_index).get(); - auto input_alloc = _tensor_builder->at(input_index).get(); + auto output_tensor = _tensor_builder->at(output_index).get(); + auto input_tensor = _tensor_builder->at(input_index).get(); auto fn = std::make_unique<::arm_compute::NEReshapeLayer>(); - fn->configure(input_alloc->handle(), output_alloc->handle()); + fn->configure(input_tensor->handle(), output_tensor->handle()); auto acl_fn = asAclFunction(std::move(fn)); @@ -1933,13 +1665,13 @@ void KernelGenerator::visit(const ir::operation::Comparison &node) const auto comparison_type = node.param().comparison_type; - auto output_alloc = _tensor_builder->at(output_index).get(); - auto input0_alloc = _tensor_builder->at(input0_index).get(); - auto input1_alloc = _tensor_builder->at(input1_index).get(); + auto output_tensor = _tensor_builder->at(output_index).get(); + auto input0_tensor = _tensor_builder->at(input0_index).get(); + auto input1_tensor = _tensor_builder->at(input1_index).get(); auto fn = std::make_unique<::arm_compute::NEElementwiseComparison>(); - fn->configure(input0_alloc->handle(), input1_alloc->handle(), output_alloc->handle(), + fn->configure(input0_tensor->handle(), input1_tensor->handle(), output_tensor->handle(), (arm_compute::ComparisonOperation)comparison_type); auto acl_fn = asAclFunction(std::move(fn)); @@ -1953,13 +1685,13 @@ void KernelGenerator::visit(const ir::operation::Min &node) const auto lhs_index{node.getInputs().at(ir::operation::Min::Input::LHS)}; const auto rhs_index{node.getInputs().at(ir::operation::Min::Input::RHS)}; - auto ofm_alloc = _tensor_builder->at(ofm_index).get(); - auto lhs_alloc = _tensor_builder->at(lhs_index).get(); - auto rhs_alloc = _tensor_builder->at(rhs_index).get(); + auto ofm_tensor = _tensor_builder->at(ofm_index).get(); + auto lhs_tensor = _tensor_builder->at(lhs_index).get(); + auto rhs_tensor = _tensor_builder->at(rhs_index).get(); auto fn = std::make_unique<::arm_compute::NEElementwiseMin>(); - fn->configure(lhs_alloc->handle(), rhs_alloc->handle(), ofm_alloc->handle()); + fn->configure(lhs_tensor->handle(), rhs_tensor->handle(), ofm_tensor->handle()); auto acl_fn = asAclFunction(std::move(fn)); @@ -1972,13 +1704,13 @@ void KernelGenerator::visit(const ir::operation::Max &node) const auto lhs_index{node.getInputs().at(ir::operation::Max::Input::LHS)}; const auto rhs_index{node.getInputs().at(ir::operation::Max::Input::RHS)}; - auto ofm_alloc = _tensor_builder->at(ofm_index).get(); - auto lhs_alloc = _tensor_builder->at(lhs_index).get(); - auto rhs_alloc = _tensor_builder->at(rhs_index).get(); + auto ofm_tensor = _tensor_builder->at(ofm_index).get(); + auto lhs_tensor = _tensor_builder->at(lhs_index).get(); + auto rhs_tensor = _tensor_builder->at(rhs_index).get(); auto fn = std::make_unique<::arm_compute::NEElementwiseMax>(); - fn->configure(lhs_alloc->handle(), rhs_alloc->handle(), ofm_alloc->handle()); + fn->configure(lhs_tensor->handle(), rhs_tensor->handle(), ofm_tensor->handle()); auto acl_fn = asAclFunction(std::move(fn)); diff --git 
a/runtime/onert/backend/cpu/Backend.h b/runtime/onert/backend/cpu/Backend.h index 2daf06a..56bd352 100644 --- a/runtime/onert/backend/cpu/Backend.h +++ b/runtime/onert/backend/cpu/Backend.h @@ -17,6 +17,7 @@ #ifndef __ONERT_BACKEND_CPU_BACKEND_H__ #define __ONERT_BACKEND_CPU_BACKEND_H__ +#include "BackendContext.h" #include "Config.h" #include "ConstantInitializer.h" #include "KernelGenerator.h" @@ -39,9 +40,9 @@ public: std::shared_ptr config() const override { return _config; } - std::unique_ptr newContext(const ir::Graph &graph, - const std::shared_ptr &kb, - bool) const override + std::unique_ptr + newContext(const ir::Graph &graph, const std::shared_ptr &kb, + bool) const override { const auto &operands = graph.operands(); const auto &operations = graph.operations(); @@ -49,7 +50,8 @@ public: auto tb = std::make_shared(); context->tensor_builder = tb; context->constant_initializer = std::make_shared(operands, tb); - context->kernel_gen = std::make_shared(operands, operations, tb, kb); + context->kernel_gen = std::make_shared(operands, operations, tb, kb, + context->external_context()); context->tensor_register = nullptr; context->optimizer = nullptr; return context; diff --git a/runtime/onert/backend/cpu/BackendContext.h b/runtime/onert/backend/cpu/BackendContext.h new file mode 100644 index 0000000..f314a8e --- /dev/null +++ b/runtime/onert/backend/cpu/BackendContext.h @@ -0,0 +1,58 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef __ONERT_BACKEND_CPU_BACKEND_CONTEXT_H__ +#define __ONERT_BACKEND_CPU_BACKEND_CONTEXT_H__ + +#include +#include "ExternalContext.h" + +namespace onert +{ +namespace backend +{ +namespace cpu +{ + +class BackendContext : public onert::backend::BackendContext +{ +public: + BackendContext(const Backend *backend, const ir::Graph *graph, + std::shared_ptr tensor_builder = nullptr, + std::shared_ptr constant_initializer = nullptr, + std::shared_ptr kernel_gen = nullptr, + std::shared_ptr tensor_register = nullptr, + std::shared_ptr optimizer = nullptr) + : onert::backend::BackendContext(backend, graph, tensor_builder, constant_initializer, + kernel_gen, tensor_register, optimizer), + _external_context(new ExternalContext) + { + } + + std::shared_ptr external_context() { return _external_context; } + +private: + // NOTE ruy context has a thread pool, and when multiple ruy contexts are created, + // the thread pool is also created in duplicate + // TODO Create one ruy context for session + std::shared_ptr _external_context; +}; + +} // namespace cpu +} // namespace backend +} // namespace onert + +#endif // __ONERT_BACKEND_CPU_BACKEND_CONTEXT_H__ diff --git a/runtime/onert/backend/cpu/CMakeLists.txt b/runtime/onert/backend/cpu/CMakeLists.txt index e997a22..01a3cd1 100644 --- a/runtime/onert/backend/cpu/CMakeLists.txt +++ b/runtime/onert/backend/cpu/CMakeLists.txt @@ -1,5 +1,7 @@ set(LIB_ONERT_BACKEND_CPU onert_backend_cpu) +nnfw_find_package(Ruy REQUIRED) + file(GLOB_RECURSE SOURCES "*.cc") add_library(${LIB_ONERT_BACKEND_CPU} SHARED ${SOURCES}) @@ -8,6 +10,8 @@ target_link_libraries(${LIB_ONERT_BACKEND_CPU} PRIVATE nnfw_lib_cker) target_link_libraries(${LIB_ONERT_BACKEND_CPU} PRIVATE onert_core) target_link_libraries(${LIB_ONERT_BACKEND_CPU} PRIVATE nnfw_common) target_link_libraries(${LIB_ONERT_BACKEND_CPU} PRIVATE nnfw_coverage) +target_link_libraries(${LIB_ONERT_BACKEND_CPU} PRIVATE ruy) +target_link_libraries(${LIB_ONERT_BACKEND_CPU} INTERFACE ruy_instrumentation) set_target_properties(${LIB_ONERT_BACKEND_CPU} PROPERTIES OUTPUT_NAME backend_cpu) diff --git a/runtime/onert/backend/cpu/ConstantInitializer.cc b/runtime/onert/backend/cpu/ConstantInitializer.cc index 71e3136..deb27f0 100644 --- a/runtime/onert/backend/cpu/ConstantInitializer.cc +++ b/runtime/onert/backend/cpu/ConstantInitializer.cc @@ -15,6 +15,7 @@ */ #include "ConstantInitializer.h" +#include "Tensor.h" namespace onert { @@ -30,39 +31,61 @@ ConstantInitializer::ConstantInitializer(const ir::Operands &operands, // DO NOTHING } +void ConstantInitializer::registerDefaultInitializer(const ir::OperandIndex &index, + const ir::Operand &obj) +{ + registerExternalInitializer(index, obj); +} + +void ConstantInitializer::registerExternalInitializer(const ir::OperandIndex &index, + const ir::Operand &obj) +{ + // For only CONSTANTS + // TODO Add to check if tensor has been allocated + if (!obj.isConstant()) + return; + + _init_map[index] = [](const onert::ir::Operand &model_obj, onert::backend::ITensor &itensor) { + auto data = model_obj.shareData(); + assert(data && data->base()); + ExternalTensor &tensor = dynamic_cast(itensor); + tensor.setData(data); + }; +} + void ConstantInitializer::visit(const ir::operation::Conv2D &node) { const auto &kernel_index = node.getInputs().at(ir::operation::Conv2D::KERNEL); const auto &kernel_obj = _operands.at(kernel_index); - registerCopyInitializer(kernel_index, kernel_obj); + registerExternalInitializer(kernel_index, kernel_obj); const auto &bias_index = 
node.getInputs().at(ir::operation::Conv2D::BIAS); const auto &bias_obj = _operands.at(bias_index); - registerCopyInitializer(bias_index, bias_obj); + registerExternalInitializer(bias_index, bias_obj); } void ConstantInitializer::visit(const ir::operation::DepthwiseConv2D &node) { const auto &kernel_index = node.getInputs().at(ir::operation::DepthwiseConv2D::KERNEL); const auto &kernel_obj = _operands.at(kernel_index); - registerCopyInitializer(kernel_index, kernel_obj); + registerExternalInitializer(kernel_index, kernel_obj); const auto &bias_index = node.getInputs().at(ir::operation::DepthwiseConv2D::BIAS); const auto &bias_obj = _operands.at(bias_index); - registerCopyInitializer(bias_index, bias_obj); + registerExternalInitializer(bias_index, bias_obj); } void ConstantInitializer::visit(const ir::operation::FullyConnected &node) { const auto &weight_index = node.getInputs().at(ir::operation::FullyConnected::WEIGHT); const auto &weight_obj = _operands.at(weight_index); - registerCopyInitializer(weight_index, weight_obj); + registerExternalInitializer(weight_index, weight_obj); const auto &bias_index = node.getInputs().at(ir::operation::FullyConnected::BIAS); if (!bias_index.undefined()) { const auto &bias_obj = _operands.at(bias_index); - registerCopyInitializer(bias_index, bias_obj); + registerExternalInitializer(bias_index, bias_obj); } } diff --git a/runtime/onert/backend/cpu/ConstantInitializer.h b/runtime/onert/backend/cpu/ConstantInitializer.h index bd06c64..de03a69 100644 --- a/runtime/onert/backend/cpu/ConstantInitializer.h +++ b/runtime/onert/backend/cpu/ConstantInitializer.h @@ -36,6 +36,15 @@ public: const std::shared_ptr &tensor_builder); public: + void registerDefaultInitializer(const ir::OperandIndex &index, const ir::Operand &obj) override; + + // TODO: For now the only cpu backend supports constant tensor to use data from external + // If the other backend supports (to do this, + // ExternalTensor should be abstract such as IExternal, maybe), + // this can be an interface of IConstantInitializer + void registerExternalInitializer(const ir::OperandIndex &, const ir::Operand &); + +public: void visit(const ir::operation::Conv2D &) override; void visit(const ir::operation::DepthwiseConv2D &) override; void visit(const ir::operation::FullyConnected &) override; diff --git a/runtime/onert/backend/cpu/ExternalContext.h b/runtime/onert/backend/cpu/ExternalContext.h new file mode 100644 index 0000000..6627412 --- /dev/null +++ b/runtime/onert/backend/cpu/ExternalContext.h @@ -0,0 +1,64 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef __ONERT_BACKEND_CPU_EXTERNAL_CONTEXT_H__ +#define __ONERT_BACKEND_CPU_EXTERNAL_CONTEXT_H__ + +#include +#include +#include + +namespace +{ +const int kDefaultNumThreadpoolThreads = 1; +} + +namespace onert +{ +namespace backend +{ +namespace cpu +{ + +class ExternalContext : public IExternalContext +{ +public: + ExternalContext() : _ruy_context(new ruy::Context) + { + setMaxNumThreads(onert::util::getConfigInt(onert::util::config::RUY_THREADS)); +#ifdef USE_RUY_GEMV + _ruy_context->cache_policy = ruy::kCacheLHSOnNarrowMul; +#endif + } + + void setMaxNumThreads(int max_num_threads) + { + const int target_num_threads = + max_num_threads > -1 ? max_num_threads : kDefaultNumThreadpoolThreads; + _ruy_context->max_num_threads = target_num_threads; + } + + ruy::Context *ruy_context() const { return _ruy_context.get(); } + +private: + const std::unique_ptr _ruy_context; +}; + +} // namespace cpu +} // namespace backend +} // namespace onert + +#endif // __ONERT_BACKEND_CPU_EXTERNAL_CONTEXT_H__ diff --git a/runtime/onert/backend/cpu/KernelGenerator.cc b/runtime/onert/backend/cpu/KernelGenerator.cc index 72f9606..7939fe8 100644 --- a/runtime/onert/backend/cpu/KernelGenerator.cc +++ b/runtime/onert/backend/cpu/KernelGenerator.cc @@ -20,6 +20,7 @@ #include "ops/AddLayer.h" #include "ops/ArgMinMaxLayer.h" #include "ops/AvgPoolLayer.h" +#include "ops/BatchToSpaceNDLayer.h" #include "ops/CastLayer.h" #include "ops/CompareLayer.h" #include "ops/ConcatLayer.h" @@ -49,7 +50,9 @@ #include "ops/RangeLayer.h" #include "ops/ReduceLayer.h" #include "ops/ReLULayer.h" +#include "ops/ReLU6Layer.h" #include "ops/ReshapeLayer.h" +#include "ops/ResizeBilinearLayer.h" #include "ops/ReverseLayer.h" #include "ops/RoundLayer.h" #include "ops/RsqrtLayer.h" @@ -60,7 +63,9 @@ #include "ops/SoftMaxLayer.h" #include "ops/StridedSliceLayer.h" #include "ops/SpaceToBatchNDLayer.h" +#include "ops/SpaceToDepthLayer.h" #include "ops/SplitLayer.h" +#include "ops/SplitVLayer.h" #include "ops/SubLayer.h" #include "ops/TanhLayer.h" #include "ops/TileLayer.h" @@ -70,11 +75,14 @@ #include "ops/ZerosLikeLayer.h" #include "ops/SquaredDiffLayer.h" #include "ops/LogicalOrLayer.h" +#include "ops/L2NormLayer.h" #include "ops/MatrixBandPartLayer.h" #include "ops/BatchMatMulLayer.h" #include "ops/BroadcastToLayer.h" #include "ops/FusedBatchNormLayer.h" #include "ops/LogSoftMaxLayer.h" +#include "ops/QuantizeLayer.h" +#include "ops/StatelessRandomUniformLayer.h" #include #include @@ -119,9 +127,11 @@ ops::ReduceType convertReduceType(ir::operation::Reduce::ReduceType reduce_type_ KernelGenerator::KernelGenerator( const ir::Operands &operands_ctx, const ir::Operations &operations_ctx, const std::shared_ptr &tensor_builder, - const std::shared_ptr &kernel_builder) + const std::shared_ptr &kernel_builder, + const std::shared_ptr &external_context) : _ctx(operands_ctx), _operations_ctx{operations_ctx}, _tensor_builder(tensor_builder), - _kernel_builder(kernel_builder), _current_op_seq_layout(ir::Layout::UNKNOWN) + _kernel_builder(kernel_builder), _current_op_seq_layout(ir::Layout::UNKNOWN), + _external_context(external_context) { // DO NOTHING } @@ -184,10 +194,10 @@ void KernelGenerator::visit(const ir::operation::Conv2D &node) const auto ker_index{node.getInputs().at(Conv2D::Input::KERNEL)}; const auto bias_index{node.getInputs().at(Conv2D::Input::BIAS)}; - auto ofm_alloc = _tensor_builder->portableAt(ofm_index).get(); - auto ifm_alloc = _tensor_builder->portableAt(ifm_index).get(); - auto ker_alloc = 
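ExternalContext above owns the ruy::Context for a backend context and sizes its thread pool from the RUY_THREADS configuration value. A standalone sketch of the fallback rule in setMaxNumThreads, where a negative value means "not configured":

#include <cassert>

// Mirrors ExternalContext::setMaxNumThreads: RUY_THREADS < 0 falls back to one thread.
int resolveRuyThreadCount(int configured_max_threads)
{
  const int kDefaultNumThreadpoolThreads = 1;  // same default as the new header above
  return configured_max_threads > -1 ? configured_max_threads : kDefaultNumThreadpoolThreads;
}

int main()
{
  assert(resolveRuyThreadCount(4) == 4);   // RUY_THREADS=4
  assert(resolveRuyThreadCount(-1) == 1);  // unset: single-threaded ruy
  return 0;
}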
_tensor_builder->portableAt(ker_index).get(); - auto bias_alloc = _tensor_builder->portableAt(bias_index).get(); + auto ofm_tensor = _tensor_builder->portableAt(ofm_index).get(); + auto ifm_tensor = _tensor_builder->portableAt(ifm_index).get(); + auto ker_tensor = _tensor_builder->portableAt(ker_index).get(); + auto bias_tensor = _tensor_builder->portableAt(bias_index).get(); const auto stride = node.param().stride; const auto activation = node.param().activation; @@ -196,9 +206,9 @@ void KernelGenerator::visit(const ir::operation::Conv2D &node) if (_ctx.at(ifm_index).info().isDynamic() || _ctx.at(ker_index).info().isDynamic()) { - fn->configure(ifm_alloc, ker_alloc, bias_alloc, param_padding.type, param_padding.param.left, + fn->configure(ifm_tensor, ker_tensor, bias_tensor, param_padding.type, param_padding.param.left, param_padding.param.right, param_padding.param.top, param_padding.param.bottom, - stride.horizontal, stride.vertical, activation, ofm_alloc); + stride.horizontal, stride.vertical, activation, ofm_tensor); _return_fn = std::move(fn); return; @@ -213,9 +223,9 @@ void KernelGenerator::visit(const ir::operation::Conv2D &node) const auto padding = ir::calculatePadding(param_padding, ifm_shape, ofm_shape, stride, ker_width, ker_height); - fn->configure(ifm_alloc, ker_alloc, bias_alloc, param_padding.type, padding.left, padding.right, - padding.top, padding.bottom, stride.horizontal, stride.vertical, activation, - ofm_alloc); + fn->configure(ifm_tensor, ker_tensor, bias_tensor, param_padding.type, padding.left, + padding.right, padding.top, padding.bottom, stride.horizontal, stride.vertical, + activation, ofm_tensor); _return_fn = std::move(fn); } @@ -241,16 +251,16 @@ void KernelGenerator::visit(const ir::operation::DepthwiseConv2D &node) const auto multiplier = node.param().multiplier; const auto activation = node.param().activation; - auto ofm_alloc = _tensor_builder->portableAt(ofm_index).get(); - auto ifm_alloc = _tensor_builder->portableAt(ifm_index).get(); - auto ker_alloc = _tensor_builder->portableAt(ker_index).get(); - auto bias_alloc = _tensor_builder->portableAt(bias_index).get(); + auto ofm_tensor = _tensor_builder->portableAt(ofm_index).get(); + auto ifm_tensor = _tensor_builder->portableAt(ifm_index).get(); + auto ker_tensor = _tensor_builder->portableAt(ker_index).get(); + auto bias_tensor = _tensor_builder->portableAt(bias_index).get(); auto fn = std::make_unique(); - fn->configure(ifm_alloc, ker_alloc, bias_alloc, padding.left, padding.right, padding.top, + fn->configure(ifm_tensor, ker_tensor, bias_tensor, padding.left, padding.right, padding.top, padding.bottom, stride.horizontal, stride.vertical, multiplier, activation, - ofm_alloc); + ofm_tensor); _return_fn = std::move(fn); } @@ -270,13 +280,13 @@ void KernelGenerator::visit(const ir::operation::MaxPool2D &node) ir::calculatePadding(node.param().padding, ifm_shape, ofm_shape, stride, kw, kh); const auto activation = node.param().activation; - auto ofm_alloc = _tensor_builder->portableAt(ofm_index).get(); - auto ifm_alloc = _tensor_builder->portableAt(ifm_index).get(); + auto ofm_tensor = _tensor_builder->portableAt(ofm_index).get(); + auto ifm_tensor = _tensor_builder->portableAt(ifm_index).get(); auto fn = std::make_unique(); - fn->configure(ifm_alloc, padding.left, padding.right, padding.top, padding.bottom, - stride.horizontal, stride.vertical, kw, kh, activation, ofm_alloc); + fn->configure(ifm_tensor, padding.left, padding.right, padding.top, padding.bottom, + stride.horizontal, stride.vertical, kw, 
kh, activation, ofm_tensor); _return_fn = std::move(fn); } @@ -295,13 +305,13 @@ void KernelGenerator::visit(const ir::operation::AvgPool2D &node) ir::calculatePadding(node.param().padding, ifm_shape, ofm_shape, stride, kw, kh); const auto activation = node.param().activation; - auto ofm_alloc = _tensor_builder->portableAt(ofm_index).get(); - auto ifm_alloc = _tensor_builder->portableAt(ifm_index).get(); + auto ofm_tensor = _tensor_builder->portableAt(ofm_index).get(); + auto ifm_tensor = _tensor_builder->portableAt(ifm_index).get(); auto fn = std::make_unique(); - fn->configure(ifm_alloc, padding.left, padding.right, padding.top, padding.bottom, - stride.horizontal, stride.vertical, kw, kh, activation, ofm_alloc); + fn->configure(ifm_tensor, padding.left, padding.right, padding.top, padding.bottom, + stride.horizontal, stride.vertical, kw, kh, activation, ofm_tensor); _return_fn = std::move(fn); } @@ -313,7 +323,7 @@ void KernelGenerator::visit(const ir::operation::Concat &node) const auto rank = _ctx.at(ofm_index).shape().rank(); const auto axis = ops::getAxis(rank, node.param().axis, _current_op_seq_layout); - auto output_alloc = _tensor_builder->portableAt(ofm_index).get(); + auto output_tensor = _tensor_builder->portableAt(ofm_index).get(); std::vector input_tensors; for (auto &ifm_idx : node.getInputs()) @@ -321,7 +331,33 @@ void KernelGenerator::visit(const ir::operation::Concat &node) auto fn = std::make_unique(); - fn->configure(input_tensors, axis, output_alloc); + fn->configure(input_tensors, axis, output_tensor); + + _return_fn = std::move(fn); +} + +void KernelGenerator::visit(const ir::operation::BatchToSpaceND &node) +{ + const auto output_index{node.getOutputs().at(0)}; + const auto input_index{node.getInputs().at(ir::operation::BatchToSpaceND::INPUT)}; + const auto block_size_index{node.getInputs().at(ir::operation::BatchToSpaceND::BLOCK_SIZE)}; + + auto output_alloc = _tensor_builder->portableAt(output_index).get(); + auto input_alloc = _tensor_builder->portableAt(input_index).get(); + auto block_size_alloc = _tensor_builder->portableAt(block_size_index).get(); + + auto fn = std::make_unique(); + + IPortableTensor *crops_alloc = nullptr; + const auto NNApiInputs = 2; + + if (node.getInputs().size() != NNApiInputs) + { + const auto crops_data_index{node.getInputs().at(ir::operation::BatchToSpaceND::CROPS_DATA)}; + crops_alloc = _tensor_builder->portableAt(crops_data_index).get(); + } + + fn->configure(input_alloc, output_alloc, block_size_alloc, crops_alloc); _return_fn = std::move(fn); } @@ -332,13 +368,13 @@ void KernelGenerator::visit(const ir::operation::Fill &node) const auto input_index{node.getInputs().at(ir::operation::Fill::Input::INPUT)}; const auto value_index{node.getInputs().at(ir::operation::Fill::Input::VALUE)}; - auto output_alloc = _tensor_builder->portableAt(output_index).get(); - auto input_alloc = _tensor_builder->portableAt(input_index).get(); - auto value_alloc = _tensor_builder->portableAt(value_index).get(); + auto output_tensor = _tensor_builder->portableAt(output_index).get(); + auto input_tensor = _tensor_builder->portableAt(input_index).get(); + auto value_tensor = _tensor_builder->portableAt(value_index).get(); auto fn = std::make_unique(); - fn->configure(input_alloc, value_alloc, output_alloc); + fn->configure(input_tensor, value_tensor, output_tensor); _return_fn = std::move(fn); } @@ -353,15 +389,16 @@ void KernelGenerator::visit(const ir::operation::FullyConnected &node) const auto 
bias_index{node.getInputs().at(FullyConnected::Input::BIAS)}; const auto activation = node.param().activation; - auto output_alloc = _tensor_builder->portableAt(output_index).get(); - auto input_alloc = _tensor_builder->portableAt(input_index).get(); - auto weight_alloc = _tensor_builder->portableAt(weight_index).get(); - auto bias_alloc = + auto output_tensor = _tensor_builder->portableAt(output_index).get(); + auto input_tensor = _tensor_builder->portableAt(input_index).get(); + auto weight_tensor = _tensor_builder->portableAt(weight_index).get(); + auto bias_tensor = bias_index.undefined() ? nullptr : _tensor_builder->portableAt(bias_index).get(); auto fn = std::make_unique(); - fn->configure(input_alloc, weight_alloc, bias_alloc, activation, output_alloc); + fn->configure(input_tensor, weight_tensor, bias_tensor, activation, output_tensor, + _external_context); _return_fn = std::move(fn); } @@ -371,21 +408,21 @@ void KernelGenerator::visit(const ir::operation::Reshape &node) const auto output_index{node.getOutputs().at(0)}; const auto input_index{node.getInputs().at(ir::operation::Reshape::Input::INPUT)}; - auto output_alloc = _tensor_builder->portableAt(output_index).get(); - auto input_alloc = _tensor_builder->portableAt(input_index).get(); + auto output_tensor = _tensor_builder->portableAt(output_index).get(); + auto input_tensor = _tensor_builder->portableAt(input_index).get(); // optional 2nd input - IPortableTensor *shape_alloc = nullptr; + IPortableTensor *shape_tensor = nullptr; if (node.getInputs().size() == 2) { const auto shape_index{node.getInputs().at(ir::operation::Reshape::Input::SHAPE)}; - shape_alloc = _tensor_builder->portableAt(shape_index).get(); + shape_tensor = _tensor_builder->portableAt(shape_index).get(); } auto fn = std::make_unique(); - fn->configure(input_alloc, shape_alloc, output_alloc); + fn->configure(input_tensor, shape_tensor, output_tensor); _return_fn = std::move(fn); } @@ -394,13 +431,13 @@ void KernelGenerator::visit(const ir::operation::Squeeze &node) const auto output_index{node.getOutputs().at(0)}; const auto input_index{node.getInputs().at(ir::operation::Squeeze::Input::INPUT)}; - auto output_alloc = _tensor_builder->portableAt(output_index).get(); - auto input_alloc = _tensor_builder->portableAt(input_index).get(); + auto output_tensor = _tensor_builder->portableAt(output_index).get(); + auto input_tensor = _tensor_builder->portableAt(input_index).get(); // Squeeze can share same kernel with reshape auto fn = std::make_unique(); - fn->configure(input_alloc, nullptr, output_alloc); + fn->configure(input_tensor, nullptr, output_tensor); _return_fn = std::move(fn); } @@ -412,12 +449,12 @@ void KernelGenerator::visit(const ir::operation::Softmax &node) const auto beta = node.param().beta; - auto output_alloc = _tensor_builder->portableAt(output_index).get(); - auto input_alloc = _tensor_builder->portableAt(input_index).get(); + auto output_tensor = _tensor_builder->portableAt(output_index).get(); + auto input_tensor = _tensor_builder->portableAt(input_index).get(); auto fn = std::make_unique(); - fn->configure(input_alloc, beta, output_alloc); + fn->configure(input_tensor, beta, output_tensor); _return_fn = std::move(fn); } @@ -430,13 +467,13 @@ void KernelGenerator::visit(const ir::operation::Add &node) const auto activation = node.param().activation; - auto ofm_alloc = _tensor_builder->portableAt(ofm_index).get(); - auto lhs_alloc = _tensor_builder->portableAt(lhs_index).get(); - auto rhs_alloc = _tensor_builder->portableAt(rhs_index).get(); + 
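The FullyConnected lowering above is the first user of the new constructor argument: the shared ExternalContext is forwarded at configure() time so the ruy-backed GEMM reuses the context's thread pool instead of creating its own. Roughly, with the template argument written out (it is elided in this rendering) and ops::FullyConnectedLayer assumed as the layer type, the hunk amounts to:

// Illustrative restatement of the hunk above, not a verbatim copy of upstream.
auto fn = std::make_unique<ops::FullyConnectedLayer>();   // cpu ops layer (assumed name)
fn->configure(input_tensor, weight_tensor, bias_tensor, activation, output_tensor,
              _external_context);                         // new: shared ruy context
_return_fn = std::move(fn);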
auto ofm_tensor = _tensor_builder->portableAt(ofm_index).get(); + auto lhs_tensor = _tensor_builder->portableAt(lhs_index).get(); + auto rhs_tensor = _tensor_builder->portableAt(rhs_index).get(); auto fn = std::make_unique(); - fn->configure(lhs_alloc, rhs_alloc, activation, ofm_alloc); + fn->configure(lhs_tensor, rhs_tensor, activation, ofm_tensor); _return_fn = std::move(fn); } @@ -447,15 +484,15 @@ void KernelGenerator::visit(const ir::operation::Comparison &node) const auto lhs_index{node.getInputs().at(ir::operation::Comparison::Input::INPUT0)}; const auto rhs_index{node.getInputs().at(ir::operation::Comparison::Input::INPUT1)}; - auto ofm_alloc = _tensor_builder->portableAt(ofm_index).get(); - auto lhs_alloc = _tensor_builder->portableAt(lhs_index).get(); - auto rhs_alloc = _tensor_builder->portableAt(rhs_index).get(); + auto ofm_tensor = _tensor_builder->portableAt(ofm_index).get(); + auto lhs_tensor = _tensor_builder->portableAt(lhs_index).get(); + auto rhs_tensor = _tensor_builder->portableAt(rhs_index).get(); auto comparison_type = node.param().comparison_type; auto fn = std::make_unique(); - fn->configure(lhs_alloc, rhs_alloc, comparison_type, ofm_alloc); + fn->configure(lhs_tensor, rhs_tensor, comparison_type, ofm_tensor); _return_fn = std::move(fn); } @@ -466,11 +503,11 @@ void KernelGenerator::visit(const ir::operation::Gather &node) const auto input_index{node.getInputs().at(ir::operation::Gather::Input::INPUT)}; const auto indices_index{node.getInputs().at(ir::operation::Gather::Input::INDICES)}; - auto output_alloc = _tensor_builder->portableAt(output_index).get(); - auto input_alloc = _tensor_builder->portableAt(input_index).get(); - auto indices_alloc = _tensor_builder->portableAt(indices_index).get(); + auto output_tensor = _tensor_builder->portableAt(output_index).get(); + auto input_tensor = _tensor_builder->portableAt(input_index).get(); + auto indices_tensor = _tensor_builder->portableAt(indices_index).get(); - const auto backend_layout = output_alloc->layout(); + const auto backend_layout = output_tensor->layout(); UNUSED_RELEASE(backend_layout); // NOTE The frontend layout and backend layout must be the same for this operation. @@ -481,8 +518,8 @@ void KernelGenerator::visit(const ir::operation::Gather &node) // a model. For example, if a model in NHWC has this operation as output rank == 4, indices // rank == 2 and axis == 2, this operation should work as the axis W and C, but the axis W // and C are not sequential in NCHW. So the backend in NCHW cannot handle this case. 
- assert(backend_layout == input_alloc->layout()); - assert(backend_layout == indices_alloc->layout()); + assert(backend_layout == input_tensor->layout()); + assert(backend_layout == indices_tensor->layout()); const auto &input_shape = _ctx.at(input_index).shape(); UNUSED_RELEASE(input_shape); assert(input_shape.rank() < 4 || _current_op_seq_layout == backend_layout); @@ -492,7 +529,7 @@ void KernelGenerator::visit(const ir::operation::Gather &node) auto fn = std::make_unique(); - fn->configure(input_alloc, indices_alloc, output_alloc, axis_value); + fn->configure(input_tensor, indices_tensor, output_tensor, axis_value); _return_fn = std::move(fn); } @@ -506,13 +543,13 @@ void KernelGenerator::visit(const ir::operation::Sub &node) const auto activation = node.param().activation; - auto ofm_alloc = _tensor_builder->portableAt(ofm_index).get(); - auto lhs_alloc = _tensor_builder->portableAt(lhs_index).get(); - auto rhs_alloc = _tensor_builder->portableAt(rhs_index).get(); + auto ofm_tensor = _tensor_builder->portableAt(ofm_index).get(); + auto lhs_tensor = _tensor_builder->portableAt(lhs_index).get(); + auto rhs_tensor = _tensor_builder->portableAt(rhs_index).get(); auto fn = std::make_unique(); - fn->configure(lhs_alloc, rhs_alloc, activation, ofm_alloc); + fn->configure(lhs_tensor, rhs_tensor, activation, ofm_tensor); _return_fn = std::move(fn); } @@ -526,13 +563,13 @@ void KernelGenerator::visit(const ir::operation::Mul &node) const auto activation = node.param().activation; - auto ofm_alloc = _tensor_builder->portableAt(ofm_index).get(); - auto lhs_alloc = _tensor_builder->portableAt(lhs_index).get(); - auto rhs_alloc = _tensor_builder->portableAt(rhs_index).get(); + auto ofm_tensor = _tensor_builder->portableAt(ofm_index).get(); + auto lhs_tensor = _tensor_builder->portableAt(lhs_index).get(); + auto rhs_tensor = _tensor_builder->portableAt(rhs_index).get(); auto fn = std::make_unique(); - fn->configure(lhs_alloc, rhs_alloc, activation, ofm_alloc); + fn->configure(lhs_tensor, rhs_tensor, activation, ofm_tensor); _return_fn = std::move(fn); } @@ -547,18 +584,18 @@ void KernelGenerator::visit(const ir::operation::OneHot &node) const auto axis = node.param().axis; - auto output_alloc = _tensor_builder->portableAt(output_index).get(); - auto indices_alloc = _tensor_builder->portableAt(indices_index).get(); - auto depth_alloc = _tensor_builder->portableAt(depth_index).get(); - auto onvalue_alloc = _tensor_builder->portableAt(onvalue_index).get(); - auto offvalue_alloc = _tensor_builder->portableAt(offvalue_index).get(); + auto output_tensor = _tensor_builder->portableAt(output_index).get(); + auto indices_tensor = _tensor_builder->portableAt(indices_index).get(); + auto depth_tensor = _tensor_builder->portableAt(depth_index).get(); + auto onvalue_tensor = _tensor_builder->portableAt(onvalue_index).get(); + auto offvalue_tensor = _tensor_builder->portableAt(offvalue_index).get(); - assert(indices_alloc->data_type() == OperandType::INT32); - assert(axis <= static_cast(indices_alloc->num_dimensions())); + assert(indices_tensor->data_type() == OperandType::INT32); + assert(axis <= static_cast(indices_tensor->num_dimensions())); auto fn = std::make_unique(); - fn->configure(indices_alloc, depth_alloc, onvalue_alloc, offvalue_alloc, output_alloc, axis); + fn->configure(indices_tensor, depth_tensor, onvalue_tensor, offvalue_tensor, output_tensor, axis); _return_fn = std::move(fn); } @@ -572,13 +609,13 @@ void KernelGenerator::visit(const ir::operation::Div &node) const auto activation = 
node.param().activation; - auto ofm_alloc = _tensor_builder->portableAt(ofm_index).get(); - auto lhs_alloc = _tensor_builder->portableAt(lhs_index).get(); - auto rhs_alloc = _tensor_builder->portableAt(rhs_index).get(); + auto ofm_tensor = _tensor_builder->portableAt(ofm_index).get(); + auto lhs_tensor = _tensor_builder->portableAt(lhs_index).get(); + auto rhs_tensor = _tensor_builder->portableAt(rhs_index).get(); auto fn = std::make_unique(); - fn->configure(lhs_alloc, rhs_alloc, activation, ofm_alloc); + fn->configure(lhs_tensor, rhs_tensor, activation, ofm_tensor); _return_fn = std::move(fn); } @@ -587,16 +624,16 @@ void KernelGenerator::visit(const ir::operation::Einsum &node) { const auto ofm_index{node.getOutputs().at(0)}; - auto output_alloc = _tensor_builder->portableAt(ofm_index).get(); - std::vector input_allocs; + auto output_tensor = _tensor_builder->portableAt(ofm_index).get(); + std::vector input_tensors; for (auto &ifm_idx : node.getInputs()) - input_allocs.emplace_back(_tensor_builder->portableAt(ifm_idx).get()); + input_tensors.emplace_back(_tensor_builder->portableAt(ifm_idx).get()); const auto equation = node.param().equation; auto fn = std::make_unique(); - fn->configure(input_allocs, equation, output_alloc); + fn->configure(input_tensors, equation, output_tensor); _return_fn = std::move(fn); } @@ -605,14 +642,14 @@ void KernelGenerator::visit(const ir::operation::Custom &node) { auto fill_op_info = [&](const ir::OperandIndexSequence &opSeq, std::vector &types, - std::vector> &allocs) { + std::vector> &tensors) { for (auto &idx : opSeq) { const auto &operand = _ctx.at(idx); // TODO make sure using `_current_op_seq_layout` is correct for custom operations types.emplace_back(custom::TypeInfo{operand.shape(), operand.typeInfo().type()}); - auto in_alloc = _tensor_builder->portableAt(idx); - allocs.emplace_back(in_alloc); + auto in_tensor = _tensor_builder->portableAt(idx); + tensors.emplace_back(in_tensor); } }; @@ -634,12 +671,12 @@ void KernelGenerator::visit(const ir::operation::Exp &node) const auto output_index{node.getOutputs().at(0)}; const auto input_index{node.getInputs().at(ir::operation::Exp::Input::INPUT)}; - auto output_alloc = _tensor_builder->portableAt(output_index).get(); - auto input_alloc = _tensor_builder->portableAt(input_index).get(); + auto output_tensor = _tensor_builder->portableAt(output_index).get(); + auto input_tensor = _tensor_builder->portableAt(input_index).get(); auto fn = std::make_unique(); - fn->configure(input_alloc, output_alloc); + fn->configure(input_tensor, output_tensor); _return_fn = std::move(fn); } @@ -650,13 +687,13 @@ void KernelGenerator::visit(const ir::operation::ExpandDims &node) const auto input_index{node.getInputs().at(ir::operation::ExpandDims::Input::INPUT)}; const auto axis_index{node.getInputs().at(ir::operation::ExpandDims::Input::AXIS)}; - auto output_alloc = _tensor_builder->portableAt(output_index).get(); - auto input_alloc = _tensor_builder->portableAt(input_index).get(); - auto axis_alloc = _tensor_builder->portableAt(axis_index).get(); + auto output_tensor = _tensor_builder->portableAt(output_index).get(); + auto input_tensor = _tensor_builder->portableAt(input_index).get(); + auto axis_tensor = _tensor_builder->portableAt(axis_index).get(); auto fn = std::make_unique(); - fn->configure(input_alloc, axis_alloc, output_alloc); + fn->configure(input_tensor, axis_tensor, output_tensor); _return_fn = std::move(fn); } @@ -666,12 +703,12 @@ void KernelGenerator::visit(const ir::operation::Logistic &node) const auto 
output_index{node.getOutputs().at(0)}; const auto input_index{node.getInputs().at(ir::operation::Logistic::Input::INPUT)}; - auto output_alloc = _tensor_builder->portableAt(output_index).get(); - auto input_alloc = _tensor_builder->portableAt(input_index).get(); + auto output_tensor = _tensor_builder->portableAt(output_index).get(); + auto input_tensor = _tensor_builder->portableAt(input_index).get(); auto fn = std::make_unique(); - fn->configure(input_alloc, output_alloc); + fn->configure(input_tensor, output_tensor); _return_fn = std::move(fn); } @@ -681,12 +718,12 @@ void KernelGenerator::visit(const ir::operation::Tanh &node) const auto output_index{node.getOutputs().at(0)}; const auto input_index{node.getInputs().at(ir::operation::Tanh::Input::INPUT)}; - auto output_alloc = _tensor_builder->portableAt(output_index).get(); - auto input_alloc = _tensor_builder->portableAt(input_index).get(); + auto output_tensor = _tensor_builder->portableAt(output_index).get(); + auto input_tensor = _tensor_builder->portableAt(input_index).get(); auto fn = std::make_unique(); - fn->configure(input_alloc, output_alloc); + fn->configure(input_tensor, output_tensor); _return_fn = std::move(fn); } @@ -700,7 +737,7 @@ void KernelGenerator::visit(const ir::operation::Pack &node) assert(-rank <= axis && axis < rank); - auto output_alloc = _tensor_builder->portableAt(ofm_index).get(); + auto output_tensor = _tensor_builder->portableAt(ofm_index).get(); std::vector input_tensors; for (auto &ifm_idx : node.getInputs()) @@ -708,7 +745,7 @@ void KernelGenerator::visit(const ir::operation::Pack &node) auto fn = std::make_unique(); - fn->configure(input_tensors, axis, output_alloc); + fn->configure(input_tensors, axis, output_tensor); _return_fn = std::move(fn); } @@ -722,7 +759,7 @@ void KernelGenerator::visit(const ir::operation::Unpack &node) assert(rank == 0 || (-rank <= axis && axis < rank)); - auto input_alloc = _tensor_builder->portableAt(input_index).get(); + auto input_tensor = _tensor_builder->portableAt(input_index).get(); std::vector output_tensors; for (auto &output_idx : node.getOutputs()) @@ -732,7 +769,7 @@ void KernelGenerator::visit(const ir::operation::Unpack &node) uint32_t axis_resolved = (axis < 0 ? axis + rank : axis); - fn->configure(input_alloc, axis_resolved, node.param().num, output_tensors); + fn->configure(input_tensor, axis_resolved, node.param().num, output_tensors); _return_fn = std::move(fn); } @@ -751,8 +788,16 @@ void KernelGenerator::visit(const ir::operation::Pad &node) auto fn = std::make_unique(); - fn->configure(input, output, pad_base, pad_rank); + bool isPadV2 = node.getInputs().size() == 3 ? 
true : false; + const void *value = nullptr; + + if (isPadV2) + { + const auto value_index{node.getInputs().at(ir::operation::Pad::Input::VALUE)}; + value = reinterpret_cast(_ctx.at(value_index).data()->base()); + } + fn->configure(input, output, pad_base, pad_rank, value); _return_fn = std::move(fn); } @@ -762,13 +807,13 @@ void KernelGenerator::visit(const ir::operation::Max &node) const auto lhs_index{node.getInputs().at(ir::operation::Max::Input::LHS)}; const auto rhs_index{node.getInputs().at(ir::operation::Max::Input::RHS)}; - auto ofm_alloc = _tensor_builder->portableAt(ofm_index).get(); - auto lhs_alloc = _tensor_builder->portableAt(lhs_index).get(); - auto rhs_alloc = _tensor_builder->portableAt(rhs_index).get(); + auto ofm_tensor = _tensor_builder->portableAt(ofm_index).get(); + auto lhs_tensor = _tensor_builder->portableAt(lhs_index).get(); + auto rhs_tensor = _tensor_builder->portableAt(rhs_index).get(); auto fn = std::make_unique(); - fn->configure(lhs_alloc, rhs_alloc, ofm_alloc); + fn->configure(lhs_tensor, rhs_tensor, ofm_tensor); _return_fn = std::move(fn); } @@ -779,13 +824,13 @@ void KernelGenerator::visit(const ir::operation::Min &node) const auto lhs_index{node.getInputs().at(ir::operation::Min::Input::LHS)}; const auto rhs_index{node.getInputs().at(ir::operation::Min::Input::RHS)}; - auto ofm_alloc = _tensor_builder->portableAt(ofm_index).get(); - auto lhs_alloc = _tensor_builder->portableAt(lhs_index).get(); - auto rhs_alloc = _tensor_builder->portableAt(rhs_index).get(); + auto ofm_tensor = _tensor_builder->portableAt(ofm_index).get(); + auto lhs_tensor = _tensor_builder->portableAt(lhs_index).get(); + auto rhs_tensor = _tensor_builder->portableAt(rhs_index).get(); auto fn = std::make_unique(); - fn->configure(lhs_alloc, rhs_alloc, ofm_alloc); + fn->configure(lhs_tensor, rhs_tensor, ofm_tensor); _return_fn = std::move(fn); } @@ -795,12 +840,12 @@ void KernelGenerator::visit(const ir::operation::Cast &node) const auto ofm_index{node.getOutputs().at(0)}; const auto ifm_index{node.getInputs().at(ir::operation::Cast::Input::INPUT)}; - auto ofm_alloc = _tensor_builder->portableAt(ofm_index).get(); - auto ifm_alloc = _tensor_builder->portableAt(ifm_index).get(); + auto ofm_tensor = _tensor_builder->portableAt(ofm_index).get(); + auto ifm_tensor = _tensor_builder->portableAt(ifm_index).get(); auto fn = std::make_unique(); - fn->configure(ifm_alloc, ofm_alloc); + fn->configure(ifm_tensor, ofm_tensor); _return_fn = std::move(fn); } @@ -810,12 +855,12 @@ void KernelGenerator::visit(const ir::operation::Transpose &node) const auto output_index{node.getOutputs().at(0)}; const auto input_index{node.getInputs().at(ir::operation::Transpose::Input::INPUT)}; - auto output_alloc = _tensor_builder->portableAt(output_index).get(); - auto input_alloc = _tensor_builder->portableAt(input_index).get(); + auto output_tensor = _tensor_builder->portableAt(output_index).get(); + auto input_tensor = _tensor_builder->portableAt(input_index).get(); auto fn = std::make_unique(); - fn->configure(input_alloc, output_alloc, node.param().perm); + fn->configure(input_tensor, output_tensor, node.param().perm); _return_fn = std::move(fn); } @@ -827,15 +872,15 @@ void KernelGenerator::visit(const ir::operation::Reduce &node) const auto axes_index{node.getInputs().at(ir::operation::Reduce::Input::AXES)}; const auto keep_dims = node.param().keep_dims; - auto output_alloc = _tensor_builder->portableAt(output_index).get(); - auto input_alloc = _tensor_builder->portableAt(input_index).get(); - auto axes_alloc 
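The PadV2 check just above and the earlier BatchToSpaceND CROPS check follow the same convention: the presence of a trailing input decides the variant. A standalone sketch of the two predicates as the hunks express them:

#include <cassert>
#include <cstddef>

// Pad becomes PadV2 when a third input (the padding VALUE operand) is present.
bool isPadV2(std::size_t num_inputs) { return num_inputs == 3; }

// BatchToSpaceND reads CROPS only when more than the two NNAPI-style inputs exist.
bool hasBatchToSpaceCrops(std::size_t num_inputs) { return num_inputs != 2; }

int main()
{
  assert(!isPadV2(2) && isPadV2(3));
  assert(!hasBatchToSpaceCrops(2) && hasBatchToSpaceCrops(3));
  return 0;
}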
= _tensor_builder->portableAt(axes_index).get(); + auto output_tensor = _tensor_builder->portableAt(output_index).get(); + auto input_tensor = _tensor_builder->portableAt(input_index).get(); + auto axes_tensor = _tensor_builder->portableAt(axes_index).get(); if (node.param().reduce_type == ir::operation::Reduce::ReduceType::MEAN) { auto fn = std::make_unique(); - fn->configure(input_alloc, axes_alloc, output_alloc, keep_dims); + fn->configure(input_tensor, axes_tensor, output_tensor, keep_dims); _return_fn = std::move(fn); } @@ -844,7 +889,7 @@ void KernelGenerator::visit(const ir::operation::Reduce &node) auto fn = std::make_unique(); const auto reduce_type = convertReduceType(node.param().reduce_type); - fn->configure(input_alloc, axes_alloc, output_alloc, reduce_type, keep_dims); + fn->configure(input_tensor, axes_tensor, output_tensor, reduce_type, keep_dims); _return_fn = std::move(fn); } @@ -855,12 +900,27 @@ void KernelGenerator::visit(const ir::operation::ReLU &node) const auto output_index{node.getOutputs().at(0)}; const auto input_index{node.getInputs().at(0)}; - auto output_alloc = _tensor_builder->portableAt(output_index).get(); - auto input_alloc = _tensor_builder->portableAt(input_index).get(); + auto output_tensor = _tensor_builder->portableAt(output_index).get(); + auto input_tensor = _tensor_builder->portableAt(input_index).get(); auto fn = std::make_unique(); - fn->configure(input_alloc, output_alloc); + fn->configure(input_tensor, output_tensor); + + _return_fn = std::move(fn); +} + +void KernelGenerator::visit(const ir::operation::ReLU6 &node) +{ + const auto output_index{node.getOutputs().at(0)}; + const auto input_index{node.getInputs().at(0)}; + + auto output_tensor = _tensor_builder->portableAt(output_index).get(); + auto input_tensor = _tensor_builder->portableAt(input_index).get(); + + auto fn = std::make_unique(); + + fn->configure(input_tensor, output_tensor); _return_fn = std::move(fn); } @@ -872,14 +932,14 @@ void KernelGenerator::visit(const ir::operation::Select &node) const auto true_index{node.getInputs().at(ir::operation::Select::Input::INPUT_TRUE)}; const auto false_index{node.getInputs().at(ir::operation::Select::Input::INPUT_FALSE)}; - auto output_alloc = _tensor_builder->portableAt(output_index).get(); - auto condition_alloc = _tensor_builder->portableAt(condition_index).get(); - auto true_alloc = _tensor_builder->portableAt(true_index).get(); - auto false_alloc = _tensor_builder->portableAt(false_index).get(); + auto output_tensor = _tensor_builder->portableAt(output_index).get(); + auto condition_tensor = _tensor_builder->portableAt(condition_index).get(); + auto true_tensor = _tensor_builder->portableAt(true_index).get(); + auto false_tensor = _tensor_builder->portableAt(false_index).get(); auto fn = std::make_unique(); - fn->configure(condition_alloc, true_alloc, false_alloc, output_alloc); + fn->configure(condition_tensor, true_tensor, false_tensor, output_tensor); _return_fn = std::move(fn); } @@ -891,14 +951,14 @@ void KernelGenerator::visit(const ir::operation::Slice &node) const auto begins_index{node.getInputs().at(ir::operation::Slice::Input::BEGINS)}; const auto sizes_index{node.getInputs().at(ir::operation::Slice::Input::SIZES)}; - auto output_alloc = _tensor_builder->portableAt(output_index).get(); - auto input_alloc = _tensor_builder->portableAt(input_index).get(); - auto begins_alloc = _tensor_builder->portableAt(begins_index).get(); - auto sizes_alloc = _tensor_builder->portableAt(sizes_index).get(); + auto output_tensor = 
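The new ReLU6 visit above only wires input and output tensors to the layer added in ops/ReLU6Layer.h; the math it stands for is an element-wise clamp to [0, 6] (the layer presumably delegates to the shared cker kernels, as the other cpu ops do). A minimal standalone sketch:

#include <algorithm>
#include <cassert>

float relu6(float x) { return std::min(std::max(x, 0.0f), 6.0f); }

int main()
{
  assert(relu6(-1.0f) == 0.0f);  // negative inputs are cut off at zero
  assert(relu6(3.5f) == 3.5f);   // values inside [0, 6] pass through
  assert(relu6(10.0f) == 6.0f);  // large values saturate at six
  return 0;
}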
_tensor_builder->portableAt(output_index).get(); + auto input_tensor = _tensor_builder->portableAt(input_index).get(); + auto begins_tensor = _tensor_builder->portableAt(begins_index).get(); + auto sizes_tensor = _tensor_builder->portableAt(sizes_index).get(); auto fn = std::make_unique(); - fn->configure(input_alloc, begins_alloc, sizes_alloc, output_alloc); + fn->configure(input_tensor, begins_tensor, sizes_tensor, output_tensor); _return_fn = std::move(fn); } @@ -911,11 +971,11 @@ void KernelGenerator::visit(const ir::operation::StridedSlice &node) const auto ends_index{node.getInputs().at(ir::operation::StridedSlice::Input::ENDS)}; const auto strides_index{node.getInputs().at(ir::operation::StridedSlice::Input::STRIDES)}; - auto output_alloc = _tensor_builder->portableAt(output_index).get(); - auto input_alloc = _tensor_builder->portableAt(input_index).get(); - auto starts_alloc = _tensor_builder->portableAt(starts_index).get(); - auto ends_alloc = _tensor_builder->portableAt(ends_index).get(); - auto strides_alloc = _tensor_builder->portableAt(strides_index).get(); + auto output_tensor = _tensor_builder->portableAt(output_index).get(); + auto input_tensor = _tensor_builder->portableAt(input_index).get(); + auto starts_tensor = _tensor_builder->portableAt(starts_index).get(); + auto ends_tensor = _tensor_builder->portableAt(ends_index).get(); + auto strides_tensor = _tensor_builder->portableAt(strides_index).get(); auto begin_mask = node.param().begin_mask; auto end_mask = node.param().end_mask; @@ -923,7 +983,7 @@ void KernelGenerator::visit(const ir::operation::StridedSlice &node) auto fn = std::make_unique(); - fn->configure(input_alloc, starts_alloc, ends_alloc, strides_alloc, output_alloc, begin_mask, + fn->configure(input_tensor, starts_tensor, ends_tensor, strides_tensor, output_tensor, begin_mask, end_mask, shrink_axis_mask); _return_fn = std::move(fn); @@ -957,12 +1017,12 @@ void KernelGenerator::visit(const ir::operation::Abs &node) const auto ofm_index{node.getOutputs().at(0)}; const auto ifm_index{node.getInputs().at(ir::operation::Abs::Input::INPUT)}; - auto ofm_alloc = _tensor_builder->portableAt(ofm_index).get(); - auto ifm_alloc = _tensor_builder->portableAt(ifm_index).get(); + auto ofm_tensor = _tensor_builder->portableAt(ofm_index).get(); + auto ifm_tensor = _tensor_builder->portableAt(ifm_index).get(); auto fn = std::make_unique(); - fn->configure(ifm_alloc, ofm_alloc); + fn->configure(ifm_tensor, ofm_tensor); _return_fn = std::move(fn); } @@ -972,12 +1032,12 @@ void KernelGenerator::visit(const ir::operation::Sin &node) const auto ofm_index{node.getOutputs().at(0)}; const auto ifm_index{node.getInputs().at(ir::operation::Sin::Input::INPUT)}; - auto ofm_alloc = _tensor_builder->portableAt(ofm_index).get(); - auto ifm_alloc = _tensor_builder->portableAt(ifm_index).get(); + auto ofm_tensor = _tensor_builder->portableAt(ofm_index).get(); + auto ifm_tensor = _tensor_builder->portableAt(ifm_index).get(); auto fn = std::make_unique(); - fn->configure(ifm_alloc, ofm_alloc); + fn->configure(ifm_tensor, ofm_tensor); _return_fn = std::move(fn); } @@ -987,12 +1047,12 @@ void KernelGenerator::visit(const ir::operation::Cos &node) const auto ofm_index{node.getOutputs().at(0)}; const auto ifm_index{node.getInputs().at(ir::operation::Cos::Input::INPUT)}; - auto ofm_alloc = _tensor_builder->portableAt(ofm_index).get(); - auto ifm_alloc = _tensor_builder->portableAt(ifm_index).get(); + auto ofm_tensor = _tensor_builder->portableAt(ofm_index).get(); + auto ifm_tensor = 
_tensor_builder->portableAt(ifm_index).get(); auto fn = std::make_unique(); - fn->configure(ifm_alloc, ofm_alloc); + fn->configure(ifm_tensor, ofm_tensor); _return_fn = std::move(fn); } @@ -1002,12 +1062,12 @@ void KernelGenerator::visit(const ir::operation::RSQRT &node) const auto ofm_index{node.getOutputs().at(0)}; const auto ifm_index{node.getInputs().at(ir::operation::RSQRT::Input::INPUT)}; - auto ofm_alloc = _tensor_builder->portableAt(ofm_index).get(); - auto ifm_alloc = _tensor_builder->portableAt(ifm_index).get(); + auto ofm_tensor = _tensor_builder->portableAt(ofm_index).get(); + auto ifm_tensor = _tensor_builder->portableAt(ifm_index).get(); auto fn = std::make_unique(); - fn->configure(ifm_alloc, ofm_alloc); + fn->configure(ifm_tensor, ofm_tensor); _return_fn = std::move(fn); } @@ -1017,12 +1077,33 @@ void KernelGenerator::visit(const ir::operation::Shape &node) const auto ofm_index{node.getOutputs().at(0)}; const auto ifm_index{node.getInputs().at(ir::operation::Shape::Input::INPUT)}; - auto ofm_alloc = _tensor_builder->portableAt(ofm_index).get(); - auto ifm_alloc = _tensor_builder->portableAt(ifm_index).get(); + auto ofm_tensor = _tensor_builder->portableAt(ofm_index).get(); + auto ifm_tensor = _tensor_builder->portableAt(ifm_index).get(); auto fn = std::make_unique(); - fn->configure(ifm_alloc, ofm_alloc); + fn->configure(ifm_tensor, ofm_tensor); + + _return_fn = std::move(fn); +} + +void KernelGenerator::visit(const ir::operation::ResizeBilinear &node) +{ + const auto output_index{node.getOutputs().at(0)}; + const auto input_index{node.getInputs().at(ir::operation::ResizeBilinear::INPUT)}; + + auto output_height = node.param().height_out; + auto output_width = node.param().width_out; + auto align_corners = node.param().align_corners; + auto half_pixel_centers = node.param().half_pixel_centers; + + auto output_tensor = _tensor_builder->portableAt(output_index).get(); + auto input_tensor = _tensor_builder->portableAt(input_index).get(); + + auto fn = std::make_unique(); + + fn->configure(input_tensor, output_tensor, output_height, output_width, align_corners, + half_pixel_centers); _return_fn = std::move(fn); } @@ -1033,13 +1114,13 @@ void KernelGenerator::visit(const ir::operation::Reverse &node) const auto input_index{node.getInputs().at(ir::operation::Reverse::INPUT)}; const auto axis_index{node.getInputs().at(ir::operation::Reverse::AXIS)}; - auto output_alloc = _tensor_builder->portableAt(output_index).get(); - auto input_alloc = _tensor_builder->portableAt(input_index).get(); - auto axis_alloc = _tensor_builder->portableAt(axis_index).get(); + auto output_tensor = _tensor_builder->portableAt(output_index).get(); + auto input_tensor = _tensor_builder->portableAt(input_index).get(); + auto axis_tensor = _tensor_builder->portableAt(axis_index).get(); auto fn = std::make_unique(); - fn->configure(input_alloc, axis_alloc, output_alloc); + fn->configure(input_tensor, axis_tensor, output_tensor); _return_fn = std::move(fn); } @@ -1049,12 +1130,12 @@ void KernelGenerator::visit(const ir::operation::Neg &node) const auto ofm_index{node.getOutputs().at(0)}; const auto ifm_index{node.getInputs().at(ir::operation::Neg::Input::INPUT)}; - auto ofm_alloc = _tensor_builder->portableAt(ofm_index).get(); - auto ifm_alloc = _tensor_builder->portableAt(ifm_index).get(); + auto ofm_tensor = _tensor_builder->portableAt(ofm_index).get(); + auto ifm_tensor = _tensor_builder->portableAt(ifm_index).get(); auto fn = std::make_unique(); - fn->configure(ifm_alloc, ofm_alloc); + 
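The new ResizeBilinear lowering above passes align_corners and half_pixel_centers straight to the layer. As a reference for what these flags change, here is a standalone sketch of the commonly used source-coordinate mapping; it is illustrative only and not lifted from the onert kernel:

#include <cassert>

// Maps an output index back to a (fractional) input coordinate before interpolation.
float sourceCoord(int out_index, int in_size, int out_size, bool align_corners,
                  bool half_pixel_centers)
{
  if (half_pixel_centers)
    return (out_index + 0.5f) * (static_cast<float>(in_size) / out_size) - 0.5f;
  if (align_corners && out_size > 1)
    return out_index * (static_cast<float>(in_size - 1) / (out_size - 1));
  return out_index * (static_cast<float>(in_size) / out_size);
}

int main()
{
  // align_corners maps the last output sample (2 of 3) exactly onto the last input sample (4 of 5).
  assert(sourceCoord(2, 5, 3, true, false) == 4.0f);
  // half_pixel_centers samples at pixel centers instead of corners.
  assert(sourceCoord(0, 4, 2, false, true) == 0.5f);
  return 0;
}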
fn->configure(ifm_tensor, ofm_tensor); _return_fn = std::move(fn); } @@ -1066,12 +1147,12 @@ void KernelGenerator::visit(const ir::operation::ArgMax &node) const auto axis = node.param().axis; - auto output_alloc = _tensor_builder->portableAt(output_index).get(); - auto input_alloc = _tensor_builder->portableAt(input_index).get(); + auto output_tensor = _tensor_builder->portableAt(output_index).get(); + auto input_tensor = _tensor_builder->portableAt(input_index).get(); auto fn = std::make_unique(); - fn->configure(input_alloc, output_alloc, axis, /* is_arg_max */ true); + fn->configure(input_tensor, output_tensor, axis, /* is_arg_max */ true); _return_fn = std::move(fn); } @@ -1082,13 +1163,13 @@ void KernelGenerator::visit(const ir::operation::Pow &node) const auto lhs_index{node.getInputs().at(ir::operation::Pow::LHS)}; const auto rhs_index{node.getInputs().at(ir::operation::Pow::RHS)}; - auto output_alloc = _tensor_builder->portableAt(output_index).get(); - auto lhs_alloc = _tensor_builder->portableAt(lhs_index).get(); - auto rhs_alloc = _tensor_builder->portableAt(rhs_index).get(); + auto output_tensor = _tensor_builder->portableAt(output_index).get(); + auto lhs_tensor = _tensor_builder->portableAt(lhs_index).get(); + auto rhs_tensor = _tensor_builder->portableAt(rhs_index).get(); auto fn = std::make_unique(); - fn->configure(lhs_alloc, rhs_alloc, ir::Activation::NONE, output_alloc); + fn->configure(lhs_tensor, rhs_tensor, ir::Activation::NONE, output_tensor); _return_fn = std::move(fn); } @@ -1098,12 +1179,12 @@ void KernelGenerator::visit(const ir::operation::Log &node) const auto ofm_index{node.getOutputs().at(0)}; const auto ifm_index{node.getInputs().at(ir::operation::Log::Input::INPUT)}; - auto ofm_alloc = _tensor_builder->portableAt(ofm_index).get(); - auto ifm_alloc = _tensor_builder->portableAt(ifm_index).get(); + auto ofm_tensor = _tensor_builder->portableAt(ofm_index).get(); + auto ifm_tensor = _tensor_builder->portableAt(ifm_index).get(); auto fn = std::make_unique(); - fn->configure(ifm_alloc, ofm_alloc); + fn->configure(ifm_tensor, ofm_tensor); _return_fn = std::move(fn); } @@ -1113,12 +1194,12 @@ void KernelGenerator::visit(const ir::operation::Round &node) const auto output_index{node.getOutputs().at(0)}; const auto input_index{node.getInputs().at(ir::operation::Round::INPUT)}; - auto output_alloc = _tensor_builder->portableAt(output_index).get(); - auto input_alloc = _tensor_builder->portableAt(input_index).get(); + auto output_tensor = _tensor_builder->portableAt(output_index).get(); + auto input_tensor = _tensor_builder->portableAt(input_index).get(); auto fn = std::make_unique(); - fn->configure(input_alloc, output_alloc); + fn->configure(input_tensor, output_tensor); _return_fn = std::move(fn); } @@ -1128,12 +1209,12 @@ void KernelGenerator::visit(const ir::operation::LogicalNot &node) const auto output_index{node.getOutputs().at(0)}; const auto input_index{node.getInputs().at(ir::operation::LogicalNot::INPUT)}; - auto output_alloc = _tensor_builder->portableAt(output_index).get(); - auto input_alloc = _tensor_builder->portableAt(input_index).get(); + auto output_tensor = _tensor_builder->portableAt(output_index).get(); + auto input_tensor = _tensor_builder->portableAt(input_index).get(); auto fn = std::make_unique(); - fn->configure(input_alloc, output_alloc); + fn->configure(input_tensor, output_tensor); _return_fn = std::move(fn); } @@ -1144,28 +1225,43 @@ void KernelGenerator::visit(const ir::operation::LogicalOr &node) const auto 
lhs_index{node.getInputs().at(0)}; const auto rhs_index{node.getInputs().at(1)}; - auto ofm_alloc = _tensor_builder->portableAt(ofm_index).get(); - auto lhs_alloc = _tensor_builder->portableAt(lhs_index).get(); - auto rhs_alloc = _tensor_builder->portableAt(rhs_index).get(); + auto ofm_tensor = _tensor_builder->portableAt(ofm_index).get(); + auto lhs_tensor = _tensor_builder->portableAt(lhs_index).get(); + auto rhs_tensor = _tensor_builder->portableAt(rhs_index).get(); auto fn = std::make_unique(); - fn->configure(lhs_alloc, rhs_alloc, ofm_alloc); + fn->configure(lhs_tensor, rhs_tensor, ofm_tensor); _return_fn = std::move(fn); } -void KernelGenerator::visit(const ir::operation::ZerosLike &node) +void KernelGenerator::visit(const ir::operation::L2Normalization &node) { const auto output_index{node.getOutputs().at(0)}; - const auto input_index{node.getInputs().at(ir::operation::ZerosLike::INPUT)}; + const auto input_index{node.getInputs().at(0)}; auto output_alloc = _tensor_builder->portableAt(output_index).get(); auto input_alloc = _tensor_builder->portableAt(input_index).get(); - auto fn = std::make_unique(); + auto fn = std::make_unique(); fn->configure(input_alloc, output_alloc); + + _return_fn = std::move(fn); +} + +void KernelGenerator::visit(const ir::operation::ZerosLike &node) +{ + const auto output_index{node.getOutputs().at(0)}; + const auto input_index{node.getInputs().at(ir::operation::ZerosLike::INPUT)}; + + auto output_tensor = _tensor_builder->portableAt(output_index).get(); + auto input_tensor = _tensor_builder->portableAt(input_index).get(); + + auto fn = std::make_unique(); + + fn->configure(input_tensor, output_tensor); _return_fn = std::move(fn); } @@ -1176,14 +1272,14 @@ void KernelGenerator::visit(const ir::operation::Range &node) const auto limit_index{node.getInputs().at(ir::operation::Range::LIMIT)}; const auto delta_index{node.getInputs().at(ir::operation::Range::DELTA)}; - auto output_alloc = _tensor_builder->portableAt(output_index).get(); - auto start_alloc = _tensor_builder->portableAt(start_index).get(); - auto limit_alloc = _tensor_builder->portableAt(limit_index).get(); - auto delta_alloc = _tensor_builder->portableAt(delta_index).get(); + auto output_tensor = _tensor_builder->portableAt(output_index).get(); + auto start_tensor = _tensor_builder->portableAt(start_index).get(); + auto limit_tensor = _tensor_builder->portableAt(limit_index).get(); + auto delta_tensor = _tensor_builder->portableAt(delta_index).get(); auto fn = std::make_unique(); - fn->configure(start_alloc, limit_alloc, delta_alloc, output_alloc); + fn->configure(start_tensor, limit_tensor, delta_tensor, output_tensor); _return_fn = std::move(fn); } @@ -1193,13 +1289,13 @@ void KernelGenerator::visit(const ir::operation::SquaredDifference &node) const auto lhs_index{node.getInputs().at(ir::operation::SquaredDifference::Input::LHS)}; const auto rhs_index{node.getInputs().at(ir::operation::SquaredDifference::Input::RHS)}; - auto ofm_alloc = _tensor_builder->portableAt(ofm_index).get(); - auto lhs_alloc = _tensor_builder->portableAt(lhs_index).get(); - auto rhs_alloc = _tensor_builder->portableAt(rhs_index).get(); + auto ofm_tensor = _tensor_builder->portableAt(ofm_index).get(); + auto lhs_tensor = _tensor_builder->portableAt(lhs_index).get(); + auto rhs_tensor = _tensor_builder->portableAt(rhs_index).get(); auto fn = std::make_unique(); - fn->configure(lhs_alloc, rhs_alloc, ofm_alloc); + fn->configure(lhs_tensor, rhs_tensor, ofm_tensor); _return_fn = std::move(fn); } @@ -1209,13 +1305,13 @@ void 
KernelGenerator::visit(const ir::operation::Tile &node) const auto input_index{node.getInputs().at(ir::operation::Tile::INPUT)}; const auto multiples_index{node.getInputs().at(ir::operation::Tile::MULTIPLES)}; - auto output_alloc = _tensor_builder->portableAt(output_index).get(); - auto input_alloc = _tensor_builder->portableAt(input_index).get(); - auto multiples_alloc = _tensor_builder->portableAt(multiples_index).get(); + auto output_tensor = _tensor_builder->portableAt(output_index).get(); + auto input_tensor = _tensor_builder->portableAt(input_index).get(); + auto multiples_tensor = _tensor_builder->portableAt(multiples_index).get(); auto fn = std::make_unique(); - fn->configure(input_alloc, multiples_alloc, output_alloc); + fn->configure(input_tensor, multiples_tensor, output_tensor); _return_fn = std::move(fn); } @@ -1226,14 +1322,14 @@ void KernelGenerator::visit(const ir::operation::MatrixBandPart &node) const auto num_lower_index{node.getInputs().at(ir::operation::MatrixBandPart::NUM_LOWER_DIAG)}; const auto num_upper_index{node.getInputs().at(ir::operation::MatrixBandPart::NUM_UPPER_DIAG)}; - auto output_alloc = _tensor_builder->portableAt(output_index).get(); - auto input_alloc = _tensor_builder->portableAt(input_index).get(); - auto num_lower_alloc = _tensor_builder->portableAt(num_lower_index).get(); - auto num_upper_alloc = _tensor_builder->portableAt(num_upper_index).get(); + auto output_tensor = _tensor_builder->portableAt(output_index).get(); + auto input_tensor = _tensor_builder->portableAt(input_index).get(); + auto num_lower_tensor = _tensor_builder->portableAt(num_lower_index).get(); + auto num_upper_tensor = _tensor_builder->portableAt(num_upper_index).get(); auto fn = std::make_unique(); - fn->configure(input_alloc, num_lower_alloc, num_upper_alloc, output_alloc); + fn->configure(input_tensor, num_lower_tensor, num_upper_tensor, output_tensor); _return_fn = std::move(fn); } @@ -1243,16 +1339,16 @@ void KernelGenerator::visit(const ir::operation::BatchMatMul &node) const auto lhs_index{node.getInputs().at(ir::operation::BatchMatMul::LHS)}; const auto rhs_index{node.getInputs().at(ir::operation::BatchMatMul::RHS)}; - auto output_alloc = _tensor_builder->portableAt(output_index).get(); - auto lhs_alloc = _tensor_builder->portableAt(lhs_index).get(); - auto rhs_alloc = _tensor_builder->portableAt(rhs_index).get(); + auto output_tensor = _tensor_builder->portableAt(output_index).get(); + auto lhs_tensor = _tensor_builder->portableAt(lhs_index).get(); + auto rhs_tensor = _tensor_builder->portableAt(rhs_index).get(); const auto adj_x = node.param().adj_x; const auto adj_y = node.param().adj_y; auto fn = std::make_unique(); - fn->configure(lhs_alloc, rhs_alloc, adj_x, adj_y, output_alloc); + fn->configure(lhs_tensor, rhs_tensor, adj_x, adj_y, output_tensor); _return_fn = std::move(fn); } @@ -1262,13 +1358,13 @@ void KernelGenerator::visit(const ir::operation::BroadcastTo &node) const auto input_index{node.getInputs().at(ir::operation::BroadcastTo::INPUT)}; const auto shape_index{node.getInputs().at(ir::operation::BroadcastTo::SHAPE)}; - auto output_alloc = _tensor_builder->portableAt(output_index).get(); - auto input_alloc = _tensor_builder->portableAt(input_index).get(); - auto shape_alloc = _tensor_builder->portableAt(shape_index).get(); + auto output_tensor = _tensor_builder->portableAt(output_index).get(); + auto input_tensor = _tensor_builder->portableAt(input_index).get(); + auto shape_tensor = _tensor_builder->portableAt(shape_index).get(); auto fn = 
std::make_unique(); - fn->configure(input_alloc, shape_alloc, output_alloc); + fn->configure(input_tensor, shape_tensor, output_tensor); _return_fn = std::move(fn); } @@ -1277,10 +1373,10 @@ void KernelGenerator::visit(const ir::operation::FusedBatchNorm &node) { const auto ofm_index{node.getOutputs().at(0)}; - auto output_alloc = _tensor_builder->portableAt(ofm_index).get(); - std::vector input_allocs; + auto output_tensor = _tensor_builder->portableAt(ofm_index).get(); + std::vector input_tensors; for (auto &ifm_idx : node.getInputs()) - input_allocs.emplace_back(_tensor_builder->portableAt(ifm_idx).get()); + input_tensors.emplace_back(_tensor_builder->portableAt(ifm_idx).get()); const auto epsilon = node.param().epsilon; const auto is_training = node.param().is_training; @@ -1288,7 +1384,7 @@ void KernelGenerator::visit(const ir::operation::FusedBatchNorm &node) auto fn = std::make_unique(); - fn->configure(input_allocs, epsilon, is_training, data_format, output_alloc); + fn->configure(input_tensors, epsilon, is_training, data_format, output_tensor); _return_fn = std::move(fn); } @@ -1301,12 +1397,12 @@ void KernelGenerator::visit(const ir::operation::LogSoftmax &node) const auto beta = node.param().beta; const auto axis = node.param().axis; - auto output_alloc = _tensor_builder->at(output_index).get(); - auto input_alloc = _tensor_builder->at(input_index).get(); + auto output_tensor = _tensor_builder->portableAt(output_index).get(); + auto input_tensor = _tensor_builder->portableAt(input_index).get(); auto fn = std::make_unique(); - fn->configure(input_alloc, beta, axis, output_alloc); + fn->configure(input_tensor, beta, axis, output_tensor); _return_fn = std::move(fn); } @@ -1318,14 +1414,84 @@ void KernelGenerator::visit(const ir::operation::SpaceToBatchND &node) const auto block_shape_index{node.getInputs().at(ir::operation::SpaceToBatchND::BLOCK_SIZE)}; const auto padding_index{node.getInputs().at(ir::operation::SpaceToBatchND::PADDINGS)}; - auto output_alloc = _tensor_builder->portableAt(output_index).get(); - auto input_alloc = _tensor_builder->portableAt(input_index).get(); - auto block_shape_alloc = _tensor_builder->portableAt(block_shape_index).get(); - auto padding_alloc = _tensor_builder->portableAt(padding_index).get(); + auto output_tensor = _tensor_builder->portableAt(output_index).get(); + auto input_tensor = _tensor_builder->portableAt(input_index).get(); + auto block_shape_tensor = _tensor_builder->portableAt(block_shape_index).get(); + auto padding_tensor = _tensor_builder->portableAt(padding_index).get(); auto fn = std::make_unique(); - fn->configure(input_alloc, block_shape_alloc, padding_alloc, output_alloc); + fn->configure(input_tensor, block_shape_tensor, padding_tensor, output_tensor); + + _return_fn = std::move(fn); +} + +void KernelGenerator::visit(const ir::operation::Quantize &node) +{ + const auto input_index{node.getInputs().at(ir::operation::Quantize::Input::INPUT)}; + const auto output_index{node.getOutputs().at(0)}; + + auto input_tensor = _tensor_builder->portableAt(input_index).get(); + auto output_tensor = _tensor_builder->portableAt(output_index).get(); + + auto fn = std::make_unique(); + + fn->configure(input_tensor, output_tensor); + + _return_fn = std::move(fn); +} + +void KernelGenerator::visit(const ir::operation::SpaceToDepth &node) +{ + const auto input_index{node.getInputs().at(ir::operation::SpaceToDepth::Input::INPUT)}; + const auto output_index{node.getOutputs().at(0)}; + auto block_size = node.param().block_size; + + auto input_tensor = 
_tensor_builder->portableAt(input_index).get(); + auto output_tensor = _tensor_builder->portableAt(output_index).get(); + + auto fn = std::make_unique(); + + fn->configure(input_tensor, block_size, output_tensor); + _return_fn = std::move(fn); +} + +void KernelGenerator::visit(const ir::operation::StatelessRandomUniform &node) +{ + const auto output_index{node.getOutputs().at(0)}; + const auto shape_index{node.getInputs().at(ir::operation::StatelessRandomUniform::SHAPE)}; + const auto seed_index{node.getInputs().at(ir::operation::StatelessRandomUniform::SEED)}; + + auto output_alloc = _tensor_builder->portableAt(output_index).get(); + auto shape_alloc = _tensor_builder->portableAt(shape_index).get(); + auto seed_alloc = _tensor_builder->portableAt(seed_index).get(); + + auto fn = std::make_unique(); + + fn->configure(shape_alloc, seed_alloc, output_alloc); + _return_fn = std::move(fn); +} + +void KernelGenerator::visit(const ir::operation::SplitV &node) +{ + const auto num_splits = node.param().num_splits; + assert(num_splits == static_cast(node.getOutputs().size())); + + const auto input_idx{node.getInputs().at(ir::operation::SplitV::Input::INPUT)}; + const auto size_splits{node.getInputs().at(ir::operation::SplitV::Input::SIZE_SPLITS)}; + const auto split_dim{node.getInputs().at(ir::operation::SplitV::Input::SPLIT_DIM)}; + + auto in_tensor = _tensor_builder->portableAt(input_idx).get(); + auto in_size_splits = _tensor_builder->portableAt(size_splits).get(); + auto in_split_dim = _tensor_builder->portableAt(split_dim).get(); + + std::vector out_tensors; + for (auto &output_idx : node.getOutputs()) + out_tensors.emplace_back(_tensor_builder->portableAt(output_idx).get()); + + auto fn = std::make_unique(); + + fn->configure(in_tensor, in_size_splits, in_split_dim, num_splits, out_tensors); _return_fn = std::move(fn); } diff --git a/runtime/onert/backend/cpu/KernelGenerator.h b/runtime/onert/backend/cpu/KernelGenerator.h index d6f4c28..40c056a 100644 --- a/runtime/onert/backend/cpu/KernelGenerator.h +++ b/runtime/onert/backend/cpu/KernelGenerator.h @@ -17,6 +17,7 @@ #ifndef __ONERT_BACKEND_CPU_KERNEL_GENERATOR_H__ #define __ONERT_BACKEND_CPU_KERNEL_GENERATOR_H__ +#include "ExternalContext.h" #include "TensorBuilder.h" #include "Tensor.h" @@ -37,7 +38,8 @@ class KernelGenerator : public IKernelGenerator public: KernelGenerator(const ir::Operands &operands_ctx, const ir::Operations &operations_ctx, const std::shared_ptr &tensor_builder, - const std::shared_ptr &kernel_builder); + const std::shared_ptr &kernel_builder, + const std::shared_ptr &external_context); using IKernelGenerator::visit; @@ -74,6 +76,7 @@ public: void visit(const ir::operation::Transpose &) override; void visit(const ir::operation::Reduce &) override; void visit(const ir::operation::ReLU &) override; + void visit(const ir::operation::ReLU6 &) override; void visit(const ir::operation::Select &) override; void visit(const ir::operation::Slice &) override; void visit(const ir::operation::StridedSlice &) override; @@ -83,6 +86,7 @@ public: void visit(const ir::operation::Sin &) override; void visit(const ir::operation::RSQRT &) override; void visit(const ir::operation::Shape &) override; + void visit(const ir::operation::ResizeBilinear &node) override; void visit(const ir::operation::Reverse &) override; void visit(const ir::operation::Neg &) override; void visit(const ir::operation::ArgMax &) override; @@ -94,13 +98,19 @@ public: void visit(const ir::operation::SquaredDifference &) override; void visit(const 
ir::operation::Tile &) override; void visit(const ir::operation::LogicalOr &) override; + void visit(const ir::operation::L2Normalization &) override; void visit(const ir::operation::Range &) override; void visit(const ir::operation::MatrixBandPart &) override; void visit(const ir::operation::BatchMatMul &) override; + void visit(const ir::operation::BatchToSpaceND &) override; void visit(const ir::operation::BroadcastTo &) override; void visit(const ir::operation::FusedBatchNorm &) override; void visit(const ir::operation::LogSoftmax &) override; void visit(const ir::operation::SpaceToBatchND &) override; + void visit(const ir::operation::Quantize &) override; + void visit(const ir::operation::SpaceToDepth &) override; + void visit(const ir::operation::StatelessRandomUniform &) override; + void visit(const ir::operation::SplitV &) override; private: const ir::Operands &_ctx; @@ -108,6 +118,7 @@ private: std::shared_ptr _tensor_builder; std::shared_ptr _kernel_builder; ir::Layout _current_op_seq_layout; + const std::shared_ptr _external_context; }; } // namespace cpu diff --git a/runtime/onert/backend/cpu/StaticTensorManager.cc b/runtime/onert/backend/cpu/StaticTensorManager.cc new file mode 100644 index 0000000..78c98da --- /dev/null +++ b/runtime/onert/backend/cpu/StaticTensorManager.cc @@ -0,0 +1,106 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "StaticTensorManager.h" +#include "Tensor.h" + +#include + +namespace onert +{ +namespace backend +{ +namespace cpu +{ + +StaticTensorManager::StaticTensorManager(const std::shared_ptr ®, + cpu_common::DynamicTensorManager *dynamic_tensor_manager) + : _nonconst_mgr{new cpu_common::MemoryManager()}, _tensors{reg}, + _dynamic_tensor_manager{dynamic_tensor_manager} +{ + // DO NOTHING +} + +void StaticTensorManager::allocateNonconsts(void) +{ + _nonconst_mgr->allocate(); + + for (auto &pair : _tensors->native_tensors()) + { + const auto &ind = pair.first; + auto tensor = pair.second; + if (!_as_constants[ind] && !tensor->is_dynamic()) + { + auto *buffer = _nonconst_mgr->getBuffer(ind); + tensor->setBuffer(buffer); + + VERBOSE(CPU_StaticTensorManager) << "TENSOR(#" << ind.value() + << "): " << static_cast(buffer) << std::endl; + } + } +} + +void StaticTensorManager::deallocateNonconsts(void) { _nonconst_mgr->deallocate(); } + +void StaticTensorManager::buildTensor(const ir::OperandIndex &ind, + const ir::OperandInfo &tensor_info, ir::Layout backend_layout, + bool as_const) +{ + assert(!_tensors->getITensor(ind)); + if (as_const) + { + auto tensor = std::make_shared(tensor_info, backend_layout); + _tensors->setNativeTensor(ind, tensor); + } + else + { + auto tensor = std::make_shared(tensor_info, backend_layout, _dynamic_tensor_manager); + _tensors->setNativeTensor(ind, tensor); + } + _as_constants[ind] = as_const; +} + +void StaticTensorManager::claimPlan(const ir::OperandIndex &ind, uint32_t size) +{ + assert(_tensors->getITensor(ind)); + + // This method is called only when a tensor has proper shape + assert(!_tensors->getITensor(ind)->is_dynamic()); + + if (!_as_constants[ind]) + _nonconst_mgr->claimPlan(ind, size); +} + +void StaticTensorManager::releasePlan(const ir::OperandIndex &ind) +{ + assert(_tensors->getITensor(ind)); + + // This method is called only when a tensor has proper shape + assert(!_tensors->getITensor(ind)->is_dynamic()); + + if (!_as_constants[ind]) + _nonconst_mgr->releasePlan(ind); +} + +void StaticTensorManager::iterate(const std::function &fn) +{ + for (const auto &it : _tensors->native_tensors()) + fn(it.first); +} + +} // namespace cpu +} // namespace backend +} // namespace onert diff --git a/runtime/onert/backend/cpu/StaticTensorManager.h b/runtime/onert/backend/cpu/StaticTensorManager.h new file mode 100644 index 0000000..2af61e4 --- /dev/null +++ b/runtime/onert/backend/cpu/StaticTensorManager.h @@ -0,0 +1,64 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef __ONERT_BACKEND_CPU_STATICTENSOR_MANAGER_H__ +#define __ONERT_BACKEND_CPU_STATICTENSOR_MANAGER_H__ + +#include "backend/IStaticTensorManager.h" +#include "backend/cpu_common/DynamicTensorManager.h" +#include "backend/cpu_common/MemoryManager.h" +#include "backend/cpu_common/TensorRegistry.h" +#include "backend/ITensorManager.h" +#include "ir/OperandIndexMap.h" +#include "ir/OperandInfo.h" + +namespace onert +{ +namespace backend +{ +namespace cpu +{ + +class StaticTensorManager : public backend::IStaticTensorManager +{ +public: + StaticTensorManager(const std::shared_ptr ®, + cpu_common::DynamicTensorManager *dynamic_tensor_manager); + virtual ~StaticTensorManager() = default; + + void allocateNonconsts(void); + void deallocateNonconsts(void); + + void buildTensor(const ir::OperandIndex &ind, const ir::OperandInfo &tensor_info, + ir::Layout backend_layout, bool as_const); + + void claimPlan(const ir::OperandIndex &ind, uint32_t size); + void releasePlan(const ir::OperandIndex &ind); + + void iterate(const std::function &fn); + +private: + std::unique_ptr _nonconst_mgr; + const std::shared_ptr _tensors; + ir::OperandIndexMap _as_constants; + cpu_common::DynamicTensorManager *_dynamic_tensor_manager; +}; + +} // namespace cpu +} // namespace backend +} // namespace onert + +#endif // __ONERT_BACKEND_CPU_STATICTENSOR_MANAGER_H__ diff --git a/runtime/onert/backend/cpu/Tensor.h b/runtime/onert/backend/cpu/Tensor.h index 4dd251b..20e6026 100644 --- a/runtime/onert/backend/cpu/Tensor.h +++ b/runtime/onert/backend/cpu/Tensor.h @@ -29,15 +29,22 @@ namespace cpu using Tensor = cpu_common::Tensor; -// Tensor which has data from external. To support this, assume below things -// no padding, always NHWC layout, constant tensor and not dynamic +/** + * @brief Class that uses data from external memory that is not managed by a backend + * instead of allocating and copying the data. ExternalTensor's data pointer points to + * an address of memory such as where memory is already allocated, or mmapped area. + * This is meaning that ExternalTensor can take all of types' ir::Data. + * To support this, assume below things no padding, always NHWC layout, + * constant tensor and not dynamic. 
+ */ class ExternalTensor : public Tensor { public: ExternalTensor() = delete; public: - ExternalTensor(const ir::OperandInfo &info, const ir::Layout layout) : Tensor(info, layout) + ExternalTensor(const ir::OperandInfo &info, const ir::Layout layout) + : Tensor(info, layout, nullptr) { assert(_layout == ir::Layout::NHWC); assert(_info.isConstant()); @@ -45,6 +52,11 @@ public: } public: + /** + * @brief set Data to be shared from external so that this ExternalTensor will not be + * allocated on CPU backend + * @param[in] data data of Operand to be set + */ void setData(const std::shared_ptr data) { assert(data != nullptr); diff --git a/runtime/onert/backend/cpu/TensorBuilder.cc b/runtime/onert/backend/cpu/TensorBuilder.cc index 886e8d8..ab8ba57 100644 --- a/runtime/onert/backend/cpu/TensorBuilder.cc +++ b/runtime/onert/backend/cpu/TensorBuilder.cc @@ -29,8 +29,8 @@ namespace cpu TensorBuilder::TensorBuilder() : _tensor_reg{new cpu_common::TensorRegistry()}, - _static_tensor_mgr{new cpu_common::StaticTensorManager(_tensor_reg)}, - _dynamic_tensor_mgr{new cpu_common::DynamicTensorManager(_tensor_reg)} + _dynamic_tensor_mgr{new cpu_common::DynamicTensorManager(_tensor_reg)}, + _static_tensor_mgr{new StaticTensorManager(_tensor_reg, _dynamic_tensor_mgr.get())} { /* empty */ } @@ -77,11 +77,7 @@ bool TensorBuilder::isRegistered(const ir::OperandIndex &ind) const return _tensor_info_map.find(ind) != _tensor_info_map.end(); } -void TensorBuilder::prepare(void) -{ - _static_tensor_mgr->allocateConsts(); - _static_tensor_mgr->allocateNonconsts(); -} +void TensorBuilder::prepare(void) { _static_tensor_mgr->allocateNonconsts(); } void TensorBuilder::allocate() { @@ -99,17 +95,17 @@ std::shared_ptr TensorBuilder::portableAt(const ir::OperandInde return _tensor_reg->getPortableTensor(ind); } -bool TensorBuilder::setExternalTensor(const ir::OperandIndex &ind, - const std::shared_ptr &tensor) +bool TensorBuilder::setMigrantTensor(const ir::OperandIndex &ind, + const std::shared_ptr &tensor) { - return _tensor_reg->setExternalTensor(ind, tensor); + return _tensor_reg->setMigrantTensor(ind, tensor); } void TensorBuilder::iterate(const IterateFunction &fn) { _static_tensor_mgr->iterate(fn); } -std::shared_ptr TensorBuilder::at(const ir::OperandIndex &ind) +std::shared_ptr TensorBuilder::at(const ir::OperandIndex &ind) { - return _tensor_reg->getManagedTensor(ind); + return _tensor_reg->getNativeTensor(ind); } std::unique_ptr TensorBuilder::releaseStaticTensorManager(void) diff --git a/runtime/onert/backend/cpu/TensorBuilder.h b/runtime/onert/backend/cpu/TensorBuilder.h index ba25451..6171365 100644 --- a/runtime/onert/backend/cpu/TensorBuilder.h +++ b/runtime/onert/backend/cpu/TensorBuilder.h @@ -18,13 +18,14 @@ #define __ONERT_BACKEND_CPU_TENSOR_BUILDER_H__ #include -#include #include -#include #include #include +#include "StaticTensorManager.h" +#include "Tensor.h" + #include namespace onert @@ -80,17 +81,17 @@ public: * If not, program will crash with assert or exception. 
* @return shared_ptr */ - std::shared_ptr at(const ir::OperandIndex &ind); + std::shared_ptr at(const ir::OperandIndex &ind); std::shared_ptr portableAt(const ir::OperandIndex &ind); - bool setExternalTensor(const ir::OperandIndex &ind, - const std::shared_ptr &tensor) override; + bool setMigrantTensor(const ir::OperandIndex &ind, + const std::shared_ptr &tensor) override; std::shared_ptr tensorRegistry() override { return _tensor_reg; } private: const std::shared_ptr _tensor_reg; - std::unique_ptr _static_tensor_mgr; std::unique_ptr _dynamic_tensor_mgr; + std::unique_ptr _static_tensor_mgr; ir::OperandIndexMap _tensor_info_map; }; diff --git a/runtime/onert/backend/cpu/ops/BatchToSpaceNDLayer.cc b/runtime/onert/backend/cpu/ops/BatchToSpaceNDLayer.cc new file mode 100644 index 0000000..f2f10eb --- /dev/null +++ b/runtime/onert/backend/cpu/ops/BatchToSpaceNDLayer.cc @@ -0,0 +1,83 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "BatchToSpaceNDLayer.h" + +#include + +namespace onert +{ +namespace backend +{ +namespace cpu +{ +namespace ops +{ + +BatchToSpaceNDLayer::BatchToSpaceNDLayer() + : _input(nullptr), _output(nullptr), _block_shape(nullptr), _crops(nullptr) +{ + // DO NOTHING +} + +template void BatchToSpaceNDLayer::batchToSpaceNDGeneric() +{ + const int32_t NNapiCrops[]{0, 0, 0, 0}; + const int32_t *_crops_buffer; + + if (_crops == nullptr) + { + _crops_buffer = NNapiCrops; + } + else + { + _crops_buffer = reinterpret_cast(_crops->buffer()); + } + nnfw::cker::BatchToSpaceND( + getTensorShape(_input), reinterpret_cast(_input->buffer()), + reinterpret_cast(_block_shape->buffer()), _crops_buffer, + getTensorShape(_output), reinterpret_cast(_output->buffer())); +} + +void BatchToSpaceNDLayer::configure(const IPortableTensor *input, IPortableTensor *output, + IPortableTensor *block_shape, IPortableTensor *crops) +{ + _output = output; + _input = input; + _block_shape = block_shape; + _crops = crops; +} + +void BatchToSpaceNDLayer::run() +{ + if (_output->data_type() == OperandType::FLOAT32) + { + batchToSpaceNDGeneric(); + } + else if (_output->data_type() == OperandType::QUANT_UINT8_ASYMM) + { + batchToSpaceNDGeneric(); + } + else + { + throw std::runtime_error{"NYI"}; + } +} + +} // namespace ops +} // namespace cpu +} // namespace backend +} // namespace onert diff --git a/runtime/onert/backend/cpu/ops/BatchToSpaceNDLayer.h b/runtime/onert/backend/cpu/ops/BatchToSpaceNDLayer.h new file mode 100644 index 0000000..6e25b24 --- /dev/null +++ b/runtime/onert/backend/cpu/ops/BatchToSpaceNDLayer.h @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
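For orientation on the BatchToSpaceNDLayer added above: the output extents follow directly from the block shape and crops. A minimal standalone sketch of that shape arithmetic (illustrative only; it assumes an NHWC layout and a 2-D block shape, and is not the cker kernel):

#include <array>
#include <cassert>
#include <cstdint>

// NHWC output extents of BatchToSpaceND for a 2-D block shape and
// crops given as {top, bottom, left, right}.
std::array<int32_t, 4> batchToSpaceNDShape(const std::array<int32_t, 4> &in,
                                           const std::array<int32_t, 2> &block,
                                           const std::array<int32_t, 4> &crops)
{
  assert(in[0] % (block[0] * block[1]) == 0);
  return {in[0] / (block[0] * block[1]),
          in[1] * block[0] - crops[0] - crops[1],
          in[2] * block[1] - crops[2] - crops[3],
          in[3]};
}

// Example: a {4, 1, 1, C} input with block {2, 2} and zero crops becomes {1, 2, 2, C}.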
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __ONERT_BACKEND_CPU_OPS_BATCHTOSPACEND_LAYER_H__ +#define __ONERT_BACKEND_CPU_OPS_BATCHTOSPACEND_LAYER_H__ + +#include +#include "OperationUtils.h" + +#include + +namespace onert +{ +namespace backend +{ +namespace cpu +{ +namespace ops +{ + +class BatchToSpaceNDLayer : public ::onert::exec::IFunction +{ +public: + BatchToSpaceNDLayer(); + +public: + template void batchToSpaceNDGeneric(); + + void configure(const IPortableTensor *input, IPortableTensor *output, + IPortableTensor *block_shape, IPortableTensor *crops); + + void run() override; + +private: + const IPortableTensor *_input; + IPortableTensor *_output; + IPortableTensor *_block_shape; + IPortableTensor *_crops; +}; + +} // namespace ops +} // namespace cpu +} // namespace backend +} // namespace onert + +#endif // __ONERT_BACKEND_CPU_OPS_BATCHTOSPACEND_LAYER_H__ diff --git a/runtime/onert/backend/cpu/ops/CompareLayer.cc b/runtime/onert/backend/cpu/ops/CompareLayer.cc index f557f3a..adf902a 100644 --- a/runtime/onert/backend/cpu/ops/CompareLayer.cc +++ b/runtime/onert/backend/cpu/ops/CompareLayer.cc @@ -17,6 +17,7 @@ #include "OperationUtils.h" +#include #include using namespace nnfw::cker; namespace onert @@ -34,6 +35,14 @@ namespace using OpType = onert::ir::operation::Comparison::ComparisonType; using namespace onert::backend::cpu; +// Assumes these enum values to be in the order like this +static_assert(static_cast(OpType::Equal) == 0, "An OpType value has changed!"); +static_assert(static_cast(OpType::NotEqual) == 1, "An OpType value has changed!"); +static_assert(static_cast(OpType::Greater) == 2, "An OpType value has changed!"); +static_assert(static_cast(OpType::GreaterEqual) == 3, "An OpType value has changed!"); +static_assert(static_cast(OpType::Less) == 4, "An OpType value has changed!"); +static_assert(static_cast(OpType::LessEqual) == 5, "An OpType value has changed!"); + template void compareQuant8(const IPortableTensor *lhs, const IPortableTensor *rhs, IPortableTensor *output, OpType op_type) @@ -52,95 +61,33 @@ void compareQuant8(const IPortableTensor *lhs, const IPortableTensor *rhs, IPort ¶ms.input2_shift); params.is_broadcast = !HaveSameShapes(lhs, rhs); - if (params.is_broadcast) - { - switch (op_type) - { - case OpType::Equal: - Broadcast4DSlowEqualWithScaling( - params, getExtendedTensorShape(lhs), reinterpret_cast(lhs->buffer()), - getExtendedTensorShape(rhs), reinterpret_cast(rhs->buffer()), - getExtendedTensorShape(output), reinterpret_cast(output->buffer())); - break; - case OpType::NotEqual: - Broadcast4DSlowNotEqualWithScaling( - params, getExtendedTensorShape(lhs), reinterpret_cast(lhs->buffer()), - getExtendedTensorShape(rhs), reinterpret_cast(rhs->buffer()), - getExtendedTensorShape(output), reinterpret_cast(output->buffer())); - break; - case OpType::Greater: - Broadcast4DSlowGreaterWithScaling( - params, getExtendedTensorShape(lhs), reinterpret_cast(lhs->buffer()), - getExtendedTensorShape(rhs), reinterpret_cast(rhs->buffer()), - getExtendedTensorShape(output), reinterpret_cast(output->buffer())); - break; - case OpType::GreaterEqual: - 
Broadcast4DSlowGreaterEqualWithScaling( - params, getExtendedTensorShape(lhs), reinterpret_cast(lhs->buffer()), - getExtendedTensorShape(rhs), reinterpret_cast(rhs->buffer()), - getExtendedTensorShape(output), reinterpret_cast(output->buffer())); - break; - case OpType::Less: - Broadcast4DSlowLessWithScaling( - params, getExtendedTensorShape(lhs), reinterpret_cast(lhs->buffer()), - getExtendedTensorShape(rhs), reinterpret_cast(rhs->buffer()), - getExtendedTensorShape(output), reinterpret_cast(output->buffer())); - break; - case OpType::LessEqual: - Broadcast4DSlowLessEqualWithScaling( - params, getExtendedTensorShape(lhs), reinterpret_cast(lhs->buffer()), - getExtendedTensorShape(rhs), reinterpret_cast(rhs->buffer()), - getExtendedTensorShape(output), reinterpret_cast(output->buffer())); - break; - default: - throw std::runtime_error{"Invalid OpType for CompareLayer"}; - } - } - else // if (requires_broadcast == false) - { - switch (op_type) - { - case OpType::Equal: - EqualWithScaling(params, getExtendedTensorShape(lhs), - reinterpret_cast(lhs->buffer()), getExtendedTensorShape(rhs), - reinterpret_cast(rhs->buffer()), getExtendedTensorShape(output), - reinterpret_cast(output->buffer())); - break; - case OpType::NotEqual: - NotEqualWithScaling( - params, getExtendedTensorShape(lhs), reinterpret_cast(lhs->buffer()), - getExtendedTensorShape(rhs), reinterpret_cast(rhs->buffer()), - getExtendedTensorShape(output), reinterpret_cast(output->buffer())); - break; - case OpType::Greater: - GreaterWithScaling( - params, getExtendedTensorShape(lhs), reinterpret_cast(lhs->buffer()), - getExtendedTensorShape(rhs), reinterpret_cast(rhs->buffer()), - getExtendedTensorShape(output), reinterpret_cast(output->buffer())); - break; - case OpType::GreaterEqual: - GreaterEqualWithScaling( - params, getExtendedTensorShape(lhs), reinterpret_cast(lhs->buffer()), - getExtendedTensorShape(rhs), reinterpret_cast(rhs->buffer()), - getExtendedTensorShape(output), reinterpret_cast(output->buffer())); - break; - case OpType::Less: - LessWithScaling(params, getExtendedTensorShape(lhs), - reinterpret_cast(lhs->buffer()), getExtendedTensorShape(rhs), - reinterpret_cast(rhs->buffer()), getExtendedTensorShape(output), - reinterpret_cast(output->buffer())); - break; - case OpType::LessEqual: - LessEqualWithScaling( - params, getExtendedTensorShape(lhs), reinterpret_cast(lhs->buffer()), - getExtendedTensorShape(rhs), reinterpret_cast(rhs->buffer()), - getExtendedTensorShape(output), reinterpret_cast(output->buffer())); - break; - default: - throw std::runtime_error{"Invalid OpType for CompareLayer"}; - } - } - return; + using CompareFunction = + void (*)(ComparisonParams & params, const Shape &input1_shape, const T *input1_data, + const Shape &input2_shape, const T *input2_data, const Shape &output_shape, + bool *output_data); + + static const CompareFunction broadcast_fns[] = { + Broadcast4DSlowEqualWithScaling, Broadcast4DSlowNotEqualWithScaling, + Broadcast4DSlowGreaterWithScaling, Broadcast4DSlowGreaterEqualWithScaling, + Broadcast4DSlowLessWithScaling, Broadcast4DSlowLessEqualWithScaling, + }; + static const CompareFunction non_broadcast_fns[] = { + EqualWithScaling, NotEqualWithScaling, GreaterWithScaling, + GreaterEqualWithScaling, LessWithScaling, LessEqualWithScaling, + }; + + static_assert(sizeof(broadcast_fns) == sizeof(non_broadcast_fns), + "Sizes of broadcast_fns and non_broadcast_fns must match!"); + + auto index = static_cast(op_type); + if (index < 0 || index >= static_cast(sizeof(broadcast_fns) / 
sizeof(broadcast_fns[0]))) + throw std::runtime_error{"Invalid OpType for CompareLayer"}; + + CompareFunction fn = (params.is_broadcast ? broadcast_fns[index] : non_broadcast_fns[index]); + + fn(params, getExtendedTensorShape(lhs), reinterpret_cast(lhs->buffer()), + getExtendedTensorShape(rhs), reinterpret_cast(rhs->buffer()), + getExtendedTensorShape(output), reinterpret_cast(output->buffer())); } template @@ -149,94 +96,33 @@ void compareScalar(const IPortableTensor *lhs, const IPortableTensor *rhs, IPort { bool requires_broadcast = !HaveSameShapes(lhs, rhs); - if (requires_broadcast) - { - switch (op_type) - { - case OpType::Equal: - Broadcast4DSlowEqual( - getExtendedTensorShape(lhs), reinterpret_cast(lhs->buffer()), - getExtendedTensorShape(rhs), reinterpret_cast(rhs->buffer()), - getExtendedTensorShape(output), reinterpret_cast(output->buffer())); - break; - case OpType::NotEqual: - Broadcast4DSlowNotEqual( - getExtendedTensorShape(lhs), reinterpret_cast(lhs->buffer()), - getExtendedTensorShape(rhs), reinterpret_cast(rhs->buffer()), - getExtendedTensorShape(output), reinterpret_cast(output->buffer())); - break; - case OpType::Greater: - Broadcast4DSlowGreater( - getExtendedTensorShape(lhs), reinterpret_cast(lhs->buffer()), - getExtendedTensorShape(rhs), reinterpret_cast(rhs->buffer()), - getExtendedTensorShape(output), reinterpret_cast(output->buffer())); - break; - case OpType::GreaterEqual: - Broadcast4DSlowGreaterEqual( - getExtendedTensorShape(lhs), reinterpret_cast(lhs->buffer()), - getExtendedTensorShape(rhs), reinterpret_cast(rhs->buffer()), - getExtendedTensorShape(output), reinterpret_cast(output->buffer())); - break; - case OpType::Less: - Broadcast4DSlowLess(getExtendedTensorShape(lhs), reinterpret_cast(lhs->buffer()), - getExtendedTensorShape(rhs), reinterpret_cast(rhs->buffer()), - getExtendedTensorShape(output), - reinterpret_cast(output->buffer())); - break; - case OpType::LessEqual: - Broadcast4DSlowLessEqual( - getExtendedTensorShape(lhs), reinterpret_cast(lhs->buffer()), - getExtendedTensorShape(rhs), reinterpret_cast(rhs->buffer()), - getExtendedTensorShape(output), reinterpret_cast(output->buffer())); - break; - default: - throw std::runtime_error{"Invalid OpType for CompareLayer"}; - } - } - else // if (requires_broadcast == false) - { - switch (op_type) - { - case OpType::Equal: - EqualNoScaling(getExtendedTensorShape(lhs), reinterpret_cast(lhs->buffer()), - getExtendedTensorShape(rhs), reinterpret_cast(rhs->buffer()), - getExtendedTensorShape(output), reinterpret_cast(output->buffer())); - break; - case OpType::NotEqual: - NotEqualNoScaling(getExtendedTensorShape(lhs), reinterpret_cast(lhs->buffer()), - getExtendedTensorShape(rhs), reinterpret_cast(rhs->buffer()), - getExtendedTensorShape(output), - reinterpret_cast(output->buffer())); - break; - case OpType::Greater: - GreaterNoScaling(getExtendedTensorShape(lhs), reinterpret_cast(lhs->buffer()), - getExtendedTensorShape(rhs), reinterpret_cast(rhs->buffer()), - getExtendedTensorShape(output), - reinterpret_cast(output->buffer())); - break; - case OpType::GreaterEqual: - GreaterEqualNoScaling( - getExtendedTensorShape(lhs), reinterpret_cast(lhs->buffer()), - getExtendedTensorShape(rhs), reinterpret_cast(rhs->buffer()), - getExtendedTensorShape(output), reinterpret_cast(output->buffer())); - break; - case OpType::Less: - LessNoScaling(getExtendedTensorShape(lhs), reinterpret_cast(lhs->buffer()), - getExtendedTensorShape(rhs), reinterpret_cast(rhs->buffer()), - getExtendedTensorShape(output), 
reinterpret_cast(output->buffer())); - break; - case OpType::LessEqual: - LessEqualNoScaling(getExtendedTensorShape(lhs), reinterpret_cast(lhs->buffer()), - getExtendedTensorShape(rhs), reinterpret_cast(rhs->buffer()), - getExtendedTensorShape(output), - reinterpret_cast(output->buffer())); - break; - default: - throw std::runtime_error{"Invalid OpType for CompareLayer"}; - } - } - return; + using CompareFunction = + void (*)(const Shape &input1_shape, const T *input1_data, const Shape &input2_shape, + const T *input2_data, const Shape &output_shape, bool *output_data); + + static const CompareFunction broadcast_fns[] = { + Broadcast4DSlowEqual, Broadcast4DSlowNotEqual, Broadcast4DSlowGreater, + Broadcast4DSlowGreaterEqual, Broadcast4DSlowLess, Broadcast4DSlowLessEqual, + }; + static const CompareFunction non_broadcast_fns[] = { + EqualNoScaling, NotEqualNoScaling, GreaterNoScaling, + GreaterEqualNoScaling, LessNoScaling, LessEqualNoScaling, + }; + + static_assert(sizeof(broadcast_fns) == sizeof(non_broadcast_fns), + "Sizes of broadcast_fns and non_broadcast_fns must match!"); + + auto index = static_cast(op_type); + if (index < 0 || index >= static_cast(sizeof(broadcast_fns) / sizeof(broadcast_fns[0]))) + throw std::runtime_error{"Invalid OpType for CompareLayer"}; + + CompareFunction fn = (requires_broadcast ? broadcast_fns[index] : non_broadcast_fns[index]); + + fn(getExtendedTensorShape(lhs), reinterpret_cast(lhs->buffer()), + getExtendedTensorShape(rhs), reinterpret_cast(rhs->buffer()), + getExtendedTensorShape(output), reinterpret_cast(output->buffer())); } + } // namespace CompareLayer::CompareLayer() diff --git a/runtime/onert/backend/cpu/ops/FullyConnectedLayer.cc b/runtime/onert/backend/cpu/ops/FullyConnectedLayer.cc index c00be64..05da33a 100644 --- a/runtime/onert/backend/cpu/ops/FullyConnectedLayer.cc +++ b/runtime/onert/backend/cpu/ops/FullyConnectedLayer.cc @@ -18,6 +18,8 @@ #include "../Tensor.h" #include +#include +#include namespace onert { @@ -31,7 +33,7 @@ namespace ops FullyConnectedLayer::FullyConnectedLayer() : _input(nullptr), _weights(nullptr), _bias(nullptr), _output(nullptr), _activation(ir::Activation::NONE), _temp_arena(new nnfw::cker::FCTempArena()), - _is_hybrid(false) + _external_context(nullptr), _is_hybrid(false) { // DO NOTHING } @@ -102,7 +104,8 @@ void FullyConnectedLayer::fullyConnectedHybrid() op_params, getTensorShape(_input), reinterpret_cast(_input->buffer()), getTensorShape(_weights), reinterpret_cast(_weights->buffer()), getTensorShape(_bias), reinterpret_cast(_bias ? _bias->buffer() : nullptr), - getTensorShape(_output), reinterpret_cast(_output->buffer()), temp_arena); + getTensorShape(_output), reinterpret_cast(_output->buffer()), temp_arena, + _external_context->ruy_context()); #else nnfw::cker::FullyConnectedHybrid( op_params, getTensorShape(_input), reinterpret_cast(_input->buffer()), @@ -110,31 +113,67 @@ void FullyConnectedLayer::fullyConnectedHybrid() (_cached_weights) ? reinterpret_cast(_cached_weights) : reinterpret_cast(_weights->buffer()), getTensorShape(_bias), reinterpret_cast(_bias ? 
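The CompareLayer change above replaces two large per-operator switch statements with function-pointer tables indexed by the comparison enum, with static_asserts pinning the enum order. A self-contained sketch of that dispatch pattern, using hypothetical names rather than the patch's kernels:

#include <cstdint>
#include <stdexcept>

enum class CmpOp : int32_t { Equal = 0, NotEqual = 1, Greater = 2 };

static bool equalFn(int lhs, int rhs) { return lhs == rhs; }
static bool notEqualFn(int lhs, int rhs) { return lhs != rhs; }
static bool greaterFn(int lhs, int rhs) { return lhs > rhs; }

// One table entry per enum value, in enum order; if the enum is ever reordered,
// the table must be reordered with it (the patch guards this with static_asserts).
bool dispatchCompare(CmpOp op, int lhs, int rhs)
{
  using CmpFn = bool (*)(int, int);
  static const CmpFn fns[] = {equalFn, notEqualFn, greaterFn};

  const auto index = static_cast<int32_t>(op);
  if (index < 0 || index >= static_cast<int32_t>(sizeof(fns) / sizeof(fns[0])))
    throw std::runtime_error{"invalid comparison op"};
  return fns[index](lhs, rhs);
}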
_bias->buffer() : nullptr), - getTensorShape(_output), reinterpret_cast(_output->buffer()), temp_arena); + getTensorShape(_output), reinterpret_cast(_output->buffer()), temp_arena, + _external_context->ruy_context()); -// TODO Enable calling decrease_ref -#if 0 if (_cached_weights == nullptr || _is_weights_freed) return; - auto weight_tensor = dynamic_cast(_weights); - if (weight_tensor) + // '_cached_weights is not nullptr and _is_weights_freed is false' means + // this weight shape is satisfied with the ruy kernel's prepack cache's condition. + // After entering here, it will not enter again except below the case - input is zero-vector + + // if input's elements are filled with zero, it by-passes(does not enter ruy-kernel path) + // so that handle this case + const int input_size = getTensorShape(_input).FlatSize(); + if (nnfw::cker::IsZeroVector(reinterpret_cast(_input->buffer()), input_size)) + return; + + auto weight_tensor = nnfw::misc::polymorphic_downcast(_weights); + + // This weight tensor could be other ops' const tensor. + // Therefore, below reference should be checked like following + auto tensor = const_cast(weight_tensor); + if (tensor->buffer() == nullptr) // ref is already 0? { - auto tensor = const_cast(weight_tensor); + _is_weights_freed = true; + return; + } - tensor->decrease_ref(); - if (tensor->buffer() == nullptr) // ref == 0? - { - _is_weights_freed = true; - } + tensor->decrease_ref(); + if (tensor->buffer() == nullptr) // ref == 0? + { + _is_weights_freed = true; } -#endif // if 0 #endif } +void FullyConnectedLayer::fullyConnectedSparseWeight() +{ + float output_activation_min = 0, output_activation_max = 0; + CalculateActivationRange(_activation, &output_activation_min, &output_activation_max); + + nnfw::cker::FullyConnectedParams op_params; + op_params.float_activation_min = output_activation_min; + op_params.float_activation_max = output_activation_max; + op_params.activation = convertActivationType(_activation); + + int w0_size = getTensorShape(_weights).Dims(0); + const uint16_t *w1_segments = _weights->w1_segments(); + const uint16_t *w1_indices = _weights->w1_indices(); + + nnfw::cker::FullyConnectedSparseWeight( + op_params, getTensorShape(_input), reinterpret_cast(_input->buffer()), + getTensorShape(_weights), reinterpret_cast(_weights->buffer()), + getTensorShape(_bias), reinterpret_cast(_bias ? 
_bias->buffer() : nullptr), + getTensorShape(_output), reinterpret_cast(_output->buffer()), w0_size, w1_segments, + w1_indices); +} + void FullyConnectedLayer::configure(const IPortableTensor *input, const IPortableTensor *weights, const IPortableTensor *bias, ir::Activation activation, - IPortableTensor *output) + IPortableTensor *output, + const std::shared_ptr &external_context) { _input = input; _weights = weights; @@ -143,6 +182,7 @@ void FullyConnectedLayer::configure(const IPortableTensor *input, const IPortabl _output = output; _is_hybrid = input->data_type() == OperandType::FLOAT32 && weights->data_type() == OperandType::QUANT_INT8_SYMM; + _external_context = external_context; } void FullyConnectedLayer::run() @@ -151,6 +191,10 @@ void FullyConnectedLayer::run() { fullyConnectedHybrid(); } + else if (_weights->is_sparse()) + { + fullyConnectedSparseWeight(); + } else if (_input->data_type() == OperandType::FLOAT32) { fullyConnectedFloat32(); @@ -167,7 +211,16 @@ void FullyConnectedLayer::run() void FullyConnectedLayer::prepare() { -#ifdef USE_RUY_GEMV + if (_bias && _bias->is_constant()) + { + const int bias_size = getTensorShape(_bias).FlatSize(); + if (nnfw::cker::IsZeroVector(reinterpret_cast(_bias->buffer()), bias_size)) + { + _bias = nullptr; + } + } + +#if (defined(__ARM_NEON__) || defined(__ARM_NEON)) && defined(USE_RUY_GEMV) // TODO This is workaround // The only fc hybrid will use ruy kernel if (_input->data_type() != OperandType::FLOAT32 || diff --git a/runtime/onert/backend/cpu/ops/FullyConnectedLayer.h b/runtime/onert/backend/cpu/ops/FullyConnectedLayer.h index dd5ef24..f124267 100644 --- a/runtime/onert/backend/cpu/ops/FullyConnectedLayer.h +++ b/runtime/onert/backend/cpu/ops/FullyConnectedLayer.h @@ -18,6 +18,7 @@ #define __ONERT_BACKEND_CPU_OPS_FULLYCONNECTEDLAYER_H__ #include +#include "../ExternalContext.h" #include "OperationUtils.h" #include @@ -52,8 +53,11 @@ public: void fullyConnectedHybrid(); + void fullyConnectedSparseWeight(); + void configure(const IPortableTensor *input, const IPortableTensor *weights, - const IPortableTensor *bias, ir::Activation activation, IPortableTensor *output); + const IPortableTensor *bias, ir::Activation activation, IPortableTensor *output, + const std::shared_ptr &external_context); void run() override; @@ -68,10 +72,13 @@ private: ir::Activation _activation; std::unique_ptr _temp_arena; + std::shared_ptr _external_context; + bool _is_hybrid; #ifdef USE_RUY_GEMV uint8_t *_cached_weights = nullptr; // weights to be cached and a key + bool _is_weights_freed = false; // is weights freed? #endif }; diff --git a/runtime/onert/backend/cpu/ops/L2NormLayer.cc b/runtime/onert/backend/cpu/ops/L2NormLayer.cc new file mode 100644 index 0000000..0d99b05 --- /dev/null +++ b/runtime/onert/backend/cpu/ops/L2NormLayer.cc @@ -0,0 +1,71 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
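The new fullyConnectedSparseWeight() path hands cker a weight matrix described by segment and index arrays (w1_segments / w1_indices). The precise cker layout is not spelled out in this patch; as a rough orientation only, the computation is in the spirit of a CSR-style sparse matrix-vector product such as the sketch below (row pointers plus column indices are an assumption here, not a statement about cker's format):

#include <cassert>
#include <cstdint>
#include <vector>

// y = W * x + b for a sparse W stored as row pointers plus column indices
// (CSR-style). Illustrative only.
std::vector<float> sparseFullyConnected(const std::vector<float> &x,
                                        const std::vector<float> &values,
                                        const std::vector<uint16_t> &row_ptr,
                                        const std::vector<uint16_t> &col_idx,
                                        const std::vector<float> &bias)
{
  assert(!row_ptr.empty());
  const size_t rows = row_ptr.size() - 1;
  std::vector<float> y(rows, 0.0f);
  for (size_t r = 0; r < rows; ++r)
  {
    float acc = bias.empty() ? 0.0f : bias[r];
    for (uint16_t k = row_ptr[r]; k < row_ptr[r + 1]; ++k)
      acc += values[k] * x[col_idx[k]]; // only stored nonzero weights are visited
    y[r] = acc;
  }
  return y;
}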
+ */ + +#include "L2NormLayer.h" + +#include "OperationUtils.h" + +#include +#include + +namespace onert +{ +namespace backend +{ +namespace cpu +{ +namespace ops +{ + +void L2NormLayer::configure(const IPortableTensor *input, IPortableTensor *output) +{ + assert(input != nullptr); + assert(output != nullptr); + + _input = input; + _output = output; +} + +void L2NormLayer::run() +{ + switch (_input->data_type()) + { + case OperandType::FLOAT32: + nnfw::cker::L2NormalizeFloat32( + getTensorShape(_input), reinterpret_cast(_input->buffer()), + getTensorShape(_output), reinterpret_cast(_output->buffer())); + break; + + case OperandType::QUANT_UINT8_ASYMM: + { + nnfw::cker::L2NormParams params; + assert(_input->data_offset() == 128); + params.input_zero_point = _input->data_offset(); + nnfw::cker::L2NormalizeQuant8( + params, getTensorShape(_input), reinterpret_cast(_input->buffer()), + getTensorShape(_output), reinterpret_cast(_output->buffer())); + } + break; + + default: + throw std::runtime_error{"L2Norm: Unsupported data type"}; + } +} + +} // namespace ops +} // namespace cpu +} // namespace backend +} // namespace onert diff --git a/runtime/onert/backend/cpu/ops/L2NormLayer.h b/runtime/onert/backend/cpu/ops/L2NormLayer.h new file mode 100644 index 0000000..63f2d11 --- /dev/null +++ b/runtime/onert/backend/cpu/ops/L2NormLayer.h @@ -0,0 +1,55 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in riting, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef __ONERT_BACKEND_CPU_OPS_L2NORM_LAYER_H__ +#define __ONERT_BACKEND_CPU_OPS_L2NORM_LAYER_H__ + +#include + +#include + +namespace onert +{ +namespace backend +{ +namespace cpu +{ +namespace ops +{ +class L2NormLayer : public ::onert::exec::IFunction +{ +public: + L2NormLayer() : _input(nullptr), _output(nullptr) + { + // Nothing + } + +public: + void configure(const IPortableTensor *_input, IPortableTensor *output); + + void run() override; + +private: + const IPortableTensor *_input; + IPortableTensor *_output; +}; + +} // namespace ops +} // namespace cpu +} // namespace backend +} // namespace onert + +#endif // __ONERT_BACKEND_CPU_OPS_L2NORM_LAYER_H__ diff --git a/runtime/onert/backend/cpu/ops/LogSoftMaxLayer.cc b/runtime/onert/backend/cpu/ops/LogSoftMaxLayer.cc index d71e325..06dde4f 100644 --- a/runtime/onert/backend/cpu/ops/LogSoftMaxLayer.cc +++ b/runtime/onert/backend/cpu/ops/LogSoftMaxLayer.cc @@ -49,8 +49,8 @@ void LogSoftMaxLayer::logsoftmaxQuant8() // NYI } -void LogSoftMaxLayer::configure(const Tensor *input, const float beta, const int axis, - Tensor *output) +void LogSoftMaxLayer::configure(const IPortableTensor *input, const float beta, const int axis, + IPortableTensor *output) { _input = input; _output = output; diff --git a/runtime/onert/backend/cpu/ops/LogSoftMaxLayer.h b/runtime/onert/backend/cpu/ops/LogSoftMaxLayer.h index bc145ce..ba9deca 100644 --- a/runtime/onert/backend/cpu/ops/LogSoftMaxLayer.h +++ b/runtime/onert/backend/cpu/ops/LogSoftMaxLayer.h @@ -40,13 +40,14 @@ public: void logsoftmaxQuant8(); - void configure(const Tensor *input, const float beta, const int axis, Tensor *output); + void configure(const IPortableTensor *input, const float beta, const int axis, + IPortableTensor *output); void run(); private: - const Tensor *_input; - Tensor *_output; + const IPortableTensor *_input; + IPortableTensor *_output; float _beta; int _axis; diff --git a/runtime/onert/backend/cpu/ops/OperationUtils.h b/runtime/onert/backend/cpu/ops/OperationUtils.h index 8d29374..9838552 100644 --- a/runtime/onert/backend/cpu/ops/OperationUtils.h +++ b/runtime/onert/backend/cpu/ops/OperationUtils.h @@ -52,6 +52,17 @@ union DataPtr { void *v; }; +union ConstDataPtr { + const uint8_t *u8; + const int8_t *i8; + const uint32_t *u32; + const int32_t *i32; + const bool *b; + const float *f; + const int64_t *i64; + const void *v; +}; + uint32_t getNumberOfDimensions(const IPortableTensor *tensor); uint32_t getNumberOfElements(const IPortableTensor *tensor); diff --git a/runtime/onert/backend/cpu/ops/PadLayer.cc b/runtime/onert/backend/cpu/ops/PadLayer.cc index fcfcf7b..6a2bf9d 100644 --- a/runtime/onert/backend/cpu/ops/PadLayer.cc +++ b/runtime/onert/backend/cpu/ops/PadLayer.cc @@ -33,33 +33,40 @@ PadLayer::PadLayer() // DO NOTHING } -void PadLayer::padFloat32() +template void PadLayer::padImpl(const T *constant_value_data) { - nnfw::cker::Pad(_padData, _padRank, getTensorShape(_input), - reinterpret_cast(_input->buffer()), getTensorShape(_output), - reinterpret_cast(_output->buffer()), _constantValueData.f); + nnfw::cker::Pad(_padData, _padRank, getTensorShape(_input), + reinterpret_cast(_input->buffer()), getTensorShape(_output), + reinterpret_cast(_output->buffer()), constant_value_data); } -void PadLayer::padQuant8() { throw std::runtime_error("Quantized Pad isn't supported NYI"); } void PadLayer::configure(const IPortableTensor *input, IPortableTensor *output, - const int32_t *padData, int32_t padRank, uint8_t *constantValueData) + const int32_t *padData, int32_t 
padRank, const void *constantValueData) { _input = input; _output = output; memcpy(_padData, padData, sizeof(_padData)); _padRank = padRank; - _constantValueData.u8 = constantValueData; + _constantValueData.v = constantValueData; } void PadLayer::run() { if (_input->data_type() == OperandType::FLOAT32) { - padFloat32(); + padImpl(_constantValueData.f); } else if (_input->data_type() == OperandType::QUANT_UINT8_ASYMM) { - padQuant8(); + if (_constantValueData.u8 == nullptr) + { + uint8_t pad_value = static_cast(_output->data_offset()); + padImpl(&pad_value); + } + else + { + padImpl(_constantValueData.u8); + } } else { diff --git a/runtime/onert/backend/cpu/ops/PadLayer.h b/runtime/onert/backend/cpu/ops/PadLayer.h index 85bd2e6..efd73d5 100644 --- a/runtime/onert/backend/cpu/ops/PadLayer.h +++ b/runtime/onert/backend/cpu/ops/PadLayer.h @@ -39,12 +39,10 @@ public: PadLayer(); public: - void padFloat32(); - - void padQuant8(); + template void padImpl(const T *constant_value_data); void configure(const IPortableTensor *input, IPortableTensor *output, const int32_t *padData, - int32_t padRank, uint8_t *constantValueData = nullptr); + int32_t padRank, const void *constantValueData = nullptr); void run() override; @@ -54,7 +52,7 @@ private: int32_t _padData[8]; int32_t _padRank; - DataPtr _constantValueData; + ConstDataPtr _constantValueData; }; } // namespace ops diff --git a/runtime/onert/backend/cpu/ops/QuantizeLayer.cc b/runtime/onert/backend/cpu/ops/QuantizeLayer.cc new file mode 100644 index 0000000..45fc148 --- /dev/null +++ b/runtime/onert/backend/cpu/ops/QuantizeLayer.cc @@ -0,0 +1,63 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "QuantizeLayer.h" + +#include + +namespace onert +{ +namespace backend +{ +namespace cpu +{ +namespace ops +{ + +QuantizeLayer::QuantizeLayer() : _input(nullptr), _output(nullptr) +{ + // DO NOTHING +} + +template void QuantizeLayer::affineQuantize() +{ + nnfw::cker::Quantize(getTensorShape(_input), reinterpret_cast(_input->buffer()), + getTensorShape(_output), reinterpret_cast(_output->buffer()), + _output->data_scale(), _output->data_offset()); +} + +void QuantizeLayer::configure(const IPortableTensor *input, IPortableTensor *output) +{ + _input = input; + _output = output; +} + +void QuantizeLayer::run() +{ + if (_input->data_type() == OperandType::FLOAT32) + { + affineQuantize(); + } + else + { + throw std::runtime_error{"Quantize: unsupported data type"}; + } +} + +} // namespace ops +} // namespace cpu +} // namespace backend +} // namespace onert diff --git a/runtime/onert/backend/cpu/ops/QuantizeLayer.h b/runtime/onert/backend/cpu/ops/QuantizeLayer.h new file mode 100644 index 0000000..b4e7aca --- /dev/null +++ b/runtime/onert/backend/cpu/ops/QuantizeLayer.h @@ -0,0 +1,56 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. 
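QuantizeLayer above defers to nnfw::cker::Quantize with the output tensor's scale and zero point. For reference, the float-to-uint8 affine mapping involved is the standard one; a minimal standalone version (not cker's implementation):

#include <algorithm>
#include <cmath>
#include <cstdint>

// q = clamp(round(x / scale) + zero_point, 0, 255)
uint8_t affineQuantize(float x, float scale, int32_t zero_point)
{
  const int32_t q = static_cast<int32_t>(std::round(x / scale)) + zero_point;
  return static_cast<uint8_t>(std::min<int32_t>(255, std::max<int32_t>(0, q)));
}

// Example: with scale 0.5 and zero_point 128, affineQuantize(1.0f, 0.5f, 128) yields 130.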
All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __ONERT_BACKEND_CPU_OPS_QUANTIZELAYER_H__ +#define __ONERT_BACKEND_CPU_OPS_QUANTIZELAYER_H__ + +#include +#include "OperationUtils.h" + +#include + +namespace onert +{ +namespace backend +{ +namespace cpu +{ +namespace ops +{ + +class QuantizeLayer : public ::onert::exec::IFunction +{ +public: + QuantizeLayer(); + +public: + template void affineQuantize(); + + void configure(const IPortableTensor *input, IPortableTensor *output); + + void run() override; + +private: + const IPortableTensor *_input; + IPortableTensor *_output; +}; + +} // namespace ops +} // namespace cpu +} // namespace backend +} // namespace onert + +#endif // __ONERT_BACKEND_CPU_OPS_QUANTIZELAYER_H__ diff --git a/runtime/onert/backend/cpu/ops/ReLU6Layer.cc b/runtime/onert/backend/cpu/ops/ReLU6Layer.cc new file mode 100644 index 0000000..26eb35e --- /dev/null +++ b/runtime/onert/backend/cpu/ops/ReLU6Layer.cc @@ -0,0 +1,74 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ReLU6Layer.h" + +#include "OperationUtils.h" + +#include + +namespace onert +{ +namespace backend +{ +namespace cpu +{ +namespace ops +{ + +ReLU6Layer::ReLU6Layer() : _input(nullptr), _output(nullptr) +{ + // DO NOTHING +} + +void ReLU6Layer::relu6Float32() +{ + nnfw::cker::ReLU6(getTensorShape(_input), reinterpret_cast(_input->buffer()), + reinterpret_cast(_output->buffer())); +} + +void ReLU6Layer::relu6Quant8() +{ + // cker quant8 relu is not implemented yet + throw std::runtime_error{"NYI"}; +} + +void ReLU6Layer::configure(const IPortableTensor *input, IPortableTensor *output) +{ + _input = input; + _output = output; +} + +void ReLU6Layer::run() +{ + if (_input->data_type() == OperandType::FLOAT32) + { + relu6Float32(); + } + else if (_input->data_type() == OperandType::QUANT_UINT8_ASYMM) + { + relu6Quant8(); + } + else + { + throw std::runtime_error{"ReLU6: unsupported data type"}; + } +} + +} // namespace ops +} // namespace cpu +} // namespace backend +} // namespace onert diff --git a/runtime/onert/backend/cpu/ops/ReLU6Layer.h b/runtime/onert/backend/cpu/ops/ReLU6Layer.h new file mode 100644 index 0000000..994d17a --- /dev/null +++ b/runtime/onert/backend/cpu/ops/ReLU6Layer.h @@ -0,0 +1,57 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. 
All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __ONERT_BACKEND_CPU_OPS_RELU6LAYER_H__ +#define __ONERT_BACKEND_CPU_OPS_RELU6LAYER_H__ + +#include + +#include + +namespace onert +{ +namespace backend +{ +namespace cpu +{ +namespace ops +{ + +class ReLU6Layer : public ::onert::exec::IFunction +{ +public: + ReLU6Layer(); + +public: + void relu6Float32(); + + void relu6Quant8(); + + void configure(const IPortableTensor *input, IPortableTensor *output); + + void run() override; + +private: + const IPortableTensor *_input; + IPortableTensor *_output; +}; + +} // namespace ops +} // namespace cpu +} // namespace backend +} // namespace onert + +#endif // __ONERT_BACKEND_CPU_OPS_RELU6LAYER_H__ diff --git a/runtime/onert/backend/cpu/ops/ReduceLayer.cc b/runtime/onert/backend/cpu/ops/ReduceLayer.cc index 1dad031..fe22dbe 100644 --- a/runtime/onert/backend/cpu/ops/ReduceLayer.cc +++ b/runtime/onert/backend/cpu/ops/ReduceLayer.cc @@ -116,6 +116,39 @@ void evalGeneric(const IPortableTensor *input, IPortableTensor *output, throw std::runtime_error{"Reduce(generic): unsupported data type"}; } } + +void evalSumQuantized(const IPortableTensor *input, IPortableTensor *output, + const std::vector &axes, bool keep_dims, + nnfw::cker::Reduce &reduce_kernel) +{ + const bool same_scale = (input->data_scale() == output->data_scale() && + input->data_offset() == output->data_offset()); + + reduce_kernel.prepare(input->num_dimensions(), axes.size()); + + if (!same_scale) + { + std::vector temp_sum(output->getShape().num_elements()); + bool result = reduce_kernel.QuantizedMeanOrSum( + reinterpret_cast(input->buffer()), input->data_offset(), + input->data_scale(), getTensorShape(input), reinterpret_cast(output->buffer()), + output->data_offset(), output->data_scale(), getTensorShape(output), axes, keep_dims, + temp_sum.data(), true, [](const int32_t current, const uint8_t in) -> int32_t { + const int32_t actual_in = static_cast(in); + return current + actual_in; + }); + + if (!result) + { + throw std::runtime_error{"Reduce: Fail to run"}; + } + + return; + } + + evalGeneric(input, output, axes, keep_dims, reduce_kernel); +} + } // namespace ReduceLayer::ReduceLayer() @@ -143,6 +176,11 @@ void ReduceLayer::run() switch (_reduceType) { case ReduceType::kSum: + if (_input->data_type() == OperandType::QUANT_UINT8_ASYMM) + { + evalSumQuantized(_input, _output, axes, _keep_dims, *_reduce_kernel); + return; + } evalGeneric(_input, _output, axes, _keep_dims, *_reduce_kernel); break; case ReduceType::kProd: diff --git a/runtime/onert/backend/cpu/ops/ResizeBilinearLayer.cc b/runtime/onert/backend/cpu/ops/ResizeBilinearLayer.cc new file mode 100644 index 0000000..180094b --- /dev/null +++ b/runtime/onert/backend/cpu/ops/ResizeBilinearLayer.cc @@ -0,0 +1,87 @@ +/* + * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
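The evalSumQuantized() helper above takes its special branch only when the input and output quantization parameters differ: raw uint8 values are accumulated into a wider temporary and the total is then rescaled into the output's parameters. A rough standalone sketch of that requantization idea (the real arithmetic lives in cker's QuantizedMeanOrSum and may differ in rounding and overflow handling):

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <vector>

// Sum a run of uint8 values and express the result in the output's
// quantization parameters. Illustrative only.
uint8_t requantizedSum(const std::vector<uint8_t> &values, float in_scale, int32_t in_zp,
                       float out_scale, int32_t out_zp)
{
  int32_t acc = 0;
  for (uint8_t v : values)
    acc += static_cast<int32_t>(v) - in_zp; // widen and remove the input zero point

  const float real_sum = acc * in_scale; // back to the real-valued domain
  const int32_t q = static_cast<int32_t>(std::round(real_sum / out_scale)) + out_zp;
  return static_cast<uint8_t>(std::min<int32_t>(255, std::max<int32_t>(0, q)));
}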
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include "OperationUtils.h" +#include "ResizeBilinearLayer.h" +#include "cker/operation/ResizeBilinear.h" +#include + +namespace onert +{ +namespace backend +{ +namespace cpu +{ +namespace ops +{ + +ResizeBilinearLayer::ResizeBilinearLayer() + : _input(nullptr), _output(nullptr), _output_height(0), _output_width(0), _align_corners(false), + _half_pixel_centers(false) +{ + // DO NOTHING +} + +void ResizeBilinearLayer::configure(const IPortableTensor *input, IPortableTensor *output, + int32_t output_height, int32_t output_width, bool align_corners, + bool half_pixel_centers) +{ + _input = input; + _output = output; + _output_height = output_height; + _output_width = output_width; + _align_corners = align_corners; + _half_pixel_centers = half_pixel_centers; +} + +void ResizeBilinearLayer::run() +{ + nnfw::cker::ResizeBilinearParams params; + params.align_corners = _align_corners; + params.half_pixel_centers = _half_pixel_centers; + params.output_height = _output_height; + params.output_width = _output_width; + + switch (_input->data_type()) + { + case OperandType::FLOAT32: + nnfw::cker::ResizeBilinear( + params, getTensorShape(_input), reinterpret_cast(_input->buffer()), + getTensorShape(_output), reinterpret_cast(_output->buffer())); + break; + + case OperandType::QUANT_UINT8_ASYMM: + nnfw::cker::ResizeBilinear( + params, getTensorShape(_input), reinterpret_cast(_input->buffer()), + getTensorShape(_output), reinterpret_cast(_output->buffer())); + break; + + case OperandType::UINT8: + case OperandType::BOOL8: + case OperandType::FLOAT16: + case OperandType::INT32: + case OperandType::INT64: + case OperandType::QUANT_INT8_SYMM: + throw std::runtime_error("ResizeBilinear NYI"); + break; + default: + throw std::runtime_error("ResizeBilinear unsupported data type"); + } +} + +} // namespace ops +} // namespace cpu +} // namespace backend +} // namespace onert diff --git a/runtime/onert/backend/cpu/ops/ResizeBilinearLayer.h b/runtime/onert/backend/cpu/ops/ResizeBilinearLayer.h new file mode 100644 index 0000000..fc49b34 --- /dev/null +++ b/runtime/onert/backend/cpu/ops/ResizeBilinearLayer.h @@ -0,0 +1,58 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
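The align_corners and half_pixel_centers flags threaded through ResizeBilinearLayer select between the usual source-coordinate mappings for bilinear resizing. As a reference, a standalone sketch of those mappings along one axis (standard TensorFlow-style conventions; this is not a quote of the cker kernel):

#include <cstdint>

// Map an output pixel index to a fractional source coordinate along one axis.
float sourceCoord(int32_t out_index, int32_t in_size, int32_t out_size, bool align_corners,
                  bool half_pixel_centers)
{
  if (half_pixel_centers)
    return (out_index + 0.5f) * (static_cast<float>(in_size) / out_size) - 0.5f;
  if (align_corners && out_size > 1)
    return out_index * (static_cast<float>(in_size - 1) / (out_size - 1));
  return out_index * (static_cast<float>(in_size) / out_size);
}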
+ */ + +#ifndef __ONERT_BACKEND_CPU_OPS_RESIZEBILINEAR_H__ +#define __ONERT_BACKEND_CPU_OPS_RESIZEBILINEAR_H__ + +#include + +#include + +namespace onert +{ +namespace backend +{ +namespace cpu +{ +namespace ops +{ + +class ResizeBilinearLayer : public ::onert::exec::IFunction +{ +public: + ResizeBilinearLayer(); + +public: + void configure(const IPortableTensor *input1, IPortableTensor *output, int32_t output_height, + int32_t output_width, bool align_corners, bool half_pixel_centers); + + void run() override; + +private: + const IPortableTensor *_input; + IPortableTensor *_output; + int32_t _output_height; + int32_t _output_width; + bool _align_corners; + bool _half_pixel_centers; +}; + +} // namespace ops +} // namespace cpu +} // namespace backend +} // namespace onert + +#endif // __ONERT_BACKEND_CPU_OPS_RESIZEBILINEAR_H__ diff --git a/runtime/onert/backend/cpu/ops/SliceLayer.cc b/runtime/onert/backend/cpu/ops/SliceLayer.cc index a9106c1..449c073 100644 --- a/runtime/onert/backend/cpu/ops/SliceLayer.cc +++ b/runtime/onert/backend/cpu/ops/SliceLayer.cc @@ -46,7 +46,7 @@ void SliceLayer::GetBeginAndSizeVectors(int dimensions, const IPortableTensor *b } } -void SliceLayer::sliceFloat32() +template void SliceLayer::sliceImpl() { const int kMaxDim = nnfw::cker::Shape::kMaxSmallSize; @@ -74,14 +74,8 @@ void SliceLayer::sliceFloat32() } nnfw::cker::Slice(op_params, getExtendedTensorShape(_input), - reinterpret_cast(_input->buffer()), - reinterpret_cast(_output->buffer())); -} - -void SliceLayer::sliceQuant8() -{ - // cker quant8 slice is not implemented yet - throw std::runtime_error{"NYI"}; + reinterpret_cast(_input->buffer()), + reinterpret_cast(_output->buffer())); } void SliceLayer::configure(const IPortableTensor *input, const IPortableTensor *begin, @@ -97,11 +91,11 @@ void SliceLayer::run() { if (_input->data_type() == OperandType::FLOAT32) { - sliceFloat32(); + sliceImpl(); } else if (_input->data_type() == OperandType::QUANT_UINT8_ASYMM) { - sliceQuant8(); + sliceImpl(); } else { diff --git a/runtime/onert/backend/cpu/ops/SliceLayer.h b/runtime/onert/backend/cpu/ops/SliceLayer.h index 9945d7e..650e2c9 100644 --- a/runtime/onert/backend/cpu/ops/SliceLayer.h +++ b/runtime/onert/backend/cpu/ops/SliceLayer.h @@ -42,8 +42,7 @@ public: void run() override; private: - void sliceFloat32(); - void sliceQuant8(); + template void sliceImpl(); template void GetBeginAndSizeVectors(int dimensions, const IPortableTensor *begin, diff --git a/runtime/onert/backend/cpu/ops/SpaceToDepthLayer.cc b/runtime/onert/backend/cpu/ops/SpaceToDepthLayer.cc new file mode 100644 index 0000000..a0869ae --- /dev/null +++ b/runtime/onert/backend/cpu/ops/SpaceToDepthLayer.cc @@ -0,0 +1,74 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "SpaceToDepthLayer.h" + +#include "OperationUtils.h" + +#include + +namespace onert +{ +namespace backend +{ +namespace cpu +{ +namespace ops +{ +SpaceToDepthLayer::SpaceToDepthLayer() : _input(nullptr), _block_size(0), _output(nullptr) +{ + // DO NOTHING +} + +template void SpaceToDepthLayer::spaceToDepth() +{ + + nnfw::cker::SpaceToDepthParams params; + params.block_size = _block_size; + + nnfw::cker::SpaceToDepth(params, getTensorShape(_input), + reinterpret_cast(_input->buffer()), getTensorShape(_output), + reinterpret_cast(_output->buffer())); +} + +void SpaceToDepthLayer::configure(const IPortableTensor *input, const int32_t block_size, + IPortableTensor *output) +{ + _input = input; + _block_size = block_size; + _output = output; +} + +void SpaceToDepthLayer::run() +{ + if (_input->data_type() == OperandType::FLOAT32) + { + spaceToDepth(); + } + else if (_input->data_type() == OperandType::QUANT_UINT8_ASYMM) + { + spaceToDepth(); + } + else + { + throw std::runtime_error{"SpaceToDepth: unsupported data type"}; + } +} + +} // namespace ops +} // namespace cpu +} // namespace backend +} // namespace onert diff --git a/runtime/onert/backend/cpu/ops/SpaceToDepthLayer.h b/runtime/onert/backend/cpu/ops/SpaceToDepthLayer.h new file mode 100644 index 0000000..c11ef2b --- /dev/null +++ b/runtime/onert/backend/cpu/ops/SpaceToDepthLayer.h @@ -0,0 +1,54 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in riting, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __ONERT_BACKEND_CPU_OPS_SPACE_TO_DEPTH_LAYER_H__ +#define __ONERT_BACKEND_CPU_OPS_SPACE_TO_DEPTH_LAYER_H__ + +#include + +#include + +namespace onert +{ +namespace backend +{ +namespace cpu +{ +namespace ops +{ +class SpaceToDepthLayer : public ::onert::exec::IFunction +{ +public: + SpaceToDepthLayer(); + + void configure(const IPortableTensor *input, const int32_t block_size, IPortableTensor *output); + + void run() override; + +private: + template void spaceToDepth(); + + const IPortableTensor *_input; + int32_t _block_size; + IPortableTensor *_output; +}; + +} // namespace ops +} // namespace cpu +} // namespace backend +} // namespace onert + +#endif // __ONERT_BACKEND_CPU_OPS_SPACE_TO_BATCH_ND_LAYER_H__ diff --git a/runtime/onert/backend/cpu/ops/SplitVLayer.cc b/runtime/onert/backend/cpu/ops/SplitVLayer.cc new file mode 100644 index 0000000..d6ca124 --- /dev/null +++ b/runtime/onert/backend/cpu/ops/SplitVLayer.cc @@ -0,0 +1,99 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
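Note on the SpaceToDepthLayer code above: it only passes block_size through to nnfw::cker::SpaceToDepthParams. For reference, the rearrangement SpaceToDepth conventionally performs on an NHWC tensor looks like the standalone sketch below (TF-style output channel ordering assumed for illustration; this is not the cker kernel):

#include <vector>

// input:  [N, H, W, C], output: [N, H/bs, W/bs, C*bs*bs], both flat NHWC buffers.
// The (y, x) position inside each bs x bs block becomes the high-order part of the
// output channel index.
template <typename T>
void spaceToDepthRef(const std::vector<T> &in, std::vector<T> &out,
                     int N, int H, int W, int C, int bs)
{
  const int OH = H / bs, OW = W / bs, OC = C * bs * bs;
  for (int n = 0; n < N; ++n)
    for (int h = 0; h < H; ++h)
      for (int w = 0; w < W; ++w)
        for (int c = 0; c < C; ++c)
        {
          const int oh = h / bs, ow = w / bs;
          const int oc = ((h % bs) * bs + (w % bs)) * C + c;
          out[((n * OH + oh) * OW + ow) * OC + oc] = in[((n * H + h) * W + w) * C + c];
        }
}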
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "SplitVLayer.h" + +#include "OperationUtils.h" + +#include + +namespace onert +{ +namespace backend +{ +namespace cpu +{ +namespace ops +{ + +SplitVLayer::SplitVLayer() + : _input(nullptr), _size_splits(nullptr), _split_dim(nullptr), _num_splits(0), _outputs() +{ + // DO NOTHING +} + +template void SplitVLayer::splitV(void) +{ + nnfw::cker::SplitVParams op_params; + op_params.axis = *(reinterpret_cast(_split_dim->buffer())); + op_params.num_split = _num_splits; + + std::vector outputPtrs; + std::vector outshape; + + for (const auto output : _outputs) + { + assert(output->total_size() == sizeOfData(output->data_type(), output->getShape().dims())); + outputPtrs.emplace_back(reinterpret_cast(output->buffer())); + outshape.emplace_back(getTensorShape(output)); + } + + assert(_input->total_size() == sizeOfData(_input->data_type(), _input->getShape().dims())); + nnfw::cker::SplitV(op_params, getTensorShape(_input), reinterpret_cast(_input->buffer()), + outshape, outputPtrs.data()); +} + +void SplitVLayer::configure(const IPortableTensor *input, const IPortableTensor *size_splits, + const IPortableTensor *split_dim, uint16_t num_splits, + std::vector &outputs) +{ + assert(input != nullptr); + + _num_splits = num_splits; + _size_splits = size_splits; + _input = input; + _split_dim = split_dim; + _outputs = outputs; +} + +void SplitVLayer::run() +{ + if (_input->data_type() == OperandType::FLOAT32) + { + splitV(); + } + else if (_input->data_type() == OperandType::QUANT_UINT8_ASYMM) + { + splitV(); + } + else if (_input->data_type() == OperandType::INT32) + { + splitV(); + } + else if (_input->data_type() == OperandType::INT64) + { + splitV(); + } + else + { + throw std::runtime_error{"SplitV: unsupported input type"}; + } +} + +} // namespace ops +} // namespace cpu +} // namespace backend +} // namespace onert diff --git a/runtime/onert/backend/cpu/ops/SplitVLayer.h b/runtime/onert/backend/cpu/ops/SplitVLayer.h new file mode 100644 index 0000000..98f2f44 --- /dev/null +++ b/runtime/onert/backend/cpu/ops/SplitVLayer.h @@ -0,0 +1,60 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
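Note on the SplitVLayer code above: it reads the per-output sizes and the split axis from tensors at run time and hands cker one output pointer per slice. The standalone sketch below shows the bookkeeping for the simple case of splitting along the outermost axis (illustrative names only; the real kernel handles arbitrary axes through SplitVParams):

#include <vector>

// Split a flat [total_rows, row_elems] buffer into pieces of sizes[i] rows each.
// Mirrors the idea of SplitV: sizes are explicit per output, unlike an even Split.
template <typename T>
std::vector<std::vector<T>> splitVRows(const std::vector<T> &in,
                                       const std::vector<int> &sizes, int row_elems)
{
  std::vector<std::vector<T>> outputs;
  size_t offset = 0;
  for (int rows : sizes)
  {
    std::vector<T> piece(in.begin() + offset, in.begin() + offset + rows * row_elems);
    outputs.push_back(std::move(piece));
    offset += static_cast<size_t>(rows) * row_elems;
  }
  return outputs;
}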
+ */ + +#ifndef __ONERT_BACKEND_CPU_OPS_SPLIT_V_LAYER_H__ +#define __ONERT_BACKEND_CPU_OPS_SPLIT_V_LAYER_H__ + +#include + +#include + +namespace onert +{ +namespace backend +{ +namespace cpu +{ +namespace ops +{ + +class SplitVLayer : public ::onert::exec::IFunction +{ +public: + SplitVLayer(); + +public: + template void splitV(void); + + void configure(const IPortableTensor *input, const IPortableTensor *size_splits, + const IPortableTensor *size_dim, uint16_t num_splits, + std::vector &outputs); + + void run() override; + +private: + const IPortableTensor *_input; + const IPortableTensor *_size_splits; + const IPortableTensor *_split_dim; + uint16_t _num_splits; + std::vector _outputs; +}; + +} // namespace ops +} // namespace cpu +} // namespace backend +} // namespace onert + +#endif // __ONERT_BACKEND_CPU_OPS_SPLIT_V_LAYER_H__ diff --git a/runtime/onert/backend/cpu/ops/StatelessRandomUniformLayer.cc b/runtime/onert/backend/cpu/ops/StatelessRandomUniformLayer.cc new file mode 100644 index 0000000..b8dfcb4 --- /dev/null +++ b/runtime/onert/backend/cpu/ops/StatelessRandomUniformLayer.cc @@ -0,0 +1,68 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "StatelessRandomUniformLayer.h" + +#include + +namespace onert +{ +namespace backend +{ +namespace cpu +{ +namespace ops +{ + +StatelessRandomUniformLayer::StatelessRandomUniformLayer() + : _shape(nullptr), _seed(nullptr), _output(nullptr) +{ + // DO NOTHING +} + +void StatelessRandomUniformLayer::configure(const IPortableTensor *shape, + const IPortableTensor *seed, IPortableTensor *output) +{ + _shape = shape; + _seed = seed; + _output = output; +} + +void StatelessRandomUniformLayer::StatelessRandomUniformFloat32() +{ + nnfw::cker::StatelessRandomUniform( + getTensorShape(_shape), reinterpret_cast(_shape->buffer()), + getTensorShape(_seed), reinterpret_cast(_seed->buffer()), + getTensorShape(_output), reinterpret_cast(_output->buffer())); +} + +void StatelessRandomUniformLayer::run() +{ + switch (_output->data_type()) + { + // ToDo : It need to support INT8 and UINT8 also when will be applied quantization. + case OperandType::FLOAT32: + StatelessRandomUniformFloat32(); + break; + default: + throw std::runtime_error{"StatelessRandomUniformLayer: unsupported data type"}; + } +} + +} // namespace ops +} // namespace cpu +} // namespace backend +} // namespace onert diff --git a/runtime/onert/backend/cpu/ops/StatelessRandomUniformLayer.h b/runtime/onert/backend/cpu/ops/StatelessRandomUniformLayer.h new file mode 100644 index 0000000..ef11d62 --- /dev/null +++ b/runtime/onert/backend/cpu/ops/StatelessRandomUniformLayer.h @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
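Note on StatelessRandomUniformLayer above: it takes the output shape from one tensor and a seed from another, and the point of the "stateless" variant is that identical (shape, seed) inputs always reproduce identical outputs. The sketch below only illustrates that contract using the standard library; the actual kernel delegates to nnfw::cker::StatelessRandomUniform, which uses a counter-based generator rather than std::mt19937:

#include <cstdint>
#include <random>
#include <vector>

// Deterministic uniform [0, 1) values for a given element count and 2-element seed.
// Same (count, seed) -> same output; different seed -> an independent-looking stream.
std::vector<float> statelessUniform(size_t count, int32_t seed0, int32_t seed1)
{
  std::seed_seq seq{seed0, seed1};
  std::mt19937 gen(seq);
  std::uniform_real_distribution<float> dist(0.0f, 1.0f);
  std::vector<float> out(count);
  for (auto &v : out)
    v = dist(gen);
  return out;
}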
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __ONERT_BACKEND_CPU_OPS_STATELESS_RANDOM_UNIFORM_H__ +#define __ONERT_BACKEND_CPU_OPS_STATELESS_RANDOM_UNIFORM_H__ + +#include +#include "OperationUtils.h" + +#include + +namespace onert +{ +namespace backend +{ +namespace cpu +{ +namespace ops +{ + +class StatelessRandomUniformLayer : public ::onert::exec::IFunction +{ +public: + StatelessRandomUniformLayer(); + +public: + void configure(const IPortableTensor *shape, const IPortableTensor *seed, + IPortableTensor *output); + + void StatelessRandomUniformFloat32(); + + void run() override; + +private: + const IPortableTensor *_shape; + const IPortableTensor *_seed; + + IPortableTensor *_output; +}; + +} // namespace ops +} // namespace cpu +} // namespace backend +} // namespace onert + +#endif // __ONERT_BACKEND_CPU_OPS_STATELESS_RANDOM_UNIFORM_H__ diff --git a/runtime/onert/core/include/backend/BackendContext.h b/runtime/onert/core/include/backend/BackendContext.h index c82e5b7..c263aef 100644 --- a/runtime/onert/core/include/backend/BackendContext.h +++ b/runtime/onert/core/include/backend/BackendContext.h @@ -56,6 +56,8 @@ public: { } + virtual ~BackendContext() = default; + void initialize(const std::vector &operation_list, const std::vector &operand_list); void initConsts(); diff --git a/runtime/onert/core/include/backend/IExternalContext.h b/runtime/onert/core/include/backend/IExternalContext.h new file mode 100644 index 0000000..88ffb50 --- /dev/null +++ b/runtime/onert/core/include/backend/IExternalContext.h @@ -0,0 +1,34 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
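Note on the BackendContext hunk above: it adds a virtual defaulted destructor, which matters whenever a derived backend context is destroyed through a BackendContext pointer. A standalone reminder of the rule, with illustrative types:

#include <memory>

struct Base
{
  virtual ~Base() = default; // without this, deleting a Derived through Base* is undefined behavior
};

struct Derived : Base
{
  ~Derived() override = default; // now guaranteed to run
};

int main()
{
  std::unique_ptr<Base> p = std::make_unique<Derived>(); // Derived's destructor runs at scope exit
}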
+ */ + +#ifndef __ONERT_BACKEND_IEXTERNAL_CONTEXT_H__ +#define __ONERT_BACKEND_IEXTERNAL_CONTEXT_H__ + +namespace onert +{ +namespace backend +{ + +struct IExternalContext +{ + virtual ~IExternalContext() = default; + virtual void setMaxNumThreads(int) = 0; +}; + +} // namespace backend +} // namespace onert + +#endif // __ONERT_BACKEND_IEXTERNAL_CONTEXT__ diff --git a/runtime/onert/core/include/backend/IPortableTensor.h b/runtime/onert/core/include/backend/IPortableTensor.h index 2b2d008..a05b39a 100644 --- a/runtime/onert/core/include/backend/IPortableTensor.h +++ b/runtime/onert/core/include/backend/IPortableTensor.h @@ -37,6 +37,9 @@ class IPortableTensor : public ITensor { public: virtual ~IPortableTensor() = default; + virtual bool is_sparse() const { return false; } + virtual const uint16_t *w1_segments() const { return nullptr; } + virtual const uint16_t *w1_indices() const { return nullptr; } public: bool has_padding() const final { return false; } diff --git a/runtime/onert/core/include/backend/ITensor.h b/runtime/onert/core/include/backend/ITensor.h index 217d9de..12b1c54 100644 --- a/runtime/onert/core/include/backend/ITensor.h +++ b/runtime/onert/core/include/backend/ITensor.h @@ -32,6 +32,8 @@ namespace onert namespace backend { +struct IDynamicTensorManager; + class ITensor { public: @@ -51,6 +53,15 @@ public: virtual void access(const std::function &fn) = 0; /** + * @brief Return the dynamic tensor manager + * + * If dynamic tensors are not supported, it returns @c nullptr . + * + * @return IDynamicTensorManager* DynamicTensorManager + */ + virtual IDynamicTensorManager *dynamic_tensor_manager() { return nullptr; } + + /** * @brief Return true if the tensor is constant */ virtual bool is_constant() const diff --git a/runtime/onert/core/include/backend/ITensorBuilder.h b/runtime/onert/core/include/backend/ITensorBuilder.h index a49525b..b760cda 100644 --- a/runtime/onert/core/include/backend/ITensorBuilder.h +++ b/runtime/onert/core/include/backend/ITensorBuilder.h @@ -112,12 +112,12 @@ public: // methods for static tensor allocation virtual std::shared_ptr tensorAt(const ir::OperandIndex &ind) = 0; /** - * @brief Set the External Tensor object + * @brief Set the migrant tensor object * * @return true if succeeded * @return false if failed or unsupported */ - virtual bool setExternalTensor(const ir::OperandIndex &, const std::shared_ptr &) + virtual bool setMigrantTensor(const ir::OperandIndex &, const std::shared_ptr &) { return false; } diff --git a/runtime/onert/core/include/backend/ITensorRegistry.h b/runtime/onert/core/include/backend/ITensorRegistry.h index f5a95f4..8555131 100644 --- a/runtime/onert/core/include/backend/ITensorRegistry.h +++ b/runtime/onert/core/include/backend/ITensorRegistry.h @@ -35,17 +35,22 @@ struct ITensorRegistry virtual ~ITensorRegistry() = default; /** - * @brief Returns pointer of ITensor among managed and external tensors + * @brief Returns pointer of ITensor among native and migrant tensors + * + * Native Tensor is a tensor that is managed by this backend + * Migrant Tensor is a tensor that is imported from another backend + * * @note Return tensor cannot be used longer than dynamic tensor manager */ virtual std::shared_ptr getITensor(const ir::OperandIndex &) = 0; /** - * @brief Returns pointer of ITensor among managed tensors + * @brief Returns pointer of ITensor among native tensors * - * Unlike @c getITensor , this function only searches from managed tensors - * @note Return tensor cannot be used longer than dynamic tensor manager + * 
Unlike @c getITensor , this function only searches from native tensors + * + * @note Returned tensor cannot be used longer than dynamic tensor manager */ - virtual std::shared_ptr getManagedITensor(const ir::OperandIndex &) = 0; + virtual std::shared_ptr getNativeITensor(const ir::OperandIndex &) = 0; }; } // namespace backend @@ -73,68 +78,67 @@ public: std::shared_ptr getITensor(const ir::OperandIndex &ind) override { static_assert(std::is_base_of::value, "T_Tensor must derive from ITensor."); - auto external_tensor = _external.find(ind); - if (external_tensor != _external.end()) + auto external_tensor = _migrant.find(ind); + if (external_tensor != _migrant.end()) return external_tensor->second; - return getManagedTensor(ind); + return getNativeTensor(ind); } - std::shared_ptr getManagedITensor(const ir::OperandIndex &ind) override + std::shared_ptr getNativeITensor(const ir::OperandIndex &ind) override { - return getManagedTensor(ind); + return getNativeTensor(ind); } std::shared_ptr getPortableTensor(const ir::OperandIndex &ind) { - auto external_tensor = _external.find(ind); - if (external_tensor != _external.end()) + auto external_tensor = _migrant.find(ind); + if (external_tensor != _migrant.end()) { if (external_tensor->second) return external_tensor->second; } - return getManagedTensor(ind); + return getNativeTensor(ind); } - std::shared_ptr getManagedTensor(const ir::OperandIndex &ind) + std::shared_ptr getNativeTensor(const ir::OperandIndex &ind) { - auto tensor = _managed.find(ind); - if (tensor != _managed.end()) + auto tensor = _native.find(ind); + if (tensor != _native.end()) return tensor->second; return nullptr; } - bool setExternalTensor(const ir::OperandIndex &ind, - const std::shared_ptr &tensor) + bool setMigrantTensor(const ir::OperandIndex &ind, const std::shared_ptr &tensor) { // TODO Uncomment this as two tensors for an index is not allowed. // But now it is temporarily allowed as a workaround. External one hides Managed one. 
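Note on the registry renaming above (managed to native, external to migrant): the lookup contract stays the same, getITensor() prefers a migrant tensor if one was registered, getNativeITensor() only looks at this backend's own tensors, and registering a native tensor over an existing migrant one is an error. A minimal standalone sketch of that double-map pattern (illustrative types, not the onert classes):

#include <memory>
#include <stdexcept>
#include <unordered_map>

struct Tensor { /* payload omitted */ };

class Registry
{
public:
  // Search migrant (imported) tensors first, then this backend's native ones.
  std::shared_ptr<Tensor> getITensor(int ind)
  {
    auto it = _migrant.find(ind);
    if (it != _migrant.end())
      return it->second;
    return getNativeITensor(ind);
  }
  std::shared_ptr<Tensor> getNativeITensor(int ind)
  {
    auto it = _native.find(ind);
    return it != _native.end() ? it->second : nullptr;
  }
  void setMigrantTensor(int ind, std::shared_ptr<Tensor> t) { _migrant[ind] = std::move(t); }
  void setNativeTensor(int ind, std::shared_ptr<Tensor> t)
  {
    if (_migrant.count(ind) && _migrant[ind] && t)
      throw std::runtime_error{"native tensor would shadow an existing migrant tensor"};
    _native[ind] = std::move(t);
  }

private:
  std::unordered_map<int, std::shared_ptr<Tensor>> _migrant;
  std::unordered_map<int, std::shared_ptr<Tensor>> _native;
};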
- // auto itr = _managed.find(ind); - // if (itr != _managed.end() && itr->second != nullptr && tensor != nullptr) + // auto itr = _native.find(ind); + // if (itr != _native.end() && itr->second != nullptr && tensor != nullptr) // throw std::runtime_error{ - // "Tried to set an external tensor but an managed tensor already exists."}; - _external[ind] = tensor; + // "Tried to set an migrant tensor but an native tensor already exists."}; + _migrant[ind] = tensor; return true; } - void setManagedTensor(const ir::OperandIndex &ind, const std::shared_ptr &tensor) + void setNativeTensor(const ir::OperandIndex &ind, const std::shared_ptr &tensor) { - auto itr = _external.find(ind); - if (itr != _external.end() && itr->second != nullptr && tensor != nullptr) + auto itr = _migrant.find(ind); + if (itr != _migrant.end() && itr->second != nullptr && tensor != nullptr) throw std::runtime_error{ - "Tried to set a managed tensor but an external tensor already exists."}; - _managed[ind] = tensor; + "Tried to set a native tensor but an migrant tensor already exists."}; + _native[ind] = tensor; } - const ir::OperandIndexMap> &managed_tensors() { return _managed; } + const ir::OperandIndexMap> &native_tensors() { return _native; } - const ir::OperandIndexMap> &external_tensors() + const ir::OperandIndexMap> &migrant_tensors() { - return _external; + return _migrant; } private: - ir::OperandIndexMap> _external; - ir::OperandIndexMap> _managed; + ir::OperandIndexMap> _migrant; + ir::OperandIndexMap> _native; }; } // namespace backend diff --git a/runtime/onert/core/include/backend/cpu_common/StaticTensorManager.h b/runtime/onert/core/include/backend/cpu_common/StaticTensorManager.h index 6ddacc7..a7e034a 100644 --- a/runtime/onert/core/include/backend/cpu_common/StaticTensorManager.h +++ b/runtime/onert/core/include/backend/cpu_common/StaticTensorManager.h @@ -19,7 +19,7 @@ #include "MemoryManager.h" -#include "backend/ITensorManager.h" +#include "backend/IStaticTensorManager.h" #include "ir/OperandIndexMap.h" #include "ir/OperandInfo.h" #include "TensorRegistry.h" @@ -31,7 +31,7 @@ namespace backend namespace cpu_common { -class StaticTensorManager : public backend::ITensorManager +class StaticTensorManager : public backend::IStaticTensorManager { public: StaticTensorManager(const std::shared_ptr ®); diff --git a/runtime/onert/core/include/backend/cpu_common/Tensor.h b/runtime/onert/core/include/backend/cpu_common/Tensor.h index a0db96d..974501e 100644 --- a/runtime/onert/core/include/backend/cpu_common/Tensor.h +++ b/runtime/onert/core/include/backend/cpu_common/Tensor.h @@ -35,27 +35,42 @@ public: Tensor() = delete; public: - Tensor(const ir::OperandInfo &info, const ir::Layout layout) - : _info(info), _layout(layout), _buffer(nullptr), _num_references(0), _allocator(nullptr) + Tensor(const ir::OperandInfo &info, const ir::Layout layout, + IDynamicTensorManager *dynamic_tensor_manager) + : _info(info), _layout(layout), _buffer(nullptr), _num_references(0), + _dynamic_tensor_manager(dynamic_tensor_manager), _allocator(nullptr) { // DO NOTHING } public: // Only one of two method 'setBuffer' must be called once + + /** + * @brief Set the Buffer object. This method is called for static and non-const tensor + */ void setBuffer(uint8_t *buffer) { - assert(_buffer == nullptr && _allocator == nullptr); + assert(_buffer == nullptr); _buffer = buffer; } + + /** + * @brief Set the Buffer object. 
This method is called for dynamic or const tensor + */ void setBuffer(const std::shared_ptr &alloc) { - assert(_buffer == nullptr && _allocator == nullptr); + assert(_buffer == nullptr); _allocator = alloc; + _buffer = alloc->base(); } // This works just as setBuffer but it simply overwrite existing Allocator without nullptr check - void overwriteBuffer(const std::shared_ptr &alloc) { _allocator = alloc; } + void overwriteBuffer(const std::shared_ptr &alloc) + { + _allocator = alloc; + _buffer = alloc->base(); + } /** * @brief Mark this tensor does not have memory. @@ -68,13 +83,7 @@ public: } public: - uint8_t *buffer() const override - { - if (_allocator != nullptr) - return _allocator->base(); - else - return _buffer; - } + uint8_t *buffer() const override { return _buffer; } /** * @brief Get dimension by index * @@ -96,12 +105,16 @@ public: bool is_constant() const override { return _info.isConstant(); } bool is_dynamic() const override { return _info.isDynamic(); } void set_dynamic() override { _info.setDynamic(); } + IDynamicTensorManager *dynamic_tensor_manager() override { return _dynamic_tensor_manager; } + bool is_sparse() const override { return _info.typeInfo().sparse(); } + virtual const uint16_t *w1_segments() const override { return _info.typeInfo().w1_segments(); } + virtual const uint16_t *w1_indices() const override { return _info.typeInfo().w1_indices(); } virtual void increase_ref() { assert(is_dynamic() || // when not dynamic - (_buffer != nullptr || _allocator != nullptr)); + (_buffer != nullptr)); ++_num_references; } @@ -110,12 +123,12 @@ public: assert(_buffer != nullptr || _allocator != nullptr); assert(_num_references > 0); --_num_references; - // Only constant tensor has allocator pointer + // constant tensor and dynamic tensor has _allocator if (_num_references == 0) { if (_buffer != nullptr) _buffer = nullptr; - else + if (_allocator != nullptr) { _allocator->release(); _allocator = nullptr; @@ -130,8 +143,15 @@ protected: ir::Layout _layout; uint8_t *_buffer; int32_t _num_references; + IDynamicTensorManager *_dynamic_tensor_manager; private: + /** + * @brief Memory allocator for dynamic tensor and const tensor + * Since maintaing _allocator and also _buffer makes confusion, + * we will mainly use _buffer (not _allocator.base()) for memory pointer in this code. + * _allocator(shared_ptr) is used to guarantee that we have valid _buffer. 
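Note on the Tensor change above: _buffer becomes the single pointer the code reads, while _allocator is kept only to own the memory behind it (for const and dynamic tensors). A minimal standalone sketch of that ownership split, with illustrative names:

#include <cassert>
#include <cstdint>
#include <memory>
#include <vector>

// Stand-in for the real Allocator: owns a block and exposes its base pointer.
struct Allocator
{
  explicit Allocator(size_t n) : data(n) {}
  uint8_t *base() { return data.data(); }
  std::vector<uint8_t> data;
};

class SketchTensor
{
public:
  // Static, externally managed memory: only _buffer is set.
  void setBuffer(uint8_t *buffer)
  {
    assert(_buffer == nullptr);
    _buffer = buffer;
  }
  // Const/dynamic memory: keep the allocator alive, but still read through _buffer.
  void setBuffer(std::shared_ptr<Allocator> alloc)
  {
    assert(_buffer == nullptr);
    _allocator = std::move(alloc);
    _buffer = _allocator->base();
  }
  uint8_t *buffer() const { return _buffer; }

private:
  uint8_t *_buffer = nullptr;
  std::shared_ptr<Allocator> _allocator; // guarantees _buffer stays valid
};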
+ */ std::shared_ptr _allocator; }; diff --git a/runtime/onert/core/include/compiler/StaticShapeInference.h b/runtime/onert/core/include/compiler/StaticShapeInference.h index 379143b..bff68c9 100644 --- a/runtime/onert/core/include/compiler/StaticShapeInference.h +++ b/runtime/onert/core/include/compiler/StaticShapeInference.h @@ -99,6 +99,7 @@ private: void visit(const ir::operation::LogicalNot &op) override; void visit(const ir::operation::LogicalOr &op) override; void visit(const ir::operation::Logistic &op) override; + void visit(const ir::operation::L2Normalization &op) override; void visit(const ir::operation::MatrixBandPart &op) override; void visit(const ir::operation::Max &op) override; void visit(const ir::operation::Min &op) override; @@ -114,6 +115,7 @@ private: void visit(const ir::operation::Reshape &op) override; void visit(const ir::operation::Round &op) override; void visit(const ir::operation::RSQRT &op) override; + void visit(const ir::operation::ResizeBilinear &op) override; void visit(const ir::operation::Reverse &op) override; void visit(const ir::operation::Select &op) override; void visit(const ir::operation::Shape &op) override; diff --git a/runtime/onert/core/include/exec/DynamicShapeInference.h b/runtime/onert/core/include/exec/DynamicShapeInference.h index 113c348..bca80db 100644 --- a/runtime/onert/core/include/exec/DynamicShapeInference.h +++ b/runtime/onert/core/include/exec/DynamicShapeInference.h @@ -72,6 +72,7 @@ public: void visit(const ir::operation::LogicalNot &op) override; void visit(const ir::operation::LogicalOr &op) override; void visit(const ir::operation::Logistic &op) override; + void visit(const ir::operation::L2Normalization &op) override; void visit(const ir::operation::MatrixBandPart &op) override; void visit(const ir::operation::Max &op) override; void visit(const ir::operation::Min &op) override; @@ -88,6 +89,7 @@ public: void visit(const ir::operation::Reshape &op) override; void visit(const ir::operation::Round &op) override; void visit(const ir::operation::RSQRT &op) override; + void visit(const ir::operation::ResizeBilinear &op) override; void visit(const ir::operation::Reverse &op) override; void visit(const ir::operation::Select &op) override; void visit(const ir::operation::Shape &op) override; @@ -127,6 +129,7 @@ private: /** * @brief To allocate memory for output tensor if needed */ + // TODO Remove this, as it is no longer used backend::IDynamicTensorManager *_dynamic_tensor_manager; /** * @brief To get tensor object and access tensor-level info, e.g., ITensor::buffer() diff --git a/runtime/onert/core/include/ir/Operand.h b/runtime/onert/core/include/ir/Operand.h index 53371d6..1b3a43b 100644 --- a/runtime/onert/core/include/ir/Operand.h +++ b/runtime/onert/core/include/ir/Operand.h @@ -49,11 +49,11 @@ public: size_t operandSize(void) const; const OperationIndexSet &getUses() const { return _uses; } - const OperationIndexSet &getDef() const { return _def; } + OperationIndex getDef() const { return _def; } void insertUse(const OperationIndex &idx); void removeUse(const OperationIndex &idx); - void insertDef(const OperationIndex &idx); - void removeDef(const OperationIndex &idx); + void setDef(const OperationIndex &idx); + void unsetDef(); public: void type(const DataType type) { _info.type(type); }; @@ -107,7 +107,7 @@ private: std::shared_ptr _data; OperationIndexSet _uses; - OperationIndexSet _def; // size is 0 (constant) or 1 (from def operation) + OperationIndex _def; }; } // namespace ir diff --git 
a/runtime/onert/core/include/ir/Operations.Include.h b/runtime/onert/core/include/ir/Operations.Include.h index 5fac54e..30c4ff2 100644 --- a/runtime/onert/core/include/ir/Operations.Include.h +++ b/runtime/onert/core/include/ir/Operations.Include.h @@ -79,6 +79,7 @@ #include "ir/operation/Pack.h" #include "ir/operation/Select.h" #include "ir/operation/Split.h" +#include "ir/operation/SplitV.h" #include "ir/operation/Unpack.h" #include "ir/operation/Pad.h" #include "ir/operation/Min.h" @@ -103,3 +104,5 @@ #include "ir/operation/BatchMatMul.h" #include "ir/operation/FusedBatchNorm.h" #include "ir/operation/LogSoftmax.h" +#include "ir/operation/Quantize.h" +#include "ir/operation/StatelessRandomUniform.h" diff --git a/runtime/onert/core/include/ir/Operations.lst b/runtime/onert/core/include/ir/Operations.lst index 9d0642f..75c6d82 100644 --- a/runtime/onert/core/include/ir/Operations.lst +++ b/runtime/onert/core/include/ir/Operations.lst @@ -81,6 +81,7 @@ OP(DepthToSpace) OP(Pack) OP(Select) OP(Split) +OP(SplitV) OP(Unpack) OP(Pad) OP(Custom) @@ -106,3 +107,5 @@ OP(MatrixBandPart) OP(BatchMatMul) OP(FusedBatchNorm) OP(LogSoftmax) +OP(Quantize) +OP(StatelessRandomUniform) diff --git a/runtime/onert/core/include/ir/TypeInfo.h b/runtime/onert/core/include/ir/TypeInfo.h index 07d82b6..3f7eab4 100644 --- a/runtime/onert/core/include/ir/TypeInfo.h +++ b/runtime/onert/core/include/ir/TypeInfo.h @@ -18,6 +18,7 @@ #define __ONERT_IR_TYPEINFO_H__ #include +#include #include "ir/DataType.h" @@ -32,7 +33,7 @@ public: TypeInfo() = delete; explicit TypeInfo(DataType type, float scale = 0, int32_t offset = 0) - : _type(type), _scale(scale), _offset(offset) + : _type(type), _scale(scale), _offset(offset), _sparse(false) { } @@ -40,14 +41,28 @@ public: DataType type() const { return _type; } float scale() const { return _scale; } int32_t offset() const { return _offset; } + bool sparse() const { return _sparse; } + const uint16_t *w1_segments() const { return _w1_segments.data(); } + const uint16_t *w1_indices() const { return _w1_indices.data(); } public: void type(const DataType type) { _type = type; } + void sparse2DMetadata(std::vector &&w1_segments, std::vector &&w1_indices) + { + _sparse = true; + _w1_segments = w1_segments; + _w1_indices = w1_indices; + } private: DataType _type; + // for quantization float _scale; int32_t _offset; + // for sparsity + bool _sparse; + std::vector _w1_segments; + std::vector _w1_indices; }; bool operator==(const TypeInfo &lhs, const TypeInfo &rhs); diff --git a/runtime/onert/core/include/ir/operation/BatchToSpaceND.h b/runtime/onert/core/include/ir/operation/BatchToSpaceND.h index bb6be57..3e69b42 100644 --- a/runtime/onert/core/include/ir/operation/BatchToSpaceND.h +++ b/runtime/onert/core/include/ir/operation/BatchToSpaceND.h @@ -32,7 +32,8 @@ public: enum Input { INPUT = 0, - BLOCK_SIZE = 1 + BLOCK_SIZE = 1, + CROPS_DATA = 2 }; public: diff --git a/runtime/onert/core/include/ir/operation/LogSoftmax.h b/runtime/onert/core/include/ir/operation/LogSoftmax.h index 26a92d7..391b4ba 100644 --- a/runtime/onert/core/include/ir/operation/LogSoftmax.h +++ b/runtime/onert/core/include/ir/operation/LogSoftmax.h @@ -48,7 +48,7 @@ public: public: void accept(OperationVisitor &v) const override; - OpCode opcode() const final { return OpCode::Softmax; } + OpCode opcode() const final { return OpCode::LogSoftmax; } public: const Param ¶m() const { return _param; } diff --git a/runtime/onert/core/include/ir/operation/Pad.h b/runtime/onert/core/include/ir/operation/Pad.h index 
a486061..00481cd 100644 --- a/runtime/onert/core/include/ir/operation/Pad.h +++ b/runtime/onert/core/include/ir/operation/Pad.h @@ -33,7 +33,7 @@ public: { INPUT = 0, PAD = 1, - // VALUE = 2 Not allow padding value operand yet + VALUE = 2 }; public: diff --git a/runtime/onert/core/include/ir/operation/Quantize.h b/runtime/onert/core/include/ir/operation/Quantize.h new file mode 100644 index 0000000..2533ce4 --- /dev/null +++ b/runtime/onert/core/include/ir/operation/Quantize.h @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __ONERT_IR_OPERATION_QUANTIZE_H__ +#define __ONERT_IR_OPERATION_QUANTIZE_H__ + +#include "ir/Operation.h" + +namespace onert +{ +namespace ir +{ +namespace operation +{ + +class Quantize : public Operation +{ +public: + enum Input + { + INPUT = 0, + }; + +public: + Quantize(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs); + +public: + void accept(OperationVisitor &v) const override; + OpCode opcode() const final { return OpCode::Quantize; } +}; + +} // namespace operation +} // namespace ir +} // namespace onert + +#endif // __ONERT_IR_OPERATION_QUANTIZE_H__ diff --git a/runtime/onert/core/include/ir/operation/ResizeBilinear.h b/runtime/onert/core/include/ir/operation/ResizeBilinear.h index 2887ed8..29aa496 100644 --- a/runtime/onert/core/include/ir/operation/ResizeBilinear.h +++ b/runtime/onert/core/include/ir/operation/ResizeBilinear.h @@ -33,13 +33,15 @@ class ResizeBilinear : public Operation public: enum Input { - INPUT = 0 + INPUT = 0, }; struct Param { int32_t height_out; int32_t width_out; + bool align_corners; + bool half_pixel_centers; }; public: diff --git a/runtime/onert/core/include/ir/operation/SplitV.h b/runtime/onert/core/include/ir/operation/SplitV.h new file mode 100644 index 0000000..99a06ee --- /dev/null +++ b/runtime/onert/core/include/ir/operation/SplitV.h @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
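Note on the new Quantize operation declared earlier in this section: the patch only introduces the IR node here, but the TypeInfo scale/offset pair it works with corresponds to standard affine quantization. A standalone sketch of that mapping, for illustration only:

#include <algorithm>
#include <cmath>
#include <cstdint>

// Standard affine quantization: real_value is approximately scale * (q - zero_point).
// Illustrative helper; the runtime kernel is not shown in this patch hunk.
uint8_t quantizeToUint8(float value, float scale, int32_t zero_point)
{
  const int32_t q = static_cast<int32_t>(std::round(value / scale)) + zero_point;
  return static_cast<uint8_t>(std::min(255, std::max(0, q)));
}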
+ */ +#ifndef __ONERT_IR_OPERATION_SPLIT_V_H__ +#define __ONERT_IR_OPERATION_SPLIT_V_H__ + +#include "ir/Operation.h" + +namespace onert +{ +namespace ir +{ +namespace operation +{ +class SplitV : public Operation +{ +public: + enum Input + { + INPUT = 0, + SIZE_SPLITS = 1, + SPLIT_DIM = 2 + }; + + struct Param + { + int num_splits; + }; + +public: + SplitV(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs, + const Param ¶m); + +public: + void accept(OperationVisitor &v) const override; + OpCode opcode() const final { return OpCode::SplitV; } + +public: + const Param ¶m() const { return _param; } + +private: + Param _param; +}; +} // namespace operation +} // namespace ir +} // namespace onert +#endif // __ONERT_IR_OPERATION_SPLIT_V_H__ diff --git a/runtime/onert/core/include/ir/operation/StatelessRandomUniform.h b/runtime/onert/core/include/ir/operation/StatelessRandomUniform.h new file mode 100644 index 0000000..112a748 --- /dev/null +++ b/runtime/onert/core/include/ir/operation/StatelessRandomUniform.h @@ -0,0 +1,52 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __ONERT_IR_OPERATION_STATELESS_RANDOM_UNIFORM_H__ +#define __ONERT_IR_OPERATION_STATELESS_RANDOM_UNIFORM_H__ + +#include + +#include "ir/Operation.h" + +namespace onert +{ +namespace ir +{ +namespace operation +{ + +class StatelessRandomUniform : public Operation +{ +public: + enum Input + { + SHAPE = 0, + SEED = 1 + }; + +public: + StatelessRandomUniform(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs); + +public: + void accept(OperationVisitor &v) const override; + OpCode opcode() const final { return OpCode::StatelessRandomUniform; } +}; + +} // namespace operation +} // namespace ir +} // namespace onert + +#endif // __ONERT_IR_OPERATION_STATELESS_RANDOM_UNIFORM_H__ diff --git a/runtime/onert/core/include/util/ShapeInference.h b/runtime/onert/core/include/util/ShapeInference.h index 0d45251..a68c22b 100644 --- a/runtime/onert/core/include/util/ShapeInference.h +++ b/runtime/onert/core/include/util/ShapeInference.h @@ -95,6 +95,9 @@ template ir::Shape inferRangeShape(float *start_val, float *limit_val, template ir::Shape inferRangeShape(T start_val, T limit_val, T delta_val); +ir::Shape inferResizeBilinearShape(const ir::Shape &in_shape, const int32_t output_height, + const int32_t output_width); + ir::Shape inferSelectShape(const ir::Shape &input_cond_shape, const ir::Shape &input_true_shape, const ir::Shape &input_false_shape); diff --git a/runtime/onert/core/src/backend/controlflow/DynamicTensorManager.cc b/runtime/onert/core/src/backend/controlflow/DynamicTensorManager.cc index 32a8041..e538f3f 100644 --- a/runtime/onert/core/src/backend/controlflow/DynamicTensorManager.cc +++ b/runtime/onert/core/src/backend/controlflow/DynamicTensorManager.cc @@ -36,7 +36,7 @@ DynamicTensorManager::DynamicTensorManager(const std::shared_ptrgetManagedTensor(ind); + auto user_tensor = 
_user_tensors->getNativeTensor(ind); if (user_tensor) { // User tensors cannot be reallocated. @@ -45,10 +45,11 @@ void DynamicTensorManager::applyShape(const ir::OperandIndex &ind, const ir::Sha if (buffer_size < new_size) throw std::runtime_error{"ExecutorBase: output buffer size is less than output tensor size"}; user_tensor->setShape(new_shape); + return; } - // NOTE Then handle managed tensors - auto tensor = _tensors->getManagedTensor(ind); + // NOTE Then handle native tensors + auto tensor = _tensors->getNativeTensor(ind); assert(tensor); bool previously_dynamic = tensor->is_dynamic(); @@ -101,9 +102,9 @@ void DynamicTensorManager::buildTensor(const ir::OperandIndex &ind, const ir::OperandInfo &tensor_info, ir::Layout backend_layout) { - assert(_tensors->getManagedTensor(ind) == nullptr); - auto tensor = std::make_shared(tensor_info, backend_layout); - _tensors->setManagedTensor(ind, tensor); + assert(_tensors->getNativeTensor(ind) == nullptr); + auto tensor = std::make_shared(tensor_info, backend_layout, this); + _tensors->setNativeTensor(ind, tensor); } void DynamicTensorManager::planDealloc(ir::OperationIndex op_ind, ir::OperandIndex operand_ind) @@ -130,7 +131,7 @@ void DynamicTensorManager::deallocInput(ir::OperationIndex op_ind) auto &input_set = find->second; for (auto input_ind : input_set) { - if (!_tensors->getManagedTensor(input_ind)->is_dynamic()) + if (!_tensors->getNativeTensor(input_ind)->is_dynamic()) continue; _dynamic_mem_mgr->deallocate(input_ind); @@ -141,7 +142,7 @@ void DynamicTensorManager::deallocInput(ir::OperationIndex op_ind) void DynamicTensorManager::deallocSubgraphOutput(ir::OperandIndex output_ind) { - if (!_tensors->getManagedTensor(output_ind)->is_dynamic()) + if (!_tensors->getNativeTensor(output_ind)->is_dynamic()) return; _dynamic_mem_mgr->deallocate(output_ind); diff --git a/runtime/onert/core/src/backend/controlflow/DynamicTensorManager.h b/runtime/onert/core/src/backend/controlflow/DynamicTensorManager.h index 300c342..446427d 100644 --- a/runtime/onert/core/src/backend/controlflow/DynamicTensorManager.h +++ b/runtime/onert/core/src/backend/controlflow/DynamicTensorManager.h @@ -61,6 +61,7 @@ private: * @todo DynamicMemoryManager is not optimized. 
Optimized one is needed */ std::shared_ptr _dynamic_mem_mgr; + // TODO Refactoring : Merge two TensorRegistries into one const std::shared_ptr _tensors; const std::shared_ptr _user_tensors; diff --git a/runtime/onert/core/src/backend/controlflow/KernelGenerator.cc b/runtime/onert/core/src/backend/controlflow/KernelGenerator.cc index 4b683fb..eb83b7d 100644 --- a/runtime/onert/core/src/backend/controlflow/KernelGenerator.cc +++ b/runtime/onert/core/src/backend/controlflow/KernelGenerator.cc @@ -81,23 +81,23 @@ void KernelGenerator::visit(const ir::operation::If &node) std::vector> input_tensors; for (const auto input_index : node.getInputs()) { - auto input_alloc = getTensor(input_index); + auto input_tensor = getTensor(input_index); - input_tensors.emplace_back(input_alloc); + input_tensors.emplace_back(input_tensor); } std::vector> output_tensors; exec::DynAllocInfoMap outputs_dyn_alloc_info; for (const auto output_index : node.getOutputs()) { - auto output_alloc = getTensor(output_index); + auto output_tensor = getTensor(output_index); - output_tensors.emplace_back(output_alloc); + output_tensors.emplace_back(output_tensor); const auto output_tensor_builder = getTensorBuilder(output_index); if (output_tensor_builder->supportDynamicTensor()) { auto output_dyn_manager = output_tensor_builder->dynamicTensorManager(); - outputs_dyn_alloc_info[output_alloc] = exec::DynAllocInfo{output_index, output_dyn_manager}; + outputs_dyn_alloc_info[output_tensor] = exec::DynAllocInfo{output_index, output_dyn_manager}; } } @@ -146,24 +146,24 @@ void KernelGenerator::visit(const ir::operation::While &node) std::vector> input_tensors; for (const auto input_index : node.getInputs()) { - auto input_alloc = getTensor(input_index); + auto input_tensor = getTensor(input_index); - input_tensors.emplace_back(input_alloc); + input_tensors.emplace_back(input_tensor); } std::vector> output_tensors; std::unordered_map, exec::DynAllocInfo> outputs_dyn_alloc_info; for (const auto output_index : node.getOutputs()) { - auto output_alloc = getTensor(output_index); + auto output_tensor = getTensor(output_index); - output_tensors.emplace_back(output_alloc); + output_tensors.emplace_back(output_tensor); const auto output_tensor_builder = getTensorBuilder(output_index); if (output_tensor_builder->supportDynamicTensor()) { auto output_dyn_manager = output_tensor_builder->dynamicTensorManager(); - outputs_dyn_alloc_info[output_alloc] = exec::DynAllocInfo{output_index, output_dyn_manager}; + outputs_dyn_alloc_info[output_tensor] = exec::DynAllocInfo{output_index, output_dyn_manager}; } } @@ -199,7 +199,7 @@ KernelGenerator::getTensorBuilder(const ir::OperandIndex &index) for (auto tensor_builder : _tensor_builder_set) { auto reg = tensor_builder->tensorRegistry(); - auto tensor = reg ? reg->getManagedITensor(index) : tensor_builder->tensorAt(index); + auto tensor = reg ? 
reg->getNativeITensor(index) : tensor_builder->tensorAt(index); if (tensor) { ret = tensor_builder; diff --git a/runtime/onert/core/src/backend/controlflow/TensorBuilder.cc b/runtime/onert/core/src/backend/controlflow/TensorBuilder.cc index 16cd3ec..5bddb91 100644 --- a/runtime/onert/core/src/backend/controlflow/TensorBuilder.cc +++ b/runtime/onert/core/src/backend/controlflow/TensorBuilder.cc @@ -92,7 +92,7 @@ void TensorBuilder::allocate() std::shared_ptr TensorBuilder::tensorAt(const ir::OperandIndex &ind) { // NOTE Find from User Tensor Registry first - // FIXME There may be both user tensor and managed tensor for a `ind` which is a waste + // FIXME There may be both user tensor and native tensor for a `ind` which is a waste auto user_tensor = _user_tensor_reg->getITensor(ind); auto tensor = _tensor_reg->getITensor(ind); if (user_tensor) @@ -107,7 +107,7 @@ void TensorBuilder::iterate(const IterateFunction &fn) { _static_tensor_mgr->ite std::shared_ptr TensorBuilder::at(const ir::OperandIndex &ind) { - return _tensor_reg->getManagedTensor(ind); + return _tensor_reg->getNativeTensor(ind); } std::unique_ptr TensorBuilder::releaseStaticTensorManager(void) @@ -123,7 +123,7 @@ std::unique_ptr TensorBuilder::releaseDynamicTensorManager(void) void TensorBuilder::setUserTensor(const ir::OperandIndex &ind, const std::shared_ptr &tensor) { - _user_tensor_reg->setManagedTensor(ind, tensor); + _user_tensor_reg->setNativeTensor(ind, tensor); } } // namespace controlflow diff --git a/runtime/onert/core/src/backend/controlflow/UserTensor.h b/runtime/onert/core/src/backend/controlflow/UserTensor.h index ce94ea0..9be3359 100644 --- a/runtime/onert/core/src/backend/controlflow/UserTensor.h +++ b/runtime/onert/core/src/backend/controlflow/UserTensor.h @@ -38,12 +38,16 @@ namespace controlflow class UserTensor : public IPortableTensor { public: - UserTensor(const ir::OperandInfo &info, ir::Layout layout, uint8_t *buffer, size_t size) - : _info{info}, _layout{layout}, _buffer{buffer}, _size{size}, _dynamic{false} + UserTensor(const ir::OperandInfo &info, ir::Layout layout, uint8_t *buffer, size_t size, + IDynamicTensorManager *dynamic_tensor_manager) + : _info{info}, _layout{layout}, _buffer{buffer}, _size{size}, _dynamic{false}, + _dynamic_tensor_manager{dynamic_tensor_manager} { } - UserTensor(const ir::OperandInfo &info, ir::Layout layout) : UserTensor{info, layout, nullptr, 0} + UserTensor(const ir::OperandInfo &info, ir::Layout layout, + IDynamicTensorManager *dynamic_tensor_manager) + : UserTensor{info, layout, nullptr, 0, dynamic_tensor_manager} { } @@ -68,6 +72,8 @@ public: void set_dynamic() override { _dynamic = true; } ir::Shape getShape() const override { return _info.shape(); } void setShape(const ir::Shape &new_shape) override { _info.shape(new_shape); } + bool is_constant() const override { return false; } + IDynamicTensorManager *dynamic_tensor_manager() override { return _dynamic_tensor_manager; } private: ir::OperandInfo _info; @@ -75,6 +81,7 @@ private: uint8_t *_buffer; size_t _size; bool _dynamic; + IDynamicTensorManager *_dynamic_tensor_manager; }; } // namespace controlflow diff --git a/runtime/onert/core/src/backend/cpu_common/DynamicTensorManager.cc b/runtime/onert/core/src/backend/cpu_common/DynamicTensorManager.cc index 0ccf700..cb27d75 100644 --- a/runtime/onert/core/src/backend/cpu_common/DynamicTensorManager.cc +++ b/runtime/onert/core/src/backend/cpu_common/DynamicTensorManager.cc @@ -35,7 +35,7 @@ void DynamicTensorManager::applyShape(const ir::OperandIndex &ind, const 
ir::Sha { VERBOSE_F() << ind << std::endl; - auto tensor = _tensors->getManagedTensor(ind); + auto tensor = _tensors->getNativeTensor(ind); assert(tensor); bool previously_dynamic = tensor->is_dynamic(); @@ -88,9 +88,9 @@ void DynamicTensorManager::buildTensor(const ir::OperandIndex &ind, const ir::OperandInfo &tensor_info, ir::Layout backend_layout) { - assert(_tensors->getManagedTensor(ind) == nullptr); - auto tensor = std::make_shared(tensor_info, backend_layout); - _tensors->setManagedTensor(ind, tensor); + assert(_tensors->getNativeTensor(ind) == nullptr); + auto tensor = std::make_shared(tensor_info, backend_layout, this); + _tensors->setNativeTensor(ind, tensor); } void DynamicTensorManager::planDealloc(ir::OperationIndex op_ind, ir::OperandIndex operand_ind) @@ -117,7 +117,7 @@ void DynamicTensorManager::deallocInput(ir::OperationIndex op_ind) auto &input_set = find->second; for (auto input_ind : input_set) { - auto *tensor = _tensors->getManagedTensor(input_ind).get(); + auto *tensor = _tensors->getNativeTensor(input_ind).get(); if (!tensor->is_dynamic()) continue; @@ -131,7 +131,7 @@ void DynamicTensorManager::deallocInput(ir::OperationIndex op_ind) void DynamicTensorManager::deallocSubgraphOutput(ir::OperandIndex output_ind) { - auto *tensor = _tensors->getManagedTensor(output_ind).get(); + auto *tensor = _tensors->getNativeTensor(output_ind).get(); if (!tensor->is_dynamic()) return; diff --git a/runtime/onert/core/src/backend/cpu_common/StaticTensorManager.cc b/runtime/onert/core/src/backend/cpu_common/StaticTensorManager.cc index 47bea35..820cad3 100644 --- a/runtime/onert/core/src/backend/cpu_common/StaticTensorManager.cc +++ b/runtime/onert/core/src/backend/cpu_common/StaticTensorManager.cc @@ -16,6 +16,7 @@ #include "backend/cpu_common/StaticTensorManager.h" +#include "backend/cpu_common/DynamicTensorManager.h" #include namespace onert @@ -33,7 +34,7 @@ StaticTensorManager::StaticTensorManager(const std::shared_ptr & void StaticTensorManager::allocateConsts(void) { - for (auto &pair : _tensors->managed_tensors()) + for (auto &pair : _tensors->native_tensors()) { const auto &ind = pair.first; auto tensor = pair.second; @@ -42,9 +43,9 @@ void StaticTensorManager::allocateConsts(void) auto mem_alloc = _const_mgr->allocate(ind, tensor->total_size()); tensor->setBuffer(mem_alloc); auto buffer = mem_alloc->base(); - VERBOSE(CPU_StaticTensorManager) << "CONSTANT TENSOR(#" << ind.value() - << "): " << static_cast(buffer) - << "size : " << tensor->total_size() << std::endl; + VERBOSE(CPU_COMMON_StaticTensorManager) << "CONSTANT TENSOR(#" << ind.value() + << "): " << static_cast(buffer) + << "size : " << tensor->total_size() << std::endl; } } } @@ -53,7 +54,7 @@ void StaticTensorManager::allocateNonconsts(void) { _nonconst_mgr->allocate(); - for (auto &pair : _tensors->managed_tensors()) + for (auto &pair : _tensors->native_tensors()) { const auto &ind = pair.first; auto tensor = pair.second; @@ -62,8 +63,8 @@ void StaticTensorManager::allocateNonconsts(void) auto *buffer = _nonconst_mgr->getBuffer(ind); tensor->setBuffer(buffer); - VERBOSE(CPU_StaticTensorManager) << "TENSOR(#" << ind.value() - << "): " << static_cast(buffer) << std::endl; + VERBOSE(CPU_COMMON_StaticTensorManager) << "TENSOR(#" << ind.value() + << "): " << static_cast(buffer) << std::endl; } } } @@ -76,18 +77,18 @@ void StaticTensorManager::buildTensor(const ir::OperandIndex &ind, const ir::OperandInfo &tensor_info, ir::Layout backend_layout, bool as_const) { - assert(!_tensors->getManagedTensor(ind)); - auto tensor 
= std::make_shared(tensor_info, backend_layout); - _tensors->setManagedTensor(ind, tensor); + assert(!_tensors->getNativeTensor(ind)); + auto tensor = std::make_shared(tensor_info, backend_layout, nullptr); + _tensors->setNativeTensor(ind, tensor); _as_constants[ind] = as_const; } void StaticTensorManager::claimPlan(const ir::OperandIndex &ind, uint32_t size) { - assert(_tensors->getManagedTensor(ind)); + assert(_tensors->getNativeTensor(ind)); // This method is called only when a tensor has proper shape - assert(!_tensors->getManagedTensor(ind)->is_dynamic()); + assert(!_tensors->getNativeTensor(ind)->is_dynamic()); if (!_as_constants[ind]) _nonconst_mgr->claimPlan(ind, size); @@ -95,10 +96,10 @@ void StaticTensorManager::claimPlan(const ir::OperandIndex &ind, uint32_t size) void StaticTensorManager::releasePlan(const ir::OperandIndex &ind) { - assert(_tensors->getManagedTensor(ind)); + assert(_tensors->getNativeTensor(ind)); // This method is called only when a tensor has proper shape - assert(!_tensors->getManagedTensor(ind)->is_dynamic()); + assert(!_tensors->getNativeTensor(ind)->is_dynamic()); if (!_as_constants[ind]) _nonconst_mgr->releasePlan(ind); @@ -106,7 +107,7 @@ void StaticTensorManager::releasePlan(const ir::OperandIndex &ind) void StaticTensorManager::iterate(const std::function &fn) { - for (const auto &it : _tensors->managed_tensors()) + for (const auto &it : _tensors->native_tensors()) fn(it.first); } diff --git a/runtime/onert/core/src/compiler/ExecutorFactory.cc b/runtime/onert/core/src/compiler/ExecutorFactory.cc index f3f69ad..82afd9e 100644 --- a/runtime/onert/core/src/compiler/ExecutorFactory.cc +++ b/runtime/onert/core/src/compiler/ExecutorFactory.cc @@ -196,23 +196,41 @@ ExecutorFactory::initializeModelIOTensors(ir::LoweredGraph &lowered_graph, const auto &operand = lowered_graph.graph().operands().at(ind); auto tensor = std::make_shared( operand.info(), - ir::Layout::NHWC /* FIXME find op_seq for this operand and use frontend_layout */); + ir::Layout::NHWC, /* FIXME find op_seq for this operand and use frontend_layout */ + cf_tensor_builder->dynamicTensorManager()); // Add tensor to controlflow TensorRegistry. 
cf_tensor_builder->setUserTensor(ind, tensor); ret.push_back(tensor); - - // Set other tensors as external tensors - for (auto &tensor_builder : tensor_builders) - { - // FIXME This is a workaround registering all user tensors to all backends - // FIXME Handle when it is failed - tensor_builder->setExternalTensor(ind, tensor); - } } return ret; } +void ExecutorFactory::prepareExternalTensors(ir::LoweredGraph &lowered_graph, + TensorBuilders &tensor_builders) +{ + lowered_graph.op_seqs().iterate( + [&](const ir::OpSequenceIndex &op_seq_index, const ir::OpSequence &op_seq) { + auto lower_info = lowered_graph.getLowerInfo(op_seq_index); + auto &backend_ctx = lowered_graph.backend_contexts().at(lower_info->backend()); + for (auto ind : (op_seq.getInputs() + op_seq.getOutputs()) | ir::Remove::DUPLICATED | + ir::Remove::UNDEFINED) + { + // If an OpSequence input/output tensor does not have a own tensor object, + // it must be using external tensors, so find the tensor from other tensor builders and + // set the tensor to this tensor builder if portable + if (!backend_ctx->tensor_builder->tensorAt(ind)) + { + auto tensor = tensor_builders.getITensor(ind); + assert(tensor); // The tensor must have been created in one of TensorBuilders + auto ptensor = std::dynamic_pointer_cast(tensor); + if (ptensor) + backend_ctx->tensor_builder->setMigrantTensor(ind, ptensor); + } + } + }); +} + exec::IExecutor * ExecutorFactory::createLinearExecutor(std::unique_ptr lowered_graph, const compiler::CompilerOptions &options, @@ -265,6 +283,8 @@ ExecutorFactory::createLinearExecutor(std::unique_ptr lowered_ tensor_builder->prepare(); } + prepareExternalTensors(*lowered_graph, tensor_builders); + ExecutionBuilder builder; // Generate kernels @@ -367,6 +387,8 @@ exec::IExecutor *ExecutorFactory::createDataflowExecutor( tensor_builder->prepare(); } + prepareExternalTensors(*lowered_graph, tensor_builders); + ExecutionBuilder builder; // Generate kernels diff --git a/runtime/onert/core/src/compiler/ExecutorFactory.h b/runtime/onert/core/src/compiler/ExecutorFactory.h index 1e82b98..418e5a7 100644 --- a/runtime/onert/core/src/compiler/ExecutorFactory.h +++ b/runtime/onert/core/src/compiler/ExecutorFactory.h @@ -22,6 +22,7 @@ #include "backend/ITensor.h" #include "exec/IExecutor.h" #include "ir/LoweredGraph.h" +#include "TensorBuilders.h" namespace onert { @@ -48,6 +49,8 @@ private: static std::vector> initializeModelIOTensors(ir::LoweredGraph &lowered_graph, const ir::OperandIndexSequence &indices); + static void prepareExternalTensors(ir::LoweredGraph &lowered_graph, + TensorBuilders &tensor_builders); static exec::IExecutor * createLinearExecutor(std::unique_ptr lowered_graph, const compiler::CompilerOptions &options, diff --git a/runtime/onert/core/src/compiler/Fp32ToFp16Converter.cc b/runtime/onert/core/src/compiler/Fp32ToFp16Converter.cc index c68e6c3..5c4b84e 100644 --- a/runtime/onert/core/src/compiler/Fp32ToFp16Converter.cc +++ b/runtime/onert/core/src/compiler/Fp32ToFp16Converter.cc @@ -316,7 +316,7 @@ void Fp32ToFp16Converter::appendNewOpSeqForConvertFp16ToFp32(const ir::OpSequenc // manipulate output of operation and op_seq // - replace output of the last operation's output to new operand - // with old operand's removeDef and new operand's appendDef() + // with old operand's unsetDef and new operand's appendDef() manipulateOutput(op_seq_ind, op_seq_output_ind, new_op_ind); // new op @@ -584,8 +584,9 @@ void Fp32ToFp16Converter::manipulateOutput(const ir::OpSequenceIndex &op_seq_ind 
last_node.replaceOutputs(op_seq_output_ind, new_op_ind); // op_seq_obj doesn't have uses/def - output_obj.removeDef(last_node_ind); - new_op_obj.insertDef(last_node_ind); + assert(output_obj.getDef() == last_node_ind); + output_obj.unsetDef(); + new_op_obj.setDef(last_node_ind); } ir::OperationIndex @@ -603,7 +604,7 @@ Fp32ToFp16Converter::newOperationConvertFp32ToFp16(const ir::OperandIndex &op_se const auto new_node_ind = operations.push(std::move(new_node)); input_obj.insertUse(new_node_ind); - new_op_obj.insertDef(new_node_ind); + new_op_obj.setDef(new_node_ind); return new_node_ind; } @@ -623,7 +624,7 @@ Fp32ToFp16Converter::newOperationConvertFp16ToFp32(const ir::OperandIndex &op_se const auto new_node_ind = operations.push(std::move(new_node)); new_op_obj.insertUse(new_node_ind); - output_obj.insertDef(new_node_ind); + output_obj.setDef(new_node_ind); return new_node_ind; } @@ -925,7 +926,8 @@ void Fp32ToFp16Converter::deleteContiguousOpSequences( for (auto &ind : first_node.getOutputs() | ir::Remove::DUPLICATED | ir::Remove::UNDEFINED) { auto &obj = operands.at(ind); - obj.removeDef(first_node_ind); + assert(obj.getDef() == first_node_ind); + obj.unsetDef(); VERBOSE(Fp32ToFp16Converter) << "Operand #" << ind.value() << "'s Def(Node#" << first_node_ind.value() << ") is removed" << std::endl; } diff --git a/runtime/onert/core/src/compiler/HEScheduler.cc b/runtime/onert/core/src/compiler/HEScheduler.cc index b0e61f6..de9b4fb 100644 --- a/runtime/onert/core/src/compiler/HEScheduler.cc +++ b/runtime/onert/core/src/compiler/HEScheduler.cc @@ -105,7 +105,7 @@ static bool isMergeable(const ir::Graph &graph, const ir::Operation &node) continue; // This operand is output of operation, not weight or bias - if (operand.getDef().size() > 0) + if (operand.getDef().valid()) ++prev_op_cnt; // Current node has multiple inputs as concat or at the beginning of the separated branch @@ -599,7 +599,8 @@ int64_t HEScheduler::predMaxEFT(const backend::Backend *backend, const ir::Opera const auto &input_operand = _graph->operands().at(input_operand_idx); const bool quant = input_operand.typeInfo().type() == ir::DataType::QUANT_UINT8_ASYMM; - for (const auto &input_node_idx : input_operand.getDef()) + auto input_node_idx = input_operand.getDef(); + if (input_node_idx.valid()) { // Data transfer cost from parent's node backend to current node's backend: auto parent_backend = _backend_resolver->getBackend(input_node_idx); diff --git a/runtime/onert/core/src/compiler/HEScheduler.h b/runtime/onert/core/src/compiler/HEScheduler.h index f507539..d8ceca9 100644 --- a/runtime/onert/core/src/compiler/HEScheduler.h +++ b/runtime/onert/core/src/compiler/HEScheduler.h @@ -51,16 +51,12 @@ public: * @param[in] backend_resolver backend resolver */ HEScheduler(const backend::BackendContexts &backend_contexts, const CompilerOptions &options) - : _backend_contexts{backend_contexts}, _is_supported{}, _backends_avail_time{}, _ops_eft{}, + : _is_supported{}, _backends_avail_time{}, _ops_eft{}, _op_to_rank{std::make_shared>()}, _is_profiling_mode{options.he_profiling_mode}, _is_linear_exec{options.executor == "Linear"}, _is_parallel_exec{options.executor == "Parallel"} { - // Workaround to avoid unused-private-field warning - // TODO use _backend_contexts and remove workaround - (void)_backend_contexts; - for (auto &entry : backend_contexts) { _all_backends.push_back(entry.first); @@ -165,7 +161,6 @@ private: // whether it should assign these backends to these nodes: // * It stores false for unsupported nodes // * During rank 
calculation with enabled profiling mode it stores true for supported nodes - const backend::BackendContexts &_backend_contexts; std::unordered_map> _is_supported; // Finishing and starting time of each backend std::unordered_map> _backends_avail_time; @@ -175,8 +170,7 @@ private: std::unique_ptr _backend_resolver; std::unique_ptr _exec_time; const ir::Graph *_graph{nullptr}; - std::vector - _all_backends; // TODO Remove this and use _backend_contexts instead + std::vector _all_backends; const backend::Backend *_cpu_backend{nullptr}; // TODO Change this to controlflow_backend bool _is_profiling_mode; bool _is_linear_exec; diff --git a/runtime/onert/core/src/compiler/Linear.cc b/runtime/onert/core/src/compiler/Linear.cc index b9db2f3..493ca1e 100644 --- a/runtime/onert/core/src/compiler/Linear.cc +++ b/runtime/onert/core/src/compiler/Linear.cc @@ -96,7 +96,7 @@ void Linear::planTensors(const ir::LoweredGraph &lowered_graph, } uses_map[ind] = obj.getUses().size(); - def_map[ind] = obj.getDef().size(); // should be 1 or 0 + def_map[ind] = obj.getDef().valid() ? 1 : 0; bool is_const = obj.isConstant(); if (is_const) diff --git a/runtime/onert/core/src/compiler/OperandContext.h b/runtime/onert/core/src/compiler/OperandContext.h deleted file mode 100644 index 390b376..0000000 --- a/runtime/onert/core/src/compiler/OperandContext.h +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef __ONERT_COMPILER_OPERAND_CONTEXT_H__ -#define __ONERT_COMPILER_OPERAND_CONTEXT_H__ - -#include "backend/ITensor.h" -#include "ir/OperandIndexMap.h" -#include -#include - -namespace onert -{ -namespace compiler -{ - -class OperandContext -{ -public: - OperandContext &set(const ir::OperandIndex &ind, const std::shared_ptr &tensor); - -public: - bool exist(const ir::OperandIndex &ind) const { return _tensors.find(ind) != _tensors.end(); } - -public: - std::shared_ptr at(const ir::OperandIndex &ind) const - { - return _tensors.at(ind); - } - - std::shared_ptr &at(const ir::OperandIndex &ind) { return _tensors.at(ind); } - - void iterate(const std::function &fn); - -private: - ir::OperandIndexMap> _tensors; -}; - -} // namespace compiler -} // namespace onert - -#endif // __ONERT_COMPILER_OPERAND_CONTEXT_H__ diff --git a/runtime/onert/core/src/compiler/OperationValidator.cc b/runtime/onert/core/src/compiler/OperationValidator.cc index 5c545ae..4449631 100644 --- a/runtime/onert/core/src/compiler/OperationValidator.cc +++ b/runtime/onert/core/src/compiler/OperationValidator.cc @@ -41,6 +41,21 @@ OperationValidator::OperationValidator(const ir::Graph &graph) { } +void OperationValidator::checkUnaryOp(const ir::Operation &node) +{ + const auto output_index{node.getOutputs().at(0)}; + const auto input_index{node.getInputs().at(0)}; + + // Check if I/O types match + OP_REQUIRES(_ctx.at(output_index).typeInfo().type() == _ctx.at(input_index).typeInfo().type()); + + if (_ctx.at(output_index).info().isDynamic()) + return; + + // Check if I/O shapes match + OP_REQUIRES(_ctx.at(output_index).shape() == _ctx.at(input_index).shape()); +} + void OperationValidator::operator()() { // There is no reason for each subgraph to have subgraphs since compiler has subgraphs when @@ -53,16 +68,7 @@ void OperationValidator::operator()() [&](const ir::OperationIndex &, const ir::Operation &node) { node.accept(*this); }); } -void OperationValidator::visit(const ir::operation::Abs &node) -{ - const auto output_index{node.getOutputs().at(0)}; - if (_ctx.at(output_index).info().isDynamic()) - return; - - const auto input_index{node.getInputs().at(0)}; - - OP_REQUIRES(_ctx.at(output_index).shape() == _ctx.at(input_index).shape()); -} +void OperationValidator::visit(const ir::operation::Abs &node) { checkUnaryOp(node); } void OperationValidator::visit(const ir::operation::AvgPool2D &node) { @@ -292,17 +298,7 @@ void OperationValidator::visit(const ir::operation::RNN &node) num_units == _ctx.at(hidden_state_out_index).shape().dim(1)); } -void OperationValidator::visit(const ir::operation::Round &node) -{ - const auto output_index{node.getOutputs().at(0)}; - const auto input_index{node.getInputs().at(ir::operation::Round::Input::INPUT)}; - - OP_REQUIRES(_ctx.at(output_index).typeInfo().type() == _ctx.at(input_index).typeInfo().type()); - - if (_ctx.at(output_index).info().isDynamic()) - return; - OP_REQUIRES(_ctx.at(output_index).shape() == _ctx.at(input_index).shape()); -} +void OperationValidator::visit(const ir::operation::Round &node) { checkUnaryOp(node); } void OperationValidator::visit(const ir::operation::SpaceToBatchND &node) { @@ -393,17 +389,7 @@ void OperationValidator::visit(const ir::operation::EmbeddingLookup &node) } } -void OperationValidator::visit(const ir::operation::Exp &node) -{ - const auto output_index{node.getOutputs().at(0)}; - const auto input_index{node.getInputs().at(ir::operation::Exp::Input::INPUT)}; - - OP_REQUIRES(_ctx.at(output_index).typeInfo().type() == 
_ctx.at(input_index).typeInfo().type()); - - if (_ctx.at(output_index).info().isDynamic()) - return; - OP_REQUIRES(_ctx.at(output_index).shape() == _ctx.at(input_index).shape()); -} +void OperationValidator::visit(const ir::operation::Exp &node) { checkUnaryOp(node); } void OperationValidator::visit(const ir::operation::ExpandDims &node) { @@ -419,17 +405,7 @@ void OperationValidator::visit(const ir::operation::ExpandDims &node) OP_REQUIRES(_ctx.at(axis_index).shape().rank() <= 1); } -void OperationValidator::visit(const ir::operation::Floor &node) -{ - const auto output_index{node.getOutputs().at(0)}; - const auto input_index{node.getInputs().at(ir::operation::Floor::Input::INPUT)}; - - OP_REQUIRES(_ctx.at(output_index).typeInfo().type() == _ctx.at(input_index).typeInfo().type()); - - if (_ctx.at(output_index).info().isDynamic()) - return; - OP_REQUIRES(_ctx.at(output_index).shape() == _ctx.at(input_index).shape()); -} +void OperationValidator::visit(const ir::operation::Floor &node) { checkUnaryOp(node); } void OperationValidator::visit(const ir::operation::HashtableLookup &node) { @@ -789,6 +765,25 @@ void OperationValidator::visit(const ir::operation::LSTM &node) } } +void OperationValidator::visit(const ir::operation::L2Normalization &node) +{ + const auto ofm_index{node.getOutputs().at(0)}; + if (_ctx.at(ofm_index).info().isDynamic()) + return; + + const auto ifm_index{node.getInputs().at(ir::operation::L2Normalization::Input::INPUT)}; + + auto ifm_shape = _ctx.at(ifm_index).shape(); + auto ofm_shape = _ctx.at(ofm_index).shape(); + + OP_REQUIRES(ifm_shape.rank() == ofm_shape.rank()); + + for (auto i = 0; i < ifm_shape.rank(); i++) + { + OP_REQUIRES(ifm_shape.dim(i) == ofm_shape.dim(i)); + } +} + void OperationValidator::visit(const ir::operation::Unpack &node) { const auto num{node.param().num}; @@ -904,45 +899,39 @@ void OperationValidator::visit(const ir::operation::Split &node) OP_REQUIRES(_ctx.at(input_index).shape().dim(axis) % num_splits == 0); } -void OperationValidator::visit(const ir::operation::Cos &node) -{ - const auto output_index{node.getOutputs().at(0)}; - if (_ctx.at(output_index).info().isDynamic()) - return; +void OperationValidator::visit(const ir::operation::Cos &node) { checkUnaryOp(node); } - const auto input_index{node.getInputs().at(0)}; - OP_REQUIRES(_ctx.at(output_index).shape() == _ctx.at(input_index).shape()); -} - -void OperationValidator::visit(const ir::operation::Sin &node) -{ - const auto output_index{node.getOutputs().at(0)}; - if (_ctx.at(output_index).info().isDynamic()) - return; +void OperationValidator::visit(const ir::operation::Sin &node) { checkUnaryOp(node); } - const auto input_index{node.getInputs().at(0)}; - OP_REQUIRES(_ctx.at(output_index).shape() == _ctx.at(input_index).shape()); -} +void OperationValidator::visit(const ir::operation::RSQRT &node) { checkUnaryOp(node); } -void OperationValidator::visit(const ir::operation::RSQRT &node) +void OperationValidator::visit(const ir::operation::Shape &node) { const auto output_index{node.getOutputs().at(0)}; if (_ctx.at(output_index).info().isDynamic()) return; const auto input_index{node.getInputs().at(0)}; - OP_REQUIRES(_ctx.at(output_index).shape() == _ctx.at(input_index).shape()); + UNUSED_RELEASE(input_index); + OP_REQUIRES(_ctx.at(output_index).shape().rank() == 1); } -void OperationValidator::visit(const ir::operation::Shape &node) +void OperationValidator::visit(const ir::operation::ResizeBilinear &node) { const auto output_index{node.getOutputs().at(0)}; + const auto 
input_index{node.getInputs().at(ir::operation::ResizeBilinear::Input::INPUT)}; + if (_ctx.at(output_index).info().isDynamic()) + { return; + } + OP_REQUIRES(_ctx.at(input_index).shape().rank() == 4); + OP_REQUIRES(_ctx.at(output_index).shape().rank() == 4); - const auto input_index{node.getInputs().at(0)}; - UNUSED_RELEASE(input_index); - OP_REQUIRES(_ctx.at(output_index).shape().rank() == 1); + auto align_corners = node.param().align_corners; + auto half_pixel_centers = node.param().half_pixel_centers; + + OP_REQUIRES(!align_corners || !half_pixel_centers); } void OperationValidator::visit(const ir::operation::Reverse &node) @@ -972,35 +961,11 @@ void OperationValidator::visit(const ir::operation::While &node) // TODO Add to validate with subgraphs } -void OperationValidator::visit(const ir::operation::Neg &node) -{ - const auto output_index{node.getOutputs().at(0)}; - if (_ctx.at(output_index).info().isDynamic()) - return; +void OperationValidator::visit(const ir::operation::Neg &node) { checkUnaryOp(node); } - const auto input_index{node.getInputs().at(0)}; - OP_REQUIRES(_ctx.at(output_index).shape() == _ctx.at(input_index).shape()); -} +void OperationValidator::visit(const ir::operation::Log &node) { checkUnaryOp(node); } -void OperationValidator::visit(const ir::operation::Log &node) -{ - const auto output_index{node.getOutputs().at(0)}; - if (_ctx.at(output_index).info().isDynamic()) - return; - - const auto input_index{node.getInputs().at(0)}; - OP_REQUIRES(_ctx.at(output_index).shape() == _ctx.at(input_index).shape()); -} - -void OperationValidator::visit(const ir::operation::LogicalNot &node) -{ - const auto output_index{node.getOutputs().at(0)}; - if (_ctx.at(output_index).info().isDynamic()) - return; - - const auto input_index{node.getInputs().at(0)}; - OP_REQUIRES(_ctx.at(output_index).shape() == _ctx.at(input_index).shape()); -} +void OperationValidator::visit(const ir::operation::LogicalNot &node) { checkUnaryOp(node); } void OperationValidator::visit(const ir::operation::SquaredDifference &node) { @@ -1118,5 +1083,25 @@ void OperationValidator::visit(const ir::operation::LogSoftmax &node) OP_REQUIRES(_ctx.at(output_index).shape().rank() == _ctx.at(input_index).shape().rank()); } + +void OperationValidator::visit(const ir::operation::Quantize &node) +{ + VERBOSE(Quantize) << "Configure Quantize operation" << std::endl; + + OP_REQUIRES(node.getInputs().size() == 1); + OP_REQUIRES(node.getOutputs().size() == 1); + + const auto input_index{node.getInputs().at(0)}; + const auto output_index{node.getOutputs().at(0)}; + + OP_REQUIRES(_ctx.at(input_index).typeInfo().type() == ir::DataType::FLOAT32); + + if (_ctx.at(output_index).info().isDynamic()) + return; + + OP_REQUIRES(_ctx.at(output_index).typeInfo().type() == ir::DataType::QUANT_UINT8_ASYMM); + + OP_REQUIRES(_ctx.at(output_index).shape().rank() == _ctx.at(input_index).shape().rank()); +} } // namespace compiler } // namespace onert diff --git a/runtime/onert/core/src/compiler/OperationValidator.h b/runtime/onert/core/src/compiler/OperationValidator.h index 6ceafe8..b27e686 100644 --- a/runtime/onert/core/src/compiler/OperationValidator.h +++ b/runtime/onert/core/src/compiler/OperationValidator.h @@ -70,6 +70,7 @@ public: void visit(const ir::operation::DepthToSpace &node) override; void visit(const ir::operation::Pack &node) override; void visit(const ir::operation::LSTM &node) override; + void visit(const ir::operation::L2Normalization &node) override; void visit(const ir::operation::Unpack &node) override; void visit(const 
ir::operation::Pad &node) override; void visit(const ir::operation::Min &node) override; @@ -81,6 +82,7 @@ public: void visit(const ir::operation::Sin &node) override; void visit(const ir::operation::RSQRT &node) override; void visit(const ir::operation::Shape &node) override; + void visit(const ir::operation::ResizeBilinear &node) override; void visit(const ir::operation::Reverse &node) override; void visit(const ir::operation::If &node) override; void visit(const ir::operation::While &node) override; @@ -93,9 +95,10 @@ public: void visit(const ir::operation::Range &node) override; void visit(const ir::operation::MatrixBandPart &node) override; void visit(const ir::operation::LogSoftmax &node) override; + void visit(const ir::operation::Quantize &node) override; private: - void checkReduceOp(const ir::OperandIndex input_index, const ir::OperandIndex output_index); + void checkUnaryOp(const ir::Operation &node); private: // TODO Remove _ctx field diff --git a/runtime/onert/core/src/compiler/StaticShapeInference.cc b/runtime/onert/core/src/compiler/StaticShapeInference.cc index 5a58f2e..76c1edc 100644 --- a/runtime/onert/core/src/compiler/StaticShapeInference.cc +++ b/runtime/onert/core/src/compiler/StaticShapeInference.cc @@ -497,6 +497,11 @@ void StaticShapeInferer::visit(const ir::operation::Logistic &op) handleSimpleUnaryOp(op, op.getInputs().at(ir::operation::Logistic::Input::INPUT)); } +void StaticShapeInferer::visit(const ir::operation::L2Normalization &op) +{ + handleSimpleUnaryOp(op, op.getInputs().at(ir::operation::L2Normalization::Input::INPUT)); +} + void StaticShapeInferer::visit(const ir::operation::MatrixBandPart &op) { handleSimpleUnaryOp(op, op.getInputs().at(ir::operation::MatrixBandPart::Input::INPUT)); @@ -813,6 +818,35 @@ void StaticShapeInferer::visit(const ir::operation::Reshape &op) } } +void StaticShapeInferer::visit(const ir::operation::ResizeBilinear &op) +{ + const auto input_idx{op.getInputs().at(ir::operation::ResizeBilinear::Input::INPUT)}; + const auto &input = _operands.at(input_idx); + + // get mutable output operand + const auto output_idx = op.getOutputs().at(0); + ir::Operand &output = _operands.at(output_idx); + + // if input is dynamic, output also becomes dynamic + if (input.info().isDynamic()) + { + output.info().setDynamic(); + _return_has_dynamic_tensor = true; + return; + } + + // Shape inferencing logic based on Params + ir::Shape new_shape = shape_inference::inferResizeBilinearShape( + input.shape(), op.param().height_out, op.param().width_out); + + // if size_op is from Const, TFLC put the shape of output into tensor + if (new_shape != output.shape()) + { + // change on output shape + output.info().shape(new_shape); + } +} + void StaticShapeInferer::visit(const ir::operation::Reverse &op) { handleSimpleUnaryOp(op, op.getInputs().at(ir::operation::Reverse::Input::INPUT)); diff --git a/runtime/onert/core/src/compiler/TensorBuilders.h b/runtime/onert/core/src/compiler/TensorBuilders.h index 4bb7413..c0a1ebc 100644 --- a/runtime/onert/core/src/compiler/TensorBuilders.h +++ b/runtime/onert/core/src/compiler/TensorBuilders.h @@ -23,6 +23,7 @@ #include "backend/Backend.h" #include "backend/controlflow/Config.h" #include "backend/controlflow/TensorBuilder.h" +#include "util/logging.h" namespace onert { @@ -66,6 +67,17 @@ public: return _cf_tensor_builder; } + std::shared_ptr getITensor(ir::OperandIndex ind) + { + for (auto &tensor_builder : _tensor_builders) + { + auto tensor = tensor_builder->tensorAt(ind); + if (tensor) + return tensor; + } + return 
nullptr; + } + private: std::unordered_set> _tensor_builders; std::shared_ptr _cf_tensor_builder; diff --git a/runtime/onert/core/src/exec/DynamicShapeInference.cc b/runtime/onert/core/src/exec/DynamicShapeInference.cc index 1b82029..5ec7012 100644 --- a/runtime/onert/core/src/exec/DynamicShapeInference.cc +++ b/runtime/onert/core/src/exec/DynamicShapeInference.cc @@ -16,12 +16,21 @@ #include "exec/DynamicShapeInference.h" #include "util/ShapeInference.h" +#include namespace onert { namespace exec { +inline backend::IDynamicTensorManager * +dynamicTensorManagerOf(const std::shared_ptr &tensor) +{ + if (!tensor->dynamic_tensor_manager()) + throw std::runtime_error{"Dynamic Tensor Manager is not available for this tensor."}; + return tensor->dynamic_tensor_manager(); +} + void DynamicShapeInferer::handleBinaryArithmeticOp(const ir::Operation &op, const ir::OperandIndex lhs_idx, const ir::OperandIndex rhs_idx) @@ -55,7 +64,7 @@ void DynamicShapeInferer::handleBinaryArithmeticOp(const ir::Operation &op, ir::Shape new_shape = shape_inference::inferEltwiseShape(lhs_shape, rhs_shape); - _dynamic_tensor_manager->applyShape(output_idx, new_shape); + dynamicTensorManagerOf(output)->applyShape(output_idx, new_shape); assert(output->buffer() != nullptr); } @@ -87,7 +96,7 @@ void DynamicShapeInferer::handleSimpleUnaryOp(const ir::Operation &op, auto output_ind = op.getOutputs().at(0); auto output = _tensor_registry->getITensor(output_ind); - _dynamic_tensor_manager->applyShape(output_ind, output_shape); + dynamicTensorManagerOf(output)->applyShape(output_ind, output_shape); assert(output->buffer() != nullptr); } @@ -121,7 +130,7 @@ void DynamicShapeInferer::visit(const ir::operation::ArgMax &op) ir::Shape new_shape = shape_inference::inferArgMaxShape(input_shape, axis, rank); - _dynamic_tensor_manager->applyShape(output_ind, new_shape); + dynamicTensorManagerOf(output)->applyShape(output_ind, new_shape); assert(output->buffer() != nullptr); } @@ -143,7 +152,7 @@ void DynamicShapeInferer::visit(const ir::operation::BatchMatMul &op) // TODO auto new_shape = shape_inference::inferBatchMatMulShape(lhs_shape, rhs_shape, op.param()); - _dynamic_tensor_manager->applyShape(output_index, new_shape); + dynamicTensorManagerOf(output)->applyShape(output_index, new_shape); } void DynamicShapeInferer::visit(const ir::operation::BroadcastTo &op) @@ -166,7 +175,7 @@ void DynamicShapeInferer::visit(const ir::operation::BroadcastTo &op) shape->getShape(), reinterpret_cast(shape->buffer())); // set output shape and output buffer - _dynamic_tensor_manager->applyShape(output_ind, output_shape); + dynamicTensorManagerOf(output)->applyShape(output_ind, output_shape); assert(output->buffer() != nullptr); } @@ -256,7 +265,7 @@ void DynamicShapeInferer::visit(const ir::operation::Concat &op) auto output = _tensor_registry->getITensor(output_ind); auto output_shape = shape_inference::inferConcatShape(in_shapes, op.param()); - _dynamic_tensor_manager->applyShape(output_ind, output_shape); + dynamicTensorManagerOf(output)->applyShape(output_ind, output_shape); } void DynamicShapeInferer::visit(const ir::operation::Conv2D &op) @@ -279,7 +288,7 @@ void DynamicShapeInferer::visit(const ir::operation::Conv2D &op) ir::Shape output_shape = shape_inference::inferConv2DShape(input_shape, ker_shape, op.param()); - _dynamic_tensor_manager->applyShape(output_ind, output_shape); + dynamicTensorManagerOf(output)->applyShape(output_ind, output_shape); assert(output->buffer() != nullptr); } @@ -339,7 +348,7 @@ void 
DynamicShapeInferer::visit(const ir::operation::ExpandDims &op) auto output_shape = shape_inference::inferExpandDimsShape(input_shape, axis_buf[0]); - _dynamic_tensor_manager->applyShape(output_ind, output_shape); + dynamicTensorManagerOf(output)->applyShape(output_ind, output_shape); assert(output->buffer() != nullptr); } @@ -362,7 +371,7 @@ void DynamicShapeInferer::visit(const ir::operation::Fill &op) auto output_shape = shape_inference::inferFillShape(input_shape, input_buf); - _dynamic_tensor_manager->applyShape(output_ind, output_shape); + dynamicTensorManagerOf(output)->applyShape(output_ind, output_shape); assert(output->buffer() != nullptr); } @@ -385,7 +394,7 @@ void DynamicShapeInferer::visit(const ir::operation::FullyConnected &op) auto output_ind = op.getOutputs().at(0); auto output = _tensor_registry->getITensor(output_ind); - _dynamic_tensor_manager->applyShape(output_ind, new_shape); + dynamicTensorManagerOf(output)->applyShape(output_ind, new_shape); assert(output->buffer() != nullptr); } @@ -417,7 +426,7 @@ void DynamicShapeInferer::visit(const ir::operation::Gather &op) auto output_ind = op.getOutputs().at(0); auto output = _tensor_registry->getITensor(output_ind); - _dynamic_tensor_manager->applyShape(output_ind, new_shape); + dynamicTensorManagerOf(output)->applyShape(output_ind, new_shape); assert(output->buffer() != nullptr); } @@ -442,6 +451,11 @@ void DynamicShapeInferer::visit(const ir::operation::Logistic &op) handleSimpleUnaryOp(op, op.getInputs().at(ir::operation::Logistic::INPUT)); } +void DynamicShapeInferer::visit(const ir::operation::L2Normalization &op) +{ + handleSimpleUnaryOp(op, op.getInputs().at(ir::operation::L2Normalization::INPUT)); +} + void DynamicShapeInferer::visit(const ir::operation::MatrixBandPart &op) { handleSimpleUnaryOp(op, op.getInputs().at(ir::operation::MatrixBandPart::INPUT)); @@ -492,7 +506,7 @@ void DynamicShapeInferer::visit(const ir::operation::OneHot &op) const auto axis_val = op.param().axis; ir::Shape new_shape = shape_inference::inferOnehotShape(indices_shape, *depth_buf, axis_val); - _dynamic_tensor_manager->applyShape(output_ind, new_shape); + dynamicTensorManagerOf(output)->applyShape(output_ind, new_shape); assert(output->buffer() != nullptr); } @@ -528,7 +542,7 @@ void DynamicShapeInferer::visit(const ir::operation::Pack &op) ir::Shape new_shape = shape_inference::inferPackShape(input_shape, axis, rank, num); - _dynamic_tensor_manager->applyShape(output_ind, new_shape); + dynamicTensorManagerOf(output)->applyShape(output_ind, new_shape); assert(output->buffer() != nullptr); } @@ -555,7 +569,7 @@ void DynamicShapeInferer::visit(const ir::operation::Pad &op) shape_inference::inferPadShape(input->getShape(), pad_buf, pad->getShape().num_elements()); // change output shape and reallocate output tensor memory - _dynamic_tensor_manager->applyShape(output_ind, output_shape); + dynamicTensorManagerOf(output)->applyShape(output_ind, output_shape); assert(output->buffer() != nullptr); } @@ -607,7 +621,7 @@ void DynamicShapeInferer::visit(const ir::operation::Range &op) *reinterpret_cast(limit_tensor->buffer()), *reinterpret_cast(delta_tensor->buffer())); } - _dynamic_tensor_manager->applyShape(output_ind, new_shape); + dynamicTensorManagerOf(output)->applyShape(output_ind, new_shape); assert(output->buffer() != nullptr); } @@ -651,7 +665,7 @@ void DynamicShapeInferer::visit(const ir::operation::Reduce &op) ir::Shape new_shape = shape_inference::inferReduceShape(input_shape, axes_vec, keep_dims); - 
_dynamic_tensor_manager->applyShape(output_ind, new_shape); + dynamicTensorManagerOf(output)->applyShape(output_ind, new_shape); assert(output->buffer() != nullptr); } @@ -705,7 +719,7 @@ void DynamicShapeInferer::visit(const ir::operation::Reshape &op) if (output_shape != output->getShape() || output->buffer() == nullptr) { // change on output shape - _dynamic_tensor_manager->applyShape(output_ind, output_shape); + dynamicTensorManagerOf(output)->applyShape(output_ind, output_shape); } assert(output->buffer() != nullptr); } @@ -721,7 +735,7 @@ void DynamicShapeInferer::visit(const ir::operation::Reshape &op) if (output_shape != output->getShape() || output->buffer() == nullptr) { // change on output shape - _dynamic_tensor_manager->applyShape(output_ind, output_shape); + dynamicTensorManagerOf(output)->applyShape(output_ind, output_shape); } assert(output->buffer() != nullptr); } @@ -732,6 +746,31 @@ void DynamicShapeInferer::visit(const ir::operation::Reshape &op) } } +void DynamicShapeInferer::visit(const ir::operation::ResizeBilinear &op) +{ + // check if output is not dynamic + auto output_ind = op.getOutputs().at(0); + auto output = _tensor_registry->getITensor(output_ind); + + auto input_ind = op.getInputs().at(ir::operation::Reshape::Input::INPUT); + auto input = _tensor_registry->getITensor(input_ind); + + if ((!input->is_dynamic()) && (!output->is_dynamic())) + return; + + // getting output shape from input shape and Params + auto output_shape = shape_inference::inferResizeBilinearShape( + input->getShape(), op.param().height_out, op.param().width_out); + + // if shape is changed, change output shape and reallocate output tensor memory + if (output_shape != output->getShape() || output->buffer() == nullptr) + { + // change on output shape + _dynamic_tensor_manager->applyShape(output_ind, output_shape); + } + assert(output->buffer() != nullptr); +} + void DynamicShapeInferer::visit(const ir::operation::Reverse &op) { handleSimpleUnaryOp(op, op.getInputs().at(ir::operation::Reverse::INPUT)); @@ -774,7 +813,7 @@ void DynamicShapeInferer::visit(const ir::operation::Select &op) auto output_ind = op.getOutputs().at(0); auto output = _tensor_registry->getITensor(output_ind); - _dynamic_tensor_manager->applyShape(output_ind, new_shape); + dynamicTensorManagerOf(output)->applyShape(output_ind, new_shape); assert(output->buffer() != nullptr); } @@ -793,7 +832,7 @@ void DynamicShapeInferer::visit(const ir::operation::Shape &op) ir::Shape output_shape; output_shape.append(input_shape.rank()); - _dynamic_tensor_manager->applyShape(output_ind, output_shape); + dynamicTensorManagerOf(output)->applyShape(output_ind, output_shape); assert(output->buffer() != nullptr); } @@ -824,7 +863,7 @@ void DynamicShapeInferer::visit(const ir::operation::Slice &op) ir::Shape new_shape = shape_inference::inferSliceShape(input_shape, begins_buf, sizes_buf); - _dynamic_tensor_manager->applyShape(output_index, new_shape); + dynamicTensorManagerOf(output)->applyShape(output_index, new_shape); assert(output->buffer() != nullptr); } @@ -861,7 +900,7 @@ void DynamicShapeInferer::visit(const ir::operation::SpaceToBatchND &op) ir::Shape new_shape = shape_inference::inferSpaceToBatchNDShape( input_shape, block_shape_shape, padding_shape, block_shape_data, padding_data); - _dynamic_tensor_manager->applyShape(output_idx, new_shape); + dynamicTensorManagerOf(output)->applyShape(output_idx, new_shape); assert(output->buffer() != nullptr); } @@ -890,7 +929,7 @@ void DynamicShapeInferer::visit(const ir::operation::Split &op) 
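// Illustrative sketch with simplified, assumed interfaces (not part of the upstream patch): the
// hunks in this file all switch from one shared _dynamic_tensor_manager to the manager owned by
// the output tensor's backend, obtained through dynamicTensorManagerOf(). The recurring pattern
// is roughly the following:
#include <memory>
#include <stdexcept>

struct ShapeSketch { bool operator!=(const ShapeSketch &) const { return false; } };
struct OperandIndexSketch { unsigned value; };

struct IDynamicTensorManagerSketch
{
  virtual ~IDynamicTensorManagerSketch() = default;
  virtual void applyShape(const OperandIndexSketch &, const ShapeSketch &) = 0; // reallocates as needed
};

struct ITensorSketch
{
  virtual ~ITensorSketch() = default;
  virtual IDynamicTensorManagerSketch *dynamic_tensor_manager() = 0; // may be nullptr
  virtual ShapeSketch getShape() const = 0;
};

// Mirrors the dynamicTensorManagerOf() helper introduced earlier in this file: fail loudly when
// the backend that owns the tensor cannot resize it at run time, otherwise resize through it.
inline void applyDynamicShape(const std::shared_ptr<ITensorSketch> &output,
                              const OperandIndexSketch &output_ind, const ShapeSketch &new_shape)
{
  auto *manager = output->dynamic_tensor_manager();
  if (manager == nullptr)
    throw std::runtime_error{"Dynamic Tensor Manager is not available for this tensor."};
  if (new_shape != output->getShape())
    manager->applyShape(output_ind, new_shape);
}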
auto output_ind = op.getOutputs().at(out_tensor_idx); auto output = _tensor_registry->getITensor(output_ind); - _dynamic_tensor_manager->applyShape(output_ind, new_shape); + dynamicTensorManagerOf(output)->applyShape(output_ind, new_shape); assert(output->buffer() != nullptr); } } @@ -919,7 +958,7 @@ void DynamicShapeInferer::visit(const ir::operation::Squeeze &op) auto output_ind = op.getOutputs().at(0); auto output = _tensor_registry->getITensor(output_ind); - _dynamic_tensor_manager->applyShape(output_ind, new_shape); + dynamicTensorManagerOf(output)->applyShape(output_ind, new_shape); assert(output->buffer() != nullptr); } @@ -960,7 +999,7 @@ void DynamicShapeInferer::visit(const ir::operation::StridedSlice &op) ir::Shape output_shape = onert::shape_inference::inferStridedSliceShape(input_shape, op_params, rank); - _dynamic_tensor_manager->applyShape(output_index, output_shape); + dynamicTensorManagerOf(output)->applyShape(output_index, output_shape); assert(output->buffer() != nullptr); } @@ -996,7 +1035,7 @@ void DynamicShapeInferer::visit(const ir::operation::Tile &op) auto output_shape = shape_inference::inferTileShape(input_shape, multiplier_buffer); // set output shape and output buffer - _dynamic_tensor_manager->applyShape(output_ind, output_shape); + dynamicTensorManagerOf(output)->applyShape(output_ind, output_shape); assert(output->buffer() != nullptr); } @@ -1018,7 +1057,7 @@ void DynamicShapeInferer::visit(const ir::operation::Transpose &op) // set output shape, based on input and params ir::Shape new_shape = shape_inference::inferTransposeShape(input_shape, perm); - _dynamic_tensor_manager->applyShape(output_ind, new_shape); + dynamicTensorManagerOf(output)->applyShape(output_ind, new_shape); assert(output->buffer() != nullptr); } @@ -1046,7 +1085,7 @@ void DynamicShapeInferer::visit(const ir::operation::Unpack &op) auto output_ind = op.getOutputs().at(out_tensor_idx); auto output = _tensor_registry->getITensor(output_ind); - _dynamic_tensor_manager->applyShape(output_ind, new_shape); + dynamicTensorManagerOf(output)->applyShape(output_ind, new_shape); assert(output->buffer() != nullptr); } diff --git a/runtime/onert/core/src/exec/ExecutorBase.cc b/runtime/onert/core/src/exec/ExecutorBase.cc index a7409b9..864ccb3 100644 --- a/runtime/onert/core/src/exec/ExecutorBase.cc +++ b/runtime/onert/core/src/exec/ExecutorBase.cc @@ -46,7 +46,7 @@ ExecutorBase::ExecutorBase(std::unique_ptr &&lowered_graph, { auto tensor_registry = tensor_builder->tensorRegistry(); assert(tensor_registry); - tensor = tensor_registry->getManagedITensor(ind); + tensor = tensor_registry->getNativeITensor(ind); if (tensor != nullptr) { if (tensor_builder->supportDynamicTensor()) @@ -71,7 +71,7 @@ ExecutorBase::ExecutorBase(std::unique_ptr &&lowered_graph, { auto tensor_registry = tensor_builder->tensorRegistry(); assert(tensor_registry); - tensor = tensor_registry->getManagedITensor(ind); + tensor = tensor_registry->getNativeITensor(ind); if (tensor != nullptr) { if (tensor_builder->supportDynamicTensor()) diff --git a/runtime/onert/core/src/exec/ExecutorBase.h b/runtime/onert/core/src/exec/ExecutorBase.h index b82d0e8..080c9bb 100644 --- a/runtime/onert/core/src/exec/ExecutorBase.h +++ b/runtime/onert/core/src/exec/ExecutorBase.h @@ -29,7 +29,6 @@ #include "ir/LowerInfoMap.h" #include "backend/IConfig.h" #include "backend/Backend.h" -#include "compiler/OperandContext.h" #include "exec/ExecTime.h" #include "exec/IFunction.h" #include "backend/IDynamicTensorManager.h" diff --git 
a/runtime/onert/core/src/interp/Tensor.h b/runtime/onert/core/src/interp/Tensor.h index 8b72d53..008a4b9 100644 --- a/runtime/onert/core/src/interp/Tensor.h +++ b/runtime/onert/core/src/interp/Tensor.h @@ -171,6 +171,7 @@ public: int32_t data_offset() const override { return _info.typeInfo().offset(); } const ir::OperandInfo &tensorInfo() const override { return _info; } uint64_t num_elements() const override { return _info.shape().num_elements(); }; + backend::IDynamicTensorManager *dynamic_tensor_manager() override { return nullptr; } private: const ir::OperandInfo _info; diff --git a/runtime/onert/core/src/interp/operations/Pad.cc b/runtime/onert/core/src/interp/operations/Pad.cc index d2e3627..c8dce69 100644 --- a/runtime/onert/core/src/interp/operations/Pad.cc +++ b/runtime/onert/core/src/interp/operations/Pad.cc @@ -69,8 +69,8 @@ void invoke(const ITensor *input_tensor, const ITensor *pad_tensor, const ITenso const int32_t *pad_ptr = reinterpret_cast(pad_buffer); float *output_ptr = reinterpret_cast(output_buffer); - nnfw::cker::Pad(pad_ptr, pad_rank, cker_input_shape, input_ptr, cker_output_shape, output_ptr, - nullptr); + nnfw::cker::Pad(pad_ptr, pad_rank, cker_input_shape, input_ptr, cker_output_shape, + output_ptr, nullptr); } void invokePad(const ExecEnv *env, const ir::Operation &node) diff --git a/runtime/onert/core/src/ir/Graph.cc b/runtime/onert/core/src/ir/Graph.cc index 2628630..0db9b61 100644 --- a/runtime/onert/core/src/ir/Graph.cc +++ b/runtime/onert/core/src/ir/Graph.cc @@ -89,7 +89,7 @@ void Graph::initializeUseDef() auto outputs = node.getOutputs(); for (auto output : outputs) { - operands().at(output).insertDef(index); + operands().at(output).setDef(index); } for (auto input : node.getInputs() | ir::Remove::UNDEFINED) diff --git a/runtime/onert/core/src/ir/LoweredGraph.cc b/runtime/onert/core/src/ir/LoweredGraph.cc index 6e93a23..8aedfbd 100644 --- a/runtime/onert/core/src/ir/LoweredGraph.cc +++ b/runtime/onert/core/src/ir/LoweredGraph.cc @@ -23,6 +23,7 @@ #include "pass/ConstantLoweringPass.h" #include "pass/PermutationOperationPass.h" #include "pass/PermutationInsertionPass.h" +#include "pass/PermutationEliminationPass.h" #include "ir/GraphIterator.h" #include "verifier/Verifier.h" #include "backend/Backend.h" @@ -122,9 +123,9 @@ LoweredGraph::LoweredGraph(const Graph &graph, const compiler::CompilerOptions & pass::PermutationInsertionPass pi_pass(*this); pi_pass.run(); - // Implemented code no longer works. - // pass::PermutationEliminationPass pe_pass(*this); - // pe_pass.run(); + + pass::PermutationEliminationPass pe_pass(*this); + pe_pass.run(); _op_seqs.dump("merged and sorted operations with permutation", _graph.operations()); } @@ -414,7 +415,8 @@ void LoweredGraph::dumpLowerInfo() const auto lower_info = getLowerInfo(index); const auto &shape = object.shape(); - std::string def_ops = operation_index_to_string(object.getDef()); + std::string def_ops = + object.getDef().valid() ? 
std::to_string(object.getDef().value()) : "N/A"; std::string use_ops = operation_index_to_string(object.getUses()); std::string def_layouts = factors_to_string(lower_info->def_factors()); std::string use_layouts = factors_to_string(lower_info->use_factors()); @@ -474,7 +476,8 @@ bool LoweredGraph::mergeable(const OpSequenceIndex &op_seq_index, const Operatio for (const auto &input : op_seq.getInputs() | Remove::DUPLICATED | ir::Remove::UNDEFINED) { const auto &input_obj = _graph.operands().at(input); - for (const auto &def : input_obj.getDef()) + auto def = input_obj.getDef(); + if (def.valid()) { branched_set.insert(def); if (branched_set.size() > 1) @@ -488,6 +491,12 @@ bool LoweredGraph::mergeable(const OpSequenceIndex &op_seq_index, const Operatio // Check for branching down for (const auto &output : node.getOutputs() | Remove::DUPLICATED) { + // TODO Fix this workaround for the case of model outputs that are used by another operation + // This is needed since the branching is decided by operation, but for model outputs, + // there is a controlflow backend (use backend) but no actual use operation exists + if (_graph.getOutputs().contains(output)) + return false; + const auto &output_obj = _graph.operands().at(output); for (const auto &use : output_obj.getUses()) { diff --git a/runtime/onert/core/src/ir/Operand.cc b/runtime/onert/core/src/ir/Operand.cc index cde7fb7..e29c7a6 100644 --- a/runtime/onert/core/src/ir/Operand.cc +++ b/runtime/onert/core/src/ir/Operand.cc @@ -42,20 +42,9 @@ void Operand::insertUse(const OperationIndex &idx) { _uses.insert(idx); } void Operand::removeUse(const OperationIndex &idx) { _uses.remove(idx); } -void Operand::insertDef(const OperationIndex &idx) -{ - assert(!isConstant()); - assert(_def.size() == 0); - - _def.insert(idx); -} +void Operand::setDef(const OperationIndex &idx) { _def = idx; } -void Operand::removeDef(const OperationIndex &idx) -{ - assert(_def.contains(idx)); - - _def.remove(idx); -} +void Operand::unsetDef() { _def = OperationIndex{}; } } // namespace ir } // namespace onert diff --git a/runtime/onert/core/src/ir/OperationDumper.cc b/runtime/onert/core/src/ir/OperationDumper.cc index c4b61f3..e3cbce5 100644 --- a/runtime/onert/core/src/ir/OperationDumper.cc +++ b/runtime/onert/core/src/ir/OperationDumper.cc @@ -613,6 +613,15 @@ void OperationDumper::visit(const SquaredDifference &node) VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0) << ")" << std::endl; } +void OperationDumper::visit(const StatelessRandomUniform &node) +{ + VERBOSE(LIR) << "* StatelessRandomUniform" << std::endl; + VERBOSE(LIR) << " - Inputs : Input(" << node.getInputs().at(StatelessRandomUniform::Input::SHAPE) + << ", " << node.getInputs().at(StatelessRandomUniform::Input::SEED) << ")" + << std::endl; + VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0) << ")" << std::endl; +} + void OperationDumper::visit(const Squeeze &node) { VERBOSE(LIR) << "* Squeeze" << std::endl; diff --git a/runtime/onert/core/src/ir/OperationDumper.h b/runtime/onert/core/src/ir/OperationDumper.h index 27cfffc..d83f149 100644 --- a/runtime/onert/core/src/ir/OperationDumper.h +++ b/runtime/onert/core/src/ir/OperationDumper.h @@ -96,6 +96,7 @@ public: void visit(const operation::Squeeze &) override; void visit(const operation::Slice &) override; void visit(const operation::StridedSlice &) override; + void visit(const operation::StatelessRandomUniform &) override; void visit(const operation::Sub &) override; void visit(const operation::Tanh &) override; void visit(const
operation::Tile &) override; diff --git a/runtime/onert/core/src/ir/operation/BatchToSpaceND.cc b/runtime/onert/core/src/ir/operation/BatchToSpaceND.cc index 0b3955c..9ef2b12 100644 --- a/runtime/onert/core/src/ir/operation/BatchToSpaceND.cc +++ b/runtime/onert/core/src/ir/operation/BatchToSpaceND.cc @@ -31,7 +31,7 @@ void BatchToSpaceND::accept(OperationVisitor &v) const { v.visit(*this); } BatchToSpaceND::BatchToSpaceND(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs) - : Operation{OperandConstraint::createExact(2u), inputs, outputs} + : Operation{OperandConstraint::createExact(3u), inputs, outputs} { } diff --git a/runtime/onert/core/src/ir/operation/Quantize.cc b/runtime/onert/core/src/ir/operation/Quantize.cc new file mode 100644 index 0000000..0e3d5b6 --- /dev/null +++ b/runtime/onert/core/src/ir/operation/Quantize.cc @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ir/operation/Quantize.h" + +#include "ir/OperationVisitor.h" + +namespace onert +{ +namespace ir +{ +namespace operation +{ + +void Quantize::accept(OperationVisitor &v) const { v.visit(*this); } + +Quantize::Quantize(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs) + : Operation{OperandConstraint::createExact(2u), inputs, outputs} +{ +} + +} // namespace operation +} // namespace ir +} // namespace onert diff --git a/runtime/onert/core/src/ir/operation/SplitV.cc b/runtime/onert/core/src/ir/operation/SplitV.cc new file mode 100644 index 0000000..e638c9a --- /dev/null +++ b/runtime/onert/core/src/ir/operation/SplitV.cc @@ -0,0 +1,33 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include "ir/operation/SplitV.h" +#include +#include "ir/OperationVisitor.h" +namespace onert +{ +namespace ir +{ +namespace operation +{ +void SplitV::accept(OperationVisitor &v) const { v.visit(*this); } +SplitV::SplitV(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs, + const Param ¶m) + : Operation{OperandConstraint::createExact(3u), inputs, outputs}, _param{param} +{ +} +} // namespace operation +} // namespace ir +} // namespace onert diff --git a/runtime/onert/core/src/compiler/OperandContext.cc b/runtime/onert/core/src/ir/operation/StatelessRandomUniform.cc similarity index 52% rename from runtime/onert/core/src/compiler/OperandContext.cc rename to runtime/onert/core/src/ir/operation/StatelessRandomUniform.cc index cce555e..cbb0ff2 100644 --- a/runtime/onert/core/src/compiler/OperandContext.cc +++ b/runtime/onert/core/src/ir/operation/StatelessRandomUniform.cc @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -14,32 +14,26 @@ * limitations under the License. */ -#include "OperandContext.h" +#include "ir/operation/StatelessRandomUniform.h" #include +#include "ir/OperationVisitor.h" + namespace onert { -namespace compiler +namespace ir { - -OperandContext &OperandContext::set(const ir::OperandIndex &id, - const std::shared_ptr &tensor) +namespace operation { - // Only one tensor for an id - assert(_tensors.find(id) == _tensors.end()); - _tensors[id] = tensor; - return (*this); -} +void StatelessRandomUniform::accept(OperationVisitor &v) const { v.visit(*this); } -void OperandContext::iterate( - const std::function &fn) +StatelessRandomUniform::StatelessRandomUniform(const OperandIndexSequence &inputs, + const OperandIndexSequence &outputs) + : Operation{OperandConstraint::createExact(2u), inputs, outputs} { - for (auto &e : _tensors) - { - fn(e.first, *e.second); - } } -} // namespace compiler +} // namespace operation +} // namespace ir } // namespace onert diff --git a/runtime/onert/core/src/ir/pass/ConstantInsertionPass.cc b/runtime/onert/core/src/ir/pass/ConstantInsertionPass.cc index 29275f1..1742a0d 100644 --- a/runtime/onert/core/src/ir/pass/ConstantInsertionPass.cc +++ b/runtime/onert/core/src/ir/pass/ConstantInsertionPass.cc @@ -45,8 +45,8 @@ void ConstantInsertionPass::callback(const OperationIndex &node_index, Operation if (_replace_operands_map.count(key) == 0) { auto new_object = object; + new_object.unsetDef(); // TODO Remove const_case - const_cast(new_object.getDef()).clear(); const_cast(new_object.getUses()).clear(); const auto new_index = _graph.operands().emplace(new_object); _replace_operands_map[key] = new_index; @@ -71,7 +71,7 @@ void ConstantInsertionPass::callback(const OperationIndex &node_index, Operation // Remove this node from uses of origin operand // Constant operand has no def. - assert(object.getDef().size() == 0); + assert(!object.getDef().valid()); object.removeUse(node_index); // Remove origin operand diff --git a/runtime/onert/core/src/ir/pass/PermutationEliminationPass.cc b/runtime/onert/core/src/ir/pass/PermutationEliminationPass.cc index 9e0291e..2deccd4 100644 --- a/runtime/onert/core/src/ir/pass/PermutationEliminationPass.cc +++ b/runtime/onert/core/src/ir/pass/PermutationEliminationPass.cc @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018 Samsung Electronics Co., Ltd. 
All Rights Reserved + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -15,11 +15,8 @@ */ #include "PermutationEliminationPass.h" +#include "backend/controlflow/Config.h" -#include "ir/Operand.h" -#include "ir/operand/LowerInfo.h" -#include "ir/Graph.h" -#include "backend/IConfig.h" #include "util/logging.h" namespace onert @@ -28,166 +25,129 @@ namespace ir { namespace pass { -void PermutationEliminationPass::callback(const OperandIndex &inp_index, Operand &object) -{ - if (_graph.getInputs().contains(inp_index)) - { - eliminateInput(inp_index, object); - } - else if (_graph.getOutputs().contains(inp_index)) - { - eliminateOutput(inp_index, object); - } -} -void PermutationEliminationPass::eliminateInput(const OperandIndex &inp_index, Operand &object) +void PermutationEliminationPass::callback(const OperationIndex &ind, Operation &node) { - auto &model_inputs = _graph.getInputs(); - - // get uses of the model's given input - auto uses = object.getUses(); + _op_ind = ind; + node.accept(*this); +}; - // input must be used just by permutation - if (uses.size() != 1) - { - return; - } +void PermutationEliminationPass::visit(const operation::Permute &node) +{ + auto in_operand = node.getInputs().at(0); + auto out_operand = node.getOutputs().at(0); - for (auto input_use : uses) + // Check if two tensors are both portable + // TODO Make this general, this is just a workaround to check two tensors are portable { - auto &perm_operation = _graph.operations().at(input_use); - auto perm_inputs = perm_operation.getInputs(); + auto in_def_factor = _lowered_graph.getLowerInfo(in_operand)->def_factors().getOnlyElement(); + auto out_def_factor = _lowered_graph.getLowerInfo(out_operand)->def_factors().getOnlyElement(); - auto perm_outputs = perm_operation.getOutputs(); + auto in_backend_id = in_def_factor.backend()->config()->id(); + auto out_backend_id = out_def_factor.backend()->config()->id(); - if (!isPermuteLayerToEliminate(perm_inputs, perm_outputs, true)) - { + // TODO Fix this workaround that removes only Permute between cpu and controlflow backend. + // This should be general. + if (!((in_backend_id == backend::controlflow::Config::ID && out_backend_id == "cpu") || + (in_backend_id == "cpu" && out_backend_id == backend::controlflow::Config::ID))) return; - } - - assert(perm_inputs.at(0) == inp_index); - - VERBOSE(PermutationEliminationPass::EliminateInput) << "remove NHWC_TO_NCHW permutation\n"; - - // set model's new input, which was output of permutation - model_inputs.replace(inp_index, perm_outputs.at(0)); - - // remove model's input, which is also input of permutation - _graph.removeOperand(inp_index); - - // remove permutation operation - assert(_lowered_graph.op_seqs().containsOperation(input_use)); - auto op_seq_idx = _lowered_graph.op_seqs().getOperation(input_use); - _lowered_graph.op_seqs().remove(op_seq_idx); - _graph.operations().remove(input_use); - - VERBOSE(PermutationEliminationPass::EliminateInput) - << inp_index.value() << " is model's input and is removed. 
New input is " - << perm_outputs.at(0).value() << "\n" - << input_use.value() << " is removed permutation operation\n"; - } -} - -void PermutationEliminationPass::eliminateOutput(const OperandIndex &out_index, Operand &object) -{ - auto &model_outputs = _graph.getOutputs(); - - // get defs of the model's given output - auto defs = object.getDef(); - - // output must use just permutation - if (defs.size() != 1) - { - return; } - for (auto output_def : defs) + if (_graph.getOutputs().contains(out_operand)) { - auto &perm_operation = _graph.operations().at(output_def); - auto perm_outputs = perm_operation.getOutputs(); - - auto perm_inputs = perm_operation.getInputs(); - if (!isPermuteLayerToEliminate(perm_inputs, perm_outputs, false)) + // Exceptional case : When the output operand is a model output + // In this case we keep the output and remove the input + + auto &out_operand_obj = _graph.operands().at(out_operand); + assert(out_operand_obj.getDef() == _op_ind); + out_operand_obj.unsetDef(); + _lowered_graph.op_seqs().iterate([&](const ir::OpSequenceIndex &, ir::OpSequence &op_seq) { + if (!op_seq.getOutputs().contains(in_operand)) + return; + + // Update OpSequence/Operation edges and Operand edges + op_seq.replaceOutputs(in_operand, out_operand); + for (auto op : op_seq.operations()) + { + auto &operation_obj = _graph.operations().at(op); + if (operation_obj.getOutputs().contains(in_operand)) + { + operation_obj.replaceOutputs(in_operand, out_operand); + out_operand_obj.setDef(op); + } + } + }); + + // Remove Permute operation, enclosing OpSequence and the operand { - return; - } - - assert(perm_outputs.at(0) == out_index); + _graph.removeOperand(in_operand); - VERBOSE(PermutationEliminationPass::EliminateOutput) << "remove NCHW_TO_NHWC permutation\n"; - - // Update operations' output that is used by permute operand - for (auto perm_input_index : perm_inputs) - { - auto &perm_input_operand = _graph.operands().at(perm_input_index); - perm_input_operand.removeUse(output_def); + auto op_seq_ind = _lowered_graph.op_seqs().getOperation(_op_ind); + // Assumes enclosing OpSequence contatins just this Permute operation + assert(_lowered_graph.op_seqs().at(op_seq_ind).size() == 1); + _lowered_graph.op_seqs().remove(op_seq_ind); + _graph.operations().remove(_op_ind); } - // set model's new output, which was input of permutation - model_outputs.replace(out_index, perm_inputs.at(0)); - - // remove model's output, which is also output of permutation - _graph.removeOperand(out_index); - - // remove permutation operation - assert(_lowered_graph.op_seqs().containsOperation(output_def)); - auto op_seq_idx = _lowered_graph.op_seqs().getOperation(output_def); - _lowered_graph.op_seqs().remove(op_seq_idx); - _graph.operations().remove(output_def); - - VERBOSE(PermutationEliminationPass::EliminateOutput) - << out_index.value() << " is model's output and is removed. 
New output is " - << perm_inputs.at(0).value() << "\n" - << output_def.value() << " is removed permutation operation\n"; + _lowered_graph.op_seqs().iterate([&](const ir::OpSequenceIndex &, ir::OpSequence &op_seq) { + if (!op_seq.getInputs().contains(in_operand)) + return; + + op_seq.replaceInputs(in_operand, out_operand); + for (auto op : op_seq.operations()) + { + auto &operation_obj = _graph.operations().at(op); + if (operation_obj.getInputs().contains(in_operand)) + { + operation_obj.replaceInputs(in_operand, out_operand); + out_operand_obj.insertUse(op); + } + } + }); + + VERBOSE(removePermute) << "Permute Op removed, node index : " << _op_ind << std::endl; + VERBOSE(removePermute) << " - Input (removed) Operand : " << in_operand << std::endl; + VERBOSE(removePermute) << " - Output(kept) Operand : " << out_operand << std::endl; } -} - -bool PermutationEliminationPass::isPermuteLayerToEliminate(const OperandIndexSequence &inp_indexes, - const OperandIndexSequence &out_indexes, - bool is_for_model_input) -{ - auto input_def_factors = _lowered_graph.getLowerInfo(inp_indexes.at(0))->def_factors(); - auto output_def_factors = _lowered_graph.getLowerInfo(out_indexes.at(0))->def_factors(); - - auto input_layout = input_def_factors.getOnlyElement().layout(); - auto output_layout = output_def_factors.getOnlyElement().layout(); - - if (input_def_factors.size() != 1 || output_def_factors.size() != 1) - { - return false; - } - - // all operands' factor must be the same - for (auto index : inp_indexes) - { - auto op_factor_set = _lowered_graph.getLowerInfo(index)->def_factors(); - if (op_factor_set.size() != 1 || - input_layout != _lowered_graph.getLowerInfo(index)->def_factors().getOnlyElement().layout()) - { - return false; - } - } - // all operands' factor must be the same - for (auto index : out_indexes) + else { - auto op_factor_set = _lowered_graph.getLowerInfo(index)->def_factors(); - if (op_factor_set.size() != 1 || - output_layout != - _lowered_graph.getLowerInfo(index)->def_factors().getOnlyElement().layout()) + // Otherwise keep the input and remove the output + + auto &in_operand_obj = _graph.operands().at(in_operand); + in_operand_obj.removeUse(_op_ind); + + // Make OpSequences(that use the output) use the input + _lowered_graph.op_seqs().iterate([&](const ir::OpSequenceIndex &, ir::OpSequence &op_seq) { + if (!op_seq.getInputs().contains(out_operand)) + return; + + op_seq.replaceInputs(out_operand, in_operand); + for (auto op : op_seq.operations()) + { + auto &operation_obj = _graph.operations().at(op); + if (operation_obj.getInputs().contains(out_operand)) + { + operation_obj.replaceInputs(out_operand, in_operand); + in_operand_obj.insertUse(op); + } + } + }); + + // Remove Permute operation, enclosing OpSequence and the operand { - return false; + _graph.removeOperand(out_operand); + + auto op_seq_ind = _lowered_graph.op_seqs().getOperation(_op_ind); + // Assumes enclosing OpSequence contatins just this Permute operation + assert(_lowered_graph.op_seqs().at(op_seq_ind).size() == 1); + _lowered_graph.op_seqs().remove(op_seq_ind); + _graph.operations().remove(_op_ind); } - } - if (is_for_model_input) - { - // check if this is NHWC_TO_NCHW permutation: must have single input, which is model's input - return (inp_indexes.size() == 1 && input_layout == Layout::NHWC && - output_layout == Layout::NCHW); + VERBOSE(removePermute) << "Permute Op removed, node index : " << _op_ind << std::endl; + VERBOSE(removePermute) << " - Input (kept) Operand : " << in_operand << std::endl; + 
VERBOSE(removePermute) << " - Output(removed) Operand : " << out_operand << std::endl; } - - // check if this is NCHW_TO_NHWC permutation: must have single output, which is model's output - return (out_indexes.size() == 1 && input_layout == Layout::NCHW && output_layout == Layout::NHWC); } } // namespace pass diff --git a/runtime/onert/core/src/ir/pass/PermutationEliminationPass.h b/runtime/onert/core/src/ir/pass/PermutationEliminationPass.h index 1c84300..614e44c 100644 --- a/runtime/onert/core/src/ir/pass/PermutationEliminationPass.h +++ b/runtime/onert/core/src/ir/pass/PermutationEliminationPass.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -17,9 +17,8 @@ #ifndef __ONERT_GRAPH_PASS_PERMUTATION_ELIMINATION_PASS_H__ #define __ONERT_GRAPH_PASS_PERMUTATION_ELIMINATION_PASS_H__ -#include "LoweredOperandPass.h" -#include "ir/Operand.h" -#include "ir/OperandIndexSequence.h" +#include "ir/OperationVisitor.h" +#include "LoweredOperationPass.h" namespace onert { @@ -28,55 +27,35 @@ namespace ir namespace pass { -class PermutationEliminationPass : public LoweredOperandPass +/** + * @brief An optimization pass that removes Permute operations if possible + * + * There may be some Permute operations that are inserted by PermutationInsertionPass or other + * passes. This pass checks all Permute operations and eliminates them if the Permute in/out tensors + * are compatible and their layouts match. + * + * The Permute input tensor is kept and the output is removed in all cases, except for model outputs. + * As all model output tensors have to be on the controlflow backend, the output is kept in that case. + * + * @note This is an optimization pass, which means that everything should work fine even if this pass + * was skipped.
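// Illustrative sketch with simplified, assumed types (not part of the upstream patch): the class
// below shows the dispatch scheme this header describes. The pass runs per operation and is also
// an OperationVisitor, so callback() forwards every node to accept() and only the Permute
// overload does any work.
#include <iostream>

struct PermuteOpSketch;

struct OperationVisitorSketch
{
  virtual ~OperationVisitorSketch() = default;
  virtual void visit(const PermuteOpSketch &) {}
};

struct OperationSketch
{
  virtual ~OperationSketch() = default;
  virtual void accept(OperationVisitorSketch &) const {} // non-Permute operations: no-op
};

struct PermuteOpSketch : OperationSketch
{
  void accept(OperationVisitorSketch &v) const override { v.visit(*this); }
};

struct PermutationEliminationSketch : OperationVisitorSketch
{
  void callback(int op_index, const OperationSketch &node)
  {
    _op_ind = op_index; // remember which operation is being visited
    node.accept(*this); // dispatches to visit(const PermuteOpSketch &) only for Permute nodes
  }
  void visit(const PermuteOpSketch &) override
  {
    std::cout << "candidate Permute for elimination at operation #" << _op_ind << std::endl;
  }
  int _op_ind = -1;
};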
+ */ +class PermutationEliminationPass : public LoweredOperationPass, public OperationVisitor { public: - using LoweredOperandPass::LoweredOperandPass; + using LoweredOperationPass::LoweredOperationPass; public: - std::string id() override { return "PermutationEliminationPass"; } + std::string id() final { return "PermutationEliminationPass"; } - void callback(const OperandIndex &index, Operand &object) override; +public: + void callback(const OperationIndex &i, Operation &n) final; private: - /** - * @brief Remove Permute operation that permutates input - * - * Note: This function aslo removes model's input and - * sets output of permutation as model's new input - * - * @param inp_index is the target operand index for the elimination - * @param object is the target operand object for the elimination - * - * @return - */ - void eliminateInput(const OperandIndex &inp_index, Operand &object); - - /** - * @brief Remove Permute operation that permutates output of a model - * - * Note: This function aslo removes model's output and - * sets input of permutation as model's new output - * - * @param out_index is the target operand index for the elimination - * @param object is the target operand object for the elimination - * - * @return - */ - void eliminateOutput(const OperandIndex &out_index, Operand &object); + void visit(const operation::Permute &) final; - /** - * @brief Determine if passed operands are permute layer's input and output, that must be - * eliminated - * - * @param inp_index indexes of the input operand to operation - * @param out_index indexes of the output operand to operation - * @param is_for_model_input checking for model's input or output - * - * @return if it is permutation layer - */ - bool isPermuteLayerToEliminate(const OperandIndexSequence &inp_indexes, - const OperandIndexSequence &out_indexes, bool is_for_model_input); +private: + ir::OperationIndex _op_ind; }; } // namespace pass diff --git a/runtime/onert/core/src/ir/pass/PermutationInsertionPass.cc b/runtime/onert/core/src/ir/pass/PermutationInsertionPass.cc index 7c3da52..3578af8 100644 --- a/runtime/onert/core/src/ir/pass/PermutationInsertionPass.cc +++ b/runtime/onert/core/src/ir/pass/PermutationInsertionPass.cc @@ -60,36 +60,8 @@ void PermutationInsertionPass::callback(const OperandIndex &index, Operand &obje } auto insert_set = operand_li->use_factors() - operand_li->def_factors(); - auto def_factor = operand_li->def_factors().getOnlyElement(); - - auto compatible_backends = [](auto /* backend1 */, auto /* backend2 */) { - // TODO If other issues for Permute elimination are resolved, enable this - return false; - /* - // TODO This is a workaround for not inserting Permute between cpu and controlflow. - // To be general, we need another way of checking they are compatible. - const auto cf = backend::controlflow::Config::ID; - const auto cpu = "cpu"; - const auto id1 = backend1->config()->id(); - const auto id2 = backend2->config()->id(); - return (id1 == cpu && id2 == cf) // Allows no-Permute for Model inputs - || (id1 == cf && id2 == cpu); // Allows no-Permute for Model outputs - */ - }; - for (auto factor : insert_set) { - if (factor.layout() == def_factor.layout() && - compatible_backends(factor.backend(), def_factor.backend())) - { - // For this factor we can just reuse existing operand - Permute is not added. 
- VERBOSE(PermutationInsertionPass) << "Permutation Insertion is skipped for operand " - << index << " / as the tensor is compatible with backend " - << factor.backend()->config()->id() << std::endl; - factor_to_index.emplace(factor, index); - continue; - } - const auto permute_operation_index = insertPermute(index, factor); permute_indexes.push_back(permute_operation_index); const auto &permute_operation = _graph.operations().at(permute_operation_index); @@ -235,7 +207,7 @@ OperationIndex PermutationInsertionPass::insertPermute(const OperandIndex &opera // Update Use/Def info { _graph.operands().at(operand_index).insertUse(node_index); - _graph.operands().at(out_operand_index).insertDef(node_index); + _graph.operands().at(out_operand_index).setDef(node_index); } return node_index; } diff --git a/runtime/onert/core/src/ir/pass/PermutationInsertionPass.h b/runtime/onert/core/src/ir/pass/PermutationInsertionPass.h index 314a54c..6c30c6f 100644 --- a/runtime/onert/core/src/ir/pass/PermutationInsertionPass.h +++ b/runtime/onert/core/src/ir/pass/PermutationInsertionPass.h @@ -38,6 +38,7 @@ public: std::string id() override { return "PermutationInsertionPass"; } void callback(const OperandIndex &index, Operand &object) override; +private: /** * @brief Insert Permute operation that has given operand as input * @@ -48,8 +49,6 @@ public: */ OperationIndex insertPermute(const OperandIndex &operand_index, const operand::PermuteFactor &factor); - -private: }; } // namespace pass diff --git a/runtime/onert/core/src/ir/pass/PermutationOperationPass.cc b/runtime/onert/core/src/ir/pass/PermutationOperationPass.cc index 1d77b48..6eb412c 100644 --- a/runtime/onert/core/src/ir/pass/PermutationOperationPass.cc +++ b/runtime/onert/core/src/ir/pass/PermutationOperationPass.cc @@ -39,8 +39,8 @@ void PermutationOperationPass::applyExpandRanks(const Operation &node) const auto &output_ind = node.getOutputs().at(0); const auto &output = _graph.operands().at(output_ind); - assert(output.getDef().size() == 1); - const auto &node_index = *output.getDef().begin(); + assert(output.getDef().valid()); + const auto node_index = output.getDef(); const auto &op_seq_index = _lowered_graph.op_seqs().getOperation(node_index); const auto frontend_layout = _lowered_graph.op_seqs().at(op_seq_index).getLayout(); const auto backend_layout = _lowered_graph.getLowerInfo(op_seq_index)->layout(); @@ -80,8 +80,8 @@ void PermutationOperationPass::changeToKeepLayout(const Operation &node) const auto &output_ind = node.getOutputs().at(0); const auto &output_obj = _graph.operands().at(output_ind); - assert(output_obj.getDef().size() == 1); - const auto &node_index = *output_obj.getDef().begin(); + assert(output_obj.getDef().valid()); + const auto node_index = output_obj.getDef(); const auto &op_seq_index = _lowered_graph.op_seqs().getOperation(node_index); const auto frontend_layout = _lowered_graph.op_seqs().at(op_seq_index).getLayout(); @@ -200,7 +200,7 @@ void PermutationOperationPass::changeToKeepLayout(const Operation &node) lower_info->addUsePermuteFactor(new_factor); // Whether if node's input is an input of model or a constant - if (_graph.operands().at(input).getDef().size() == 0 && + if (!_graph.operands().at(input).getDef().valid() && (lower_info->def_factors().size() == 1 && lower_info->def_factors().getOnlyElement() == removed_factor)) { diff --git a/runtime/onert/core/src/ir/verifier/Verifier.cc b/runtime/onert/core/src/ir/verifier/Verifier.cc index 9b83887..09cbdcf 100644 --- a/runtime/onert/core/src/ir/verifier/Verifier.cc 
+++ b/runtime/onert/core/src/ir/verifier/Verifier.cc @@ -32,7 +32,7 @@ namespace verifier // DAGChecker // -bool DAGChecker::verify(const Graph &graph) const +bool DAGChecker::verify(const Graph &graph) const noexcept { auto &operations = graph.operations(); bool cyclic = false; @@ -72,23 +72,59 @@ bool DAGChecker::verify(const Graph &graph) const // EdgeConsistencyVerifier // -bool EdgeConsistencyChecker::verify(const Graph &graph) const +bool EdgeConsistencyChecker::verify(const Graph &graph) const noexcept { auto &operations = graph.operations(); - uint32_t mismatches = 0; + uint32_t errors = 0; operations.iterate([&](const OperationIndex &index, const Operation &node) { for (auto operand_index : node.getInputs() | ir::Remove::UNDEFINED) { - auto &operand = graph.operands().at(operand_index); - mismatches += (operand.getUses().contains(index) ? 0 : 1); + try + { + auto &operand = graph.operands().at(operand_index); + bool operand_has_use = operand.getUses().contains(index); + if (!operand_has_use) + { + VERBOSE(EdgeConsistencyChecker) << "[ERROR] EDGE MISMATCH : Missing USE edge - Operand " + << operand_index << " to Operation " << index + << std::endl; + errors += 1; + } + } + catch (const std::out_of_range &e) + { + VERBOSE(EdgeConsistencyChecker) + << "[ERROR] OPERAND NOT FOUND : Operation " << index << " has Operand " + << operand_index << ", but the operand object is not present in the graph" << std::endl; + errors += 1; + } } for (auto operand_index : node.getOutputs()) { - auto &operand = graph.operands().at(operand_index); - mismatches += (operand.getDef().contains(index) ? 0 : 1); + try + { + auto &operand = graph.operands().at(operand_index); + if (operand.getDef() != index) + { + VERBOSE(EdgeConsistencyChecker) << "[ERROR] EDGE MISMATCH : Missing DEF edge - Operand " + << operand_index << " to Operation " << index + << std::endl; + errors += 1; + } + } + catch (const std::out_of_range &e) + { + VERBOSE(EdgeConsistencyChecker) + << "[ERROR] OPERAND NOT FOUND : Operation " << index << " has Operand " + << operand_index << ", but the operand object is not present in the graph" << std::endl; + errors += 1; + } } }); - return mismatches == 0; + + VERBOSE(EdgeConsistencyChecker) << "Total Number of errors : " << errors << std::endl; + + return errors == 0; } } // namespace verifier diff --git a/runtime/onert/core/src/ir/verifier/Verifier.h b/runtime/onert/core/src/ir/verifier/Verifier.h index 0bc22bc..0c7b57b 100644 --- a/runtime/onert/core/src/ir/verifier/Verifier.h +++ b/runtime/onert/core/src/ir/verifier/Verifier.h @@ -35,7 +35,7 @@ namespace verifier struct IVerifier { virtual ~IVerifier() = default; - virtual bool verify(const Graph &graph) const = 0; + virtual bool verify(const Graph &graph) const noexcept = 0; }; } // namespace verifier @@ -52,13 +52,13 @@ namespace verifier class DAGChecker : public IVerifier { public: - bool verify(const Graph &graph) const override; + bool verify(const Graph &graph) const noexcept override; }; class EdgeConsistencyChecker : public IVerifier { public: - bool verify(const Graph &graph) const override; + bool verify(const Graph &graph) const noexcept override; }; } // namespace verifier diff --git a/runtime/onert/core/src/util/EventCollector.cc b/runtime/onert/core/src/util/EventCollector.cc index 9ecc7e3..de37276 100644 --- a/runtime/onert/core/src/util/EventCollector.cc +++ b/runtime/onert/core/src/util/EventCollector.cc @@ -54,7 +54,8 @@ private: std::string _ts; }; -void emit_rusage(EventRecorder *rec, const std::string &ts) +#ifdef
DEBUG +inline void emit_rusage(EventRecorder *rec, const std::string &ts) { struct rusage ru; @@ -81,6 +82,7 @@ void emit_rusage(EventRecorder *rec, const std::string &ts) rec->emit(evt); } } +#endif } // namespace @@ -99,6 +101,9 @@ void EventCollector::onEvent(const Event &event) break; } - // Trace resource usage per each event notification +// TODO: Add resource measurement (e.g. RSS) +// when ready with low overhead in release build +#ifdef DEBUG emit_rusage(_rec, ts); +#endif } diff --git a/runtime/onert/core/src/util/EventRecorder.h b/runtime/onert/core/src/util/EventRecorder.h index 6ba0bc0..6eea069 100644 --- a/runtime/onert/core/src/util/EventRecorder.h +++ b/runtime/onert/core/src/util/EventRecorder.h @@ -74,7 +74,8 @@ private: private: std::mutex _mu; - WriteFormat _write_format{WriteFormat::CHROME_TRACING}; + // TODO: Allow user to control write_format + WriteFormat _write_format{WriteFormat::SNPE_BENCHMARK}; std::vector _duration_events; std::vector _counter_events; }; diff --git a/runtime/onert/core/src/util/ShapeInference.cc b/runtime/onert/core/src/util/ShapeInference.cc index ac795bb..9a24f8c 100644 --- a/runtime/onert/core/src/util/ShapeInference.cc +++ b/runtime/onert/core/src/util/ShapeInference.cc @@ -486,6 +486,20 @@ ir::Shape inferPadShape(const ir::Shape &in_shape, const int32_t *pad_buf, const return ret; } +ir::Shape inferResizeBilinearShape(const ir::Shape &in_shape, const int32_t output_height, + const int32_t output_width) +{ + assert(in_shape.rank() == 4); + ir::Shape ret(in_shape.rank()); + + ret.dim(0) = in_shape.dim(0); + ret.dim(1) = output_height; + ret.dim(2) = output_width; + ret.dim(3) = in_shape.dim(3); + + return ret; +} + template ir::Shape inferRangeShape(T start_val, T limit_val, T delta_val) { ir::Shape out_shape(static_cast(1)); diff --git a/runtime/onert/frontend/base_loader/include/base_loader.h b/runtime/onert/frontend/base_loader/include/base_loader.h index f5687ad..0f6a2a5 100644 --- a/runtime/onert/frontend/base_loader/include/base_loader.h +++ b/runtime/onert/frontend/base_loader/include/base_loader.h @@ -53,6 +53,8 @@ protected: using SubGraph = typename LoaderDomain::SubGraph; using Tensor = typename LoaderDomain::Tensor; using TensorType = typename LoaderDomain::TensorType; + using DimensionType = typename LoaderDomain::DimensionType; + using SparseIndexVector = typename LoaderDomain::SparseIndexVector; protected: bool isOptionalInputTensor(std::int32_t idx) { return idx == -1; } @@ -75,6 +77,13 @@ public: * @param file_path */ void loadFromFile(const char *file_path); + /** + * @brief Load a model from a buffer + * + * @param buffer buffer pointer + * @param size buffer size + */ + void loadFromBuffer(uint8_t *buffer, size_t size); protected: ~BaseLoader() = default; @@ -107,7 +116,6 @@ protected: void loadSoftmax(const Operator *op, ir::Graph &subg); void loadMaxPool2D(const Operator *op, ir::Graph &subg); void loadConcatenation(const Operator *op, ir::Graph &subg); - void loadInstanceNorm(const Operator *op, ir::Graph &subg); void loadFill(const Operator *op, ir::Graph &subg); void loadFC(const Operator *op, ir::Graph &subg); void loadAdd(const Operator *op, ir::Graph &subg); @@ -140,6 +148,7 @@ protected: void loadSqueeze(const Operator *op, ir::Graph &subg); void loadPrelu(const Operator *op, ir::Graph &subg); void loadSplit(const Operator *op, ir::Graph &subg); + void loadSplitV(const Operator *op, ir::Graph &subg); void loadSlice(const Operator *op, ir::Graph &subg); void loadStridedSlice(const Operator *op, ir::Graph &subg);
void loadUnpack(const Operator *op, ir::Graph &subg); @@ -165,12 +174,13 @@ protected: void loadTile(const Operator *op, ir::Graph &subg); void loadLogicalOr(const Operator *op, ir::Graph &subg); void loadRange(const Operator *op, ir::Graph &subg); - void loadBCQFullyConnected(const Operator *op, ir::Graph &subg); - void loadBCQGather(const Operator *op, ir::Graph &subg); void loadMatrixBandPart(const Operator *op, ir::Graph &subg); void loadBroadcastTo(const Operator *op, ir::Graph &subg); void loadFusedBatchNorm(const Operator *op, ir::Graph &subg); void loadLogSoftmax(const Operator *op, ir::Graph &subg); + void loadQuantize(const Operator *op, ir::Graph &subg); + void loadSpaceToDepth(const Operator *op, ir::Graph &subg); + void loadStatelessRandomUniform(const Operator *op, ir::Graph &subg); protected: // Base address for mapped region for loading (if needed) @@ -216,12 +226,20 @@ void BaseLoader::BaseLoader::loadFromFile(const ch _verifier = std::make_unique(reinterpret_cast(_base), size); loadModel(); - munmap(_base, size); close(_fd); } template +void BaseLoader::BaseLoader::loadFromBuffer(uint8_t *buffer, + size_t size) +{ + _base = buffer; + _verifier = std::make_unique(reinterpret_cast(_base), size); + loadModel(); +} + +template ir::Activation BaseLoader::BaseLoader::convertActivation( const ActivationFunctionType type) { @@ -299,6 +317,23 @@ void BaseLoader::BaseLoader::deallocateMmappedArea } } +/* Copied from tensorflow lite. Need to append copyright */ +template bool Copy(const T *data_ptr, std::vector &arr) +{ + if (data_ptr->values() == nullptr) + { + return false; + } + + int size = data_ptr->values()->size(); + arr.reserve(size); + for (int i = 0; i < size; i++) + { + arr.emplace_back(static_cast(data_ptr->values()->Get(i))); + } + return true; +} + template ir::OperandIndex BaseLoader::loadOperand(const Tensor *tensor, ir::Graph &subg) @@ -355,6 +390,61 @@ ir::OperandIndex BaseLoader::loadOperand(const Ten } // Create TypeInfo ir::TypeInfo type_info(data_type, scale, zero_point); + // Sparsity + auto src_sparsity = tensor->sparsity(); + if (src_sparsity != nullptr) + { + std::vector w1_segments; + std::vector w1_indices; + // ignore traversal_order, block_map + // load metadata + const size_t dim_metadata_size = src_sparsity->dim_metadata()->size(); + if (dim_metadata_size != 2) + throw std::runtime_error("sparse tensor is supported only for 2D"); + const auto *src_metadata = src_sparsity->dim_metadata()->Get(0); + if (src_metadata->format() != DimensionType::DimensionType_DENSE) + throw std::runtime_error("sparse tensor dim[0] is not DENSE"); + src_metadata = src_sparsity->dim_metadata()->Get(1); + if (src_metadata->format() != DimensionType::DimensionType_SPARSE_CSR) + throw std::runtime_error("sparse tensor dim[0] is not SPARSE_CSR"); + + auto ParseSparseIndexVector = [src_metadata, &w1_segments, &w1_indices]() { + if (src_metadata->array_segments() == nullptr || src_metadata->array_indices() == nullptr) + return false; + bool status = true; + switch (src_metadata->array_segments_type()) + { + case SparseIndexVector::SparseIndexVector_Int32Vector: + status = Copy(src_metadata->array_segments_as_Int32Vector(), w1_segments); + break; + case SparseIndexVector::SparseIndexVector_Uint16Vector: + status = Copy(src_metadata->array_segments_as_Uint16Vector(), w1_segments); + break; + case SparseIndexVector::SparseIndexVector_Uint8Vector: + status = Copy(src_metadata->array_segments_as_Uint8Vector(), w1_segments); + break; + default: + return false; + } + if (status != true) 
+ return false; + switch (src_metadata->array_indices_type()) + { + case SparseIndexVector::SparseIndexVector_Int32Vector: + return Copy(src_metadata->array_indices_as_Int32Vector(), w1_indices); + case SparseIndexVector::SparseIndexVector_Uint16Vector: + return Copy(src_metadata->array_indices_as_Uint16Vector(), w1_indices); + case SparseIndexVector::SparseIndexVector_Uint8Vector: + return Copy(src_metadata->array_indices_as_Uint8Vector(), w1_indices); + default: + break; + } + return false; + }; + if (ParseSparseIndexVector() == false) + throw std::runtime_error("Error during parsing sparsity index information"); + type_info.sparse2DMetadata(std::move(w1_segments), std::move(w1_indices)); + } // Create operand const auto operand_index = subg.addOperand(shape, type_info); @@ -363,18 +453,17 @@ ir::OperandIndex BaseLoader::loadOperand(const Ten if (data != nullptr) { using std::ptrdiff_t; - size_t data_size = data->size(); - ptrdiff_t unaligned_offset_start = data->data() - _base; - ptrdiff_t offset_end = unaligned_offset_start + data_size; - - // Calculated aligned offset from base address of mapped region - // munmap accepts memory address which is a multiple of the pagesize - ptrdiff_t aligned_offset_start = (unaligned_offset_start / _pagesize) * _pagesize; - size_t mmap_size = offset_end - aligned_offset_start; - - auto ptr = std::make_unique(_fd, aligned_offset_start, mmap_size, - unaligned_offset_start, data_size); - subg.setOperandValue(operand_index, std::move(ptr)); + std::unique_ptr data_obj; + if (_fd == -1) // Model is from memory + { + data_obj = std::make_unique(data->data(), data->size()); + } + else // Model is loaded(mmap'd) from a file + { + data_obj = std::make_unique(data->data(), data->size()); + deallocateMmappedArea(const_cast(data->data()), data->size()); + } + subg.setOperandValue(operand_index, std::move(data_obj)); } // Name unused @@ -592,25 +681,6 @@ void BaseLoader::loadConcatenation(const Operator } template -void BaseLoader::loadInstanceNorm(const Operator *op, ir::Graph &subg) -{ - ir::OperandIndexSequence inputs; - ir::OperandIndexSequence outputs; - - loadOperationIO(op, inputs, outputs); - - ir::operation::InstanceNorm::Param param; - const auto *options = op->builtin_options_as_InstanceNormOptions(); - - param.activation = convertActivation(options->fused_activation_function()); - // Use default value 1e-5 if value of epsilon is zero - param.epsilon = options->epsilon() == 0.f ? 
1e-5 : options->epsilon(); - - std::unique_ptr new_op(new ir::operation::InstanceNorm(inputs, outputs, param)); - subg.addOperation(std::move(new_op)); -} - -template void BaseLoader::loadFill(const Operator *op, ir::Graph &subg) { ir::OperandIndexSequence inputs; @@ -778,6 +848,8 @@ void BaseLoader::loadResizeBilinear(const Operator ir::operation::ResizeBilinear::Param param; param.height_out = size_v[0]; param.width_out = size_v[1]; + param.align_corners = op->builtin_options_as_ResizeBilinearOptions()->align_corners(); + param.half_pixel_centers = op->builtin_options_as_ResizeBilinearOptions()->half_pixel_centers(); std::unique_ptr new_op(new ir::operation::ResizeBilinear({input}, outputs, param)); subg.addOperation(std::move(new_op)); @@ -1046,81 +1118,61 @@ void BaseLoader::loadBatchToSpaceND(const Operator ir::OperandIndexSequence outputs; loadOperationIO(op, inputs, outputs); - auto input = inputs.at(0); - auto block_shape = inputs.at(1); - auto crops = inputs.at(2); - - if (!subg.operands().at(crops).isConstant()) - throw std::runtime_error("BatchToSpaceND: non-constant 'crops' is not supported."); - std::vector crops_v = subg.operands().at(crops).template asVector(); - assert(crops_v.size() == 4); - if (crops_v != std::vector{0, 0, 0, 0}) - throw std::runtime_error("BatchToSpaceND: 'crops' other than {0, 0, 0, 0} is not supported."); - - std::unique_ptr new_op{ - new ir::operation::BatchToSpaceND{{input, block_shape}, outputs}}; + std::unique_ptr new_op{new ir::operation::BatchToSpaceND{inputs, outputs}}; subg.addOperation(std::move(new_op)); } template -void BaseLoader::loadBCQGather(const Operator *op, ir::Graph &subg) +void BaseLoader::loadMatrixBandPart(const Operator *op, + ir::Graph &subg) { ir::OperandIndexSequence inputs; ir::OperandIndexSequence outputs; loadOperationIO(op, inputs, outputs); - ir::operation::BCQGather::Param param; - const auto *options = op->builtin_options_as_BCQGatherOptions(); - param.input_hidden_size = options->input_hidden_size(); - param.axis = options->axis(); - - std::unique_ptr new_op(new ir::operation::BCQGather(inputs, outputs, param)); + std::unique_ptr new_op(new ir::operation::MatrixBandPart(inputs, outputs)); subg.addOperation(std::move(new_op)); } template -void BaseLoader::loadBCQFullyConnected(const Operator *op, - ir::Graph &subg) +void BaseLoader::loadBroadcastTo(const Operator *op, ir::Graph &subg) { ir::OperandIndexSequence inputs; ir::OperandIndexSequence outputs; loadOperationIO(op, inputs, outputs); - ir::operation::BCQFullyConnected::Param param; - const auto *options = op->builtin_options_as_BCQFullyConnectedOptions(); - param.weights_hidden_size = options->weights_hidden_size(); - param.activation = convertActivation(options->fused_activation_function()); - - std::unique_ptr new_op( - new ir::operation::BCQFullyConnected(inputs, outputs, param)); + std::unique_ptr new_op(new ir::operation::BroadcastTo(inputs, outputs)); subg.addOperation(std::move(new_op)); } - template -void BaseLoader::loadMatrixBandPart(const Operator *op, - ir::Graph &subg) +void BaseLoader::loadSpaceToDepth(const Operator *op, ir::Graph &subg) { ir::OperandIndexSequence inputs; ir::OperandIndexSequence outputs; + ir::operation::SpaceToDepth::Param param; + + const auto *options = op->builtin_options_as_SpaceToDepthOptions(); + + param.block_size = options->block_size(); loadOperationIO(op, inputs, outputs); - std::unique_ptr new_op(new ir::operation::MatrixBandPart(inputs, outputs)); + std::unique_ptr new_op(new ir::operation::SpaceToDepth(inputs, 
outputs, param)); subg.addOperation(std::move(new_op)); } template -void BaseLoader::loadBroadcastTo(const Operator *op, ir::Graph &subg) +void BaseLoader::loadStatelessRandomUniform(const Operator *op, + ir::Graph &subg) { ir::OperandIndexSequence inputs; ir::OperandIndexSequence outputs; - loadOperationIO(op, inputs, outputs); - std::unique_ptr new_op(new ir::operation::BroadcastTo(inputs, outputs)); + std::unique_ptr new_op(new ir::operation::StatelessRandomUniform(inputs, outputs)); subg.addOperation(std::move(new_op)); } @@ -1144,7 +1196,8 @@ void BaseLoader::loadCustom(const Operator *op, ir BatchMatMul, Einsum, BroadcastTo, - FusedBatchNorm + FusedBatchNorm, + StatelessRandomUniform }; // Mapping from custom op name string to BuiltinOP enum @@ -1156,6 +1209,7 @@ void BaseLoader::loadCustom(const Operator *op, ir {"Einsum", BuiltinOP::Einsum}, {"FusedBatchNormV3", BuiltinOP::FusedBatchNorm}, {"BroadcastTo", BuiltinOP::BroadcastTo}, + {"StatelessRandomUniform", BuiltinOP::StatelessRandomUniform}, }; try @@ -1185,6 +1239,9 @@ void BaseLoader::loadCustom(const Operator *op, ir case BuiltinOP::FusedBatchNorm: loadFusedBatchNorm(op, subg); break; + case BuiltinOP::StatelessRandomUniform: + loadStatelessRandomUniform(op, subg); + break; default: throw std::runtime_error{ "Loader: Custom OP map is defined but operation loader function is not defined"}; @@ -1274,6 +1331,23 @@ void BaseLoader::loadSplit(const Operator *op, ir: } template +void BaseLoader::loadSplitV(const Operator *op, ir::Graph &subg) +{ + ir::OperandIndexSequence inputs; + ir::OperandIndexSequence outputs; + + loadOperationIO(op, inputs, outputs); + + ir::operation::SplitV::Param param{}; + + const auto *options = op->builtin_options_as_SplitVOptions(); + param.num_splits = options->num_splits(); + + std::unique_ptr new_op(new ir::operation::SplitV(inputs, outputs, param)); + subg.addOperation(std::move(new_op)); +} + +template void BaseLoader::loadSlice(const Operator *op, ir::Graph &subg) { ir::OperandIndexSequence inputs; @@ -1743,6 +1817,18 @@ void BaseLoader::loadLogSoftmax(const Operator *op } template +void BaseLoader::loadQuantize(const Operator *op, ir::Graph &subg) +{ + ir::OperandIndexSequence inputs; + ir::OperandIndexSequence outputs; + + loadOperationIO(op, inputs, outputs); + + std::unique_ptr new_op(new ir::operation::Quantize(inputs, outputs)); + subg.addOperation(std::move(new_op)); +} + +template void BaseLoader::loadOperation(const Operator *op, ir::Graph &subg) { const auto builtin_op = _model->operator_codes()->Get(op->opcode_index())->builtin_code(); @@ -1870,6 +1956,9 @@ void BaseLoader::loadOperation(const Operator *op, case BuiltinOperator::BuiltinOperator_SPLIT: loadSplit(op, subg); return; + case BuiltinOperator::BuiltinOperator_SPLIT_V: + loadSplitV(op, subg); + return; case BuiltinOperator::BuiltinOperator_SLICE: loadSlice(op, subg); return; @@ -1959,6 +2048,12 @@ void BaseLoader::loadOperation(const Operator *op, case BuiltinOperator::BuiltinOperator_LOG_SOFTMAX: loadLogSoftmax(op, subg); return; + case BuiltinOperator::BuiltinOperator_QUANTIZE: + loadQuantize(op, subg); + return; + case BuiltinOperator::BuiltinOperator_SPACE_TO_DEPTH: + loadSpaceToDepth(op, subg); + return; default: throw std::runtime_error( std::string("Unsupported operation: ").append(EnumNameBuiltinOperator(builtin_op))); diff --git a/runtime/onert/frontend/circle/CMakeLists.txt b/runtime/onert/frontend/circle/CMakeLists.txt index a112def..8bcf85d 100644 --- a/runtime/onert/frontend/circle/CMakeLists.txt +++ 
b/runtime/onert/frontend/circle/CMakeLists.txt @@ -10,5 +10,6 @@ target_include_directories(circle_loader PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/incl target_link_libraries(circle_loader PUBLIC onert_core) target_link_libraries(circle_loader PRIVATE base_loader nnfw_common nnfw_coverage) +target_link_libraries(circle_loader PRIVATE circle_schema) install(TARGETS circle_loader DESTINATION lib) diff --git a/runtime/onert/frontend/circle/include/circle_loader.h b/runtime/onert/frontend/circle/include/circle_loader.h index 8ed4b0c..675a5b3 100644 --- a/runtime/onert/frontend/circle/include/circle_loader.h +++ b/runtime/onert/frontend/circle/include/circle_loader.h @@ -26,6 +26,7 @@ namespace onert namespace circle_loader { std::unique_ptr loadModel(const char *filename); +std::unique_ptr loadModel(uint8_t *buffer, size_t size); } // namespace circle_loader } // namespace onert diff --git a/runtime/onert/frontend/circle/src/circle_loader.cc b/runtime/onert/frontend/circle/src/circle_loader.cc index 94222e9..96dd469 100644 --- a/runtime/onert/frontend/circle/src/circle_loader.cc +++ b/runtime/onert/frontend/circle/src/circle_loader.cc @@ -53,6 +53,8 @@ struct LoaderDomain using Tensor = circle::Tensor; using TensorType = circle::TensorType; using SubGraph = circle::SubGraph; + using DimensionType = circle::DimensionType; + using SparseIndexVector = circle::SparseIndexVector; static const char *EnumNameBuiltinOperator(BuiltinOperator e) { @@ -69,6 +71,11 @@ struct LoaderDomain class CircleLoader final : public base_loader::BaseLoader { +protected: + void loadInstanceNorm(const Operator *op, ir::Graph &subg); + void loadBCQFullyConnected(const Operator *op, ir::Graph &subg); + void loadBCQGather(const Operator *op, ir::Graph &subg); + public: using BaseLoader::BaseLoader; @@ -138,6 +145,57 @@ public: } }; +void CircleLoader::loadInstanceNorm(const Operator *op, ir::Graph &subg) +{ + ir::OperandIndexSequence inputs; + ir::OperandIndexSequence outputs; + + loadOperationIO(op, inputs, outputs); + + ir::operation::InstanceNorm::Param param; + const auto *options = op->builtin_options_as_InstanceNormOptions(); + + param.activation = convertActivation(options->fused_activation_function()); + // Use default value 1e-5 if value of epsilon is zero + param.epsilon = options->epsilon() == 0.f ? 
1e-5 : options->epsilon(); + + std::unique_ptr new_op(new ir::operation::InstanceNorm(inputs, outputs, param)); + subg.addOperation(std::move(new_op)); +} + +void CircleLoader::loadBCQGather(const Operator *op, ir::Graph &subg) +{ + ir::OperandIndexSequence inputs; + ir::OperandIndexSequence outputs; + + loadOperationIO(op, inputs, outputs); + + ir::operation::BCQGather::Param param; + const auto *options = op->builtin_options_as_BCQGatherOptions(); + param.input_hidden_size = options->input_hidden_size(); + param.axis = options->axis(); + + std::unique_ptr new_op(new ir::operation::BCQGather(inputs, outputs, param)); + subg.addOperation(std::move(new_op)); +} + +void CircleLoader::loadBCQFullyConnected(const Operator *op, ir::Graph &subg) +{ + ir::OperandIndexSequence inputs; + ir::OperandIndexSequence outputs; + + loadOperationIO(op, inputs, outputs); + + ir::operation::BCQFullyConnected::Param param; + const auto *options = op->builtin_options_as_BCQFullyConnectedOptions(); + param.weights_hidden_size = options->weights_hidden_size(); + param.activation = convertActivation(options->fused_activation_function()); + + std::unique_ptr new_op( + new ir::operation::BCQFullyConnected(inputs, outputs, param)); + subg.addOperation(std::move(new_op)); +} + } // namespace std::unique_ptr loadModel(const char *filename) @@ -148,5 +206,13 @@ std::unique_ptr loadModel(const char *filename) return subgraphs; } +std::unique_ptr loadModel(uint8_t *buffer, size_t size) +{ + auto subgraphs = std::make_unique(); + CircleLoader loader(subgraphs); + loader.loadFromBuffer(buffer, size); + return subgraphs; +} + } // namespace circle_loader } // namespace onert diff --git a/runtime/onert/frontend/circle_schema/CMakeLists.txt b/runtime/onert/frontend/circle_schema/CMakeLists.txt new file mode 100644 index 0000000..208103f --- /dev/null +++ b/runtime/onert/frontend/circle_schema/CMakeLists.txt @@ -0,0 +1,7 @@ +add_library(circle_schema INTERFACE) + +nnfw_find_package(FlatBuffers REQUIRED) + +target_link_libraries(circle_schema INTERFACE flatbuffers::flatbuffers) + +target_include_directories(circle_schema INTERFACE include) diff --git a/runtime/onert/frontend/circle/src/circle_schema_generated.h b/runtime/onert/frontend/circle_schema/include/circle_schema_generated.h similarity index 100% rename from runtime/onert/frontend/circle/src/circle_schema_generated.h rename to runtime/onert/frontend/circle_schema/include/circle_schema_generated.h diff --git a/runtime/onert/frontend/nnapi/model.cc b/runtime/onert/frontend/nnapi/model.cc index 337bc3a..8c7bd17 100644 --- a/runtime/onert/frontend/nnapi/model.cc +++ b/runtime/onert/frontend/nnapi/model.cc @@ -294,7 +294,7 @@ int ANeuralNetworksModel_addOperationEx(ANeuralNetworksModel *model, } const ANeuralNetworksOperationTypeEx FIRST_OPERATION = ANEURALNETWORKS_CAST_EX; - const ANeuralNetworksOperationTypeEx LAST_OPERATION = ANEURALNETWORKS_ADDV2_EX; + const ANeuralNetworksOperationTypeEx LAST_OPERATION = ANEURALNETWORKS_SPLIT_V_EX; if ((type < FIRST_OPERATION) || (type > LAST_OPERATION)) { VERBOSE(NNAPI::Model) << "addOperation: Invalid operation type" << std::endl; diff --git a/runtime/onert/frontend/nnapi/wrapper/OperationFactory.cc b/runtime/onert/frontend/nnapi/wrapper/OperationFactory.cc index 94791f8..8ff6cbb 100644 --- a/runtime/onert/frontend/nnapi/wrapper/OperationFactory.cc +++ b/runtime/onert/frontend/nnapi/wrapper/OperationFactory.cc @@ -106,30 +106,122 @@ getReduceGenerator(const onert::ir::operation::Reduce::ReduceType reduce_type) }; } -} // namespace 
+template +Operation *CreateSimpleUnaryOp(const OperationFactory::Param &init_param, Operands &) +{ + assert(init_param.input_count == 1 && init_param.output_count == 1); -OperationFactory &OperationFactory::get() + OperandIndexSequence outputs{init_param.outputs[0]}; + + // Each input should be interpreted as follows: + // + // 0 -> Input Tensor Index + OperandIndexSequence inputs{init_param.inputs[0]}; + + return new T{inputs, outputs}; +} + +// A generator function for binary ops with no params +template +Operation *createSimpleBinaryOp(const OperationFactory::Param &init_param, Operands &) { - static OperationFactory factory; - return factory; + assert(init_param.input_count == 2 && init_param.output_count == 1); + + OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]}; + OperandIndexSequence outputs{init_param.outputs[0]}; + + return new T{inputs, outputs}; } -OperationFactory::OperationFactory() +// A generator function for Pool2D ops +template +Operation *createPool2DOp(const OperationFactory::Param &init_param, Operands &operands) { - _map[ANEURALNETWORKS_BATCH_TO_SPACE_ND] = [](const OperationFactory::Param &init_param, - Operands &) { - assert(init_param.input_count == 2 && init_param.output_count == 1); + assert(init_param.input_count == 7 || init_param.input_count == 10); + assert(init_param.output_count == 1); - OperandIndexSequence outputs{init_param.outputs[0]}; + // In common + // 0 -> IFM Tensor Index + OperandIndexSequence inputs{init_param.inputs[0]}; + OperandIndexSequence outputs{init_param.outputs[0]}; + typename T::Param param; + if (init_param.input_count == 7) // support implicit padding + { // Each input should be interpreted as follows: // - // 0 -> Input Tensor Index - // 1 -> Block size Index - OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]}; + // 1 -> Padding Code (ANEURALNETWORKS_PADDING_SAME or ANEURALNETWORKS_PADDING_VALID) Index + // 2 -> Horizontal (over width) Stride Index + // 3 -> Vertical (over height) Stride Index + // 4 -> Filter Width Index + // 5 -> Filter Height Index + // 6 -> FuseCode (activation) Index - return new operation::BatchToSpaceND{inputs, outputs}; - }; + const auto padding_index = OperandIndex{init_param.inputs[1]}; + const auto hstride_index = OperandIndex{init_param.inputs[2]}; + const auto vstride_index = OperandIndex{init_param.inputs[3]}; + const auto kw_index = OperandIndex{init_param.inputs[4]}; + const auto kh_index = OperandIndex{init_param.inputs[5]}; + const auto activation_index = OperandIndex{init_param.inputs[6]}; + + param.padding.type = + NNAPIConvert::getPaddingType(operands.at(padding_index).asScalar()); + param.stride = makeStride(operands, hstride_index, vstride_index); + param.kw = getUint32Scalar(operands, kw_index); + param.kh = operands.at(kh_index).asScalar(); + param.activation = + NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar()); + } + else // support explicit padding + { + // Each input should be interpreted as follows: + // + // 1 -> Padding_left index + // 2 -> Padding_right index + // 3 -> Padding_top index + // 4 -> Padding_bottom index + // 5 -> Horizontal (over width) Stride Index + // 6 -> Vertical (over height) Stride Index + // 7 -> Filter Width Index + // 8 -> Filter Height Index + // 9 -> FuseCode (activation) Index + + const auto padding_left_index = OperandIndex{init_param.inputs[1]}; + const auto padding_right_index = OperandIndex{init_param.inputs[2]}; + const auto padding_top_index =
OperandIndex{init_param.inputs[3]}; + const auto padding_bottom_index = OperandIndex{init_param.inputs[4]}; + const auto hstride_index = OperandIndex{init_param.inputs[5]}; + const auto vstride_index = OperandIndex{init_param.inputs[6]}; + const auto kw_index = OperandIndex{init_param.inputs[7]}; + const auto kh_index = OperandIndex{init_param.inputs[8]}; + const auto activation_index = OperandIndex{init_param.inputs[9]}; + + param.padding.type = PaddingType::EXPLICIT; + param.padding.param = makeExplicitPadding(operands, padding_left_index, padding_right_index, + padding_top_index, padding_bottom_index); + param.stride = makeStride(operands, hstride_index, vstride_index); + param.kw = getUint32Scalar(operands, kw_index); + param.kh = getUint32Scalar(operands, kh_index); + param.activation = + NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar()); + } + return new T{inputs, outputs, param}; +} + +} // namespace + +OperationFactory &OperationFactory::get() +{ + static OperationFactory factory; + return factory; +} + +OperationFactory::OperationFactory() +{ + // Each input should be interpreted as follows: + // 0 -> Input Tensor Index + // 1 -> Block size Index + _map[ANEURALNETWORKS_BATCH_TO_SPACE_ND] = createSimpleBinaryOp; _map[ANEURALNETWORKS_DEPTHWISE_CONV_2D] = [](const OperationFactory::Param &init_param, Operands &operands) { @@ -203,153 +295,9 @@ OperationFactory::OperationFactory() return new operation::DepthwiseConv2D{inputs, outputs, param}; }; - _map[ANEURALNETWORKS_MAX_POOL_2D] = [](const OperationFactory::Param &init_param, - Operands &operands) { - assert(init_param.input_count == 7 || init_param.input_count == 10); - assert(init_param.output_count == 1); + _map[ANEURALNETWORKS_MAX_POOL_2D] = createPool2DOp; - // In common - // 0 -> IFM Tensor Index - OperandIndexSequence inputs{init_param.inputs[0]}; - OperandIndexSequence outputs{init_param.outputs[0]}; - - operation::MaxPool2D::Param param; - if (init_param.input_count == 7) // support implicit padding - { - // Each input should be interpreted as follows: - // - // 1 -> Padding Code (ANEURALNETWORKS_PADDING_SAME or ANEURALNETWORKS_PADDING_VALID) Index - // 2 -> Horizontal (over width) Stride Index - // 3 -> Vertial (over height) Stride Index - // 4 -> Filter Width Index - // 5 -> Filter Height Index - // 6 -> FuseCode (activation) Index - - const auto padding_index = OperandIndex{init_param.inputs[1]}; - const auto hstride_index = OperandIndex{init_param.inputs[2]}; - const auto vstride_index = OperandIndex{init_param.inputs[3]}; - const auto kw_index = OperandIndex{init_param.inputs[4]}; - const auto kh_index = OperandIndex{init_param.inputs[5]}; - const auto activation_index = OperandIndex{init_param.inputs[6]}; - - param.padding.type = - NNAPIConvert::getPaddingType(operands.at(padding_index).asScalar()); - param.stride = makeStride(operands, hstride_index, vstride_index); - param.kw = getUint32Scalar(operands, kw_index); - param.kh = operands.at(kh_index).asScalar(); - param.activation = - NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar()); - } - else if (init_param.input_count == 10) // support explicit padding - { - // Each input should be interpreted as follows: - // - // 1 -> Padding_left index - // 2 -> Padding_right index - // 3 -> Padding_top index - // 4 -> Padding_bottom index - // 5 -> Horizontal (over width) Stride Index - // 6 -> Vertial (over height) Stride Index - // 7 -> Filter Width Index - // 8 -> Filter Height Index - // 9 -> FuseCode (activation) Index - - 
const auto padding_left_index = OperandIndex{init_param.inputs[1]}; - const auto padding_right_index = OperandIndex{init_param.inputs[2]}; - const auto padding_top_index = OperandIndex{init_param.inputs[3]}; - const auto padding_bottom_index = OperandIndex{init_param.inputs[4]}; - const auto hstride_index = OperandIndex{init_param.inputs[5]}; - const auto vstride_index = OperandIndex{init_param.inputs[6]}; - const auto kw_index = OperandIndex{init_param.inputs[7]}; - const auto kh_index = OperandIndex{init_param.inputs[8]}; - const auto activation_index = OperandIndex{init_param.inputs[9]}; - - param.padding.type = PaddingType::EXPLICIT; - param.padding.param = makeExplicitPadding(operands, padding_left_index, padding_right_index, - padding_top_index, padding_bottom_index); - param.stride = makeStride(operands, hstride_index, vstride_index); - param.kw = getUint32Scalar(operands, kw_index); - param.kh = getUint32Scalar(operands, kh_index); - param.activation = - NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar()); - } - return new operation::MaxPool2D{inputs, outputs, param}; - }; - - _map[ANEURALNETWORKS_AVERAGE_POOL_2D] = [](const OperationFactory::Param &init_param, - Operands &operands) { - // TODO We may reuse code here for MAX_POOL_2D. Seems like these two are identical - assert(init_param.input_count == 7 || init_param.input_count == 10); - assert(init_param.output_count == 1); - - // In common - // 0 -> IFM Tensor Index - OperandIndexSequence inputs{init_param.inputs[0]}; - OperandIndexSequence outputs{init_param.outputs[0]}; - - operation::AvgPool2D::Param param; - if (init_param.input_count == 7) // support implicit padding - { - // Each input should be interpreted as follows: - // - // 1 -> Padding Code (ANEURALNETWORKS_PADDING_SAME or ANEURALNETWORKS_PADDING_VALID) Index - // 2 -> Horizontal (over width) Stride Index - // 3 -> Vertial (over height) Stride Index - // 4 -> Filter Width Index - // 5 -> Filter Height Index - // 6 -> FuseCode (activation) Index - - const auto padding_index = OperandIndex{init_param.inputs[1]}; - const auto hstride_index = OperandIndex{init_param.inputs[2]}; - const auto vstride_index = OperandIndex{init_param.inputs[3]}; - const auto kw_index = OperandIndex{init_param.inputs[4]}; - const auto kh_index = OperandIndex{init_param.inputs[5]}; - const auto activation_index = OperandIndex{init_param.inputs[6]}; - - param.padding.type = - NNAPIConvert::getPaddingType(operands.at(padding_index).asScalar()); - param.stride = makeStride(operands, hstride_index, vstride_index); - param.kw = getUint32Scalar(operands, kw_index); - param.kh = getUint32Scalar(operands, kh_index); - param.activation = - NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar()); - } - else if (init_param.input_count == 10) // support explicit padding - { - // Each input should be interpreted as follows: - // - // 1 -> Padding_left index - // 2 -> Padding_right index - // 3 -> Padding_top index - // 4 -> Padding_bottom index - // 5 -> Horizontal (over width) Stride Index - // 6 -> Vertial (over height) Stride Index - // 7 -> Filter Width Index - // 8 -> Filter Height Index - // 9 -> FuseCode (activation) Index - - const auto padding_left_index = OperandIndex{init_param.inputs[1]}; - const auto padding_right_index = OperandIndex{init_param.inputs[2]}; - const auto padding_top_index = OperandIndex{init_param.inputs[3]}; - const auto padding_bottom_index = OperandIndex{init_param.inputs[4]}; - const auto hstride_index = 
OperandIndex{init_param.inputs[5]}; - const auto vstride_index = OperandIndex{init_param.inputs[6]}; - const auto kw_index = OperandIndex{init_param.inputs[7]}; - const auto kh_index = OperandIndex{init_param.inputs[8]}; - const auto activation_index = OperandIndex{init_param.inputs[9]}; - - param.padding.type = PaddingType::EXPLICIT; - param.padding.param = makeExplicitPadding(operands, padding_left_index, padding_right_index, - padding_top_index, padding_bottom_index); - param.stride = makeStride(operands, hstride_index, vstride_index); - param.kw = getUint32Scalar(operands, kw_index); - param.kh = getUint32Scalar(operands, kh_index); - param.activation = - NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar()); - } - - return new operation::AvgPool2D{inputs, outputs, param}; - }; + _map[ANEURALNETWORKS_AVERAGE_POOL_2D] = createPool2DOp; _map[ANEURALNETWORKS_CONCATENATION] = [](const OperationFactory::Param &init_param, Operands &operands) { @@ -724,44 +672,11 @@ OperationFactory::OperationFactory() return new operation::Squeeze{inputs, outputs, param}; }; - _map[ANEURALNETWORKS_TANH] = [](const OperationFactory::Param &init_param, Operands &) { - assert(init_param.input_count == 1 && init_param.output_count == 1); - - OperandIndexSequence outputs{init_param.outputs[0]}; - - // Each input should be interpreted as follows: - // - // 0 -> Input Tensor Index - OperandIndexSequence inputs{init_param.inputs[0]}; - - return new operation::Tanh{inputs, outputs}; - }; - - _map[ANEURALNETWORKS_LOG] = [](const OperationFactory::Param &init_param, Operands &) { - assert(init_param.input_count == 1 && init_param.output_count == 1); - - OperandIndexSequence outputs{init_param.outputs[0]}; - - // Each input should be interpreted as follows: - // - // 0 -> Input Tensor Index - OperandIndexSequence inputs{init_param.inputs[0]}; - - return new operation::Log{inputs, outputs}; - }; - - _map[ANEURALNETWORKS_LOGISTIC] = [](const OperationFactory::Param &init_param, Operands &) { - assert(init_param.input_count == 1 && init_param.output_count == 1); - - OperandIndexSequence outputs{init_param.outputs[0]}; + _map[ANEURALNETWORKS_TANH] = CreateSimpleUnaryOp; - // Each input should be interpreted as follows: - // - // 0 -> Input Tensor Index - OperandIndexSequence inputs{init_param.inputs[0]}; + _map[ANEURALNETWORKS_LOG] = CreateSimpleUnaryOp; - return new operation::Logistic{inputs, outputs}; - }; + _map[ANEURALNETWORKS_LOGISTIC] = CreateSimpleUnaryOp; _map[ANEURALNETWORKS_DIV] = [](const OperationFactory::Param &init_param, Operands &operands) { assert(init_param.input_count == 3 && init_param.output_count == 1); @@ -784,36 +699,16 @@ OperationFactory::OperationFactory() return new operation::Div{inputs, outputs, param}; }; - _map[ANEURALNETWORKS_EXP] = [](const OperationFactory::Param &init_param, Operands &) { - assert(init_param.input_count == 1 && init_param.output_count == 1); - - OperandIndexSequence outputs{init_param.outputs[0]}; - - // Each input should be interpreted as follows: - // - // 0 -> Input Tensor Index - OperandIndexSequence inputs{init_param.inputs[0]}; - - return new operation::Exp{inputs, outputs}; - }; + _map[ANEURALNETWORKS_EXP] = CreateSimpleUnaryOp; // ANEURALNETWORKS_EXP_EX is deprecated // TODO Remove ANEURALNETWORKS_EXP_EX _map[ANEURALNETWORKS_EXP_EX] = _map[ANEURALNETWORKS_EXP]; - _map[ANEURALNETWORKS_EXPAND_DIMS] = [](const OperationFactory::Param &init_param, Operands &) { - assert(init_param.input_count == 2 && init_param.output_count == 1); - - 
OperandIndexSequence outputs{init_param.outputs[0]}; - - // Each input should be interpreted as follows: - // - // 0 -> Input Tensor Index - // 1 -> Axis Tensor Index - OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]}; - - return new operation::ExpandDims{inputs, outputs}; - }; + // Each input should be interpreted as follows: + // 0 -> Input Tensor Index + // 1 -> Axis Tensor Index + _map[ANEURALNETWORKS_EXPAND_DIMS] = createSimpleBinaryOp; _map[ANEURALNETWORKS_GREATER] = [](const OperationFactory::Param &init_param, Operands &) { assert(init_param.input_count == 2 && init_param.output_count == 1); @@ -982,19 +877,7 @@ OperationFactory::OperationFactory() return new operation::Comparison{inputs, outputs, param}; }; - _map[ANEURALNETWORKS_LOGICAL_AND] = [](const OperationFactory::Param &init_param, Operands &) { - assert(init_param.input_count == 2 && init_param.output_count == 1); - - OperandIndexSequence outputs{init_param.outputs[0]}; - - // Each input should be interpreted as follows: - // - // 0 -> input0 Tensor Index - // 1 -> input1 Tensor Index - OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]}; - - return new operation::LogicalAnd{inputs, outputs}; - }; + _map[ANEURALNETWORKS_LOGICAL_AND] = createSimpleBinaryOp; // ANEURALNETWORKS_LOGICAL_AND_EX is deprecated // TODO Remove ANEURALNETWORKS_LOGICAL_AND_EX @@ -1018,18 +901,7 @@ OperationFactory::OperationFactory() return new operation::LogicalAnd{inputs, outputs}; }; - _map[ANEURALNETWORKS_RSQRT] = [](const OperationFactory::Param &init_param, Operands &) { - assert(init_param.input_count == 1 && init_param.output_count == 1); - - OperandIndexSequence outputs{init_param.outputs[0]}; - - // Each input should be interpreted as follows: - // - // 0 -> Input Tensor Index - OperandIndexSequence inputs{init_param.inputs[0]}; - - return new operation::RSQRT{inputs, outputs}; - }; + _map[ANEURALNETWORKS_RSQRT] = CreateSimpleUnaryOp; _map[ANEURALNETWORKS_SELECT] = [](const OperationFactory::Param &init_param, Operands &) { assert(init_param.input_count == 3 && init_param.output_count == 1); @@ -1065,18 +937,7 @@ OperationFactory::OperationFactory() // TODO Remove ANEURALNETWORKS_RSQRT_EX _map[ANEURALNETWORKS_RSQRT_EX] = _map[ANEURALNETWORKS_RSQRT]; - _map[ANEURALNETWORKS_RELU] = [](const OperationFactory::Param &init_param, Operands &) { - assert(init_param.input_count == 1 && init_param.output_count == 1); - - OperandIndexSequence outputs{init_param.outputs[0]}; - - // Each input should be interpreted as follows: - // - // 0 -> Input Tensor Index - OperandIndexSequence inputs{init_param.inputs[0]}; - - return new operation::ReLU{inputs, outputs}; - }; + _map[ANEURALNETWORKS_RELU] = CreateSimpleUnaryOp; _map[ANEURALNETWORKS_RESIZE_BILINEAR] = [](const OperationFactory::Param &init_param, Operands &operands) { @@ -1094,35 +955,14 @@ OperationFactory::OperationFactory() operation::ResizeBilinear::Param param; param.height_out = operands.at(OperandIndex{init_param.inputs[1]}).asScalar(); param.width_out = operands.at(OperandIndex{init_param.inputs[2]}).asScalar(); - + param.align_corners = false; + param.half_pixel_centers = false; return new operation::ResizeBilinear{inputs, outputs, param}; }; - _map[ANEURALNETWORKS_RELU1] = [](const OperationFactory::Param &init_param, Operands &) { - assert(init_param.input_count == 1 && init_param.output_count == 1); - - OperandIndexSequence outputs{init_param.outputs[0]}; - - // Each input should be interpreted as follows: - // - // 0 -> input Tensor Index - 
OperandIndexSequence inputs{init_param.inputs[0]}; - - return new operation::ReLU1{inputs, outputs}; - }; - - _map[ANEURALNETWORKS_RELU6] = [](const OperationFactory::Param &init_param, Operands &) { - assert(init_param.input_count == 1 && init_param.output_count == 1); + _map[ANEURALNETWORKS_RELU1] = CreateSimpleUnaryOp; - OperandIndexSequence outputs{init_param.outputs[0]}; - - // Each input should be interpreted as follows: - // - // 0 -> input Tensor Index - OperandIndexSequence inputs{init_param.inputs[0]}; - - return new operation::ReLU6{inputs, outputs}; - }; + _map[ANEURALNETWORKS_RELU6] = CreateSimpleUnaryOp; _map[ANEURALNETWORKS_REVERSE_EX] = [](const OperationFactory::Param &init_param, Operands &) { assert(init_param.input_count == 2 && init_param.output_count == 1); @@ -1219,76 +1059,7 @@ OperationFactory::OperationFactory() return new operation::SpaceToDepth{inputs, outputs, param}; }; - _map[ANEURALNETWORKS_L2_POOL_2D] = [](const OperationFactory::Param &init_param, - Operands &operands) { - assert(init_param.input_count == 10 || init_param.input_count == 7); - assert(init_param.output_count == 1); - - OperandIndexSequence outputs{init_param.outputs[0]}; - - // Each input should be interpreted as follows: - // - // 0 -> IFM Tensor Index - OperandIndexSequence inputs{init_param.inputs[0]}; - - operation::L2Pool2D::Param param; - - if (init_param.input_count == 7) // Imlicit Padding case - { - // 1 -> Padding Code (ANEURALNETWORKS_PADDING_SAME or ANEURALNETWORKS_PADDING_VALID) Index - // 2 -> Horizontal (over width) Stride Index - // 3 -> Vertial (over height) Stride Index - // 4 -> Filter Width Index - // 5 -> Filter Height Index - // 6 -> FuseCode (activation) Index - const auto padding_index = OperandIndex{init_param.inputs[1]}; - const auto hstride_index = OperandIndex{init_param.inputs[2]}; - const auto vstride_index = OperandIndex{init_param.inputs[3]}; - const auto kw_index = OperandIndex{init_param.inputs[4]}; - const auto kh_index = OperandIndex{init_param.inputs[5]}; - const auto activation_index = OperandIndex{init_param.inputs[6]}; - - param.padding.type = - NNAPIConvert::getPaddingType(operands.at(padding_index).asScalar()); - param.stride = makeStride(operands, hstride_index, vstride_index); - param.kw = getUint32Scalar(operands, kw_index); - param.kh = getUint32Scalar(operands, kh_index); - param.activation = - NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar()); - } - else // Explicit Padding case - { - // 1 -> Padding_left index - // 2 -> Padding_right index - // 3 -> Padding_top index - // 4 -> Padding_bottom index - // 5 -> Horizontal (over width) Stride Index - // 6 -> Vertial (over height) Stride Index - // 7 -> Filter Width Index - // 8 -> Filter Height Index - // 9 -> FuseCode (activation) Index - const auto padding_left_index = OperandIndex{init_param.inputs[1]}; - const auto padding_right_index = OperandIndex{init_param.inputs[2]}; - const auto padding_top_index = OperandIndex{init_param.inputs[3]}; - const auto padding_bottom_index = OperandIndex{init_param.inputs[4]}; - const auto hstride_index = OperandIndex{init_param.inputs[5]}; - const auto vstride_index = OperandIndex{init_param.inputs[6]}; - const auto kw_index = OperandIndex{init_param.inputs[7]}; - const auto kh_index = OperandIndex{init_param.inputs[8]}; - const auto activation_index = OperandIndex{init_param.inputs[9]}; - - param.padding.type = PaddingType::EXPLICIT; - param.padding.param = makeExplicitPadding(operands, padding_left_index, padding_right_index, - 
padding_top_index, padding_bottom_index); - param.stride = makeStride(operands, hstride_index, vstride_index); - param.kw = getUint32Scalar(operands, kw_index); - param.kh = getUint32Scalar(operands, kh_index); - param.activation = - NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar()); - } - - return new operation::L2Pool2D{inputs, outputs, param}; - }; + _map[ANEURALNETWORKS_L2_POOL_2D] = createPool2DOp; _map[ANEURALNETWORKS_EMBEDDING_LOOKUP] = [](const OperationFactory::Param &init_param, Operands &) { @@ -1438,18 +1209,7 @@ OperationFactory::OperationFactory() return new operation::LogicalOr{inputs, outputs}; }; - _map[ANEURALNETWORKS_LOGICAL_NOT] = [](const OperationFactory::Param &init_param, Operands &) { - assert(init_param.input_count == 1 && init_param.output_count == 1); - - OperandIndexSequence outputs{init_param.outputs[0]}; - - // Each input should be interpreted as follows: - // - // 0 -> input Tensor Index - OperandIndexSequence inputs{init_param.inputs[0]}; - - return new operation::LogicalNot{inputs, outputs}; - }; + _map[ANEURALNETWORKS_LOGICAL_NOT] = CreateSimpleUnaryOp; // ANEURALNETWORKS_LOGICAL_NOT_EX is deprecated // TODO Remove ANEURALNETWORKS_LOGICAL_NOT_EX @@ -1649,35 +1409,13 @@ OperationFactory::OperationFactory() // TODO Remove ANEURALNETWORKS_GATHER_EX _map[ANEURALNETWORKS_GATHER_EX] = _map[ANEURALNETWORKS_GATHER]; - _map[ANEURALNETWORKS_NEG] = [](const OperationFactory::Param &init_param, Operands &) { - assert(init_param.input_count == 1 && init_param.output_count == 1); - - OperandIndexSequence outputs{init_param.outputs[0]}; - - // Each input should be interpreted as follows: - // - // 0 -> Input Tensor Index - OperandIndexSequence inputs{init_param.inputs[0]}; - - return new operation::Neg{inputs, outputs}; - }; + _map[ANEURALNETWORKS_NEG] = CreateSimpleUnaryOp; // ANEURALNETWORKS_NEG_EX is deprecated // TODO Remove ANEURALNETWORKS_NEG_EX _map[ANEURALNETWORKS_NEG_EX] = _map[ANEURALNETWORKS_NEG]; - _map[ANEURALNETWORKS_ABS] = [](const OperationFactory::Param &init_param, Operands &) { - assert(init_param.input_count == 1 && init_param.output_count == 1); - - OperandIndexSequence outputs{init_param.outputs[0]}; - - // Each input should be interpreted as follows: - // - // 0 -> Input Tensor Index - OperandIndexSequence inputs{init_param.inputs[0]}; - - return new operation::Abs{inputs, outputs}; - }; + _map[ANEURALNETWORKS_ABS] = CreateSimpleUnaryOp; // ANEURALNETWORKS_ABS_EX is deprecated // TODO Remove ANEURALNETWORKS_ABS_EX @@ -1704,18 +1442,7 @@ OperationFactory::OperationFactory() // TODO Remove ANEURALNETWORKS_ARGMAX_EX _map[ANEURALNETWORKS_ARGMAX_EX] = _map[ANEURALNETWORKS_ARGMAX]; - _map[ANEURALNETWORKS_DEQUANTIZE] = [](const OperationFactory::Param &init_param, Operands &) { - assert(init_param.input_count == 1 && init_param.output_count == 1); - - OperandIndexSequence outputs{init_param.outputs[0]}; - - // Each input should be interpreted as follows: - // - // 0 -> Input Tensor Index - OperandIndexSequence inputs{init_param.inputs[0]}; - - return new operation::Dequantize{inputs, outputs}; - }; + _map[ANEURALNETWORKS_DEQUANTIZE] = CreateSimpleUnaryOp; _map[ANEURALNETWORKS_MEAN] = [](const OperationFactory::Param &init_param, Operands &operands) { assert(init_param.input_count == 3 && init_param.output_count == 1); @@ -1816,6 +1543,23 @@ OperationFactory::OperationFactory() return new operation::Split{inputs, outputs, param}; }; + _map[ANEURALNETWORKS_SPLIT_V_EX] = [](const OperationFactory::Param &init_param, + Operands 
&operands) { + assert(init_param.input_count == 4); + assert(init_param.output_count >= 1); // At least one output tensor and axis + + OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1], init_param.inputs[2]}; + OperandIndexSequence outputs; + for (uint32_t n = 0; n < init_param.output_count; ++n) + { + outputs.append(OperandIndex{init_param.outputs[n]}); + } + + operation::SplitV::Param param; + param.num_splits = operands.at(OperandIndex{init_param.inputs[3]}).asScalar(); + return new operation::SplitV{inputs, outputs, param}; + }; + // ANEURALNETWORKS_SPLIT_EX is deprecated // TODO Remove ANEURALNETWORKS_SPLIT_EX _map[ANEURALNETWORKS_SPLIT_EX] = _map[ANEURALNETWORKS_SPLIT]; @@ -1841,31 +1585,24 @@ OperationFactory::OperationFactory() }; _map[ANEURALNETWORKS_PAD] = [](const OperationFactory::Param &init_param, Operands &) { - assert(init_param.input_count == 2 && init_param.output_count >= 1); + assert(init_param.input_count >= 2 && init_param.input_count <= 3 && + init_param.output_count >= 1); OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]}; + if (init_param.input_count == 3) + { + inputs.append(OperandIndex{init_param.inputs[2]}); + } OperandIndexSequence outputs{init_param.outputs[0]}; return new operation::Pad{inputs, outputs}; }; - _map[ANEURALNETWORKS_MINIMUM] = [](const OperationFactory::Param &init_param, Operands &) { - assert(init_param.input_count == 2 && init_param.output_count == 1); - - OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]}; - OperandIndexSequence outputs{init_param.outputs[0]}; + _map[ANEURALNETWORKS_PAD_V2] = _map[ANEURALNETWORKS_PAD]; - return new operation::Min{inputs, outputs}; - }; - - _map[ANEURALNETWORKS_MAXIMUM] = [](const OperationFactory::Param &init_param, Operands &) { - assert(init_param.input_count == 2 && init_param.output_count == 1); - - OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]}; - OperandIndexSequence outputs{init_param.outputs[0]}; + _map[ANEURALNETWORKS_MINIMUM] = createSimpleBinaryOp; - return new operation::Max{inputs, outputs}; - }; + _map[ANEURALNETWORKS_MAXIMUM] = createSimpleBinaryOp; _map[ANEURALNETWORKS_ONE_HOT_EX] = [](const OperationFactory::Param &init_param, Operands &operands) { @@ -1948,34 +1685,15 @@ OperationFactory::OperationFactory() return new operation::Range{inputs, outputs}; }; - _map[ANEURALNETWORKS_POW] = [](const OperationFactory::Param &init_param, Operands &) { - assert(init_param.input_count == 2 && init_param.output_count == 1); - - OperandIndexSequence outputs{init_param.outputs[0]}; - - // Each input should be interpreted as follows: - // - // 0 -> LHS Tensor Index - // 1 -> RHS Tensor Index + // Each input should be interpreted as follows: + // 0 -> LHS Tensor Index + // 1 -> RHS Tensor Index + _map[ANEURALNETWORKS_POW] = createSimpleBinaryOp; - OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]}; - - return new operation::Pow{inputs, outputs}; - }; - - _map[ANEURALNETWORKS_FILL_EX] = [](const OperationFactory::Param &init_param, Operands &) { - assert(init_param.input_count == 2 && init_param.output_count == 1); - - // Each input should be interpreted as follows: - // - // 0 -> A tensor, specifying the input. 
- // 1 -> A 1-D tensor, specifying the value - - OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]}; - OperandIndexSequence outputs{init_param.outputs[0]}; - - return new operation::Fill{inputs, outputs}; - }; + // Each input should be interpreted as follows: + // 0 -> A tensor, specifying the input. + // 1 -> A 1-D tensor, specifying the value + _map[ANEURALNETWORKS_FILL_EX] = createSimpleBinaryOp; _map[ANEURALNETWORKS_ZEROS_LIKE_EX] = [](const OperationFactory::Param &init_param, Operands &) { assert(init_param.input_count == 1 && init_param.output_count == 1); @@ -1989,20 +1707,10 @@ OperationFactory::OperationFactory() return new operation::ZerosLike{inputs, outputs}; }; - _map[ANEURALNETWORKS_TILE] = [](const OperationFactory::Param &init_param, Operands &) { - assert(init_param.input_count == 2 && init_param.output_count == 1); - - OperandIndexSequence outputs{init_param.outputs[0]}; - - // Each input should be interpreted as follows: - // - // 0 -> Input Tensor Index - // 1 -> Multiple Tensor Index - - OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]}; - - return new operation::Tile{inputs, outputs}; - }; + // Each input should be interpreted as follows: + // 0 -> Input Tensor Index + // 1 -> Multiple Tensor Index + _map[ANEURALNETWORKS_TILE] = createSimpleBinaryOp; _map[ANEURALNETWORKS_MATRIX_BAND_PART_EX] = [](const OperationFactory::Param &init_param, Operands &) { @@ -2064,20 +1772,23 @@ OperationFactory::OperationFactory() return new operation::Einsum{inputs, outputs, param}; }; - _map[ANEURALNETWORKS_BROADCAST_TO_EX] = [](const OperationFactory::Param &init_param, - Operands &) { - assert(init_param.input_count == 2 && init_param.output_count == 1); + // 0 -> Input Tensor Index + // 1 -> int32, int64, An 1-D int tensor Index + _map[ANEURALNETWORKS_BROADCAST_TO_EX] = createSimpleBinaryOp; + _map[ANEURALNETWORKS_STATELESS_RANDOM_UNIFORM_EX] = [](const OperationFactory::Param &init_param, + Operands &) { + assert(init_param.input_count == 2 && init_param.output_count == 1); OperandIndexSequence outputs{init_param.outputs[0]}; // Each input should be interpreted as follows: // - // 0 -> Input Tensor Index + // 0 -> Shape Tensor Index // 1 -> int32, int64, An 1-D int tensor Index OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]}; - return new operation::BroadcastTo{inputs, outputs}; + return new operation::StatelessRandomUniform{inputs, outputs}; }; _map[ANEURALNETWORKS_FUSED_BATCH_NORM_V3_EX] = [](const OperationFactory::Param &init_param, @@ -2133,6 +1844,15 @@ OperationFactory::OperationFactory() return new operation::LogSoftmax{inputs, outputs, param}; }; + + _map[ANEURALNETWORKS_QUANTIZE] = [](const OperationFactory::Param &init_param, Operands &) { + assert(init_param.input_count == 1 && init_param.output_count == 1); + + OperandIndexSequence inputs{init_param.inputs[0]}; + OperandIndexSequence outputs{init_param.outputs[0]}; + + return new operation::Quantize{inputs, outputs}; + }; } Operation *OperationFactory::create(ANeuralNetworksOperationType type, diff --git a/runtime/onert/frontend/tflite/src/tflite_loader.cc b/runtime/onert/frontend/tflite/src/tflite_loader.cc index 649ce9b..86c2c6b 100644 --- a/runtime/onert/frontend/tflite/src/tflite_loader.cc +++ b/runtime/onert/frontend/tflite/src/tflite_loader.cc @@ -40,6 +40,8 @@ struct LoaderDomain using Tensor = onert_tflite::Tensor; using TensorType = onert_tflite::TensorType; using SubGraph = onert_tflite::SubGraph; + using DimensionType = 
onert_tflite::DimensionType; + using SparseIndexVector = onert_tflite::SparseIndexVector; static const char *EnumNameBuiltinOperator(BuiltinOperator e) { diff --git a/runtime/onert/sample/minimal/CMakeLists.txt b/runtime/onert/sample/minimal/CMakeLists.txt index 6f4b027..e54223e 100644 --- a/runtime/onert/sample/minimal/CMakeLists.txt +++ b/runtime/onert/sample/minimal/CMakeLists.txt @@ -4,7 +4,7 @@ endif(NOT BUILD_MINIMAL_SAMPLE) list(APPEND MINIMAL_SRCS "src/minimal.cc") -add_executable(minimal ${MINIMAL_SRCS}) -target_link_libraries(minimal nnfw-dev pthread dl) +add_executable(onert-minimal-app ${MINIMAL_SRCS}) +target_link_libraries(onert-minimal-app nnfw-dev pthread dl) -install(TARGETS minimal DESTINATION bin) +install(TARGETS onert-minimal-app DESTINATION bin) diff --git a/runtime/onert/sample/minimal/src/minimal.cc b/runtime/onert/sample/minimal/src/minimal.cc index d55569b..0436b93 100644 --- a/runtime/onert/sample/minimal/src/minimal.cc +++ b/runtime/onert/sample/minimal/src/minimal.cc @@ -16,6 +16,7 @@ #include "nnfw.h" #include +#include uint64_t num_elems(const nnfw_tensorinfo *ti) { @@ -65,5 +66,6 @@ int main(const int argc, char **argv) nnfw_close_session(session); + std::cout << "nnpackage " << argv[1] << " runs successfully." << std::endl; return 0; } diff --git a/runtime/onert/test/core/exec/ExecInstance.cc b/runtime/onert/test/core/exec/ExecInstance.cc index cc04347..0fcf372 100644 --- a/runtime/onert/test/core/exec/ExecInstance.cc +++ b/runtime/onert/test/core/exec/ExecInstance.cc @@ -73,9 +73,8 @@ public: // Compile auto subgs = std::make_shared(); subgs->push(onert::ir::SubgraphIndex{0}, graph); - auto compiler = new onert::compiler::Compiler{subgs}; - executors = compiler->compile(); - delete compiler; + onert::compiler::Compiler compiler{subgs}; + executors = compiler.compile(); } public: @@ -98,19 +97,17 @@ TEST(ExecInstance, simple) float output_buffer[4] = {}; const float output_expected[4] = {5, -2, 0, -1}; - auto execution = new onert::exec::Execution(executors); + onert::exec::Execution execution{executors}; - execution->setInput(input1, reinterpret_cast(input1_buffer), 16); - execution->setInput(input2, reinterpret_cast(input2_buffer), 16); - execution->setOutput(output, reinterpret_cast(output_buffer), 16); - execution->execute(); + execution.setInput(input1, reinterpret_cast(input1_buffer), 16); + execution.setInput(input2, reinterpret_cast(input2_buffer), 16); + execution.setOutput(output, reinterpret_cast(output_buffer), 16); + execution.execute(); for (auto i = 0; i < 4; i++) { EXPECT_EQ(output_buffer[i], output_expected[i]); } - - delete execution; } TEST(ExecInstance, twoCompile) @@ -118,7 +115,7 @@ TEST(ExecInstance, twoCompile) auto mockup = CompiledMockUpModel(); auto graph = mockup.graph; auto executors1 = mockup.executors; - auto execution1 = new onert::exec::Execution(executors1); + onert::exec::Execution execution1{executors1}; auto input1 = IOIndex{0}; auto input2 = IOIndex{1}; @@ -129,38 +126,34 @@ TEST(ExecInstance, twoCompile) float exe1_output_buffer[4] = {}; const float exe1_output_expected[4] = {5, -2, 0, -1}; - execution1->setInput(input1, reinterpret_cast(exe1_input1_buffer), 16); - execution1->setInput(input2, reinterpret_cast(exe1_input2_buffer), 16); - execution1->setOutput(output, reinterpret_cast(exe1_output_buffer), 16); + execution1.setInput(input1, reinterpret_cast(exe1_input1_buffer), 16); + execution1.setInput(input2, reinterpret_cast(exe1_input2_buffer), 16); + execution1.setOutput(output, 
reinterpret_cast(exe1_output_buffer), 16); // Make new executor: compile again auto subgs = std::make_shared(); subgs->push(onert::ir::SubgraphIndex{0}, graph); - auto compiler = new onert::compiler::Compiler{subgs}; - std::shared_ptr executors2 = compiler->compile(); - auto execution2 = new onert::exec::Execution(executors2); + onert::compiler::Compiler compiler{subgs}; + std::shared_ptr executors2 = compiler.compile(); + onert::exec::Execution execution2{executors2}; const float exe2_input1_buffer[4] = {2, 1, -2, 0}; const float exe2_input2_buffer[4] = {-3, 3, 1, 2}; float exe2_output_buffer[4] = {}; const float exe2_output_expected[4] = {2, 5, -2, 7}; - execution2->setInput(input1, reinterpret_cast(exe2_input1_buffer), 16); - execution2->setInput(input2, reinterpret_cast(exe2_input2_buffer), 16); - execution2->setOutput(output, reinterpret_cast(exe2_output_buffer), 16); + execution2.setInput(input1, reinterpret_cast(exe2_input1_buffer), 16); + execution2.setInput(input2, reinterpret_cast(exe2_input2_buffer), 16); + execution2.setOutput(output, reinterpret_cast(exe2_output_buffer), 16); - execution1->execute(); - execution2->execute(); + execution1.execute(); + execution2.execute(); for (auto i = 0; i < 4; i++) { EXPECT_EQ(exe1_output_buffer[i], exe1_output_expected[i]); EXPECT_EQ(exe2_output_buffer[i], exe2_output_expected[i]); } - - delete compiler; - delete execution1; - delete execution2; } // Support two initialized execution instance then ordered execution @@ -178,32 +171,29 @@ TEST(ExecInstance, twoExecution) const float exe1_output_expected[4] = {5, -2, 0, -1}; const float exe2_output_expected[4] = {2, 5, -2, 7}; - auto execution1 = new onert::exec::Execution(executors); - execution1->setInput(input1, reinterpret_cast(exe1_input1_buffer), 16); - execution1->setInput(input2, reinterpret_cast(exe1_input2_buffer), 16); - execution1->setOutput(output1, reinterpret_cast(exe1_output_buffer), 16); + onert::exec::Execution execution1{executors}; + execution1.setInput(input1, reinterpret_cast(exe1_input1_buffer), 16); + execution1.setInput(input2, reinterpret_cast(exe1_input2_buffer), 16); + execution1.setOutput(output1, reinterpret_cast(exe1_output_buffer), 16); const float exe2_input1_buffer[4] = {2, 1, -2, 0}; const float exe2_input2_buffer[4] = {-3, 3, 1, 2}; float exe2_output_buffer[4] = {}; // Make new execution - auto execution2 = new onert::exec::Execution(executors); - execution2->setInput(input1, reinterpret_cast(exe2_input1_buffer), 16); - execution2->setInput(input2, reinterpret_cast(exe2_input2_buffer), 16); - execution2->setOutput(output1, reinterpret_cast(exe2_output_buffer), 16); + onert::exec::Execution execution2{executors}; + execution2.setInput(input1, reinterpret_cast(exe2_input1_buffer), 16); + execution2.setInput(input2, reinterpret_cast(exe2_input2_buffer), 16); + execution2.setOutput(output1, reinterpret_cast(exe2_output_buffer), 16); - execution1->execute(); - execution2->execute(); + execution1.execute(); + execution2.execute(); for (auto i = 0; i < 4; i++) { EXPECT_EQ(exe1_output_buffer[i], exe1_output_expected[i]); EXPECT_EQ(exe2_output_buffer[i], exe2_output_expected[i]); } - - delete execution1; - delete execution2; } class Inference @@ -222,14 +212,12 @@ public: auto input2 = IOIndex{1}; auto output1 = IOIndex{0}; - auto execution = new onert::exec::Execution(_executors); - execution->setInput(input1, reinterpret_cast(_input1), 16); - execution->setInput(input2, reinterpret_cast(_input2), 16); - execution->setOutput(output1, reinterpret_cast(_output), 16); 
+ onert::exec::Execution execution{_executors}; + execution.setInput(input1, reinterpret_cast(_input1), 16); + execution.setInput(input2, reinterpret_cast(_input2), 16); + execution.setOutput(output1, reinterpret_cast(_output), 16); - execution->execute(); - - delete execution; + execution.execute(); } private: @@ -288,20 +276,18 @@ TEST(ExecInstance, async) float output_buffer[4] = {}; const float output_expected[4] = {5, -2, 0, -1}; - auto execution = new onert::exec::Execution(executors); + onert::exec::Execution execution{executors}; - execution->setInput(input1, reinterpret_cast(input1_buffer), 16); - execution->setInput(input2, reinterpret_cast(input2_buffer), 16); - execution->setOutput(output, reinterpret_cast(output_buffer), 16); - execution->startExecute(); - execution->waitFinish(); + execution.setInput(input1, reinterpret_cast(input1_buffer), 16); + execution.setInput(input2, reinterpret_cast(input2_buffer), 16); + execution.setOutput(output, reinterpret_cast(output_buffer), 16); + execution.startExecute(); + execution.waitFinish(); for (auto i = 0; i < 4; i++) { EXPECT_EQ(output_buffer[i], output_expected[i]); } - - delete execution; } } // namespace diff --git a/runtime/onert/test/graph/operand/UseDef.cc b/runtime/onert/test/graph/operand/UseDef.cc index 3e8b14b..cd2cdb7 100644 --- a/runtime/onert/test/graph/operand/UseDef.cc +++ b/runtime/onert/test/graph/operand/UseDef.cc @@ -65,12 +65,12 @@ TEST(graph_operand_usedef, usedef_test) ASSERT_EQ(verifier.verify(graph), true); // Check def - ASSERT_EQ(graph.operands().at(operand_index1).getDef().contains(mocknode_index1), true); - ASSERT_EQ(graph.operands().at(operand_index2).getDef().contains(mocknode_index2), true); - ASSERT_EQ(graph.operands().at(output_operand).getDef().contains(multiinput_index), true); + ASSERT_EQ(graph.operands().at(operand_index1).getDef(), mocknode_index1); + ASSERT_EQ(graph.operands().at(operand_index2).getDef(), mocknode_index2); + ASSERT_EQ(graph.operands().at(output_operand).getDef(), multiinput_index); - ASSERT_EQ(graph.operands().at(operand_index1).getDef().contains(mocknode_index2), false); - ASSERT_EQ(graph.operands().at(operand_index1).getDef().contains(multiinput_index), false); + ASSERT_NE(graph.operands().at(operand_index1).getDef(), mocknode_index2); + ASSERT_NE(graph.operands().at(operand_index1).getDef(), multiinput_index); // Check use ASSERT_EQ(graph.operands().at(input_operand).getUses().contains(mocknode_index1), true); diff --git a/tests/custom_op/FillFrom/CMakeLists.txt b/tests/custom_op/FillFrom/CMakeLists.txt index ba03e30..91349de 100644 --- a/tests/custom_op/FillFrom/CMakeLists.txt +++ b/tests/custom_op/FillFrom/CMakeLists.txt @@ -2,6 +2,6 @@ add_nnfw_custom_op_kernel(FillFrom ON kernels/FillFromKernel.cc) add_nnfw_custom_op_app(FillFrom_runner SOURCES FillFrom_runner.cc KERNELS FillFrom) -install(TARGETS FillFrom_runner DESTINATION tests) -install(DIRECTORY nnpkgs/FillFrom DESTINATION tests/nnpkgs) -install_nnfw_custom_op_kernel(FillFrom tests/nnpkgs/FillFrom) +install(TARGETS FillFrom_runner DESTINATION test) +install(DIRECTORY nnpkgs/FillFrom DESTINATION test/nnpkgs) +install_nnfw_custom_op_kernel(FillFrom test/nnpkgs/FillFrom) diff --git a/tests/custom_op/FillFrom/FillFrom_runner.cc b/tests/custom_op/FillFrom/FillFrom_runner.cc index 82e25fa..7313086 100644 --- a/tests/custom_op/FillFrom/FillFrom_runner.cc +++ b/tests/custom_op/FillFrom/FillFrom_runner.cc @@ -15,7 +15,7 @@ */ #include "nnfw.h" -#include "nnfw_dev.h" +#include "nnfw_experimental.h" #include #include diff --git 
a/tests/custom_op/FillFrom/kernels/FillFromKernel.cc b/tests/custom_op/FillFrom/kernels/FillFromKernel.cc index 6771e68..6015b3b 100644 --- a/tests/custom_op/FillFrom/kernels/FillFromKernel.cc +++ b/tests/custom_op/FillFrom/kernels/FillFromKernel.cc @@ -14,7 +14,7 @@ * limitations under the License. */ -#include "nnfw_dev.h" +#include "nnfw_experimental.h" #include "flatbuffers/flexbuffers.h" diff --git a/tests/nnapi/nnapi_gtest.skip.aarch64-linux.acl_cl b/tests/nnapi/nnapi_gtest.skip.aarch64-linux.acl_cl index e50b941..3a6b40d 100644 --- a/tests/nnapi/nnapi_gtest.skip.aarch64-linux.acl_cl +++ b/tests/nnapi/nnapi_gtest.skip.aarch64-linux.acl_cl @@ -23,8 +23,8 @@ GeneratedTests.cast_float16_to_quant8 GeneratedTests.cast_float16_to_quant8_overflow GeneratedTests.cast_float32_to_float16 GeneratedTests.cast_float32_to_float16_relaxed +GeneratedTests.cast_float32_to_int32_nnfw GeneratedTests.cast_int32_to_float16 -GeneratedTests.cast_int32_to_quant8_overflow GeneratedTests.cast_quant8_to_float16 GeneratedTests.concat_dynamic_nnfw GeneratedTests.conv_dynamic_nnfw @@ -68,6 +68,7 @@ GeneratedTests.gather_float16_7 GeneratedTests.gather_float16_8 GeneratedTests.greater_dynamic_float_nnfw GeneratedTests.greater_equal_dynamic_float_nnfw +GeneratedTests.l2_normalization_quant8_nnfw GeneratedTests.less_dynamic_float_nnfw GeneratedTests.less_equal_dynamic_float_nnfw GeneratedTests.log_4D_float_nnfw @@ -106,11 +107,26 @@ GeneratedTests.not_equal_dynamic_float_nnfw GeneratedTests.one_hot_ex_dynamic_nnfw GeneratedTests.pack_ex_dynamic_nnfw GeneratedTests.pad_dynamic_nnfw +GeneratedTests.pad_v2_1_float +GeneratedTests.pad_v2_1_quant8 +GeneratedTests.pad_v2_all_dims +GeneratedTests.pad_v2_all_dims_quant8 +GeneratedTests.pad_v2_low_rank +GeneratedTests.pad_v2_low_rank_quant8 GeneratedTests.pow_2D_float_nnfw GeneratedTests.pow_broadcast_float_nnfw GeneratedTests.pow_broadcast_float_nnfw_2 GeneratedTests.pow_broadcast_float_nnfw_3 GeneratedTests.pow_dynamic_nnfw +GeneratedTests.quantize_quant8 +GeneratedTests.quantize_quant8_2 +GeneratedTests.quantize_quant8_3 +GeneratedTests.quantize_quant8_4 +GeneratedTests.quantize_quant8_5 +GeneratedTests.quantize_quant8_6 +GeneratedTests.quantize_quant8_7 +GeneratedTests.quantize_quant8_8 +GeneratedTests.quantize_zero_sized GeneratedTests.range_ex_float_1 GeneratedTests.range_ex_float_1_all_constant_inputs GeneratedTests.range_ex_float_1_dynamic_nnfw @@ -184,9 +200,21 @@ GeneratedTests.slice_zero_sized_quant8 GeneratedTests.softmax_dynamic_nnfw GeneratedTests.space_to_batch_dynamic_float_nnfw GeneratedTests.split_dynamic_float_nnfw +GeneratedTests.split_v_ex_1D_float_1_nnfw +GeneratedTests.split_v_ex_1D_float_2_nnfw +GeneratedTests.split_v_ex_1D_int32_nnfw +GeneratedTests.split_v_ex_4D_float_1_nnfw +GeneratedTests.split_v_ex_4D_float_2_nnfw +GeneratedTests.split_v_ex_4D_float_3_nnfw +GeneratedTests.split_v_ex_4D_float_4_nnfw +GeneratedTests.split_v_ex_4D_int32_1_nnfw +GeneratedTests.split_v_ex_4D_int32_2_nnfw +GeneratedTests.split_v_ex_4D_int32_3_nnfw +GeneratedTests.split_v_ex_4D_int32_4_nnfw GeneratedTests.sqrt_ GeneratedTests.squared_difference_ex_dynamic_nnfw GeneratedTests.squeeze_dynamic_float_nnfw +GeneratedTests.stateless_random_uniform_ex_nnfw GeneratedTests.strided_slice_dynamic_nnfw GeneratedTests.sub_dynamic_nnfw GeneratedTests.sub_v1_2_zero_sized diff --git a/tests/nnapi/nnapi_gtest.skip.aarch64-linux.acl_neon b/tests/nnapi/nnapi_gtest.skip.aarch64-linux.acl_neon index c9edee5..f4bd48b 100644 --- a/tests/nnapi/nnapi_gtest.skip.aarch64-linux.acl_neon +++ 
b/tests/nnapi/nnapi_gtest.skip.aarch64-linux.acl_neon @@ -23,10 +23,7 @@ GeneratedTests.cast_float16_to_quant8 GeneratedTests.cast_float16_to_quant8_overflow GeneratedTests.cast_float32_to_float16 GeneratedTests.cast_float32_to_float16_relaxed -GeneratedTests.cast_float32_to_quant8_overflow -GeneratedTests.cast_float32_to_quant8_overflow_relaxed GeneratedTests.cast_int32_to_float16 -GeneratedTests.cast_int32_to_quant8_overflow GeneratedTests.cast_quant8_to_float16 GeneratedTests.concat_dynamic_nnfw GeneratedTests.conv_dynamic_nnfw @@ -73,6 +70,7 @@ GeneratedTests.gather_float16_8 GeneratedTests.greater_dynamic_float_nnfw GeneratedTests.greater_equal_boolean GeneratedTests.greater_equal_dynamic_float_nnfw +GeneratedTests.l2_normalization_quant8_nnfw GeneratedTests.less_boolean GeneratedTests.less_dynamic_float_nnfw GeneratedTests.less_equal_dynamic_float_nnfw @@ -112,11 +110,26 @@ GeneratedTests.not_equal_dynamic_float_nnfw GeneratedTests.one_hot_ex_dynamic_nnfw GeneratedTests.pack_ex_dynamic_nnfw GeneratedTests.pad_dynamic_nnfw +GeneratedTests.pad_v2_1_float +GeneratedTests.pad_v2_1_quant8 +GeneratedTests.pad_v2_all_dims +GeneratedTests.pad_v2_all_dims_quant8 +GeneratedTests.pad_v2_low_rank +GeneratedTests.pad_v2_low_rank_quant8 GeneratedTests.pow_2D_float_nnfw GeneratedTests.pow_broadcast_float_nnfw GeneratedTests.pow_broadcast_float_nnfw_2 GeneratedTests.pow_broadcast_float_nnfw_3 GeneratedTests.pow_dynamic_nnfw +GeneratedTests.quantize_quant8 +GeneratedTests.quantize_quant8_2 +GeneratedTests.quantize_quant8_3 +GeneratedTests.quantize_quant8_4 +GeneratedTests.quantize_quant8_5 +GeneratedTests.quantize_quant8_6 +GeneratedTests.quantize_quant8_7 +GeneratedTests.quantize_quant8_8 +GeneratedTests.quantize_zero_sized GeneratedTests.range_ex_float_1 GeneratedTests.range_ex_float_1_all_constant_inputs GeneratedTests.range_ex_float_1_dynamic_nnfw @@ -199,9 +212,21 @@ GeneratedTests.space_to_batch_quant8_2 GeneratedTests.space_to_batch_quant8_2_nnfw GeneratedTests.space_to_batch_quant8_3 GeneratedTests.split_dynamic_float_nnfw +GeneratedTests.split_v_ex_1D_float_1_nnfw +GeneratedTests.split_v_ex_1D_float_2_nnfw +GeneratedTests.split_v_ex_1D_int32_nnfw +GeneratedTests.split_v_ex_4D_float_1_nnfw +GeneratedTests.split_v_ex_4D_float_2_nnfw +GeneratedTests.split_v_ex_4D_float_3_nnfw +GeneratedTests.split_v_ex_4D_float_4_nnfw +GeneratedTests.split_v_ex_4D_int32_1_nnfw +GeneratedTests.split_v_ex_4D_int32_2_nnfw +GeneratedTests.split_v_ex_4D_int32_3_nnfw +GeneratedTests.split_v_ex_4D_int32_4_nnfw GeneratedTests.sqrt_ GeneratedTests.squared_difference_ex_dynamic_nnfw GeneratedTests.squeeze_dynamic_float_nnfw +GeneratedTests.stateless_random_uniform_ex_nnfw GeneratedTests.strided_slice_dynamic_nnfw GeneratedTests.sub_dynamic_nnfw GeneratedTests.sub_v1_2_zero_sized diff --git a/tests/nnapi/nnapi_gtest.skip.aarch64-linux.cpu b/tests/nnapi/nnapi_gtest.skip.aarch64-linux.cpu index 3cce4f3..e98007e 100644 --- a/tests/nnapi/nnapi_gtest.skip.aarch64-linux.cpu +++ b/tests/nnapi/nnapi_gtest.skip.aarch64-linux.cpu @@ -1,7 +1,4 @@ GeneratedTests.abs_ -GeneratedTests.batch_to_space -GeneratedTests.batch_to_space_float_1 -GeneratedTests.batch_to_space_quant8_1 GeneratedTests.cast_float16_to_float16 GeneratedTests.cast_float16_to_float32 GeneratedTests.cast_float16_to_float32_relaxed @@ -38,9 +35,6 @@ GeneratedTests.gather_float16_8 GeneratedTests.hashtable_lookup_float GeneratedTests.hashtable_lookup_float_4D_nnfw GeneratedTests.hashtable_lookup_quant8 -GeneratedTests.l2_normalization -GeneratedTests.l2_normalization_2 
-GeneratedTests.l2_normalization_large GeneratedTests.l2_pool_float GeneratedTests.l2_pool_float_2 GeneratedTests.l2_pool_float_large @@ -79,7 +73,6 @@ GeneratedTests.minimum_simple_quant8 GeneratedTests.neg GeneratedTests.neg_3D_int_nnfw GeneratedTests.neg_4D_int_nnfw -GeneratedTests.pad_quant8_nnfw GeneratedTests.prelu GeneratedTests.prelu_broadcast_float_1_nnfw GeneratedTests.prelu_broadcast_quant8_1_nnfw @@ -94,6 +87,11 @@ GeneratedTests.prelu_weight_as_input_quant8 GeneratedTests.prelu_weight_as_input_quant8_2 GeneratedTests.prelu_weight_as_input_quant8_3 GeneratedTests.prelu_weight_as_input_quant8_4 +GeneratedTests.quantize_quant8_5 +GeneratedTests.quantize_quant8_6 +GeneratedTests.quantize_quant8_7 +GeneratedTests.quantize_quant8_8 +GeneratedTests.quantize_zero_sized GeneratedTests.reduce_max_quant8 GeneratedTests.reduce_max_quant8_1_nnfw GeneratedTests.reduce_max_quant8_2 @@ -108,14 +106,10 @@ GeneratedTests.relu1_float_1 GeneratedTests.relu1_float_2 GeneratedTests.relu1_quant8_1 GeneratedTests.relu1_quant8_2 -GeneratedTests.relu6_float_1 -GeneratedTests.relu6_float_2 GeneratedTests.relu6_quant8_1 GeneratedTests.relu6_quant8_2 GeneratedTests.relu_quant8_1 GeneratedTests.relu_quant8_2 -GeneratedTests.resize_bilinear -GeneratedTests.resize_bilinear_2 GeneratedTests.rnn GeneratedTests.rnn_state GeneratedTests.rsqrt @@ -125,15 +119,9 @@ GeneratedTests.select_v1_2_one_dim_quant8 GeneratedTests.select_v1_2_two_dim_quant8 GeneratedTests.slice_5 GeneratedTests.slice_6 -GeneratedTests.slice_7 GeneratedTests.slice_8 GeneratedTests.slice_zero_sized GeneratedTests.slice_zero_sized_quant8 -GeneratedTests.space_to_depth_float_1 -GeneratedTests.space_to_depth_float_2 -GeneratedTests.space_to_depth_float_3 -GeneratedTests.space_to_depth_quant8_1 -GeneratedTests.space_to_depth_quant8_2 GeneratedTests.sqrt_ GeneratedTests.sqrt_1D_float_nnfw GeneratedTests.sqrt_2D_float_nnfw diff --git a/tests/nnapi/nnapi_gtest.skip.armv7l-linux.acl_cl b/tests/nnapi/nnapi_gtest.skip.armv7l-linux.acl_cl index e50b941..3a6b40d 100644 --- a/tests/nnapi/nnapi_gtest.skip.armv7l-linux.acl_cl +++ b/tests/nnapi/nnapi_gtest.skip.armv7l-linux.acl_cl @@ -23,8 +23,8 @@ GeneratedTests.cast_float16_to_quant8 GeneratedTests.cast_float16_to_quant8_overflow GeneratedTests.cast_float32_to_float16 GeneratedTests.cast_float32_to_float16_relaxed +GeneratedTests.cast_float32_to_int32_nnfw GeneratedTests.cast_int32_to_float16 -GeneratedTests.cast_int32_to_quant8_overflow GeneratedTests.cast_quant8_to_float16 GeneratedTests.concat_dynamic_nnfw GeneratedTests.conv_dynamic_nnfw @@ -68,6 +68,7 @@ GeneratedTests.gather_float16_7 GeneratedTests.gather_float16_8 GeneratedTests.greater_dynamic_float_nnfw GeneratedTests.greater_equal_dynamic_float_nnfw +GeneratedTests.l2_normalization_quant8_nnfw GeneratedTests.less_dynamic_float_nnfw GeneratedTests.less_equal_dynamic_float_nnfw GeneratedTests.log_4D_float_nnfw @@ -106,11 +107,26 @@ GeneratedTests.not_equal_dynamic_float_nnfw GeneratedTests.one_hot_ex_dynamic_nnfw GeneratedTests.pack_ex_dynamic_nnfw GeneratedTests.pad_dynamic_nnfw +GeneratedTests.pad_v2_1_float +GeneratedTests.pad_v2_1_quant8 +GeneratedTests.pad_v2_all_dims +GeneratedTests.pad_v2_all_dims_quant8 +GeneratedTests.pad_v2_low_rank +GeneratedTests.pad_v2_low_rank_quant8 GeneratedTests.pow_2D_float_nnfw GeneratedTests.pow_broadcast_float_nnfw GeneratedTests.pow_broadcast_float_nnfw_2 GeneratedTests.pow_broadcast_float_nnfw_3 GeneratedTests.pow_dynamic_nnfw +GeneratedTests.quantize_quant8 +GeneratedTests.quantize_quant8_2 
+GeneratedTests.quantize_quant8_3 +GeneratedTests.quantize_quant8_4 +GeneratedTests.quantize_quant8_5 +GeneratedTests.quantize_quant8_6 +GeneratedTests.quantize_quant8_7 +GeneratedTests.quantize_quant8_8 +GeneratedTests.quantize_zero_sized GeneratedTests.range_ex_float_1 GeneratedTests.range_ex_float_1_all_constant_inputs GeneratedTests.range_ex_float_1_dynamic_nnfw @@ -184,9 +200,21 @@ GeneratedTests.slice_zero_sized_quant8 GeneratedTests.softmax_dynamic_nnfw GeneratedTests.space_to_batch_dynamic_float_nnfw GeneratedTests.split_dynamic_float_nnfw +GeneratedTests.split_v_ex_1D_float_1_nnfw +GeneratedTests.split_v_ex_1D_float_2_nnfw +GeneratedTests.split_v_ex_1D_int32_nnfw +GeneratedTests.split_v_ex_4D_float_1_nnfw +GeneratedTests.split_v_ex_4D_float_2_nnfw +GeneratedTests.split_v_ex_4D_float_3_nnfw +GeneratedTests.split_v_ex_4D_float_4_nnfw +GeneratedTests.split_v_ex_4D_int32_1_nnfw +GeneratedTests.split_v_ex_4D_int32_2_nnfw +GeneratedTests.split_v_ex_4D_int32_3_nnfw +GeneratedTests.split_v_ex_4D_int32_4_nnfw GeneratedTests.sqrt_ GeneratedTests.squared_difference_ex_dynamic_nnfw GeneratedTests.squeeze_dynamic_float_nnfw +GeneratedTests.stateless_random_uniform_ex_nnfw GeneratedTests.strided_slice_dynamic_nnfw GeneratedTests.sub_dynamic_nnfw GeneratedTests.sub_v1_2_zero_sized diff --git a/tests/nnapi/nnapi_gtest.skip.armv7l-linux.acl_neon b/tests/nnapi/nnapi_gtest.skip.armv7l-linux.acl_neon index 55cfe39..fcd8b3e 100644 --- a/tests/nnapi/nnapi_gtest.skip.armv7l-linux.acl_neon +++ b/tests/nnapi/nnapi_gtest.skip.armv7l-linux.acl_neon @@ -23,10 +23,7 @@ GeneratedTests.cast_float16_to_quant8 GeneratedTests.cast_float16_to_quant8_overflow GeneratedTests.cast_float32_to_float16 GeneratedTests.cast_float32_to_float16_relaxed -GeneratedTests.cast_float32_to_quant8_overflow -GeneratedTests.cast_float32_to_quant8_overflow_relaxed GeneratedTests.cast_int32_to_float16 -GeneratedTests.cast_int32_to_quant8_overflow GeneratedTests.cast_quant8_to_float16 GeneratedTests.concat_dynamic_nnfw GeneratedTests.conv_dynamic_nnfw @@ -73,6 +70,7 @@ GeneratedTests.greater_dynamic_float_nnfw GeneratedTests.greater_equal_boolean GeneratedTests.greater_equal_dynamic_float_nnfw GeneratedTests.less_boolean +GeneratedTests.l2_normalization_quant8_nnfw GeneratedTests.less_dynamic_float_nnfw GeneratedTests.less_equal_dynamic_float_nnfw GeneratedTests.log_4D_float_nnfw @@ -111,11 +109,26 @@ GeneratedTests.not_equal_dynamic_float_nnfw GeneratedTests.one_hot_ex_dynamic_nnfw GeneratedTests.pack_ex_dynamic_nnfw GeneratedTests.pad_dynamic_nnfw +GeneratedTests.pad_v2_1_float +GeneratedTests.pad_v2_1_quant8 +GeneratedTests.pad_v2_all_dims +GeneratedTests.pad_v2_all_dims_quant8 +GeneratedTests.pad_v2_low_rank +GeneratedTests.pad_v2_low_rank_quant8 GeneratedTests.pow_2D_float_nnfw GeneratedTests.pow_broadcast_float_nnfw GeneratedTests.pow_broadcast_float_nnfw_2 GeneratedTests.pow_broadcast_float_nnfw_3 GeneratedTests.pow_dynamic_nnfw +GeneratedTests.quantize_quant8 +GeneratedTests.quantize_quant8_2 +GeneratedTests.quantize_quant8_3 +GeneratedTests.quantize_quant8_4 +GeneratedTests.quantize_quant8_5 +GeneratedTests.quantize_quant8_6 +GeneratedTests.quantize_quant8_7 +GeneratedTests.quantize_quant8_8 +GeneratedTests.quantize_zero_sized GeneratedTests.range_ex_float_1 GeneratedTests.range_ex_float_1_all_constant_inputs GeneratedTests.range_ex_float_1_dynamic_nnfw @@ -191,9 +204,21 @@ GeneratedTests.slice_zero_sized_quant8 GeneratedTests.softmax_dynamic_nnfw GeneratedTests.space_to_batch_dynamic_float_nnfw 
GeneratedTests.split_dynamic_float_nnfw +GeneratedTests.split_v_ex_1D_float_1_nnfw +GeneratedTests.split_v_ex_1D_float_2_nnfw +GeneratedTests.split_v_ex_1D_int32_nnfw +GeneratedTests.split_v_ex_4D_float_1_nnfw +GeneratedTests.split_v_ex_4D_float_2_nnfw +GeneratedTests.split_v_ex_4D_float_3_nnfw +GeneratedTests.split_v_ex_4D_float_4_nnfw +GeneratedTests.split_v_ex_4D_int32_1_nnfw +GeneratedTests.split_v_ex_4D_int32_2_nnfw +GeneratedTests.split_v_ex_4D_int32_3_nnfw +GeneratedTests.split_v_ex_4D_int32_4_nnfw GeneratedTests.sqrt_ GeneratedTests.squared_difference_ex_dynamic_nnfw GeneratedTests.squeeze_dynamic_float_nnfw +GeneratedTests.stateless_random_uniform_ex_nnfw GeneratedTests.strided_slice_dynamic_nnfw GeneratedTests.sub_dynamic_nnfw GeneratedTests.sub_v1_2_zero_sized diff --git a/tests/nnapi/nnapi_gtest.skip.armv7l-linux.cpu b/tests/nnapi/nnapi_gtest.skip.armv7l-linux.cpu index 3cce4f3..e98007e 100644 --- a/tests/nnapi/nnapi_gtest.skip.armv7l-linux.cpu +++ b/tests/nnapi/nnapi_gtest.skip.armv7l-linux.cpu @@ -1,7 +1,4 @@ GeneratedTests.abs_ -GeneratedTests.batch_to_space -GeneratedTests.batch_to_space_float_1 -GeneratedTests.batch_to_space_quant8_1 GeneratedTests.cast_float16_to_float16 GeneratedTests.cast_float16_to_float32 GeneratedTests.cast_float16_to_float32_relaxed @@ -38,9 +35,6 @@ GeneratedTests.gather_float16_8 GeneratedTests.hashtable_lookup_float GeneratedTests.hashtable_lookup_float_4D_nnfw GeneratedTests.hashtable_lookup_quant8 -GeneratedTests.l2_normalization -GeneratedTests.l2_normalization_2 -GeneratedTests.l2_normalization_large GeneratedTests.l2_pool_float GeneratedTests.l2_pool_float_2 GeneratedTests.l2_pool_float_large @@ -79,7 +73,6 @@ GeneratedTests.minimum_simple_quant8 GeneratedTests.neg GeneratedTests.neg_3D_int_nnfw GeneratedTests.neg_4D_int_nnfw -GeneratedTests.pad_quant8_nnfw GeneratedTests.prelu GeneratedTests.prelu_broadcast_float_1_nnfw GeneratedTests.prelu_broadcast_quant8_1_nnfw @@ -94,6 +87,11 @@ GeneratedTests.prelu_weight_as_input_quant8 GeneratedTests.prelu_weight_as_input_quant8_2 GeneratedTests.prelu_weight_as_input_quant8_3 GeneratedTests.prelu_weight_as_input_quant8_4 +GeneratedTests.quantize_quant8_5 +GeneratedTests.quantize_quant8_6 +GeneratedTests.quantize_quant8_7 +GeneratedTests.quantize_quant8_8 +GeneratedTests.quantize_zero_sized GeneratedTests.reduce_max_quant8 GeneratedTests.reduce_max_quant8_1_nnfw GeneratedTests.reduce_max_quant8_2 @@ -108,14 +106,10 @@ GeneratedTests.relu1_float_1 GeneratedTests.relu1_float_2 GeneratedTests.relu1_quant8_1 GeneratedTests.relu1_quant8_2 -GeneratedTests.relu6_float_1 -GeneratedTests.relu6_float_2 GeneratedTests.relu6_quant8_1 GeneratedTests.relu6_quant8_2 GeneratedTests.relu_quant8_1 GeneratedTests.relu_quant8_2 -GeneratedTests.resize_bilinear -GeneratedTests.resize_bilinear_2 GeneratedTests.rnn GeneratedTests.rnn_state GeneratedTests.rsqrt @@ -125,15 +119,9 @@ GeneratedTests.select_v1_2_one_dim_quant8 GeneratedTests.select_v1_2_two_dim_quant8 GeneratedTests.slice_5 GeneratedTests.slice_6 -GeneratedTests.slice_7 GeneratedTests.slice_8 GeneratedTests.slice_zero_sized GeneratedTests.slice_zero_sized_quant8 -GeneratedTests.space_to_depth_float_1 -GeneratedTests.space_to_depth_float_2 -GeneratedTests.space_to_depth_float_3 -GeneratedTests.space_to_depth_quant8_1 -GeneratedTests.space_to_depth_quant8_2 GeneratedTests.sqrt_ GeneratedTests.sqrt_1D_float_nnfw GeneratedTests.sqrt_2D_float_nnfw diff --git a/tests/nnapi/nnapi_gtest.skip.noarch.interp b/tests/nnapi/nnapi_gtest.skip.noarch.interp index 
08118ca..a0ae9d3 100644 --- a/tests/nnapi/nnapi_gtest.skip.noarch.interp +++ b/tests/nnapi/nnapi_gtest.skip.noarch.interp @@ -188,6 +188,7 @@ GeneratedTests.hashtable_lookup_quant8 GeneratedTests.l2_normalization GeneratedTests.l2_normalization_2 GeneratedTests.l2_normalization_large +GeneratedTests.l2_normalization_quant8_nnfw GeneratedTests.l2_pool_float GeneratedTests.l2_pool_float_2 GeneratedTests.l2_pool_float_large @@ -312,6 +313,12 @@ GeneratedTests.pack_ex_2D_int_2 GeneratedTests.pack_ex_dynamic_nnfw GeneratedTests.pad_dynamic_nnfw GeneratedTests.pad_quant8_nnfw +GeneratedTests.pad_v2_1_float +GeneratedTests.pad_v2_1_quant8 +GeneratedTests.pad_v2_all_dims +GeneratedTests.pad_v2_all_dims_quant8 +GeneratedTests.pad_v2_low_rank +GeneratedTests.pad_v2_low_rank_quant8 GeneratedTests.pow_2D_float_nnfw GeneratedTests.pow_broadcast_float_nnfw GeneratedTests.pow_broadcast_float_nnfw_2 @@ -331,6 +338,15 @@ GeneratedTests.prelu_weight_as_input_quant8 GeneratedTests.prelu_weight_as_input_quant8_2 GeneratedTests.prelu_weight_as_input_quant8_3 GeneratedTests.prelu_weight_as_input_quant8_4 +GeneratedTests.quantize_quant8 +GeneratedTests.quantize_quant8_2 +GeneratedTests.quantize_quant8_3 +GeneratedTests.quantize_quant8_4 +GeneratedTests.quantize_quant8_5 +GeneratedTests.quantize_quant8_6 +GeneratedTests.quantize_quant8_7 +GeneratedTests.quantize_quant8_8 +GeneratedTests.quantize_zero_sized GeneratedTests.range_ex_float_1 GeneratedTests.range_ex_float_1_all_constant_inputs GeneratedTests.range_ex_float_1_dynamic_nnfw @@ -407,6 +423,7 @@ GeneratedTests.relu_quant8_2 GeneratedTests.reshape_dynamic_nnfw GeneratedTests.resize_bilinear GeneratedTests.resize_bilinear_2 +GeneratedTests.resize_bilinear_quant8_nnfw GeneratedTests.reverse_ex_1d GeneratedTests.reverse_ex_3d GeneratedTests.reverse_ex_dynamic_1D @@ -499,6 +516,17 @@ GeneratedTests.split_quant8_2 GeneratedTests.split_quant8_2_relaxed GeneratedTests.split_quant8_3 GeneratedTests.split_quant8_4 +GeneratedTests.split_v_ex_1D_float_1_nnfw +GeneratedTests.split_v_ex_1D_float_2_nnfw +GeneratedTests.split_v_ex_1D_int32_nnfw +GeneratedTests.split_v_ex_4D_float_1_nnfw +GeneratedTests.split_v_ex_4D_float_2_nnfw +GeneratedTests.split_v_ex_4D_float_3_nnfw +GeneratedTests.split_v_ex_4D_float_4_nnfw +GeneratedTests.split_v_ex_4D_int32_1_nnfw +GeneratedTests.split_v_ex_4D_int32_2_nnfw +GeneratedTests.split_v_ex_4D_int32_3_nnfw +GeneratedTests.split_v_ex_4D_int32_4_nnfw GeneratedTests.sqrt_ GeneratedTests.sqrt_1D_float_nnfw GeneratedTests.sqrt_2D_float_nnfw @@ -518,6 +546,7 @@ GeneratedTests.squeeze_float_1 GeneratedTests.squeeze_float_1_relaxed GeneratedTests.squeeze_quant8_1 GeneratedTests.squeeze_relaxed +GeneratedTests.stateless_random_uniform_ex_nnfw GeneratedTests.strided_slice GeneratedTests.strided_slice_dynamic_nnfw GeneratedTests.strided_slice_float_1 diff --git a/tests/nnapi/nnapi_gtest.skip.x86_64-linux.cpu b/tests/nnapi/nnapi_gtest.skip.x86_64-linux.cpu index 3cce4f3..e98007e 100644 --- a/tests/nnapi/nnapi_gtest.skip.x86_64-linux.cpu +++ b/tests/nnapi/nnapi_gtest.skip.x86_64-linux.cpu @@ -1,7 +1,4 @@ GeneratedTests.abs_ -GeneratedTests.batch_to_space -GeneratedTests.batch_to_space_float_1 -GeneratedTests.batch_to_space_quant8_1 GeneratedTests.cast_float16_to_float16 GeneratedTests.cast_float16_to_float32 GeneratedTests.cast_float16_to_float32_relaxed @@ -38,9 +35,6 @@ GeneratedTests.gather_float16_8 GeneratedTests.hashtable_lookup_float GeneratedTests.hashtable_lookup_float_4D_nnfw GeneratedTests.hashtable_lookup_quant8 
-GeneratedTests.l2_normalization -GeneratedTests.l2_normalization_2 -GeneratedTests.l2_normalization_large GeneratedTests.l2_pool_float GeneratedTests.l2_pool_float_2 GeneratedTests.l2_pool_float_large @@ -79,7 +73,6 @@ GeneratedTests.minimum_simple_quant8 GeneratedTests.neg GeneratedTests.neg_3D_int_nnfw GeneratedTests.neg_4D_int_nnfw -GeneratedTests.pad_quant8_nnfw GeneratedTests.prelu GeneratedTests.prelu_broadcast_float_1_nnfw GeneratedTests.prelu_broadcast_quant8_1_nnfw @@ -94,6 +87,11 @@ GeneratedTests.prelu_weight_as_input_quant8 GeneratedTests.prelu_weight_as_input_quant8_2 GeneratedTests.prelu_weight_as_input_quant8_3 GeneratedTests.prelu_weight_as_input_quant8_4 +GeneratedTests.quantize_quant8_5 +GeneratedTests.quantize_quant8_6 +GeneratedTests.quantize_quant8_7 +GeneratedTests.quantize_quant8_8 +GeneratedTests.quantize_zero_sized GeneratedTests.reduce_max_quant8 GeneratedTests.reduce_max_quant8_1_nnfw GeneratedTests.reduce_max_quant8_2 @@ -108,14 +106,10 @@ GeneratedTests.relu1_float_1 GeneratedTests.relu1_float_2 GeneratedTests.relu1_quant8_1 GeneratedTests.relu1_quant8_2 -GeneratedTests.relu6_float_1 -GeneratedTests.relu6_float_2 GeneratedTests.relu6_quant8_1 GeneratedTests.relu6_quant8_2 GeneratedTests.relu_quant8_1 GeneratedTests.relu_quant8_2 -GeneratedTests.resize_bilinear -GeneratedTests.resize_bilinear_2 GeneratedTests.rnn GeneratedTests.rnn_state GeneratedTests.rsqrt @@ -125,15 +119,9 @@ GeneratedTests.select_v1_2_one_dim_quant8 GeneratedTests.select_v1_2_two_dim_quant8 GeneratedTests.slice_5 GeneratedTests.slice_6 -GeneratedTests.slice_7 GeneratedTests.slice_8 GeneratedTests.slice_zero_sized GeneratedTests.slice_zero_sized_quant8 -GeneratedTests.space_to_depth_float_1 -GeneratedTests.space_to_depth_float_2 -GeneratedTests.space_to_depth_float_3 -GeneratedTests.space_to_depth_quant8_1 -GeneratedTests.space_to_depth_quant8_2 GeneratedTests.sqrt_ GeneratedTests.sqrt_1D_float_nnfw GeneratedTests.sqrt_2D_float_nnfw diff --git a/tests/nnapi/specs/Ex/split_v_ex_1D_float_1_nnfw.mod.py b/tests/nnapi/specs/Ex/split_v_ex_1D_float_1_nnfw.mod.py new file mode 100644 index 0000000..6a2b716 --- /dev/null +++ b/tests/nnapi/specs/Ex/split_v_ex_1D_float_1_nnfw.mod.py @@ -0,0 +1,47 @@ +# model +model = Model() +i1 = Input("op1", "TENSOR_FLOAT32", "{8}") +size_splits = Input("size_splits", "TENSOR_INT32", "{8}") +split_dim = Input("split_dim", "TENSOR_INT32", "{1}") +num_splits = Int32Scalar("num_splits", 8) + +i2 = Output("op2", "TENSOR_FLOAT32", "{1}") +i3 = Output("op3", "TENSOR_FLOAT32", "{1}") +i4 = Output("op4", "TENSOR_FLOAT32", "{1}") +i5 = Output("op5", "TENSOR_FLOAT32", "{1}") +i6 = Output("op6", "TENSOR_FLOAT32", "{1}") +i7 = Output("op7", "TENSOR_FLOAT32", "{1}") +i8 = Output("op8", "TENSOR_FLOAT32", "{1}") +i9 = Output("op9", "TENSOR_FLOAT32", "{1}") + +model = model.Operation("SPLIT_V_EX", i1, size_splits, split_dim, num_splits).To([i2, i3, i4, i5, i6, i7, i8, i9]) + +# Example 1. 
Input in operand 0, +input0 = {i1: # input 0 + [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0], + size_splits: + [1, 1, 1, 1, 1, 1, 1, 1], + split_dim: + [0] + } + +output0 = { + i2: # output 0 + [1.0], + i3: # output 1 + [2.0], + i4: # output 2 + [3.0], + i5: # output 3 + [4.0], + i6: # output 4 + [5.0], + i7: # output 5 + [6.0], + i8: # output 6 + [7.0], + i9: # output 7 + [8.0]} + +# Instantiate an example +Example((input0, output0)) diff --git a/tests/nnapi/specs/Ex/split_v_ex_1D_float_2_nnfw.mod.py b/tests/nnapi/specs/Ex/split_v_ex_1D_float_2_nnfw.mod.py new file mode 100644 index 0000000..6224852 --- /dev/null +++ b/tests/nnapi/specs/Ex/split_v_ex_1D_float_2_nnfw.mod.py @@ -0,0 +1,25 @@ +# model +input0 = Input("input0", "TENSOR_FLOAT32", "{12}") +size_splits = Input("size_splits", "TENSOR_INT32", "{3}") +split_dim = Input("split_dim", "TENSOR_INT32", "{1}") +num_splits = Int32Scalar("num_splits",3); + +output0 = Output("output0", "TENSOR_FLOAT32", "{3}") +output1 = Output("output1", "TENSOR_FLOAT32", "{5}") +output2 = Output("output2", "TENSOR_FLOAT32", "{4}") + +model = Model().Operation("SPLIT_V_EX", input0, size_splits, split_dim, num_splits).To((output0, output1, output2)) + +# Example 1. +input_dict = { + input0: [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0], + size_splits: [3, 5, 4], + split_dim: [0] +} +output_dict = { + output0: [1.0, 2.0, 3.0], + output1: [4.0, 5.0, 6.0, 7.0, 8.0], + output2: [9.0, 10.0, 11.0, 12.0] +} + +Example((input_dict, output_dict)) diff --git a/tests/nnapi/specs/Ex/split_v_ex_1D_int32_nnfw.mod.py b/tests/nnapi/specs/Ex/split_v_ex_1D_int32_nnfw.mod.py new file mode 100644 index 0000000..2dea4d6 --- /dev/null +++ b/tests/nnapi/specs/Ex/split_v_ex_1D_int32_nnfw.mod.py @@ -0,0 +1,47 @@ +# model +model = Model() +i1 = Input("op1", "TENSOR_INT32", "{8}") +size_splits = Input("size_splits", "TENSOR_INT32", "{8}") +split_dim = Input("split_dim", "TENSOR_INT32", "{1}") +num_splits = Int32Scalar("num_splits", 8) + +i2 = Output("op2", "TENSOR_INT32", "{1}") +i3 = Output("op3", "TENSOR_INT32", "{1}") +i4 = Output("op4", "TENSOR_INT32", "{1}") +i5 = Output("op5", "TENSOR_INT32", "{1}") +i6 = Output("op6", "TENSOR_INT32", "{1}") +i7 = Output("op7", "TENSOR_INT32", "{1}") +i8 = Output("op8", "TENSOR_INT32", "{1}") +i9 = Output("op9", "TENSOR_INT32", "{1}") + +model = model.Operation("SPLIT_V_EX", i1, size_splits, split_dim, num_splits).To([i2, i3, i4, i5, i6, i7, i8, i9]) + +# Example 1. 
Input in operand 0, +input0 = {i1: # input 0 + [1, 2, 3, 4, 5, 6, 7, 8], + size_splits: + [1, 1, 1, 1, 1, 1, 1, 1], + split_dim: + [0] + } + +output0 = { + i2: # output 0 + [1], + i3: # output 1 + [2], + i4: # output 2 + [3], + i5: # output 3 + [4], + i6: # output 4 + [5], + i7: # output 5 + [6], + i8: # output 6 + [7], + i9: # output 7 + [8]} + +# Instantiate an example +Example((input0, output0)) diff --git a/tests/nnapi/specs/Ex/split_v_ex_4D_float_1_nnfw.mod.py b/tests/nnapi/specs/Ex/split_v_ex_4D_float_1_nnfw.mod.py new file mode 100644 index 0000000..c53ae1c --- /dev/null +++ b/tests/nnapi/specs/Ex/split_v_ex_4D_float_1_nnfw.mod.py @@ -0,0 +1,28 @@ +# model +model = Model() +i1 = Input("op1", "TENSOR_FLOAT32", "{2,2,2,2}") +size_splits = Input("size_splits", "TENSOR_INT32", "{2}") +split_dim = Input("split_dim", "TENSOR_INT32", "{1}") +num_splits = Int32Scalar("num_splits", 2) + +i2 = Output("op2", "TENSOR_FLOAT32", "{1,2,2,2}") +i3 = Output("op3", "TENSOR_FLOAT32", "{1,2,2,2}") +model = model.Operation("SPLIT_V_EX", i1, size_splits, split_dim, num_splits).To([i2, i3]) + +# Example 1. Input in operand 0, +input0 = {i1: # input 0 + [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0], + size_splits: + [8, 8], + split_dim: + [0] + } + +output0 = { + i2: # output 0 + [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0], + i3: # output 1 + [9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0]} + +# Instantiate an example +Example((input0, output0)) diff --git a/tests/nnapi/specs/Ex/split_v_ex_4D_float_2_nnfw.mod.py b/tests/nnapi/specs/Ex/split_v_ex_4D_float_2_nnfw.mod.py new file mode 100644 index 0000000..593412d --- /dev/null +++ b/tests/nnapi/specs/Ex/split_v_ex_4D_float_2_nnfw.mod.py @@ -0,0 +1,27 @@ +# model +model = Model() +i1 = Input("op1", "TENSOR_FLOAT32", "{2,2,2,2}") +size_splits = Input("size_splits", "TENSOR_INT32", "{2}") +split_dim = Input("split_dim", "TENSOR_INT32", "{1}") +num_splits = Int32Scalar("num_splits", 2) + +i2 = Output("op2", "TENSOR_FLOAT32", "{2,2,2,1}") +i3 = Output("op3", "TENSOR_FLOAT32", "{2,2,2,1}") +model = model.Operation("SPLIT_V_EX", i1, size_splits, split_dim, num_splits).To([i2, i3]) + +# Example 1. Input in operand 0, +input0 = {i1: # input 0 + [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0], + size_splits: + [8, 8], + split_dim: + [3]} + +output0 = { + i2: # output 0 + [1.0, 3.0, 5.0, 7.0, 9.0, 11.0, 13.0, 15.0], + i3: # output 1 + [2.0, 4.0, 6.0, 8.0, 10.0, 12.0, 14.0, 16.0]} + +# Instantiate an example +Example((input0, output0)) diff --git a/tests/nnapi/specs/Ex/split_v_ex_4D_float_3_nnfw.mod.py b/tests/nnapi/specs/Ex/split_v_ex_4D_float_3_nnfw.mod.py new file mode 100644 index 0000000..ef77536 --- /dev/null +++ b/tests/nnapi/specs/Ex/split_v_ex_4D_float_3_nnfw.mod.py @@ -0,0 +1,28 @@ +# model +model = Model() +i1 = Input("op1", "TENSOR_FLOAT32", "{2,2,2,2}") +size_splits = Input("size_splits", "TENSOR_INT32", "{2}") +split_dim = Input("split_dim", "TENSOR_INT32", "{1}") +num_splits = Int32Scalar("num_splits", 2) + +i2 = Output("op2", "TENSOR_FLOAT32", "{1,2,2,2}") +i3 = Output("op3", "TENSOR_FLOAT32", "{1,2,2,2}") +model = model.Operation("SPLIT_V_EX", i1, size_splits, split_dim, num_splits).To([i2, i3]) + +# Example 1. 
Input in operand 0, +input0 = {i1: # input 0 + [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0], + size_splits: + [8, 8], + split_dim: + [-4] + } + +output0 = { + i2: # output 0 + [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0], + i3: # output 1 + [9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0]} + +# Instantiate an example +Example((input0, output0)) diff --git a/tests/nnapi/specs/Ex/split_v_ex_4D_float_4_nnfw.mod.py b/tests/nnapi/specs/Ex/split_v_ex_4D_float_4_nnfw.mod.py new file mode 100644 index 0000000..b995f9e --- /dev/null +++ b/tests/nnapi/specs/Ex/split_v_ex_4D_float_4_nnfw.mod.py @@ -0,0 +1,32 @@ +# model +model = Model() +i1 = Input("op1", "TENSOR_FLOAT32", "{4,1,1,8}") +size_splits = Input("size_splits", "TENSOR_INT32", "{3}") +split_dim = Input("split_dim", "TENSOR_INT32", "{1}") +num_splits = Int32Scalar("num_splits", 3) + +i2 = Output("op2", "TENSOR_FLOAT32", "{4,1,1,2}") +i3 = Output("op3", "TENSOR_FLOAT32", "{4,1,1,4}") +i4 = Output("op4", "TENSOR_FLOAT32", "{4,1,1,2}") + +model = model.Operation("SPLIT_V_EX", i1, size_splits, split_dim, num_splits).To([i2, i3, i4]) + +# Example 1. Input in operand 0, +input0 = {i1: # input 0 + [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, 32.0], + size_splits: + [2,4,2], + split_dim: + [3] + } + +output0 = { + i2: # output 0 + [1.0, 2.0, 9.0, 10.0, 17.0, 18.0, 25.0, 26.0], + i3: # output 1 + [3.0, 4.0, 5.0, 6.0, 11.0, 12.0, 13.0, 14.0, 19.0, 20.0, 21.0, 22.0, 27.0, 28.0, 29.0, 30.0], + i4: [7.0, 8.0, 15.0, 16.0, 23.0, 24.0, 31.0, 32.0]} + +# Instantiate an example +Example((input0, output0)) diff --git a/tests/nnapi/specs/Ex/split_v_ex_4D_int32_1_nnfw.mod.py b/tests/nnapi/specs/Ex/split_v_ex_4D_int32_1_nnfw.mod.py new file mode 100644 index 0000000..f544d0a --- /dev/null +++ b/tests/nnapi/specs/Ex/split_v_ex_4D_int32_1_nnfw.mod.py @@ -0,0 +1,27 @@ +# model +model = Model() +i1 = Input("op1", "TENSOR_INT32", "{2,2,2,2}") +size_splits = Input("size_splits", "TENSOR_INT32", "{2}") +split_dim = Input("split_dim", "TENSOR_INT32", "{1}") +num_splits = Int32Scalar("num_splits", 2) + +i2 = Output("op2", "TENSOR_INT32", "{1,2,2,2}") +i3 = Output("op3", "TENSOR_INT32", "{1,2,2,2}") +model = model.Operation("SPLIT_V_EX", i1, size_splits, split_dim, num_splits).To([i2, i3]) + +# Example 1. Input in operand 0, +input0 = {i1: # input 0 + [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16], + size_splits: + [8, 8], + split_dim: + [0]} + +output0 = { + i2: # output 0 + [1, 2, 3, 4, 5, 6, 7, 8], + i3: # output 1 + [9, 10, 11, 12, 13, 14, 15, 16]} + +# Instantiate an example +Example((input0, output0)) diff --git a/tests/nnapi/specs/Ex/split_v_ex_4D_int32_2_nnfw.mod.py b/tests/nnapi/specs/Ex/split_v_ex_4D_int32_2_nnfw.mod.py new file mode 100644 index 0000000..5ed0165 --- /dev/null +++ b/tests/nnapi/specs/Ex/split_v_ex_4D_int32_2_nnfw.mod.py @@ -0,0 +1,28 @@ +# model +model = Model() +i1 = Input("op1", "TENSOR_INT32", "{2,2,2,2}") +size_splits = Input("size_splits", "TENSOR_INT32", "{2}") +split_dim = Input("split_dim", "TENSOR_INT32", "{1}") +num_splits = Int32Scalar("num_splits", 2) + +i2 = Output("op2", "TENSOR_INT32", "{2,1,2,2}") +i3 = Output("op3", "TENSOR_INT32", "{2,1,2,2}") + +model = model.Operation("SPLIT_V_EX", i1, size_splits, split_dim, num_splits).To([i2, i3]) + +# Example 1. 
Input in operand 0, +input0 = {i1: # input 0 + [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16], + size_splits: + [8, 8], + split_dim: + [1]} + +output0 = { + i2: # output 0 + [1, 2, 3, 4, 9, 10, 11, 12], + i3: # output 1 + [5, 6, 7, 8, 13, 14, 15, 16]} + +# Instantiate an example +Example((input0, output0)) diff --git a/tests/nnapi/specs/Ex/split_v_ex_4D_int32_3_nnfw.mod.py b/tests/nnapi/specs/Ex/split_v_ex_4D_int32_3_nnfw.mod.py new file mode 100644 index 0000000..99f3b4f --- /dev/null +++ b/tests/nnapi/specs/Ex/split_v_ex_4D_int32_3_nnfw.mod.py @@ -0,0 +1,28 @@ +# model +model = Model() +i1 = Input("op1", "TENSOR_INT32", "{2,2,2,2}") + +size_splits = Input("size_splits", "TENSOR_INT32", "{2}") +split_dim = Input("split_dim", "TENSOR_INT32", "{1}") +num_splits = Int32Scalar("num_splits", 2) + +i2 = Output("op2", "TENSOR_INT32", "{2,2,1,2}") +i3 = Output("op3", "TENSOR_INT32", "{2,2,1,2}") +model = model.Operation("SPLIT_V_EX", i1, size_splits, split_dim, num_splits).To([i2, i3]) + +# Example 1. Input in operand 0, +input0 = {i1: # input 0 + [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16], + size_splits: + [8, 8], + split_dim: + [2]} + +output0 = { + i2: # output 0 + [1, 2, 5, 6, 9, 10, 13, 14], + i3: # output 1 + [3, 4, 7, 8, 11, 12, 15, 16]} + +# Instantiate an example +Example((input0, output0)) diff --git a/tests/nnapi/specs/Ex/split_v_ex_4D_int32_4_nnfw.mod.py b/tests/nnapi/specs/Ex/split_v_ex_4D_int32_4_nnfw.mod.py new file mode 100644 index 0000000..38ae4da --- /dev/null +++ b/tests/nnapi/specs/Ex/split_v_ex_4D_int32_4_nnfw.mod.py @@ -0,0 +1,28 @@ +# model +model = Model() +i1 = Input("op1", "TENSOR_INT32", "{2,2,2,2}") + +size_splits = Input("size_splits", "TENSOR_INT32", "{2}") +split_dim = Input("split_dim", "TENSOR_INT32", "{1}") +num_splits = Int32Scalar("num_splits", 2) + +i2 = Output("op2", "TENSOR_INT32", "{2,2,2,1}") +i3 = Output("op3", "TENSOR_INT32", "{2,2,2,1}") +model = model.Operation("SPLIT_V_EX", i1, size_splits, split_dim, num_splits).To([i2, i3]) + +# Example 1. Input in operand 0, +input0 = {i1: # input 0 + [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16], + size_splits: + [8, 8], + split_dim: + [3]} + +output0 = { + i2: # output 0 + [1, 3, 5, 7, 9, 11, 13, 15], + i3: # output 1 + [2, 4, 6, 8, 10, 12, 14, 16]} + +# Instantiate an example +Example((input0, output0)) diff --git a/tests/nnapi/specs/Ex/stateless_random_uniform_ex_nnfw.mod.py b/tests/nnapi/specs/Ex/stateless_random_uniform_ex_nnfw.mod.py new file mode 100644 index 0000000..9c29555 --- /dev/null +++ b/tests/nnapi/specs/Ex/stateless_random_uniform_ex_nnfw.mod.py @@ -0,0 +1,40 @@ +# +# Copyright (C) 2020 Samsung Electronics Co., Ltd. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + +model = Model() + +i1 = Input("input1", "TENSOR_INT32", "{1}") +i2 = Input("input2", "TENSOR_INT32", "{2}") + +o1 = Output("output0", "TENSOR_FLOAT32", "{10}") + +model = model.Operation("STATELESS_RANDOM_UNIFORM_EX", i1, i2).To(o1) + +# Example. 
+input0 = { + i1 : [10], #input1 + i2 : [1, 1] #input2 +} + +output0 = { + o1: [0.09827709, 0.14063823, 0.4553436, + 0.10658443, 0.2075988, 0.30841374, + 0.7489233, 0.90613365, 0.63342273, + 0.37854457] +} + +Example((input0, output0)) diff --git a/tests/nnapi/specs/V1_0/l2_normalization_quant8_nnfw.mod.py b/tests/nnapi/specs/V1_0/l2_normalization_quant8_nnfw.mod.py new file mode 100644 index 0000000..ca3770c --- /dev/null +++ b/tests/nnapi/specs/V1_0/l2_normalization_quant8_nnfw.mod.py @@ -0,0 +1,30 @@ +# +# Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved +# Copyright (C) 2017 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +model = Model() +in0 = Input("op1", "TENSOR_QUANT8_ASYMM", "{1, 1, 1, 3}, 2e-7, 128") +out0 = Output("op2", "TENSOR_QUANT8_ASYMM", "{1, 1, 1, 3}, 2e-7, 128") +model = model.Operation("L2_NORMALIZATION", in0).To(out0) + +# Example 1. Input in operand 0, +input0 = {in0: # input 0 + [0, 5, 12]} +output0 = {out0: # output 0 + [51, 54, 58]} + +# Instantiate an example +Example((input0, output0)) diff --git a/tests/nnapi/specs/V1_0/resize_bilinear_quant8_nnfw.mod.py b/tests/nnapi/specs/V1_0/resize_bilinear_quant8_nnfw.mod.py new file mode 100644 index 0000000..182e046 --- /dev/null +++ b/tests/nnapi/specs/V1_0/resize_bilinear_quant8_nnfw.mod.py @@ -0,0 +1,18 @@ +# model +model = Model() +i1 = Input("op1", "TENSOR_QUANT8_ASYMM", "{1, 2, 2, 1}, 0.8, 5") +i2 = Output("op2", "TENSOR_QUANT8_ASYMM", "{1, 3, 3, 1}, 0.8, 5") +w = Int32Scalar("width", 3) +h = Int32Scalar("height", 3) +model = model.Operation("RESIZE_BILINEAR", i1, w, h).To(i2) + +# Example 1. 
Input in operand 0, +input0 = {i1: # input 0 + [1, 1, 2, 2]} +output0 = {i2: # output 0 + [1, 1, 1, + 2, 2, 2, + 2, 2, 2]} + +# Instantiate an example +Example((input0, output0)) diff --git a/tests/nnapi/specs/skip/V1_2/pad_v2_1_float.mod.py b/tests/nnapi/specs/V1_2/pad_v2_1_float.mod.py similarity index 100% rename from tests/nnapi/specs/skip/V1_2/pad_v2_1_float.mod.py rename to tests/nnapi/specs/V1_2/pad_v2_1_float.mod.py diff --git a/tests/nnapi/specs/skip/V1_2/pad_v2_1_quant8.mod.py b/tests/nnapi/specs/V1_2/pad_v2_1_quant8.mod.py similarity index 100% rename from tests/nnapi/specs/skip/V1_2/pad_v2_1_quant8.mod.py rename to tests/nnapi/specs/V1_2/pad_v2_1_quant8.mod.py diff --git a/tests/nnapi/specs/skip/V1_2/pad_v2_all_dims.mod.py b/tests/nnapi/specs/V1_2/pad_v2_all_dims.mod.py similarity index 100% rename from tests/nnapi/specs/skip/V1_2/pad_v2_all_dims.mod.py rename to tests/nnapi/specs/V1_2/pad_v2_all_dims.mod.py diff --git a/tests/nnapi/specs/skip/V1_2/pad_v2_all_dims_quant8.mod.py b/tests/nnapi/specs/V1_2/pad_v2_all_dims_quant8.mod.py similarity index 100% rename from tests/nnapi/specs/skip/V1_2/pad_v2_all_dims_quant8.mod.py rename to tests/nnapi/specs/V1_2/pad_v2_all_dims_quant8.mod.py diff --git a/tests/nnapi/specs/skip/V1_2/pad_v2_low_rank.mod.py b/tests/nnapi/specs/V1_2/pad_v2_low_rank.mod.py similarity index 100% rename from tests/nnapi/specs/skip/V1_2/pad_v2_low_rank.mod.py rename to tests/nnapi/specs/V1_2/pad_v2_low_rank.mod.py diff --git a/tests/nnapi/specs/skip/V1_2/pad_v2_low_rank_quant8.mod.py b/tests/nnapi/specs/V1_2/pad_v2_low_rank_quant8.mod.py similarity index 100% rename from tests/nnapi/specs/skip/V1_2/pad_v2_low_rank_quant8.mod.py rename to tests/nnapi/specs/V1_2/pad_v2_low_rank_quant8.mod.py diff --git a/tests/nnapi/specs/skip/V1_2/quantize.mod.py b/tests/nnapi/specs/V1_2/quantize.mod.py similarity index 100% rename from tests/nnapi/specs/skip/V1_2/quantize.mod.py rename to tests/nnapi/specs/V1_2/quantize.mod.py diff --git a/tests/nnfw_api/CMakeLists.txt b/tests/nnfw_api/CMakeLists.txt index e8d46cb..6e0696d 100644 --- a/tests/nnfw_api/CMakeLists.txt +++ b/tests/nnfw_api/CMakeLists.txt @@ -21,5 +21,6 @@ target_include_directories(${RUNTIME_NNFW_API_TEST} PRIVATE ${RUNTIME_NNFW_API_T target_link_libraries(${RUNTIME_NNFW_API_TEST} nnfw-dev) target_link_libraries(${RUNTIME_NNFW_API_TEST} gtest gmock) target_link_libraries(${RUNTIME_NNFW_API_TEST} ${LIB_PTHREAD} dl) +target_link_libraries(${RUNTIME_NNFW_API_TEST} circle_schema) install(TARGETS ${RUNTIME_NNFW_API_TEST} DESTINATION unittest_standalone) diff --git a/tests/nnfw_api/README.md b/tests/nnfw_api/README.md index 25fbc6e..7e14fc4 100644 --- a/tests/nnfw_api/README.md +++ b/tests/nnfw_api/README.md @@ -6,6 +6,8 @@ This test framework consists of 3 kinds of tests: - Validation Tests (fixture format `ValidationTest???`) - Basic positive/negative tests with simple nnpackages +- Generated Model Tests (fixture format `GenModelTest`) + - One-time inference test with variety of generated models - Regression Tests (fixture format `RegressionTest`, test format `GitHub###`) - When you see bugs/crashes while using those API - Must refer a github issue diff --git a/tests/nnfw_api/src/CircleGen.h b/tests/nnfw_api/src/CircleGen.h new file mode 100644 index 0000000..899c800 --- /dev/null +++ b/tests/nnfw_api/src/CircleGen.h @@ -0,0 +1,201 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. 
All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __NNFW_API_TEST_CIRCLE_GEN_H__ +#define __NNFW_API_TEST_CIRCLE_GEN_H__ + +#include + +#include + +/** + * @brief Class for storing flatbuffer buffer + * + * This is a simple wrapper for a finished FlatBufferBuilder. It owns the buffer and a user can + * get the buffer pointer and size. + */ +class CircleBuffer +{ +public: + CircleBuffer() = default; + explicit CircleBuffer(flatbuffers::FlatBufferBuilder &&fbb) : _fbb{std::move(fbb)} + { + _fbb.Finished(); // The build must have been finished, so check that here + } + + uint8_t *buffer() { return _fbb.GetBufferPointer(); } + size_t size() { return _fbb.GetSize(); } + +private: + flatbuffers::FlatBufferBuilder _fbb; +}; + +/** + * @brief Circle flatbuffer file generator + * + * This is a helper class for generating a circle file. + * + */ +class CircleGen +{ +public: + struct TensorParams + { + std::vector shape; + circle::TensorType tensor_type = circle::TensorType::TensorType_FLOAT32; + uint32_t buffer = 0; + std::string name; + }; + + struct OperatorParams + { + std::vector inputs; + std::vector outputs; + int version = 1; + }; + +public: + CircleGen() + { + // 0th buffer is always the empty buffer for non-const tensors + addBuffer(nullptr, 0); + } + + template uint32_t addBuffer(const std::vector &buf_vec) + { + auto buf = reinterpret_cast(buf_vec.data()); + auto size = buf_vec.size() * sizeof(T); + return addBuffer(buf, size); + } + + uint32_t addBuffer(const uint8_t *buf, size_t size) + { + uint32_t ind = _buffers.size(); + _buffers.emplace_back(buildBuffer(buf, size)); + return ind; + } + + uint32_t addTensor(const TensorParams &params) + { + int ind = _tensors.size(); + _tensors.emplace_back(buildTensor(params)); + return ind; + } + + void setInputsAndOutputs(const std::vector &inputs, const std::vector &outputs) + { + _inputs = inputs; + _outputs = outputs; + } + + CircleBuffer finish() + { + // TODO Support multiple subgraphs, for now only single subgraph model is supported.
+  CircleBuffer finish()
+  {
+    // TODO Support multiple subgraphs, for now only a single-subgraph model is supported.
+    std::vector<flatbuffers::Offset<circle::SubGraph>> subgraphs{buildSubGraph()};
+    auto model =
+        circle::CreateModelDirect(_fbb, 3, &_opcodes, &subgraphs, "CircleGen generated", &_buffers);
+    _fbb.Finish(model);
+    return CircleBuffer{std::move(_fbb)};
+  }
+
+  // ===== Add Operator methods begin =====
+
+  uint32_t addOperatorAdd(const OperatorParams &params, circle::ActivationFunctionType actfn)
+  {
+    auto options = circle::CreateAddOptions(_fbb, actfn).Union();
+    return addOperatorWithOptions(params, circle::BuiltinOperator_ADD,
+                                  circle::BuiltinOptions_AddOptions, options);
+  }
+
+  uint32_t addOperatorAveragePool2D(const OperatorParams &params, circle::Padding padding,
+                                    int stride_w, int stride_h, int filter_w, int filter_h,
+                                    circle::ActivationFunctionType actfn)
+  {
+    auto options =
+        circle::CreatePool2DOptions(_fbb, padding, stride_w, stride_h, filter_w, filter_h, actfn)
+            .Union();
+    return addOperatorWithOptions(params, circle::BuiltinOperator_AVERAGE_POOL_2D,
+                                  circle::BuiltinOptions_Pool2DOptions, options);
+  }
+
+  // NOTE Please add addOperator functions ABOVE this line
+  //
+  // % How to add a new addOperatorXXX function
+  // 0. Copy code from one of the existing addOperatorXXX functions
+  // 1. Change the function signature (need BuiltinOperator params)
+  // 2. Change enum BuiltinOperator
+  // 3. Change enum BuiltinOptions
+  // 4. Change CreateXXXOptions accordingly
+
+  // ===== Add Operator methods end =====
+
+private:
+  uint32_t addOperatorWithOptions(const OperatorParams &params, circle::BuiltinOperator opcode,
+                                  circle::BuiltinOptions options_type,
+                                  flatbuffers::Offset<void> options)
+  {
+    uint32_t opcode_ind = addOperatorCode(opcode);
+    auto op = circle::CreateOperatorDirect(_fbb, opcode_ind, &params.inputs, &params.outputs,
+                                           options_type, options);
+
+    uint32_t ind = _operators.size();
+    _operators.emplace_back(op);
+    return ind;
+  }
+
+  uint32_t addOperatorCode(circle::BuiltinOperator opcode)
+  {
+    // TODO If the same OperatorCode is registered already, just return it
+    uint32_t ind = _opcodes.size();
+    _opcodes.emplace_back(circle::CreateOperatorCode(_fbb, opcode));
+    return ind;
+  }
+
+  flatbuffers::Offset<circle::Buffer> buildBuffer(const uint8_t *buf, size_t size)
+  {
+    if (buf == nullptr && size == 0)
+      return circle::CreateBuffer(_fbb);
+    auto buffer = _fbb.CreateVector(buf, size);
+    return circle::CreateBuffer(_fbb, buffer);
+  }
+
+  flatbuffers::Offset<circle::Tensor> buildTensor(const TensorParams &params)
+  {
+    auto shape = _fbb.CreateVector(params.shape);
+    auto name = _fbb.CreateString(params.name);
+    return circle::CreateTensor(_fbb, shape, params.tensor_type, params.buffer, name,
+                                0 /* QuantParam */, false /* is_variable */, 0 /* sparsity */,
+                                0 /* shape_signature */);
+  }
+
+  flatbuffers::Offset<circle::SubGraph> buildSubGraph()
+  {
+    return circle::CreateSubGraphDirect(_fbb, &_tensors, &_inputs, &_outputs, &_operators, nullptr);
+  }
+
+private:
+  flatbuffers::FlatBufferBuilder _fbb{1024};
+  std::vector<flatbuffers::Offset<circle::Buffer>> _buffers;
+  std::vector<flatbuffers::Offset<circle::OperatorCode>> _opcodes;
+
+  // per-subgraph
+  std::vector<int32_t> _inputs;
+  std::vector<int32_t> _outputs;
+  std::vector<flatbuffers::Offset<circle::Tensor>> _tensors;
+  std::vector<flatbuffers::Offset<circle::Operator>> _operators;
+};
+
+#endif // __NNFW_API_TEST_CIRCLE_GEN_H__
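Following the recipe in the NOTE above, a new operator helper adds one public method on top of addOperatorWithOptions. The sketch below does this for CONCATENATION; it assumes the generated circle schema exposes BuiltinOperator_CONCATENATION, BuiltinOptions_ConcatenationOptions and CreateConcatenationOptions(builder, axis, fused_activation_function), as TFLite-derived schemas ordinarily do, so it is an illustrative sketch rather than code from the header above.

  uint32_t addOperatorConcatenation(const OperatorParams &params, int axis,
                                    circle::ActivationFunctionType actfn)
  {
    auto options = circle::CreateConcatenationOptions(_fbb, axis, actfn).Union();
    return addOperatorWithOptions(params, circle::BuiltinOperator_CONCATENATION,
                                  circle::BuiltinOptions_ConcatenationOptions, options);
  }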
diff --git a/tests/nnfw_api/src/GenModelTests.cc b/tests/nnfw_api/src/GenModelTests.cc
new file mode 100644
index 0000000..2bd839a
--- /dev/null
+++ b/tests/nnfw_api/src/GenModelTests.cc
@@ -0,0 +1,152 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <gtest/gtest.h>
+#include <nnfw_internal.h>
+
+#include <cstring>
+
+#include "CircleGen.h"
+#include "fixtures.h"
+
+/**
+ * @brief Generated Model test fixture for a one-time inference
+ *
+ * This fixture is for one-time inference tests with a variety of generated models.
+ * It is the user's responsibility to create @c _cbuf , @c _ref_inputs and @c _ref_outputs in the
+ * test body, which are the generated circle buffer, model input data and output data respectively.
+ * The rest (calling API functions for execution) is done by @c SetUp and @c TearDown .
+ *
+ */
+class GenModelTest : public ::testing::Test
+{
+protected:
+  void SetUp() override { NNFW_ENSURE_SUCCESS(nnfw_create_session(&_so.session)); }
+
+  void TearDown() override
+  {
+    NNFW_ENSURE_SUCCESS(nnfw_load_circle_from_buffer(_so.session, _cbuf.buffer(), _cbuf.size()));
+    NNFW_ENSURE_SUCCESS(nnfw_prepare(_so.session));
+
+    // In/Out buffer settings
+    {
+      uint32_t num_inputs;
+      NNFW_ENSURE_SUCCESS(nnfw_input_size(_so.session, &num_inputs));
+      _so.inputs.resize(num_inputs);
+      for (uint32_t ind = 0; ind < _so.inputs.size(); ind++)
+      {
+        nnfw_tensorinfo ti;
+        NNFW_ENSURE_SUCCESS(nnfw_input_tensorinfo(_so.session, ind, &ti));
+        uint64_t input_elements = num_elems(&ti);
+        _so.inputs[ind].resize(input_elements);
+
+        ASSERT_EQ(nnfw_set_input(_so.session, ind, ti.dtype, _so.inputs[ind].data(),
+                                 sizeof(float) * input_elements),
+                  NNFW_STATUS_NO_ERROR);
+      }
+
+      uint32_t num_outputs;
+      NNFW_ENSURE_SUCCESS(nnfw_output_size(_so.session, &num_outputs));
+      _so.outputs.resize(num_outputs);
+      for (uint32_t ind = 0; ind < _so.outputs.size(); ind++)
+      {
+        nnfw_tensorinfo ti;
+        NNFW_ENSURE_SUCCESS(nnfw_output_tensorinfo(_so.session, ind, &ti));
+        uint64_t output_elements = num_elems(&ti);
+        _so.outputs[ind].resize(output_elements);
+        ASSERT_EQ(nnfw_set_output(_so.session, ind, ti.dtype, _so.outputs[ind].data(),
+                                  sizeof(float) * output_elements),
+                  NNFW_STATUS_NO_ERROR);
+      }
+    }
+
+    // Set input values, run, and check output values
+    {
+      ASSERT_EQ(_so.inputs.size(), _ref_inputs.size());
+      for (uint32_t i = 0; i < _so.inputs.size(); i++)
+      {
+        // Fill the values
+        ASSERT_EQ(_so.inputs[i].size(), _ref_inputs[i].size());
+        memcpy(_so.inputs[i].data(), _ref_inputs[i].data(), _so.inputs[i].size() * sizeof(float));
+      }
+
+      NNFW_ENSURE_SUCCESS(nnfw_run(_so.session));
+
+      ASSERT_EQ(_so.outputs.size(), _ref_outputs.size());
+      for (uint32_t i = 0; i < _so.outputs.size(); i++)
+      {
+        // Check output tensor values
+        auto &ref_output = _ref_outputs[i];
+        auto &output = _so.outputs[i];
+        ASSERT_EQ(output.size(), ref_output.size());
+        for (uint32_t e = 0; e < ref_output.size(); e++)
+          ASSERT_FLOAT_EQ(ref_output[e], output[e]);
+      }
+    }
+
+    NNFW_ENSURE_SUCCESS(nnfw_close_session(_so.session));
+  }
+
+protected:
+  SessionObject _so;
+  CircleBuffer _cbuf;
+  std::vector<std::vector<float>> _ref_inputs;
+  std::vector<std::vector<float>> _ref_outputs;
+};
+
+TEST_F(GenModelTest, OneOp_Add_VarToConst)
+{
+  CircleGen cgen;
+  std::vector<float> rhs_data{5, 4, 7, 4};
+  uint32_t rhs_buf = cgen.addBuffer(rhs_data);
+  int lhs = cgen.addTensor({{1, 2, 2, 1}, circle::TensorType::TensorType_FLOAT32});
+  int rhs = cgen.addTensor({{1, 2, 2,
1}, circle::TensorType::TensorType_FLOAT32, rhs_buf}); + int out = cgen.addTensor({{1, 2, 2, 1}, circle::TensorType::TensorType_FLOAT32}); + cgen.addOperatorAdd({{lhs, rhs}, {out}}, circle::ActivationFunctionType_NONE); + cgen.setInputsAndOutputs({lhs}, {out}); + _cbuf = cgen.finish(); + + _ref_inputs = {{1, 3, 2, 4}}; + _ref_outputs = {{6, 7, 9, 8}}; +} + +TEST_F(GenModelTest, OneOp_Add_VarToVar) +{ + CircleGen cgen; + int lhs = cgen.addTensor({{1, 2, 2, 1}, circle::TensorType::TensorType_FLOAT32}); + int rhs = cgen.addTensor({{1, 2, 2, 1}, circle::TensorType::TensorType_FLOAT32}); + int out = cgen.addTensor({{1, 2, 2, 1}, circle::TensorType::TensorType_FLOAT32}); + cgen.addOperatorAdd({{lhs, rhs}, {out}}, circle::ActivationFunctionType_NONE); + cgen.setInputsAndOutputs({lhs, rhs}, {out}); + _cbuf = cgen.finish(); + + _ref_inputs = {{1, 3, 2, 4}, {5, 4, 7, 4}}; + _ref_outputs = {{6, 7, 9, 8}}; +} + +TEST_F(GenModelTest, OneOp_AvgPool2D) +{ + CircleGen cgen; + int in = cgen.addTensor({{1, 2, 2, 1}, circle::TensorType::TensorType_FLOAT32}); + int out = cgen.addTensor({{1, 1, 1, 1}, circle::TensorType::TensorType_FLOAT32}); + cgen.addOperatorAveragePool2D({{in}, {out}}, circle::Padding_SAME, 2, 2, 2, 2, + circle::ActivationFunctionType_NONE); + cgen.setInputsAndOutputs({in}, {out}); + _cbuf = cgen.finish(); + + _ref_inputs = {{1, 3, 2, 4}}; + _ref_outputs = {{2.5}}; +} diff --git a/tests/nnfw_api/src/ModelTestDynamicTensor.cc b/tests/nnfw_api/src/ModelTestDynamicTensor.cc index 2f9ef31..c1f4369 100644 --- a/tests/nnfw_api/src/ModelTestDynamicTensor.cc +++ b/tests/nnfw_api/src/ModelTestDynamicTensor.cc @@ -15,7 +15,7 @@ */ #include -#include +#include #include "common.h" #include "fixtures.h" @@ -67,22 +67,22 @@ protected: { NNFW_STATUS res = nnfw_set_input(_session, 0, NNFW_TYPE_TENSOR_INT32, new_shape.data(), sizeof(int) * new_shape.size()); - ASSERT_EQ(res, NNFW_STATUS_NO_ERROR); + NNFW_ENSURE_SUCCESS(res); res = nnfw_set_output(_session, 0, NNFW_TYPE_TENSOR_FLOAT32, actual_output->data(), sizeof(float) * actual_output_size); - ASSERT_EQ(res, NNFW_STATUS_NO_ERROR); + NNFW_ENSURE_SUCCESS(res); } void prepare_and_set_input_output(const std::vector &new_shape, int actual_output_size, std::vector *actual_output) { - ASSERT_EQ(nnfw_set_available_backends(_session, "cpu"), NNFW_STATUS_NO_ERROR); + NNFW_ENSURE_SUCCESS(nnfw_set_available_backends(_session, "cpu")); NNFW_STATUS res = NNFW_STATUS_ERROR; res = nnfw_prepare(_session); - ASSERT_EQ(res, NNFW_STATUS_NO_ERROR); + NNFW_ENSURE_SUCCESS(res); set_input_output(new_shape, actual_output_size, actual_output); // real test case should start from calling nnfw_run() @@ -102,11 +102,11 @@ protected: if (no_run_error) { - ASSERT_EQ(res, NNFW_STATUS_NO_ERROR); + NNFW_ENSURE_SUCCESS(res); // output shape check nnfw_tensorinfo info; - ASSERT_EQ(nnfw_output_tensorinfo(_session, 0, &info), NNFW_STATUS_NO_ERROR); + NNFW_ENSURE_SUCCESS(nnfw_output_tensorinfo(_session, 0, &info)); ASSERT_EQ(info.rank, new_shape.size()); for (uint32_t d = 0; d < info.rank; ++d) ASSERT_EQ(info.dims[d], new_shape[d]); @@ -137,7 +137,7 @@ TEST_F(TestDynamicTensorReshapeModelLoaded, reshape_to_3x2) // Do inference NNFW_STATUS res = nnfw_run(_session); - ASSERT_EQ(res, NNFW_STATUS_NO_ERROR); + NNFW_ENSURE_SUCCESS(res); // output value check for (int i = 0; i < expected.size(); ++i) @@ -163,10 +163,10 @@ TEST_F(TestDynamicTensorReshapeModelLoaded, neg_reshape_to_wrong_3x3) TEST_F(TestDynamicTensorReshapeModelLoaded, reshape_multiple_executions) { - 
ASSERT_EQ(nnfw_set_available_backends(_session, "cpu"), NNFW_STATUS_NO_ERROR); + NNFW_ENSURE_SUCCESS(nnfw_set_available_backends(_session, "cpu")); NNFW_STATUS res = nnfw_prepare(_session); - ASSERT_EQ(res, NNFW_STATUS_NO_ERROR); + NNFW_ENSURE_SUCCESS(res); std::vector new_shape; std::vector expected = {-1.5, -1.0, -0.5, 0.5, 1.0, 1.5}; @@ -184,10 +184,10 @@ TEST_F(TestDynamicTensorReshapeModelLoaded, reshape_multiple_executions) TEST_F(TestDynamicTensorReshapeModelLoaded, neg_reshape_multiple_executions) { - ASSERT_EQ(nnfw_set_available_backends(_session, "cpu"), NNFW_STATUS_NO_ERROR); + NNFW_ENSURE_SUCCESS(nnfw_set_available_backends(_session, "cpu")); NNFW_STATUS res = nnfw_prepare(_session); - ASSERT_EQ(res, NNFW_STATUS_NO_ERROR); + NNFW_ENSURE_SUCCESS(res); std::vector new_shape; std::vector expected = {-1.5, -1.0, -0.5, 0.5, 1.0, 1.5}; @@ -217,8 +217,8 @@ protected: const std::vector &input1, std::vector *actual_output, nnfw_tensorinfo input0_ti) { - ASSERT_EQ(nnfw_prepare(_session), NNFW_STATUS_NO_ERROR); - ASSERT_EQ(nnfw_set_input_tensorinfo(_session, 0, &input0_ti), NNFW_STATUS_NO_ERROR); + NNFW_ENSURE_SUCCESS(nnfw_prepare(_session)); + NNFW_ENSURE_SUCCESS(nnfw_set_input_tensorinfo(_session, 0, &input0_ti)); ASSERT_EQ(nnfw_set_input(_session, 0, NNFW_TYPE_TENSOR_FLOAT32, input0.data(), sizeof(float) * input0.size()), @@ -250,7 +250,7 @@ protected: */ TEST_F(TestInputUnknownDimInputConcatModelLoaded, concat_input0_to_2x3) { - ASSERT_EQ(nnfw_set_available_backends(_session, "cpu"), NNFW_STATUS_NO_ERROR); + NNFW_ENSURE_SUCCESS(nnfw_set_available_backends(_session, "cpu")); const std::vector input0 = {1, 2, 3}; // of shape [1, 3] const std::vector input1 = {4, 5, 6, 7, 8, 9}; // of shape [2, 3] @@ -260,14 +260,14 @@ TEST_F(TestInputUnknownDimInputConcatModelLoaded, concat_input0_to_2x3) // input reshaping to [1, 3] nnfw_tensorinfo ti = {NNFW_TYPE_TENSOR_FLOAT32, 2, {1, 3}}; - ASSERT_EQ(nnfw_set_input_tensorinfo(_session, 0, &ti), NNFW_STATUS_NO_ERROR); - ASSERT_EQ(nnfw_prepare(_session), NNFW_STATUS_NO_ERROR); + NNFW_ENSURE_SUCCESS(nnfw_set_input_tensorinfo(_session, 0, &ti)); + NNFW_ENSURE_SUCCESS(nnfw_prepare(_session)); set_input_output(_session, input0, input1, actual_output); // Do inference NNFW_STATUS res = nnfw_run(_session); - ASSERT_EQ(res, NNFW_STATUS_NO_ERROR); + NNFW_ENSURE_SUCCESS(res); // output value check for (int i = 0; i < expected.size(); ++i) @@ -291,7 +291,7 @@ TEST_F(TestInputUnknownDimInputConcatModelLoaded, concat_input0_to_2x3) */ TEST_F(TestInputUnknownDimInputConcatModelLoaded, neg_concat_input0_to_wrong_shape) { - ASSERT_EQ(nnfw_set_available_backends(_session, "cpu"), NNFW_STATUS_NO_ERROR); + NNFW_ENSURE_SUCCESS(nnfw_set_available_backends(_session, "cpu")); const std::vector input0 = {1, 2, 3}; // of shape [3, 1], wrong shape const std::vector input1 = {4, 5, 6, 7, 8, 9}; // of shape [2, 3] @@ -300,7 +300,7 @@ TEST_F(TestInputUnknownDimInputConcatModelLoaded, neg_concat_input0_to_wrong_sha // input reshaping to [3, 1] nnfw_tensorinfo ti = {NNFW_TYPE_TENSOR_FLOAT32, 2, {3, 1}}; - ASSERT_EQ(nnfw_set_input_tensorinfo(_session, 0, &ti), NNFW_STATUS_NO_ERROR); + NNFW_ENSURE_SUCCESS(nnfw_set_input_tensorinfo(_session, 0, &ti)); ASSERT_EQ(nnfw_prepare(_session), NNFW_STATUS_ERROR); } @@ -330,7 +330,7 @@ using TestDynamicTensorApplyTensorInfoBinaryOp = TEST_F(TestDynamicTensorApplyTensorInfoBinaryOp, set_input_tensorinfo_after_compilation_add) { - ASSERT_EQ(nnfw_set_available_backends(_session, "cpu"), NNFW_STATUS_NO_ERROR); + 
NNFW_ENSURE_SUCCESS(nnfw_set_available_backends(_session, "cpu")); // input reshaping to [2, 2, 3] nnfw_tensorinfo input0_ti = {NNFW_TYPE_TENSOR_FLOAT32, 3, {2, 2, 3}}; @@ -341,15 +341,15 @@ TEST_F(TestDynamicTensorApplyTensorInfoBinaryOp, set_input_tensorinfo_after_comp std::vector expected_output = {1.1 * 2, 2.1 * 2, 3.1 * 2, 4.1 * 2, 5.1 * 2, 6.1 * 2, 7.1 * 2, 8.1 * 2, 9.1 * 2, 10.1 * 2, 11.1 * 2, 12.1 * 2}; - ASSERT_EQ(nnfw_prepare(_session), NNFW_STATUS_NO_ERROR); + NNFW_ENSURE_SUCCESS(nnfw_prepare(_session)); - ASSERT_EQ(nnfw_set_input_tensorinfo(_session, 0, &input0_ti), NNFW_STATUS_NO_ERROR); + NNFW_ENSURE_SUCCESS(nnfw_set_input_tensorinfo(_session, 0, &input0_ti)); set_input_output(_session, input0, input1, actual_output); // Do inference NNFW_STATUS res = nnfw_run(_session); - ASSERT_EQ(res, NNFW_STATUS_NO_ERROR); + NNFW_ENSURE_SUCCESS(res); // output value check for (int i = 0; i < expected_output.size(); ++i) @@ -374,7 +374,7 @@ using TestDynamicTensorApplyTensorInfoUnaryOp = ValidationTestModelLoaded while_dynamic_output0{ 0.0388205424, 0.042615629 TEST_F(TestWhileDynamicModelLoaded, run_verify) { - ASSERT_EQ(nnfw_set_available_backends(_session, "cpu"), NNFW_STATUS_NO_ERROR); - ASSERT_EQ(nnfw_prepare(_session), NNFW_STATUS_NO_ERROR); + NNFW_ENSURE_SUCCESS(nnfw_set_available_backends(_session, "cpu")); + NNFW_ENSURE_SUCCESS(nnfw_prepare(_session)); std::vector actual_output0(10); nnfw_tensorinfo ti = {NNFW_TYPE_TENSOR_FLOAT32, 3, {1, 28, 28}}; - ASSERT_EQ(nnfw_set_input_tensorinfo(_session, 0, &ti), NNFW_STATUS_NO_ERROR); + NNFW_ENSURE_SUCCESS(nnfw_set_input_tensorinfo(_session, 0, &ti)); set_input_output(_session, while_dynamic_input0, actual_output0); - ASSERT_EQ(nnfw_run(_session), NNFW_STATUS_NO_ERROR); + NNFW_ENSURE_SUCCESS(nnfw_run(_session)); nnfw_tensorinfo ti_output0_expected = {NNFW_TYPE_TENSOR_FLOAT32, 2, {1, 10}}; - ASSERT_EQ(nnfw_output_tensorinfo(_session, 0, &ti), NNFW_STATUS_NO_ERROR); + NNFW_ENSURE_SUCCESS(nnfw_output_tensorinfo(_session, 0, &ti)); ASSERT_TRUE(tensorInfoEqual(ti, ti_output0_expected)); // output value check @@ -458,11 +458,11 @@ TEST_F(TestWhileDynamicModelLoaded, run_verify) TEST_F(TestWhileDynamicModelLoaded, neg_run_verify) { - ASSERT_EQ(nnfw_set_available_backends(_session, "cpu"), NNFW_STATUS_NO_ERROR); - ASSERT_EQ(nnfw_prepare(_session), NNFW_STATUS_NO_ERROR); + NNFW_ENSURE_SUCCESS(nnfw_set_available_backends(_session, "cpu")); + NNFW_ENSURE_SUCCESS(nnfw_prepare(_session)); nnfw_tensorinfo ti = {NNFW_TYPE_TENSOR_FLOAT32, 3, {1, 28, 28}}; - ASSERT_EQ(nnfw_set_input_tensorinfo(_session, 0, &ti), NNFW_STATUS_NO_ERROR); + NNFW_ENSURE_SUCCESS(nnfw_set_input_tensorinfo(_session, 0, &ti)); // Insufficient size of output (10 or more is sufficient) std::vector actual_output0(9); @@ -482,27 +482,27 @@ const static std::vector if_dynamic_output0{ 0.0444660522, 0.0271649156, TEST_F(TestIfDynamicModelLoaded, run_verify) { - ASSERT_EQ(nnfw_set_available_backends(_session, "cpu"), NNFW_STATUS_NO_ERROR); - ASSERT_EQ(nnfw_prepare(_session), NNFW_STATUS_NO_ERROR); + NNFW_ENSURE_SUCCESS(nnfw_set_available_backends(_session, "cpu")); + NNFW_ENSURE_SUCCESS(nnfw_prepare(_session)); nnfw_tensorinfo ti_output0_expected = {NNFW_TYPE_TENSOR_FLOAT32, 2, {1, 10}}; // Output tensor sizes are inferenced after `nnfw_prepare` { nnfw_tensorinfo ti; - ASSERT_EQ(nnfw_output_tensorinfo(_session, 0, &ti), NNFW_STATUS_NO_ERROR); + NNFW_ENSURE_SUCCESS(nnfw_output_tensorinfo(_session, 0, &ti)); ASSERT_TRUE(tensorInfoEqual(ti, ti_output0_expected)); } std::vector 
actual_output0(10); set_input_output(_session, if_dynamic_input0, actual_output0); - ASSERT_EQ(nnfw_run(_session), NNFW_STATUS_NO_ERROR); + NNFW_ENSURE_SUCCESS(nnfw_run(_session)); // Check output tensor sizes again { nnfw_tensorinfo ti; - ASSERT_EQ(nnfw_output_tensorinfo(_session, 0, &ti), NNFW_STATUS_NO_ERROR); + NNFW_ENSURE_SUCCESS(nnfw_output_tensorinfo(_session, 0, &ti)); ASSERT_TRUE(tensorInfoEqual(ti, ti_output0_expected)); } diff --git a/tests/nnfw_api/src/ModelTestInputReshaping.cc b/tests/nnfw_api/src/ModelTestInputReshaping.cc index f5053e3..bfe347f 100644 --- a/tests/nnfw_api/src/ModelTestInputReshaping.cc +++ b/tests/nnfw_api/src/ModelTestInputReshaping.cc @@ -15,7 +15,7 @@ */ #include -#include +#include #include "fixtures.h" #include "NNPackages.h" @@ -35,8 +35,8 @@ TEST_F(TestInputReshapingAddModelLoaded, reshaping_2x2_to_4x2) { NNFW_STATUS res = NNFW_STATUS_ERROR; - ASSERT_EQ(nnfw_set_available_backends(_session, "cpu"), NNFW_STATUS_NO_ERROR); - ASSERT_EQ(nnfw_set_config(_session, "EXECUTOR", "Linear"), NNFW_STATUS_NO_ERROR); + NNFW_ENSURE_SUCCESS(nnfw_set_available_backends(_session, "cpu")); + NNFW_ENSURE_SUCCESS(nnfw_set_config(_session, "EXECUTOR", "Linear")); // input and output values const std::vector input1 = {0, 1, 2, 3, 4, 5, 6, 7}; // of changed shape [4, 2] @@ -56,7 +56,7 @@ TEST_F(TestInputReshapingAddModelLoaded, reshaping_2x2_to_4x2) res = nnfw_set_input_tensorinfo(_session, 0, &ti); res = nnfw_prepare(_session); - ASSERT_EQ(res, NNFW_STATUS_NO_ERROR); + NNFW_ENSURE_SUCCESS(res); nnfw_tensorinfo ti_input = {}; // Static inference result will be stored nnfw_input_tensorinfo(_session, 0, &ti_input); @@ -68,21 +68,21 @@ TEST_F(TestInputReshapingAddModelLoaded, reshaping_2x2_to_4x2) res = nnfw_set_input(_session, 0, NNFW_TYPE_TENSOR_FLOAT32, input1.data(), sizeof(float) * input1.size()); - ASSERT_EQ(res, NNFW_STATUS_NO_ERROR); + NNFW_ENSURE_SUCCESS(res); res = nnfw_set_input(_session, 1, NNFW_TYPE_TENSOR_FLOAT32, input2.data(), sizeof(float) * input2.size()); - ASSERT_EQ(res, NNFW_STATUS_NO_ERROR); + NNFW_ENSURE_SUCCESS(res); uint64_t output_num_elements = tensorInfoNumElements(ti_output); ASSERT_EQ(output_num_elements, expected.size()); std::vector actual_output(output_num_elements); res = nnfw_set_output(_session, 0, NNFW_TYPE_TENSOR_FLOAT32, actual_output.data(), sizeof(float) * actual_output.size()); - ASSERT_EQ(res, NNFW_STATUS_NO_ERROR); + NNFW_ENSURE_SUCCESS(res); // Do inference res = nnfw_run(_session); - ASSERT_EQ(res, NNFW_STATUS_NO_ERROR); + NNFW_ENSURE_SUCCESS(res); // compare for (int i = 0; i < expected.size(); ++i) diff --git a/tests/nnfw_api/src/RegressionTests.cc b/tests/nnfw_api/src/RegressionTests.cc index 54ebc96..e4dfa91 100644 --- a/tests/nnfw_api/src/RegressionTests.cc +++ b/tests/nnfw_api/src/RegressionTests.cc @@ -22,17 +22,17 @@ TEST_F(RegressionTest, github_1535) auto package_path = NNPackages::get().getModelAbsolutePath(NNPackages::ADD); nnfw_session *session1 = nullptr; - ASSERT_EQ(nnfw_create_session(&session1), NNFW_STATUS_NO_ERROR); - ASSERT_EQ(nnfw_load_model_from_file(session1, package_path.c_str()), NNFW_STATUS_NO_ERROR); - ASSERT_EQ(nnfw_set_available_backends(session1, "cpu;acl_cl;acl_neon"), NNFW_STATUS_NO_ERROR); - ASSERT_EQ(nnfw_prepare(session1), NNFW_STATUS_NO_ERROR); + NNFW_ENSURE_SUCCESS(nnfw_create_session(&session1)); + NNFW_ENSURE_SUCCESS(nnfw_load_model_from_file(session1, package_path.c_str())); + NNFW_ENSURE_SUCCESS(nnfw_set_available_backends(session1, "cpu;acl_cl;acl_neon")); + 
NNFW_ENSURE_SUCCESS(nnfw_prepare(session1)); nnfw_session *session2 = nullptr; - ASSERT_EQ(nnfw_create_session(&session2), NNFW_STATUS_NO_ERROR); - ASSERT_EQ(nnfw_load_model_from_file(session2, package_path.c_str()), NNFW_STATUS_NO_ERROR); - ASSERT_EQ(nnfw_set_available_backends(session2, "cpu"), NNFW_STATUS_NO_ERROR); - ASSERT_EQ(nnfw_prepare(session2), NNFW_STATUS_NO_ERROR); + NNFW_ENSURE_SUCCESS(nnfw_create_session(&session2)); + NNFW_ENSURE_SUCCESS(nnfw_load_model_from_file(session2, package_path.c_str())); + NNFW_ENSURE_SUCCESS(nnfw_set_available_backends(session2, "cpu")); + NNFW_ENSURE_SUCCESS(nnfw_prepare(session2)); - ASSERT_EQ(nnfw_close_session(session1), NNFW_STATUS_NO_ERROR); - ASSERT_EQ(nnfw_close_session(session2), NNFW_STATUS_NO_ERROR); + NNFW_ENSURE_SUCCESS(nnfw_close_session(session1)); + NNFW_ENSURE_SUCCESS(nnfw_close_session(session2)); } diff --git a/tests/nnfw_api/src/ValidationTestAddModelLoaded.cc b/tests/nnfw_api/src/ValidationTestAddModelLoaded.cc index 67f2467..11c6034 100644 --- a/tests/nnfw_api/src/ValidationTestAddModelLoaded.cc +++ b/tests/nnfw_api/src/ValidationTestAddModelLoaded.cc @@ -19,61 +19,63 @@ using ValidationTestAddModelLoaded = ValidationTestModelLoaded; -TEST_F(ValidationTestAddModelLoaded, prepare_001) -{ - ASSERT_EQ(nnfw_prepare(_session), NNFW_STATUS_NO_ERROR); -} +TEST_F(ValidationTestAddModelLoaded, prepare_001) { NNFW_ENSURE_SUCCESS(nnfw_prepare(_session)); } TEST_F(ValidationTestAddModelLoaded, set_available_backends_001) { - ASSERT_EQ(nnfw_set_available_backends(_session, "cpu"), NNFW_STATUS_NO_ERROR); + NNFW_ENSURE_SUCCESS(nnfw_set_available_backends(_session, "cpu")); } TEST_F(ValidationTestAddModelLoaded, get_input_size) { uint32_t size = 0; - ASSERT_EQ(nnfw_input_size(_session, &size), NNFW_STATUS_NO_ERROR); + NNFW_ENSURE_SUCCESS(nnfw_input_size(_session, &size)); ASSERT_EQ(size, 1); } TEST_F(ValidationTestAddModelLoaded, get_output_size) { uint32_t size = 0; - ASSERT_EQ(nnfw_output_size(_session, &size), NNFW_STATUS_NO_ERROR); + NNFW_ENSURE_SUCCESS(nnfw_output_size(_session, &size)); ASSERT_EQ(size, 1); } TEST_F(ValidationTestAddModelLoaded, output_tensorinfo) { nnfw_tensorinfo tensor_info; - ASSERT_EQ(nnfw_output_tensorinfo(_session, 0, &tensor_info), NNFW_STATUS_NO_ERROR); + NNFW_ENSURE_SUCCESS(nnfw_output_tensorinfo(_session, 0, &tensor_info)); ASSERT_EQ(tensor_info.rank, 1); ASSERT_EQ(tensor_info.dims[0], 1); } -TEST_F(ValidationTestAddModelLoaded, neg_run_001) +TEST_F(ValidationTestAddModelLoaded, neg_run) { - ASSERT_EQ(nnfw_run(_session), NNFW_STATUS_ERROR); + // nnfw_prepare is not called + ASSERT_EQ(nnfw_run(_session), NNFW_STATUS_INVALID_STATE); } -TEST_F(ValidationTestAddModelLoaded, neg_set_input_001) +TEST_F(ValidationTestAddModelLoaded, neg_set_input) { - ASSERT_EQ(nnfw_set_input(_session, 0, NNFW_TYPE_TENSOR_FLOAT32, nullptr, 0), NNFW_STATUS_ERROR); + // nnfw_prepare is not called + ASSERT_EQ(nnfw_set_input(_session, 0, NNFW_TYPE_TENSOR_FLOAT32, nullptr, 0), + NNFW_STATUS_INVALID_STATE); } -TEST_F(ValidationTestAddModelLoaded, neg_set_output_001) +TEST_F(ValidationTestAddModelLoaded, neg_set_output) { - ASSERT_EQ(nnfw_set_output(_session, 0, NNFW_TYPE_TENSOR_FLOAT32, nullptr, 0), NNFW_STATUS_ERROR); + // nnfw_prepare is not called + ASSERT_EQ(nnfw_set_output(_session, 0, NNFW_TYPE_TENSOR_FLOAT32, nullptr, 0), + NNFW_STATUS_INVALID_STATE); } TEST_F(ValidationTestAddModelLoaded, neg_get_input_size) { - ASSERT_EQ(nnfw_input_size(_session, nullptr), NNFW_STATUS_ERROR); + ASSERT_EQ(nnfw_input_size(_session, nullptr), 
NNFW_STATUS_UNEXPECTED_NULL); } TEST_F(ValidationTestAddModelLoaded, neg_get_output_size) { - ASSERT_EQ(nnfw_output_size(_session, nullptr), NNFW_STATUS_ERROR); + ASSERT_EQ(nnfw_output_size(_session, nullptr), NNFW_STATUS_UNEXPECTED_NULL); } TEST_F(ValidationTestAddModelLoaded, neg_load_model) @@ -81,11 +83,11 @@ TEST_F(ValidationTestAddModelLoaded, neg_load_model) // load model twice ASSERT_EQ(nnfw_load_model_from_file( _session, NNPackages::get().getModelAbsolutePath(NNPackages::ADD).c_str()), - NNFW_STATUS_ERROR); + NNFW_STATUS_INVALID_STATE); } TEST_F(ValidationTestAddModelLoaded, neg_output_tensorinfo) { // tensor_info is null - ASSERT_EQ(nnfw_output_tensorinfo(_session, 0, nullptr), NNFW_STATUS_ERROR); + ASSERT_EQ(nnfw_output_tensorinfo(_session, 0, nullptr), NNFW_STATUS_UNEXPECTED_NULL); } diff --git a/tests/nnfw_api/src/ValidationTestAddSessionPrepared.cc b/tests/nnfw_api/src/ValidationTestAddSessionPrepared.cc index 1bb4182..f19bb78 100644 --- a/tests/nnfw_api/src/ValidationTestAddSessionPrepared.cc +++ b/tests/nnfw_api/src/ValidationTestAddSessionPrepared.cc @@ -23,7 +23,7 @@ TEST_F(ValidationTestAddSessionPrepared, run) { SetInOutBuffers(); _input[0] = 3.0; - ASSERT_EQ(nnfw_run(_session), NNFW_STATUS_NO_ERROR); + NNFW_ENSURE_SUCCESS(nnfw_run(_session)); ASSERT_FLOAT_EQ(_output[0], 5.0); } @@ -31,11 +31,11 @@ TEST_F(ValidationTestAddSessionPrepared, run_twice) { SetInOutBuffers(); _input[0] = 4.0; - ASSERT_EQ(nnfw_run(_session), NNFW_STATUS_NO_ERROR); + NNFW_ENSURE_SUCCESS(nnfw_run(_session)); ASSERT_FLOAT_EQ(_output[0], 6.0); _input[0] = 5.0f; - ASSERT_EQ(nnfw_run(_session), NNFW_STATUS_NO_ERROR); + NNFW_ENSURE_SUCCESS(nnfw_run(_session)); ASSERT_FLOAT_EQ(_output[0], 7.0); } @@ -43,8 +43,8 @@ TEST_F(ValidationTestAddSessionPrepared, run_async) { SetInOutBuffers(); _input[0] = 3.0; - ASSERT_EQ(nnfw_run_async(_session), NNFW_STATUS_NO_ERROR); - ASSERT_EQ(nnfw_await(_session), NNFW_STATUS_NO_ERROR); + NNFW_ENSURE_SUCCESS(nnfw_run_async(_session)); + NNFW_ENSURE_SUCCESS(nnfw_await(_session)); ASSERT_FLOAT_EQ(_output[0], 5.0); } @@ -58,21 +58,21 @@ TEST_F(ValidationTestAddSessionPrepared, set_input_001) TEST_F(ValidationTestAddSessionPrepared, get_input_size) { uint32_t size = 0; - ASSERT_EQ(nnfw_input_size(_session, &size), NNFW_STATUS_NO_ERROR); + NNFW_ENSURE_SUCCESS(nnfw_input_size(_session, &size)); ASSERT_EQ(size, 1); } TEST_F(ValidationTestAddSessionPrepared, get_output_size) { uint32_t size = 0; - ASSERT_EQ(nnfw_output_size(_session, &size), NNFW_STATUS_NO_ERROR); + NNFW_ENSURE_SUCCESS(nnfw_output_size(_session, &size)); ASSERT_EQ(size, 1); } TEST_F(ValidationTestAddSessionPrepared, output_tensorinfo) { nnfw_tensorinfo tensor_info; - ASSERT_EQ(nnfw_output_tensorinfo(_session, 0, &tensor_info), NNFW_STATUS_NO_ERROR); + NNFW_ENSURE_SUCCESS(nnfw_output_tensorinfo(_session, 0, &tensor_info)); ASSERT_EQ(tensor_info.rank, 1); ASSERT_EQ(tensor_info.dims[0], 1); } @@ -86,24 +86,24 @@ TEST_F(ValidationTestAddSessionPrepared, neg_await_without_async_run) TEST_F(ValidationTestAddSessionPrepared, neg_await_after_sync_run) { SetInOutBuffers(); - ASSERT_EQ(nnfw_run(_session), NNFW_STATUS_NO_ERROR); + NNFW_ENSURE_SUCCESS(nnfw_run(_session)); ASSERT_EQ(nnfw_await(_session), NNFW_STATUS_ERROR); } TEST_F(ValidationTestAddSessionPrepared, neg_await_twice) { SetInOutBuffers(); - ASSERT_EQ(nnfw_run_async(_session), NNFW_STATUS_NO_ERROR); - ASSERT_EQ(nnfw_await(_session), NNFW_STATUS_NO_ERROR); + NNFW_ENSURE_SUCCESS(nnfw_run_async(_session)); + NNFW_ENSURE_SUCCESS(nnfw_await(_session)); 
ASSERT_EQ(nnfw_await(_session), NNFW_STATUS_ERROR); } TEST_F(ValidationTestAddSessionPrepared, neg_run_during_async_run) { SetInOutBuffers(); - ASSERT_EQ(nnfw_run_async(_session), NNFW_STATUS_NO_ERROR); - EXPECT_EQ(nnfw_run(_session), NNFW_STATUS_ERROR); - ASSERT_EQ(nnfw_await(_session), NNFW_STATUS_NO_ERROR); + NNFW_ENSURE_SUCCESS(nnfw_run_async(_session)); + EXPECT_EQ(nnfw_run(_session), NNFW_STATUS_INVALID_STATE); + NNFW_ENSURE_SUCCESS(nnfw_await(_session)); } TEST_F(ValidationTestAddSessionPrepared, neg_set_input_001) @@ -139,12 +139,12 @@ TEST_F(ValidationTestAddSessionPrepared, neg_set_output_002) TEST_F(ValidationTestAddSessionPrepared, neg_get_input_size) { - ASSERT_EQ(nnfw_input_size(_session, nullptr), NNFW_STATUS_ERROR); + ASSERT_EQ(nnfw_input_size(_session, nullptr), NNFW_STATUS_UNEXPECTED_NULL); } TEST_F(ValidationTestAddSessionPrepared, neg_get_output_size) { - ASSERT_EQ(nnfw_output_size(_session, nullptr), NNFW_STATUS_ERROR); + ASSERT_EQ(nnfw_output_size(_session, nullptr), NNFW_STATUS_UNEXPECTED_NULL); } TEST_F(ValidationTestAddSessionPrepared, neg_load_model) @@ -152,13 +152,13 @@ TEST_F(ValidationTestAddSessionPrepared, neg_load_model) // Load model twice ASSERT_EQ(nnfw_load_model_from_file( _session, NNPackages::get().getModelAbsolutePath(NNPackages::ADD).c_str()), - NNFW_STATUS_ERROR); + NNFW_STATUS_INVALID_STATE); } TEST_F(ValidationTestAddSessionPrepared, neg_prepare) { // Call Prepare twice - ASSERT_EQ(nnfw_prepare(_session), NNFW_STATUS_ERROR); + ASSERT_EQ(nnfw_prepare(_session), NNFW_STATUS_INVALID_STATE); } // TODO Validation check when "nnfw_run" is called without input & output tensor setting diff --git a/tests/nnfw_api/src/ValidationTestFourAddModelsSetInput.cc b/tests/nnfw_api/src/ValidationTestFourAddModelsSetInput.cc index b3fb9c6..4e2a905 100644 --- a/tests/nnfw_api/src/ValidationTestFourAddModelsSetInput.cc +++ b/tests/nnfw_api/src/ValidationTestFourAddModelsSetInput.cc @@ -21,8 +21,8 @@ using ValidationTestFourAddModelsSetInput = ValidationTestFourModelsSetInput run $indir/Add_000 and check output" - echo " $progname -i nnpkg-tcs Add_000 => run nnpkg-tcs/Add_000 and check output" + echo " $0 $progname Add_000 => run $indir/Add_000 and check output" + echo " $0 $progname -i nnpkg-tcs Add_000 => run nnpkg-tcs/Add_000 and check output" exit 1 } @@ -61,11 +61,6 @@ if [ $# -ne 1 ]; then exit 1 fi -if [ ! -e Product ]; then - echo "error: please make sure to run this script in nnfw home." - exit 1 -fi - tcname=$(basename "$1") nnpkg="$indir/$tcname" @@ -78,6 +73,7 @@ fi if ! command_exists $nnpkg_run; then echo "error: runner "$nnpkg_run" does not exist." + echo " if $nnpkg_run exists, please set PATH to $nnpkg_run" exit 1 fi diff --git a/tests/scripts/command/prepare-model b/tests/scripts/command/prepare-model new file mode 100644 index 0000000..feb658c --- /dev/null +++ b/tests/scripts/command/prepare-model @@ -0,0 +1,64 @@ +#!/bin/bash +# +# Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +COMMAND_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +INSTALL_DIR="$(dirname $(dirname $COMMAND_DIR))" + +MD5_CHECK="on" +DOWNLOAD_MODEL="all" + +function Usage() +{ + echo "Usage: $0 $(basename ${BASH_SOURCE[0]}) [OPTIONS]" + echo "" + echo "Options:" + echo " --ignoremd5 Ignore MD5 check when download model files" + echo " --model=(all|nnpackage|tflite) Download test model (default=all)" +} + +for i in "$@" +do + case $i in + -h|--help|help) + Usage + exit 1 + ;; + --ignoremd5) + MD5_CHECK="off" + ;; + --model=*) + DOWNLOAD_MODEL=${i#*=} + ;; + *) + echo "Unknown option: $i" + exit 1 + ;; + esac + shift +done + +if [[ $DOWNLOAD_MODEL == "all" ]] || [[ $DOWNLOAD_MODEL == "tflite" ]]; then + # Download tflite models + $INSTALL_DIR/test/models/run_test.sh --download=on --run=off --md5=$MD5_CHECK +fi + +if [[ $DOWNLOAD_MODEL == "all" ]] || [[ $DOWNLOAD_MODEL == "nnpackage" ]]; then + # Download nnpackage model + NNPACKAGE_CONFIG_DIR=$INSTALL_DIR/test/models/nnpackage/ + NNPACKAGE_CACHE_DIR=$INSTALL_DIR/unittest_standalone/nnfw_api_gtest_models/ + $INSTALL_DIR/test/models/run_test.sh --download=on --run=off --md5=$MD5_CHECK \ + --configdir=$NNPACKAGE_CONFIG_DIR --cachedir=$NNPACKAGE_CACHE_DIR +fi diff --git a/tests/scripts/unittest.sh b/tests/scripts/command/unittest old mode 100755 new mode 100644 similarity index 72% rename from tests/scripts/unittest.sh rename to tests/scripts/command/unittest index 717779d..135ebea --- a/tests/scripts/unittest.sh +++ b/tests/scripts/command/unittest @@ -14,22 +14,31 @@ # See the License for the specific language governing permissions and # limitations under the License. +COMMAND_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +INSTALL_DIR="$(dirname $(dirname $COMMAND_DIR))" UNITTEST_REPORT_DIR= -UNITTEST_TEST_DIR= +UNITTEST_TEST_DIR=$INSTALL_DIR/unittest UNITTEST_RESULT=0 UNITTEST_RUN_ALL="" function Usage() { # TODO: Fill this - echo "Usage: LD_LIBRARY_PATH=Product/out/lib ./$0 --reportdir=report --unittestdir=Product/out/unittest" + echo "Usage: $0 $(basename ${BASH_SOURCE[0]}) [OPTIONS]" + echo "" + echo "Options:" + echo " --reportdir=PATH Path to write unittest report" + echo " --unittestdir=PATH Path to run unittest (default: $UNITTEST_TEST_DIR" } -get_gtest_option() +function get_gtest_option() { local UNITTEST_REPORT_FILE=$(basename $TEST_BIN) - local output_option="--gtest_output=xml:$UNITTEST_REPORT_DIR/$UNITTEST_REPORT_FILE.xml" + local output_option local filter_option + if [ -n "$UNITTEST_REPORT_DIR" ]; then + output_option="--gtest_output=xml:$UNITTEST_REPORT_DIR/$UNITTEST_REPORT_FILE.xml" + fi if [ -r "$TEST_BIN.skip" ]; then filter_option="--gtest_filter=-$(grep -v '#' "$TEST_BIN.skip" | tr '\n' ':')" fi @@ -49,15 +58,15 @@ do --unittestdir=*) UNITTEST_TEST_DIR=${i#*=} ;; - --runall) - UNITTEST_RUN_ALL="true" + *) + echo "Unknown option: $i" + exit 1 + ;; esac shift done -# TODO: handle exceptions for params - -if [ ! -e "$UNITTEST_REPORT_DIR" ]; then +if [ -n "$UNITTEST_REPORT_DIR" ] && [ ! -e "$UNITTEST_REPORT_DIR" ]; then mkdir -p $UNITTEST_REPORT_DIR fi @@ -73,21 +82,9 @@ for TEST_BIN in `find $UNITTEST_TEST_DIR -maxdepth 1 -type f -executable`; do echo "============================================" echo "Starting set $num_unittest: $TEST_BIN..." echo "============================================" - TEMP_UNITTEST_RESULT=0 - if [ "$UNITTEST_RUN_ALL" == "true" ]; then - for TEST_LIST_VERBOSE_LINE in $($TEST_BIN --gtest_list_tests); do - if [[ $TEST_LIST_VERBOSE_LINE == *\. 
]]; then - TEST_LIST_CATEGORY=$TEST_LIST_VERBOSE_LINE - else - TEST_LIST_ITEM="$TEST_LIST_CATEGORY""$TEST_LIST_VERBOSE_LINE" - $TEST_BIN --gtest_filter=$TEST_LIST_ITEM --gtest_output="xml:$UNITTEST_REPORT_DIR/$TEST_LIST_ITEM.xml" - fi - done - else - $TEST_BIN $(get_gtest_option) - TEMP_UNITTEST_RESULT=$? - fi + $TEST_BIN $(get_gtest_option) + TEMP_UNITTEST_RESULT=$? if [[ $TEMP_UNITTEST_RESULT -ne 0 ]]; then UNITTEST_RESULT=$TEMP_UNITTEST_RESULT diff --git a/tests/scripts/command/verify-tflite b/tests/scripts/command/verify-tflite new file mode 100644 index 0000000..48863ff --- /dev/null +++ b/tests/scripts/command/verify-tflite @@ -0,0 +1,106 @@ +#!/bin/bash +# +# Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +COMMAND_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +INSTALL_DIR="$(dirname $(dirname $COMMAND_DIR))" + +MD5_CHECK="on" +TFLITE_LOADER="nnapi" +REPORT_DIR="report" +TEST_LIST_FILE= + +function Usage() +{ + echo "Usage: $0 $(basename ${BASH_SOURCE[0]}) [OPTIONS]" + echo "" + echo "Options:" + echo " --ignoremd5 Ignore MD5 check when download model files" + echo " --api=(nnapi|loader) TFLite model file loading API (default=$TFLITE_LOADER)" + echo " --reportdir=PATH Path to write report (default=$REPORT_DIR)" + echo " --list=FILE List file to test. Test all if list option is not passed" +} + +for i in "$@" +do + case $i in + -h|--help|help) + Usage + exit 1 + ;; + --ignoremd5) + MD5_CHECK="off" + ;; + --api=*) + TFLITE_LOADER=${i#*=} + ;; + --reportdir=*) + REPORT_DIR=${i#*=} + ;; + --list=*) + TEST_LIST_FILE=${i#*=} + ;; + *) + echo "Unknown option: $i" + exit 1 + ;; + esac + shift +done + +if [ ! -z "$TEST_LIST_FILE" ]; then + MODELLIST=$(cat "${TEST_LIST_FILE}") +fi + +if [ ! -e "$REPORT_DIR" ]; then + mkdir -p $REPORT_DIR +fi + +TEST_RESULT=0 +TAP_NAME=verification_test.tap +TEST_NAME="Verification" +TEST_DRIVER= + +if [[ $TFLITE_LOADER == "nnapi" ]]; then + TEST_NAME="NNAPI Verification" + TEST_DRIVER=nnapi_test +elif [[ $TFLITE_LOADER == "loader" ]]; then + TEST_NAME="Loader Verification" + TEST_DRIVER=tflite_loader_test_tool +else + Usage + exit 1 +fi + +$INSTALL_DIR/test/models/run_test.sh --driverbin=$TEST_DRIVER \ + --reportdir=$REPORT_DIR \ + --tapname=$TAP_NAME \ + ${MODELLIST:-} > $REPORT_DIR/verification_test.log 2>&1 +TEST_RESULT=$? + +if [[ $TEST_RESULT -ne 0 ]]; then + echo "" + cat $REPORT_DIR/$TAP_NAME + echo "" + echo "$TEST_NAME failed... 
exit code: $TEST_RESULT" + echo "============================================" + echo "" + exit $TEST_RESULT +fi + +echo "" +cat $REPORT_DIR/$TAP_NAME +echo "============================================" +echo "" diff --git a/tests/scripts/common.sh b/tests/scripts/common.sh index 8800290..87aec86 100755 --- a/tests/scripts/common.sh +++ b/tests/scripts/common.sh @@ -18,13 +18,12 @@ MY_PATH="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" function get_result_of_benchmark_test() { - local RUN_TEST_SH=$1 - local DRIVER_BIN=$2 - local MODEL=$3 - local LOG_FILE=$4 + local DRIVER_BIN=$1 + local MODEL=$2 + local LOG_FILE=$3 local RET=0 - $RUN_TEST_SH --driverbin="$DRIVER_BIN -r 5 -w 3" $MODEL > $LOG_FILE 2>&1 + $MY_PATH/models/run_test.sh --driverbin="$DRIVER_BIN -r 5 -w 3" $MODEL > $LOG_FILE 2>&1 RET=$? if [[ $RET -ne 0 ]]; then echo "Testing $MODEL aborted... exit code: $RET" @@ -68,7 +67,7 @@ function run_benchmark_and_print() LOG_FILE=$REPORT_MODEL_DIR/$WRITE_FILE_NAME.txt RESULT_FILE=$REPORT_MODEL_DIR/$WRITE_FILE_NAME.result print_with_dots $MSG - RESULT=$(get_result_of_benchmark_test $BENCHMARK_RUN_TEST_SH $DRIVER_BIN $MODEL $LOG_FILE) + RESULT=$(get_result_of_benchmark_test $DRIVER_BIN $MODEL $LOG_FILE) echo "$RESULT ms" print_result_of_benchmark_test "$MSG" "$RESULT" $RESULT_FILE sleep $PAUSE_TIME_IN_SEC diff --git a/tests/scripts/list/frameworktest_list.aarch64.acl_cl.txt b/tests/scripts/list/frameworktest_list.aarch64.acl_cl.txt index fc7e71a..dd8d3b7 100644 --- a/tests/scripts/list/frameworktest_list.aarch64.acl_cl.txt +++ b/tests/scripts/list/frameworktest_list.aarch64.acl_cl.txt @@ -27,7 +27,7 @@ pack pad reduce_max reduce_mean -reduce_sum +reduce_sum/float relu relu6 reshape diff --git a/tests/scripts/list/frameworktest_list.armv7l.acl_cl.txt b/tests/scripts/list/frameworktest_list.armv7l.acl_cl.txt index fc7e71a..dd8d3b7 100644 --- a/tests/scripts/list/frameworktest_list.armv7l.acl_cl.txt +++ b/tests/scripts/list/frameworktest_list.armv7l.acl_cl.txt @@ -27,7 +27,7 @@ pack pad reduce_max reduce_mean -reduce_sum +reduce_sum/float relu relu6 reshape diff --git a/tests/scripts/list/tflite_loader_list.aarch64.txt b/tests/scripts/list/tflite_loader_list.aarch64.txt index aca8f16..e04d89d 100644 --- a/tests/scripts/list/tflite_loader_list.aarch64.txt +++ b/tests/scripts/list/tflite_loader_list.aarch64.txt @@ -18,7 +18,7 @@ mul pack pad reduce_max -reduce_sum +reduce_sum/float relu relu6 reshape/reshape1 diff --git a/tests/scripts/list/tflite_loader_list.armv7l.txt b/tests/scripts/list/tflite_loader_list.armv7l.txt index aca8f16..e04d89d 100644 --- a/tests/scripts/list/tflite_loader_list.armv7l.txt +++ b/tests/scripts/list/tflite_loader_list.armv7l.txt @@ -18,7 +18,7 @@ mul pack pad reduce_max -reduce_sum +reduce_sum/float relu relu6 reshape/reshape1 diff --git a/tests/scripts/framework/tests/MODELS/inception_module/config.sh b/tests/scripts/models/config/MODELS/inception_module/config.sh similarity index 100% rename from tests/scripts/framework/tests/MODELS/inception_module/config.sh rename to tests/scripts/models/config/MODELS/inception_module/config.sh diff --git a/tests/scripts/framework/tests/MODELS/inception_nonslim/config.sh b/tests/scripts/models/config/MODELS/inception_nonslim/config.sh similarity index 100% rename from tests/scripts/framework/tests/MODELS/inception_nonslim/config.sh rename to tests/scripts/models/config/MODELS/inception_nonslim/config.sh diff --git a/tests/scripts/framework/tests/MODELS/inception_slim/config.sh 
b/tests/scripts/models/config/MODELS/inception_slim/config.sh similarity index 100% rename from tests/scripts/framework/tests/MODELS/inception_slim/config.sh rename to tests/scripts/models/config/MODELS/inception_slim/config.sh diff --git a/tests/scripts/framework/tests/MODELS/mobilenet/config.sh b/tests/scripts/models/config/MODELS/mobilenet/config.sh similarity index 100% rename from tests/scripts/framework/tests/MODELS/mobilenet/config.sh rename to tests/scripts/models/config/MODELS/mobilenet/config.sh diff --git a/tests/scripts/framework/tests/MODELS/mobilenet_quant8/config.sh b/tests/scripts/models/config/MODELS/mobilenet_quant8/config.sh similarity index 100% rename from tests/scripts/framework/tests/MODELS/mobilenet_quant8/config.sh rename to tests/scripts/models/config/MODELS/mobilenet_quant8/config.sh diff --git a/tests/scripts/framework/tests/abs/config.sh b/tests/scripts/models/config/abs/config.sh similarity index 100% rename from tests/scripts/framework/tests/abs/config.sh rename to tests/scripts/models/config/abs/config.sh diff --git a/tests/scripts/framework/tests/add/1D/config.sh b/tests/scripts/models/config/add/1D/config.sh similarity index 100% rename from tests/scripts/framework/tests/add/1D/config.sh rename to tests/scripts/models/config/add/1D/config.sh diff --git a/tests/scripts/framework/tests/add/4D/config.sh b/tests/scripts/models/config/add/4D/config.sh similarity index 100% rename from tests/scripts/framework/tests/add/4D/config.sh rename to tests/scripts/models/config/add/4D/config.sh diff --git a/tests/scripts/framework/tests/average_pool_2d/aligned/config.sh b/tests/scripts/models/config/average_pool_2d/aligned/config.sh similarity index 100% rename from tests/scripts/framework/tests/average_pool_2d/aligned/config.sh rename to tests/scripts/models/config/average_pool_2d/aligned/config.sh diff --git a/tests/scripts/framework/tests/average_pool_2d/avgpool1/config.sh b/tests/scripts/models/config/average_pool_2d/avgpool1/config.sh similarity index 100% rename from tests/scripts/framework/tests/average_pool_2d/avgpool1/config.sh rename to tests/scripts/models/config/average_pool_2d/avgpool1/config.sh diff --git a/tests/scripts/framework/tests/average_pool_2d/avgpool2/config.sh b/tests/scripts/models/config/average_pool_2d/avgpool2/config.sh similarity index 100% rename from tests/scripts/framework/tests/average_pool_2d/avgpool2/config.sh rename to tests/scripts/models/config/average_pool_2d/avgpool2/config.sh diff --git a/tests/scripts/framework/tests/batch_to_space_nd2/config.sh b/tests/scripts/models/config/batch_to_space_nd2/config.sh similarity index 100% rename from tests/scripts/framework/tests/batch_to_space_nd2/config.sh rename to tests/scripts/models/config/batch_to_space_nd2/config.sh diff --git a/tests/scripts/framework/tests/cast/config.sh b/tests/scripts/models/config/cast/config.sh similarity index 100% rename from tests/scripts/framework/tests/cast/config.sh rename to tests/scripts/models/config/cast/config.sh diff --git a/tests/scripts/framework/tests/concat/2D/config.sh b/tests/scripts/models/config/concat/2D/config.sh similarity index 100% rename from tests/scripts/framework/tests/concat/2D/config.sh rename to tests/scripts/models/config/concat/2D/config.sh diff --git a/tests/scripts/framework/tests/concat/concat1/config.sh b/tests/scripts/models/config/concat/concat1/config.sh similarity index 100% rename from tests/scripts/framework/tests/concat/concat1/config.sh rename to tests/scripts/models/config/concat/concat1/config.sh diff --git 
a/tests/scripts/framework/tests/concat/concat2/config.sh b/tests/scripts/models/config/concat/concat2/config.sh similarity index 100% rename from tests/scripts/framework/tests/concat/concat2/config.sh rename to tests/scripts/models/config/concat/concat2/config.sh diff --git a/tests/scripts/framework/tests/conv_2d/convolution1/config.sh b/tests/scripts/models/config/conv_2d/convolution1/config.sh similarity index 100% rename from tests/scripts/framework/tests/conv_2d/convolution1/config.sh rename to tests/scripts/models/config/conv_2d/convolution1/config.sh diff --git a/tests/scripts/framework/tests/conv_2d/convolution2/config.sh b/tests/scripts/models/config/conv_2d/convolution2/config.sh similarity index 100% rename from tests/scripts/framework/tests/conv_2d/convolution2/config.sh rename to tests/scripts/models/config/conv_2d/convolution2/config.sh diff --git a/tests/scripts/framework/tests/custom/squared_difference/config.sh b/tests/scripts/models/config/custom/squared_difference/config.sh similarity index 100% rename from tests/scripts/framework/tests/custom/squared_difference/config.sh rename to tests/scripts/models/config/custom/squared_difference/config.sh diff --git a/tests/scripts/framework/tests/depthwise_conv_2d/depthconv1/config.sh b/tests/scripts/models/config/depthwise_conv_2d/depthconv1/config.sh similarity index 100% rename from tests/scripts/framework/tests/depthwise_conv_2d/depthconv1/config.sh rename to tests/scripts/models/config/depthwise_conv_2d/depthconv1/config.sh diff --git a/tests/scripts/framework/tests/depthwise_conv_2d/depthconv2/config.sh b/tests/scripts/models/config/depthwise_conv_2d/depthconv2/config.sh similarity index 100% rename from tests/scripts/framework/tests/depthwise_conv_2d/depthconv2/config.sh rename to tests/scripts/models/config/depthwise_conv_2d/depthconv2/config.sh diff --git a/tests/scripts/framework/tests/depthwise_conv_2d_no_fuse/config.sh b/tests/scripts/models/config/depthwise_conv_2d_no_fuse/config.sh similarity index 100% rename from tests/scripts/framework/tests/depthwise_conv_2d_no_fuse/config.sh rename to tests/scripts/models/config/depthwise_conv_2d_no_fuse/config.sh diff --git a/tests/scripts/framework/tests/div/broadcast/config.sh b/tests/scripts/models/config/div/broadcast/config.sh similarity index 100% rename from tests/scripts/framework/tests/div/broadcast/config.sh rename to tests/scripts/models/config/div/broadcast/config.sh diff --git a/tests/scripts/framework/tests/embedding_lookup/config.sh b/tests/scripts/models/config/embedding_lookup/config.sh similarity index 100% rename from tests/scripts/framework/tests/embedding_lookup/config.sh rename to tests/scripts/models/config/embedding_lookup/config.sh diff --git a/tests/scripts/framework/tests/equal/config.sh b/tests/scripts/models/config/equal/config.sh similarity index 100% rename from tests/scripts/framework/tests/equal/config.sh rename to tests/scripts/models/config/equal/config.sh diff --git a/tests/scripts/framework/tests/exp/config.sh b/tests/scripts/models/config/exp/config.sh similarity index 100% rename from tests/scripts/framework/tests/exp/config.sh rename to tests/scripts/models/config/exp/config.sh diff --git a/tests/scripts/framework/tests/floor/floor1/config.sh b/tests/scripts/models/config/floor/floor1/config.sh similarity index 100% rename from tests/scripts/framework/tests/floor/floor1/config.sh rename to tests/scripts/models/config/floor/floor1/config.sh diff --git a/tests/scripts/framework/tests/floor/floor2/config.sh 
b/tests/scripts/models/config/floor/floor2/config.sh similarity index 100% rename from tests/scripts/framework/tests/floor/floor2/config.sh rename to tests/scripts/models/config/floor/floor2/config.sh diff --git a/tests/scripts/framework/tests/fullyconnected/fc1/config.sh b/tests/scripts/models/config/fullyconnected/fc1/config.sh similarity index 100% rename from tests/scripts/framework/tests/fullyconnected/fc1/config.sh rename to tests/scripts/models/config/fullyconnected/fc1/config.sh diff --git a/tests/scripts/framework/tests/fullyconnected/hybrid/config.sh b/tests/scripts/models/config/fullyconnected/hybrid/config.sh similarity index 100% rename from tests/scripts/framework/tests/fullyconnected/hybrid/config.sh rename to tests/scripts/models/config/fullyconnected/hybrid/config.sh diff --git a/tests/scripts/framework/tests/fullyconnected/matmul2x2/config.sh b/tests/scripts/models/config/fullyconnected/matmul2x2/config.sh similarity index 100% rename from tests/scripts/framework/tests/fullyconnected/matmul2x2/config.sh rename to tests/scripts/models/config/fullyconnected/matmul2x2/config.sh diff --git a/tests/scripts/framework/tests/fullyconnected/weights_as_input/config.sh b/tests/scripts/models/config/fullyconnected/weights_as_input/config.sh similarity index 100% rename from tests/scripts/framework/tests/fullyconnected/weights_as_input/config.sh rename to tests/scripts/models/config/fullyconnected/weights_as_input/config.sh diff --git a/tests/scripts/framework/tests/gather/config.sh b/tests/scripts/models/config/gather/config.sh similarity index 100% rename from tests/scripts/framework/tests/gather/config.sh rename to tests/scripts/models/config/gather/config.sh diff --git a/tests/scripts/framework/tests/greater/config.sh b/tests/scripts/models/config/greater/config.sh similarity index 100% rename from tests/scripts/framework/tests/greater/config.sh rename to tests/scripts/models/config/greater/config.sh diff --git a/tests/scripts/framework/tests/greater_equal/config.sh b/tests/scripts/models/config/greater_equal/config.sh similarity index 100% rename from tests/scripts/framework/tests/greater_equal/config.sh rename to tests/scripts/models/config/greater_equal/config.sh diff --git a/tests/scripts/framework/tests/hashtable_lookup/config.sh b/tests/scripts/models/config/hashtable_lookup/config.sh similarity index 100% rename from tests/scripts/framework/tests/hashtable_lookup/config.sh rename to tests/scripts/models/config/hashtable_lookup/config.sh diff --git a/tests/scripts/framework/tests/l2_normalization/config.sh b/tests/scripts/models/config/l2_normalization/config.sh similarity index 100% rename from tests/scripts/framework/tests/l2_normalization/config.sh rename to tests/scripts/models/config/l2_normalization/config.sh diff --git a/tests/scripts/framework/tests/l2_pool_2d/config.sh b/tests/scripts/models/config/l2_pool_2d/config.sh similarity index 100% rename from tests/scripts/framework/tests/l2_pool_2d/config.sh rename to tests/scripts/models/config/l2_pool_2d/config.sh diff --git a/tests/scripts/framework/tests/less/config.sh b/tests/scripts/models/config/less/config.sh similarity index 100% rename from tests/scripts/framework/tests/less/config.sh rename to tests/scripts/models/config/less/config.sh diff --git a/tests/scripts/framework/tests/less_equal/config.sh b/tests/scripts/models/config/less_equal/config.sh similarity index 100% rename from tests/scripts/framework/tests/less_equal/config.sh rename to tests/scripts/models/config/less_equal/config.sh diff --git 
a/tests/scripts/framework/tests/logistic/config.sh b/tests/scripts/models/config/logistic/config.sh similarity index 100% rename from tests/scripts/framework/tests/logistic/config.sh rename to tests/scripts/models/config/logistic/config.sh diff --git a/tests/scripts/framework/tests/max/config.sh b/tests/scripts/models/config/max/config.sh similarity index 100% rename from tests/scripts/framework/tests/max/config.sh rename to tests/scripts/models/config/max/config.sh diff --git a/tests/scripts/framework/tests/max_pool_2d/maxpool1/config.sh b/tests/scripts/models/config/max_pool_2d/maxpool1/config.sh similarity index 100% rename from tests/scripts/framework/tests/max_pool_2d/maxpool1/config.sh rename to tests/scripts/models/config/max_pool_2d/maxpool1/config.sh diff --git a/tests/scripts/framework/tests/max_pool_2d/maxpool2/config.sh b/tests/scripts/models/config/max_pool_2d/maxpool2/config.sh similarity index 100% rename from tests/scripts/framework/tests/max_pool_2d/maxpool2/config.sh rename to tests/scripts/models/config/max_pool_2d/maxpool2/config.sh diff --git a/tests/scripts/framework/tests/mean/config.sh b/tests/scripts/models/config/mean/config.sh similarity index 100% rename from tests/scripts/framework/tests/mean/config.sh rename to tests/scripts/models/config/mean/config.sh diff --git a/tests/scripts/framework/tests/min/config.sh b/tests/scripts/models/config/min/config.sh similarity index 100% rename from tests/scripts/framework/tests/min/config.sh rename to tests/scripts/models/config/min/config.sh diff --git a/tests/scripts/framework/tests/mul/broadcast/config.sh b/tests/scripts/models/config/mul/broadcast/config.sh similarity index 100% rename from tests/scripts/framework/tests/mul/broadcast/config.sh rename to tests/scripts/models/config/mul/broadcast/config.sh diff --git a/tests/scripts/framework/tests/neg/config.sh b/tests/scripts/models/config/neg/config.sh similarity index 100% rename from tests/scripts/framework/tests/neg/config.sh rename to tests/scripts/models/config/neg/config.sh diff --git a/tests/scripts/framework/tests/not_equal/config.sh b/tests/scripts/models/config/not_equal/config.sh similarity index 100% rename from tests/scripts/framework/tests/not_equal/config.sh rename to tests/scripts/models/config/not_equal/config.sh diff --git a/tests/scripts/framework/tests/one_hot/config.sh b/tests/scripts/models/config/one_hot/config.sh similarity index 100% rename from tests/scripts/framework/tests/one_hot/config.sh rename to tests/scripts/models/config/one_hot/config.sh diff --git a/tests/scripts/framework/tests/pack/config.sh b/tests/scripts/models/config/pack/config.sh similarity index 100% rename from tests/scripts/framework/tests/pack/config.sh rename to tests/scripts/models/config/pack/config.sh diff --git a/tests/scripts/framework/tests/pad/4D_2D/config.sh b/tests/scripts/models/config/pad/4D_2D/config.sh similarity index 100% rename from tests/scripts/framework/tests/pad/4D_2D/config.sh rename to tests/scripts/models/config/pad/4D_2D/config.sh diff --git a/tests/scripts/framework/tests/pad/pad1/config.sh b/tests/scripts/models/config/pad/pad1/config.sh similarity index 100% rename from tests/scripts/framework/tests/pad/pad1/config.sh rename to tests/scripts/models/config/pad/pad1/config.sh diff --git a/tests/scripts/framework/tests/pad/pad2/config.sh b/tests/scripts/models/config/pad/pad2/config.sh similarity index 100% rename from tests/scripts/framework/tests/pad/pad2/config.sh rename to tests/scripts/models/config/pad/pad2/config.sh diff --git 
a/tests/scripts/framework/tests/reduce_max/config.sh b/tests/scripts/models/config/reduce_max/config.sh similarity index 100% rename from tests/scripts/framework/tests/reduce_max/config.sh rename to tests/scripts/models/config/reduce_max/config.sh diff --git a/tests/scripts/framework/tests/reduce_mean/test1/config.sh b/tests/scripts/models/config/reduce_mean/test1/config.sh similarity index 100% rename from tests/scripts/framework/tests/reduce_mean/test1/config.sh rename to tests/scripts/models/config/reduce_mean/test1/config.sh diff --git a/tests/scripts/framework/tests/reduce_mean/test2/config.sh b/tests/scripts/models/config/reduce_mean/test2/config.sh similarity index 100% rename from tests/scripts/framework/tests/reduce_mean/test2/config.sh rename to tests/scripts/models/config/reduce_mean/test2/config.sh diff --git a/tests/scripts/framework/tests/reduce_sum/config.sh b/tests/scripts/models/config/reduce_sum/float/config.sh similarity index 100% rename from tests/scripts/framework/tests/reduce_sum/config.sh rename to tests/scripts/models/config/reduce_sum/float/config.sh diff --git a/tests/scripts/models/config/reduce_sum/uint8/config.sh b/tests/scripts/models/config/reduce_sum/uint8/config.sh new file mode 100755 index 0000000..d7d9f73 --- /dev/null +++ b/tests/scripts/models/config/reduce_sum/uint8/config.sh @@ -0,0 +1 @@ +MODELFILE_NAME="reduce_sum_uint8.tflite" diff --git a/tests/scripts/framework/tests/relu/config.sh b/tests/scripts/models/config/relu/config.sh similarity index 100% rename from tests/scripts/framework/tests/relu/config.sh rename to tests/scripts/models/config/relu/config.sh diff --git a/tests/scripts/framework/tests/relu6/config.sh b/tests/scripts/models/config/relu6/config.sh similarity index 100% rename from tests/scripts/framework/tests/relu6/config.sh rename to tests/scripts/models/config/relu6/config.sh diff --git a/tests/scripts/framework/tests/reshape/3D/config.sh b/tests/scripts/models/config/reshape/3D/config.sh similarity index 100% rename from tests/scripts/framework/tests/reshape/3D/config.sh rename to tests/scripts/models/config/reshape/3D/config.sh diff --git a/tests/scripts/framework/tests/reshape/reshape1/config.sh b/tests/scripts/models/config/reshape/reshape1/config.sh similarity index 100% rename from tests/scripts/framework/tests/reshape/reshape1/config.sh rename to tests/scripts/models/config/reshape/reshape1/config.sh diff --git a/tests/scripts/framework/tests/reshape/reshape2/config.sh b/tests/scripts/models/config/reshape/reshape2/config.sh similarity index 100% rename from tests/scripts/framework/tests/reshape/reshape2/config.sh rename to tests/scripts/models/config/reshape/reshape2/config.sh diff --git a/tests/scripts/framework/tests/resize_bilinear/config.sh b/tests/scripts/models/config/resize_bilinear/config.sh similarity index 100% rename from tests/scripts/framework/tests/resize_bilinear/config.sh rename to tests/scripts/models/config/resize_bilinear/config.sh diff --git a/tests/scripts/framework/tests/rnn/config.sh b/tests/scripts/models/config/rnn/config.sh similarity index 100% rename from tests/scripts/framework/tests/rnn/config.sh rename to tests/scripts/models/config/rnn/config.sh diff --git a/tests/scripts/framework/tests/rsqrt/config.sh b/tests/scripts/models/config/rsqrt/config.sh similarity index 100% rename from tests/scripts/framework/tests/rsqrt/config.sh rename to tests/scripts/models/config/rsqrt/config.sh diff --git a/tests/scripts/framework/tests/select/config.sh b/tests/scripts/models/config/select/config.sh 
similarity index 100% rename from tests/scripts/framework/tests/select/config.sh rename to tests/scripts/models/config/select/config.sh diff --git a/tests/scripts/framework/tests/shape/config.sh b/tests/scripts/models/config/shape/config.sh similarity index 100% rename from tests/scripts/framework/tests/shape/config.sh rename to tests/scripts/models/config/shape/config.sh diff --git a/tests/scripts/framework/tests/sin/config.sh b/tests/scripts/models/config/sin/config.sh similarity index 100% rename from tests/scripts/framework/tests/sin/config.sh rename to tests/scripts/models/config/sin/config.sh diff --git a/tests/scripts/framework/tests/slice/config.sh b/tests/scripts/models/config/slice/config.sh similarity index 100% rename from tests/scripts/framework/tests/slice/config.sh rename to tests/scripts/models/config/slice/config.sh diff --git a/tests/scripts/framework/tests/softmax/config.sh b/tests/scripts/models/config/softmax/config.sh similarity index 100% rename from tests/scripts/framework/tests/softmax/config.sh rename to tests/scripts/models/config/softmax/config.sh diff --git a/tests/scripts/framework/tests/space_to_batch_nd2/config.sh b/tests/scripts/models/config/space_to_batch_nd2/config.sh similarity index 100% rename from tests/scripts/framework/tests/space_to_batch_nd2/config.sh rename to tests/scripts/models/config/space_to_batch_nd2/config.sh diff --git a/tests/scripts/framework/tests/space_to_depth/config.sh b/tests/scripts/models/config/space_to_depth/config.sh similarity index 100% rename from tests/scripts/framework/tests/space_to_depth/config.sh rename to tests/scripts/models/config/space_to_depth/config.sh diff --git a/tests/scripts/framework/tests/sqrt/config.sh b/tests/scripts/models/config/sqrt/config.sh similarity index 100% rename from tests/scripts/framework/tests/sqrt/config.sh rename to tests/scripts/models/config/sqrt/config.sh diff --git a/tests/scripts/framework/tests/squeeze/config.sh b/tests/scripts/models/config/squeeze/config.sh similarity index 100% rename from tests/scripts/framework/tests/squeeze/config.sh rename to tests/scripts/models/config/squeeze/config.sh diff --git a/tests/scripts/framework/tests/strided_slice/config.sh b/tests/scripts/models/config/strided_slice/config.sh similarity index 100% rename from tests/scripts/framework/tests/strided_slice/config.sh rename to tests/scripts/models/config/strided_slice/config.sh diff --git a/tests/scripts/framework/tests/sub/broadcast/config.sh b/tests/scripts/models/config/sub/broadcast/config.sh similarity index 100% rename from tests/scripts/framework/tests/sub/broadcast/config.sh rename to tests/scripts/models/config/sub/broadcast/config.sh diff --git a/tests/scripts/framework/tests/tanh/config.sh b/tests/scripts/models/config/tanh/config.sh similarity index 100% rename from tests/scripts/framework/tests/tanh/config.sh rename to tests/scripts/models/config/tanh/config.sh diff --git a/tests/scripts/framework/tests/tile/config.sh b/tests/scripts/models/config/tile/config.sh similarity index 100% rename from tests/scripts/framework/tests/tile/config.sh rename to tests/scripts/models/config/tile/config.sh diff --git a/tests/scripts/framework/tests/topk_v2/config.sh b/tests/scripts/models/config/topk_v2/config.sh similarity index 100% rename from tests/scripts/framework/tests/topk_v2/config.sh rename to tests/scripts/models/config/topk_v2/config.sh diff --git a/tests/scripts/framework/tests/transpose/config.sh b/tests/scripts/models/config/transpose/config.sh similarity index 100% rename from 
tests/scripts/framework/tests/transpose/config.sh rename to tests/scripts/models/config/transpose/config.sh diff --git a/tests/scripts/framework/tests/transpose_conv/same/config.sh b/tests/scripts/models/config/transpose_conv/same/config.sh similarity index 100% rename from tests/scripts/framework/tests/transpose_conv/same/config.sh rename to tests/scripts/models/config/transpose_conv/same/config.sh diff --git a/tests/scripts/framework/tests/transpose_conv/valid/config.sh b/tests/scripts/models/config/transpose_conv/valid/config.sh similarity index 100% rename from tests/scripts/framework/tests/transpose_conv/valid/config.sh rename to tests/scripts/models/config/transpose_conv/valid/config.sh diff --git a/tests/scripts/framework/tests/zeros_like/config.sh b/tests/scripts/models/config/zeros_like/config.sh similarity index 100% rename from tests/scripts/framework/tests/zeros_like/config.sh rename to tests/scripts/models/config/zeros_like/config.sh diff --git a/tests/scripts/framework/run_test.sh b/tests/scripts/models/run_test.sh similarity index 81% rename from tests/scripts/framework/run_test.sh rename to tests/scripts/models/run_test.sh index 44b7149..0aa363f 100755 --- a/tests/scripts/framework/run_test.sh +++ b/tests/scripts/models/run_test.sh @@ -18,20 +18,28 @@ MY_PATH="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" NNFW_HOME="$(dirname $(dirname $(dirname ${MY_PATH})))" CACHE_ROOT_PATH=$MY_PATH/"cache" -TEST_ROOT_PATH=$MY_PATH/"tests" +TEST_ROOT_PATH=$MY_PATH/"config" REPORT_DIR="report" RUN_DISABLED="true" +function command_exists() { + command -v "$@" > /dev/null 2>&1 +} + function Usage() { echo "Usage: ./$0 --driverbin={such as tflite_run} {tests to test or empty for all of tests}" echo "Usage: ./$0 --driverbin=Product/out/bin/tflite_run --reportdir=report --tapname=verification.tap avgpool1 avgpool2" echo "" - echo "--download - (default=off) Download model files. Other options is ignored" - echo "--driverbin - (default=../../Product/out/bin/tflite_run) runner for runnning framework tests" - echo "--reportdir - (default=report) directory to place tap files" - echo "--tapname - (default=framework_test.tap) file name to be written for tap" + echo "--download - (default=on) Download model files" + echo "--run - (default=on) Test model files" + echo "--driverbin - (default=../../Product/out/bin/tflite_run) Runner for runnning model tests" + echo "--reportdir - (default=report) Directory to place tap files" + echo "--tapname - (default=framework_test.tap) File name to be written for tap" + echo "--md5 - (default=on) MD5 check when download model files" + echo "--configdir - (default=$TEST_ROOT_PATH) Config directory to download and test model" + echo "--cachedir - (default=$CACHE_ROOT_PATH) Directory to download model" echo "" } @@ -43,9 +51,13 @@ function need_download() return 0; fi # Ignore checking md5 in cache + # TODO Use "--md5" option only and remove IGNORE_MD5 environment variable if [ ! 
-z $IGNORE_MD5 ] && [ "$IGNORE_MD5" == "1" ]; then return 1 fi + if [ "$MD5_CHECK" = "off" ]; then + return 1 + fi LOCAL_HASH=$(md5sum $LOCAL_PATH | awk '{ print $1 }') REMOTE_HASH=$(curl -ss $REMOTE_URL | md5sum | awk '{ print $1 }') @@ -60,7 +72,9 @@ function need_download() DRIVER_BIN="" TAP_NAME="framework_test.tap" TEST_LIST=() -DOWNLOAD_MODE="off" +DOWNLOAD_MODEL="on" +RUN_TEST="on" +MD5_CHECK="on" # Support environment variable setting for mirror server FIXED_MODELFILE_SERVER="${MODELFILE_SERVER:-}" @@ -84,6 +98,18 @@ do --download=*) DOWNLOAD_MODE=${i#*=} ;; + --md5=*) + MD5_CHECK=${i#*=} + ;; + --run=*) + RUN_TEST=${i#*=} + ;; + --configdir=*) + TEST_ROOT_PATH=${i#*=} + ;; + --cachedir=*) + CACHE_ROOT_PATH=${i#*=} + ;; *) TEST_LIST+=( $i ) ;; @@ -99,8 +125,13 @@ if [ ! -n "$DRIVER_BIN" ]; then DRIVER_BIN="$NNFW_HOME/Product/out/bin/tflite_run" fi +if [ ! -d "$TEST_ROOT_PATH" ]; then + echo "Cannot find config directory for test: please set proper configdir" + exit 1 +fi + # Check test driver setting -if [ ! -e $DRIVER_BIN ] && [ "$DOWNLOAD_MODE" != "on" ]; then +if [ ! command_exists $DRIVER_BIN ] && [ "$RUN_TEST" = "on" ]; then echo "Cannot find test driver" $DRIVER_BIN ": please set proper DRIVER_BIN" exit 1 fi @@ -139,33 +170,9 @@ run_tests() TEST_CACHE_PATH=$CACHE_ROOT_PATH/$TEST_NAME MODELFILE=$TEST_CACHE_PATH/$MODELFILE_NAME - MODELFILE_URL="$MODELFILE_SERVER_PATH/$MODELFILE_NAME" - if [ -n "$FIXED_MODELFILE_SERVER" ]; then - MODELFILE_URL="$FIXED_MODELFILE_SERVER/$MODELFILE_NAME" - fi - - # Download model file - if [ ! -e $TEST_CACHE_PATH ]; then - mkdir -p $TEST_CACHE_PATH - fi - - # Download unless we have it in cache (Also check md5sum) - if need_download "$MODELFILE" "$MODELFILE_URL"; then - echo "" - echo "Download test file for $TEST_NAME" - echo "======================" - - rm -f $MODELFILE # Remove invalid file if exists - pushd $TEST_CACHE_PATH - wget -nv $MODELFILE_URL - if [ "${MODELFILE_NAME##*.}" == "zip" ]; then - unzip -o $MODELFILE_NAME - fi - popd - fi # Find model file for downloaded by zip - if [ "${MODELFILE_NAME##*.}" == "zip" ]; then + if [ "${MODELFILE_NAME##*.}" = "zip" ]; then pushd $TEST_CACHE_PATH MODELFILE=$TEST_CACHE_PATH/$(ls *.tflite) popd @@ -178,7 +185,6 @@ run_tests() # Run driver to test framework $DRIVER_BIN $MODELFILE - #$DRIVER_BIN $MODELFILE if [[ $? -eq 0 ]]; then echo "ok $i - $TEST_NAME" >> $REPORT_DIR/$TAP_NAME else @@ -268,10 +274,11 @@ find_tests() mkdir -p $REPORT_DIR TESTS_TO_RUN=$(find_tests ${TEST_LIST[@]}) -if [[ "$DOWNLOAD_MODE" == "on" ]]; then +if [ "$DOWNLOAD_MODEL" = "on" ]; then download_tests $TESTS_TO_RUN - exit 0; fi -run_tests $TESTS_TO_RUN +if [ "$RUN_TEST" = "on" ]; then + run_tests $TESTS_TO_RUN +fi exit $? diff --git a/tests/scripts/onert-test b/tests/scripts/onert-test new file mode 100644 index 0000000..99c107c --- /dev/null +++ b/tests/scripts/onert-test @@ -0,0 +1,49 @@ +#!/bin/bash + +# Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +[[ "${BASH_SOURCE[0]}" != "${0}" ]] && echo "Please don't source ${BASH_SOURCE[0]}, execute it" && return + +DRIVER_PATH="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +INSTALL_PATH="$(dirname $DRIVER_PATH)" +COMMAND_PATH=$INSTALL_PATH/test/command +BIN_PATH=$INSTALL_PATH/bin + +export PATH=$BIN_PATH:$PATH + +function Usage() +{ + echo "Usage: $0 [COMMAND] ..." + echo "Command:" + for file in $COMMAND_PATH/*; + do + echo " $(basename "$file")" + done + exit 255 +} + +COMMAND=$1; shift +if [[ -z $COMMAND ]] || [[ $COMMAND == "--help" ]]; then + Usage + exit 255 +fi + +COMMAND_FILE=$COMMAND_PATH/$COMMAND +if [[ ! -f $COMMAND_FILE ]]; then + echo "ERROR: '$COMMAND' is not supported" + exit 255 +fi + +source $COMMAND_FILE $@ diff --git a/tests/scripts/test-driver.sh b/tests/scripts/test-driver.sh index 615fc2c..aa97d95 100755 --- a/tests/scripts/test-driver.sh +++ b/tests/scripts/test-driver.sh @@ -27,7 +27,6 @@ function Usage() echo "--artifactpath - (default={test-driver.sh's path}/../../) it should contain tests/ and Product/" echo "" echo "Following options are needed when you want to tests of specific types. If you don't pass any one, unittest and verification will be run" - echo "--unittest - (default=on) run unit test" echo "--frameworktest - (default=off) run framework test" echo "--verification - (default=on) run verification" echo "--frameworktest_list_file - filepath of model list for test" @@ -38,8 +37,6 @@ function Usage() echo "etc." echo "--framework_driverbin - (default=../../Product/out/bin/tflite_run) runner for runnning framework tests" echo "--verification_driverbin - (default=../../Product/out/bin/nnapi_test) runner for runnning verification tests" - echo "--runtestsh - (default=\$ARTIFACT_PATH/tests/scripts/framework/run_test.sh) run_test.sh with path where it is for framework test and verification" - echo "--unittestdir - (default=\$ARTIFACT_PATH/Product/out/unittest) directory that has unittest binaries for unit test" echo "" echo "--reportdir - (default=\$ARTIFACT_PATH/report) directory to save report" echo "" @@ -49,10 +46,7 @@ TEST_DRIVER_DIR="$( cd "$( dirname "${BASH_SOURCE}" )" && pwd )" ARTIFACT_PATH="$TEST_DRIVER_DIR/../../" FRAMEWORK_DRIVER_BIN="" VERIFICATION_DRIVER_BIN="" -RUN_TEST_SH="" -UNIT_TEST_DIR="" ALLTEST_ON="true" -UNITTEST_ON="false" FRAMEWORKTEST_ON="false" VERIFICATION_ON="false" BENCHMARK_ONERT_OP_ON="false" @@ -74,16 +68,6 @@ do --verification_driverbin=*) VERIFICATION_DRIVER_BIN=${i#*=} ;; - --runtestsh=*) - RUN_TEST_SH=${i#*=} - ;; - --unittestdir=*) - UNIT_TEST_DIR=${i#*=} - ;; - --unittest) - ALLTEST_ON="false" - UNITTEST_ON="true" - ;; --frameworktest) ALLTEST_ON="false" FRAMEWORKTEST_ON="true" @@ -116,15 +100,6 @@ done ARTIFACT_PATH="$(readlink -f $ARTIFACT_PATH)" -if [ -z "$RUN_TEST_SH" ]; then - RUN_TEST_SH=$ARTIFACT_PATH/tests/scripts/framework/run_test.sh -fi - -if [ ! 
-e "$RUN_TEST_SH" ]; then - echo "Cannot find $RUN_TEST_SH" - exit 1 -fi - if [ -z "$UNIT_TEST_DIR" ]; then UNIT_TEST_DIR=$ARTIFACT_PATH/Product/out/unittest fi @@ -135,13 +110,6 @@ fi source $TEST_DRIVER_DIR/common.sh -# Run unittest in each part such as Runtime -if [ "$ALLTEST_ON" == "true" ] || [ "$UNITTEST_ON" == "true" ]; then - $TEST_DRIVER_DIR/unittest.sh \ - --reportdir=$REPORT_DIR \ - --unittestdir=$UNIT_TEST_DIR -fi - # Run tflite_run with various tflite models if [ "$FRAMEWORKTEST_ON" == "true" ]; then if [ -z "$FRAMEWORK_DRIVER_BIN" ]; then @@ -149,7 +117,6 @@ if [ "$FRAMEWORKTEST_ON" == "true" ]; then fi $TEST_DRIVER_DIR/test_framework.sh \ - --runtestsh=$RUN_TEST_SH \ --driverbin=$FRAMEWORK_DRIVER_BIN \ --reportdir=$REPORT_DIR \ --tapname=framework_test.tap \ @@ -166,7 +133,6 @@ if [ "$ALLTEST_ON" == "true" ] || [ "$VERIFICATION_ON" == "true" ]; then # verification uses the same script as frameworktest does $TEST_DRIVER_DIR/test_framework.sh \ - --runtestsh=$RUN_TEST_SH \ --driverbin=$VERIFICATION_DRIVER_BIN \ --reportdir=$REPORT_DIR \ --tapname=verification_test.tap \ @@ -180,10 +146,9 @@ if [ "$BENCHMARK_ONERT_OP_ON" == "true" ]; then $TEST_DRIVER_DIR/benchmark_nnapi.sh \ --test_op \ - --runtestsh=$RUN_TEST_SH \ --driverbin=$DRIVER_BIN \ --reportdir=$REPORT_DIR/benchmark_op \ - --modelfilepath=$ARTIFACT_PATH/tests/scripts/framework + --modelfilepath=$ARTIFACT_PATH/tests/scripts/models fi # Make json file. Actually, this process is only needed on CI. That's why it is in test-driver.sh. diff --git a/tests/scripts/test_framework.sh b/tests/scripts/test_framework.sh index 1d97515..6bf9c89 100755 --- a/tests/scripts/test_framework.sh +++ b/tests/scripts/test_framework.sh @@ -14,7 +14,8 @@ # See the License for the specific language governing permissions and # limitations under the License. -FWTEST_RUN_TEST_SH= +MY_PATH="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" + FWTEST_DRIVER_BIN= FWTEST_REPORT_DIR= FWTEST_TAP_NAME= @@ -25,7 +26,6 @@ function Usage() { echo "Usage Example:" echo "./$0 \\" - echo " --runtestsh=tests/scripts/framework/run_test.sh \\ # Test runner script path" echo " --driverbin=Product/out/bin/tflite_run \\ # Test driver path" echo " --frameworktest_list_file=tests/scripts/list/frameworktest_list.armv7l.cpu.txt \\" echo " --reportdir=report \\ # Directory for the report files will be saved" @@ -42,9 +42,6 @@ do -h|--help|help) Usage ;; - --runtestsh=*) - FWTEST_RUN_TEST_SH=${i#*=} - ;; --driverbin=*) FWTEST_DRIVER_BIN=${i#*=} ;; @@ -67,7 +64,6 @@ do shift done -[ ! -z "$FWTEST_RUN_TEST_SH" ] || Usage [ ! -z "$FWTEST_DRIVER_BIN" ] || Usage [ ! -z "$FWTEST_REPORT_DIR" ] || Usage [ ! -z "$FWTEST_TAP_NAME" ] || Usage @@ -86,7 +82,7 @@ if [ ! -z "$FRAMEWORKTEST_LIST_FILE" ]; then MODELLIST=$(cat "${FRAMEWORKTEST_LIST_FILE}") fi -$FWTEST_RUN_TEST_SH --driverbin=$FWTEST_DRIVER_BIN \ +$MY_PATH/models/run_test.sh --driverbin=$FWTEST_DRIVER_BIN \ --reportdir=$FWTEST_REPORT_DIR \ --tapname=$FWTEST_TAP_NAME \ ${MODELLIST:-} \ diff --git a/tests/scripts/test_scheduler_with_profiling.sh b/tests/scripts/test_scheduler_with_profiling.sh index 8b2f2d2..5c24572 100755 --- a/tests/scripts/test_scheduler_with_profiling.sh +++ b/tests/scripts/test_scheduler_with_profiling.sh @@ -11,7 +11,7 @@ TEST_DRIVER_DIR="$( cd "$( dirname "${BASH_SOURCE}" )" && pwd )" ARTIFACT_PATH="$TEST_DRIVER_DIR/../.." 
BENCHMARK_DRIVER_BIN=$ARTIFACT_PATH/Product/out/bin/tflite_run REPORT_DIR=$ARTIFACT_PATH/report -RUN_TEST_SH=$ARTIFACT_PATH/tests/scripts/framework/run_test.sh +RUN_TEST_SH=$ARTIFACT_PATH/tests/scripts/models/run_test.sh BENCHMARK_MODEL_LIST="MODELS/inception_nonslim MODELS/inception_slim MODELS/mobilenet" if [ ! -e "$RUN_TEST_SH" ]; then diff --git a/tests/tools/nnpackage_run/CMakeLists.txt b/tests/tools/nnpackage_run/CMakeLists.txt index 0e333a0..ec45db4 100644 --- a/tests/tools/nnpackage_run/CMakeLists.txt +++ b/tests/tools/nnpackage_run/CMakeLists.txt @@ -33,7 +33,7 @@ target_include_directories(nnpackage_run PRIVATE src) target_include_directories(nnpackage_run PRIVATE ${Boost_INCLUDE_DIRS}) target_link_libraries(nnpackage_run onert_core onert tflite_loader) -target_link_libraries(nnpackage_run tensorflow-lite ${LIB_PTHREAD} dl nnfw_lib_tflite jsoncpp) +target_link_libraries(nnpackage_run nnfw_lib_tflite jsoncpp) target_link_libraries(nnpackage_run nnfw-dev) target_link_libraries(nnpackage_run ${Boost_PROGRAM_OPTIONS_LIBRARY}) target_link_libraries(nnpackage_run nnfw_lib_benchmark) diff --git a/tests/tools/nnpackage_run/src/args.cc b/tests/tools/nnpackage_run/src/args.cc index 0dbcafc..cb4a7db 100644 --- a/tests/tools/nnpackage_run/src/args.cc +++ b/tests/tools/nnpackage_run/src/args.cc @@ -16,6 +16,7 @@ #include "args.h" +#include #include #include @@ -105,6 +106,75 @@ Args::Args(const int argc, char **argv) void Args::Initialize(void) { + auto process_nnpackage = [&](const std::string &package_filename) { + _package_filename = package_filename; + + std::cerr << "Package Filename " << _package_filename << std::endl; + if (_package_filename.empty()) + { + // TODO Print usage instead of the below message + std::cerr << "Please specify nnpackage file. Run with `--help` for usage." 
+ << "\n"; + + exit(1); + } + else + { + if (access(_package_filename.c_str(), F_OK) == -1) + { + std::cerr << "nnpackage not found: " << _package_filename << "\n"; + } + } + }; + + auto process_output_sizes = [&](const std::string &output_sizes_json_str) { + Json::Value root; + Json::Reader reader; + if (!reader.parse(output_sizes_json_str, root, false)) + { + std::cerr << "Invalid JSON format for output_sizes \"" << output_sizes_json_str << "\"\n"; + exit(1); + } + + auto arg_map = argArrayToMap(root); + for (auto &pair : arg_map) + { + uint32_t key = pair.first; + Json::Value &val_json = pair.second; + if (!val_json.isUInt()) + { + std::cerr << "All the values in `output_sizes` must be unsigned integers\n"; + exit(1); + } + uint32_t val = val_json.asUInt(); + _output_sizes[key] = val; + } + }; + + auto process_shape_prepare = [&](const std::string &shape_str) { + try + { + handleShapeParam(_shape_prepare, shape_str); + } + catch (const std::exception &e) + { + std::cerr << "error with '--shape_prepare' option: " << shape_str << std::endl; + exit(1); + } + }; + + auto process_shape_run = [&](const std::string &shape_str) { + try + { + handleShapeParam(_shape_run, shape_str); + } + catch (const std::exception &e) + { + std::cerr << "error with '--shape_run' option: " << shape_str << std::endl; + exit(1); + } + }; + // General options po::options_description general("General options", 100); @@ -112,32 +182,33 @@ void Args::Initialize(void) general.add_options() ("help,h", "Print available options") ("version", "Print version and exit immediately") - ("nnpackage", po::value()->required()) + ("nnpackage", po::value()->required()->notifier(process_nnpackage)) #if defined(ONERT_HAVE_HDF5) && ONERT_HAVE_HDF5 == 1 - ("dump,d", po::value()->default_value(""), "Output filename") - ("load,l", po::value()->default_value(""), "Input filename") + ("dump,d", po::value()->default_value("")->notifier([&](const auto &v) { _dump_filename = v; }), "Output filename") + ("load,l", po::value()->default_value("")->notifier([&](const auto &v) { _load_filename = v; }), "Input filename") #endif - ("output_sizes", po::value(), + ("output_sizes", po::value()->notifier(process_output_sizes), "The output buffer size in JSON 1D array\n" "If not given, the model's output sizes are used\n" "e.g. 
'[0, 40, 2, 80]' to set 0th tensor to 40 and 2nd tensor to 80.\n") - ("num_runs,r", po::value()->default_value(1), "The number of runs") - ("warmup_runs,w", po::value()->default_value(0), "The number of warmup runs") - ("run_delay,t", po::value()->default_value(-1), "Delay time(ms) between runs (as default no delay") - ("gpumem_poll,g", po::value()->default_value(false), "Check gpu memory polling separately") - ("mem_poll,m", po::value()->default_value(false), "Check memory polling") - ("write_report,p", po::value()->default_value(false), + ("num_runs,r", po::value()->default_value(1)->notifier([&](const auto &v) { _num_runs = v; }), "The number of runs") + ("warmup_runs,w", po::value()->default_value(0)->notifier([&](const auto &v) { _warmup_runs = v; }), "The number of warmup runs") + ("run_delay,t", po::value()->default_value(-1)->notifier([&](const auto &v) { _run_delay = v; }), "Delay time(ms) between runs (as default no delay") + ("gpumem_poll,g", po::value()->default_value(false)->notifier([&](const auto &v) { _gpumem_poll = v; }), "Check gpu memory polling separately") + ("mem_poll,m", po::value()->default_value(false)->notifier([&](const auto &v) { _mem_poll = v; }), "Check memory polling") + ("write_report,p", po::value()->default_value(false)->notifier([&](const auto &v) { _write_report = v; }), "Write report\n" "{exec}-{nnpkg}-{backend}.csv will be generated.\n" "e.g. nnpackage_run-UNIT_Add_000-acl_cl.csv.\n" "{nnpkg} name may be changed to realpath if you use symbolic-link.") - ("shape_prepare", po::value()->default_value("[]"), + ("shape_prepare", po::value()->default_value("[]")->notifier(process_shape_prepare), "set shape of specified tensor before compilation\n" "e.g. '[0, [1, 2], 2, []]' to set 0th tensor to [1, 2] and 2nd tensor to [].\n") - ("shape_run", po::value()->default_value("[]"), + ("shape_run", po::value()->default_value("[]")->notifier(process_shape_run), "set shape of specified tensor right before running\n" "e.g. '[1, [1, 2]]` to set 1st tensor to [1, 2].\n") - ("verbose_level,v", po::value()->default_value(0), "Verbose level\n" + ("verbose_level,v", po::value()->default_value(0)->notifier([&](const auto &v) { _verbose_level = v; }), + "Verbose level\n" "0: prints the only result. Messages btw run don't print\n" "1: prints result and message btw run\n" "2: prints all of messages to print\n") @@ -180,158 +251,23 @@ void Args::Parse(const int argc, char **argv) return; } - po::notify(vm); try { -#if defined(ONERT_HAVE_HDF5) && ONERT_HAVE_HDF5 == 1 - if (vm.count("dump")) - { - _dump_filename = vm["dump"].as(); - } - - if (vm.count("load")) - { - _load_filename = vm["load"].as(); - } -#endif - - if (vm.count("nnpackage")) - { - _package_filename = vm["nnpackage"].as(); - - if (_package_filename.empty()) - { - // TODO Print usage instead of the below message - std::cerr << "Please specify nnpackage file. Run with `--help` for usage." 
- << "\n"; - - exit(1); - } - else - { - if (access(_package_filename.c_str(), F_OK) == -1) - { - std::cerr << "nnpackage not found: " << _package_filename << "\n"; - } - } - } - - if (vm.count("output_sizes")) - { - auto output_sizes_json_str = vm["output_sizes"].as(); - - Json::Value root; - Json::Reader reader; - if (!reader.parse(output_sizes_json_str, root, false)) - { - std::cerr << "Invalid JSON format for output_sizes \"" << output_sizes_json_str << "\"\n"; - exit(1); - } - - auto arg_map = argArrayToMap(root); - for (auto &pair : arg_map) - { - uint32_t key = pair.first; - Json::Value &val_json = pair.second; - if (!val_json.isUInt()) - { - std::cerr << "All the values in `output_sizes` must be unsigned integers\n"; - exit(1); - } - uint32_t val = val_json.asUInt(); - _output_sizes[key] = val; - } - } - - if (vm.count("num_runs")) - { - _num_runs = vm["num_runs"].as(); - } - - if (vm.count("warmup_runs")) - { - _warmup_runs = vm["warmup_runs"].as(); - } - - if (vm.count("run_delay")) - { - _run_delay = vm["run_delay"].as(); - } - - if (vm.count("gpumem_poll")) - { - _gpumem_poll = vm["gpumem_poll"].as(); - } - - if (vm.count("mem_poll")) - { - _mem_poll = vm["mem_poll"].as(); - // Instead of EXECUTE to avoid overhead, memory polling runs on WARMUP - if (_mem_poll && _warmup_runs == 0) - { - _warmup_runs = 1; - } - } - - if (vm.count("write_report")) - { - _write_report = vm["write_report"].as(); - } - - if (vm.count("verbose_level")) - { - _verbose_level = vm["verbose_level"].as(); - } + po::notify(vm); } catch (const std::bad_cast &e) { - std::cerr << "error by bad cast" << e.what() << '\n'; + std::cerr << "Bad cast error - " << e.what() << '\n'; exit(1); } - if (vm.count("shape_prepare")) - { - std::string shape_str; - try - { - shape_str = vm["shape_prepare"].as(); - } - catch (const std::bad_cast &e) - { - std::cerr << "error by bad cast with '--shape_prepare' option" << e.what() << '\n'; - exit(1); - } - try - { - handleShapeParam(_shape_prepare, shape_str); - } - catch (const std::exception &e) - { - std::cerr << "error with '--shape_prepare' option: " << shape_str << std::endl; - exit(1); - } - } - - if (vm.count("shape_run")) + // This must be run after `notify` as `_warm_up_runs` must have been processed before. 
+ if (vm.count("mem_poll")) { - std::string shape_str; - try - { - shape_str = vm["shape_run"].as(); - } - catch (const std::bad_cast &e) + // Instead of EXECUTE to avoid overhead, memory polling runs on WARMUP + if (_mem_poll && _warmup_runs == 0) { - std::cerr << "error by bad cast with '--shape_run' option" << e.what() << '\n'; - exit(1); - } - try - { - handleShapeParam(_shape_run, shape_str); - } - catch (const std::exception &e) - { - std::cerr << "error with '--shape_run' option: " << shape_str << std::endl; - exit(1); + _warmup_runs = 1; } } } diff --git a/tests/tools/nnpackage_run/src/h5formatter.cc b/tests/tools/nnpackage_run/src/h5formatter.cc index 34c075c..09ace47 100644 --- a/tests/tools/nnpackage_run/src/h5formatter.cc +++ b/tests/tools/nnpackage_run/src/h5formatter.cc @@ -145,6 +145,7 @@ void H5Formatter::dumpOutputs(const std::string &filename, std::vector()->default_value(""), "Input filename") - ("dump,d", po::value()->default_value(""), "Output filename") - ("ishapes", po::value>()->multitoken(), "Input shapes") - ("compare,c", po::value()->default_value(""), "filename to be compared with") - ("tflite", po::value()->required()) - ("num_runs,r", po::value()->default_value(1), "The number of runs") - ("warmup_runs,w", po::value()->default_value(0), "The number of warmup runs") - ("run_delay,t", po::value()->default_value(-1), "Delay time(ms) between runs (as default no delay") - ("gpumem_poll,g", po::value()->default_value(false), "Check gpu memory polling separately") + ("input,i", po::value()->default_value("")->notifier(process_input), "Input filename") + ("dump,d", po::value()->default_value("")->notifier([&](const auto &v) { _dump_filename = v; }), "Output filename") + ("ishapes", po::value>()->multitoken()->notifier([&](const auto &v) { _input_shapes = v; }), "Input shapes") + ("compare,c", po::value()->default_value("")->notifier([&](const auto &v) { _compare_filename = v; }), "filename to be compared with") + ("tflite", po::value()->required()->notifier(process_tflite)) + ("num_runs,r", po::value()->default_value(1)->notifier([&](const auto &v) { _num_runs = v; }), "The number of runs") + ("warmup_runs,w", po::value()->default_value(0)->notifier([&](const auto &v) { _warmup_runs = v; }), "The number of warmup runs") + ("run_delay,t", po::value()->default_value(-1)->notifier([&](const auto &v) { _run_delay = v; }), "Delay time(ms) between runs (as default no delay)") + ("gpumem_poll,g", po::value()->default_value(false)->notifier([&](const auto &v) { _gpumem_poll = v; }), "Check gpu memory polling separately") ("mem_poll,m", po::value()->default_value(false), "Check memory polling") - ("write_report,p", po::value()->default_value(false), "Write report") - ("validate", po::value()->default_value(true), "Validate tflite model") - ("verbose_level,v", po::value()->default_value(0), "Verbose level\n" + ("write_report,p", po::value()->default_value(false)->notifier([&](const auto &v) { _write_report = v; }), "Write report") + ("validate", po::value()->default_value(true)->notifier([&](const auto &v) { _tflite_validate = v; }), "Validate tflite model") + ("verbose_level,v", po::value()->default_value(0)->notifier([&](const auto &v) { _verbose_level = v; }), "Verbose level\n" "0: prints the only result. 
Messages btw run don't print\n" "1: prints result and message btw run\n" "2: prints all of messages to print\n") @@ -105,80 +138,7 @@ void Args::Parse(const int argc, char **argv) po::notify(vm); - if (vm.count("dump")) - { - _dump_filename = vm["dump"].as(); - } - - if (vm.count("compare")) - { - _compare_filename = vm["compare"].as(); - } - - if (vm.count("input")) - { - _input_filename = vm["input"].as(); - - if (!_input_filename.empty()) - { - if (access(_input_filename.c_str(), F_OK) == -1) - { - std::cerr << "input image file not found: " << _input_filename << "\n"; - } - } - } - - if (vm.count("ishapes")) - { - _input_shapes.resize(vm["ishapes"].as>().size()); - for (auto i = 0; i < _input_shapes.size(); i++) - { - _input_shapes[i] = vm["ishapes"].as>()[i]; - } - } - - if (vm.count("tflite")) - { - _tflite_filename = vm["tflite"].as(); - - if (_tflite_filename.empty()) - { - // TODO Print usage instead of the below message - std::cerr << "Please specify tflite file. Run with `--help` for usage." - << "\n"; - - exit(1); - } - else - { - if (access(_tflite_filename.c_str(), F_OK) == -1) - { - std::cerr << "tflite file not found: " << _tflite_filename << "\n"; - exit(1); - } - } - } - - if (vm.count("num_runs")) - { - _num_runs = vm["num_runs"].as(); - } - - if (vm.count("warmup_runs")) - { - _warmup_runs = vm["warmup_runs"].as(); - } - - if (vm.count("run_delay")) - { - _run_delay = vm["run_delay"].as(); - } - - if (vm.count("gpumem_poll")) - { - _gpumem_poll = vm["gpumem_poll"].as(); - } - + // This must be run after `notify` as `_warm_up_runs` must have been processed before. if (vm.count("mem_poll")) { _mem_poll = vm["mem_poll"].as(); @@ -188,21 +148,6 @@ void Args::Parse(const int argc, char **argv) _warmup_runs = 1; } } - - if (vm.count("write_report")) - { - _write_report = vm["write_report"].as(); - } - - if (vm.count("validate")) - { - _tflite_validate = vm["validate"].as(); - } - - if (vm.count("verbose_level")) - { - _verbose_level = vm["verbose_level"].as(); - } } } // end of namespace TFLiteRun diff --git a/tests/tools/tflite_run_2_2_0/CMakeLists.txt b/tests/tools/tflite_run_2_2_0/CMakeLists.txt deleted file mode 100644 index a2c85c5..0000000 --- a/tests/tools/tflite_run_2_2_0/CMakeLists.txt +++ /dev/null @@ -1,23 +0,0 @@ -if(NOT BUILD_TFLITE_RUN_2_2_0) - return() -endif() - -if(NOT BUILD_TENSORFLOW_LITE_2_2_0) - set(BUILD_TENSORFLOW_LITE_2_2_0 ON) -endif() - -nnfw_find_package(TensorFlowLite-2.2.0 REQUIRED) -nnfw_find_package(Boost REQUIRED) - -list(APPEND TFLITE_RUN_SRCS "src/tflite_run_2_2_0.cc") -list(APPEND TFLITE_RUN_SRCS "src/args.cc") - -add_executable(tflite_run_2_2_0 ${TFLITE_RUN_SRCS}) -target_include_directories(tflite_run_2_2_0 PRIVATE src) -target_include_directories(tflite_run_2_2_0 PRIVATE ${Boost_INCLUDE_DIRS}) - -target_link_libraries(tflite_run_2_2_0 tensorflow-lite-2.2.0 ${LIB_PTHREAD} dl) -target_link_libraries(tflite_run_2_2_0 ${Boost_PROGRAM_OPTIONS_LIBRARY}) -target_link_libraries(tflite_run_2_2_0 nnfw_lib_benchmark nnfw_lib_misc) - -install(TARGETS tflite_run_2_2_0 DESTINATION bin) diff --git a/tests/tools/tflite_vanilla_run/CMakeLists.txt b/tests/tools/tflite_vanilla_run/CMakeLists.txt new file mode 100644 index 0000000..19e21e9 --- /dev/null +++ b/tests/tools/tflite_vanilla_run/CMakeLists.txt @@ -0,0 +1,23 @@ +if(NOT BUILD_TFLITE_VANILLA_RUN) + return() +endif() + +if(NOT BUILD_TENSORFLOW_LITE_2_3_0) + set(BUILD_TENSORFLOW_LITE_2_3_0 ON) +endif() + +nnfw_find_package(TensorFlowLite-2.3.0 REQUIRED) +nnfw_find_package(Boost REQUIRED) + +list(APPEND 
TFLITE_RUN_SRCS "src/tflite_vanilla_run.cc") +list(APPEND TFLITE_RUN_SRCS "src/args.cc") + +add_executable(tflite_vanilla_run ${TFLITE_RUN_SRCS}) +target_include_directories(tflite_vanilla_run PRIVATE src) +target_include_directories(tflite_vanilla_run PRIVATE ${Boost_INCLUDE_DIRS}) + +target_link_libraries(tflite_vanilla_run tensorflow-lite-2.3.0 ${LIB_PTHREAD} dl) +target_link_libraries(tflite_vanilla_run ${Boost_PROGRAM_OPTIONS_LIBRARY}) +target_link_libraries(tflite_vanilla_run nnfw_lib_benchmark nnfw_lib_misc) + +install(TARGETS tflite_vanilla_run DESTINATION bin) diff --git a/tests/tools/tflite_run_2_2_0/src/args.cc b/tests/tools/tflite_vanilla_run/src/args.cc similarity index 98% rename from tests/tools/tflite_run_2_2_0/src/args.cc rename to tests/tools/tflite_vanilla_run/src/args.cc index 1f89692..dc9f250 100644 --- a/tests/tools/tflite_run_2_2_0/src/args.cc +++ b/tests/tools/tflite_vanilla_run/src/args.cc @@ -18,7 +18,7 @@ #include -namespace TFLiteRun220 +namespace TFLiteVanillaRun { Args::Args(const int argc, char **argv) noexcept @@ -205,4 +205,4 @@ void Args::Parse(const int argc, char **argv) } } -} // end of namespace TFLiteRun220 +} // end of namespace TFLiteVanillaRun diff --git a/tests/tools/tflite_run_2_2_0/src/args.h b/tests/tools/tflite_vanilla_run/src/args.h similarity index 92% rename from tests/tools/tflite_run_2_2_0/src/args.h rename to tests/tools/tflite_vanilla_run/src/args.h index 630d50b..3605b65 100644 --- a/tests/tools/tflite_run_2_2_0/src/args.h +++ b/tests/tools/tflite_vanilla_run/src/args.h @@ -14,15 +14,15 @@ * limitations under the License. */ -#ifndef __TFLITE_RUN_2_2_0_ARGS_H__ -#define __TFLITE_RUN_2_2_0_ARGS_H__ +#ifndef __TFLITE_VANILLA_RUN_ARGS_H__ +#define __TFLITE_VANILLA_RUN_ARGS_H__ #include #include namespace po = boost::program_options; -namespace TFLiteRun220 +namespace TFLiteVanillaRun { class Args @@ -68,6 +68,6 @@ private: int _verbose_level; }; -} // end of namespace TFLiteRun220 +} // end of namespace TFLiteVanillaRun -#endif // __TFLITE_RUN_2_2_0_ARGS_H__ +#endif // __TFLITE_VANILLA_RUN_ARGS_H__ diff --git a/tests/tools/tflite_run_2_2_0/src/tensor_view.h b/tests/tools/tflite_vanilla_run/src/tensor_view.h similarity index 94% rename from tests/tools/tflite_run_2_2_0/src/tensor_view.h rename to tests/tools/tflite_vanilla_run/src/tensor_view.h index ef9dfbc..ca04a05 100644 --- a/tests/tools/tflite_run_2_2_0/src/tensor_view.h +++ b/tests/tools/tflite_vanilla_run/src/tensor_view.h @@ -20,8 +20,8 @@ * @ingroup COM_AI_RUNTIME */ -#ifndef __TFLITE_RUN_2_2_0_TENSOR_VIEW_H__ -#define __TFLITE_RUN_2_2_0_TENSOR_VIEW_H__ +#ifndef __TFLITE_VANILLA_RUN_TENSOR_VIEW_H__ +#define __TFLITE_VANILLA_RUN_TENSOR_VIEW_H__ #include "tensorflow/lite/interpreter.h" @@ -30,7 +30,7 @@ #include "misc/tensor/Reader.h" #include "misc/tensor/NonIncreasingStride.h" -namespace TFLiteRun220 +namespace TFLiteVanillaRun { /** @@ -112,6 +112,6 @@ public: } }; -} // namespace TFLiteRun220 +} // namespace TFLiteVanillaRun -#endif // __TFLITE_RUN_2_2_0_TENSOR_VIEW_H__ +#endif // __TFLITE_VANILLA_RUN_TENSOR_VIEW_H__ diff --git a/tests/tools/tflite_run_2_2_0/src/tflite_run_2_2_0.cc b/tests/tools/tflite_vanilla_run/src/tflite_vanilla_run.cc similarity index 96% rename from tests/tools/tflite_run_2_2_0/src/tflite_run_2_2_0.cc rename to tests/tools/tflite_vanilla_run/src/tflite_vanilla_run.cc index ca80e1c..d44ea60 100644 --- a/tests/tools/tflite_run_2_2_0/src/tflite_run_2_2_0.cc +++ b/tests/tools/tflite_vanilla_run/src/tflite_vanilla_run.cc @@ -79,7 +79,7 @@ int main(const int 
argc, char **argv) { tflite::StderrReporter error_reporter; - TFLiteRun220::Args args(argc, argv); + TFLiteVanillaRun::Args args(argc, argv); std::chrono::milliseconds t_model_load(0), t_prepare(0); @@ -148,7 +148,7 @@ int main(const int argc, char **argv) if (tensor->type == kTfLiteInt32) { // Generate singed 32-bit integer (s32) input - auto tensor_view = TFLiteRun220::TensorView::make(*interpreter, o); + auto tensor_view = TFLiteVanillaRun::TensorView::make(*interpreter, o); int32_t value = 0; @@ -163,7 +163,7 @@ int main(const int argc, char **argv) else if (tensor->type == kTfLiteUInt8) { // Generate unsigned 8-bit integer input - auto tensor_view = TFLiteRun220::TensorView::make(*interpreter, o); + auto tensor_view = TFLiteVanillaRun::TensorView::make(*interpreter, o); uint8_t value = 0; @@ -177,7 +177,7 @@ int main(const int argc, char **argv) else if (tensor->type == kTfLiteBool) { // Generate bool input - auto tensor_view = TFLiteRun220::TensorView::make(*interpreter, o); + auto tensor_view = TFLiteVanillaRun::TensorView::make(*interpreter, o); auto fp = static_cast( diff --git a/tools/nnpackage_tool/nncc-tc-to-nnpkg-tc/nncc-tc-to-nnpkg-tc.sh b/tools/nnpackage_tool/nncc-tc-to-nnpkg-tc/nncc-tc-to-nnpkg-tc.sh index cf3e544..bbc5b3e 100755 --- a/tools/nnpackage_tool/nncc-tc-to-nnpkg-tc/nncc-tc-to-nnpkg-tc.sh +++ b/tools/nnpackage_tool/nncc-tc-to-nnpkg-tc/nncc-tc-to-nnpkg-tc.sh @@ -62,6 +62,7 @@ tflite " model_type="" +tf_intf_version="" for ext in $supported_model_types; do [ -e "$indir/$tcname"."$ext" ] && model_type=$ext @@ -73,7 +74,9 @@ if [[ "$model_type" == "" ]]; then fi if [[ "$model_type" == "pb" ]]; then - $tf2nnpkg --info "$indir/$tcname".info --graphdef "$indir/$tcname"."$model_type" -o "$outdir" + [ -f "$indir/$tcname"."v2" ] && tf_intf_version="--v2" + $tf2nnpkg --info "$indir/$tcname".info --graphdef "$indir/$tcname"."$model_type" \ + "$tf_intf_version" -o "$outdir" else $model2nnpkg -o "$outdir" "$indir/$tcname"."$model_type" fi diff --git a/tools/nnpackage_tool/sth2nnpkgtc/pb2nnpkgtc.md b/tools/nnpackage_tool/sth2nnpkgtc/pb2nnpkgtc.md index df90d0a..faf66fb 100644 --- a/tools/nnpackage_tool/sth2nnpkgtc/pb2nnpkgtc.md +++ b/tools/nnpackage_tool/sth2nnpkgtc/pb2nnpkgtc.md @@ -55,7 +55,7 @@ test_model.conv2d_transpose # @ target $ OP_BACKEND_ALLOPS=cpu \ -tests/scripts/nnpkg_test.sh test_model.conv2d_transpose +onert/test/onert-test nnpkg-test test_model.conv2d_transpose [ Run ] ./test_model.out Pass [Compare] ./test_model.out Pass ``` diff --git a/tools/nnpackage_tool/sth2nnpkgtc/tflite2nnpkgtc.md b/tools/nnpackage_tool/sth2nnpkgtc/tflite2nnpkgtc.md index dab6ba4..9f28eba 100644 --- a/tools/nnpackage_tool/sth2nnpkgtc/tflite2nnpkgtc.md +++ b/tools/nnpackage_tool/sth2nnpkgtc/tflite2nnpkgtc.md @@ -42,5 +42,5 @@ nnpkg-tcs/cast # @ target # run nnpkg with nnpackage_run and compare with h5diff -$ tests/scripts/nnpkg_test.sh -i nnpkg-tcs cast +$ onert/test/onert-test nnpkg-test -i nnpkg-tcs cast ``` diff --git a/tools/tflitefile_tool/select_operator.py b/tools/tflitefile_tool/select_operator.py index 1ad44a3..333ca32 100755 --- a/tools/tflitefile_tool/select_operator.py +++ b/tools/tflitefile_tool/select_operator.py @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/env python # Copyright (c) 2018 Samsung Electronics Co., Ltd. 
All Rights Reserved # @@ -1180,23 +1180,6 @@ def GenerateModel(args, new_builder, sample_model, operator_list, new_input_tens return tflite.Model.ModelEnd(new_builder) -def Finish(new_builder, new_model): - # Cusrom implementation: identifier - # Python API don't support identifier input yet - # Reference: Finish(self, rootTable)) in builder.py, Finish(uoffset_t root, const char *file_identifier, bool size_prefix) in flatbuffers.h - new_builder.Prep(new_builder.minalign, - flatbuffers.number_types.UOffsetTFlags.bytewidth) - - new_builder.PrependByte(0x33) - new_builder.PrependByte(0x4c) - new_builder.PrependByte(0x46) - new_builder.PrependByte(0x54) - - new_builder.PrependUOffsetTRelative(new_model) - new_builder.finished = True - return new_builder.Head() - - def main(args): input_model_file = args.input_model oplist_file = args.opcode_list @@ -1343,7 +1326,7 @@ def main(args): new_input_tensors, new_output_tensors, used_tensors_dic, used_buffers_dic, used_opcodes_dic, used_subgraphs_dic) - Finish(new_builder, new_model) + new_builder.Finish(new_model, file_identifier=b'TFL3') new_buf = new_builder.Output() output_model_file.write(new_buf) diff --git a/tools/tflkit/README.md b/tools/tflkit/README.md index a0c40c6..9e18834 100644 --- a/tools/tflkit/README.md +++ b/tools/tflkit/README.md @@ -1,4 +1,4 @@ -# tflkit +# tflkit ## Purpose @@ -114,11 +114,11 @@ Number of all operators : 126 (total instrs: 11,484,469 ### TensorFlow -TensorFlow provides some kinds of converting guideline. In Python, the [TFLiteConverter](https://www.tensorflow.org/api_docs/python/tf/contrib/lite/TFLiteConverter) class will help you to convert a TensorFlow GraphDef or SavedModel into `output_format` using TOCO. The `output_format` can be `TFLITE` or `GRAPHVIZ_DOT` format. The default `output_format` is `TFLITE`. And there is a Python command line interface for running TOCO, and its name is [tflite_convert](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/lite/python/tflite_convert.py). This converts a TensorFlow GraphDef or SavedModel into `TFLITE` or `GRAPHVIZ_DOT` format like [TFLiteConverter](https://www.tensorflow.org/api_docs/python/tf/contrib/lite/TFLiteConverter). These two way also supports to convert a TensorFlow Keras model into `output_format`. Both functions are implemented using a tool called TOCO. +TensorFlow provides some kinds of converting guideline. In Python, the [TFLiteConverter](https://www.tensorflow.org/api_docs/python/tf/lite/TFLiteConverter) class will help you to convert a TensorFlow GraphDef or SavedModel into `output_format` using TOCO. The `output_format` can be `TFLITE` or `GRAPHVIZ_DOT` format. The default `output_format` is `TFLITE`. And there is a Python command line interface for running TOCO, and its name is [tflite_convert](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/lite/python/tflite_convert.py). This converts a TensorFlow GraphDef or SavedModel into `TFLITE` or `GRAPHVIZ_DOT` format like [TFLiteConverter](https://www.tensorflow.org/api_docs/python/tf/lite/TFLiteConverter). These two way also supports to convert a TensorFlow Keras model into `output_format`. Both functions are implemented using a tool called TOCO. ### with tflkit -The tflkit uses the [tflite_convert](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/lite/python/tflite_convert.py) python command line interface to convert a TensorFlow model into TfLite model. It only supports to convert a TensorFlow GraphDef file into `TFLITE` format file. 
This tool supports the creation of individual `TFLITE` files for different input shapes. When converting to multiple `TFLITE` files, it needs to put a string called `NAME` in `TFLITE_PATH`. The string `NAME` will be replaced by what is listed in teh `NAME` environment. This tool requires an information file as a parameter. There is an [example file](info/convert.template) for a convert information. The `--tensorflow_path` and `--tensorflow_version` can change the TensorFlow location. By default, it uses `externals/tensorflow` directory. +The tflkit uses the [tflite_convert](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/lite/python/tflite_convert.py) python command line interface to convert a TensorFlow model into TfLite model. It only supports to convert a TensorFlow GraphDef file into `TFLITE` format file. This tool supports the creation of individual `TFLITE` files for different input shapes. When converting to multiple `TFLITE` files, it needs to put a string called `NAME` in `TFLITE_PATH`. The string `NAME` will be replaced by what is listed in the `NAME` environment. This tool requires an information file as a parameter. There is an [example file](convert.template) for a convert information. The `--tensorflow_path` and `--tensorflow_version` can change the TensorFlow location. By default, it uses `externals/tensorflow` directory. Convert information: * GRAPHDEF_PATH : Full filepath of file containing frozen TensorFlow GraphDef. @@ -176,7 +176,7 @@ The input and output file of this tool is a TensorFlow GraphDef file. ### with tflkit -The [optimize_for_inference.sh](optimize_for_inference.sh) file invokes the TensorFlow [optimize tool](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/tools/optimize_for_inference.py). This tool requires a optimize information file as a parameter. Here is an [example file](info/optimize.template) for this tool. The information file needs `INPUT` and `OUTPUT` array names. The [summarize_pb.sh](summarize_pb.sh) file will help you to define the `INPUT` and `OUTPUT` array names. The `--tensorflow_path` can change the TensorFlow location. By default, it uses `externals/tensorflow` directory. +The [optimize_for_inference.sh](optimize_for_inference.sh) file invokes the TensorFlow [optimize tool](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/tools/optimize_for_inference.py). This tool requires a optimize information file as a parameter. Here is an [example file](optimize.template) for this tool. The information file needs `INPUT` and `OUTPUT` array names. The [summarize_pb.sh](summarize_pb.sh) file will help you to define the `INPUT` and `OUTPUT` array names. The `--tensorflow_path` can change the TensorFlow location. By default, it uses `externals/tensorflow` directory. Optimize information: * GRAPHDEF_PATH : Full filepath of file containing frozen TensorFlow GraphDef. @@ -207,7 +207,7 @@ The trained TensorFlow model can be trasformed by some variants to deploy it in ### with tflkit -The [transform_graph.sh](transform_graph.sh) file supports to transform a TensorFlow GraphDef using various transform options. This tool requires a transform information file as a parameter and the transform options are described in the information file. There is an [example file](info/transform.template) for this tool. The information file needs `INPUT` and `OUTPUT` array names. The [summarize_pb.sh](summarize_pb.sh) file will help you to define the `INPUT` and `OUTPUT` array names. 
The `--tensorflow_path` can change the TensorFlow location. By default, it uses `externals/tensorflow` directory. +The [transform_graph.sh](transform_graph.sh) file supports to transform a TensorFlow GraphDef using various transform options. This tool requires a transform information file as a parameter and the transform options are described in the information file. There is an [example file](transform.template) for this tool. The information file needs `INPUT` and `OUTPUT` array names. The [summarize_pb.sh](summarize_pb.sh) file will help you to define the `INPUT` and `OUTPUT` array names. The `--tensorflow_path` can change the TensorFlow location. By default, it uses `externals/tensorflow` directory. Transform information: * GRAPHDEF_PATH : Full filepath of file containing frozen TensorFlow GraphDef. @@ -270,7 +270,7 @@ The [freeze_graph](https://github.com/tensorflow/tensorflow/blob/master/tensorfl ### with tflkit -The tflkit provides the simple way to create a frozen graph using [freeze_graph](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/tools/freeze_graph.py) tool. This tool requires an information file as a parameter. There is an [example file](info/freeze.info) for a freeze tool. Either `SAVED_MODEL` or `META_GRAPH` must be declared. And `META_GRAPH` is always used with `CKPT_PATH`. The `--tensorflow_path` can change the TensorFlow location. By default, it uses `externals/tensorflow` directory. +The tflkit provides the simple way to create a frozen graph using [freeze_graph](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/tools/freeze_graph.py) tool. This tool requires an information file as a parameter. There is an [example file](freeze.template) for a freeze tool. Either `SAVED_MODEL` or `META_GRAPH` must be declared. And `META_GRAPH` is always used with `CKPT_PATH`. The `--tensorflow_path` can change the TensorFlow location. By default, it uses `externals/tensorflow` directory. Freeze information: * SAVED_MODEL : Full directory path with TensorFlow `SavedModel` file and variables. diff --git a/tools/update_version/update-version b/tools/update_version/update-version index 4169327..1b77c10 100644 --- a/tools/update_version/update-version +++ b/tools/update_version/update-version @@ -40,11 +40,12 @@ fi version=$1 -sed -i "s/^release = .*/release = \'$version\'/" ${nnfw_root}/docs/conf.py -sed -i "s/^Version: .*/Version: $version/" ${nnfw_root}/packaging/nnfw.spec +perl -pi -e "s/^release = .*/release = \'$version\'/" ${nnfw_root}/docs/conf.py -IFS=. read M m p <<< $version +perl -pi -e "s/^Version: .*/Version: $version/" ${nnfw_root}/packaging/nnfw.spec + +IFS=. read M m p <<< "$version" hex=$(printf '0x%08x' $(( (($M << 24)) | (($m << 8)) | $p ))) -sed -i "s/^#define NNFW_VERSION.*/#define NNFW_VERSION $hex/" ${nnfw_root}/runtime/onert/api/include/nnfw_version.h +perl -pi -e "s/^#define NNFW_VERSION.*/#define NNFW_VERSION $hex/" ${nnfw_root}/runtime/onert/api/include/nnfw_version.h -sed -i "s/versionName .*$/versionName \"$version\"/" ${nnfw_root}/runtime/contrib/android/api/build.gradle +perl -pi -e "s/versionName .*$/versionName \"$version\"/" ${nnfw_root}/runtime/contrib/android/api/build.gradle -- 2.7.4
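
Note on the args.cc changes above: the patch replaces the long chain of `vm.count("...")` / `vm["..."].as<T>()` blocks with per-option `notifier` callbacks that run when `po::notify(vm)` is called. Below is a minimal, self-contained sketch of that boost::program_options pattern (assuming Boost is available; the option and variable names here are illustrative, not taken from the patch):

```cpp
// Sketch of the notifier-based option handling the patch migrates to.
// Each option registers a callback that stores its parsed value;
// po::notify(vm) runs required() validation and every callback at once.
#include <boost/program_options.hpp>
#include <iostream>
#include <string>

namespace po = boost::program_options;

int main(int argc, char **argv)
{
  std::string package; // set by the "nnpackage" notifier (illustrative name)
  int num_runs = 1;    // set by the "num_runs" notifier (illustrative name)

  po::options_description desc("General options");
  desc.add_options()
    ("help,h", "Print available options")
    ("nnpackage", po::value<std::string>()->required()->notifier(
                      [&](const std::string &v) { package = v; }))
    ("num_runs,r", po::value<int>()->default_value(1)->notifier(
                       [&](const int &v) { num_runs = v; }));

  po::variables_map vm;
  po::store(po::parse_command_line(argc, argv, desc), vm);
  if (vm.count("help"))
  {
    std::cout << desc << "\n";
    return 0;
  }

  try
  {
    po::notify(vm); // triggers required() checks and all notifier callbacks
  }
  catch (const std::exception &e)
  {
    std::cerr << e.what() << "\n";
    return 1;
  }

  std::cout << "nnpackage: " << package << ", num_runs: " << num_runs << "\n";
  return 0;
}
```

One consequence visible in the patch: any logic that depends on a notified value (for example, forcing a warm-up run when memory polling is enabled) must execute after `po::notify(vm)`, which is why that handling moves below the `notify` call in both args.cc files.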

z@PjA`oq|J+m7iCM>;@6Nhl%HAki}RigttU&zqzkW{M4I#XWa9o763O2dN0ubG0WqV zcEtYj{2qGOW4vO6&Dq-L{u>I>%Tun*(k=3Ozk`dT3kC1Hqv)dT29blB6Fcx(_@xQ9 zqXi;iMdRum&*i6g&XHbYp(8(9eyX_cZp5@SCIp@MH@BU5HcOwqfHlxeKDD!>T~?Ye zw(jWGLa(R3*2=OgHPv}hT=aADtAf#vzw~|ubE!~+{>fBELEu76AvMwWMJa`?x?Mc8ezS+nx%IrPWJ?WCI;r89m*^?@V9lyrfA^)c%Sh8WeadnzOsRK7t0*$E7_AfoyO@xyL-oN3gvxt6)YO> zNU41Le#b3BP{M0^b9Qfd*DdTIhA-^-)dk3Jp*=Vw$rXNJMHMgkbcprK0=Ts*g!VX= zf%2vPL=yXW5amV@7cug^UisATDFyLSTbd+cJp zS-aA=ZYNW&Z`DAjoI5E?+O?hSh`};H-Wn4Eckh<<_6Di|iFNj{46w z%sxb@8tvQw#=cAUrTRBaZGtO|i5<;-3*|9ok;5ut(wC`F@i_2@a_nfku#fNi)*ruY zN-#4zdum>AM7a(hIc^YAZ<14Q!W1D=Wze^hDE$RfPovs8O*ZS~G9QD}V(UkCQ)>H` zM*k^P{oJNM^;4I^U9`t&34g{ys!C5^X~YfnlLW(28B(dW5%$`%LmxS&Xe;Edz?w_# zRSo|yj$3PpvxIcXbXNnsZo?N2jT$z?TT@9tJ{EbJr>axtkGeZ`7DoJ7Oy6HhcXl0g z8SU;7MT4u+3&^4mOW);6Y0_bzC9 zX3=~;WA5-tJCkZ$dfu)8jjzCJ-;iPZjWgfwChH>aN%rq0xpDcfwWrD^cBrgbaODR1 zQcX?e&|D_M<&I}B>K4V^9o%HVZd9Q^C$;cqq*!!6@#QD&5=4=B+jhn}I}W;(ly|>O z+ODo$S6&Nm@nxp>p4KhNyjRR`59Y^LWBOt@3TdCxPH0Mf^m@{CYm8ps?BpOqTu)bK z>#`>#uK1gV=evLh#(syp^IAvEWl)6uGkuJi*dx#lqqYNlA8~+9Aihvx;$b>N0Tc@scW23#cja% zndM06U(~@)r_hTJ`7uU=N&crb%bffG$V&1)y}BwWPDS`d&H-9e2DOzwBI_RzDFup?KqWc3U~Pp4jXls=1bXj|0tIJ-SFNz^5kV>qwJ?aqnXj4 z(_>j9)}lG3az)#xg6XB}eau=$N9Fd{$X%SwEwX<7L7_{HkaKG5Q5TY9|wrmk5i zyiJp>|L*aNX<_Q-dy9o5b<(%%c~YL<_SGyn2a;xhK5&7%GWU#@3wels7+YF8ev0mh zpYDys>zX3qXYYkOtHn)-PgP&J6sK%Y&CJ7Y;dI|J7UY>iw!)S< z*9&y$k<5gX(*@t5NhR=H95ykWe6e3k>%uiFOOe`B@v0En zxZ7VCWs57w-TMntJL?X=A8^2x`kEZ?JU=_{VSHlc&SRt47oLu%MENRIc@}CI)nEWI ztj#FU9(OA;erFrk?;NSLMN+IKWFy%Ol==O;%m%O%ZvaLup=BP@_qB!%qLtXuCTbxHR z5X=e3>$2kL$Nue>B$@`I*8$`GkLA^z9Zyc|M9m(AuhB@0-YAwi+ka1KNT*Am_3Ps& z-=9>PgOOeKnqI~Q%Xb&63&c%#6@aNx4`kd{L~%~h@mhE6)dvQWWui9=jkXddZrkhY zIa-eXXxSy2 z#ObB#Oj7Ad?Vxs4%cy#q9o#z3B7vdUH(fOD_rBim(al#&Yz4z#3iL&u$qjxe-?tTu zc|&;TX!KoLzC`xbhtinO)tOWdj`6F-;I@`C%+@bh3u zeZ%cVDx1dFJ5kW*UYMctxNmTyRq|nKMAPosoD1H%a<|OF?XxrBP5cqG1wiMd0fc#u zX@E&?r}d7$dTYi;##R?}c6tufbuliUZvF1kt-aMa)f2TfcC<*LYi!)|5kQuTRW={@ zdjcB=L-s;*HP8&%bAJDM-i-vOP026cr{H3P9A0Ig-W&EzZ+h&T0JJ3ORawG`e^! 
z{%UOwz>qtx=OIL5P`RH}^2*GUejjdaAN%`=md;LXEvEbD%%P5Jfy1*hI4s!dAc<2B zV6U-AcaNdRnCQDD19u#@fKQ^u?EttPIEq0qmdF8CGPvU5Q3~(V@33BQ1gt4}{CHB9 zmMoayfpJ4trzuf+Ki;U3{(8aypt%Ekru0mJvTlqPz_5;imkixP@oPPphd^m53mJ2M zya-9wgUEj{kfYSKq*s3}c3m`)Br*P^kA*2^{*7hKjV>1MB#K%ra#b4`(h9E~a#Fh1 zW2`DvE6UPJv}D>U!7uotHbQdmwjT;H@^cZ!)Q>Ug2frI!B$jA^Vf|1Na(8&>I3fRQ z=fPfByy@v_~4?RjecA(bW9Q9pBN|dlFca&5{#c0*8Y%beIjc|; z9i_(bOZA%1hfy5zI2+kj&lgJtOM$sffG>tB1bEBw{uD7M!6xxK+I5b3NfBwJu+3+^ zPa#XrHP0hdooLLg-Q&qXGaSv(H?As~?2xyhFPD3L3t%X?p3ZdTb1hKTx&v@?5(E5N z-6sHZw_r6ARwDEh`Qj3uG*FV|c8o?7#&4W6^e_8hdJ>0Yf%1DM2zbABBigXa`GA;P z4j@+#6#l|}xChbO*h7frb^<(49_#@Ui>be;E*tlrP$?82`NZdVN+p@l?3q%|T4#rT zAiu3QccPA0Y=s&#CFSH7)05wGOaL2IAWW6z9y5D7^k#P%7E+N1AXI4PfVew7-*s^` z?GJfd-_o4k{Uz@8)ibcSqqmvNYvD}%lBUG@lV5#@&A0FuCUt}EH3+P->v}K1?&|Y& zn^;Vr^ADE-22@~gt49EP(H1Q~7@ka1o>bntLw8UKJqwFCk@OenF!w9${wBeYxD&C* zR6f5a8+IGd!iS71($g!nYXvAULLXor1F>PU6&eHQt{jW!z#ys&hhqb{#rggeVnIvU z3&|gQfhkmnY$@N7pq!f_-yAmcKt->|P>_X^o?UGDtQU%e1j;@7LR}^x+Km z1J96)knxnWIWR`D2bWt+6~a#jjq0U5Rzec|Jg8Y|dG@A)Dk&i=cb;-^;?vjiIUg*Y z3^}wpB}I~pv}L-L^)F#|X7DUfq8ik|+jejq=0-09B`)erK;1&X8KpU%4PB&t+OB@v9`eRR=f${^G8@H1Z^|kg6Lg%u&hTZ0xz!T8I&FK@qtXW_E%^qQm-Jr= zCjj;atzo*}NLcqi%B20O{w&zD1xr9W)2|>AR#!h#qO~95*0W8<_1UD?*ZSwFf~~%t zpwAkF0&fvUP+J^UU0D>lEJ|X9oSI22=(xO+$wHA?^`~Li7Du4ieq)7nC)>*__e(_dRFJemqxU< z{Q9?FbMomqymvcSyL9*j19kk2F-blFtiGHcLEgEj8=Wew=W_cWvlH?iP1ZVo6_x7| z&wM_u;Cl8{mW9gbs{%W6e2;dig4m19{9!41H{k3U*FXrHMZ>-zfxEvypkWK$f*lyh zyM^xHY01#O8;RXyvj3UEqVGMVn*w0=&U z>Z8*4JD^B4jViE+Yd*I%+0w(vwBd=jt!abC9vOqx-Wfav-FtkJ@#0FhC+ZgJ2*#b) z54IJwdhF2_mo|{M{YuHdo(TJX>rL~L^*A6BJ5fra-6jTir-TpfzT`+ZUwd5Pd=SOU z1el1b$pNQxuvFWF+Mx$uf$BX3chQ$~gwhukOdL%Q%lV6|3dbu2Vet^?yu&oZv{&MJ znq}x&nxP76(>>)xwohcs=XJ=D8qq0&X1uYnxw)-ew%Q9|aOeOYyhSlYVePp4j z#HvrgFJA=lmD`d@&nS{_CZ|C}d zhb^3#2Q5M_0x1}O^k+v`e-sC2*6|0cRzEbP2!$fgR>Q!pzJL!N3&j!z$4~4CE5^A) ztPZEYs?&}D->!d1EK9Xa^A&}A?2pjUuNSA@&2RC}<8QDbng!ra<8yO!PNl|BdispL zyL)35dkkAh1pe~{$W6}0SpXSr@kQm{6HfuQ_!L?JsrZ(V`fj2Ma!yh6_>yFLX_aS0 z4=557>-U1y^Okz==WEO##Z{c5UR3D9< z>b=0JehfR+yMR;u0CuWh2dDZh>{Oo`l}DWlPW97zfTo}5+%!zvd#|h{4#upCnz(D_ zjYA_ab7d z-e>m4W4|4k-R~}d*y0%h0Ji1yMYkaCc@{mx(WxWkS2>Na8QNK!DAn`Q!(h(>KWEQ5 zYv)IoI8{c^DC&(HxMl*<1@H_MbNzVx`^!c6IV4N+_A*7vL%XjJD*jBPDdU%^%O&L9W}TMZTR_SyWVqU`abPKO2mpS}&dHTPOJP6g z%NE`(<9eT&oS!iS2MS@Qye>c=oIP7ycc z1*4g{n(YS!dnUEeG83rNc*abX>|Ctc&N(?L2E)*ue1wQfcG)utbWtY)q&SFk5wRMi zS1?j`V}|7>s0Z3PCWJ-qOB3?eXy=(K-f%!lWvLwzo0m9j(sSRTrLxg|sASXS`H5MR zLPlds(Cthri$}Ayzb0Kb+&J3KO=?28+2Y@&5|~e)YK{WXOE_3Rg>4hqr0H?+fh5hM zT84az%6;T^GSKqJK63sXLC4vF)Mghgd|tU@HnCW5YS6K!Qm|TzZGwHrY^^LV0vAHQHyzv;QD^c9hGn^ z)V+!aUnp3(Z)on4s8Iv24isn6GH0-7ju-~51=vka=0%9YV(N{PS0DJIPh{jIS-u|B zIaYRgdW&a9Z^$I}ce>F(3~|X+X0P1KQ0Jqo?E`C!0+Zt}ovdeZTL=NhALnpu)PxPk z&c%%Y5sM|NgN~hg?X+z6yqXA2+igbV;FnGvJd~+@LHWL6tDYo!&-ZGJmp6*qX(E4V z)$s=p?4Fp7-hD>MvkXTgHMI$#@TrLr;#Fny+&29Ld6~EZ{le{YgFCmb+yXjFVXhk) zzfzB8wvL>7@pS!auiaX04pCr!Fx#^gH-!`zuZ5XF9PFnJUs7gmY>00lw~TFC{nlEL zk;E+9_A2i+nuo+EPL4MEBWi8WorL+md1kDMkt(3Jwgx>a?|k#0n!wcI^!RvI@28w> zs(k}|0QB+ty7!5PpPLV|V|2#A_z^wMCLJ)^J_YFB?*p3EX(oXFJ3YyKX7}B`sj2so zu1e(JhUl6I9XLOk=~sLsGQrM;Uj>ivf2or6G|}(7WsK1EQwLQ6z{CfMeDCyoV{7kj zHtRkhb^ytCPXqKnL}vgD=Cs-iI2GqXUI9&q0d)bWtpO^AZ^&;?TrN#Uu6YGIR5Lm; za%V*`tQMt4Gp`ou`u60q{sOuW)6<&8G=(qA^EY71(tw2GrxzET2AC{plf+(!uaSBN zhpUoK-oE>DbMiP`MK*cKnxgz_nh~@4!u&CXV9}=B^}VZWq^t}3A!YEFM5n;mK-Xhv~4IfxNgXo?dC7l zv)E#p*y`8TKAp#M46~m&=BEN!1AfEoPGHZ8^iN?CuI(7;v;^A@5W2pAL=5-kAQ4(q z)-%A?YG@@Akz+W7fpSpI0i>G8+d$3R{vDv^)|~{Ulri;Ba~icNb|el*j4om)(NvK= z>3R4E2*?cG6aT>~;(JiQ$Nq%S*4MFUH6Q 
zd|xW%&v-X>#2U0_Xu>EScjpTmyR*XZzQ%4lXy%~L8yieKwXBl&J=3MZB~*3$B8*i% z4KTv4eFD1$+vpCB+D^!t_t9T-Px7tz=@jUV+us~MedL|e64~;8Eota93-ggv{JTk| zS04E@U(6f|%*c59_z17ygDqr0oen+i0~#KKCKGI5>q*N4_*~Ed+LvC$l?@X+-N-XdiKvTX7ytJN`HwVqQ$W zxjM-aspwu~LVk6|;kl4WG~0}x6m?}nuac423iJG%!HF}8FB*3#1ty7nucc`G21;P# z2b;jp-#}HxFIa->6UWdS)|%DVrI1J|Uc$dhBA-{TpVx0@z>XI5o-56aa=|EcQ{5k5NOwXkX3& z@V!QV1&DK!kOtyhWra65sNh;XNzDgLub<`c>$+@^h82NRo2Hk##smjLdyemxgl`_#$m+HMkMfv4-Tx37{ zfl60bfA}!_liV`|*_m&8%)6!U&e?kh2-9Rv6he^(4iwKx0AMCo?_6j`>v&JLldN4|0wS>H$ZdGjUmStss6ME35x{DbaZGyi0iJv(7?9$;e z&^8A=D%FVq*ZJy`K$~#vnOFd0m#p-?&8`Pf+;VW0Hi@@z+D0;k|JsgZhT!#*PfZ|W z`8@I-tsTkqm4(MSpAUt3_V!HFX=R4?=P*Ym4rr(}hPb!_SiyRBCPNZz?n#MpGnooyefHY%{qF$f??q^1Ng_z(>m1}JUP@oSYrvXc-MBC<*5yJMKi zz+xikh?bJVzdN)Jzf~m7(k> zUB^IjZZyEJ{a_C^F2NQLGb|fI5f%CW)D-by`>F{^#-H!uW|@@Z+t&6o@`xHV!URf8K|+8lkP9Ay<5GT8AK+ZuA z)HVLYw&_c7M~_zXghSqeY7Z7<{1df@iEl=@)xb4p2;0NH>4qfG1rMTU|!xZd@NkZ0jv5yQGV>~`yYA?l% z-FrvIGMa@dZtuj4}Yj{}^LS(Qrw#m8Mh2wZNhL|eQj4F7oK zch9hJj<|D5CW%2sED-~~tGTgyW3rq4ARMB~epkv{} zFS|liKkakBLUrGYN&mf9*bD8~7#QhE^-|-mx*zfCAW&1`kI}RM{jn$AM}QA_z32A~ zUhVS5Q_$@`h_8>TpC#TW=Q{F;FMZ>$tTI6~dt)TG{v^ck)6#Q=(kI{=Q;TRB2Az{> znL{aV+_Tol@x6}`=Tt?3Ys_gBbX+2T0Jn#V@Sv5b9M`QaB4tRFA=6)=NOImQ=CH}x zP%iaOY}D`NFm2I9x6Y!gArs(Y_yCxe8O+2uE@2poJpgk3eQY3V86b+Wc=8p_{L<_l zd@)JE6-is`SmP@(+=BnUlXrX!~ya)P>iOn(J>4(8w)@iI0F(Mnsl7t;n^J` zU*pz_B|)fR{X0MBM0FYh^)po|w=FSNZ+P3*4hLvCw< zi<~FjU4Rl8gZ;svORb1}RX~V;_KG)?yN17MPe+B}t-A8QOZ`jq<{V_EOD4qp;FN64+)QP-=4{7sZ~XTM zJ+#+ z{4Om=CLv(hjB*@y)-5=7&M)YIek<_`{r2?p2BLHI$i#f<4;ghcMlIS1V7-cz+gLaN z!+X#fIY z#2ZngJyk6vzkWRT8M${ zLy}LifzG2j(7mfzn$o2y*(bxl%(F@MQ*y#!`v;y1@7%G~D0hutpJ_r3fBf^ebYIxs znuq0rah}f$2QbG5h=_ctQOi!XUJEc+3Or^74uq_hFbL4^uEG_Vpx(yW%wnh@_&m`& zd5l8_m8&NbxHY(WX@H=^G2rZMQ?9ke#+%vHq~bFs3S2xQUNzL8y)sfTuk|y6N%!kL zhGN*)R)DaEizw;>fD7Kwx0b4q{!H9cJIod0LP>~KA3dVEgyNJ#=6cl@&AvWslv<0y zmx(2NDuml{VmJV0J9T+xbAJ8v>?y-m7obK|EQCR*NU~xeR3FGCs+BaU}oY!Tiw+ZC?zc-mlr#wN7Sw>>0vn z0)q63JT$Nd-j)I;3&1U|KXL_i0bs6x7|a#W$zyW`1lU{w8JH`eX99BtSHWC?N$!`^K6r{xNdkexA(C&Q%o*U z8Y@OD18J{tf1Dm6?yDGq>n9;OV5mtR68Oc&8lEsOYbL!iFexk$&mKScVzDn|6aSLm zE00iu6-7nj4Fou(fJN&QCs~vAp_;>8;N#?G(iXrS&9?~Y2crlu2U0Ua)&rw{`s<~w^9D+o}fEDGXqpgqb z(7kpN8;XeIY99Wc{pH-H&nX^eK?=8jK0K}wX75g)BqFo7$~f~^VSe5=D`x#ckMemv z`7EL4GO*tL+B6nFesKXYg^GKm`U~sdjqby`Vk}}sOy4bAiV2uFDLxTE?76L6j*`i2 zxnXAf2BU6f9Aeonpy{MC`#D<`dj2{40gaNE7;89iibEODyol(&jD!@8U{iKcz$v)p z5JjH+joyda0p;UgsfdN_t_$4FioLQ1zZ@ODv~d4_&~)bEP`+;)Pqr4aRmd{3BvM2n z+fYI%qNE~ZOJr-Y&sZw^z9q{fB$Xv9WEoMmETNK=u}hXQ1~X%3p8LJ0@9+Jijw2nF zV|wPkuj@QN=Q-`>97Ydz=3-mnO-UK;^=^WooRi%5RV5PYEO7Wr*voP!{&UFybjc9t zlC82_vSpS_whmpgS?H2YvRpD0bjdK#B}3v5hS40}C$;>8Yrwxc_hk+;xX4#Ga-z>Iu`Y2z2v26v5TQ( zZp7xp19hU$my1XhdcX&4ys5v8#bO)kgPVc^i+gsZU*j->9QrUe(l(R9(!yv2I1QO9 zQs1mf9SSwNqaR7_vG2)*M%fc9kEx-zgQC+zcOzX^JqGNq(d^1y;%Igy$q(tKT{GZ{ z7w}2pie%z1vR2Vp8h*Bin%PG>uK0485!A@#y1Xyt<)Qq1PfNXu{i1c}MgTpf&eM^- zXyQ2O-bP$fa{Gy!cW(}_KKVdmb~4|&6`vb<%Q(3I=lH`kMvS|5-H5DS<9eCyaFkm| zfHh>rQsapAdQ{pj?}J^*LCvRf&rY^ot$e6{mG`EPQQYwhiJ0anG1FD&0^%39^uYN; zm~g>l@F;+7ypD!Hf)p)+8uJtT5$7?^@WQ^pwT@1?aJX6QqaXDZa6I@+h?aVlOT=`o zJ&fn-Hj(thKJ&^?ITrA_6Z@*~-%}Br-Xy(?H$o4fpl!HqDG?|-K|$84y&Op16t~`c zpK?vccK?`zb4wZtxti4tEf+erpG$I95;CANsvB1K(5;lcU}zbq6UGN0P&rzWVRTCrSOA^El<)YqU8tuALw>;@5k;rF#9M^6tY443t#HM5Y>{lW2DM{Qn)=nVDKs4z#Q364;REp zUzVR&PCFioPu<-^O*YTZKy_VEK&EAs+rDEz`yQU!hW*k3Od~ZA9~)Q1Cr7nyllaE3 zr+HS}zszZ&(Rq)B{is9(f$x2RHYwb=d?M+ONWHiAQ>PzSmP|ekEYuWfwU>7#y$B>B z3VXp(@`gj{61YmmrmAWo&a;cyh;Cb#&L2HkCvEoxQPIci$|=1@=`Lyr=T=?61MD@- z(}oT@sOC3=>ELSqVGz&--k88MU{oWCwu*fjLBoHWhd;RcKzaKduyKYThRymK+{tg= 
zL44K)+%yzRDmh4K7U#UKJ;dUimoOgWH`(FWkZc3wlBX$v-^md~-d)^U8v8Uj>JhH) zYxoUZV9i_{`HWu1h5Ht7PHMfASClkdH!Sfd>BR2nGAXzpI)|~e!dwYt2KVjR z(!@|^P*F~|E`xfO_|V{nMkt`{*SxNhAI5i{a8CUGk>O7lBIr=k;R^r5d*Ex+?|+D?6@ z89By4jVFVMYb;P8Aq0pyid$dfx&1dvqy5!N_@%DJrk?er5lV}dGOj*&zIM>AeEjZl z`Mi+i-eI%m@!6Mq`u(2?&SKx!W9(j5x{XlzKEwP%E^z4jw?*Ut1rPD`RlNsS+CnV^ za`p!mNb?pP##=L-WXW0&CUZj175YBvP{`<-^@)ouC98ib-f4#Qm+J&;chubfT)G-O z3?k4-O*NDRq+RWh(mFr$Gho_-ry5P0NLddn^hZQ>b0qxX<+6 zF$ae9IMW^sJ9x!@0?ic;gX#@0+nKoiAV3&g{0km8Hk_(ih@+;u-5# zwVi+YOdGg7#pup^^;F7>#Z%V}gD1n-=k4D3NZB>E$=d~!!QG-e8oBh^(YHFcjt8PZ z6loZE{5!*+&4ACM9eG__iiYQVc|F$br92Wt>HC z+owg%4~pd7Q8|wftn`}i0)@gT;6cV$Bq);MO{zdcukoA@FK90aMFocz@G&D+L;g$z z>8^bVm3%Ls94%RZKgHKKTDa4=d0e#$po-6llMlf2W*>uLVN(v}T)A#>CV+$lI4c$O zDT2r`zbTKGC`PqLhgo#$G5maZ&RldRNTkJaHA4(N8H?+DzK`aXI$9a5 zd4nLj8bI0yzSkf2STwg6ARxud!axRX4$#>XUBVJS0bjKiZ7!Mu)N=ApcDeUBl$gW2 zUgL-l??ONo(qISh%79T?sRu2(%DL6iwG^`;B9d-Jgr*KAV4w zIQQd}i8pg`r~5t3#_6_1Tv+U4bAO?4@aUZ7JL-Mm^PvbpkbAe+gUKsDynY71*`sM5 z!}-@9lt7V=PXy`;*I2rO6Wl{Y*X&v-JYkbe@2A8hw^dPgNxRmFwXf#E=>@^fp7R3z z)SR)$3Z+9=Uk5(ngnK_8j$X{6MujJE=Qo{)mTW^cYn^naVD(g!JR0;g;-o`#YaRH0 zPSoG(3Z-ieC6)x-_LlVz(bSk5<{Lg6A}yw=;Z7b^4-m}heXPxv)m718XMxchduM3% z*jA`$WL^4j@8{F|7G1{|JSbz`juB(Z!l1nnd)q_wr#2ye`vMHY?ucBHW!Azq58}=>3?gbU_KDm+(R8^ zg*JG^S~g~}s^dhU=)#)Ws5Fq^XU@wWQ&t&Ekxjf3lwcZ;RV*fao`mw2)P@8S=#uMIf8Qg zy*_^N8~d>N+$BT4Yebzp$8S&kva%)7K<`j*Z+~LhB1Dn-oZLT5QjQiBCWFkCVXWt& ze;~*~1mz(vq|^505KYh7w+Z8;N_BHzRdbf~Ze{1an7VfM)E??J&l^n%Xt4_EnfZsX z%+l8(6sg4hwPv>83&4)LZCSp~{!_UUYMzkFw&scqCgX3eY=HCc)>xTVDi;2yNCAUv zQV!O(1oIPhFImkG8Rvd1#V}Y;_d-8^XR#io=o*M|WbW?bdv#3&K_h#|KcLfYLftvQCu7A)Knj-5KF}xxx7H>i;UL$K!XL!jSp2f!0 zsnrkT7&|463fzlS@j;7=2lMDWW+{M(EWr>6^>cU45@3JP1E8ul1I(c2ygR&bP->^6 zp^LtBi;3r4H>U`ezwAzU;T=$j`1VWmzV)sHA4+Tl<~*Mb@19%WJd{A5tY2>rJD&bBnXb4OTdfaYMijBK z^4+C{=-h?jjAx#=vabQTR|-t{XxEWIo}alWlKhv~2uxYAbAhSe0?$P>&C===7}znN z3Z4h5te`Tx6Y27bve8VT)dwZGyRkjM`|+=^OT#{6Ua+Zmvvv%5gIhS!%Lgr)%ndlh zsUkJzqS|28M||j6gxaP|&4&~72I;N!qd7j_b#sL!r7>!IZmWIDxtY3sD&Y=hO!Mkd zwXnSMON_3ka%q4bc>CaM2IBK>6x2@xyW)giAfcAz5!;f}Zerw%!C2n(9qXj|-{;oA zo!i@IcsnOM$5EE7s*SUdT8?#7jSWeAaNEIa{wnx3()r&8dfKsR0k0HD#NO)c=Qt&JH$@U_>Bz{1DiH6DAV z??toe>nXA84$%jzM5XmZI=}-w@>0)!77H=CS0HX*$J+WeyIa@8vgMBj>Shnm1`Yc> z;FHB)i~me1@1GouUAMJ+j%8*l6Kck~g@Frgr6ltPGPj(KVR~DKbmT^Ni9UgU=_O=S znO_b--ysy4gG6BO7D#{hvgPwjJmS>y6TUqgaSr|5?!ceG^3WrH0`qUE|Eas-p!erO zxtGzO3ni1iVQ_bvTfLQcN2B78pi>Ky!>)aq$6)>KdBfJN@sKT}v< z45D%97VA{J&%i|`I=|y7WM1l-Fh&Hpm=?ESQ z{59(iO9+;{;I-*DM(fl;g;qNh~mj!`;jl(<#>9z-q zhwP4=>9t#Yl=PbIV|UcgMu#%(#u3jGRTH|Aujc9Z5^jby+*sN~8OQVeLphu=_mA^5 ztZSCkZCtdQ?`|>G~`Hc;?hLOZNwDuaZ8kq+Z zbUJO4-tU0M39{LxbAe#kl@XBR9}IlPdX&$68^KI;dMVFuU}KQU79Qm;EoX+l&Py{Z z-y0qNwghDGnsmB5PvYAji_C*)}1O@4zg{HG?&w;4Ab zWz_d^>2Q}?tjm*0B#<+p{6=0 zY569}JyPrs5k%p@o;$2}K;1f)9s00>EqR-cdmL>7TTZsX9p&xbDq=)Q7W?eql1m)3 zH#y_C-fpeG8$k>1Dv6-o{1l&8CQHUYO^+6R7!f=Nc2j8%<{f&c(wPH8Eom(&I#t{wV}@1S1Fdb!uQEeD>P`VHCv5JH`_!9`;#KX0ahIhXEfq~LK;fN%^y;VU&gNN2+Y==MmKXNW}M2J)u zZzTD%+#IO;F;2tNqw&Ad6`bW$+HDBYas%s71-CE@crVr>mL4k=C;`!xhbZX964#*@ z8=T8mGDKV{^jevfjGG=ChSy^4+9Th2Axx`WAn@_mP*4q5FSHg?pmuwanq*^+RMK3~f?wZyKX zIZ=AanR;yt?E^m*2-_*~Um(9y=#Sk%Qwz(`9e4sJ9Vn7$$?!0bl4 zPp8hK`|&1UO@ia@Hxg9@uZ!YSU#0q^x^{eu*J*>J^-zU+Kk6}IPxA}H9k=aU7ngwN z5(#hHCCDnc_yo>CX@gmK4!15N$h?p8_-k&5!fxCCnD0S44=QeRTGG@dQ1?LI)3rxl zPGsTpkyIjH_f{?Vac+!EsbeAY6kwloF_uMl#e*dHbZqmjP>Erqoa2kxO1YwWNjxxc zs?5aqNKO*xoWH>_O>}-dH$^U8V4~U4iE1@Tb8w5@L}!exqXiiZ2h%JjL0DmmjPH50 zDwi|kYJNR?`qaCRH4a&x`h485G zgTT?&f|IzNj3H*ic&+b&Ycan9BPDdYWQ_y;^{fL+)j|bKp4rOqD>3m_t2CV7!yo6? 
zwl0O@txC*lLvF} z@D|=6?NWaAkqnrDUVA)``YA$gyknDuwaQ+{ea;YC1+EFs{;DmHmIK{do=<-{7BO)3 z(Q$Xjzx!Q{SNSA8eDr0d7=I8;^bTG0!^m<-uxu3B8>+zgz@MRF=?7g^eq8_a=kmH zF*jxkcx9g6eMl5beXi$Ak1<@DbT-N!V<@ZfY+c^~$rK3IfzO!&tO;Of72k7g+wS4f z++No|zLrl%P>LF6gci%j))n(Z1N)!WeSOrZpc{1>u3Po}@8RY)J3l)+H!DAO%VC(% zS1t7M`8tpQ!$6TmNPPl9kmTJW7#GHuBDfI1R+a^w235WT6C5!AbWLhMu_5BVZ9AW= zXR)|J^+^&(e-#G}4+D|t?#%$5(M{CvS0L3D3CK}B0Z7^YOdj{4Yc6oI;e7)-W|M2YU+-duT9WpAi5E}4rE&zr=JesNSun(_SP zJkVvmyQh|Z^CjL+Ra@So;mx$j1!oqjK^rR+04bHNbmMQEzfi(|-I2gt_eth7`7VuwHPgn?n@rPWzWx*bFtztE3_?y|5 zn^1Z1qX8?3(g`>|QS00$V#;H}E24h4LOfz1I>mIa$hOiNF0Q8rn=Sg%*~Ti;c~Is` zJM}K z!MTbGul|Av59=vB{3w{ss-d|w8tyR#>tP0ba{LFWK5+gQ;ofz(v^`2Dt*zsM44nU| zzHt6e5Q@$W?uc(wEc2U!^FP%O&i|VJug=Y%#$-TS@kY_6dl(aS0t~lfSdLB3^V^|VzL7g^rL5RkWkuaur&FHsh%?CjM8QZI_&yoh5=U5NMt*t6w z_T{kgxN@Ckd&pbhvrA=DWiUNFBG7xU3V)J0JC`ags3j)QO55z6q5)Jm3>FWpqfpK4ethUfPl|q^BgbIBp+at+8rw zyLDrkBjcsIs4EY@QRJKg;c zbhrwJoW-qe*h(&Ndq$*1vfFUTjiQI&Kl#OOTE5Hl+nGuM({I~sc9d}3UU)EgH|^!a z!HYZEu+wrq;4azak8z#?f1@%o`cnqs9$f@VHgbr2Dzw)w-4IXr$4HdzB!^1i#M4F> zAU!`rUiH?@XZ4MpwwxS^|7k6{~ z$T2`z2GSQ{rHxg42X-dG`Q~oh3jxRWDjhkYBCUkkdAt3A9Lnm5meLgkl+508!NlG1 zrW>E)#L&cB4aVRxCcy&IT@kl)MiF}6KH-F5mXFr1H3M$C5F zucH6G4v8vB3WbiRjtCh(;C*pSO5f{?Zr`3m>T40tTH7CfwJ=WMEtB9#=Pk>KKO|;@ z7QL?iDww3s4oPETWdN(Hr2p~O=g~ke9^9M-59~*(!2;{JzYRy9mFJyvzMJoHC1rAI zq?Lh0xkr2D((K~?(}F_>Kh}s|`DIod5nVe$jm9!?;(h_Ihgc^~dq@v7}rrqNLB*sZO8e zd$PZ3Uyi2UxUO9*6N`lXdgR;BFPk6Ncjy+=KW((9t zcU#&ptM=%($w#hIRt<)?tXpuqE+pD6`OmCnhjz&VE5iCxM}gIxtq-8ogb9FGVE>xvN_c z*qhz#64Q$>K zlwEvW4uCI|D(J&rIy|rJp9y`~^j>Om=v(N+4l5$li}3us<(8<01A(JWC1)`^MTEg< z1-JxR94P9=gC(^j1rq+uZv%$U6@Pt(&*t9QIMAzCe1YLpZKZ{1T!w%Ctm*6|RxLA) zgwN4>uz>D{V}N~)Z{RHNI5Y~5N9lSYmqg0VkZhJ#%?hntAiN-5ZlYJ?S{l@QK7ON% znSSvP(d%kix{UK0RaR?^o3q{>SL*DQ<9S+o_Mn&fu?vT8Sf7hKP2`DF*7BaXvdg;& zwUBYs_2iMkzPI8-gN~wZ?@LswO8n|=&7925zO7`vh&Y`1`O}pbTyNKiqYJe}$>H41 zldyGCDouINvC_z)Av&gJHe}Da`uK_G@t4GdETYbfR%%S_;x1Ibxc;U=Jo4{_UWEWH zj`gw^@?-N`Y)ro8`InxumASb7QCp%#vF!0o{zO!UBhx|l0r@NBW7@GQAVd@{9p-&N z@6y)JH_Q8J1n$!Co?orpJzy-CL)G5n0zwJg*ZF3;lR*rNmAZWwj?c!IBN-O3wWd0- zPY6LD0oK|GzS@$N6q;H^%fa&rk#}%S0Sf#g@F#ev&~R|F&IDcAXXmi1a#0M6W_|dF z4(9fN-En)fFivt~fM`b%uLq9@b3xQ6?2Cus($h5{tRTMz>ix`@SBu^UD#PsJ(huuV zx24ah-@fnYN&dRJ_cG!F21zbH_)z6+VjS_4QICAp=7*;5=u=^Zt|e_wl1EszaPsv zws!x=bd-nA-<_2|+GYmsom9yYdNYIzC0*Tz#@#x!*4r)n^Rf9&kvAv)o{7wIFNO_z zAq5eXf#BE3_fr-PMRV4Ny(>>t=acGtstR9}77mch9`1Uaqw{9%V3pUFpWok?mJN`d ztVb?dcrM&Hw0zppreaRg=&I_@KMQ#7p`ndGG_PD^TQxpCapo_&@1x9x`e-2>U7zc< z?&0JDi?@N|QR3f3!AJTwxkK5vKx}`=L812r4~_dv_k{`yO-@t)7>^kzT&{=hOhL0c z7Xr5xUDD-EE^z1Fs65e}f2!)BjV^EEPM0fMJc$M?>oA-megmn?%0j&ItT*hvjceu&Y&Aj(s(cQSsb>^`kQGZcPWcIc4N&tL9) z{2~};HYpx{b|`1V1^VV`zlZ|wPdeo&zAw$WUEREmrRh3TR6DGb>|IT&vTEv-|)i^HqoX;=k&9t*Cb*1_2aZd|l=Qr0vuM1dxVEEKLuof2ht`~V? zHZo)iZNNdtcDKU8m_)}vXg_*)xK<`{plK|rpYPYiQeEop<%QYbq23y`G6M`rZ{x^; z{L!R7%F=MVVNJuEqg@sWL#CqFLJ=(f_u`FFYW-w5D_DLR;T~87&UZ%u91y$#4xaE{ zTkjMf{;~ZJ^;h^(c#@d>E;I~cn%B{j_u-FIlApp` zkaAr3&DERtZ1xLdA>sup6Am(9KC>2B|I&Q6h(HT|1GC!C_k-32$yU1FX>Yx^3+eKY z$H-kmJoiFZ`{p)6b{M8N=t2)3hEo2wW{Ys&<=IOFwG5xLBLI#VNkFLm=qEO+B0C@d zHXdI#n5lm2@TL$m@qpTmRRuvoRtycb8{Isp-CR5q9Cv|};8Z-V zijlFGo6#FQ9w!{6UZRXRPkyhw1y|N3q|(aZpTYMD+f%#lX?oj1r!$7vAw3c3llHT} z3p#sIR9rcY$K9ecF$OhH7(8xYv;tUd8-!oYO_;z&OjCbJ(;AiUhc}~?wa&pX3q+^* zu}=ow8J16Qf7|Z$eIyNVr$|xs)w}*i6kE(i=s)a`saDvU9J6;8o^&ywVay(n4Mu-% zuDk%J^$r^9DkA(0#yzYdgkA#iI&nusPkug;bWwg`5ziGnQbcTYYMf9)Oz9M-Osl~! 
zN5cccXpub@?E*$6@d0pN*8#-jj+&EmKftM6)>CRt0Wo47I&^H{do?crV{0>jO7HOl8KDQuA_{LjSo>pnK}SS7&UB_d zR&Y;R)amWC)&-W%ovvTy8>ZmI=$~S)f7`gp zgoUrc@kGCyPlf+T0DH$8MjQ44z!=N-3xdBuU^*ppG9>6UMAV&VNS}Ptvd@ss>1H=D!1=0~Gq1 z7cymTv5lf|V*0Bcyt(nCO}S^!Z>HKl?n=Htsenn6^Iz=!m{#0d+OD08{~W8$4#Cun+-0He*&TWg_rc9#%~yg$qT{Jeb@z#!3c zT!&o+{%{!LaMOnwU3JJQ4t5toT^^?EUEkrYclRt!4YRwgzLvY)JBspFI&MZdQ#?1_ zK>olJO1bOZoG+VJvmnbKGS^*uKv00+BbgW=bJ#RarF(JoL*^CEs4JYza?{uIijGH2NRv)wwY8stiWJ6 zgtZ@45VExjdL*QDSW8yhcJqkaXQ8Yx%dP~ct}8jeoVv7}_P#E@p^meCW}{VO`^<{8 zofS`FbrO4A>#3+9G>(kHZqM3KM;nPehT}h`@v;xnf9&c>$cwQI4>718JmZdbP?t<>V%P*|H(TEX~HwOPy{V|x4>ry z+^{a~ZDc5~Uxxa|UIqDCEKY4IM5}}T{M9mf&JUGQAEsZiap&HK0qsY$iz7PcZ774V z{B9wjHt_|`s%0YyL=r%{@yIZHoAWJc?yAim_K=$y4PfViL$pW<@eHIX$MIu0{U4D@ad|CFO%+m+G4iFZ> ztEZLzZ1TB>R#rz2WoTUk+)LC-MG^|4J`9vd2gfA(&#EZ+V~+lYI__~G37T_Klqk!z zZ+y~v`KUgZK!)wX)8iegGB(`eFCOgV5WGbe-tzHkcKW`zR^u>*b3Nk1PAAKqYF4C+ z>~mayxR2}y1fAZL)y&K>8u}d)@~r|KAz@)Mk@b(s^7($*yBS_|`&47g`8GE)JHsu# zU|cF|q9)3+(>?n3Wvs7K*2cCDEB0XW01f*Z!@U4uM+ZFP^YC03D06|>aH=W>LBkY! z+m@t@Mn!CoT3ViL`QCTJnVrF%vQt1atJipkxS0_%B`~8W2=hT@hM}~toa>%n3jvukA{(u4x+@EyaXBxjKx{AkL`YAVR+((JJ>y~LD zO&y9Z8$rpj(IJf;#ntqNjzh(id&!@L(WFf_IgpXA9hO*r9_IwX%x+ zItd>o2tNX}x43mMC}@8|B;rtC$P~yv;?e%(vTJzx$IIl9NP}d8Hg18){j_TgBXwDm z@hR&MLwQ$U6Q*`#>E@6FfYj~A@2R%nA+PyR${JOrkJVfyf@NAPasHFZ7!0E2)FUm0 zUm^K_%>A(C5kno!+7okmt52dI=iXSpn1TyiE|<(tTQ2{mVWF#UaBnqOpE)^NE%ylg zZbY2Ee*!pO5pbDC$H$}rXI9&|2J37HlerQ#GWMt^{LGkb)ZF9IvpNZ1GrSFTbkoHI zb{+7QQLPUeeIDrlUR}|?ZY>=Ggw97CG(!+#HRzC@$zTwmaaI7dFXVRu2?6mupbgb< zXr`hp^+14asWSXYCKYJ!UhD@FFNe~BwoOT30t+j{*JJ4z^3eMcrC{>V@nvC+f6Y_t zQ`jfSd)BZ5OUxi%#+~wgDI`3>1;4ZNS1WA304cEf!d_vXbMRuQMoLx>HNvhWL#Fea zLcw^E(f|poc}1wYnbn{8V&eS;fbRm*xhYRr=@HTt5QQ(^G|E~+VkUXVF7Y49@34KZ z8DH7Ec?lhVr;Qh#k3WrCJNlv7j~1A}Xc^jQ3Mr4q;&#C2<8RghYU%i^S0`C3Z6e@? 
zl}LYpf&0xxU~-+>3C>%)OH$_e3f;c~sbAW&SpKt#RFo`N0tK~`lDL8T37q$ie-Gi^W0VylgD19^<_Tci3PVM@Twt)Z1ME~`HfYtf`BHAk$ z7}2J{h;{)hqK#oiv@2mmo2GMQnH&fs+5{NU9y`}1+>H4ymkB7dvvzc}ff#Uxhpg4P zZXl+SB$~z+5@($g5%zk*Ba)fbpQom|>KH7zE#!Ex+9XVFPS+Lu2-j&dNNg@{Gk7q$ zy})UE2Po}XYd7PA3(1>MhOx+b)<-wUbO859^W~ZMWf-U2d~#p*t3Sl$rk3}+8O=vs z{=WHgQ8UzS_Kj|k&Da_Vc#~I_{m9JT|MhW5B=tU5sBbY!1n(Bm2H6ZeG!m#N7g*i5 zzmnjdC^?TgIbWZjQ}+xM%H25VaJ0*kc2h5CrbMmr`=z6&gTmHLmHh|OxZQm# zo!S|$lVbi}SwL3kNCE>UCy*&0i_sSl(`IR zNSGZ!uS;lc7OYZ0hG{kp@7k?E#=>W0)5UQ^F*fHzX#R(x`!unmtT!?0U~js^R+u$VHS!3+XS0);kE%dud zz250^k1IAV`R4)IU}G{-G%eVdDHikb=B|;mvNp;!Z%xcYf9T1O9Gi{nId$5N^Bfm%e~?wX zeH0f%8{r{tMk+jBpV{#;%OAE55|BH!1=?GxG?3&((0Ksi`+F&p^>#haz{3AxTl~P} z?J&AMx~-P?QS9)%o1oWN-Z}rk+k@`Qvkg2B0j1VTzw>q3HRx0<8TNOR-(&7KEG=wa z?XJ@EKs|vM=ix>wdVBREs4@|K03@L{yj`JJ&q8))vtDL|nnRVP_zH@rzetW9d_uwV7hT#?QWa}^`MzFGPR6s+(fmj-DQvQW5Beh z`nyKfyDS!%h;^(zPmWnOd}VCz?|a{Paqn#6f{9LX=rQABD+eCqVpC~<8R-#Jr9k;t zp%Q^|Ybq#Gg0bA-(%dYLdv-|j5kPs70Q2MrdH^e2YdC(*4`Z|RpU;{ zo+n+cVTlnn4?}2-wTWw9D>{?s6P<+OjQ8wlLqqQroBO#b#3#3Ifn2!O4$fd1s3+4f zYMVe{bw^m&hMU|-Uf!CTCQ&C~_}6Im*G_42mH()L?2DOz`z9CSymzFgER70FUq0M$ z+_2h)B;u}o9wCNqdO1mjkx(|5`#|f@f1$BJ1kU)G%i6Bz?{!$ScZpv;E+2sMK4$za z|3X2IcFsY8fYQyt?BeQUW+#hmzja`10!H&f@ERo=h;SoSxxSiJUjo?%n6}l2P&ICQ zB$;`KAC{?{gN165BB2Aa+SEwB)6_XN9f7!KozfXf%qrol{|OS3iOYBF18h|QRv6R@ z%CYd6I2I-QVduy7&1R|n--rT&y#-y$#VFHO?T?3K!wl_yQFi;4CE!gingk_ggpwrS z9!x^>SmB+bu%n)D;YX3 z0lOLc1YkFV=Jmm@ElIGOfof4#yb5{+sS&KL93nC?tkpjqeo2@*;b}gzDJJk&;F$kx zBZfUvJ9J=798>1I3oyb%!XFQcvN8814ly6j$boJ^kt??--jjNa8C zggjiwzP!i8L2v~w&lFZUq$pd=p$|F0*$SlIaL+KY-=h>$9Z` z(V36(D6ytQYUKtQ#BVOJicb8d@CvY`Xp8tC9;5J6v23SWU>bnq00t>%$hQTQ@ zP}ogn2{%+MlnQwmA^V~Pu<+3;#NbnJCz6c4^^VsOW1ktX>wa+5gcGY+2cKa&J2k?^ ziSY0J)Ib>1S(3loSBqF@F$k>f2M4{CBT`SlQnnsV#TfH@E}1t=k)#BdrKI!Mwh3G& zC>R>5V)iTM`}3ZkfVu4nRTpOeSCN(U?;wM<22Zb})jozBf7^8#sLAft8Jp#+L*#{p zBr{QO5&0P2?51CN^A!i8q(Q~HxX+d;Dt-ZEGYMI&p0gpi zP;aU|RZn0hZ0C(-B)koJSJ(fM{ftqV(X`r8oWQ)wq?&X_A|;`_v`DvT@&Q1n3eEx4 z0;-lappC8E`x~6qAOSdms?KmA{URMM^87;*6pr&4hzmpRCn90UeY@UMcs|!Y(0IVzlg?2GD{+}j zbn&1o10IF{gw(CH(qr@(Ne~WUQbSmE9fC4CmGhrKd2)mJsKb_s^Q+f!8QOVBee`&HnNVME#))2-t zMqBb^z^TvRWCqLP@uQ)vAy2{U0#Oz&QHZka5QQkquM`%_A|Sf0RP8iGSz0?G%A%nP zQ5KYF>S4QoC`<2Fel_=1Nd&zM~Z zlT#!A=WO!!X8}~_>mOz@i(cLiXbYA45}f>{z^05M!D(!fF5B$LW*HO@U`ZoZZB!~kk0JKienk|{Eus(}tNap4Gp*eZ(pV3m$}fKsRuypX-Oh?Z4ou-;3{o8V{W3lgkvz9cjG_DHR{Lv*du=&|geJRt(*eqR z#Hhb|$pe$wUA*y>^%p!b8|ybS0$SazCmGnjb?lc4Q02;m3nvEx z_!ofH<0%Gqa1#%o`5AB?{`z7t6^~^)m{w$ij#t2J_#gv#L89r6MMs{b<1eTO0ONn( z!yC{ay|TsQcUeC6-0IextB->Eapw!9?8^dbaTp)iTUyoMFOr<{(mJ%&_;M&hRA0w5 z+6Rn=_N(&i+(hud!R3nX&1BdAg`bvT_$dg6pZaO6@Dl}wpT;*~_^BU;pORqsiK=^X z|EDEL<3SaDZ3TUcQ?(1WE2C|3fHQ-oChq_t?MHF!uun|VQCa;b=i*hwDYS(YB|6LT zyxme+e^u8z{NqD<#HqxHq;bo_z({x7$tj?A1fhDlFruw~Z{q(IayT)t?>?c-?L&*P z)M~!5cb7ZX=)$2Y^#ud;k5+v5ssr+x15%^q)in`k^3m><4V(2ypTXHvmlp??7B&#oC6%@0)WSqWx3Xso$Ub4K` zUppH1Dc}A~AkTkp4s17!VxS=n1uP6TbAY_`RhM{| zr(pjzrKP~8v=&xVTKK=FG)-1h+7?z*S_*7R3uiT@iNmHeHtzb?g%a^hM@H#reYA9- z+nFstKZAAxFnhjfF#$Nc73kR0aGz>+=r9$chaFA*Ko5l-5DoZjAhk*lmN26%&D~YX z{n4>tTZ{!qj{L6i5f(loIVE&tVw3o?ba-^VZHPWHh=$m&eGnZ#?0y#V+a^$~8dXa= z6OTW0sctm{>(*IK3`=*rN)8K6@8TrLW?Wu6D?MT5qm^zy7shX@ysFpv(e{0<>uc9P z1MkXbQeY!b(r9f~+f|Xq{UuknMO}H6dj0VIiNie4Jgp92`0tk&YN*$7P!`nns{9wC z8&24!?io^w-?2u*UqBFtFEKFN@dSd_25`2OpI)b9o*KU=1hCqDkz%+XhnmASs2tb^ zRncwc&aig)l+pk9<)tz8y@z8FKwpq}K(#=z@7n`@mjh)Ic4wybCTrYARL2jUYZT?U z9_1hunblA4;}21x32(z2IbesfE$--sy+$YrRYv`0#Kg6X0uOLE`G>sK3PC5;r@%nWt% zN9Cl&UGBzqPOsJ*eoD`=dph_@DAXTb&87CY%F)N#{J$U@LfJ^I>jef@8&S5YhOKri 
zBXCgjKo$1+&>RwDh$b69))FFipiV}vpPG=Ji+-v%6)h;*oV*=^q6e03F4*Q8zIq)i9OTuo02xI&eJdC z!i+1@h^PIlMX_m>S*uF4E1x>@nu_7aie=+#X0Y7nH((ECo(K>enuB7`?2;Hab_wAbJwcINVOQ1Rs%AE{lvZnTZ9TOSLF}Iv5jfn#WYy!` zrUQ4PPp(*fD%0R=Wq;`(x|{R8qQ%2sW$s=NkeSX}T$Z@|vQ#c)Hcw1L@nG4NNsqXU zGp4@_u+#=ZuBpnZg*8Tc7csYZwwx>kMHwn2oa|ZlOQ3Cs z$2|6$CfALy7iu=cL#VRjI=YIdg7bsU>*reAJ=s+nXxn=>Om<*c;)sb^z@uX+QqS)t z3T@Nwd?<0;=C}{{k0-O=MS}J`jgt)glqgmk_f~B&?y~b&s(_4fykuG5L9t;eOw}!R ziLMsDrkUZ)U5UassK;eF(2R%n&THa~?V8K{8rMa0_il~TH(iOkC3fS?_TWE<@$WLm z9iB`KrwXpEh}}yZxXpiTyiVpmMi@o-GcAw`y=Q`K9&N-6HGivJ;#>lq-9WW$x0Ikv>s=VJr8`J{hKtJFL z*#NxtWb`6#U7Bkk4PU2?MqH4&+eF%o)A1k)a<9)|cRc%uQ`gU9G<&J)_8*^>$G#i` z6U$s02Uz{FpP*zL8#KF$uy`j#!hZ2&JUmgIiX}E~M4KZ}8mb>ik5(f;22Vv9&-7Cr zvSzcSk|yMHCsFkdL)&!T4;+gFHHkUc>FX2UwG#N+|BWB1FunaVPP>Ck+&j*-sJP5^ z?6dPnzRg&9tx+!QJdkocL~HK|D}YY=$Fm6A2s>6-{yy1d^8B}g6lCYn;&wbqzhWu0 zMLx<)N+%8aUS`ZsdGvzfvojrqwhUjLh8_dN=ax35-`)|XKsfAz@T8hO3@2X6q$7V>M)JaB?SO_6IAW3EW5h0p$a1fMuG7jJ-zL*PQPP9r#{N|MM zJ2l|6&YSWqg~inZC1;^p!0*O0Ecx!|xR@|~-#L9Bu7CUPY0rMPG$VX!nUr0YHXd+@ zA|;VCKK%nzwxaZNn${o*pNuG?_U>-kTj(EmQxhsXZcef_D#UR84anX@y@3q|mE8-$E z@4tc&-m52Pwl8f>6pO&Xo3~$QZjbI^WQfQ*vTRbdR?v2CWQ&pjtZqU+1lFtgPBib0 zQdWD&qSRGp@~H*0cUpe6WIW3XulZ;)7tCPVRqK718dzsH(T;o1Rxs|$r1d#O$`PVH zwi%i!B-(g#dSz>I(CzmK9P4K@yRt4~IJHA-B~&+QV?JXk81<^Y&PFqRYTUDA+JOnW z{>R0Vrhq$cCZJ9!mJ~GrBI*Fk>pS3j+cCN%DUHO<&=ZG~H1azPYd2N%uJ@;rHX9>taMO9Fg`AYsej^gNHV5tRH$*^?h;}9B-X- zEahb?83eca&O;)^)kEN`Zw6c$Iu+MK&~KZ4omKQKxZ?fPoc#9B3VgqT3$|Bfz{WMd zpW*M`$TmW-yA}@h#(hhrC8DZHsBb{(6=VTj-2))~KVjlgUX!4k>3@xhD8PN zZ(nB_MuQS4@o3ABQjC?ogZiA-(01it9f0!oECFGvZw!hf>|%IfCyOOyJ+X@P+p-yz zOoIktw}yZ4)F`npY&&P($_ zLw4EPZxNh$+LHCpGN~h~&ZG^9< z4;|SmMC5R@Or!+UNKp;0wNRI9a#ubLc-SR5gw>)jK~WHlkO)`X1H_|^=6izHsiZ^= z9QuHWTHi&NnqcwBs5O}Gm76l*E2C7@G^i#=p6^DA!<6^qB0QQWC}KFw9y3}P@Ob?G z!6(MYX?h7IDf^calbSEUfwRJ?+TJS;g3tYHTLssGvm&FXfgc`w8fx^_silLz+%%or zV|2OSpE#71z-Gc<`{9;WLV?c%m0N#yPZyP~6^BtIM;hdj2Ak zy5*!J>iHYWR1%AL%P=~T9ovq6Ali!){_4p{Pj?3qnaHm5^#>)hsMUbE9Gl1aAKPL+ zPYv2$)hP)_z6-)vEd)=gC`%{~}}JQ}tqr?PnDNkCQH!nZ7N{{)E7?qW`FSahUa zNFBHLFBdxQ~vg0 zBP>nZ6cM&WwWhyaV)O@dKDiU9T=*DtvV#a60;bMd1Qv0Dj#>2I;DsH8rNR_0ccc7z zEO;kH#R-N~wK~Ap^<7+U8iOFHat(X4^9{v&JvSL@zvcsLA zpP|zeg&2{Vo+!9DkP{eC=tw;5k?3mNKU4Kf%D94D$p3}z25L)DwqZ@ss^AZj3i@G9 z4cVn?Rcqe`s(lDRQGO21ZU)+!Awz0Gm;wF$y6Mr=+~NxeHGSOx2pv)tV(7L1@qtypXg~PrlMY>UU5y%YqZ+SXS@6zRPdL3i#(>dGf z@BJCJgC^D}kcQ}27C=~wlEgv!R!TRXvs{EdnCioY)HT^0TIt^OrKqPi&#V;a(wts7 z9K5Lc5n;tvH8cayFejpcI=uMbWxjKSu_PUE0?D9&1e0wwsMdLa&t*#>yMft=9oe72 z6px(vDn@xHbME$T`T9P zFt^a>vcduk#|JmGu8W%jpn;VCGah(&p#T&0>C?@A!*_Q;v}hdQr#1qXqVmK<=ox<@ z|6VCRUg&Z821j%2UP4~93290HPVuerD`)ZQ#~$`8WzIC`o?TNem^r%okksEA_ zRKEbs6w$yoa(@@CHN%rkX!ZN$K%zBgeT)eEo%!pS&6qM}yY*@7xaMVe`f6)c*KXj| zN$cdqnDLcG?ptqxRIb7UZzu5|ScC`xb1n-Y3BAYY(xe=4T|_|l?GUnfLMkd^ffhDTenrX`v2^@p8 z0v{&+TqN%UzYXrzYn-1UGwA^As-$Wh+>cbPbEOYJ;C-FUx{mV%W`4GWJ4CSRHX_7Y zCqD-kg`8oqu4(@Vi!xYYnZ_*$ktIxxCvfY$-^2VID@(q&1oFedqqo3-UGLQ0Kh6cc z0w>oBg62iB^W#=t`RO_H*J%9PYaaza#KxIA`~m65PilYlxWE?h#?sJQopXQCHztV~ z&cO#j5DGtJtHI`i1BCmfRGBtBgB11Y|u+=cUohkn?I(0cbjcky1V@Z6Ck{MLv z>Z+0&^Mrlr?{xt0Rgk)a&`!Q|f{NVVMyIwYQPS~jzHOyUD;FCGl|U~!3hEt7e?nX1?zvBV8Z0+{r@%-v5@~@YR3W&G7`m5|-KiXZdVa-h20}R%| zcEuz}=%f+*7GUX_IzR#wBov&o9l*@xOk;O4#;_G<_x(yD34(SZ7(2tsEpwJb4d3TA z+`4Z`{Y%FOXP=98yay?Ggnke;sK=??dP}O-4#4MfB#1sh|KTlIgN6|C_1_C5vK?hTU&paczW+0j3*c2!@;~}@CK8B2K1TeDhnj&cg5BDq1^(sd@iH+E2a__1I$ zF=p3jgy+_Xw+oV1X8v9-ImH!~N z;n%(7>z^P!JBvwYLCQXRtV}9d0v-2>YS?U0nq)<&zN#K|H8`$r!L}*#R*W-hCd07( znJx$UHUBu=%$Nt88uy62ZF2k5xZ@xC9!m99V(*AubY8p#fO% zyTxX?uWa>>Z@-!H^3=x2oWP7GN`ov$qu~X&56aBz*XF7W^*r%mYh3MXSMzKuw1dPr 
zP)k-KKsW`^DAhyqyq5p6gd@=4nQ;+jLcC!FQ%N6X3laK$tPE2=x2g*pfjy1q>dg>RqzfTO)^l_HMP>L9y4V!UuR(!=%ekOI;ny) zWAQ_8D>cxW#-4M=XgSQ*){`3^C-pBqmKOB|fdh<6=Obth83DJGYfgk^RpdynZf9tx zMSt;B{D@&4n26`RKFTjh?i`-2)@^f=})UP6A zJx`Go5;me8EzKCxivFgPd4zZ}!{g0O>D`NE(0-YrA?8fz*o>3lp!_FD<}Puhuf;$z zqO(r<2M=|hu0LBIJJd^KL?9vy{$Yi7K_)(z`P3bL29ITc@mT45lGG(&e0kAp6)m#| zA_!ynTqsu(y9J#N{D6q$tH-g(=Es9p+DTk&%`-1p&?Rm>UWQG@f{ zPtSOCVR!z4$cUd{cW&&z>qj((lBv&>zxKOEC>;>j@6kBWJT5XSoL+eM;vJ*xLyZ)h z`J$Nfx!tR=PfDyEZYK=_a1q#OJ34Ok&R|j2?HM0JEM-uK) z8NE_O@Tk@#HkoRD1mPi5AX>r~muspICz;6$=!%6&Ha=BqId64!xP1NEt#&rqvkWif z(p19j;w9s2mhI@uwla(cayjk!*Zy&Iy1Doit z`v`FX3h$r^j^aPnXtTBL4Qs{~SNsBOYO*pUpvJ-4$K%8=Hh8K+tIoSqbTI1$8`)>+ zmQcF2^DjCItk+Y%D4SOFy)s~503%NIK0EQHPu6yXIZ3+H_0VXR{A@r&*V%ke>x<=W zQ<46No2>JlZi;3m>Y~YCwG)t;+Ff38$#fUbUbA`^f=siOw)WFF;?|*uGJ1#pzc6Wlge<6wP-)nmR9cnN+FQ83#u(#)-$xMaDBo-a@C@1otLge$523dJk;rS>h zRb%}NuXbOGTDacdOe&F2x+dZJzE3K-+SH!wC%;C$a!3tsD{uBg zTGog|LGMf~nNz?#G~J+|tzsG?KH9?>tb|_BIhoJYct*X7V{+0=Z;yUjV*OIRb46vj zSki!$SRrfeKr&8wa*X(L`m$sJI~C&fWa5W(g0jvgi^2iT>tk^P!jA=F-e9oGw^~Lp zx7a`X$mvh`>*iDvHl-8@)*PT>1Ln}Qk*%yS{c031Ic@7_8nTv#7Fr$=f`q7+R$aV_ zw8+%i^E^okQtUJl?h$0EkCn9I4E3UvuGQ(VmR zE;A8wj;6_F_{&>96KLXl{>;U9D)r~#DUMN3bRP>{4R%g{lq&vr!S@oO{nceK^pZhq z1sseMj1k_)z}H!eRIt)u{H&?KH)l!xID5Il$X>JHaw11ICboCN`_km?Xeh1Fn3`OW zwHqkoT-gBKCkXaPX3&3{ILCT2mXp7?val4Hziml56T&o3<$NBtCSptD9uf2A+Zi#T z*hbbWT~pnj4Y$rXC;>pdSnw!Z?@zmjN4(ntHvo$PJNJKYH2OM}x)JJxO*_@l_gxPh zd7}1ln;&7lkCa2|>PQ(QeFsQMe2qhhiswnK-WR8-{LwK9?U#iP_v3H%{V~{cQ}+_* zT*`kzEptPK^X1)+vp@Ax6DxI^+8HZt?<1|`3z8o>+9ay!RSTUAj)vIZF=}a{=1uB*>#K&LtMQB*OspI!E)!Sq9-00keQ<$==f0L6z3|_jm0^KbJP|yTo#ZS5NgId4_K^lfUkLqS7*4Bt7 za@!8KL*|%V)?-PNbfgc%@B;Oh(7Icz&qdOZUTkfZ8dC(Fm~KE{8NE>19$4hE$=t+v zrvohsZAfB|`a-Ja9P9?a#%&}nYlyCrN%hn0&*u_Eg$T4Q0X#5!%Wa9o8_7PG76vL!1aMOvqxrM2h zFG%RtHJ&7g+`uY@W(Ib)$ZStZu{RnjYp-T5u6+Jjiv#DQYjk4F4Y}32Pu9JZWUqbL zAK3Syk|v%eXPi{At>|i<0BE5wl>b_=fFx>R>0v;W@6>S72MC6HZ&IGat z$Vqtd8kxekD4HmD{j%2ACv8g5yX?=H-RuKOftq2FrB+g%6eqf5v=7rI-7CLM3v!7tw-0Zinem zx*XirhEB@I!3!X;u1YH7?Lo5j!PzJNNaU1}x0)qP)aRtOvT7g7SO+W&G~813C7!uf z^6Wy6yH@rSeYZ9Z4e6bwY#VpsC|--Tk4-;49Fa})#eo$+NB`Vyo3CJ7pVW`cg_QH} zqmKlSF?%EDpPTn_#l$9@Y2^<7B0%z>4A?3U{ACu&hEFTqvwbx=>?@TObWW~6&46fB zF`FHcOR*Kfd%NF{aNe?1?Fv%qcEkJarz5g06gs7w3r}*4xmPO+UQ{q3w!UoOZ*Qsq z`WOtb?De2O2N8#Vo>kdpo5urrD7%yhfhM+#^qwHh)u|uh>FO3x_^Y=v)~@`{h@V;^ z;~=-1r^88B`;mK)ei=1OG!}5|wx&PT@m)Fy5^Y-n(6+;u8CQW$8??8Nh`hIoPOc%K zW%CP83Dft4TFr~S{ZB=PH6bGY5>lNmsbwj{#=XDdNm0&R(#kV3<-&QY2BTIC{Vb$; zo)|yy&f#>ey^2=0g?-&ctP(H-BcNP2fP^$rkE#hjHoCY~9zr!E$MteFWy}0BTaxUX zrKq4Qm*GphGZyyDPHbw+Xf7pYi)-HrLP@yQSCZBR*HV;5E67b9b9OEFO89&<0WUqgl^e z7jiG`VT$qnbY90$vRqH9`nlWhdEit!+rQ|F_;3RuE`w-|%)RZCtK^XT{C?0swtAA; z@!h5eODZBrf7=VB4@KY&r3cYcv=`rB4w2bJx#hGnyqs;znLw`iar06qYn|rv{p7(0 zkxpc4$VrQ4iPAN>`RJRM7{;d*b|YDf0p@bw5;~1eHjWT5WJ=;$rZrgwN$qF`ax>7$ zyfT7Lqwh>&ADB(3l^KBXzh_lZd~)|&-b`%hFYP$XX3lr*gon4Cm9k0{REEjrn3yhj zs?PATpCx6ji<2fG)@EQDhdOUWQ2Wt_ex0*}weP{9-tz*(ud^6B=tIsOXbuPj%z6{! 
zL?O1-7}LKgyv6h$9K8T>I{#zJgdvXRFLK~q@yRtgmtHtusc0k1Ey}zDs>!@Q?(IrG zWV6`sBOMR@35GY>U(i3=I??Gnxy3q9;!=~G;@G3T3R(1H!Dn24qLI%FO4$>% zy{4^~0xzrgyohCp#q`XJ3S9HrTy(An7&M3#1IzR}o{PL;_m(9B7e4 zyV+uXKMcN)Z@O%U%*ZeM)oqqPYdM^^j)yBFZdz9!2d`m1y(6Xox1;ZaDs0SMMsn=t!)Ip*xB}ta_TATZcN{NJy-q zFymP?2-yao9-D#J!^RMaqXXqYTnXAQFsm_ta0j_cMNLAnN`jamC6ih+0rpa>puXb0 z)&4tI{&_ajH`wr8d;0VUu`uD7<=}f^>LK>c53)N@BQU;9!tgElL%%Wwx+3hlB9qFI zn^F2bku~SpStf=B+11Ytd`vDU3+$G#U;2_A@v!o_bRL<%64iD+alY!bE?}hQcl^r@ z76M5!MdEAm=+D9$L{#|K{Zr($d;%dS41}E4_D+%05rmwAAmoIvg^*L{DRQcI2-$Z( zsny5axIXyyfQ`aM{r+5X<<7PKvo*K-xUv+R|391*&2Qr{s~^T|*IFd-F{MnoFN#C$ z&hX`;C01p0SFJ0-urTyngZ{;Sp0%&v9Wait}7n#yi!w0hO_QYiYdlSs6PX z&%DGF6vg9H#3|J;iN}3~K31N_Et{2L^S$H}^f9;A7b%84Cj3TM#;?ZvbiYh9?D4N| zsA22Oc-8TXja(r6Rr>?zW9Hp)sfIm@mZpN^SDC)sT>Ev_v5=`C$L)Pbor7NYS;q!T ztI3$#&utGpc&b?RHbeNwwU|m4+M$nS$QLKLy{Dci7jxiH9*(+etm5=>QzMBz81$mKvEc?ZzO~~b?Lrz$EHz@klOY9@=K;KF}!eO43H*aQ1vv8BQk|X0M zge<0T-=N3SAs@9| zQKSGxW}tKsXfLl~#zn6XP>{>~LE`f3nF*$FqwPUi&wtu(f`UqUQ@gzNTa*$adPm;z z^~`f!c`Y|oVQ7_Df6`SP+&&ivcOV|IgZ2^hcb&gDI0V_-(BlVEE$DZe?K2pYR^=El z81$Hq)SYnq1*o*+n8B=T-k-({ygX0f&g_5u`1+Y0Jepzk-(Uap`;-t_CnahLM14uZ z8pGA3a1-^OGa<{q@}FTp6kSq7tUFLm)RYMARWa5n)eITdDX!%6tdH~_3etXOde2HZ z(PoiFA=L*s?u}%A+8f6dWQ{+unF7dmfcsnzzCoDew5sn#P`vU)Nciw+L~*oW{yE`9 z=(LuYTj`(s@aXkrw+1Tg&3TsOQAE#qKfsSb=@<6%(bT7J8)N_~d6Yn$Cgk6v?8#_P z7M_r`J&bt?+bK6(pbB|S?m$D!-+Yl=$5-L5R+nTqH;gaDf&zxA670`}maf?N%T+Kt-yoAwERdnXow@bKq_a&%>yExy z@~?ki0yPZ)t-cXWwZ4iy%SR_aY*h0 zvtWwBjwd)7Lf!)XHcOs_qj*p9OY()(O6(DRkQu-310$$Re)68fv??8ar+8;~CCb}( zPb?%%L6hp6*A8!SKpetg-|gUB{=H(Bqay9`1B`_s_gb_AMTe;sVf2VxHxCABaOY%B z2Sl8f!phj8T#IZybA(gz!-MeVIkkMVd=5%y{&J?m@Mf+OM{-_C{Eb2CLEGfB9)AAQ z5;34gp{b5K?5QvgexKx&RCj`q&F2R{7&^;ziIR|W3CPG!Gwg^) zaw7NEWP5-Q=jOu(VeXA)pS0%REMkPROS4#e#+X4cS9JjN-`1VdnX!P1zEflw-ee|p zaI-m_jKIz~#xt7ke_ksCp7SVdyIpy2+Uj!RnLjt%m~{hUYqHz@z1rHrm8n3bAhN;+ zXeKhzI9bsPhdd<-QBnfBwxkzUHR_mMRLoe@6WFKOTilDpnVQB~T?NxJT$Y?> z8J_3m&tlD~@Zg1AM2<&Q^um*Wb=!vaWHLu+s1U%QV+MwpR~FD+PzujSJD{A-M7|@M ztdsX6>lf3jTg)k9ar!BL+dR2^J%Nb&7AZJ=UutVpn@3w z$*30@*{bLtl#`%09a}l#z2k#A7KYqYl}^wmXlqd-@FlHq*D%@F4y1>zm-qf-qCa~r z_*$2W$<&$i>&=m(tsk4z=J}+*uv`pw63n?!fzOrgaBqf5Ly8bk^#aH;g$w~z{W4P3 zJ@u%RP{^O(52qeuz4k8jRaOxblQH8xu?LgnU(U!cF4617jobTtc*kXZ>Z$e>S(TzyQ=`=<#;3hd$zh`G(?i8anbI}cX36i-!_OCkj?0pPr1eZ_6e;@ zvV$18Atl`lSixl!o127z6Xk^ZnrBkPByhs+j&DpkW*f$p1@COv{ElTbTLIFvZ;0qS zgx`=z$KDpO&_^G0kOAp=UhRSMqtDvgO>;ToZ5$Js;23R!wN zUO>~yP^l1ic_3U|gwqT9j43<84$NTwo-&tE!Wn_y%YO13OU`w5yW0BeCtFNA2U%qB z-lXTeT7NfH@Sa>{4T?>T=TP1@_~6p6BnLMTm)UbVAXR?`E4%2Dfo*&)^(2@LzI}jD z>O)(P*+H#9wWM~Y{Z;gn1hi5*G{QA@LU`71bjE_C7?gd=TB~=f>XEUy03#jf8WXop zWl{h0DJ1Tr?p1Wi%~B9*!#@ZhpMpnD!iIsZuPKPDmB0(Tz*f*P*cEW}B79l=K7bx5#O4wY_ z**CC17e!H9Fg=rX(l|8wPdUnSNIEJ8i>v6r&^NHqb;3UO`}}kX$k8tSckIPCxY9xg zJ9Dv1s^Y&w0XSO6JU zg6J4anAOEe8a5X4J#7{1yZ$WA_FCpFe( zzTQ8RdyEueTdx6U9RZ?vgaI9W=YX?ulbwxQK?oD?&vQJrKWhwk+PdDW9CPhv+{!`n zg#N7hJS%@qKL2;>d+NSRmvwCqK;W~i(sZP%gN^6SgItQ)g14Y^2K@H}AzHG%|FFv? zaP#T6q9y9b`kirBLMEGXvV|iSdZyV`xi|OK`{~bl=G@J%eI>qVnB?7K5hDC8GhN7! zYdPWaF#oecr6-aor}=lAUE+fzIqIdK|J^8I)#6X$@X< zWwS4F84`KUpsT8*bUUOX=_`3@R6didrbG$5hL}4Q@0rKqP8Q5DH3Y=o3O4fu6#fNL zUr6<*G$p?%(K4GjC42n5VG>n3jEe6;`OH?E{;M|nTiuNh{?;}HO13JaM|RQl^;awv z4sU}irE8!LDzN4I0de*O(P~6Csir=6vmhobcXRvJ)QKmYX$tSnV}lBgj^CU2(@dQ@ zQhYh>&eFQHUnl+|1=6DBSkn@6`9LCT2jGzUTnv>pqh`R*$W{r0@V0~`UU>Km4NiE! 
z(#{4LoaJU9(s+knCsAB7AqQj|-kzXYVW1ps&C*Bh0UdnJ1O8H?Rq_)cTzQNQzVYyX z+$$?XXVN6gr|Sw^;Y5K~dq0>5?=?k3MXiS{QJj5y);zTWEQ?Pqf@wPG7kxe78VNT+ zN+uAb`A8+|srewyHF_da^10u3u6||*Gx$jl@hQ_#Bxt88Qv1?DLF9d3L~%v&u8%-Q zdZdh+iJNCB^^4fzKCPg$#hn8-6tarmHLL-GF~Ab>Z%cu)2-C||JzkGK{L4{vh_<*Sh3p?9!kUXWS}^5k#{0bLjHB-k;w@QgYS$Bb|7?XI&eI@j_2$wc%D{(Y9F}qsNNGy)rn<&8d^E|i=*wVv=z%ZqE} ze&mN>=HC%eAa&CpjDzqNr9jZ{$FvTpUM>?c)&$2RFiV%b1&&pVhzKnC7}$HcO2ib# z+XA@DtO_BNgB9FvfAv*6Om~Yc6_0S(0$~!q$5_)sV*-twKmgAu%KGi0Ea` zo(><_Yl*}~R+T&Lp0t^;SN!BgJ`GXjuf%TXj&)dsg-Zy5Ij;w^TK{^Q0)LtvzsFtL z+nUJ83RZ>4oLFZfaum>h7(0gK5obv>I_lImpUigz{s zQcMT$hT%gq;g6IARZTF^+us|sAp%?0+!3y@gfx%?Bx4Z)>T0z2R?!qP+6=w-=h>@Q zhr#RiqSe9P%KLqZ9{GQ9zZ>{Ynmm-DEmsr2Ht>f=0;E-MtkZdiF6_TyBM6nuEQnLMc+N#fX8BW9VqpHhO{OY7gU_{LL z;}vL63lZyhq5d4>L4am`U)EmhZ!Lzrmq5fu4ZuYV)^|Zk(m!G@Bv5&H5?i<6aT0MW z=B6f;D@|=^bC5q;iN(!iU1n4B3zf1>o|=d~{;_rm5~$F@BOnp;{{i>+53G7evCr(~ zN%b!Qt!*@LJEaSt+b{+zcO7MhZi6KZx(!#7j99HXpxf{WW?BB7%syp|#(=*|Pd`&# z!)qGCl){TuMO*`J6?EZpDl=rK9qu?|o7Gta^n4_wn1Jpfc<)h5#3a#G0PckGVUlnr zt22PlLhJLyJUjs0HhBZI6Q)VXp3Qwc14P;yn^hkTZ7r7P-n_E~cmcw|VEGi5umB8n z_dP*Yj(P()+@gI8d=)=!6F;YiL!&jgc=|Cm)1HJZO!xLlwVa3tNYl*^;uLpzh%XGi z(6G95j%US2wWQBffX535?_OKEY{^NEEXiyjNAUT{_`D#=IOSO9BUhfSh#st z|MT!jw>xOX0rEpy3Z}`XO+hG^*OQXDpK36HS$zSpGkm-oP2RjZ z11{{Kp*-Q8(2g+s437Y%*s7%fo!ER^R5&g2hx0936@$TM*4yIOykiPu)vsP{`}~rF zO)Z%wE@nb~!Yc_%g40=$RRztAMRQ8-M;I?zNdLU^7#W&~!FiU{K@< z9!tG40v4b+xPXM>;36D~gMZh5kGNcHtj+mSyu4-IC~5dS^flQD_^TyMcE!Y6{OegVFXw2 zgKLm~yMghpi!5sLezQ%5tg};nxE7hkdhJa*Gp4fTW;DOd=Aezvu#XMyzT_hw--5?T zT8K8Q=%SCW8AlSzARdT%lhjWSab|kzQOktyvPS(Vgx2!mhX1XVr+6w+!GF8W0rd;h z-aaRxw-8(YTG`w~L6m!t7k&3Z%MAc|?nvJ74^-k=0E*2UuKIQF`>ZA_~eDc;q?Z_oI4m=1RPaz+S@Zng) zVxtWJd&K6H@qV$s%ohDQMsUQF6T!V6cg66+MM z&Xs*oEEx-`OQ$u)C`!_S*$801;|H7~@pv>MK|Ml|Be=s^MQ!vvJn_7V^fqp?8T z2$&8r}hNU-eyNa}O1X!Qb8s==dtG zBfB}i@1s|8eo)^U`cQi=#P&P<8hUoq~>2~|}kMY?C<=$V#*Q?)Vc4jVgJ-OIlz%YN3 zpK%oWrVi>NubhZGJWC=$ON@`kN8B_m=Z8{XJ<8i9<>s#w(Qj*TaH#Hminq0oG_#qH zPjWUpGoI&c(#B71dBU0ht$C|U9$ggFBncJlRqT_ZW+JxSdG!1;{h(5A=QYs|00r4NYiSAF-&UaAw3 z+UkGA*!VAzNzbA0J^<)1h?qv+^WV`u#hN)!kj`>l?uer9<77E$z=NjH=h3e}m%uVwLOb zF{I?@S@Z`Z0%`>q&Nf4B`={iQBoyR>9MmR*%kbj1xn}V{AT3Ccs58- z!{h7Yq*9W=Wc3JymOYEP!fxbEs+&j4sj-J>cbi!zM(qvK>v0t6i8~JL{v7=<(@+8N zxkbNYc-xxHR-AF!2t=m&1wJKRClw_Z6*^m$SQi!Bs6J=*Vdpz-&UZ$f+ZeNd=NJCI z#rok*A@n87FCD}93^}73xu}>KqQ2Rg#vA__=Uq-Pd)me{`f{{6OBJ`%%?B&{t@5n> zxs@+aYgF>v!a~E$(kY=i_&n(+h-69+;1vG#5Unx8Q+^_Q$>u0{A$M2<1JR;~LA`OX z`li{tCRLod6%R^qukA5rLG&AkMJz`F_(cNxIH33b1Lvd`ycJC`0j>RO1TFgRKWITh zK;?xU8kwBtW19N}TJ*cskGGCl*k02db8s5kfcNi3mr(b%y9tTEKII|qj1L}5T^FY# z4INB*S4c~!ouu`9Y^kof)qUBc{FA8%i4fxqg~P!a8U;vgbkL5zLm$m+EsaxRZ<96< zmzO^GQzNr8BiXo+FOc3NQTt<<<<04$X+3oTS&QeC&8a1|RXqB$+ZfZKcDzk(UzN+J zy{%sRXo?2%UxcycoD7Cq9jc$ z-iYfb{S6K?ApK?jX}-x#OtxM%>z4RQ<6?1Mtc&P^Ncj}Jkulvs_u5UL^2{Nu zCer!^U7)_Jd3+ct()s-O;`$>vV&UQPg0MsIF8=8NuNB(O9V-_D;0(oEN~PX;b#(T=wMR{RJ<@o3K^;p-hx^ISTOmtM2JF}97A6BIem)d^Idx|b1MTeSodn* zTG`7pxI|rDgv=nMWX2_mihhPzCudsHhw%XxgC+Wh`n@46i-JU zkSOsFGLTBARE$`&{Ifq{x)=O?4eY7=duVL8FAvr*uxSW5iq86Gp0u+G0Glu54TFGV=ophf3?#g8 ziKc~4gy>rO?Z8~mwrD+0{~spto&F*+KlXZJ3YiP}F87DNm@_ep;NH3P2*|{=qRoTH zCygg)g~YcJjjLz~l%Do+2uGaualD@pji&n%_);s{TF;qYDh%yKtMtB`&Pm_zxTa@$ zv-A9Ax-^cfY1X{iQGRV}px4FsHP+sT8eJvGLreL2A5cz`5I)CPU#J7K`?Ld-^|0H5 zLW#RYIwsovmnu|?dP8eNF?NS`{9?H1eDXL`9+|TyEZFJVDP4?yh27)^kVxe%Wz#9a z*og19oZ-21d=azYqREZcx%XTl$D^q4DN)QMa*4q8B_I>~#K&mQ(n54;_ilN_)8iTK z8T4??kHtWY?Bs}7oh>e9)^p_Cr%&-U^8R^=CD3EKz0yqwb0Nr}}b zpV?_1ZcI2FdnZY182%?NZ8e&Z`v=f zhR6~zbX9;sA7Td}-y07UxDrBHw{I1Qo$*hjqM__h7!~1y{J(_fHe>Y}wwxKx&=zGm z`iK4rmefrBO7$EhhO-_3c<)Kd-ka?Zp>0?tw(_ 
z=p#DYehh!|XJYV@v8r{DuD2p1wy+e+1QjRV8(cu|mWBJ}M0Djs7E}76NhEX8> zo#fI}8xJrw^br1-*8^hiE|R3=Zj#p@phCZno&qRos&)! zgQU%;4FZt5#5o93mwe$?{(Pa~R9$ijQkST5?}lG`*u^C6DOz^boq~{5Nj;|VWh}s& zKDQE={YeeHCVmDtvm5F2GFT1w1?v})^w4Ce7G!Y360)tw0g z1DeL$v(3zjGn78ut(~jVi*`Tt66PYldkTEeKx(khfJsB)C$CqLrGS{x3+Nva{WS99 zKr7Vl+$CIuAp6BaJV!qU?XVKs<~ip$s}yDsl#mMe(q9w~PVnov!$H+HejE zY6b>NeOs?cLZIT8BtyRIj9)`2UW8KjRbJM;(~izir*U2YrU!tYxV4KY8OX!2++Bis zVTznt_G@80hx!$Qzn+HGkX=gF4fJ4r`nb)#u#b%uGlcKYgfzpi5pJ7Jy_pZJwE5GN zbH8}(HAPzA$0H6ch`IPLq+&q`!Zk+Ng*+N~F60VVu`J$*SzP0eWjM6876Y7R9tj)cvIfVjncd(0kv|iQjbHb_A21B{s_M@GHFAo-44@b4!Wn?} z++XA|vBA)9VW_1G{m15TBsSJ}E!5`y2udEA`IZyn$o4xJW~jD5a~_x5TirlI6jmpq zBB`g*=UZ$TLDhy?0;pXWYzLFf&(9Ey1MZmMa@{E;NKV}foxrVj0GC@&txo6{#0-J{ zOcPu`1w+`;Z&1;SuGDGW$z?i2$$b>pg6>FRq=~w6mQ+`cTCp|Sm_ zEw@gxfpVPiWIP0VrPfJ^eLG0urYqh7%fqNIKHk?d4bfY*XV2?lio=rrEY$G{Z76D|tIKjalumIKc54XOkX&$BH8DAgY zWa=^hhdOfjQGylBl34QVY;z)PYQA1P8Fcgf4)2W|kk7=GHR`}pcc6|?z=-=Lup9ZooM1Ee>_5VON{S*lOrUQ z5wF%^CzO8FHwq@|Jt}*o%o?Gm-WyVjQWxN|H`>R(r zI8PqaG5J1vs&ejn|BoA5t<1OQQaydB`CR(p0nq>cA60K171iT~dy^8afg}*Q$TkL6Y0p1bKJ)>WCXT1sAzmN@OkX?V3 zaE|6O_Y!+>)uBJRol18gr~dW#ir@r{_juSJyVFD~>z;I|h;1Trd=)l+s z*N+BMr=}NlrSX4Fq;19qR0|6<+c~h<2Z6rXoXOq>1$W#KX$z1ywq0B-XNL!dDqpi; zb-`D&;eOaZSkM}CIJGvCY>8xw{cng3m>EBef3NjsmELfq0>PBT*b-VGKYcOC;n=V2 z6F+Abbv=^0?sVp+CR5vgCU6txPXlBe01NnMHSMl_)c;G@E4~bi=zjF858+#<9cp_gYjt$n>Re=J)TM5OA<6_N|aPjcE^i#sS8aAH0(?ix_ali zCOpuHUVz18LQG01E>YqZ!AIERJaeR`y&HzdiQ1a z(K|X(J;(+-Qf@ z>|QQrTL2fAez{mDa=gchz`HmK+>>LuW?a>q*()Y+VypaX=k*N9GWASwfuijFtH=-= zRb4-nYS_e@E%MR!{Y0EKic7htHu5UQ0%)EZwdfS$XtiNuk)OZzCcHX=4xL)j$OCW= zFo-M!29a-eSDSU$Q zFXr$(W1&BtR1G&QAyZVzlY`Q}{{y)g~<7X8)S` zv4Vwy@g*YsAFbOw_(dZLUU)lgIZ0%CPnj8*xn7cJko++B<|dGIH2WCQ&cQ6q<+*hR zd`f5oXfuNp7w=;Oz*{&twolWzAsQ2d{k#k1L{-bK)g1((Go?7oovEq!NUZuCt3LO_05pV zv{d5@(T~g zs~CvP5232^DE3!hWOkXMu5XN_cTIh#*~>corpdymxhkEfg)qVX)8(^ehL|!n-Y?G!agYw(+i4~MIluJ5aoS6ha%*1Zowew2w+z(A*6bIa+iEts2{O(nzO!n$n8 zOEFv_n4pgCzGeo57>iVJKzdT+nDy{p10~h^&gsyy4KenC~^4p*?7!4D z*`Qqi0girAb^c9_b@K9aHVK2uFu7|;)>|Xw{x+TRc!T=mPuqU1xS4c8TpGJN27cY| z9-yFay(m%u-U<|Sk{6ej!UzmLjo`zjNK7ck91MxnF)m-~Fr`*qLU;OiF zao+WmxHRcnjhyG<_`@4ccoQ@*GDvbLVRq^`k?5WLri=8VfO7@C#aCC`d~ z#VeD&1$a~$!if#*K7(+ko=4vwc@f>2kGaWD ze9ytVIQ@}zLU~c@)Fg>k6bf)jwD6ZgpWL?EOx~+-o~mWD*~GzXub@c=plgnNX8_oU zu`_)fvCa}DjBj-F*?Pq8%*ZAk7eB3zn4WZ&1H0e{@^l&okv~CXLj&p0h#j6g>88*y z0N-Bd4^~B9bPr)+JuAQsGH)E@ze0eHicmlQ#hjk7;CMnB>E@uPyy!D|wWPm#)v1z? 
z-o359T1dsGmLdZzV>(?m1}ly2P>cCXdQ(d?=_NZ!sQI*w2&4 zT&%OTf-~n2g!%D+;S8Xy{vZGEXw?SP?a-osGw+8m9doI!JS3EjJ)qLQL9`=k9shQT zc!WKoEkYv>A!<*uwivslW7Q=OCcXFC_H7z5a7?h&bJ6<$OYVyOD9p(zx2Mvi3Zm`I zu#ij`IB_G`xrBN3Nc2$_1rlsi8XWU}RIkXE++O zU2|b!^j6ek`SGuG2=$SpP>J7ZaNDn(H%T4~AHS*iQJHu%75z_Epyj9RYL;?`x)2r; z>xR)?*SIz{Dr0NFd9fSemD$JT?&b&c44AvQ^>XGJ6bn3l0=0SS0pAzI{hquJoBB)i zHH7TtK*Ia{Fjl5t z06x%O1J2&e0gxVLdTAuL|DgsbvphgDQ&gQE+{C}ze#QO*F;luKc8z0^N_1HtD zsflfE7S`FBtMaF+w;PAm{gZtGZjm})CEuyK&Y0>`CJA_%-|tV;69kVy@k^Qvl5*~c zhNk!Lu=)%=`7So~64aB244XoWout_ME&^b~wxjVJx6QpJEh|Ma2{5#1sBXzDq0j;2= z&RUY|mn^bbLo|s*7ewFHy&7Gq@q6> zf11X1<%|Ay1LdozIW<2%JzSX&XET;L@$DrgfXcz`yx`4W9?x%!luqr{Wc^=5#ioMX zUt;6G02iq`KW>Wpe?g^U0HpeF?OiyJ>_`}S-rf0NAy-vL{U^dY_v}_X-zS_w%vCfgr4JeN1pUzpB&MhV(O|I6_?#>$KR zXY>0rURsH0Z@2jbhHCd)kT4<8d{T{TxGrG=!C4(X4YiFsUPRU6}k2`cIu$Oyr3^7ECk+1#F8*@>3EU~m~T7&!z>ea6WF;1=1S z>e8W3Hc$JZC?VB$o1#uMYGsK?)2l&@s@VWtpOKsu)z=F95Z} zr_hH(sG1ycG{BYe0+-Fm>H|jD)=x(rH^A{@tdw>?lzDI)SaY>h?IS}nA9k4;F#E<|26tc+UNHT4?}q50Wl>B0^D)5 zBS!ZCI8HnTogtqtVHRuR=>+qBWKQ@^6bgwE2lA=g1P1i^+9Ym)wBKG(3Vdvz5zjv% zXl~z~?iQ(>>a9ru%ai@Yig$HY2o4H~8{oZ_LFx)hLAVWoK?yVrY#Q$2X`;7v@O(V_ zXjO3vC1ZF)TG3SalW|9PR2NeeZ)kpV*v5N!|A;H!d0Y665gjPTc^(qdJK=SpS8JDm zL6EdYg-ifwZw~(c1u1#%Jxx6vLy58(bd#Rd5a*s0hs0UYuJw8)M~5);JG`JC6rBzz@GJ0}v zc|Uuay8KNYjkzO}R@NoX~fd zNOr=9CeqVs#e^?cvk^h+ntIHo=6-vOuZ(Q0hN$^bOV00sF#}4jf4GaZnGy;T#3~Ld z1PbPBU?i&pZKVmC({P~iQ?5&iWPLFkQcZlkQ2Gw^?0E9 zWDdZL4^O2rZNnSj~~KI9iYL zJlO)O-LKMs@w2UxpuSaHa|KWl)XT(msm{3SFRv!`arDc)HUmVwY6uD(pf+|q;c4S4 zpDb~wnA26U`i4`h)h=a~;V~$am^nq~J`bw$O4)uz_H1S37Ij)KGS?Q*Gb=6kdx(jHeNnQ3}0RBgf@vNDLfx>O2&-N_}qgv zARc4fo95f!&#cQ7hP$aMKyfu4G?##-DtliIj}KQPCL_n zlW;xrEBMsQg_zzLc1&K_0?@!+q@U@X-I6`^G&u{Vf!`9q^ZtE(_jb8r3gIqH)y_W{ z>=f2s4j)%43BR}R>*|r_K@26cvw*@a+kwNW{p|~ANL$ZtzsyLKI!JzaBaVW3gD&xSM%FlxZJRnV8uhGgctqV@ zm*<<=Z&m`#nzJKO!ST!>!%&oxwr6OL#FE79n^nQty|x|Ox8qQyF}a5LOJS=5^QSep zWyOkCg(V$Mhq6nx--<~+yj|1$GEz1W*{-k4U0*uA#JNn4Q~L*(gR2d{V-(b~p*>e|1^g(Z{LKSo83mb@xmzD|{Hg5s6J2;Q`7dTj zvfSS$dI`~wk6Au0owAK9W4iG7H2N@qkPoQm;+hAUvh%Wvi>p@#%Ie|Zz%P7~Ly5e0$R&NBkw7eNO9dJean0IOd?&GCBKuLKjzXKQ0Q(K1l)@ zUA`9@ilr_o8N1ah7S5KAfTi>2q2NdHCsTcEfLVd%#QDwwDw2Ko&jG*qa(s)^SP4+;4Y76{ZSXlF^ypDnA{2GpLVet=)uEN2wKBAVhY zMn(r9btQ?Rhn7|Q{%udAp0)pAWD9U039&@l1H&s-Xr}gmh#6&J+O7j@u;2aO;jc6J z2k4k-bq9y;tnyp%ehkN#g+;p6yW$5-@IY;B_zMr|0U+TtqvMIPHtTt#5=0zCF zFSZ72Fx`=m8q%+zT#OSi8eUafTMX;%d_NX(6%<{(#~s8-X^SxQ;Q zjto85S_E;G((~cN+-Z*dv#|u$$QZNmO?9#P1gNH)M-2W%M!-@#IBB(9YmDQ_e@LaMc-- z)~Ip2G3`jbM$*tydp;*#U9#W1+;6QSRl}&&aaCZ(#kE;5y6KNZL5W~z4H{i9;US#h z-`!fQ{5{~Sz0Z0_t;F@}RqRWy8~;mZ1usb&qraqhh|9 zmkn1dL+%VR9}Q2cNdt!Kf6(Rih|Vps)=;ZQgP{tl(Ruo)DlQ{m)h zGH?CmtAgzG>YX5AGBB4SY0?2N=z%3Epn;+a;8qJ5U~+rkBwB4uBymP*)9b@KXU3QM&2nFZ;U}g2yY`=?8>GPHMvIQ=_A9-j%ei@wOtJ?h{#$g?TAp~<<51?ATFRDO1+NSA&054SiG!TICo7gOVd zU7sA`W8>qlF9zRtVJnKdYP(-AlNH3@Gp_w0mh=qa&xzg6wY{|}z4)cP^F@l|91J;u z*bJL!V%-*#L0vkUVb#;SGJi6Tdpq^c!9FPK2PkEJhU^mPUBP%oaZvK*OYpru1EU)r zK$TcQF~h+#(18BI*}p+RTid!KikBY_$UJ}7pSQ=+$wC4S|1%=K1C$u)R~IlaBLzI+ zV=F8TPu0T@qCWL{wVrbbUVkt!*jyli1P^?Mws180z6>Tr_AjGy71Ge1ehL@NQ@S%wk2&cZ-sey-sH7mbhi`ldiw2{8y>)^MV$HIwY}$;mZrWUWxm5Ylq~XqRi!MK`L#;^uQVm+=$|`4{0?$XvU$#$Dj&3=_6g(Iv^4+xZ+!L!tQ+t&-`$HaZ-yml ztLcruRPVUm*ySVkPwBBc?@|!=XzG|(jj8uwLH|Zr(@&T-^Q~0R#Uspp6i$u0-WtX8D1I{8wPu(H za_QzamZ47_z|kqNg8_taT_a>(=c_kd7GcJ+kH&FxgyH%mxa565H|R~gw~$rp(^#L7 z$6pfY&Tp$D-slbrY=rRS(fbI~^}$9U8SdYo-@StK`C&?}b^#D_v@*xSRi^Wry@EgvM6G_)T!(CitLSF{Y(l9M4-N+WK}cjj=c*;i^b2{E&#q% z_4WU~HXIAg(8DKkdnR{LtA39cQ0VZK%zKI*EnboMV?RL29kAluK7|fni6&!OQNF(Y zilju3Ujg)wBsKstyK5em%MPsAE#)88WFuLT1{qi05LDKEFq4kolv13Q=P4~!P@QP? 
z)GJluBv(1xxvAuAwAJi0-Q8C;#t9 z!vW0HBGC;9svA3u8J}^!8+(a^RRDX#1yt?W1SB#4intp8>SlUGSpU}N$6fB&%Q+$9 z(G{nZnnn50(eH0~wgyt6gJ813Jm zzbvbl-GW^O^PxE%IHL`qNEp|wyM!C3aybCx7i#McD%YK#=r#dUMZ)uyjMUOATWQkB z;M%fxqrIp2IA=55zgGs7nr%$t z-MtbQuX2vlY%YOFN2rhE>iaN_5FS$C5L8x_cPJkKh6r$tZ37C%?G2s}Wz;yho-EZe zV+0|Az1=wDxDV;~v)A1aI}I;pt`J7nmG})<9Rff8R=X$hmNU&U3bqOno&)&FY}Nsg zbdY_$bLR*ks<$D;PDpI5Fgk06==EYH|oa2oMmR`rC=++1M@V-a_ za(|BCpZl)i9OhS$^i&fo0&Tp=({s${N8R49y4g8IkB`A~+sqL0%6k4H>kI?aX9O}@ z|JTZl4h{TGg4{UUiNy}GFcuB!qzBCmI+*aljeN@qedC956+xLAiJx>jA@Kt!QajIp zDYfmlHjRBMk^cD67r6mf5;MCC8uz3Qe=vBTcbRfN zOTDnI{12_w|6cjIFl4raDpm*$=YNN#9zL~VSHyq}G6$g1-p+53tAN&@(vy0ZP~f97 zr>liz6xa9E5c*j96y-(ZV>3iST)lY0ld)v|Ok2~9E!t(QsW!yPt1F@mLm?j2ljc&e(%Ag7h?mC^@^|LG%2{tnL zub9X>lQ%LHx>Q7S1)h0?Dclg(Y4IH{itn=87zc*MawWS9jkmsw|H=LUE+r+)1~_7_ z12Jb~KrQ&TBTLxyx7=3H6<`XHl8Jgy6J2Vc@~(+Ws4pXG86gv;r>j@GdTJ%As3z)A z(^UAJojvg>cr_?_^99-Klb0>8tGoG1*YHny7qQ+N3}rL21RAcqx4|~E7$~l<117E9 z>YSOywEh1j+uzRX7qnK`?$AO~R|pmh#T6MIzKl(gQOj$8oDU$+EKl5aOTr6i7PQbpm(YRKoRS48HyyeH2fv!V^URA3c6o`~=r27@o;rDO6%qnj zy%ka3*|K0OSK8LRoLZ=IhgC)J-{W8VRLSu$W52T#O&XxF&oW$ zde^lhoarvx1}A#s~XBpY2TEx zG*+dQp0WNgJ7MDxX3|`oJZ?{(u#1yBqUQ&qAJkoY^+2_*&TU>3U!NojIdt(XGl2MC z5dNVmk(XUzHuatH2b?1%Z;dKo_{qd$O5NsVnB|RatLJG~O*9oq^HQ-1uX~c}N;t#1 zcOY$V)Cgv<2=E~Zn{f2rC|!Rrx#=RnwqbH5&XHH;&0~1u1)F9)<_NcertS*Mw@ux{ zH=rHhCAxL^N&PtyfcKQ(t`6losd@#^og$69=C^A#UNEL>$A)}kUU*{odXg!6Qs9T> zZb$+dorWdPds@ric9fC2AG_xWR+9>pzfq^$G5AVCSrsV5{^CK;@OG=9qO4F-Reb!f z9{ER>B-s4dO6TMIf`59Os2(MP{r1umSbbj6DV5;ar9X{t-}fYl+Jq63~gO9K6=$tX_1&_T2B6bC4GBy3_RoO7FFCmueh<1%@OU zfV80?*I@-=2qw6+PR{PH?1kroi)YbY3oDYry}2ZjC+lAq`HTE)ig%p#r4*!I6a? z1f#1XJFpC}^gVj}ui;S?R(kLAK64U|N~?5=!>DfqhB=lOQJ($(_Z7MPShdW$wrnMg z0#t?5{V;Hfi=i%dGsP#8Ng=9|Cd;BJ6vG`Oj<>-^^AA?6>tnbI7Ip6TQmcY+NKETxc7Z*@YwgG!T22d{0)*gQ~cFUKUm-Te`r?^%2=&h0NRk zMRDFZFCa={AFOUEBP_R1gSxUBiDzBghH4-t{r(j}HZRb4&S_yU%as3(7R=5!moA!6 z_ra#PSLx70Q6=-oar<&%&;Pc7yQDI|7VzNb((cMs43y0k1#xsdqORNo{>*KbJX5Bw zt9_5re^G7it}e-POTR}u+Bn%oqN3C_JMT`CUCD@D^>zb^QLa`1xgX+7&CRypuxtzE zEGNO1pS0pHpS-`psLR%rDCGK(Hgd@7)UT6xw^hL4#f9Hu(E6u$b5D^L9H)(v;(GPo z;v&j6CP~BX!4xSu&MA>DJ7jIFQK&$F?{BHKXLyx+YWy?wh$qyNai2yegCwQ4zRM0} zuyEgGEu8z%>UNjhu2O0%N)_`*ls;EJ-uMmHFYy}&LZ<){6VHyI59)2W z0fFN0s592m=%*uHZA3a{+nmcBwD#T`6*oO$Fb}z( zUk~WhiKE8#KVI`>rsaSqJZ!SRRqt+mVdBt>j-{#PX6D5^f>yeT|J*7H;LoYz6L+19 z7|0h5>aa*A9F1ChV)ID+;3pH7fln><9q^TnGMfx(ka~y@uJ{~S`!kN*+h>?J0v*O$1T-AlC84ByV zzGh0Lk^}J&^bU#-t(-3|rYL3%b$o$0(yI}?+)N%i)3@+{xPr6OffODTZg`*rHFn2o z3{X?$0n1@2s~KLiR2?-}Rklt1Sy`uI?T5-owXY-b4w>_N zza5q&KU4vZg+-{r5%_=a+Et zNc{H3J-?4^+bnNTxWS02c%IuhINbjN_Ma!jDfa9T=5}~#PTCdOQlqxv_a=Zoq51WL z##j4BOqQ~mmCpyW{Yqo*%8_^wAm|6)q*@iNptPj&-?gcbiXRalTrDAU5Kr&1!UI9C zQ~Ljg$4)S9$dkrL7;h|$HX2|@ff7(X7{Sv857_)oSrgdV80ozgGeVQ3|=+ybi}x!rnD&pdS}^Yno_Bi zdy&N?{NBhd!}Btb3?ZWqExB}U6N{bCtLo4(JUeY#k6bQxXA9JRewJ1HR>G~wxA)(x z#kGJ|4(|R=`{qo6U5EJ_$p1&~vse$mo8Nfm)@9~8*4!|`kzUDqGL(n_HZM$q@*E`o zYp(4lMt@0L8{s?|Q2SWVoc}BvjX=iF0cN&tf4jJwNRMxOXCl(fOXu9$QkSy-_<`4p z19y6AM~Y2tz6|cf!*z9aR$v$zMMWrp_Jj)&8nh=~#F0IabffsIA)o7|*9Ql#;Yy#j zN8!mk*!@KU9PHOOAQ2;plS}+hPss!D7sM;jMqKZ_W?T=Seknf2N8|CpDh||mnj-h0{2vsft>a;o{h|9} zPf_Gd8}V!_|!&e zIo$94&`*y!I8@6il1YXWWo%`GSwwJz%;z#?LdP&vhRy2>e+F*8~o)~Vug#yj>#u^@&694Eg~%Y?34 zfx<=$3wfeeYuKP*1)Sy9hUV^Dls>atn`ZB_-D^y37zBExWeNnpLuC|&$G&;f^6A3% zgXUzXsf2rmzn_|f_kqc82(gXg|YV z{nWNl=TGk+>GLY6vG-|sBWlGB4_Y#QPzS7fp&bVAxH#2+8npJ2CQIA3`f0Cjeu*qx z*FC-em%Ji)c zt;dsJX$30MzM@*Aw#L8fPg2#I8XYj*iqrUB6n^|UZ?#?Y&UW37r$IuvlvF>tTZqcz zw9m!{S_}_I1N4PQuFw^HN>t6!ucNehziZfdO^5sHvp3n&Y9Q4`x_G+YPOIA@`k#Cj 
[base85-encoded GIT binary patch data omitted; not human-readable. The span covers the remainder of one binary blob and the start of the next, whose header is "literal 802276".]