From b1028bf0fc694c6d7b9c2b9b264959c61048e390 Mon Sep 17 00:00:00 2001
From: Parichay Kapoor
Date: Thu, 25 Nov 2021 22:46:25 +0900
Subject: [PATCH] [layer] Unittests for reduce mean layer

This patch adds unittests for the reduce mean layer, along with a bug
fix: when no axis is given, forwarding now averages over all non-batch
dimensions, and calcDerivative uses the incoming and outgoing
derivatives correctly.

Signed-off-by: Parichay Kapoor
---
 api/ccapi/include/layer.h                |  9 ++++
 jni/Android.mk                           |  1 +
 nntrainer/app_context.cpp                |  3 ++
 nntrainer/layers/common_properties.h     |  6 +++
 nntrainer/layers/reduce_mean_layer.cpp   | 27 ++++++------
 nntrainer/layers/reduce_mean_layer.h     |  4 +-
 packaging/unittest_models_v2.tar.gz      | Bin 3187 -> 3385 bytes
 test/input_gen/genModelTests_v2.py       | 35 ++++++++++++++++
 test/unittest/models/meson.build         |  3 +-
 test/unittest/models/unittest_models.cpp | 51 +++++++++++++++++++++++
 10 files changed, 122 insertions(+), 17 deletions(-)
 create mode 100644 test/input_gen/genModelTests_v2.py
 create mode 100644 test/unittest/models/unittest_models.cpp

diff --git a/api/ccapi/include/layer.h b/api/ccapi/include/layer.h
index 62d62b6f..d49ca6f4 100644
--- a/api/ccapi/include/layer.h
+++ b/api/ccapi/include/layer.h
@@ -72,6 +72,7 @@ enum LayerType {
   LAYER_RNNCELL,                    /**< RNN Cell Layer type */
   LAYER_LSTMCELL,                   /**< LSTM Cell Layer type */
   LAYER_GRUCELL,                    /**< GRU Cell Layer type */
+  LAYER_REDUCE_MEAN,                /**< Reduce mean Layer type */
   LAYER_LOSS_MSE = 500,             /**< Mean Squared Error Loss Layer type */
   LAYER_LOSS_CROSS_ENTROPY_SIGMOID, /**< Cross Entropy with
                                        Sigmoid Loss Layer type */
@@ -393,6 +394,14 @@ Permute(const std::vector<std::string> &properties = {}) {
   return createLayer(LayerType::LAYER_PERMUTE, properties);
 }

+/**
+ * @brief Helper function to create Reduce Mean Layer
+ */
+inline std::unique_ptr<Layer>
+ReduceMean(const std::vector<std::string> &properties = {}) {
+  return createLayer(LayerType::LAYER_REDUCE_MEAN, properties);
+}
+
 /**
  * @brief Helper function to create activation layer
  */
diff --git a/jni/Android.mk b/jni/Android.mk
index aca70080..16a19cde 100644
--- a/jni/Android.mk
+++ b/jni/Android.mk
@@ -184,6 +184,7 @@ NNTRAINER_SRCS := $(NNTRAINER_ROOT)/nntrainer/models/neuralnet.cpp \
                   $(NNTRAINER_ROOT)/nntrainer/layers/split_layer.cpp \
                   $(NNTRAINER_ROOT)/nntrainer/layers/common_properties.cpp \
                   $(NNTRAINER_ROOT)/nntrainer/layers/layer_impl.cpp \
+                  $(NNTRAINER_ROOT)/nntrainer/layers/reduce_mean_layer.cpp \
                   $(NNTRAINER_ROOT)/nntrainer/graph/network_graph.cpp \
                   $(NNTRAINER_ROOT)/nntrainer/graph/graph_core.cpp \
                   $(NNTRAINER_ROOT)/nntrainer/optimizers/optimizer_context.cpp \
diff --git a/nntrainer/app_context.cpp b/nntrainer/app_context.cpp
index 409f5e46..7a52e70e 100644
--- a/nntrainer/app_context.cpp
+++ b/nntrainer/app_context.cpp
@@ -59,6 +59,7 @@
 #include
 #include
 #include
+#include <reduce_mean_layer.h>
 #include
 #include
 #include
@@ -262,6 +263,8 @@ static void add_default_object(AppContext &ac) {
                      AttentionLayer::type, LayerType::LAYER_ATTENTION);
   ac.registerFactory(nntrainer::createLayer<MoLAttentionLayer>,
                      MoLAttentionLayer::type, LayerType::LAYER_MOL_ATTENTION);
+  ac.registerFactory(nntrainer::createLayer<ReduceMeanLayer>,
+                     ReduceMeanLayer::type, LayerType::LAYER_REDUCE_MEAN);

 #ifdef ENABLE_NNSTREAMER_BACKBONE
   ac.registerFactory(nntrainer::createLayer<NNStreamerLayer>,
diff --git a/nntrainer/layers/common_properties.h b/nntrainer/layers/common_properties.h
index ea50276d..ea136bea 100644
--- a/nntrainer/layers/common_properties.h
+++ b/nntrainer/layers/common_properties.h
@@ -348,6 +348,12 @@ public:
  */
 class ConcatDimension : public SplitDimension {};

+/**
+ * @brief ReduceDimension property, dimension along which to reduce the input
+ *
+ */
+class ReduceDimension : public SplitDimension {};
+
 /**
  * @brief FilterSize property, filter size is used to measure how many filters
  * are there
diff --git a/nntrainer/layers/reduce_mean_layer.cpp b/nntrainer/layers/reduce_mean_layer.cpp
index c73f6fb0..77106919 100644
--- a/nntrainer/layers/reduce_mean_layer.cpp
+++ b/nntrainer/layers/reduce_mean_layer.cpp
@@ -27,9 +27,11 @@ void ReduceMeanLayer::finalize(InitLayerContext &context) {
   const TensorDim &in_dim = context.getInputDimensions()[0];
   TensorDim out_dim = in_dim;

-  /** if reduce axis is not provided, reduction is performed across all the
-   * dimensions */
-  auto &reduce_axis = std::get(reduce_mean_props);
+  /**
+   * if reduce axis is not provided, reduction is performed across all the
+   * dimensions except the batch
+   */
+  auto &reduce_axis = std::get<props::ReduceDimension>(reduce_mean_props);
   if (reduce_axis.empty()) {
     out_dim = TensorDim({1, 1, 1, 1});
   }
@@ -39,10 +41,10 @@ void ReduceMeanLayer::finalize(InitLayerContext &context) {
 }

 void ReduceMeanLayer::forwarding(RunLayerContext &context, bool training) {
-  auto &reduce_axis = std::get(reduce_mean_props);
+  auto &reduce_axis = std::get<props::ReduceDimension>(reduce_mean_props);
   if (reduce_axis.empty()) {
     context.getInput(SINGLE_INOUT_IDX)
-      .average(context.getOutput(SINGLE_INOUT_IDX));
+      .average({1, 2, 3}, context.getOutput(SINGLE_INOUT_IDX));
   } else {
     context.getInput(SINGLE_INOUT_IDX)
       .average(reduce_axis, context.getOutput(SINGLE_INOUT_IDX));
@@ -50,19 +52,14 @@ void ReduceMeanLayer::forwarding(RunLayerContext &context, bool training) {
 }

 void ReduceMeanLayer::calcDerivative(RunLayerContext &context) {
-  auto &deriv = context.getOutgoingDerivative(SINGLE_INOUT_IDX);
-  auto &ret_deriv = context.getIncomingDerivative(SINGLE_INOUT_IDX);
+  auto &deriv = context.getIncomingDerivative(SINGLE_INOUT_IDX);
+  auto &ret_deriv = context.getOutgoingDerivative(SINGLE_INOUT_IDX);

   unsigned int div = ret_deriv.size() / deriv.size();

-  auto &reduce_axis = std::get(reduce_mean_props);
-  if (reduce_axis.empty()) {
-    ret_deriv.setValue(deriv.getValue(0));
-  } else {
-    /** TODO: optimize this by supporting broadcast in copy */
-    ret_deriv.setZero();
-    ret_deriv.add_i(deriv);
-  }
+  /** TODO: optimize this by supporting broadcast in copy */
+  ret_deriv.setZero();
+  ret_deriv.add_i(deriv);

   ret_deriv.divide_i(div);
 }
diff --git a/nntrainer/layers/reduce_mean_layer.h b/nntrainer/layers/reduce_mean_layer.h
index 75b5171e..6cb54f7b 100644
--- a/nntrainer/layers/reduce_mean_layer.h
+++ b/nntrainer/layers/reduce_mean_layer.h
@@ -15,6 +15,8 @@
 #define __REDUCE_MEAN_LAYER_H__
 #ifdef __cplusplus

+#include
+#include
 #include

 namespace nntrainer {
@@ -86,7 +88,7 @@ public:

 private:
   /** TODO: support scalar multiplier to simulate reduce_sum */
-  std::tuple
+  std::tuple<props::ReduceDimension>
     reduce_mean_props; /**< reduce_mean properties : axis to reduce along */
 };

diff --git a/packaging/unittest_models_v2.tar.gz b/packaging/unittest_models_v2.tar.gz
index c2a657f95e805880082642695df38d05c8a89e74..7889c91d5eaaba9835d629b2a68dfb9286e06281 100644
GIT binary patch
[binary payload omitted: regenerated unittest_models_v2.tar.gz golden data (3187 -> 3385 bytes)]
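The calcDerivative() fix above swaps the previously reversed incoming/outgoing derivative tensors and drops the axis-specific branch, so the backward rule is now the same with or without an explicit axis: broadcast the incoming derivative back to the input shape and divide by the number of averaged elements. A rough NumPy sketch of that rule (illustrative shapes only, not nntrainer code):

import numpy as np

# Backward rule sketch for reduce mean: broadcast the incoming derivative,
# then divide by the number of elements averaged into each output value.
input_shape = (3, 1, 4, 7)                          # illustrative NCHW-style dims
incoming = np.random.randn(3, 1, 4, 1)              # derivative w.r.t. output reduced along axis 3
div = int(np.prod(input_shape)) // incoming.size    # here: 7 elements averaged per output value
outgoing = np.broadcast_to(incoming, input_shape) / div
assert outgoing.shape == input_shape and div == 7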
diff --git a/test/input_gen/genModelTests_v2.py b/test/input_gen/genModelTests_v2.py
new file mode 100644
index 00000000..f090382b
--- /dev/null
+++ b/test/input_gen/genModelTests_v2.py
@@ -0,0 +1,35 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: Apache-2.0
+##
+# Copyright (C) 2021 Parichay Kapoor
+#
+# @file genModelTests_v2.py
+# @date 25 November 2021
+# @brief Generate model tcs
+# @author Parichay Kapoor
+
+from recorder_v2 import record_v2, inspect_file
+import torch
+
+class ReduceMeanLast(torch.nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.fc = torch.nn.Linear(2, 7)
+        self.loss = torch.nn.Identity()
+
+    def forward(self, inputs, labels):
+        out = self.fc(inputs[0])
+        out = torch.mean(out, dim=-1)
+        loss = self.loss(torch.sum(out))
+        return out, loss
+
+if __name__ == "__main__":
+    record_v2(
+        ReduceMeanLast(),
+        iteration=2,
+        input_dims=[(3, 2,)],
+        label_dims=[(3, 1,)],
+        name="reduce_mean_last",
+    )
+
+    # inspect_file("lstm_single.nnmodelgolden")
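ReduceMeanLast above feeds a 2-to-7 fully-connected output through torch.mean(dim=-1), so the golden data captured by record_v2 should show every fc output element receiving a gradient of 1/7. A small standalone torch sketch of that expectation (not part of the generator):

import torch

fc_out = torch.randn(3, 7, requires_grad=True)   # stand-in for the fc layer output
out = torch.mean(fc_out, dim=-1)                 # same reduction as ReduceMeanLast
out.sum().backward()                             # mirrors the identity/sum loss above
# every element receives d(mean)/d(x_i) = 1/7
print(torch.allclose(fc_out.grad, torch.full_like(fc_out, 1.0 / 7)))  # True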
diff --git a/test/unittest/models/meson.build b/test/unittest/models/meson.build
index 3864d3b4..4ac104e3 100644
--- a/test/unittest/models/meson.build
+++ b/test/unittest/models/meson.build
@@ -6,7 +6,8 @@ models_targets = [
   'models_test_utils.cpp',
   'models_golden_test.cpp',
   'unittest_models_recurrent.cpp',
-  'unittest_models_multiout.cpp'
+  'unittest_models_multiout.cpp',
+  'unittest_models.cpp',
 ]

 test_target += models_targets
diff --git a/test/unittest/models/unittest_models.cpp b/test/unittest/models/unittest_models.cpp
new file mode 100644
index 00000000..a2384da0
--- /dev/null
+++ b/test/unittest/models/unittest_models.cpp
@@ -0,0 +1,51 @@
+// SPDX-License-Identifier: Apache-2.0
+/**
+ * Copyright (C) 2021 Parichay Kapoor
+ *
+ * @file unittest_models.cpp
+ * @date 25 Nov 2021
+ * @brief unittest models for v2 version
+ * @see https://github.com/nnstreamer/nntrainer
+ * @author Parichay Kapoor
+ * @bug No known bugs except for NYI items
+ */
+
+#include <gtest/gtest.h>
+
+#include <memory>
+
+#include <ini_wrapper.h>
+#include <neuralnet.h>
+#include <nntrainer_test_util.h>
+
+#include <models_golden_test.h>
+
+using namespace nntrainer;
+
+static inline constexpr const int NOT_USED_ = 1;
+
+static IniSection nn_base("model", "type = NeuralNetwork");
+static std::string fc_base = "type = Fully_connected";
+static std::string red_mean_base = "type = reduce_mean";
+static IniSection sgd_base("optimizer", "Type = sgd");
+static IniSection constant_loss("loss", "type = constant_derivative");
+
+IniWrapper reduce_mean_last("reduce_mean_last",
+                            {
+                              nn_base + "batch_size=3",
+                              sgd_base + "learning_rate=0.1",
+                              IniSection("fc_1") + fc_base +
+                                "unit=7 | input_shape=1:1:2",
+                              IniSection("red_mean") + red_mean_base + "axis=3",
+                              constant_loss,
+                            });
+
+INSTANTIATE_TEST_CASE_P(
+  model, nntrainerModelTest,
+  ::testing::ValuesIn({
+    mkModelIniTc(reduce_mean_last, DIM_UNUSED, NOT_USED_,
+                 ModelTestOption::COMPARE_V2),
+  }),
+  [](const testing::TestParamInfo<nntrainerModelTest::ParamType> &info) {
+    return std::get<1>(info.param);
+  });
-- 
2.34.1
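For reference, genModelTests_v2.py already imports inspect_file from recorder_v2; assuming the recorded golden file follows the "<name>.nnmodelgolden" pattern seen in the commented-out example, the regenerated values packed into unittest_models_v2.tar.gz could be inspected with a snippet along these lines (the file name is an assumption, not taken from the patch):

from recorder_v2 import inspect_file

# assumed output name of record_v2(..., name="reduce_mean_last")
inspect_file("reduce_mean_last.nnmodelgolden")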