# binary files
*.out
+*.in
+*.info
*.so
*.o
*.a
+++ /dev/null
-diff --git a/Applications/TransferLearning/Draw_Classification/jni/main.cpp b/Applications/TransferLearning/Draw_Classification/jni/main.cpp
-index fa0c749..86a2df8 100644
---- a/Applications/TransferLearning/Draw_Classification/jni/main.cpp
-+++ b/Applications/TransferLearning/Draw_Classification/jni/main.cpp
-@@ -405,6 +405,7 @@ int trainModel(const char *config) {
- #if defined(__TIZEN__)
- void sink_cb(const ml_tensors_data_h data, const ml_tensors_info_h info,
- void *user_data) {
-+ static int PREDICTION_THRESHOLD = 0.7;
- static int test_file_idx = 0;
- int status = ML_ERROR_NONE;
- ml_tensor_dimension dim;
-@@ -421,14 +422,19 @@ void sink_cb(const ml_tensors_data_h data, const ml_tensors_info_h info,
- return;
-
- for (int i = 0; i < LABEL_SIZE; i++) {
-- if (raw_data[i] > max_val) {
-+ if (raw_data[i] > max_val && raw_data[i] > PREDICTION_THRESHOLD) {
- max_val = raw_data[i];
- max_idx = i;
- }
- }
-
-- std::cout << "Label for test file test" << test_file_idx
-- << ".bmp = " << label_names[max_idx] << std::endl;
-+ std::cout << "Label for test file test" << test_file_idx + 1
-+ << ".bmp = ";
-+ if (max_idx >= 0) {
-+ std::cout << label_names[max_idx] << " with softmax value " << max_val << std::endl;
-+ } else {
-+ std::cout << "cannot be predicted with enough confidence.";
-+ }
- test_file_idx += 1;
- }
- #endif
return strides;
}
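+  /**
+   * @brief tolerance for floating point comparison; kept public so tests can
+   * reference it directly, e.g. EXPECT_NEAR(expected, actual, Tensor::epsilon)
+   */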
+ static constexpr float epsilon = 1e-5;
+
private:
/**
* @brief Get linear index given the n-d index
std::shared_ptr<float> data;
template <typename T> void setDist(T dist);
- static constexpr float epsilon = 1e-5;
};
/**
TensorDim(const TensorDim &rhs) = default;
+ TensorDim(const std::string &shape);
+
~TensorDim(){};
/**
const unsigned int getTensorDim(unsigned int idx) const;
void setTensorDim(unsigned int idx, unsigned int value);
- int setTensorDim(std::string input_shape);
+ int setTensorDim(const std::string &input_shape);
TensorDim &operator=(const TensorDim &rhs);
bool operator==(const TensorDim &rhs) const;
namespace nntrainer {
+TensorDim::TensorDim(const std::string &shape) : TensorDim() {
+ if (setTensorDim(shape) != ML_ERROR_NONE) {
+ throw std::invalid_argument("[TensorDim] Setting TensorDim failed");
+ }
+}
+
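+/*
+ * usage sketch: the shape string is tokenized by setTensorDim() below, e.g.
+ *
+ *   nntrainer::TensorDim dim("3:1:1:10");
+ *
+ * and a string that fails to parse makes the constructor throw
+ * std::invalid_argument.
+ */
+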
TensorDim &TensorDim::operator=(const TensorDim &rhs) {
using std::swap;
resetLen();
}
-int TensorDim::setTensorDim(std::string input_shape) {
+int TensorDim::setTensorDim(const std::string &input_shape) {
int status = ML_ERROR_NONE;
std::regex words_regex("[^\\s.,:;!?]+");
auto words_begin =
import warnings
-from .recorder import KerasRecorder
+from recorder import KerasRecorder
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
d = K.layers.Activation("softmax")(c)
KerasRecorder(
- file_name="a.info",
+ file_name="fc_softmax_mse.info",
inputs=inp,
outputs=[inp, a, b, c, d],
input_shape=(3, 3),
label_shape=(3, 10),
- loss_fn=tf.keras.losses.CategoricalCrossentropy(),
- ).run(2)
+ loss_fn=tf.keras.losses.MeanSquaredError(),
+ ).run(10)
+
+ inp = K.Input(shape=(3, 3))
+ a = K.layers.Dense(10)(inp)
+ b = K.layers.Activation("relu")(a)
+ c = K.layers.Dense(10)(b)
+ d = K.layers.Activation("relu")(c)
+ e = K.layers.Dense(2)(d)
+ f = K.layers.Activation("relu")(e)
+
+ KerasRecorder(
+ file_name="fc_relu_mse.info",
+ inputs=inp,
+ outputs=[inp, a, b, c, d, e, f],
+ input_shape=(3, 3),
+ label_shape=(3, 2),
+ loss_fn=tf.keras.losses.MeanSquaredError(),
+ optimizer=tf.keras.optimizers.SGD(lr=0.001)
+ ).run(10)
def _rand_like(self, tensorOrShape, scale=10):
try:
- return tf.random.uniform(tensorOrShape.shape, dtype=tf.float32) * scale
+ t = np.random.randint(1, 10, size=tensorOrShape.shape).astype(dtype=np.float32)
except AttributeError:
- return tf.random.uniform(tensorOrShape, dtype=tf.float32) * scale
+ t = np.random.randint(1, 10, size=tensorOrShape).astype(dtype=np.float32)
+ return tf.convert_to_tensor(t)
##
# @brief generate random input and label data and save them to the file.
def generate_data(self, input_shape, label_shape):
"""This part loads data, should be changed if you are gonna load real data"""
self.initial_input = self._rand_like(input_shape)
- self.label = tf.one_hot(indices=[1] * label_shape[0], depth=label_shape[1])
+    self.label = tf.one_hot(
+        # np.random.randint's upper bound is exclusive; pass the full depth
+        # so that every label index can be sampled
+        indices=np.random.randint(0, label_shape[1], label_shape[0]),
+        depth=label_shape[1]
+    )
self.initial_input.numpy().tofile(self.file)
self.label.numpy().tofile(self.file)
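# note: the golden file hence begins with the raw input tensor followed by the
# one-hot label; step() appends its recorded items to the same file afterwards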
def _write_items(self, *items):
for item in items:
- print(item)
try:
item.numpy().tofile(self.file)
except AttributeError:
pass
- try:
- print(item.shape, " data is generated")
- except:
- pass
##
# @brief model iteration wrapper that listens to the gradients and outputs of the model;
# each result is recorded.
def step(self):
- self.model.summary()
-
with tf.GradientTape(persistent=True) as tape:
tape.watch(self.initial_input)
outputs = self.model(self.initial_input)
if self.loss_fn:
loss = self.loss_fn(self.label, outputs[-1])
outputs.append(loss)
- print("loss is %s" % loss)
results = [self.initial_input] + outputs
for idx, layer in enumerate(self.model.layers):
- print("generating for %s" % layer.name)
+ # print("generating for %s" % layer.name)
weights = layer.trainable_weights.copy()
gradients = tape.gradient(results[-1], layer.trainable_weights)
self.optimizer.apply_gradients(zip(gradients, layer.trainable_weights))
self._write_items(results[-1])
+ print("loss is %s" % results[-1])
##
# @brief run function
# @param iteration number of iterations to run
def run(self, iteration = 1):
+    self.model.summary()  # summary() prints its table itself; wrapping it in print() adds a stray "None"
for _ in range(iteration):
self.step()
unittest_nntrainer_deps = [nntrainer_test_deps] # if a unittest-wide dep is added, this is the place to add it
# test material needs to be unzipped from "(project_home)/packaging/"
-unzip_target = ['trainset.tar.gz', 'valset.tar.gz', 'testset.tar.gz', 'unittest_layers.tar.gz']
+unzip_target = [
+ 'trainset.tar.gz',
+ 'valset.tar.gz',
+ 'testset.tar.gz',
+ 'unittest_layers.tar.gz',
+ 'unittest_models.tar.gz'
+]
src_path = join_paths(meson.source_root(), 'packaging')
dest_path = meson.build_root()
mkIniTc("buffer_size_smaller_than_batch_size_p", {nw_adam, dataset + "BufferSize=26", input, out}, SUCCESS),
mkIniTc("buffer_size_smaller_than_batch_size2_p", {nw_adam, input, out, dataset + "BufferSize=26"}, SUCCESS),
-
/**< half negative: init fail cases (1 positive and 4 negative cases) */
mkIniTc("unknown_loss_n", {nw_adam + "loss = unknown", input, out}, INITFAIL),
mkIniTc("activation_very_first_n", {nw_sgd, act_relu, input, out}, INITFAIL),
#include <neuralnet.h>
#include <weight.h>
+#include "nntrainer_test_util.h"
+
+/********************************************************
+ * Watcher Classes *
+ ********************************************************/
+
using NodeType = nntrainer::NeuralNetwork::NodeType;
using FlatGraphType = nntrainer::NeuralNetwork::FlatGraphType;
void GraphWatcher::compareFor(const std::string &reference,
const nntrainer::TensorDim &label_shape,
- unsigned int iterations) {
+ const unsigned int iterations) {
std::ifstream ref(reference, std::ios_base::in | std::ios_base::binary);
if (ref.bad()) {
nntrainer::sharedConstTensor label = MAKE_SHARED_TENSOR(lb.clone());
readIteration(ref);
-
iteration == 1 ? prepareInitialWeight() : matchWeightAfterUpdation();
/// forward pass
for (auto &i : nodes)
input = i.forward(input, iteration);
- loss_node.lossForward(input, label, iteration);
- EXPECT_FLOAT_EQ(expected_loss, loss_node.getLoss());
+ loss_node.lossForward(input, label, iteration);
+ EXPECT_NEAR(expected_loss, loss_node.getLoss(), nntrainer::Tensor::epsilon);
/// backward pass and update weights
nntrainer::sharedConstTensor output =
f.read((char *)&expected_loss, sizeof(float));
}
+/********************************************************
+ * Tester Classes *
+ ********************************************************/
+
+/**
+ * @brief nntrainerModelTest fixture for parametrized test
+ *
+ * @param const char * name of the ini and test. The tester generates name.ini
+ * and tries to read name.info
+ * @param IniTestWrapper::Sections ini data
+ * @param nntrainer::TensorDim label dimension
+ * @param int Iteration
+ */
+class nntrainerModelTest
+ : public ::testing::TestWithParam<
+ std::tuple<const char *, const IniTestWrapper::Sections,
+ const nntrainer::TensorDim, const unsigned int>> {
+
+protected:
+ virtual void SetUp() {
+ auto param = GetParam();
+ name = std::string(std::get<0>(param));
+ std::cout << "starting test case : " << name << "\n\n";
+
+ auto sections = std::get<1>(param);
+ ini = IniTestWrapper(name, sections);
+
+ label_dim = std::get<2>(param);
+ iteration = std::get<3>(param);
+ ini.save_ini();
+ }
+
+ virtual void TearDown() { ini.erase_ini(); }
+
+ std::string getIniName() { return ini.getIniName(); }
+ std::string getGoldenName() { return name + ".info"; }
+  int getIteration() { return iteration; }
+ nntrainer::TensorDim getLabelDim() { return label_dim; }
+
+private:
+ nntrainer::TensorDim label_dim;
+ int iteration;
+ std::string name;
+ IniTestWrapper ini;
+};
+
+/**
+ * @brief check that the model given by the ini loads and matches the golden data
+ */
+TEST_P(nntrainerModelTest, model_test) {
+ GraphWatcher g(getIniName());
+
+ g.compareFor(getGoldenName(), getLabelDim(), getIteration());
+
+ /// add stub test for tcm
+ EXPECT_EQ(std::get<0>(GetParam()), std::get<0>(GetParam()));
+}
+
+/**
+ * @brief helper function to make model testcase
+ *
+ * @param const char * name of the ini and test. The tester generates name.ini
+ * and tries to read name.info
+ * @param IniTestWrapper::Sections ini data
+ * @param nntrainer::TensorDim label dimension
+ * @param int Iteration
+ */
+auto mkModelTc(const char *name, const IniTestWrapper::Sections &vec,
+ const std::string &label_dim, const unsigned int iteration) {
+ return std::make_tuple(name, vec, nntrainer::TensorDim(label_dim), iteration);
+}
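+
+/*
+ * usage sketch: mkModelTc("fc_sigmoid_mse", fc_sigmoid_mse, "3:1:1:2", 10)
+ * would bundle one test case; the label dimension string goes through the
+ * new TensorDim(const std::string &) constructor.
+ */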
+
+/********************************************************
+ * Actual Test *
+ ********************************************************/
+
+static IniSection nn_base("model", "Type = NeuralNetwork");
+static IniSection input_base("input", "Type = input");
+static IniSection fc_base("fc", "Type = Fully_connected");
+
+static IniSection act_base("activation", "Type = Activation");
+static IniSection softmax = act_base + "Activation = softmax";
+static IniSection sigmoid = act_base + "Activation = sigmoid";
+static IniSection relu = act_base + "Activation = relu";
+
+using I = IniSection;
+
+/**
+ * This is just a wrapper for an ini file with save / erase attached.
+ * For example, fc_softmax_mse contains the following ini file representation
+ * as a series of IniSections:
+ *
+ * [Model]
+ * Type = NeuralNetwork
+ * Learning_rate = 1
+ * Optimizer = sgd
+ * Loss = mse
+ * batch_size = 3
+ *
+ * [input_1]
+ * Type = input
+ * Input_Shape = 1:1:3
+ *
+ * [dense]
+ * Type = fully_connected
+ * Unit = 5
+ *
+ * [dense_1]
+ * Type = fully_connected
+ * Unit = 5
+ *
+ * [dense_2]
+ * Type = fully_connected
+ * Unit = 10
+ *
+ * [activation]
+ * Type = Activation
+ * Activation = softmax
+ */
+IniTestWrapper::Sections fc_softmax_mse{
+ nn_base + "Learning_rate=1 | Optimizer=sgd | Loss=mse | batch_size = 3",
+ I("input") + input_base + "input_shape = 1:1:3",
+ I("dense") + fc_base + "unit = 5",
+ I("dense_1") + fc_base + "unit = 5",
+ I("dense_2") + fc_base + "unit = 10",
+ softmax};
+
+IniTestWrapper::Sections fc_sigmoid_mse{
+ nn_base + "Learning_rate=1 | Optimizer=sgd | Loss=mse | batch_size = 3",
+ I("input") + input_base + "input_shape = 1:1:3",
+ I("dense") + fc_base + "unit = 10",
+ I("dense_1") + fc_base + "unit = 10",
+ I("dense_2") + fc_base + "unit = 2",
+ sigmoid};
+
+IniTestWrapper::Sections fc_relu_mse{
+ nn_base + "Learning_rate=0.001 | Optimizer=sgd | Loss=mse | batch_size = 3",
+ I("input") + input_base + "input_shape = 1:1:3",
+ I("dense") + fc_base + "unit = 10",
+ I("act") + relu,
+ I("dense_1") + fc_base + "unit = 10",
+ I("act_1") + relu,
+ I("dense_2") + fc_base + "unit = 2",
+ I("act_2") + relu};
+
+// clang-format off
+INSTANTIATE_TEST_CASE_P(
+ nntrainerModelAutoTests, nntrainerModelTest, ::testing::Values(
+mkModelTc("fc_softmax_mse", fc_softmax_mse, "3:1:1:10", 10),
+mkModelTc("fc_relu_mse", fc_relu_mse, "3:1:1:2", 10)
+/// #if gtest_version <= 1.7.0
+));
+/// #else gtest_version > 1.7.0
+// ), [](const testing::TestParamInfo<nntrainerModelTest::ParamType>& info){
+// return std::get<0>(info.param);
+// });
+/// #endif
+// clang-format on
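+/*
+ * note: the golden *.info files compared against here are generated by the
+ * python KerasRecorder (fc_softmax_mse.info / fc_relu_mse.info) and unpacked
+ * from unittest_models.tar.gz in packaging/ by the meson test setup.
+ */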
+
/**
* @brief Main gtest
*/
try {
result = RUN_ALL_TESTS();
} catch (...) {
- std::cerr << "Error duing RUN_ALL_TSETS()" << std::endl;
+    std::cerr << "Error during RUN_ALL_TESTS()" << std::endl;
}
return result;