EXPECT_EQ(status, ML_ERROR_INVALID_PARAMETER);
}
+
+TEST(nntrainer_LossLayer, forward_nolabel_n) {
+ nntrainer::LossLayer layer;
+ nntrainer::Tensor a = constant(1.0, 1, 1, 1, 1);
+ EXPECT_THROW(layer.forwarding(MAKE_SHARED_TENSOR(a)), std::runtime_error);
+}
+
+TEST(nntrainer_LossLayer, forward_cost_unknown_n) {
+ nntrainer::LossLayer layer;
+ nntrainer::Tensor a = constant(1.0, 1, 1, 1, 1);
+ nntrainer::Tensor b = constant(1.0, 1, 1, 1, 1);
+ EXPECT_THROW(layer.forwarding(MAKE_SHARED_TENSOR(a), MAKE_SHARED_TENSOR(b)),
+ std::runtime_error);
+}
+
+TEST(nntrainer_LossLayer, backward_cost_unknown_n) {
+ nntrainer::LossLayer layer;
+ nntrainer::Tensor a = constant(1.0, 1, 1, 1, 1);
+ EXPECT_THROW(layer.backwarding(MAKE_SHARED_TENSOR(a), 1), std::runtime_error);
+}
+
+TEST(nntrainer_LossLayer, forward_cost_entropy_n) {
+ nntrainer::LossLayer layer;
+ layer.setCost(nntrainer::COST_ENTROPY);
+ nntrainer::Tensor a = constant(1.0, 1, 1, 1, 1);
+ nntrainer::Tensor b = constant(1.0, 1, 1, 1, 1);
+ EXPECT_THROW(layer.forwarding(MAKE_SHARED_TENSOR(a), MAKE_SHARED_TENSOR(b)),
+ std::runtime_error);
+}
+
+TEST(nntrainer_LossLayer, backward_cost_entropy_n) {
+ nntrainer::LossLayer layer;
+ layer.setCost(nntrainer::COST_ENTROPY);
+ nntrainer::Tensor a = constant(1.0, 1, 1, 1, 1);
+ EXPECT_THROW(layer.backwarding(MAKE_SHARED_TENSOR(a), 1), std::runtime_error);
+}
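+
+/**
+ * Positive-path sketch for contrast; a hedged example, assuming
+ * nntrainer::COST_MSR is a valid cost here and that forwarding a
+ * matching label with MSR does not throw.
+ */
+TEST(nntrainer_LossLayer, forward_cost_msr_p) {
+ nntrainer::LossLayer layer;
+ layer.setCost(nntrainer::COST_MSR);
+ nntrainer::Tensor a = constant(1.0, 1, 1, 1, 1);
+ nntrainer::Tensor b = constant(1.0, 1, 1, 1, 1);
+ EXPECT_NO_THROW(
+ layer.forwarding(MAKE_SHARED_TENSOR(a), MAKE_SHARED_TENSOR(b)));
+}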
+
/**
* @brief Loss Layer
*/
-TEST(nntrainer_LossLayer, setProperty_01_n) {
+TEST(nntrainer_LossLayer, setProperty_through_vector_n) {
int status = ML_ERROR_NONE;
nntrainer::LossLayer layer;
status = layer.setProperty({"loss=cross"});
EXPECT_EQ(status, ML_ERROR_INVALID_PARAMETER);
}
+
+TEST(nntrainer_LossLayer, init_with_last_false_n) {
+ int status = ML_ERROR_NONE;
+ nntrainer::LossLayer layer;
+ status = layer.initialize(false);
+ EXPECT_EQ(status, ML_ERROR_INVALID_PARAMETER);
+}
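+
+/**
+ * Hedged positive counterpart: assumes initialize(true) needs no further
+ * setup for a loss layer and returns ML_ERROR_NONE.
+ */
+TEST(nntrainer_LossLayer, init_with_last_true_p) {
+ nntrainer::LossLayer layer;
+ EXPECT_EQ(layer.initialize(true), ML_ERROR_NONE);
+}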
+
+TEST(nntrainer_LossLayer, setProperty_individual_n) {
+ nntrainer::LossLayer layer;
+ EXPECT_THROW(
+ layer.setProperty(nntrainer::Layer::PropertyType::input_shape, "1:2:3:4"),
+ nntrainer::exception::not_supported);
+}
+
+TEST(nntrainer_LossLayer, setProperty_individual2_n) {
+ nntrainer::LossLayer layer;
+ EXPECT_THROW(layer.setProperty(nntrainer::Layer::PropertyType::filter, "1:2"),
+ nntrainer::exception::not_supported);
+}
+
+TEST(nntrainer_LossLayer, setProperty_individual3_n) {
+ nntrainer::LossLayer layer;
+ EXPECT_THROW(layer.setProperty(nntrainer::Layer::PropertyType::input_shape,
+ "invalid_string"),
+ nntrainer::exception::not_supported);
+}
+
+TEST(nntrainer_LossLayer, setProperty_individual4_n) {
+ nntrainer::LossLayer layer;
+ EXPECT_THROW(
+ layer.setProperty(nntrainer::Layer::PropertyType::filter, "invalid_string"),
+ nntrainer::exception::not_supported);
+}
+
TEST(nntrainer_ActivationLayer, init_01_n) {
nntrainer::ActivationLayer layer;
EXPECT_THROW(layer.initialize(false), std::invalid_argument);
/**
 * @brief Negative test: loading the given ini twice must fail
*/
-TEST_P(nntrainerIniTest, loadConfigTwice) {
+TEST_P(nntrainerIniTest, loadConfigTwice_n) {
std::cout << std::get<0>(GetParam()) << std::endl;
NN.loadFromConfig();
int status = NN.loadFromConfig();
 * @brief check the given ini when init happens three times;
 * this should always fail.
*/
-TEST_P(nntrainerIniTest, initThreetime) {
+TEST_P(nntrainerIniTest, initThreetime_n) {
std::cout << std::get<0>(GetParam()) << std::endl;
int status = NN.loadFromConfig();
status = NN.init();
}
/// @todo a run test could be added, with an iniTest flag to control skipping
-
static IniSection nw_base("network", "Type = NeuralNetwork | "
"minibatch = 32 | "
"epsilon = 1e-7 | "
nntrainer::Tensor test(batch - 1, height - 1, width - 1);
- ASSERT_EXCEPTION({ input.multiply(test); }, std::runtime_error,
- "Error: Dimension must be equal each other");
+ EXPECT_THROW({ input.multiply(test); }, std::runtime_error);
}
TEST(nntrainer_Tensor, multiply_float_01_p) {
nntrainer::Tensor input(batch, channel, height, width);
GEN_TEST_INPUT(input, i * (batch * height) + j * (width) + k + 1);
- ASSERT_EXCEPTION({ input.divide(0.0); }, std::runtime_error,
- "Error: Divide by zero");
+ EXPECT_THROW({ input.divide(0.0); }, std::runtime_error);
}
TEST(nntrainer_Tensor, divide_03_n) {
nntrainer::Tensor test(batch - 1, channel, height - 1, width - 1);
- ASSERT_EXCEPTION({ input.divide(test); }, std::runtime_error,
- "Error: Dimension must be equal each other");
+ EXPECT_THROW({ input.divide(test); }, std::runtime_error);
}
TEST(nntrainer_Tensor, add_i_01_p) {
nntrainer::Tensor test(batch - 1, channel, height - 1, width - 1);
- ASSERT_EXCEPTION({ input.add(test); }, std::runtime_error,
- "Error: Dimension must be equal each other");
+ EXPECT_THROW({ input.add(test); }, std::runtime_error);
}
TEST(nntrainer_Tensor, subtract_i_01_p) {
nntrainer::Tensor test(batch - 1, channel, height - 1, width - 1);
- ASSERT_EXCEPTION({ input.subtract(test); }, std::runtime_error,
- "Error: Dimension must be equal each other");
+ EXPECT_THROW({ input.subtract(test); }, std::runtime_error);
}
TEST(nntrainer_Tensor, subtract_float_01_p) {
nntrainer::Tensor input(batch, channel, height, width);
GEN_TEST_INPUT(input, i * (batch * height) + j * (width) + k);
- ASSERT_EXCEPTION({ input.sum(4); }, std::out_of_range,
- "Error: Cannot exceede 3");
+ EXPECT_THROW({ input.sum(4); }, std::out_of_range);
}
TEST(nntrainer_Tensor, sum_02_p) {
}
/// #412
-TEST(nntrainer_Tensor, copy_and_resize_n) {
+TEST(nntrainer_Tensor, DISABLED_copy_and_resize_n) {
nntrainer::Tensor A = constant(1.0f, 3, 4, 5, 6);
nntrainer::Tensor B = A;
nntrainer::Tensor C = A.clone();
/// this is undefined behavior
B.setDim(nntrainer::TensorDim(9, 9, 9, 9));
- /// @todo add appropriate test.
+ /// @todo replace this with an appropriate test.
+ EXPECT_TRUE(true);
}
/// @note this test case demonstrates that it is dangerous to use sharedConstTensor
#include <gtest/gtest.h>
#include <nntrainer_error.h>
#include <nntrainer_log.h>
+#include <nntrainer_logger.h>
#include <nntrainer_test_util.h>
+#include <parse_util.h>
#include <util_func.h>
TEST(nntrainer_util_func, sqrtFloat_01_p) {
}
}
+TEST(nntrainer_parse_util, throw_status_no_error_p) {
+ EXPECT_NO_THROW(nntrainer::throw_status(ML_ERROR_NONE));
+}
+
+TEST(nntrainer_parse_util, throw_status_invalid_argument_n) {
+ EXPECT_THROW(nntrainer::throw_status(ML_ERROR_INVALID_PARAMETER),
+ std::invalid_argument);
+}
+
+TEST(nntrainer_parse_util, throw_status_out_of_memory_n) {
+ EXPECT_THROW(nntrainer::throw_status(ML_ERROR_OUT_OF_MEMORY), std::bad_alloc);
+}
+
+TEST(nntrainer_parse_util, throw_status_timed_out_n) {
+ EXPECT_THROW(nntrainer::throw_status(ML_ERROR_TIMED_OUT), std::runtime_error);
+}
+
+TEST(nntrainer_parse_util, throw_status_permission_denied_n) {
+ EXPECT_THROW(nntrainer::throw_status(ML_ERROR_PERMISSION_DENIED),
+ std::runtime_error);
+}
+
+TEST(nntrainer_parse_util, throw_status_unknown_error_n) {
+ EXPECT_THROW(nntrainer::throw_status(ML_ERROR_UNKNOWN), std::runtime_error);
+}
+
+TEST(nntrainer_parse_util, throw_status_default_n) {
+ EXPECT_THROW(nntrainer::throw_status(-12345), std::runtime_error);
+}
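+
+/**
+ * Usage sketch: throw_status() converts C-style status codes into
+ * exceptions at a single boundary. The `checked` lambda is illustrative
+ * only and not part of the library API.
+ */
+TEST(nntrainer_parse_util, throw_status_usage_p) {
+ auto checked = [](int status) { nntrainer::throw_status(status); };
+ EXPECT_NO_THROW(checked(ML_ERROR_NONE));
+ EXPECT_THROW(checked(ML_ERROR_INVALID_PARAMETER), std::invalid_argument);
+}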
+
/**
* @brief Main gtest
*/