From bc974ea69dcf7e54e8bbd054d6a103a185c59979 Mon Sep 17 00:00:00 2001 From: "jijoong.moon" Date: Fri, 15 Nov 2019 16:58:37 +0900 Subject: [PATCH] Add Another Fully Connected Layer Add Fully Connected Layer : ( Two FC ) Resolves: **Self evaluation:** 1. Build test: [X]Passed [ ]Failed [ ]Skipped 2. Run test: [X]Passed [ ]Failed [ ]Skipped Signed-off-by: jijoong.moon --- Training/jni/bitmap_helpers.cpp | 4 +-- Training/jni/bitmap_helpers.h | 3 ++- Training/jni/main.cpp | 25 ++++++++--------- Training/jni/matrix.cpp | 12 +++++++++ Training/jni/matrix.h | 3 +++ Training/jni/neuralnet.cpp | 60 ++++++++++++++++++++++++++++++++++++----- Training/jni/neuralnet.h | 4 ++- 7 files changed, 88 insertions(+), 23 deletions(-) diff --git a/Training/jni/bitmap_helpers.cpp b/Training/jni/bitmap_helpers.cpp index fb83e9a..86dcb06 100644 --- a/Training/jni/bitmap_helpers.cpp +++ b/Training/jni/bitmap_helpers.cpp @@ -13,11 +13,11 @@ See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ -#include + #include #include #include -#include + #include // NOLINT(build/include_order) diff --git a/Training/jni/bitmap_helpers.h b/Training/jni/bitmap_helpers.h index f7cff1f..d33a914 100644 --- a/Training/jni/bitmap_helpers.h +++ b/Training/jni/bitmap_helpers.h @@ -15,7 +15,8 @@ limitations under the License. 
#ifndef TENSORFLOW_CONTRIB_LITE_EXAMPLES_LABEL_IMAGE_BITMAP_HELPERS_H_ #define TENSORFLOW_CONTRIB_LITE_EXAMPLES_LABEL_IMAGE_BITMAP_HELPERS_H_ - +#include +#include namespace tflite { namespace label_image diff --git a/Training/jni/main.cpp b/Training/jni/main.cpp index 0e13400..0e8e014 100644 --- a/Training/jni/main.cpp +++ b/Training/jni/main.cpp @@ -20,7 +20,7 @@ using namespace std; -string data_path = "/sdcard/Transfer-Learning/"; +string data_path; double stepFunction(double x) { if (x > 0.9) { @@ -35,8 +35,6 @@ double stepFunction(double x) { } void getFeature(const string filename, vector &feature_input) { - int tensor_size; - int node_size; int input_size; int output_size; int *output_idx_list; @@ -45,18 +43,16 @@ void getFeature(const string filename, vector &feature_input) { int outputDim[4]; int input_idx_list_len = 0; int output_idx_list_len = 0; - + std::string model_path = data_path+"ssd_mobilenet_v2_coco_feature.tflite"; std::unique_ptr model = tflite::FlatBufferModel::BuildFromFile( - "/sdcard/Transfer-Learning/ssd_mobilenet_v2_coco_feature.tflite"); + model_path.c_str()); assert(model != NULL); tflite::ops::builtin::BuiltinOpResolver resolver; std::unique_ptr interpreter; tflite::InterpreterBuilder (*model.get(), resolver)(&interpreter); - tensor_size = interpreter->tensors_size(); - node_size = interpreter->nodes_size(); input_size = interpreter->inputs().size(); output_size = interpreter->outputs().size(); @@ -129,7 +125,7 @@ void getFeature(const string filename, vector &feature_input) { delete[] output_idx_list; } -void ExtractFeatures(const char *path, vector> &feature_input, +void ExtractFeatures(std::string p, vector> &feature_input, vector> &feature_output) { string total_label[TOTAL_LABEL_SIZE] = {"happy", "sad", "soso"}; @@ -141,7 +137,7 @@ void ExtractFeatures(const char *path, vector> &feature_input, int count = 0; for (int i = 0; i < TOTAL_LABEL_SIZE; i++) { - std::string path = data_path; + std::string path = p; path += 
total_label[i]; for (int j = 0; j < TOTAL_DATA_SIZE; j++) { @@ -160,11 +156,13 @@ void ExtractFeatures(const char *path, vector> &feature_input, } int main(int argc, char *argv[]) { - + const vector args(argv+1, argv+argc); + data_path = args[0]; + std::string ini_file=data_path+"ini.bin"; srand(time(NULL)); std::vector> inputVector, outputVector; - ExtractFeatures("/sdcard/Transfer-Learning/", inputVector, outputVector); + ExtractFeatures(data_path, inputVector, outputVector); Network::NeuralNetwork NN; Network::NeuralNetwork NN2; @@ -172,8 +170,11 @@ int main(int argc, char *argv[]) { NN.init(128, 20, TOTAL_LABEL_SIZE, 0.7); NN2.init(128, 20, TOTAL_LABEL_SIZE, 0.7); + // NN.saveModel(ini_file); + NN.readModel(ini_file); + for (int i = 0; i < ITERATION; i++) { - for (int j = 0; j < inputVector.size(); j++) { + for (unsigned int j = 0; j < inputVector.size(); j++) { NN.forwarding(inputVector[j]); NN.backwarding(outputVector[j]); } diff --git a/Training/jni/matrix.cpp b/Training/jni/matrix.cpp index 9db4ead..1439347 100644 --- a/Training/jni/matrix.cpp +++ b/Training/jni/matrix.cpp @@ -180,3 +180,15 @@ Matrix &Matrix::copy(const Matrix &from) { } return *this; } + +void Matrix::read(std::ifstream &file){ + for (int i=0;i +#include #include class Matrix { @@ -23,6 +24,8 @@ public: Matrix applyFunction(double (*function)(double)) const; void print(std::ostream &flux) const; + void save(std::ofstream &file); + void read(std::ifstream &file); Matrix ©(Matrix const &from); diff --git a/Training/jni/neuralnet.cpp b/Training/jni/neuralnet.cpp index 85abcf3..220711d 100644 --- a/Training/jni/neuralnet.cpp +++ b/Training/jni/neuralnet.cpp @@ -17,20 +17,30 @@ void NeuralNetwork::init(int input, int hidden, int output, double rate) { learning_rate = rate; loss = 0.0; W1 = Matrix(inputNeuron, hiddenNeuron); - W2 = Matrix(hiddenNeuron, outputNeuron); + // W2 = Matrix(hiddenNeuron, outputNeuron); + W2 = Matrix(hiddenNeuron, hiddenNeuron); + W3 = Matrix(hiddenNeuron, 
outputNeuron); B1 = Matrix(1, hiddenNeuron); - B2 = Matrix(1, outputNeuron); + B2 = Matrix(1, hiddenNeuron); + B3 = Matrix(1, outputNeuron); W1 = W1.applyFunction(random); W2 = W2.applyFunction(random); + W3 = W3.applyFunction(random); B1 = B1.applyFunction(random); B2 = B2.applyFunction(random); + B3 = B3.applyFunction(random); } Matrix NeuralNetwork::forwarding(std::vector input) { X = Matrix({input}); - H = X.dot(W1).add(B1).applyFunction(sigmoid); - Y = H.dot(W2).add(B2).applyFunction(sigmoid); + // H = X.dot(W1).add(B1).applyFunction(sigmoid); + // Y = H.dot(W2).add(B2).applyFunction(sigmoid); + + H1 = X.dot(W1).add(B1).applyFunction(sigmoid); + H2 = H1.dot(W2).add(B2).applyFunction(sigmoid); + Y = H2.dot(W3).add(B3).applyFunction(sigmoid); + return Y; } @@ -40,17 +50,30 @@ void NeuralNetwork::backwarding(std::vector expectedOutput) { if (l > loss) loss = l; Y2 = Matrix({expectedOutput}); - dJdB2 = - Y.subtract(Y2).multiply(H.dot(W2).add(B2).applyFunction(sigmoidePrime)); + + dJdB3 = + Y.subtract(Y2).multiply(H2.dot(W3).add(B3).applyFunction(sigmoidePrime)); + dJdB2 =dJdB3.dot(W3.transpose()) + .multiply(H1.dot(W2).add(B2).applyFunction(sigmoidePrime)); dJdB1 = dJdB2.dot(W2.transpose()) .multiply(X.dot(W1).add(B1).applyFunction(sigmoidePrime)); - dJdW2 = H.transpose().dot(dJdB2); + dJdW3 = H2.transpose().dot(dJdB3); + dJdW2 = H1.transpose().dot(dJdB2); dJdW1 = X.transpose().dot(dJdB1); + + // dJdB2 = + // Y.subtract(Y2).multiply(H.dot(W2).add(B2).applyFunction(sigmoidePrime)); + // dJdB1 = dJdB2.dot(W2.transpose()) + // .multiply(X.dot(W1).add(B1).applyFunction(sigmoidePrime)); + // dJdW2 = H.transpose().dot(dJdB2); + // dJdW1 = X.transpose().dot(dJdB1); W1 = W1.subtract(dJdW1.multiply(learning_rate)); W2 = W2.subtract(dJdW2.multiply(learning_rate)); + W3 = W3.subtract(dJdW3.multiply(learning_rate)); B1 = B1.subtract(dJdB1.multiply(learning_rate)); B2 = B2.subtract(dJdB2.multiply(learning_rate)); + B3 = B3.subtract(dJdB3.multiply(learning_rate)); } double 
NeuralNetwork::getLoss() { return loss; } @@ -71,4 +94,27 @@ NeuralNetwork &NeuralNetwork::copy(NeuralNetwork const &from) { } return *this; } +void NeuralNetwork::saveModel(std::string model_path) { + std::ofstream modelFile(model_path, std::ios::out | std::ios::binary); + W1.save(modelFile); + W2.save(modelFile); + W3.save(modelFile); + B1.save(modelFile); + B2.save(modelFile); + B3.save(modelFile); + modelFile.close(); } + +void NeuralNetwork::readModel(std::string model_path) { + std::ifstream modelFile(model_path, std::ios::in | std::ios::binary); + W1.read(modelFile); + W2.read(modelFile); + W3.read(modelFile); + B1.read(modelFile); + B2.read(modelFile); + B3.read(modelFile); + modelFile.close(); +} + +} + diff --git a/Training/jni/neuralnet.h b/Training/jni/neuralnet.h index b0342f7..172c0a0 100644 --- a/Training/jni/neuralnet.h +++ b/Training/jni/neuralnet.h @@ -17,11 +17,13 @@ public: void init(int input, int hidden, int output, double rate); Matrix forwarding(std::vector input); void backwarding(std::vector expectedOutput); + void saveModel(std::string model_path); + void readModel(std::string model_path); NeuralNetwork ©(NeuralNetwork const &from); private: - Matrix X, W1, H, W2, Y, B1, B2, Y2, dJdB1, dJdB2, dJdW1, dJdW2; + Matrix X, W1, H, W2, Y, B1, B2, Y2, dJdB1, dJdB2, dJdW1, dJdW2, W3, B3, dJdB3, dJdW3, H1, H2; int inputNeuron; int outputNeuron; -- 2.7.4