Currently, PicoGPT and LLaMA append an encoder define to the extra_defines meson variable on the application side.
However, even though this code runs during the build, the definition is never reflected when the app actually runs.
The application directories are configured after extra_defines has already been passed to add_project_arguments, so appending to extra_defines at that stage has no effect.
Moreover, Meson does not allow add_project_arguments to be called once a build target has been declared, so adding extra_defines during the application build is structurally wrong.
The only reason PicoGPT and LLaMA added extra_defines is that the encoder preparation script does not run on Tizen, so this patch moves the encoder option into the root meson and removes the per-application options.
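For reference, a minimal sketch of the ordering problem (the paths and the empty initial list are illustrative, not the actual nntrainer build files):

```meson
# root meson.build -- extra_defines is consumed here, early during configuration
extra_defines = []
add_project_arguments(extra_defines, language: ['c', 'cpp'])

subdir('Applications')  # application meson.build files are evaluated only after this line

# Applications/LLaMA/jni/meson.build (evaluated inside the subdir() call above):
# the list was already consumed, and Meson forbids calling add_project_arguments()
# once a build target has been declared, so this append can never reach the compiler.
extra_defines += '-DENABLE_ENCODER2=1'
```

The root meson.build, before any target is declared, is therefore the only place where such a define can still take effect, which is where this patch puts the encoder option.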
**Self evaluation:**
1. Build test: [X]Passed [ ]Failed [ ]Skipped
2. Run test: [X]Passed [ ]Failed [ ]Skipped
Signed-off-by: Seungbaek Hong <sb92.hong@samsung.com>
#include <swiglu.h>
#include <transpose_layer.h>
-#if defined(ENABLE_ENCODER2)
+#if defined(ENABLE_ENCODER)
#include "json.hpp"
#include <codecvt>
#include <encoder.hpp>
unsigned int init_len;
-#if defined(ENABLE_ENCODER2)
+#if defined(ENABLE_ENCODER)
std::string vocab_file_name = "../Applications/LLaMA/jni/vocab.json";
std::string merge_file_name = "../Applications/LLaMA/jni/merges.txt";
std::cout << " Progress Reading: 100 % " << std::endl;
std::cout << std::endl << "### Output : " << std::endl;
if (init_len < INIT_SEQ_LEN) {
-#if defined(ENABLE_ENCODER2)
+#if defined(ENABLE_ENCODER)
auto decoded_str = tokenizer.decode({static_cast<int64_t>(ids)});
std::cout << decoded_str << " ";
std::cout.flush();
input_sample[0] = static_cast<float>(init_input[i]);
} else {
input_sample[0] = static_cast<float>(ids);
-#if defined(ENABLE_ENCODER2)
+#if defined(ENABLE_ENCODER)
auto decoded_str = tokenizer.decode({static_cast<int64_t>(ids)});
std::cout << decoded_str << " ";
std::cout.flush();
g_model->load(weight_path);
}
-#if defined(ENABLE_ENCODER2)
+#if defined(ENABLE_ENCODER)
std::wstring decodeUnicodeEscape(const std::wstring &input) {
std::wstringstream result;
// Setting locale
std::locale::global(std::locale("ko_KR.UTF-8"));
-#if defined(ENABLE_ENCODER2)
+#if defined(ENABLE_ENCODER)
// Getting arguments From terminal
std::wstring input;
std::getline(std::wcin, input);
)
if get_option('platform') != 'tizen'
- extra_defines += '-DENABLE_ENCODER2=1'
run_command(meson.source_root() / 'jni' / 'prepare_encoder.sh', meson.build_root(), '0.2', check: true)
endif
#include <string.h>
#include <tensor.h>
#if defined(ENABLE_ENCODER)
#include "encoder.hpp"
#endif
// bool optimize = true;
bool optimize_attention = false;
#if defined(ENABLE_ENCODER)
template <typename T>
T unwrap(std::optional<T> &&value, const std::string &error_msg) {
if (value.has_value()) {
std::vector<int64_t> init_input;
#if defined(ENABLE_ENCODER)
std::string vocab_file_name = "../Applications/PicoGPT/jni/vocab.json";
std::string merge_file_name = "../Applications/PicoGPT/jni/merges.txt";
((uint *)(wpe_input))[0] = i;
#if defined(ENABLE_ENCODER)
std::vector<int64_t> token_ids;
for (auto element : ids) {
token_ids.push_back(static_cast<int64_t>(element));
run_command('cp', '-lr', res_path, nntr_pico_gpt_resdir)
if get_option('platform') != 'tizen'
- extra_defines += '-DENABLE_ENCODER=1'
run_command(meson.source_root() / 'jni' / 'prepare_encoder.sh', meson.build_root(), '0.1', check: true)
endif
if get_option('enable-tizen-feature-check')
add_project_arguments('-D__FEATURE_CHECK_SUPPORT__', language: ['c', 'cpp'])
endif
+elif get_option('platform') == 'android'
+ add_project_arguments('-DENABLE_ENCODER=1', language: ['c', 'cpp'])
endif
warning_flags = [