5 files changed (+21, -2 lines)

File tree: examples/models/llama/runner

@@ -77,10 +77,19 @@ add_subdirectory(
   ${EXECUTORCH_ROOT}/extension/llm/tokenizers/third-party/re2
   ${CMAKE_CURRENT_BINARY_DIR}/re2
 )
+add_subdirectory(
+  ${EXECUTORCH_ROOT}/extension/llm/tokenizers/third-party/json
+  ${CMAKE_CURRENT_BINARY_DIR}/json
+)
+target_include_directories(llama_runner
+  PRIVATE ${CMAKE_INSTALL_PREFIX}/include
+          ${EXECUTORCH_ROOT}/extension/llm/tokenizers/third-party/llama.cpp-unicode/include
+          ${EXECUTORCH_ROOT}/extension/llm/tokenizers/third-party/llama.cpp-unicode/src
+)
 set(CMAKE_POSITION_INDEPENDENT_CODE ${_pic_flag})

 set(llama_runner_deps executorch extension_data_loader extension_module
-    extension_tensor re2::re2
+    extension_tensor re2::re2 nlohmann_json::nlohmann_json
 )

 target_link_libraries(llama_runner PUBLIC ${llama_runner_deps})
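
Note: the new nlohmann_json::nlohmann_json link target backs the JSON (Hugging Face) tokenizer path. A minimal sketch of why the library is needed, assuming a conventional tokenizer.json layout with the vocabulary under model.vocab; this is illustrative only and is not the actual //pytorch/tokenizers:hf_tokenizer code:

// Minimal sketch: parse a Hugging Face tokenizer.json with nlohmann_json.
// The "model"/"vocab" layout is an assumption about typical tokenizer.json
// files; the real parsing lives in the hf_tokenizer target.
#include <fstream>
#include <iostream>

#include <nlohmann/json.hpp>

int main(int argc, char** argv) {
  if (argc < 2) {
    std::cerr << "usage: " << argv[0] << " <tokenizer.json>\n";
    return 1;
  }
  std::ifstream in(argv[1]);
  nlohmann::json doc = nlohmann::json::parse(in);

  // A BPE-style tokenizer.json keeps its token -> id pairs under model.vocab.
  const auto& vocab = doc.at("model").at("vocab");
  std::cout << "vocab size: " << vocab.size() << std::endl;
  return 0;
}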

@@ -80,8 +80,10 @@ Error Runner::load() {
   tokenizer_ = nullptr;
   // Check if tokenizer_path_ ends with ".json".
   if (tokenizer_path_.size() >= 5 &&
+
       tokenizer_path_.compare(tokenizer_path_.size() - 5, 5, ".json") == 0) {
     tokenizer_ = std::make_unique<tokenizers::HFTokenizer>();
+    ET_LOG(Info, "Loading json tokenizer");
     tokenizer_->load(tokenizer_path_);
     ET_LOG(
         Info, "Loaded tokenizer %s as HF tokenizer", tokenizer_path_.c_str());

@@ -49,7 +49,7 @@ def define_common_targets():
         "//executorch/runtime/core/exec_aten/util:tensor_util" + aten_suffix,
         "//executorch/examples/models/llama/tokenizer:tiktoken",
         "//pytorch/tokenizers:llama2c_tokenizer",
-        "//pytorch/tokenizers:hf_tokenizer",
+        "//pytorch/tokenizers:hf_tokenizer",
     ] + (_get_operator_lib(aten)) + ([
         # Vulkan API currently cannot build on some platforms (e.g. Apple, FBCODE)
         # Therefore enable it explicitly for now to avoid failing tests

@@ -49,6 +49,13 @@ set(runner_deps executorch extension_data_loader extension_module

 target_link_libraries(extension_llm_runner PUBLIC ${runner_deps})

+target_include_directories(
+  extension_llm_runner
+  PUBLIC
+  ${EXECUTORCH_ROOT}/extension/llm/tokenizers/third-party/llama.cpp-unicode/include
+  ${EXECUTORCH_ROOT}/extension/llm/tokenizers/third-party/llama.cpp-unicode/src
+)
+
 target_include_directories(
   extension_llm_runner INTERFACE ${_common_include_directories}
   ${EXECUTORCH_ROOT}/extension/llm/tokenizers/include

@@ -28,6 +28,7 @@ add_subdirectory(
Original file line number Diff line number Diff line change @@ -28,6 +28,7 @@ add_subdirectory(
28
28
${CMAKE_CURRENT_SOURCE_DIR} /../tokenizers/third-party/re2
29
29
${CMAKE_CURRENT_BINARY_DIR} /re2
30
30
)
31
+
31
32
set (CMAKE_POSITION_INDEPENDENT_CODE ${_pic_flag} )
32
33
33
34
list (TRANSFORM _extension_llm_tokenizer__srcs PREPEND "${EXECUTORCH_ROOT} /" )