
Commit c615cc9

larryliu0820 authored and facebook-github-bot committed
Remove old tokenizer/ directory in ExecuTorch (#9728)
Summary:
X-link: pytorch-labs/tokenizers#39
Pull Request resolved: #9728
See what happens
Differential Revision: D72007597
1 parent 65ebabb commit c615cc9


41 files changed: +23 -130943 lines

.ci/scripts/test_phi_3_mini.sh

Lines changed: 1 addition & 1 deletion
@@ -56,7 +56,7 @@ cmake_build_phi_3_mini() {
 prepare_tokenizer() {
   echo "Downloading and converting tokenizer.model"
   wget -O tokenizer.model "https://huggingface.co/microsoft/Phi-3-mini-128k-instruct/resolve/main/tokenizer.model?download=true"
-  $PYTHON_EXECUTABLE -m executorch.extension.llm.tokenizer.tokenizer -t tokenizer.model -o tokenizer.bin
+  $PYTHON_EXECUTABLE -m pytorch_tokenizers.tools.llama2c.convert -t tokenizer.model -o tokenizer.bin
 }

 # Export phi-3-mini model to pte
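
The converter that produces tokenizer.bin now ships in the pytorch_tokenizers package instead of executorch.extension.llm.tokenizer. As a minimal sketch, the same conversion could be driven from Python through the module shown in the diff; the subprocess wrapper and the tokenizer.model / tokenizer.bin paths are illustrative assumptions, while the module name and flags come straight from the updated script.

import subprocess
import sys

# Convert a SentencePiece tokenizer.model into the llama2c-style tokenizer.bin
# using the relocated converter module (same -t/-o flags as the CI script above).
# Paths are placeholders.
subprocess.run(
    [
        sys.executable,
        "-m",
        "pytorch_tokenizers.tools.llama2c.convert",
        "-t", "tokenizer.model",
        "-o", "tokenizer.bin",
    ],
    check=True,
)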

CMakeLists.txt

Lines changed: 1 addition & 1 deletion
@@ -751,7 +751,7 @@ if(EXECUTORCH_BUILD_EXTENSION_FLAT_TENSOR)
 endif()

 if(EXECUTORCH_BUILD_EXTENSION_LLM)
-  add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/extension/llm/tokenizer)
+  add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/extension/llm/tokenizers)
 endif()

 if(EXECUTORCH_BUILD_EXTENSION_MODULE)

examples/models/llama/TARGETS

Lines changed: 1 addition & 1 deletion
@@ -202,7 +202,7 @@ runtime.python_library(
         ":export_library",
         "//executorch/examples/models/llama/tokenizer:tiktoken_py",
         "//executorch/extension/llm/export:export_lib",
-        "//executorch/extension/llm/tokenizer:tokenizer_py_lib",
+        "//pytorch/tokenizers/pytorch_tokenizers:tokenizers",
         "//executorch/extension/pybindings:portable_lib",
     ],
 )

examples/models/llama/eval_llama_lib.py

Lines changed: 4 additions & 4 deletions
@@ -15,13 +15,13 @@
 from executorch.examples.models.llama.export_llama_lib import (
     get_quantizer_and_quant_params,
 )
-from executorch.examples.models.llama.tokenizer.tiktoken import Tokenizer as Tiktoken
+from pytorch_tokenizers.tiktoken import TiktokenTokenizer as Tiktoken

 from executorch.extension.llm.export.builder import LLMEdgeManager
-from executorch.extension.llm.tokenizer.tokenizer import (
-    Tokenizer as SentencePieceTokenizer,
+from pytorch_tokenizers.llama2c import (
+    Llama2cTokenizer as SentencePieceTokenizer,
 )
-from executorch.extension.llm.tokenizer.utils import get_tokenizer
+from pytorch_tokenizers import get_tokenizer
 from lm_eval.evaluator import simple_evaluate
 from torch.nn import CrossEntropyLoss
 from tqdm import tqdm
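
For orientation, a hedged sketch of the renamed Python classes at their new import paths. The aliases mirror the diff above; the constructor and encode/decode signatures are assumptions carried over from the old executorch tokenizer classes and are not shown in this commit.

from pytorch_tokenizers.llama2c import Llama2cTokenizer
from pytorch_tokenizers.tiktoken import TiktokenTokenizer  # tiktoken-based counterpart

# Llama2cTokenizer replaces executorch.extension.llm.tokenizer.tokenizer.Tokenizer;
# "tokenizer.model" is a placeholder SentencePiece model path, and the
# encode/decode signatures below are assumed, not taken from this diff.
sp_tokenizer = Llama2cTokenizer("tokenizer.model")
token_ids = sp_tokenizer.encode("hello world", bos=True, eos=False)
print(sp_tokenizer.decode(token_ids))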

examples/models/llama/evaluate/eager_eval.py

Lines changed: 3 additions & 3 deletions
@@ -8,10 +8,10 @@
 from typing import Optional, Union

 import torch
-from executorch.examples.models.llama.tokenizer.tiktoken import Tokenizer as Tiktoken
-from executorch.extension.llm.tokenizer.tokenizer import (
-    Tokenizer as SentencePieceTokenizer,
+from pytorch_tokenizers.llama2c import (
+    Llama2cTokenizer as SentencePieceTokenizer,
 )
+from pytorch_tokenizers.tiktoken import TiktokenTokenizer as Tiktoken

 from lm_eval.models.huggingface import HFLM as eval_wrapper

examples/models/llama/runner/generation.py

Lines changed: 1 addition & 1 deletion
@@ -10,7 +10,7 @@

 import torch

-from executorch.extension.llm.tokenizer.utils import get_tokenizer
+from pytorch_tokenizers import get_tokenizer


 def sample_top_p(probs, p):
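
Call sites that used executorch.extension.llm.tokenizer.utils.get_tokenizer now import the helper from the pytorch_tokenizers package root. A short sketch under the assumption that it keeps its old behavior of inspecting the tokenizer artifact and returning a matching implementation; the path is a placeholder.

from pytorch_tokenizers import get_tokenizer

# Assumed behavior: return a SentencePiece- or tiktoken-backed tokenizer
# depending on the file at the given (placeholder) path.
tokenizer = get_tokenizer("tokenizer.model")
print(type(tokenizer).__name__)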

examples/models/llama/tokenizer/targets.bzl

Lines changed: 0 additions & 1 deletion
@@ -16,7 +16,6 @@ def define_common_targets():
        ],
        exported_deps = [
            "//pytorch/tokenizers:tiktoken",
-            "//executorch/extension/llm/tokenizer:tiktoken",  # TODO: remove
        ],
        visibility = [
            "@EXECUTORCH_CLIENTS",

examples/models/llama/tokenizer/test/test_tiktoken.cpp

Lines changed: 5 additions & 5 deletions
@@ -10,7 +10,7 @@

 #include <vector>

-#include <executorch/extension/llm/tokenizer/tiktoken.h>
+#include <pytorch/tokenizers/tiktoken.h>

 #include <gtest/gtest.h>

@@ -21,9 +21,9 @@
 using namespace ::testing;

 using ::example::Version;
-using ::executorch::extension::llm::Tokenizer;
-using ::executorch::runtime::Error;
-using ::executorch::runtime::Result;
+using ::tokenizers::Tokenizer;
+using ::tokenizers::Error;
+using ::tokenizers::Result;

 static std::string get_resource_path(const std::string& name) {
 #ifdef EXECUTORCH_FB_BUCK

@@ -36,7 +36,7 @@ static std::string get_resource_path(const std::string& name) {
 class MultimodalTiktokenV5ExtensionTest : public Test {
  public:
   void SetUp() override {
-    tokenizer_ = std::make_unique<executorch::extension::llm::Tiktoken>(
+    tokenizer_ = std::make_unique<tokenizers::Tiktoken>(
         example::get_multimodal_special_tokens(), 0, 1);
     modelPath_ = get_resource_path("test_tiktoken_tokenizer.model");
   }

examples/models/llava/export_llava.py

Lines changed: 1 addition & 1 deletion
@@ -46,8 +46,8 @@
 )

 from executorch.extension.llm.export.builder import DType, LLMEdgeManager
-from executorch.extension.llm.tokenizer.tokenizer import Tokenizer
 from executorch.util.activation_memory_profiler import generate_memory_trace
+from pytorch_tokenizers.llama2c import Llama2cTokenizer as Tokenizer
 from torch.export import Dim
 from torch.nn.attention import SDPBackend

examples/qualcomm/oss_scripts/llama/CMakeLists.txt

Lines changed: 2 additions & 2 deletions
@@ -5,7 +5,7 @@
 # LICENSE file in the root directory of this source tree.

 # model sharding with custom op
-set(CUSTOM_OP_SRCS_FILE
+set(CUSTOM_OP_SRCS_FILE
   "${EXECUTORCH_SOURCE_DIR}/extension/llm/custom_ops/op_fallback.cpp"
 )
 add_library(custom_ops ${CUSTOM_OP_SRCS_FILE})

@@ -35,7 +35,7 @@ list(
 list(
   APPEND
   _llama_runner__srcs
-  ${CMAKE_CURRENT_SOURCE_DIR}/../../../../extension/llm/tokenizer/tiktoken.cpp
+  ${CMAKE_CURRENT_SOURCE_DIR}/../../../../extension/llm/tokenizers/src/tiktoken.cpp
   ${CMAKE_CURRENT_SOURCE_DIR}/../../../models/llama/tokenizer/llama_tiktoken.cpp
 )

extension/llm/export/TARGETS

Lines changed: 1 addition & 1 deletion
@@ -40,6 +40,6 @@ runtime.python_library(
         "//executorch/exir:lib",
         "//executorch/exir/backend:backend_details",
         "//executorch/extension/export_util:export_util",
-        "//executorch/extension/llm/tokenizer:tokenizer_py_lib",
+        "//pytorch/tokenizers/pytorch_tokenizers:tokenizers",
     ],
 )

extension/llm/export/builder.py

Lines changed: 1 addition & 1 deletion
@@ -35,7 +35,7 @@
 from executorch.extension.export_util.utils import export_to_edge, save_pte_program

 from executorch.extension.llm.export.export_passes import RemoveRedundantTransposes
-from executorch.extension.llm.tokenizer.utils import get_tokenizer
+from pytorch_tokenizers import get_tokenizer
 from torch.ao.quantization.quantize_pt2e import convert_pt2e, prepare_pt2e
 from torch.ao.quantization.quantizer import Quantizer
 from torch.ao.quantization.quantizer.composable_quantizer import ComposableQuantizer

extension/llm/tokenizer/CMakeLists.txt

Lines changed: 0 additions & 62 deletions
This file was deleted.

extension/llm/tokenizer/TARGETS

Lines changed: 0 additions & 8 deletions
This file was deleted.

extension/llm/tokenizer/__init__.py

Whitespace-only changes.
