
Commit 58cd3de
update INC version
1 parent 004707a

6 files changed (+9 lines, -6 lines)
Lines changed: 2 additions & 1 deletion
@@ -1,5 +1,6 @@
+git+https://github.com/intel/neural-compressor.git@old_api_examples
 datasets >= 1.17
 sentencepiece != 0.1.92
 protobuf
 intel-tensorflow == 2.10.0
-transformers
+transformers>=4.12.0

examples/optimization/tensorflow/huggingface/text-classification/distillation/requirements.txt

Lines changed: 1 addition & 0 deletions
@@ -1,3 +1,4 @@
+git+https://github.com/intel/neural-compressor.git@old_api_examples
 datasets >= 1.17
 sentencepiece != 0.1.92
 protobuf

examples/optimization/tensorflow/huggingface/text-classification/pruning/requirements.txt

Lines changed: 1 addition & 0 deletions
@@ -1,3 +1,4 @@
+git+https://github.com/intel/neural-compressor.git@old_api_examples
 datasets >= 1.17
 sentencepiece != 0.1.92
 protobuf

intel_extension_for_transformers/backends/neural_engine/test/pytest/test_quant_onnx_execute.py

Lines changed: 2 additions & 2 deletions
@@ -39,7 +39,7 @@ def test_onnx_qlinear_compile(self):
         input_1 = np.random.uniform(low=0, high=1, size=[1, 128]).astype('int32')
         input_2 = np.random.uniform(low=0, high=1, size=[1, 128]).astype('int32')
         # compile and execute qlinear model
-        qlinear_model_path = "/tf_dataset2/models/nlp_toolkit/qlinear/bert_mini_sst2_qlinear.onnx"
+        qlinear_model_path = "/home/tensorflow/localfile/nlptoolkit_ut_model/qlinear/bert_mini_sst2_qlinear.onnx"
         if is_win():
             qlinear_model_path = "D:\\dataset\\nlptoolkit_ut_model\\qlinear\\bert_mini_sst2_qlinear.onnx"
         os.environ['GLOG_minloglevel'] = '2'
@@ -49,7 +49,7 @@ def test_onnx_qlinear_compile(self):
         qlinear_output_dict = qlinear_model.inference([input_0, input_1, input_2])
         qlinear_output = list(qlinear_output_dict.values())[0]
         # compile and execute qdq model
-        qdq_model_path = "/tf_dataset2/models/nlp_toolkit/qlinear/bert_mini_sst2_qdq.onnx"
+        qdq_model_path = "/home/tensorflow/localfile/nlptoolkit_ut_model/qlinear/bert_mini_sst2_qdq.onnx"
         if is_win():
             qdq_model_path = "D:\\dataset\\nlptoolkit_ut_model\\qlinear\\bert_mini_sst2_qdq.onnx"
         self.assertTrue(os.path.exists(qdq_model_path),
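
For context on the two hunks above: the test only resolves a platform-dependent fixture path, checks that the file exists, then compiles and runs the ONNX model. A minimal sketch of that path-selection pattern, using a hypothetical resolve_model_path helper (the diff itself only shows the is_win() check, the hard-coded paths, the inference call, and the existence assertion):

import os
import platform


def is_win():
    # Same idea as the helper the test imports: detect a Windows host.
    return platform.system() == "Windows"


def resolve_model_path(name):
    # Hypothetical helper: Linux CI reads fixtures from the local directory
    # introduced in this commit, Windows CI reads them from D:\dataset.
    path = "/home/tensorflow/localfile/nlptoolkit_ut_model/qlinear/" + name
    if is_win():
        path = "D:\\dataset\\nlptoolkit_ut_model\\qlinear\\" + name
    return path


qlinear_model_path = resolve_model_path("bert_mini_sst2_qlinear.onnx")
qdq_model_path = resolve_model_path("bert_mini_sst2_qdq.onnx")
# As in the test, verify the fixtures exist before compiling, so a missing
# model file fails with a clear message instead of a compile error.
assert os.path.exists(qlinear_model_path), qlinear_model_path
assert os.path.exists(qdq_model_path), qdq_model_path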

requirements.txt

Lines changed: 1 addition & 1 deletion
@@ -1,4 +1,4 @@
-git+https://github.com/intel/neural-compressor.git@old_api_examples
+neural-compressor==1.14.2
 numpy
 packaging
 protobuf --no-binary=protobuf
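
The commit message ("update INC version") refers to this hunk: the root requirements file moves from the old_api_examples git branch to the pinned 1.14.2 release of Intel Neural Compressor. A quick way to confirm which version actually got installed, assuming the package exposes __version__ the way recent releases do:

import neural_compressor

# Expect "1.14.2" when this requirements.txt is installed as written.
print(neural_compressor.__version__)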

tests/requirements.txt

Lines changed: 2 additions & 2 deletions
@@ -1,13 +1,13 @@
 datasets>=1.17
 mlflow
-neural-compressor
+git+https://github.com/intel/neural-compressor.git@old_api_examples
 nlpaug
 sacremoses
 onnx>=1.10
 onnxruntime>=1.11
 onnxruntime-extensions
 --find-links https://download.pytorch.org/whl/torch_stable.html
 torch>=1.10.0
-transformers
+transformers<=4.19.2
 tensorflow==2.10.0
 torchprofile
