This repository was archived by the owner on Jun 3, 2025. It is now read-only.

Commit ee0be47

Bump pydantic support to v2 (#1645)
* update setup.py
* changes from bump-pydantic
* Style and some more changes
* update test client usage with fastapi update
* fix tests/test_pipeline_benchmark.py
* pin fastapi version
* Use pydantic v1 for Numpy Schemas
* Fix tests/deepsparse/pipelines/test_bucketing.py
* Deprecate haystack
* Remove launch.json
1 parent 3408ec8 commit ee0be47


52 files changed (+133, -1736 lines)

.github/workflows/test-check.yaml

Lines changed: 1 addition & 1 deletion
@@ -97,6 +97,6 @@ jobs:
       - name: "Clean sparsezoo directory"
         run: rm -r sparsezoo/
       - name: ⚙️ Install dependencies
-        run: pip install .[dev,haystack]
+        run: pip install .[dev]
       - name: Run integrations tests
         run: make test_integrations

MANIFEST.in

Lines changed: 0 additions & 1 deletion
@@ -1,6 +1,5 @@
 include LICENSE
 include utils/artifacts.py
-include src/deepsparse/transformers/haystack/haystack_reqs.txt
 recursive-include src/deepsparse/avx2 *
 recursive-include src/deepsparse/avx512 *
 recursive-include src/deepsparse/neon *

examples/vit_pose/schemas.py

Lines changed: 2 additions & 4 deletions
@@ -15,7 +15,7 @@
 from typing import List

 import numpy
-from pydantic import BaseModel
+from pydantic import BaseModel, ConfigDict

 from deepsparse.pipelines.computer_vision import ComputerVisionSchema

@@ -32,6 +32,4 @@ class VitPoseInput(ComputerVisionSchema):

 class VitPoseOutput(BaseModel):
     out: List[numpy.ndarray]
-
-    class Config:
-        arbitrary_types_allowed = True
+    model_config = ConfigDict(arbitrary_types_allowed=True)
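
For readers following the migration, here is a minimal sketch of the config pattern this hunk applies (the model name is illustrative, not from the repo): pydantic v1's nested `class Config` becomes a `model_config = ConfigDict(...)` assignment in v2.

from typing import List

import numpy
from pydantic import BaseModel, ConfigDict


class ArrayOutput(BaseModel):  # hypothetical model, not part of deepsparse
    # v2 replacement for `class Config: arbitrary_types_allowed = True`;
    # needed so pydantic accepts non-pydantic types such as numpy.ndarray
    model_config = ConfigDict(arbitrary_types_allowed=True)

    out: List[numpy.ndarray]


# ArrayOutput(out=[numpy.zeros(3)]) validates without raising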

integrations/haystack/README.md

Lines changed: 0 additions & 307 deletions
This file was deleted.

integrations/haystack/tests/test_smoke.py

Lines changed: 0 additions & 111 deletions
This file was deleted.

integrations/test_placeholder.py

Lines changed: 14 additions & 0 deletions
@@ -0,0 +1,14 @@
+
+def test_placeholder():
+    """
+    Needed to make the test suite run and not throw
+    an error about no tests being found when
+    `make test_integrations` is used.
+
+    The error would look like this:
+    make: *** [Makefile:61: test_integrations] Error 5
+
+    More information can be found here:
+    https://github.com/pytest-dev/pytest/issues/2393
+    """
+    pass

setup.py

Lines changed: 2 additions & 21 deletions
@@ -77,17 +77,10 @@
 ]


-def _parse_requirements_file(file_path):
-    with open(file_path, "r") as requirements_file:
-        lines = requirements_file.read().splitlines()
-
-    return [line for line in lines if len(line) > 0 and line[0] != "#"]
-
-
 _deps = [
     "numpy>=1.16.3",
     "onnx>=1.5.0,<1.15.0",
-    "pydantic>=1.8.2,<2.0.0",
+    "pydantic>=2.0.0,<2.8.0",
     "requests>=2.0.0",
     "tqdm>=4.0.0",
     "protobuf>=3.12.2",
@@ -122,7 +115,7 @@ def _parse_requirements_file(file_path):
 ]
 _server_deps = [
     "uvicorn>=0.15.0",
-    "fastapi>=0.70.0,<0.87.0",
+    "fastapi>=0.100.0,<0.111",
     "requests>=2.26.0",
     "python-multipart>=0.0.5",
     "prometheus-client>=0.14.1",
@@ -153,17 +146,6 @@ def _parse_requirements_file(file_path):
 ]
 _sentence_transformers_integration_deps = ["optimum-deepsparse"] + _torch_deps

-# haystack dependencies are installed from a requirements file to avoid
-# conflicting versions with NM's deepsparse/transformers
-_haystack_requirements_file_path = os.path.join(
-    os.path.dirname(os.path.realpath(__file__)),
-    "src",
-    "deepsparse",
-    "transformers",
-    "haystack",
-    "haystack_reqs.txt",
-)
-_haystack_integration_deps = _parse_requirements_file(_haystack_requirements_file_path)
 _clip_deps = [
     "open_clip_torch==2.20.0",
     "transformers<4.40",
@@ -270,7 +252,6 @@ def _setup_extras() -> Dict:
         "image_classification": _computer_vision_deps,
         "yolo": _computer_vision_deps,
         "yolov5": _computer_vision_deps,
-        "haystack": _haystack_integration_deps,
         "openpifpaf": _openpifpaf_integration_deps,
         "yolov8": _yolov8_integration_deps,
         "transformers": _transformers_integration_deps,

src/deepsparse/benchmark/data_creation.py

Lines changed: 3 additions & 7 deletions
@@ -17,7 +17,7 @@
 import random
 import string
 from os import path
-from typing import Dict, List, Tuple
+from typing import Dict, List, Tuple, get_args

 import numpy

@@ -58,15 +58,11 @@ def get_input_schema_type(pipeline: Pipeline) -> str:
     if SchemaType.TEXT_SEQ in input_schema_requirements:
         if input_schema_fields.get(SchemaType.TEXT_SEQ).alias == SchemaType.TEXT_PROMPT:
             return SchemaType.TEXT_PROMPT
-        sequence_types = [
-            f.outer_type_ for f in input_schema_fields[SchemaType.TEXT_SEQ].sub_fields
-        ]
+        sequence_types = get_args(input_schema_fields[SchemaType.TEXT_SEQ].annotation)
         if List[str] in sequence_types:
             return SchemaType.TEXT_SEQ
     elif SchemaType.TEXT_INPUT in input_schema_requirements:
-        sequence_types = [
-            f.outer_type_ for f in input_schema_fields[SchemaType.TEXT_INPUT].sub_fields
-        ]
+        sequence_types = get_args(input_schema_fields[SchemaType.TEXT_INPUT].annotation)
         if List[str] in sequence_types:
             return SchemaType.TEXT_INPUT
     elif SchemaType.QUESTION in input_schema_requirements:
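
As a hedged illustration of the introspection change above (the schema below is hypothetical, not the deepsparse one): pydantic v1 exposed parsed sub-types via `field.sub_fields` and `outer_type_`, while v2 keeps the raw type hint on `FieldInfo.annotation`, which `typing.get_args` can unpack.

from typing import List, Union, get_args

from pydantic import BaseModel


class TextInputSchema(BaseModel):  # hypothetical example schema
    sequences: Union[str, List[str]]


# v2: model_fields maps field names to FieldInfo objects carrying the original annotation
field_info = TextInputSchema.model_fields["sequences"]
sequence_types = get_args(field_info.annotation)
print(List[str] in sequence_types)  # True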

src/deepsparse/clip/decoder_pipeline.py

Lines changed: 2 additions & 2 deletions
@@ -30,10 +30,10 @@ class CLIPDecoderInput(BaseModel):
     """

     text_embeddings: Any = Field(
-        description="np.array of text emebddings from the " "text branch"
+        None, description="np.array of text emebddings from the " "text branch"
     )
     image_embeddings: Any = Field(
-        description="np.array of image embeddings from the " "visual branch"
+        None, description="np.array of image embeddings from the " "visual branch"
     )

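
A minimal sketch of the `Field` change applied here and in results.py below (the model name is illustrative): pydantic v1 gave fields annotated `Any` or `Optional[...]` an implicit default of `None`, while v2 treats every field without a default as required, so the default is now passed explicitly as the first positional argument of `Field`.

from typing import Any, Optional

from pydantic import BaseModel, Field


class EmbeddingsInput(BaseModel):  # hypothetical model, not from deepsparse
    # v1 accepted Field(description=...) and inferred these fields as optional;
    # v2 needs an explicit default for them to stay optional
    text_embeddings: Any = Field(None, description="optional array of text embeddings")
    split: Optional[str] = Field(None, description="optional dataset split name")


print(EmbeddingsInput())  # validates, both fields default to None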

src/deepsparse/evaluation/results.py

Lines changed: 9 additions & 8 deletions
@@ -36,15 +36,15 @@ class Metric(BaseModel):


 class Dataset(BaseModel):
-    type: Optional[str] = Field(description="Type of dataset")
+    type: Optional[str] = Field(None, description="Type of dataset")
     name: str = Field(description="Name of the dataset")
-    config: Any = Field(description="Configuration for the dataset")
-    split: Optional[str] = Field(description="Split of the dataset")
+    config: Any = Field(None, description="Configuration for the dataset")
+    split: Optional[str] = Field(None, description="Split of the dataset")


 class EvalSample(BaseModel):
-    input: Any = Field(description="Sample input to the model")
-    output: Any = Field(description="Sample output from the model")
+    input: Any = Field(None, description="Sample input to the model")
+    output: Any = Field(None, description="Sample output from the model")


 class Evaluation(BaseModel):
@@ -55,7 +55,7 @@ class Evaluation(BaseModel):
     dataset: Dataset = Field(description="Dataset that the evaluation was performed on")
     metrics: List[Metric] = Field(description="List of metrics for the evaluation")
     samples: Optional[List[EvalSample]] = Field(
-        description="List of samples for the evaluation"
+        None, description="List of samples for the evaluation"
     )

@@ -64,8 +64,9 @@ class Result(BaseModel):
         description="Evaluation result represented in the unified, structured format"
     )
     raw: Any = Field(
+        None,
         description="Evaluation result represented in the raw format "
-        "(characteristic for the specific evaluation integration)"
+        "(characteristic for the specific evaluation integration)",
     )

@@ -97,7 +98,7 @@ def _save_to_json(result: Result, save_path: str):


 def _save_to_yaml(result: Result, save_path: str):
-    _save(yaml.dump(result.dict()), save_path, expected_ext=".yaml")
+    _save(yaml.dump(result.model_dump()), save_path, expected_ext=".yaml")


 def _save(data: str, save_path: str, expected_ext: str):
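
Finally, a short hedged sketch of the serialization rename in the last hunk (the model fields below are illustrative stand-ins, not necessarily those of the real Metric class): v1's `.dict()` is deprecated in v2 in favor of `.model_dump()`, and the resulting plain dict serializes with yaml.dump the same way.

import yaml
from pydantic import BaseModel


class Metric(BaseModel):  # illustrative stand-in for the result models above
    name: str
    value: float


metric = Metric(name="accuracy", value=0.91)
# v2 replacement for yaml.dump(metric.dict())
print(yaml.dump(metric.model_dump()))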
