diff --git a/roboflow/__init__.py b/roboflow/__init__.py
index 9e6996cd..c5405e5b 100644
--- a/roboflow/__init__.py
+++ b/roboflow/__init__.py
@@ -15,7 +15,7 @@
 from roboflow.models import CLIPModel, GazeModel  # noqa: F401
 from roboflow.util.general import write_line
 
-__version__ = "1.2.7"
+__version__ = "1.2.8"
 
 
 def check_key(api_key, model, notebook, num_retries=0):
diff --git a/roboflow/core/workspace.py b/roboflow/core/workspace.py
index ec73e910..a1553e54 100644
--- a/roboflow/core/workspace.py
+++ b/roboflow/core/workspace.py
@@ -287,6 +287,7 @@ def upload_dataset(
         project_type: str = "object-detection",
         batch_name=None,
         num_retries=0,
+        is_prediction=False,
     ):
         """
         Upload a dataset to Roboflow.
@@ -298,6 +299,9 @@
             dataset_format (str): format of the dataset (`voc`, `yolov8`, `yolov5`)
             project_license (str): license of the project (set to `private` for private projects, only available for paid customers)
             project_type (str): type of the project (only `object-detection` is supported)
+            batch_name (str, optional): name of the batch to upload the images to. Defaults to an automatically generated value.
+            num_retries (int, optional): number of times to retry uploading an image if the upload fails. Defaults to 0.
+            is_prediction (bool, optional): whether the annotations provided in the dataset are predictions and not ground truth. Defaults to False.
         """  # noqa: E501 // docs
         if dataset_format != "NOT_USED":
             print("Warning: parameter 'dataset_format' is deprecated and will be removed in a future release")
@@ -352,6 +356,7 @@ def _upload_image(imagedesc):
                 sequence_number=imagedesc.get("index"),
                 sequence_size=len(images),
                 num_retry_uploads=num_retries,
+                is_prediction=is_prediction,
             )
 
             return image, upload_time, upload_retry_attempts
diff --git a/tests/test_project.py b/tests/test_project.py
index 068cc974..eae40b5c 100644
--- a/tests/test_project.py
+++ b/tests/test_project.py
@@ -373,6 +373,50 @@ def test_project_upload_dataset(self):
                 "params": {},
                 "assertions": {"save_annotation": {"count": 1}},
             },
+            {
+                "name": "with_predictions_flag_true",
+                "dataset": [
+                    {"file": "pred1.jpg", "split": "train", "annotationfile": {"file": "pred1.xml"}},
+                    {"file": "pred2.jpg", "split": "valid", "annotationfile": {"file": "pred2.xml"}},
+                ],
+                "params": {"is_prediction": True},
+                "assertions": {
+                    "upload": {"count": 2, "kwargs": {"is_prediction": True}},
+                    "save_annotation": {"count": 2},
+                },
+            },
+            {
+                "name": "with_predictions_flag_false",
+                "dataset": [
+                    {"file": "gt1.jpg", "split": "train", "annotationfile": {"file": "gt1.xml"}},
+                ],
+                "params": {"is_prediction": False},
+                "assertions": {
+                    "upload": {"count": 1, "kwargs": {"is_prediction": False}},
+                    "save_annotation": {"count": 1},
+                },
+            },
+            {
+                "name": "predictions_with_batch",
+                "dataset": [
+                    {"file": "batch_pred.jpg", "split": "train", "annotationfile": {"file": "batch_pred.xml"}},
+                ],
+                "params": {
+                    "is_prediction": True,
+                    "batch_name": "prediction-batch",
+                    "num_retries": 2,
+                },
+                "assertions": {
+                    "upload": {
+                        "count": 1,
+                        "kwargs": {
+                            "is_prediction": True,
+                            "batch_name": "prediction-batch",
+                            "num_retry_uploads": 2,
+                        },
+                    },
+                },
+            },
         ]
 
         error_cases = [