Merge pull request #815 from roboflow/fix/deserialization
Fix issue with serialisation of empty outputs
PawelPeczek-Roboflow authored Nov 15, 2024
2 parents 579562b + 66da30d commit bcff389
Showing 9 changed files with 453 additions and 30 deletions.
25 changes: 5 additions & 20 deletions .github/workflows/test.nvidia_t4.yml
@@ -34,61 +34,46 @@ jobs:
id: regression_tests
run: |
MINIMUM_FPS=25 FUNCTIONAL=true PORT=9101 SKIP_LMM_TEST=True API_KEY=${{ secrets.API_KEY }} asl_instance_segmentation_API_KEY=${{ secrets.ASL_INSTANCE_SEGMENTATION_API_KEY }} asl_poly_instance_seg_API_KEY=${{ secrets.ASL_POLY_INSTANCE_SEG_API_KEY }} bccd_favz3_API_KEY=${{ secrets.BCCD_FAVZ3_API_KEY }} bccd_i4nym_API_KEY=${{ secrets.BCCD_I4NYM_API_KEY }} cats_and_dogs_smnpl_API_KEY=${{ secrets.CATS_AND_DOGS_SMNPL_API_KEY }} coins_xaz9i_API_KEY=${{ secrets.COINS_XAZ9I_API_KEY }} melee_API_KEY=${{ secrets.MELEE_API_KEY }} yolonas_test_API_KEY=${{ secrets.YOLONAS_TEST_API_KEY }} python3 -m pytest tests/inference/integration_tests/
- name: 🚨 Show server logs on error
run: docker logs inference-test
if: ${{ steps.regression_tests.outcome != 'success' }}
- name: 🧹 Cleanup Test Docker - GPU
run: make stop_test_docker
if: success() || failure()
- name: 🔋 Start Test Docker - GPU
run: |
PORT=9101 INFERENCE_SERVER_REPO=roboflow-inference-server-gpu make start_test_docker_gpu
- name: 🧪 Regression CogVLM - GPU
id: cog_vlm_tests
run: |
PORT=9101 API_KEY=${{ secrets.API_KEY }} python3 -m pytest tests/inference/integration_tests/test_cogvlm.py
- name: 🚨 Show server logs on error
run: docker logs inference-test
if: ${{ steps.cog_vlm_tests.outcome != 'success' }}
- name: 🧹 Cleanup Test Docker - GPU
run: make stop_test_docker
if: success() || failure()
- name: 🔋 Start Test Docker - GPU
run: |
PORT=9101 INFERENCE_SERVER_REPO=roboflow-inference-server-gpu make start_test_docker_gpu
- name: 🧪 Regression Paligemma - GPU
id: paligemma_tests
run: |
PORT=9101 melee_API_KEY=${{ secrets.MELEE_API_KEY }} python3 -m pytest tests/inference/integration_tests/test_paligemma.py
- name: 🚨 Show server logs on error
run: docker logs inference-test
if: ${{ steps.paligemma_tests.outcome != 'success' }}
- name: 🧹 Cleanup Test Docker - GPU
run: make stop_test_docker
if: success() || failure()
- name: 🔋 Start Test Docker - GPU
run: |
PORT=9101 INFERENCE_SERVER_REPO=roboflow-inference-server-gpu make start_test_docker_gpu
- name: 🧪 Regression Florence - GPU
id: florence_tests
run: |
PORT=9101 melee_API_KEY=${{ secrets.MELEE_API_KEY }} python3 -m pytest tests/inference/integration_tests/test_florence.py
- name: 🚨 Show server logs on error
run: docker logs inference-test
if: ${{ steps.florence_tests.outcome != 'success' }}
- name: 🧹 Cleanup Test Docker - GPU
run: make stop_test_docker
if: success() || failure()
- name: 🔋 Start Test Docker - SAM2
run: |
PORT=9101 INFERENCE_SERVER_REPO=roboflow-inference-server-gpu make start_test_docker_gpu
- name: 🧪 Regression Tests - SAM2
id: sam2_tests
run: |
PORT=9101 API_KEY=${{ secrets.API_KEY }} SKIP_SAM2_TESTS=False python3 -m pytest tests/inference/integration_tests/test_sam2.py
- name: 🚨 Show server logs on error
run: docker logs inference-test
if: ${{ steps.sam2_tests.outcome != 'success' }}
- name: 🧹 Cleanup Test Docker - SAM2
run: make stop_test_docker
if: success() || failure()
- name: 🚨 Show server logs on error
run: |
docker logs inference-test
make stop_test_docker
if: failure()
2 changes: 1 addition & 1 deletion .github/workflows/test.nvidia_t4_parallel_server.yml
@@ -44,7 +44,7 @@ jobs:
IS_PARALLEL_SERVER=true SKIP_VISUALISATION_TESTS=true FUNCTIONAL=true PORT=9101 API_KEY=${{ secrets.API_KEY }} asl_instance_segmentation_API_KEY=${{ secrets.ASL_INSTANCE_SEGMENTATION_API_KEY }} asl_poly_instance_seg_API_KEY=${{ secrets.ASL_POLY_INSTANCE_SEG_API_KEY }} bccd_favz3_API_KEY=${{ secrets.BCCD_FAVZ3_API_KEY }} bccd_i4nym_API_KEY=${{ secrets.BCCD_I4NYM_API_KEY }} cats_and_dogs_smnpl_API_KEY=${{ secrets.CATS_AND_DOGS_SMNPL_API_KEY }} coins_xaz9i_API_KEY=${{ secrets.COINS_XAZ9I_API_KEY }} melee_API_KEY=${{ secrets.MELEE_API_KEY }} yolonas_test_API_KEY=${{ secrets.YOLONAS_TEST_API_KEY }} python3 -m pytest tests/inference/integration_tests/regression_test.py tests/inference/integration_tests/batch_regression_test.py
- name: 🚨 Show server logs on error
run: docker logs inference-test
if: ${{ steps.regression_tests.outcome != 'success' }}
if: failure()
- name: 🧹 Cleanup Test Docker - Parallel GPU
run: make stop_test_docker
if: success() || failure()
@@ -177,6 +177,8 @@ def serialize_data_piece(
kind: Union[List[Union[Kind, str]], Dict[str, List[Union[Kind, str]]]],
kinds_serializers: Dict[str, Callable[[Any], Any]],
) -> Any:
if data_piece is None:
return None
if isinstance(kind, dict):
if not isinstance(data_piece, dict):
raise AssumptionError(
@@ -210,6 +212,8 @@ def serialize_single_workflow_result_field(
kind: List[Union[Kind, str]],
kinds_serializers: Dict[str, Callable[[Any], Any]],
) -> Any:
if value is None:
return None
kinds_without_serializer = set()
for single_kind in kind:
kind_name = single_kind.name if isinstance(single_kind, Kind) else kind
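These two guards are the substance of the fix: a workflow output field that is None (an empty output) now serializes to None instead of being dispatched to a kind-specific serializer that expects a concrete value. A minimal sketch of the guard under simplified signatures (the registry and names below are illustrative, not the library's real API):

from typing import Any, Callable, Dict

def serialize_field(
    value: Any,
    serializers: Dict[str, Callable[[Any], Any]],
    kind_name: str,
) -> Any:
    # Mirrors the added guard: an empty output short-circuits to None
    # instead of reaching a serializer that assumes a concrete value.
    if value is None:
        return None
    return serializers[kind_name](value)

serializers = {"dictionary": dict}
assert serialize_field(None, serializers, "dictionary") is None
assert serialize_field([("top", "dog")], serializers, "dictionary") == {"top": "dog"}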
@@ -173,6 +173,45 @@ def test_detection_plus_classification_workflow_when_minimal_valid_input_provide
], "Expected predictions to be as measured in reference run"


def test_detection_plus_classification_workflow_when_minimal_valid_input_provided_and_serialization_requested(
model_manager: ModelManager,
dogs_image: np.ndarray,
roboflow_api_key: str,
) -> None:
# given
workflow_init_parameters = {
"workflows_core.model_manager": model_manager,
"workflows_core.api_key": roboflow_api_key,
"workflows_core.step_execution_mode": StepExecutionMode.LOCAL,
}
execution_engine = ExecutionEngine.init(
workflow_definition=DETECTION_PLUS_CLASSIFICATION_WORKFLOW_V2_BLOCKS,
init_parameters=workflow_init_parameters,
max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS,
)

# when
result = execution_engine.run(
runtime_parameters={
"image": dogs_image,
},
serialize_results=True,
)

assert isinstance(result, list), "Expected list to be delivered"
assert len(result) == 1, "Expected 1 element in the output for one input image"
assert set(result[0].keys()) == {
"predictions",
}, "Expected all declared outputs to be delivered"
assert (
len(result[0]["predictions"]) == 2
), "Expected 2 dogs crops on input image, hence 2 nested classification results"
assert [result[0]["predictions"][0]["top"], result[0]["predictions"][1]["top"]] == [
"116.Parson_russell_terrier",
"131.Wirehaired_pointing_griffon",
], "Expected predictions to be as measured in reference run"


def test_detection_plus_classification_workflow_when_nothing_gets_predicted(
model_manager: ModelManager,
crowd_image: np.ndarray,
@@ -281,3 +320,38 @@ def test_detection_plus_classification_workflow_when_nothing_gets_predicted_and_
assert (
len(result[0]["predictions"]) == 0
), "Expected no prediction from 2nd model, as no dogs detected"


def test_detection_plus_classification_workflow_when_nothing_gets_predicted_and_serialization_requested(
model_manager: ModelManager,
crowd_image: np.ndarray,
roboflow_api_key: str,
) -> None:
# given
workflow_init_parameters = {
"workflows_core.model_manager": model_manager,
"workflows_core.api_key": roboflow_api_key,
"workflows_core.step_execution_mode": StepExecutionMode.LOCAL,
}
execution_engine = ExecutionEngine.init(
workflow_definition=DETECTION_PLUS_CLASSIFICATION_PLUS_CONSENSUS_WORKFLOW,
init_parameters=workflow_init_parameters,
max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS,
)

# when
result = execution_engine.run(
runtime_parameters={
"image": crowd_image,
},
serialize_results=True,
)

assert isinstance(result, list), "Expected list to be delivered"
assert len(result) == 1, "Expected 1 element in the output for one input image"
assert set(result[0].keys()) == {
"predictions",
}, "Expected all declared outputs to be delivered"
assert (
len(result[0]["predictions"]) == 0
), "Expected no prediction from 2nd model, as no dogs detected"
@@ -1,18 +1,10 @@
import numpy as np
import pytest
import supervision as sv

from inference.core.env import WORKFLOWS_MAX_CONCURRENT_STEPS
from inference.core.managers.base import ModelManager
from inference.core.workflows.core_steps.common.entities import StepExecutionMode
from inference.core.workflows.core_steps.common.query_language.errors import (
EvaluationEngineError,
)
from inference.core.workflows.errors import RuntimeInputError, StepExecutionError
from inference.core.workflows.execution_engine.core import ExecutionEngine
from tests.workflows.integration_tests.execution.workflows_gallery_collector.decorators import (
add_to_workflows_gallery,
)

TOP_PREDICTION_WORKFLOW = {
"version": "1.0",