diff --git a/.github/workflows/run-end-to-end.yml b/.github/workflows/run-end-to-end.yml index 1b524f26b56..da704bf6536 100644 --- a/.github/workflows/run-end-to-end.yml +++ b/.github/workflows/run-end-to-end.yml @@ -163,6 +163,9 @@ jobs: env: SYSTEM_TEST_BUILD_ATTEMPTS: 3 + - name: Run INTEGRATION_FRAMEWORKS scenario + if: always() && steps.build.outcome == 'success' && contains(inputs.scenarios, '"INTEGRATION_FRAMEWORKS"') + run: ./run.sh INTEGRATION_FRAMEWORKS -L ${{ inputs.library }} --weblog ${{ inputs.weblog }} - name: Run APPSEC_STANDALONE scenario if: always() && steps.build.outcome == 'success' && contains(inputs.scenarios, '"APPSEC_STANDALONE"') run: ./run.sh APPSEC_STANDALONE diff --git a/conftest.py b/conftest.py index 9d4726b641b..9d1cfb5dd39 100644 --- a/conftest.py +++ b/conftest.py @@ -127,6 +127,15 @@ def pytest_addoption(parser: pytest.Parser) -> None: help="An file containing a valid Github token to perform API calls", ) + # Integration frameworks scenario options + parser.addoption( + "--weblog", + type=str, + action="store", + default=None, + help="Framework to test (e.g. 
'openai@2.0.0' for INTEGRATION_FRAMEWORKS scenario)", + ) + # report data to feature parity dashboard parser.addoption( "--report-run-url", type=str, action="store", default=None, help="URI of the run who produced the report" diff --git a/tests/integration_frameworks/__init__.py b/tests/integration_frameworks/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/integration_frameworks/conftest.py b/tests/integration_frameworks/conftest.py new file mode 100644 index 00000000000..76a7209eabd --- /dev/null +++ b/tests/integration_frameworks/conftest.py @@ -0,0 +1,57 @@ +from collections.abc import Generator + +import pytest + +from utils.docker_fixtures import ( + FrameworkTestClientApi, + TestAgentAPI, +) +from utils import context, scenarios, logger + + +@pytest.fixture +def test_id(request: pytest.FixtureRequest) -> str: + import uuid + + result = str(uuid.uuid4())[0:6] + logger.info(f"Test {request.node.nodeid} ID: {result}") + return result + + +@pytest.fixture +def library_env() -> dict[str, str]: + return {} + + +@pytest.fixture +def test_agent( + test_id: str, + worker_id: str, + request: pytest.FixtureRequest, +) -> Generator[TestAgentAPI, None, None]: + with scenarios.integration_frameworks.get_test_agent_api( + request=request, + worker_id=worker_id, + test_id=test_id, + ) as result: + yield result + + +@pytest.fixture +def test_client( + request: pytest.FixtureRequest, + library_env: dict[str, str], + test_id: str, + worker_id: str, + test_agent: TestAgentAPI, +) -> Generator[FrameworkTestClientApi, None, None]: + context.scenario.parametrized_tests_metadata[request.node.nodeid] = dict(library_env) + + with scenarios.integration_frameworks.get_client( + request=request, + library_env=library_env, + worker_id=worker_id, + test_id=test_id, + test_agent=test_agent, + ) as client: + yield client diff --git a/tests/integration_frameworks/test_openai.py b/tests/integration_frameworks/test_openai.py new file mode 100644 index 
00000000000..7bcab44e1d8 --- /dev/null +++ b/tests/integration_frameworks/test_openai.py @@ -0,0 +1,34 @@ +from utils import context, missing_feature, scenarios, features + +import pytest + +from utils.docker_fixtures import FrameworkTestClientApi, TestAgentAPI + + +@features.llm_observability +@scenarios.integration_frameworks +class TestOpenAiAPM: + @missing_feature(context.library == "nodejs", reason="Node.js openai server not implemented yet") + @missing_feature(context.library == "java", reason="Java does not auto-instrument OpenAI") + @pytest.mark.parametrize("stream", [True, False]) + def test_chat_completion(self, test_agent: TestAgentAPI, test_client: FrameworkTestClientApi, *, stream: bool): + with test_agent.vcr_context(stream=stream): + test_client.request( + "POST", + "/chat/completions", + dict( + model="gpt-3.5-turbo", + messages=[dict(role="user", content="Hello OpenAI!")], + parameters=dict( + max_tokens=35, + stream=stream, + ), + ), + ) + + traces = test_agent.wait_for_num_traces(num=1) + span = traces[0][0] + + assert span["name"] == "openai.request" + assert span["resource"] == "createChatCompletion" + assert span["meta"]["openai.request.model"] == "gpt-3.5-turbo" diff --git a/tests/integration_frameworks/utils/vcr-cassettes/openai/test_chat_completion_stream_False_openai_chat_completions_post_ed494875.yaml b/tests/integration_frameworks/utils/vcr-cassettes/openai/test_chat_completion_stream_False_openai_chat_completions_post_ed494875.yaml new file mode 100644 index 00000000000..4aee18bdc2e --- /dev/null +++ b/tests/integration_frameworks/utils/vcr-cassettes/openai/test_chat_completion_stream_False_openai_chat_completions_post_ed494875.yaml @@ -0,0 +1,142 @@ +interactions: +- request: + body: '{"messages":[{"role":"user","content":"Hello OpenAI!"}],"model":"gpt-3.5-turbo","max_tokens":35,"stream":false}' + headers: + ? !!python/object/apply:multidict._multidict.istr + - Accept + : - application/json + ? 
!!python/object/apply:multidict._multidict.istr + - Accept-Encoding + : - gzip, deflate + ? !!python/object/apply:multidict._multidict.istr + - Connection + : - keep-alive + Content-Length: + - '111' + ? !!python/object/apply:multidict._multidict.istr + - Content-Type + : - application/json + ? !!python/object/apply:multidict._multidict.istr + - User-Agent + : - OpenAI/Python 2.0.0 + ? !!python/object/apply:multidict._multidict.istr + - X-Stainless-Arch + : - arm64 + ? !!python/object/apply:multidict._multidict.istr + - X-Stainless-Async + : - 'false' + ? !!python/object/apply:multidict._multidict.istr + - X-Stainless-Lang + : - python + ? !!python/object/apply:multidict._multidict.istr + - X-Stainless-OS + : - Linux + ? !!python/object/apply:multidict._multidict.istr + - X-Stainless-Package-Version + : - 2.0.0 + ? !!python/object/apply:multidict._multidict.istr + - X-Stainless-Runtime + : - CPython + ? !!python/object/apply:multidict._multidict.istr + - X-Stainless-Runtime-Version + : - 3.11.11 + ? !!python/object/apply:multidict._multidict.istr + - traceparent + : - 00-68f82854000000005a03a8ae58726e95-e6896535f60c2b05-01 + ? !!python/object/apply:multidict._multidict.istr + - tracestate + : - dd=p:e6896535f60c2b05;s:1;t.dm:-0;t.tid:68f8285400000000 + ? !!python/object/apply:multidict._multidict.istr + - x-datadog-parent-id + : - '16611919982968449797' + ? !!python/object/apply:multidict._multidict.istr + - x-datadog-sampling-priority + : - '1' + ? !!python/object/apply:multidict._multidict.istr + - x-datadog-tags + : - _dd.p.dm=-0,_dd.p.tid=68f8285400000000 + ? !!python/object/apply:multidict._multidict.istr + - x-datadog-trace-id + : - '6486213355105316501' + ? !!python/object/apply:multidict._multidict.istr + - x-stainless-read-timeout + : - '600' + ? 
!!python/object/apply:multidict._multidict.istr + - x-stainless-retry-count + : - '0' + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: "{\n \"id\": \"chatcmpl-CTHDJdm9VHbbpNisMGQsTFppbzIzV\",\n \"object\": + \"chat.completion\",\n \"created\": 1761093717,\n \"model\": \"gpt-3.5-turbo-0125\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": \"Hello! How can I assist you today?\",\n + \ \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\": + null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": + 11,\n \"completion_tokens\": 9,\n \"total_tokens\": 20,\n \"prompt_tokens_details\": + {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": + {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": + 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": + \"default\",\n \"system_fingerprint\": null\n}\n" + headers: + CF-RAY: + - 9924f3b2283642aa-EWR + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Wed, 22 Oct 2025 00:41:57 GMT + Server: + - cloudflare + Set-Cookie: + - __cf_bm=klY8vs0Lx1ngSInqqlGD.dBW7igwA.2V4SzCnhsPGhw-1761093717-1.0.1.1-cAnNflqOYmVztGYy1iU2AzRSraykSDx4LE9_w8fWNmIOk6uGs6su_c5H.86So06LiR.E02dbNOwPpjdV5dvXLpKcjsWMZls6WyIUfkTpPO4; + path=/; expires=Wed, 22-Oct-25 01:11:57 GMT; domain=.api.openai.com; HttpOnly; + Secure; SameSite=None + - _cfuvid=x1tUbE43MKnDYr2Xtq1L11MWJ8tkZOgiqGjsLhdnBgc-1761093717432-0.0.1.1-604800000; + path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None + Strict-Transport-Security: + - max-age=31536000; includeSubDomains; preload + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - nosniff + access-control-expose-headers: + - X-Request-ID + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + openai-organization: + - datadog-staging + 
openai-processing-ms: + - '312' + openai-project: + - proj_gt6TQZPRbZfoY2J9AQlEJMpd + openai-version: + - '2020-10-01' + x-envoy-upstream-service-time: + - '336' + x-openai-proxy-wasm: + - v0.1 + x-ratelimit-limit-requests: + - '10000' + x-ratelimit-limit-tokens: + - '50000000' + x-ratelimit-remaining-requests: + - '9999' + x-ratelimit-remaining-tokens: + - '49999994' + x-ratelimit-reset-requests: + - 6ms + x-ratelimit-reset-tokens: + - 0s + x-request-id: + - req_c5d496b0ceac42eea539adb53c014f04 + status: + code: 200 + message: OK +version: 1 diff --git a/tests/integration_frameworks/utils/vcr-cassettes/openai/test_chat_completion_stream_True_openai_chat_completions_post_8a611e61.yaml b/tests/integration_frameworks/utils/vcr-cassettes/openai/test_chat_completion_stream_True_openai_chat_completions_post_8a611e61.yaml new file mode 100644 index 00000000000..886962ac075 --- /dev/null +++ b/tests/integration_frameworks/utils/vcr-cassettes/openai/test_chat_completion_stream_True_openai_chat_completions_post_8a611e61.yaml @@ -0,0 +1,175 @@ +interactions: +- request: + body: '{"messages":[{"role":"user","content":"Hello OpenAI!"}],"model":"gpt-3.5-turbo","max_tokens":35,"stream":true,"stream_options":{"include_usage":true}}' + headers: + ? !!python/object/apply:multidict._multidict.istr + - Accept + : - application/json + ? !!python/object/apply:multidict._multidict.istr + - Accept-Encoding + : - gzip, deflate + ? !!python/object/apply:multidict._multidict.istr + - Connection + : - keep-alive + Content-Length: + - '150' + ? !!python/object/apply:multidict._multidict.istr + - Content-Type + : - application/json + ? !!python/object/apply:multidict._multidict.istr + - User-Agent + : - OpenAI/Python 2.0.0 + ? !!python/object/apply:multidict._multidict.istr + - X-Stainless-Arch + : - arm64 + ? !!python/object/apply:multidict._multidict.istr + - X-Stainless-Async + : - 'false' + ? !!python/object/apply:multidict._multidict.istr + - X-Stainless-Lang + : - python + ? 
!!python/object/apply:multidict._multidict.istr + - X-Stainless-OS + : - Linux + ? !!python/object/apply:multidict._multidict.istr + - X-Stainless-Package-Version + : - 2.0.0 + ? !!python/object/apply:multidict._multidict.istr + - X-Stainless-Runtime + : - CPython + ? !!python/object/apply:multidict._multidict.istr + - X-Stainless-Runtime-Version + : - 3.11.11 + ? !!python/object/apply:multidict._multidict.istr + - traceparent + : - 00-68f8284f00000000ff2bde287337b5bc-3491d272eb220623-01 + ? !!python/object/apply:multidict._multidict.istr + - tracestate + : - dd=p:3491d272eb220623;s:1;t.dm:-0;t.tid:68f8284f00000000 + ? !!python/object/apply:multidict._multidict.istr + - x-datadog-parent-id + : - '3788040152608278051' + ? !!python/object/apply:multidict._multidict.istr + - x-datadog-sampling-priority + : - '1' + ? !!python/object/apply:multidict._multidict.istr + - x-datadog-tags + : - _dd.p.dm=-0,_dd.p.tid=68f8284f00000000 + ? !!python/object/apply:multidict._multidict.istr + - x-datadog-trace-id + : - '18387034168983270844' + ? !!python/object/apply:multidict._multidict.istr + - x-stainless-read-timeout + : - '600' + ? 
!!python/object/apply:multidict._multidict.istr + - x-stainless-retry-count + : - '0' + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: 'data: {"id":"chatcmpl-CTHDD5ddpbYLMPexwDZhSOoh3FXQE","object":"chat.completion.chunk","created":1761093711,"model":"gpt-3.5-turbo-0125","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{"role":"assistant","content":"","refusal":null},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"xVLupk18"} + + + data: {"id":"chatcmpl-CTHDD5ddpbYLMPexwDZhSOoh3FXQE","object":"chat.completion.chunk","created":1761093711,"model":"gpt-3.5-turbo-0125","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"Hello"},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"pR8p6"} + + + data: {"id":"chatcmpl-CTHDD5ddpbYLMPexwDZhSOoh3FXQE","object":"chat.completion.chunk","created":1761093711,"model":"gpt-3.5-turbo-0125","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"!"},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"KhlvyaHwP"} + + + data: {"id":"chatcmpl-CTHDD5ddpbYLMPexwDZhSOoh3FXQE","object":"chat.completion.chunk","created":1761093711,"model":"gpt-3.5-turbo-0125","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":" + How"},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"jbf0qN"} + + + data: {"id":"chatcmpl-CTHDD5ddpbYLMPexwDZhSOoh3FXQE","object":"chat.completion.chunk","created":1761093711,"model":"gpt-3.5-turbo-0125","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":" + can"},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"ycIIBv"} + + + data: 
{"id":"chatcmpl-CTHDD5ddpbYLMPexwDZhSOoh3FXQE","object":"chat.completion.chunk","created":1761093711,"model":"gpt-3.5-turbo-0125","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":" + I"},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"kzCL3CTS"} + + + data: {"id":"chatcmpl-CTHDD5ddpbYLMPexwDZhSOoh3FXQE","object":"chat.completion.chunk","created":1761093711,"model":"gpt-3.5-turbo-0125","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":" + assist"},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"kKp"} + + + data: {"id":"chatcmpl-CTHDD5ddpbYLMPexwDZhSOoh3FXQE","object":"chat.completion.chunk","created":1761093711,"model":"gpt-3.5-turbo-0125","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":" + you"},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"bNuhWX"} + + + data: {"id":"chatcmpl-CTHDD5ddpbYLMPexwDZhSOoh3FXQE","object":"chat.completion.chunk","created":1761093711,"model":"gpt-3.5-turbo-0125","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":" + today"},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"hR7O"} + + + data: {"id":"chatcmpl-CTHDD5ddpbYLMPexwDZhSOoh3FXQE","object":"chat.completion.chunk","created":1761093711,"model":"gpt-3.5-turbo-0125","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"?"},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"tAu1XLi9g"} + + + data: {"id":"chatcmpl-CTHDD5ddpbYLMPexwDZhSOoh3FXQE","object":"chat.completion.chunk","created":1761093711,"model":"gpt-3.5-turbo-0125","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}],"usage":null,"obfuscation":"AmDv"} + + + data: 
{"id":"chatcmpl-CTHDD5ddpbYLMPexwDZhSOoh3FXQE","object":"chat.completion.chunk","created":1761093711,"model":"gpt-3.5-turbo-0125","service_tier":"default","system_fingerprint":null,"choices":[],"usage":{"prompt_tokens":11,"completion_tokens":9,"total_tokens":20,"prompt_tokens_details":{"cached_tokens":0,"audio_tokens":0},"completion_tokens_details":{"reasoning_tokens":0,"audio_tokens":0,"accepted_prediction_tokens":0,"rejected_prediction_tokens":0}},"obfuscation":"2TkYi9Kf6s"} + + + data: [DONE] + + + ' + headers: + CF-RAY: + - 9924f392cfe78cb3-EWR + Connection: + - keep-alive + Content-Type: + - text/event-stream; charset=utf-8 + Date: + - Wed, 22 Oct 2025 00:41:52 GMT + Server: + - cloudflare + Set-Cookie: + - __cf_bm=Ax45XclR8eAfFgUi29IH9IpQP9TbCiYRpAZk_9H.bYg-1761093712-1.0.1.1-Jh7wV__XxFBsezq5FzFFD2uK1QBaRIGZPWBizGOJDubstDqrGdNl8xd11UOXCrvcl6A6cn7LKGmUCGWPYrOarvX8WTd1lhP7M3_x2mmuWnc; + path=/; expires=Wed, 22-Oct-25 01:11:52 GMT; domain=.api.openai.com; HttpOnly; + Secure; SameSite=None + - _cfuvid=iEs9jcFGvi83M_YtwLMq4jJi9iHCK6RjRvsOKTpfy6o-1761093712078-0.0.1.1-604800000; + path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None + Strict-Transport-Security: + - max-age=31536000; includeSubDomains; preload + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - nosniff + access-control-expose-headers: + - X-Request-ID + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + openai-organization: + - datadog-staging + openai-processing-ms: + - '157' + openai-project: + - proj_gt6TQZPRbZfoY2J9AQlEJMpd + openai-version: + - '2020-10-01' + x-envoy-upstream-service-time: + - '205' + x-openai-proxy-wasm: + - v0.1 + x-ratelimit-limit-requests: + - '10000' + x-ratelimit-limit-tokens: + - '50000000' + x-ratelimit-remaining-requests: + - '9999' + x-ratelimit-remaining-tokens: + - '49999994' + x-ratelimit-reset-requests: + - 6ms + x-ratelimit-reset-tokens: + - 0s + x-request-id: + - req_3f7eea53a75c4c85b4d3b1850b082e20 + status: + code: 200 + 
message: OK +version: 1 diff --git a/tests/test_the_test/test_group_rules.py b/tests/test_the_test/test_group_rules.py index 7740f264bad..270cadf4325 100644 --- a/tests/test_the_test/test_group_rules.py +++ b/tests/test_the_test/test_group_rules.py @@ -77,7 +77,7 @@ def test_tracer_release(): if scenario_groups.tracer_release not in scenario.scenario_groups: assert ( scenario in not_in_tracer_release_group - ), f"Scenario {scenario} is not part of {scenario_groups.tracer_release}" + ), f"Scenario {scenario.name} is not part of {scenario_groups.tracer_release.name} group" if scenario in not_in_tracer_release_group: assert scenario_groups.tracer_release not in scenario.scenario_groups diff --git a/utils/_context/_scenarios/__init__.py b/utils/_context/_scenarios/__init__.py index 2c124684792..f14c2440ba4 100644 --- a/utils/_context/_scenarios/__init__.py +++ b/utils/_context/_scenarios/__init__.py @@ -24,6 +24,7 @@ from .stream_processing_offload import StreamProcessingOffloadScenario from .ipv6 import IPV6Scenario from .appsec_low_waf_timeout import AppsecLowWafTimeout +from .integration_frameworks import IntegrationFrameworksScenario from utils._context._scenarios.appsec_rasp import AppsecRaspScenario update_environ_with_local_env() @@ -1122,6 +1123,10 @@ class _Scenarios: otel_collector = OtelCollectorScenario("OTEL_COLLECTOR") + integration_frameworks = IntegrationFrameworksScenario( + "INTEGRATION_FRAMEWORKS", doc="Tests for third-party integration frameworks" + ) + scenarios = _Scenarios() diff --git a/utils/_context/_scenarios/_docker_fixtures.py b/utils/_context/_scenarios/_docker_fixtures.py index 9aaa5c4f137..00ab82ad071 100644 --- a/utils/_context/_scenarios/_docker_fixtures.py +++ b/utils/_context/_scenarios/_docker_fixtures.py @@ -6,14 +6,29 @@ from utils.docker_fixtures._test_agent import TestAgentFactory, TestAgentAPI from utils._context.docker import get_docker_client from utils._logger import logger -from .core import Scenario +from .core import 
Scenario, ScenarioGroup, scenario_groups as groups _NETWORK_PREFIX = "apm_shared_tests_network" class DockerFixturesScenario(Scenario): - _test_agent_factory: TestAgentFactory # must be created in DockerFixtureScenario.configure + def __init__( + self, + name: str, + github_workflow: str, + doc: str, + agent_image: str, + scenario_groups: tuple[ScenarioGroup, ...] = (), + ) -> None: + super().__init__( + name=name, + doc=doc, + github_workflow=github_workflow, + scenario_groups=[*scenario_groups, groups.all, groups.tracer_release, groups.docker_fixtures], + ) + + self._test_agent_factory = TestAgentFactory(agent_image) def _clean(self): if self.is_main_worker: @@ -57,8 +72,8 @@ def get_test_agent_api( worker_id: str, request: pytest.FixtureRequest, test_id: str, - container_otlp_http_port: int, - container_otlp_grpc_port: int, + container_otlp_http_port: int = 4318, + container_otlp_grpc_port: int = 4317, ) -> Generator[TestAgentAPI, None, None]: with ( self._get_docker_network(test_id) as docker_network, diff --git a/utils/_context/_scenarios/core.py b/utils/_context/_scenarios/core.py index abfacc2b238..7421ab29bc5 100644 --- a/utils/_context/_scenarios/core.py +++ b/utils/_context/_scenarios/core.py @@ -30,6 +30,7 @@ class _ScenarioGroups: appsec_rasp_scenario = ScenarioGroup() appsec_lambda = ScenarioGroup() debugger = ScenarioGroup() + docker_fixtures = ScenarioGroup() end_to_end = ScenarioGroup() exotics = ScenarioGroup() graphql = ScenarioGroup() diff --git a/utils/_context/_scenarios/integration_frameworks.py b/utils/_context/_scenarios/integration_frameworks.py new file mode 100644 index 00000000000..93876be47f1 --- /dev/null +++ b/utils/_context/_scenarios/integration_frameworks.py @@ -0,0 +1,88 @@ +from collections.abc import Generator +import contextlib + +import pytest + +from utils.docker_fixtures import ( + FrameworkTestClientFactory, + TestAgentAPI, + FrameworkTestClientApi, +) +from utils._logger import logger +from utils._context.component_version 
import ComponentVersion +from ._docker_fixtures import DockerFixturesScenario + + +class IntegrationFrameworksScenario(DockerFixturesScenario): + _test_client_factory: FrameworkTestClientFactory + + def __init__(self, name: str, doc: str) -> None: + super().__init__( + name, + doc=doc, + github_workflow="endtoend", + agent_image="ghcr.io/datadog/dd-apm-test-agent/ddapm-test-agent:v1.36.0", + ) + + self.environment = { + "DD_TRACE_DEBUG": "true", + "DD_TRACE_OTEL_ENABLED": "true", + } + + def configure(self, config: pytest.Config): + library: str = config.option.library + weblog: str = config.option.weblog + + if not library: + pytest.exit("No library specified, please set -L option", 1) + + if not weblog: + pytest.exit("No framework specified, please set -W option", 1) + + if "@" not in weblog: + pytest.exit("Weblog must be of the form : openai@2.0.0.", 1) + + framework, framework_version = weblog.split("@", 1) + + self._test_client_factory = FrameworkTestClientFactory( + library=library, + framework=framework, + framework_version=framework_version, + container_env=self.environment, + container_volumes={f"./utils/build/docker/{library}/{framework}_app": "/app/integration_frameworks"}, + ) + + self._test_agent_factory.configure(self.host_log_folder) + self._test_client_factory.configure(self.host_log_folder) + + # Set library version - for now use a placeholder, will be updated after building + self._library = ComponentVersion(library, "0.0.0") + logger.debug(f"Library: {library}, Framework: {framework}=={framework_version}") + + if self.is_main_worker: + # Build the framework test server image + self._test_client_factory.build(github_token_file=config.option.github_token_file) + self._test_agent_factory.pull() + self._clean() + + @contextlib.contextmanager + def get_client( + self, + request: pytest.FixtureRequest, + worker_id: str, + test_id: str, + library_env: dict[str, str], + test_agent: TestAgentAPI, + ) -> Generator[FrameworkTestClientApi, None, None]: + 
with self._test_client_factory.get_client( + request=request, + library_env=library_env, + worker_id=worker_id, + test_id=test_id, + test_agent=test_agent, + ) as client: + yield client + + @property + def library(self): + return self._library diff --git a/utils/_context/_scenarios/parametric.py b/utils/_context/_scenarios/parametric.py index a67bc999acb..f782e321cf2 100644 --- a/utils/_context/_scenarios/parametric.py +++ b/utils/_context/_scenarios/parametric.py @@ -12,7 +12,6 @@ from utils._context.docker import get_docker_client from utils._logger import logger from utils.docker_fixtures import ( - TestAgentFactory, TestAgentAPI, compute_volumes, ParametricTestClientFactory, @@ -66,7 +65,8 @@ def __init__(self, name: str, doc: str) -> None: name, doc=doc, github_workflow="parametric", - scenario_groups=[scenario_groups.all, scenario_groups.tracer_release, scenario_groups.parametric], + scenario_groups=(scenario_groups.parametric,), + agent_image="ghcr.io/datadog/dd-apm-test-agent/ddapm-test-agent:v1.32.0", ) self._parametric_tests_confs = ParametricScenario.PersistentParametricTestConf(self) @@ -87,8 +87,6 @@ def configure(self, config: pytest.Config): "python": {"./utils/build/docker/python/parametric/apm_test_client": "/app/apm_test_client"}, } - self._test_agent_factory = TestAgentFactory(self.host_log_folder) - # get tracer version info building and executing the ddtracer-version.docker file self._test_client_factory = ParametricTestClientFactory( library=library, @@ -99,14 +97,15 @@ def configure(self, config: pytest.Config): container_env={}, ) + self._test_client_factory.configure(self.host_log_folder) + self._test_agent_factory.configure(self.host_log_folder) + if self.is_main_worker: # https://github.com/pytest-dev/pytest-xdist/issues/271#issuecomment-826396320 # we are in the main worker, not in a xdist sub-worker # self._build_apm_test_server_image(config.option.github_token_file) self._test_agent_factory.pull() - self._test_client_factory.build( - 
host_log_folder=self.host_log_folder, github_token_file=config.option.github_token_file - ) + self._test_client_factory.build(github_token_file=config.option.github_token_file) self._clean() # https://github.com/DataDog/system-tests/issues/2799 diff --git a/utils/_features.py b/utils/_features.py index 95aa01e7c40..132d4022840 100644 --- a/utils/_features.py +++ b/utils/_features.py @@ -18,6 +18,7 @@ class _Owner(StrEnum): idm = "@DataDog/apm-idm" injection_platform = "@DataDog/injection-platform" language_platform = "@DataDog/apm-lang-platform" + ml_observability = "@DataDog/ml-observability" profiler = "@DataDog/profiling" # it does not exists remote_config = "@DataDog/remote-config" rp = "@DataDog/apm-reliability-and-performance" # reliability & performance @@ -2590,5 +2591,13 @@ def agent_data_integrity(test_object): """ return _mark_test_object(test_object, feature_id=495, owner=_Owner.agent_apm) + @staticmethod + def llm_observability(test_object): + """Data integrity + + https://feature-parity.us1.prod.dog/#/?feature=497 + """ + return _mark_test_object(test_object, feature_id=497, owner=_Owner.ml_observability) + features = _Features() diff --git a/utils/build/docker/python/openai.Dockerfile b/utils/build/docker/python/openai.Dockerfile new file mode 100644 index 00000000000..b7456e1caa3 --- /dev/null +++ b/utils/build/docker/python/openai.Dockerfile @@ -0,0 +1,21 @@ + +FROM python:3.11-slim +ARG FRAMEWORK_VERSION + +# install bin dependancies +RUN apt-get update && apt-get install -y curl + +WORKDIR /app + +RUN python -m pip install fastapi==0.89.1 uvicorn==0.20.0 opentelemetry-exporter-otlp==1.36.0 +RUN python -m pip install openai==$FRAMEWORK_VERSION + +COPY utils/build/docker/python/openai_app/system_tests_library_version.sh system_tests_library_version.sh +COPY utils/build/docker/python/install_ddtrace.sh binaries* /binaries/ + +RUN /binaries/install_ddtrace.sh +RUN mkdir /integration-framework-tracer-logs + +ENV 
DD_PATCH_MODULES="fastapi:false,starlette:false" +ENV OPENAI_API_KEY="" +CMD ["ddtrace-run", "python", "-m", "integration_frameworks", "openai"] diff --git a/utils/build/docker/python/openai_app/__init__.py b/utils/build/docker/python/openai_app/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/utils/build/docker/python/openai_app/__main__.py b/utils/build/docker/python/openai_app/__main__.py new file mode 100644 index 00000000000..c581bb585a2 --- /dev/null +++ b/utils/build/docker/python/openai_app/__main__.py @@ -0,0 +1,17 @@ +import os +import sys + +import uvicorn + + +if len(sys.argv) > 1: + framework = sys.argv[1] +else: + raise ValueError("Framework is required") + +uvicorn.run( + f"integration_frameworks.{framework}:app", + host="0.0.0.0", + port=int(os.getenv("FRAMEWORK_TEST_CLIENT_SERVER_PORT", "80")), + log_level="debug", +) diff --git a/utils/build/docker/python/openai_app/openai.py b/utils/build/docker/python/openai_app/openai.py new file mode 100644 index 00000000000..8f2fc1aee91 --- /dev/null +++ b/utils/build/docker/python/openai_app/openai.py @@ -0,0 +1,42 @@ +import ddtrace.auto + +import os + +from fastapi import FastAPI +from pydantic import BaseModel + +app = FastAPI( + title="OpenAI framework library test server", + description=""" +The reference implementation of the OpenAI framework library test server. + +Implement the API specified below to enable your framework to run all of the shared tests. 
+""", +) + +import openai + +client = openai.OpenAI(base_url=f"{os.getenv('DD_TRACE_AGENT_URL')}/vcr/openai") + + +class ChatCompletionRequest(BaseModel): + model: str + messages: list[dict] + parameters: dict + + +@app.post("/chat/completions") +def chat_completions(request: ChatCompletionRequest): + stream = request.parameters.pop("stream", False) + response = client.chat.completions.create( + model=request.model, + messages=request.messages, + stream=stream, + **request.parameters, + ) + + if stream: + for _ in response: + pass + + return {} diff --git a/utils/build/docker/python/openai_app/requirements.txt b/utils/build/docker/python/openai_app/requirements.txt new file mode 100644 index 00000000000..e930f3ffdae --- /dev/null +++ b/utils/build/docker/python/openai_app/requirements.txt @@ -0,0 +1,4 @@ +fastapi==0.89.1 +uvicorn==0.20.0 +opentelemetry-distro==0.42b0 +opentelemetry-exporter-otlp==1.21.0 diff --git a/utils/build/docker/python/openai_app/system_tests_library_version.sh b/utils/build/docker/python/openai_app/system_tests_library_version.sh new file mode 100755 index 00000000000..98e87fc1897 --- /dev/null +++ b/utils/build/docker/python/openai_app/system_tests_library_version.sh @@ -0,0 +1,3 @@ +#!/bin/bash + +python -c "import ddtrace; print(ddtrace.__version__)" diff --git a/utils/docker_fixtures/__init__.py b/utils/docker_fixtures/__init__.py index 3288d6d3b9b..41df9fc6115 100644 --- a/utils/docker_fixtures/__init__.py +++ b/utils/docker_fixtures/__init__.py @@ -1,8 +1,11 @@ from ._core import get_host_port, docker_run, compute_volumes from ._test_agent import TestAgentAPI, TestAgentFactory from ._test_client_parametric import ParametricTestClientFactory +from ._test_client_framework_integrations import FrameworkTestClientApi, FrameworkTestClientFactory __all__ = [ + "FrameworkTestClientApi", + "FrameworkTestClientFactory", "ParametricTestClientFactory", "TestAgentAPI", "TestAgentFactory", diff --git a/utils/docker_fixtures/_test_agent.py 
b/utils/docker_fixtures/_test_agent.py index a71d828086f..8d51da12a8f 100644 --- a/utils/docker_fixtures/_test_agent.py +++ b/utils/docker_fixtures/_test_agent.py @@ -47,13 +47,18 @@ class AgentRequestV06Stats(AgentRequest): class TestAgentFactory: """Handle everything to create the TestAgentApi""" - image = "ghcr.io/datadog/dd-apm-test-agent/ddapm-test-agent:v1.32.0" + def __init__(self, image: str): + self.image = image + self.host_log_folder = "" - def __init__(self, host_log_folder: str): + def configure(self, host_log_folder: str): self.host_log_folder = host_log_folder @retry(delay=10, tries=3) def pull(self) -> None: + if len(self.host_log_folder) == 0: + pytest.exit("You need to call TestAgentFactory.configure()") + if len(get_docker_client().images.list(name=self.image)) == 0: logger.stdout(f"Pull test agent image {self.image}...") get_docker_client().images.pull(self.image) @@ -74,6 +79,7 @@ def get_test_agent_api( "ENABLED_CHECKS": "trace_count_header", "OTLP_HTTP_PORT": str(container_otlp_http_port), "OTLP_GRPC_PORT": str(container_otlp_grpc_port), + "VCR_CASSETTES_DIRECTORY": "/vcr-cassettes", # TODO comment } if os.getenv("DEV_MODE") is not None: env["SNAPSHOT_CI"] = "0" @@ -92,7 +98,10 @@ def get_test_agent_api( image=self.image, name=container_name, env=env, - volumes={"./snapshots": "/snapshots"}, + volumes={ + "./snapshots": "/snapshots", + "./tests/integration_frameworks/utils/vcr-cassettes": "/vcr-cassettes", + }, ports={ f"{container_port}/tcp": host_port, f"{container_otlp_http_port}/tcp": otlp_http_host_port, @@ -367,6 +376,25 @@ def info(self): self._write_log("info", resp_json) return resp_json + def llmobs_requests(self) -> list[Any]: + reqs = [r for r in self.requests() if r["url"].endswith("/evp_proxy/v2/api/v2/llmobs")] + + events = [] + for r in reqs: + decoded_body = base64.b64decode(r["body"]) + events.append(json.loads(decoded_body)) + return events + + def llmobs_evaluations_requests(self): + reqs = [ + r + for r in self.requests() 
+ if r["url"].endswith("/evp_proxy/v2/api/intake/llm-obs/v1/eval-metric") + or r["url"].endswith("/evp_proxy/v2/api/intake/llm-obs/v2/eval-metric") + ] + + return [json.loads(base64.b64decode(r["body"])) for r in reqs] + @contextlib.contextmanager def snapshot_context(self, token: str, ignores: list[str] | None = None): ignores = ignores or [] @@ -384,6 +412,26 @@ def snapshot_context(self, token: str, ignores: list[str] | None = None): if resp.status_code != HTTPStatus.OK: raise RuntimeError(resp.text) + @contextlib.contextmanager + def vcr_context(self, cassette_prefix: str = "", **test_params_for_cassette_prefix: Any): # noqa: ANN401 + """Starts a VCR context manager, which will prefix all recorded cassettes from the test agent with the + given prefix. If no prefix is provided, the test name will be used. + """ + test_name = cassette_prefix or self._pytest_request.node.originalname + + for param, value in test_params_for_cassette_prefix.items(): + test_name += f"_{param}_{value}" + + try: + resp = self._session.post(self._url("/vcr/test/start"), json={"test_name": test_name}) + resp.raise_for_status() + except Exception as e: + raise RuntimeError(f"Could not connect to test agent: {e}") from e + else: + yield self + resp = self._session.post(self._url("/vcr/test/stop")) + resp.raise_for_status() + def wait_for_num_traces( self, num: int, *, clear: bool = False, wait_loops: int = 30, sort_by_start: bool = True ) -> list[Trace]: @@ -416,6 +464,49 @@ def wait_for_num_traces( time.sleep(0.1) raise ValueError(f"Number ({num}) of traces not available from test agent, got {num_received}:\n{traces}") + def wait_for_llmobs_requests(self, num: int, *, wait_loops: int = 30, sort_by_start: bool = True) -> list[Any]: + """Wait for `num` LLMobs requests to be received from the test agent.""" + num_received = None + llmobs_requests = [] + for _ in range(wait_loops): + try: + llmobs_requests = self.llmobs_requests() + except requests.exceptions.RequestException: + pass + else: 
+ num_received = len(llmobs_requests) + if num_received == num: + if sort_by_start: + for trace in llmobs_requests: + # The testagent may receive spans and trace chunks in any order, + # so we sort the spans by start time if needed + trace.sort(key=lambda x: x["start_ns"]) + return sorted(llmobs_requests, key=lambda t: t[0]["start_ns"]) + return llmobs_requests + time.sleep(0.1) + raise ValueError( + f"Number ({num}) of LLMobs requests not available from test agent, got {num_received}:\n{llmobs_requests}" + ) + + def wait_for_llmobs_evaluations_requests(self, num: int, *, wait_loops: int = 30) -> list[Any]: + """Wait for `num` LLMobs evaluations requests to be received from the test agent.""" + num_received = None + llmobs_evaluations_requests = [] + for _ in range(wait_loops): + try: + llmobs_evaluations_requests = self.llmobs_evaluations_requests() + except requests.exceptions.RequestException: + pass + else: + num_received = len(llmobs_evaluations_requests) + if num_received == num: + return llmobs_evaluations_requests + time.sleep(0.1) + raise ValueError( + f"""Number ({num}) of LLMobs evaluations requests not available from test agent, got {num_received}: + {llmobs_evaluations_requests}""" + ) + def wait_for_num_spans( self, num: int, *, clear: bool = False, wait_loops: int = 30, sort_by_start: bool = True ) -> list[Trace]: diff --git a/utils/docker_fixtures/_test_client.py b/utils/docker_fixtures/_test_client.py index a3951718f83..a07fab7286d 100644 --- a/utils/docker_fixtures/_test_client.py +++ b/utils/docker_fixtures/_test_client.py @@ -15,6 +15,7 @@ class TestClientFactory: """Abstracts a docker image builing for docker fixtures scenarios""" _image: Image | None + host_log_folder: str def __init__( self, @@ -36,9 +37,12 @@ def __init__( self.container_env: dict[str, str] = dict(container_env) self._image = None - def build(self, host_log_folder: str, github_token_file: str) -> None: + def configure(self, host_log_folder: str): + self.host_log_folder =
host_log_folder + + def build(self, github_token_file: str) -> None: logger.stdout("Build framework test container...") - log_path = f"{host_log_folder}/outputs/docker_build_log.log" + log_path = f"{self.host_log_folder}/outputs/docker_build_log.log" Path.mkdir(Path(log_path).parent, exist_ok=True, parents=True) with open(log_path, "w+", encoding="utf-8") as log_file: diff --git a/utils/docker_fixtures/_test_client_framework_integrations.py b/utils/docker_fixtures/_test_client_framework_integrations.py new file mode 100644 index 00000000000..d8c1129c9de --- /dev/null +++ b/utils/docker_fixtures/_test_client_framework_integrations.py @@ -0,0 +1,147 @@ +from collections.abc import Generator +import contextlib +from http import HTTPStatus +from pathlib import Path +import time +import urllib.parse + +from docker.models.containers import Container +import pytest +import requests + +from utils._logger import logger + +from ._core import docker_run, get_host_port +from ._test_agent import TestAgentAPI +from ._test_client import TestClientFactory + + +class FrameworkTestClientFactory(TestClientFactory): +    """Abstracts the docker image/container that ships the tested tracer+framework.
+ This class is responsible for: + * building the image + * exposing a ready-to-call function that runs the container and returns the client that will be used in tests + """ + + def __init__( + self, + library: str, + framework: str, + framework_version: str, + container_env: dict[str, str], + container_volumes: dict[str, str], + ): + self.library = library + self.framework = framework + self.framework_version = framework_version + super().__init__( + library=library, + dockerfile=f"utils/build/docker/{library}/{framework}.Dockerfile", + build_args={"FRAMEWORK_VERSION": framework_version}, + tag=f"{library}-test-library-{framework}-{framework_version}", + container_name=f"{library}-test-library-{framework}-{framework_version}", + container_volumes=container_volumes, + container_env=container_env, + ) + + @contextlib.contextmanager + def get_client( + self, + request: pytest.FixtureRequest, + worker_id: str, + test_id: str, + library_env: dict[str, str], + test_agent: TestAgentAPI, + ) -> Generator["FrameworkTestClientApi", None, None]: + environment = dict(self.container_env) + + container_port: int = 8080 + host_port = get_host_port(worker_id, 4500) + + # TODO : we should not have to set those three values + environment["DD_TRACE_AGENT_URL"] = f"http://{test_agent.container_name}:{test_agent.container_port}" + environment["DD_AGENT_HOST"] = test_agent.container_name + environment["DD_TRACE_AGENT_PORT"] = str(test_agent.container_port) + environment["FRAMEWORK_TEST_CLIENT_SERVER_PORT"] = str(container_port) + + # overwrite env with the one provided by the test + environment |= library_env + + log_path = f"{self.host_log_folder}/outputs/{request.cls.__name__}/{request.node.name}/server_log.log" + Path(log_path).parent.mkdir(parents=True, exist_ok=True) + + with ( + open(log_path, "w+", encoding="utf-8") as log_file, + docker_run( + image=self.tag, + name=f"{self.container_name}-{test_id}", + env=environment, + volumes=self.container_volumes, + network=test_agent.network,
+ ports={f"{container_port}/tcp": host_port}, + log_file=log_file, + ) as container, + ): + test_server_timeout = 60 + client = FrameworkTestClientApi(f"http://localhost:{host_port}", test_server_timeout, container) + + yield client + + request.node.add_report_section( + "teardown", f"{self.library.capitalize()} Library Output", f"Log file:\n./{log_path}" + ) + + +class FrameworkTestClientApi: + def __init__(self, url: str, timeout: int, container: Container): + self._base_url = url + self._session = requests.Session() + self.container = container + self.timeout = timeout + + # wait for server to start + self._wait(timeout) + + def container_restart(self): + self.container.restart() + self._wait(self.timeout) + + def _wait(self, timeout: float): + delay = 0.01 + for _ in range(int(timeout / delay)): + try: + if self.is_alive(): + break + except Exception: + if self.container.status != "running": + self._print_logs() + message = f"Container {self.container.name} status is {self.container.status}. Please check logs." + pytest.fail(message) + time.sleep(delay) + else: + self._print_logs() + message = f"Timeout of {timeout} seconds exceeded waiting for HTTP server to start. Please check logs." 
+ pytest.fail(message) + + def is_alive(self) -> bool: + self.container.reload() + return ( + self.container.status == "running" + and self._session.get(self._url("/non-existent-endpoint-to-ping-until-the-server-starts")).status_code + == HTTPStatus.NOT_FOUND + ) + + def request(self, method: str, url: str, body: dict | None = None) -> requests.Response: + resp = self._session.request(method, self._url(url), json=body) + resp.raise_for_status() + return resp + + def _url(self, path: str) -> str: + return urllib.parse.urljoin(self._base_url, path) + + def _print_logs(self): + try: + logs = self.container.logs().decode("utf-8") + logger.debug(f"Logs from container {self.container.name}:\n\n{logs}") + except Exception: + logger.error(f"Failed to get logs from container {self.container.name}") diff --git a/utils/scripts/ci_orchestrators/workflow_data.py b/utils/scripts/ci_orchestrators/workflow_data.py index 64252957369..2ad3c62ff7b 100644 --- a/utils/scripts/ci_orchestrators/workflow_data.py +++ b/utils/scripts/ci_orchestrators/workflow_data.py @@ -273,6 +273,10 @@ def _get_endtoend_weblogs( ) -> list[Weblog]: result: list[Weblog] = [] + integration_frameworks_weblogs = { + "openai": ["2.0.0"] # python + } + folder = f"utils/build/docker/{library}" if Path(folder).exists(): # some lib does not have any weblog names = [ @@ -285,12 +289,20 @@ def _get_endtoend_weblogs( # filter weblogs by the weblogs_filter set names = [weblog for weblog in names if weblog in weblogs_filter] - result += [ - Weblog( - name=name, require_build=True, artifact_name=f"binaries_{ci_environment}_{library}_{name}_{unique_id}" - ) - for name in names - ] + for name in names: + if name not in integration_frameworks_weblogs: + result.append( + Weblog( + name=name, + require_build=True, + artifact_name=f"binaries_{ci_environment}_{library}_{name}_{unique_id}", + ) + ) + else: + for version in integration_frameworks_weblogs[name]: + result.append( + Weblog(name=f"{name}@{version}", 
require_build=False, artifact_name=binaries_artifact) + ) # weblog not related to a docker file if library == "golang": @@ -543,6 +555,9 @@ def _is_supported(library: str, weblog: str, scenario: str, _ci_environment: str if weblog == "otel_collector" or scenario == "OTEL_COLLECTOR": return weblog == "otel_collector" and scenario == "OTEL_COLLECTOR" + if "@" in weblog or scenario == "INTEGRATION_FRAMEWORKS": + return "@" in weblog and scenario == "INTEGRATION_FRAMEWORKS" + return True diff --git a/utils/scripts/replay_scenarios.sh b/utils/scripts/replay_scenarios.sh index 595dc412f4f..1064e3f0540 100755 --- a/utils/scripts/replay_scenarios.sh +++ b/utils/scripts/replay_scenarios.sh @@ -4,7 +4,7 @@ set -e # scenario getting backedn data are not yet supported -NOT_SUPPPORTED=("APM_TRACING_E2E_OTEL" "PARAMETRIC" "OTEL_INTEGRATIONS" "OTEL_LOG_E2E" "OTEL_METRIC_E2E" "OTEL_TRACING_E2E" "APM_TRACING_E2E_SINGLE_SPAN") +NOT_SUPPPORTED=("APM_TRACING_E2E_OTEL" "PARAMETRIC" "OTEL_INTEGRATIONS" "OTEL_LOG_E2E" "OTEL_METRIC_E2E" "OTEL_TRACING_E2E" "APM_TRACING_E2E_SINGLE_SPAN" "INTEGRATION_FRAMEWORKS") if [ -d "logs/" ]; then echo "[DEFAULT] Running replay mode"