Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
34 commits
Select commit Hold shift + click to select a range
8cbed2f
add scaffolding-ish for framework tests
sabrenner Oct 21, 2025
acd3b20
working test!
sabrenner Oct 22, 2025
611995f
add stream test with different cassette names
sabrenner Oct 22, 2025
afc9b75
Merge branch 'main' into sabrenner/introduce-frameworks-tests
cbeauchesne Oct 24, 2025
fbb2e62
Simplifications
cbeauchesne Oct 24, 2025
deda2cd
Lint and remove dead code
cbeauchesne Oct 24, 2025
a0bbed7
Remove dead code
cbeauchesne Oct 24, 2025
fe14a4b
Use a real dockerfile
cbeauchesne Oct 24, 2025
865231f
Many simplifications
cbeauchesne Oct 24, 2025
657aaf9
Revamp
cbeauchesne Oct 24, 2025
b29128c
Fix typo
cbeauchesne Oct 24, 2025
6320407
Small improvements
cbeauchesne Oct 24, 2025
46f1f20
merge image and container class, rename it
cbeauchesne Oct 24, 2025
de4b1e1
namings
cbeauchesne Oct 24, 2025
68956dc
Revamp
cbeauchesne Oct 24, 2025
811eef6
Introduce TestAgentFactory
cbeauchesne Oct 24, 2025
778858a
Naming
cbeauchesne Oct 24, 2025
f7ca8d0
simplifications
cbeauchesne Oct 24, 2025
774a18f
Left overs
cbeauchesne Oct 24, 2025
6343cd1
CI
cbeauchesne Oct 24, 2025
43114d3
Fix test the test
cbeauchesne Oct 24, 2025
fb8b632
INTEGRATION_FRAMEWORKS does not support replay
cbeauchesne Oct 24, 2025
b6994bd
Allow ./ as host volume
cbeauchesne Oct 25, 2025
2f7715f
new simplifications
cbeauchesne Oct 25, 2025
f9ed35a
Merge branch 'main' into sabrenner/introduce-frameworks-tests
cbeauchesne Oct 25, 2025
7e3fa75
Re-use existing logic
cbeauchesne Oct 25, 2025
4211773
Fix leftover
cbeauchesne Oct 25, 2025
dad5602
Cleanup
cbeauchesne Oct 25, 2025
b4f031c
Fix model
cbeauchesne Oct 25, 2025
6dccc4b
Rename file
cbeauchesne Oct 27, 2025
8766c3f
Merge branch 'main' into sabrenner/introduce-frameworks-tests
cbeauchesne Oct 27, 2025
354099b
Remove duplicated code
cbeauchesne Oct 27, 2025
b864c66
review changes
sabrenner Oct 27, 2025
bb643ef
Merge branch 'main' into sabrenner/introduce-frameworks-tests
sabrenner Oct 27, 2025
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 3 additions & 0 deletions .github/workflows/run-end-to-end.yml
Original file line number Diff line number Diff line change
Expand Up @@ -163,6 +163,9 @@ jobs:
env:
SYSTEM_TEST_BUILD_ATTEMPTS: 3

- name: Run INTEGRATION_FRAMEWORKS scenario
if: always() && steps.build.outcome == 'success' && contains(inputs.scenarios, '"INTEGRATION_FRAMEWORKS"')
run: ./run.sh INTEGRATION_FRAMEWORKS -L ${{ inputs.library }} --weblog ${{ inputs.weblog }}
- name: Run APPSEC_STANDALONE scenario
if: always() && steps.build.outcome == 'success' && contains(inputs.scenarios, '"APPSEC_STANDALONE"')
run: ./run.sh APPSEC_STANDALONE
Expand Down
9 changes: 9 additions & 0 deletions conftest.py
Original file line number Diff line number Diff line change
Expand Up @@ -127,6 +127,15 @@ def pytest_addoption(parser: pytest.Parser) -> None:
help="An file containing a valid Github token to perform API calls",
)

# Integration frameworks scenario options
parser.addoption(
"--weblog",
type=str,
action="store",
default=None,
help="Framework to test (e.g. '[email protected]' for INTEGRATION_FRAMEWORKS scenario)",
)

# report data to feature parity dashboard
parser.addoption(
"--report-run-url", type=str, action="store", default=None, help="URI of the run who produced the report"
Expand Down
Empty file.
57 changes: 57 additions & 0 deletions tests/integration_frameworks/conftest.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,57 @@
from collections.abc import Generator

import pytest

from utils.docker_fixtures import (
FrameworkTestClientApi,
TestAgentAPI,
)
from utils import context, scenarios, logger


@pytest.fixture
def test_id(request: pytest.FixtureRequest) -> str:
    """Return a short (6 hex chars) random identifier for this test run.

    The id is logged next to the pytest node id so container/agent resources
    created for the test can be correlated with it in the logs.
    """
    import uuid

    short_id = uuid.uuid4().hex[:6]
    logger.info(f"Test {request.node.nodeid} ID: {short_id}")
    return short_id


@pytest.fixture
def library_env() -> dict[str, str]:
    """Extra environment variables for the instrumented library container.

    Empty by default; individual tests override this fixture to inject
    library-specific configuration.
    """
    return dict()


@pytest.fixture
def test_agent(
    test_id: str,
    worker_id: str,
    request: pytest.FixtureRequest,
) -> Generator[TestAgentAPI, None, None]:
    """Yield a running test-agent API scoped to this test.

    The scenario owns the agent's lifecycle; the context manager it returns
    tears the agent down when the test finishes.
    """
    agent_cm = scenarios.integration_frameworks.get_test_agent_api(
        request=request,
        worker_id=worker_id,
        test_id=test_id,
    )
    with agent_cm as agent:
        yield agent


@pytest.fixture
def test_client(
    request: pytest.FixtureRequest,
    library_env: dict[str, str],
    test_id: str,
    worker_id: str,
    test_agent: TestAgentAPI,
) -> Generator[FrameworkTestClientApi, None, None]:
    """Yield a framework test client wired to this test's test agent.

    Also records the library environment used by the test in the scenario's
    parametrized-test metadata so it appears in reports.
    """
    context.scenario.parametrized_tests_metadata[request.node.nodeid] = {**library_env}

    client_cm = scenarios.integration_frameworks.get_client(
        request=request,
        library_env=library_env,
        worker_id=worker_id,
        test_id=test_id,
        test_agent=test_agent,
    )
    with client_cm as framework_client:
        yield framework_client
34 changes: 34 additions & 0 deletions tests/integration_frameworks/test_openai.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,34 @@
from utils import context, missing_feature, scenarios, features

import pytest

from utils.docker_fixtures import FrameworkTestClientApi, TestAgentAPI


@features.llm_observability
@scenarios.integration_frameworks
class TestOpenAiAPM:
    """APM auto-instrumentation checks for the OpenAI chat-completions endpoint."""

    @missing_feature(context.library == "nodejs", reason="Node.js openai server not implemented yet")
    @missing_feature(context.library == "java", reason="Java does not auto-instrument OpenAI")
    @pytest.mark.parametrize("stream", [True, False])
    def test_chat_completion(self, test_agent: TestAgentAPI, test_client: FrameworkTestClientApi, *, stream: bool):
        payload = {
            "model": "gpt-3.5-turbo",
            "messages": [{"role": "user", "content": "Hello OpenAI!"}],
            "parameters": {
                "max_tokens": 35,
                "stream": stream,
            },
        }
        # Replay the recorded OpenAI exchange matching the stream flag, then
        # drive one chat-completion request through the instrumented client.
        with test_agent.vcr_context(stream=stream):
            test_client.request("POST", "/chat/completions", payload)

        traces = test_agent.wait_for_num_traces(num=1)
        root_span = traces[0][0]

        assert root_span["name"] == "openai.request"
        assert root_span["resource"] == "createChatCompletion"
        assert root_span["meta"]["openai.request.model"] == "gpt-3.5-turbo"
Original file line number Diff line number Diff line change
@@ -0,0 +1,142 @@
interactions:
- request:
body: '{"messages":[{"role":"user","content":"Hello OpenAI!"}],"model":"gpt-3.5-turbo","max_tokens":35,"stream":false}'
headers:
? !!python/object/apply:multidict._multidict.istr
- Accept
: - application/json
? !!python/object/apply:multidict._multidict.istr
- Accept-Encoding
: - gzip, deflate
? !!python/object/apply:multidict._multidict.istr
- Connection
: - keep-alive
Content-Length:
- '111'
? !!python/object/apply:multidict._multidict.istr
- Content-Type
: - application/json
? !!python/object/apply:multidict._multidict.istr
- User-Agent
: - OpenAI/Python 2.0.0
? !!python/object/apply:multidict._multidict.istr
- X-Stainless-Arch
: - arm64
? !!python/object/apply:multidict._multidict.istr
- X-Stainless-Async
: - 'false'
? !!python/object/apply:multidict._multidict.istr
- X-Stainless-Lang
: - python
? !!python/object/apply:multidict._multidict.istr
- X-Stainless-OS
: - Linux
? !!python/object/apply:multidict._multidict.istr
- X-Stainless-Package-Version
: - 2.0.0
? !!python/object/apply:multidict._multidict.istr
- X-Stainless-Runtime
: - CPython
? !!python/object/apply:multidict._multidict.istr
- X-Stainless-Runtime-Version
: - 3.11.11
? !!python/object/apply:multidict._multidict.istr
- traceparent
: - 00-68f82854000000005a03a8ae58726e95-e6896535f60c2b05-01
? !!python/object/apply:multidict._multidict.istr
- tracestate
: - dd=p:e6896535f60c2b05;s:1;t.dm:-0;t.tid:68f8285400000000
? !!python/object/apply:multidict._multidict.istr
- x-datadog-parent-id
: - '16611919982968449797'
? !!python/object/apply:multidict._multidict.istr
- x-datadog-sampling-priority
: - '1'
? !!python/object/apply:multidict._multidict.istr
- x-datadog-tags
: - _dd.p.dm=-0,_dd.p.tid=68f8285400000000
? !!python/object/apply:multidict._multidict.istr
- x-datadog-trace-id
: - '6486213355105316501'
? !!python/object/apply:multidict._multidict.istr
- x-stainless-read-timeout
: - '600'
? !!python/object/apply:multidict._multidict.istr
- x-stainless-retry-count
: - '0'
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: "{\n \"id\": \"chatcmpl-CTHDJdm9VHbbpNisMGQsTFppbzIzV\",\n \"object\":
\"chat.completion\",\n \"created\": 1761093717,\n \"model\": \"gpt-3.5-turbo-0125\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"Hello! How can I assist you today?\",\n
\ \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\":
null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
11,\n \"completion_tokens\": 9,\n \"total_tokens\": 20,\n \"prompt_tokens_details\":
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": null\n}\n"
headers:
CF-RAY:
- 9924f3b2283642aa-EWR
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Wed, 22 Oct 2025 00:41:57 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=klY8vs0Lx1ngSInqqlGD.dBW7igwA.2V4SzCnhsPGhw-1761093717-1.0.1.1-cAnNflqOYmVztGYy1iU2AzRSraykSDx4LE9_w8fWNmIOk6uGs6su_c5H.86So06LiR.E02dbNOwPpjdV5dvXLpKcjsWMZls6WyIUfkTpPO4;
path=/; expires=Wed, 22-Oct-25 01:11:57 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=x1tUbE43MKnDYr2Xtq1L11MWJ8tkZOgiqGjsLhdnBgc-1761093717432-0.0.1.1-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Strict-Transport-Security:
- max-age=31536000; includeSubDomains; preload
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- datadog-staging
openai-processing-ms:
- '312'
openai-project:
- proj_gt6TQZPRbZfoY2J9AQlEJMpd
openai-version:
- '2020-10-01'
x-envoy-upstream-service-time:
- '336'
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '50000000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '49999994'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_c5d496b0ceac42eea539adb53c014f04
status:
code: 200
message: OK
version: 1
Loading
Loading