Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions src/openai/types/responses/response.py
Original file line number Diff line number Diff line change
Expand Up @@ -315,7 +315,7 @@ def output_text(self) -> str:
for output in self.output:
if output.type == "message":
for content in output.content:
if content.type == "output_text":
texts.append(content.text)
if content.text != None:
Copy link
Copy Markdown

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

P1 Badge Keep type guard before reading content.text

This change removes the content.type == "output_text" check and directly accesses content.text, but output.content can include ResponseOutputRefusal entries that do not define a text attribute. In responses containing refusal content, accessing response.output_text will now raise AttributeError instead of returning aggregated text.

Useful? React with 👍 / 👎.

texts.append(content.text)

return "".join(texts)
2 changes: 1 addition & 1 deletion src/openai/types/responses/response_output_text.py
Original file line number Diff line number Diff line change
Expand Up @@ -122,7 +122,7 @@ class ResponseOutputText(BaseModel):
annotations: List[Annotation]
"""The annotations of the text output."""

text: str
text: Optional[str]:
Copy link
Copy Markdown

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

P0 Badge Remove stray colon from text type annotation

The field declaration text: Optional[str]: is invalid Python syntax, so importing this module raises a SyntaxError before runtime logic can execute. Any code path that imports openai.types.responses.response_output_text (including from openai.types.responses.response import Response) will fail immediately, making the responses types unusable.

Useful? React with 👍 / 👎.

"""The text output from the model."""

type: Literal["output_text"]
Expand Down
71 changes: 71 additions & 0 deletions tests/lib/responses/test_responses.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@

from openai import OpenAI, AsyncOpenAI
from openai._utils import assert_signatures_in_sync
from openai.types.responses.response import Response

from ...conftest import base_url
from ..snapshots import make_snapshot_request
Expand Down Expand Up @@ -41,6 +42,76 @@ def test_output_text(client: OpenAI, respx_mock: MockRouter) -> None:
)


def test_output_text_with_null_text_field() -> None:
    """Ensure a null ``text`` field in an output_text item is tolerated.

    Regression test for https://github.com/openai/openai-python/issues/3063.
    ``Response.model_validate`` must accept ``"text": null`` without raising,
    and the ``output_text`` aggregate must skip such entries rather than fail.
    """
    # One content entry with a null text field, one with real text — only the
    # second should contribute to the aggregated output_text.
    null_text_item = {
        "type": "output_text",
        "annotations": [],
        "logprobs": [],
        "text": None,
    }
    real_text_item = {
        "type": "output_text",
        "annotations": [],
        "logprobs": [],
        "text": '{"message":"hello"}',
    }
    message = {
        "id": "msg_null_output_text",
        "type": "message",
        "status": "completed",
        "role": "assistant",
        "content": [null_text_item, real_text_item],
    }
    payload = {
        "id": "resp_null_output_text",
        "object": "response",
        "created_at": 0,
        "status": "completed",
        "background": False,
        "error": None,
        "incomplete_details": None,
        "instructions": None,
        "max_output_tokens": None,
        "max_tool_calls": None,
        "model": "gpt-4o-mini",
        "output": [message],
        "parallel_tool_calls": True,
        "previous_response_id": None,
        "prompt_cache_key": None,
        "reasoning": {"effort": None, "summary": None},
        "safety_identifier": None,
        "service_tier": "default",
        "store": True,
        "temperature": 1.0,
        "text": {"format": {"type": "text"}, "verbosity": "medium"},
        "tool_choice": "auto",
        "tools": [],
        "top_logprobs": 0,
        "top_p": 1.0,
        "truncation": "disabled",
        "usage": {
            "input_tokens": 1,
            "input_tokens_details": {"cached_tokens": 0},
            "output_tokens": 1,
            "output_tokens_details": {"reasoning_tokens": 0},
            "total_tokens": 2,
        },
        "user": None,
        "metadata": {},
    }

    parsed = Response.model_validate(payload)

    # The null-text entry is skipped; only the real text remains.
    assert parsed.output_text == '{"message":"hello"}'


@pytest.mark.parametrize("sync", [True, False], ids=["sync", "async"])
def test_stream_method_definition_in_sync(sync: bool, client: OpenAI, async_client: AsyncOpenAI) -> None:
checking_client: OpenAI | AsyncOpenAI = client if sync else async_client
Expand Down