diff --git a/src/openai/types/responses/response.py b/src/openai/types/responses/response.py
index ada0783bce..227cc39d6b 100644
--- a/src/openai/types/responses/response.py
+++ b/src/openai/types/responses/response.py
@@ -315,7 +315,7 @@     def output_text(self) -> str:
         for output in self.output:
             if output.type == "message":
                 for content in output.content:
-                    if content.type == "output_text":
+                    if content.type == "output_text" and content.text is not None:
                         texts.append(content.text)
 
         return "".join(texts)
diff --git a/src/openai/types/responses/response_output_text.py b/src/openai/types/responses/response_output_text.py
index 2386fcb3c0..6e78685fdc 100644
--- a/src/openai/types/responses/response_output_text.py
+++ b/src/openai/types/responses/response_output_text.py
@@ -122,7 +122,7 @@ class ResponseOutputText(BaseModel):
     annotations: List[Annotation]
     """The annotations of the text output."""
 
-    text: str
+    text: Optional[str]
     """The text output from the model."""
 
     type: Literal["output_text"]
diff --git a/tests/lib/responses/test_responses.py b/tests/lib/responses/test_responses.py
index 8e5f16df95..36fbafda16 100644
--- a/tests/lib/responses/test_responses.py
+++ b/tests/lib/responses/test_responses.py
@@ -8,6 +8,7 @@
 
 from openai import OpenAI, AsyncOpenAI
 from openai._utils import assert_signatures_in_sync
+from openai.types.responses.response import Response
 
 from ...conftest import base_url
 from ..snapshots import make_snapshot_request
@@ -41,6 +42,76 @@ def test_output_text(client: OpenAI, respx_mock: MockRouter) -> None:
     )
 
 
+def test_output_text_with_null_text_field() -> None:
+    """Regression test for https://github.com/openai/openai-python/issues/3063.
+
+    The API can return output_text items with text: null. model_validate should
+    not raise, and output_text should skip null entries rather than failing.
+    """
+    data = {
+        "id": "resp_null_output_text",
+        "object": "response",
+        "created_at": 0,
+        "status": "completed",
+        "background": False,
+        "error": None,
+        "incomplete_details": None,
+        "instructions": None,
+        "max_output_tokens": None,
+        "max_tool_calls": None,
+        "model": "gpt-4o-mini",
+        "output": [
+            {
+                "id": "msg_null_output_text",
+                "type": "message",
+                "status": "completed",
+                "role": "assistant",
+                "content": [
+                    {
+                        "type": "output_text",
+                        "annotations": [],
+                        "logprobs": [],
+                        "text": None,
+                    },
+                    {
+                        "type": "output_text",
+                        "annotations": [],
+                        "logprobs": [],
+                        "text": '{"message":"hello"}',
+                    },
+                ],
+            }
+        ],
+        "parallel_tool_calls": True,
+        "previous_response_id": None,
+        "prompt_cache_key": None,
+        "reasoning": {"effort": None, "summary": None},
+        "safety_identifier": None,
+        "service_tier": "default",
+        "store": True,
+        "temperature": 1.0,
+        "text": {"format": {"type": "text"}, "verbosity": "medium"},
+        "tool_choice": "auto",
+        "tools": [],
+        "top_logprobs": 0,
+        "top_p": 1.0,
+        "truncation": "disabled",
+        "usage": {
+            "input_tokens": 1,
+            "input_tokens_details": {"cached_tokens": 0},
+            "output_tokens": 1,
+            "output_tokens_details": {"reasoning_tokens": 0},
+            "total_tokens": 2,
+        },
+        "user": None,
+        "metadata": {},
+    }
+
+    response = Response.model_validate(data)
+
+    assert response.output_text == '{"message":"hello"}'
+
+
 @pytest.mark.parametrize("sync", [True, False], ids=["sync", "async"])
 def test_stream_method_definition_in_sync(sync: bool, client: OpenAI, async_client: AsyncOpenAI) -> None:
     checking_client: OpenAI | AsyncOpenAI = client if sync else async_client