diff --git a/src/openai/lib/_parsing/_responses.py b/src/openai/lib/_parsing/_responses.py
index 8853a0749f..259b636802 100644
--- a/src/openai/lib/_parsing/_responses.py
+++ b/src/openai/lib/_parsing/_responses.py
@@ -71,7 +71,7 @@ def parse_response(
                         type_=ParsedResponseOutputText[TextFormatT],
                         value={
                             **item.to_dict(),
-                            "parsed": parse_text(item.text, text_format=text_format),
+                            "parsed": parse_text(item.text, text_format=text_format) if item.text is not None else None,
                         },
                     )
                 )
diff --git a/src/openai/types/responses/response.py b/src/openai/types/responses/response.py
index ada0783bce..e32a017e85 100644
--- a/src/openai/types/responses/response.py
+++ b/src/openai/types/responses/response.py
@@ -315,7 +315,7 @@ def output_text(self) -> str:
         for output in self.output:
             if output.type == "message":
                 for content in output.content:
-                    if content.type == "output_text":
+                    if content.type == "output_text" and content.text is not None:
                         texts.append(content.text)
 
         return "".join(texts)
diff --git a/src/openai/types/responses/response_output_text.py b/src/openai/types/responses/response_output_text.py
index 2386fcb3c0..4c10877109 100644
--- a/src/openai/types/responses/response_output_text.py
+++ b/src/openai/types/responses/response_output_text.py
@@ -122,7 +122,7 @@ class ResponseOutputText(BaseModel):
     annotations: List[Annotation]
     """The annotations of the text output."""
 
-    text: str
+    text: Optional[str] = None
     """The text output from the model."""
 
     type: Literal["output_text"]
diff --git a/tests/lib/responses/test_responses.py b/tests/lib/responses/test_responses.py
index 8e5f16df95..b692f720df 100644
--- a/tests/lib/responses/test_responses.py
+++ b/tests/lib/responses/test_responses.py
@@ -3,11 +3,14 @@
 from typing_extensions import TypeVar
 
 import pytest
+from pydantic import BaseModel
 from respx import MockRouter
 from inline_snapshot import snapshot
 
 from openai import OpenAI, AsyncOpenAI
+from openai.lib._parsing._responses import parse_response
 from openai._utils import assert_signatures_in_sync
+from openai.types.responses import Response
 
 from ...conftest import base_url
 from ..snapshots import make_snapshot_request
@@ -41,6 +44,125 @@ def test_output_text(client: OpenAI, respx_mock: MockRouter) -> None:
     )
 
 
+def test_output_text_ignores_null_items() -> None:
+    response = Response.model_validate(
+        {
+            "id": "resp_null_output_text",
+            "object": "response",
+            "created_at": 0,
+            "status": "completed",
+            "background": False,
+            "error": None,
+            "incomplete_details": None,
+            "instructions": None,
+            "max_output_tokens": None,
+            "max_tool_calls": None,
+            "model": "gpt-4o-mini",
+            "output": [
+                {
+                    "id": "msg_null_output_text",
+                    "type": "message",
+                    "status": "completed",
+                    "role": "assistant",
+                    "content": [
+                        {"type": "output_text", "annotations": [], "logprobs": [], "text": None},
+                        {"type": "output_text", "annotations": [], "logprobs": [], "text": '{"message":"hello"}'},
+                    ],
+                }
+            ],
+            "parallel_tool_calls": True,
+            "previous_response_id": None,
+            "prompt_cache_key": None,
+            "reasoning": {"effort": None, "summary": None},
+            "safety_identifier": None,
+            "service_tier": "default",
+            "store": True,
+            "temperature": 1.0,
+            "text": {"format": {"type": "text"}, "verbosity": "medium"},
+            "tool_choice": "auto",
+            "tools": [],
+            "top_logprobs": 0,
+            "top_p": 1.0,
+            "truncation": "disabled",
+            "usage": {
+                "input_tokens": 1,
+                "input_tokens_details": {"cached_tokens": 0},
+                "output_tokens": 1,
+                "output_tokens_details": {"reasoning_tokens": 0},
+                "total_tokens": 2,
+            },
+            "user": None,
+            "metadata": {},
+        }
+    )
+
+    assert response.output_text == '{"message":"hello"}'
+
+
+def test_parse_response_skips_null_output_text() -> None:
+    class ParsedMessage(BaseModel):
+        message: str
+
+    response = Response.model_validate(
+        {
+            "id": "resp_null_output_text",
+            "object": "response",
+            "created_at": 0,
+            "status": "completed",
+            "background": False,
+            "error": None,
+            "incomplete_details": None,
+            "instructions": None,
+            "max_output_tokens": None,
+            "max_tool_calls": None,
+            "model": "gpt-4o-mini",
+            "output": [
+                {
+                    "id": "msg_null_output_text",
+                    "type": "message",
+                    "status": "completed",
+                    "role": "assistant",
+                    "content": [
+                        {"type": "output_text", "annotations": [], "logprobs": [], "text": None},
+                        {"type": "output_text", "annotations": [], "logprobs": [], "text": '{"message":"hello"}'},
+                    ],
+                }
+            ],
+            "parallel_tool_calls": True,
+            "previous_response_id": None,
+            "prompt_cache_key": None,
+            "reasoning": {"effort": None, "summary": None},
+            "safety_identifier": None,
+            "service_tier": "default",
+            "store": True,
+            "temperature": 1.0,
+            "text": {"format": {"type": "text"}, "verbosity": "medium"},
+            "tool_choice": "auto",
+            "tools": [],
+            "top_logprobs": 0,
+            "top_p": 1.0,
+            "truncation": "disabled",
+            "usage": {
+                "input_tokens": 1,
+                "input_tokens_details": {"cached_tokens": 0},
+                "output_tokens": 1,
+                "output_tokens_details": {"reasoning_tokens": 0},
+                "total_tokens": 2,
+            },
+            "user": None,
+            "metadata": {},
+        }
+    )
+
+    parsed = parse_response(text_format=ParsedMessage, input_tools=[], response=response)
+    message_output = parsed.output[0]
+    assert message_output.type == "message"
+    assert message_output.content[0].type == "output_text"
+    assert message_output.content[0].parsed is None
+    assert message_output.content[1].type == "output_text"
+    assert message_output.content[1].parsed == ParsedMessage(message="hello")
+
+
 @pytest.mark.parametrize("sync", [True, False], ids=["sync", "async"])
 def test_stream_method_definition_in_sync(sync: bool, client: OpenAI, async_client: AsyncOpenAI) -> None:
     checking_client: OpenAI | AsyncOpenAI = client if sync else async_client
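A minimal sketch of the loosened validation this patch introduces (not part of the diff; the payload below is illustrative):

    from openai.types.responses import ResponseOutputText

    # Before this change, text=None failed model validation; with
    # `text: Optional[str] = None` the item round-trips, and downstream
    # consumers (output_text, parse_response) now skip the missing text.
    item = ResponseOutputText.model_validate(
        {"type": "output_text", "annotations": [], "text": None}
    )
    assert item.text is None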