fix: handle OpenAI Responses API stream errors instead of crashing (#1669)

The streaming code assumed every stream produced a `response.completed`
event and dereferenced its data unconditionally, causing
`undefined method 'data' for nil` whenever OpenAI emitted
`response.failed`, `response.incomplete`, or a top-level `error` event
(e.g. expired `previous_response_id`, context-window overflow,
transient upstream failures). Surface a descriptive `Provider::Error`
instead.
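
For context, a minimal sketch of the failure mode; `chunks` and the
`"response"` chunk type are assumed names, not the actual implementation:

    # Hypothetical pre-fix pattern: the completion chunk is dereferenced
    # without a nil check, so any stream that never emits
    # response.completed blows up here.
    response_chunk = chunks.find { |chunk| chunk.type == "response" }
    response_chunk.data # => NoMethodError: undefined method 'data' for nil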

- Extend `ChatStreamParser` to recognise `response.failed`,
  `response.incomplete`, and `error` events and emit an `error` chunk
  with a `StreamErrorData` payload (event, message, code, details);
  parser sketch after this list.
- In `Provider::Openai#native_chat_response`, detect the missing
  `response` chunk, build a user-facing error message from the
  collected error chunk, and raise `Provider::Error`; provider sketch
  after this list.
- Add unit tests for the parser (8 cases) and integration tests for
  the error path in the chat response flow.
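
A minimal sketch of the parser change, assuming struct-style chunks and a
`Chunk.new(type:, data:)` interface; names beyond those in the summary
above are illustrative:

    # Illustrative only: the real ChatStreamParser interface may differ.
    Chunk = Struct.new(:type, :data, keyword_init: true)
    StreamErrorData = Struct.new(:event, :message, :code, :details, keyword_init: true)

    def parse_error_event(event)
      # Assumed event shapes: failure events may nest the error under
      # "response", or carry the details at the top level.
      error = event.dig("response", "error") || event["error"] || {}

      Chunk.new(
        type: "error",
        data: StreamErrorData.new(
          event: event["type"],   # "response.failed", "response.incomplete", or "error"
          message: error["message"],
          code: error["code"],
          details: error
        )
      )
    end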
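
And a sketch of the provider-side guard; the helper name and chunk
accumulation are assumptions, though the message wording matches the new
tests below:

    # Illustrative guard used from Provider::Openai#native_chat_response.
    # Raises Provider::Error with a message built from the collected
    # error chunk when the stream never produced a completion.
    def ensure_completion!(chunks)
      return if chunks.any? { |chunk| chunk.type == "response" }

      error_chunk = chunks.find { |chunk| chunk.type == "error" }

      message =
        if error_chunk
          data = error_chunk.data
          "OpenAI stream failed (#{data.event}): #{data.message} (#{data.code})"
        else
          "OpenAI stream ended without a completion event"
        end

      raise Provider::Error, message
    end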

Co-authored-by: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
GermanDZ
2026-05-05 01:22:05 +02:00
committed by GitHub
parent d0883f9018
commit 9cc52b9d35
4 changed files with 210 additions and 0 deletions


@@ -445,6 +445,54 @@ class Provider::OpenaiTest < ActiveSupport::TestCase
    end
  end

  test "streaming surfaces a useful error when the stream ends with response.failed and no completion" do
    fake_responses = mock
    fake_client = mock
    fake_client.stubs(:responses).returns(fake_responses)
    @subject.stubs(:client).returns(fake_client)

    fake_responses.expects(:create).with do |*_args, **kwargs|
      stream = kwargs.dig(:parameters, :stream)
      stream.call({
        "type" => "response.failed",
        "response" => {
          "error" => { "message" => "Previous response not found", "code" => "previous_response_not_found" }
        }
      })
      true
    end.returns(nil)

    response = @subject.chat_response(
      "hi",
      model: @subject_model,
      streamer: proc { |_| }
    )

    assert_not response.success?
    assert_kind_of Provider::Openai::Error, response.error
    assert_match(/Previous response not found/, response.error.message)
    assert_match(/previous_response_not_found/, response.error.message)
  end

  test "streaming surfaces a useful error when the stream ends with no response and no error event" do
    fake_responses = mock
    fake_client = mock
    fake_client.stubs(:responses).returns(fake_responses)
    @subject.stubs(:client).returns(fake_client)

    fake_responses.expects(:create).returns(nil)

    response = @subject.chat_response(
      "hi",
      model: @subject_model,
      streamer: proc { |_| }
    )

    assert_not response.success?
    assert_kind_of Provider::Openai::Error, response.error
    assert_match(/stream ended without a completion event/i, response.error.message)
  end

  test "build_input no longer accepts inline messages history" do
    config = Provider::Openai::ChatConfig.new(functions: [], function_results: [])
    # Positive control: prompt works