// Add Anthropic Messages API support for OpenAI platform groups, enabling
// clients using Claude-style /v1/messages format to access OpenAI accounts
// through automatic protocol conversion.
//
//   - Add apicompat package with type definitions and bidirectional converters
//     (Anthropic ↔ Chat, Chat ↔ Responses, Anthropic ↔ Responses)
//   - Implement /v1/messages endpoint for OpenAI gateway with streaming support
//   - Add model mapping UI for OpenAI OAuth accounts (whitelist + mapping modes)
//   - Support prompt caching fields and codex OAuth transforms
//   - Fix tool call ID conversion for Responses API (fc_ prefix)
//   - Ensure function_call_output has non-empty output field
//
// Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
package apicompat
|
|
|
|
import (
|
|
"encoding/json"
|
|
"testing"
|
|
|
|
"github.com/stretchr/testify/assert"
|
|
"github.com/stretchr/testify/require"
|
|
)
|
|
|
|
// ---------------------------------------------------------------------------
|
|
// AnthropicToResponses tests
|
|
// ---------------------------------------------------------------------------
|
|
|
|
func TestAnthropicToResponses_BasicText(t *testing.T) {
|
|
req := &AnthropicRequest{
|
|
Model: "gpt-5.2",
|
|
MaxTokens: 1024,
|
|
Stream: true,
|
|
Messages: []AnthropicMessage{
|
|
{Role: "user", Content: json.RawMessage(`"Hello"`)},
|
|
},
|
|
}
|
|
|
|
resp, err := AnthropicToResponses(req)
|
|
require.NoError(t, err)
|
|
assert.Equal(t, "gpt-5.2", resp.Model)
|
|
assert.True(t, resp.Stream)
|
|
assert.Equal(t, 1024, *resp.MaxOutputTokens)
|
|
assert.False(t, *resp.Store)
|
|
|
|
var items []ResponsesInputItem
|
|
require.NoError(t, json.Unmarshal(resp.Input, &items))
|
|
require.Len(t, items, 1)
|
|
assert.Equal(t, "user", items[0].Role)
|
|
}
|
|
|
|
func TestAnthropicToResponses_SystemPrompt(t *testing.T) {
|
|
t.Run("string", func(t *testing.T) {
|
|
req := &AnthropicRequest{
|
|
Model: "gpt-5.2",
|
|
MaxTokens: 100,
|
|
System: json.RawMessage(`"You are helpful."`),
|
|
Messages: []AnthropicMessage{{Role: "user", Content: json.RawMessage(`"Hi"`)}},
|
|
}
|
|
resp, err := AnthropicToResponses(req)
|
|
require.NoError(t, err)
|
|
|
|
var items []ResponsesInputItem
|
|
require.NoError(t, json.Unmarshal(resp.Input, &items))
|
|
require.Len(t, items, 2)
|
|
assert.Equal(t, "system", items[0].Role)
|
|
})
|
|
|
|
t.Run("array", func(t *testing.T) {
|
|
req := &AnthropicRequest{
|
|
Model: "gpt-5.2",
|
|
MaxTokens: 100,
|
|
System: json.RawMessage(`[{"type":"text","text":"Part 1"},{"type":"text","text":"Part 2"}]`),
|
|
Messages: []AnthropicMessage{{Role: "user", Content: json.RawMessage(`"Hi"`)}},
|
|
}
|
|
resp, err := AnthropicToResponses(req)
|
|
require.NoError(t, err)
|
|
|
|
var items []ResponsesInputItem
|
|
require.NoError(t, json.Unmarshal(resp.Input, &items))
|
|
require.Len(t, items, 2)
|
|
assert.Equal(t, "system", items[0].Role)
|
|
// System text should be joined with double newline.
|
|
var text string
|
|
require.NoError(t, json.Unmarshal(items[0].Content, &text))
|
|
assert.Equal(t, "Part 1\n\nPart 2", text)
|
|
})
|
|
}
|
|
|
|
func TestAnthropicToResponses_ToolUse(t *testing.T) {
|
|
req := &AnthropicRequest{
|
|
Model: "gpt-5.2",
|
|
MaxTokens: 1024,
|
|
Messages: []AnthropicMessage{
|
|
{Role: "user", Content: json.RawMessage(`"What is the weather?"`)},
|
|
{Role: "assistant", Content: json.RawMessage(`[{"type":"text","text":"Let me check."},{"type":"tool_use","id":"call_1","name":"get_weather","input":{"city":"NYC"}}]`)},
|
|
{Role: "user", Content: json.RawMessage(`[{"type":"tool_result","tool_use_id":"call_1","content":"Sunny, 72°F"}]`)},
|
|
},
|
|
Tools: []AnthropicTool{
|
|
{Name: "get_weather", Description: "Get weather", InputSchema: json.RawMessage(`{"type":"object","properties":{"city":{"type":"string"}}}`)},
|
|
},
|
|
}
|
|
|
|
resp, err := AnthropicToResponses(req)
|
|
require.NoError(t, err)
|
|
|
|
// Check tools
|
|
require.Len(t, resp.Tools, 1)
|
|
assert.Equal(t, "function", resp.Tools[0].Type)
|
|
assert.Equal(t, "get_weather", resp.Tools[0].Name)
|
|
|
|
// Check input items
|
|
var items []ResponsesInputItem
|
|
require.NoError(t, json.Unmarshal(resp.Input, &items))
|
|
// user + assistant + function_call + function_call_output = 4
|
|
require.Len(t, items, 4)
|
|
|
|
assert.Equal(t, "user", items[0].Role)
|
|
assert.Equal(t, "assistant", items[1].Role)
|
|
assert.Equal(t, "function_call", items[2].Type)
|
|
assert.Equal(t, "fc_call_1", items[2].CallID)
|
|
assert.Equal(t, "function_call_output", items[3].Type)
|
|
assert.Equal(t, "fc_call_1", items[3].CallID)
|
|
assert.Equal(t, "Sunny, 72°F", items[3].Output)
|
|
}
|
|
|
|
func TestAnthropicToResponses_ThinkingIgnored(t *testing.T) {
|
|
req := &AnthropicRequest{
|
|
Model: "gpt-5.2",
|
|
MaxTokens: 1024,
|
|
Messages: []AnthropicMessage{
|
|
{Role: "user", Content: json.RawMessage(`"Hello"`)},
|
|
{Role: "assistant", Content: json.RawMessage(`[{"type":"thinking","thinking":"deep thought"},{"type":"text","text":"Hi!"}]`)},
|
|
{Role: "user", Content: json.RawMessage(`"More"`)},
|
|
},
|
|
}
|
|
|
|
resp, err := AnthropicToResponses(req)
|
|
require.NoError(t, err)
|
|
|
|
var items []ResponsesInputItem
|
|
require.NoError(t, json.Unmarshal(resp.Input, &items))
|
|
// user + assistant(text only, thinking ignored) + user = 3
|
|
require.Len(t, items, 3)
|
|
assert.Equal(t, "assistant", items[1].Role)
|
|
// Assistant content should only have text, not thinking.
|
|
var parts []ResponsesContentPart
|
|
require.NoError(t, json.Unmarshal(items[1].Content, &parts))
|
|
require.Len(t, parts, 1)
|
|
assert.Equal(t, "output_text", parts[0].Type)
|
|
assert.Equal(t, "Hi!", parts[0].Text)
|
|
}
|
|
|
|
func TestAnthropicToResponses_MaxTokensFloor(t *testing.T) {
|
|
req := &AnthropicRequest{
|
|
Model: "gpt-5.2",
|
|
MaxTokens: 10, // below minMaxOutputTokens (128)
|
|
Messages: []AnthropicMessage{{Role: "user", Content: json.RawMessage(`"Hi"`)}},
|
|
}
|
|
|
|
resp, err := AnthropicToResponses(req)
|
|
require.NoError(t, err)
|
|
assert.Equal(t, 128, *resp.MaxOutputTokens)
|
|
}
|
|
|
|
// ---------------------------------------------------------------------------
|
|
// ResponsesToAnthropic (non-streaming) tests
|
|
// ---------------------------------------------------------------------------
|
|
|
|
func TestResponsesToAnthropic_TextOnly(t *testing.T) {
|
|
resp := &ResponsesResponse{
|
|
ID: "resp_123",
|
|
Model: "gpt-5.2",
|
|
Status: "completed",
|
|
Output: []ResponsesOutput{
|
|
{
|
|
Type: "message",
|
|
Content: []ResponsesContentPart{
|
|
{Type: "output_text", Text: "Hello there!"},
|
|
},
|
|
},
|
|
},
|
|
Usage: &ResponsesUsage{InputTokens: 10, OutputTokens: 5, TotalTokens: 15},
|
|
}
|
|
|
|
anth := ResponsesToAnthropic(resp, "claude-opus-4-6")
|
|
assert.Equal(t, "resp_123", anth.ID)
|
|
assert.Equal(t, "claude-opus-4-6", anth.Model)
|
|
assert.Equal(t, "end_turn", anth.StopReason)
|
|
require.Len(t, anth.Content, 1)
|
|
assert.Equal(t, "text", anth.Content[0].Type)
|
|
assert.Equal(t, "Hello there!", anth.Content[0].Text)
|
|
assert.Equal(t, 10, anth.Usage.InputTokens)
|
|
assert.Equal(t, 5, anth.Usage.OutputTokens)
|
|
}
|
|
|
|
func TestResponsesToAnthropic_ToolUse(t *testing.T) {
|
|
resp := &ResponsesResponse{
|
|
ID: "resp_456",
|
|
Model: "gpt-5.2",
|
|
Status: "completed",
|
|
Output: []ResponsesOutput{
|
|
{
|
|
Type: "message",
|
|
Content: []ResponsesContentPart{
|
|
{Type: "output_text", Text: "Let me check."},
|
|
},
|
|
},
|
|
{
|
|
Type: "function_call",
|
|
CallID: "call_1",
|
|
Name: "get_weather",
|
|
Arguments: `{"city":"NYC"}`,
|
|
},
|
|
},
|
|
}
|
|
|
|
anth := ResponsesToAnthropic(resp, "claude-opus-4-6")
|
|
assert.Equal(t, "tool_use", anth.StopReason)
|
|
require.Len(t, anth.Content, 2)
|
|
assert.Equal(t, "text", anth.Content[0].Type)
|
|
assert.Equal(t, "tool_use", anth.Content[1].Type)
|
|
assert.Equal(t, "call_1", anth.Content[1].ID)
|
|
assert.Equal(t, "get_weather", anth.Content[1].Name)
|
|
}
|
|
|
|
func TestResponsesToAnthropic_Reasoning(t *testing.T) {
|
|
resp := &ResponsesResponse{
|
|
ID: "resp_789",
|
|
Model: "gpt-5.2",
|
|
Status: "completed",
|
|
Output: []ResponsesOutput{
|
|
{
|
|
Type: "reasoning",
|
|
Summary: []ResponsesSummary{
|
|
{Type: "summary_text", Text: "Thinking about the answer..."},
|
|
},
|
|
},
|
|
{
|
|
Type: "message",
|
|
Content: []ResponsesContentPart{
|
|
{Type: "output_text", Text: "42"},
|
|
},
|
|
},
|
|
},
|
|
}
|
|
|
|
anth := ResponsesToAnthropic(resp, "claude-opus-4-6")
|
|
require.Len(t, anth.Content, 2)
|
|
assert.Equal(t, "thinking", anth.Content[0].Type)
|
|
assert.Equal(t, "Thinking about the answer...", anth.Content[0].Thinking)
|
|
assert.Equal(t, "text", anth.Content[1].Type)
|
|
assert.Equal(t, "42", anth.Content[1].Text)
|
|
}
|
|
|
|
func TestResponsesToAnthropic_Incomplete(t *testing.T) {
|
|
resp := &ResponsesResponse{
|
|
ID: "resp_inc",
|
|
Model: "gpt-5.2",
|
|
Status: "incomplete",
|
|
IncompleteDetails: &ResponsesIncompleteDetails{
|
|
Reason: "max_output_tokens",
|
|
},
|
|
Output: []ResponsesOutput{
|
|
{
|
|
Type: "message",
|
|
Content: []ResponsesContentPart{{Type: "output_text", Text: "Partial..."}},
|
|
},
|
|
},
|
|
}
|
|
|
|
anth := ResponsesToAnthropic(resp, "claude-opus-4-6")
|
|
assert.Equal(t, "max_tokens", anth.StopReason)
|
|
}
|
|
|
|
func TestResponsesToAnthropic_EmptyOutput(t *testing.T) {
|
|
resp := &ResponsesResponse{
|
|
ID: "resp_empty",
|
|
Model: "gpt-5.2",
|
|
Status: "completed",
|
|
Output: []ResponsesOutput{},
|
|
}
|
|
|
|
anth := ResponsesToAnthropic(resp, "claude-opus-4-6")
|
|
require.Len(t, anth.Content, 1)
|
|
assert.Equal(t, "text", anth.Content[0].Type)
|
|
assert.Equal(t, "", anth.Content[0].Text)
|
|
}
|
|
|
|
// ---------------------------------------------------------------------------
|
|
// Streaming: ResponsesEventToAnthropicEvents tests
|
|
// ---------------------------------------------------------------------------
|
|
|
|
func TestStreamingTextOnly(t *testing.T) {
|
|
state := NewResponsesEventToAnthropicState()
|
|
|
|
// 1. response.created
|
|
events := ResponsesEventToAnthropicEvents(&ResponsesStreamEvent{
|
|
Type: "response.created",
|
|
Response: &ResponsesResponse{
|
|
ID: "resp_1",
|
|
Model: "gpt-5.2",
|
|
},
|
|
}, state)
|
|
require.Len(t, events, 1)
|
|
assert.Equal(t, "message_start", events[0].Type)
|
|
|
|
// 2. output_item.added (message)
|
|
events = ResponsesEventToAnthropicEvents(&ResponsesStreamEvent{
|
|
Type: "response.output_item.added",
|
|
OutputIndex: 0,
|
|
Item: &ResponsesOutput{Type: "message"},
|
|
}, state)
|
|
assert.Len(t, events, 0) // message item doesn't emit events
|
|
|
|
// 3. text delta
|
|
events = ResponsesEventToAnthropicEvents(&ResponsesStreamEvent{
|
|
Type: "response.output_text.delta",
|
|
Delta: "Hello",
|
|
}, state)
|
|
require.Len(t, events, 2) // content_block_start + content_block_delta
|
|
assert.Equal(t, "content_block_start", events[0].Type)
|
|
assert.Equal(t, "text", events[0].ContentBlock.Type)
|
|
assert.Equal(t, "content_block_delta", events[1].Type)
|
|
assert.Equal(t, "text_delta", events[1].Delta.Type)
|
|
assert.Equal(t, "Hello", events[1].Delta.Text)
|
|
|
|
// 4. more text
|
|
events = ResponsesEventToAnthropicEvents(&ResponsesStreamEvent{
|
|
Type: "response.output_text.delta",
|
|
Delta: " world",
|
|
}, state)
|
|
require.Len(t, events, 1) // only delta, no new block start
|
|
assert.Equal(t, "content_block_delta", events[0].Type)
|
|
|
|
// 5. text done
|
|
events = ResponsesEventToAnthropicEvents(&ResponsesStreamEvent{
|
|
Type: "response.output_text.done",
|
|
}, state)
|
|
require.Len(t, events, 1)
|
|
assert.Equal(t, "content_block_stop", events[0].Type)
|
|
|
|
// 6. completed
|
|
events = ResponsesEventToAnthropicEvents(&ResponsesStreamEvent{
|
|
Type: "response.completed",
|
|
Response: &ResponsesResponse{
|
|
Status: "completed",
|
|
Usage: &ResponsesUsage{InputTokens: 10, OutputTokens: 5},
|
|
},
|
|
}, state)
|
|
require.Len(t, events, 2) // message_delta + message_stop
|
|
assert.Equal(t, "message_delta", events[0].Type)
|
|
assert.Equal(t, "end_turn", events[0].Delta.StopReason)
|
|
assert.Equal(t, 10, events[0].Usage.InputTokens)
|
|
assert.Equal(t, 5, events[0].Usage.OutputTokens)
|
|
assert.Equal(t, "message_stop", events[1].Type)
|
|
}
|
|
|
|
func TestStreamingToolCall(t *testing.T) {
|
|
state := NewResponsesEventToAnthropicState()
|
|
|
|
// 1. response.created
|
|
ResponsesEventToAnthropicEvents(&ResponsesStreamEvent{
|
|
Type: "response.created",
|
|
Response: &ResponsesResponse{ID: "resp_2", Model: "gpt-5.2"},
|
|
}, state)
|
|
|
|
// 2. function_call added
|
|
events := ResponsesEventToAnthropicEvents(&ResponsesStreamEvent{
|
|
Type: "response.output_item.added",
|
|
OutputIndex: 0,
|
|
Item: &ResponsesOutput{Type: "function_call", CallID: "call_1", Name: "get_weather"},
|
|
}, state)
|
|
require.Len(t, events, 1)
|
|
assert.Equal(t, "content_block_start", events[0].Type)
|
|
assert.Equal(t, "tool_use", events[0].ContentBlock.Type)
|
|
assert.Equal(t, "call_1", events[0].ContentBlock.ID)
|
|
|
|
// 3. arguments delta
|
|
events = ResponsesEventToAnthropicEvents(&ResponsesStreamEvent{
|
|
Type: "response.function_call_arguments.delta",
|
|
OutputIndex: 0,
|
|
Delta: `{"city":`,
|
|
}, state)
|
|
require.Len(t, events, 1)
|
|
assert.Equal(t, "content_block_delta", events[0].Type)
|
|
assert.Equal(t, "input_json_delta", events[0].Delta.Type)
|
|
assert.Equal(t, `{"city":`, events[0].Delta.PartialJSON)
|
|
|
|
// 4. arguments done
|
|
events = ResponsesEventToAnthropicEvents(&ResponsesStreamEvent{
|
|
Type: "response.function_call_arguments.done",
|
|
}, state)
|
|
require.Len(t, events, 1)
|
|
assert.Equal(t, "content_block_stop", events[0].Type)
|
|
|
|
// 5. completed with tool_calls
|
|
events = ResponsesEventToAnthropicEvents(&ResponsesStreamEvent{
|
|
Type: "response.completed",
|
|
Response: &ResponsesResponse{
|
|
Status: "completed",
|
|
Usage: &ResponsesUsage{InputTokens: 20, OutputTokens: 10},
|
|
},
|
|
}, state)
|
|
require.Len(t, events, 2)
|
|
assert.Equal(t, "tool_use", events[0].Delta.StopReason)
|
|
}
|
|
|
|
func TestStreamingReasoning(t *testing.T) {
|
|
state := NewResponsesEventToAnthropicState()
|
|
|
|
ResponsesEventToAnthropicEvents(&ResponsesStreamEvent{
|
|
Type: "response.created",
|
|
Response: &ResponsesResponse{ID: "resp_3", Model: "gpt-5.2"},
|
|
}, state)
|
|
|
|
// reasoning item added
|
|
events := ResponsesEventToAnthropicEvents(&ResponsesStreamEvent{
|
|
Type: "response.output_item.added",
|
|
OutputIndex: 0,
|
|
Item: &ResponsesOutput{Type: "reasoning"},
|
|
}, state)
|
|
require.Len(t, events, 1)
|
|
assert.Equal(t, "content_block_start", events[0].Type)
|
|
assert.Equal(t, "thinking", events[0].ContentBlock.Type)
|
|
|
|
// reasoning text delta
|
|
events = ResponsesEventToAnthropicEvents(&ResponsesStreamEvent{
|
|
Type: "response.reasoning_summary_text.delta",
|
|
OutputIndex: 0,
|
|
Delta: "Let me think...",
|
|
}, state)
|
|
require.Len(t, events, 1)
|
|
assert.Equal(t, "content_block_delta", events[0].Type)
|
|
assert.Equal(t, "thinking_delta", events[0].Delta.Type)
|
|
assert.Equal(t, "Let me think...", events[0].Delta.Thinking)
|
|
|
|
// reasoning done
|
|
events = ResponsesEventToAnthropicEvents(&ResponsesStreamEvent{
|
|
Type: "response.reasoning_summary_text.done",
|
|
}, state)
|
|
require.Len(t, events, 1)
|
|
assert.Equal(t, "content_block_stop", events[0].Type)
|
|
}
|
|
|
|
func TestStreamingIncomplete(t *testing.T) {
|
|
state := NewResponsesEventToAnthropicState()
|
|
|
|
ResponsesEventToAnthropicEvents(&ResponsesStreamEvent{
|
|
Type: "response.created",
|
|
Response: &ResponsesResponse{ID: "resp_4", Model: "gpt-5.2"},
|
|
}, state)
|
|
|
|
ResponsesEventToAnthropicEvents(&ResponsesStreamEvent{
|
|
Type: "response.output_text.delta",
|
|
Delta: "Partial output...",
|
|
}, state)
|
|
|
|
events := ResponsesEventToAnthropicEvents(&ResponsesStreamEvent{
|
|
Type: "response.incomplete",
|
|
Response: &ResponsesResponse{
|
|
Status: "incomplete",
|
|
IncompleteDetails: &ResponsesIncompleteDetails{Reason: "max_output_tokens"},
|
|
Usage: &ResponsesUsage{InputTokens: 100, OutputTokens: 4096},
|
|
},
|
|
}, state)
|
|
|
|
// Should close the text block + message_delta + message_stop
|
|
require.Len(t, events, 3)
|
|
assert.Equal(t, "content_block_stop", events[0].Type)
|
|
assert.Equal(t, "message_delta", events[1].Type)
|
|
assert.Equal(t, "max_tokens", events[1].Delta.StopReason)
|
|
assert.Equal(t, "message_stop", events[2].Type)
|
|
}
|
|
|
|
func TestFinalizeStream_NeverStarted(t *testing.T) {
|
|
state := NewResponsesEventToAnthropicState()
|
|
events := FinalizeResponsesAnthropicStream(state)
|
|
assert.Nil(t, events)
|
|
}
|
|
|
|
func TestFinalizeStream_AlreadyCompleted(t *testing.T) {
|
|
state := NewResponsesEventToAnthropicState()
|
|
state.MessageStartSent = true
|
|
state.MessageStopSent = true
|
|
events := FinalizeResponsesAnthropicStream(state)
|
|
assert.Nil(t, events)
|
|
}
|
|
|
|
func TestFinalizeStream_AbnormalTermination(t *testing.T) {
|
|
state := NewResponsesEventToAnthropicState()
|
|
|
|
// Simulate a stream that started but never completed
|
|
ResponsesEventToAnthropicEvents(&ResponsesStreamEvent{
|
|
Type: "response.created",
|
|
Response: &ResponsesResponse{ID: "resp_5", Model: "gpt-5.2"},
|
|
}, state)
|
|
|
|
ResponsesEventToAnthropicEvents(&ResponsesStreamEvent{
|
|
Type: "response.output_text.delta",
|
|
Delta: "Interrupted...",
|
|
}, state)
|
|
|
|
// Stream ends without response.completed
|
|
events := FinalizeResponsesAnthropicStream(state)
|
|
require.Len(t, events, 3) // content_block_stop + message_delta + message_stop
|
|
assert.Equal(t, "content_block_stop", events[0].Type)
|
|
assert.Equal(t, "message_delta", events[1].Type)
|
|
assert.Equal(t, "end_turn", events[1].Delta.StopReason)
|
|
assert.Equal(t, "message_stop", events[2].Type)
|
|
}
|
|
|
|
func TestStreamingEmptyResponse(t *testing.T) {
|
|
state := NewResponsesEventToAnthropicState()
|
|
|
|
ResponsesEventToAnthropicEvents(&ResponsesStreamEvent{
|
|
Type: "response.created",
|
|
Response: &ResponsesResponse{ID: "resp_6", Model: "gpt-5.2"},
|
|
}, state)
|
|
|
|
events := ResponsesEventToAnthropicEvents(&ResponsesStreamEvent{
|
|
Type: "response.completed",
|
|
Response: &ResponsesResponse{
|
|
Status: "completed",
|
|
Usage: &ResponsesUsage{InputTokens: 5, OutputTokens: 0},
|
|
},
|
|
}, state)
|
|
|
|
require.Len(t, events, 2) // message_delta + message_stop
|
|
assert.Equal(t, "message_delta", events[0].Type)
|
|
assert.Equal(t, "end_turn", events[0].Delta.StopReason)
|
|
}
|
|
|
|
func TestResponsesAnthropicEventToSSE(t *testing.T) {
|
|
evt := AnthropicStreamEvent{
|
|
Type: "message_start",
|
|
Message: &AnthropicResponse{
|
|
ID: "resp_1",
|
|
Type: "message",
|
|
Role: "assistant",
|
|
},
|
|
}
|
|
sse, err := ResponsesAnthropicEventToSSE(evt)
|
|
require.NoError(t, err)
|
|
assert.Contains(t, sse, "event: message_start\n")
|
|
assert.Contains(t, sse, "data: ")
|
|
assert.Contains(t, sse, `"resp_1"`)
|
|
}
|