mirror of
https://github.com/openclaw/openclaw.git
synced 2026-03-12 15:30:39 +00:00
* feat(macos): add chat model selector and thinking persistence UX * Chat UI: carry session model providers * Docs: add macOS model selector changelog * macOS: persist extended thinking levels * Chat UI: keep model picker state in sync * Chat UI tests: cover model selection races --------- Co-authored-by: Ubuntu <ubuntu@vps-90352893.vps.ovh.ca> Co-authored-by: Vincent Koc <vincentkoc@ieee.org>
1186 lines · 43 KiB · Swift
import OpenClawKit
|
|
import Foundation
|
|
import Testing
|
|
@testable import OpenClawChatUI
|
|
|
|
/// Builds a minimal history message payload: one text content part plus
/// role and timestamp, wrapped in `AnyCodable` as the transport expects.
private func chatTextMessage(role: String, text: String, timestamp: Double) -> AnyCodable {
    let payload: [String: Any] = [
        "role": role,
        "content": [["type": "text", "text": text]],
        "timestamp": timestamp,
    ]
    return AnyCodable(payload)
}
|
|
|
|
/// Factory for a chat-history payload used by most tests.
/// Defaults target the "main" session; `thinkingLevel` is always "off".
private func historyPayload(
    sessionKey: String = "main",
    sessionId: String? = "sess-main",
    messages: [AnyCodable] = []) -> OpenClawChatHistoryPayload
{
    OpenClawChatHistoryPayload(
        sessionKey: sessionKey,
        sessionId: sessionId,
        messages: messages,
        thinkingLevel: "off")
}
|
|
|
|
/// Builds a session entry with only `key` and `updatedAt` populated; every
/// other field is nil. Delegates to the model-aware overload (declared below
/// in this file) instead of duplicating the long memberwise initializer call —
/// the result is identical because that overload defaults `modelProvider` to
/// nil and we pass `model: nil`.
private func sessionEntry(key: String, updatedAt: Double) -> OpenClawChatSessionEntry {
    sessionEntry(key: key, updatedAt: updatedAt, model: nil)
}
|
|
|
|
/// Builds a session entry with `key`, `updatedAt`, and optional model
/// identity populated; every other field is nil. `modelProvider` defaults to
/// nil so callers that only care about the model string stay short.
private func sessionEntry(
    key: String,
    updatedAt: Double,
    model: String?,
    modelProvider: String? = nil) -> OpenClawChatSessionEntry
{
    OpenClawChatSessionEntry(
        key: key,
        kind: nil,
        displayName: nil,
        surface: nil,
        subject: nil,
        room: nil,
        space: nil,
        updatedAt: updatedAt,
        sessionId: nil,
        systemSent: nil,
        abortedLastRun: nil,
        thinkingLevel: nil,
        verboseLevel: nil,
        inputTokens: nil,
        outputTokens: nil,
        totalTokens: nil,
        modelProvider: modelProvider,
        model: model,
        contextTokens: nil)
}
|
|
|
|
/// Builds a selectable model choice; the provider defaults to "anthropic"
/// and the context window is left unspecified.
private func modelChoice(id: String, name: String, provider: String = "anthropic") -> OpenClawChatModelChoice {
    return OpenClawChatModelChoice(modelID: id, name: name, provider: provider, contextWindow: nil)
}
|
|
|
|
/// Constructs a scripted `TestChatTransport` plus a view model bound to it.
/// The view model is created on the main actor; both are returned so tests
/// can drive the transport and observe the view model independently.
private func makeViewModel(
    sessionKey: String = "main",
    historyResponses: [OpenClawChatHistoryPayload],
    sessionsResponses: [OpenClawChatSessionsListResponse] = [],
    modelResponses: [[OpenClawChatModelChoice]] = [],
    setSessionModelHook: (@Sendable (String?) async throws -> Void)? = nil,
    setSessionThinkingHook: (@Sendable (String) async throws -> Void)? = nil,
    initialThinkingLevel: String? = nil,
    onThinkingLevelChanged: (@MainActor @Sendable (String) -> Void)? = nil) async
    -> (TestChatTransport, OpenClawChatViewModel)
{
    let stubTransport = TestChatTransport(
        historyResponses: historyResponses,
        sessionsResponses: sessionsResponses,
        modelResponses: modelResponses,
        setSessionModelHook: setSessionModelHook,
        setSessionThinkingHook: setSessionThinkingHook)
    let viewModel = await MainActor.run {
        OpenClawChatViewModel(
            sessionKey: sessionKey,
            transport: stubTransport,
            initialThinkingLevel: initialThinkingLevel,
            onThinkingLevelChanged: onThinkingLevelChanged)
    }
    return (stubTransport, viewModel)
}
|
|
|
|
/// Kicks off `vm.load()` and blocks until the bootstrap completes: health
/// must be OK, and — when `sessionId` is provided — the view model must have
/// adopted that session id.
private func loadAndWaitBootstrap(
    vm: OpenClawChatViewModel,
    sessionId: String? = nil) async throws
{
    await MainActor.run { vm.load() }
    try await waitUntil("bootstrap") {
        await MainActor.run {
            guard vm.healthOK else { return false }
            guard let expected = sessionId else { return true }
            return vm.sessionId == expected
        }
    }
}
|
|
|
|
/// Sets the composer input on the main actor and triggers a send in the
/// same hop, so the view model sees the text and the send atomically.
private func sendUserMessage(_ vm: OpenClawChatViewModel, text: String = "hi") async {
    await MainActor.run {
        vm.input = text
        vm.send()
    }
}
|
|
|
|
/// Pushes an assistant-stream agent event carrying `text` for `runId`.
/// The timestamp is the current wall clock in milliseconds.
private func emitAssistantText(
    transport: TestChatTransport,
    runId: String,
    text: String,
    seq: Int = 1)
{
    let payload = OpenClawAgentEventPayload(
        runId: runId,
        seq: seq,
        stream: "assistant",
        ts: Int(Date().timeIntervalSince1970 * 1000),
        data: ["text": AnyCodable(text)])
    transport.emit(.agent(payload))
}
|
|
|
|
/// Pushes a tool-stream agent event announcing the start of a fixed demo
/// tool call ("demo" / "t1") for `runId`.
private func emitToolStart(
    transport: TestChatTransport,
    runId: String,
    seq: Int = 2)
{
    let payload = OpenClawAgentEventPayload(
        runId: runId,
        seq: seq,
        stream: "tool",
        ts: Int(Date().timeIntervalSince1970 * 1000),
        data: [
            "phase": AnyCodable("start"),
            "name": AnyCodable("demo"),
            "toolCallId": AnyCodable("t1"),
            "args": AnyCodable(["x": 1]),
        ])
    transport.emit(.agent(payload))
}
|
|
|
|
/// Pushes a "final" chat event for a run the view model did not initiate
/// (defaults to runId "other-run" on session "main").
private func emitExternalFinal(
    transport: TestChatTransport,
    runId: String = "other-run",
    sessionKey: String = "main")
{
    let payload = OpenClawChatEventPayload(
        runId: runId,
        sessionKey: sessionKey,
        state: "final",
        message: nil,
        errorMessage: nil)
    transport.emit(.chat(payload))
}
|
|
|
|
/// Main-actor accumulator for string values delivered via callbacks.
/// NOTE(review): not referenced anywhere in this chunk — presumably used by
/// tests outside the visible range; verify before removing.
@MainActor
private final class CallbackBox {
    var values: [String] = []
}
|
|
|
|
/// One-shot gate: `wait()` suspends until `open()` is called.
///
/// Fixed to tolerate either ordering: the original kept no state besides the
/// continuation, so calling `open()` before any waiter arrived resumed
/// nothing, and a later `wait()` suspended forever (hanging the test). An
/// `isOpen` latch now lets late waiters return immediately.
///
/// Supports a single waiter at a time, matching how the tests use it.
/// NOTE(review): a second concurrent `wait()` would overwrite and thus leak
/// the first checked continuation — confirm single-waiter usage if extended.
private actor AsyncGate {
    private var continuation: CheckedContinuation<Void, Never>?
    /// Latched true once `open()` has been called.
    private var isOpen = false

    /// Suspends the caller until the gate opens; returns immediately if the
    /// gate was already opened.
    func wait() async {
        guard !self.isOpen else { return }
        await withCheckedContinuation { continuation in
            // Defensive re-check inside the closure keeps the invariant
            // local even if isolation semantics of the wrapper change.
            if self.isOpen {
                continuation.resume()
            } else {
                self.continuation = continuation
            }
        }
    }

    /// Opens the gate and resumes the pending waiter (if any) exactly once.
    func open() {
        self.isOpen = true
        self.continuation?.resume()
        self.continuation = nil
    }
}
|
|
|
|
/// Actor-isolated mutable state for `TestChatTransport`: call counters and
/// recorded arguments. Keeping all mutation here is what makes the
/// transport's `@unchecked Sendable` conformance tolerable in tests.
private actor TestChatTransportState {
    // Call counters; each doubles as the index into the corresponding
    // scripted-response array on the transport.
    var historyCallCount: Int = 0
    var sessionsCallCount: Int = 0
    var modelsCallCount: Int = 0
    // Arguments recorded in call order, for test assertions.
    var sentRunIds: [String] = []
    var sentThinkingLevels: [String] = []
    var abortedRunIds: [String] = []
    var patchedModels: [String?] = []
    var patchedThinkingLevels: [String] = []
}
|
|
|
|
/// Scripted in-memory `OpenClawChatTransport` for driving the view model in
/// tests. History / sessions / models responses are served in call order;
/// once a scripted list is exhausted the last element is repeated (or an
/// empty default is synthesized). Events reach the view model through
/// `emit(_:)`, which yields into a single `AsyncStream`.
private final class TestChatTransport: @unchecked Sendable, OpenClawChatTransport {
    // All mutable bookkeeping lives on this actor; the class itself holds
    // only immutable configuration, hence `@unchecked Sendable`.
    private let state = TestChatTransportState()
    private let historyResponses: [OpenClawChatHistoryPayload]
    private let sessionsResponses: [OpenClawChatSessionsListResponse]
    private let modelResponses: [[OpenClawChatModelChoice]]
    // Optional hooks let tests inject delays/failures into the PATCH-style
    // calls (used to provoke races in the model/thinking selection tests).
    private let setSessionModelHook: (@Sendable (String?) async throws -> Void)?
    private let setSessionThinkingHook: (@Sendable (String) async throws -> Void)?

    // Event channel consumed by the view model via `events()`.
    private let stream: AsyncStream<OpenClawChatTransportEvent>
    private let continuation: AsyncStream<OpenClawChatTransportEvent>.Continuation

    init(
        historyResponses: [OpenClawChatHistoryPayload],
        sessionsResponses: [OpenClawChatSessionsListResponse] = [],
        modelResponses: [[OpenClawChatModelChoice]] = [],
        setSessionModelHook: (@Sendable (String?) async throws -> Void)? = nil,
        setSessionThinkingHook: (@Sendable (String) async throws -> Void)? = nil)
    {
        self.historyResponses = historyResponses
        self.sessionsResponses = sessionsResponses
        self.modelResponses = modelResponses
        self.setSessionModelHook = setSessionModelHook
        self.setSessionThinkingHook = setSessionThinkingHook
        // Capture the continuation out of the builder closure so `emit(_:)`
        // can push events later; the IUO is resolved synchronously here.
        var cont: AsyncStream<OpenClawChatTransportEvent>.Continuation!
        self.stream = AsyncStream { c in
            cont = c
        }
        self.continuation = cont
    }

    /// Single shared event stream; the view model is the only consumer.
    func events() -> AsyncStream<OpenClawChatTransportEvent> {
        self.stream
    }

    /// No-op: the stub does not track an active session.
    func setActiveSessionKey(_: String) async throws {}

    /// Serves the next scripted history payload (last one repeats).
    /// NOTE(review): the counter is read and incremented in two separate
    /// awaits, so concurrent callers could observe the same index; fine while
    /// tests request history sequentially — confirm if that changes.
    func requestHistory(sessionKey: String) async throws -> OpenClawChatHistoryPayload {
        let idx = await self.state.historyCallCount
        await self.state.setHistoryCallCount(idx + 1)
        if idx < self.historyResponses.count {
            return self.historyResponses[idx]
        }
        return self.historyResponses.last ?? OpenClawChatHistoryPayload(
            sessionKey: sessionKey,
            sessionId: nil,
            messages: [],
            thinkingLevel: "off")
    }

    /// Records the idempotency key and thinking level, then echoes the key
    /// back as the run id — tests use `lastSentRunId()` to address the run.
    func sendMessage(
        sessionKey _: String,
        message _: String,
        thinking: String,
        idempotencyKey: String,
        attachments _: [OpenClawChatAttachmentPayload]) async throws -> OpenClawChatSendResponse
    {
        await self.state.sentRunIdsAppend(idempotencyKey)
        await self.state.sentThinkingLevelsAppend(thinking)
        return OpenClawChatSendResponse(runId: idempotencyKey, status: "ok")
    }

    /// Records the aborted run id; no other effect.
    func abortRun(sessionKey _: String, runId: String) async throws {
        await self.state.abortedRunIdsAppend(runId)
    }

    /// Serves the next scripted sessions list (last one repeats; empty
    /// default when none were scripted). Same sequential-counter caveat as
    /// `requestHistory`.
    func listSessions(limit _: Int?) async throws -> OpenClawChatSessionsListResponse {
        let idx = await self.state.sessionsCallCount
        await self.state.setSessionsCallCount(idx + 1)
        if idx < self.sessionsResponses.count {
            return self.sessionsResponses[idx]
        }
        return self.sessionsResponses.last ?? OpenClawChatSessionsListResponse(
            ts: nil,
            path: nil,
            count: 0,
            defaults: nil,
            sessions: [])
    }

    /// Serves the next scripted model list (last one repeats; empty default).
    func listModels() async throws -> [OpenClawChatModelChoice] {
        let idx = await self.state.modelsCallCount
        await self.state.setModelsCallCount(idx + 1)
        if idx < self.modelResponses.count {
            return self.modelResponses[idx]
        }
        return self.modelResponses.last ?? []
    }

    /// Records the patched model, then runs the optional test hook so tests
    /// can delay or fail this call. Recording happens BEFORE the hook, so a
    /// hung hook still shows up in `patchedModels()`.
    func setSessionModel(sessionKey _: String, model: String?) async throws {
        await self.state.patchedModelsAppend(model)
        if let setSessionModelHook = self.setSessionModelHook {
            try await setSessionModelHook(model)
        }
    }

    /// Records the patched thinking level, then runs the optional test hook.
    func setSessionThinking(sessionKey _: String, thinkingLevel: String) async throws {
        await self.state.patchedThinkingLevelsAppend(thinkingLevel)
        if let setSessionThinkingHook = self.setSessionThinkingHook {
            try await setSessionThinkingHook(thinkingLevel)
        }
    }

    /// The stub is always healthy.
    func requestHealth(timeoutMs _: Int) async throws -> Bool {
        true
    }

    /// Pushes a transport event to the view model's event stream.
    func emit(_ evt: OpenClawChatTransportEvent) {
        self.continuation.yield(evt)
    }

    // MARK: - Test accessors (read-only views of recorded state)

    func lastSentRunId() async -> String? {
        let ids = await self.state.sentRunIds
        return ids.last
    }

    func abortedRunIds() async -> [String] {
        await self.state.abortedRunIds
    }

    func sentThinkingLevels() async -> [String] {
        await self.state.sentThinkingLevels
    }

    func patchedModels() async -> [String?] {
        await self.state.patchedModels
    }

    func patchedThinkingLevels() async -> [String] {
        await self.state.patchedThinkingLevels
    }
}
|
|
|
|
/// Actor-isolated mutators, kept as explicit methods because cross-actor
/// property setters cannot be awaited directly from the transport class.
extension TestChatTransportState {
    fileprivate func setHistoryCallCount(_ v: Int) {
        self.historyCallCount = v
    }

    fileprivate func setSessionsCallCount(_ v: Int) {
        self.sessionsCallCount = v
    }

    fileprivate func setModelsCallCount(_ v: Int) {
        self.modelsCallCount = v
    }

    fileprivate func sentRunIdsAppend(_ v: String) {
        self.sentRunIds.append(v)
    }

    fileprivate func abortedRunIdsAppend(_ v: String) {
        self.abortedRunIds.append(v)
    }

    fileprivate func sentThinkingLevelsAppend(_ v: String) {
        self.sentThinkingLevels.append(v)
    }

    fileprivate func patchedModelsAppend(_ v: String?) {
        self.patchedModels.append(v)
    }

    fileprivate func patchedThinkingLevelsAppend(_ v: String) {
        self.patchedThinkingLevels.append(v)
    }
}
|
|
|
|
@Suite struct ChatViewModelTests {
|
|
    /// Full happy path: send → streamed assistant text → tool-call start →
    /// "final" chat event clears pending state and the refreshed history
    /// contains the assistant message.
    /// NOTE(review): agent events are emitted with runId = sessionId rather
    /// than the actual send run id — presumably the VM does not key agent
    /// streams on its own pending run id; confirm against the VM.
    @Test func streamsAssistantAndClearsOnFinal() async throws {
        let sessionId = "sess-main"
        let history1 = historyPayload(sessionId: sessionId)
        // Second history response is what the post-final refresh returns.
        let history2 = historyPayload(
            sessionId: sessionId,
            messages: [
                chatTextMessage(
                    role: "assistant",
                    text: "final answer",
                    timestamp: Date().timeIntervalSince1970 * 1000),
            ])

        let (transport, vm) = await makeViewModel(historyResponses: [history1, history2])
        try await loadAndWaitBootstrap(vm: vm, sessionId: sessionId)
        await sendUserMessage(vm)
        try await waitUntil("pending run starts") { await MainActor.run { vm.pendingRunCount == 1 } }

        emitAssistantText(transport: transport, runId: sessionId, text: "streaming…")

        try await waitUntil("assistant stream visible") {
            await MainActor.run { vm.streamingAssistantText == "streaming…" }
        }

        emitToolStart(transport: transport, runId: sessionId)

        try await waitUntil("tool call pending") { await MainActor.run { vm.pendingToolCalls.count == 1 } }

        // Final event must carry the real run id (the idempotency key).
        let runId = try #require(await transport.lastSentRunId())
        transport.emit(
            .chat(
                OpenClawChatEventPayload(
                    runId: runId,
                    sessionKey: "main",
                    state: "final",
                    message: nil,
                    errorMessage: nil)))

        try await waitUntil("pending run clears") { await MainActor.run { vm.pendingRunCount == 0 } }
        try await waitUntil("history refresh") {
            await MainActor.run { vm.messages.contains(where: { $0.role == "assistant" }) }
        }
        #expect(await MainActor.run { vm.streamingAssistantText } == nil)
        #expect(await MainActor.run { vm.pendingToolCalls.isEmpty })
    }
|
|
|
|
    /// A final event addressed with the canonical session key form
    /// ("agent:main:main") must still resolve our own pending run on "main".
    @Test func acceptsCanonicalSessionKeyEventsForOwnPendingRun() async throws {
        let history1 = historyPayload()
        let history2 = historyPayload(
            messages: [
                chatTextMessage(
                    role: "assistant",
                    text: "from history",
                    timestamp: Date().timeIntervalSince1970 * 1000),
            ])

        let (transport, vm) = await makeViewModel(historyResponses: [history1, history2])
        try await loadAndWaitBootstrap(vm: vm)
        await sendUserMessage(vm)
        try await waitUntil("pending run starts") { await MainActor.run { vm.pendingRunCount == 1 } }

        let runId = try #require(await transport.lastSentRunId())
        transport.emit(
            .chat(
                OpenClawChatEventPayload(
                    runId: runId,
                    sessionKey: "agent:main:main",
                    state: "final",
                    message: nil,
                    errorMessage: nil)))

        try await waitUntil("pending run clears") { await MainActor.run { vm.pendingRunCount == 0 } }
        try await waitUntil("history refresh") {
            await MainActor.run { vm.messages.contains(where: { $0.role == "assistant" }) }
        }
    }
|
|
|
|
    /// A canonical-session-key final event for a run we did NOT start must
    /// still trigger a history refresh (message count 1 → 2).
    @Test func acceptsCanonicalSessionKeyEventsForExternalRuns() async throws {
        let now = Date().timeIntervalSince1970 * 1000
        let history1 = historyPayload(messages: [chatTextMessage(role: "user", text: "first", timestamp: now)])
        let history2 = historyPayload(
            messages: [
                chatTextMessage(role: "user", text: "first", timestamp: now),
                chatTextMessage(role: "assistant", text: "from external run", timestamp: now + 1),
            ])

        let (transport, vm) = await makeViewModel(historyResponses: [history1, history2])

        await MainActor.run { vm.load() }
        try await waitUntil("bootstrap history loaded") { await MainActor.run { vm.messages.count == 1 } }

        transport.emit(
            .chat(
                OpenClawChatEventPayload(
                    runId: "external-run",
                    sessionKey: "agent:main:main",
                    state: "final",
                    message: nil,
                    errorMessage: nil)))

        try await waitUntil("history refresh after canonical external event") {
            await MainActor.run { vm.messages.count == 2 }
        }
    }
|
|
|
|
    /// Refreshing history must not regenerate IDs for messages that already
    /// exist — the first message keeps the same id before and after refresh.
    @Test func preservesMessageIDsAcrossHistoryRefreshes() async throws {
        let now = Date().timeIntervalSince1970 * 1000
        let history1 = historyPayload(messages: [chatTextMessage(role: "user", text: "hello", timestamp: now)])
        let history2 = historyPayload(
            messages: [
                chatTextMessage(role: "user", text: "hello", timestamp: now),
                chatTextMessage(role: "assistant", text: "world", timestamp: now + 1),
            ])

        let (transport, vm) = await makeViewModel(historyResponses: [history1, history2])

        await MainActor.run { vm.load() }
        try await waitUntil("bootstrap history loaded") { await MainActor.run { vm.messages.count == 1 } }
        let firstIdBefore = try #require(await MainActor.run { vm.messages.first?.id })

        emitExternalFinal(transport: transport)

        try await waitUntil("history refresh") { await MainActor.run { vm.messages.count == 2 } }
        let firstIdAfter = try #require(await MainActor.run { vm.messages.first?.id })
        #expect(firstIdAfter == firstIdBefore)
    }
|
|
|
|
    /// A final event from an external run clears any in-progress streaming
    /// text and pending tool calls for the session.
    @Test func clearsStreamingOnExternalFinalEvent() async throws {
        let sessionId = "sess-main"
        let history = historyPayload(sessionId: sessionId)
        let (transport, vm) = await makeViewModel(historyResponses: [history, history])
        try await loadAndWaitBootstrap(vm: vm, sessionId: sessionId)

        emitAssistantText(transport: transport, runId: sessionId, text: "external stream")
        emitToolStart(transport: transport, runId: sessionId)

        try await waitUntil("streaming active") {
            await MainActor.run { vm.streamingAssistantText == "external stream" }
        }
        try await waitUntil("tool call pending") { await MainActor.run { vm.pendingToolCalls.count == 1 } }

        emitExternalFinal(transport: transport)

        try await waitUntil("streaming cleared") { await MainActor.run { vm.streamingAssistantText == nil } }
        #expect(await MainActor.run { vm.pendingToolCalls.isEmpty })
    }
|
|
|
|
    /// A sequence gap in the event stream resynchronizes silently: pending
    /// runs clear, history auto-refreshes, and no error is surfaced.
    @Test func seqGapClearsPendingRunsAndAutoRefreshesHistory() async throws {
        let now = Date().timeIntervalSince1970 * 1000
        let history1 = historyPayload()
        let history2 = historyPayload(messages: [chatTextMessage(role: "assistant", text: "resynced after gap", timestamp: now)])

        let (transport, vm) = await makeViewModel(historyResponses: [history1, history2])

        try await loadAndWaitBootstrap(vm: vm)

        await sendUserMessage(vm, text: "hello")
        try await waitUntil("pending run starts") { await MainActor.run { vm.pendingRunCount == 1 } }

        transport.emit(.seqGap)

        try await waitUntil("pending run clears on seqGap") {
            await MainActor.run { vm.pendingRunCount == 0 }
        }
        try await waitUntil("history refreshes on seqGap") {
            await MainActor.run { vm.messages.contains(where: { $0.role == "assistant" }) }
        }
        #expect(await MainActor.run { vm.errorText == nil })
    }
|
|
|
|
    /// Session picker ordering: "main" always first (even when stale), then
    /// recent sessions by recency; sessions older than ~24h are dropped.
    @Test func sessionChoicesPreferMainAndRecent() async throws {
        let now = Date().timeIntervalSince1970 * 1000
        let recent = now - (2 * 60 * 60 * 1000)       // 2h ago
        let recentOlder = now - (5 * 60 * 60 * 1000)  // 5h ago
        let stale = now - (26 * 60 * 60 * 1000)       // 26h ago — beyond cutoff
        let history = historyPayload()
        let sessions = OpenClawChatSessionsListResponse(
            ts: now,
            path: nil,
            count: 4,
            defaults: nil,
            sessions: [
                sessionEntry(key: "recent-1", updatedAt: recent),
                sessionEntry(key: "main", updatedAt: stale),
                sessionEntry(key: "recent-2", updatedAt: recentOlder),
                sessionEntry(key: "old-1", updatedAt: stale),
            ])

        let (_, vm) = await makeViewModel(historyResponses: [history], sessionsResponses: [sessions])
        await MainActor.run { vm.load() }
        try await waitUntil("sessions loaded") { await MainActor.run { !vm.sessions.isEmpty } }

        let keys = await MainActor.run { vm.sessionChoices.map(\.key) }
        #expect(keys == ["main", "recent-1", "recent-2"])
    }
|
|
|
|
    /// The currently-active session key appears in the picker even when the
    /// server's session list does not include it.
    @Test func sessionChoicesIncludeCurrentWhenMissing() async throws {
        let now = Date().timeIntervalSince1970 * 1000
        let recent = now - (30 * 60 * 1000)
        let history = historyPayload(sessionKey: "custom", sessionId: "sess-custom")
        let sessions = OpenClawChatSessionsListResponse(
            ts: now,
            path: nil,
            count: 1,
            defaults: nil,
            sessions: [
                sessionEntry(key: "main", updatedAt: recent),
            ])

        let (_, vm) = await makeViewModel(
            sessionKey: "custom",
            historyResponses: [history],
            sessionsResponses: [sessions])
        await MainActor.run { vm.load() }
        try await waitUntil("sessions loaded") { await MainActor.run { !vm.sessions.isEmpty } }

        let keys = await MainActor.run { vm.sessionChoices.map(\.key) }
        #expect(keys == ["main", "custom"])
    }
|
|
|
|
    /// Bootstrap wires the picker from server state: selection comes from the
    /// session's model, the default label from the server-wide default.
    @Test func bootstrapsModelSelectionFromSessionAndDefaults() async throws {
        let now = Date().timeIntervalSince1970 * 1000
        let history = historyPayload()
        let sessions = OpenClawChatSessionsListResponse(
            ts: now,
            path: nil,
            count: 1,
            defaults: OpenClawChatSessionsDefaults(model: "openai/gpt-4.1-mini", contextTokens: nil),
            sessions: [
                sessionEntry(key: "main", updatedAt: now, model: "anthropic/claude-opus-4-6"),
            ])
        let models = [
            modelChoice(id: "anthropic/claude-opus-4-6", name: "Claude Opus 4.6"),
            modelChoice(id: "openai/gpt-4.1-mini", name: "GPT-4.1 mini", provider: "openai"),
        ]

        let (_, vm) = await makeViewModel(
            historyResponses: [history],
            sessionsResponses: [sessions],
            modelResponses: [models])

        try await loadAndWaitBootstrap(vm: vm)

        #expect(await MainActor.run { vm.showsModelPicker })
        #expect(await MainActor.run { vm.modelSelectionID } == "anthropic/claude-opus-4-6")
        #expect(await MainActor.run { vm.defaultModelLabel } == "Default: openai/gpt-4.1-mini")
    }
|
|
|
|
    /// Choosing the "default" picker entry clears the session override by
    /// patching a nil model, and the local selection follows.
    @Test func selectingDefaultModelPatchesNilAndUpdatesSelection() async throws {
        let now = Date().timeIntervalSince1970 * 1000
        let history = historyPayload()
        let sessions = OpenClawChatSessionsListResponse(
            ts: now,
            path: nil,
            count: 1,
            defaults: OpenClawChatSessionsDefaults(model: "openai/gpt-4.1-mini", contextTokens: nil),
            sessions: [
                sessionEntry(key: "main", updatedAt: now, model: "anthropic/claude-opus-4-6"),
            ])
        let models = [
            modelChoice(id: "anthropic/claude-opus-4-6", name: "Claude Opus 4.6"),
            modelChoice(id: "openai/gpt-4.1-mini", name: "GPT-4.1 mini", provider: "openai"),
        ]

        let (transport, vm) = await makeViewModel(
            historyResponses: [history],
            sessionsResponses: [sessions],
            modelResponses: [models])

        try await loadAndWaitBootstrap(vm: vm)

        await MainActor.run { vm.selectModel(OpenClawChatViewModel.defaultModelSelectionID) }

        try await waitUntil("session model patched") {
            let patched = await transport.patchedModels()
            return patched == [nil]
        }

        #expect(await MainActor.run { vm.modelSelectionID } == OpenClawChatViewModel.defaultModelSelectionID)
    }
|
|
|
|
    /// When two providers expose the same model id, both the bootstrap
    /// selection and the outgoing patch must stay provider-qualified.
    @Test func selectingProviderQualifiedModelDisambiguatesDuplicateModelIDs() async throws {
        let now = Date().timeIntervalSince1970 * 1000
        let history = historyPayload()
        let sessions = OpenClawChatSessionsListResponse(
            ts: now,
            path: nil,
            count: 1,
            defaults: OpenClawChatSessionsDefaults(model: "openrouter/gpt-4.1-mini", contextTokens: nil),
            sessions: [
                sessionEntry(key: "main", updatedAt: now, model: "gpt-4.1-mini", modelProvider: "openrouter"),
            ])
        // Same model id under two different providers.
        let models = [
            modelChoice(id: "gpt-4.1-mini", name: "GPT-4.1 mini", provider: "openai"),
            modelChoice(id: "gpt-4.1-mini", name: "GPT-4.1 mini", provider: "openrouter"),
        ]

        let (transport, vm) = await makeViewModel(
            historyResponses: [history],
            sessionsResponses: [sessions],
            modelResponses: [models])

        try await loadAndWaitBootstrap(vm: vm)

        #expect(await MainActor.run { vm.modelSelectionID } == "openrouter/gpt-4.1-mini")

        await MainActor.run { vm.selectModel("openai/gpt-4.1-mini") }

        try await waitUntil("provider-qualified model patched") {
            let patched = await transport.patchedModels()
            return patched == ["openai/gpt-4.1-mini"]
        }
    }
|
|
|
|
    /// Model ids that themselves contain a slash (e.g. gateway-routed
    /// "openai/gpt-5.4") must keep the full provider-qualified form when
    /// selected and patched, not get truncated at the first slash.
    @Test func slashModelIDsStayProviderQualifiedInSelectionAndPatch() async throws {
        let now = Date().timeIntervalSince1970 * 1000
        let history = historyPayload()
        let sessions = OpenClawChatSessionsListResponse(
            ts: now,
            path: nil,
            count: 1,
            defaults: nil,
            sessions: [
                sessionEntry(key: "main", updatedAt: now, model: nil),
            ])
        let models = [
            modelChoice(
                id: "openai/gpt-5.4",
                name: "GPT-5.4 via Vercel AI Gateway",
                provider: "vercel-ai-gateway"),
        ]

        let (transport, vm) = await makeViewModel(
            historyResponses: [history],
            sessionsResponses: [sessions],
            modelResponses: [models])

        try await loadAndWaitBootstrap(vm: vm)

        await MainActor.run { vm.selectModel("vercel-ai-gateway/openai/gpt-5.4") }

        try await waitUntil("slash model patched with provider-qualified ref") {
            let patched = await transport.patchedModels()
            return patched == ["vercel-ai-gateway/openai/gpt-5.4"]
        }
    }
|
|
|
|
    /// Race: a slow first patch must not clobber a newer selection when it
    /// finally completes — the latest selection wins.
    @Test func staleModelPatchCompletionsDoNotOverwriteNewerSelection() async throws {
        let now = Date().timeIntervalSince1970 * 1000
        let history = historyPayload()
        let sessions = OpenClawChatSessionsListResponse(
            ts: now,
            path: nil,
            count: 1,
            defaults: nil,
            sessions: [
                sessionEntry(key: "main", updatedAt: now, model: nil),
            ])
        let models = [
            modelChoice(id: "gpt-5.4", name: "GPT-5.4", provider: "openai"),
            modelChoice(id: "gpt-5.4-pro", name: "GPT-5.4 Pro", provider: "openai"),
        ]

        let (transport, vm) = await makeViewModel(
            historyResponses: [history],
            sessionsResponses: [sessions],
            modelResponses: [models],
            // Delay only the FIRST selection so it completes after the second.
            setSessionModelHook: { model in
                if model == "openai/gpt-5.4" {
                    try await Task.sleep(for: .milliseconds(200))
                }
            })

        try await loadAndWaitBootstrap(vm: vm)

        await MainActor.run {
            vm.selectModel("openai/gpt-5.4")
            vm.selectModel("openai/gpt-5.4-pro")
        }

        try await waitUntil("two model patches complete") {
            let patched = await transport.patchedModels()
            return patched == ["openai/gpt-5.4", "openai/gpt-5.4-pro"]
        }

        #expect(await MainActor.run { vm.modelSelectionID } == "openai/gpt-5.4-pro")
        #expect(await MainActor.run { vm.sessions.first(where: { $0.key == "main" })?.model } == "openai/gpt-5.4-pro")
    }
|
|
|
|
    /// While a model patch is blocked in flight, send() must wait (isSending
    /// with no run dispatched) and only dispatch once the patch completes.
    /// The thinking level the send carries is the one captured at send time
    /// ("off"), even though the user switched to "high" while blocked.
    @Test func sendWaitsForInFlightModelPatchToFinish() async throws {
        let now = Date().timeIntervalSince1970 * 1000
        let history = historyPayload()
        let sessions = OpenClawChatSessionsListResponse(
            ts: now,
            path: nil,
            count: 1,
            defaults: nil,
            sessions: [
                sessionEntry(key: "main", updatedAt: now, model: nil),
            ])
        let models = [
            modelChoice(id: "gpt-5.4", name: "GPT-5.4", provider: "openai"),
        ]
        let gate = AsyncGate()

        let (transport, vm) = await makeViewModel(
            historyResponses: [history],
            sessionsResponses: [sessions],
            modelResponses: [models],
            // Hold the patch open until the test releases the gate.
            setSessionModelHook: { model in
                if model == "openai/gpt-5.4" {
                    await gate.wait()
                }
            })

        try await loadAndWaitBootstrap(vm: vm)

        await MainActor.run { vm.selectModel("openai/gpt-5.4") }
        try await waitUntil("model patch started") {
            let patched = await transport.patchedModels()
            return patched == ["openai/gpt-5.4"]
        }

        await sendUserMessage(vm, text: "hello")
        try await waitUntil("send entered waiting state") {
            await MainActor.run { vm.isSending }
        }
        // Nothing dispatched yet — send is parked behind the patch.
        #expect(await transport.lastSentRunId() == nil)

        await MainActor.run { vm.selectThinkingLevel("high") }
        try await waitUntil("thinking level changed while send is blocked") {
            await MainActor.run { vm.thinkingLevel == "high" }
        }

        await gate.open()

        try await waitUntil("send released after model patch") {
            await transport.lastSentRunId() != nil
        }
        #expect(await transport.sentThinkingLevels() == ["off"])
    }
|
|
|
|
    /// The latest selection fails fast while an older patch is still in
    /// flight; once the older one completes it becomes the effective model,
    /// and the failed selection is NOT re-patched afterwards.
    @Test func failedLatestModelSelectionDoesNotReplayAfterOlderCompletionFinishes() async throws {
        let now = Date().timeIntervalSince1970 * 1000
        let history = historyPayload()
        let sessions = OpenClawChatSessionsListResponse(
            ts: now,
            path: nil,
            count: 1,
            defaults: nil,
            sessions: [
                sessionEntry(key: "main", updatedAt: now, model: nil),
            ])
        let models = [
            modelChoice(id: "gpt-5.4", name: "GPT-5.4", provider: "openai"),
            modelChoice(id: "gpt-5.4-pro", name: "GPT-5.4 Pro", provider: "openai"),
        ]

        let (transport, vm) = await makeViewModel(
            historyResponses: [history],
            sessionsResponses: [sessions],
            modelResponses: [models],
            // First patch: slow success. Second patch: immediate failure.
            setSessionModelHook: { model in
                if model == "openai/gpt-5.4" {
                    try await Task.sleep(for: .milliseconds(200))
                    return
                }
                if model == "openai/gpt-5.4-pro" {
                    throw NSError(domain: "test", code: 1, userInfo: [NSLocalizedDescriptionKey: "boom"])
                }
            })

        try await loadAndWaitBootstrap(vm: vm)

        await MainActor.run {
            vm.selectModel("openai/gpt-5.4")
            vm.selectModel("openai/gpt-5.4-pro")
        }

        try await waitUntil("older model completion wins after latest failure") {
            await MainActor.run { vm.sessions.first(where: { $0.key == "main" })?.model == "openai/gpt-5.4" }
        }

        #expect(await MainActor.run { vm.modelSelectionID } == "openai/gpt-5.4")
        #expect(await MainActor.run { vm.sessions.first(where: { $0.key == "main" })?.model } == "openai/gpt-5.4")
        // Exactly two patches: no replay of the failed pro selection.
        #expect(await transport.patchedModels() == ["openai/gpt-5.4", "openai/gpt-5.4-pro"])
    }
|
|
|
|
    /// Variant where the earlier patch SUCCEEDS before the later one fails:
    /// the failure must roll the UI back to the earlier successful model
    /// (split model/provider fields on the session) without re-patching.
    @Test func failedLatestModelSelectionRestoresEarlierSuccessWithoutReplay() async throws {
        let now = Date().timeIntervalSince1970 * 1000
        let history = historyPayload()
        let sessions = OpenClawChatSessionsListResponse(
            ts: now,
            path: nil,
            count: 1,
            defaults: nil,
            sessions: [
                sessionEntry(key: "main", updatedAt: now, model: nil),
            ])
        let models = [
            modelChoice(id: "gpt-5.4", name: "GPT-5.4", provider: "openai"),
            modelChoice(id: "gpt-5.4-pro", name: "GPT-5.4 Pro", provider: "openai"),
        ]

        let (transport, vm) = await makeViewModel(
            historyResponses: [history],
            sessionsResponses: [sessions],
            modelResponses: [models],
            // First patch completes at ~100ms; second fails at ~200ms.
            setSessionModelHook: { model in
                if model == "openai/gpt-5.4" {
                    try await Task.sleep(for: .milliseconds(100))
                    return
                }
                if model == "openai/gpt-5.4-pro" {
                    try await Task.sleep(for: .milliseconds(200))
                    throw NSError(domain: "test", code: 1, userInfo: [NSLocalizedDescriptionKey: "boom"])
                }
            })

        try await loadAndWaitBootstrap(vm: vm)

        await MainActor.run {
            vm.selectModel("openai/gpt-5.4")
            vm.selectModel("openai/gpt-5.4-pro")
        }

        try await waitUntil("latest failure restores prior successful model") {
            await MainActor.run {
                vm.modelSelectionID == "openai/gpt-5.4" &&
                    vm.sessions.first(where: { $0.key == "main" })?.model == "gpt-5.4" &&
                    vm.sessions.first(where: { $0.key == "main" })?.modelProvider == "openai"
            }
        }

        #expect(await transport.patchedModels() == ["openai/gpt-5.4", "openai/gpt-5.4-pro"])
    }
|
|
|
|
    /// Switching sessions while a model patch for the OLD session is still in
    /// flight: the late completion must not bleed into the new session's
    /// selection (which stays at the default, with no model on "other").
    @Test func switchingSessionsIgnoresLateModelPatchCompletionFromPreviousSession() async throws {
        let now = Date().timeIntervalSince1970 * 1000
        let sessions = OpenClawChatSessionsListResponse(
            ts: now,
            path: nil,
            count: 2,
            defaults: nil,
            sessions: [
                sessionEntry(key: "main", updatedAt: now, model: nil),
                sessionEntry(key: "other", updatedAt: now - 1000, model: nil),
            ])
        let models = [
            modelChoice(id: "gpt-5.4", name: "GPT-5.4", provider: "openai"),
        ]

        let (transport, vm) = await makeViewModel(
            historyResponses: [
                historyPayload(sessionKey: "main", sessionId: "sess-main"),
                historyPayload(sessionKey: "other", sessionId: "sess-other"),
            ],
            sessionsResponses: [sessions, sessions],
            modelResponses: [models, models],
            // Keep the main-session patch in flight across the switch.
            setSessionModelHook: { model in
                if model == "openai/gpt-5.4" {
                    try await Task.sleep(for: .milliseconds(200))
                }
            })

        try await loadAndWaitBootstrap(vm: vm, sessionId: "sess-main")

        await MainActor.run { vm.selectModel("openai/gpt-5.4") }
        await MainActor.run { vm.switchSession(to: "other") }

        try await waitUntil("switched sessions") {
            await MainActor.run { vm.sessionKey == "other" && vm.sessionId == "sess-other" }
        }
        try await waitUntil("late model patch finished") {
            let patched = await transport.patchedModels()
            return patched == ["openai/gpt-5.4"]
        }

        #expect(await MainActor.run { vm.modelSelectionID } == OpenClawChatViewModel.defaultModelSelectionID)
        #expect(await MainActor.run { vm.sessions.first(where: { $0.key == "other" })?.model } == nil)
    }
|
|
|
|
@Test func lateModelCompletionDoesNotReplayCurrentSessionSelectionIntoPreviousSession() async throws {
    let nowMs = Date().timeIntervalSince1970 * 1000
    // Before any selection, neither session carries a model override.
    let initialSessions = OpenClawChatSessionsListResponse(
        ts: nowMs,
        path: nil,
        count: 2,
        defaults: nil,
        sessions: [
            sessionEntry(key: "main", updatedAt: nowMs, model: nil),
            sessionEntry(key: "other", updatedAt: nowMs - 1000, model: nil),
        ])
    // After the user picks a model while on "other", the server lists it there.
    let sessionsAfterOtherSelection = OpenClawChatSessionsListResponse(
        ts: nowMs,
        path: nil,
        count: 2,
        defaults: nil,
        sessions: [
            sessionEntry(key: "main", updatedAt: nowMs, model: nil),
            sessionEntry(key: "other", updatedAt: nowMs - 1000, model: "openai/gpt-5.4-pro"),
        ])
    let modelCatalog = [
        modelChoice(id: "gpt-5.4", name: "GPT-5.4", provider: "openai"),
        modelChoice(id: "gpt-5.4-pro", name: "GPT-5.4 Pro", provider: "openai"),
    ]

    let (transport, viewModel) = await makeViewModel(
        historyResponses: [
            historyPayload(sessionKey: "main", sessionId: "sess-main"),
            historyPayload(sessionKey: "other", sessionId: "sess-other"),
            historyPayload(sessionKey: "main", sessionId: "sess-main"),
        ],
        sessionsResponses: [initialSessions, initialSessions, sessionsAfterOtherSelection],
        modelResponses: [modelCatalog, modelCatalog, modelCatalog],
        setSessionModelHook: { patchedModel in
            // Only the first patch (issued while on "main") is slow.
            if patchedModel == "openai/gpt-5.4" {
                try await Task.sleep(for: .milliseconds(200))
            }
        })

    try await loadAndWaitBootstrap(vm: viewModel, sessionId: "sess-main")

    // Start the slow patch on "main", then switch away while it is pending.
    await MainActor.run { viewModel.selectModel("openai/gpt-5.4") }
    await MainActor.run { viewModel.switchSession(to: "other") }
    try await waitUntil("switched to other session") {
        await MainActor.run {
            viewModel.sessionKey == "other" && viewModel.sessionId == "sess-other"
        }
    }

    // Pick a different model for "other", then return to "main" once both
    // patches have reached the transport.
    await MainActor.run { viewModel.selectModel("openai/gpt-5.4-pro") }
    try await waitUntil("both model patches issued") {
        await transport.patchedModels() == ["openai/gpt-5.4", "openai/gpt-5.4-pro"]
    }
    await MainActor.run { viewModel.switchSession(to: "main") }
    try await waitUntil("switched back to main session") {
        await MainActor.run {
            viewModel.sessionKey == "main" && viewModel.sessionId == "sess-main"
        }
    }

    try await waitUntil("late model completion updates only the original session") {
        await MainActor.run {
            viewModel.sessions.first(where: { $0.key == "main" })?.model == "openai/gpt-5.4"
        }
    }

    // The slow completion lands on "main" only; the selection made on
    // "other" is neither replayed onto "main" nor overwritten.
    #expect(await MainActor.run { viewModel.modelSelectionID } == "openai/gpt-5.4")
    #expect(await MainActor.run { viewModel.sessions.first(where: { $0.key == "main" })?.model } == "openai/gpt-5.4")
    #expect(await MainActor.run { viewModel.sessions.first(where: { $0.key == "other" })?.model } == "openai/gpt-5.4-pro")
    #expect(await transport.patchedModels() == ["openai/gpt-5.4", "openai/gpt-5.4-pro"])
}

@Test func explicitThinkingLevelWinsOverHistoryAndPersistsChanges() async throws {
    // Collects every value delivered through the host callback.
    let callbackState = await MainActor.run { CallbackBox() }

    // History reports "off" (the historyPayload default), but an explicitly
    // supplied initial level must take precedence.
    let (transport, viewModel) = await makeViewModel(
        historyResponses: [historyPayload()],
        initialThinkingLevel: "high",
        onThinkingLevelChanged: { level in
            callbackState.values.append(level)
        })

    try await loadAndWaitBootstrap(vm: viewModel, sessionId: "sess-main")
    #expect(await MainActor.run { viewModel.thinkingLevel } == "high")

    // Changing the level patches the session and notifies the host callback.
    await MainActor.run { viewModel.selectThinkingLevel("medium") }

    try await waitUntil("thinking level patched") {
        await transport.patchedThinkingLevels() == ["medium"]
    }

    #expect(await MainActor.run { viewModel.thinkingLevel } == "medium")
    #expect(await MainActor.run { callbackState.values } == ["medium"])
}

@Test func serverProvidedThinkingLevelsOutsideMenuArePreservedForSend() async throws {
    // "xhigh" is not one of the picker's menu entries, but the server may
    // still report it; the view model must carry it through verbatim.
    let history = OpenClawChatHistoryPayload(
        sessionKey: "main",
        sessionId: "sess-main",
        messages: [],
        thinkingLevel: "xhigh")

    let (transport, viewModel) = await makeViewModel(historyResponses: [history])

    try await loadAndWaitBootstrap(vm: viewModel, sessionId: "sess-main")
    #expect(await MainActor.run { viewModel.thinkingLevel } == "xhigh")

    // A send must use the preserved level unchanged.
    await sendUserMessage(viewModel, text: "hello")
    try await waitUntil("send uses preserved thinking level") {
        await transport.sentThinkingLevels() == ["xhigh"]
    }
}

@Test func staleThinkingPatchCompletionReappliesLatestSelection() async throws {
    // Only the "medium" patch is slow, so the later "high" patch completes
    // first and "medium" finishes stale.
    let (transport, viewModel) = await makeViewModel(
        historyResponses: [historyPayload()],
        setSessionThinkingHook: { level in
            if level == "medium" {
                try await Task.sleep(for: .milliseconds(200))
            }
        })

    try await loadAndWaitBootstrap(vm: viewModel, sessionId: "sess-main")

    // Two back-to-back selections within one main-actor hop: the first
    // patch is still in flight when the second is requested.
    await MainActor.run {
        viewModel.selectThinkingLevel("medium")
        viewModel.selectThinkingLevel("high")
    }

    // When the stale "medium" completion lands, the latest selection is
    // replayed, so "high" is patched a second time.
    try await waitUntil("thinking patch replayed latest selection") {
        await transport.patchedThinkingLevels() == ["medium", "high", "high"]
    }

    #expect(await MainActor.run { viewModel.thinkingLevel } == "high")
}

@Test func clearsStreamingOnExternalErrorEvent() async throws {
    let sessionId = "sess-main"
    let history = historyPayload(sessionId: sessionId)
    let (transport, viewModel) = await makeViewModel(historyResponses: [history, history])
    try await loadAndWaitBootstrap(vm: viewModel, sessionId: sessionId)

    // Begin an externally driven assistant stream.
    emitAssistantText(transport: transport, runId: sessionId, text: "external stream")
    try await waitUntil("streaming active") {
        await MainActor.run { viewModel.streamingAssistantText == "external stream" }
    }

    // An error event from a different run on the same session key must still
    // tear down the in-progress streaming text.
    transport.emit(
        .chat(
            OpenClawChatEventPayload(
                runId: "other-run",
                sessionKey: "main",
                state: "error",
                message: nil,
                errorMessage: "boom")))

    try await waitUntil("streaming cleared") {
        await MainActor.run { viewModel.streamingAssistantText == nil }
    }
}

@Test func stripsInboundMetadataFromHistoryMessages() async throws {
    // A user message whose text is wrapped in the untrusted-metadata
    // preamble that companion clients prepend.
    let history = OpenClawChatHistoryPayload(
        sessionKey: "main",
        sessionId: "sess-main",
        messages: [
            AnyCodable([
                "role": "user",
                "content": [["type": "text", "text": """
                Conversation info (untrusted metadata):
                ```json
                { \"sender\": \"openclaw-ios\" }
                ```

                Hello?
                """]],
                "timestamp": Date().timeIntervalSince1970 * 1000,
            ]),
        ],
        thinkingLevel: "off")
    let transport = TestChatTransport(historyResponses: [history])
    let viewModel = await MainActor.run { OpenClawChatViewModel(sessionKey: "main", transport: transport) }

    await MainActor.run { viewModel.load() }
    try await waitUntil("history loaded") { await MainActor.run { !viewModel.messages.isEmpty } }

    // Only the trailing human text should survive sanitization.
    let sanitizedText = await MainActor.run { viewModel.messages.first?.content.first?.text }
    #expect(sanitizedText == "Hello?")
}

@Test func abortRequestsDoNotClearPendingUntilAbortedEvent() async throws {
    let sessionId = "sess-main"
    let history = historyPayload(sessionId: sessionId)
    let (transport, viewModel) = await makeViewModel(historyResponses: [history, history])
    try await loadAndWaitBootstrap(vm: viewModel, sessionId: sessionId)

    // Start a run, then abort it once the transport has seen the send.
    await sendUserMessage(viewModel)
    try await waitUntil("pending run starts") { await MainActor.run { viewModel.pendingRunCount == 1 } }

    let runId = try #require(await transport.lastSentRunId())
    await MainActor.run { viewModel.abort() }

    try await waitUntil("abortRun called") {
        await transport.abortedRunIds() == [runId]
    }

    // Pending remains until the gateway broadcasts an aborted/final chat event.
    #expect(await MainActor.run { viewModel.pendingRunCount } == 1)

    transport.emit(
        .chat(
            OpenClawChatEventPayload(
                runId: runId,
                sessionKey: "main",
                state: "aborted",
                message: nil,
                errorMessage: nil)))

    try await waitUntil("pending run clears") { await MainActor.run { viewModel.pendingRunCount == 0 } }
}
}
|