macOS: add chat model selector and persist thinking (#42314)

* feat(macos): add chat model selector and thinking persistence UX

* Chat UI: carry session model providers

* Docs: add macOS model selector changelog

* macOS: persist extended thinking levels

* Chat UI: keep model picker state in sync

* Chat UI tests: cover model selection races

---------

Co-authored-by: Ubuntu <ubuntu@vps-90352893.vps.ovh.ca>
Co-authored-by: Vincent Koc <vincentkoc@ieee.org>
This commit is contained in:
Luke
2026-03-11 15:43:04 +11:00
committed by GitHub
parent bf70a333fa
commit 061b8258bc
7 changed files with 1126 additions and 13 deletions

View File

@@ -8,6 +8,7 @@ import QuartzCore
import SwiftUI
private let webChatSwiftLogger = Logger(subsystem: "ai.openclaw", category: "WebChatSwiftUI")
private let webChatThinkingLevelDefaultsKey = "openclaw.webchat.thinkingLevel"
private enum WebChatSwiftUILayout {
static let windowSize = NSSize(width: 500, height: 840)
@@ -21,6 +22,21 @@ struct MacGatewayChatTransport: OpenClawChatTransport {
try await GatewayConnection.shared.chatHistory(sessionKey: sessionKey)
}
/// Fetches the model catalog from the gateway via `models.list`.
/// Failures are logged and mapped to an empty list so the UI hides the model
/// picker instead of surfacing an error.
func listModels() async throws -> [OpenClawChatModelChoice] {
    do {
        let payload = try await GatewayConnection.shared.request(
            method: "models.list",
            params: [:],
            timeoutMs: 15000)
        let decoded = try JSONDecoder().decode(ModelsListResult.self, from: payload)
        return decoded.models.map(Self.mapModelChoice)
    } catch {
        webChatSwiftLogger.warning(
            "models.list failed; hiding model picker: \(error.localizedDescription, privacy: .public)")
        return []
    }
}
func abortRun(sessionKey: String, runId: String) async throws {
_ = try await GatewayConnection.shared.request(
method: "chat.abort",
@@ -46,6 +62,28 @@ struct MacGatewayChatTransport: OpenClawChatTransport {
return try JSONDecoder().decode(OpenClawChatSessionsListResponse.self, from: data)
}
/// Patches the session's model override via `sessions.patch`.
/// A nil model is sent as JSON null (NSNull) to explicitly clear the override.
func setSessionModel(sessionKey: String, model: String?) async throws {
    let modelValue = model.map(AnyCodable.init) ?? AnyCodable(NSNull())
    let params: [String: AnyCodable] = [
        "key": AnyCodable(sessionKey),
        "model": modelValue,
    ]
    _ = try await GatewayConnection.shared.request(
        method: "sessions.patch",
        params: params,
        timeoutMs: 15000)
}
/// Patches the session's extended-thinking level via `sessions.patch`.
func setSessionThinking(sessionKey: String, thinkingLevel: String) async throws {
    _ = try await GatewayConnection.shared.request(
        method: "sessions.patch",
        params: [
            "key": AnyCodable(sessionKey),
            "thinkingLevel": AnyCodable(thinkingLevel),
        ],
        timeoutMs: 15000)
}
func sendMessage(
sessionKey: String,
message: String,
@@ -133,6 +171,14 @@ struct MacGatewayChatTransport: OpenClawChatTransport {
return .seqGap
}
}
/// Converts a gateway protocol model descriptor into the chat UI's choice type.
/// NOTE(review): `contextwindow` (all-lowercase "w") is assumed to mirror the
/// property name declared on OpenClawProtocol.ModelChoice — confirm against
/// that type's definition.
private static func mapModelChoice(_ model: OpenClawProtocol.ModelChoice) -> OpenClawChatModelChoice {
OpenClawChatModelChoice(
modelID: model.id,
name: model.name,
provider: model.provider,
contextWindow: model.contextwindow)
}
}
// MARK: - Window controller
@@ -155,7 +201,13 @@ final class WebChatSwiftUIWindowController {
init(sessionKey: String, presentation: WebChatPresentation, transport: any OpenClawChatTransport) {
self.sessionKey = sessionKey
self.presentation = presentation
let vm = OpenClawChatViewModel(sessionKey: sessionKey, transport: transport)
let vm = OpenClawChatViewModel(
sessionKey: sessionKey,
transport: transport,
initialThinkingLevel: Self.persistedThinkingLevel(),
onThinkingLevelChanged: { level in
UserDefaults.standard.set(level, forKey: webChatThinkingLevelDefaultsKey)
})
let accent = Self.color(fromHex: AppStateStore.shared.seamColorHex)
self.hosting = NSHostingController(rootView: OpenClawChatView(
viewModel: vm,
@@ -254,6 +306,16 @@ final class WebChatSwiftUIWindowController {
OverlayPanelFactory.clearGlobalEventMonitor(&self.dismissMonitor)
}
/// Reads the last thinking level the user picked from UserDefaults, validating
/// it against the known level set so a stale or corrupt default cannot leak an
/// arbitrary string into the UI.
private static func persistedThinkingLevel() -> String? {
    let knownLevels: Set<String> = ["off", "minimal", "low", "medium", "high", "xhigh", "adaptive"]
    guard let raw = UserDefaults.standard.string(forKey: webChatThinkingLevelDefaultsKey) else {
        return nil
    }
    let normalized = raw.trimmingCharacters(in: .whitespacesAndNewlines).lowercased()
    return knownLevels.contains(normalized) ? normalized : nil
}
private static func makeWindow(
for presentation: WebChatPresentation,
contentViewController: NSViewController) -> NSWindow

View File

@@ -9,6 +9,8 @@ import UniformTypeIdentifiers
@MainActor
struct OpenClawChatComposer: View {
private static let menuThinkingLevels = ["off", "low", "medium", "high"]
@Bindable var viewModel: OpenClawChatViewModel
let style: OpenClawChatView.Style
let showsSessionSwitcher: Bool
@@ -27,11 +29,15 @@ struct OpenClawChatComposer: View {
if self.showsSessionSwitcher {
self.sessionPicker
}
if self.viewModel.showsModelPicker {
self.modelPicker
}
self.thinkingPicker
Spacer()
self.refreshButton
self.attachmentPicker
}
.padding(.horizontal, 10)
}
if self.showsAttachments, !self.viewModel.attachments.isEmpty {
@@ -83,11 +89,19 @@ struct OpenClawChatComposer: View {
}
private var thinkingPicker: some View {
Picker("Thinking", selection: self.$viewModel.thinkingLevel) {
Picker(
"Thinking",
selection: Binding(
get: { self.viewModel.thinkingLevel },
set: { next in self.viewModel.selectThinkingLevel(next) }))
{
Text("Off").tag("off")
Text("Low").tag("low")
Text("Medium").tag("medium")
Text("High").tag("high")
if !Self.menuThinkingLevels.contains(self.viewModel.thinkingLevel) {
Text(self.viewModel.thinkingLevel.capitalized).tag(self.viewModel.thinkingLevel)
}
}
.labelsHidden()
.pickerStyle(.menu)
@@ -95,6 +109,25 @@ struct OpenClawChatComposer: View {
.frame(maxWidth: 140, alignment: .leading)
}
/// Menu picker for the session's model override.
/// Writes are routed through `selectModel(_:)` via a manual Binding because
/// `modelSelectionID` is private(set) on the view model — the VM owns the
/// optimistic update and the gateway patch.
private var modelPicker: some View {
Picker(
"Model",
selection: Binding(
get: { self.viewModel.modelSelectionID },
set: { next in self.viewModel.selectModel(next) }))
{
// Sentinel row for "use the server default model".
Text(self.viewModel.defaultModelLabel).tag(OpenClawChatViewModel.defaultModelSelectionID)
ForEach(self.viewModel.modelChoices) { model in
Text(model.displayLabel).tag(model.selectionID)
}
}
.labelsHidden()
.pickerStyle(.menu)
.controlSize(.small)
.frame(maxWidth: 240, alignment: .leading)
.help("Model")
}
private var sessionPicker: some View {
Picker(
"Session",

View File

@@ -1,5 +1,36 @@
import Foundation
/// A selectable model as surfaced by the gateway's `models.list`.
public struct OpenClawChatModelChoice: Identifiable, Codable, Sendable, Hashable {
    /// SwiftUI identity: the provider-qualified ref, not the raw model ID.
    public var id: String { self.selectionID }
    public let modelID: String
    public let name: String
    public let provider: String
    public let contextWindow: Int?

    public init(modelID: String, name: String, provider: String, contextWindow: Int?) {
        self.modelID = modelID
        self.name = name
        self.provider = provider
        self.contextWindow = contextWindow
    }

    /// Provider-qualified model ref used for picker identity and selection tags.
    /// Falls back to the bare model ID when the provider is blank, and avoids
    /// double-prefixing when the ID already starts with "provider/".
    public var selectionID: String {
        let qualifier = self.provider.trimmingCharacters(in: .whitespacesAndNewlines)
        if qualifier.isEmpty { return self.modelID }
        return self.modelID.hasPrefix("\(qualifier)/") ? self.modelID : "\(qualifier)/\(self.modelID)"
    }

    /// Picker label; intentionally the qualified ref rather than `name`.
    public var displayLabel: String {
        self.selectionID
    }
}
public struct OpenClawChatSessionsDefaults: Codable, Sendable {
public let model: String?
public let contextTokens: Int?
@@ -27,6 +58,7 @@ public struct OpenClawChatSessionEntry: Codable, Identifiable, Sendable, Hashabl
public let outputTokens: Int?
public let totalTokens: Int?
public let modelProvider: String?
public let model: String?
public let contextTokens: Int?
}

View File

@@ -10,6 +10,7 @@ public enum OpenClawChatTransportEvent: Sendable {
public protocol OpenClawChatTransport: Sendable {
func requestHistory(sessionKey: String) async throws -> OpenClawChatHistoryPayload
func listModels() async throws -> [OpenClawChatModelChoice]
func sendMessage(
sessionKey: String,
message: String,
@@ -19,6 +20,8 @@ public protocol OpenClawChatTransport: Sendable {
func abortRun(sessionKey: String, runId: String) async throws
func listSessions(limit: Int?) async throws -> OpenClawChatSessionsListResponse
func setSessionModel(sessionKey: String, model: String?) async throws
func setSessionThinking(sessionKey: String, thinkingLevel: String) async throws
func requestHealth(timeoutMs: Int) async throws -> Bool
func events() -> AsyncStream<OpenClawChatTransportEvent>
@@ -42,4 +45,25 @@ extension OpenClawChatTransport {
code: 0,
userInfo: [NSLocalizedDescriptionKey: "sessions.list not supported by this transport"])
}
/// Throwing default stub so existing transports remain source-compatible with
/// the new protocol requirement; transports with model support override this.
public func listModels() async throws -> [OpenClawChatModelChoice] {
throw NSError(
domain: "OpenClawChatTransport",
code: 0,
userInfo: [NSLocalizedDescriptionKey: "models.list not supported by this transport"])
}
/// Throwing default stub; override to support per-session model patches.
public func setSessionModel(sessionKey _: String, model _: String?) async throws {
throw NSError(
domain: "OpenClawChatTransport",
code: 0,
userInfo: [NSLocalizedDescriptionKey: "sessions.patch(model) not supported by this transport"])
}
/// Throwing default stub; override to support per-session thinking patches.
public func setSessionThinking(sessionKey _: String, thinkingLevel _: String) async throws {
throw NSError(
domain: "OpenClawChatTransport",
code: 0,
userInfo: [NSLocalizedDescriptionKey: "sessions.patch(thinkingLevel) not supported by this transport"])
}
}

View File

@@ -15,9 +15,13 @@ private let chatUILogger = Logger(subsystem: "ai.openclaw", category: "OpenClawC
@MainActor
@Observable
public final class OpenClawChatViewModel {
public static let defaultModelSelectionID = "__default__"
public private(set) var messages: [OpenClawChatMessage] = []
public var input: String = ""
public var thinkingLevel: String = "off"
public private(set) var thinkingLevel: String
public private(set) var modelSelectionID: String = "__default__"
public private(set) var modelChoices: [OpenClawChatModelChoice] = []
public private(set) var isLoading = false
public private(set) var isSending = false
public private(set) var isAborting = false
@@ -32,6 +36,9 @@ public final class OpenClawChatViewModel {
public private(set) var pendingToolCalls: [OpenClawChatPendingToolCall] = []
public private(set) var sessions: [OpenClawChatSessionEntry] = []
private let transport: any OpenClawChatTransport
private var sessionDefaults: OpenClawChatSessionsDefaults?
private let prefersExplicitThinkingLevel: Bool
private let onThinkingLevelChanged: (@MainActor @Sendable (String) -> Void)?
@ObservationIgnored
private nonisolated(unsafe) var eventTask: Task<Void, Never>?
@@ -42,6 +49,17 @@ public final class OpenClawChatViewModel {
@ObservationIgnored
private nonisolated(unsafe) var pendingRunTimeoutTasks: [String: Task<Void, Never>] = [:]
private let pendingRunTimeoutMs: UInt64 = 120_000
// Session switches can overlap in-flight picker patches, so stale completions
// must compare against the latest request and latest desired value for that session.
private var nextModelSelectionRequestID: UInt64 = 0
private var latestModelSelectionRequestIDsBySession: [String: UInt64] = [:]
private var latestModelSelectionIDsBySession: [String: String] = [:]
private var lastSuccessfulModelSelectionIDsBySession: [String: String] = [:]
private var inFlightModelPatchCountsBySession: [String: Int] = [:]
private var modelPatchWaitersBySession: [String: [CheckedContinuation<Void, Never>]] = [:]
private var nextThinkingSelectionRequestID: UInt64 = 0
private var latestThinkingSelectionRequestIDsBySession: [String: UInt64] = [:]
private var latestThinkingLevelsBySession: [String: String] = [:]
private var pendingToolCallsById: [String: OpenClawChatPendingToolCall] = [:] {
didSet {
@@ -52,9 +70,18 @@ public final class OpenClawChatViewModel {
private var lastHealthPollAt: Date?
public init(sessionKey: String, transport: any OpenClawChatTransport) {
public init(
sessionKey: String,
transport: any OpenClawChatTransport,
initialThinkingLevel: String? = nil,
onThinkingLevelChanged: (@MainActor @Sendable (String) -> Void)? = nil)
{
self.sessionKey = sessionKey
self.transport = transport
let normalizedThinkingLevel = Self.normalizedThinkingLevel(initialThinkingLevel)
self.thinkingLevel = normalizedThinkingLevel ?? "off"
self.prefersExplicitThinkingLevel = normalizedThinkingLevel != nil
self.onThinkingLevelChanged = onThinkingLevelChanged
self.eventTask = Task { [weak self] in
guard let self else { return }
@@ -99,6 +126,14 @@ public final class OpenClawChatViewModel {
Task { await self.performSwitchSession(to: sessionKey) }
}
/// Fire-and-forget entry point from the UI; async work runs in a Task.
public func selectThinkingLevel(_ level: String) {
Task { await self.performSelectThinkingLevel(level) }
}
/// Fire-and-forget entry point from the model picker binding.
public func selectModel(_ selectionID: String) {
Task { await self.performSelectModel(selectionID) }
}
public var sessionChoices: [OpenClawChatSessionEntry] {
let now = Date().timeIntervalSince1970 * 1000
let cutoff = now - (24 * 60 * 60 * 1000)
@@ -134,6 +169,17 @@ public final class OpenClawChatViewModel {
return result
}
/// The model picker is hidden entirely when the transport returned no models
/// (e.g. models.list failed or is unsupported).
public var showsModelPicker: Bool {
!self.modelChoices.isEmpty
}
/// Label for the "use server default" picker row; includes the default model
/// from sessions.list defaults when one is known.
public var defaultModelLabel: String {
guard let defaultModelID = self.normalizedModelSelectionID(self.sessionDefaults?.model) else {
return "Default"
}
return "Default: \(self.modelLabel(for: defaultModelID))"
}
public func addAttachments(urls: [URL]) {
Task { await self.loadAttachments(urls: urls) }
}
@@ -174,11 +220,14 @@ public final class OpenClawChatViewModel {
previous: self.messages,
incoming: Self.decodeMessages(payload.messages ?? []))
self.sessionId = payload.sessionId
if let level = payload.thinkingLevel, !level.isEmpty {
if !self.prefersExplicitThinkingLevel,
let level = Self.normalizedThinkingLevel(payload.thinkingLevel)
{
self.thinkingLevel = level
}
await self.pollHealthIfNeeded(force: true)
await self.fetchSessions(limit: 50)
await self.fetchModels()
self.errorText = nil
} catch {
self.errorText = error.localizedDescription
@@ -320,6 +369,7 @@ public final class OpenClawChatViewModel {
guard !self.isSending else { return }
let trimmed = self.input.trimmingCharacters(in: .whitespacesAndNewlines)
guard !trimmed.isEmpty || !self.attachments.isEmpty else { return }
let sessionKey = self.sessionKey
guard self.healthOK else {
self.errorText = "Gateway health not OK; cannot send"
@@ -330,6 +380,7 @@ public final class OpenClawChatViewModel {
self.errorText = nil
let runId = UUID().uuidString
let messageText = trimmed.isEmpty && !self.attachments.isEmpty ? "See attached." : trimmed
let thinkingLevel = self.thinkingLevel
self.pendingRuns.insert(runId)
self.armPendingRunTimeout(runId: runId)
self.pendingToolCallsById = [:]
@@ -382,10 +433,11 @@ public final class OpenClawChatViewModel {
self.attachments = []
do {
await self.waitForPendingModelPatches(in: sessionKey)
let response = try await self.transport.sendMessage(
sessionKey: self.sessionKey,
sessionKey: sessionKey,
message: messageText,
thinking: self.thinkingLevel,
thinking: thinkingLevel,
idempotencyKey: runId,
attachments: encodedAttachments)
if response.runId != runId {
@@ -422,6 +474,17 @@ public final class OpenClawChatViewModel {
do {
let res = try await self.transport.listSessions(limit: limit)
self.sessions = res.sessions
self.sessionDefaults = res.defaults
self.syncSelectedModel()
} catch {
// Best-effort.
}
}
private func fetchModels() async {
do {
self.modelChoices = try await self.transport.listModels()
self.syncSelectedModel()
} catch {
// Best-effort.
}
@@ -432,9 +495,106 @@ public final class OpenClawChatViewModel {
guard !next.isEmpty else { return }
guard next != self.sessionKey else { return }
self.sessionKey = next
self.modelSelectionID = Self.defaultModelSelectionID
await self.bootstrap()
}
/// Applies a thinking-level change: updates local state immediately, persists
/// the preference via the callback, then patches the session on the gateway.
/// Per-session request IDs let a stale completion detect that a newer
/// selection superseded it and, if needed, re-send the latest level.
private func performSelectThinkingLevel(_ level: String) async {
let next = Self.normalizedThinkingLevel(level) ?? "off"
guard next != self.thinkingLevel else { return }
let sessionKey = self.sessionKey
// Optimistic local update; the gateway patch below is best-effort.
self.thinkingLevel = next
self.onThinkingLevelChanged?(next)
self.nextThinkingSelectionRequestID &+= 1
let requestID = self.nextThinkingSelectionRequestID
self.latestThinkingSelectionRequestIDsBySession[sessionKey] = requestID
self.latestThinkingLevelsBySession[sessionKey] = next
do {
try await self.transport.setSessionThinking(sessionKey: sessionKey, thinkingLevel: next)
// A newer request won the race while we were in flight: if its desired
// level differs from ours, re-assert the latest value (fire-and-forget).
guard requestID == self.latestThinkingSelectionRequestIDsBySession[sessionKey] else {
let latest = self.latestThinkingLevelsBySession[sessionKey] ?? next
guard latest != next else { return }
try? await self.transport.setSessionThinking(sessionKey: sessionKey, thinkingLevel: latest)
return
}
} catch {
guard sessionKey == self.sessionKey,
requestID == self.latestThinkingSelectionRequestIDsBySession[sessionKey]
else { return }
// Best-effort. Persisting the user's local preference matters more than a patch error here.
}
}
/// Applies a model selection: optimistic UI update, then `sessions.patch`.
/// Per-session request IDs detect when a newer selection superseded this one;
/// on failure the previous selection and request ID are restored so the
/// rollback does not clobber a later, successful request.
private func performSelectModel(_ selectionID: String) async {
let next = self.normalizedSelectionID(selectionID)
guard next != self.modelSelectionID else { return }
let sessionKey = self.sessionKey
let previous = self.modelSelectionID
let previousRequestID = self.latestModelSelectionRequestIDsBySession[sessionKey]
self.nextModelSelectionRequestID &+= 1
let requestID = self.nextModelSelectionRequestID
let nextModelRef = self.modelRef(forSelectionID: next)
self.latestModelSelectionRequestIDsBySession[sessionKey] = requestID
self.latestModelSelectionIDsBySession[sessionKey] = next
// Count the in-flight patch so sendMessage can wait for it to settle.
self.beginModelPatch(for: sessionKey)
self.modelSelectionID = next
self.errorText = nil
defer { self.endModelPatch(for: sessionKey) }
do {
try await self.transport.setSessionModel(
sessionKey: sessionKey,
model: nextModelRef)
// Superseded by a newer request: record our success but do not resync
// the visible selection — the newer request owns it now.
guard requestID == self.latestModelSelectionRequestIDsBySession[sessionKey] else {
self.applySuccessfulModelSelection(next, sessionKey: sessionKey, syncSelection: false)
return
}
self.applySuccessfulModelSelection(next, sessionKey: sessionKey, syncSelection: true)
} catch {
// Only roll back if this is still the latest request for the session.
guard requestID == self.latestModelSelectionRequestIDsBySession[sessionKey] else { return }
self.latestModelSelectionIDsBySession[sessionKey] = previous
if let previousRequestID {
self.latestModelSelectionRequestIDsBySession[sessionKey] = previousRequestID
} else {
self.latestModelSelectionRequestIDsBySession.removeValue(forKey: sessionKey)
}
if self.lastSuccessfulModelSelectionIDsBySession[sessionKey] == previous {
self.applySuccessfulModelSelection(previous, sessionKey: sessionKey, syncSelection: sessionKey == self.sessionKey)
}
// Surface the error only when the user is still looking at this session.
guard sessionKey == self.sessionKey else { return }
self.modelSelectionID = previous
self.errorText = error.localizedDescription
chatUILogger.error("sessions.patch(model) failed \(error.localizedDescription, privacy: .public)")
}
}
/// Increments the in-flight `sessions.patch(model)` count for a session so
/// `waitForPendingModelPatches` can block on outstanding patches.
private func beginModelPatch(for sessionKey: String) {
self.inFlightModelPatchCountsBySession[sessionKey, default: 0] += 1
}
/// Decrements the in-flight count; when it reaches zero, drops the entry and
/// resumes every waiter queued for that session.
private func endModelPatch(for sessionKey: String) {
let remaining = max(0, (self.inFlightModelPatchCountsBySession[sessionKey] ?? 0) - 1)
if remaining == 0 {
self.inFlightModelPatchCountsBySession.removeValue(forKey: sessionKey)
let waiters = self.modelPatchWaitersBySession.removeValue(forKey: sessionKey) ?? []
for waiter in waiters {
waiter.resume()
}
return
}
self.inFlightModelPatchCountsBySession[sessionKey] = remaining
}
/// Suspends until all pending model patches for `sessionKey` have completed;
/// returns immediately when none are in flight.
private func waitForPendingModelPatches(in sessionKey: String) async {
guard (self.inFlightModelPatchCountsBySession[sessionKey] ?? 0) > 0 else { return }
await withCheckedContinuation { continuation in
self.modelPatchWaitersBySession[sessionKey, default: []].append(continuation)
}
}
private func placeholderSession(key: String) -> OpenClawChatSessionEntry {
OpenClawChatSessionEntry(
key: key,
@@ -453,10 +613,159 @@ public final class OpenClawChatViewModel {
inputTokens: nil,
outputTokens: nil,
totalTokens: nil,
modelProvider: nil,
model: nil,
contextTokens: nil)
}
/// Re-derives the picker selection from the cached session list: an explicit
/// per-session model wins; otherwise the sentinel "default" selection is used.
/// Also records the result as the last known-good selection for rollback.
private func syncSelectedModel() {
let currentSession = self.sessions.first(where: { $0.key == self.sessionKey })
let explicitModelID = self.normalizedModelSelectionID(
currentSession?.model,
provider: currentSession?.modelProvider)
if let explicitModelID {
self.lastSuccessfulModelSelectionIDsBySession[self.sessionKey] = explicitModelID
self.modelSelectionID = explicitModelID
return
}
self.lastSuccessfulModelSelectionIDsBySession[self.sessionKey] = Self.defaultModelSelectionID
self.modelSelectionID = Self.defaultModelSelectionID
}
/// Canonicalizes a raw picker tag; blank input maps to the default sentinel.
private func normalizedSelectionID(_ selectionID: String) -> String {
    let cleaned = selectionID.trimmingCharacters(in: .whitespacesAndNewlines)
    return cleaned.isEmpty ? Self.defaultModelSelectionID : cleaned
}
/// Canonicalizes a session's stored model (optionally with its provider) to a
/// selection ID that matches one of `modelChoices` when possible. Falls back
/// to a provider-qualified string (or the raw ID) when no catalog entry
/// matches, so unknown models still render a stable selection.
private func normalizedModelSelectionID(_ modelID: String?, provider: String? = nil) -> String? {
guard let modelID else { return nil }
let trimmed = modelID.trimmingCharacters(in: .whitespacesAndNewlines)
guard !trimmed.isEmpty else { return nil }
if let provider = Self.normalizedProvider(provider) {
// Prefer the catalog's own selection ID for this (model, provider) pair.
let providerQualified = Self.providerQualifiedModelSelectionID(modelID: trimmed, provider: provider)
if let match = self.modelChoices.first(where: {
$0.selectionID == providerQualified ||
($0.modelID == trimmed && Self.normalizedProvider($0.provider) == provider)
}) {
return match.selectionID
}
return providerQualified
}
if self.modelChoices.contains(where: { $0.selectionID == trimmed }) {
return trimmed
}
// Without a provider hint, only resolve when the bare ID is unambiguous.
let matches = self.modelChoices.filter { $0.modelID == trimmed || $0.selectionID == trimmed }
if matches.count == 1 {
return matches[0].selectionID
}
return trimmed
}
/// Translates a picker selection into the ref sent via `sessions.patch`;
/// the default sentinel maps to nil (clear the per-session override).
private func modelRef(forSelectionID selectionID: String) -> String? {
    let normalized = self.normalizedSelectionID(selectionID)
    guard normalized != Self.defaultModelSelectionID else { return nil }
    return normalized
}
/// Human-readable label for a model ref; falls back to the raw ID when the
/// ref is not present in the catalog.
private func modelLabel(for modelID: String) -> String {
    let match = self.modelChoices.first { $0.selectionID == modelID || $0.modelID == modelID }
    return match?.displayLabel ?? modelID
}
/// Records `selectionID` as the session's last known-good selection and
/// mirrors the resolved (model, provider) pair into the cached session entry.
private func applySuccessfulModelSelection(_ selectionID: String, sessionKey: String, syncSelection: Bool) {
self.lastSuccessfulModelSelectionIDsBySession[sessionKey] = selectionID
let resolved = self.resolvedSessionModelIdentity(forSelectionID: selectionID)
self.updateCurrentSessionModel(
modelID: resolved.modelID,
modelProvider: resolved.modelProvider,
sessionKey: sessionKey,
syncSelection: syncSelection)
}
/// Resolves a selection to the identity stored on a session entry. The
/// default sentinel maps to (nil, nil); refs absent from the catalog are
/// stored verbatim with no provider.
private func resolvedSessionModelIdentity(forSelectionID selectionID: String) -> (modelID: String?, modelProvider: String?) {
guard let modelRef = self.modelRef(forSelectionID: selectionID) else {
return (nil, nil)
}
if let choice = self.modelChoices.first(where: { $0.selectionID == modelRef }) {
return (choice.modelID, Self.normalizedProvider(choice.provider))
}
return (modelRef, nil)
}
/// Trims a provider string, treating blank or absent values as nil.
private static func normalizedProvider(_ provider: String?) -> String? {
    guard let trimmed = provider?.trimmingCharacters(in: .whitespacesAndNewlines),
          !trimmed.isEmpty
    else { return nil }
    return trimmed
}
/// Prefixes the model ID with "provider/" unless it is already qualified.
private static func providerQualifiedModelSelectionID(modelID: String, provider: String) -> String {
    modelID.hasPrefix("\(provider)/") ? modelID : "\(provider)/\(modelID)"
}
/// Mirrors a successful model patch into the local sessions cache so the
/// picker and session list stay consistent without waiting for a refetch.
private func updateCurrentSessionModel(
    modelID: String?,
    modelProvider: String?,
    sessionKey: String,
    syncSelection: Bool)
{
    // Start from the existing entry when present, otherwise a placeholder,
    // and rebuild it with only the model identity fields swapped out.
    let existingIndex = self.sessions.firstIndex { $0.key == sessionKey }
    let base = existingIndex.map { self.sessions[$0] } ?? self.placeholderSession(key: sessionKey)
    let updated = OpenClawChatSessionEntry(
        key: base.key,
        kind: base.kind,
        displayName: base.displayName,
        surface: base.surface,
        subject: base.subject,
        room: base.room,
        space: base.space,
        updatedAt: base.updatedAt,
        sessionId: base.sessionId,
        systemSent: base.systemSent,
        abortedLastRun: base.abortedLastRun,
        thinkingLevel: base.thinkingLevel,
        verboseLevel: base.verboseLevel,
        inputTokens: base.inputTokens,
        outputTokens: base.outputTokens,
        totalTokens: base.totalTokens,
        modelProvider: modelProvider,
        model: modelID,
        contextTokens: base.contextTokens)
    if let existingIndex {
        self.sessions[existingIndex] = updated
    } else {
        self.sessions.append(updated)
    }
    if syncSelection {
        self.syncSelectedModel()
    }
}
private func handleTransportEvent(_ evt: OpenClawChatTransportEvent) {
switch evt {
case let .health(ok):
@@ -573,7 +882,9 @@ public final class OpenClawChatViewModel {
previous: self.messages,
incoming: Self.decodeMessages(payload.messages ?? []))
self.sessionId = payload.sessionId
if let level = payload.thinkingLevel, !level.isEmpty {
if !self.prefersExplicitThinkingLevel,
let level = Self.normalizedThinkingLevel(payload.thinkingLevel)
{
self.thinkingLevel = level
}
} catch {
@@ -682,4 +993,13 @@ public final class OpenClawChatViewModel {
nil
#endif
}
/// Validates and canonicalizes a thinking level; unknown values become nil.
private static func normalizedThinkingLevel(_ level: String?) -> String? {
    let knownLevels: Set<String> = ["off", "minimal", "low", "medium", "high", "xhigh", "adaptive"]
    guard let candidate = level?.trimmingCharacters(in: .whitespacesAndNewlines).lowercased(),
          knownLevels.contains(candidate)
    else { return nil }
    return candidate
}
}

View File

@@ -41,17 +41,67 @@ private func sessionEntry(key: String, updatedAt: Double) -> OpenClawChatSession
inputTokens: nil,
outputTokens: nil,
totalTokens: nil,
modelProvider: nil,
model: nil,
contextTokens: nil)
}
/// Builds a minimal session entry for tests, setting only the key, timestamp,
/// and model identity fields; everything else is nil.
private func sessionEntry(
key: String,
updatedAt: Double,
model: String?,
modelProvider: String? = nil) -> OpenClawChatSessionEntry
{
OpenClawChatSessionEntry(
key: key,
kind: nil,
displayName: nil,
surface: nil,
subject: nil,
room: nil,
space: nil,
updatedAt: updatedAt,
sessionId: nil,
systemSent: nil,
abortedLastRun: nil,
thinkingLevel: nil,
verboseLevel: nil,
inputTokens: nil,
outputTokens: nil,
totalTokens: nil,
modelProvider: modelProvider,
model: model,
contextTokens: nil)
}
/// Convenience factory for a test model choice with a default provider.
private func modelChoice(id: String, name: String, provider: String = "anthropic") -> OpenClawChatModelChoice {
OpenClawChatModelChoice(modelID: id, name: name, provider: provider, contextWindow: nil)
}
private func makeViewModel(
sessionKey: String = "main",
historyResponses: [OpenClawChatHistoryPayload],
sessionsResponses: [OpenClawChatSessionsListResponse] = []) async -> (TestChatTransport, OpenClawChatViewModel)
sessionsResponses: [OpenClawChatSessionsListResponse] = [],
modelResponses: [[OpenClawChatModelChoice]] = [],
setSessionModelHook: (@Sendable (String?) async throws -> Void)? = nil,
setSessionThinkingHook: (@Sendable (String) async throws -> Void)? = nil,
initialThinkingLevel: String? = nil,
onThinkingLevelChanged: (@MainActor @Sendable (String) -> Void)? = nil) async
-> (TestChatTransport, OpenClawChatViewModel)
{
let transport = TestChatTransport(historyResponses: historyResponses, sessionsResponses: sessionsResponses)
let vm = await MainActor.run { OpenClawChatViewModel(sessionKey: sessionKey, transport: transport) }
let transport = TestChatTransport(
historyResponses: historyResponses,
sessionsResponses: sessionsResponses,
modelResponses: modelResponses,
setSessionModelHook: setSessionModelHook,
setSessionThinkingHook: setSessionThinkingHook)
let vm = await MainActor.run {
OpenClawChatViewModel(
sessionKey: sessionKey,
transport: transport,
initialThinkingLevel: initialThinkingLevel,
onThinkingLevelChanged: onThinkingLevelChanged)
}
return (transport, vm)
}
@@ -125,27 +175,60 @@ private func emitExternalFinal(
errorMessage: nil)))
}
/// Main-actor-isolated box for recording callback values in tests.
@MainActor
private final class CallbackBox {
var values: [String] = []
}
/// Minimal one-shot async gate for tests: `wait()` suspends until `open()`.
///
/// Hardened against a race in the original: if `open()` ran before `wait()`
/// the signal was lost (`continuation` was nil) and the test deadlocked.
/// The gate now buffers a single "open" token, so wait/open pairs complete
/// regardless of ordering while sequential reuse still behaves the same.
private actor AsyncGate {
    private var continuation: CheckedContinuation<Void, Never>?
    // One buffered signal for an open() that arrives before its wait().
    private var pendingOpen = false

    /// Suspends until the gate is opened; consumes a buffered open immediately.
    /// Supports a single waiter at a time, matching test usage.
    func wait() async {
        if self.pendingOpen {
            self.pendingOpen = false
            return
        }
        await withCheckedContinuation { continuation in
            self.continuation = continuation
        }
    }

    /// Resumes the pending waiter, or buffers the signal for the next wait().
    func open() {
        if let continuation = self.continuation {
            self.continuation = nil
            continuation.resume()
        } else {
            self.pendingOpen = true
        }
    }
}
/// Actor-isolated recorder of transport traffic so tests can make
/// data-race-free assertions about calls and their arguments.
private actor TestChatTransportState {
var historyCallCount: Int = 0
var sessionsCallCount: Int = 0
var modelsCallCount: Int = 0
var sentRunIds: [String] = []
var sentThinkingLevels: [String] = []
var abortedRunIds: [String] = []
var patchedModels: [String?] = []
var patchedThinkingLevels: [String] = []
}
private final class TestChatTransport: @unchecked Sendable, OpenClawChatTransport {
private let state = TestChatTransportState()
private let historyResponses: [OpenClawChatHistoryPayload]
private let sessionsResponses: [OpenClawChatSessionsListResponse]
private let modelResponses: [[OpenClawChatModelChoice]]
private let setSessionModelHook: (@Sendable (String?) async throws -> Void)?
private let setSessionThinkingHook: (@Sendable (String) async throws -> Void)?
private let stream: AsyncStream<OpenClawChatTransportEvent>
private let continuation: AsyncStream<OpenClawChatTransportEvent>.Continuation
init(
historyResponses: [OpenClawChatHistoryPayload],
sessionsResponses: [OpenClawChatSessionsListResponse] = [])
sessionsResponses: [OpenClawChatSessionsListResponse] = [],
modelResponses: [[OpenClawChatModelChoice]] = [],
setSessionModelHook: (@Sendable (String?) async throws -> Void)? = nil,
setSessionThinkingHook: (@Sendable (String) async throws -> Void)? = nil)
{
self.historyResponses = historyResponses
self.sessionsResponses = sessionsResponses
self.modelResponses = modelResponses
self.setSessionModelHook = setSessionModelHook
self.setSessionThinkingHook = setSessionThinkingHook
var cont: AsyncStream<OpenClawChatTransportEvent>.Continuation!
self.stream = AsyncStream { c in
cont = c
@@ -175,11 +258,12 @@ private final class TestChatTransport: @unchecked Sendable, OpenClawChatTranspor
func sendMessage(
sessionKey _: String,
message _: String,
thinking _: String,
thinking: String,
idempotencyKey: String,
attachments _: [OpenClawChatAttachmentPayload]) async throws -> OpenClawChatSendResponse
{
await self.state.sentRunIdsAppend(idempotencyKey)
await self.state.sentThinkingLevelsAppend(thinking)
return OpenClawChatSendResponse(runId: idempotencyKey, status: "ok")
}
@@ -201,6 +285,29 @@ private final class TestChatTransport: @unchecked Sendable, OpenClawChatTranspor
sessions: [])
}
/// Returns the scripted response for this call index; once the script is
/// exhausted, keeps returning the last entry (or [] when none were scripted).
func listModels() async throws -> [OpenClawChatModelChoice] {
    let callIndex = await self.state.modelsCallCount
    await self.state.setModelsCallCount(callIndex + 1)
    guard callIndex < self.modelResponses.count else {
        return self.modelResponses.last ?? []
    }
    return self.modelResponses[callIndex]
}
/// Records the patched model, then defers to the optional hook (which may
/// throw to simulate gateway failures).
func setSessionModel(sessionKey _: String, model: String?) async throws {
    await self.state.patchedModelsAppend(model)
    try await self.setSessionModelHook?(model)
}
/// Records the patched thinking level, then defers to the optional hook.
func setSessionThinking(sessionKey _: String, thinkingLevel: String) async throws {
    await self.state.patchedThinkingLevelsAppend(thinkingLevel)
    try await self.setSessionThinkingHook?(thinkingLevel)
}
func requestHealth(timeoutMs _: Int) async throws -> Bool {
true
}
@@ -217,6 +324,18 @@ private final class TestChatTransport: @unchecked Sendable, OpenClawChatTranspor
func abortedRunIds() async -> [String] {
await self.state.abortedRunIds
}
func sentThinkingLevels() async -> [String] {
await self.state.sentThinkingLevels
}
func patchedModels() async -> [String?] {
await self.state.patchedModels
}
func patchedThinkingLevels() async -> [String] {
await self.state.patchedThinkingLevels
}
}
extension TestChatTransportState {
@@ -228,6 +347,10 @@ extension TestChatTransportState {
self.sessionsCallCount = v
}
fileprivate func setModelsCallCount(_ v: Int) {
self.modelsCallCount = v
}
fileprivate func sentRunIdsAppend(_ v: String) {
self.sentRunIds.append(v)
}
@@ -235,6 +358,18 @@ extension TestChatTransportState {
fileprivate func abortedRunIdsAppend(_ v: String) {
self.abortedRunIds.append(v)
}
fileprivate func sentThinkingLevelsAppend(_ v: String) {
self.sentThinkingLevels.append(v)
}
fileprivate func patchedModelsAppend(_ v: String?) {
self.patchedModels.append(v)
}
fileprivate func patchedThinkingLevelsAppend(_ v: String) {
self.patchedThinkingLevels.append(v)
}
}
@Suite struct ChatViewModelTests {
@@ -457,6 +592,512 @@ extension TestChatTransportState {
#expect(keys == ["main", "custom"])
}
@Test func bootstrapsModelSelectionFromSessionAndDefaults() async throws {
    let nowMs = Date().timeIntervalSince1970 * 1000
    let availableModels = [
        modelChoice(id: "anthropic/claude-opus-4-6", name: "Claude Opus 4.6"),
        modelChoice(id: "openai/gpt-4.1-mini", name: "GPT-4.1 mini", provider: "openai"),
    ]
    let sessionList = OpenClawChatSessionsListResponse(
        ts: nowMs,
        path: nil,
        count: 1,
        defaults: OpenClawChatSessionsDefaults(model: "openai/gpt-4.1-mini", contextTokens: nil),
        sessions: [
            sessionEntry(key: "main", updatedAt: nowMs, model: "anthropic/claude-opus-4-6"),
        ])
    let (_, viewModel) = await makeViewModel(
        historyResponses: [historyPayload()],
        sessionsResponses: [sessionList],
        modelResponses: [availableModels])
    try await loadAndWaitBootstrap(vm: viewModel)
    // The session's own model wins the initial selection; the defaults entry
    // only feeds the "Default: …" label.
    #expect(await MainActor.run { viewModel.showsModelPicker })
    #expect(await MainActor.run { viewModel.modelSelectionID } == "anthropic/claude-opus-4-6")
    #expect(await MainActor.run { viewModel.defaultModelLabel } == "Default: openai/gpt-4.1-mini")
}
@Test func selectingDefaultModelPatchesNilAndUpdatesSelection() async throws {
    let nowMs = Date().timeIntervalSince1970 * 1000
    let availableModels = [
        modelChoice(id: "anthropic/claude-opus-4-6", name: "Claude Opus 4.6"),
        modelChoice(id: "openai/gpt-4.1-mini", name: "GPT-4.1 mini", provider: "openai"),
    ]
    let sessionList = OpenClawChatSessionsListResponse(
        ts: nowMs,
        path: nil,
        count: 1,
        defaults: OpenClawChatSessionsDefaults(model: "openai/gpt-4.1-mini", contextTokens: nil),
        sessions: [
            sessionEntry(key: "main", updatedAt: nowMs, model: "anthropic/claude-opus-4-6"),
        ])
    let (transport, viewModel) = await makeViewModel(
        historyResponses: [historyPayload()],
        sessionsResponses: [sessionList],
        modelResponses: [availableModels])
    try await loadAndWaitBootstrap(vm: viewModel)
    // Picking the sentinel "default" entry must clear the per-session override,
    // which the transport sees as a nil model patch.
    await MainActor.run { viewModel.selectModel(OpenClawChatViewModel.defaultModelSelectionID) }
    try await waitUntil("session model patched") {
        await transport.patchedModels() == [nil]
    }
    #expect(await MainActor.run { viewModel.modelSelectionID } == OpenClawChatViewModel.defaultModelSelectionID)
}
@Test func selectingProviderQualifiedModelDisambiguatesDuplicateModelIDs() async throws {
    let nowMs = Date().timeIntervalSince1970 * 1000
    // Two providers expose the same bare model ID; only the provider prefix
    // can disambiguate the selection and the patch.
    let availableModels = [
        modelChoice(id: "gpt-4.1-mini", name: "GPT-4.1 mini", provider: "openai"),
        modelChoice(id: "gpt-4.1-mini", name: "GPT-4.1 mini", provider: "openrouter"),
    ]
    let sessionList = OpenClawChatSessionsListResponse(
        ts: nowMs,
        path: nil,
        count: 1,
        defaults: OpenClawChatSessionsDefaults(model: "openrouter/gpt-4.1-mini", contextTokens: nil),
        sessions: [
            sessionEntry(key: "main", updatedAt: nowMs, model: "gpt-4.1-mini", modelProvider: "openrouter"),
        ])
    let (transport, viewModel) = await makeViewModel(
        historyResponses: [historyPayload()],
        sessionsResponses: [sessionList],
        modelResponses: [availableModels])
    try await loadAndWaitBootstrap(vm: viewModel)
    #expect(await MainActor.run { viewModel.modelSelectionID } == "openrouter/gpt-4.1-mini")
    await MainActor.run { viewModel.selectModel("openai/gpt-4.1-mini") }
    try await waitUntil("provider-qualified model patched") {
        await transport.patchedModels() == ["openai/gpt-4.1-mini"]
    }
}
@Test func slashModelIDsStayProviderQualifiedInSelectionAndPatch() async throws {
    let nowMs = Date().timeIntervalSince1970 * 1000
    // Model IDs that themselves contain "/" must keep the provider prefix
    // intact in both the selection ID and the patched ref.
    let availableModels = [
        modelChoice(
            id: "openai/gpt-5.4",
            name: "GPT-5.4 via Vercel AI Gateway",
            provider: "vercel-ai-gateway"),
    ]
    let sessionList = OpenClawChatSessionsListResponse(
        ts: nowMs,
        path: nil,
        count: 1,
        defaults: nil,
        sessions: [
            sessionEntry(key: "main", updatedAt: nowMs, model: nil),
        ])
    let (transport, viewModel) = await makeViewModel(
        historyResponses: [historyPayload()],
        sessionsResponses: [sessionList],
        modelResponses: [availableModels])
    try await loadAndWaitBootstrap(vm: viewModel)
    await MainActor.run { viewModel.selectModel("vercel-ai-gateway/openai/gpt-5.4") }
    try await waitUntil("slash model patched with provider-qualified ref") {
        await transport.patchedModels() == ["vercel-ai-gateway/openai/gpt-5.4"]
    }
}
// Race: the first selection's patch is delayed 200ms so it completes AFTER the
// second selection's patch. The stale completion must not overwrite the newer
// selection in either the picker state or the sessions list.
@Test func staleModelPatchCompletionsDoNotOverwriteNewerSelection() async throws {
let now = Date().timeIntervalSince1970 * 1000
let history = historyPayload()
let sessions = OpenClawChatSessionsListResponse(
ts: now,
path: nil,
count: 1,
defaults: nil,
sessions: [
sessionEntry(key: "main", updatedAt: now, model: nil),
])
let models = [
modelChoice(id: "gpt-5.4", name: "GPT-5.4", provider: "openai"),
modelChoice(id: "gpt-5.4-pro", name: "GPT-5.4 Pro", provider: "openai"),
]
let (transport, vm) = await makeViewModel(
historyResponses: [history],
sessionsResponses: [sessions],
modelResponses: [models],
setSessionModelHook: { model in
// Delay only the first selection so it finishes last.
if model == "openai/gpt-5.4" {
try await Task.sleep(for: .milliseconds(200))
}
})
try await loadAndWaitBootstrap(vm: vm)
await MainActor.run {
vm.selectModel("openai/gpt-5.4")
vm.selectModel("openai/gpt-5.4-pro")
}
try await waitUntil("two model patches complete") {
let patched = await transport.patchedModels()
return patched == ["openai/gpt-5.4", "openai/gpt-5.4-pro"]
}
// The newer "pro" selection must survive the late first completion.
#expect(await MainActor.run { vm.modelSelectionID } == "openai/gpt-5.4-pro")
#expect(await MainActor.run { vm.sessions.first(where: { $0.key == "main" })?.model } == "openai/gpt-5.4-pro")
}
// A send issued while a model patch is in flight must queue until the patch
// finishes. An AsyncGate keeps the patch open until the test releases it.
@Test func sendWaitsForInFlightModelPatchToFinish() async throws {
let now = Date().timeIntervalSince1970 * 1000
let history = historyPayload()
let sessions = OpenClawChatSessionsListResponse(
ts: now,
path: nil,
count: 1,
defaults: nil,
sessions: [
sessionEntry(key: "main", updatedAt: now, model: nil),
])
let models = [
modelChoice(id: "gpt-5.4", name: "GPT-5.4", provider: "openai"),
]
let gate = AsyncGate()
let (transport, vm) = await makeViewModel(
historyResponses: [history],
sessionsResponses: [sessions],
modelResponses: [models],
setSessionModelHook: { model in
// Park the patch until gate.open() below.
if model == "openai/gpt-5.4" {
await gate.wait()
}
})
try await loadAndWaitBootstrap(vm: vm)
await MainActor.run { vm.selectModel("openai/gpt-5.4") }
try await waitUntil("model patch started") {
let patched = await transport.patchedModels()
return patched == ["openai/gpt-5.4"]
}
await sendUserMessage(vm, text: "hello")
try await waitUntil("send entered waiting state") {
await MainActor.run { vm.isSending }
}
// No run may reach the transport while the patch is still pending.
#expect(await transport.lastSentRunId() == nil)
await MainActor.run { vm.selectThinkingLevel("high") }
try await waitUntil("thinking level changed while send is blocked") {
await MainActor.run { vm.thinkingLevel == "high" }
}
await gate.open()
try await waitUntil("send released after model patch") {
await transport.lastSentRunId() != nil
}
// The queued send carries the thinking level captured when it was issued
// ("off"), not the "high" picked while it was blocked.
#expect(await transport.sentThinkingLevels() == ["off"])
}
// Race: the latest selection fails immediately while the older one is still
// in flight. When the older patch completes it becomes the effective model,
// and the failed latest selection must NOT be re-issued (no third patch).
@Test func failedLatestModelSelectionDoesNotReplayAfterOlderCompletionFinishes() async throws {
let now = Date().timeIntervalSince1970 * 1000
let history = historyPayload()
let sessions = OpenClawChatSessionsListResponse(
ts: now,
path: nil,
count: 1,
defaults: nil,
sessions: [
sessionEntry(key: "main", updatedAt: now, model: nil),
])
let models = [
modelChoice(id: "gpt-5.4", name: "GPT-5.4", provider: "openai"),
modelChoice(id: "gpt-5.4-pro", name: "GPT-5.4 Pro", provider: "openai"),
]
let (transport, vm) = await makeViewModel(
historyResponses: [history],
sessionsResponses: [sessions],
modelResponses: [models],
setSessionModelHook: { model in
// First selection: slow success. Second selection: immediate failure.
if model == "openai/gpt-5.4" {
try await Task.sleep(for: .milliseconds(200))
return
}
if model == "openai/gpt-5.4-pro" {
throw NSError(domain: "test", code: 1, userInfo: [NSLocalizedDescriptionKey: "boom"])
}
})
try await loadAndWaitBootstrap(vm: vm)
await MainActor.run {
vm.selectModel("openai/gpt-5.4")
vm.selectModel("openai/gpt-5.4-pro")
}
try await waitUntil("older model completion wins after latest failure") {
await MainActor.run { vm.sessions.first(where: { $0.key == "main" })?.model == "openai/gpt-5.4" }
}
#expect(await MainActor.run { vm.modelSelectionID } == "openai/gpt-5.4")
#expect(await MainActor.run { vm.sessions.first(where: { $0.key == "main" })?.model } == "openai/gpt-5.4")
// Exactly two patches: the failed selection was not replayed.
#expect(await transport.patchedModels() == ["openai/gpt-5.4", "openai/gpt-5.4-pro"])
}
// Race: the older selection succeeds (100ms), then the latest one fails after
// 200ms. The failure must roll the UI back to the earlier successful model
// without issuing any extra patch.
@Test func failedLatestModelSelectionRestoresEarlierSuccessWithoutReplay() async throws {
let now = Date().timeIntervalSince1970 * 1000
let history = historyPayload()
let sessions = OpenClawChatSessionsListResponse(
ts: now,
path: nil,
count: 1,
defaults: nil,
sessions: [
sessionEntry(key: "main", updatedAt: now, model: nil),
])
let models = [
modelChoice(id: "gpt-5.4", name: "GPT-5.4", provider: "openai"),
modelChoice(id: "gpt-5.4-pro", name: "GPT-5.4 Pro", provider: "openai"),
]
let (transport, vm) = await makeViewModel(
historyResponses: [history],
sessionsResponses: [sessions],
modelResponses: [models],
setSessionModelHook: { model in
// First selection succeeds after 100ms; second fails after 200ms.
if model == "openai/gpt-5.4" {
try await Task.sleep(for: .milliseconds(100))
return
}
if model == "openai/gpt-5.4-pro" {
try await Task.sleep(for: .milliseconds(200))
throw NSError(domain: "test", code: 1, userInfo: [NSLocalizedDescriptionKey: "boom"])
}
})
try await loadAndWaitBootstrap(vm: vm)
await MainActor.run {
vm.selectModel("openai/gpt-5.4")
vm.selectModel("openai/gpt-5.4-pro")
}
try await waitUntil("latest failure restores prior successful model") {
await MainActor.run {
vm.modelSelectionID == "openai/gpt-5.4" &&
vm.sessions.first(where: { $0.key == "main" })?.model == "gpt-5.4" &&
vm.sessions.first(where: { $0.key == "main" })?.modelProvider == "openai"
}
}
// Exactly two patches: the restore is local state only, not a replayed patch.
#expect(await transport.patchedModels() == ["openai/gpt-5.4", "openai/gpt-5.4-pro"])
}
// Race: a slow model patch for "main" completes only after the user has
// switched to "other". The late completion must not leak into the now-active
// session's picker state or sessions list.
@Test func switchingSessionsIgnoresLateModelPatchCompletionFromPreviousSession() async throws {
let now = Date().timeIntervalSince1970 * 1000
let sessions = OpenClawChatSessionsListResponse(
ts: now,
path: nil,
count: 2,
defaults: nil,
sessions: [
sessionEntry(key: "main", updatedAt: now, model: nil),
sessionEntry(key: "other", updatedAt: now - 1000, model: nil),
])
let models = [
modelChoice(id: "gpt-5.4", name: "GPT-5.4", provider: "openai"),
]
let (transport, vm) = await makeViewModel(
historyResponses: [
historyPayload(sessionKey: "main", sessionId: "sess-main"),
historyPayload(sessionKey: "other", sessionId: "sess-other"),
],
sessionsResponses: [sessions, sessions],
modelResponses: [models, models],
setSessionModelHook: { model in
// Hold the patch long enough for the session switch to happen first.
if model == "openai/gpt-5.4" {
try await Task.sleep(for: .milliseconds(200))
}
})
try await loadAndWaitBootstrap(vm: vm, sessionId: "sess-main")
await MainActor.run { vm.selectModel("openai/gpt-5.4") }
await MainActor.run { vm.switchSession(to: "other") }
try await waitUntil("switched sessions") {
await MainActor.run { vm.sessionKey == "other" && vm.sessionId == "sess-other" }
}
try await waitUntil("late model patch finished") {
let patched = await transport.patchedModels()
return patched == ["openai/gpt-5.4"]
}
// The active session ("other") must stay on the default selection.
#expect(await MainActor.run { vm.modelSelectionID } == OpenClawChatViewModel.defaultModelSelectionID)
#expect(await MainActor.run { vm.sessions.first(where: { $0.key == "other" })?.model } == nil)
}
// Race across three session transitions: main -> other -> main. A slow patch
// for "main" must, when it finally completes, update only "main" — it must
// not replay "other"'s selection, and each session keeps its own model.
@Test func lateModelCompletionDoesNotReplayCurrentSessionSelectionIntoPreviousSession() async throws {
let now = Date().timeIntervalSince1970 * 1000
let initialSessions = OpenClawChatSessionsListResponse(
ts: now,
path: nil,
count: 2,
defaults: nil,
sessions: [
sessionEntry(key: "main", updatedAt: now, model: nil),
sessionEntry(key: "other", updatedAt: now - 1000, model: nil),
])
// Server view after the "other" session got its own model selection.
let sessionsAfterOtherSelection = OpenClawChatSessionsListResponse(
ts: now,
path: nil,
count: 2,
defaults: nil,
sessions: [
sessionEntry(key: "main", updatedAt: now, model: nil),
sessionEntry(key: "other", updatedAt: now - 1000, model: "openai/gpt-5.4-pro"),
])
let models = [
modelChoice(id: "gpt-5.4", name: "GPT-5.4", provider: "openai"),
modelChoice(id: "gpt-5.4-pro", name: "GPT-5.4 Pro", provider: "openai"),
]
let (transport, vm) = await makeViewModel(
historyResponses: [
historyPayload(sessionKey: "main", sessionId: "sess-main"),
historyPayload(sessionKey: "other", sessionId: "sess-other"),
historyPayload(sessionKey: "main", sessionId: "sess-main"),
],
sessionsResponses: [initialSessions, initialSessions, sessionsAfterOtherSelection],
modelResponses: [models, models, models],
setSessionModelHook: { model in
// Only "main"'s patch is slow; "other"'s completes immediately.
if model == "openai/gpt-5.4" {
try await Task.sleep(for: .milliseconds(200))
}
})
try await loadAndWaitBootstrap(vm: vm, sessionId: "sess-main")
await MainActor.run { vm.selectModel("openai/gpt-5.4") }
await MainActor.run { vm.switchSession(to: "other") }
try await waitUntil("switched to other session") {
await MainActor.run { vm.sessionKey == "other" && vm.sessionId == "sess-other" }
}
await MainActor.run { vm.selectModel("openai/gpt-5.4-pro") }
try await waitUntil("both model patches issued") {
let patched = await transport.patchedModels()
return patched == ["openai/gpt-5.4", "openai/gpt-5.4-pro"]
}
await MainActor.run { vm.switchSession(to: "main") }
try await waitUntil("switched back to main session") {
await MainActor.run { vm.sessionKey == "main" && vm.sessionId == "sess-main" }
}
try await waitUntil("late model completion updates only the original session") {
await MainActor.run { vm.sessions.first(where: { $0.key == "main" })?.model == "openai/gpt-5.4" }
}
#expect(await MainActor.run { vm.modelSelectionID } == "openai/gpt-5.4")
#expect(await MainActor.run { vm.sessions.first(where: { $0.key == "main" })?.model } == "openai/gpt-5.4")
#expect(await MainActor.run { vm.sessions.first(where: { $0.key == "other" })?.model } == "openai/gpt-5.4-pro")
// Exactly two patches total: nothing was replayed.
#expect(await transport.patchedModels() == ["openai/gpt-5.4", "openai/gpt-5.4-pro"])
}
@Test func explicitThinkingLevelWinsOverHistoryAndPersistsChanges() async throws {
    // History says "off", but an explicit initial level of "high" must win.
    let historyResponse = OpenClawChatHistoryPayload(
        sessionKey: "main",
        sessionId: "sess-main",
        messages: [],
        thinkingLevel: "off")
    let recorded = await MainActor.run { CallbackBox() }
    let (transport, viewModel) = await makeViewModel(
        historyResponses: [historyResponse],
        initialThinkingLevel: "high",
        onThinkingLevelChanged: { level in
            recorded.values.append(level)
        })
    try await loadAndWaitBootstrap(vm: viewModel, sessionId: "sess-main")
    #expect(await MainActor.run { viewModel.thinkingLevel } == "high")
    // A manual change is patched to the transport and reported via the callback.
    await MainActor.run { viewModel.selectThinkingLevel("medium") }
    try await waitUntil("thinking level patched") {
        await transport.patchedThinkingLevels() == ["medium"]
    }
    #expect(await MainActor.run { viewModel.thinkingLevel } == "medium")
    #expect(await MainActor.run { recorded.values } == ["medium"])
}
@Test func serverProvidedThinkingLevelsOutsideMenuArePreservedForSend() async throws {
    // "xhigh" is not one of the picker's levels, but a server-provided value
    // must survive bootstrap and be carried on the next send unchanged.
    let historyResponse = OpenClawChatHistoryPayload(
        sessionKey: "main",
        sessionId: "sess-main",
        messages: [],
        thinkingLevel: "xhigh")
    let (transport, viewModel) = await makeViewModel(historyResponses: [historyResponse])
    try await loadAndWaitBootstrap(vm: viewModel, sessionId: "sess-main")
    #expect(await MainActor.run { viewModel.thinkingLevel } == "xhigh")
    await sendUserMessage(viewModel, text: "hello")
    try await waitUntil("send uses preserved thinking level") {
        await transport.sentThinkingLevels() == ["xhigh"]
    }
}
// Race: the "medium" patch is delayed so it completes after "high". Unlike the
// model-patch races above, the stale completion triggers a REPLAY of the
// latest selection — the expected patch sequence ends with a second "high".
@Test func staleThinkingPatchCompletionReappliesLatestSelection() async throws {
let history = OpenClawChatHistoryPayload(
sessionKey: "main",
sessionId: "sess-main",
messages: [],
thinkingLevel: "off")
let (transport, vm) = await makeViewModel(
historyResponses: [history],
setSessionThinkingHook: { level in
// Delay only the first selection so it finishes last.
if level == "medium" {
try await Task.sleep(for: .milliseconds(200))
}
})
try await loadAndWaitBootstrap(vm: vm, sessionId: "sess-main")
await MainActor.run {
vm.selectThinkingLevel("medium")
vm.selectThinkingLevel("high")
}
try await waitUntil("thinking patch replayed latest selection") {
let patched = await transport.patchedThinkingLevels()
return patched == ["medium", "high", "high"]
}
#expect(await MainActor.run { vm.thinkingLevel } == "high")
}
@Test func clearsStreamingOnExternalErrorEvent() async throws {
let sessionId = "sess-main"
let history = historyPayload(sessionId: sessionId)