diff --git a/HealthGPT.xcodeproj/project.pbxproj b/HealthGPT.xcodeproj/project.pbxproj
index c69b9f0..a1ef9df 100644
--- a/HealthGPT.xcodeproj/project.pbxproj
+++ b/HealthGPT.xcodeproj/project.pbxproj
@@ -44,6 +44,8 @@
 		63439A4E2BA6069E008BDBFD /* SpeziLLM in Frameworks */ = {isa = PBXBuildFile; productRef = 63439A4D2BA6069E008BDBFD /* SpeziLLM */; };
 		63439A502BA6069E008BDBFD /* SpeziLLMOpenAI in Frameworks */ = {isa = PBXBuildFile; productRef = 63439A4F2BA6069E008BDBFD /* SpeziLLMOpenAI */; };
 		634523F12BAEF62E00A6E2A1 /* Localizable.xcstrings in Resources */ = {isa = PBXBuildFile; fileRef = 634523F02BAEF62D00A6E2A1 /* Localizable.xcstrings */; };
+		63497B6C2BBF095A001F8419 /* LLMSourceSelection.swift in Sources */ = {isa = PBXBuildFile; fileRef = 63497B6B2BBF095A001F8419 /* LLMSourceSelection.swift */; };
+		63497B6E2BBF0C0B001F8419 /* LLMSource.swift in Sources */ = {isa = PBXBuildFile; fileRef = 63497B6D2BBF0C0B001F8419 /* LLMSource.swift */; };
 		63DAAF712BBEB14A009E5E19 /* SpeziLLMLocal in Frameworks */ = {isa = PBXBuildFile; productRef = 63DAAF702BBEB14A009E5E19 /* SpeziLLMLocal */; };
 		63DAAF732BBEB14A009E5E19 /* SpeziLLMLocalDownload in Frameworks */ = {isa = PBXBuildFile; productRef = 63DAAF722BBEB14A009E5E19 /* SpeziLLMLocalDownload */; };
 		63DAAF752BBEB24D009E5E19 /* LLMLocalDownload.swift in Sources */ = {isa = PBXBuildFile; fileRef = 63DAAF742BBEB24D009E5E19 /* LLMLocalDownload.swift */; };
@@ -100,6 +102,8 @@
 		2F5E32BC297E05EA003432F8 /* HealthGPTAppDelegate.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = HealthGPTAppDelegate.swift; sourceTree = "<group>"; };
 		2FAEC07F297F583900C11C42 /* HealthGPT.entitlements */ = {isa = PBXFileReference; lastKnownFileType = text.plist.entitlements; path = HealthGPT.entitlements; sourceTree = "<group>"; };
 		634523F02BAEF62D00A6E2A1 /* Localizable.xcstrings */ = {isa = PBXFileReference; lastKnownFileType = text.json.xcstrings; path = Localizable.xcstrings; sourceTree = "<group>"; };
+		63497B6B2BBF095A001F8419 /* LLMSourceSelection.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = LLMSourceSelection.swift; sourceTree = "<group>"; };
+		63497B6D2BBF0C0B001F8419 /* LLMSource.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = LLMSource.swift; sourceTree = "<group>"; };
 		63DAAF742BBEB24D009E5E19 /* LLMLocalDownload.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = LLMLocalDownload.swift; sourceTree = "<group>"; };
 		653A254D283387FE005D4D48 /* HealthGPT.app */ = {isa = PBXFileReference; explicitFileType = wrapper.application; includeInIndex = 0; path = HealthGPT.app; sourceTree = BUILT_PRODUCTS_DIR; };
 		653A2550283387FE005D4D48 /* HealthGPTApplication.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = HealthGPTApplication.swift; sourceTree = "<group>"; };
@@ -170,6 +174,8 @@
 				275DEFDA29EEC6CA0079D453 /* String+ModuleLocalized.swift */,
 				275DEFDC29EEC6DC0079D453 /* Welcome.swift */,
 				63DAAF742BBEB24D009E5E19 /* LLMLocalDownload.swift */,
+				63497B6B2BBF095A001F8419 /* LLMSourceSelection.swift */,
+				63497B6D2BBF0C0B001F8419 /* LLMSource.swift */,
 			);
 			path = Onboarding;
 			sourceTree = "<group>";
@@ -482,11 +488,13 @@
 				272FB34E2A03CDFF0010B98D /* HealthDataFetcher+Process.swift in Sources */,
 				275DEFD029EEC5D20079D453 /* FeatureFlags.swift in Sources */,
 				272FB3522A03D14C0010B98D /* HealthData.swift in Sources */,
+				63497B6E2BBF0C0B001F8419 /* LLMSource.swift in Sources */,
 				F381BD3C2A4DE41900BCEB69 /* HealthDataInterpreter.swift in Sources */,
 				275DEFF229EECA030079D453 /* Binding+Negate.swift in Sources */,
 				2F5E32BD297E05EA003432F8 /* HealthGPTAppDelegate.swift in Sources */,
 				275DEFD529EEC6870079D453 /* HealthKitPermissions.swift in Sources */,
 				272FB3542A03F1860010B98D /* PromptGenerator.swift in Sources */,
+				63497B6C2BBF095A001F8419 /* LLMSourceSelection.swift in Sources */,
 				275DEFDB29EEC6CA0079D453 /* String+ModuleLocalized.swift in Sources */,
 				2719BC8029F0A5DD00995C31 /* UIApplication+Keyboard.swift in Sources */,
 				E22B62EF29E8CE1E008F3484 /* HealthGPTView.swift in Sources */,
diff --git a/HealthGPT/HealthGPT/HealthDataInterpreter.swift b/HealthGPT/HealthGPT/HealthDataInterpreter.swift
index 064f183..24201ea 100644
--- a/HealthGPT/HealthGPT/HealthDataInterpreter.swift
+++ b/HealthGPT/HealthGPT/HealthDataInterpreter.swift
@@ -24,28 +24,16 @@ class HealthDataInterpreter: DefaultInitializable, Module, EnvironmentAccessible
     @ObservationIgnored private var systemPrompt = ""

     required init() { }
-
-    /// Creates an `LLMSchema`, sets it up for use with an `LLMRunner`, injects the system prompt
+
+    /// Sets up the passed `LLMSchema` with the `LLMRunner`, injects the system prompt
     /// into the context, and assigns the resulting `LLMSession` to the `llm` property. For more
     /// information, please refer to the [`SpeziLLM`](https://swiftpackageindex.com/StanfordSpezi/SpeziLLM/documentation/spezillm) documentation.
     ///
-    /// If the `--mockMode` feature flag is set, this function will use `LLMMockSchema()`, otherwise
-    /// will use `LLMOpenAISchema` with the model type specified in the `model` parameter.
-    /// - Parameter model: the type of OpenAI model to use
+    /// - Parameter schema: the `LLMSchema` to use
     @MainActor
-    func prepareLLM(with model: LLMOpenAIModelType) async {
-        var llmSchema: any LLMSchema
-
-        if FeatureFlags.mockMode {
-            llmSchema = LLMMockSchema()
-        } else if FeatureFlags.localLLM {
-            llmSchema = LLMLocalSchema(modelPath: .cachesDirectory.appending(path: "llm.gguf"))
-        } else {
-            llmSchema = LLMOpenAISchema(parameters: .init(modelType: model))
-        }
-
-        let llm = llmRunner(with: llmSchema)
+    func prepareLLM(with schema: any LLMSchema) async {
+        let llm = llmRunner(with: schema)
         systemPrompt = await generateSystemPrompt()
         llm.context.append(systemMessage: systemPrompt)
         self.llm = llm
diff --git a/HealthGPT/HealthGPT/HealthGPTView.swift b/HealthGPT/HealthGPT/HealthGPTView.swift
index 173cde6..090c54e 100644
--- a/HealthGPT/HealthGPT/HealthGPTView.swift
+++ b/HealthGPT/HealthGPT/HealthGPTView.swift
@@ -11,11 +11,13 @@
 import SpeziLLM
 import SpeziLLMOpenAI
 import SpeziSpeechSynthesizer
 import SwiftUI
+import SpeziLLMLocal


 struct HealthGPTView: View {
     @AppStorage(StorageKeys.onboardingFlowComplete) var completedOnboardingFlow = false
     @AppStorage(StorageKeys.enableTextToSpeech) private var textToSpeech = StorageKeys.Defaults.enableTextToSpeech
+    @AppStorage(StorageKeys.llmSource) private var llmSource = StorageKeys.Defaults.llmSource
     @AppStorage(StorageKeys.openAIModel) private var openAIModel = LLMOpenAIModelType.gpt4
     @Environment(HealthDataInterpreter.self) private var healthDataInterpreter
@@ -66,7 +68,13 @@ struct HealthGPTView: View {
                 Text(errorMessage)
             }
             .task {
-                await healthDataInterpreter.prepareLLM(with: openAIModel)
+                if FeatureFlags.mockMode {
+                    await healthDataInterpreter.prepareLLM(with: LLMMockSchema())
+                } else if FeatureFlags.localLLM || llmSource == .local {
+                    await healthDataInterpreter.prepareLLM(with: LLMLocalSchema(modelPath: .cachesDirectory.appending(path: "llm.gguf")))
+                } else {
+                    await healthDataInterpreter.prepareLLM(with: LLMOpenAISchema(parameters: .init(modelType: openAIModel)))
+                }
             }
         }
diff --git a/HealthGPT/HealthGPT/SettingsView.swift b/HealthGPT/HealthGPT/SettingsView.swift
index caedd43..15e9f98 100644
--- a/HealthGPT/HealthGPT/SettingsView.swift
+++ b/HealthGPT/HealthGPT/SettingsView.swift
@@ -21,6 +21,7 @@ struct SettingsView: View {
     @Environment(\.dismiss) private var dismiss
     @Environment(HealthDataInterpreter.self) private var healthDataInterpreter
     @AppStorage(StorageKeys.enableTextToSpeech) private var enableTextToSpeech = StorageKeys.Defaults.enableTextToSpeech
+    @AppStorage(StorageKeys.llmSource) private var llmSource = StorageKeys.Defaults.llmSource
     @AppStorage(StorageKeys.openAIModel) private var openAIModel = LLMOpenAIModelType.gpt4

     let logger = Logger(subsystem: "HealthGPT", category: "Settings")
@@ -28,7 +29,7 @@ struct SettingsView: View {
     var body: some View {
         NavigationStack(path: $path) {
             List {
-                if !FeatureFlags.localLLM {
+                if !FeatureFlags.localLLM && !(llmSource == .local) {
                     openAISettings
                 }

@@ -105,7 +106,7 @@ struct SettingsView: View {
             ) { model in
                 Task {
                     openAIModel = model
-                    await healthDataInterpreter.prepareLLM(with: model)
+                    await healthDataInterpreter.prepareLLM(with: LLMOpenAISchema(parameters: .init(modelType: model)))
                     path.removeLast()
                 }
             }
diff --git a/HealthGPT/Onboarding/LLMSource.swift b/HealthGPT/Onboarding/LLMSource.swift
new file mode 100644
index 0000000..427d274
--- /dev/null
+++ b/HealthGPT/Onboarding/LLMSource.swift
@@ -0,0 +1,28 @@
+//
+// This source file is part of the Stanford HealthGPT project
+//
+// SPDX-FileCopyrightText: 2024 Stanford University & Project Contributors (see CONTRIBUTORS.md)
+//
+// SPDX-License-Identifier: MIT
+//
+
+import Foundation
+
+
+enum LLMSource: String, CaseIterable, Identifiable, Codable {
+    case local
+    case openai
+
+    var id: String {
+        self.rawValue
+    }
+
+    var localizedDescription: LocalizedStringResource {
+        switch self {
+        case .local:
+            LocalizedStringResource("LOCAL_LLM_LABEL")
+        case .openai:
+            LocalizedStringResource("OPENAI_LLM_LABEL")
+        }
+    }
+}
diff --git a/HealthGPT/Onboarding/LLMSourceSelection.swift b/HealthGPT/Onboarding/LLMSourceSelection.swift
new file mode 100644
index 0000000..c14bed0
--- /dev/null
+++ b/HealthGPT/Onboarding/LLMSourceSelection.swift
@@ -0,0 +1,56 @@
+//
+// This source file is part of the Stanford HealthGPT project
+//
+// SPDX-FileCopyrightText: 2024 Stanford University & Project Contributors (see CONTRIBUTORS.md)
+//
+// SPDX-License-Identifier: MIT
+//
+
+import SpeziOnboarding
+import SwiftUI
+
+struct LLMSourceSelection: View {
+    @Environment(OnboardingNavigationPath.self) private var onboardingNavigationPath
+    @AppStorage(StorageKeys.llmSource) private var llmSource = StorageKeys.Defaults.llmSource
+
+    var body: some View {
+        OnboardingView(
+            contentView: {
+                VStack {
+                    OnboardingTitleView(
+                        title: "LLM_SOURCE_SELECTION_TITLE",
+                        subtitle: "LLM_SOURCE_SELECTION_SUBTITLE"
+                    )
+                    Spacer()
+                    sourceSelector
+                    Spacer()
+                }
+            },
+            actionView: {
+                OnboardingActionsView(
+                    "LLM_SOURCE_SELECTION_BUTTON"
+                ) {
+                    if llmSource == .local {
+                        onboardingNavigationPath.append(customView: LLMLocalDownload())
+                    } else {
+                        onboardingNavigationPath.append(customView: OpenAIAPIKey())
+                    }
+                }
+            }
+        )
+    }
+
+    private var sourceSelector: some View {
+        Picker("", selection: $llmSource) {
+            ForEach(LLMSource.allCases) { source in
+                Text(source.localizedDescription)
+                    .tag(source)
+            }
+        }
+        .pickerStyle(.inline)
+    }
+}
+
+#Preview {
+    LLMSourceSelection()
+}
diff --git a/HealthGPT/Onboarding/OnboardingFlow.swift b/HealthGPT/Onboarding/OnboardingFlow.swift
index a84b52c..ef4e557 100644
--- a/HealthGPT/Onboarding/OnboardingFlow.swift
+++ b/HealthGPT/Onboarding/OnboardingFlow.swift
@@ -15,6 +15,7 @@ import SwiftUI
 /// Displays an multi-step onboarding flow for the HealthGPT Application.
 struct OnboardingFlow: View {
     @AppStorage(StorageKeys.onboardingFlowComplete) var completedOnboardingFlow = false
+    @AppStorage(StorageKeys.llmSource) var llmSource = StorageKeys.Defaults.llmSource


     var body: some View {
@@ -25,8 +26,7 @@ struct OnboardingFlow: View {
             if FeatureFlags.localLLM {
                 LLMLocalDownload()
             } else {
-                OpenAIAPIKey()
-                OpenAIModelSelection()
+                LLMSourceSelection()
             }

             if HKHealthStore.isHealthDataAvailable() {
diff --git a/HealthGPT/Onboarding/OpenAIAPIKey.swift b/HealthGPT/Onboarding/OpenAIAPIKey.swift
index c7eb80b..6ec150b 100644
--- a/HealthGPT/Onboarding/OpenAIAPIKey.swift
+++ b/HealthGPT/Onboarding/OpenAIAPIKey.swift
@@ -17,7 +17,7 @@ struct OpenAIAPIKey: View {
     var body: some View {
         LLMOpenAIAPITokenOnboardingStep {
-            onboardingNavigationPath.nextStep()
+            onboardingNavigationPath.append(customView: OpenAIModelSelection())
         }
     }
 }
diff --git a/HealthGPT/SharedContext/StorageKeys.swift b/HealthGPT/SharedContext/StorageKeys.swift
index d86c932..4d0650a 100644
--- a/HealthGPT/SharedContext/StorageKeys.swift
+++ b/HealthGPT/SharedContext/StorageKeys.swift
@@ -12,6 +12,7 @@ enum StorageKeys {
     enum Defaults {
         static let enableTextToSpeech = false
+        static let llmSource = LLMSource.openai
     }

     // MARK: - Onboarding
@@ -19,6 +20,8 @@ enum StorageKeys {
     static let onboardingFlowComplete = "onboardingFlow.complete"
     /// A `Step` flag indicating the current step in the onboarding process.
     static let onboardingFlowStep = "onboardingFlow.step"
+    /// An `LLMSource` flag indicating the source of the model (local vs. OpenAI)
+    static let llmSource = "llmsource"
     /// An `LLMOpenAIModelType` flag indicating the OpenAI model to use
     static let openAIModel = "openAI.model"
     /// A `Bool` flag indicating if messages should be spoken.
diff --git a/HealthGPT/Supporting Files/Localizable.xcstrings b/HealthGPT/Supporting Files/Localizable.xcstrings
index 2a59712..0683629 100644
--- a/HealthGPT/Supporting Files/Localizable.xcstrings
+++ b/HealthGPT/Supporting Files/Localizable.xcstrings
@@ -1,6 +1,9 @@
 {
   "sourceLanguage" : "en",
   "strings" : {
+    "" : {
+
+    },
     "API_KEY_SUBTITLE" : {
       "extractionState" : "manual",
       "localizations" : {
@@ -219,6 +222,36 @@
         }
       }
     },
+    "LLM_SOURCE_SELECTION_BUTTON" : {
+      "localizations" : {
+        "en" : {
+          "stringUnit" : {
+            "state" : "translated",
+            "value" : "Save Choice"
+          }
+        }
+      }
+    },
+    "LLM_SOURCE_SELECTION_SUBTITLE" : {
+      "localizations" : {
+        "en" : {
+          "stringUnit" : {
+            "state" : "translated",
+            "value" : "Choose an on-device or OpenAI LLM"
+          }
+        }
+      }
+    },
+    "LLM_SOURCE_SELECTION_TITLE" : {
+      "localizations" : {
+        "en" : {
+          "stringUnit" : {
+            "state" : "translated",
+            "value" : "LLM Source Selection"
+          }
+        }
+      }
+    },
     "LOADING_CHAT_VIEW" : {
       "localizations" : {
         "en" : {
@@ -229,6 +262,16 @@
         }
       }
     },
+    "LOCAL_LLM_LABEL" : {
+      "localizations" : {
+        "en" : {
+          "stringUnit" : {
+            "state" : "translated",
+            "value" : "On-device LLM"
+          }
+        }
+      }
+    },
     "MODEL_SELECTION_SUBTITLE" : {
       "extractionState" : "manual",
       "localizations" : {
@@ -282,6 +325,16 @@
         }
       }
     },
+    "OPENAI_LLM_LABEL" : {
+      "localizations" : {
+        "en" : {
+          "stringUnit" : {
+            "state" : "translated",
+            "value" : "OpenAI LLM"
+          }
+        }
+      }
+    },
     "RESET" : {
       "localizations" : {
         "en" : {