@@ -2,6 +2,8 @@ import type { LLMProviders, Session } from "~llms"
2
2
3
3
export default class GeminiNano implements LLMProviders {
4
4
5
+ cumulative : boolean = false
6
+
5
7
async validate ( ) : Promise < void > {
6
8
if ( ! window . ai ) throw new Error ( '你的浏览器没有启用 AI 功能' )
7
9
if ( ! window . ai . languageModel &&
@@ -13,43 +15,52 @@ export default class GeminiNano implements LLMProviders {
13
15
/**
 * Sends a single prompt through a freshly acquired Gemini Nano session
 * and returns the model's reply.
 *
 * Fix: `return await` instead of returning the bare promise. With a plain
 * `return`, the `finally` block runs (and calls `session[Symbol.dispose]()`)
 * before the prompt promise settles, disposing the session while the
 * request is still in flight. Awaiting keeps the session alive until the
 * response arrives, and only then disposes it.
 *
 * @param chat the prompt text to send to the model
 * @returns the model's textual response
 */
async prompt(chat: string): Promise<string> {
  const session = await this.asSession()
  try {
    console.debug('[gemini nano] prompting: ', chat)
    // Must await here so disposal in `finally` happens after completion.
    return await session.prompt(chat)
  } finally {
    console.debug('[gemini nano] done')
    session[Symbol.dispose]()
  }
}
21
25
22
26
/**
 * Streams a prompt's response chunk by chunk from a freshly acquired
 * Gemini Nano session. The session is disposed only after the stream is
 * fully consumed (or the generator is closed), via the `finally` block.
 *
 * @param chat the prompt text to send to the model
 * @yields successive response chunks from the model
 */
async *promptStream(chat: string): AsyncGenerator<string> {
  const session = await this.asSession()
  try {
    console.debug('[gemini nano] prompting stream: ', chat)
    const stream = session.promptStream(chat)
    for await (const piece of stream) yield piece
  } finally {
    console.debug('[gemini nano] done')
    session[Symbol.dispose]()
  }
}
30
39
31
40
/**
 * Picks the best available built-in AI backend and wraps it in a Session.
 *
 * Preference order: the Summarizer API first, then the Assistant /
 * language-model API; each is used only when its capabilities report
 * 'readily' available. Throws when no backend is usable.
 *
 * @returns a session wrapping the first readily-available backend
 * @throws Error when neither backend is currently available
 */
async asSession(): Promise<Session<LLMProviders>> {

  if (window.ai.summarizer) {
    const summarizer = window.ai.summarizer
    const caps = await summarizer.capabilities()
    if (caps.available === 'readily') {
      console.debug('using gemini summarizer')
      return new GeminiSummarizer(await summarizer.create())
    }
    console.warn('AI Summarizer 当前不可用: ', caps)
  }

  if (window.ai.assistant || window.ai.languageModel) {
    const assistant = window.ai.assistant ?? window.ai.languageModel
    const caps = await assistant.capabilities()
    if (caps.available === 'readily') {
      console.debug('using gemini assistant')
      return new GeminiAssistant(await assistant.create())
    }
    console.warn('AI Assistant 当前不可用: ', caps)
  }

  throw new Error('你的浏览器 AI 功能当前不可用')
}
55
66
}
@@ -59,10 +70,12 @@ class GeminiAssistant implements Session<LLMProviders> {
59
70
constructor ( private readonly assistant : AIAssistant ) { }
60
71
61
72
/**
 * Forwards a single prompt to the wrapped AIAssistant.
 *
 * @param chat the prompt text
 * @returns the assistant's textual response
 */
prompt(chat: string): Promise<string> {
  console.debug('[assistant] prompting: ', chat)
  const { assistant } = this
  return assistant.prompt(chat)
}
64
76
65
77
async * promptStream ( chat : string ) : AsyncGenerator < string > {
78
+ console . debug ( '[assistant] prompting stream: ' , chat )
66
79
const stream = this . assistant . promptStreaming ( chat )
67
80
for await ( const chunk of stream ) {
68
81
yield chunk
@@ -80,10 +93,12 @@ class GeminiSummarizer implements Session<LLMProviders> {
80
93
constructor ( private readonly summarizer : AISummarizer ) { }
81
94
82
95
/**
 * Treats a prompt as text to summarize and forwards it to the wrapped
 * AISummarizer.
 *
 * @param chat the text to summarize
 * @returns the summarizer's textual response
 */
prompt(chat: string): Promise<string> {
  console.debug('[summarizer] summarizing: ', chat)
  const { summarizer } = this
  return summarizer.summarize(chat)
}
85
99
86
100
async * promptStream ( chat : string ) : AsyncGenerator < string > {
101
+ console . debug ( '[summarizer] summarizing stream: ' , chat )
87
102
const stream = this . summarizer . summarizeStreaming ( chat )
88
103
for await ( const chunk of stream ) {
89
104
yield chunk
0 commit comments