
Commit 7a62ab7

enchantednatures authored and kujtimiihoxha committed
feat(groq): add support for Groq using the OpenAI provider
1 parent 1586d75 commit 7a62ab7

File tree: 5 files changed, +143 −47 lines


README.md (+12)

```diff
@@ -91,6 +91,10 @@ You can configure OpenCode using environment variables:
     "anthropic": {
       "apiKey": "your-api-key",
       "disabled": false
+    },
+    "groq": {
+      "apiKey": "your-api-key",
+      "disabled": false
     }
   },
   "agents": {
@@ -158,6 +162,14 @@ OpenCode supports a variety of AI models from different providers:
 
 - Claude 3.7 Sonnet
 
+### Groq
+
+- Llama 4 Maverick (17b-128e-instruct)
+- Llama 4 Scout (17b-16e-instruct)
+- QWEN QWQ-32b
+- Deepseek R1 distill Llama 70b
+- Llama 3.3 70b Versatile
+
 ## Usage
 
 ```bash
```
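With a `groq` provider block in place, an agent can point at one of the newly registered model IDs. A minimal sketch of such a config, where the top-level `providers` key and the `coder` agent name are illustrative assumptions; only the `groq` block, the `agents` key, and the `qwen-qwq` model ID come from this commit:

```json
{
  "providers": {
    "groq": {
      "apiKey": "your-api-key",
      "disabled": false
    }
  },
  "agents": {
    "coder": {
      "model": "qwen-qwq"
    }
  }
}
```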

internal/llm/models/groq.go (+82, new file)

```diff
@@ -0,0 +1,82 @@
+package models
+
+const (
+	ProviderGROQ ModelProvider = "groq"
+
+	// GROQ
+	QWENQwq ModelID = "qwen-qwq"
+
+	// GROQ preview models
+	Llama4Scout               ModelID = "meta-llama/llama-4-scout-17b-16e-instruct"
+	Llama4Maverick            ModelID = "meta-llama/llama-4-maverick-17b-128e-instruct"
+	Llama3_3_70BVersatile     ModelID = "llama-3.3-70b-versatile"
+	DeepseekR1DistillLlama70b ModelID = "deepseek-r1-distill-llama-70b"
+)
+
+var GroqModels = map[ModelID]Model{
+	//
+	// GROQ
+	QWENQwq: {
+		ID:                 QWENQwq,
+		Name:               "Qwen Qwq",
+		Provider:           ProviderGROQ,
+		APIModel:           "qwen-qwq-32b",
+		CostPer1MIn:        0.29,
+		CostPer1MInCached:  0.275,
+		CostPer1MOutCached: 0.0,
+		CostPer1MOut:       0.39,
+		ContextWindow:      128_000,
+		DefaultMaxTokens:   50000,
+		// for some reason, the groq api doesn't like the reasoningEffort parameter
+		CanReason: false,
+	},
+
+	Llama4Scout: {
+		ID:                 Llama4Scout,
+		Name:               "Llama4Scout",
+		Provider:           ProviderGROQ,
+		APIModel:           "meta-llama/llama-4-scout-17b-16e-instruct",
+		CostPer1MIn:        0.11,
+		CostPer1MInCached:  0,
+		CostPer1MOutCached: 0,
+		CostPer1MOut:       0.34,
+		ContextWindow:      128_000, // 10M when?
+	},
+
+	Llama4Maverick: {
+		ID:                 Llama4Maverick,
+		Name:               "Llama4Maverick",
+		Provider:           ProviderGROQ,
+		APIModel:           "meta-llama/llama-4-maverick-17b-128e-instruct",
+		CostPer1MIn:        0.20,
+		CostPer1MInCached:  0,
+		CostPer1MOutCached: 0,
+		CostPer1MOut:       0.20,
+		ContextWindow:      128_000,
+	},
+
+	Llama3_3_70BVersatile: {
+		ID:                 Llama3_3_70BVersatile,
+		Name:               "Llama3_3_70BVersatile",
+		Provider:           ProviderGROQ,
+		APIModel:           "llama-3.3-70b-versatile",
+		CostPer1MIn:        0.59,
+		CostPer1MInCached:  0,
+		CostPer1MOutCached: 0,
+		CostPer1MOut:       0.79,
+		ContextWindow:      128_000,
+	},
+
+	DeepseekR1DistillLlama70b: {
+		ID:                 DeepseekR1DistillLlama70b,
+		Name:               "DeepseekR1DistillLlama70b",
+		Provider:           ProviderGROQ,
+		APIModel:           "deepseek-r1-distill-llama-70b",
+		CostPer1MIn:        0.75,
+		CostPer1MInCached:  0,
+		CostPer1MOutCached: 0,
+		CostPer1MOut:       0.99,
+		ContextWindow:      128_000,
+		CanReason:          true,
+	},
+}
```
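The cost fields are dollars per million tokens, so a request's dollar cost falls out directly from the token counts. A self-contained sketch of that arithmetic; the trimmed `Model` struct here is illustrative rather than the repo's actual type, and the prices are copied from the table above:

```go
package main

import "fmt"

// Illustrative trimmed-down struct; the real Model lives in
// internal/llm/models and carries more fields.
type Model struct {
	Name         string
	CostPer1MIn  float64 // USD per 1M input tokens
	CostPer1MOut float64 // USD per 1M output tokens
}

// requestCost converts token counts into dollars using the per-1M rates.
func requestCost(m Model, inTokens, outTokens int) float64 {
	return float64(inTokens)/1e6*m.CostPer1MIn +
		float64(outTokens)/1e6*m.CostPer1MOut
}

func main() {
	qwq := Model{Name: "Qwen Qwq", CostPer1MIn: 0.29, CostPer1MOut: 0.39}
	// e.g. a 12k-token prompt with a 2k-token completion:
	fmt.Printf("%s: $%.6f\n", qwq.Name, requestCost(qwq, 12_000, 2_000))
}
```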

internal/llm/models/models.go (+1 −17)

```diff
@@ -23,17 +23,12 @@ type Model struct {
 
 // Model IDs
 const ( // GEMINI
-	// GROQ
-	QWENQwq ModelID = "qwen-qwq"
-
 	// Bedrock
 	BedrockClaude37Sonnet ModelID = "bedrock.claude-3.7-sonnet"
 )
 
 const (
 	ProviderBedrock ModelProvider = "bedrock"
-	ProviderGROQ    ModelProvider = "groq"
-
 	// ForTests
 	ProviderMock ModelProvider = "__mock"
 )
@@ -63,18 +58,6 @@ var SupportedModels = map[ModelID]Model{
 	// 	CostPer1MOut: 0.4,
 	// },
 	//
-	// // GROQ
-	// QWENQwq: {
-	// 	ID:                 QWENQwq,
-	// 	Name:               "Qwen Qwq",
-	// 	Provider:           ProviderGROQ,
-	// 	APIModel:           "qwen-qwq-32b",
-	// 	CostPer1MIn:        0,
-	// 	CostPer1MInCached:  0,
-	// 	CostPer1MOutCached: 0,
-	// 	CostPer1MOut:       0,
-	// },
-	//
 	// // Bedrock
 	BedrockClaude37Sonnet: {
 		ID: BedrockClaude37Sonnet,
@@ -92,4 +75,5 @@ func init() {
 	maps.Copy(SupportedModels, AnthropicModels)
 	maps.Copy(SupportedModels, OpenAIModels)
 	maps.Copy(SupportedModels, GeminiModels)
+	maps.Copy(SupportedModels, GroqModels)
 }
```
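The one-line `init` change is the only registration step Groq needs: each provider keeps its model table in its own file, and `maps.Copy` merges them into the shared lookup map. A simplified, runnable illustration of that pattern, with string maps standing in for the real `ModelID`/`Model` types:

```go
package main

import (
	"fmt"
	"maps"
)

func main() {
	// Stand-ins for SupportedModels and a provider table like GroqModels.
	supported := map[string]string{"claude-3.7-sonnet": "anthropic"}
	groq := map[string]string{"qwen-qwq": "groq"}

	// maps.Copy (stdlib, Go 1.21+) inserts every key from src into dst,
	// overwriting on collision, so later providers win.
	maps.Copy(supported, groq)

	fmt.Println(supported["qwen-qwq"]) // groq
}
```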

internal/llm/provider/provider.go (+8)

```diff
@@ -107,6 +107,14 @@ func NewProvider(providerName models.ModelProvider, opts ...ProviderClientOption
 			options: clientOptions,
 			client:  newBedrockClient(clientOptions),
 		}, nil
+	case models.ProviderGROQ:
+		clientOptions.openaiOptions = append(clientOptions.openaiOptions,
+			WithOpenAIBaseURL("https://api.groq.com/openai/v1"),
+		)
+		return &baseProvider[OpenAIClient]{
+			options: clientOptions,
+			client:  newOpenAIClient(clientOptions),
+		}, nil
 	case models.ProviderMock:
 		// TODO: implement mock client for test
 		panic("not implemented")
```
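This case works because Groq exposes an OpenAI-compatible API: pointing the existing OpenAI client at `https://api.groq.com/openai/v1` is essentially the whole integration. A standalone sketch of that compatibility using only `net/http`, bypassing the repo's client entirely; the `/chat/completions` path and request shape follow the standard OpenAI wire format, and the `GROQ_API_KEY` variable name is an assumption:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"os"
)

func main() {
	// Same base URL the provider switch injects via WithOpenAIBaseURL.
	const url = "https://api.groq.com/openai/v1/chat/completions"

	body, _ := json.Marshal(map[string]any{
		"model": "qwen-qwq-32b", // APIModel from groq.go above
		"messages": []map[string]string{
			{"role": "user", "content": "Reply with one word."},
		},
	})

	req, err := http.NewRequest(http.MethodPost, url, bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Authorization", "Bearer "+os.Getenv("GROQ_API_KEY"))
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	out, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status, string(out))
}
```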

opencode-schema.json (+40 −30)

```diff
@@ -12,28 +12,33 @@
     "model": {
       "description": "Model ID for the agent",
       "enum": [
-        "gemini-2.0-flash",
-        "bedrock.claude-3.7-sonnet",
+        "claude-3.7-sonnet",
         "claude-3-opus",
-        "claude-3.5-sonnet",
+        "gpt-4.1-mini",
+        "gpt-4o",
         "gpt-4o-mini",
+        "gemini-2.0-flash-lite",
+        "meta-llama/llama-4-maverick-17b-128e-instruct",
+        "gpt-4.1",
+        "gpt-4.5-preview",
         "o1",
+        "gpt-4.1-nano",
         "o3-mini",
+        "gemini-2.5-flash",
+        "gemini-2.0-flash",
+        "meta-llama/llama-4-scout-17b-16e-instruct",
+        "bedrock.claude-3.7-sonnet",
         "o1-pro",
-        "o4-mini",
-        "claude-3-haiku",
-        "gpt-4o",
         "o3",
-        "gpt-4.1-mini",
-        "gpt-4.5-preview",
-        "gemini-2.5-flash",
+        "gemini-2.5",
+        "qwen-qwq",
+        "llama-3.3-70b-versatile",
+        "deepseek-r1-distill-llama-70b",
+        "claude-3.5-sonnet",
+        "claude-3-haiku",
         "claude-3.5-haiku",
-        "gpt-4.1",
-        "gemini-2.0-flash-lite",
-        "claude-3.7-sonnet",
-        "o1-mini",
-        "gpt-4.1-nano",
-        "gemini-2.5"
+        "o4-mini",
+        "o1-mini"
       ],
       "type": "string"
     },
@@ -67,28 +72,33 @@
     "model": {
       "description": "Model ID for the agent",
       "enum": [
-        "gemini-2.0-flash",
-        "bedrock.claude-3.7-sonnet",
+        "claude-3.7-sonnet",
         "claude-3-opus",
-        "claude-3.5-sonnet",
+        "gpt-4.1-mini",
+        "gpt-4o",
         "gpt-4o-mini",
+        "gemini-2.0-flash-lite",
+        "meta-llama/llama-4-maverick-17b-128e-instruct",
+        "gpt-4.1",
+        "gpt-4.5-preview",
         "o1",
+        "gpt-4.1-nano",
         "o3-mini",
+        "gemini-2.5-flash",
+        "gemini-2.0-flash",
+        "meta-llama/llama-4-scout-17b-16e-instruct",
+        "bedrock.claude-3.7-sonnet",
         "o1-pro",
-        "o4-mini",
-        "claude-3-haiku",
-        "gpt-4o",
         "o3",
-        "gpt-4.1-mini",
-        "gpt-4.5-preview",
-        "gemini-2.5-flash",
+        "gemini-2.5",
+        "qwen-qwq",
+        "llama-3.3-70b-versatile",
+        "deepseek-r1-distill-llama-70b",
+        "claude-3.5-sonnet",
+        "claude-3-haiku",
         "claude-3.5-haiku",
-        "gpt-4.1",
-        "gemini-2.0-flash-lite",
-        "claude-3.7-sonnet",
-        "o1-mini",
-        "gpt-4.1-nano",
-        "gemini-2.5"
+        "o4-mini",
+        "o1-mini"
       ],
       "type": "string"
     },
```
