@@ -30,7 +30,7 @@ pip install ollama

```python
import ollama
-response = ollama.chat(model='llama2', messages=[
+response = ollama.chat(model='llama3', messages=[
  {
    'role': 'user',
    'content': 'Why is the sky blue?',
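For reference, `ollama.chat` returns a dict shaped like the REST response, so the reply is read from `response['message']['content']`. A minimal sketch, assuming a running local Ollama server with the model already pulled:

```python
import ollama

# Assumes a running local Ollama server with llama3 pulled.
response = ollama.chat(model='llama3', messages=[
  {'role': 'user', 'content': 'Why is the sky blue?'},
])
# The reply text mirrors the shape of the REST API response.
print(response['message']['content'])
```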
@@ -47,7 +47,7 @@ Response streaming can be enabled by setting `stream=True`, modifying function c
import ollama

stream = ollama.chat(
-    model='llama2',
+    model='llama3',
    messages=[{'role': 'user', 'content': 'Why is the sky blue?'}],
    stream=True,
)
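The hunk ends before the consuming loop, so for context: with `stream=True` the call returns a generator of partial responses. Continuing the snippet above, each chunk carries the same `message`/`content` shape as the non-streaming reply:

```python
# Continuation of the snippet above: print chunks as they arrive.
for chunk in stream:
  print(chunk['message']['content'], end='', flush=True)
```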
@@ -63,13 +63,13 @@ The Ollama Python library's API is designed around the [Ollama REST API](https:/
### Chat

```python
-ollama.chat(model='llama2', messages=[{'role': 'user', 'content': 'Why is the sky blue?'}])
+ollama.chat(model='llama3', messages=[{'role': 'user', 'content': 'Why is the sky blue?'}])
```

### Generate

```python
-ollama.generate(model='llama2', prompt='Why is the sky blue?')
+ollama.generate(model='llama3', prompt='Why is the sky blue?')
```

### List
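A usage sketch for `generate`: the completion text comes back under a `response` key, mirroring the REST API's generate endpoint (again assuming a local server with the model pulled):

```python
import ollama

result = ollama.generate(model='llama3', prompt='Why is the sky blue?')
# The completion text sits under the 'response' key.
print(result['response'])
```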
@@ -81,14 +81,14 @@ ollama.list()
### Show

```python
-ollama.show('llama2')
+ollama.show('llama3')
```

### Create

```python
modelfile='''
-FROM llama2
+FROM llama3
SYSTEM You are mario from super mario bros.
'''
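The Modelfile here feeds the `create` call shown in the next hunk's header; as a sketch, the resulting model is then addressed by the name passed to `create` (the follow-up prompt is illustrative):

```python
import ollama

modelfile = '''
FROM llama3
SYSTEM You are mario from super mario bros.
'''

ollama.create(model='example', modelfile=modelfile)
# The custom model is addressed by the name given to create().
response = ollama.chat(model='example', messages=[
  {'role': 'user', 'content': 'Who are you?'},
])
print(response['message']['content'])
```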
@@ -98,31 +98,31 @@ ollama.create(model='example', modelfile=modelfile)
### Copy

```python
-ollama.copy('llama2', 'user/llama2')
+ollama.copy('llama3', 'user/llama3')
```

### Delete

```python
-ollama.delete('llama2')
+ollama.delete('llama3')
```

### Pull

```python
-ollama.pull('llama2')
+ollama.pull('llama3')
```

### Push

```python
-ollama.push('user/llama2')
+ollama.push('user/llama3')
```

### Embeddings

```python
-ollama.embeddings(model='llama2', prompt='The sky is blue because of rayleigh scattering')
+ollama.embeddings(model='llama3', prompt='The sky is blue because of rayleigh scattering')
```

## Custom client
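A worked sketch for `embeddings`: the vector comes back under an `embedding` key, so comparing two prompts reduces to cosine similarity over plain Python lists. The `cosine` helper is illustrative, not part of the library:

```python
import math

import ollama

def cosine(a, b):
  # Illustrative helper: cosine similarity of two equal-length vectors.
  dot = sum(x * y for x, y in zip(a, b))
  norm = math.sqrt(sum(x * x for x in a)) * math.sqrt(sum(y * y for y in b))
  return dot / norm

v1 = ollama.embeddings(model='llama3', prompt='The sky is blue because of rayleigh scattering')['embedding']
v2 = ollama.embeddings(model='llama3', prompt='Why is the sky blue?')['embedding']
print(cosine(v1, v2))
```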
@@ -135,7 +135,7 @@ A custom client can be created with the following fields:
```python
from ollama import Client
client = Client(host='http://localhost:11434')
-response = client.chat(model='llama2', messages=[
+response = client.chat(model='llama3', messages=[
  {
    'role': 'user',
    'content': 'Why is the sky blue?',
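The field list this hunk's header introduces is truncated out of the diff; assuming it includes `host` and a request `timeout`, a sketch (the 30-second value is illustrative):

```python
from ollama import Client

# timeout is assumed here to be a supported client field alongside host.
client = Client(host='http://localhost:11434', timeout=30)
response = client.chat(model='llama3', messages=[
  {'role': 'user', 'content': 'Why is the sky blue?'},
])
```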
@@ -151,7 +151,7 @@ from ollama import AsyncClient

async def chat():
  message = {'role': 'user', 'content': 'Why is the sky blue?'}
-  response = await AsyncClient().chat(model='llama2', messages=[message])
+  response = await AsyncClient().chat(model='llama3', messages=[message])

asyncio.run(chat())
```
@@ -164,7 +164,7 @@ from ollama import AsyncClient

async def chat():
  message = {'role': 'user', 'content': 'Why is the sky blue?'}
-  async for part in await AsyncClient().chat(model='llama2', messages=[message], stream=True):
+  async for part in await AsyncClient().chat(model='llama3', messages=[message], stream=True):
    print(part['message']['content'], end='', flush=True)

asyncio.run(chat())
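Both async hunks begin below their imports, so neither is runnable as excerpted. For reference, a complete version of the streaming variant, assuming only the `asyncio` and `AsyncClient` imports named in the hunk headers:

```python
import asyncio

from ollama import AsyncClient

async def chat():
  message = {'role': 'user', 'content': 'Why is the sky blue?'}
  # With stream=True, the awaited call yields an async generator of parts.
  async for part in await AsyncClient().chat(model='llama3', messages=[message], stream=True):
    print(part['message']['content'], end='', flush=True)

asyncio.run(chat())
```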