
Commit a8cb34e

Merge pull request #138 from TitanStar73/patch-1: Update README.md

2 parents: 2b66ade + e5c4799

File tree: 1 file changed (+14 −14 lines changed)


README.md

Lines changed: 14 additions & 14 deletions
@@ -30,7 +30,7 @@ pip install ollama
 
 ```python
 import ollama
-response = ollama.chat(model='llama2', messages=[
+response = ollama.chat(model='llama3', messages=[
   {
     'role': 'user',
     'content': 'Why is the sky blue?',
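The hunk ends before the example prints the reply. For reference, a minimal runnable sketch of the updated call, assuming a local Ollama server is running and `llama3` has been pulled:

```python
import ollama

# Send a single user message to llama3 via the chat endpoint.
response = ollama.chat(model='llama3', messages=[
  {
    'role': 'user',
    'content': 'Why is the sky blue?',
  },
])

# The reply text lives under message.content in the response dict.
print(response['message']['content'])
```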
@@ -47,7 +47,7 @@ Response streaming can be enabled by setting `stream=True`, modifying function c
 import ollama
 
 stream = ollama.chat(
-    model='llama2',
+    model='llama3',
     messages=[{'role': 'user', 'content': 'Why is the sky blue?'}],
     stream=True,
 )
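With `stream=True` the call returns a generator of partial responses rather than a single dict. A minimal sketch of consuming it, under the same local-server assumption:

```python
import ollama

# Request a streamed reply; the call returns a generator of chunks.
stream = ollama.chat(
    model='llama3',
    messages=[{'role': 'user', 'content': 'Why is the sky blue?'}],
    stream=True,
)

# Print each partial message as it arrives.
for chunk in stream:
  print(chunk['message']['content'], end='', flush=True)
```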
@@ -63,13 +63,13 @@ The Ollama Python library's API is designed around the [Ollama REST API](https:/
 ### Chat
 
 ```python
-ollama.chat(model='llama2', messages=[{'role': 'user', 'content': 'Why is the sky blue?'}])
+ollama.chat(model='llama3', messages=[{'role': 'user', 'content': 'Why is the sky blue?'}])
 ```
 
 ### Generate
 
 ```python
-ollama.generate(model='llama2', prompt='Why is the sky blue?')
+ollama.generate(model='llama3', prompt='Why is the sky blue?')
 ```
 
 ### List
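For `generate`, the return value is a dict; a short sketch, where the `response` key is an assumption about how this library version exposes the completion text:

```python
import ollama

# One-shot completion with llama3, with no chat history involved.
result = ollama.generate(model='llama3', prompt='Why is the sky blue?')

# Assumed key: the generated text sits under 'response' here.
print(result['response'])
```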
@@ -81,14 +81,14 @@ ollama.list()
 ### Show
 
 ```python
-ollama.show('llama2')
+ollama.show('llama3')
 ```
 
 ### Create
 
 ```python
 modelfile='''
-FROM llama2
+FROM llama3
 SYSTEM You are mario from super mario bros.
 '''
 
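The create hunk cuts off just before the call itself, which shows up only as the next hunk's context line. Putting the two together, a sketch of the full flow with the new base model:

```python
import ollama

# The Modelfile now builds on llama3 instead of llama2.
modelfile = '''
FROM llama3
SYSTEM You are mario from super mario bros.
'''

# Register the customized model under the name 'example'.
ollama.create(model='example', modelfile=modelfile)
```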
@@ -98,31 +98,31 @@ ollama.create(model='example', modelfile=modelfile)
 ### Copy
 
 ```python
-ollama.copy('llama2', 'user/llama2')
+ollama.copy('llama3', 'user/llama3')
 ```
 
 ### Delete
 
 ```python
-ollama.delete('llama2')
+ollama.delete('llama3')
 ```
 
 ### Pull
 
 ```python
-ollama.pull('llama2')
+ollama.pull('llama3')
 ```
 
 ### Push
 
 ```python
-ollama.push('user/llama2')
+ollama.push('user/llama3')
 ```
 
 ### Embeddings
 
 ```python
-ollama.embeddings(model='llama2', prompt='The sky is blue because of rayleigh scattering')
+ollama.embeddings(model='llama3', prompt='The sky is blue because of rayleigh scattering')
 ```
 
 ## Custom client
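The embeddings call above also returns a dict. A short sketch, where the `embedding` key is an assumption about how this library version exposes the vector:

```python
import ollama

# Embed a sentence with llama3.
result = ollama.embeddings(
    model='llama3',
    prompt='The sky is blue because of rayleigh scattering',
)

# Assumed key: the vector itself sits under 'embedding'.
print(len(result['embedding']))
```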
@@ -135,7 +135,7 @@ A custom client can be created with the following fields:
 ```python
 from ollama import Client
 client = Client(host='http://localhost:11434')
-response = client.chat(model='llama2', messages=[
+response = client.chat(model='llama3', messages=[
   {
     'role': 'user',
     'content': 'Why is the sky blue?',
@@ -151,7 +151,7 @@ from ollama import AsyncClient
 
 async def chat():
   message = {'role': 'user', 'content': 'Why is the sky blue?'}
-  response = await AsyncClient().chat(model='llama2', messages=[message])
+  response = await AsyncClient().chat(model='llama3', messages=[message])
 
 asyncio.run(chat())
 ```
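The async hunk omits its imports and never prints the reply. A runnable sketch of the updated non-streaming async example, under the same local-server assumption:

```python
import asyncio

from ollama import AsyncClient


async def chat():
  # Await the full (non-streamed) reply from llama3.
  message = {'role': 'user', 'content': 'Why is the sky blue?'}
  response = await AsyncClient().chat(model='llama3', messages=[message])
  print(response['message']['content'])


asyncio.run(chat())
```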
@@ -164,7 +164,7 @@ from ollama import AsyncClient
 
 async def chat():
   message = {'role': 'user', 'content': 'Why is the sky blue?'}
-  async for part in await AsyncClient().chat(model='llama2', messages=[message], stream=True):
+  async for part in await AsyncClient().chat(model='llama3', messages=[message], stream=True):
     print(part['message']['content'], end='', flush=True)
 
 asyncio.run(chat())

0 commit comments
