
Commit

Merge pull request microsoft#188 from corradocavalli/fix-setup-and-ch04-issues

Fixed documentation, broken notebooks and made it runnable inside DevContainer
koreyspace authored Dec 12, 2023
2 parents ce61287 + 9f05a2d commit 3139ae3
Showing 14 changed files with 1,117 additions and 1,234 deletions.
2 changes: 1 addition & 1 deletion .devcontainer/post-create.sh
@@ -1,4 +1,4 @@
-#! /bin/bash
+#!/bin/bash

# Install OpenAI and Dotenv for Python
# TODO: Check why this can't be done in requirements.txt
5 changes: 4 additions & 1 deletion .env.copy
@@ -1 +1,4 @@
-OPENAI_API_KEY=
+AZURE_OPENAI_ENDPOINT='<add your endpoint here>'
+AZURE_OPENAI_DEPLOYMENT='<add your deployment name here>'
+AZURE_OPENAI_KEY='<add your key here>'
+AZURE_OPENAI_EMBEDDINGS_DEPLOYMENT='<add your deployment name here>'
26 changes: 15 additions & 11 deletions 04-prompt-engineering-fundamentals/1-introduction.ipynb
@@ -81,18 +81,22 @@
"\n",
"## Updated\n",
"import os\n",
"import openai\n",
"from openai import OpenAI\n",
"from openai import AzureOpenAI\n",
"from dotenv import load_dotenv\n",
"load_dotenv()\n",
"\n",
"client = OpenAI(\n",
" api_key=os.environ['OPENAI_API_KEY'], # this is also the default, it can be omitted\n",
")\n",
"client = AzureOpenAI(\n",
" api_key=os.environ['AZURE_OPENAI_KEY'], # this is also the default, it can be omitted\n",
" api_version = \"2023-05-15\"\n",
" )\n",
"\n",
"deployment=os.environ['AZURE_OPENAI_DEPLOYMENT']\n",
"\n",
"## Updated\n",
"def get_completion(prompt, model=\"gpt-3.5-turbo\"):\n",
" messages = [{\"role\": \"user\", \"content\": prompt}]\n",
" response = openai.chat.completions.create(\n",
" model=model,\n",
"def get_completion(prompt):\n",
" messages = [{\"role\": \"user\", \"content\": prompt}] \n",
" response = client.chat.completions.create( \n",
" model=deployment, \n",
" messages=messages,\n",
" temperature=0, # this is the degree of randomness of the model's output\n",
" max_tokens=1024\n",
@@ -210,8 +214,8 @@
"metadata": {},
"outputs": [],
"source": [
"response = openai.chat.completions.create(\n",
" model=\"gpt-3.5-turbo\",\n",
"response = client.chat.completions.create(\n",
" model=deployment,\n",
" messages=[\n",
" {\"role\": \"system\", \"content\": \"You are a sarcastic assistant.\"},\n",
" {\"role\": \"user\", \"content\": \"Who won the world series in 2020?\"},\n",
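For readability, the updated notebook cell amounts to roughly the following Python once the JSON escaping is stripped (a sketch assembled from the two hunks above; the end of get_completion is not shown in the diff, so the return line is an assumption):

import os
from openai import AzureOpenAI
from dotenv import load_dotenv

load_dotenv()

client = AzureOpenAI(
    api_key=os.environ['AZURE_OPENAI_KEY'],
    api_version="2023-05-15",
    # azure_endpoint is read from the AZURE_OPENAI_ENDPOINT variable set in .env (see .env.copy above)
)

deployment = os.environ['AZURE_OPENAI_DEPLOYMENT']

def get_completion(prompt):
    messages = [{"role": "user", "content": prompt}]
    response = client.chat.completions.create(
        model=deployment,
        messages=messages,
        temperature=0,  # this is the degree of randomness of the model's output
        max_tokens=1024,
    )
    return response.choices[0].message.content  # assumed return value; the closing lines of the cell are truncated above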
794 changes: 395 additions & 399 deletions 04-prompt-engineering-fundamentals/README.md

Large diffs are not rendered by default.

27 changes: 10 additions & 17 deletions 06-text-generation-apps/app-recipe.py
@@ -1,17 +1,18 @@
-import openai
+from openai import AzureOpenAI
import os
import dotenv

# import dotenv
dotenv.load_dotenv()

-openai.api_key = os.getenv("API_KEY")

-# enable below if you use Azure Open AI
-openai.api_type = 'azure'
-openai.api_version = '2023-05-15'
-openai.api_base = os.getenv("API_BASE")
+# configure Azure OpenAI service client
+client = AzureOpenAI(
+  azure_endpoint = os.environ["AZURE_OPENAI_ENDPOINT"],
+  api_key=os.environ['AZURE_OPENAI_KEY'],
+  api_version = "2023-10-01-preview"
+  )

+deployment=os.environ['AZURE_OPENAI_DEPLOYMENT']

no_recipes = input("No of recipes (for example, 5: ")

@@ -23,12 +24,8 @@
prompt = f"Show me {no_recipes} recipes for a dish with the following ingredients: {ingredients}. Per recipe, list all the ingredients used, no {filter}: "


-# engine

-# deployment_id
-deployment_name = os.getenv("DEPLOYMENT_NAME")
+completion = client.completions.create(model=deployment, prompt=prompt, max_tokens=600, temperature = 0.1)

-completion = openai.Completion.create(engine=deployment_name, prompt=prompt, max_tokens=600, temperature=0.1)

# print response
print("Recipes:")
@@ -38,13 +35,9 @@
prompt_shopping = "Produce a shopping list, and please don't include ingredients that I already have at home: "

new_prompt = f"Given ingredients at home {ingredients} and these generated recipes: {old_prompt_result}, {prompt_shopping}"
-completion = openai.Completion.create(engine=deployment_name, prompt=new_prompt, max_tokens=600)
+completion = client.completions.create(model=deployment, prompt=prompt, max_tokens=600)

# print response
print("\n=====Shopping list ======= \n")
print(completion.choices[0].text)

-# very unhappy _____.

-# Once upon a time there was a very unhappy mermaid.
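Taken together, the updated script is a two-step prompt chain: the first completion generates the recipes, and its text is folded into a follow-up prompt that asks for a shopping list. A condensed sketch of that flow, assuming the client, deployment, prompt and ingredients variables defined earlier in the script (note it passes new_prompt to the second call, whereas the committed line above still passes prompt):

# Step 1: generate recipes from the user's ingredients.
completion = client.completions.create(model=deployment, prompt=prompt, max_tokens=600, temperature=0.1)
old_prompt_result = completion.choices[0].text  # assumed assignment; this line is hidden in the truncated hunk
print("Recipes:")
print(old_prompt_result)

# Step 2: fold the generated recipes into a follow-up prompt for a shopping list.
prompt_shopping = "Produce a shopping list, and please don't include ingredients that I already have at home: "
new_prompt = f"Given ingredients at home {ingredients} and these generated recipes: {old_prompt_result}, {prompt_shopping}"
completion = client.completions.create(model=deployment, prompt=new_prompt, max_tokens=600)

print("\n=====Shopping list ======= \n")
print(completion.choices[0].text)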

25 changes: 10 additions & 15 deletions 06-text-generation-apps/app.py
@@ -1,33 +1,28 @@
-import openai
+from openai import AzureOpenAI
import os
import dotenv

# import dotenv
dotenv.load_dotenv()

-openai.api_key = os.getenv("API_KEY")
+# configure Azure OpenAI service client
+client = AzureOpenAI(
+  azure_endpoint = os.environ["AZURE_OPENAI_ENDPOINT"],
+  api_key=os.environ['AZURE_OPENAI_KEY'],
+  api_version = "2023-10-01-preview"
+  )

-# enable below if you use Azure Open AI
-openai.api_type = 'azure'
-openai.api_version = '2023-05-15'
-openai.api_base = os.getenv("API_BASE")
+deployment=os.environ['AZURE_OPENAI_DEPLOYMENT']

# add your completion code
prompt = "Complete the following: Once upon a time there was a"

-# engine
-engine = "davinci-001"

-# deployment_id, azure specific
-deployment_name = os.getenv("DEPLOYMENT_NAME")

-completion = openai.Completion.create(engine=deployment_name, prompt=prompt, max_tokens=600)
+# make completion
+completion = client.completions.create(model=deployment, prompt=prompt)

# print response
print(completion.choices[0].text)


# very unhappy _____.

# Once upon a time there was a very unhappy mermaid.
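For comparison, the notebook updated in this same commit uses the chat completions endpoint rather than the legacy completions endpoint used here; a minimal, illustrative sketch of the equivalent request against the same deployment (not part of this commit):

# Same client and deployment as above, but via the chat completions API.
messages = [{"role": "user", "content": prompt}]
chat_completion = client.chat.completions.create(
    model=deployment,
    messages=messages,
    max_tokens=600,
)
print(chat_completion.choices[0].message.content)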

