Chore/fix openai update #179

Merged: 2 commits, Nov 14, 2023
44 changes: 19 additions & 25 deletions 04-prompt-engineering-fundamentals/1-introduction.ipynb
@@ -76,48 +76,42 @@
"metadata": {},
"outputs": [],
"source": [
"# Run this as a common starting point for all the exercises below\n",
"# It sets the OpenAI API key and uses a helper function that sets the relevant model and parameters\n",
"# The OpenAI SDK was updated on Nov 8, 2023 with new guidance for migration\n",
"# See: https://github.com/openai/openai-python/discussions/742\n",
"\n",
"import openai\n",
"## Updated\n",
"import os\n",
"import openai\n",
"from openai import OpenAI\n",
"\n",
"# Expects OPENAI_API_KEY in env variables \n",
"# For GitHub Codespaces: set this as Codespaces secret => shows up as env var in OS\n",
"# For Docker Desktop: create a .env file (and .gitignore it explicitly to be safe) => shows up as env var from load_dotenv\n",
"from dotenv import load_dotenv, find_dotenv\n",
"_ = load_dotenv(find_dotenv())\n",
"\n",
"# Note that we can set different env variables to different OPENAI keys and just map the right one to openai.api_key here\n",
"# Example: have both OPENAI_API_KEY (for OpenAI) and AOAI_API_KEY (for Azure OpenAI) as options \n",
"openai.api_key = os.getenv('OPENAI_API_KEY')\n",
"\n",
"# Print Environment Variables\n",
"#for var in os.environ:\n",
"# print(f\"{var}: {os.environ[var]}\")\n",
"client = OpenAI(\n",
" api_key=os.environ['OPENAI_API_KEY'], # this is also the default, it can be omitted\n",
")\n",
"\n",
"## Updated\n",
"def get_completion(prompt, model=\"gpt-3.5-turbo\"):\n",
" messages = [{\"role\": \"user\", \"content\": prompt}]\n",
" response = openai.ChatCompletion.create(\n",
" response = openai.chat.completions.create(\n",
" model=model,\n",
" messages=messages,\n",
" temperature=0, # this is the degree of randomness of the model's output\n",
" max_tokens=1024\n",
" )\n",
" return response.choices[0].message[\"content\"]\n",
" return response.choices[0].message.content\n",
"\n",
"## ---------- Call the helper method\n",
"\n",
"## Set the primary content or simple prompt text here\n",
"### 1. Set primary content or prompt text\n",
"text = f\"\"\"\n",
"oh say can you see\n",
"\"\"\"\n",
"\n",
"## This uses a template that embeds the text \n",
"## allowing you to add additional content like instructions, cues, examples\n",
"### 2. Use that in the prompt template below\n",
"prompt = f\"\"\"\n",
"```{text}```\n",
"\"\"\"\n",
"\n",
"## Run the prompt\n",
"## 3. Run the prompt\n",
"response = get_completion(prompt)\n",
"print(response)"
]
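For reference, a minimal standalone sketch of the pattern this hunk migrates to, assuming openai>=1.0 is installed and OPENAI_API_KEY is set in the environment. It uses the instantiated client explicitly; the diff's module-level openai.chat.completions call is equivalent when the key is picked up from the environment.

```python
# Minimal sketch of the openai>=1.0 helper pattern adopted in this hunk.
# Assumes OPENAI_API_KEY is available in the environment.
import os
from openai import OpenAI

client = OpenAI(api_key=os.environ["OPENAI_API_KEY"])  # also the default lookup, so the arg can be omitted

def get_completion(prompt, model="gpt-3.5-turbo"):
    # Single-turn helper: wrap the prompt in a user message and return the text.
    response = client.chat.completions.create(
        model=model,
        messages=[{"role": "user", "content": prompt}],
        temperature=0,   # degree of randomness of the model's output
        max_tokens=1024,
    )
    # In openai>=1.0 the response is a typed object, so content is an attribute.
    return response.choices[0].message.content

print(get_completion("oh say can you see"))
```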
@@ -216,7 +210,7 @@
"metadata": {},
"outputs": [],
"source": [
"response = openai.ChatCompletion.create(\n",
"response = openai.chat.completions.create(\n",
" model=\"gpt-3.5-turbo\",\n",
" messages=[\n",
" {\"role\": \"system\", \"content\": \"You are a sarcastic assistant.\"},\n",
@@ -225,7 +219,7 @@
" {\"role\": \"user\", \"content\": \"Where was it played?\"}\n",
" ]\n",
")\n",
"print(response.choices[0].message[\"content\"])"
"print(response.choices[0].message.content)"
]
},
{
@@ -239,7 +233,7 @@
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
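The multi-turn cell changed in the later hunk follows the same migration. A hedged sketch of that call using the instantiated client from the setup cell (intervening conversation turns are collapsed in the diff and elided here):

```python
# Sketch of the multi-turn call from the second hunk, using the client from the setup cell.
response = client.chat.completions.create(
    model="gpt-3.5-turbo",
    messages=[
        {"role": "system", "content": "You are a sarcastic assistant."},
        # ... intervening turns collapsed in the diff ...
        {"role": "user", "content": "Where was it played?"},
    ],
)
# Attribute access replaces the old dict-style message["content"] lookup.
print(response.choices[0].message.content)
```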