diff --git a/lm-hackers.ipynb b/lm-hackers.ipynb
index 12795c1..50a7972 100644
--- a/lm-hackers.ipynb
+++ b/lm-hackers.ipynb
@@ -424,7 +424,8 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "from openai import ChatCompletion,Completion"
+    "from openai import OpenAI  # new-style client in openai >= 1.0\n",
+    "client = OpenAI()  # reads OPENAI_API_KEY from the environment by default"
    ]
   },
   {
@@ -436,7 +437,7 @@
    "source": [
     "aussie_sys = \"You are an Aussie LLM that uses Aussie slang and analogies whenever possible.\"\n",
     "\n",
-    "c = ChatCompletion.create(\n",
+    "c = client.chat.completions.create(\n",
     "    model=\"gpt-3.5-turbo\",\n",
     "    messages=[{\"role\": \"system\", \"content\": aussie_sys},\n",
     "              {\"role\": \"user\", \"content\": \"What is money?\"}])"
@@ -468,7 +469,7 @@
     }
    ],
    "source": [
-    "c['choices'][0]['message']['content']"
+    "c.choices[0].message.content"
    ]
   },
   {
@@ -478,7 +479,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "from fastcore.utils import nested_idx"
+    "# from fastcore.utils import nested_idx"
    ]
   },
   {
@@ -488,7 +489,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "def response(compl): print(nested_idx(compl, 'choices', 0, 'message', 'content'))"
+    "def response(compl): print(compl.choices[0].message.content)"
    ]
   },
   {
@@ -580,7 +581,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "c = ChatCompletion.create(\n",
+    "c = client.chat.completions.create(\n",
     "    model=\"gpt-3.5-turbo\",\n",
     "    messages=[{\"role\": \"system\", \"content\": aussie_sys},\n",
     "              {\"role\": \"user\", \"content\": \"What is money?\"},\n",
@@ -617,7 +618,7 @@
     "    msgs = []\n",
     "    if system: msgs.append({\"role\": \"system\", \"content\": system})\n",
     "    msgs.append({\"role\": \"user\", \"content\": user})\n",
-    "    return ChatCompletion.create(model=model, messages=msgs, **kwargs)"
+    "    return client.chat.completions.create(model=model, messages=msgs, **kwargs)"
    ]
   },
   {
@@ -657,7 +658,7 @@
    "source": [
     "def call_api(prompt, model=\"gpt-3.5-turbo\"):\n",
     "    msgs = [{\"role\": \"user\", \"content\": prompt}]\n",
-    "    try: return ChatCompletion.create(model=model, messages=msgs)\n",
-    "    except openai.error.RateLimitError as e:\n",
-    "        retry_after = int(e.headers.get(\"retry-after\", 60))\n",
+    "    try: return client.chat.completions.create(model=model, messages=msgs)\n",
+    "    except openai.RateLimitError as e:  # the openai.error module was removed in v1\n",
+    "        retry_after = int(e.response.headers.get(\"retry-after\", 60))\n",
     "        print(f\"Rate limit exceeded, waiting for {retry_after} seconds...\")\n",
@@ -713,8 +714,8 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "c = Completion.create(prompt=\"Australian Jeremy Howard is \",\n",
-    "                      model=\"gpt-3.5-turbo-instruct\", echo=True, logprobs=5)"
+    "c = client.completions.create(prompt=\"Australian Jeremy Howard is \",\n",
+    "                              model=\"gpt-3.5-turbo-instruct\", echo=False, logprobs=5)"
    ]
   },
   {
@@ -1015,7 +1016,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "c = ChatCompletion.create(\n",
+    "c = client.chat.completions.create(\n",
     "    model=\"gpt-3.5-turbo\",\n",
     "    functions=[schema(python)],\n",
     "    messages=[{\"role\": \"user\", \"content\": \"What is 12 factorial?\"},\n",
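
A minimal sketch of the migrated calls in use, assuming openai >= 1.0 is installed and OPENAI_API_KEY is set in the environment; the `ask` helper is illustrative, not part of the notebook:

    import time
    from openai import OpenAI, RateLimitError

    client = OpenAI()  # reads OPENAI_API_KEY from the environment

    def ask(prompt, model="gpt-3.5-turbo"):
        # Single-turn chat call with a basic rate-limit retry,
        # mirroring the migrated call_api() above.
        try:
            c = client.chat.completions.create(
                model=model, messages=[{"role": "user", "content": prompt}])
            # v1 responses are objects with attribute access, not dicts
            return c.choices[0].message.content
        except RateLimitError as e:
            # retry-after is usually a number of seconds; fall back to 60
            retry_after = int(e.response.headers.get("retry-after", 60))
            time.sleep(retry_after)
            return ask(prompt, model)

    print(ask("What is money?"))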