diff --git a/Bonus/Exploring_GPT_4_API.ipynb b/Bonus/Exploring_GPT_4_API.ipynb
index a29cbaa..9e23f74 100644
--- a/Bonus/Exploring_GPT_4_API.ipynb
+++ b/Bonus/Exploring_GPT_4_API.ipynb
@@ -1,19 +1,4 @@
{
- "nbformat": 4,
- "nbformat_minor": 0,
- "metadata": {
- "colab": {
- "provenance": [],
- "toc_visible": true
- },
- "kernelspec": {
- "name": "python3",
- "display_name": "Python 3"
- },
- "language_info": {
- "name": "python"
- }
- },
"cells": [
{
"cell_type": "markdown",
@@ -55,12 +40,12 @@
},
{
"cell_type": "markdown",
- "source": [
- "What do you really need for NLP tasks: GPT-3, GPT-3.5-turbo, or GPT-4? Run each task in the notebook to decide.\n"
- ],
"metadata": {
"id": "N_1BX64UZE3g"
- }
+ },
+ "source": [
+ "What do you really need for NLP tasks: GPT-3, GPT-3.5-turbo, or GPT-4? Run each task in the notebook to decide.\n"
+ ]
},
{
"cell_type": "markdown",
@@ -73,25 +58,18 @@
},
{
"cell_type": "code",
+ "execution_count": 1,
"metadata": {
- "id": "qS_Qk62FxclT",
"colab": {
"base_uri": "https://localhost:8080/"
},
+ "id": "qS_Qk62FxclT",
"outputId": "74ea7772-261c-47cb-9a79-6f1996d65699"
},
- "source": [
- "try:\n",
- " import openai\n",
- "except:\n",
- " !pip install openai\n",
- " import openai"
- ],
- "execution_count": 1,
"outputs": [
{
- "output_type": "stream",
"name": "stdout",
+ "output_type": "stream",
"text": [
"Looking in indexes: https://pypi.org/simple, https://us-python.pkg.dev/colab-wheels/public/simple/\n",
"Collecting openai\n",
@@ -124,6 +102,13 @@
"Successfully installed aiohttp-3.8.4 aiosignal-1.3.1 async-timeout-4.0.2 frozenlist-1.3.3 multidict-6.0.4 openai-0.27.2 yarl-1.8.2\n"
]
}
+ ],
+ "source": [
+ "try:\n",
+ " import openai\n",
+ "except:\n",
+ " !pip install openai==0.28\n",
+ " import openai"
]
},
{
@@ -137,10 +122,7 @@
},
{
"cell_type": "code",
- "source": [
- "from google.colab import drive\n",
- "drive.mount('/content/drive')"
- ],
+ "execution_count": 2,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
@@ -148,55 +130,61 @@
"id": "jl1yS4tJeYI2",
"outputId": "b1f9550f-1f98-4843-c5ca-cde146bbdcd5"
},
- "execution_count": 2,
"outputs": [
{
- "output_type": "stream",
"name": "stdout",
+ "output_type": "stream",
"text": [
"Mounted at /content/drive\n"
]
}
+ ],
+ "source": [
+ "from google.colab import drive\n",
+ "drive.mount('/content/drive')"
]
},
{
"cell_type": "code",
+ "execution_count": 3,
"metadata": {
"id": "P6uKDkb3y7QZ"
},
+ "outputs": [],
"source": [
"f = open(\"drive/MyDrive/files/api_key.txt\", \"r\")\n",
"API_KEY=f.readline()\n",
"f.close()"
- ],
- "execution_count": 3,
- "outputs": []
+ ]
},
{
"cell_type": "code",
+ "execution_count": 4,
+ "metadata": {
+ "id": "k8cmujpyxKjj"
+ },
+ "outputs": [],
"source": [
"#The OpenAI Key\n",
"import os\n",
"os.environ['OPENAI_API_KEY'] =API_KEY\n",
"openai.api_key = os.getenv(\"OPENAI_API_KEY\")"
- ],
- "metadata": {
- "id": "k8cmujpyxKjj"
- },
- "execution_count": 4,
- "outputs": []
+ ]
},
{
"cell_type": "markdown",
- "source": [
- "#Engines"
- ],
"metadata": {
"id": "JFIIICtDEwRk"
- }
+ },
+ "source": [
+ "#Engines"
+ ]
},
{
"cell_type": "markdown",
+ "metadata": {
+ "id": "qzHMeedba1SD"
+ },
"source": [
"### List of Engines\n",
"\n",
@@ -221,28 +209,22 @@
"\n",
"\n",
"\n"
- ],
- "metadata": {
- "id": "qzHMeedba1SD"
- }
+ ]
},
{
"cell_type": "code",
- "source": [
- "elist=openai.Engine.list()"
- ],
+ "execution_count": 5,
"metadata": {
"id": "smyf1O9yEyac"
},
- "execution_count": 5,
- "outputs": []
+ "outputs": [],
+ "source": [
+ "elist=openai.Engine.list()"
+ ]
},
{
"cell_type": "code",
- "source": [
- "length=len(elist[\"data\"])\n",
- "print(\"Number of Engines: \",length)"
- ],
+ "execution_count": 6,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
@@ -250,47 +232,34 @@
"id": "yn4ALmhTFO97",
"outputId": "3fd5d34e-0732-48dc-b6a9-f2028294441a"
},
- "execution_count": 6,
"outputs": [
{
- "output_type": "stream",
"name": "stdout",
+ "output_type": "stream",
"text": [
"Number of Engines: 52\n"
]
}
+ ],
+ "source": [
+ "length=len(elist[\"data\"])\n",
+ "print(\"Number of Engines: \",length)"
]
},
{
"cell_type": "code",
- "source": [
- "ef=open('engines.txt','w')\n",
- "ef.write(\"engine\")\n",
- "ef.write('\\n')\n",
- "l=0\n",
- "for i in range(0,length):\n",
- " try:\n",
- " el=elist[\"data\"][i][\"id\"]\n",
- " print(i,el)\n",
- " ef.write(str(el))\n",
- " ef.write('\\n')\n",
- " except:\n",
- " print(\"number of engines: \",i)\n",
- "\n",
- "ef.close()"
- ],
+ "execution_count": 7,
"metadata": {
- "id": "TdOYy7kxF3XE",
"colab": {
"base_uri": "https://localhost:8080/"
},
+ "id": "TdOYy7kxF3XE",
"outputId": "a9ad11f5-3161-4a35-e5a3-61cea0cd97f5"
},
- "execution_count": 7,
"outputs": [
{
- "output_type": "stream",
"name": "stdout",
+ "output_type": "stream",
"text": [
"0 babbage\n",
"1 davinci\n",
@@ -346,15 +315,27 @@
"51 davinci-similarity\n"
]
}
+ ],
+ "source": [
+ "ef=open('engines.txt','w')\n",
+ "ef.write(\"engine\")\n",
+ "ef.write('\\n')\n",
+ "l=0\n",
+ "for i in range(0,length):\n",
+ " try:\n",
+ " el=elist[\"data\"][i][\"id\"]\n",
+ " print(i,el)\n",
+ " ef.write(str(el))\n",
+ " ef.write('\\n')\n",
+ " except:\n",
+ " print(\"number of engines: \",i)\n",
+ "\n",
+ "ef.close()"
]
},
{
"cell_type": "code",
- "source": [
- "import pandas as pd\n",
- "df=pd.read_csv('engines.txt')\n",
- "df.sort_values(['engine'])"
- ],
+ "execution_count": 8,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
@@ -363,66 +344,9 @@
"id": "7axwv7spi6j7",
"outputId": "c354e969-818c-4496-bf9f-3a03a38e3d1e"
},
- "execution_count": 8,
"outputs": [
{
- "output_type": "execute_result",
"data": {
- "text/plain": [
- " engine\n",
- "7 ada\n",
- "18 ada-code-search-code\n",
- "24 ada-code-search-text\n",
- "35 ada-search-document\n",
- "29 ada-search-query\n",
- "19 ada-similarity\n",
- "0 babbage\n",
- "3 babbage-code-search-code\n",
- "9 babbage-code-search-text\n",
- "34 babbage-search-document\n",
- "41 babbage-search-query\n",
- "10 babbage-similarity\n",
- "5 code-davinci-edit-001\n",
- "28 code-search-ada-code-001\n",
- "21 code-search-ada-text-001\n",
- "14 code-search-babbage-code-001\n",
- "12 code-search-babbage-text-001\n",
- "48 curie\n",
- "8 curie-instruct-beta\n",
- "39 curie-search-document\n",
- "31 curie-search-query\n",
- "47 curie-similarity\n",
- "1 davinci\n",
- "26 davinci-instruct-beta\n",
- "23 davinci-search-document\n",
- "33 davinci-search-query\n",
- "51 davinci-similarity\n",
- "38 gpt-3.5-turbo\n",
- "32 gpt-3.5-turbo-0301\n",
- "43 gpt-4\n",
- "45 gpt-4-0314\n",
- "15 text-ada-001\n",
- "42 text-babbage-001\n",
- "13 text-curie-001\n",
- "6 text-davinci-001\n",
- "50 text-davinci-002\n",
- "20 text-davinci-003\n",
- "2 text-davinci-edit-001\n",
- "16 text-embedding-ada-002\n",
- "25 text-search-ada-doc-001\n",
- "22 text-search-ada-query-001\n",
- "37 text-search-babbage-doc-001\n",
- "46 text-search-babbage-query-001\n",
- "40 text-search-curie-doc-001\n",
- "36 text-search-curie-query-001\n",
- "44 text-search-davinci-doc-001\n",
- "30 text-search-davinci-query-001\n",
- "17 text-similarity-ada-001\n",
- "4 text-similarity-babbage-001\n",
- "27 text-similarity-curie-001\n",
- "49 text-similarity-davinci-001\n",
- "11 whisper-1"
- ],
"text/html": [
"\n",
"
\n",
@@ -736,41 +660,107 @@
"
\n",
" \n",
" "
+ ],
+ "text/plain": [
+ " engine\n",
+ "7 ada\n",
+ "18 ada-code-search-code\n",
+ "24 ada-code-search-text\n",
+ "35 ada-search-document\n",
+ "29 ada-search-query\n",
+ "19 ada-similarity\n",
+ "0 babbage\n",
+ "3 babbage-code-search-code\n",
+ "9 babbage-code-search-text\n",
+ "34 babbage-search-document\n",
+ "41 babbage-search-query\n",
+ "10 babbage-similarity\n",
+ "5 code-davinci-edit-001\n",
+ "28 code-search-ada-code-001\n",
+ "21 code-search-ada-text-001\n",
+ "14 code-search-babbage-code-001\n",
+ "12 code-search-babbage-text-001\n",
+ "48 curie\n",
+ "8 curie-instruct-beta\n",
+ "39 curie-search-document\n",
+ "31 curie-search-query\n",
+ "47 curie-similarity\n",
+ "1 davinci\n",
+ "26 davinci-instruct-beta\n",
+ "23 davinci-search-document\n",
+ "33 davinci-search-query\n",
+ "51 davinci-similarity\n",
+ "38 gpt-3.5-turbo\n",
+ "32 gpt-3.5-turbo-0301\n",
+ "43 gpt-4\n",
+ "45 gpt-4-0314\n",
+ "15 text-ada-001\n",
+ "42 text-babbage-001\n",
+ "13 text-curie-001\n",
+ "6 text-davinci-001\n",
+ "50 text-davinci-002\n",
+ "20 text-davinci-003\n",
+ "2 text-davinci-edit-001\n",
+ "16 text-embedding-ada-002\n",
+ "25 text-search-ada-doc-001\n",
+ "22 text-search-ada-query-001\n",
+ "37 text-search-babbage-doc-001\n",
+ "46 text-search-babbage-query-001\n",
+ "40 text-search-curie-doc-001\n",
+ "36 text-search-curie-query-001\n",
+ "44 text-search-davinci-doc-001\n",
+ "30 text-search-davinci-query-001\n",
+ "17 text-similarity-ada-001\n",
+ "4 text-similarity-babbage-001\n",
+ "27 text-similarity-curie-001\n",
+ "49 text-similarity-davinci-001\n",
+ "11 whisper-1"
]
},
+ "execution_count": 8,
"metadata": {},
- "execution_count": 8
+ "output_type": "execute_result"
}
- ]
- },
- {
- "cell_type": "markdown",
+ ],
+ "source": [
+ "import pandas as pd\n",
+ "df=pd.read_csv('engines.txt')\n",
+ "df.sort_values(['engine'])"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "nQKSYQbxP7LY"
+ },
"source": [
"# Tasks\n",
"\n",
"Run each cell and analyze the outputs in detail. You will see that you don't always need the most powerful model to solve a problem. Sometimes you do. \n",
"\n",
"Take your time. Conduct your own experiments with your inputs.\n"
- ],
- "metadata": {
- "id": "nQKSYQbxP7LY"
- }
+ ]
},
{
"cell_type": "markdown",
+ "metadata": {
+ "id": "muBxqke2MpOt"
+ },
"source": [
"## Dialog function for GPT-3.5-turbo and GPT-4\n",
"\n",
"Note: GPT-3.5 and GPT-4 are conversational AI models that do not have the same format as GPT-3 models.\n",
"\n",
"For more on the parameters applied to the engines, read *Transformers for NLP, 2nd Edition, Chapter 7, The Rise of Suprahuman with GPT-3 Engines*. GPT-3.5-turbo and GPT-4 are GPT models that it is important to understand to make the right choices. "
- ],
- "metadata": {
- "id": "muBxqke2MpOt"
- }
+ ]
},
{
"cell_type": "code",
+ "execution_count": 9,
+ "metadata": {
+ "id": "6KqfSmZfN6DW"
+ },
+ "outputs": [],
"source": [
"#preparing a message for chat models\n",
"def prepare_message(uinput):\n",
@@ -790,38 +780,20 @@
"\n",
"\n",
" return iprompt"
- ],
- "metadata": {
- "id": "6KqfSmZfN6DW"
- },
- "execution_count": 9,
- "outputs": []
+ ]
},
{
"cell_type": "markdown",
- "source": [
- "## Solving Word Math Problems"
- ],
"metadata": {
"id": "xVEwDI5DhZfT"
- }
+ },
+ "source": [
+ "## Solving Word Math Problems"
+ ]
},
{
"cell_type": "code",
- "source": [
- "#GPT-3 davinci engine\n",
- "p1=\"davinci\"\n",
- "p2=\"The total capacity of two warehouses is 12000 units + 4000 units, the first warehouse can store 1000 additional units but the second warehouse has problems so it can only store 2000 units =\",\n",
- "p3=0.0\n",
- "p4=50\n",
- "p5=1\n",
- "p6=0\n",
- "p7=0\n",
- "\n",
- "response = openai.Completion.create(engine=p1,prompt=p2,temperature=p3,max_tokens=p4,top_p=p5,frequency_penalty=p6,presence_penalty=p7)\n",
- "r = (response[\"choices\"][0])\n",
- "print(r[\"text\"])"
- ],
+ "execution_count": 25,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
@@ -829,28 +801,35 @@
"id": "vbIMEg1DHK4E",
"outputId": "b351bafd-9629-46d4-c763-ed8e77dfc2a6"
},
- "execution_count": 25,
"outputs": [
{
- "output_type": "stream",
"name": "stdout",
+ "output_type": "stream",
"text": [
" 12000 + 1000 + 2000 = 14000 units.\n",
"\n",
"The total capacity of two warehouses is 12000 units + 4000 units, the first warehouse can store 1000 additional units but the second warehouse has problems so it can only store 2000 units = 12\n"
]
}
+ ],
+ "source": [
+ "#GPT-3 davinci engine\n",
+ "p1=\"davinci\"\n",
+ "p2=\"The total capacity of two warehouses is 12000 units + 4000 units, the first warehouse can store 1000 additional units but the second warehouse has problems so it can only store 2000 units =\",\n",
+ "p3=0.0\n",
+ "p4=50\n",
+ "p5=1\n",
+ "p6=0\n",
+ "p7=0\n",
+ "\n",
+ "response = openai.Completion.create(engine=p1,prompt=p2,temperature=p3,max_tokens=p4,top_p=p5,frequency_penalty=p6,presence_penalty=p7)\n",
+ "r = (response[\"choices\"][0])\n",
+ "print(r[\"text\"])"
]
},
{
"cell_type": "code",
- "source": [
- "uinput=\"The total capacity of two warehouses is 12000 units + 4000 units, the first warehouse can store 1000 additional units but the second warehouse has problems so it can only store 2000 units =\"\n",
- "iprompt=prepare_message(uinput) #preparing the messages for ChatGPT\n",
- "response=openai.ChatCompletion.create(model=\"gpt-3.5-turbo\",messages=iprompt) #ChatGPT dialog\n",
- "text=response[\"choices\"][0][\"message\"][\"content\"] #response in JSON\n",
- "print(\"ChatGPT response:\",text)"
- ],
+ "execution_count": 12,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
@@ -858,11 +837,10 @@
"id": "qz8FnQMuPc1S",
"outputId": "1e42c8fe-d054-45a4-dca9-97b53db9db66"
},
- "execution_count": 12,
"outputs": [
{
- "output_type": "stream",
"name": "stdout",
+ "output_type": "stream",
"text": [
"ChatGPT response: The total capacity of the two warehouses is 12000 + 4000 = 16000 units.\n",
"If the first warehouse can store 1000 additional units, its new capacity is 12000 + 1000 = 13000 units.\n",
@@ -870,17 +848,18 @@
"Therefore, the new total capacity of the two warehouses is 13000 + 2000 = 15000 units.\n"
]
}
- ]
- },
- {
- "cell_type": "code",
+ ],
"source": [
"uinput=\"The total capacity of two warehouses is 12000 units + 4000 units, the first warehouse can store 1000 additional units but the second warehouse has problems so it can only store 2000 units =\"\n",
"iprompt=prepare_message(uinput) #preparing the messages for ChatGPT\n",
- "response=openai.ChatCompletion.create(model=\"gpt-4\",messages=iprompt) #ChatGPT dialog\n",
+ "response=openai.ChatCompletion.create(model=\"gpt-3.5-turbo\",messages=iprompt) #ChatGPT dialog\n",
"text=response[\"choices\"][0][\"message\"][\"content\"] #response in JSON\n",
"print(\"ChatGPT response:\",text)"
- ],
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 13,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
@@ -888,11 +867,10 @@
"id": "KPjQDWIGNPiR",
"outputId": "7bb5035b-45a2-450f-a19d-b7c979b549d0"
},
- "execution_count": 13,
"outputs": [
{
- "output_type": "stream",
"name": "stdout",
+ "output_type": "stream",
"text": [
"ChatGPT response: The total capacity of the two warehouses is initially 12,000 units (for the first warehouse) + 4,000 units (for the second warehouse), which equals 16,000 units. \n",
"\n",
@@ -901,6 +879,13 @@
"The new combined capacity of the two warehouses is now 13,000 units (for the first warehouse) + 2,000 units (for the second warehouse), which equals 15,000 units.\n"
]
}
+ ],
+ "source": [
+ "uinput=\"The total capacity of two warehouses is 12000 units + 4000 units, the first warehouse can store 1000 additional units but the second warehouse has problems so it can only store 2000 units =\"\n",
+ "iprompt=prepare_message(uinput) #preparing the messages for ChatGPT\n",
+ "response=openai.ChatCompletion.create(model=\"gpt-4\",messages=iprompt) #ChatGPT dialog\n",
+ "text=response[\"choices\"][0][\"message\"][\"content\"] #response in JSON\n",
+ "print(\"ChatGPT response:\",text)"
]
},
{
@@ -916,31 +901,18 @@
},
{
"cell_type": "code",
+ "execution_count": 14,
"metadata": {
- "id": "l2oL0NLRNI3a",
"colab": {
"base_uri": "https://localhost:8080/"
},
+ "id": "l2oL0NLRNI3a",
"outputId": "b7672bbc-ba43-4117-b8ae-9382b2334b0e"
},
- "source": [
- "p1=\"davinci-instruct-beta\"\n",
- "p2=\"Write a plan of actions based on these instructions:\\n\\nStart Chrome.\\nYou have to eventually click on the advanced tab.\\nHowever before, click on the Internet options on the tools menu.\\nThen click on the advanced tab, then click to clear or select the enable\\npersonalized favorite menu check box.\\n\\n\\nACTIONS:\"\n",
- "p3=0\n",
- "p4=120\n",
- "p5=1\n",
- "p6=0\n",
- "p7=0\n",
- "\n",
- "response = openai.Completion.create(engine=p1,prompt=p2,temperature=p3,max_tokens=p4,top_p=p5,frequency_penalty=p6,presence_penalty=p7)\n",
- "r = (response[\"choices\"][0])\n",
- "print(r[\"text\"])"
- ],
- "execution_count": 14,
"outputs": [
{
- "output_type": "stream",
"name": "stdout",
+ "output_type": "stream",
"text": [
"\n",
"\n",
@@ -951,17 +923,24 @@
"5. Click to clear or select the Enable Personalized Favorite Menu check box\n"
]
}
+ ],
+ "source": [
+ "p1=\"davinci-instruct-beta\"\n",
+ "p2=\"Write a plan of actions based on these instructions:\\n\\nStart Chrome.\\nYou have to eventually click on the advanced tab.\\nHowever before, click on the Internet options on the tools menu.\\nThen click on the advanced tab, then click to clear or select the enable\\npersonalized favorite menu check box.\\n\\n\\nACTIONS:\"\n",
+ "p3=0\n",
+ "p4=120\n",
+ "p5=1\n",
+ "p6=0\n",
+ "p7=0\n",
+ "\n",
+ "response = openai.Completion.create(engine=p1,prompt=p2,temperature=p3,max_tokens=p4,top_p=p5,frequency_penalty=p6,presence_penalty=p7)\n",
+ "r = (response[\"choices\"][0])\n",
+ "print(r[\"text\"])"
]
},
{
"cell_type": "code",
- "source": [
- "uinput=\"Write a plan of actions based on these instructions:\\n\\nStart Chrome.\\nYou have to eventually click on the advanced tab.\\nHowever before, click on the Internet options on the tools menu.\\nThen click on the advanced tab, then click to clear or select the enable\\npersonalized favorite menu check box.\\n\\n\\nACTIONS:\"\n",
- "iprompt=prepare_message(uinput) #preparing the messages for ChatGPT\n",
- "response=openai.ChatCompletion.create(model=\"gpt-3.5-turbo\",messages=iprompt) #ChatGPT dialog\n",
- "text=response[\"choices\"][0][\"message\"][\"content\"] #response in JSON\n",
- "print(\"ChatGPT response:\",text)"
- ],
+ "execution_count": 15,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
@@ -969,11 +948,10 @@
"id": "7vmsD05iP8m5",
"outputId": "71443a4e-8873-45f7-80ba-7c7c7d4ea1fd"
},
- "execution_count": 15,
"outputs": [
{
- "output_type": "stream",
"name": "stdout",
+ "output_type": "stream",
"text": [
"ChatGPT response: 1. Click on the \"Start\" button located at the bottom-left corner of the screen.\n",
"2. Look for the Google Chrome icon and click on it to launch the browser.\n",
@@ -988,17 +966,18 @@
"11. Click on \"OK\" to close the window and save the changes.\n"
]
}
- ]
- },
- {
- "cell_type": "code",
+ ],
"source": [
"uinput=\"Write a plan of actions based on these instructions:\\n\\nStart Chrome.\\nYou have to eventually click on the advanced tab.\\nHowever before, click on the Internet options on the tools menu.\\nThen click on the advanced tab, then click to clear or select the enable\\npersonalized favorite menu check box.\\n\\n\\nACTIONS:\"\n",
"iprompt=prepare_message(uinput) #preparing the messages for ChatGPT\n",
- "response=openai.ChatCompletion.create(model=\"gpt-4\",messages=iprompt) #ChatGPT dialog\n",
+ "response=openai.ChatCompletion.create(model=\"gpt-3.5-turbo\",messages=iprompt) #ChatGPT dialog\n",
"text=response[\"choices\"][0][\"message\"][\"content\"] #response in JSON\n",
"print(\"ChatGPT response:\",text)"
- ],
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 16,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
@@ -1006,11 +985,10 @@
"id": "NWt3vSr3QVzc",
"outputId": "c3f4e306-277b-47ca-c68c-2fdceb90c391"
},
- "execution_count": 16,
"outputs": [
{
- "output_type": "stream",
"name": "stdout",
+ "output_type": "stream",
"text": [
"ChatGPT response: 1. Start Chrome.\n",
"2. Locate and click on the tools menu (usually represented by three vertical dots or lines in the top-right corner of the browser).\n",
@@ -1021,32 +999,27 @@
"7. Click to either clear or select the check box, based on your preference.\n"
]
}
+ ],
+ "source": [
+ "uinput=\"Write a plan of actions based on these instructions:\\n\\nStart Chrome.\\nYou have to eventually click on the advanced tab.\\nHowever before, click on the Internet options on the tools menu.\\nThen click on the advanced tab, then click to clear or select the enable\\npersonalized favorite menu check box.\\n\\n\\nACTIONS:\"\n",
+ "iprompt=prepare_message(uinput) #preparing the messages for ChatGPT\n",
+ "response=openai.ChatCompletion.create(model=\"gpt-4\",messages=iprompt) #ChatGPT dialog\n",
+ "text=response[\"choices\"][0][\"message\"][\"content\"] #response in JSON\n",
+ "print(\"ChatGPT response:\",text)"
]
},
{
"cell_type": "markdown",
- "source": [
- "## Recipe Generator"
- ],
"metadata": {
"id": "jN5SnYbncR4G"
- }
+ },
+ "source": [
+ "## Recipe Generator"
+ ]
},
{
"cell_type": "code",
- "source": [
- "p1=\"davinci-instruct-beta\"\n",
- "p2=\"Write a recipe based on these ingredients and instructions:\\n\\nFrito Pie\\n\\nIngredients:\\nFritos\\nChili\\nShredded cheddar cheese\\nSweet white or red onions, diced small\\nSour cream\\n\\nDirections:\"\n",
- "p3=0\n",
- "p4=120\n",
- "p5=1.0\n",
- "p6=0.0\n",
- "p7=0.0\n",
- "\n",
- "response = openai.Completion.create(engine=p1,prompt=p2,temperature=p3,max_tokens=p4,top_p=p5,frequency_penalty=p6,presence_penalty=p7)\n",
- "r = (response[\"choices\"][0])\n",
- "print(r[\"text\"])"
- ],
+ "execution_count": 17,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
@@ -1054,11 +1027,10 @@
"id": "5N0VqdJzcUYK",
"outputId": "b7e40687-b7b6-4e33-a052-a0fc7c33db06"
},
- "execution_count": 17,
"outputs": [
{
- "output_type": "stream",
"name": "stdout",
+ "output_type": "stream",
"text": [
"\n",
"\n",
@@ -1082,17 +1054,24 @@
"2. Place a layer of\n"
]
}
+ ],
+ "source": [
+ "p1=\"davinci-instruct-beta\"\n",
+ "p2=\"Write a recipe based on these ingredients and instructions:\\n\\nFrito Pie\\n\\nIngredients:\\nFritos\\nChili\\nShredded cheddar cheese\\nSweet white or red onions, diced small\\nSour cream\\n\\nDirections:\"\n",
+ "p3=0\n",
+ "p4=120\n",
+ "p5=1.0\n",
+ "p6=0.0\n",
+ "p7=0.0\n",
+ "\n",
+ "response = openai.Completion.create(engine=p1,prompt=p2,temperature=p3,max_tokens=p4,top_p=p5,frequency_penalty=p6,presence_penalty=p7)\n",
+ "r = (response[\"choices\"][0])\n",
+ "print(r[\"text\"])"
]
},
{
"cell_type": "code",
- "source": [
- "uinput=\"Write a recipe based on these ingredients for a Frito Pie with these ingredients:Fritos,Chili,Shredded cheddar cheese,Sweet white or red onions, diced small,sour cream\"\n",
- "iprompt=prepare_message(uinput) #preparing the messages for ChatGPT\n",
- "response=openai.ChatCompletion.create(model=\"gpt-3.5-turbo\",messages=iprompt) #ChatGPT dialog\n",
- "text=response[\"choices\"][0][\"message\"][\"content\"] #response in JSON\n",
- "print(\"ChatGPT response:\",text)"
- ],
+ "execution_count": 18,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
@@ -1100,11 +1079,10 @@
"id": "Xe2ijVLDWeu5",
"outputId": "1b1053e1-4430-46b3-8c4d-878c3e9490f6"
},
- "execution_count": 18,
"outputs": [
{
- "output_type": "stream",
"name": "stdout",
+ "output_type": "stream",
"text": [
"ChatGPT response: Frito Pie Recipe:\n",
"\n",
@@ -1130,17 +1108,18 @@
"Enjoy your delicious Frito Pie with your family and friends!\n"
]
}
- ]
- },
- {
- "cell_type": "code",
+ ],
"source": [
"uinput=\"Write a recipe based on these ingredients for a Frito Pie with these ingredients:Fritos,Chili,Shredded cheddar cheese,Sweet white or red onions, diced small,sour cream\"\n",
"iprompt=prepare_message(uinput) #preparing the messages for ChatGPT\n",
- "response=openai.ChatCompletion.create(model=\"gpt-4\",messages=iprompt) #ChatGPT dialog\n",
+ "response=openai.ChatCompletion.create(model=\"gpt-3.5-turbo\",messages=iprompt) #ChatGPT dialog\n",
"text=response[\"choices\"][0][\"message\"][\"content\"] #response in JSON\n",
"print(\"ChatGPT response:\",text)"
- ],
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 19,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
@@ -1148,11 +1127,10 @@
"id": "GTCpjKxLXP8V",
"outputId": "f829c674-3ddf-4bff-d534-2122478f2396"
},
- "execution_count": 19,
"outputs": [
{
- "output_type": "stream",
"name": "stdout",
+ "output_type": "stream",
"text": [
"ChatGPT response: Frito Pie Recipe\n",
"\n",
@@ -1186,25 +1164,18 @@
" Enjoy your delicious Frito Pie!\n"
]
}
+ ],
+ "source": [
+ "uinput=\"Write a recipe based on these ingredients for a Frito Pie with these ingredients:Fritos,Chili,Shredded cheddar cheese,Sweet white or red onions, diced small,sour cream\"\n",
+ "iprompt=prepare_message(uinput) #preparing the messages for ChatGPT\n",
+ "response=openai.ChatCompletion.create(model=\"gpt-4\",messages=iprompt) #ChatGPT dialog\n",
+ "text=response[\"choices\"][0][\"message\"][\"content\"] #response in JSON\n",
+ "print(\"ChatGPT response:\",text)"
]
},
{
"cell_type": "code",
- "source": [
- "p1=\"davinci\"\n",
- "question=\"What is the best cake?\"\n",
- "p2=\"Q:\"+ question + \"\\nA:\"\n",
- "p3=0.7\n",
- "p4=10\n",
- "p5=0.7\n",
- "p6=0\n",
- "p7=0\n",
- "p8=[\"\\n\"]\n",
- "\n",
- "response = openai.Completion.create(engine=p1,prompt=p2,temperature=p3,max_tokens=p4,top_p=p5,frequency_penalty=p6,presence_penalty=p7,stop=p8,logprobs=5)\n",
- "r = (response[\"choices\"][0])\n",
- "print(r[\"text\"])"
- ],
+ "execution_count": 20,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
@@ -1212,26 +1183,34 @@
"id": "wCm74nU9LF9Q",
"outputId": "2ea94de2-f08d-46a0-95aa-4abdd9298ea9"
},
- "execution_count": 20,
"outputs": [
{
- "output_type": "stream",
"name": "stdout",
+ "output_type": "stream",
"text": [
"It's a secret.\n"
]
}
+ ],
+ "source": [
+ "p1=\"davinci\"\n",
+ "question=\"What is the best cake?\"\n",
+ "p2=\"Q:\"+ question + \"\\nA:\"\n",
+ "p3=0.7\n",
+ "p4=10\n",
+ "p5=0.7\n",
+ "p6=0\n",
+ "p7=0\n",
+ "p8=[\"\\n\"]\n",
+ "\n",
+ "response = openai.Completion.create(engine=p1,prompt=p2,temperature=p3,max_tokens=p4,top_p=p5,frequency_penalty=p6,presence_penalty=p7,stop=p8,logprobs=5)\n",
+ "r = (response[\"choices\"][0])\n",
+ "print(r[\"text\"])"
]
},
{
"cell_type": "code",
- "source": [
- "uinput=\"What is the best cake?\"\n",
- "iprompt=prepare_message(uinput) #preparing the messages for ChatGPT\n",
- "response=openai.ChatCompletion.create(model=\"gpt-3.5-turbo\",messages=iprompt) #ChatGPT dialog\n",
- "text=response[\"choices\"][0][\"message\"][\"content\"] #response in JSON\n",
- "print(\"ChatGPT response:\",text)"
- ],
+ "execution_count": 21,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
@@ -1239,26 +1218,26 @@
"id": "eELnB9TnXkWd",
"outputId": "2a7c7999-e059-4ae2-f5f4-8dae4b48a177"
},
- "execution_count": 21,
"outputs": [
{
- "output_type": "stream",
"name": "stdout",
+ "output_type": "stream",
"text": [
"ChatGPT response: As an AI language model, I do not have personal preferences, however, some of the most popular and loved cake flavors include chocolate, vanilla, red velvet, lemon, and carrot cake. It's all a matter of personal taste!\n"
]
}
- ]
- },
- {
- "cell_type": "code",
+ ],
"source": [
"uinput=\"What is the best cake?\"\n",
"iprompt=prepare_message(uinput) #preparing the messages for ChatGPT\n",
- "response=openai.ChatCompletion.create(model=\"gpt-4\",messages=iprompt) #ChatGPT dialog\n",
+ "response=openai.ChatCompletion.create(model=\"gpt-3.5-turbo\",messages=iprompt) #ChatGPT dialog\n",
"text=response[\"choices\"][0][\"message\"][\"content\"] #response in JSON\n",
"print(\"ChatGPT response:\",text)"
- ],
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 22,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
@@ -1266,15 +1245,21 @@
"id": "q4ois9G2XrJR",
"outputId": "8e872d37-9dca-4cc6-fa09-6b6d69d09516"
},
- "execution_count": 22,
"outputs": [
{
- "output_type": "stream",
"name": "stdout",
+ "output_type": "stream",
"text": [
"ChatGPT response: There is no universally \"best\" cake, as people's tastes and preferences vary widely. Some popular choices for cake include chocolate cake, red velvet cake, and cheesecake. Ultimately, the best cake for you depends on your personal taste and preferences.\n"
]
}
+ ],
+ "source": [
+ "uinput=\"What is the best cake?\"\n",
+ "iprompt=prepare_message(uinput) #preparing the messages for ChatGPT\n",
+ "response=openai.ChatCompletion.create(model=\"gpt-4\",messages=iprompt) #ChatGPT dialog\n",
+ "text=response[\"choices\"][0][\"message\"][\"content\"] #response in JSON\n",
+ "print(\"ChatGPT response:\",text)"
]
},
{
@@ -1290,6 +1275,7 @@
},
{
"cell_type": "code",
+ "execution_count": 26,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
@@ -1297,6 +1283,15 @@
"id": "tUOH-fAbawlc",
"outputId": "b5835d2c-eefc-4f33-e059-994dc756a565"
},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ " π·πΈπ·\n"
+ ]
+ }
+ ],
"source": [
"p1=\"davinci\"\n",
"p2=\"Back to Future: π¨π΄ππ\\nBatman: π€΅π¦\\nTransformers: ππ€\\nWonder Woman: πΈπ»πΈπΌπΈπ½πΈπΎπΈπΏ\\nWinnie the Pooh: π»πΌπ»\\nThe Godfather: π¨π©π§π΅π»ββοΈπ²π₯\\nGame of Thrones: πΉπ‘π‘πΉ\\nSpider-Man:\"\n",
@@ -1310,27 +1305,11 @@
"response = openai.Completion.create(engine=p1,prompt=p2,temperature=p3,max_tokens=p4,top_p=p5,frequency_penalty=p6,presence_penalty=p7,stop=p8)\n",
"r = (response[\"choices\"][0])\n",
"print(r[\"text\"])"
- ],
- "execution_count": 26,
- "outputs": [
- {
- "output_type": "stream",
- "name": "stdout",
- "text": [
- " π·πΈπ·\n"
- ]
- }
]
},
{
"cell_type": "code",
- "source": [
- "uinput=\"Show me the movie Spiderman in emojis: \"\n",
- "iprompt=prepare_message(uinput) #preparing the messages for ChatGPT\n",
- "response=openai.ChatCompletion.create(model=\"gpt-3.5-turbo\",messages=iprompt) #ChatGPT dialog\n",
- "text=response[\"choices\"][0][\"message\"][\"content\"] #response in JSON\n",
- "print(\"ChatGPT response:\",text)"
- ],
+ "execution_count": 27,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
@@ -1338,26 +1317,26 @@
"id": "uu5jdfQNTgDs",
"outputId": "eb4045e6-4424-426f-9f55-7d8dfffca51a"
},
- "execution_count": 27,
"outputs": [
{
- "output_type": "stream",
"name": "stdout",
+ "output_type": "stream",
"text": [
"ChatGPT response: πΈοΈπ·οΈποΈπ¨βπΌπ·οΈπΈοΈπ·οΈπΈοΈπ·οΈπΈοΈπ¦ΈββοΈ\n"
]
}
- ]
- },
- {
- "cell_type": "code",
+ ],
"source": [
"uinput=\"Show me the movie Spiderman in emojis: \"\n",
"iprompt=prepare_message(uinput) #preparing the messages for ChatGPT\n",
- "response=openai.ChatCompletion.create(model=\"gpt-4\",messages=iprompt) #ChatGPT dialog\n",
+ "response=openai.ChatCompletion.create(model=\"gpt-3.5-turbo\",messages=iprompt) #ChatGPT dialog\n",
"text=response[\"choices\"][0][\"message\"][\"content\"] #response in JSON\n",
"print(\"ChatGPT response:\",text)"
- ],
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 28,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
@@ -1365,33 +1344,26 @@
"id": "syTmj5cGTtJc",
"outputId": "69f64644-7efc-49a9-e067-a3643c17baf4"
},
- "execution_count": 28,
"outputs": [
{
- "output_type": "stream",
"name": "stdout",
+ "output_type": "stream",
"text": [
"ChatGPT response: π¬ π·οΈπΈοΈπΆββοΈπ₯\n"
]
}
+ ],
+ "source": [
+ "uinput=\"Show me the movie Spiderman in emojis: \"\n",
+ "iprompt=prepare_message(uinput) #preparing the messages for ChatGPT\n",
+ "response=openai.ChatCompletion.create(model=\"gpt-4\",messages=iprompt) #ChatGPT dialog\n",
+ "text=response[\"choices\"][0][\"message\"][\"content\"] #response in JSON\n",
+ "print(\"ChatGPT response:\",text)"
]
},
{
"cell_type": "code",
- "source": [
- "p1=\"davinci\"\n",
- "p2=\"Show me this sentence in emojis:\"\n",
- "p3=0.8\n",
- "p4=60\n",
- "p5=1\n",
- "p6=0\n",
- "p7=0\n",
- "p8=[\"\\n\"]\n",
- "\n",
- "response = openai.Completion.create(engine=p1,prompt=p2,temperature=p3,max_tokens=p4,top_p=p5,frequency_penalty=p6,presence_penalty=p7,stop=p8)\n",
- "r = (response[\"choices\"][0])\n",
- "print(r[\"text\"])"
- ],
+ "execution_count": 29,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
@@ -1399,37 +1371,60 @@
"id": "97s7U1dsexe0",
"outputId": "e291d571-c7da-4bc6-8b1d-6d47b41ecd40"
},
- "execution_count": 29,
"outputs": [
{
- "output_type": "stream",
"name": "stdout",
+ "output_type": "stream",
"text": [
" π¨βπ§π¨βπ¦π©βπ¦π¨βπ§π¨βπ§π¨βπ¦π¨βπ§\n"
]
}
+ ],
+ "source": [
+ "p1=\"davinci\"\n",
+ "p2=\"Show me this sentence in emojis:\"\n",
+ "p3=0.8\n",
+ "p4=60\n",
+ "p5=1\n",
+ "p6=0\n",
+ "p7=0\n",
+ "p8=[\"\\n\"]\n",
+ "\n",
+ "response = openai.Completion.create(engine=p1,prompt=p2,temperature=p3,max_tokens=p4,top_p=p5,frequency_penalty=p6,presence_penalty=p7,stop=p8)\n",
+ "r = (response[\"choices\"][0])\n",
+ "print(r[\"text\"])"
]
},
{
"cell_type": "markdown",
+ "metadata": {
+ "id": "daHTJIoZVI6k"
+ },
"source": [
"## A general knowledge question\n",
"\n",
"March 2023 comment: note that GPT-4 doesn't know it is GPT-4 because the dataset cutoff was maybe made before OpenAI named the model \"gpt-4.\""
- ],
- "metadata": {
- "id": "daHTJIoZVI6k"
- }
+ ]
},
{
"cell_type": "code",
+ "execution_count": 30,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
- "outputId": "82525e55-1234-438c-ed74-ea89f9c6c79b",
- "id": "Bt4Hfqv_bZwx"
+ "id": "Bt4Hfqv_bZwx",
+ "outputId": "82525e55-1234-438c-ed74-ea89f9c6c79b"
},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ " GPT-4 is a new type of GPT that is designed to be more secure than the original GPT.\n"
+ ]
+ }
+ ],
"source": [
"p1=\"davinci\"\n",
"p2=\"Q: What is GPT-4?\\nA:\"\n",
@@ -1443,27 +1438,11 @@
"response = openai.Completion.create(engine=p1,prompt=p2,temperature=p3,max_tokens=p4,top_p=p5,frequency_penalty=p6,presence_penalty=p7,stop=p8,logprobs=5)\n",
"r = (response[\"choices\"][0])\n",
"print(r[\"text\"])"
- ],
- "execution_count": 30,
- "outputs": [
- {
- "output_type": "stream",
- "name": "stdout",
- "text": [
- " GPT-4 is a new type of GPT that is designed to be more secure than the original GPT.\n"
- ]
- }
]
},
{
"cell_type": "code",
- "source": [
- "uinput=\"What is GPT-4?\"\n",
- "iprompt=prepare_message(uinput) #preparing the messages for ChatGPT\n",
- "response=openai.ChatCompletion.create(model=\"gpt-3.5-turbo\",messages=iprompt) #ChatGPT dialog\n",
- "text=response[\"choices\"][0][\"message\"][\"content\"] #response in JSON\n",
- "print(\"ChatGPT response:\",text)"
- ],
+ "execution_count": 31,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
@@ -1471,26 +1450,26 @@
"id": "ETrDtG45Yms5",
"outputId": "526ae5bb-919a-4b27-9c04-5519da7d6241"
},
- "execution_count": 31,
"outputs": [
{
- "output_type": "stream",
"name": "stdout",
+ "output_type": "stream",
"text": [
"ChatGPT response: As of August 2021, there is no official announcement or release of GPT-4. GPT (Generative Pretrained Transformer) is a series of natural language processing models developed by OpenAI. The latest version, GPT-3, was released in 2020 and is a powerful language model that is commonly used to generate text, complete sentences, and even generate entire articles. However, the development of GPT-4 is still uncertain as OpenAI has not made any official announcements regarding its release.\n"
]
}
- ]
- },
- {
- "cell_type": "code",
+ ],
"source": [
"uinput=\"What is GPT-4?\"\n",
"iprompt=prepare_message(uinput) #preparing the messages for ChatGPT\n",
- "response=openai.ChatCompletion.create(model=\"gpt-4\",messages=iprompt) #ChatGPT dialog\n",
+ "response=openai.ChatCompletion.create(model=\"gpt-3.5-turbo\",messages=iprompt) #ChatGPT dialog\n",
"text=response[\"choices\"][0][\"message\"][\"content\"] #response in JSON\n",
"print(\"ChatGPT response:\",text)"
- ],
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 32,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
@@ -1498,28 +1477,51 @@
"id": "6GvzVmjZYtvf",
"outputId": "970b73a4-d172-4d32-ad70-eed388b4c1ca"
},
- "execution_count": 32,
"outputs": [
{
- "output_type": "stream",
"name": "stdout",
+ "output_type": "stream",
"text": [
"ChatGPT response: As of now, GPT-4 doesn't exist. GPT-3 (short for Generative Pre-trained Transformer 3) is the most advanced version developed by OpenAI. It is an AI language model that uses machine learning to generate human-like text based on the input it receives. In the event that OpenAI releases an updated version in the future, it could be called GPT-4, featuring improvements and advancements based on the successes and limitations of GPT-3.\n"
]
}
+ ],
+ "source": [
+ "uinput=\"What is GPT-4?\"\n",
+ "iprompt=prepare_message(uinput) #preparing the messages for ChatGPT\n",
+ "response=openai.ChatCompletion.create(model=\"gpt-4\",messages=iprompt) #ChatGPT dialog\n",
+ "text=response[\"choices\"][0][\"message\"][\"content\"] #response in JSON\n",
+ "print(\"ChatGPT response:\",text)"
]
},
{
"cell_type": "markdown",
- "source": [
- "## Unsafe content can go undetected "
- ],
"metadata": {
"id": "NPIwffV5eSVT"
- }
+ },
+ "source": [
+ "## Unsafe content can go undetected"
+ ]
},
{
"cell_type": "code",
+ "execution_count": 33,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "j-sq9fGudKfE",
+ "outputId": "703add23-7b8c-4299-8fcb-3cce21c7c8b0"
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "2\n"
+ ]
+ }
+ ],
"source": [
"content_to_classify = \"Never get vaccinated. Who f;;k;;g cares.\"\n",
"p1=\"content-filter-alpha\"\n",
@@ -1534,81 +1536,81 @@
"response = openai.Completion.create(engine=p1,prompt=p2,temperature=p3,max_tokens=p4,top_p=p5,frequency_penalty=p6,presence_penalty=p7,logprobs=p8)\n",
"r = (response[\"choices\"][0])\n",
"print(r[\"text\"])"
- ],
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 34,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
- "outputId": "703add23-7b8c-4299-8fcb-3cce21c7c8b0",
- "id": "j-sq9fGudKfE"
+ "id": "lVvq4zNFVZul",
+ "outputId": "9296bedb-90d9-4bb3-f194-eb7ee44a056b"
},
- "execution_count": 33,
"outputs": [
{
- "output_type": "stream",
"name": "stdout",
+ "output_type": "stream",
"text": [
- "2\n"
+ "ChatGPT response: No, this language is not appropriate or safe. It is important to always use respectful language when communicating with others. Additionally, getting vaccinated is an important step in protecting oneself and those around them from preventable diseases. It is recommended to consult with a healthcare professional and make informed decisions about vaccinations.\n"
]
}
- ]
- },
- {
- "cell_type": "code",
+ ],
"source": [
"uinput=\"Is this content safe:Never get vaccinated. Who f;;k;;g cares.\"\n",
"iprompt=prepare_message(uinput) #preparing the messages for ChatGPT\n",
"response=openai.ChatCompletion.create(model=\"gpt-3.5-turbo\",messages=iprompt) #ChatGPT dialog\n",
"text=response[\"choices\"][0][\"message\"][\"content\"] #response in JSON\n",
"print(\"ChatGPT response:\",text)"
- ],
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 35,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
- "id": "lVvq4zNFVZul",
- "outputId": "9296bedb-90d9-4bb3-f194-eb7ee44a056b"
+ "id": "8sFTMheCViTq",
+ "outputId": "66201c24-69e5-4b7e-f387-88f6442434a0"
},
- "execution_count": 34,
"outputs": [
{
- "output_type": "stream",
"name": "stdout",
+ "output_type": "stream",
"text": [
- "ChatGPT response: No, this language is not appropriate or safe. It is important to always use respectful language when communicating with others. Additionally, getting vaccinated is an important step in protecting oneself and those around them from preventable diseases. It is recommended to consult with a healthcare professional and make informed decisions about vaccinations.\n"
+ "ChatGPT response: As a helpful assistant, I must advise against sharing or promoting potentially harmful content. It is crucial to prioritize health and safety, and getting vaccinated is an important measure to prevent the spread of diseases. Vaccines are essential in protecting individuals and communities, as they are supported by extensive scientific research and have proven to be effective. It is recommended to discuss any concerns related to vaccinations with a healthcare professional.\n"
]
}
- ]
- },
- {
- "cell_type": "code",
+ ],
"source": [
"uinput=\"Is this content safe:Never get vaccinated. Who f;;k;;g cares.\"\n",
"iprompt=prepare_message(uinput) #preparing the messages for ChatGPT\n",
"response=openai.ChatCompletion.create(model=\"gpt-4\",messages=iprompt) #ChatGPT dialog\n",
"text=response[\"choices\"][0][\"message\"][\"content\"] #response in JSON\n",
"print(\"ChatGPT response:\",text)"
- ],
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 36,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
- "id": "8sFTMheCViTq",
- "outputId": "66201c24-69e5-4b7e-f387-88f6442434a0"
+ "id": "L_XTLwjae7pU",
+ "outputId": "d4bb5d74-e579-48b3-ec34-4af782f3add7"
},
- "execution_count": 35,
"outputs": [
{
- "output_type": "stream",
"name": "stdout",
+ "output_type": "stream",
"text": [
- "ChatGPT response: As a helpful assistant, I must advise against sharing or promoting potentially harmful content. It is crucial to prioritize health and safety, and getting vaccinated is an important measure to prevent the spread of diseases. Vaccines are essential in protecting individuals and communities, as they are supported by extensive scientific research and have proven to be effective. It is recommended to discuss any concerns related to vaccinations with a healthcare professional.\n"
+ "0\n"
]
}
- ]
- },
- {
- "cell_type": "code",
+ ],
"source": [
"content_to_classify = \"The say doses are good. But I agree with Djoko.\"\n",
"p1=\"content-filter-alpha\"\n",
@@ -1623,34 +1625,11 @@
"response = openai.Completion.create(engine=p1,prompt=p2,temperature=p3,max_tokens=p4,top_p=p5,frequency_penalty=p6,presence_penalty=p7,logprobs=p8)\n",
"r = (response[\"choices\"][0])\n",
"print(r[\"text\"])"
- ],
- "metadata": {
- "colab": {
- "base_uri": "https://localhost:8080/"
- },
- "id": "L_XTLwjae7pU",
- "outputId": "d4bb5d74-e579-48b3-ec34-4af782f3add7"
- },
- "execution_count": 36,
- "outputs": [
- {
- "output_type": "stream",
- "name": "stdout",
- "text": [
- "0\n"
- ]
- }
]
},
{
"cell_type": "code",
- "source": [
- "uinput=\"Is the following content safe: The say doses are good. But I agree with Djoko.\"\n",
- "iprompt=prepare_message(uinput) #preparing the messages for ChatGPT\n",
- "response=openai.ChatCompletion.create(model=\"gpt-3.5-turbo\",messages=iprompt) #ChatGPT dialog\n",
- "text=response[\"choices\"][0][\"message\"][\"content\"] #response in JSON\n",
- "print(\"ChatGPT response:\",text)"
- ],
+ "execution_count": 37,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
@@ -1658,26 +1637,26 @@
"id": "hWpm67fpSxs0",
"outputId": "efa9113b-e0a2-4dbd-9c54-9860ffb40085"
},
- "execution_count": 37,
"outputs": [
{
- "output_type": "stream",
"name": "stdout",
+ "output_type": "stream",
"text": [
"ChatGPT response: The content doesn't make sense on its own and it's unclear what is being referred to when mentioning \"doses\" and \"Djoko.\" It's always important to ensure that any content you are sharing online is appropriate and does not contain any harmful or inappropriate language, messages, or references.\n"
]
}
- ]
- },
- {
- "cell_type": "code",
+ ],
"source": [
"uinput=\"Is the following content safe: The say doses are good. But I agree with Djoko.\"\n",
"iprompt=prepare_message(uinput) #preparing the messages for ChatGPT\n",
- "response=openai.ChatCompletion.create(model=\"gpt-4\",messages=iprompt) #ChatGPT dialog\n",
+ "response=openai.ChatCompletion.create(model=\"gpt-3.5-turbo\",messages=iprompt) #ChatGPT dialog\n",
"text=response[\"choices\"][0][\"message\"][\"content\"] #response in JSON\n",
"print(\"ChatGPT response:\",text)"
- ],
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 38,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
@@ -1685,41 +1664,35 @@
"id": "b4ZhaWC_S9kK",
"outputId": "172c1d02-8c45-4540-d15f-6d212cb05d36"
},
- "execution_count": 38,
"outputs": [
{
- "output_type": "stream",
"name": "stdout",
+ "output_type": "stream",
"text": [
"ChatGPT response: The content you provided seems to be safe with no explicit or harmful language. However, the message is not clear, so it is difficult to provide further assistance. If you need more information or help, please feel free to ask a more specific question.\n"
]
}
+ ],
+ "source": [
+ "uinput=\"Is the following content safe: The say doses are good. But I agree with Djoko.\"\n",
+ "iprompt=prepare_message(uinput) #preparing the messages for ChatGPT\n",
+ "response=openai.ChatCompletion.create(model=\"gpt-4\",messages=iprompt) #ChatGPT dialog\n",
+ "text=response[\"choices\"][0][\"message\"][\"content\"] #response in JSON\n",
+ "print(\"ChatGPT response:\",text)"
]
},
{
"cell_type": "markdown",
- "source": [
- "## Example of a negative Tweet that can go undetected."
- ],
"metadata": {
"id": "hejrZnp8gJTh"
- }
+ },
+ "source": [
+ "## Example of a negative Tweet that can go undetected."
+ ]
},
{
"cell_type": "code",
- "source": [
- "p1=\"text-davinci-001\"\n",
- "p2=\"Decide whether a Tweet's sentiment is positive, neutral, or negative.\\n\\nTweet: \\\"She loved the new Batman movie! But I agreed with that critic we just heard.\\\"\\nSentiment:\"\n",
- "p3=0\n",
- "p4=60\n",
- "p5=1\n",
- "p6=0.5\n",
- "p7=0\n",
- "\n",
- "response = openai.Completion.create(engine=p1,prompt=p2,temperature=p3,max_tokens=p4,top_p=p5,frequency_penalty=p6,presence_penalty=p7,logprobs=p8)\n",
- "r = (response[\"choices\"][0])\n",
- "print(r[\"text\"])"
- ],
+ "execution_count": 39,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
@@ -1727,26 +1700,32 @@
"id": "Di2Yi7h6gUPQ",
"outputId": "3bb60352-b9b4-4270-f6b7-884353e9df33"
},
- "execution_count": 39,
"outputs": [
{
- "output_type": "stream",
"name": "stdout",
+ "output_type": "stream",
"text": [
" Neutral\n"
]
}
+ ],
+ "source": [
+ "p1=\"text-davinci-001\"\n",
+ "p2=\"Decide whether a Tweet's sentiment is positive, neutral, or negative.\\n\\nTweet: \\\"She loved the new Batman movie! But I agreed with that critic we just heard.\\\"\\nSentiment:\"\n",
+ "p3=0\n",
+ "p4=60\n",
+ "p5=1\n",
+ "p6=0.5\n",
+ "p7=0\n",
+ "\n",
+ "response = openai.Completion.create(engine=p1,prompt=p2,temperature=p3,max_tokens=p4,top_p=p5,frequency_penalty=p6,presence_penalty=p7,logprobs=p8)\n",
+ "r = (response[\"choices\"][0])\n",
+ "print(r[\"text\"])"
]
},
{
"cell_type": "code",
- "source": [
- "uinput=\"Is the following tweet postive, negative or neutral:She loved the new Batman movie! But I agreed with that critic we just heard.\"\n",
- "iprompt=prepare_message(uinput) #preparing the messages for ChatGPT\n",
- "response=openai.ChatCompletion.create(model=\"gpt-3.5-turbo\",messages=iprompt) #ChatGPT dialog\n",
- "text=response[\"choices\"][0][\"message\"][\"content\"] #response in JSON\n",
- "print(\"ChatGPT response:\",text)"
- ],
+ "execution_count": 40,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
@@ -1754,26 +1733,26 @@
"id": "w9KbEjVTSYvo",
"outputId": "30f9cbe4-a1ba-449a-ff6b-08de3568b9df"
},
- "execution_count": 40,
"outputs": [
{
- "output_type": "stream",
"name": "stdout",
+ "output_type": "stream",
"text": [
"ChatGPT response: Neutral\n"
]
}
- ]
- },
- {
- "cell_type": "code",
+ ],
"source": [
"uinput=\"Is the following tweet postive, negative or neutral:She loved the new Batman movie! But I agreed with that critic we just heard.\"\n",
"iprompt=prepare_message(uinput) #preparing the messages for ChatGPT\n",
- "response=openai.ChatCompletion.create(model=\"gpt-4\",messages=iprompt) #ChatGPT dialog\n",
+ "response=openai.ChatCompletion.create(model=\"gpt-3.5-turbo\",messages=iprompt) #ChatGPT dialog\n",
"text=response[\"choices\"][0][\"message\"][\"content\"] #response in JSON\n",
"print(\"ChatGPT response:\",text)"
- ],
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 41,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
@@ -1781,42 +1760,35 @@
"id": "hnEW3yMPSoCw",
"outputId": "a26aa453-8694-476b-ce12-e1e232c2d6b9"
},
- "execution_count": 41,
"outputs": [
{
- "output_type": "stream",
"name": "stdout",
+ "output_type": "stream",
"text": [
"ChatGPT response: The tweet is a mix of both positive and negative sentiment. The first half, \"She loved the new Batman movie!\" is positive, while the second half, \"But I agreed with that critic we just heard,\" is negative. The overall sentiment may lean towards negative, as the person agrees with the critic.\n"
]
}
+ ],
+ "source": [
+ "uinput=\"Is the following tweet postive, negative or neutral:She loved the new Batman movie! But I agreed with that critic we just heard.\"\n",
+ "iprompt=prepare_message(uinput) #preparing the messages for ChatGPT\n",
+ "response=openai.ChatCompletion.create(model=\"gpt-4\",messages=iprompt) #ChatGPT dialog\n",
+ "text=response[\"choices\"][0][\"message\"][\"content\"] #response in JSON\n",
+ "print(\"ChatGPT response:\",text)"
]
},
{
"cell_type": "markdown",
- "source": [
- "## A translation that failed"
- ],
"metadata": {
"id": "mAUYIgqAgv66"
- }
+ },
+ "source": [
+ "## A translation that failed"
+ ]
},
{
"cell_type": "code",
- "source": [
- "p1=\"davinci\"\n",
- "p2=\"Original: Elle a un cheveu sur la langue.\\n American English with no contractions:\"\n",
- "p3=0 \n",
- "p4=60\n",
- "p5=1.0\n",
- "p6=0.0\n",
- "p7=0.0\n",
- "p8=[\"\\n\"]\n",
- "\n",
- "response = openai.Completion.create(engine=p1,prompt=p2,temperature=p3,max_tokens=p4,top_p=p5,frequency_penalty=p6,presence_penalty=p7,stop=p8)\n",
- "r = (response[\"choices\"][0])\n",
- "print(r[\"text\"])"
- ],
+ "execution_count": 42,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
@@ -1824,26 +1796,33 @@
"id": "UdeR2pgbg1Ml",
"outputId": "4f0bd02e-b067-4eef-dc21-d0bb700272ec"
},
- "execution_count": 42,
"outputs": [
{
- "output_type": "stream",
"name": "stdout",
+ "output_type": "stream",
"text": [
" She has a hair on her tongue.\n"
]
}
+ ],
+ "source": [
+ "p1=\"davinci\"\n",
+ "p2=\"Original: Elle a un cheveu sur la langue.\\n American English with no contractions:\"\n",
+ "p3=0 \n",
+ "p4=60\n",
+ "p5=1.0\n",
+ "p6=0.0\n",
+ "p7=0.0\n",
+ "p8=[\"\\n\"]\n",
+ "\n",
+ "response = openai.Completion.create(engine=p1,prompt=p2,temperature=p3,max_tokens=p4,top_p=p5,frequency_penalty=p6,presence_penalty=p7,stop=p8)\n",
+ "r = (response[\"choices\"][0])\n",
+ "print(r[\"text\"])"
]
},
{
"cell_type": "code",
- "source": [
- "uinput=\"Translate from French to English: Elle a un cheveu sur la langue\"\n",
- "iprompt=prepare_message(uinput) #preparing the messages for ChatGPT\n",
- "response=openai.ChatCompletion.create(model=\"gpt-3.5-turbo\",messages=iprompt) #ChatGPT dialog\n",
- "text=response[\"choices\"][0][\"message\"][\"content\"] #response in JSON\n",
- "print(\"ChatGPT response:\",text)"
- ],
+ "execution_count": 43,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
@@ -1851,26 +1830,26 @@
"id": "E9IZj4gORen3",
"outputId": "9255de46-e825-4ed9-a5e3-6710df259639"
},
- "execution_count": 43,
"outputs": [
{
- "output_type": "stream",
"name": "stdout",
+ "output_type": "stream",
"text": [
"ChatGPT response: The literal translation of \"Elle a un cheveu sur la langue\" is \"She has a hair on her tongue\". However, it is a French expression which means that someone has a speech impediment or difficulty in speaking clearly.\n"
]
}
- ]
- },
- {
- "cell_type": "code",
+ ],
"source": [
"uinput=\"Translate from French to English: Elle a un cheveu sur la langue\"\n",
"iprompt=prepare_message(uinput) #preparing the messages for ChatGPT\n",
- "response=openai.ChatCompletion.create(model=\"gpt-4\",messages=iprompt) #ChatGPT dialog\n",
+ "response=openai.ChatCompletion.create(model=\"gpt-3.5-turbo\",messages=iprompt) #ChatGPT dialog\n",
"text=response[\"choices\"][0][\"message\"][\"content\"] #response in JSON\n",
"print(\"ChatGPT response:\",text)"
- ],
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 44,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
@@ -1878,41 +1857,35 @@
"id": "6KSbNOi6Rs8N",
"outputId": "71cb9b97-344d-4e86-92f1-e678db16482d"
},
- "execution_count": 44,
"outputs": [
{
- "output_type": "stream",
"name": "stdout",
+ "output_type": "stream",
"text": [
"ChatGPT response: She has a hair on her tongue\n"
]
}
+ ],
+ "source": [
+ "uinput=\"Translate from French to English: Elle a un cheveu sur la langue\"\n",
+ "iprompt=prepare_message(uinput) #preparing the messages for ChatGPT\n",
+ "response=openai.ChatCompletion.create(model=\"gpt-4\",messages=iprompt) #ChatGPT dialog\n",
+ "text=response[\"choices\"][0][\"message\"][\"content\"] #response in JSON\n",
+ "print(\"ChatGPT response:\",text)"
]
},
{
"cell_type": "markdown",
- "source": [
- "##Google Search Prompt Optimizing"
- ],
"metadata": {
"id": "GU5u6yjDHBN9"
- }
+ },
+ "source": [
+ "## Google Search Prompt Optimizing"
+ ]
},
{
"cell_type": "code",
- "source": [
- "p1=\"davinci\"\n",
- "p2=\"Explain the solar system for children in a few lines:\"\n",
- "p3=0.7\n",
- "p4=64\n",
- "p5=1\n",
- "p6=0\n",
- "p7=0\n",
- "\n",
- "response = openai.Completion.create(engine=p1,prompt=p2,temperature=p3,max_tokens=p4,top_p=p5,frequency_penalty=p6,presence_penalty=p7,stop=p8)\n",
- "r = (response[\"choices\"][0])\n",
- "print(r[\"text\"])"
- ],
+ "execution_count": 45,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
@@ -1920,26 +1893,32 @@
"id": "0fZeBn4qHD0-",
"outputId": "9a99bd37-7d28-43bc-bb88-64c1b3a42c9e"
},
- "execution_count": 45,
"outputs": [
{
- "output_type": "stream",
"name": "stdout",
+ "output_type": "stream",
"text": [
"\n"
]
}
+ ],
+ "source": [
+ "p1=\"davinci\"\n",
+ "p2=\"Explain the solar system for children in a few lines:\"\n",
+ "p3=0.7\n",
+ "p4=64\n",
+ "p5=1\n",
+ "p6=0\n",
+ "p7=0\n",
+ "\n",
+ "response = openai.Completion.create(engine=p1,prompt=p2,temperature=p3,max_tokens=p4,top_p=p5,frequency_penalty=p6,presence_penalty=p7,stop=p8)\n",
+ "r = (response[\"choices\"][0])\n",
+ "print(r[\"text\"])"
]
},
{
"cell_type": "code",
- "source": [
- "uinput=\"Explain the solar system for children in a few lines:\"\n",
- "iprompt=prepare_message(uinput) #preparing the messages for ChatGPT\n",
- "response=openai.ChatCompletion.create(model=\"gpt-3.5-turbo\",messages=iprompt) #ChatGPT dialog\n",
- "text=response[\"choices\"][0][\"message\"][\"content\"] #response in JSON\n",
- "print(\"ChatGPT response:\",text)"
- ],
+ "execution_count": 46,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
@@ -1947,26 +1926,26 @@
"id": "paSL-fo2R_Q5",
"outputId": "731afd01-5a90-4015-c551-e057834fba2d"
},
- "execution_count": 46,
"outputs": [
{
- "output_type": "stream",
"name": "stdout",
+ "output_type": "stream",
"text": [
"ChatGPT response: The solar system is made up of the Sun and eight planets that orbit around it. The eight planets are Mercury, Venus, Earth, Mars, Jupiter, Saturn, Uranus, and Neptune. In between the planets, there are also asteroids, comets, and dwarf planets, including Pluto. Each planet has unique characteristics, like size, climate, and number of moons. The solar system is a fascinating place to explore and learn about!\n"
]
}
- ]
- },
- {
- "cell_type": "code",
+ ],
"source": [
"uinput=\"Explain the solar system for children in a few lines:\"\n",
"iprompt=prepare_message(uinput) #preparing the messages for ChatGPT\n",
- "response=openai.ChatCompletion.create(model=\"gpt-4\",messages=iprompt) #ChatGPT dialog\n",
+ "response=openai.ChatCompletion.create(model=\"gpt-3.5-turbo\",messages=iprompt) #ChatGPT dialog\n",
"text=response[\"choices\"][0][\"message\"][\"content\"] #response in JSON\n",
"print(\"ChatGPT response:\",text)"
- ],
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 47,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
@@ -1974,16 +1953,37 @@
"id": "zEaB36aKSGst",
"outputId": "066670a7-1db6-47ae-d8f4-75c5017e5e0f"
},
- "execution_count": 47,
"outputs": [
{
- "output_type": "stream",
"name": "stdout",
+ "output_type": "stream",
"text": [
"ChatGPT response: The solar system is like a big family in space. At the center, we have the Sun, which is like a parent, giving us light and warmth. Around the Sun, there are eight planets, including Earth, that go in circles called orbits. Some planets, like Earth, have moons that go around them too. There are also many smaller objects, like asteroids and comets, flying around in the solar system, making it an exciting place to learn about!\n"
]
}
+ ],
+ "source": [
+ "uinput=\"Explain the solar system for children in a few lines:\"\n",
+ "iprompt=prepare_message(uinput) #preparing the messages for ChatGPT\n",
+ "response=openai.ChatCompletion.create(model=\"gpt-4\",messages=iprompt) #ChatGPT dialog\n",
+ "text=response[\"choices\"][0][\"message\"][\"content\"] #response in JSON\n",
+ "print(\"ChatGPT response:\",text)"
]
}
- ]
+ ],
+ "metadata": {
+ "colab": {
+ "provenance": [],
+ "toc_visible": true
+ },
+ "kernelspec": {
+ "display_name": "Python 3",
+ "name": "python3"
+ },
+ "language_info": {
+ "name": "python"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 0
}