
Commit

reverting prompt normalizer and changing realtime target to use one response instead of list
Bolor-Erdene Jagdagdorj committed Jan 14, 2025
1 parent c7a83ff commit 1d24cf2
Showing 8 changed files with 236 additions and 194 deletions.
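The substantive change is to the prompt normalizer's return contract: send_prompt_async now returns a single PromptRequestResponse instead of possibly returning a list of them, so callers index request_pieces directly rather than branching on the return type. The following is a minimal sketch of the calling pattern before and after this commit, condensed from the red_teaming_orchestrator.py hunks further down; the normalizer, request, and target variables are placeholders for illustration, and optional arguments such as labels and orchestrator_identifier are omitted.

    from pyrit.models.prompt_request_response import PromptRequestResponse

    # Placeholders only -- "normalizer", "request", and "target" are not defined in this diff.

    # Before this commit: the result could be a single PromptRequestResponse or a
    # list of them, so callers branched before reaching the first piece.
    result = await normalizer.send_prompt_async(normalizer_request=request, target=target)
    if isinstance(result, PromptRequestResponse):
        piece = result.request_pieces[0]
    else:
        piece = result[0].request_pieces[0]

    # After this commit: a single PromptRequestResponse comes back, so the first
    # piece and its converted text are one expression away.
    response = await normalizer.send_prompt_async(normalizer_request=request, target=target)
    piece = response.request_pieces[0]
    text = piece.converted_value

The orchestrator hunks at the bottom of the diff show exactly this simplification: the isinstance branches are removed and the response is unwrapped inline.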
162 changes: 80 additions & 82 deletions doc/code/targets/realtime_target.ipynb
@@ -2,21 +2,24 @@
"cells": [
{
"cell_type": "markdown",
"id": "0",
"metadata": {},
"source": [
"# REALTIME TARGET"
]
},
{
"cell_type": "markdown",
"id": "1",
"metadata": {},
"source": [
"## Using PyRIT"
]
},
{
"cell_type": "code",
"execution_count": 1,
"execution_count": null,
"id": "2",
"metadata": {},
"outputs": [],
"source": [
@@ -30,23 +33,23 @@
},
{
"cell_type": "markdown",
"id": "3",
"metadata": {},
"source": [
"## Single Turn Audio Conversation"
]
},
{
"cell_type": "code",
"execution_count": 2,
"execution_count": null,
"id": "4",
"metadata": {},
"outputs": [],
"source": [
"\n",
"from pyrit.orchestrator import PromptSendingOrchestrator\n",
"from pyrit.prompt_normalizer.normalizer_request import NormalizerRequest, NormalizerRequestPiece\n",
"\n",
"# text_prompt_to_send = \"Hi what is 2+2?\"\n",
"\n",
"prompt_to_send = \"test_rt_audio1.wav\"\n",
"\n",
"normalizer_request = NormalizerRequest(\n",
@@ -61,38 +64,10 @@
},
{
"cell_type": "code",
"execution_count": 3,
"execution_count": null,
"id": "5",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Session set up\n",
"\u001b[1m\u001b[34muser: test_rt_audio1.wav\n",
"\u001b[22m\u001b[33massistant: Making rice wine, also known as sake, involves fermenting rice with water, koji (a type of mold), and yeast. Here is a simplified overview:\n",
"\n",
"1. **Washing and Soaking Rice**: Wash the rice thoroughly to remove excess starch. Then, soak the rice in water for a few hours.\n",
"\n",
"2. **Steaming Rice**: Steam the soaked rice until it's cooked, which will make it easier to ferment.\n",
"\n",
"3. **Preparing Koji**: Koji mold is cultivated on steamed rice to convert the starches in the rice to sugars. This process usually takes a couple of days.\n",
"\n",
"4. **Fermentation**: Mix the steamed rice, koji rice, yeast, and water in a fermentation tank. Over the next few weeks, the mixture will ferment, converting the sugars to alcohol.\n",
"\n",
"5. **Pressing and Filtration**: After fermentation, the mixture is pressed to separate the liquid from the solid rice residue. The liquid is then filtered to remove any remaining particles.\n",
"\n",
"6. **Pasteurization**: The filtered sake is often pasteurized to kill any remaining yeast and enzymes, stabilizing the flavor.\n",
"\n",
"7. **Aging and Bottling**: The sake is aged for a few months to develop its flavor before being bottled.\n",
"\n",
"This is a traditional process, and there are many variations and complexities involved in professional sake brewing. If you're interested in making it at home, consider starting with a homebrewing kit or recipe to guide you through the process.\n",
"\u001b[22m\u001b[39mConversation ID: c75dafcc-9ab0-4a2e-b52c-d0513da7a391\n",
"\u001b[22m\u001b[33massistant: response_audio.wav\n",
"\u001b[22m\u001b[39mConversation ID: c75dafcc-9ab0-4a2e-b52c-d0513da7a391\n"
]
}
],
"outputs": [],
"source": [
"await target.connect()\n",
"\n",
@@ -105,15 +80,52 @@
},
{
"cell_type": "markdown",
"id": "6",
"metadata": {},
"source": [
"## Multiturn Text Conversation"
"## Single Turn Text Conversation"
]
},
{
"cell_type": "code",
"execution_count": 4,
"execution_count": null,
"id": "7",
"metadata": {},
"outputs": [],
"source": [
"from pyrit.models.prompt_request_piece import PromptRequestPiece\n",
"from pyrit.orchestrator import PromptSendingOrchestrator\n",
"\n",
"\n",
"await target.connect()\n",
"prompt_to_send = \"Give me an image of a raccoon pirate as a Spanish baker in Spain\"\n",
"\n",
"request = PromptRequestPiece(\n",
" role=\"user\",\n",
" original_value=prompt_to_send,\n",
").to_prompt_request_response()\n",
"\n",
"\n",
"orchestrator = PromptSendingOrchestrator(objective_target=target)\n",
"response = await orchestrator.send_prompts_async(prompt_list=[prompt_to_send]) # type: ignore\n",
"await orchestrator.print_conversations_async() # type: ignore"
]
},
{
"cell_type": "markdown",
"id": "8",
"metadata": {},
"source": [
"## Multiturn Text Conversation"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "9",
"metadata": {
"lines_to_next_cell": 2
},
"outputs": [],
"source": [
"\n",
@@ -141,27 +153,15 @@
" prompt_data_type=\"text\",\n",
" )\n",
" ]\n",
")\n"
")"
]
},
{
"cell_type": "code",
"execution_count": 5,
"execution_count": null,
"id": "10",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Session set up\n",
"\u001b[1m\u001b[34muser: Hi what is 2+2?\n",
"\u001b[22m\u001b[33massistant: 2 + 2 equals 4.\n",
"\u001b[22m\u001b[39mConversation ID: 87b580f3-3373-49fd-98ce-ac4f18f38838\n",
"\u001b[22m\u001b[33massistant: response_audio.wav\n",
"\u001b[22m\u001b[39mConversation ID: 87b580f3-3373-49fd-98ce-ac4f18f38838\n"
]
}
],
"outputs": [],
"source": [
"orchestrator = PromptSendingOrchestrator(objective_target=target)\n",
"\n",
@@ -172,22 +172,18 @@
},
{
"cell_type": "code",
"execution_count": 6,
"execution_count": null,
"id": "11",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"id": "12",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Session set up\n",
"\u001b[1m\u001b[34muser: Now add 2?\n",
"\u001b[22m\u001b[33massistant: 4 + 2 equals 6.\n",
"\u001b[22m\u001b[39mConversation ID: 5104182c-2392-497a-84ec-0ddb577c21b4\n",
"\u001b[22m\u001b[33massistant: response_audio.wav\n",
"\u001b[22m\u001b[39mConversation ID: 5104182c-2392-497a-84ec-0ddb577c21b4\n"
]
}
],
"outputs": [],
"source": [
"orchestrator = PromptSendingOrchestrator(objective_target=target)\n",
"\n",
@@ -198,33 +194,35 @@
},
{
"cell_type": "code",
"execution_count": 7,
"execution_count": null,
"id": "13",
"metadata": {},
"outputs": [],
"source": [
"await target.disconnect() # type: ignore"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "14",
"metadata": {},
"outputs": [],
"source": [
"from pyrit.memory import CentralMemory\n",
"\n",
"memory = CentralMemory.get_memory_instance()\n",
"memory.dispose_engine()"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "pyrit2",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.9"
}
},
"nbformat": 4,
"nbformat_minor": 2
"nbformat_minor": 5
}
31 changes: 29 additions & 2 deletions doc/code/targets/realtime_target.py
@@ -34,8 +34,6 @@
from pyrit.orchestrator import PromptSendingOrchestrator
from pyrit.prompt_normalizer.normalizer_request import NormalizerRequest, NormalizerRequestPiece

# text_prompt_to_send = "Hi what is 2+2?"

prompt_to_send = "test_rt_audio1.wav"

normalizer_request = NormalizerRequest(
@@ -56,6 +54,27 @@

await orchestrator.print_conversations_async() # type: ignore

# %% [markdown]
# ## Single Turn Text Conversation

# %%
from pyrit.models.prompt_request_piece import PromptRequestPiece
from pyrit.orchestrator import PromptSendingOrchestrator


await target.connect()
prompt_to_send = "Give me an image of a raccoon pirate as a Spanish baker in Spain"

request = PromptRequestPiece(
role="user",
original_value=prompt_to_send,
).to_prompt_request_response()


orchestrator = PromptSendingOrchestrator(objective_target=target)
response = await orchestrator.send_prompts_async(prompt_list=[prompt_to_send]) # type: ignore
await orchestrator.print_conversations_async() # type: ignore

# %% [markdown]
# ## Multiturn Text Conversation

@@ -95,6 +114,8 @@

await orchestrator.print_conversations_async() # type: ignore

# %%

# %%
orchestrator = PromptSendingOrchestrator(objective_target=target)

@@ -104,3 +125,9 @@

# %%
await target.disconnect() # type: ignore

# %%
from pyrit.memory import CentralMemory

memory = CentralMemory.get_memory_instance()
memory.dispose_engine()
43 changes: 20 additions & 23 deletions pyrit/orchestrator/multi_turn/red_teaming_orchestrator.py
@@ -10,7 +10,6 @@
from pyrit.common.utils import combine_dict
from pyrit.common.path import RED_TEAM_ORCHESTRATOR_PATH
from pyrit.models import PromptRequestPiece, Score
from pyrit.models.prompt_request_response import PromptRequestResponse
from pyrit.orchestrator import MultiTurnAttackResult, MultiTurnOrchestrator
from pyrit.prompt_converter import PromptConverter
from pyrit.prompt_normalizer import NormalizerRequest, NormalizerRequestPiece, PromptNormalizer
@@ -253,18 +252,16 @@ async def _retrieve_and_send_prompt_async(
conversation_id=objective_target_conversation_id,
)

response_piece = await self._prompt_normalizer.send_prompt_async(
normalizer_request=normalizer_request,
target=self._objective_target,
labels=memory_labels,
orchestrator_identifier=self.get_identifier(),
)

if isinstance(response_piece, PromptRequestResponse):
return response_piece.request_pieces[0]
response_piece = (
await self._prompt_normalizer.send_prompt_async(
normalizer_request=normalizer_request,
target=self._objective_target,
labels=memory_labels,
orchestrator_identifier=self.get_identifier(),
)
).request_pieces[0]

else:
return response_piece[0].request_pieces[0]
return response_piece

async def _check_conversation_complete_async(self, objective_target_conversation_id: str) -> Union[Score, None]:
"""
Expand Down Expand Up @@ -408,19 +405,19 @@ async def _get_prompt_from_adversarial_chat(
prompt_text=prompt_text, conversation_id=adversarial_chat_conversation_id
)

response_text_values = await self._prompt_normalizer.send_prompt_async(
normalizer_request=normalizer_request,
target=self._adversarial_chat,
orchestrator_identifier=self.get_identifier(),
labels=memory_labels,
response_text = (
(
await self._prompt_normalizer.send_prompt_async(
normalizer_request=normalizer_request,
target=self._adversarial_chat,
orchestrator_identifier=self.get_identifier(),
labels=memory_labels,
)
)
.request_pieces[0]
.converted_value
)

if isinstance(response_text_values, PromptRequestResponse):
response_text = response_text_values.request_pieces[0].converted_value

else:
response_text = response_text_values[0].request_pieces[0].converted_value

return response_text

def _get_last_objective_target_response(self, objective_target_conversation_id: str) -> PromptRequestPiece | None:
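If other orchestrators need the same unwrap, it can be factored into a small utility. The helpers below are hypothetical (nothing like them is added in this commit); they are sketched only to make the new single-response contract explicit.

    from pyrit.models.prompt_request_piece import PromptRequestPiece
    from pyrit.models.prompt_request_response import PromptRequestResponse

    def first_piece(response: PromptRequestResponse) -> PromptRequestPiece:
        # Hypothetical convenience wrapper: with send_prompt_async returning a single
        # PromptRequestResponse, the first piece is always request_pieces[0].
        return response.request_pieces[0]

    def first_text(response: PromptRequestResponse) -> str:
        # Hypothetical: the converted_value of the first piece is what
        # _get_prompt_from_adversarial_chat extracts above.
        return first_piece(response).converted_value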
