
Commit dee654d

FIX: small edits to populate_prompt_piece_scores (#626)
1 parent 4a038b3 commit dee654d

File tree

doc/_toc.yml
doc/code/converters/pdf_converter.ipynb
doc/code/converters/pdf_converter.py
pyrit/memory/memory_interface.py
tests/unit/memory/test_memory_interface.py

5 files changed: +66 −10

doc/_toc.yml (+1)

@@ -69,6 +69,7 @@ chapters:
   - file: code/converters/ansi_attack_converter
   - file: code/converters/char_swap_attack_generator
   - file: code/converters/math_prompt_converter
+  - file: code/converters/pdf_converter
   - file: code/scoring/0_scoring
     sections:
       - file: code/scoring/1_azure_content_safety_scorers

doc/code/converters/pdf_converter.ipynb (+4 −2)

@@ -5,7 +5,7 @@
    "id": "0",
    "metadata": {},
    "source": [
-    "**PDF Converter with Multiple Modes**\n",
+    "# PDF Converter with Multiple Modes:\n",
     "\n",
     "This script demonstrates the use of the `PDFConverter` for generating PDFs in two different modes:\n",
     "\n",
@@ -14,7 +14,7 @@
     "\n",
     "The `PromptSendingOrchestrator` is used to handle the interaction with the `PDFConverter` and the mock `TextTarget` target system.\n",
     "\n",
-    "### Key Features\n",
+    "## Key Features\n",
     "\n",
     "1. **Template-Based Generation**:\n",
     "    - Populate placeholders in a YAML-based template using dynamic data.\n",
@@ -115,6 +115,8 @@
    }
   ],
   "source": [
+    "# Direct Prompt PDF Generation (No Template)\n",
+    "\n",
     "# Define a simple string prompt (no templates)\n",
     "prompt = \"This is a simple test string for PDF generation. No templates here!\"\n",
     "\n",

doc/code/converters/pdf_converter.py (+5 −3)

@@ -1,5 +1,5 @@
 # %% [markdown]
-# **PDF Converter with Multiple Modes**
+# # PDF Converter with Multiple Modes:
 #
 # This script demonstrates the use of the `PDFConverter` for generating PDFs in two different modes:
 #
@@ -8,7 +8,7 @@
 #
 # The `PromptSendingOrchestrator` is used to handle the interaction with the `PDFConverter` and the mock `TextTarget` target system.
 #
-# ### Key Features
+# ## Key Features
 #
 # 1. **Template-Based Generation**:
 #    - Populate placeholders in a YAML-based template using dynamic data.
@@ -74,7 +74,9 @@

 await orchestrator.print_conversations_async()  # type: ignore

-# %% Direct Prompt PDF Generation (No Template)
+# %%
+# Direct Prompt PDF Generation (No Template)
+
 # Define a simple string prompt (no templates)
 prompt = "This is a simple test string for PDF generation. No templates here!"

pyrit/memory/memory_interface.py (+4 −5)

@@ -227,20 +227,19 @@ def populate_prompt_piece_scores(self, prompt_request_pieces: list[PromptRequest
         Adds scores in the database to prompt request piece objects

         Args:
-            entries (list[PromptMemoryEntry]): The list of promptRequestPieces to add
+            prompt_request_pieces (list[PromptRequestPiece]): The list of PromptRequestPieces to add scores to.

         Returns:
-            list[PromptRequestPiece]: A list of PromptRequestPiece objects with their associated scores.
+            None
         """
-        result: list[PromptRequestPiece] = []
         for prompt_request_piece in prompt_request_pieces:
             score_entries = self._query_entries(
-                ScoreEntry, conditions=ScoreEntry.prompt_request_response_id == prompt_request_piece.id
+                ScoreEntry, conditions=ScoreEntry.prompt_request_response_id == prompt_request_piece.original_prompt_id
             )
             scores = [score_entry.get_score() for score_entry in score_entries]
             prompt_request_piece.scores = scores

-        return result
+        return None

     def get_prompt_request_pieces(
         self,
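Why matching on `original_prompt_id` also covers non-duplicated pieces: scores are keyed on the original piece's id, and `PromptRequestPiece` appears to default `original_prompt_id` to the piece's own `id` when none is supplied (an assumption consistent with the duplicate-piece test added below). A minimal sketch of that default:

# Sketch: original pieces carry original_prompt_id == id by default, so one
# query on original_prompt_id resolves scores for originals and duplicates
# alike. (Default behavior assumed; the new test below relies on it.)
from uuid import uuid4
from pyrit.models import PromptRequestPiece

original_id = uuid4()
original = PromptRequestPiece(id=original_id, role="assistant", original_value="prompt text")
duplicate = PromptRequestPiece(role="assistant", original_value="prompt text", original_prompt_id=original_id)

assert original.original_prompt_id == original_id   # defaults to its own id
assert duplicate.original_prompt_id == original_id  # points back at the original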

tests/unit/memory/test_memory_interface.py (+52)

@@ -1576,3 +1576,55 @@ def test_get_prompt_request_pieces_sorts(
         if new_value != current_value:
             if any(o.conversation_id == current_value for o in response[response.index(obj) :]):
                 assert False, "Conversation IDs are not grouped together"
+
+
+def test_get_prompt_request_pieces_calls_populate_prompt_piece_scores(
+    duckdb_instance: MemoryInterface, sample_conversations: list[PromptRequestPiece]
+):
+    conversation_id = sample_conversations[0].conversation_id
+    duckdb_instance.add_request_pieces_to_memory(request_pieces=sample_conversations)
+
+    with patch.object(duckdb_instance, "populate_prompt_piece_scores") as mock_populate:
+        duckdb_instance.get_prompt_request_pieces(conversation_id=conversation_id)
+        assert mock_populate.called
+
+
+def test_populate_prompt_piece_scores_duplicate_piece(duckdb_instance: MemoryInterface):
+    original_id = uuid4()
+    duplicate_id = uuid4()
+
+    pieces = [
+        PromptRequestPiece(
+            id=original_id,
+            role="assistant",
+            original_value="prompt text",
+        ),
+        PromptRequestPiece(
+            id=duplicate_id,
+            role="assistant",
+            original_value="prompt text",
+            original_prompt_id=original_id,
+        ),
+    ]
+
+    duckdb_instance.add_request_pieces_to_memory(request_pieces=pieces)
+
+    score = Score(
+        score_value=str(0.8),
+        score_value_description="Sample description",
+        score_type="float_scale",
+        score_category="Sample category",
+        score_rationale="Sample rationale",
+        score_metadata="Sample metadata",
+        prompt_request_response_id=original_id,
+    )
+    duckdb_instance.add_scores_to_memory(scores=[score])
+
+    duckdb_instance.populate_prompt_piece_scores(pieces)
+
+    assert len(pieces[0].scores) == 1
+    assert pieces[0].scores[0].score_value == "0.8"
+
+    # Check that the duplicate piece has the same score as the original
+    assert len(pieces[1].scores) == 1
+    assert pieces[1].scores[0].score_value == "0.8"
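As the first new test asserts, `get_prompt_request_pieces` invokes `populate_prompt_piece_scores` internally, so callers receive scored pieces without an extra call. A hypothetical usage sketch (variable names assumed):

# Hypothetical usage: retrieved pieces arrive with their scores attached.
pieces = duckdb_instance.get_prompt_request_pieces(conversation_id=conversation_id)
for piece in pieces:
    for score in piece.scores:
        print(piece.id, score.score_value)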
