We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent f947047 commit 9514044Copy full SHA for 9514044
pyproject.toml
@@ -1,6 +1,6 @@
1
[project]
2
name = "detect-jailbreak"
3
-version = "0.1.4"
+version = "0.1.5"
4
description = "A prompt-injection and jailbreak detector for LLMs."
5
authors = [
6
{name = "Guardrails AI", email = "[email protected]"},
validator/main.py
@@ -17,7 +17,7 @@
17
from .models import PromptSaturationDetectorV3
18
19
20
-@register_validator(name="guardrails/detect-jailbreak", data_type="string")
+@register_validator(name="guardrails/detect_jailbreak", data_type="string")
21
class DetectJailbreak(Validator):
22
"""Validates that a prompt does not attempt to circumvent restrictions on behavior.
23
An example would be convincing the model via prompt to provide instructions that
0 commit comments