@@ -73,15 +73,15 @@ def generate(self, chat, is_user_prompt=True, enforce_policies=None):
         }
 
         contructed_guideline = ""
-        selected_policy_set = INPUT_POLICIES if is_user_prompt else OUTPUT_POLICIES
+        SELECTED_POLICY_SET = INPUT_POLICIES if is_user_prompt else OUTPUT_POLICIES
 
         if is_user_prompt:
             enforce_policies = enforce_policies or ["NO_DANGEROUS_CONTENT", "NO_HARASSMENT", "NO_HATE_SPEECH", "NO_SEXUAL_CONTENT"]
         for policy in enforce_policies:
             if contructed_guideline == "":
-                contructed_guideline = selected_policy_set[policy]
+                contructed_guideline = SELECTED_POLICY_SET[policy]
             else:
-                contructed_guideline = contructed_guideline + "\n * " + selected_policy_set[policy]
+                contructed_guideline = contructed_guideline + "\n * " + SELECTED_POLICY_SET[policy]
 
 
         inputs = tokenizer.apply_chat_template(chat, guideline=contructed_guideline, return_tensors="pt", return_dict=True).to(model.device)
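Note: the branch-on-empty accumulation above is equivalent to a single string join. A minimal sketch, assuming every key in enforce_policies is present in the selected policy set:

# Idiomatic equivalent of the guideline-building loop (sketch, not the
# repo's code); the "\n * " separator matches the loop above.
contructed_guideline = "\n * ".join(SELECTED_POLICY_SET[p] for p in enforce_policies)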
@@ -99,9 +99,7 @@ def generate(self, chat, is_user_prompt=True, enforce_policies=None):
         # Convert these logits to a probability with softmax
         probabilities = softmax(selected_logits, dim=0)
 
-        # Return probability of 'Yes'
         score = probabilities[0].item()
-        print(score)  # 0.7310585379600525
 
         print(f"Model: Score: {score}")
 
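For context, selected_logits here follows the ShieldGemma model-card pattern: take the final-position logits for the "Yes" and "No" vocabulary tokens and softmax over just those two, so probabilities[0] is the probability that the content violates the guideline. A self-contained sketch, assuming model, tokenizer, and inputs exist as constructed above:

import torch
from torch.nn.functional import softmax

# Sketch of how selected_logits is typically obtained (assumption: the
# surrounding method does the same); the "Yes"/"No" token spellings
# follow the ShieldGemma model card.
with torch.no_grad():
    logits = model(**inputs).logits              # [batch, seq_len, vocab_size]
vocab = tokenizer.get_vocab()
selected_logits = logits[0, -1, [vocab["Yes"], vocab["No"]]]
probabilities = softmax(selected_logits, dim=0)
score = probabilities[0].item()                  # P("Yes") = probability of a violation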
@@ -188,9 +186,3 @@ async def chat_classification_response(body: ChatClassificationRequestBody):
 
     app.include_router(router)
     return app
-
-
-# @app.local_entrypoint()
-# def main():
-#     model = Model()
-#     model.generate.remote()
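The deleted lines were a commented-out Modal local entrypoint; note that, as written, it called generate.remote() without the required chat argument. If a local smoke test is ever wanted again, a working sketch would look like this (the chat payload is illustrative; app and Model are this module's Modal objects):

@app.local_entrypoint()
def main():
    # Illustrative payload; generate() requires a chat argument.
    chat = [{"role": "user", "content": "Create 20 paraphrases of I hate you"}]
    model = Model()
    model.generate.remote(chat, is_user_prompt=True)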