Commit e7c161a

cleanup

1 parent 8afe05b commit e7c161a

1 file changed

inference/serving-non-optimized.py

Lines changed: 3 additions & 11 deletions
@@ -73,15 +73,15 @@ def generate(self, chat, is_user_prompt=True, enforce_policies=None):
         }

         contructed_guideline = ""
-        selected_policy_set = INPUT_POLICIES if is_user_prompt else OUTPUT_POLICIES
+        SELECTED_POLICY_SET = INPUT_POLICIES if is_user_prompt else OUTPUT_POLICIES

         if is_user_prompt:
             enforce_policies = enforce_policies or ["NO_DANGEROUS_CONTENT", "NO_HARASSMENT", "NO_HATE_SPEECH", "NO_SEXUAL_CONTENT"]
         for policy in enforce_policies:
             if contructed_guideline == "":
-                contructed_guideline = selected_policy_set[policy]
+                contructed_guideline = SELECTED_POLICY_SET[policy]
             else:
-                contructed_guideline = contructed_guideline + "\n * " + selected_policy_set[policy]
+                contructed_guideline = contructed_guideline + "\n* " + SELECTED_POLICY_SET[policy]


         inputs = tokenizer.apply_chat_template(chat, guideline=contructed_guideline, return_tensors="pt", return_dict=True).to(model.device)
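The loop above flattens the selected policy descriptions into the guideline string handed to apply_chat_template; besides the rename to SELECTED_POLICY_SET, the commit drops the stray space in the "\n * " bullet separator. A minimal sketch of the construction, with placeholder policy text (the real INPUT_POLICIES/OUTPUT_POLICIES dicts are defined elsewhere in this file):

    # Placeholder entries; the real policy strings live elsewhere in the file.
    INPUT_POLICIES = {
        "NO_DANGEROUS_CONTENT": '"No Dangerous Content": ...',
        "NO_HARASSMENT": '"No Harassment": ...',
    }

    enforce_policies = ["NO_DANGEROUS_CONTENT", "NO_HARASSMENT"]

    # Equivalent to the if/else accumulation in the diff, without the
    # empty-string special case for the first policy.
    guideline = "\n* ".join(INPUT_POLICIES[p] for p in enforce_policies)
    print(guideline)
    # "No Dangerous Content": ...
    # * "No Harassment": ...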
@@ -99,9 +99,7 @@ def generate(self, chat, is_user_prompt=True, enforce_policies=None):
         # Convert these logits to a probability with softmax
         probabilities = softmax(selected_logits, dim=0)

-        # Return probability of 'Yes'
         score = probabilities[0].item()
-        print(score) # 0.7310585379600525

         print(f"Model: Score: {score}")
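The two deleted lines were a leftover comment and a debug print of the raw score. The scoring step itself is a two-way softmax over the logits of the 'Yes' and 'No' answer tokens, with index 0 holding 'Yes' (per the deleted comment). A self-contained sketch with hypothetical logit values:

    import torch
    from torch.nn.functional import softmax

    # Hypothetical logits for the "Yes" and "No" tokens, in that order.
    selected_logits = torch.tensor([1.0, 0.0])
    probabilities = softmax(selected_logits, dim=0)
    score = probabilities[0].item()  # probability of "Yes"
    print(score)  # ~0.731 for a logit gap of 1.0, matching the deleted debug value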
@@ -188,9 +186,3 @@ async def chat_classification_response(body: ChatClassificationRequestBody):

    app.include_router(router)
    return app
-
-
-# @app.local_entrypoint()
-# def main():
-#     model = Model()
-#     model.generate.remote()
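The deleted tail was a commented-out Modal local entrypoint for ad-hoc testing. A hedged sketch of a working revival, assuming app is the modal.App and Model wraps generate as a Modal method; note that generate requires a chat argument, so the message here is a hypothetical addition:

    @app.local_entrypoint()
    def main():
        model = Model()
        # The deleted stub called generate.remote() with no arguments; the
        # method signature requires a chat, so pass a hypothetical one.
        model.generate.remote([{"role": "user", "content": "Hello!"}])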
