Commit

Addressing markdown rendering issue that causes UI to remove words following a colon
sanjay920 committed Feb 14, 2024
1 parent d798a0f commit a54dd7e
Showing 1 changed file with 15 additions and 8 deletions.
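The change works around Streamlit's markdown renderer treating ":word:" sequences as emoji shortcodes, which can swallow the text that follows a colon. Below is a minimal, illustrative sketch of the escaping this commit introduces; the standalone snippet and its sample string are assumptions for demonstration, not part of the diff.

import streamlit as st

def escape_colons_for_display(text):
    # Backslash-escape colons so Streamlit's markdown renders them literally
    # rather than trying to resolve ":word:" as an emoji shortcode.
    return text.replace(":", "\\:")

raw = "status: done :heavy_check_mark:"      # sample text, assumed for illustration
st.markdown(raw)                             # renderer may drop or transform the text after the colons
st.markdown(escape_colons_for_display(raw))  # colons and the words after them display verbatim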
23 changes: 15 additions & 8 deletions services/frontend/ui/app/1_💬_Chat.py
@@ -37,6 +37,13 @@
 default_index = 0
 welcome_assistant_label = "Assistant: Welcome Assistant"
 
+def escape_colons_for_display(text):
+    """
+    Escapes colons in the text to prevent Streamlit's markdown renderer
+    from interpreting them as emoji shortcodes.
+    """
+    return text.replace(":", "\\:")
+
 # Search for the "Welcome Assistant" in the entity options and set it as the default if found
 for i, (label, _) in enumerate(st.session_state.entity_options):
     if label == welcome_assistant_label:
@@ -64,7 +71,6 @@
 
 # Set the session state only if a selection is made
 if selected_entity:
-    print("selected entity", selected_entity)
     st.session_state.selected_entity = selected_entity[1]
 
 
@@ -105,6 +111,7 @@ async def connect():
                 function_call += content
             else:  # check content
                 if content:
+                    escape_colons_for_display(content)
                     conversation += content
                     message_placeholder.markdown(conversation + "▌")
 
@@ -133,7 +140,6 @@ async def connect():
 
 asyncio.new_event_loop().run_until_complete(connect())
 
-
 # Handle new chat input
 prompt = st.chat_input("Message Rubra...")
 
@@ -151,28 +157,29 @@ async def connect():
 # Display chat messages
 for message in st.session_state.messages:
     with st.chat_message(message["role"]):
-        st.markdown(message["content"])
+        escaped_content = escape_colons_for_display(message["content"])
+        st.markdown(escaped_content)
 
 # Update the chat_started state as soon as there is a prompt
 if prompt:
     st.session_state.chat_started = True
+    escaped_prompt = escape_colons_for_display(prompt)
     st.session_state.messages.append({"role": "user", "content": prompt})
 
     with st.chat_message("user"):
-        st.markdown(prompt)
+        st.markdown(escaped_prompt)
 
     # Determine the type of the selected entity and process accordingly
     if st.session_state.selected_entity.startswith("model_"):
         # Process chat with model
         model_id = st.session_state.selected_entity.split("model_")[1]
-        print(st.session_state.selected_entity)
         with st.chat_message("assistant"):
             message_placeholder = st.empty()
             full_response = ""
             logging.info(
                 "Start Time: %s", time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime())
             )
-            print("running with model id", model_id)
+            logging.info("running with model id " + model_id)
             for response in rubra_client.chat.completions.create(
                 model=model_id,
                 messages=[
@@ -182,8 +189,8 @@ async def connect():
                 stream=True,
             ):
                 full_response += response.choices[0].delta.content or ""
-                message_placeholder.markdown(full_response + "▌")
-            message_placeholder.markdown(full_response)
+                message_placeholder.markdown(escape_colons_for_display(full_response) + "▌")
+            message_placeholder.markdown(escape_colons_for_display(full_response))
             logging.info(
                 "End Time: %s", time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime())
             )
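Note that the escaping is applied only where text reaches st.markdown (replaying stored messages, echoing the user prompt, and rendering the streamed model output), while the raw prompt is what gets appended to st.session_state.messages. A short sketch of that split, with an assumed sample prompt:

prompt = "deploy at 14:30 :rocket:"   # assumed example input
st.session_state.messages.append({"role": "user", "content": prompt})  # original text is stored

for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(escape_colons_for_display(message["content"]))  # escaped only at render time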
