Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
22 changes: 11 additions & 11 deletions nemoguardrails/colang/v1_0/runtime/runtime.py
Original file line number Diff line number Diff line change
Expand Up @@ -159,13 +159,6 @@ async def generate_events(

log.info("Processing event: %s", last_event)

event_type = last_event["type"]
log.info(
"Event :: %s %s",
event_type,
str({k: v for k, v in last_event.items() if k != "type"}),
)

# If we need to execute an action, we start doing that.
if last_event["type"] == "StartInternalSystemAction":
next_events = await self._process_start_action(events)
Expand All @@ -186,16 +179,23 @@ async def generate_events(
if len(next_events) == 0:
next_events = [new_event_dict("Listen")]

# Otherwise, we append the event and continue the processing.
events.extend(next_events)
new_events.extend(next_events)

# Log all generated events and add them to processing log
for event in next_events:
if event["type"] != "EventHistoryUpdate":
event_type = event["type"]
log.info(
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

nit: Should this be a log.debug() rather than log.info()? How much noise would this make in logs?

"Event :: %s %s",
event_type,
str({k: v for k, v in event.items() if k != "type"}),
)
processing_log.append(
{"type": "event", "timestamp": time(), "data": event}
)

# Append events to the event stream and new_events list
events.extend(next_events)
new_events.extend(next_events)

# If the next event is a listen, we stop the processing.
if next_events[-1]["type"] == "Listen":
break
Expand Down
20 changes: 19 additions & 1 deletion nemoguardrails/logging/callbacks.py
Original file line number Diff line number Diff line change
Expand Up @@ -163,9 +163,27 @@ async def on_llm_end(
llm_call_info = LLMCallInfo()
llm_call_info.completion = response.generations[0][0].text
llm_call_info.finished_at = time()

completion_text = response.generations[0][0].text
reasoning_content = None

if isinstance(response.generations[0][0], ChatGeneration):
chat_gen = response.generations[0][0]
if hasattr(chat_gen, "message") and hasattr(
chat_gen.message, "additional_kwargs"
):
reasoning_content = chat_gen.message.additional_kwargs.get(
"reasoning_content"
)

if reasoning_content:
full_completion = f"{reasoning_content}\n---\n{completion_text}"
else:
full_completion = completion_text

log.info(
"Completion :: %s",
response.generations[0][0].text,
full_completion,
extra={"id": llm_call_info.id, "task": llm_call_info.task},
)

Expand Down
14 changes: 13 additions & 1 deletion nemoguardrails/logging/verbose.py
Original file line number Diff line number Diff line change
Expand Up @@ -92,7 +92,17 @@ def emit(self, record) -> None:
elif title == "Event":
# For events, we also color differently the type of event.
event_name, body = body.split(" ", 1)
title = f"[blue]{title}[/] [bold]{event_name}[/]"

if event_name == "BotThinking":
title = f"[blue]{title}[/] [yellow bold]{event_name}[/]"
elif event_name.startswith("Start") and event_name.endswith("Action"):
title = f"[magenta][bold]Start[/]{event_name[5:]}[/]"
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Where does the magic number 5 come from here?

elif event_name.endswith("ActionFinished"):
title = f"[magenta]{event_name[:-8]}[bold]Finished[/][/]"
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Another magic number here, is there a danger we add longer-named events and this breaks?

elif event_name in ["ContextUpdate"]:
title = f"[blue]{title}[/] [dim]{event_name}[/]"
else:
title = f"[blue]{title}[/] [bold]{event_name}[/]"

else:
if title == "Processing event" and body.startswith("{"):
Expand Down Expand Up @@ -129,6 +139,8 @@ def emit(self, record) -> None:
title = f"[magenta]{event_type[:-8]}[bold]Finished[/][/]"
elif event_type.endswith("ActionFailed"):
title = f"[magenta]{event_type[:-6]}[bold]Failed[/][/]"
elif event_type == "BotThinking":
title = f"[yellow bold]{event_type}[/]"
else:
title = event_type
else:
Expand Down
160 changes: 160 additions & 0 deletions tests/test_runtime_event_logging.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,160 @@
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
from unittest.mock import patch

import pytest

from nemoguardrails import RailsConfig
from tests.utils import TestChat


@pytest.mark.asyncio
async def test_bot_thinking_event_logged_in_runtime(caplog):
    """A BotThinking event must be written to the runtime event log.

    Patches the reasoning-trace contextvar getter so the runtime emits a
    BotThinking event, then asserts the "Event :: BotThinking" line shows up
    in the captured INFO-level records.
    """
    test_reasoning_trace = "Let me think about this step by step..."

    caplog.set_level(logging.INFO)

    with patch(
        "nemoguardrails.actions.llm.generation.get_and_clear_reasoning_trace_contextvar"
    ) as mock_get_reasoning:
        mock_get_reasoning.return_value = test_reasoning_trace

        config = RailsConfig.from_content(config={"models": [], "passthrough": True})
        chat = TestChat(config, llm_completions=["The answer is 42"])

        await chat.app.generate_events_async(
            [{"type": "UserMessage", "text": "What is the answer?"}]
        )

        # NOTE(review): event-type strings are repeated across these tests;
        # an enum/constant would be more robust to typos — confirm with owners.
        bot_thinking_logs = [
            record
            for record in caplog.records
            if "Event :: BotThinking" in record.message
        ]
        assert len(bot_thinking_logs) >= 1


@pytest.mark.asyncio
async def test_bot_message_event_logged_in_runtime(caplog):
    """A BotMessage event must appear in the runtime INFO-level logs."""
    caplog.set_level(logging.INFO)

    config = RailsConfig.from_content(config={"models": [], "passthrough": True})
    chat = TestChat(config, llm_completions=["The answer is 42"])

    await chat.app.generate_events_async(
        [{"type": "UserMessage", "text": "What is the answer?"}]
    )

    # Collect every captured record that logged the BotMessage event.
    matching = [
        rec for rec in caplog.records if "Event :: BotMessage" in rec.message
    ]
    assert len(matching) >= 1


@pytest.mark.asyncio
async def test_context_update_event_logged_in_runtime(caplog):
    """A ContextUpdate event must be logged during a simple generation run."""
    caplog.set_level(logging.INFO)

    config = RailsConfig.from_content(config={"models": [], "passthrough": True})
    chat = TestChat(config, llm_completions=["Response"])

    await chat.app.generate_events_async([{"type": "UserMessage", "text": "Hello"}])

    # Accept either log format for the ContextUpdate event line.
    def _is_context_update(rec):
        return (
            "Event :: ContextUpdate" in rec.message
            or "Event ContextUpdate" in rec.message
        )

    context_update_logs = [rec for rec in caplog.records if _is_context_update(rec)]
    assert len(context_update_logs) >= 1


@pytest.mark.asyncio
async def test_all_events_logged_when_multiple_events_generated(caplog):
    """When a turn produces several events, each one must be logged.

    With a reasoning trace patched in, a single generation yields both a
    BotThinking and a BotMessage event; both must show up in the log records.
    """
    test_reasoning_trace = "Analyzing..."

    caplog.set_level(logging.INFO)

    with patch(
        "nemoguardrails.actions.llm.generation.get_and_clear_reasoning_trace_contextvar"
    ) as mock_get_reasoning:
        mock_get_reasoning.return_value = test_reasoning_trace

        config = RailsConfig.from_content(config={"models": [], "passthrough": True})
        chat = TestChat(config, llm_completions=["Response"])

        await chat.app.generate_events_async([{"type": "UserMessage", "text": "Test"}])

        messages = [rec.message for rec in caplog.records]
        assert any("Event :: BotThinking" in msg for msg in messages)
        assert any("Event :: BotMessage" in msg for msg in messages)


@pytest.mark.asyncio
async def test_bot_thinking_event_logged_before_bot_message(caplog):
    """The BotThinking event must be logged before the BotMessage event."""
    test_reasoning_trace = "Step 1: Understand\nStep 2: Respond"

    caplog.set_level(logging.INFO)

    with patch(
        "nemoguardrails.actions.llm.generation.get_and_clear_reasoning_trace_contextvar"
    ) as mock_get_reasoning:
        mock_get_reasoning.return_value = test_reasoning_trace

        config = RailsConfig.from_content(config={"models": [], "passthrough": True})
        chat = TestChat(config, llm_completions=["Answer"])

        await chat.app.generate_events_async(
            [{"type": "UserMessage", "text": "Question"}]
        )

        # Index of the first record mentioning each event, or None if absent.
        def _first_index(needle):
            return next(
                (
                    idx
                    for idx, rec in enumerate(caplog.records)
                    if needle in rec.message
                ),
                None,
            )

        bot_thinking_idx = _first_index("Event :: BotThinking")
        bot_message_idx = _first_index("Event :: BotMessage")

        assert bot_thinking_idx is not None
        assert bot_message_idx is not None
        assert bot_thinking_idx < bot_message_idx


@pytest.mark.asyncio
async def test_event_history_update_not_logged(caplog):
    """EventHistoryUpdate events are internal and must never be logged."""
    caplog.set_level(logging.INFO)

    config = RailsConfig.from_content(config={"models": [], "passthrough": True})
    chat = TestChat(config, llm_completions=["Response"])

    await chat.app.generate_events_async([{"type": "UserMessage", "text": "Test"}])

    # Neither log format for EventHistoryUpdate may appear in any record.
    assert not any(
        "Event :: EventHistoryUpdate" in rec.message
        or "Event EventHistoryUpdate" in rec.message
        for rec in caplog.records
    )