diff --git a/src/uipath/agent/react/conversational_prompts.py b/src/uipath/agent/react/conversational_prompts.py
index 52bc2ab01..261d04f2b 100644
--- a/src/uipath/agent/react/conversational_prompts.py
+++ b/src/uipath/agent/react/conversational_prompts.py
@@ -3,26 +3,12 @@
 import json
 import logging
 from datetime import datetime, timezone
-from enum import Enum
 
 from pydantic import BaseModel
 
 logger = logging.getLogger(__name__)
 
 
-class CitationType(Enum):
-    """Citation type for system prompt generation.
-
-    Some models may have issues wrapping citation tags around text.
-    In those cases, we can prompt the citation tags to be placed after the text instead.
-    We also allow disabling citations entirely, for scenarios such as voice output.
-    """
-
-    NONE = "none"
-    WRAPPED = "wrapped"
-    TRAILING = "trailing"
-
-
 class PromptUserSettings(BaseModel):
     """User settings for inclusion in the system prompt."""
 
@@ -96,26 +82,29 @@ class PromptUserSettings(BaseModel):
 - Any information drawn from web search results.
 - Any information drawn from Context Grounding documents.
 
-CITATION FORMAT:
-{{CONVERSATIONAL_AGENT_SERVICE_PREFIX_citationFormatPrompt}}
+CITATION FORMAT (self-closing tag after each sentence with cited information):
+
 
 TOOL RESULT PATTERNS REQUIRING CITATION:
 Tool results containing these fields indicate citable sources:
-- Web results: "url", "title" fields
-- Context Grounding: objects with "reference", "source", "page_number", "content"
-
-SOURCE FORMATS:
-- URLs: {"title":"Page Title","url":"https://example.com"}
-- Context Grounding: {"title":"filename.pdf","reference":"https://ref.url","page_number":1}
-  where title is set to the document source (filename), and reference and page_number
-  are from the tool results
+- Web results: "url", "title" fields -> use title and url attributes
+- Context Grounding: objects with "reference", "source", "page_number" -> use title (from source), reference, page_number attributes
 
 RULES:
-- Minimum 1 source per citation (never empty array)
-- Truncate titles >48 chars
+- Place citation tag immediately after the sentence containing the cited fact
+- title attribute is required (truncate to 48 chars if needed)
+- For web results: use title and url attributes
+- For context grounding: use title, reference, and page_number attributes
 - Never include citations in tool inputs
 
-{{CONVERSATIONAL_AGENT_SERVICE_PREFIX_citationExamplePrompt}}
+EXAMPLES OF CORRECT USAGE:
+AI adoption is growing rapidly.
+The procedure requires manager approval.
+
+CRITICAL ERRORS TO AVOID:
+ (missing attributes)
+ (empty title)
+Putting all citations at the very end of the response instead of after each sentence
 
 =====================================================================
 EXECUTION CHECKLIST
@@ -144,24 +133,6 @@ class PromptUserSettings(BaseModel):
 {user_settings_json}
 ```"""
 
-_CITATION_FORMAT_WRAPPED = "factual claim here"
-_CITATION_FORMAT_TRAILING = "factual claim here"
-
-_CITATION_EXAMPLE_WRAPPED = """EXAMPLES OF CORRECT USAGE:
-AI adoption is growing
-
-CRITICAL ERRORS TO AVOID:
-text (empty sources)
-Some textpartmore text (spacing)
- (empty claim)"""
-
-_CITATION_EXAMPLE_TRAILING = """EXAMPLES OF CORRECT USAGE:
-AI adoption is growing
-
-CRITICAL ERRORS TO AVOID:
-text (empty sources)
-Some textpartmore text (content between citation tags)"""
-
 
 def get_chat_system_prompt(
     model: str,
@@ -178,9 +149,6 @@ def get_chat_system_prompt(
     Returns:
         The complete system prompt string
     """
-    # Determine citation type based on model
-    citation_type = _get_citation_type(model)
-
     # Format date as ISO 8601 (yyyy-MM-ddTHH:mmZ)
     formatted_date = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%MZ")
 
@@ -206,35 +174,10 @@ def get_chat_system_prompt(
         "{{CONVERSATIONAL_AGENT_SERVICE_PREFIX_userSettingsPrompt}}",
         _get_user_settings_template(user_settings),
     )
-    prompt = prompt.replace(
-        "{{CONVERSATIONAL_AGENT_SERVICE_PREFIX_citationFormatPrompt}}",
-        _get_citation_format_prompt(citation_type),
-    )
-    prompt = prompt.replace(
-        "{{CONVERSATIONAL_AGENT_SERVICE_PREFIX_citationExamplePrompt}}",
-        _get_citation_example_prompt(citation_type),
-    )
 
     return prompt
 
 
-def _get_citation_type(model: str) -> CitationType:
-    """Determine the citation type based on the agent's model.
-
-    GPT models use trailing citations due to issues with generating
-    wrapped citations around text.
-
-    Args:
-        model: The model name
-
-    Returns:
-        CitationType.TRAILING for GPT models, CitationType.WRAPPED otherwise
-    """
-    if "gpt" in model.lower():
-        return CitationType.TRAILING
-    return CitationType.WRAPPED
-
-
 def _get_user_settings_template(
     user_settings: PromptUserSettings | None,
 ) -> str:
@@ -259,35 +202,3 @@ def _get_user_settings_template(
     user_settings_json = json.dumps(settings_dict, ensure_ascii=False)
 
     return _USER_CONTEXT_TEMPLATE.format(user_settings_json=user_settings_json)
-
-
-def _get_citation_format_prompt(citation_type: CitationType) -> str:
-    """Get the citation format prompt based on citation type.
-
-    Args:
-        citation_type: The type of citation formatting to use
-
-    Returns:
-        The citation format string or empty string for NONE
-    """
-    if citation_type == CitationType.WRAPPED:
-        return _CITATION_FORMAT_WRAPPED
-    elif citation_type == CitationType.TRAILING:
-        return _CITATION_FORMAT_TRAILING
-    return ""
-
-
-def _get_citation_example_prompt(citation_type: CitationType) -> str:
-    """Get the citation example prompt based on citation type.
-
-    Args:
-        citation_type: The type of citation formatting to use
-
-    Returns:
-        The citation examples string or empty string for NONE
-    """
-    if citation_type == CitationType.WRAPPED:
-        return _CITATION_EXAMPLE_WRAPPED
-    elif citation_type == CitationType.TRAILING:
-        return _CITATION_EXAMPLE_TRAILING
-    return ""
diff --git a/tests/agent/react/test_conversational_prompts.py b/tests/agent/react/test_conversational_prompts.py
index caed3d603..59e991e0b 100644
--- a/tests/agent/react/test_conversational_prompts.py
+++ b/tests/agent/react/test_conversational_prompts.py
@@ -5,14 +5,8 @@
 from datetime import datetime, timezone
 from unittest.mock import patch
 
-import pytest
-
 from uipath.agent.react.conversational_prompts import (
-    CitationType,
     PromptUserSettings,
-    _get_citation_example_prompt,
-    _get_citation_format_prompt,
-    _get_citation_type,
     _get_user_settings_template,
     get_chat_system_prompt,
 )
@@ -155,49 +149,11 @@ def test_generate_system_prompt_unnamed_agent_uses_default(self):
         assert "You are Unnamed Agent." in prompt
 
 
-class TestCitationType:
-    """Tests for citation type determination."""
-
-    @pytest.mark.parametrize(
-        "model",
-        [
-            "claude-3-sonnet",
-            "claude-3-opus",
-            "claude-3-haiku",
-            "gemini-pro",
-            "llama-3",
-            "mistral-large",
-        ],
-    )
-    def test_citation_type_wrapped_for_non_gpt_models(self, model):
-        """Non-GPT models get CitationType.WRAPPED."""
-        citation_type = _get_citation_type(model)
-
-        assert citation_type == CitationType.WRAPPED
-
-    @pytest.mark.parametrize(
-        "model",
-        [
-            "gpt-4",
-            "gpt-4o",
-            "gpt-4o-2024-11-20",
-            "gpt-3.5-turbo",
-            "GPT-4",  # Test case insensitivity
-            "GPT-4O-MINI",
-        ],
-    )
-    def test_citation_type_trailing_for_gpt_models(self, model):
-        """GPT models get CitationType.TRAILING."""
-        citation_type = _get_citation_type(model)
-
-        assert citation_type == CitationType.TRAILING
-
-
-class TestCitationFormatPrompt:
-    """Tests for citation format in generated prompts."""
-
-    def test_citation_format_wrapped_in_prompt(self):
-        """Wrapped citation format appears in prompt for non-GPT models."""
+class TestCitationFormat:
+    """Tests for citation format"""
+
+    def test_citation_format_in_prompt(self):
+        """Self-closing citation format appears in prompt."""
         prompt = get_chat_system_prompt(
             model="claude-3-sonnet",
             system_message=SYSTEM_MESSAGE,
@@ -205,101 +161,44 @@ def test_citation_format_wrapped_in_prompt(self):
             user_settings=None,
         )
 
-        assert "factual claim here" in prompt
-
-    def test_citation_format_trailing_in_prompt(self):
-        """Trailing citation format appears in prompt for GPT models."""
-        prompt = get_chat_system_prompt(
-            model="gpt-4o",
-            system_message=SYSTEM_MESSAGE,
-            agent_name="Test Agent",
-            user_settings=None,
+        assert (
+            ''
+            in prompt
         )
 
-        assert "factual claim here" in prompt
-
-    def test_wrapped_citation_examples_in_prompt(self):
-        """Wrapped citation examples appear for non-GPT models."""
+    def test_citation_format_unified_across_models(self):
+        """Citation format is model-agnostic and consistent across GPT, Claude, etc."""
         prompt = get_chat_system_prompt(
-            model="claude-3-sonnet",
+            model="gpt-4o",
             system_message=SYSTEM_MESSAGE,
             agent_name="Test Agent",
             user_settings=None,
         )
 
-        # Check wrapped example
         assert (
-            'AI adoption is growing'
+            ''
            in prompt
        )
-        # Should NOT contain trailing example pattern
-        assert (
-            'AI adoption is growing'
-            not in prompt
-        )
 
-    def test_trailing_citation_examples_in_prompt(self):
-        """Trailing citation examples appear for GPT models."""
+    def test_citation_examples_in_prompt(self):
"""Citation examples appear in prompt.""" prompt = get_chat_system_prompt( - model="gpt-4o", + model="claude-3-sonnet", system_message=SYSTEM_MESSAGE, agent_name="Test Agent", user_settings=None, ) - # Check trailing example + assert "EXAMPLES OF CORRECT USAGE:" in prompt assert ( - 'AI adoption is growing' + '' in prompt ) - - -class TestGetCitationFormatPrompt: - """Tests for _get_citation_format_prompt helper.""" - - def test_wrapped_format(self): - """Returns wrapped format string.""" - result = _get_citation_format_prompt(CitationType.WRAPPED) - assert "factual claim here" in result - - def test_trailing_format(self): - """Returns trailing format string.""" - result = _get_citation_format_prompt(CitationType.TRAILING) - assert "factual claim here" in result - - def test_none_format(self): - """Returns empty string for NONE type.""" - result = _get_citation_format_prompt(CitationType.NONE) - assert result == "" - - -class TestGetCitationExamplePrompt: - """Tests for _get_citation_example_prompt helper.""" - - def test_wrapped_example(self): - """Returns wrapped example string.""" - result = _get_citation_example_prompt(CitationType.WRAPPED) - assert "EXAMPLES OF CORRECT USAGE:" in result - assert "CRITICAL ERRORS TO AVOID:" in result - assert ( - 'AI adoption is growing' - in result - ) - - def test_trailing_example(self): - """Returns trailing example string.""" - result = _get_citation_example_prompt(CitationType.TRAILING) - assert "EXAMPLES OF CORRECT USAGE:" in result - assert "CRITICAL ERRORS TO AVOID:" in result assert ( - 'AI adoption is growing' - in result + '' + in prompt ) - - def test_none_example(self): - """Returns empty string for NONE type.""" - result = _get_citation_example_prompt(CitationType.NONE) - assert result == "" + assert "CRITICAL ERRORS TO AVOID:" in prompt class TestPromptUserSettings: