Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
23 commits
Select commit Hold shift + click to select a range
d6d2241
Starting basic building blocks for anthropic module to utilize their …
coretado Mar 3, 2025
843ab0d
adding anthropic module post lint
coretado Mar 3, 2025
5c33b1a
clearing mypy failures; not enamored with the pass through with the a…
coretado Mar 7, 2025
286ac57
expanding generate function for chat completion using Anthropic SDK m…
coretado Mar 9, 2025
3327eb4
adding initial implementation for achat
coretado Mar 13, 2025
fde438d
common function usage for streaming helpers
coretado Mar 13, 2025
46752d1
merging mainline branch
coretado Mar 14, 2025
3d22914
rounding out generate call and agenerate call by leveraging chat if l…
coretado Mar 14, 2025
90d81c9
adding common caching functions and adding to anthropic integration, …
coretado Mar 16, 2025
7647b36
adding test for anthropic on chat and completion using chat in test_l…
coretado Mar 17, 2025
11c2876
adding context length test and adding information for anthropic expor…
coretado Mar 18, 2025
1d9eca0
merging main
coretado Mar 18, 2025
48f8d07
fixing streaming bug with completion value, checkpointing work for a…
coretado Mar 19, 2025
6850115
fixing function call, async llm test can now pass on all permutations
coretado Mar 20, 2025
0c3d1ca
finishing async llm tests, next commit will make llm and async llm me…
coretado Mar 22, 2025
0366023
fixing error in pytest configuration when looking for non-existent param
coretado Mar 22, 2025
1e91cb9
adding tests for test_chat_agent.py, making necessary changes to allo…
coretado Mar 26, 2025
5e1b73e
adding tests for async chat agent
coretado Mar 28, 2025
d49eb2d
ongoing process for testing tool messages; system prompt from OAI typ…
coretado Mar 31, 2025
59857ce
adding tool message test for recovery from errant tool usage
coretado Apr 3, 2025
6eb4760
adding more tests to tool messages
coretado Apr 4, 2025
a85d128
adding tests for test tool messages async; haiku needed a couple of n…
coretado Apr 5, 2025
5ce375d
merging mainline branch
coretado Apr 6, 2025
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion .pre-commit-config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -3,4 +3,4 @@ repos:
# Ruff version.
rev: v0.11.4
hooks:
- id: ruff
- id: ruff
19 changes: 15 additions & 4 deletions langroid/agent/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -35,6 +35,7 @@
from langroid.agent.xml_tool_message import XMLToolMessage
from langroid.exceptions import XMLException
from langroid.language_models.base import (
AnthropicToolCall,
LanguageModel,
LLMConfig,
LLMFunctionCall,
Expand Down Expand Up @@ -139,6 +140,10 @@ class Agent(ABC):
# Index of ALL tool calls generated by the agent
oai_tool_id2call: Dict[str, OpenAIToolCall] = {}

# anthropic tool calls
ant_tool_calls: List[AnthropicToolCall] = []
ant_tool_id2call: Dict[str, AnthropicToolCall] = {}

def __init__(self, config: AgentConfig = AgentConfig()):
self.config = config
self.lock = asyncio.Lock() # for async access to update self.llm.usage_cost
Expand Down Expand Up @@ -1168,6 +1173,7 @@ def get_tool_messages(
msg.content != ""
and msg.oai_tool_calls is None
and msg.function_call is None
and msg.ant_tool_calls is None
):

tools = self.get_formatted_tool_messages(
Expand All @@ -1184,7 +1190,7 @@ def get_tool_messages(
return my_tools

# otherwise, we look for `tool_calls` (possibly multiple)
tools = self.get_oai_tool_calls_classes(msg)
tools = self.get_tool_calls_classes(msg)
msg.all_tool_messages = tools
my_tools = [t for t in tools if self._tool_recipient_match(t)]
msg.tool_messages = my_tools
Expand Down Expand Up @@ -1270,17 +1276,22 @@ def get_function_call_class(self, msg: ChatDocument) -> Optional[ToolMessage]:
tool = tool_class.parse_obj(tool_msg)
return tool

def get_oai_tool_calls_classes(self, msg: ChatDocument) -> List[ToolMessage]:
def get_tool_calls_classes(self, msg: ChatDocument) -> List[ToolMessage]:
"""
From ChatDocument (constructed from an LLM Response), get
a list of ToolMessages corresponding to the `tool_calls`, if any.
"""

if msg.oai_tool_calls is None:
if msg.oai_tool_calls is None and msg.ant_tool_calls is None:
return []
tools = []
all_errors = True
for tc in msg.oai_tool_calls:

tools_to_iterate = (
msg.oai_tool_calls if msg.oai_tool_calls else msg.ant_tool_calls
) or []
Comment on lines +1290 to +1292
Copy link
Preview

Copilot AI Jun 19, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

[nitpick] Using a fallback that checks only one collection may lead to overlooking cases where both oai_tool_calls and ant_tool_calls are present. It would be beneficial to review and possibly merge both lists or clearly document the intended priority.

Suggested change
tools_to_iterate = (
msg.oai_tool_calls if msg.oai_tool_calls else msg.ant_tool_calls
) or []
tools_to_iterate = []
if msg.oai_tool_calls:
tools_to_iterate.extend(msg.oai_tool_calls)
if msg.ant_tool_calls:
tools_to_iterate.extend(msg.ant_tool_calls)

Copilot uses AI. Check for mistakes.


for tc in tools_to_iterate:
if tc.function is None:
continue
tool_name = tc.function.name
Expand Down
66 changes: 56 additions & 10 deletions langroid/agent/chat_agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -557,6 +557,16 @@ def delete_last_message(self, role: str = Role.USER) -> None:
self.message_history.pop(i)
break

def _common_system_and_tools_message(self) -> str:
content = self.system_message
if self.system_tool_instructions != "":
content += "\n\n" + self.system_tool_instructions
if self.system_tool_format_instructions != "":
content += "\n\n" + self.system_tool_format_instructions
if self.output_format_instructions != "":
content += "\n\n" + self.output_format_instructions
return content

def _create_system_and_tools_message(self) -> LLMMessage:
"""
(Re-)Create the system message for the LLM of the agent,
Expand All @@ -572,17 +582,27 @@ def _create_system_and_tools_message(self) -> LLMMessage:
Returns:
LLMMessage object
"""
content = self.system_message
if self.system_tool_instructions != "":
content += "\n\n" + self.system_tool_instructions
if self.system_tool_format_instructions != "":
content += "\n\n" + self.system_tool_format_instructions
if self.output_format_instructions != "":
content += "\n\n" + self.output_format_instructions
content = self._common_system_and_tools_message()

# remove leading and trailing newlines and other whitespace
return LLMMessage(role=Role.SYSTEM, content=content.strip())

def _update_anthropic_configuration(self) -> LLMMessage:
    """Build the instruction message for Anthropic models.

    Anthropic's prescribed way of passing instructions is via a
    User-role message rather than a System-role one; the system
    configuration only describes background context for responses.

    Returns:
        LLMMessage: a USER-role message carrying the stripped combined
        instructions, or a default prompt when there are none.
        NOTE(review): a whitespace-only instruction string yields empty
        content rather than the fallback — confirm that is intended.
    """
    assert self.llm and self.llm.config.type == "anthropic"
    instructions = self._common_system_and_tools_message()
    if instructions:
        body = instructions.strip()
    else:
        body = "Please respond in a succinct manner."
    return LLMMessage(role=Role.USER, content=body)

def handle_message_fallback(self, msg: str | ChatDocument) -> Any:
"""
Fallback method for the "no-tools" scenario.
Expand Down Expand Up @@ -1279,6 +1299,9 @@ def llm_response(
and self.output_format is None
and self._json_schema_available()
and self.config.strict_recovery
or self.tool_error
and self.output_format is None
and self.config.strict_recovery
):
self.tool_error = False
AnyTool = self._get_any_tool_message()
Expand Down Expand Up @@ -1443,7 +1466,12 @@ def init_message_history(self) -> None:
"""
Initialize the message history with the system message and user message
"""
self.message_history = [self._create_system_and_tools_message()]
assert self.llm
if self.llm.config.type == "anthropic":
self.message_history = [self._update_anthropic_configuration()]
else:
self.message_history = [self._create_system_and_tools_message()]

if self.user_message:
self.message_history.append(
LLMMessage(role=Role.USER, content=self.user_message)
Expand Down Expand Up @@ -1493,6 +1521,9 @@ def _prep_llm_messages(
[/grey37]
"""
)
elif self.llm.config.type == "anthropic":
assert self.message_history[0].role == Role.USER
self.message_history[0] = self._update_anthropic_configuration()
else:
assert self.message_history[0].role == Role.SYSTEM
# update the system message with the latest tool instructions
Expand All @@ -1501,11 +1532,21 @@ def _prep_llm_messages(
if message is not None:
if (
isinstance(message, str)
or message.id() != self.message_history[-1].chat_document_id
or self.message_history
and message.id() != self.message_history[-1].chat_document_id
or isinstance(message, ChatDocument)
and self.llm.config.type == "anthropic"
):
# either the message is a str, or it is a fresh ChatDocument
# different from the last message in the history
llm_msgs = ChatDocument.to_LLMMessage(message, self.oai_tool_calls)
if self.llm.config.type == "anthropic":
llm_msgs = ChatDocument.to_LLMMessage(
message, anthropic_tools=self.ant_tool_calls
)
else:
llm_msgs = ChatDocument.to_LLMMessage(
message, oai_tools=self.oai_tool_calls
)
# LLM only responds to the content, so only those msgs with
# non-empty content should be kept
llm_msgs = [m for m in llm_msgs if m.content.strip() != ""]
Expand All @@ -1516,6 +1557,9 @@ def _prep_llm_messages(
self.oai_tool_calls = [
t for t in self.oai_tool_calls if t.id not in done_tools
]
self.ant_tool_calls = [
t for t in self.ant_tool_calls if t.id not in done_tools
]
self.message_history.extend(llm_msgs)

hist = self.message_history
Expand Down Expand Up @@ -1756,6 +1800,8 @@ def llm_response_messages(
self.oai_tool_id2call.update(
{t.id: t for t in self.oai_tool_calls if t.id is not None}
)
self.ant_tool_calls = response.ant_tool_calls or []
self.ant_tool_id2call.update({t.id: t for t in self.ant_tool_calls if t.id})

# If using strict output format, parse the output JSON
self._load_output_format(chat_doc)
Expand Down
15 changes: 14 additions & 1 deletion langroid/agent/chat_document.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,7 @@
from langroid.agent.tool_message import ToolMessage
from langroid.agent.xml_tool_message import XMLToolMessage
from langroid.language_models.base import (
AnthropicToolCall,
LLMFunctionCall,
LLMMessage,
LLMResponse,
Expand Down Expand Up @@ -123,6 +124,8 @@ class ChatDocument(Document):
oai_tool_id2result: Optional[OrderedDict[str, str]] = None
oai_tool_choice: ToolChoiceTypes | Dict[str, Dict[str, str] | str] = "auto"
function_call: Optional[LLMFunctionCall] = None
ant_tool_calls: Optional[List[AnthropicToolCall]] = None
ant_tool_id2result: Optional[OrderedDict[str, str]] = None
# tools that are explicitly added by agent response/handler,
# or tools recognized in the ChatDocument as handle-able tools
tool_messages: List[ToolMessage] = []
Expand Down Expand Up @@ -296,12 +299,16 @@ def from_LLMResponse(
# there must be at least one if it's not None
for oai_tc in response.oai_tool_calls:
ChatDocument._clean_fn_call(oai_tc.function)
if response.ant_tool_calls:
for ant_tc in response.ant_tool_calls:
ChatDocument._clean_fn_call(ant_tc.function)
return ChatDocument(
content=message,
reasoning=response.reasoning,
content_any=message,
oai_tool_calls=response.oai_tool_calls,
function_call=response.function_call,
ant_tool_calls=response.ant_tool_calls,
metadata=ChatDocMetaData(
source=Entity.LLM,
sender=Entity.LLM,
Expand Down Expand Up @@ -334,6 +341,7 @@ def from_str(msg: str) -> "ChatDocument":
def to_LLMMessage(
message: Union[str, "ChatDocument"],
oai_tools: Optional[List[OpenAIToolCall]] = None,
anthropic_tools: Optional[List[AnthropicToolCall]] = None,
) -> List[LLMMessage]:
"""
Convert to list of LLMMessage, to incorporate into msg-history sent to LLM API.
Expand All @@ -352,12 +360,14 @@ def to_LLMMessage(
sender_role = Role.USER
fun_call = None
oai_tool_calls = None
ant_tool_calls = None
tool_id = "" # for OpenAI Assistant
chat_document_id: str = ""
if isinstance(message, ChatDocument):
content = message.content or to_string(message.content_any) or ""
fun_call = message.function_call
oai_tool_calls = message.oai_tool_calls
ant_tool_calls = message.ant_tool_calls
if message.metadata.sender == Entity.USER and fun_call is not None:
# This may happen when a (parent agent's) LLM generates a
# a Function-call, and it ends up being sent to the current task's
Expand All @@ -372,6 +382,9 @@ def to_LLMMessage(
# same reasoning as for function-call above
content += " " + "\n\n".join(str(tc) for tc in oai_tool_calls)
oai_tool_calls = None
if message.metadata.sender == Entity.USER and ant_tool_calls is not None:
content += " " + "\n\n".join(str(tc) for tc in ant_tool_calls)
ant_tool_calls = None
sender_name = message.metadata.sender_name
tool_ids = message.metadata.tool_ids
tool_id = tool_ids[-1] if len(tool_ids) > 0 else ""
Expand Down Expand Up @@ -445,7 +458,7 @@ def to_LLMMessage(
tool_id=tool_id, # for OpenAI Assistant
content=content,
function_call=fun_call,
tool_calls=oai_tool_calls,
tool_calls=oai_tool_calls if oai_tool_calls else ant_tool_calls,
Copy link
Preview

Copilot AI Jun 19, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

[nitpick] The merging logic for tool_calls prioritizes oai_tool_calls over ant_tool_calls outright. Consider explicitly handling the scenario where both lists are populated (e.g. merging them) to avoid unintentionally dropping valid Anthropic tool calls.

Copilot uses AI. Check for mistakes.

name=sender_name,
chat_document_id=chat_document_id,
)
Expand Down
6 changes: 6 additions & 0 deletions langroid/language_models/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@
from . import openai_gpt
from . import azure_openai
from . import prompt_formatter
from . import anthropic

from .base import (
StreamEventType,
Expand All @@ -24,6 +25,7 @@
from .openai_gpt import OpenAIGPTConfig, OpenAIGPT, OpenAICallParams
from .mock_lm import MockLM, MockLMConfig
from .azure_openai import AzureConfig, AzureGPT
from .anthropic import AnthropicLLMConfig, AnthropicCallParams, AnthropicLLM


__all__ = [
Expand Down Expand Up @@ -53,4 +55,8 @@
"AzureGPT",
"MockLM",
"MockLMConfig",
"anthropic",
"AnthropicLLMConfig",
"AnthropicCallParams",
"AnthropicLLM",
]
Loading