diff --git a/tests/test-agent b/tests/test-agent new file mode 100755 index 00000000..4782bbae --- /dev/null +++ b/tests/test-agent @@ -0,0 +1,44 @@ +#!/usr/bin/env python3 + +import json +import textwrap +from trustgraph.clients.agent_client import AgentClient + +def wrap(text, width=75): + + if text is None: text = "n/a" + + out = textwrap.wrap( + text, width=width + ) + return "\n".join(out) + +def output(text, prefix="> ", width=78): + + out = textwrap.indent( + text, prefix=prefix + ) + print(out) + +p = AgentClient(pulsar_host="pulsar://localhost:6650") + +q = "How many cats does Mark have? Calculate that number raised to 0.4 power. Is that number lower than the numeric part of the mission identifier of the Space Shuttle Challenger on its last mission? If so, give me an apple pie recipe, otherwise return a poem about cheese." + +output(wrap(q), "\U00002753 ") +print() + +def think(x): + output(wrap(x), "\U0001f914 ") + print() + +def observe(x): + output(wrap(x), "\U0001f4a1 ") + print() + +resp = p.request( + question=q, think=think, observe=observe, +) + +output(resp, "\U0001f4ac ") +print() + diff --git a/tests/test-llm b/tests/test-llm index 7e2c271d..4e86387a 100755 --- a/tests/test-llm +++ b/tests/test-llm @@ -5,9 +5,10 @@ from trustgraph.clients.llm_client import LlmClient llm = LlmClient(pulsar_host="pulsar://localhost:6650") +system = "You are a lovely assistant." 
prompt="Write a funny limerick about a llama" -resp = llm.request(prompt) +resp = llm.request(system, prompt) print(resp) diff --git a/trustgraph-base/trustgraph/base/base_processor.py b/trustgraph-base/trustgraph/base/base_processor.py index 9b2e29ac..f258ff1a 100644 --- a/trustgraph-base/trustgraph/base/base_processor.py +++ b/trustgraph-base/trustgraph/base/base_processor.py @@ -39,8 +39,9 @@ def __init__(self, **params): def __del__(self): - if self.client: - self.client.close() + if hasattr(self, "client"): + if self.client: + self.client.close() @staticmethod def add_args(parser): diff --git a/trustgraph-base/trustgraph/clients/agent_client.py b/trustgraph-base/trustgraph/clients/agent_client.py new file mode 100644 index 00000000..2ef69274 --- /dev/null +++ b/trustgraph-base/trustgraph/clients/agent_client.py @@ -0,0 +1,64 @@ + +import _pulsar + +from .. schema import AgentRequest, AgentResponse +from .. schema import agent_request_queue +from .. schema import agent_response_queue +from . 
base import BaseClient + +# Ugly +ERROR=_pulsar.LoggerLevel.Error +WARN=_pulsar.LoggerLevel.Warn +INFO=_pulsar.LoggerLevel.Info +DEBUG=_pulsar.LoggerLevel.Debug + +class AgentClient(BaseClient): + + def __init__( + self, log_level=ERROR, + subscriber=None, + input_queue=None, + output_queue=None, + pulsar_host="pulsar://pulsar:6650", + ): + + if input_queue is None: input_queue = agent_request_queue + if output_queue is None: output_queue = agent_response_queue + + super(AgentClient, self).__init__( + log_level=log_level, + subscriber=subscriber, + input_queue=input_queue, + output_queue=output_queue, + pulsar_host=pulsar_host, + input_schema=AgentRequest, + output_schema=AgentResponse, + ) + + def request( + self, + question, + think=None, + observe=None, + timeout=300 + ): + + def inspect(x): + + if x.thought and think: + think(x.thought) + return + + if x.observation and observe: + observe(x.observation) + return + + if x.answer: + return True + + return False + + return self.call( + question=question, inspect=inspect, timeout=timeout + ).answer + diff --git a/trustgraph-base/trustgraph/clients/base.py b/trustgraph-base/trustgraph/clients/base.py index 726b57df..78116f41 100644 --- a/trustgraph-base/trustgraph/clients/base.py +++ b/trustgraph-base/trustgraph/clients/base.py @@ -59,10 +59,14 @@ def __init__( def call(self, **args): timeout = args.get("timeout", DEFAULT_TIMEOUT) + inspect = args.get("inspect", lambda x: True) if "timeout" in args: del args["timeout"] + if "inspect" in args: + del args["inspect"] + id = str(uuid.uuid4()) r = self.input_schema(**args) @@ -103,6 +107,10 @@ def call(self, **args): f"{value.error.type}: {value.error.message}" ) + complete = inspect(value) + + if not complete: continue + resp = msg.value() self.consumer.acknowledge(msg) return resp diff --git a/trustgraph-base/trustgraph/schema/__init__.py b/trustgraph-base/trustgraph/schema/__init__.py index 7f0334be..3196691b 100644 --- a/trustgraph-base/trustgraph/schema/__init__.py 
+++ b/trustgraph-base/trustgraph/schema/__init__.py @@ -8,4 +8,5 @@ from . graph import * from . retrieval import * from . metadata import * +from . agent import * diff --git a/trustgraph-base/trustgraph/schema/agent.py b/trustgraph-base/trustgraph/schema/agent.py new file mode 100644 index 00000000..9bcdde51 --- /dev/null +++ b/trustgraph-base/trustgraph/schema/agent.py @@ -0,0 +1,37 @@ + +from pulsar.schema import Record, String, Array, Map + +from . topic import topic +from . types import Error + +############################################################################ + +# Prompt services, abstract the prompt generation + +class AgentStep(Record): + thought = String() + action = String() + arguments = Map(String()) + observation = String() + +class AgentRequest(Record): + question = String() + plan = String() + state = String() + history = Array(AgentStep()) + +class AgentResponse(Record): + answer = String() + error = Error() + thought = String() + observation = String() + +agent_request_queue = topic( + 'agent', kind='non-persistent', namespace='request' +) +agent_response_queue = topic( + 'agent', kind='non-persistent', namespace='response' +) + +############################################################################ + diff --git a/trustgraph-cli/scripts/tg-invoke-agent b/trustgraph-cli/scripts/tg-invoke-agent new file mode 100755 index 00000000..3f05071c --- /dev/null +++ b/trustgraph-cli/scripts/tg-invoke-agent @@ -0,0 +1,123 @@ +#!/usr/bin/env python3 + +""" +Uses the GraphRAG service to answer a query +""" + +import argparse +import os +import textwrap + +from trustgraph.clients.agent_client import AgentClient + +default_pulsar_host = os.getenv("PULSAR_HOST", 'pulsar://localhost:6650') +default_user = 'trustgraph' +default_collection = 'default' + +def wrap(text, width=75): + if text is None: text = "n/a" + out = textwrap.wrap( + text, width=width + ) + return "\n".join(out) + +def output(text, prefix="> ", width=78): + out = textwrap.indent( + 
text, prefix=prefix + ) + print(out) + +def query( + pulsar_host, query, user, collection, + plan=None, state=None, verbose=False +): + + am = AgentClient(pulsar_host=pulsar_host) + + if verbose: + output(wrap(query), "\U00002753 ") + print() + + def think(x): + if verbose: + output(wrap(x), "\U0001f914 ") + print() + + def observe(x): + if verbose: + output(wrap(x), "\U0001f4a1 ") + print() + + resp = am.request( + question=query, think=think, observe=observe, + ) + + print(resp) + +def main(): + + parser = argparse.ArgumentParser( + prog='tg-invoke-agent', + description=__doc__, + ) + + parser.add_argument( + '-p', '--pulsar-host', + default=default_pulsar_host, + help=f'Pulsar host (default: {default_pulsar_host})', + ) + + parser.add_argument( + '-q', '--query', + required=True, + help=f'Query to execute', + ) + + parser.add_argument( + '-u', '--user', + default=default_user, + help=f'User ID (default: {default_user})' + ) + + parser.add_argument( + '-c', '--collection', + default=default_collection, + help=f'Collection ID (default: {default_collection})' + ) + + parser.add_argument( + '-l', '--plan', + help=f'Agent plan (default: unspecified)' + ) + + parser.add_argument( + '-s', '--state', + help=f'Agent initial state (default: unspecified)' + ) + + parser.add_argument( + '-v', '--verbose', + action="store_true", + help=f'Output thinking/observations' + ) + + args = parser.parse_args() + + try: + + query( + pulsar_host=args.pulsar_host, + query=args.query, + user=args.user, + collection=args.collection, + plan=args.plan, + state=args.state, + verbose=args.verbose, + ) + + except Exception as e: + + print("Exception:", e, flush=True) + +main() + diff --git a/trustgraph-cli/setup.py b/trustgraph-cli/setup.py index 4189d3f4..13c0b5ca 100644 --- a/trustgraph-cli/setup.py +++ b/trustgraph-cli/setup.py @@ -51,6 +51,7 @@ "scripts/tg-query-graph-rag", "scripts/tg-init-pulsar", "scripts/tg-processor-state", + "scripts/tg-invoke-agent", "scripts/tg-invoke-prompt", 
"scripts/tg-invoke-llm", ] diff --git a/trustgraph-flow/scripts/agent-manager-react b/trustgraph-flow/scripts/agent-manager-react new file mode 100644 index 00000000..b5e060c7 --- /dev/null +++ b/trustgraph-flow/scripts/agent-manager-react @@ -0,0 +1,6 @@ +#!/usr/bin/env python3 + +from trustgraph.agent.react import run + +run() + diff --git a/trustgraph-flow/setup.py b/trustgraph-flow/setup.py index b8cf40b4..efdf59ae 100644 --- a/trustgraph-flow/setup.py +++ b/trustgraph-flow/setup.py @@ -60,6 +60,7 @@ "jsonschema", ], scripts=[ + "scripts/agent-manager-react", "scripts/chunker-recursive", "scripts/chunker-token", "scripts/de-query-milvus", diff --git a/trustgraph-flow/trustgraph/agent/__init__.py b/trustgraph-flow/trustgraph/agent/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/trustgraph-flow/trustgraph/agent/react/README.md b/trustgraph-flow/trustgraph/agent/react/README.md new file mode 100644 index 00000000..dd5cbea5 --- /dev/null +++ b/trustgraph-flow/trustgraph/agent/react/README.md @@ -0,0 +1,19 @@ + +agent-manager-react \ + -p pulsar://localhost:6650 \ + --tool-type \ + shuttle=knowledge-query:query \ + cats=knowledge-query:query \ + compute=text-completion:computation \ + --tool-description \ + shuttle="Query a knowledge base with information about the space shuttle. The query should be a simple natural language question" \ + cats="Query a knowledge base with information about Mark's cats. 
The query should be a simple natural language question" \ + compute="A computation engine which can answer questions about maths and computation" \ + --tool-argument \ + cats="query:string:The search query string" \ + shuttle="query:string:The search query string" \ + compute="computation:string:The computation to solve" + + + --context 'The space shuttle challenger final mission was 58-L' + diff --git a/trustgraph-flow/trustgraph/agent/react/__init__.py b/trustgraph-flow/trustgraph/agent/react/__init__.py new file mode 100644 index 00000000..ba844705 --- /dev/null +++ b/trustgraph-flow/trustgraph/agent/react/__init__.py @@ -0,0 +1,3 @@ + +from . service import * + diff --git a/trustgraph-flow/trustgraph/agent/react/__main__.py b/trustgraph-flow/trustgraph/agent/react/__main__.py new file mode 100755 index 00000000..e9136855 --- /dev/null +++ b/trustgraph-flow/trustgraph/agent/react/__main__.py @@ -0,0 +1,7 @@ +#!/usr/bin/env python3 + +from . service import run + +if __name__ == '__main__': + run() + diff --git a/trustgraph-flow/trustgraph/agent/react/agent_manager.py b/trustgraph-flow/trustgraph/agent/react/agent_manager.py new file mode 100644 index 00000000..11b258a6 --- /dev/null +++ b/trustgraph-flow/trustgraph/agent/react/agent_manager.py @@ -0,0 +1,200 @@ + +import ibis +import logging +import json + +from . types import Action, Final + +logger = logging.getLogger(__name__) + +class AgentManager: + + template="""Answer the following questions as best you can. You have +access to the following functions: + +{% for tool in tools %}{ + "function": "{{ tool.name }}", + "description": "{{ tool.description }}", + "arguments": [ +{% for arg in tool.arguments %} { + "name": "{{ arg.name }}", + "type": "{{ arg.type }}", + "description": "{{ arg.description }}", + } +{% endfor %} + ] +} +{% endfor %} + +You can either choose to call a function to get more information, or +return a final answer. 
+ +To call a function, respond with a JSON object of the following format: + +{ + "thought": "your thought about what to do", + "action": "the action to take, should be one of [{{tool_names}}]", + "arguments": { + "argument1": "argument_value", + "argument2": "argument_value" + } +} + +To provide a final answer, response a JSON object of the following format: + +{ + "thought": "I now know the final answer", + "final-answer": "the final answer to the original input question" +} + +Previous steps are included in the input. Each step has the following +format in your output: + +{ + "thought": "your thought about what to do", + "action": "the action taken", + "arguments": { + "argument1": action argument, + "argument2": action argument2 + }, + "observation": "the result of the action", +} + +Respond by describing either one single thought/action/arguments or +the final-answer. Pause after providing one action or final-answer. + +{% if context %}Additional context has been provided: +{{context}}{% endif %} + +Question: {{question}} + +Input: + +{% for h in history %} +{ + "action": "{{h.action}}", + "arguments": [ +{% for k, v in h.arguments.items() %} { + "{{k}}": "{{v}}", +{%endfor%} } + ], + "observation": "{{h.observation}}" +} +{% endfor %}""" + + def __init__(self, context, tools, additional_context=None): + self.context = context + self.tools = tools + self.additional_context = additional_context + + def reason(self, question, history): + + tpl = ibis.Template(self.template) + + tools = self.tools + + tool_names = ",".join([ + t for t in self.tools.keys() + ]) + + prompt = tpl.render({ + "tools": [ + { + "name": tool.name, + "description": tool.description, + "arguments": [ + { + "name": arg.name, + "type": arg.type, + "description": arg.description + } + for arg in tool.arguments.values() + ] + } + for tool in self.tools.values() + ], + "context": self.additional_context, + "question": question, + "tool_names": tool_names, + "history": [ + { + "thought": 
h.thought, + "action": h.name, + "arguments": h.arguments, + "observation": h.observation, + } + for h in history + ], + }) + + print(prompt) + + logger.info(f"prompt: {prompt}") + + resp = self.context.prompt.request( + "question", + { + "question": prompt + } + ) + + resp = resp.replace("```json", "") + resp = resp.replace("```", "") + + logger.info(f"response: {resp}") + + obj = json.loads(resp) + + if obj.get("final-answer"): + + a = Final( + thought = obj.get("thought"), + final = obj.get("final-answer"), + ) + + return a + + else: + + a = Action( + thought = obj.get("thought"), + name = obj.get("action"), + arguments = obj.get("arguments"), + observation = "" + ) + + return a + + def react(self, question, history, think, observe): + + act = self.reason(question, history) + logger.info(f"act: {act}") + + if isinstance(act, Final): + + think(act.thought) + return act + + else: + + think(act.thought) + + if act.name in self.tools: + action = self.tools[act.name] + else: + raise RuntimeError(f"No action for {act.name}!") + + resp = action.implementation.invoke(**act.arguments) + + resp = resp.strip() + + logger.info(f"resp: {resp}") + + observe(resp) + + act.observation = resp + + logger.info(f"iter: {act}") + + return act + diff --git a/trustgraph-flow/trustgraph/agent/react/service.py b/trustgraph-flow/trustgraph/agent/react/service.py new file mode 100755 index 00000000..f367d10f --- /dev/null +++ b/trustgraph-flow/trustgraph/agent/react/service.py @@ -0,0 +1,400 @@ +""" +Simple agent infrastructure broadly implements the ReAct flow. +""" + +import json +import re +import sys + +from pulsar.schema import JsonSchema + +from ... base import ConsumerProducer +from ... schema import Error +from ... schema import AgentRequest, AgentResponse, AgentStep +from ... schema import agent_request_queue, agent_response_queue +from ... schema import prompt_request_queue as pr_request_queue +from ... schema import prompt_response_queue as pr_response_queue +from ... 
schema import text_completion_request_queue as tc_request_queue +from ... schema import text_completion_response_queue as tc_response_queue +from ... schema import graph_rag_request_queue as gr_request_queue +from ... schema import graph_rag_response_queue as gr_response_queue +from ... clients.prompt_client import PromptClient +from ... clients.llm_client import LlmClient +from ... clients.graph_rag_client import GraphRagClient + +from . tools import KnowledgeQueryImpl, TextCompletionImpl +from . agent_manager import AgentManager + +from . types import Final, Action, Tool, Argument + +module = ".".join(__name__.split(".")[1:-1]) + +default_input_queue = agent_request_queue +default_output_queue = agent_response_queue +default_subscriber = module + +class Processor(ConsumerProducer): + + def __init__(self, **params): + + additional = params.get("context", None) + + tools = {} + + # Parsing the prompt information to the prompt configuration + # structure + tool_type_arg = params.get("tool_type", []) + if tool_type_arg: + for t in tool_type_arg: + toks = t.split("=", 1) + if len(toks) < 2: + raise RuntimeError( + f"Tool-type string not well-formed: {t}" + ) + ttoks = toks[1].split(":", 1) + if len(ttoks) < 1: + raise RuntimeError( + f"Tool-type string not well-formed: {t}" + ) + + if ttoks[0] == "knowledge-query": + impl = KnowledgeQueryImpl(self) + elif ttoks[0] == "text-completion": + impl = TextCompletionImpl(self) + else: + raise RuntimeError( + f"Tool-kind {ttoks[0]} not known" + ) + + if len(ttoks) == 1: + tools[toks[0]] = Tool( + name = ttoks[0], + description = "", + implementation = impl, + config = { "input": "query" }, + arguments = {}, + ) + else: + tools[toks[0]] = Tool( + name = ttoks[0], + description = "", + implementation = impl, + config = { "input": ttoks[1] }, + arguments = {}, + ) + + # parsing the prompt information to the prompt configuration + # structure + tool_desc_arg = params.get("tool_description", []) + if tool_desc_arg: + for t in 
tool_desc_arg: + toks = t.split("=", 1) + if len(toks) < 2: + raise RuntimeError( + f"Tool-description string not well-formed: {t}" + ) + if toks[0] not in tools: + raise RuntimeError(f"Description, tool {toks[0]} not known") + tools[toks[0]].description = toks[1] + + # Parsing the prompt information to the prompt configuration + # structure + tool_arg_arg = params.get("tool_argument", []) + if tool_arg_arg: + for t in tool_arg_arg: + toks = t.split("=", 1) + if len(toks) < 2: + raise RuntimeError( + f"Tool-type string not well-formed: {t}" + ) + ttoks = toks[1].split(":", 2) + if len(ttoks) != 3: + raise RuntimeError( + f"Tool argument string not well-formed: {t}" + ) + if toks[0] not in tools: + raise RuntimeError(f"Description, tool {toks[0]} not known") + tools[toks[0]].arguments[ttoks[0]] = Argument( + name = ttoks[0], + type = ttoks[1], + description = ttoks[2] + ) + + input_queue = params.get("input_queue", default_input_queue) + output_queue = params.get("output_queue", default_output_queue) + subscriber = params.get("subscriber", default_subscriber) + prompt_request_queue = params.get( + "prompt_request_queue", pr_request_queue + ) + prompt_response_queue = params.get( + "prompt_response_queue", pr_response_queue + ) + text_completion_request_queue = params.get( + "text_completion_request_queue", tc_request_queue + ) + text_completion_response_queue = params.get( + "text_completion_response_queue", tc_response_queue + ) + graph_rag_request_queue = params.get( + "graph_rag_request_queue", gr_request_queue + ) + graph_rag_response_queue = params.get( + "graph_rag_response_queue", gr_response_queue + ) + + super(Processor, self).__init__( + **params | { + "input_queue": input_queue, + "output_queue": output_queue, + "subscriber": subscriber, + "input_schema": AgentRequest, + "output_schema": AgentResponse, + "prompt_request_queue": prompt_request_queue, + "prompt_response_queue": prompt_response_queue, + "text_completion_request_queue": text_completion_request_queue, +
"text_completion_response_queue": text_completion_response_queue, + "graph_rag_request_queue": graph_rag_request_queue, + "graph_rag_response_queue": graph_rag_response_queue, + } + ) + + self.prompt = PromptClient( + subscriber=subscriber, + input_queue=prompt_request_queue, + output_queue=prompt_response_queue, + pulsar_host = self.pulsar_host + ) + + self.llm = LlmClient( + subscriber=subscriber, + input_queue=text_completion_request_queue, + output_queue=text_completion_response_queue, + pulsar_host = self.pulsar_host + ) + + self.graph_rag = GraphRagClient( + subscriber=subscriber, + input_queue=graph_rag_request_queue, + output_queue=graph_rag_response_queue, + pulsar_host = self.pulsar_host + ) + + # Need to be able to feed requests to myself + self.recursive_input = self.client.create_producer( + topic=input_queue, + schema=JsonSchema(AgentRequest), + ) + + self.agent = AgentManager( + context=self, + tools=tools, + additional_context=additional + ) + + def parse_json(self, text): + json_match = re.search(r'```(?:json)?(.*?)```', text, re.DOTALL) + + if json_match: + json_str = json_match.group(1).strip() + else: + # If no delimiters, assume the entire output is JSON + json_str = text.strip() + + return json.loads(json_str) + + def handle(self, msg): + + try: + + v = msg.value() + + # Sender-produced ID + id = msg.properties()["id"] + + if v.history: + history = [ + Action( + thought=h.thought, + name=h.action, + arguments=h.arguments, + observation=h.observation + ) + for h in v.history + ] + else: + history = [] + + print(f"Question: {v.question}", flush=True) + + if len(history) > 10: + raise RuntimeError("Too many agent iterations") + + print(f"History: {history}", flush=True) + + def think(x): + + print(f"Think: {x}", flush=True) + + r = AgentResponse( + answer=None, + error=None, + thought=x, + observation=None, + ) + + self.producer.send(r, properties={"id": id}) + + def observe(x): + + print(f"Observe: {x}", flush=True) + + r = AgentResponse( + answer=None, + error=None, + 
thought=None, + observation=x, + ) + + self.producer.send(r, properties={"id": id}) + + act = self.agent.react(v.question, history, think, observe) + + print(f"Action: {act}", flush=True) + + print("Send response...", flush=True) + + if type(act) == Final: + + r = AgentResponse( + answer=act.final, + error=None, + thought=None, + ) + + self.producer.send(r, properties={"id": id}) + + print("Done.", flush=True) + + return + + history.append(act) + + r = AgentRequest( + question=v.question, + plan=v.plan, + state=v.state, + history=[ + AgentStep( + thought=h.thought, + action=h.name, + arguments=h.arguments, + observation=h.observation + ) + for h in history + ] + ) + + self.recursive_input.send(r, properties={"id": id}) + + print("Done.", flush=True) + + return + + except Exception as e: + + print(f"Exception: {e}") + + print("Send error response...", flush=True) + + r = AgentResponse( + error=Error( + type = "agent-error", + message = str(e), + ), + answer=None, + ) + + self.producer.send(r, properties={"id": id}) + + @staticmethod + def add_args(parser): + + ConsumerProducer.add_args( + parser, default_input_queue, default_subscriber, + default_output_queue, + ) + + parser.add_argument( + '--prompt-request-queue', + default=pr_request_queue, + help=f'Prompt request queue (default: {pr_request_queue})', + ) + + parser.add_argument( + '--prompt-response-queue', + default=pr_response_queue, + help=f'Prompt response queue (default: {pr_response_queue})', + ) + + parser.add_argument( + '--text-completion-request-queue', + default=tc_request_queue, + help=f'Text completion request queue (default: {tc_request_queue})', + ) + + parser.add_argument( + '--text-completion-response-queue', + default=tc_response_queue, + help=f'Text completion response queue (default: {tc_response_queue})', + ) + + parser.add_argument( + '--graph-rag-request-queue', + default=gr_request_queue, + help=f'Graph RAG request queue (default: {gr_request_queue})', + ) + + parser.add_argument( + 
'--graph-rag-response-queue', + default=gr_response_queue, + help=f'Graph RAG response queue (default: {gr_response_queue})', + ) + + parser.add_argument( + '--tool-type', nargs='*', + help=f'''Specifies the type of an agent tool. Takes the form +=. is the name of the tool. is one of +knowledge-query, text-completion. Additional parameters are specified +for different tools which are tool-specific. e.g. knowledge-query: +which specifies the name of the arg whose content is fed into the knowledge +query as a question. text-completion: specifies the name of the arg +whose content is fed into the text-completion service as a prompt''' + ) + + parser.add_argument( + '--tool-description', nargs='*', + help=f'''Specifies the textual description of a tool. Takes +the form =. The description is important, it teaches the +LLM how to use the tool. It should describe what it does and how to +use the arguments. This is specified in natural language.''' + ) + + parser.add_argument( + '--tool-argument', nargs='*', + help=f'''Specifies argument usage for a tool. Takes +the form =::. The description is important, +it is read by the LLM and used to determine how to use the argument. + can be specified multiple times to give a tool multiple arguments. + is one of string, number. is a natural language +description.''' + ) + + parser.add_argument( + '--context', + help=f'Optional, specifies additional context text for the LLM.' 
+ ) + +def run(): + + Processor.start(module, __doc__) + diff --git a/trustgraph-flow/trustgraph/agent/react/tools.py b/trustgraph-flow/trustgraph/agent/react/tools.py new file mode 100644 index 00000000..d9bc846f --- /dev/null +++ b/trustgraph-flow/trustgraph/agent/react/tools.py @@ -0,0 +1,19 @@ + +# This tool implementation knows how to put a question to the graph RAG +# service +class KnowledgeQueryImpl: + def __init__(self, context): + self.context = context + def invoke(self, **arguments): + return self.context.graph_rag.request(arguments.get("query")) + +# This tool implementation knows how to do text completion. This uses +# the prompt service, rather than talking to TextCompletion directly. +class TextCompletionImpl: + def __init__(self, context): + self.context = context + def invoke(self, **arguments): + return self.context.prompt.request( + "question", { "question": arguments.get("computation") } + ) + diff --git a/trustgraph-flow/trustgraph/agent/react/types.py b/trustgraph-flow/trustgraph/agent/react/types.py new file mode 100644 index 00000000..7180db3e --- /dev/null +++ b/trustgraph-flow/trustgraph/agent/react/types.py @@ -0,0 +1,30 @@ + +import dataclasses +from typing import Any, Dict + +@dataclasses.dataclass +class Argument: + name : str + type : str + description : str + +@dataclasses.dataclass +class Tool: + name : str + description : str + arguments : list[Argument] + implementation : Any + config : Dict[str, str] + +@dataclasses.dataclass +class Action: + thought : str + name : str + arguments : dict + observation : str + +@dataclasses.dataclass +class Final: + thought : str + final : str +