diff --git a/trustgraph-flow/trustgraph/agent/react/service.py b/trustgraph-flow/trustgraph/agent/react/service.py
index f367d10f..8799816b 100755
--- a/trustgraph-flow/trustgraph/agent/react/service.py
+++ b/trustgraph-flow/trustgraph/agent/react/service.py
@@ -32,6 +32,7 @@
 default_input_queue = agent_request_queue
 default_output_queue = agent_response_queue
 default_subscriber = module
+default_max_iterations = 15
 
 class Processor(ConsumerProducer):
 
@@ -39,6 +40,8 @@ def __init__(self, **params):
 
         additional = params.get("context", None)
 
+        self.max_iterations = int(params.get("max_iterations", default_max_iterations))
+
         tools = {}
 
         # Parsing the prompt information to the prompt configuration
@@ -67,8 +70,9 @@ def __init__(self, **params):
             )
 
             if len(ttoks) == 1:
+
                 tools[toks[0]] = Tool(
-                    name = ttoks[0],
+                    name = toks[0],
                     description = "",
                     implementation = impl,
                     config = { "input": "query" },
@@ -76,7 +80,7 @@ def __init__(self, **params):
                 )
             else:
                 tools[toks[0]] = Tool(
-                    name = ttoks[0],
+                    name = toks[0],
                     description = "",
                     implementation = impl,
                     config = { "input": ttoks[1] },
@@ -226,7 +230,7 @@ def handle(self, msg):
 
             print(f"Question: {v.question}", flush=True)
 
-            if len(history) > 10:
+            if len(history) >= self.max_iterations:
                 raise RuntimeError("Too many agent iterations")
 
             print(f"History: {history}", flush=True)
@@ -394,6 +398,12 @@ def add_args(parser):
             help=f'Optional, specifies additional context text for the LLM.'
         )
 
+        parser.add_argument(
+            '--max-iterations',
+            default=default_max_iterations,
+            help=f'Maximum number of react iterations (default: {default_max_iterations})',
+        )
+
 def run():
 
     Processor.start(module, __doc__)
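
Note: the sketch below is purely illustrative and is not part of the patch. run_agent and step are hypothetical names; only the iteration guard mirrors the change to handle(), where the run aborts once len(history) reaches self.max_iterations (default 15, overridable with --max-iterations).

    # Illustrative sketch only: run_agent and step are hypothetical names,
    # not part of trustgraph; only the iteration cap mirrors this patch.

    default_max_iterations = 15

    def run_agent(question, step, max_iterations=default_max_iterations):
        history = []
        while True:
            # Same guard as the patched handle(): abort once the history
            # of completed reasoning steps reaches the configured cap.
            if len(history) >= max_iterations:
                raise RuntimeError("Too many agent iterations")
            thought = step(question, history)
            if thought.get("final"):
                return thought["answer"]
            history.append(thought)

    if __name__ == "__main__":
        # Toy step function that needs three iterations before answering.
        def toy_step(question, history):
            if len(history) < 3:
                return {"final": False, "observation": f"step {len(history)}"}
            return {"final": True, "answer": "42"}

        print(run_agent("What is the answer?", toy_step, max_iterations=5))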