LangChain agent with OpenAI LLM using a tool

I’m running the Python 3 code below with openai 1.2.1 and langchain 0.0.150. I’m creating a LangChain agent with an OpenAI model as the LLM and defining a tool for the agent to use to answer a question, following the ReAct framework for agents that use tools. When I run the code I get the error message shown below it. Can you see what the issue is and suggest how to fix it?
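
For reference, here’s a quick way to confirm the versions actually installed in the environment (both packages expose __version__):

import openai, langchain
print(openai.__version__)    # 1.2.1
print(langchain.__version__) # 0.0.150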

code:

from scipy.stats import ttest_ind_from_stats

import os

from config import openai_apikey

os.environ['OPENAI_API_KEY'] = openai_apikey


from langchain.agents import Tool

def two_sample_hypothesis_test(string):

    # Parse the comma-separated input the agent passes in as a single string.
    (sample1_avg, sample1_stdev, sample1_num,
     sample2_avg, sample2_stdev, sample2_num, eq_var) = (
        s.strip() for s in string.split(","))

    equal_var = eq_var == 'True'

    # ttest_ind_from_stats takes numeric keyword arguments named mean1/std1/
    # nobs1 and mean2/std2/nobs2, so convert the parsed strings first.
    t2, p2 = ttest_ind_from_stats(mean1=float(sample1_avg),
                                  std1=float(sample1_stdev),
                                  nobs1=int(sample1_num),
                                  mean2=float(sample2_avg),
                                  std2=float(sample2_stdev),
                                  nobs2=int(sample2_num),
                                  equal_var=equal_var)

    return t2, p2
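
# Quick sanity check with illustrative values, calling the function directly
# (outside the agent) to confirm the parsing and t-test behave as expected:
t_check, p_check = two_sample_hypothesis_test("30,4,100,24,3,45,True")
print(t_check, p_check)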


two_sample_hypothesis_test_tool = Tool(
    name='two_sample_hypothesis_test',
    func= two_sample_hypothesis_test,
    description="Useful for when you need to know if there is a significant difference in averages between two samples. The input to this tool should be a comma separated list of length 7 of strings representing the average of the first sample, the standard deviation of the first sample, the number of observations in the first sample, the average of the second sample, the standard deviation of the second sample, the number of observations in the second sample, and whether the equivalent variance is True or False.  For example '30','4','100','24','3','45','True'."
)
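
# The agent hands the tool a single string; if I understand the Tool wrapper
# correctly, this mirrors the call it makes internally:
print(two_sample_hypothesis_test_tool.run("30,4,100,24,3,45,True"))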


from langchain.chat_models import ChatOpenAI

# Set up the turbo LLM
turbo_llm = ChatOpenAI(
    temperature=0,
    model_name='gpt-3.5-turbo'
)


from langchain.agents import initialize_agent
from langchain.chains.conversation.memory import ConversationBufferWindowMemory


tools = [two_sample_hypothesis_test_tool]

# conversational agent memory
memory = ConversationBufferWindowMemory(
    memory_key='chat_history',
    k=3,
    return_messages=True
)


# create our agent
conversational_agent = initialize_agent(
    agent='chat-conversational-react-description',
    tools=tools,
    llm=turbo_llm,
    verbose=True,
    max_iterations=3,
    early_stopping_method='generate',
    memory=memory,
    handle_parsing_errors=True
)


question="""Is there a significant difference in the averages between two samples, one sample having average 55, standard deviation 16, and total number of observations 17033, the other sample having average 26, standard deviation 7, and total observations 4260183, when the equivalent variance is False?"""

manual_react = f"""Question: Is there a significant difference in the averages between two samples, one sample having average 42, standard deviation 46, and total number of observations 13933, the other sample having average 36, standard deviation 37, and total observations 3160183, when the equivalent variance is False.
Action: two_sample_hypothesis_test_tool['42','46','13933','36','37','3160183','False']
Observation: t=16.5 p=0.00.
Thought: p is less than 0.05 so there is a significant difference.
Action: Finish[there is a significant difference]

Question:{question}"""


conversational_agent(manual_react)

error:

> Entering new AgentExecutor chain...

---------------------------------------------------------------------------
AttributeError                            Traceback (most recent call last)
Cell In[10], line 1
----> 1 conversational_agent(manual_react)

File ~/anaconda3/envs/llm_110623/lib/python3.10/site-packages/langchain/chains/base.py:116, in Chain.__call__(self, inputs, return_only_outputs)
    114 except (KeyboardInterrupt, Exception) as e:
    115     self.callback_manager.on_chain_error(e, verbose=self.verbose)
--> 116     raise e
    117 self.callback_manager.on_chain_end(outputs, verbose=self.verbose)
    118 return self.prep_outputs(inputs, outputs, return_only_outputs)

File ~/anaconda3/envs/llm_110623/lib/python3.10/site-packages/langchain/chains/base.py:113, in Chain.__call__(self, inputs, return_only_outputs)
    107 self.callback_manager.on_chain_start(
    108     {"name": self.__class__.__name__},
    109     inputs,
    110     verbose=self.verbose,
    111 )
    112 try:
--> 113     outputs = self._call(inputs)
    114 except (KeyboardInterrupt, Exception) as e:
    115     self.callback_manager.on_chain_error(e, verbose=self.verbose)

File ~/anaconda3/envs/llm_110623/lib/python3.10/site-packages/langchain/agents/agent.py:792, in AgentExecutor._call(self, inputs)
    790 # We now enter the agent loop (until it returns something).
    791 while self._should_continue(iterations, time_elapsed):
--> 792     next_step_output = self._take_next_step(
    793         name_to_tool_map, color_mapping, inputs, intermediate_steps
    794     )
    795     if isinstance(next_step_output, AgentFinish):
    796         return self._return(next_step_output, intermediate_steps)

File ~/anaconda3/envs/llm_110623/lib/python3.10/site-packages/langchain/agents/agent.py:672, in AgentExecutor._take_next_step(self, name_to_tool_map, color_mapping, inputs, intermediate_steps)
    667 """Take a single step in the thought-action-observation loop.
    668 
    669 Override this to take control of how the agent makes and acts on choices.
    670 """
    671 # Call the LLM to see what to do.
--> 672 output = self.agent.plan(intermediate_steps, **inputs)
    673 # If the tool chosen is the finishing tool, then we end and return.
    674 if isinstance(output, AgentFinish):

File ~/anaconda3/envs/llm_110623/lib/python3.10/site-packages/langchain/agents/agent.py:384, in Agent.plan(self, intermediate_steps, **kwargs)
    373 """Given input, decided what to do.
    374 
    375 Args:
   (...)
    381     Action specifying what tool to use.
    382 """
    383 full_inputs = self.get_full_inputs(intermediate_steps, **kwargs)
--> 384 full_output = self.llm_chain.predict(**full_inputs)
    385 return self.output_parser.parse(full_output)

File ~/anaconda3/envs/llm_110623/lib/python3.10/site-packages/langchain/chains/llm.py:151, in LLMChain.predict(self, **kwargs)
    137 def predict(self, **kwargs: Any) -> str:
    138     """Format prompt with kwargs and pass to LLM.
    139 
    140     Args:
   (...)
    149             completion = llm.predict(adjective="funny")
    150     """
--> 151     return self(kwargs)[self.output_key]

File ~/anaconda3/envs/llm_110623/lib/python3.10/site-packages/langchain/chains/base.py:116, in Chain.__call__(self, inputs, return_only_outputs)
    114 except (KeyboardInterrupt, Exception) as e:
    115     self.callback_manager.on_chain_error(e, verbose=self.verbose)
--> 116     raise e
    117 self.callback_manager.on_chain_end(outputs, verbose=self.verbose)
    118 return self.prep_outputs(inputs, outputs, return_only_outputs)

File ~/anaconda3/envs/llm_110623/lib/python3.10/site-packages/langchain/chains/base.py:113, in Chain.__call__(self, inputs, return_only_outputs)
    107 self.callback_manager.on_chain_start(
    108     {"name": self.__class__.__name__},
    109     inputs,
    110     verbose=self.verbose,
    111 )
    112 try:
--> 113     outputs = self._call(inputs)
    114 except (KeyboardInterrupt, Exception) as e:
    115     self.callback_manager.on_chain_error(e, verbose=self.verbose)

File ~/anaconda3/envs/llm_110623/lib/python3.10/site-packages/langchain/chains/llm.py:57, in LLMChain._call(self, inputs)
     56 def _call(self, inputs: Dict[str, Any]) -> Dict[str, str]:
---> 57     return self.apply([inputs])[0]

File ~/anaconda3/envs/llm_110623/lib/python3.10/site-packages/langchain/chains/llm.py:118, in LLMChain.apply(self, input_list)
    116 def apply(self, input_list: List[Dict[str, Any]]) -> List[Dict[str, str]]:
    117     """Utilize the LLM generate method for speed gains."""
--> 118     response = self.generate(input_list)
    119     return self.create_outputs(response)

File ~/anaconda3/envs/llm_110623/lib/python3.10/site-packages/langchain/chains/llm.py:62, in LLMChain.generate(self, input_list)
     60 """Generate LLM result from inputs."""
     61 prompts, stop = self.prep_prompts(input_list)
---> 62 return self.llm.generate_prompt(prompts, stop)

File ~/anaconda3/envs/llm_110623/lib/python3.10/site-packages/langchain/chat_models/base.py:82, in BaseChatModel.generate_prompt(self, prompts, stop)
     80 except (KeyboardInterrupt, Exception) as e:
     81     self.callback_manager.on_llm_error(e, verbose=self.verbose)
---> 82     raise e
     83 self.callback_manager.on_llm_end(output, verbose=self.verbose)
     84 return output

File ~/anaconda3/envs/llm_110623/lib/python3.10/site-packages/langchain/chat_models/base.py:79, in BaseChatModel.generate_prompt(self, prompts, stop)
     75 self.callback_manager.on_llm_start(
     76     {"name": self.__class__.__name__}, prompt_strings, verbose=self.verbose
     77 )
     78 try:
---> 79     output = self.generate(prompt_messages, stop=stop)
     80 except (KeyboardInterrupt, Exception) as e:
     81     self.callback_manager.on_llm_error(e, verbose=self.verbose)

File ~/anaconda3/envs/llm_110623/lib/python3.10/site-packages/langchain/chat_models/base.py:54, in BaseChatModel.generate(self, messages, stop)
     50 def generate(
     51     self, messages: List[List[BaseMessage]], stop: Optional[List[str]] = None
     52 ) -> LLMResult:
     53     """Top Level call"""
---> 54     results = [self._generate(m, stop=stop) for m in messages]
     55     llm_output = self._combine_llm_outputs([res.llm_output for res in results])
     56     generations = [res.generations for res in results]

File ~/anaconda3/envs/llm_110623/lib/python3.10/site-packages/langchain/chat_models/base.py:54, in <listcomp>(.0)
     50 def generate(
     51     self, messages: List[List[BaseMessage]], stop: Optional[List[str]] = None
     52 ) -> LLMResult:
     53     """Top Level call"""
---> 54     results = [self._generate(m, stop=stop) for m in messages]
     55     llm_output = self._combine_llm_outputs([res.llm_output for res in results])
     56     generations = [res.generations for res in results]

File ~/anaconda3/envs/llm_110623/lib/python3.10/site-packages/langchain/chat_models/openai.py:266, in ChatOpenAI._generate(self, messages, stop)
    262     message = _convert_dict_to_message(
    263         {"content": inner_completion, "role": role}
    264     )
    265     return ChatResult(generations=[ChatGeneration(message=message)])
--> 266 response = self.completion_with_retry(messages=message_dicts, **params)
    267 return self._create_chat_result(response)

File ~/anaconda3/envs/llm_110623/lib/python3.10/site-packages/langchain/chat_models/openai.py:222, in ChatOpenAI.completion_with_retry(self, **kwargs)
    220 def completion_with_retry(self, **kwargs: Any) -> Any:
    221     """Use tenacity to retry the completion call."""
--> 222     retry_decorator = self._create_retry_decorator()
    224     @retry_decorator
    225     def _completion_with_retry(**kwargs: Any) -> Any:
    226         return self.client.create(**kwargs)

File ~/anaconda3/envs/llm_110623/lib/python3.10/site-packages/langchain/chat_models/openai.py:211, in ChatOpenAI._create_retry_decorator(self)
    203 max_seconds = 60
    204 # Wait 2^x * 1 second between each retry starting with
    205 # 4 seconds, then up to 10 seconds, then 10 seconds afterwards
    206 return retry(
    207     reraise=True,
    208     stop=stop_after_attempt(self.max_retries),
    209     wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds),
    210     retry=(
--> 211         retry_if_exception_type(openai.error.Timeout)
    212         | retry_if_exception_type(openai.error.APIError)
    213         | retry_if_exception_type(openai.error.APIConnectionError)
    214         | retry_if_exception_type(openai.error.RateLimitError)
    215         | retry_if_exception_type(openai.error.ServiceUnavailableError)
    216     ),
    217     before_sleep=before_sleep_log(logger, logging.WARNING),
    218 )

AttributeError: module 'openai' has no attribute 'error'
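
From the last frame of the traceback, langchain 0.0.150 is referencing openai.error, a module that I believe was removed in the openai 1.x rewrite, so my two installed versions may simply be incompatible. If that’s right, one tentative fix (assuming downgrading is acceptable) would be to pin the pre-1.0 client:

pip install "openai<1.0"

Alternatively, upgrading langchain to a release that supports the 1.x openai client might work, but I’d appreciate confirmation of the diagnosis.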