stream=True and function calling? Confused

Can anyone shed some light on the best way to use function calling with ‘stream=True’?

To preface: I am brand new to Python and have been learning it for the past three months or so, solely to build chat bots.

I’m really lost when it comes to this. I have tried multiple different routes.

It streams normal ‘openai.ChatCompletion.create’ responses fine when no function call is involved, but when the model makes a function call, the follow-up response does not stream. I have tried setting ‘stream=True’ on the call inside ‘handle_function_call’ as well, and that did not work either. It calls the function with the proper arguments, but it just dumps the assistant message to Streamlit without streaming it, which makes the bot look strange because half the messages stream and the other half don’t. The other problem is that when I ask a follow-up question, I get this error message:

InvalidRequestError: Missing parameter 'name': messages with role 'function' must have a 'name'.

Traceback:

    File "C:\Users\kylem\OneDrive\Desktop\bible-bot\base\Lib\site-packages\streamlit\runtime\scriptrunner\script_runner.py", line 552, in _run_script
        exec(code, module.__dict__)
    File "C:\Users\kylem\OneDrive\Desktop\bible-bot\scripts\main.py", line 264, in <module>
        authenticator_template()
    File "C:\Users\kylem\OneDrive\Desktop\bible-bot\scripts\main.py", line 208, in authenticator_template
        run_bible_bot()  # Uncomment this if you want to run the Bible Bot after authentication
    File "C:\Users\kylem\OneDrive\Desktop\bible-bot\scripts\main.py", line 129, in run_bible_bot
        for response in openai.ChatCompletion.create(
    File "C:\Users\kylem\OneDrive\Desktop\bible-bot\base\Lib\site-packages\openai\api_resources\chat_completion.py", line 25, in create
        return super().create(*args, **kwargs)
    File "C:\Users\kylem\OneDrive\Desktop\bible-bot\base\Lib\site-packages\openai\api_resources\abstract\engine_api_resource.py", line 153, in create
        response, _, api_key = requestor.request(
    File "C:\Users\kylem\OneDrive\Desktop\bible-bot\base\Lib\site-packages\openai\api_requestor.py", line 298, in request
        resp, got_stream = self._interpret_response(result, stream)
    File "C:\Users\kylem\OneDrive\Desktop\bible-bot\base\Lib\site-packages\openai\api_requestor.py", line 700, in _interpret_response
        self._interpret_response_line(
    File "C:\Users\kylem\OneDrive\Desktop\bible-bot\base\Lib\site-packages\openai\api_requestor.py", line 765, in _interpret_response_line
        raise self.handle_error_response(

Here is my code …

        
    # Function to ensure the chat output is a string
    def ensure_string(output):
        if not isinstance(output, str):
            return json.dumps(output)
        return output

    # Function to handle function calls
    def handle_function_call(response_message, messages, functions):
        available_functions = {
            "get_bible_id": get_bible_id,
            "get_book_id": get_book_id,
            "get_full_chapter_text": get_full_chapter_text,
            "get_specific_verse_in_bible": get_specific_verse_in_bible,
            "search_bible_for_keyword": search_bible_for_keyword,
            "search_cross_ref": search_cross_ref,
            "search_hebrew_greek_text": search_hebrew_greek_text,
            "search_easton_dict": search_easton_dict,
            "search_geo_data": search_geo_data,
            "search_lexicon":search_lexicon,
            "search_llx":search_llx,
            "search_nestle_1904":search_nestle_1904
        }

        function_name = func_call["name"]
        function_args = json.loads(func_call["arguments"])

        try:
            function_response = available_functions[function_name](**function_args)
        except Exception as e:
            logging.error(f"Failed to execute function {function_name}. Error: {e}")
            return None

        function_response = ensure_string(function_response)

        messages.append({"role": "function", "name": function_name, "content": function_response})
        #st.json(function_response)
        
        # Make a non-streaming API call for the function
        response = openai.ChatCompletion.create(
            model="gpt-3.5-turbo-16k",
            messages=messages,
            functions=functions,
        )
        return response
    
    
    # Call the streamlit_ui function to display the UI
    streamlit_ui()
    

    # Display past messages
    for message in st.session_state.messages:
        with st.chat_message(message["role"]):
            st.markdown(message["content"])

    # Take input
    if prompt := st.chat_input("What is up?"):
        # Append user message to chat history
        st.session_state.messages.append({"role": "user", "content": prompt})
        with st.chat_message("user"):
            st.markdown(prompt)
        
        full_response = ""
        func_call = {"name": None, "arguments": ""}

        with st.chat_message("assistant"):
            message_placeholder = st.empty()  # Create an empty placeholder for the assistant's message

            for response in openai.ChatCompletion.create(
                    model=st.session_state["openai_model"],
                    messages=[
                        {"role": m["role"], "content": m["content"]}
                        for m in st.session_state.messages
                    ],
                    functions=st.session_state.functions,
                    stream=True,
            ):
                logging.debug(f"Received response chunk: {response}")

                delta = response["choices"][0]["delta"]

                if "function_call" in delta:
                    logging.debug("Function call detected.")
                    if "name" in delta["function_call"]:
                        func_call["name"] = delta["function_call"]["name"]
                    if "arguments" in delta["function_call"]:
                        func_call["arguments"] += delta["function_call"]["arguments"]
                
                if response["choices"][0].get("finish_reason") == "function_call":
                    logging.debug("Function generation requested, calling function.")
                    response_after_function = handle_function_call(func_call, st.session_state.messages, st.session_state.functions)
                    func_call = {"name": None, "arguments": ""}  # Reset for next function call

                    if response_after_function:
                        try:
                            # Extract the assistant's message content from the function response
                            function_message_content = response_after_function["choices"][0]["message"]["content"]
                            logging.debug(f"Extracted message content from function response: {function_message_content}")
                            
                            # Append the content to full_response
                            full_response += function_message_content
                        except Exception as e:
                            logging.error(f"Failed to extract message content from function response. Error: {e}")
                    else:
                        logging.warning("Function call did not return a valid response.")

                if not delta.get("content", None):
                    continue

                full_response += delta.get("content", "")
                message_placeholder.markdown(full_response + "▌")

            message_placeholder.markdown(full_response)
            st.session_state.messages.append({"role": "assistant", "content": full_response})
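
For reference, the streaming attempt I mentioned was roughly this change inside handle_function_call (reconstructed from memory, not my exact code):

    # Reconstructed sketch of my attempt: passing stream=True to the
    # follow-up call. With stream=True the API returns a generator of
    # delta chunks instead of a full response dict, so the caller's
    # response_after_function["choices"][0]["message"]["content"]
    # lookup fails and nothing gets streamed to the placeholder.
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo-16k",
        messages=messages,
        functions=functions,
        stream=True,
    )
    return response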

The problem with your code is exactly described by the error message itself.
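
Every message you send back with role ‘function’ must include the name of the function that produced it, e.g. (using one of your own functions):

    {"role": "function", "name": "get_bible_id", "content": "..."}

Your handle_function_call appends exactly that, but on the next turn the list comprehension you build the request from only copies ‘role’ and ‘content’, so the ‘name’ key gets dropped and the API rejects the history.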


Your definition

    def handle_function_call(response_message, messages, functions):

doesn’t match how you call it,

    handle_function_call(func_call, st.session_state.messages, st.session_state.functions)

and the body reads ‘func_call["name"]’ even though the parameter you receive is named ‘response_message’.
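
One way to line them up is to rename the parameter so the body can find it (a minimal sketch; the rest of the function stays as you have it):

    def handle_function_call(func_call, messages, functions):
        ...
        function_name = func_call["name"]
        function_args = json.loads(func_call["arguments"])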


Try this:
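
When you rebuild the history for the API, keep the ‘name’ field on messages that have one (a minimal sketch; ‘to_api_messages’ is just an illustrative name):

    def to_api_messages(history):
        # Function-role messages must carry the name of the function
        # that produced them, so preserve it when it is present.
        api_messages = []
        for m in history:
            msg = {"role": m["role"], "content": m["content"]}
            if "name" in m:
                msg["name"] = m["name"]
            api_messages.append(msg)
        return api_messages

Then pass ‘messages=to_api_messages(st.session_state.messages)’ instead of your current list comprehension. You can also pass ‘stream=True’ to the ChatCompletion.create call inside handle_function_call and loop over its chunks the same way you already do for the first call, so the post-function answer streams too.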

Great point! I am confused about what I need to change in my code to get this working. I should have prefaced that I am still new to Python; I just started learning a few months back because I wanted to make my own chat bots.

This error started happening when I set ‘stream=True’… before that, my bot was working exactly how I wanted, just without streaming. Anyway, now I need to find a way to pass ‘name’ with the ‘function’ role. Do I also have to make sure the arguments are fully streamed first? Maybe it is trying to pass them when it has only received the first chunk from the delta in OpenAI streaming… not sure. Thanks for your reply!

Thanks, Frank!

I am going to give this a go and let you know.

Sometimes it feels like I can’t see the issue with the code just because I have been looking at it for so long…

I really appreciate you taking the time to reply and helping out a newbie like me!

Hey George!

I really appreciate you taking the time to reply and help me out.

I have been learning Python these past few months, mostly to make chat bots, and it has been so much fun…

Your post was one of the first I checked out; however, I was stuck on how exactly to implement your logic in my code.

Would your logic essentially replace this section of my code?

        func_call = {"name": None, "arguments": ""}

        with st.chat_message("assistant"):
            message_placeholder = st.empty()  # Create an empty placeholder for the assistant's message

            for response in openai.ChatCompletion.create(
                    model=st.session_state["openai_model"],
                    messages=[
                        {"role": m["role"], "content": m["content"]}
                        for m in st.session_state.messages
                    ],
                    functions=st.session_state.functions,
                    stream=True,
            ):
                logging.debug(f"Received response chunk: {response}")

                delta = response["choices"][0]["delta"]

                if "function_call" in delta:
                    logging.debug("Function call detected.")
                    if "name" in delta["function_call"]:
                        func_call["name"] = delta["function_call"]["name"]
                    if "arguments" in delta["function_call"]:
                        func_call["arguments"] += delta["function_call"]["arguments"]
                
                if response["choices"][0].get("finish_reason") == "function_call":
                    logging.debug("Function generation requested, calling function.")
                    response_after_function = handle_function_call(func_call, st.session_state.messages, st.session_state.functions)
                    func_call = {"name": None, "arguments": ""}  # Reset for next function call

                    if response_after_function:
                        try:
                            # Extract the assistant's message content from the function response
                            function_message_content = response_after_function["choices"][0]["message"]["content"]
                            logging.debug(f"Extracted message content from function response: {function_message_content}")
                            
                            # Append the content to full_response
                            full_response += function_message_content
                        except Exception as e:
                            logging.error(f"Failed to extract message content from function response. Error: {e}")
                    else:
                        logging.warning("Function call did not return a valid response.")

                if not delta.get("content", None):
                    continue

                full_response += delta.get("content", "")
                message_placeholder.markdown(full_response + "▌")

            message_placeholder.markdown(full_response)
            st.session_state.messages.append({"role": "assistant", "content": full_response})

I have also only used Flask before, to make a GPT plugin, so my question is: does your approach require another script to run the Flask app?

I am really impressed by how concise your code is; it looks like you were able to get really creative… that’s awesome!

Here is my GitHub repo with the entire app, for context…

If you are interested, my user is ‘kyle-mirich’, the repo is ‘bible-bot’, and the snippet I posted is from ‘scripts/main.py’.

any help would be greatly appreciated!


The branch I am working on is ‘modularize-bible-bot.py-test’.