Hey @nikunj
thanks for your response! It looks like it's still the same problem.
Below you can find my (minified) code (Node.js):
```js
import OpenAI from "openai";

const openai = new OpenAI(); // reads the API key from OPENAI_API_KEY

async function handleAssistantRun(threadId, assistantData, message) {
  const initialRunData = { run: null, runArguments: {} };
  if (!threadId?.length || !assistantData?.assistant_id?.length || !message?.length) return initialRunData;

  try {
    const runProps = {
      assistant_id: assistantData.assistant_id,
      additional_messages: [{ role: "user", content: message }],
      tool_choice: { type: "function", function: { name: "createNewResponse" } },
      temperature: assistantData.config.assist.temperature * 0.01,
      stream: true,
    };
    const runStream = await openai.beta.threads.runs.create(threadId, runProps);
    return await observeStream(runStream, assistantData, threadId);
  } catch (error) {
    console.error("Error in handleAssistantRun:", error);
    return initialRunData;
  }
}

async function observeStream(stream, assistantData, threadId, actionAlreadyRequired = false) {
  let runArguments = {};
  for await (const chunk of stream) {
    const { event, data } = chunk;
    if (event === "thread.run.requires_action") {
      if (!actionAlreadyRequired) {
        // Submit the tool outputs and observe the follow-up stream.
        const actionData = await handleRequiresAction(data, threadId);
        runArguments = actionData.runArguments;
        const result = await observeStream(actionData.actionRunStream, assistantData, threadId, true);
        return { run: result.run, runArguments };
      } else {
        // A second requires_action event is not expected; stop here with the current data.
        return { run: data, runArguments };
      }
    } else if (["thread.run.completed", "done", "end"].includes(event)) {
      return { run: data, runArguments };
    } else if (["thread.run.failed", "thread.run.cancelled", "thread.run.expired", "error"].includes(event)) {
      console.error("Unable to complete request. Data: " + JSON.stringify(data));
      return { run: event === "error" ? null : data, runArguments };
    }
  }
}

async function handleRequiresAction(run, threadId) {
  const initialActionRunStreamData = { actionRunStream: null, runArguments: {} };
  if (!run?.required_action?.submit_tool_outputs?.tool_calls?.length) {
    console.error("Unable to complete the request. No valid tool calls provided.");
    return initialActionRunStreamData;
  }

  const toolCalls = run.required_action.submit_tool_outputs.tool_calls;
  const toolOutputs = [];
  let runArguments = {};

  try {
    for (const toolCall of toolCalls) {
      // Acknowledge any tool call other than createNewResponse with a generic success output.
      if (toolCall.function?.name !== "createNewResponse") {
        toolOutputs.push({
          tool_call_id: toolCall.id,
          output: JSON.stringify({ success: true }),
        });
        continue;
      }
      runArguments = JSON.parse(toolCall.function?.arguments) || {};
      toolOutputs.push({
        tool_call_id: toolCall.id,
        output: JSON.stringify({ response: "Some response" }),
      });
    }
    const actionRunStream = await openai.beta.threads.runs.submitToolOutputs(threadId, run.id, {
      stream: true,
      tool_outputs: toolOutputs,
    });
    return { actionRunStream, runArguments };
  } catch (error) {
    console.error("Error in handleRequiresAction:", error);
    return initialActionRunStreamData;
  }
}
```
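For completeness, this is roughly how the entry point gets called; the thread creation and the `assistantData` values here are simplified placeholders, not my real setup:

```js
// Hypothetical invocation, only to show the expected inputs (values are placeholders).
const thread = await openai.beta.threads.create();
const assistantData = {
  assistant_id: "asst_...",
  config: { assist: { temperature: 70 } }, // scaled to 0.7 inside handleAssistantRun
};
const { run, runArguments } = await handleAssistantRun(thread.id, assistantData, "Hello!");
console.log(run?.status, runArguments);
```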
It stops after the second call of `observeStream`, i.e. in the else branch inside the `if (event === 'thread.run.requires_action')` handler, where `actionAlreadyRequired` is already true. I'm using the `"openai": "^4.33.1"` npm module.
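In case it helps with reproducing: a minimal tracing sketch I can wrap around the streams to log each event name as it arrives (the `traceStream` helper is only illustrative, not part of the code above):

```js
// Illustrative only: wraps a run stream and logs each event name before yielding the chunk on.
async function* traceStream(stream, label) {
  for await (const chunk of stream) {
    console.log(`[${label}]`, chunk.event);
    yield chunk;
  }
}

// Hypothetical usage inside handleAssistantRun:
// const runStream = await openai.beta.threads.runs.create(threadId, runProps);
// return await observeStream(traceStream(runStream, "initial run"), assistantData, threadId);
```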
** UPDATE 1 **
I haven't actually changed anything, but I can't rule out the possibility that I haven't updated something correctly somewhere. Anyway, it works now! Many thanks for the effort!
** UPDATE 2 **
@nikunj It looks like it works most of the time, but unfortunately it sometimes still triggers an additional `requires_action`. I think there is still a bug (it is only randomly reproducible after some runs).
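If the API can legitimately emit `thread.run.requires_action` more than once per run, a workaround might be to replace the `actionAlreadyRequired` flag with a loop that submits tool outputs for every `requires_action` it sees. This is only a rough, untested sketch on my side, reusing the same `handleRequiresAction` as above:

```js
// Sketch: handle an arbitrary number of requires_action events by looping over streams.
async function observeStreamLoop(stream, assistantData, threadId) {
  let runArguments = {};
  let currentStream = stream;
  while (currentStream) {
    let nextStream = null;
    for await (const chunk of currentStream) {
      const { event, data } = chunk;
      if (event === "thread.run.requires_action") {
        // Submit tool outputs and continue with the follow-up stream (if any).
        const actionData = await handleRequiresAction(data, threadId);
        runArguments = actionData.runArguments;
        nextStream = actionData.actionRunStream;
        break;
      } else if (["thread.run.completed", "done", "end"].includes(event)) {
        return { run: data, runArguments };
      } else if (["thread.run.failed", "thread.run.cancelled", "thread.run.expired", "error"].includes(event)) {
        console.error("Unable to complete request. Data: " + JSON.stringify(data));
        return { run: event === "error" ? null : data, runArguments };
      }
    }
    currentStream = nextStream;
  }
  return { run: null, runArguments };
}
```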