Greetings OpenAI community,
I have integrated the OpenAI API into a Google Spreadsheet using Apps Script to autofill empty cells, and my requests target the gpt-4 model via the v1/chat/completions endpoint. The setup works and responses are generated. However, the output often feels more like what I would expect from gpt-3.5: knowledge of recent topics seems limited, and the depth on certain subjects is shallow.
To cross-verify, I tested similar prompts directly in the OpenAI Playground with gpt-4 explicitly selected, and the responses were markedly better in quality and relevance. This contrast makes me wonder whether my application is actually harnessing the full potential of gpt-4.
Here’s the code I’m using:
// Adds a custom menu to the spreadsheet for triggering the autofill.
function onOpen() {
  var ui = SpreadsheetApp.getUi();
  ui.createMenu('ChatGPT')
    .addItem('Generate Queries for Empty Cells', 'generateQueriesForEmptyCells')
    .addToUi();
}
// Scans the active sheet and fills every empty cell (excluding the header
// row and the first column) with a response generated from the row's
// facility name and the column's category header.
function generateQueriesForEmptyCells() {
  var sheet = SpreadsheetApp.getActiveSpreadsheet().getActiveSheet();
  var lastRow = sheet.getLastRow();
  var lastColumn = sheet.getLastColumn();
  for (var i = 2; i <= lastRow; i++) {
    for (var j = 2; j <= lastColumn; j++) {
      var currentCell = sheet.getRange(i, j);
      var cellContent = currentCell.getValue();
      // String() guards against non-string cell values (numbers, dates),
      // which have no trim() method.
      if (!cellContent || String(cellContent).trim() === "") {
        var facilityName = sheet.getRange(i, 1).getValue(); // column A: facility name
        var infoCategory = sheet.getRange(1, j).getValue(); // row 1: category header
        if (facilityName && infoCategory) {
          // Japanese prompt: "Please look up the <infoCategory> of <facilityName>
          // on the web and provide it together with real, existing URLs."
          var prompt = facilityName + "の" + infoCategory + "をwebで調べて実在するURLと共に示してください";
          var response = callOpenAI(prompt);
          currentCell.setValue(response);
        }
      }
    }
  }
}
// Sends a single chat completion request to the OpenAI API and returns the
// generated text (or an error message) as a string.
function callOpenAI(prompt) {
  var apiKey = "MY_API_CODE"; // replace with your actual API key
  var url = "https://api.openai.com/v1/chat/completions";
  if (!prompt) {
    return "Error: Prompt is empty or null";
  }
  var maxTokens = 100; // hard cap on the length of the generated reply
  var temperature = 0.7;
  var presencePenalty = 0.0;
  var payload = JSON.stringify({
    model: "gpt-4",
    messages: [{
      "role": "user",
      "content": prompt
    }],
    max_tokens: maxTokens,
    temperature: temperature,
    presence_penalty: presencePenalty
  });
  var options = {
    method: "post",
    headers: {
      "Content-Type": "application/json",
      Authorization: "Bearer " + apiKey
    },
    payload: payload,
    muteHttpExceptions: true
  };
  try {
    var response = UrlFetchApp.fetch(url, options);
    var jsonResponse = JSON.parse(response.getContentText());
    if (jsonResponse.error) {
      Logger.log("Error: " + jsonResponse.error.message);
      return "API error: " + jsonResponse.error.message;
    }
    if (jsonResponse.choices && jsonResponse.choices.length > 0 && jsonResponse.choices[0].message) {
      return jsonResponse.choices[0].message.content.trim();
    } else {
      Logger.log("Error: Response has no choices or message");
      return "No response generated";
    }
  } catch (e) {
    Logger.log("Error: " + e.toString());
    return "Error in fetching response";
  }
}
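For context, one small check I'm considering adding to callOpenAI, right after the JSON is parsed, is logging the model field from the response body, since the API echoes back the model that actually served the request. This is just a rough sketch of the extra line:

// Rough sketch: the chat completions response echoes the model that handled
// the call, so logging it should show whether gpt-4 actually served the request.
Logger.log("Requested gpt-4, served by: " + jsonResponse.model);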
Given this discrepancy, I am seeking advice on:
- Confirming that the gpt-4 model is actually processing my requests, especially considering gpt-4 can be explicitly selected and tested in the Playground.
- Identifying any additional parameters or settings I might be overlooking to fully leverage gpt-4's capabilities in my application.
Has anyone faced similar challenges, or can anyone offer insights into ensuring the advanced capabilities of gpt-4 are effectively utilized, particularly when direct comparisons with Playground tests suggest a difference in performance?
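In case it is useful to the discussion, I was also thinking about a one-off standalone check of whether my key can see gpt-4 at all, via the retrieve-model endpoint. The helper name checkGpt4Access below is just something I made up for the sketch:

// Sketch of a one-off check: ask the API whether this key can access gpt-4.
// A 200 response with a model object suggests the model is available to the key;
// an error response suggests it is not.
function checkGpt4Access() {
  var apiKey = "MY_API_CODE"; // same key as above
  var response = UrlFetchApp.fetch("https://api.openai.com/v1/models/gpt-4", {
    method: "get",
    headers: { Authorization: "Bearer " + apiKey },
    muteHttpExceptions: true
  });
  Logger.log(response.getResponseCode() + ": " + response.getContentText());
}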
Your guidance and suggestions will be highly valued.
Thank you for your support!