private func analyzeImage(image: UIImage) {
    // Compress the image heavily before upload to keep the request payload small.
    guard let imageData = image.jpegData(compressionQuality: 0.2) else {
        print("Failed to convert image to JPEG data")
        return
    }

    isLoading = true
    currentlyAnalyzingImage = image
    print("Starting image analysis")

    // Attach the image as a vision content part of the user message.
    let imgParam = ChatQuery.ChatCompletionMessageParam.ChatCompletionUserMessageParam(
        content: .vision([.chatCompletionContentPartImageParam(.init(imageUrl: .init(url: imageData, detail: .high)))])
    )

    // These long instructions currently travel with every request as the system message.
    let systemInstructions = """
    Long instructions , Long instructions
    Long instructions
    """

    let query = ChatQuery(
        messages: [
            .system(.init(content: systemInstructions)),
            .user(imgParam)
        ],
        model: .gpt4_o,
        maxTokens: 500
    )

    print("Sending query to OpenAI")
    client.chats(query: query) { (result: Result<ChatResult, Error>) in
        // Hop back to the main queue before touching any published UI state.
        DispatchQueue.main.async {
            self.isLoading = false
            switch result {
            case .success(let chatResult):
                if let content = chatResult.choices.first?.message.content {
                    switch content {
                    case .string(let text):
                        // Persist the parsed result alongside the original image data.
                        let manager = AnalyzedItemManager(context: context)
                        manager.processResponse(text, imageData: imageData)
                        print(text)
                    case .vision:
                        self.resultText = "Received vision content, not a string."
                    }
                } else {
                    self.resultText = "No content received"
                }
            case .failure(let error):
                self.resultText = "Error: \(error.localizedDescription)"
            }
            print("Analysis complete")
        }
    }
}
I want to send an image plus text as input and get the output back as JSON. Right now I achieve this by packing long instructions into the prompt of every request; my goal is to simplify that by creating and maintaining the instructions on the assistant side instead (see the sketch below for the JSON part).
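As a side note on the JSON requirement: newer versions of MacPaw/OpenAI expose a responseFormat parameter on ChatQuery, and passing .jsonObject asks the API to guarantee syntactically valid JSON instead of relying on prompt wording alone (the API then requires the word "JSON" to appear somewhere in the messages). This is a minimal sketch assuming a library version whose ChatQuery init has responseFormat; verify against the README of your installed version:

// Sketch: the same query as in analyzeImage, but with JSON output enforced.
// Assumes a MacPaw/OpenAI version whose ChatQuery init exposes `responseFormat`.
let jsonQuery = ChatQuery(
    messages: [
        // json_object mode is only accepted if the prompt mentions "JSON".
        .system(.init(content: systemInstructions)),
        .user(imgParam)
    ],
    model: .gpt4_o,
    maxTokens: 500,
    responseFormat: .jsonObject
)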
I want to use the assistant I created in this code block, but I couldn't figure out how to call it. Writing the full instructions as text in every request is too long, and I have only just started learning the Assistants part. I created a custom assistant myself, and it has a name parameter, but I couldn't find out how to reference it from SwiftUI (see the sketch below).

I am using this library → MacPaw/OpenAI
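One point worth knowing: an assistant is referenced by its ID (the asst_... value shown in the OpenAI dashboard), not by its name, and its instructions live on the assistant itself, so they no longer need to be sent with each request. Below is a minimal sketch of the thread → run → messages flow. The method names (threads, runs, runRetrieve, threadsMessages) are taken from the Assistants (beta) section of the MacPaw/OpenAI README; exact signatures vary between library versions, and whether image content parts are accepted in thread messages should be checked separately, so treat this as an assumption-laden sketch:

// Sketch: driving a pre-made assistant via MacPaw/OpenAI's Assistants (beta) API.
// Assumption: method names follow the library README; "asst_XXXXXXXX" is a
// placeholder for the real assistant ID copied from the OpenAI dashboard.
let assistantId = "asst_XXXXXXXX" // placeholder, not a real ID

// 1. Create a thread seeded with the user's message. The assistant's stored
//    instructions apply automatically, so no system prompt is needed here.
let threadsQuery = ThreadsQuery(messages: [
    ChatQuery.ChatCompletionMessageParam(role: .user, content: "Analyze this image and reply in JSON")!
])
client.threads(query: threadsQuery) { threadResult in
    guard case .success(let thread) = threadResult else { return }

    // 2. Start a run of the assistant (referenced by ID, not name) on the thread.
    client.runs(threadId: thread.id, query: RunsQuery(assistantId: assistantId)) { runResult in
        guard case .success(let run) = runResult else { return }
        print("Run started: \(run.id)")

        // 3. In a real app, poll client.runRetrieve(threadId:runId:) until the
        //    run's status is "completed", then read the reply from the thread.
        client.threadsMessages(threadId: thread.id) { messagesResult in
            if case .success(let messages) = messagesResult {
                print(messages) // the assistant's reply is the newest message
            }
        }
    }
}

With this flow the long instructions disappear from the client entirely; the only per-request payload is the user's message (plus the image, if your library version supports image parts in thread messages).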