I use GPT-3.5 Turbo, but the requests are directed to GPT-4 Turbo

In my iOS app I use GPT-3.5 Turbo, but requests are also being made to GPT-4.0 and GPT-4.5 Turbo. Is this the result of an incorrect implementation in my code, or not?

func sendToAssistant(completion: @escaping (OpenAIResponse?) -> Void) {

    var request = URLRequest(url: self.openAIURL!)
    var httpBodyJson: Data? = nil
    // Read the API key from Info.plist, strip any stray quotes, and send it as a Bearer token.
    if let apiKey = Bundle.main.infoDictionary?["API_KEY"] as? String {
        let stringWithoutQuotes = apiKey.replacingOccurrences(of: "\"", with: "")
        request.addValue("Bearer \(stringWithoutQuotes)", forHTTPHeaderField: "Authorization")
    }
    request.httpMethod = "POST"
    request.addValue("application/json", forHTTPHeaderField: "Content-Type")
    
    // The "model" field here is the only place in the app where the target model is specified.
    let httpBody: [String: Any] = [
        "model": "gpt-3.5-turbo",
        "messages": messageLog
    ]
    
    do {
        httpBodyJson = try JSONSerialization.data(withJSONObject: httpBody, options: .prettyPrinted)
    } catch {
        print("Unable to convert to JSON \(error)")
        logMessage("error", messageUserType: .assistant)
    }
    
    request.httpBody = httpBodyJson
    
    executeRequest(request: request, withSessionConfig: nil) { data in
        if let requestData = data {
            if let jsonStr = String(data: requestData, encoding: .utf8) {
                let responseHandler = OpenAIResponseHandler()
                
                DispatchQueue.main.async {
                    // The quota error is detected by comparing the raw body against its exact JSON text.
                    if jsonStr == "{\n    \"error\": {\n        \"message\": \"You exceeded your current quota, please check your plan and billing details.\",\n        \"type\": \"insufficient_quota\",\n        \"param\": null,\n        \"code\": \"insufficient_quota\"\n    }\n}\n" {
                        self.logMessage("Message limit has been exceeded. Please wait, we will be back soon.", messageUserType: .assistant)
                        completion(nil)
                    } else {
                        // Decode the response once and reuse it for both the log and the callback.
                        if let response = responseHandler.decodeJson(jsonString: jsonStr),
                           let content = response.choices.first?.message["content"] {
                            self.logMessage(content, messageUserType: .assistant)
                            completion(response)
                        } else {
                            completion(nil)
                        }
                    }
                }
            }
        } else {
            DispatchQueue.main.async {
                completion(nil)
            }
        }
    }
}
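
One way I can think of to double-check, independent of the usage dashboard, is to decode the "model" field that the Chat Completions response sends back with every reply, since it names the model that actually served the request. A minimal sketch, assuming it is called with the same data that is handed to OpenAIResponseHandler (the struct and helper names below are placeholders, not part of my app):

import Foundation

// Decodes only the "model" field echoed back in every Chat Completions response.
struct ModelEcho: Decodable {
    let model: String
}

func logServedModel(from data: Data) {
    if let echo = try? JSONDecoder().decode(ModelEcho.self, from: data) {
        // Prints e.g. "gpt-3.5-turbo-0125" if the request really went to 3.5 Turbo.
        print("Served by model: \(echo.model)")
    }
}

Calling logServedModel(from: requestData) inside the executeRequest callback would show which model answered each request, which should make it clear whether the GPT-4.x usage comes from this code path or from somewhere else.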