### Describe the bug
Command:
```python
import re

import openai


def classify_tokens_gpt3_5_turbo_multiple_parts(email_text: str) -> list:
    # Define the prompt
    prompt = (
        f"Given the request for quotation email below, identify and classify the following information for each part: "
        f"email_Subject_Phrase, RFQ_number, Manufacturer_Part_Number, Qty_Required, Manufacturer_name, "
        f"Customer_Part_Number, Product_Description, Target_Price, Lead_Time_days, Date_Code, Packaging_Type, "
        f"Dispatch_Date, Comments, Currency, min_ord_qty, STD_PACK_QTY, SENDER_NAME, SENDER_POSITION, SENDER_COMPANY, SENDER_MOBILE, "
        f"SENDER_EMAIL, SENDER_ADDRESS, SENDER_COUNTRY, SENDER_PINCODE, SENDER_CITY, SENDER_STATE.\n\n"
        '''for example response. : {'email_Subject_Phrase':'send me your quote to following parts',
        'RFQ_number':'123455ASDJKH',
        'Manufacturer_Part_Number':['aas1d32f1','df4gs45'],
        'Qty_Required':['32','5510'],
        'Manufacturer_name':['TI','MAXIUM'],
        'Customer_Part_Number':['12UIOY3','KJHG2134'],
        'Product_Description':['PN DIODE','3W LED'],
        'Target_Price':['120','5'],
        'Lead_Time_days':['5','10'],
        'Date_Code':['12-FEB-2023','10-JAN-2023'],
        'Packaging_Type':['SMD','THOURHG HOLE'],
        'Dispatch_Date':['11-FEB-2023','10-JAN-2023'],
        'Comments':['SEND WITH MINIMUM PLASIC PACKAGING'],
        'Currency':'USD',
        'min_ord_qty':['10K','5K'],
        'STD_PACK_QTY':['2K','1K'],
        'SENDER_NAME':'HEMANG JOSHI',
        'SENDER_POSITION':'DESIGN ENGINEER',
        'SENDER_COMPANY':'HJLABS.IN',
        'SENDER_MOBILE':'+917016525813',
        'SENDER_EMAIL':'INFO@HJLABS.IN',
        'SENDER_ADDRESS':'SHED#:180, MAHAVIR INDIUSTRIAL PARK-2, KATHVADA GIDC, AHMEDABAD',
        'SENDER_COUNTRY':'INDIA',
        'SENDER_PINCODE':'365610',
        'SENDER_CITY':'AHMEDABAD',
        'SENDER_STATE':'GUJARAT'
        }'''
        f"Email:\n{email_text}\n\n"
    )
    # Call the GPT-3.5-turbo API
    response = openai.ChatCompletion.create(
        # engine="text-davinci-002",
        engine="gpt-3.5-turbo",
        # engine="gpt-4",
        prompt=prompt,
        max_tokens=1024,
        n=1,
        stop=None,
        temperature=0.1,
    )
    # Extract the generated answer from the API response
    answer = response.choices[0].text.strip()
    print(f'{answer=}')
    # Split the answer into a list of parts
    parts = re.split(r'\n\s*\n', answer)
    # Parse the individual parts into a list of dictionaries
    classified_tokens_list = []
    for part in parts:
        classified_tokens = {}
        for line in part.split('\n'):
            if ':' in line:
                key, value = line.split(':', 1)
                classified_tokens[key.strip()] = value.strip()
        classified_tokens_list.append(classified_tokens)
    return classified_tokens_list


mlemail = '''
Dear HEMANG,
Please send me your quote to following parts:
Maxim S21DF3G 210pc.
Taiyo Yuden TMK325B7226MM-TR 1k / 7k
Taiyo Yuden TMK325B7226KM-PR 1k / 7k
TI TM4C129XNCZAD 1415pc.
Maxim MAX823SEUK+T 100 / 1550pc.
Linear / Analog LT8708EUHG#PBF 2.300
Texas Instruments LP38691SD-ADJ/NOPB 2.400
Texas Instruments LP38690SD-ADJ 2.400
Texas Instruments LP38690SDX-ADJ 2.400
Texas Instruments LMP8481MM-T/NOPB 2300
UCC EMZL350ARA561MJA0G 6.900
Omron G5V-1-DC9 2.300
Thank you very much for your efforts.
Best regards,
SATYA NADELLA
--
SATYA NADELLA
Verkauf / Sales
MICROSOFT
'''
classify_tokens_gpt3_5_turbo_multiple_parts(mlemail)
```
Error:
```python
---------------------------------------------------------------------------
InvalidRequestError Traceback (most recent call last)
Cell In [20], line 1
----> 1 classify_tokens_gpt3_5_turbo_multiple_parts(mlemail)
Cell In [18], line 40, in classify_tokens_gpt3_5_turbo_multiple_parts(email_text)
3 prompt = (
4 f"Given the request for quotation email below, identify and classify the following information for each part: "
5 f"email_Subject_Phrase, RFQ_number, Manufacturer_Part_Number, Qty_Required, Manufacturer_name, "
(...)
36 f"Email:\n{email_text}\n\n"
37 )
39 # Call the GPT-3.5-turbo API
---> 40 response = openai.ChatCompletion.create(
41 # engine="text-davinci-002",
42 engine="gpt-3.5-turbo",
43 # engine="gpt-4",
44 prompt=prompt,
45 max_tokens=1024,
46 n=1,
47 stop=None,
48 temperature=0.1,
49 )
51 # Extract the generated answer from the API response
52 answer = response.choices[0].text.strip()
File ~/.local/lib/python3.11/site-packages/openai/api_resources/chat_completion.py:25, in ChatCompletion.create(cls, *args, **kwargs)
23 while True:
24 try:
---> 25 return super().create(*args, **kwargs)
26 except TryAgain as e:
27 if timeout is not None and time.time() > start + timeout:
File ~/.local/lib/python3.11/site-packages/openai/api_resources/abstract/engine_api_resource.py:153, in EngineAPIResource.create(cls, api_key, api_base, api_type, request_id, api_version, organization, **params)
127 @classmethod
128 def create(
129 cls,
(...)
136 **params,
137 ):
138 (
139 deployment_id,
140 engine,
(...)
150 api_key, api_base, api_type, api_version, organization, **params
151 )
--> 153 response, _, api_key = requestor.request(
154 "post",
155 url,
156 params=params,
157 headers=headers,
158 stream=stream,
159 request_id=request_id,
160 request_timeout=request_timeout,
161 )
163 if stream:
164 # must be an iterator
165 assert not isinstance(response, OpenAIResponse)
File ~/.local/lib/python3.11/site-packages/openai/api_requestor.py:226, in APIRequestor.request(self, method, url, params, headers, files, stream, request_id, request_timeout)
205 def request(
206 self,
207 method,
(...)
214 request_timeout: Optional[Union[float, Tuple[float, float]]] = None,
215 ) -> Tuple[Union[OpenAIResponse, Iterator[OpenAIResponse]], bool, str]:
216 result = self.request_raw(
217 method.lower(),
218 url,
(...)
224 request_timeout=request_timeout,
225 )
--> 226 resp, got_stream = self._interpret_response(result, stream)
227 return resp, got_stream, self.api_key
File ~/.local/lib/python3.11/site-packages/openai/api_requestor.py:620, in APIRequestor._interpret_response(self, result, stream)
612 return (
613 self._interpret_response_line(
614 line, result.status_code, result.headers, stream=True
615 )
616 for line in parse_stream(result.iter_lines())
617 ), True
618 else:
619 return (
--> 620 self._interpret_response_line(
621 result.content.decode("utf-8"),
622 result.status_code,
623 result.headers,
624 stream=False,
625 ),
626 False,
627 )
File ~/.local/lib/python3.11/site-packages/openai/api_requestor.py:683, in APIRequestor._interpret_response_line(self, rbody, rcode, rheaders, stream)
681 stream_error = stream and "error" in resp.data
682 if stream_error or not 200 <= rcode < 300:
--> 683 raise self.handle_error_response(
684 rbody, rcode, resp.data, rheaders, stream_error=stream_error
685 )
686 return resp
InvalidRequestError: Invalid URL (POST /v1/engines/gpt-3.5-turbo/chat/completions)
```
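For context: passing `engine=` and `prompt=` makes the client build the legacy `/v1/engines/<engine>/...` URL, which is the invalid `POST /v1/engines/gpt-3.5-turbo/chat/completions` path reported above. Below is a minimal sketch of the call shape the chat completions endpoint expects, reusing the `prompt` variable built in the function above; the other parameter values are unchanged:

```python
import openai

# Sketch only: chat models are selected with `model=` (not `engine=`) and
# take a list of messages rather than a raw `prompt` string.
response = openai.ChatCompletion.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": prompt}],
    max_tokens=1024,
    n=1,
    temperature=0.1,
)
# Chat responses carry the text under message["content"], not `.text`.
answer = response["choices"][0]["message"]["content"].strip()
```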
### To Reproduce
See the code in the "Describe the bug" section above.
### Code snippets
```python
# See the code in the "Describe the bug" section above.
```
### OS
Linux (Ubuntu, latest)
### Python version
Python 3.11
### Library version
Latest version from PyPI