I set up a logit_bias dictionary, but the completion isn't sampling the tokens I've biased to 100. The tokens it does return are ones I've explicitly set to -100, yet they still come back with the highest probability.
import openai

# Token IDs to bias: 100 should strongly favor a token, -100 should effectively ban it
bias = {'6395': 100, '8134': 100, '21943': -100, '5923': -100}
# The strings these token IDs are meant to correspond to
mapping = {'6395': 'Other', '8134': 'Ref', '21943': 'foo', '5923': ' AR'}

prompt = user_message + '\n\n###\n\n'
response = openai.Completion.create(
    model=model_name,
    prompt=prompt,
    max_tokens=2,
    logprobs=5,
    temperature=0,
    logit_bias=bias
)
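(For reference, a minimal sketch of how one could check that the IDs in bias actually line up with the strings in mapping; it assumes the fine-tune is based on davinci, which uses tiktoken's r50k_base encoding.)

import tiktoken

# Assumption: a davinci-based fine-tune uses the r50k_base (GPT-2) encoding
enc = tiktoken.get_encoding("r50k_base")

for token_id, text in mapping.items():
    # Each string should encode to exactly the single token ID used in the bias dict
    print(token_id, repr(text), enc.encode(text), repr(enc.decode([int(token_id)])))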
The Response:
<OpenAIObject text_completion id=cmpl-7u3LKPEhRJaDJH9Wpj8Pn6eGV7zRQ at 0x7fc103a45810> JSON: {
  "id": "cmpl-7u3LKPEhRJaDJH9Wpj8Pn6eGV7zRQ",
  "object": "text_completion",
  "created": 1693593574,
  "model": "my model davinci model",
  "choices": [
    {
      "text": "foofoo",
      "index": 0,
      "logprobs": {
        "tokens": [
          "foo",
          "foo"
        ],
        "token_logprobs": [
          -0.058320418,
          -0.0004045105
        ],
        "top_logprobs": [
          {
            "foo": -0.058320418,
            " AR": -2.87082,
            "Ref": -55.63644,
            " Ref": -78.82004,
            "ref": -83.56125
          },
          {
            "foo": -0.0004045105,
            " AR": -7.812904,
            "!": -87.33654,
            "\"": -87.33654,
            "#": -87.33654
          }
        ],
        "text_offset": [
          160,
          163
        ]
      },
      "finish_reason": "length"
    }
  ],
  "usage": {
    "prompt_tokens": 37,
    "completion_tokens": 2,
    "total_tokens": 39
  }
}
Anybody have ideas?