'''
This is an example on how to use the API for oobabooga/text-generation-webui.

Make sure to start the web UI with the following flags:

python server.py --model MODEL --listen --no-stream

Optionally, you can also add the --share flag to generate a public gradio URL,
allowing you to use the API remotely.
'''
import json

import requests

# Address the web UI server is listening on (use the --listen host).
server = "127.0.0.1"

# Generation parameters.
# Reference: https://huggingface.co/docs/transformers/main_classes/text_generation#transformers.GenerationConfig
params = {
    'max_new_tokens': 200,
    'do_sample': True,
    'temperature': 0.72,
    'top_p': 0.73,
    'typical_p': 1,
    'repetition_penalty': 1.1,
    'encoder_repetition_penalty': 1.0,
    'top_k': 0,
    'min_length': 0,
    'no_repeat_ngram_size': 0,
    'num_beams': 1,
    'penalty_alpha': 0,
    'length_penalty': 1,
    'early_stopping': False,
    'seed': -1,  # -1 means a random seed on every call
    'add_bos_token': True,
    'custom_stopping_strings': [],
    'truncation_length': 2048,
    'ban_eos_token': False,
    'skip_special_tokens': True,
}

# Input prompt
prompt = "What I would like to say is the following: "

# The gradio endpoint expects a single JSON-encoded string containing
# [prompt, params] as its one data element.
payload = json.dumps([prompt, params])

response = requests.post(f"http://{server}:7860/run/textgen", json={
    "data": [
        payload
    ]
})
# Fail loudly on HTTP errors instead of surfacing an opaque JSON/KeyError.
response.raise_for_status()

# The generated text comes back as the first element of "data".
reply = response.json()["data"][0]
print(reply)