fix: correct model name in g4f textgen

This commit is contained in:
Yandrik 2024-05-02 08:01:02 +02:00
parent 30fb94c2de
commit 3bd90602c5

View File

@ -94,7 +94,7 @@ def _gen_text(prompt, use_zukijourney=False):
client = get_g4f_client()
response = client.chat.completions.create(
provider=g4f.Provider.Llama,
model="llama-3-70b-instruct",
model="llama-3-70b",
messages=[{"role": "user", "content": prompt}],
max_tokens=800,
temperature=0.7,