fix: fix model name in g4f textgen
parent 30fb94c2de
commit 3bd90602c5
@@ -94,7 +94,7 @@ def _gen_text(prompt, use_zukijourney=False):
     client = get_g4f_client()
     response = client.chat.completions.create(
         provider=g4f.Provider.Llama,
-        model="llama-3-70b-instruct",
+        model="llama-3-70b",
         messages=[{"role": "user", "content": prompt}],
         max_tokens=800,
         temperature=0.7,
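
For context, here is a minimal sketch of what the surrounding helper might look like after this change. Only the create(...) arguments come from the diff; get_g4f_client() wrapping g4f.client.Client(), the unused use_zukijourney flag, and the return expression are assumptions about the repository's code, not confirmed by this commit.

# Hypothetical reconstruction of the helper around the changed call.
# Everything outside the create(...) arguments is an assumption.
import g4f
from g4f.client import Client


def get_g4f_client():
    # Assumption: the project helper simply instantiates the g4f client.
    return Client()


def _gen_text(prompt, use_zukijourney=False):
    # use_zukijourney is ignored in this sketch; the diff only shows the g4f path.
    client = get_g4f_client()
    response = client.chat.completions.create(
        provider=g4f.Provider.Llama,
        model="llama-3-70b",  # corrected model name from this commit
        messages=[{"role": "user", "content": prompt}],
        max_tokens=800,
        temperature=0.7,
    )
    # g4f's client mirrors the OpenAI response shape, so the generated text
    # should be available at choices[0].message.content.
    return response.choices[0].message.content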