fix: switch provider and add zukijourney textgen parameter
parent b1445e681c
commit 30fb94c2de
@@ -80,10 +80,20 @@ Output the prompt in a txt code block.""")
 
 
-def _gen_text(prompt):
-    client = get_g4f_client()
-    response = client.chat.completions.create(
-        provider=g4f.Provider.PerplexityLabs,
-        model="llama-3-70b-instruct",
-        messages=[{"role": "user", "content": prompt}],
-        max_tokens=800,
+def _gen_text(prompt, use_zukijourney=False):
+    if use_zukijourney:
+        client = get_client()
+        response = client.chat.completions.create(
+            model="llama-3-70b-instruct",
+            messages=[{"role": "user", "content": prompt}],
+            max_tokens=800,
+            temperature=0.7,
+        )
+    else:
+        client = get_g4f_client()
+        response = client.chat.completions.create(
+            provider=g4f.Provider.Llama,
+            model="llama-3-70b-instruct",
+            messages=[{"role": "user", "content": prompt}],
+            max_tokens=800,
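Putting the two branches together, here is a minimal sketch of what _gen_text might look like after this commit. The get_client and get_g4f_client helpers, the zukijourney base URL, the API-key environment variable, and the final return line are not shown in the hunk and are illustrative assumptions; only the branching logic, the provider switch to g4f.Provider.Llama, and the call arguments come from the diff.

import os

import g4f
from g4f.client import Client as G4FClient
from openai import OpenAI


def get_client():
    # Assumed helper: an OpenAI-compatible client pointed at the zukijourney
    # API. The base URL and env var name are illustrative, not from the diff.
    return OpenAI(
        base_url="https://api.zukijourney.com/v1",
        api_key=os.environ["ZUKIJOURNEY_API_KEY"],
    )


def get_g4f_client():
    # Assumed helper: the stock g4f client.
    return G4FClient()


def _gen_text(prompt, use_zukijourney=False):
    if use_zukijourney:
        # zukijourney path: plain OpenAI-compatible call, no provider argument.
        client = get_client()
        response = client.chat.completions.create(
            model="llama-3-70b-instruct",
            messages=[{"role": "user", "content": prompt}],
            max_tokens=800,
            temperature=0.7,
        )
    else:
        # g4f path: this commit switches the provider from PerplexityLabs to Llama.
        client = get_g4f_client()
        response = client.chat.completions.create(
            provider=g4f.Provider.Llama,
            model="llama-3-70b-instruct",
            messages=[{"role": "user", "content": prompt}],
            max_tokens=800,
        )
    # The hunk is cut off before the return; returning the first choice's
    # content is an assumption.
    return response.choices[0].message.content

With this shape, _gen_text(prompt, use_zukijourney=True) routes the request through the OpenAI-compatible zukijourney endpoint, while the default keeps the existing g4f path with the new Llama provider.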