fix: switch provider and add zukijourney textgen parameter
This commit is contained in:
parent
b1445e681c
commit
30fb94c2de
@ -80,15 +80,25 @@ Output the prompt in a txt code block.""")
|
|||||||
|
|
||||||
|
|
||||||
|
|
||||||
def _gen_text(prompt, use_zukijourney=False):
    """Generate a text completion for *prompt* via a chat-completion API.

    Parameters
    ----------
    prompt : str
        The user prompt; sent as a single ``{"role": "user"}`` chat message.
    use_zukijourney : bool, optional
        When True, use the zukijourney backend (``get_client()``), which
        selects its own provider. When False (default), use the g4f client
        with the ``Llama`` provider explicitly.

    Returns
    -------
    str
        The assistant message content of the first completion choice.
    """
    # Both backends take identical request parameters; only the client
    # object (and, for g4f, an explicit provider kwarg) differ. Build the
    # shared kwargs once so the two paths cannot drift apart.
    request_kwargs = {
        "model": "llama-3-70b-instruct",
        "messages": [{"role": "user", "content": prompt}],
        "max_tokens": 800,
        "temperature": 0.7,
    }
    if use_zukijourney:
        client = get_client()
    else:
        client = get_g4f_client()
        request_kwargs["provider"] = g4f.Provider.Llama
    response = client.chat.completions.create(**request_kwargs)
    print(response)  # debug: dump the raw API response
    text = response.choices[0].message.content
    return text
|
||||||
|
Loading…
Reference in New Issue
Block a user