feat(textgen): switched to zukijourney api for textgen

This commit is contained in:
Yandrik 2024-05-02 08:04:05 +02:00
parent 3bd90602c5
commit a9ba2b2db2

View File

@@ -24,7 +24,7 @@ def gen_text():
     print(prompt)
     try:
-        text = _gen_text(prompt)
+        text = _gen_text(prompt, use_zukijourney=True)
     except Exception as e:
         print(f"Error generating text: {e}")
         return f"Error generating text: {str(e)}", 500
@@ -61,7 +61,9 @@ anime art, surreal (biomorphic landscape:1.1) on a cloud, art made by complex AI
 Here's the request: {prompt} {f"\nKeep to the following style: {style}" if style else ""}
-Output the prompt in a txt code block.""")
+Output the prompt in a txt code block.""",
+        use_zukijourney=True
+    )
     print(gen_prompt)
@@ -93,8 +95,8 @@ def _gen_text(prompt, use_zukijourney=False):
     else:
         client = get_g4f_client()
         response = client.chat.completions.create(
-            provider=g4f.Provider.Llama,
-            model="llama-3-70b",
+            provider=g4f.Provider.PerplexityLabs,
+            model="llama-3-70b-instruct",
             messages=[{"role": "user", "content": prompt}],
             max_tokens=800,
             temperature=0.7,