fix: switch provider and add zukijourney textgen parameter
This commit is contained in:
		@ -80,15 +80,25 @@ Output the prompt in a txt code block.""")
 | 
			
		||||
 | 
			
		||||
    
 | 
			
		||||
 | 
			
		||||
def _gen_text(prompt):
    """Send *prompt* as a single user message to PerplexityLabs via g4f.

    NOTE(review): the completion response is not returned here — as
    visible, this function only fires the request and discards the result.
    """
    g4f_client = get_g4f_client()
    completion = g4f_client.chat.completions.create(
        provider=g4f.Provider.PerplexityLabs,
        model="llama-3-70b-instruct",
        messages=[{"role": "user", "content": prompt}],
        max_tokens=800,
        temperature=0.7,
    )
 | 
			
		||||
 | 
			
		||||
def _gen_text(prompt, use_zukijourney=False):
    """Generate text for *prompt* via an LLM chat completion.

    Args:
        prompt: User prompt string, sent as a single "user" chat message.
        use_zukijourney: When True, use the zukijourney client returned by
            ``get_client()``; otherwise fall back to the g4f client with the
            ``g4f.Provider.Llama`` provider. Defaults to False.

    Returns:
        str: Content of the first completion choice.
    """
    # Both branches send identical completion parameters; only the client
    # and (for g4f) the provider differ, so build the kwargs once.
    completion_kwargs = dict(
        model="llama-3-70b-instruct",
        messages=[{"role": "user", "content": prompt}],
        max_tokens=800,
        temperature=0.7,
    )
    if use_zukijourney:
        client = get_client()
        response = client.chat.completions.create(**completion_kwargs)
    else:
        client = get_g4f_client()
        response = client.chat.completions.create(
            provider=g4f.Provider.Llama,
            **completion_kwargs,
        )
    # NOTE(review): stray debug output — consider logging at DEBUG level
    # instead of printing the raw response to stdout.
    print(response)
    text = response.choices[0].message.content
    return text
 | 
			
		||||
 | 
			
		||||
		Reference in New Issue
	
	Block a user