fix: fix model name in g4f textgen
This commit is contained in:
@@ -94,7 +94,7 @@ def _gen_text(prompt, use_zukijourney=False):
     client = get_g4f_client()
     response = client.chat.completions.create(
         provider=g4f.Provider.Llama,
-        model="llama-3-70b-instruct",
+        model="llama-3-70b",
         messages=[{"role": "user", "content": prompt}],
         max_tokens=800,
         temperature=0.7,
		Reference in New Issue
	
	Block a user