from huggingface_hub import InferenceApi
import gradio as gr

# Note: replace "your_hugging_face_api_key" with your actual Hugging Face API token.
client = InferenceApi(
    repo_id="mistralai/Mixtral-8x7B-Instruct-v0.1",
    token="your_hugging_face_api_key",
)

def translate_text(input_text, target_language):
    prompt = f"Translate the following text into {target_language}: {input_text}"
    try:
        # InferenceApi clients are called directly; for text-generation models the
        # response is typically a list of dicts, each with a 'generated_text' field.
        response = client(inputs=prompt)
        if isinstance(response, list) and response and "generated_text" in response[0]:
            translated_text = response[0]["generated_text"]
        else:
            translated_text = "Translation error or unexpected model response format."
    except Exception as e:
        translated_text = f"Error: {str(e)}"
    return translated_text

# Build a simple Gradio UI: two text inputs (source text, target language) and one text output.
iface = gr.Interface(
    fn=translate_text,
    inputs=[gr.Textbox(label="Text to Translate"), gr.Textbox(label="Target Language")],
    outputs=gr.Textbox(label="Translated Text"),
    title="Simple Translator with Mixtral",
    description="Translate text to your specified language using the Mixtral model from Hugging Face."
)

iface.launch()
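
# A minimal alternative sketch (not part of the original app): recent huggingface_hub
# releases deprecate InferenceApi in favor of InferenceClient, whose text_generation()
# method returns the generated string directly. The token value is a placeholder you
# would replace with your own key.
#
# from huggingface_hub import InferenceClient
# client = InferenceClient(model="mistralai/Mixtral-8x7B-Instruct-v0.1",
#                          token="your_hugging_face_api_key")
# translated_text = client.text_generation(prompt, max_new_tokens=200)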