|
import os |
|
import gradio as gr |
|
import requests |
|
|
|
# Hugging Face Inference API endpoint for the Mixtral 8x7B instruct model.
API_URL = "https://api-inference.huggingface.co/models/mistralai/Mixtral-8x7B-Instruct-v0.1"

# Fails fast with KeyError at import time if HF_TOKEN is not set in the environment.
HF_TOKEN = os.environ["HF_TOKEN"]

# Bearer-token auth header sent with every inference request.
headers = {"Authorization": f"Bearer {HF_TOKEN}"}
|
|
|
|
|
def generate_from_prompt(prompt):
    """Send *prompt* to the Hugging Face Inference API and return the generated text.

    Parameters
    ----------
    prompt : str
        Complete prompt forwarded verbatim as the model input.

    Returns
    -------
    str
        The model's generated text, or a French error message when the
        request fails or the API returns nothing usable.
    """
    payload = {
        "inputs": prompt,
        "parameters": {
            "max_new_tokens": 180,
            "temperature": 0.9,
            "top_p": 0.95,
            "do_sample": True,
        },
    }

    try:
        # Timeout keeps a stalled API call from hanging the Gradio worker forever.
        response = requests.post(API_URL, headers=headers, json=payload, timeout=60)
        output = response.json()  # ValueError/JSONDecodeError on a non-JSON body
    except (requests.RequestException, ValueError):
        return "⚠️ Erreur : aucune réponse utile."

    # Typical success shape: [{"generated_text": "..."}]. Errors come back as a
    # dict (e.g. {"error": ...}); guard against an empty list or unexpected types.
    if (
        isinstance(output, list)
        and output
        and isinstance(output[0], dict)
        and "generated_text" in output[0]
    ):
        return output[0]["generated_text"]
    if isinstance(output, dict) and "generated_text" in output:
        return output["generated_text"]
    return "⚠️ Erreur : aucune réponse utile."
|
|
|
# Gradio front-end: one multi-line textbox in, the model's raw text out.
prompt_box = gr.Textbox(label="Prompt complet", lines=10)

iface = gr.Interface(
    fn=generate_from_prompt,
    inputs=prompt_box,
    outputs="text",
    title="Prompt direct pour IA hypnopoétique",
    description="Utilisable avec un backend (server.js) qui envoie un prompt complet.",
)

iface.launch()