import os
import requests
import gradio as gr
api_token = os.environ.get("TOKEN")
API_URL = "https://api-inference.huggingface.co/models/meta-llama/Meta-Llama-3-8B-Instruct"
headers = {"Authorization": f"Bearer {api_token}"}
# Send the payload to the Hugging Face Inference API and return the parsed JSON response.
def query(payload):
    response = requests.post(API_URL, headers=headers, json=payload)
    return response.json()
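# Note: for the text-generation task, the Inference API typically returns a list such as
# [{"generated_text": "<prompt followed by the model's completion>"}]; error conditions
# (model still loading, invalid token, etc.) usually come back as a dict with an "error" key.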
def analyze_sentiment(text):
    output = query({
        "inputs": f'''<|begin_of_text|>
<|start_header_id|>system<|end_header_id|>
You are going to analyze the texts I give you and reply only with the word of the category they belong to: for posts about chat models/LLMs return "Chatmodel/LLM", for posts about image generation models return "image_generation", for texts that ask the community for information return "questions", and for texts in which people share a recent discovery return "other", but only if what they are talking about does not fit any of the previous categories.
<|eot_id|>
<|start_header_id|>user<|end_header_id|>
{text}
<|eot_id|>
<|start_header_id|>assistant<|end_header_id|>
'''
    })
    # With the default settings, generated_text echoes the prompt followed by the model's
    # answer, so each category label appears once in the prompt and a second time when the
    # model actually picks it; a count of 2 therefore identifies the chosen category.
    if isinstance(output, list) and len(output) > 0:
        response = output[0].get('generated_text', '').strip().lower()
        questions = response.count('questions')
        chat_model_llm = response.count('chatmodel/llm')
        other = response.count('other')
        image_generation = response.count('image_generation')
        if questions == 2:
            return 'questions'
        elif chat_model_llm == 2:
            return 'Chat Model/LLM'
        elif other == 2:
            return "Other"
        elif image_generation == 2:
            return "Image Generation"
        else:
            return f"Error: ambiguous response - '{response}'"
    # The API did not return a generation (e.g. the model is still loading or the token is invalid).
    return f"Error: unexpected API response - {output}"
demo = gr.Interface(
    fn=analyze_sentiment,
    inputs="text",
    outputs="text"
)
demo.launch()
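# Hypothetical quick check (run locally before launching the UI); the exact label depends
# on the model's answer, but a community question should normally map to "questions":
#   print(analyze_sentiment("Has anyone compared Llama 3 8B with Mistral 7B for coding?"))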