File size: 1,872 Bytes
57d46c6
b11c8cd
 
50639ab
57d46c6
b11c8cd
57d46c6
b11c8cd
 
 
57d46c6
 
 
 
b11c8cd
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2f7d2fd
 
b11c8cd
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
import os
import requests
import gradio as gr

# Hugging Face Inference API credentials/endpoint.
# NOTE(review): if the TOKEN env var is unset, api_token is None and the
# Authorization header becomes "Bearer None" — requests will get 401s.
api_token = os.environ.get("TOKEN")
API_URL = "https://api-inference.huggingface.co/models/meta-llama/Meta-Llama-3-8B-Instruct"
headers = {"Authorization": f"Bearer {api_token}"}



def query(payload):
    """POST *payload* to the HF Inference API and return the decoded JSON.

    The response may be a list of generation dicts on success, or an
    error dict (e.g. while the model is loading) — callers must check.
    A timeout is set so a stalled API call cannot hang the Gradio
    handler indefinitely.
    """
    response = requests.post(API_URL, headers=headers, json=payload, timeout=60)
    return response.json()

def analyze_sentiment(text):
    output = query({
        "inputs": f'''<|begin_of_text|>
<|start_header_id|>system<|end_header_id|>
You're going to analyze the texts I'm going to give you and you're only going to tell me which category they belong to by answering only the words that correspond to the following categories: for posts that talk about chat models/LLM you'll return "Chatmodel/LLM", for posts that talk about image generation models you'll return "image_generation", for texts that ask for information from the community you'll return "questions".For texts about recent discoveries that don't fit into the previous categories, you'll return "other".

<|eot_id|>
<|start_header_id|>user<|end_header_id|>
{text}
<|eot_id|>
<|start_header_id|>assistant<|end_header_id|>

'''
    })



    if isinstance(output, list) and len(output) > 0:
        response = output[0].get('generated_text', '').strip().lower()


        questions = response.count('questions')
        ChatmodelLLM = response.count('Chatmodel/LLM')
        other = response.count('other')
        image_generation = response.count("image_generation")

    if questions == 2:
        return 'questions'
    elif ChatmodelLLM == 2:
        return 'Chat Model/LLM'
    elif other == 2 : 
        return "Other"
    elif image_generation == 2 : 
        return "Image Generation"
    else :
        return f"Erreur: Réponse ambiguë - '{response}'"


# Minimal Gradio UI: one text box in, the category label (or error string) out.
demo = gr.Interface(fn=analyze_sentiment, inputs="text", outputs="text")
demo.launch()