import os
import requests
from bs4 import BeautifulSoup

# Hugging Face access token, read from the TOKEN environment variable
api_token = os.environ.get("TOKEN")
API_URL = "https://api-inference.huggingface.co/models/meta-llama/Llama-2-7b-chat-hf"
headers = {"Authorization": f"Bearer {api_token}"}

def query(payload):
    response = requests.post(API_URL, headers=headers, json=payload)
    return response.json()
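# Note: on success the text-generation endpoint returns a list like
# [{"generated_text": "..."}]; on failure (bad token, model still loading, etc.)
# it returns a dict with an "error" key, which callers should be prepared for.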

def analyze_sentiment(pl7_text):
    output = query({
        "inputs": f'''
system
Analyze the text I give you and reply with only the category it belongs to, using exactly one of the following labels:
For posts that talk about chat models/LLM, return "Chatmodel/LLM"
For posts that talk about image generation models, return "image_generation"
For texts that ask for information from the community, return "questions"
For posts about fine-tuning or model adjustment, return "fine_tuning"
For posts related to ethics and bias in AI, return "ethics_bias"
For posts about datasets and data preparation, return "datasets"
For posts about tools and libraries, return "tools_libraries"
For posts containing tutorials and guides, return "tutorials_guides"
For posts about debugging and problem-solving, return "debugging"
Respond only with the category name, without any additional explanation or text.

user
{pl7_text}

assistant
''',
        # Ask the API to return only the model's completion, not the echoed
        # prompt, so the first line of the output is the category itself.
        "parameters": {"return_full_text": False},
    })
    
    print("API Response:", output)  # Print the full API response

    # Extract the generated text, guarding against an error response
    if isinstance(output, list) and output:
        generated_text = output[0].get('generated_text', '')
    else:
        generated_text = ''
    print("Generated Text:", generated_text)  # Print the generated text

    # Use the first non-empty line of the completion as the category
    lines = [line.strip().lower() for line in generated_text.split('\n') if line.strip()]
    if lines:
        return lines[0]
    return "unknown"

# Fetch a single post
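# The 'pl-7' CSS class is assumed here to mark the post body on
# https://huggingface.co/posts; this selector may break if the page markup changes.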
url = 'https://huggingface.co/posts'
response = requests.get(url)

if response.status_code == 200:
    soup = BeautifulSoup(response.content, 'html.parser')
    pl7_element = soup.find(class_='pl-7')
    if pl7_element:
        pl7_text = pl7_element.text.strip()
        print("Post content:")
        print(pl7_text)
        print("\nAnalyzing sentiment...")
        sentiment = analyze_sentiment(pl7_text)
        print(f"\nSentiment category: {sentiment}")
    else:
        print("No post found with class 'pl-7'")
else:
    print(f"Error {response.status_code} when retrieving {url}")