import os
from huggingface_hub import InferenceClient


def analyze_data(prompt):
    """
    Use a Hugging Face LLM to generate insights from raw search data.
    """
    HF_TOKEN = os.getenv("HF_TOKEN")
    if not HF_TOKEN:
        return "Error: Hugging Face token not found in environment variables"
    try:
        # Send the prompt to the hosted model via the Inference API.
        client = InferenceClient(
            model="Qwen/Qwen2.5-Coder-7B-Instruct",
            token=HF_TOKEN
        )
        return client.text_generation(prompt, max_new_tokens=500)
    except Exception as e:
        return f"LLM generation failed: {str(e)}"