HF Space Docker
- .DS_Store +0 -0
- Dockerfile +36 -0
- app/__init__.py +7 -0
- app/__pycache__/__init__.cpython-310.pyc +0 -0
- app/__pycache__/__init__.cpython-313.pyc +0 -0
- app/__pycache__/config.cpython-310.pyc +0 -0
- app/__pycache__/models.cpython-310.pyc +0 -0
- app/__pycache__/models.cpython-313.pyc +0 -0
- app/__pycache__/routes.cpython-310.pyc +0 -0
- app/__pycache__/routes.cpython-313.pyc +0 -0
- app/__pycache__/services.cpython-310.pyc +0 -0
- app/__pycache__/utils.cpython-310.pyc +0 -0
- app/config.py +12 -0
- app/models.py +26 -0
- app/routes.py +172 -0
- app/services.py +42 -0
- app/utils.py +50 -0
- requirements.txt +90 -0
- wsgi.py +2 -0
.DS_Store
ADDED
Binary file (6.15 kB)

Dockerfile
ADDED
@@ -0,0 +1,36 @@
# ───────────────────────────────────────────────
# MindPalace AI – Hugging Face Spaces (Docker)
# ───────────────────────────────────────────────
# • CPU-only base image with Python 3.11
# • Installs system libs (tesseract, ffmpeg) needed by OCR / audio
# • Installs Python dependencies from requirements.txt
# • Runs Gunicorn with gevent workers on HF-required port 7860
# • Space URL will be: https://<handle>-MindPalaceAI.hf.space
# ───────────────────────────────────────────────

FROM python:3.11-slim

# 1️⃣ Install system packages (add more as you need)
RUN apt-get update && \
    apt-get install -y --no-install-recommends \
        tesseract-ocr \
        libgl1 \
        ffmpeg && \
    rm -rf /var/lib/apt/lists/*

# 2️⃣ Set work directory
WORKDIR /app

# 3️⃣ Copy & install Python deps first (layer cache)
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

# 4️⃣ Copy the rest of the application code
COPY . .

# 5️⃣ Hugging Face automatically sets $PORT=7860
ENV PORT=7860
EXPOSE ${PORT}

# 6️⃣ Start the server (shell form so ${PORT} is expanded at runtime; the JSON exec form would pass the literal "${PORT}" to gunicorn)
CMD gunicorn wsgi:app -k gevent --timeout 300 --bind 0.0.0.0:${PORT}

app/__init__.py
ADDED
@@ -0,0 +1,7 @@
from flask import Flask
from app.routes import bp

def create_app():
    app = Flask(__name__)
    app.register_blueprint(bp)
    return app

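The factory above is what wsgi.py (at the bottom of this diff) calls to build the WSGI app. As a quick sanity check it can also be exercised without a server through Flask's test client; a minimal sketch, assuming the environment variables read by app/config.py are set (importing the app loads the Whisper and transformer models, so the first call is slow, and the request text is an arbitrary example):

from app import create_app

app = create_app()
with app.test_client() as client:
    # POST to the blueprint's /analyze_text route, no network required
    resp = client.post("/analyze_text", json={"text": "Call the dentist tomorrow"})
    print(resp.status_code, resp.get_json())
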
app/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (409 Bytes)

app/__pycache__/__init__.cpython-313.pyc
ADDED
Binary file (495 Bytes)

app/__pycache__/config.cpython-310.pyc
ADDED
Binary file (562 Bytes)

app/__pycache__/models.cpython-310.pyc
ADDED
Binary file (699 Bytes)

app/__pycache__/models.cpython-313.pyc
ADDED
Binary file (838 Bytes)

app/__pycache__/routes.cpython-310.pyc
ADDED
Binary file (4.07 kB)

app/__pycache__/routes.cpython-313.pyc
ADDED
Binary file (6.94 kB)

app/__pycache__/services.cpython-310.pyc
ADDED
Binary file (1.33 kB)

app/__pycache__/utils.cpython-310.pyc
ADDED
Binary file (1.75 kB)

app/config.py
ADDED
@@ -0,0 +1,12 @@
import os
from dotenv import load_dotenv

# Load environment variables
load_dotenv()

class Config:
    FIREWORKS_API_KEY = os.getenv("FIREWORKS_API_KEY")
    WHISPER_MODEL = os.getenv("WHISPER_MODEL")
    SENTIMENT_MODEL = os.getenv("SENTIMENT_MODEL")
    EMOTION_MODEL = os.getenv("EMOTION_MODEL")
    YOLO_MODEL = os.getenv("YOLO_MODEL")

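Every value here is read from the environment with no fallback, so a missing variable (e.g. WHISPER_MODEL) arrives as None and only fails later, when app/models.py calls whisper.load_model. A variant with illustrative defaults is sketched below; the fallback names are assumptions for illustration, not part of this commit:

import os
from dotenv import load_dotenv

load_dotenv()

class Config:
    FIREWORKS_API_KEY = os.getenv("FIREWORKS_API_KEY")
    # Fallbacks below are illustrative defaults, not values from the commit.
    WHISPER_MODEL = os.getenv("WHISPER_MODEL", "base")
    SENTIMENT_MODEL = os.getenv("SENTIMENT_MODEL", "cardiffnlp/twitter-roberta-base-sentiment-latest")
    EMOTION_MODEL = os.getenv("EMOTION_MODEL", "j-hartmann/emotion-english-distilroberta-base")
    YOLO_MODEL = os.getenv("YOLO_MODEL", "ultralytics/yolov5")
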
app/models.py
ADDED
@@ -0,0 +1,26 @@
import whisper
import torch
from transformers import pipeline
from fireworks.client import Fireworks
from app.config import Config

# 🔹 Load AI Models
audio_model = whisper.load_model(Config.WHISPER_MODEL)
# 1. Specialized Sentiment Model (Direct Labeling)
sentiment_pipeline = pipeline(
    "text-classification",
    model="cardiffnlp/twitter-roberta-base-sentiment-latest",
    return_all_scores=False
)

# 2. Focused Emotion Model (Single Emotion)
emotion_pipeline = pipeline(
    "text-classification",
    model="j-hartmann/emotion-english-distilroberta-base",
    top_k=1
)

#yolo_model = torch.hub.load(Config.YOLO_MODEL, 'yolov5s')

# 🔹 Initialize Fireworks AI Client
client = Fireworks(api_key=Config.FIREWORKS_API_KEY)

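All models here load eagerly at import time, which keeps request latency low but makes container start-up (and any `from app.models import ...`) heavy. A lazy alternative, shown only as a sketch and not what this commit does, defers each load until first use:

from functools import lru_cache

import whisper
from transformers import pipeline

from app.config import Config

@lru_cache(maxsize=1)
def get_audio_model():
    # Loaded on the first call, then cached for the process lifetime.
    return whisper.load_model(Config.WHISPER_MODEL)

@lru_cache(maxsize=1)
def get_sentiment_pipeline():
    return pipeline(
        "text-classification",
        model="cardiffnlp/twitter-roberta-base-sentiment-latest",
    )
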
app/routes.py
ADDED
@@ -0,0 +1,172 @@
from flask import Blueprint, request, jsonify
from werkzeug.utils import secure_filename
import os
import easyocr
import pytesseract  # Ensure this is imported
from PIL import Image
from app.models import audio_model, sentiment_pipeline, emotion_pipeline
from app.services import extract_tasks
from app.utils import generate_tags, error_response

# Initialize Flask Blueprint
bp = Blueprint('main', __name__)

# Initialize the EasyOCR reader for English only (disable GPU if not available)
reader = easyocr.Reader(['en'], gpu=False)

EMOTION_SCORE_THRESHOLD = 0.15  # Adjust based on your testing
MIN_SENTIMENT_CONFIDENCE = 0.4  # Below this becomes "neutral"

# =============================
# 🔹 API Routes
# =============================

@bp.route('/transcribe', methods=['POST'])
def transcribe():
    if 'file' not in request.files:
        return error_response("No file provided", 400)

    file = request.files['file']
    file_path = os.path.join("/tmp", secure_filename(file.filename))
    file.save(file_path)

    try:
        # Transcribe Audio
        result = audio_model.transcribe(file_path)
        transcription = result.get("text", "")

        if not transcription.strip():
            return error_response("Transcription is empty", 400)

        # Send transcription to /analyze_text API
        analysis_response = analyze_text_internal(transcription)
        tags = generate_tags(transcription)  # Function to extract tags from text

        return jsonify({
            "transcription": transcription,
            "sentiment": analysis_response["sentiment"],
            "emotion": analysis_response["emotion"],
            "confidence": analysis_response["confidence"],
            "tags": tags
        })
    except Exception as e:
        return error_response(str(e), 500)


@bp.route('/analyze_image', methods=['POST'])
def analyze_image():
    if 'file' not in request.files:
        return error_response("No image file provided", 400)

    file = request.files['file']
    filename = secure_filename(file.filename)
    file_path = os.path.join("/tmp", filename)
    file.save(file_path)

    try:
        # Use EasyOCR in detail mode to get confidence scores
        results = reader.readtext(file_path, detail=1)

        # Filter out entries with low confidence (e.g., below 0.5)
        filtered_texts = [text for bbox, text, conf in results if conf > 0.5]
        extracted_text = "\n".join(filtered_texts)

        print("Filtered Extracted text:", extracted_text)

        if not extracted_text.strip():
            return error_response("No meaningful text found in image", 400)

        # Analyze the extracted text to get sentiment, emotion, etc.
        analysis_response = analyze_text_internal(extracted_text)
        tags = generate_tags(extracted_text)

        return jsonify({
            "extracted_text": extracted_text.strip(),
            "sentiment": analysis_response.get("sentiment"),
            "emotion": analysis_response.get("emotion"),
            "confidence": analysis_response.get("confidence"),
            "tags": tags
        })
    except Exception as e:
        return error_response(str(e), 500)


# Internal function to call analyze_text directly
def analyze_text_internal(text):
    try:
        # Get sentiment (positive/neutral/negative)
        sentiment = sentiment_pipeline(text)[0]

        # Get dominant emotion (anger/disgust/fear/joy/neutral/sadness/surprise)
        emotion = emotion_pipeline(text)[0][0]

        return {
            "sentiment": sentiment['label'],
            "emotion": emotion['label'],
            "confidence": {
                "sentiment": round(sentiment['score'], 3),
                "emotion": round(emotion['score'], 3)
            }
        }
    except Exception as e:
        print(f"Analysis error: {str(e)}")
        return error_response(f"Processing error: {str(e)}", 500)


@bp.route('/analyze_text', methods=['POST'])
def analyze_text():
    data = request.json
    if not data or 'text' not in data:
        return error_response("No text provided", 400)

    text = data['text'].strip().lower()

    try:
        # Get sentiment (positive/neutral/negative)
        sentiment = sentiment_pipeline(text)[0]

        # Get dominant emotion (anger/disgust/fear/joy/neutral/sadness/surprise)
        emotion = emotion_pipeline(text)[0][0]

        tags = generate_tags(text)

        return {
            "sentiment": sentiment['label'],
            "emotion": emotion['label'],
            "confidence": {
                "sentiment": round(sentiment['score'], 3),
                "emotion": round(emotion['score'], 3)
            },
            "tags": tags
        }
    except Exception as e:
        print(f"Analysis error: {str(e)}")
        return error_response(f"Processing error: {str(e)}", 500)


# 📌 3. Extract Actionable Tasks
@bp.route('/extract_actions', methods=['POST'])
def extract_actions():
    data = request.json
    if not data or 'text' not in data:
        return error_response("No text provided", 400)

    text = data['text']
    try:
        tasks = extract_tasks(text)
        return jsonify({"tasks": tasks})
    except Exception as e:
        return error_response(str(e), 500)


# =============================
# 🔹 Error Handling
# =============================

@bp.errorhandler(404)
def not_found_error(error):
    return error_response("Not Found", 404)

@bp.errorhandler(500)
def internal_error(error):
    return error_response("Internal Server Error", 500)

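For reference, the endpoints above can be exercised over HTTP once the container is running; a rough sketch with `requests` (already in requirements.txt), assuming the server is reachable on localhost:7860 and `note.mp3` is a placeholder file name:

import requests

BASE = "http://localhost:7860"

# Text analysis: sentiment, dominant emotion, confidence scores and tags
r = requests.post(f"{BASE}/analyze_text", json={"text": "Finish the report by Friday"})
print(r.json())

# Audio transcription plus the same analysis on the transcript
with open("note.mp3", "rb") as f:
    r = requests.post(f"{BASE}/transcribe", files={"file": f})
print(r.json())

# Task extraction via the Fireworks-hosted model
r = requests.post(f"{BASE}/extract_actions", json={"text": "Email Sam tomorrow and book flights next week"})
print(r.json())
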
app/services.py
ADDED
@@ -0,0 +1,42 @@
import json
from app.models import client
from datetime import datetime

# 🔹 Extract Actionable Tasks
def extract_tasks(text):
    today_date = datetime.today().strftime("%Y-%m-%d")

    response = client.chat.completions.create(
        model="accounts/fireworks/models/llama-v3p1-8b-instruct",
        messages=[
            {
                "role": "system",
                "content": (
                    f"You are a task extraction assistant. Today's date is **{today_date}**.\n"
                    "Your goal is to extract **exactly 2** actionable tasks from the given text.\n"
                    "Each task must have:\n"
                    "- A **title** (short summary of the task)\n"
                    "- A **dueDate** in `YYYY-MM-DD` format (Convert words like 'tomorrow', 'next week' into actual dates based on today's date)\n\n"
                    "Return **ONLY valid JSON** with this format:\n"
                    "{\n"
                    '  "tasks": [\n'
                    '    {\n'
                    '      "title": "Task description",\n'
                    '      "dueDate": "YYYY-MM-DD"  # Always absolute date\n'
                    "    }\n"
                    "  ]\n"
                    "}"
                ),
            },
            {"role": "user", "content": text},
        ],
        max_tokens=200,
    )

    # Ensure response is valid JSON
    try:
        model_output = response.choices[0].message.content.strip()
        parsed_response = json.loads(model_output)
        return parsed_response.get("tasks", [])
    except json.JSONDecodeError:
        return []

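A small usage sketch (it needs a valid FIREWORKS_API_KEY and network access; the input sentence is arbitrary). Note that the function returns an empty list whenever the model's reply is not valid JSON:

from app.services import extract_tasks

tasks = extract_tasks("Email the landlord tomorrow and renew the car insurance next week.")
# Expected shape: [{"title": "...", "dueDate": "YYYY-MM-DD"}, ...]
print(tasks)
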
app/utils.py
ADDED
@@ -0,0 +1,50 @@
import json
from flask import jsonify

# 🔹 Generate Tags from Text
def generate_tags(content):
    stop_words = {"the", "and", "is", "in", "to", "a", "of", "on", "for"}
    words = content.lower().split()
    tags = [word for word in words if word not in stop_words and len(word) > 3]
    return list(set(tags))

# 🔹 Parse JSON Responses
def parse_json(response):
    try:
        return json.loads(response)
    except json.JSONDecodeError:
        return None

# 🔹 Error Handlers
def error_response(message, status_code):
    return jsonify({"error": message}), status_code

# Update emotion categorization mapping
EMOTION_CATEGORIES = {
    "goal-oriented": ["desire", "anticipation", "optimism"],
    "social": ["gratitude", "admiration", "love"],
    "reflective": ["remorse", "sadness", "disappointment"],
    "urgent": ["fear", "nervousness", "surprise"],
    "critical": ["anger", "disgust", "annoyance"],
    "joyful": ["joy", "excitement", "amusement"]
}

SENTIMENT_MAP = {
    "LABEL_0": "negative",
    "LABEL_1": "neutral",
    "LABEL_2": "positive"
}

def categorize_memory(emotions, sentiment):
    """Improved categorization with fallback logic"""
    if not emotions:
        return f"uncategorized-{sentiment['label']}"

    # Find direct matches
    for emotion in emotions:
        for category, keywords in EMOTION_CATEGORIES.items():
            if emotion['label'] in keywords:
                return f"{category}-{sentiment['label']}"

    # Fallback to sentiment-based category
    return f"neutral-{sentiment['label']}"

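A quick illustration of the helpers with made-up inputs (generate_tags returns a set-derived list, so ordering is not stable, and categorize_memory expects emotion/sentiment dicts shaped like the pipeline outputs in app/models.py):

from app.utils import generate_tags, categorize_memory

print(generate_tags("Plan the product launch and email the design team"))
# e.g. ['plan', 'product', 'launch', 'email', 'design', 'team'] in some order

emotions = [{"label": "joy", "score": 0.91}]
sentiment = {"label": "positive", "score": 0.88}
print(categorize_memory(emotions, sentiment))  # -> "joyful-positive"
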
requirements.txt
ADDED
@@ -0,0 +1,90 @@
accelerate==1.3.0
annotated-types==0.7.0
anyio==4.8.0
blinker==1.9.0
certifi==2024.12.14
charset-normalizer==3.4.1
click==8.1.8
contourpy==1.3.1
cycler==0.12.1
easyocr==1.7.2
exceptiongroup==1.2.2
filelock==3.16.1
fireworks-ai==0.15.12
Flask==3.1.0
fonttools==4.55.6
fsspec==2024.12.0
gevent==25.4.2
gitdb==4.0.12
GitPython==3.1.44
greenlet==3.2.1
gunicorn==23.0.0
h11==0.14.0
httpcore==1.0.7
httpx==0.28.1
httpx-sse==0.4.0
httpx-ws==0.7.1
huggingface-hub==0.27.1
idna==3.10
imageio==2.37.0
itsdangerous==2.2.0
Jinja2==3.1.5
kiwisolver==1.4.8
lazy_loader==0.4
llvmlite==0.44.0
MarkupSafe==3.0.2
matplotlib==3.10.0
mistral_inference==1.5.0
more-itertools==10.6.0
mpmath==1.3.0
networkx==3.4.2
ninja==1.11.1.3
numba==0.61.0
numpy==1.24.4
openai-whisper==20240930
opencv-python==4.11.0.86
opencv-python-headless==4.11.0.86
packaging==24.2
pandas==2.2.3
pillow==11.1.0
psutil==6.1.1
py-cpuinfo==9.0.0
pyclipper==1.3.0.post6
pydantic==2.10.6
pydantic_core==2.27.2
pyparsing==3.2.1
pytesseract==0.3.13
python-bidi==0.6.3
python-dateutil==2.9.0.post0
python-dotenv==1.0.1
pytz==2024.2
PyYAML==6.0.2
regex==2024.11.6
requests==2.32.3
safetensors==0.5.2
scikit-image==0.25.1
scipy==1.15.1
seaborn==0.13.2
sentencepiece==0.2.0
shapely==2.0.7
six==1.17.0
smmap==5.0.2
sniffio==1.3.1
sympy==1.13.1
tifffile==2025.1.10
tiktoken==0.8.0
tokenizers==0.21.0
torch==2.5.1
torchaudio==2.5.1
torchvision==0.20.1
tqdm==4.67.1
transformers==4.48.1
typing_extensions==4.12.2
tzdata==2025.1
ultralytics==8.3.67
ultralytics-thop==2.0.14
urllib3==2.3.0
Werkzeug==3.1.3
wsproto==1.2.0
zope.event==5.0
zope.interface==7.2

wsgi.py
ADDED
@@ -0,0 +1,2 @@
from app import create_app  # or simply `from app import app`
app = create_app()
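Gunicorn imports `app` from this module inside the container. For local debugging outside Docker, the same object can be served with the Flask development server; a minimal sketch, assuming the dependencies and environment variables are in place:

from wsgi import app

if __name__ == "__main__":
    # Dev server only; the Space itself runs gunicorn with gevent workers.
    app.run(host="0.0.0.0", port=7860, debug=True)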