Sonia2k5 committed on
Commit
73b1793
·
verified ·
1 Parent(s): e0fd206

Upload 3 files

Browse files
Files changed (3) hide show
  1. requirements.txt +6 -0
  2. song.py +143 -0
  3. song_model.pkl +3 -0
requirements.txt ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ gradio
2
+ numpy
3
+ librosa
4
+ scikit-learn
5
+ joblib
6
+ requests
song.py ADDED
@@ -0,0 +1,143 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ """SONG.ipynb
3
+
4
+ Automatically generated by Colab.
5
+
6
+ Original file is located at
7
+ https://colab.research.google.com/drive/1D_uRTl2aO65nt7ABxgODlb57aLqY05HC
8
+ """
9
+
10
import os
import sys

import gradio as gr
import joblib  # to save/load model
import librosa
import numpy as np
import requests
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, classification_report
from sklearn.model_selection import train_test_split
19
+
20
# === CONFIGURATION ===
# Unofficial JioSaavn search endpoint; {query} is filled in by get_song_from_mood.
SAAVN_SEARCH_URL = "https://saavn.dev/api/search/songs?query={query}&limit=1"
DATA_DIR = "/content/drive/MyDrive/audio"  # update this if your folder is different

# Create data directory if it doesn't exist.
# isdir + exist_ok=True avoids the check-then-create race of the
# original os.path.exists + os.makedirs pair.
if not os.path.isdir(DATA_DIR):
    os.makedirs(DATA_DIR, exist_ok=True)
    print(f"Created directory: {DATA_DIR}. Please upload your audio dataset into this folder.")
    # NOTE(review): consider aborting here — training below will find no data.
30
+ # === FEATURE EXTRACTION ===
31
def extract_features(file_path):
    """Return the time-averaged 13-coefficient MFCC vector for one audio file.

    Loads 3 seconds of audio starting at a 0.5 s offset, computes MFCCs,
    and averages them across frames. Returns None when the file cannot
    be read or processed (the error is printed, not raised).
    """
    try:
        signal, sample_rate = librosa.load(file_path, duration=3, offset=0.5)
        mfcc_matrix = librosa.feature.mfcc(y=signal, sr=sample_rate, n_mfcc=13)
        return np.mean(mfcc_matrix.T, axis=0)
    except Exception as err:
        print("Error loading file:", err)
        return None
39
+
40
+ # === LOAD DATA AND TRAIN MODEL ===
41
def train_model(data_dir):
    """Train a RandomForest mood classifier from labelled audio folders.

    Expects `data_dir` to contain one sub-folder per emotion, named like
    "<anything>_<emotion>"; each folder holds .wav files. Extracts MFCC
    features, trains on an 80/20 split, saves the model to
    "voice_mood_model.pkl", and prints evaluation metrics.

    Returns the fitted model, or None when no usable audio was found.
    """
    features = []
    labels = []
    for folder in os.listdir(data_dir):
        folder_path = os.path.join(data_dir, folder)
        # Skip stray regular files (e.g. .DS_Store): the original code
        # called os.listdir on them and crashed with NotADirectoryError.
        if not os.path.isdir(folder_path):
            continue
        # Emotion label is the last underscore-separated token of the folder name.
        emotion = folder.split('_')[-1].lower()
        for filename in os.listdir(folder_path):
            # Case-insensitive suffix check so ".WAV" files are not silently skipped.
            if filename.lower().endswith('.wav'):
                mfcc = extract_features(os.path.join(folder_path, filename))
                if mfcc is not None:
                    features.append(mfcc)
                    labels.append(emotion)

    # Nothing usable was loaded — bail out instead of crashing in numpy/sklearn.
    if not features:
        print(f"No audio files found in {data_dir}. Please upload your dataset.")
        return None

    X = np.array(features)
    y = np.array(labels)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
    model = RandomForestClassifier()
    model.fit(X_train, y_train)

    # Persist so later runs can load instead of retraining.
    joblib.dump(model, "voice_mood_model.pkl")

    # Evaluate on the held-out split (informational only).
    y_pred = model.predict(X_test)
    print("βœ… Model Trained - Accuracy:", accuracy_score(y_test, y_pred))
    print(classification_report(y_test, y_pred))
    return model
74
+
75
# === LOAD EXISTING MODEL OR TRAIN A NEW ONE ===
# Reuse a previously saved model when possible; retrain on any load failure.
model = None
if os.path.exists("voice_mood_model.pkl"):
    try:
        model = joblib.load("voice_mood_model.pkl")
        print("Loaded existing model.")
    except Exception as e:
        print(f"Error loading model: {e}. Retraining model.")
        model = train_model(DATA_DIR)
else:
    model = train_model(DATA_DIR)

# Without a working model the app cannot function. The original code only
# printed "Exiting." and left a TODO suggesting sys.exit — actually stop here.
if model is None:
    print("Model could not be trained or loaded. Exiting.")
    sys.exit(1)
92
+ # === PREDICT EMOTION FROM AUDIO ===
93
def predict_emotion(file_path):
    """Classify the mood of an audio file with the module-level model.

    Returns the predicted emotion label, "Unknown" when features could
    not be extracted, or an error string when no model is available.
    """
    mfcc = extract_features(file_path)
    if mfcc is None:
        return "Unknown"
    # Guard against the model having failed to load/train.
    if not model:
        print("Model not loaded or trained.")
        return "Error: Model not available"
    # The classifier expects a 2-D (1, n_features) array.
    return model.predict(mfcc.reshape(1, -1))[0]
103
+
104
+ # === GET SONG FROM SAAVN ===
105
def get_song_from_mood(mood, language="tamil"):
    """Look up one song matching `mood` (and `language`) on JioSaavn.

    Returns a markdown string with title, artist, and a listen link, or
    a fallback message when the lookup fails or yields no results.
    """
    query = f"{mood} {language} song"
    url = SAAVN_SEARCH_URL.format(query=query)
    try:
        # timeout prevents the Gradio handler from hanging forever if the
        # (unofficial) API is unreachable — the original had no timeout.
        response = requests.get(url, timeout=10)
        response.raise_for_status()  # Raise an exception for bad status codes
        data = response.json()
        if data.get("data") and data["data"]["results"]:
            song = data["data"]["results"][0]
            title = song["name"]
            # The API has changed its artist schema over time; try the
            # current nested shape first, then legacy fields, then a fallback.
            artist = (
                song.get("artists", {}).get("primary", [{}])[0].get("name") or
                song.get("primaryArtists") or song.get("artist") or "Unknown Artist"
            )
            return f"🎢 **{title}** by *{artist}*\nπŸ”— [Listen here]({song['url']})"
    # One handler replaces the two duplicate except blocks of the original
    # (RequestException is a subclass of Exception; behavior is identical).
    except Exception as e:
        print("Song fetch error:", e)
    return "πŸ˜• No song found for this mood."
125
+
126
+ # === GRADIO INTERFACE FUNCTION ===
127
def detect_and_recommend(audio_path):
    """Gradio handler: detect the mood of a voice clip and recommend a song.

    Returns a markdown string combining the detected mood and the song
    suggestion, or an error message when no model is available.
    """
    # Guard clause instead of the original if/else nesting.
    if not model:
        return "Model not trained or loaded. Cannot detect mood."
    mood = predict_emotion(audio_path)
    song = get_song_from_mood(mood)
    return f"🎭 Detected Mood: **{mood}**\n\n{song}"
134
+
135
+
136
# === LAUNCH GRADIO APP ===
# Build the interface first, then launch — same behavior, named for clarity.
demo = gr.Interface(
    fn=detect_and_recommend,
    inputs=gr.Audio(type="filepath", label="πŸŽ™ Upload Your Voice"),
    outputs="markdown",
    title="🎀 Voice2Vibes (Offline Version)",
    description="Detects mood from your voice using your dataset and recommends a matching Tamil song 🎢",
)
demo.launch()
song_model.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d06471fcb1488261721e062d218a7480b4af23043de51fff14343bdc89f37df4
3
+ size 1168089