# -*- coding: utf-8 -*-
"""SONG.ipynb — detect mood from a voice clip and recommend a matching song.

Automatically generated by Colab.
Original file is located at
    https://colab.research.google.com/drive/1D_uRTl2aO65nt7ABxgODlb57aLqY05HC
"""

import os

import gradio as gr
import joblib  # to save/load the trained model
import librosa
import numpy as np
import requests
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, classification_report
from sklearn.model_selection import train_test_split

# === CONFIGURATION ===
SAAVN_SEARCH_URL = "https://saavn.dev/api/search/songs?query={query}&limit=1"
DATA_DIR = "audio"  # update this if your folder is different
MODEL_PATH = "voice_mood_model.pkl"

# Create the data directory if it doesn't exist.
if not os.path.exists(DATA_DIR):
    os.makedirs(DATA_DIR)
    print(f"Created directory: {DATA_DIR}. Please upload your audio dataset into this folder.")
    # You might want to exit or handle this case where data is missing


# === FEATURE EXTRACTION ===
def extract_features(file_path):
    """Return the mean MFCC vector (13 coefficients) for an audio file.

    Loads 3 seconds of audio starting 0.5 s in. Returns None when the file
    cannot be loaded/decoded (the error is printed, not raised).
    """
    try:
        audio, sr = librosa.load(file_path, duration=3, offset=0.5)
        mfccs = librosa.feature.mfcc(y=audio, sr=sr, n_mfcc=13)
        return np.mean(mfccs.T, axis=0)
    except Exception as e:
        print("Error loading file:", e)
        return None


# === LOAD DATA AND TRAIN MODEL ===
def train_model(data_dir):
    """Train a RandomForest mood classifier from .wav files under data_dir.

    Each sub-folder's name is expected to end in `_<emotion>`; that suffix
    (lower-cased) becomes the label for every .wav inside it.

    Side effect: saves the fitted model to MODEL_PATH.
    Returns the fitted model, or None when no usable audio was found.
    """
    features = []
    labels = []
    for folder in os.listdir(data_dir):
        folder_path = os.path.join(data_dir, folder)
        # Skip stray non-directories (e.g. .DS_Store) that would crash listdir.
        if not os.path.isdir(folder_path):
            continue
        emotion = folder.split('_')[-1].lower()
        for filename in os.listdir(folder_path):
            if filename.endswith('.wav'):
                file_path = os.path.join(folder_path, filename)
                mfcc = extract_features(file_path)
                if mfcc is not None:
                    features.append(mfcc)
                    labels.append(emotion)

    # Check if any data was loaded
    if not features:
        print(f"No audio files found in {data_dir}. Please upload your dataset.")
        return None  # Or raise an error

    X = np.array(features)
    y = np.array(labels)
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.2, random_state=42
    )

    model = RandomForestClassifier()
    model.fit(X_train, y_train)

    # Save the model so later runs can skip retraining.
    joblib.dump(model, MODEL_PATH)

    # Evaluate on the held-out split (informational only).
    y_pred = model.predict(X_test)
    print("āœ… Model Trained - Accuracy:", accuracy_score(y_test, y_pred))
    print(classification_report(y_test, y_pred))
    return model


# Load the existing model if present; otherwise (or on load failure) train anew.
model = None
if os.path.exists(MODEL_PATH):
    try:
        model = joblib.load(MODEL_PATH)
        print("Loaded existing model.")
    except Exception as e:
        print(f"Error loading model: {e}. Retraining model.")
        model = train_model(DATA_DIR)
else:
    model = train_model(DATA_DIR)


# === PREDICT EMOTION FROM AUDIO ===
def predict_emotion(file_path):
    """Predict the mood label for one audio file.

    Returns the model's label, "Unknown" when the file is unreadable, or an
    error string when no model is available.
    """
    mfcc = extract_features(file_path)
    if mfcc is not None:
        # Ensure the model is available before predicting.
        if model:
            return model.predict(mfcc.reshape(1, -1))[0]
        print("Model not loaded or trained.")
        return "Error: Model not available"
    return "Unknown"


# === GET SONG FROM SAAVN ===
def get_song_from_mood(mood, language="tamil"):
    """Search the Saavn API for a song matching the mood; return markdown.

    Falls back to a "no song found" message on any request/parse failure
    (errors are printed, not raised).
    """
    query = f"{mood} {language} song"
    url = SAAVN_SEARCH_URL.format(query=query)
    try:
        # timeout keeps a stalled API from hanging the Gradio callback.
        response = requests.get(url, timeout=10)
        response.raise_for_status()  # Raise an exception for bad status codes
        data = response.json()
        if data.get("data") and data["data"]["results"]:
            song = data["data"]["results"][0]
            title = song["name"]
            # The API has shipped several response shapes; try each in turn.
            artist = (
                song.get("artists", {}).get("primary", [{}])[0].get("name")
                or song.get("primaryArtists")
                or song.get("artist")
                or "Unknown Artist"
            )
            return f"šŸŽ¶ **{title}** by *{artist}*\nšŸ”— [Listen here]({song['url']})"
    except requests.exceptions.RequestException as e:
        print("Song fetch error:", e)
    except Exception as e:
        print("Song fetch error:", e)
    return "šŸ˜• No song found for this mood."


# === GRADIO INTERFACE FUNCTION ===
def detect_and_recommend(audio_path):
    """Gradio callback: detect the mood of a voice clip and recommend a song."""
    if model:  # Check if model is available
        mood = predict_emotion(audio_path)
        song = get_song_from_mood(mood)
        return f"šŸŽ­ Detected Mood: **{mood}**\n\n{song}"
    return "Model not trained or loaded. Cannot detect mood."


# === LAUNCH GRADIO APP ===
if model is None:
    print("Model could not be trained or loaded. Exiting.")
    # You might want to add sys.exit() here if running as a script
else:
    gr.Interface(
        fn=detect_and_recommend,
        inputs=gr.Audio(type="filepath", label="šŸŽ™ Upload Your Voice"),
        outputs="markdown",
        title="šŸŽ¤ Voice2Vibes (Offline Version)",
        description="Detects mood from your voice using your dataset and recommends a matching Tamil song šŸŽ¶",
    ).launch()