import streamlit as st
import pandas as pd
import numpy as np
import tensorflow as tf
import joblib
import os
import zipfile
import tempfile
# Paths
BASE_DIR = os.path.dirname(__file__)
ZIP_MODEL_PATH = os.path.join(BASE_DIR, "recommender_model.zip")
MOVIES_PATH = os.path.join(BASE_DIR, "movies.csv")
ENCODINGS_PATH = os.path.join(BASE_DIR, "encodings.pkl")
@st.cache_resource
def load_model():
    try:
        extract_dir = os.path.join(tempfile.gettempdir(), "recommender_model")
        # Extract only if not already extracted
        if not os.path.exists(extract_dir):
            with zipfile.ZipFile(ZIP_MODEL_PATH, "r") as zip_ref:
                zip_ref.extractall(extract_dir)
        return tf.keras.models.load_model(extract_dir)
    except Exception as e:
        st.error(f"Failed to load model: {e}")
        st.stop()
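# NOTE (assumption): recommender_model.zip is expected to contain the model files at
# its top level (e.g. saved_model.pb, variables/), since load_model() points
# tf.keras.models.load_model directly at the extraction directory. If the archive
# wraps everything in a subfolder, that subfolder would need to be appended to the path.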
@st.cache_data
def load_assets():
    try:
        df_movies = pd.read_csv(MOVIES_PATH)
    except FileNotFoundError:
        st.error("❌ Error: movies.csv not found.")
        st.stop()
    try:
        user_map, movie_map = joblib.load(ENCODINGS_PATH)
    except FileNotFoundError:
        st.error("❌ Error: encodings.pkl not found.")
        st.stop()
    return df_movies, user_map, movie_map
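# NOTE (assumption): encodings.pkl is expected to hold a (user_map, movie_map) tuple of
# dicts mapping raw IDs to the contiguous integer indices the model was trained on,
# e.g. produced at training time roughly like (hypothetical "ratings" DataFrame, not
# part of this app):
#     user_map = {uid: i for i, uid in enumerate(ratings["userId"].unique())}
#     movie_map = {mid: i for i, mid in enumerate(ratings["movieId"].unique())}
#     joblib.dump((user_map, movie_map), "encodings.pkl")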
# Load model and assets
model = load_model()
movies_df, user2idx, movie2idx = load_assets()
reverse_movie_map = {v: k for k, v in movie2idx.items()}
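# Illustrative shape of the mappings (example values only):
#     movie2idx         = {1: 0, 3: 1, 6: 2, ...}   # raw movieId -> embedding index
#     reverse_movie_map = {0: 1, 1: 3, 2: 6, ...}   # embedding index -> raw movieId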
# UI
st.title("π¬ TensorFlow Movie Recommender")
st.write("Select some movies you've liked to get personalized recommendations:")
movie_titles = movies_df.set_index("movieId")["title"].to_dict()
movie_choices = [movie_titles[mid] for mid in movie2idx.keys() if mid in movie_titles]
selected_titles = st.multiselect("🎞️ Liked movies", sorted(movie_choices))
user_ratings = {}
for title in selected_titles:
    movie_id = next((k for k, v in movie_titles.items() if v == title), None)
    if movie_id:
        user_ratings[movie_id] = 5.0
if st.button("π― Get Recommendations"):
if not user_ratings:
st.warning("Please select at least one movie.")
else:
liked_indices = [movie2idx[m] for m in user_ratings if m in movie2idx]
if not liked_indices:
st.error("β οΈ No valid movie encodings found.")
st.stop()
avg_embedding = tf.reduce_mean(model.layers[2](tf.constant(liked_indices)), axis=0, keepdims=True)
all_movie_indices = tf.range(len(movie2idx))
movie_embeddings = model.layers[3](all_movie_indices)
scores = tf.reduce_sum(avg_embedding * movie_embeddings, axis=1).numpy()
top_indices = np.argsort(scores)[::-1]
recommended = []
for idx in top_indices:
mid = reverse_movie_map.get(idx)
if mid not in user_ratings and mid in movie_titles:
recommended.append((movie_titles[mid], scores[idx]))
if len(recommended) >= 10:
break
st.subheader("πΏ Top 10 Recommendations")
for title, score in recommended:
st.write(f"**{title}** β Score: `{score:.3f}`")