import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
import gradio as gr
import requests

model_name = "Writer/palmyra-small"
tokenizer = AutoTokenizer.from_pretrained(model_name)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = AutoModelForCausalLM.from_pretrained(model_name).to(device)


def get_movie_info(movie_title):
    api_key = "20e959f0f28e6b3e3de49c50f358538a"  # Replace with your own TMDb API key
    search_url = "https://api.themoviedb.org/3/search/movie"

    # Make a search query to TMDb
    params = {
        "api_key": api_key,
        "query": movie_title,
        "language": "en-US",
        "page": 1,
    }

    try:
        search_response = requests.get(search_url, params=params, timeout=10)
        search_data = search_response.json()

        # Check if any results are found
        if search_data.get("results"):
            movie_id = search_data["results"][0]["id"]

            # Fetch detailed information using the movie ID
            details_url = f"https://api.themoviedb.org/3/movie/{movie_id}"
            details_params = {
                "api_key": api_key,
                "language": "en-US",
            }
            details_response = requests.get(details_url, params=details_params, timeout=10)
            details_data = details_response.json()

            # Extract relevant information
            title = details_data.get("title", "Unknown Title")
            release_date = details_data.get("release_date", "")
            year = release_date[:4] if release_date else "Unknown Year"
            genre = ", ".join(g["name"] for g in details_data.get("genres", []))

            return f"Title: {title}, Year: {year}, Genre: {genre}"
        else:
            return "Movie not found"
    except Exception as e:
        return f"Error: {e}"


def generate_response(prompt):
    input_text_template = (
        "A chat between a curious user and an artificial intelligence assistant. "
        "The assistant gives helpful, detailed, and polite answers to the user's questions. "
        f"USER: {prompt} "
        "ASSISTANT:"
    )

    # Call the get_movie_info function to enrich the response
    movie_info = get_movie_info(prompt)

    # Concatenate the movie info with the input template
    input_text_template += f" Movie Info: {movie_info}"

    model_inputs = tokenizer(input_text_template, return_tensors="pt").to(device)

    gen_conf = {
        "top_k": 20,
        "max_length": 200,
        "temperature": 0.6,
        "do_sample": True,
        "eos_token_id": tokenizer.eos_token_id,
    }

    output = model.generate(**model_inputs, **gen_conf)
    generated_text = tokenizer.decode(output[0], skip_special_tokens=True)

    return generated_text


# Create Gradio Interface
iface = gr.Interface(fn=generate_response, inputs="text", outputs="text")
iface.launch()
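
# A minimal usage sketch: the two functions above can also be exercised
# directly (e.g. from a REPL), without the Gradio UI. The prompt and the
# sample output shown here are illustrative assumptions, not guaranteed
# TMDb or model behavior:
#
#   print(get_movie_info("Inception"))
#   # e.g. -> "Title: Inception, Year: 2010, Genre: Action, Science Fiction, Adventure"
#   print(generate_response("Tell me about the movie Inception"))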