Spaces:
Build error
```python
import streamlit as st
from transformers import AutoModelForCausalLM, AutoTokenizer
from datasets import load_dataset
from huggingface_hub import login

# Authenticate to Hugging Face
hugging_face_token = "your_hugging_face_api_token"  # Replace with your actual token
login(hugging_face_token)

# Load the dataset
ds = load_dataset("Vezora/Open-Critic-GPT")
st.write("Dataset")

# Load the model and tokenizer
model_name = "meta-llama/Meta-Llama-3-8B"
model = AutoModelForCausalLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)

# Generate a response from the model for a given prompt
def generate_response(human_text):
    inputs = tokenizer.encode(human_text, return_tensors="pt")
    outputs = model.generate(inputs, max_new_tokens=50, num_beams=5, early_stopping=True)
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return response

# Display the first few examples in the dataset alongside the model's responses
for i, x in enumerate(ds["train"]):
    if i >= 3:
        break
    col1, col2, col3 = st.columns(3)
    with col1:
        st.code(x["Human"])
    with col2:
        st.write(x["Assistant"])
    with col3:
        # Generate and display the model's response
        response = generate_response(x["Human"])
        st.write(response)
```
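A Spaces build error usually comes from the environment rather than the app logic: with the Streamlit SDK, pip dependencies are installed from a requirements.txt at the repo root, and a missing package will fail the build before the app ever runs. A minimal sketch of such a file for this script is below; the exact package set and whether you pin versions is an assumption to adjust for your Space.

```text
# Sketch of requirements.txt for this Space (assumed package set; add version pins as needed)
transformers
datasets
huggingface_hub
torch
accelerate
```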
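Separately, instead of hardcoding the token in the script, the usual route on Spaces is to store it as a secret in the Space settings, which is then exposed to the app as an environment variable. This is a minimal sketch assuming you have created a secret named HF_TOKEN (the name is an assumption):

```python
import os
from huggingface_hub import login

# Assumes a Space secret named HF_TOKEN; Spaces expose secrets as environment variables at runtime
login(os.environ["HF_TOKEN"])
```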