import streamlit as st
from transformers import AutoTokenizer, AutoModelForCausalLM

# Cache the tokenizer and model so they are loaded only once per session
@st.cache_resource(show_spinner=False)
def load_model():
    model_name = "Alijeff1214/DeutscheLexAI_BGB_2.0"
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(model_name)
    return tokenizer, model

tokenizer, model = load_model()

st.title("DeutscheLexAI_BGB Chat Interface")
st.write("Interact with the fine-tuned Qwen2.5-3B model for German legal texts!")

user_input = st.text_input("Enter your question or prompt:")

if st.button("Generate Response") and user_input:
    # Tokenize the prompt and generate a response (adjust parameters as needed)
    inputs = tokenizer(user_input, return_tensors="pt")
    outputs = model.generate(**inputs, max_length=500, do_sample=True, temperature=0.7)
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    st.text_area("Model Response:", value=response, height=300)
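To try the interface locally (assuming the script is saved as app.py and that streamlit, transformers, and torch are installed), a minimal run command is:

    streamlit run app.py

On first launch the ~3B-parameter model is downloaded from the Hugging Face Hub, so expect some delay and ensure enough memory (or a GPU) is available.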