from transformers import AutoModelForCausalLM, AutoTokenizer
import spacy
import torch
import gradio as gr

# Spanish spaCy pipeline for part-of-speech tagging
# (install with: python -m spacy download es_core_news_sm).
nlp = spacy.load("es_core_news_sm")

# Candidate checkpoints. Caveats, judging from the repo names: the ggml and
# 4bit-128g entries appear to be quantized formats that a plain
# AutoModelForCausalLM.from_pretrained cannot load directly, and the lora
# entry appears to be an adapter meant to be applied on top of a base model.
MODEL_NAMES = [
    "stabilityai/stablelm-tuned-alpha-7b",
    "CRD716/ggml-LLaMa-65B-quantized",
    "RedXeol/bertin-gpt-j-6B-alpaca-4bit-128g",
    "bertin-project/bertin-alpaca-lora-7b",
]

# Lazy caches: eagerly loading four multi-billion-parameter models at import
# time would exhaust memory on most machines, so load each one on first use.
models = {}
tokenizers = {}


def get_model_and_tokenizer(model_name: str):
    if model_name not in models:
        models[model_name] = AutoModelForCausalLM.from_pretrained(model_name)
        tokenizers[model_name] = AutoTokenizer.from_pretrained(model_name)
    return models[model_name], tokenizers[model_name]

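# A hedged sketch (not part of the original app) of how the non-standard
# checkpoints above might be loaded with their matching tooling; BASE_MODEL_ID
# and the ggml file path below are placeholders, not verified pairings.
#
#   from peft import PeftModel
#   base = AutoModelForCausalLM.from_pretrained(BASE_MODEL_ID)  # hypothetical base id
#   model = PeftModel.from_pretrained(base, "bertin-project/bertin-alpaca-lora-7b")
#
#   from llama_cpp import Llama  # llama-cpp-python, for ggml files
#   llm = Llama(model_path="path/to/ggml-model.bin")  # placeholder path
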
def generate(model_name: str) -> str:
    model, tokenizer = get_model_and_tokenizer(model_name)

    # Seed generation with the Spanish article "El" and sample a
    # continuation; do_sample=True is required for temperature to
    # have any effect.
    input_ids = tokenizer.encode("El", return_tensors="pt")
    with torch.no_grad():
        output = model.generate(
            input_ids,
            max_length=50,
            num_return_sequences=1,
            do_sample=True,
            temperature=1.0,
        )
    return tokenizer.decode(output[0], skip_special_tokens=True)

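# Example call, assuming the selected checkpoint is a standard Transformers
# model that fits in memory:
#
#   text = generate("stabilityai/stablelm-tuned-alpha-7b")
#   print(text)  # a sampled continuation of the prompt "El", up to 50 tokens
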
def process(sentence: str) -> str:
    # Tag each token with its universal POS label and render the pairs as
    # plain text, since the output component is a Textbox.
    doc = nlp(sentence)
    tagged_words = [(token.text, token.pos_) for token in doc]
    return "\n".join(f"{text}\t{pos}" for text, pos in tagged_words)

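# Quick sanity check, assuming es_core_news_sm is installed; the tags come
# from spaCy's universal POS scheme:
#
#   print(process("El gato duerme"))
#   # El      DET
#   # gato    NOUN
#   # duerme  VERB
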
# gr.inputs / gr.outputs were deprecated and later removed from Gradio;
# instantiate the component classes directly instead.
generate_interface = gr.Interface(
    fn=generate,
    inputs=gr.Dropdown(choices=MODEL_NAMES, label="Model"),
    outputs=gr.Textbox(label="Generated Sentence"),
)
process_interface = gr.Interface(
    fn=process,
    inputs=gr.Textbox(label="Sentence"),
    outputs=gr.Textbox(label="Processed Sentence"),
)

# launch() blocks, so launching the two interfaces back to back would never
# start the second one; serve both from a single tabbed app instead.
demo = gr.TabbedInterface(
    [generate_interface, process_interface],
    tab_names=["Generate", "POS Tag"],
)
demo.launch(share=True)