Create app-test-train.py
app-test-train.py  ADDED  (+140 −0)
@@ -0,0 +1,140 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import os
from huggingface_hub import login
import gradio as gr
#from transformers import pipeline
import torch
from utils import *
from presets import *
# AutoTokenizer and AutoModelForCausalLM are needed by load_tokenizer_and_model below
from transformers import AutoTokenizer, AutoModelForCausalLM, Trainer, TrainingArguments
import numpy as np
import evaluate
import pandas as pd
import sklearn
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, recall_score, precision_score, f1_score


#####################################################
# Helper functions for testing
#####################################################
# Generate response - after training, test how the model performs
def generate_response(prompt, model, tokenizer):
    input_ids = tokenizer.encode(prompt, return_tensors="pt")
    output = model.generate(input_ids, max_length=100)
    response = tokenizer.decode(output[:, input_ids.shape[-1]:][0], skip_special_tokens=True)
    return response
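
# A sketch of a variant that reserves a fixed budget for newly generated tokens
# (max_length above also counts the prompt tokens, so a long prompt can leave
# little room for the answer). Assumes a transformers version that supports
# max_new_tokens; the function name is ours, not part of the original script.
def generate_response_new_tokens(prompt, model, tokenizer, new_tokens=100):
    input_ids = tokenizer.encode(prompt, return_tensors="pt").to(model.device)
    output = model.generate(input_ids, max_new_tokens=new_tokens)
    return tokenizer.decode(output[0, input_ids.shape[-1]:], skip_special_tokens=True)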


def generate_prompt_with_history(text, history, tokenizer, max_length=2048):
    #prompt = "The following is a conversation between a human and an AI assistant named Baize (named after a mythical creature in Chinese folklore). Baize is an open-source AI assistant developed by UCSD and Sun Yat-Sen University. The human and the AI assistant take turns chatting. Human statements start with [|Human|] and AI assistant statements start with [|AI|]. The AI assistant always provides responses in as much detail as possible, and in Markdown format. The AI assistant always declines to engage with topics, questions and instructions related to unethical, controversial, or sensitive issues. Complete the transcript in exactly that format.\n[|Human|]Hello!\n[|AI|]Hi!"
    prompt = "Das folgende ist eine Unterhaltung zwischen einem Menschen und einem KI-Assistenten, der Baize genannt wird. Baize ist ein open-source KI-Assistent, der von UCSD entwickelt wurde. Der Mensch und der KI-Assistent chatten abwechselnd miteinander in deutsch. Die Antworten des KI Assistenten sind immer so ausführlich wie möglich und in Markdown Schreibweise und in deutscher Sprache. Wenn nötig übersetzt er sie ins Deutsche. Die Antworten des KI-Assistenten vermeiden Themen und Antworten zu unethischen, kontroversen oder sensiblen Themen. Die Antworten sind immer sehr höflich formuliert.\n[|Human|]Hallo!\n[|AI|]Hi!"
    history = ["\n[|Human|]{}\n[|AI|]{}".format(x[0], x[1]) for x in history]
    history.append("\n[|Human|]{}\n[|AI|]".format(text))
    history_text = ""
    flag = False
    # Walk the history from newest to oldest turn and keep prepending turns for
    # as long as the tokenized prompt still fits within max_length.
    for x in history[::-1]:
        if tokenizer(prompt + history_text + x, return_tensors="pt")['input_ids'].size(-1) <= max_length:
            history_text = x + history_text
            flag = True
        else:
            break
    if flag:
        return prompt + history_text, tokenizer(prompt + history_text, return_tensors="pt")
    else:
        return None
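
# Expected shape of `history`: a list of (user, assistant) turn pairs. A minimal
# usage sketch (tokenizer_neu is only loaded further below in this script):
#
#     full_prompt, inputs = generate_prompt_with_history(
#         "Und was kostet ein TIS?",
#         [("Was ist ein TIS?", "Ein TIS ist ...")],
#         tokenizer_neu,
#     )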


def load_tokenizer_and_model(base_model, load_8bit=False):
    if torch.cuda.is_available():
        device = "cuda"
    else:
        device = "cpu"

    tokenizer = AutoTokenizer.from_pretrained(base_model, use_fast=True, use_auth_token=True)
    if device == "cuda":
        model = AutoModelForCausalLM.from_pretrained(
            base_model,
            load_in_8bit=load_8bit,
            torch_dtype=torch.float16,
            device_map="auto",
            use_auth_token=True
        )
    else:
        model = AutoModelForCausalLM.from_pretrained(
            base_model, device_map={"": device}, low_cpu_mem_usage=True
        )

    #if not load_8bit:
        #model.half()  # seems to fix bugs for some users.

    model.eval()
    return tokenizer, model, device
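
# Note: load_in_8bit=True additionally requires the bitsandbytes and accelerate
# packages, and newer transformers releases replace use_auth_token=... with
# token=... . A hedged sketch of the 8-bit call path:
#
#     tokenizer_8bit, model_8bit, device_8bit = load_tokenizer_and_model(
#         "alexkueck/test-tis-1", load_8bit=True
#     )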


###################################################################################
###################################################################################

#######################################
# Load model
print("load model_neu")
login(token=os.environ["HF_ACCESS_TOKEN"])
model_name_neu = "alexkueck/test-tis-1"

# load_tokenizer_and_model returns (tokenizer, model, device), in that order
tokenizer_neu, model_neu, device_neu = load_tokenizer_and_model(model_name_neu, False)
print("done load")



##############################################
# Test the fine-tuned model
##############################################
print("Test")
prompt = "Was ist ein TIS?"

#####################################
# via generate_response - not possible with all tokenizers
#response = generate_response(prompt, model_neu, tokenizer_neu)
#print(response)
#print("response done")

'''
#######################################
# Encoding with the tokenizer...
#encoding = tokenizer(text, return_tensors="pt")
#encoding = {k: v.to(trainer.model.device) for k, v in encoding.items()}
#outputs = trainer.model(**encoding)
#logits = outputs.logits
#print(logits.shape)
#greedy_output = model.generate(input_ids, max_length=50)
print("Output:\n")
#print(tokenizer.decode(outputs[0], skip_special_tokens=True))
'''

########################################
# via the predict function (imported from utils via the star import above)
print("Predict") #[['|Human|'], ['|AI|']]
antwort = predict(model_neu, tokenizer_neu, device_neu, prompt, ['\n[|Human|]Was ist TIS?\n[|AI|]'],
                  top_p=5,  # note: top_p is normally a float in (0, 1]; 5 looks more like a top_k value
                  temperature=0.8,
                  max_length_tokens=1024,
                  max_context_length_tokens=2048,)
print(antwort)
print("done Predict")

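# If predict is unavailable, a minimal sketch of the same flow using only the
# helpers defined in this file (generate_prompt_with_history returns the full
# prompt plus its tokenized form, or None if nothing fits):
#
#     result = generate_prompt_with_history(prompt, [], tokenizer_neu)
#     if result is not None:
#         full_prompt, inputs = result
#         out = model_neu.generate(inputs["input_ids"].to(model_neu.device),
#                                  max_new_tokens=256, do_sample=True,
#                                  temperature=0.8, top_p=0.9)
#         print(tokenizer_neu.decode(out[0, inputs["input_ids"].shape[-1]:],
#                                    skip_special_tokens=True))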


#######################################################################
# Display with Gradio
'''
with gr.Blocks() as demo:
    name = gr.Textbox(label="Model")
    output = gr.Textbox(label="Output Box")
    start_btn = gr.Button("Start")
    # trainieren_neu is presumably provided by utils/presets via the star imports
    start_btn.click(fn=trainieren_neu, inputs=name, outputs=output, api_name="trainieren_neu")
demo.queue(default_enabled=True).launch(debug=True)
'''
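
# The commented-out block above wires the UI to a training function. A minimal
# sketch of an inference UI for the model loaded in this script instead
# (generate_response is the helper defined at the top of this file):
#
#     def beantworten(frage):
#         return generate_response(frage, model_neu, tokenizer_neu)
#
#     with gr.Blocks() as demo_infer:
#         frage = gr.Textbox(label="Frage")
#         antwort_box = gr.Textbox(label="Antwort")
#         gr.Button("Fragen").click(fn=beantworten, inputs=frage, outputs=antwort_box)
#     demo_infer.queue().launch()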