dfsfd

- app.py +30 -16
- requirements.txt +1 -0
app.py CHANGED

@@ -14,37 +14,51 @@ question = "Qual é o maior planeta do sistema solar?"
 before = datetime.datetime.now()
 
 # Load model directly
-from transformers import AutoTokenizer, AutoModelForCausalLM
+# from transformers import AutoTokenizer, AutoModelForCausalLM
 
 # tokenizer = AutoTokenizer.from_pretrained("01-ai/Yi-1.5-6B-Chat")
 # model = AutoModelForCausalLM.from_pretrained("01-ai/Yi-1.5-6B-Chat")
 
-from transformers import AutoTokenizer,
+from transformers import AutoTokenizer, TFRobertaModel
+import tensorflow as tf
 
-tokenizer = AutoTokenizer.from_pretrained("
-model =
+tokenizer = AutoTokenizer.from_pretrained("FacebookAI/roberta-base")
+model = TFRobertaModel.from_pretrained("FacebookAI/roberta-base")
 
 st.write('tokenizando...')
-
-inputs = tokenizer(prompt, return_tensors="pt")
-
-# Generate
+inputs = tokenizer(question, return_tensors="tf")
 
 st.write('gerando a saida...')
-
-output = tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
+outputs = model(inputs)
 
+last_hidden_states = outputs.last_hidden_state
 
-
+output = last_hidden_states
 
 st.write(output)
 
-# Use a pipeline as a high-level helper
-# from transformers import pipeline
 
-#
-#
-#
+# st.write('tokenizando...')
+# prompt = "Qual é o maior planeta do sistema solar ?"
+# # inputs = tokenizer(prompt, return_tensors="pt")
+
+# # Generate
+
+# st.write('gerando a saida...')
+# # generate_ids = model.generate(inputs.input_ids, max_length=30)
+# # output = tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
+
+
+# st.write('saída gerada')
+
+# st.write(output)
+
+# # Use a pipeline as a high-level helper
+# # from transformers import pipeline
+
+# # messages = [
+# #     {"role": "user", "content": question},
+# # ]
 
 # print('gerando a saida...')
 
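Note: this hunk swaps the earlier (incomplete) causal-LM code for an encoder-only RoBERTa model on the TensorFlow backend. Because roberta-base is an encoder with no generate() method, the app no longer produces a text answer to the question: it tokenizes the string, runs a single forward pass, and writes the raw last_hidden_state tensor to the page. Below is a minimal sketch of the resulting script, reconstructed from the hunk above; the streamlit/datetime imports and the question assignment sit outside the diff and are assumptions.

import datetime

import streamlit as st
import tensorflow as tf  # added in the diff; not used directly, fails fast if the TF backend is missing
from transformers import AutoTokenizer, TFRobertaModel

# Assumed from the hunk-header context (the line just above the diff).
question = "Qual é o maior planeta do sistema solar?"

before = datetime.datetime.now()

tokenizer = AutoTokenizer.from_pretrained("FacebookAI/roberta-base")
model = TFRobertaModel.from_pretrained("FacebookAI/roberta-base")

st.write('tokenizando...')
inputs = tokenizer(question, return_tensors="tf")  # dict of tf.Tensor

st.write('gerando a saida...')
outputs = model(inputs)  # forward pass only; encoder models have no .generate()

# Shape (batch_size, sequence_length, hidden_size), i.e. (1, n_tokens, 768) for roberta-base.
output = outputs.last_hidden_state

st.write(output)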
requirements.txt CHANGED

@@ -3,4 +3,5 @@ transformers==4.44.0
 torch
 optimum
 auto_gptq==0.5.0
+tensorflow
 
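Note: the tensorflow entry is what makes TFRobertaModel loadable; transformers ships both PyTorch and TensorFlow model classes, and the TF-prefixed ones raise an import error when no TensorFlow installation is found. With the addition, the portion of requirements.txt visible in this diff reads as follows (any lines above transformers==4.44.0 fall outside the hunk):

transformers==4.44.0
torch
optimum
auto_gptq==0.5.0
tensorflow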