vtrv.vls committed

tiny

Commit: 7307761
Parent(s): 4881f29
app.py
CHANGED
@@ -7,7 +7,7 @@ from models import get_tiny_llama, response_tiny_llama
 from constants import css, js_code, js_light
 
 MERA_table = None
-TINY_LLAMA =
+TINY_LLAMA = None
 
 def giga_gen(content):
     res = generate(content,'auth_token.json')
@@ -74,6 +74,7 @@ if __name__ == "__main__":
     # data_load(args.result_file)
     # TYPES = ["number", "markdown", "number"]
 
+    TINY_LLAMA = get_tiny_llama()
     demo = build_demo()
     demo.launch(share=args.share, height=3000, width="110%") # share=args.share
 
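The change here defers model loading: the module-level TINY_LLAMA assignment becomes a None placeholder, and get_tiny_llama() is only called inside the __main__ block, so importing app.py no longer pulls the model. A minimal sketch of the resulting pattern follows; only TINY_LLAMA, get_tiny_llama, response_tiny_llama, build_demo and the launch call come from this diff, while the Gradio wiring and the response_tiny_llama call signature are assumptions for illustration.

import gradio as gr
from models import get_tiny_llama, response_tiny_llama

TINY_LLAMA = None  # placeholder at import time; no model weights are loaded yet

def tiny_gen(content):
    # Demo callback; reads the global that the __main__ block fills in before launch.
    # The argument order of response_tiny_llama is an assumption.
    return response_tiny_llama(TINY_LLAMA, content)

def build_demo():
    # Assumed Gradio wiring, just enough to exercise the callback.
    with gr.Blocks() as demo:
        inp = gr.Textbox(label="Prompt")
        out = gr.Textbox(label="Response")
        inp.submit(tiny_gen, inputs=inp, outputs=out)
    return demo

if __name__ == "__main__":
    TINY_LLAMA = get_tiny_llama()  # heavyweight load happens only when the Space actually starts
    demo = build_demo()
    demo.launch()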
models.py
CHANGED
@@ -18,6 +18,6 @@ def response_tiny_llama(
         {"role": "user", "content": content},
     ]
     prompt = pipe.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
-    outputs = pipe(prompt, max_new_tokens=
+    outputs = pipe(prompt, max_new_tokens=32, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)
 
-    return outputs[0]['generated_text']
+    return outputs[0]['generated_text'].split('<|assistant|>')[1]
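The models.py hunk pins the sampling parameters (max_new_tokens=32, do_sample=True, temperature 0.7, top_k 50, top_p 0.95) and strips the echoed prompt from the output: with a chat template applied via apply_chat_template, outputs[0]['generated_text'] contains the prompt followed by the completion, and the completion starts right after the '<|assistant|>' marker, so split('<|assistant|>')[1] returns just the model's reply. A self-contained sketch of how get_tiny_llama and response_tiny_llama could fit around these lines is below; the checkpoint id, dtype/device settings, system message and function signatures are assumptions, only the body lines shown in the hunk mirror the commit.

import torch
from transformers import pipeline

def get_tiny_llama():
    # Build the text-generation pipeline once; app.py now calls this lazily
    # from its __main__ block instead of at import time.
    return pipeline(
        "text-generation",
        model="TinyLlama/TinyLlama-1.1B-Chat-v1.0",  # assumed checkpoint
        torch_dtype=torch.bfloat16,
        device_map="auto",
    )

def response_tiny_llama(pipe, content):
    messages = [
        {"role": "system", "content": "You are a friendly chatbot."},  # assumed system prompt
        {"role": "user", "content": content},
    ]
    # Render the chat template into a single prompt string that ends with the
    # '<|assistant|>' generation marker.
    prompt = pipe.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
    outputs = pipe(prompt, max_new_tokens=32, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)
    # generated_text echoes the full prompt, so keep only what follows the marker.
    return outputs[0]['generated_text'].split('<|assistant|>')[1]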