Upload app.py
Browse files
app.py
ADDED
@@ -0,0 +1,65 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# git clone https://huggingface.co/Pipatpong/vcm_santa
|
2 |
+
|
3 |
+
import gradio as gr
|
4 |
+
import re
|
5 |
+
import torch
|
6 |
+
from transformers import AutoModelForCausalLM, AutoTokenizer
|
7 |
+
|
8 |
+
# Hugging Face Hub checkpoint for the code-generation model.
checkpoint = "Pipatpong/vcm_santa"
# NOTE(review): `device` is computed but never used below — `device_map="auto"`
# already handles placement; confirm before removing.
device = "cuda" if torch.cuda.is_available() else "cpu"
# trust_remote_code is required because this checkpoint ships custom model code;
# load_in_8bit quantizes weights to reduce GPU memory (requires bitsandbytes).
tokenizer = AutoTokenizer.from_pretrained(checkpoint, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(checkpoint, trust_remote_code=True, device_map="auto", load_in_8bit=True)
|
12 |
+
|
13 |
+
def generate(text, max_length, num_return_sequences=1):
    """Generate model output for *text* and return it prefixed with "Assignment : ".

    Only the first generated sequence is decoded; anything from the first
    '#' onward in the decoded text is dropped.

    Args:
        text: the prompt string.
        max_length: maximum total token length passed to ``model.generate``.
        num_return_sequences: number of sequences to generate (only the
            first is used).

    Returns:
        The decoded (and '#'-truncated) text, prefixed with "Assignment : ".
    """
    inputs = tokenizer.encode(text, padding=False, add_special_tokens=False, return_tensors="pt")
    outputs = model.generate(inputs, max_length=max_length, num_return_sequences=num_return_sequences)
    # Decode once instead of twice.
    decoded = tokenizer.decode(outputs[0])
    # BUG FIX: the original guard was `... if "#" else ...`, which tests the
    # truthiness of the literal "#" (always True), not whether the decoded
    # text contains '#'. str.split returns the whole string when the
    # separator is absent, so the output is unchanged, but the condition now
    # says what was meant.
    if "#" in decoded:
        decoded = decoded.split("#")[0]
    return "Assignment : " + decoded
|
18 |
+
|
19 |
+
|
20 |
+
def extract_functions(text):
    """Scan *text* for ``def ... return ...`` blocks and build skeletons.

    Each match is rewritten as the original signature, a ``# Code Here``
    placeholder body, and the original return statement.

    Args:
        text: source text to scan (typically model-generated code).

    Returns:
        A list of skeleton strings, one per matched function (empty list
        when nothing matches).
    """
    pattern = r'def\s+(\w+)\((.*?)\):([\s\S]*?)return\s+(.*?)\n'
    return [
        f"def {name}({params}):\n # Code Here\n return {ret}\n"
        for name, params, _body, ret in re.findall(pattern, text, flags=re.MULTILINE)
    ]
|
35 |
+
|
36 |
+
def assignment(text, max_length):
    """Gradio callback for the "Assignment" button.

    Generates code from the prompt, then strips the function bodies so the
    result reads as an exercise skeleton.

    BUG FIX: the original ``for function in ...: return function`` loop
    returned only the FIRST extracted skeleton and an implicit ``None`` when
    nothing matched. Now every skeleton is returned (newline-joined) and an
    empty string is returned when no function could be extracted.

    Args:
        text: the user prompt.
        max_length: forwarded to ``generate``.

    Returns:
        All extracted function skeletons joined with newlines ("" if none).
    """
    skeletons = extract_functions(generate(text, max_length))
    return "\n".join(skeletons)
|
40 |
+
|
41 |
+
# UI layout: a prompt box + length slider feeding two buttons that share one
# output textbox ("Assignment" strips bodies, "Answers" shows raw generation).
demo = gr.Blocks()

with demo:
    with gr.Row():
        with gr.Column():
            # FIX: gr.inputs.* / gr.outputs.* are the deprecated Gradio 2.x
            # namespaces (removed in Gradio 4) — use the top-level components.
            inputs = [
                gr.Textbox(placeholder="Type here and click the button for the desired action.", label="Prompt"),
                gr.Slider(30, 150, step=10, label="Max_length"),
            ]
            outputs = gr.Textbox(label="Generated Text")

        with gr.Row():
            b1 = gr.Button("Assignment")
            b2 = gr.Button("Answers")

    # Wire both buttons to the same inputs/output; the callbacks are defined
    # above in this file.
    b1.click(assignment, inputs, outputs)
    b2.click(generate, inputs, outputs)

    examples = [
        ["generate a python for sum number"],
        ["generate a python function to find max min element of list"],
    ]

    gr.Examples(examples=examples, inputs=inputs, cache_examples=False)

# share=True exposes a public tunnel URL; intended for hosted-demo use.
demo.launch(share=True, debug=False)
|