Commit 79e3005
Parent(s): c8b9a71

Update from Kaggle notebook

Files changed:
- app.py +113 -0
- requirements.txt +2 -0
app.py
ADDED
@@ -0,0 +1,113 @@
import gradio as gr

def process_input(user_input):
    """Process user input through the model and return the result."""
    messages = [{"role": "user", "content": user_input}]

    # Apply chat template and generate response
    input_tensor = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt").to(model.device)
    outputs = model.generate(input_tensor, max_new_tokens=300, pad_token_id=tokenizer.eos_token_id)
    result = tokenizer.decode(outputs[0][input_tensor.shape[1]:], skip_special_tokens=True)

    return result

from peft import PeftModel

output_weights_path = "/kaggle/working/fine_tuned_deepseek_math_weights.pth"
torch.save(model.state_dict(), output_weights_path)

import gradio as gr

def process_input(user_input):
    """Process user input through the model and return the result."""
    messages = [{"role": "user", "content": user_input}]

    # Apply chat template and generate response
    input_tensor = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt").to(model.device)
    outputs = model.generate(input_tensor, max_new_tokens=300, pad_token_id=tokenizer.eos_token_id)
    result = tokenizer.decode(outputs[0][input_tensor.shape[1]:], skip_special_tokens=True)

    return result

# Create Gradio interface
demo = gr.Interface(
    fn=process_input,
    inputs=gr.Textbox(placeholder="Enter your equation (e.g. 🥕 ÷ (🍔 - 🍟) = 2, 🍔 = 7, 🍟 = 3)"),
    outputs=gr.Textbox(label="Model Output"),
    title="Emoji Math Solver",
    description="Enter a math equation with emojis, and the model will solve it."
)

demo.launch(share=True)

import os
from getpass import getpass
from huggingface_hub import HfApi, Repository
import re

# Get your Hugging Face token
hf_token = getpass("Enter your Hugging Face token: ")
api = HfApi(token=hf_token)

# Get your Space name (username/space-name)
space_name = input("Enter your Hugging Face Space name (username/space-name): ")

# Extract the Gradio code from your notebook
# This assumes your Gradio app is defined in a cell or cells in your notebook
from IPython import get_ipython

# Get all cells from the notebook
cells = get_ipython().user_ns.get('In', [])

# Extract cells that contain Gradio code
gradio_code = []
in_gradio_block = False
for cell in cells:
    # Look for cells that import gradio or define the interface
    if 'import gradio' in cell or 'gr.Interface' in cell or in_gradio_block:
        in_gradio_block = True
        gradio_code.append(cell)
    # If we find a cell that seems to end the Gradio app definition
    elif in_gradio_block and ('if __name__' in cell or 'demo.launch()' in cell):
        gradio_code.append(cell)
        in_gradio_block = False

# Combine the code and ensure it has a launch method
combined_code = "\n\n".join(gradio_code)

# Make sure the app launches when run
if 'if __name__ == "__main__"' not in combined_code:
    combined_code += '\n\nif __name__ == "__main__":\n    demo.launch()'

# Save to app.py
with open("app.py", "w") as f:
    f.write(combined_code)

print("Extracted Gradio code and saved to app.py")

# Clone the existing Space repository
repo = Repository(
    local_dir="space_repo",
    clone_from=f"https://huggingface.co/spaces/{space_name}",
    token=hf_token,
    git_user="marwashahid",
    git_email="[email protected]"
)

# Copy app.py to the repository
import shutil
shutil.copy("app.py", "space_repo/app.py")

# Add requirements if needed
requirements = """
gradio>=3.50.2
"""
with open("space_repo/requirements.txt", "w") as f:
    f.write(requirements)

# Commit and push changes
repo.git_add()
repo.git_commit("Update from Kaggle notebook")
repo.git_push()

print(f"Successfully deployed to https://huggingface.co/spaces/{space_name}")
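Note: as committed, app.py references `model` and `tokenizer` before either is defined, so the Space will fail at startup; the `torch.save(...)` call also uses `torch` without importing it and assumes a fine-tuned model object left over from the Kaggle session, and `PeftModel` is imported but never used. A minimal sketch of the setup the script appears to assume — the base-model ID, dtype, and adapter path below are guesses, not recorded in the commit:

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

# Hypothetical setup (not part of the commit): provide the `model` and
# `tokenizer` that process_input() and torch.save() rely on.
base_model_id = "deepseek-ai/deepseek-math-7b-instruct"  # assumed base model
tokenizer = AutoTokenizer.from_pretrained(base_model_id)
model = AutoModelForCausalLM.from_pretrained(
    base_model_id,
    torch_dtype=torch.bfloat16,  # assumes a bf16-capable GPU
    device_map="auto",
)
# If the fine-tuned weights are a LoRA adapter, they would be attached here;
# the adapter path is illustrative only.
# model = PeftModel.from_pretrained(model, "/kaggle/working/adapter")
model.eval()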
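Separately, the `Repository` class used above is deprecated in recent huggingface_hub releases in favor of the HTTP-based API. Since the script already builds an `HfApi` client (`api`), the clone/copy/commit/push sequence could be collapsed into one upload per file — a sketch reusing the same `space_name` and token:

# Sketch of an HTTP-based alternative to the git Repository workflow.
api.upload_file(
    path_or_fileobj="app.py",
    path_in_repo="app.py",
    repo_id=space_name,
    repo_type="space",
    commit_message="Update from Kaggle notebook",
)

This avoids the local clone in space_repo/ entirely.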
requirements.txt
ADDED
@@ -0,0 +1,2 @@
gradio>=3.50.2
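As committed, requirements.txt pins only gradio, but app.py also imports torch, peft, huggingface_hub, and IPython, none of which a fresh Space container is guaranteed to provide. A fuller file would likely look something like this (the extra packages are inferred from the imports above, not recorded in the commit):

gradio>=3.50.2
torch
peft
huggingface_hub
ipython

A cleaner fix is to keep only the Gradio half of app.py on the Space: the notebook-extraction and deployment code (getpass, IPython, Repository) is meant to run in Kaggle, not in the deployed app.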